File: cpus.c
Location: line 944, column 12
Description: Access to field 'stopped' results in a dereference of a null pointer (loaded from field 'tqh_first')
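The flagged expression sits in qemu_tcg_cpu_thread_fn() at line 944 below: QTAILQ_FIRST(&cpus) loads cpus.tqh_first, and on the path the analyzer reports the CPU list is still empty, so the load yields NULL and the subsequent ->stopped access dereferences it. Whether that path is reachable depends on start-up ordering the analyzer cannot see (the TCG thread is normally created only after a CPU has been added to the list), but a guard would make the invariant explicit. A minimal sketch of one possible guard, not necessarily the upstream fix, written with first_cpu (QEMU's alias for cpus.tqh_first, as visible at line 1048 of the listing):

    /* Sketch only: re-test the list head on each iteration so an empty
     * list is never dereferenced. If no CPU ever appears this waits
     * forever, matching the existing "wait for initial kick-off" intent. */
    while (first_cpu == NULL || first_cpu->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }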
1 | /* | |||||
2 | * QEMU System Emulator | |||||
3 | * | |||||
4 | * Copyright (c) 2003-2008 Fabrice Bellard | |||||
5 | * | |||||
6 | * Permission is hereby granted, free of charge, to any person obtaining a copy | |||||
7 | * of this software and associated documentation files (the "Software"), to deal | |||||
8 | * in the Software without restriction, including without limitation the rights | |||||
9 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |||||
10 | * copies of the Software, and to permit persons to whom the Software is | |||||
11 | * furnished to do so, subject to the following conditions: | |||||
12 | * | |||||
13 | * The above copyright notice and this permission notice shall be included in | |||||
14 | * all copies or substantial portions of the Software. | |||||
15 | * | |||||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |||||
19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||||
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |||||
21 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |||||
22 | * THE SOFTWARE. | |||||
23 | */ | |||||
24 | ||||||
25 | /* Needed early for CONFIG_BSD etc. */ | |||||
26 | #include "config-host.h" | |||||
27 | ||||||
28 | #include "monitor/monitor.h" | |||||
29 | #include "sysemu/sysemu.h" | |||||
30 | #include "exec/gdbstub.h" | |||||
31 | #include "sysemu/dma.h" | |||||
32 | #include "sysemu/kvm.h" | |||||
33 | #include "qmp-commands.h" | |||||
34 | ||||||
35 | #include "qemu/thread.h" | |||||
36 | #include "sysemu/cpus.h" | |||||
37 | #include "sysemu/qtest.h" | |||||
38 | #include "qemu/main-loop.h" | |||||
39 | #include "qemu/bitmap.h" | |||||
40 | #include "qemu/seqlock.h" | |||||
41 | ||||||
42 | #ifndef _WIN32 | |||||
43 | #include "qemu/compatfd.h" | |||||
44 | #endif | |||||
45 | ||||||
46 | #ifdef CONFIG_LINUX | |||||
47 | ||||||
48 | #include <sys/prctl.h> | |||||
49 | ||||||
50 | #ifndef PR_MCE_KILL | |||||
51 | #define PR_MCE_KILL 33 | |||||
52 | #endif | |||||
53 | ||||||
54 | #ifndef PR_MCE_KILL_SET | |||||
55 | #define PR_MCE_KILL_SET 1 | |||||
56 | #endif | |||||
57 | ||||||
58 | #ifndef PR_MCE_KILL_EARLY | |||||
59 | #define PR_MCE_KILL_EARLY 1 | |||||
60 | #endif | |||||
61 | ||||||
62 | #endif /* CONFIG_LINUX */ | |||||
63 | ||||||
64 | static CPUState *next_cpu; | |||||
65 | ||||||
66 | bool cpu_is_stopped(CPUState *cpu) | |||||
67 | { | |||||
68 | return cpu->stopped || !runstate_is_running(); | |||||
69 | } | |||||
70 | ||||||
71 | static bool cpu_thread_is_idle(CPUState *cpu) | |||||
72 | { | |||||
73 | if (cpu->stop || cpu->queued_work_first) { | |||||
74 | return false; | |||||
75 | } | |||||
76 | if (cpu_is_stopped(cpu)) { | |||||
77 | return true; | |||||
78 | } | |||||
79 | if (!cpu->halted || qemu_cpu_has_work(cpu) || | |||||
80 | kvm_halt_in_kernel()) { | |||||
81 | return false; | |||||
82 | } | |||||
83 | return true; | |||||
84 | } | |||||
85 | ||||||
86 | static bool all_cpu_threads_idle(void) | |||||
87 | { | |||||
88 | CPUState *cpu; | |||||
89 | ||||||
90 | CPU_FOREACH(cpu) { | |||||
91 | if (!cpu_thread_is_idle(cpu)) { | |||||
92 | return false; | |||||
93 | } | |||||
94 | } | |||||
95 | return true; | |||||
96 | } | |||||
97 | ||||||
98 | /***********************************************************/ | |||||
99 | /* guest cycle counter */ | |||||
100 | ||||||
101 | /* Protected by TimersState seqlock */ | |||||
102 | ||||||
103 | /* Compensate for varying guest execution speed. */ | |||||
104 | static int64_t qemu_icount_bias; | |||||
105 | static int64_t vm_clock_warp_start; | |||||
106 | /* Conversion factor from emulated instructions to virtual clock ticks. */ | |||||
107 | static int icount_time_shift; | |||||
108 | /* Arbitrarily pick 1MIPS as the minimum allowable speed. */ | |||||
109 | #define MAX_ICOUNT_SHIFT 10 | |||||
110 | ||||||
111 | /* Only written by TCG thread */ | |||||
112 | static int64_t qemu_icount; | |||||
113 | ||||||
114 | static QEMUTimer *icount_rt_timer; | |||||
115 | static QEMUTimer *icount_vm_timer; | |||||
116 | static QEMUTimer *icount_warp_timer; | |||||
117 | ||||||
118 | typedef struct TimersState { | |||||
119 | /* Protected by BQL. */ | |||||
120 | int64_t cpu_ticks_prev; | |||||
121 | int64_t cpu_ticks_offset; | |||||
122 | ||||||
123 | /* cpu_clock_offset can be read out of BQL, so protect it with | |||||
124 | * this lock. | |||||
125 | */ | |||||
126 | QemuSeqLock vm_clock_seqlock; | |||||
127 | int64_t cpu_clock_offset; | |||||
128 | int32_t cpu_ticks_enabled; | |||||
129 | int64_t dummy; | |||||
130 | } TimersState; | |||||
131 | ||||||
132 | static TimersState timers_state; | |||||
133 | ||||||
134 | /* Return the virtual CPU time, based on the instruction counter. */ | |||||
135 | static int64_t cpu_get_icount_locked(void) | |||||
136 | { | |||||
137 | int64_t icount; | |||||
138 | CPUState *cpu = current_cpu; | |||||
139 | ||||||
140 | icount = qemu_icount; | |||||
141 | if (cpu) { | |||||
142 | CPUArchState *env = cpu->env_ptr; | |||||
143 | if (!can_do_io(env)) { | |||||
144 | fprintf(stderr, "Bad clock read\n"); | |||||
145 | } | |||||
146 | icount -= (env->icount_decr.u16.low + env->icount_extra); | |||||
147 | } | |||||
148 | return qemu_icount_bias + (icount << icount_time_shift); | |||||
149 | } | |||||
150 | ||||||
151 | int64_t cpu_get_icount(void) | |||||
152 | { | |||||
153 | int64_t icount; | |||||
154 | unsigned start; | |||||
155 | ||||||
156 | do { | |||||
157 | start = seqlock_read_begin(&timers_state.vm_clock_seqlock); | |||||
158 | icount = cpu_get_icount_locked(); | |||||
159 | } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start)); | |||||
160 | ||||||
161 | return icount; | |||||
162 | } | |||||
163 | ||||||
164 | /* return the host CPU cycle counter and handle stop/restart */ | |||||
165 | /* Caller must hold the BQL */ | |||||
166 | int64_t cpu_get_ticks(void) | |||||
167 | { | |||||
168 | int64_t ticks; | |||||
169 | ||||||
170 | if (use_icount) { | |||||
171 | return cpu_get_icount(); | |||||
172 | } | |||||
173 | ||||||
174 | ticks = timers_state.cpu_ticks_offset; | |||||
175 | if (timers_state.cpu_ticks_enabled) { | |||||
176 | ticks += cpu_get_real_ticks(); | |||||
177 | } | |||||
178 | ||||||
179 | if (timers_state.cpu_ticks_prev > ticks) { | |||||
180 | /* Note: non increasing ticks may happen if the host uses | |||||
181 | software suspend */ | |||||
182 | timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks; | |||||
183 | ticks = timers_state.cpu_ticks_prev; | |||||
184 | } | |||||
185 | ||||||
186 | timers_state.cpu_ticks_prev = ticks; | |||||
187 | return ticks; | |||||
188 | } | |||||
189 | ||||||
190 | static int64_t cpu_get_clock_locked(void) | |||||
191 | { | |||||
192 | int64_t ticks; | |||||
193 | ||||||
194 | ticks = timers_state.cpu_clock_offset; | |||||
195 | if (timers_state.cpu_ticks_enabled) { | |||||
196 | ticks += get_clock(); | |||||
197 | } | |||||
198 | ||||||
199 | return ticks; | |||||
200 | } | |||||
201 | ||||||
202 | /* return the host CPU monotonic timer and handle stop/restart */ | |||||
203 | int64_t cpu_get_clock(void) | |||||
204 | { | |||||
205 | int64_t ti; | |||||
206 | unsigned start; | |||||
207 | ||||||
208 | do { | |||||
209 | start = seqlock_read_begin(&timers_state.vm_clock_seqlock); | |||||
210 | ti = cpu_get_clock_locked(); | |||||
211 | } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start)); | |||||
212 | ||||||
213 | return ti; | |||||
214 | } | |||||
215 | ||||||
216 | /* enable cpu_get_ticks() | |||||
217 | * Caller must hold BQL which serves as mutex for vm_clock_seqlock. | |||||
218 | */ | |||||
219 | void cpu_enable_ticks(void) | |||||
220 | { | |||||
221 | /* Here, the thing really protected by the seqlock is cpu_clock_offset. */ | |||||
222 | seqlock_write_lock(&timers_state.vm_clock_seqlock); | |||||
223 | if (!timers_state.cpu_ticks_enabled) { | |||||
224 | timers_state.cpu_ticks_offset -= cpu_get_real_ticks(); | |||||
225 | timers_state.cpu_clock_offset -= get_clock(); | |||||
226 | timers_state.cpu_ticks_enabled = 1; | |||||
227 | } | |||||
228 | seqlock_write_unlock(&timers_state.vm_clock_seqlock); | |||||
229 | } | |||||
230 | ||||||
231 | /* disable cpu_get_ticks() : the clock is stopped. You must not call | |||||
232 | * cpu_get_ticks() after that. | |||||
233 | * Caller must hold BQL which serves as mutex for vm_clock_seqlock. | |||||
234 | */ | |||||
235 | void cpu_disable_ticks(void) | |||||
236 | { | |||||
237 | /* Here, the thing really protected by the seqlock is cpu_clock_offset. */ | |||||
238 | seqlock_write_lock(&timers_state.vm_clock_seqlock); | |||||
239 | if (timers_state.cpu_ticks_enabled) { | |||||
240 | timers_state.cpu_ticks_offset += cpu_get_real_ticks(); | |||||
241 | timers_state.cpu_clock_offset = cpu_get_clock_locked(); | |||||
242 | timers_state.cpu_ticks_enabled = 0; | |||||
243 | } | |||||
244 | seqlock_write_unlock(&timers_state.vm_clock_seqlock); | |||||
245 | } | |||||
246 | ||||||
247 | /* Correlation between real and virtual time is always going to be | |||||
248 | fairly approximate, so ignore small variation. | |||||
249 | When the guest is idle real and virtual time will be aligned in | |||||
250 | the IO wait loop. */ | |||||
251 | #define ICOUNT_WOBBLE (get_ticks_per_sec() / 10) | |||||
252 | ||||||
253 | static void icount_adjust(void) | |||||
254 | { | |||||
255 | int64_t cur_time; | |||||
256 | int64_t cur_icount; | |||||
257 | int64_t delta; | |||||
258 | ||||||
259 | /* Protected by TimersState mutex. */ | |||||
260 | static int64_t last_delta; | |||||
261 | ||||||
262 | /* If the VM is not running, then do nothing. */ | |||||
263 | if (!runstate_is_running()) { | |||||
264 | return; | |||||
265 | } | |||||
266 | ||||||
267 | seqlock_write_lock(&timers_state.vm_clock_seqlock); | |||||
268 | cur_time = cpu_get_clock_locked(); | |||||
269 | cur_icount = cpu_get_icount_locked(); | |||||
270 | ||||||
271 | delta = cur_icount - cur_time; | |||||
272 | /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */ | |||||
273 | if (delta > 0 | |||||
274 | && last_delta + ICOUNT_WOBBLE < delta * 2 | |||||
275 | && icount_time_shift > 0) { | |||||
276 | /* The guest is getting too far ahead. Slow time down. */ | |||||
277 | icount_time_shift--; | |||||
278 | } | |||||
279 | if (delta < 0 | |||||
280 | && last_delta - ICOUNT_WOBBLE > delta * 2 | |||||
281 | && icount_time_shift < MAX_ICOUNT_SHIFT) { | |||||
282 | /* The guest is getting too far behind. Speed time up. */ | |||||
283 | icount_time_shift++; | |||||
284 | } | |||||
285 | last_delta = delta; | |||||
286 | qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift); | |||||
287 | seqlock_write_unlock(&timers_state.vm_clock_seqlock); | |||||
288 | } | |||||
289 | ||||||
290 | static void icount_adjust_rt(void *opaque) | |||||
291 | { | |||||
292 | timer_mod(icount_rt_timer, | |||||
293 | qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000); | |||||
294 | icount_adjust(); | |||||
295 | } | |||||
296 | ||||||
297 | static void icount_adjust_vm(void *opaque) | |||||
298 | { | |||||
299 | timer_mod(icount_vm_timer, | |||||
300 | qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + | |||||
301 | get_ticks_per_sec() / 10); | |||||
302 | icount_adjust(); | |||||
303 | } | |||||
304 | ||||||
305 | static int64_t qemu_icount_round(int64_t count) | |||||
306 | { | |||||
307 | return (count + (1 << icount_time_shift) - 1) >> icount_time_shift; | |||||
308 | } | |||||
309 | ||||||
310 | static void icount_warp_rt(void *opaque) | |||||
311 | { | |||||
312 | /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start | |||||
313 | * changes from -1 to another value, so the race here is okay. | |||||
314 | */ | |||||
315 | if (atomic_read(&vm_clock_warp_start) == -1) { | |||||
316 | return; | |||||
317 | } | |||||
318 | ||||||
319 | seqlock_write_lock(&timers_state.vm_clock_seqlock); | |||||
320 | if (runstate_is_running()) { | |||||
321 | int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); | |||||
322 | int64_t warp_delta; | |||||
323 | ||||||
324 | warp_delta = clock - vm_clock_warp_start; | |||||
325 | if (use_icount == 2) { | |||||
326 | /* | |||||
327 | * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too | |||||
328 | * far ahead of real time. | |||||
329 | */ | |||||
330 | int64_t cur_time = cpu_get_clock_locked(); | |||||
331 | int64_t cur_icount = cpu_get_icount_locked(); | |||||
332 | int64_t delta = cur_time - cur_icount; | |||||
333 | warp_delta = MIN(warp_delta, delta); | |||||
334 | } | |||||
335 | qemu_icount_bias += warp_delta; | |||||
336 | } | |||||
337 | vm_clock_warp_start = -1; | |||||
338 | seqlock_write_unlock(&timers_state.vm_clock_seqlock); | |||||
339 | ||||||
340 | if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) { | |||||
341 | qemu_clock_notify(QEMU_CLOCK_VIRTUAL); | |||||
342 | } | |||||
343 | } | |||||
344 | ||||||
345 | void qtest_clock_warp(int64_t dest) | |||||
346 | { | |||||
347 | int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); | |||||
348 | assert(qtest_enabled()); | |||||
349 | while (clock < dest) { | |||||
350 | int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL); | |||||
351 | int64_t warp = MIN(dest - clock, deadline); | |||||
352 | seqlock_write_lock(&timers_state.vm_clock_seqlock); | |||||
353 | qemu_icount_bias += warp; | |||||
354 | seqlock_write_unlock(&timers_state.vm_clock_seqlock); | |||||
355 | ||||||
356 | qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL); | |||||
357 | clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); | |||||
358 | } | |||||
359 | qemu_clock_notify(QEMU_CLOCK_VIRTUAL); | |||||
360 | } | |||||
361 | ||||||
362 | void qemu_clock_warp(QEMUClockType type) | |||||
363 | { | |||||
364 | int64_t clock; | |||||
365 | int64_t deadline; | |||||
366 | ||||||
367 | /* | |||||
368 | * There are too many global variables to make the "warp" behavior | |||||
369 | * applicable to other clocks. But a clock argument removes the | |||||
370 | * need for if statements all over the place. | |||||
371 | */ | |||||
372 | if (type != QEMU_CLOCK_VIRTUAL || !use_icount) { | |||||
373 | return; | |||||
374 | } | |||||
375 | ||||||
376 | /* | |||||
377 | * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now. | |||||
378 | * This ensures that the deadline for the timer is computed correctly below. | |||||
379 | * This also makes sure that the insn counter is synchronized before the | |||||
380 | * CPU starts running, in case the CPU is woken by an event other than | |||||
381 | * the earliest QEMU_CLOCK_VIRTUAL timer. | |||||
382 | */ | |||||
383 | icount_warp_rt(NULL); | |||||
384 | timer_del(icount_warp_timer); | |||||
385 | if (!all_cpu_threads_idle()) { | |||||
386 | return; | |||||
387 | } | |||||
388 | ||||||
389 | if (qtest_enabled()) { | |||||
390 | /* When testing, qtest commands advance icount. */ | |||||
391 | return; | |||||
392 | } | |||||
393 | ||||||
394 | /* We want to use the earliest deadline from ALL vm_clocks */ | |||||
395 | clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); | |||||
396 | deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL); | |||||
397 | if (deadline < 0) { | |||||
398 | return; | |||||
399 | } | |||||
400 | ||||||
401 | if (deadline > 0) { | |||||
402 | /* | |||||
403 | * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to | |||||
404 | * sleep. Otherwise, the CPU might be waiting for a future timer | |||||
405 | * interrupt to wake it up, but the interrupt never comes because | |||||
406 | * the vCPU isn't running any insns and thus doesn't advance the | |||||
407 | * QEMU_CLOCK_VIRTUAL. | |||||
408 | * | |||||
409 | * An extreme solution for this problem would be to never let VCPUs | |||||
410 | * sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL | |||||
411 | * timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL | |||||
412 | * event. Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL | |||||
413 | * after some e"real" time, (related to the time left until the next | |||||
414 | * event) has passed. The QEMU_CLOCK_REALTIME timer will do this. | |||||
415 | * This avoids that the warps are visible externally; for example, | |||||
416 | * you will not be sending network packets continuously instead of | |||||
417 | * every 100ms. | |||||
418 | */ | |||||
419 | seqlock_write_lock(&timers_state.vm_clock_seqlock); | |||||
420 | if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) { | |||||
421 | vm_clock_warp_start = clock; | |||||
422 | } | |||||
423 | seqlock_write_unlock(&timers_state.vm_clock_seqlock); | |||||
424 | timer_mod_anticipate(icount_warp_timer, clock + deadline); | |||||
425 | } else if (deadline == 0) { | |||||
426 | qemu_clock_notify(QEMU_CLOCK_VIRTUAL); | |||||
427 | } | |||||
428 | } | |||||
429 | ||||||
430 | static const VMStateDescription vmstate_timers = { | |||||
431 | .name = "timer", | |||||
432 | .version_id = 2, | |||||
433 | .minimum_version_id = 1, | |||||
434 | .minimum_version_id_old = 1, | |||||
435 | .fields = (VMStateField[]) { | |||||
436 | VMSTATE_INT64(cpu_ticks_offset, TimersState), | |||||
437 | VMSTATE_INT64(dummy, TimersState), | |||||
438 | VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2), | |||||
439 | VMSTATE_END_OF_LIST() | |||||
440 | } | |||||
441 | }; | |||||
442 | ||||||
443 | void configure_icount(const char *option) | |||||
444 | { | |||||
445 | seqlock_init(&timers_state.vm_clock_seqlock, NULL); | |||||
446 | vmstate_register(NULL, 0, &vmstate_timers, &timers_state); | |||||
447 | if (!option) { | |||||
448 | return; | |||||
449 | } | |||||
450 | ||||||
451 | icount_warp_timer = timer_new_ns(QEMU_CLOCK_REALTIME, | |||||
452 | icount_warp_rt, NULL); | |||||
453 | if (strcmp(option, "auto") != 0) { | |||||
454 | icount_time_shift = strtol(option, NULL, 0); | |||||
455 | use_icount = 1; | |||||
456 | return; | |||||
457 | } | |||||
458 | ||||||
459 | use_icount = 2; | |||||
460 | ||||||
461 | /* 125MIPS seems a reasonable initial guess at the guest speed. | |||||
462 | It will be corrected fairly quickly anyway. */ | |||||
463 | icount_time_shift = 3; | |||||
464 | ||||||
465 | /* Have both realtime and virtual time triggers for speed adjustment. | |||||
466 | The realtime trigger catches emulated time passing too slowly, | |||||
467 | the virtual time trigger catches emulated time passing too fast. | |||||
468 | Realtime triggers occur even when idle, so use them less frequently | |||||
469 | than VM triggers. */ | |||||
470 | icount_rt_timer = timer_new_ms(QEMU_CLOCK_REALTIME, | |||||
471 | icount_adjust_rt, NULL); | |||||
472 | timer_mod(icount_rt_timer, | |||||
473 | qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000); | |||||
474 | icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, | |||||
475 | icount_adjust_vm, NULL); | |||||
476 | timer_mod(icount_vm_timer, | |||||
477 | qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + | |||||
478 | get_ticks_per_sec() / 10); | |||||
479 | } | |||||
480 | ||||||
481 | /***********************************************************/ | |||||
482 | void hw_error(const char *fmt, ...) | |||||
483 | { | |||||
484 | va_list ap; | |||||
485 | CPUState *cpu; | |||||
486 | ||||||
487 | va_start(ap, fmt); | |||||
488 | fprintf(stderr, "qemu: hardware error: "); | |||||
489 | vfprintf(stderr, fmt, ap); | |||||
490 | fprintf(stderr, "\n"); | |||||
491 | CPU_FOREACH(cpu) { | |||||
492 | fprintf(stderr, "CPU #%d:\n", cpu->cpu_index); | |||||
493 | cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU); | |||||
494 | } | |||||
495 | va_end(ap); | |||||
496 | abort(); | |||||
497 | } | |||||
498 | ||||||
499 | void cpu_synchronize_all_states(void) | |||||
500 | { | |||||
501 | CPUState *cpu; | |||||
502 | ||||||
503 | CPU_FOREACH(cpu) { | |||||
504 | cpu_synchronize_state(cpu); | |||||
505 | } | |||||
506 | } | |||||
507 | ||||||
508 | void cpu_synchronize_all_post_reset(void) | |||||
509 | { | |||||
510 | CPUState *cpu; | |||||
511 | ||||||
512 | CPU_FOREACH(cpu) { | |||||
513 | cpu_synchronize_post_reset(cpu); | |||||
514 | } | |||||
515 | } | |||||
516 | ||||||
517 | void cpu_synchronize_all_post_init(void) | |||||
518 | { | |||||
519 | CPUState *cpu; | |||||
520 | ||||||
521 | CPU_FOREACH(cpu) { | |||||
522 | cpu_synchronize_post_init(cpu); | |||||
523 | } | |||||
524 | } | |||||
525 | ||||||
526 | static int do_vm_stop(RunState state) | |||||
527 | { | |||||
528 | int ret = 0; | |||||
529 | ||||||
530 | if (runstate_is_running()) { | |||||
531 | cpu_disable_ticks(); | |||||
532 | pause_all_vcpus(); | |||||
533 | runstate_set(state); | |||||
534 | vm_state_notify(0, state); | |||||
535 | monitor_protocol_event(QEVENT_STOP, NULL); | |||||
536 | } | |||||
537 | ||||||
538 | bdrv_drain_all(); | |||||
539 | ret = bdrv_flush_all(); | |||||
540 | ||||||
541 | return ret; | |||||
542 | } | |||||
543 | ||||||
544 | static bool cpu_can_run(CPUState *cpu) | |||||
545 | { | |||||
546 | if (cpu->stop) { | |||||
547 | return false; | |||||
548 | } | |||||
549 | if (cpu_is_stopped(cpu)) { | |||||
550 | return false; | |||||
551 | } | |||||
552 | return true; | |||||
553 | } | |||||
554 | ||||||
555 | static void cpu_handle_guest_debug(CPUState *cpu) | |||||
556 | { | |||||
557 | gdb_set_stop_cpu(cpu); | |||||
558 | qemu_system_debug_request(); | |||||
559 | cpu->stopped = true; | |||||
560 | } | |||||
561 | ||||||
562 | static void cpu_signal(int sig) | |||||
563 | { | |||||
564 | if (current_cpu) { | |||||
565 | cpu_exit(current_cpu); | |||||
566 | } | |||||
567 | exit_request = 1; | |||||
568 | } | |||||
569 | ||||||
570 | #ifdef CONFIG_LINUX | |||||
571 | static void sigbus_reraise(void) | |||||
572 | { | |||||
573 | sigset_t set; | |||||
574 | struct sigaction action; | |||||
575 | ||||||
576 | memset(&action, 0, sizeof(action)); | |||||
577 | action.sa_handler = SIG_DFL; | |||||
578 | if (!sigaction(SIGBUS, &action, NULL)) { | |||||
579 | raise(SIGBUS); | |||||
580 | sigemptyset(&set); | |||||
581 | sigaddset(&set, SIGBUS); | |||||
582 | sigprocmask(SIG_UNBLOCK, &set, NULL); | |||||
583 | } | |||||
584 | perror("Failed to re-raise SIGBUS!\n"); | |||||
585 | abort(); | |||||
586 | } | |||||
587 | ||||||
588 | static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo, | |||||
589 | void *ctx) | |||||
590 | { | |||||
591 | if (kvm_on_sigbus(siginfo->ssi_code, | |||||
592 | (void *)(intptr_t)siginfo->ssi_addr)) { | |||||
593 | sigbus_reraise(); | |||||
594 | } | |||||
595 | } | |||||
596 | ||||||
597 | static void qemu_init_sigbus(void) | |||||
598 | { | |||||
599 | struct sigaction action; | |||||
600 | ||||||
601 | memset(&action, 0, sizeof(action)); | |||||
602 | action.sa_flags = SA_SIGINFO; | |||||
603 | action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler; | |||||
604 | sigaction(SIGBUS, &action, NULL); | |||||
605 | ||||||
606 | prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0); | |||||
607 | } | |||||
608 | ||||||
609 | static void qemu_kvm_eat_signals(CPUState *cpu) | |||||
610 | { | |||||
611 | struct timespec ts = { 0, 0 }; | |||||
612 | siginfo_t siginfo; | |||||
613 | sigset_t waitset; | |||||
614 | sigset_t chkset; | |||||
615 | int r; | |||||
616 | ||||||
617 | sigemptyset(&waitset); | |||||
618 | sigaddset(&waitset, SIG_IPI); | |||||
619 | sigaddset(&waitset, SIGBUS); | |||||
620 | ||||||
621 | do { | |||||
622 | r = sigtimedwait(&waitset, &siginfo, &ts); | |||||
623 | if (r == -1 && !(errno == EAGAIN || errno == EINTR)) { | |||||
624 | perror("sigtimedwait"); | |||||
625 | exit(1); | |||||
626 | } | |||||
627 | ||||||
628 | switch (r) { | |||||
629 | case SIGBUS: | |||||
630 | if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) { | |||||
631 | sigbus_reraise(); | |||||
632 | } | |||||
633 | break; | |||||
634 | default: | |||||
635 | break; | |||||
636 | } | |||||
637 | ||||||
638 | r = sigpending(&chkset); | |||||
639 | if (r == -1) { | |||||
640 | perror("sigpending"); | |||||
641 | exit(1); | |||||
642 | } | |||||
643 | } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS)); | |||||
644 | } | |||||
645 | ||||||
646 | #else /* !CONFIG_LINUX */ | |||||
647 | ||||||
648 | static void qemu_init_sigbus(void) | |||||
649 | { | |||||
650 | } | |||||
651 | ||||||
652 | static void qemu_kvm_eat_signals(CPUState *cpu) | |||||
653 | { | |||||
654 | } | |||||
655 | #endif /* !CONFIG_LINUX */ | |||||
656 | ||||||
657 | #ifndef _WIN32 | |||||
658 | static void dummy_signal(int sig) | |||||
659 | { | |||||
660 | } | |||||
661 | ||||||
662 | static void qemu_kvm_init_cpu_signals(CPUState *cpu) | |||||
663 | { | |||||
664 | int r; | |||||
665 | sigset_t set; | |||||
666 | struct sigaction sigact; | |||||
667 | ||||||
668 | memset(&sigact, 0, sizeof(sigact)); | |||||
669 | sigact.sa_handler = dummy_signal; | |||||
670 | sigaction(SIG_IPI, &sigact, NULL); | |||||
671 | ||||||
672 | pthread_sigmask(SIG_BLOCK, NULL, &set); | |||||
673 | sigdelset(&set, SIG_IPI); | |||||
674 | sigdelset(&set, SIGBUS); | |||||
675 | r = kvm_set_signal_mask(cpu, &set); | |||||
676 | if (r) { | |||||
677 | fprintf(stderrstderr, "kvm_set_signal_mask: %s\n", strerror(-r)); | |||||
678 | exit(1); | |||||
679 | } | |||||
680 | } | |||||
681 | ||||||
682 | static void qemu_tcg_init_cpu_signals(void) | |||||
683 | { | |||||
684 | sigset_t set; | |||||
685 | struct sigaction sigact; | |||||
686 | ||||||
687 | memset(&sigact, 0, sizeof(sigact)); | |||||
688 | sigact.sa_handler = cpu_signal; | |||||
689 | sigaction(SIG_IPI, &sigact, NULL); | |||||
690 | ||||||
691 | sigemptyset(&set); | |||||
692 | sigaddset(&set, SIG_IPI); | |||||
693 | pthread_sigmask(SIG_UNBLOCK, &set, NULL); | |||||
694 | } | |||||
695 | ||||||
696 | #else /* _WIN32 */ | |||||
697 | static void qemu_kvm_init_cpu_signals(CPUState *cpu) | |||||
698 | { | |||||
699 | abort(); | |||||
700 | } | |||||
701 | ||||||
702 | static void qemu_tcg_init_cpu_signals(void) | |||||
703 | { | |||||
704 | } | |||||
705 | #endif /* _WIN32 */ | |||||
706 | ||||||
707 | static QemuMutex qemu_global_mutex; | |||||
708 | static QemuCond qemu_io_proceeded_cond; | |||||
709 | static bool iothread_requesting_mutex; | |||||
710 | ||||||
711 | static QemuThread io_thread; | |||||
712 | ||||||
713 | static QemuThread *tcg_cpu_thread; | |||||
714 | static QemuCond *tcg_halt_cond; | |||||
715 | ||||||
716 | /* cpu creation */ | |||||
717 | static QemuCond qemu_cpu_cond; | |||||
718 | /* system init */ | |||||
719 | static QemuCond qemu_pause_cond; | |||||
720 | static QemuCond qemu_work_cond; | |||||
721 | ||||||
722 | void qemu_init_cpu_loop(void) | |||||
723 | { | |||||
724 | qemu_init_sigbus(); | |||||
725 | qemu_cond_init(&qemu_cpu_cond); | |||||
726 | qemu_cond_init(&qemu_pause_cond); | |||||
727 | qemu_cond_init(&qemu_work_cond); | |||||
728 | qemu_cond_init(&qemu_io_proceeded_cond); | |||||
729 | qemu_mutex_init(&qemu_global_mutex); | |||||
730 | ||||||
731 | qemu_thread_get_self(&io_thread); | |||||
732 | } | |||||
733 | ||||||
734 | void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data) | |||||
735 | { | |||||
736 | struct qemu_work_item wi; | |||||
737 | ||||||
738 | if (qemu_cpu_is_self(cpu)) { | |||||
739 | func(data); | |||||
740 | return; | |||||
741 | } | |||||
742 | ||||||
743 | wi.func = func; | |||||
744 | wi.data = data; | |||||
745 | wi.free = false; | |||||
746 | if (cpu->queued_work_first == NULL) { | |||||
747 | cpu->queued_work_first = &wi; | |||||
748 | } else { | |||||
749 | cpu->queued_work_last->next = &wi; | |||||
750 | } | |||||
751 | cpu->queued_work_last = &wi; | |||||
752 | wi.next = NULL; | |||||
753 | wi.done = false; | |||||
754 | ||||||
755 | qemu_cpu_kick(cpu); | |||||
756 | while (!wi.done) { | |||||
757 | CPUState *self_cpu = current_cpu; | |||||
758 | ||||||
759 | qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex); | |||||
760 | current_cpu = self_cpu; | |||||
761 | } | |||||
762 | } | |||||
763 | ||||||
764 | void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data) | |||||
765 | { | |||||
766 | struct qemu_work_item *wi; | |||||
767 | ||||||
768 | if (qemu_cpu_is_self(cpu)) { | |||||
769 | func(data); | |||||
770 | return; | |||||
771 | } | |||||
772 | ||||||
773 | wi = g_malloc0(sizeof(struct qemu_work_item)); | |||||
774 | wi->func = func; | |||||
775 | wi->data = data; | |||||
776 | wi->free = true; | |||||
777 | if (cpu->queued_work_first == NULL) { | |||||
778 | cpu->queued_work_first = wi; | |||||
779 | } else { | |||||
780 | cpu->queued_work_last->next = wi; | |||||
781 | } | |||||
782 | cpu->queued_work_last = wi; | |||||
783 | wi->next = NULL; | |||||
784 | wi->done = false; | |||||
785 | ||||||
786 | qemu_cpu_kick(cpu); | |||||
787 | } | |||||
788 | ||||||
789 | static void flush_queued_work(CPUState *cpu) | |||||
790 | { | |||||
791 | struct qemu_work_item *wi; | |||||
792 | ||||||
793 | if (cpu->queued_work_first == NULL) { | |||||
794 | return; | |||||
795 | } | |||||
796 | ||||||
797 | while ((wi = cpu->queued_work_first)) { | |||||
798 | cpu->queued_work_first = wi->next; | |||||
799 | wi->func(wi->data); | |||||
800 | wi->done = true; | |||||
801 | if (wi->free) { | |||||
802 | g_free(wi); | |||||
803 | } | |||||
804 | } | |||||
805 | cpu->queued_work_last = NULL; | |||||
806 | qemu_cond_broadcast(&qemu_work_cond); | |||||
807 | } | |||||
808 | ||||||
809 | static void qemu_wait_io_event_common(CPUState *cpu) | |||||
810 | { | |||||
811 | if (cpu->stop) { | |||||
812 | cpu->stop = false; | |||||
813 | cpu->stopped = true; | |||||
814 | qemu_cond_signal(&qemu_pause_cond); | |||||
815 | } | |||||
816 | flush_queued_work(cpu); | |||||
817 | cpu->thread_kicked = false; | |||||
818 | } | |||||
819 | ||||||
820 | static void qemu_tcg_wait_io_event(void) | |||||
821 | { | |||||
822 | CPUState *cpu; | |||||
823 | ||||||
824 | while (all_cpu_threads_idle()) { | |||||
825 | /* Start accounting real time to the virtual clock if the CPUs | |||||
826 | are idle. */ | |||||
827 | qemu_clock_warp(QEMU_CLOCK_VIRTUAL); | |||||
828 | qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex); | |||||
829 | } | |||||
830 | ||||||
831 | while (iothread_requesting_mutex) { | |||||
832 | qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex); | |||||
833 | } | |||||
834 | ||||||
835 | CPU_FOREACH(cpu) { | |||||
836 | qemu_wait_io_event_common(cpu); | |||||
837 | } | |||||
838 | } | |||||
839 | ||||||
840 | static void qemu_kvm_wait_io_event(CPUState *cpu) | |||||
841 | { | |||||
842 | while (cpu_thread_is_idle(cpu)) { | |||||
843 | qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex); | |||||
844 | } | |||||
845 | ||||||
846 | qemu_kvm_eat_signals(cpu); | |||||
847 | qemu_wait_io_event_common(cpu); | |||||
848 | } | |||||
849 | ||||||
850 | static void *qemu_kvm_cpu_thread_fn(void *arg) | |||||
851 | { | |||||
852 | CPUState *cpu = arg; | |||||
853 | int r; | |||||
854 | ||||||
855 | qemu_mutex_lock(&qemu_global_mutex); | |||||
856 | qemu_thread_get_self(cpu->thread); | |||||
857 | cpu->thread_id = qemu_get_thread_id(); | |||||
858 | current_cpu = cpu; | |||||
859 | ||||||
860 | r = kvm_init_vcpu(cpu); | |||||
861 | if (r < 0) { | |||||
862 | fprintf(stderrstderr, "kvm_init_vcpu failed: %s\n", strerror(-r)); | |||||
863 | exit(1); | |||||
864 | } | |||||
865 | ||||||
866 | qemu_kvm_init_cpu_signals(cpu); | |||||
867 | ||||||
868 | /* signal CPU creation */ | |||||
869 | cpu->created = true; | |||||
870 | qemu_cond_signal(&qemu_cpu_cond); | |||||
871 | ||||||
872 | while (1) { | |||||
873 | if (cpu_can_run(cpu)) { | |||||
874 | r = kvm_cpu_exec(cpu); | |||||
875 | if (r == EXCP_DEBUG) { | |||||
876 | cpu_handle_guest_debug(cpu); | |||||
877 | } | |||||
878 | } | |||||
879 | qemu_kvm_wait_io_event(cpu); | |||||
880 | } | |||||
881 | ||||||
882 | return NULL; | |||||
883 | } | |||||
884 | ||||||
885 | static void *qemu_dummy_cpu_thread_fn(void *arg) | |||||
886 | { | |||||
887 | #ifdef _WIN32 | |||||
888 | fprintf(stderrstderr, "qtest is not supported under Windows\n"); | |||||
889 | exit(1); | |||||
890 | #else | |||||
891 | CPUState *cpu = arg; | |||||
892 | sigset_t waitset; | |||||
893 | int r; | |||||
894 | ||||||
895 | qemu_mutex_lock_iothread(); | |||||
896 | qemu_thread_get_self(cpu->thread); | |||||
897 | cpu->thread_id = qemu_get_thread_id(); | |||||
898 | ||||||
899 | sigemptyset(&waitset); | |||||
900 | sigaddset(&waitset, SIG_IPI); | |||||
901 | ||||||
902 | /* signal CPU creation */ | |||||
903 | cpu->created = true; | |||||
904 | qemu_cond_signal(&qemu_cpu_cond); | |||||
905 | ||||||
906 | current_cpu = cpu; | |||||
907 | while (1) { | |||||
908 | current_cpu = NULL; | |||||
909 | qemu_mutex_unlock_iothread(); | |||||
910 | do { | |||||
911 | int sig; | |||||
912 | r = sigwait(&waitset, &sig); | |||||
913 | } while (r == -1 && (errno == EAGAIN || errno == EINTR)); | |||||
914 | if (r == -1) { | |||||
915 | perror("sigwait"); | |||||
916 | exit(1); | |||||
917 | } | |||||
918 | qemu_mutex_lock_iothread(); | |||||
919 | current_cpu = cpu; | |||||
920 | qemu_wait_io_event_common(cpu); | |||||
921 | } | |||||
922 | ||||||
923 | return NULL; | |||||
924 | #endif | |||||
925 | } | |||||
926 | ||||||
927 | static void tcg_exec_all(void); | |||||
928 | ||||||
929 | static void *qemu_tcg_cpu_thread_fn(void *arg) | |||||
930 | { | |||||
931 | CPUState *cpu = arg; | |||||
932 | ||||||
933 | qemu_tcg_init_cpu_signals(); | |||||
934 | qemu_thread_get_self(cpu->thread); | |||||
935 | ||||||
936 | qemu_mutex_lock(&qemu_global_mutex); | |||||
937 | CPU_FOREACH(cpu) { | |||||
938 | cpu->thread_id = qemu_get_thread_id(); | |||||
939 | cpu->created = true; | |||||
940 | } | |||||
941 | qemu_cond_signal(&qemu_cpu_cond); | |||||
942 | ||||||
943 | /* wait for initial kick-off after machine start */ | |||||
944 | while (QTAILQ_FIRST(&cpus)->stopped) { | |||||
     ← Access to field 'stopped' results in a dereference of a null pointer (loaded from field 'tqh_first')
945 | qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex); | |||||
946 | ||||||
947 | /* process any pending work */ | |||||
948 | CPU_FOREACH(cpu) { | |||||
949 | qemu_wait_io_event_common(cpu); | |||||
950 | } | |||||
951 | } | |||||
952 | ||||||
953 | while (1) { | |||||
954 | tcg_exec_all(); | |||||
955 | ||||||
956 | if (use_icount) { | |||||
957 | int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL); | |||||
958 | ||||||
959 | if (deadline == 0) { | |||||
960 | qemu_clock_notify(QEMU_CLOCK_VIRTUAL); | |||||
961 | } | |||||
962 | } | |||||
963 | qemu_tcg_wait_io_event(); | |||||
964 | } | |||||
965 | ||||||
966 | return NULL; | |||||
967 | } | |||||
968 | ||||||
969 | static void qemu_cpu_kick_thread(CPUState *cpu) | |||||
970 | { | |||||
971 | #ifndef _WIN32 | |||||
972 | int err; | |||||
973 | ||||||
974 | err = pthread_kill(cpu->thread->thread, SIG_IPI); | |||||
975 | if (err) { | |||||
976 | fprintf(stderrstderr, "qemu:%s: %s", __func__, strerror(err)); | |||||
977 | exit(1); | |||||
978 | } | |||||
979 | #else /* _WIN32 */ | |||||
980 | if (!qemu_cpu_is_self(cpu)) { | |||||
981 | CONTEXT tcgContext; | |||||
982 | ||||||
983 | if (SuspendThread(cpu->hThread) == (DWORD)-1) { | |||||
984 | fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__, | |||||
985 | GetLastError()); | |||||
986 | exit(1); | |||||
987 | } | |||||
988 | ||||||
989 | /* On multi-core systems, we are not sure that the thread is actually | |||||
990 | * suspended until we can get the context. | |||||
991 | */ | |||||
992 | tcgContext.ContextFlags = CONTEXT_CONTROL; | |||||
993 | while (GetThreadContext(cpu->hThread, &tcgContext) != 0) { | |||||
994 | continue; | |||||
995 | } | |||||
996 | ||||||
997 | cpu_signal(0); | |||||
998 | ||||||
999 | if (ResumeThread(cpu->hThread) == (DWORD)-1) { | |||||
1000 | fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__, | |||||
1001 | GetLastError()); | |||||
1002 | exit(1); | |||||
1003 | } | |||||
1004 | } | |||||
1005 | #endif | |||||
1006 | } | |||||
1007 | ||||||
1008 | void qemu_cpu_kick(CPUState *cpu) | |||||
1009 | { | |||||
1010 | qemu_cond_broadcast(cpu->halt_cond); | |||||
1011 | if (!tcg_enabled() && !cpu->thread_kicked) { | |||||
1012 | qemu_cpu_kick_thread(cpu); | |||||
1013 | cpu->thread_kicked = true; | |||||
1014 | } | |||||
1015 | } | |||||
1016 | ||||||
1017 | void qemu_cpu_kick_self(void) | |||||
1018 | { | |||||
1019 | #ifndef _WIN32 | |||||
1020 | assert(current_cpu); | |||||
1021 | ||||||
1022 | if (!current_cpu->thread_kicked) { | |||||
1023 | qemu_cpu_kick_thread(current_cpu); | |||||
1024 | current_cpu->thread_kicked = true; | |||||
1025 | } | |||||
1026 | #else | |||||
1027 | abort(); | |||||
1028 | #endif | |||||
1029 | } | |||||
1030 | ||||||
1031 | bool qemu_cpu_is_self(CPUState *cpu) | |||||
1032 | { | |||||
1033 | return qemu_thread_is_self(cpu->thread); | |||||
1034 | } | |||||
1035 | ||||||
1036 | static bool qemu_in_vcpu_thread(void) | |||||
1037 | { | |||||
1038 | return current_cpu && qemu_cpu_is_self(current_cpu); | |||||
1039 | } | |||||
1040 | ||||||
1041 | void qemu_mutex_lock_iothread(void) | |||||
1042 | { | |||||
1043 | if (!tcg_enabled()) { | |||||
1044 | qemu_mutex_lock(&qemu_global_mutex); | |||||
1045 | } else { | |||||
1046 | iothread_requesting_mutex = true; | |||||
1047 | if (qemu_mutex_trylock(&qemu_global_mutex)) { | |||||
1048 | qemu_cpu_kick_thread(first_cpu); | |||||
1049 | qemu_mutex_lock(&qemu_global_mutex); | |||||
1050 | } | |||||
1051 | iothread_requesting_mutex = false; | |||||
1052 | qemu_cond_broadcast(&qemu_io_proceeded_cond); | |||||
1053 | } | |||||
1054 | } | |||||
1055 | ||||||
1056 | void qemu_mutex_unlock_iothread(void) | |||||
1057 | { | |||||
1058 | qemu_mutex_unlock(&qemu_global_mutex); | |||||
1059 | } | |||||
1060 | ||||||
1061 | static int all_vcpus_paused(void) | |||||
1062 | { | |||||
1063 | CPUState *cpu; | |||||
1064 | ||||||
1065 | CPU_FOREACH(cpu) { | |||||
1066 | if (!cpu->stopped) { | |||||
1067 | return 0; | |||||
1068 | } | |||||
1069 | } | |||||
1070 | ||||||
1071 | return 1; | |||||
1072 | } | |||||
1073 | ||||||
1074 | void pause_all_vcpus(void) | |||||
1075 | { | |||||
1076 | CPUState *cpu; | |||||
1077 | ||||||
1078 | qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false); | |||||
1079 | CPU_FOREACH(cpu) { | |||||
1080 | cpu->stop = true; | |||||
1081 | qemu_cpu_kick(cpu); | |||||
1082 | } | |||||
1083 | ||||||
1084 | if (qemu_in_vcpu_thread()) { | |||||
1085 | cpu_stop_current(); | |||||
1086 | if (!kvm_enabled()) { | |||||
1087 | CPU_FOREACH(cpu) { | |||||
1088 | cpu->stop = false; | |||||
1089 | cpu->stopped = true; | |||||
1090 | } | |||||
1091 | return; | |||||
1092 | } | |||||
1093 | } | |||||
1094 | ||||||
1095 | while (!all_vcpus_paused()) { | |||||
1096 | qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex); | |||||
1097 | CPU_FOREACH(cpu) { | |||||
1098 | qemu_cpu_kick(cpu); | |||||
1099 | } | |||||
1100 | } | |||||
1101 | } | |||||
1102 | ||||||
1103 | void cpu_resume(CPUState *cpu) | |||||
1104 | { | |||||
1105 | cpu->stop = false; | |||||
1106 | cpu->stopped = false; | |||||
1107 | qemu_cpu_kick(cpu); | |||||
1108 | } | |||||
1109 | ||||||
1110 | void resume_all_vcpus(void) | |||||
1111 | { | |||||
1112 | CPUState *cpu; | |||||
1113 | ||||||
1114 | qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true); | |||||
1115 | CPU_FOREACH(cpu) { | |||||
1116 | cpu_resume(cpu); | |||||
1117 | } | |||||
1118 | } | |||||
1119 | ||||||
1120 | static void qemu_tcg_init_vcpu(CPUState *cpu) | |||||
1121 | { | |||||
1122 | /* share a single thread for all cpus with TCG */ | |||||
1123 | if (!tcg_cpu_thread) { | |||||
1124 | cpu->thread = g_malloc0(sizeof(QemuThread)); | |||||
1125 | cpu->halt_cond = g_malloc0(sizeof(QemuCond)); | |||||
1126 | qemu_cond_init(cpu->halt_cond); | |||||
1127 | tcg_halt_cond = cpu->halt_cond; | |||||
1128 | qemu_thread_create(cpu->thread, qemu_tcg_cpu_thread_fn, cpu, | |||||
1129 | QEMU_THREAD_JOINABLE); | |||||
1130 | #ifdef _WIN32 | |||||
1131 | cpu->hThread = qemu_thread_get_handle(cpu->thread); | |||||
1132 | #endif | |||||
1133 | while (!cpu->created) { | |||||
1134 | qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex); | |||||
1135 | } | |||||
1136 | tcg_cpu_thread = cpu->thread; | |||||
1137 | } else { | |||||
1138 | cpu->thread = tcg_cpu_thread; | |||||
1139 | cpu->halt_cond = tcg_halt_cond; | |||||
1140 | } | |||||
1141 | } | |||||
1142 | ||||||
1143 | static void qemu_kvm_start_vcpu(CPUState *cpu) | |||||
1144 | { | |||||
1145 | cpu->thread = g_malloc0(sizeof(QemuThread)); | |||||
1146 | cpu->halt_cond = g_malloc0(sizeof(QemuCond)); | |||||
1147 | qemu_cond_init(cpu->halt_cond); | |||||
1148 | qemu_thread_create(cpu->thread, qemu_kvm_cpu_thread_fn, cpu, | |||||
1149 | QEMU_THREAD_JOINABLE); | |||||
1150 | while (!cpu->created) { | |||||
1151 | qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex); | |||||
1152 | } | |||||
1153 | } | |||||
1154 | ||||||
1155 | static void qemu_dummy_start_vcpu(CPUState *cpu) | |||||
1156 | { | |||||
1157 | cpu->thread = g_malloc0(sizeof(QemuThread)); | |||||
1158 | cpu->halt_cond = g_malloc0(sizeof(QemuCond)); | |||||
1159 | qemu_cond_init(cpu->halt_cond); | |||||
1160 | qemu_thread_create(cpu->thread, qemu_dummy_cpu_thread_fn, cpu, | |||||
1161 | QEMU_THREAD_JOINABLE); | |||||
1162 | while (!cpu->created) { | |||||
1163 | qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex); | |||||
1164 | } | |||||
1165 | } | |||||
1166 | ||||||
1167 | void qemu_init_vcpu(CPUState *cpu) | |||||
1168 | { | |||||
1169 | cpu->nr_cores = smp_cores; | |||||
1170 | cpu->nr_threads = smp_threads; | |||||
1171 | cpu->stopped = true; | |||||
1172 | if (kvm_enabled()) { | |||||
1173 | qemu_kvm_start_vcpu(cpu); | |||||
1174 | } else if (tcg_enabled()) { | |||||
1175 | qemu_tcg_init_vcpu(cpu); | |||||
1176 | } else { | |||||
1177 | qemu_dummy_start_vcpu(cpu); | |||||
1178 | } | |||||
1179 | } | |||||
1180 | ||||||
1181 | void cpu_stop_current(void) | |||||
1182 | { | |||||
1183 | if (current_cpu) { | |||||
1184 | current_cpu->stop = false; | |||||
1185 | current_cpu->stopped = true; | |||||
1186 | cpu_exit(current_cpu); | |||||
1187 | qemu_cond_signal(&qemu_pause_cond); | |||||
1188 | } | |||||
1189 | } | |||||
1190 | ||||||
1191 | int vm_stop(RunState state) | |||||
1192 | { | |||||
1193 | if (qemu_in_vcpu_thread()) { | |||||
1194 | qemu_system_vmstop_request(state); | |||||
1195 | /* | |||||
1196 | * FIXME: should not return to device code in case | |||||
1197 | * vm_stop() has been requested. | |||||
1198 | */ | |||||
1199 | cpu_stop_current(); | |||||
1200 | return 0; | |||||
1201 | } | |||||
1202 | ||||||
1203 | return do_vm_stop(state); | |||||
1204 | } | |||||
1205 | ||||||
1206 | /* does a state transition even if the VM is already stopped, | |||||
1207 | current state is forgotten forever */ | |||||
1208 | int vm_stop_force_state(RunState state) | |||||
1209 | { | |||||
1210 | if (runstate_is_running()) { | |||||
1211 | return vm_stop(state); | |||||
1212 | } else { | |||||
1213 | runstate_set(state); | |||||
1214 | /* Make sure to return an error if the flush in a previous vm_stop() | |||||
1215 | * failed. */ | |||||
1216 | return bdrv_flush_all(); | |||||
1217 | } | |||||
1218 | } | |||||
1219 | ||||||
1220 | static int tcg_cpu_exec(CPUArchState *env) | |||||
1221 | { | |||||
1222 | int ret; | |||||
1223 | #ifdef CONFIG_PROFILER | |||||
1224 | int64_t ti; | |||||
1225 | #endif | |||||
1226 | ||||||
1227 | #ifdef CONFIG_PROFILER | |||||
1228 | ti = profile_getclock(); | |||||
1229 | #endif | |||||
1230 | if (use_icount) { | |||||
1231 | int64_t count; | |||||
1232 | int64_t deadline; | |||||
1233 | int decr; | |||||
1234 | qemu_icount -= (env->icount_decr.u16.low + env->icount_extra); | |||||
1235 | env->icount_decr.u16.low = 0; | |||||
1236 | env->icount_extra = 0; | |||||
1237 | deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL); | |||||
1238 | ||||||
1239 | /* Maintain prior (possibly buggy) behaviour where if no deadline | |||||
1240 | * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than | |||||
1241 | * INT32_MAX nanoseconds ahead, we still use INT32_MAX | |||||
1242 | * nanoseconds. | |||||
1243 | */ | |||||
1244 | if ((deadline < 0) || (deadline > INT32_MAX)) { | |||||
1245 | deadline = INT32_MAX; | |||||
1246 | } | |||||
1247 | ||||||
1248 | count = qemu_icount_round(deadline); | |||||
1249 | qemu_icount += count; | |||||
1250 | decr = (count > 0xffff) ? 0xffff : count; | |||||
1251 | count -= decr; | |||||
1252 | env->icount_decr.u16.low = decr; | |||||
1253 | env->icount_extra = count; | |||||
1254 | } | |||||
1255 | ret = cpu_exec(env); | |||||
1256 | #ifdef CONFIG_PROFILER | |||||
1257 | qemu_time += profile_getclock() - ti; | |||||
1258 | #endif | |||||
1259 | if (use_icount) { | |||||
1260 | /* Fold pending instructions back into the | |||||
1261 | instruction counter, and clear the interrupt flag. */ | |||||
1262 | qemu_icount -= (env->icount_decr.u16.low | |||||
1263 | + env->icount_extra); | |||||
1264 | env->icount_decr.u32 = 0; | |||||
1265 | env->icount_extra = 0; | |||||
1266 | } | |||||
1267 | return ret; | |||||
1268 | } | |||||
1269 | ||||||
1270 | static void tcg_exec_all(void) | |||||
1271 | { | |||||
1272 | int r; | |||||
1273 | ||||||
1274 | /* Account partial waits to QEMU_CLOCK_VIRTUAL. */ | |||||
1275 | qemu_clock_warp(QEMU_CLOCK_VIRTUAL); | |||||
1276 | ||||||
1277 | if (next_cpu == NULL) { | |||||
1278 | next_cpu = first_cpu; | |||||
1279 | } | |||||
1280 | for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) { | |||||
1281 | CPUState *cpu = next_cpu; | |||||
1282 | CPUArchState *env = cpu->env_ptr; | |||||
1283 | ||||||
1284 | qemu_clock_enable(QEMU_CLOCK_VIRTUAL, | |||||
1285 | (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0); | |||||
1286 | ||||||
1287 | if (cpu_can_run(cpu)) { | |||||
1288 | r = tcg_cpu_exec(env); | |||||
1289 | if (r == EXCP_DEBUG) { | |||||
1290 | cpu_handle_guest_debug(cpu); | |||||
1291 | break; | |||||
1292 | } | |||||
1293 | } else if (cpu->stop || cpu->stopped) { | |||||
1294 | break; | |||||
1295 | } | |||||
1296 | } | |||||
1297 | exit_request = 0; | |||||
1298 | } | |||||
1299 | ||||||
1300 | void set_numa_modes(void) | |||||
1301 | { | |||||
1302 | CPUState *cpu; | |||||
1303 | int i; | |||||
1304 | ||||||
1305 | CPU_FOREACH(cpu) { | |||||
1306 | for (i = 0; i < nb_numa_nodes; i++) { | |||||
1307 | if (test_bit(cpu->cpu_index, node_cpumask[i])) { | |||||
1308 | cpu->numa_node = i; | |||||
1309 | } | |||||
1310 | } | |||||
1311 | } | |||||
1312 | } | |||||
1313 | ||||||
1314 | void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg) | |||||
1315 | { | |||||
1316 | /* XXX: implement xxx_cpu_list for targets that still miss it */ | |||||
1317 | #if defined(cpu_list) | |||||
1318 | cpu_list(f, cpu_fprintf); | |||||
1319 | #endif | |||||
1320 | } | |||||
1321 | ||||||
1322 | CpuInfoList *qmp_query_cpus(Error **errp) | |||||
1323 | { | |||||
1324 | CpuInfoList *head = NULL, *cur_item = NULL; | |||||
1325 | CPUState *cpu; | |||||
1326 | ||||||
1327 | CPU_FOREACH(cpu) { | |||||
1328 | CpuInfoList *info; | |||||
1329 | #if defined(TARGET_I386) | |||||
1330 | X86CPU *x86_cpu = X86_CPU(cpu); | |||||
1331 | CPUX86State *env = &x86_cpu->env; | |||||
1332 | #elif defined(TARGET_PPC) | |||||
1333 | PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu); | |||||
1334 | CPUPPCState *env = &ppc_cpu->env; | |||||
1335 | #elif defined(TARGET_SPARC) | |||||
1336 | SPARCCPU *sparc_cpu = SPARC_CPU(cpu); | |||||
1337 | CPUSPARCState *env = &sparc_cpu->env; | |||||
1338 | #elif defined(TARGET_MIPS) | |||||
1339 | MIPSCPU *mips_cpu = MIPS_CPU(cpu); | |||||
1340 | CPUMIPSState *env = &mips_cpu->env; | |||||
1341 | #endif | |||||
1342 | ||||||
1343 | cpu_synchronize_state(cpu); | |||||
1344 | ||||||
1345 | info = g_malloc0(sizeof(*info)); | |||||
1346 | info->value = g_malloc0(sizeof(*info->value)); | |||||
1347 | info->value->CPU = cpu->cpu_index; | |||||
1348 | info->value->current = (cpu == first_cpu); | |||||
1349 | info->value->halted = cpu->halted; | |||||
1350 | info->value->thread_id = cpu->thread_id; | |||||
1351 | #if defined(TARGET_I386) | |||||
1352 | info->value->has_pc = true; | |||||
1353 | info->value->pc = env->eip + env->segs[R_CS].base; | |||||
1354 | #elif defined(TARGET_PPC) | |||||
1355 | info->value->has_nip = true; | |||||
1356 | info->value->nip = env->nip; | |||||
1357 | #elif defined(TARGET_SPARC) | |||||
1358 | info->value->has_pc = true; | |||||
1359 | info->value->pc = env->pc; | |||||
1360 | info->value->has_npc = true; | |||||
1361 | info->value->npc = env->npc; | |||||
1362 | #elif defined(TARGET_MIPS) | |||||
1363 | info->value->has_PC = true; | |||||
1364 | info->value->PC = env->active_tc.PC; | |||||
1365 | #endif | |||||
1366 | ||||||
1367 | /* XXX: waiting for the qapi to support GSList */ | |||||
1368 | if (!cur_item) { | |||||
1369 | head = cur_item = info; | |||||
1370 | } else { | |||||
1371 | cur_item->next = info; | |||||
1372 | cur_item = info; | |||||
1373 | } | |||||
1374 | } | |||||
1375 | ||||||
1376 | return head; | |||||
1377 | } | |||||
1378 | ||||||
1379 | void qmp_memsave(int64_t addr, int64_t size, const char *filename, | |||||
1380 | bool has_cpu, int64_t cpu_index, Error **errp) | |||||
1381 | { | |||||
1382 | FILE *f; | |||||
1383 | uint32_t l; | |||||
1384 | CPUState *cpu; | |||||
1385 | uint8_t buf[1024]; | |||||
1386 | ||||||
1387 | if (!has_cpu) { | |||||
1388 | cpu_index = 0; | |||||
1389 | } | |||||
1390 | ||||||
1391 | cpu = qemu_get_cpu(cpu_index); | |||||
1392 | if (cpu == NULL) { | |||||
1393 | error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index", | |||||
1394 | "a CPU number"); | |||||
1395 | return; | |||||
1396 | } | |||||
1397 | ||||||
1398 | f = fopen(filename, "wb"); | |||||
1399 | if (!f) { | |||||
1400 | error_setg_file_open(errp, errno, filename); | |||||
1401 | return; | |||||
1402 | } | |||||
1403 | ||||||
1404 | while (size != 0) { | |||||
1405 | l = sizeof(buf); | |||||
1406 | if (l > size) | |||||
1407 | l = size; | |||||
1408 | if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) { | |||||
1409 | error_setg(errp, "Invalid addr 0x%016" PRIx64 "specified", addr)error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Invalid addr 0x%016" "l" "x" "specified", addr); | |||||
1410 | goto exit; | |||||
1411 | } | |||||
1412 | if (fwrite(buf, 1, l, f) != l) { | |||||
1413 | error_set(errp, QERR_IO_ERROR); | |||||
1414 | goto exit; | |||||
1415 | } | |||||
1416 | addr += l; | |||||
1417 | size -= l; | |||||
1418 | } | |||||
1419 | ||||||
1420 | exit: | |||||
1421 | fclose(f); | |||||
1422 | } | |||||
1423 | ||||||
1424 | void qmp_pmemsave(int64_t addr, int64_t size, const char *filename, | |||||
1425 | Error **errp) | |||||
1426 | { | |||||
1427 | FILE *f; | |||||
1428 | uint32_t l; | |||||
1429 | uint8_t buf[1024]; | |||||
1430 | ||||||
1431 | f = fopen(filename, "wb"); | |||||
1432 | if (!f) { | |||||
1433 | error_setg_file_open(errp, errno, filename); | |||||
1434 | return; | |||||
1435 | } | |||||
1436 | ||||||
1437 | while (size != 0) { | |||||
1438 | l = sizeof(buf); | |||||
1439 | if (l > size) | |||||
1440 | l = size; | |||||
1441 | cpu_physical_memory_rw(addr, buf, l, 0); | |||||
1442 | if (fwrite(buf, 1, l, f) != l) { | |||||
1443 | error_set(errp, QERR_IO_ERROR); | |||||
1444 | goto exit; | |||||
1445 | } | |||||
1446 | addr += l; | |||||
1447 | size -= l; | |||||
1448 | } | |||||
1449 | ||||||
1450 | exit: | |||||
1451 | fclose(f); | |||||
1452 | } | |||||
1453 | ||||||
1454 | void qmp_inject_nmi(Error **errp) | |||||
1455 | { | |||||
1456 | #if defined(TARGET_I386) | |||||
1457 | CPUState *cs; | |||||
1458 | ||||||
1459 | CPU_FOREACH(cs) { | |||||
1460 | X86CPU *cpu = X86_CPU(cs); | |||||
1461 | ||||||
1462 | if (!cpu->apic_state) { | |||||
1463 | cpu_interrupt(cs, CPU_INTERRUPT_NMI); | |||||
1464 | } else { | |||||
1465 | apic_deliver_nmi(cpu->apic_state); | |||||
1466 | } | |||||
1467 | } | |||||
1468 | #elif defined(TARGET_S390X) | |||||
1469 | CPUState *cs; | |||||
1470 | S390CPU *cpu; | |||||
1471 | ||||||
1472 | CPU_FOREACH(cs) { | |||||
1473 | cpu = S390_CPU(cs); | |||||
1474 | if (cpu->env.cpu_num == monitor_get_cpu_index()) { | |||||
1475 | if (s390_cpu_restart(S390_CPU(cs)) == -1) { | |||||
1476 | error_set(errp, QERR_UNSUPPORTED); | |||||
1477 | return; | |||||
1478 | } | |||||
1479 | break; | |||||
1480 | } | |||||
1481 | } | |||||
1482 | #else | |||||
1483 | error_set(errp, QERR_UNSUPPORTED); | |||||
1484 | #endif | |||||
1485 | } |