File:        block/mirror.c
Location:    line 248, column 9
Description: Dereference of null pointer
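The warning points at the QSIMPLEQ_REMOVE_HEAD() call on line 248. The macro's
expansion reads (&s->buf_free)->sqh_first->next.sqe_next unconditionally, and
the analyzer cannot prove that sqh_first is non-NULL at that point. Below is a
minimal sketch of the flagged pattern, using hypothetical stand-in types rather
than the real QSIMPLEQ macros:

    #include <assert.h>
    #include <stddef.h>

    struct buf  { struct buf *next; };
    struct head { struct buf *first; struct buf **last; };

    /* Rough shape of QSIMPLEQ_REMOVE_HEAD after expansion: the head
     * element is dereferenced unconditionally, so calling this on an
     * empty list (first == NULL) would be the reported crash. */
    static void remove_head(struct head *h)
    {
        if ((h->first = h->first->next) == NULL) {
            h->last = &h->first;
        }
    }

    int main(void)
    {
        struct buf  b = { NULL };
        struct head h = { &b, &b.next };

        remove_head(&h);        /* fine: the list held one element */
        assert(h.first == NULL);
        /* remove_head(&h) here would dereference NULL -- the flagged path */
        return 0;
    }

In context the dereference looks safe: mirror_iteration() only reaches the
dequeue loop after its buf_free_count checks guarantee at least nb_chunks free
buffers, so the list should never be empty there. See the annotations at
lines 224 and 248 in the listing below.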
1 | /*
2 |  * Image mirroring
3 |  *
4 |  * Copyright Red Hat, Inc. 2012
5 |  *
6 |  * Authors:
7 |  *  Paolo Bonzini  <pbonzini@redhat.com>
8 |  *
9 |  * This work is licensed under the terms of the GNU LGPL, version 2 or later.
10 |  * See the COPYING.LIB file in the top-level directory.
11 |  *
12 |  */
13 |
14 | #include "trace.h"
15 | #include "block/blockjob.h"
16 | #include "block/block_int.h"
17 | #include "qemu/ratelimit.h"
18 | #include "qemu/bitmap.h"
19 |
20 | #define SLICE_TIME    100000000ULL /* ns */
21 | #define MAX_IN_FLIGHT 16
22 |
23 | /* The mirroring buffer is a list of granularity-sized chunks.
24 |  * Free chunks are organized in a list.
25 |  */
26 | typedef struct MirrorBuffer {
27 |     QSIMPLEQ_ENTRY(MirrorBuffer) next;
28 | } MirrorBuffer;
29 |
30 | typedef struct MirrorBlockJob {
31 |     BlockJob common;
32 |     RateLimit limit;
33 |     BlockDriverState *target;
34 |     BlockDriverState *base;
35 |     bool is_none_mode;
36 |     BlockdevOnError on_source_error, on_target_error;
37 |     bool synced;
38 |     bool should_complete;
39 |     int64_t sector_num;
40 |     int64_t granularity;
41 |     size_t buf_size;
42 |     unsigned long *cow_bitmap;
43 |     BdrvDirtyBitmap *dirty_bitmap;
44 |     HBitmapIter hbi;
45 |     uint8_t *buf;
46 |     QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
47 |     int buf_free_count;
48 |
49 |     unsigned long *in_flight_bitmap;
50 |     int in_flight;
51 |     int ret;
52 | } MirrorBlockJob;
53 |
54 | typedef struct MirrorOp {
55 |     MirrorBlockJob *s;
56 |     QEMUIOVector qiov;
57 |     int64_t sector_num;
58 |     int nb_sectors;
59 | } MirrorOp;
60 |
61 | static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
62 |                                             int error)
63 | {
64 |     s->synced = false;
65 |     if (read) {
66 |         return block_job_error_action(&s->common, s->common.bs,
67 |                                       s->on_source_error, true, error);
68 |     } else {
69 |         return block_job_error_action(&s->common, s->target,
70 |                                       s->on_target_error, false, error);
71 |     }
72 | }
73 |
74 | static void mirror_iteration_done(MirrorOp *op, int ret)
75 | {
76 |     MirrorBlockJob *s = op->s;
77 |     struct iovec *iov;
78 |     int64_t chunk_num;
79 |     int i, nb_chunks, sectors_per_chunk;
80 |
81 |     trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);
82 |
83 |     s->in_flight--;
84 |     iov = op->qiov.iov;
85 |     for (i = 0; i < op->qiov.niov; i++) {
86 |         MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
87 |         QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
88 |         s->buf_free_count++;
89 |     }
90 |
91 |     sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
92 |     chunk_num = op->sector_num / sectors_per_chunk;
93 |     nb_chunks = op->nb_sectors / sectors_per_chunk;
94 |     bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
95 |     if (s->cow_bitmap && ret >= 0) {
96 |         bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
97 |     }
98 |
99 |     g_slice_free(MirrorOp, op);
100 |     qemu_coroutine_enter(s->common.co, NULL);
101 | }
102 |
103 | static void mirror_write_complete(void *opaque, int ret)
104 | {
105 |     MirrorOp *op = opaque;
106 |     MirrorBlockJob *s = op->s;
107 |     if (ret < 0) {
108 |         BlockDriverState *source = s->common.bs;
109 |         BlockErrorAction action;
110 |
111 |         bdrv_set_dirty(source, op->sector_num, op->nb_sectors);
112 |         action = mirror_error_action(s, false, -ret);
113 |         if (action == BDRV_ACTION_REPORT && s->ret >= 0) {
114 |             s->ret = ret;
115 |         }
116 |     }
117 |     mirror_iteration_done(op, ret);
118 | }
119 |
120 | static void mirror_read_complete(void *opaque, int ret)
121 | {
122 |     MirrorOp *op = opaque;
123 |     MirrorBlockJob *s = op->s;
124 |     if (ret < 0) {
125 |         BlockDriverState *source = s->common.bs;
126 |         BlockErrorAction action;
127 |
128 |         bdrv_set_dirty(source, op->sector_num, op->nb_sectors);
129 |         action = mirror_error_action(s, true, -ret);
130 |         if (action == BDRV_ACTION_REPORT && s->ret >= 0) {
131 |             s->ret = ret;
132 |         }
133 |
134 |         mirror_iteration_done(op, ret);
135 |         return;
136 |     }
137 |     bdrv_aio_writev(s->target, op->sector_num, &op->qiov, op->nb_sectors,
138 |                     mirror_write_complete, op);
139 | }
140 |
141 | static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
142 | {
143 |     BlockDriverState *source = s->common.bs;
144 |     int nb_sectors, sectors_per_chunk, nb_chunks;
145 |     int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector;
146 |     MirrorOp *op;
147 |
148 |     s->sector_num = hbitmap_iter_next(&s->hbi);
149 |     if (s->sector_num < 0) {
150 |         bdrv_dirty_iter_init(source, s->dirty_bitmap, &s->hbi);
151 |         s->sector_num = hbitmap_iter_next(&s->hbi);
152 |         trace_mirror_restart_iter(s,
153 |                                   bdrv_get_dirty_count(source, s->dirty_bitmap));
154 |         assert(s->sector_num >= 0);
155 |     }
156 |
157 |     hbitmap_next_sector = s->sector_num;
158 |     sector_num = s->sector_num;
159 |     sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
160 |     end = s->common.len >> BDRV_SECTOR_BITS;
161 |
162 |     /* Extend the QEMUIOVector to include all adjacent blocks that will
163 |      * be copied in this operation.
164 |      *
165 |      * We have to do this if we have no backing file yet in the destination,
166 |      * and the cluster size is very large.  Then we need to do COW ourselves.
167 |      * The first time a cluster is copied, copy it entirely.  Note that,
168 |      * because both the granularity and the cluster size are powers of two,
169 |      * the number of sectors to copy cannot exceed one cluster.
170 |      *
171 |      * We also want to extend the QEMUIOVector to include more adjacent
172 |      * dirty blocks if possible, to limit the number of I/O operations and
173 |      * run efficiently even with a small granularity.
174 |      */
175 |     nb_chunks = 0;
176 |     nb_sectors = 0;
177 |     next_sector = sector_num;
178 |     next_chunk = sector_num / sectors_per_chunk;
179 |
180 |     /* Wait for I/O to this cluster (from a previous iteration) to be done. */
181 |     while (test_bit(next_chunk, s->in_flight_bitmap)) {
182 |         trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
183 |         qemu_coroutine_yield();
184 |     }
185 |
186 |     do {
187 |         int added_sectors, added_chunks;
188 |
189 |         if (!bdrv_get_dirty(source, s->dirty_bitmap, next_sector) ||
190 |             test_bit(next_chunk, s->in_flight_bitmap)) {
191 |             assert(nb_sectors > 0);
192 |             break;
193 |         }
194 |
195 |         added_sectors = sectors_per_chunk;
196 |         if (s->cow_bitmap && !test_bit(next_chunk, s->cow_bitmap)) {
197 |             bdrv_round_to_clusters(s->target,
198 |                                    next_sector, added_sectors,
199 |                                    &next_sector, &added_sectors);
200 |
201 |             /* On the first iteration, the rounding may make us copy
202 |              * sectors before the first dirty one.
203 |              */
204 |             if (next_sector < sector_num) {
205 |                 assert(nb_sectors == 0);
206 |                 sector_num = next_sector;
207 |                 next_chunk = next_sector / sectors_per_chunk;
208 |             }
209 |         }
210 |
211 |         added_sectors = MIN(added_sectors, end - (sector_num + nb_sectors));
212 |         added_chunks = (added_sectors + sectors_per_chunk - 1) / sectors_per_chunk;
213 |
214 |         /* When doing COW, it may happen that there is not enough space for
215 |          * a full cluster.  Wait if that is the case.
216 |          */
217 |         while (nb_chunks == 0 && s->buf_free_count < added_chunks) {
218 |             trace_mirror_yield_buf_busy(s, nb_chunks, s->in_flight);
219 |             qemu_coroutine_yield();
220 |         }
221 |         if (s->buf_free_count < nb_chunks + added_chunks) {
222 |             trace_mirror_break_buf_busy(s, nb_chunks, s->in_flight);
223 |             break;
224 |         }
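    |         /* [annotation, not in the original source] Together with the
    |          * wait loop above, this check maintains the invariant
    |          * buf_free_count >= nb_chunks, which is what keeps the free
    |          * list non-empty in the dequeue loop at line 248 below.  */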
225 |
226 |         /* We have enough free space to copy these sectors.  */
227 |         bitmap_set(s->in_flight_bitmap, next_chunk, added_chunks);
228 |
229 |         nb_sectors += added_sectors;
230 |         nb_chunks += added_chunks;
231 |         next_sector += added_sectors;
232 |         next_chunk += added_chunks;
233 |     } while (next_sector < end);
234 |
235 |     /* Allocate a MirrorOp that is used as an AIO callback.  */
236 |     op = g_slice_new(MirrorOp);
237 |     op->s = s;
238 |     op->sector_num = sector_num;
239 |     op->nb_sectors = nb_sectors;
240 |
241 |     /* Now make a QEMUIOVector taking enough granularity-sized chunks
242 |      * from s->buf_free.
243 |      */
244 |     qemu_iovec_init(&op->qiov, nb_chunks);
245 |     next_sector = sector_num;
246 |     while (nb_chunks-- > 0) {
247 |         MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
248 |         QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
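    |         /* [annotation, not in the original source] ^ line 248, column 9:
    |          * the analyzer flags QSIMPLEQ_REMOVE_HEAD because its expansion
    |          * reads (&s->buf_free)->sqh_first->next.sqe_next and it cannot
    |          * prove sqh_first != NULL.  The buf_free_count accounting above
    |          * keeps at least nb_chunks buffers queued at this point, so this
    |          * appears to be a false positive; a hypothetical
    |          * assert(buf != NULL) before the removal would document the
    |          * invariant.  */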
249 |         s->buf_free_count--;
250 |         qemu_iovec_add(&op->qiov, buf, s->granularity);
251 |
252 |         /* Advance the HBitmapIter in parallel, so that we do not examine
253 |          * the same sector twice.
254 |          */
255 |         if (next_sector > hbitmap_next_sector
256 |             && bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
257 |             hbitmap_next_sector = hbitmap_iter_next(&s->hbi);
258 |         }
259 |
260 |         next_sector += sectors_per_chunk;
261 |     }
262 |
263 |     bdrv_reset_dirty(source, sector_num, nb_sectors);
264 |
265 |     /* Copy the dirty cluster.  */
266 |     s->in_flight++;
267 |     trace_mirror_one_iteration(s, sector_num, nb_sectors);
268 |     bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
269 |                    mirror_read_complete, op);
270 | }
271 |
272 | static void mirror_free_init(MirrorBlockJob *s)
273 | {
274 |     int granularity = s->granularity;
275 |     size_t buf_size = s->buf_size;
276 |     uint8_t *buf = s->buf;
277 |
278 |     assert(s->buf_free_count == 0);
279 |     QSIMPLEQ_INIT(&s->buf_free);
280 |     while (buf_size != 0) {
281 |         MirrorBuffer *cur = (MirrorBuffer *)buf;
282 |         QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
283 |         s->buf_free_count++;
284 |         buf_size -= granularity;
285 |         buf += granularity;
286 |     }
287 | }
288 |
289 | static void mirror_drain(MirrorBlockJob *s)
290 | {
291 |     while (s->in_flight > 0) {
292 |         qemu_coroutine_yield();
293 |     }
294 | }
295 |
296 | static void coroutine_fn mirror_run(void *opaque)
297 | {
298 |     MirrorBlockJob *s = opaque;
299 |     BlockDriverState *bs = s->common.bs;
300 |     int64_t sector_num, end, sectors_per_chunk, length;
301 |     uint64_t last_pause_ns;
302 |     BlockDriverInfo bdi;
303 |     char backing_filename[1024];
304 |     int ret = 0;
305 |     int n;
306 |
307 |     if (block_job_is_cancelled(&s->common)) {
308 |         goto immediate_exit;
309 |     }
310 |
311 |     s->common.len = bdrv_getlength(bs);
312 |     if (s->common.len <= 0) {
313 |         block_job_completed(&s->common, s->common.len);
314 |         return;
315 |     }
316 |
317 |     length = (bdrv_getlength(bs) + s->granularity - 1) / s->granularity;
318 |     s->in_flight_bitmap = bitmap_new(length);
319 |
320 |     /* If we have no backing file yet in the destination, we cannot let
321 |      * the destination do COW.  Instead, we copy sectors around the
322 |      * dirty data if needed.  We need a bitmap to do that.
323 |      */
324 |     bdrv_get_backing_filename(s->target, backing_filename,
325 |                               sizeof(backing_filename));
326 |     if (backing_filename[0] && !s->target->backing_hd) {
327 |         bdrv_get_info(s->target, &bdi);
328 |         if (s->granularity < bdi.cluster_size) {
329 |             s->buf_size = MAX(s->buf_size, bdi.cluster_size);
330 |             s->cow_bitmap = bitmap_new(length);
331 |         }
332 |     }
333 |
334 |     end = s->common.len >> BDRV_SECTOR_BITS;
335 |     s->buf = qemu_blockalign(bs, s->buf_size);
336 |     sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
337 |     mirror_free_init(s);
338 |
339 |     if (!s->is_none_mode) {
340 |         /* First part, loop on the sectors and initialize the dirty bitmap.  */
341 |         BlockDriverState *base = s->base;
342 |         for (sector_num = 0; sector_num < end; ) {
343 |             int64_t next = (sector_num | (sectors_per_chunk - 1)) + 1;
344 |             ret = bdrv_is_allocated_above(bs, base,
345 |                                           sector_num, next - sector_num, &n);
346 |
347 |             if (ret < 0) {
348 |                 goto immediate_exit;
349 |             }
350 |
351 |             assert(n > 0);
352 |             if (ret == 1) {
353 |                 bdrv_set_dirty(bs, sector_num, n);
354 |                 sector_num = next;
355 |             } else {
356 |                 sector_num += n;
357 |             }
358 |         }
359 |     }
360 |
361 |     bdrv_dirty_iter_init(bs, s->dirty_bitmap, &s->hbi);
362 |     last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
363 |     for (;;) {
364 |         uint64_t delay_ns;
365 |         int64_t cnt;
366 |         bool should_complete;
367 |
368 |         if (s->ret < 0) {
369 |             ret = s->ret;
370 |             goto immediate_exit;
371 |         }
372 |
373 |         cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap);
374 |
375 |         /* Note that even when no rate limit is applied we need to yield
376 |          * periodically with no pending I/O so that qemu_aio_flush() returns.
377 |          * We do so every SLICE_TIME nanoseconds, or when there is an error,
378 |          * or when the source is clean, whichever comes first.
379 |          */
380 |         if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
381 |             s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
382 |             if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
383 |                 (cnt == 0 && s->in_flight > 0)) {
384 |                 trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
385 |                 qemu_coroutine_yield();
386 |                 continue;
387 |             } else if (cnt != 0) {
388 |                 mirror_iteration(s);
389 |                 continue;
390 |             }
391 |         }
392 |
393 |         should_complete = false;
394 |         if (s->in_flight == 0 && cnt == 0) {
395 |             trace_mirror_before_flush(s);
396 |             ret = bdrv_flush(s->target);
397 |             if (ret < 0) {
398 |                 if (mirror_error_action(s, false, -ret) == BDRV_ACTION_REPORT) {
399 |                     goto immediate_exit;
400 |                 }
401 |             } else {
402 |                 /* We're out of the streaming phase.  From now on, if the job
403 |                  * is cancelled we will actually complete all pending I/O and
404 |                  * report completion.  This way, block-job-cancel will leave
405 |                  * the target in a consistent state.
406 |                  */
407 |                 s->common.offset = end * BDRV_SECTOR_SIZE;
408 |                 if (!s->synced) {
409 |                     block_job_ready(&s->common);
410 |                     s->synced = true;
411 |                 }
412 |
413 |                 should_complete = s->should_complete ||
414 |                     block_job_is_cancelled(&s->common);
415 |                 cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap);
416 |             }
417 |         }
418 |
419 |         if (cnt == 0 && should_complete) {
420 |             /* The dirty bitmap is not updated while operations are pending.
421 |              * If we're about to exit, wait for pending operations before
422 |              * calling bdrv_get_dirty_count(bs), or we may exit while the
423 |              * source has dirty data to copy!
424 |              *
425 |              * Note that I/O can be submitted by the guest while
426 |              * mirror_populate runs.
427 |              */
428 |             trace_mirror_before_drain(s, cnt);
429 |             bdrv_drain_all();
430 |             cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap);
431 |         }
432 |
433 |         ret = 0;
434 |         trace_mirror_before_sleep(s, cnt, s->synced);
435 |         if (!s->synced) {
436 |             /* Publish progress */
437 |             s->common.offset = (end - cnt) * BDRV_SECTOR_SIZE;
438 |
439 |             if (s->common.speed) {
440 |                 delay_ns = ratelimit_calculate_delay(&s->limit, sectors_per_chunk);
441 |             } else {
442 |                 delay_ns = 0;
443 |             }
444 |
445 |             block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
446 |             if (block_job_is_cancelled(&s->common)) {
447 |                 break;
448 |             }
449 |         } else if (!should_complete) {
450 |             delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
451 |             block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
452 |         } else if (cnt == 0) {
453 |             /* The two disks are in sync.  Exit and report successful
454 |              * completion.
455 |              */
456 |             assert(QLIST_EMPTY(&bs->tracked_requests));
457 |             s->common.cancelled = false;
458 |             break;
459 |         }
460 |         last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
461 |     }
462 |
463 | immediate_exit:
464 |     if (s->in_flight > 0) {
465 |         /* We get here only if something went wrong.  Either the job failed,
466 |          * or it was cancelled prematurely so that we do not guarantee that
467 |          * the target is a copy of the source.
468 |          */
469 |         assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
470 |         mirror_drain(s);
471 |     }
472 |
473 |     assert(s->in_flight == 0);
474 |     qemu_vfree(s->buf);
475 |     g_free(s->cow_bitmap);
476 |     g_free(s->in_flight_bitmap);
477 |     bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);
478 |     bdrv_iostatus_disable(s->target);
479 |     if (s->should_complete && ret == 0) {
480 |         if (bdrv_get_flags(s->target) != bdrv_get_flags(s->common.bs)) {
481 |             bdrv_reopen(s->target, bdrv_get_flags(s->common.bs), NULL);
482 |         }
483 |         bdrv_swap(s->target, s->common.bs);
484 |         if (s->common.driver->job_type == BLOCK_JOB_TYPE_COMMIT) {
485 |             /* drop the bs loop chain formed by the swap: break the loop then
486 |              * trigger the unref from the top one */
487 |             BlockDriverState *p = s->base->backing_hd;
488 |             s->base->backing_hd = NULL;
489 |             bdrv_unref(p);
490 |         }
491 |     }
492 |     bdrv_unref(s->target);
493 |     block_job_completed(&s->common, ret);
494 | }
495 |
496 | static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
497 | {
498 |     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
499 |
500 |     if (speed < 0) {
501 |         error_set(errp, QERR_INVALID_PARAMETER, "speed");
502 |         return;
503 |     }
504 |     ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
505 | }
506 |
507 | static void mirror_iostatus_reset(BlockJob *job)
508 | {
509 |     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
510 |
511 |     bdrv_iostatus_reset(s->target);
512 | }
513 |
514 | static void mirror_complete(BlockJob *job, Error **errp)
515 | {
516 |     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
517 |     Error *local_err = NULL;
518 |     int ret;
519 |
520 |     ret = bdrv_open_backing_file(s->target, NULL, &local_err);
521 |     if (ret < 0) {
522 |         char backing_filename[PATH_MAX];
523 |         bdrv_get_full_backing_filename(s->target, backing_filename,
524 |                                        sizeof(backing_filename));
525 |         error_propagate(errp, local_err);
526 |         return;
527 |     }
528 |     if (!s->synced) {
529 |         error_set(errp, QERR_BLOCK_JOB_NOT_READY, job->bs->device_name);
530 |         return;
531 |     }
532 |
533 |     s->should_complete = true;
534 |     block_job_resume(job);
535 | }
536 |
537 | static const BlockJobDriver mirror_job_driver = {
538 |     .instance_size  = sizeof(MirrorBlockJob),
539 |     .job_type       = BLOCK_JOB_TYPE_MIRROR,
540 |     .set_speed      = mirror_set_speed,
541 |     .iostatus_reset = mirror_iostatus_reset,
542 |     .complete       = mirror_complete,
543 | };
544 |
545 | static const BlockJobDriver commit_active_job_driver = {
546 |     .instance_size  = sizeof(MirrorBlockJob),
547 |     .job_type       = BLOCK_JOB_TYPE_COMMIT,
548 |     .set_speed      = mirror_set_speed,
549 |     .iostatus_reset
550 |                     = mirror_iostatus_reset,
551 |     .complete       = mirror_complete,
552 | };
553 |
554 | static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
555 |                              int64_t speed, int64_t granularity,
556 |                              int64_t buf_size,
557 |                              BlockdevOnError on_source_error,
558 |                              BlockdevOnError on_target_error,
559 |                              BlockDriverCompletionFunc *cb,
560 |                              void *opaque, Error **errp,
561 |                              const BlockJobDriver *driver,
562 |                              bool is_none_mode, BlockDriverState *base)
563 | {
564 |     MirrorBlockJob *s;
565 |
566 |     if (granularity == 0) {
567 |         /* Choose the default granularity based on the target file's cluster
568 |          * size, clamped between 4k and 64k.  */
569 |         BlockDriverInfo bdi;
570 |         if (bdrv_get_info(target, &bdi) >= 0 && bdi.cluster_size != 0) {
571 |             granularity = MAX(4096, bdi.cluster_size);
572 |             granularity = MIN(65536, granularity);
573 |         } else {
574 |             granularity = 65536;
575 |         }
576 |     }
577 |
578 |     assert((granularity & (granularity - 1)) == 0);
579 |
580 |     if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
581 |          on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
582 |         !bdrv_iostatus_is_enabled(bs)) {
583 |         error_set(errp, QERR_INVALID_PARAMETER, "on-source-error");
584 |         return;
585 |     }
586 |
587 |
588 |     s = block_job_create(driver, bs, speed, cb, opaque, errp);
589 |     if (!s) {
590 |         return;
591 |     }
592 |
593 |     s->on_source_error = on_source_error;
594 |     s->on_target_error = on_target_error;
595 |     s->target = target;
596 |     s->is_none_mode = is_none_mode;
597 |     s->base = base;
598 |     s->granularity = granularity;
599 |     s->buf_size = MAX(buf_size, granularity);
600 |
601 |     s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity);
602 |     bdrv_set_enable_write_cache(s->target, true);
603 |     bdrv_set_on_error(s->target, on_target_error, on_target_error);
604 |     bdrv_iostatus_enable(s->target);
605 |     s->common.co = qemu_coroutine_create(mirror_run);
606 |     trace_mirror_start(bs, s, s->common.co, opaque);
607 |     qemu_coroutine_enter(s->common.co, s);
608 | }
609 |
610 | void mirror_start(BlockDriverState *bs, BlockDriverState *target,
611 |                   int64_t speed, int64_t granularity, int64_t buf_size,
612 |                   MirrorSyncMode mode, BlockdevOnError on_source_error,
613 |                   BlockdevOnError on_target_error,
614 |                   BlockDriverCompletionFunc *cb,
615 |                   void *opaque, Error **errp)
616 | {
617 |     bool is_none_mode;
618 |     BlockDriverState *base;
619 |
620 |     is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
621 |     base = mode == MIRROR_SYNC_MODE_TOP ? bs->backing_hd : NULL;
622 |     mirror_start_job(bs, target, speed, granularity, buf_size,
623 |                      on_source_error, on_target_error, cb, opaque, errp,
624 |                      &mirror_job_driver, is_none_mode, base);
625 | }
626 |
627 | void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
628 |                          int64_t speed,
629 |                          BlockdevOnError on_error,
630 |                          BlockDriverCompletionFunc *cb,
631 |                          void *opaque, Error **errp)
632 | {
633 |     if (bdrv_reopen(base, bs->open_flags, errp)) {
634 |         return;
635 |     }
636 |     bdrv_ref(base);
637 |     mirror_start_job(bs, base, speed, 0, 0,
638 |                      on_error, on_error, cb, opaque, errp,
639 |                      &commit_active_job_driver, false, base);
640 | }