#!/usr/bin/env bash
# group: rw auto
#
# max limits on compression in huge qcow2 files
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

seq=$(basename $0)
echo "QA output created by $seq"

status=1 # failure is the default!

_cleanup()
{
    _cleanup_test_img
}
trap "_cleanup; exit \$status" 0 1 2 3 15

# get standard environment, filters and checks
. ./common.rc
. ./common.filter
. ./common.pattern

_supported_fmt qcow2
_supported_proto file fuse
_supported_os Linux
# To use a refcount width other than 16 bits we need compat=1.1,
# and external data files do not support compressed clusters.
_unsupported_imgopts 'compat=0.10' data_file

echo "== Creating huge file =="

# Sanity check: We require a file system that permits the creation
# of a HUGE (but very sparse) file. tmpfs works, ext4 does not.
_require_large_file 513T

_make_test_img -o 'cluster_size=2M,refcount_bits=1' 513T

echo "== Populating refcounts =="
# We want an image with 256M refcounts * 2M clusters = 512T referenced.
# Each 2M cluster holds 16M refcounts; the refcount table initially uses
# 1 refblock, so we need to add 15 more. The refcount table lives at 2M,
# first refblock at 4M, L2 at 6M, so our remaining additions start at 8M.
# Then, for each refblock, mark it as fully populated.
to_hex() {
    printf %016x\\n $1 | sed 's/\(..\)/\\x\1/g'
}
truncate --size=38m "$TEST_IMG"
entry=$((0x200000))
$QEMU_IO_PROG -f raw -c "w -P 0xff 4m 2m" "$TEST_IMG" | _filter_qemu_io
for i in {1..15}; do
    offs=$((0x600000 + i*0x200000))
    poke_file "$TEST_IMG" $((i*8 + entry)) $(to_hex $offs)
    $QEMU_IO_PROG -f raw -c "w -P 0xff $offs 2m" "$TEST_IMG" | _filter_qemu_io
done

echo "== Checking file before =="
# FIXME: 'qemu-img check' doesn't diagnose refcounts beyond the end of
# the file as leaked clusters
_check_test_img 2>&1 | sed '/^Leaked cluster/d'
stat -c 'image size %s' "$TEST_IMG"

echo "== Trying to write compressed cluster =="
# Given our file size, the next available cluster at 512T lies beyond the
# maximum offset that a compressed 2M cluster can reside in
$QEMU_IO_PROG -c 'w -c 0 2m' "$TEST_IMG" | _filter_qemu_io
# The attempt failed, but ended up allocating a new refblock
stat -c 'image size %s' "$TEST_IMG"

echo "== Writing normal cluster =="
# The failed write should not corrupt the image, so a normal write succeeds
$QEMU_IO_PROG -c 'w 0 2m' "$TEST_IMG" | _filter_qemu_io

echo "== Checking file after =="
# qemu-img now sees the millions of leaked clusters, thanks to the allocations
# at 512T. Undo many of our faked references to speed up the check.
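# ("w -z 5m 1m" clears the tail of the first refblock written at 4m;
# "w -z 8m 30m" wipes the 15 extra refblocks we populated at 8m-38m.)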
$QEMU_IO_PROG -f raw -c "w -z 5m 1m" -c "w -z 8m 30m" "$TEST_IMG" |
    _filter_qemu_io
_check_test_img 2>&1 | sed '/^Leaked cluster/d'

# success, all done
echo "*** done"
rm -f $seq.full
status=0