Code
Starting resizing the disk
[Command] lvresize -L 100.000G /dev/vg0/vsv1020-dTpIHBQ9ZrEegdsR-7kDsaKLO1RaoDHjF 2>&1
[Retval] 5
[Output]: Show
[Command] qemu-nbd -f raw --connect=/dev/nbd0 /dev/vg0/vsv1020-dTpIHBQ9ZrEegdsR-7kDsaKLO1RaoDHjF
[Retval] 0
[Output]: Show
[Command] fdisk -l /dev/nbd0
[Retval] 0
[Output]: Show
[Command] /sbin/kpartx /dev/nbd0
[Retval] 0
[Output]: Hide
Array
(
)
[Command] qemu-nbd --disconnect /dev/nbd0
[Retval] 0
[Output]: Hide
Array
(
[0] => /dev/nbd0 disconnected
)
[Command] growpart
[Retval] 2
[Output]: Hide [Error]: Hide
Array
(
[0] => growpart disk partition
[1] => rewrite partition table so that partition takes up all the space it can
[2] => options:
[3] => -h | --help print Usage and exit
[4] => --free-percent F resize so that specified percentage F of the disk is
[5] => not used in total (not just by this partition). This
[6] => is useful for consumer SSD or SD cards where a small
[7] => percentage unallocated can improve device lifetime.
[8] => --fudge F if part could be resized, but change would be less
[9] => than 'F' bytes, do not resize (default: 1048576)
[10] => -N | --dry-run only report what would be done, show new 'sfdisk -d'
[11] => -v | --verbose increase verbosity / debug
[12] => -u | --update R update the the kernel partition table info after
[13] => growing this requires kernel support and
[14] => 'partx --update'
[15] => R is one of:
[16] => - 'auto' : [default] update partition if possible
[17] => - 'force' : try despite sanity checks (fail on
[18] => failure)
[19] => - 'off' : do not attempt
[20] => - 'on' : fail if sanity checks indicate no
[21] => support
[22] =>
[23] => Example:
[24] => - growpart /dev/sda 1
[25] => Resize partition 1 on /dev/sda
[26] =>
[27] => - growpart --free-percent=10 /dev/sda 1
[28] => Resize partition 1 on /dev/sda so that 10% of the disk is unallocated
[29] => must supply disk and partition-number
)
Array
(
)
No NBD device passed to disconnect - disconnect_nbd()
This is not the correct partition scheme that we support
Array
Array
This partition cannot grow
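Reading the log, the lvresize step already fails (retval 5), kpartx reports no partition mappings at all on /dev/nbd0, and growpart ends up being called without a disk or partition number, which is why it only prints its usage text and "must supply disk and partition-number". In case it helps, this is roughly how I would reproduce the inspection step by hand to see what partition table (if any) is on the volume. This is just a sketch, assuming /dev/nbd1 is free and using the same LV path as in the log:
Code
# Export the logical volume over NBD, the same way the panel does
qemu-nbd -f raw --connect=/dev/nbd1 /dev/vg0/vsv1020-dTpIHBQ9ZrEegdsR-7kDsaKLO1RaoDHjF

# Show the partition table the guest image actually contains
fdisk -l /dev/nbd1
sfdisk -d /dev/nbd1

# List (without creating) the partition mappings kpartx would set up
kpartx -l /dev/nbd1

# Clean up
qemu-nbd --disconnect /dev/nbd1
If fdisk/sfdisk report no partitions, or a layout the panel does not handle, that might explain the "This is not the correct partition scheme that we support" and "This partition cannot grow" messages.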
I searched the forum and found a few posts with similar errors, but none of them had a solution. Below is the output of some other commands that may help track down the error:
df -h
Code
[root@hestia ~]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 4.0M 0 4.0M 0% /dev
tmpfs 32G 0 32G 0% /dev/shm
tmpfs 13G 9.4M 13G 1% /run
efivarfs 512K 245K 263K 49% /sys/firmware/efi/efivars
/dev/sdb3 40G 7.9G 33G 20% /
/dev/sdb2 1014M 214M 801M 22% /boot
/dev/sdb1 511M 2.5M 509M 1% /boot/efi
tmpfs 6.3G 4.0K 6.3G 1% /run/user/0
lsblk
Code
[root@hestia ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
sda 8:0 0 447.1G 0 disk
└─sda1 8:1 0 447.1G 0 part
├─vg1-thin_pool_tmeta 253:0 0 100M 0 lvm
│ └─vg1-thin_pool 253:6 0 400G 0 lvm
└─vg1-thin_pool_tdata 253:1 0 400G 0 lvm
└─vg1-thin_pool 253:6 0 400G 0 lvm
sdb 8:16 0 447.1G 0 disk
├─sdb1 8:17 0 511M 0 part /boot/efi
├─sdb2 8:18 0 1G 0 part /boot
├─sdb3 8:19 0 40G 0 part /
├─sdb4 8:20 0 4G 0 part [SWAP]
├─sdb5 8:21 0 2M 0 part
└─sdb6 8:22 0 401.6G 0 part
├─vg0-thin_pool_tmeta 253:2 0 88M 0 lvm
│ └─vg0-thin_pool-tpool 253:4 0 350G 0 lvm
│ ├─vg0-thin_pool 253:5 0 350G 1 lvm
│ └─vg0-vsv1020--dTpIHBQ9ZrEegdsR--7kDsaKLO1RaoDHjF 253:7 0 100G 0 lvm
└─vg0-thin_pool_tdata 253:3 0 350G 0 lvm
└─vg0-thin_pool-tpool 253:4 0 350G 0 lvm
├─vg0-thin_pool 253:5 0 350G 1 lvm
└─vg0-vsv1020--dTpIHBQ9ZrEegdsR--7kDsaKLO1RaoDHjF 253:7 0 100G 0 lvm
nbd0 43:0 0 0B 0 disk
nbd1 43:16 0 0B 0 disk
nbd2 43:32 0 0B 0 disk
nbd3 43:48 0 0B 0 disk
nbd4 43:64 0 0B 0 disk
nbd5 43:80 0 0B 0 disk
nbd6 43:96 0 0B 0 disk
nbd7 43:112 0 0B 0 disk
nbd8 43:128 0 0B 0 disk
nbd9 43:144 0 0B 0 disk
nbd10 43:160 0 0B 0 disk
nbd11 43:176 0 0B 0 disk
nbd12 43:192 0 0B 0 disk
nbd13 43:208 0 0B 0 disk
nbd14 43:224 0 0B 0 disk
nbd15 43:240 0 0B 0 disk
nbd16 43:256 0 0B 0 disk
nbd17 43:272 0 0B 0 disk
nbd18 43:288 0 0B 0 disk
nbd19 43:304 0 0B 0 disk
nbd20 43:320 0 0B 0 disk
nbd21 43:336 0 0B 0 disk
nbd22 43:352 0 0B 0 disk
nbd23 43:368 0 0B 0 disk
nbd24 43:384 0 0B 0 disk
nbd25 43:400 0 0B 0 disk
nbd26 43:416 0 0B 0 disk
nbd27 43:432 0 0B 0 disk
nbd28 43:448 0 0B 0 disk
nbd29 43:464 0 0B 0 disk
nbd30 43:480 0 0B 0 disk
nbd31 43:496 0 0B 0 disk
nbd32 43:512 0 0B 0 disk
nbd33 43:528 0 0B 0 disk
nbd34 43:544 0 0B 0 disk
nbd35 43:560 0 0B 0 disk
nbd36 43:576 0 0B 0 disk
nbd37 43:592 0 0B 0 disk
nbd38 43:608 0 0B 0 disk
nbd39 43:624 0 0B 0 disk
nbd40 43:640 0 0B 0 disk
nbd41 43:656 0 0B 0 disk
nbd42 43:672 0 0B 0 disk
nbd43 43:688 0 0B 0 disk
nbd44 43:704 0 0B 0 disk
nbd45 43:720 0 0B 0 disk
nbd46 43:736 0 0B 0 disk
nbd47 43:752 0 0B 0 disk
nbd48 43:768 0 0B 0 disk
nbd49 43:784 0 0B 0 disk
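One thing that stands out in the lsblk output: the volume vg0-vsv1020--dTpIHBQ9ZrEegdsR--7kDsaKLO1RaoDHjF is already 100G, which is exactly the size the panel passed to lvresize (-L 100.000G). Could the retval 5 simply be lvresize refusing to resize to the same size? To check the current sizes and repeat the resize as a dry run, I could try something like this (a sketch, assuming the thin pool is vg0/thin_pool as lsblk suggests):
Code
# Current size of the volume and free space in the thin pool
lvs --units g vg0/vsv1020-dTpIHBQ9ZrEegdsR-7kDsaKLO1RaoDHjF
lvs --units g vg0/thin_pool

# Dry run of the same resize to see the actual error message
lvresize --test -L 100G /dev/vg0/vsv1020-dTpIHBQ9ZrEegdsR-7kDsaKLO1RaoDHjF
If lvresize reports that the new size matches the existing size, the LVM side might be fine and the real problem would be in the later partition-growing steps.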
lsmod |grep nbd
Code
[root@hestia ~]# lsmod |grep nbd
nbd 69632 0
systemctl status zzvirtservice
Code
[root@hestia ~]# systemctl status zzvirtservice
● zzvirtservice.service - ZZVirtService Service
Loaded: loaded (/etc/systemd/system/zzvirtservice.service; enabled; preset: disabled)
Active: active (exited) since Sun 2025-05-18 20:21:05 UTC; 1h 24min ago
CPU: 274ms
May 18 20:21:04 hestia.akamura.com.br systemd[1]: Starting ZZVirtService Service...
May 18 20:21:04 hestia.akamura.com.br zzvirtservice[1668]: Sun May 18 20:21:04 UTC 2025: Starting...
May 18 20:21:04 hestia.akamura.com.br zzvirtservice[1670]: Verifying whether MySQL service running:>
May 18 20:21:04 hestia.akamura.com.br zzvirtservice[1670]: Virtualior MySQL service running, contin>
May 18 20:21:04 hestia.akamura.com.br systemctl[1821]: Failed to start ebtables.service: Unit ebtab>
May 18 20:21:05 hestia.akamura.com.br zzvirtservice[1668]: Sun May 18 20:21:05 UTC 2025: Started
May 18 20:21:05 hestia.akamura.com.br zzvirtservice[1870]: /usr/bin/systemctl
May 18 20:21:05 hestia.akamura.com.br systemd[1]: Started ZZVirtService Service.
ps aux |grep -E 'nbd|addvs|sfdisk|gdisk'
Code
[root@hestia ~]# ps aux |grep -E 'nbd|addvs|sfdisk|gdisk'
root 271 0.0 0.0 0 0 ? I< 20:20 0:00 [kworker/u49:0-nbd0-recv]
root 1692 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd0-]
root 1693 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd1-]
root 1694 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd2-]
root 1696 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd3-]
root 1700 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd4-]
root 1702 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd5-]
root 1704 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd6-]
root 1707 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd7-]
root 1708 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd8-]
root 1712 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd9-]
root 1714 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd10]
root 1723 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd11]
root 1726 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd12]
root 1729 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd13]
root 1731 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd14]
root 1733 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd15]
root 1734 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd16]
root 1737 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd17]
root 1739 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd18]
root 1741 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd19]
root 1743 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd20]
root 1745 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd21]
root 1747 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd22]
root 1749 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd23]
root 1751 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd24]
root 1753 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd25]
root 1754 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd26]
root 1758 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd27]
root 1760 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd28]
root 1762 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd29]
root 1763 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd30]
root 1764 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd31]
root 1765 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd32]
root 1766 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd33]
root 1768 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd34]
root 1771 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd35]
root 1776 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd36]
root 1781 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd37]
root 1790 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd38]
root 1792 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd39]
root 1793 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd40]
root 1794 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd41]
root 1799 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd42]
root 1801 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd43]
root 1803 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd44]
root 1805 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd45]
root 1807 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd46]
root 1809 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd47]
root 1810 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd48]
root 1812 0.0 0.0 0 0 ? I< 20:21 0:00 [kworker/R-nbd49]
root 12457 0.0 0.0 3884 2176 pts/0 S+ 21:46 0:00 grep --color=auto -E nbd|addvs|sfdisk|gdisk
ls -lh /sbin/sfdisk
Code
[root@hestia ~]# ls -lh /sbin/sfdisk
-rwxr-xr-x 1 root root 67K Apr 7 2017 /sbin/sfdisk
cat /proc/mdstat
Code
[root@hestia ~]# cat /proc/mdstat
Personalities :
unused devices: <none>
Thanks!!