Wiping the disks on the root server
First, check what is still there:
root@grml ~ # mdadm --detail --scan
ARRAY /dev/md/2 metadata=1.0 name=any:2 UUID=54289a6b:662e4b17:1c386489:6405f578
ARRAY /dev/md/1 metadata=1.0 name=any:1 UUID=cc32f60d:da3320f2:16990c05:0b6fa77d
ARRAY /dev/md/0 metadata=1.0 name=any:0 UUID=0ab0005c:f4e003a1:4c63d6ca:0162ef6c
root@grml ~ # ls -l /dev/disk/by-uuid
total 0
lrwxrwxrwx 1 root root 9 Jan 15 07:28 652a2a86-65cb-425c-9760-bba641245f0d -> ../../md2
lrwxrwxrwx 1 root root 9 Jan 15 07:28 77506b7e-c0d2-4712-8461-e94f0ffe1c78 -> ../../md1
lrwxrwxrwx 1 root root 9 Jan 15 07:28 f735ff65-bc3c-4685-b987-204b3a5a97aa -> ../../md0
root@grml ~ # mdadm --stop /dev/md0
mdadm: stopped /dev/md0
root@grml ~ # mdadm --stop /dev/md1
mdadm: stopped /dev/md1
root@grml ~ # mdadm --stop /dev/md2
mdadm: stopped /dev/md2
root@grml ~ # mdadm --zero-superblock /dev/sda3 /dev/sdb3
root@grml ~ # mdadm --zero-superblock /dev/sda2 /dev/sdb2
root@grml ~ # mdadm --zero-superblock /dev/sda1 /dev/sdb1
root@grml ~ # blkid
/dev/sda1: PTUUID="e52ec6bf" PTTYPE="dos" PARTUUID="00032cd8-01"
/dev/sdb1: PARTUUID="000e29e1-01"
So the RAID superblocks are all gone now (note, however, that blkid still reports a stale DOS partition-table signature on /dev/sda1)
root@grml ~ # blkid -o list
device fs_type label mount point UUID
-------------------------------------------------------------------------------------------------------------------------------------------------------------
/dev/sda1 (not mounted)
/dev/sda2 (not mounted)
/dev/sda3 (not mounted)
/dev/sdb1 (not mounted)
/dev/sdb2 (not mounted)
/dev/sdb3 (not mounted)
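Note: --zero-superblock only removes the md metadata, which with metadata=1.0 sits at the end of each partition; old filesystem and partition-table signatures in the data area survive it (blkid above still shows PTTYPE="dos" on /dev/sda1). A minimal sketch to wipe those leftovers as well, assuming the member partitions sda1-3 and sdb1-3 from above:
# wipe any remaining filesystem/partition-table signatures on the members
for part in /dev/sd{a,b}{1,2,3}; do
    wipefs --all "$part"
done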
.
Creating the 3 RAID1 arrays
These are the 3 commands:
mdadm --create /dev/md0 --level=1 --raid-devices=2 /dev/sda1 /dev/sdb1 --metadata=1.0 -N any:0
mdadm --create /dev/md1 --level=1 --raid-devices=2 /dev/sda2 /dev/sdb2 --metadata=1.0 -N any:1
mdadm --create /dev/md2 --level=1 --raid-devices=2 /dev/sda3 /dev/sdb3 --metadata=1.0 -N any:2
root@grml ~ # mdadm --create /dev/md0 --level=1 --raid-devices=2 /dev/sda1 /dev/sdb1 --metadata=1.0 -N any:0
mdadm: /dev/sda1 appears to be part of a raid array:
level=raid0 devices=0 ctime=Thu Jan 1 00:00:00 1970
Continue creating array? y
mdadm: array /dev/md0 started.
root@grml ~ # mdadm --create /dev/md1 --level=1 --raid-devices=2 /dev/sda2 /dev/sdb2 --metadata=1.0 -N any:1
mdadm: array /dev/md1 started.
root@grml ~ # mdadm --create /dev/md2 --level=1 --raid-devices=2 /dev/sda3 /dev/sdb3 --metadata=1.0 -N any:2
mdadm: array /dev/md2 started.
root@grml ~ # blkid
/dev/sda1: UUID="144c71f9-edb0-bf56-1f90-6a459683edce" UUID_SUB="327e58c0-c1ae-3d4b-61e7-813422f73e87" LABEL="any:0" TYPE="linux_raid_member" PARTUUID="00032cd8-01"
/dev/sda2: UUID="2c8c7bae-5ff4-d738-fb75-bcf93cb4a6d6" UUID_SUB="283e5a76-632f-40c3-6c47-c8f63fab628a" LABEL="any:1" TYPE="linux_raid_member" PARTUUID="00032cd8-02"
/dev/sda3: UUID="29ece338-4dfa-cc16-4512-233506a87de1" UUID_SUB="a03a9a51-36e6-5f5f-48ae-cd94a7300ca0" LABEL="any:2" TYPE="linux_raid_member" PARTUUID="00032cd8-03"
/dev/sdb1: UUID="144c71f9-edb0-bf56-1f90-6a459683edce" UUID_SUB="c80957ad-ffb5-ea0a-ba46-1588f008927a" LABEL="any:0" TYPE="linux_raid_member" PARTUUID="000e29e1-01"
/dev/sdb2: UUID="2c8c7bae-5ff4-d738-fb75-bcf93cb4a6d6" UUID_SUB="980eafc4-1cc3-530d-6cbb-ad020399b3de" LABEL="any:1" TYPE="linux_raid_member" PARTUUID="000e29e1-02"
/dev/sdb3: UUID="29ece338-4dfa-cc16-4512-233506a87de1" UUID_SUB="1ed75269-2ccb-742c-6efe-9c857d80797e" LABEL="any:2" TYPE="linux_raid_member" PARTUUID="000e29e1-03"
/dev/md0p1: LABEL="any:0" UUID="c237746c-5504-4301-96ab-94dd95b0da6b" TYPE="swap" PARTUUID="e52ec6bf-01"
/dev/md1p1: LABEL="any:1" UUID="419273e2-675e-486a-9387-7ad1305bcedb" TYPE="ext4" PARTUUID="e44dc355-01"
/dev/md2p1: LABEL="any:2" UUID="982c030c-6da0-4adb-80fc-04af9bdfee6b" TYPE="ext4" PARTUUID="1859f603-01"
/dev/sda5: UUID="a6fd1988-87a3-429b-9292-60833d72497a" TYPE="ext4" PARTUUID="00032cd8-05"
/dev/sdb5: UUID="4616245c-0306-4a0e-8d90-d31778f8999c" TYPE="ext4" PARTUUID="000e29e1-05"
/dev/md2: PTUUID="1859f603" PTTYPE="dos"
/dev/md1: PTUUID="e44dc355" PTTYPE="dos"
/dev/md0: PTUUID="e52ec6bf" PTTYPE="dos"
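Note that /dev/md0 reports PTUUID="e52ec6bf", i.e. the same DOS partition table that blkid showed on /dev/sda1 right after zeroing the superblocks: the old contents are still inside the new arrays, which is where the md0p1/md1p1/md2p1 nodes come from. A quick check (not part of the original session) would be:
# show the stale partition table that survived inside the new array
fdisk -l /dev/md0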
root@grml ~ # blkid -o list
device fs_type label mount point UUID
------------------------------------------------------------------------------------------------------------------
/dev/loop0 squashfs /lib/live/mount/rootfs/grml64-full_testing.squashfs
/dev/sda1 linux_raid_member any:0 (in use) 144c71f9-edb0-bf56-1f90-6a459683edce
/dev/sda2 linux_raid_member any:1 (in use) 2c8c7bae-5ff4-d738-fb75-bcf93cb4a6d6
/dev/sda3 linux_raid_member any:2 (in use) 29ece338-4dfa-cc16-4512-233506a87de1
/dev/sdb1 linux_raid_member any:0 (in use) 144c71f9-edb0-bf56-1f90-6a459683edce
/dev/sdb2 linux_raid_member any:1 (in use) 2c8c7bae-5ff4-d738-fb75-bcf93cb4a6d6
/dev/sdb3 linux_raid_member any:2 (in use) 29ece338-4dfa-cc16-4512-233506a87de1
/dev/md0p1 swap any:0 (not mounted) c237746c-5504-4301-96ab-94dd95b0da6b
/dev/md1p1 ext4 any:1 (not mounted) 419273e2-675e-486a-9387-7ad1305bcedb
/dev/md2p1 ext4 any:2 (not mounted) 982c030c-6da0-4adb-80fc-04af9bdfee6b
/dev/sda5 ext4 (not mounted) a6fd1988-87a3-429b-9292-60833d72497a
/dev/sdb5 ext4 (not mounted) 4616245c-0306-4a0e-8d90-d31778f8999c
/dev/md2 (not mounted)
/dev/md1 (not mounted)
/dev/md0 (not mounted)
root@grml ~ # mdadm --detail --scan
ARRAY /dev/md2 metadata=1.0 name=any:2 UUID=29ece338:4dfacc16:45122335:06a87de1
ARRAY /dev/md1 metadata=1.0 name=any:1 UUID=2c8c7bae:5ff4d738:fb75bcf9:3cb4a6d6
ARRAY /dev/md0 metadata=1.0 name=any:0 UUID=144c71f9:edb0bf56:1f906a45:9683edce
So this doesn't work: the new arrays still show the old partition tables (hence md0p1, md1p1 and md2p1 above), because --zero-superblock and --create only touch the md metadata, which with metadata=1.0 sits at the end of each member, and leave the old data area intact.
Repeated the whole procedure, but it doesn't work that way either.
So: a reboot, and then display the state again.
root@grml ~ # mdadm --detail --scan
ARRAY /dev/md/2 metadata=1.0 name=any:2 UUID=29ece338:4dfacc16:45122335:06a87de1
ARRAY /dev/md/1 metadata=1.0 name=any:1 UUID=2c8c7bae:5ff4d738:fb75bcf9:3cb4a6d6
ARRAY /dev/md/0 metadata=1.0 name=any:0 UUID=144c71f9:edb0bf56:1f906a45:9683edce
root@grml ~ # blkid -o list
device fs_type label mount point UUID
---------------------------------------------------------------------------------------------------------------------------
/dev/sdb1 linux_raid_member any:0 (in use) 144c71f9-edb0-bf56-1f90-6a459683edce
/dev/sdb2 linux_raid_member any:1 (in use) 2c8c7bae-5ff4-d738-fb75-bcf93cb4a6d6
/dev/sdb3 linux_raid_member any:2 (in use) 29ece338-4dfa-cc16-4512-233506a87de1
/dev/sdb5 ext4 (not mounted) 4616245c-0306-4a0e-8d90-d31778f8999c
/dev/sda1 linux_raid_member any:0 (in use) 144c71f9-edb0-bf56-1f90-6a459683edce
/dev/sda2 linux_raid_member any:1 (in use) 2c8c7bae-5ff4-d738-fb75-bcf93cb4a6d6
/dev/sda3 linux_raid_member any:2 (in use) 29ece338-4dfa-cc16-4512-233506a87de1
/dev/sda5 ext4 (not mounted) a6fd1988-87a3-429b-9292-60833d72497a
/dev/md2p1 ext4 any:2 (not mounted) 982c030c-6da0-4adb-80fc-04af9bdfee6b
/dev/md1p1 ext4 any:1 (not mounted) 419273e2-675e-486a-9387-7ad1305bcedb
/dev/md0p1 swap any:0 (not mounted) c237746c-5504-4301-96ab-94dd95b0da6b
/dev/md2 (not mounted)
/dev/md1 (not mounted)
/dev/md0 (not mounted)
root@grml ~ # mdadm --stop /dev/md0p1
mdadm: stopped /dev/md0p1
root@grml ~ # mdadm --stop /dev/md1p1
mdadm: stopped /dev/md1p1
root@grml ~ # mdadm --stop /dev/md2p1
mdadm: stopped /dev/md2p1
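mdadm accepted the partition nodes here and stopped the underlying arrays; the more usual form targets the array devices themselves:
mdadm --stop /dev/md0
mdadm --stop /dev/md1
mdadm --stop /dev/md2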
root@grml ~ # blkid -o list
device fs_type label mount point UUID
---------------------------------------------------------------------------------------------------------------------------
/dev/sdb1 linux_raid_member any:0 (not mounted) 144c71f9-edb0-bf56-1f90-6a459683edce
/dev/sdb2 linux_raid_member any:1 (not mounted) 2c8c7bae-5ff4-d738-fb75-bcf93cb4a6d6
/dev/sdb3 linux_raid_member any:2 (not mounted) 29ece338-4dfa-cc16-4512-233506a87de1
/dev/sdb5 ext4 (not mounted) 4616245c-0306-4a0e-8d90-d31778f8999c
/dev/sda1 linux_raid_member any:0 (not mounted) 144c71f9-edb0-bf56-1f90-6a459683edce
/dev/sda2 linux_raid_member any:1 (not mounted) 2c8c7bae-5ff4-d738-fb75-bcf93cb4a6d6
/dev/sda3 linux_raid_member any:2 (not mounted) 29ece338-4dfa-cc16-4512-233506a87de1
/dev/sda5 ext4 (not mounted) a6fd1988-87a3-429b-9292-60833d72497a
Restarting the RAID
root@grml ~ # mdadm --assemble --scan
mdadm: /dev/md/0 has been started with 2 drives.
mdadm: /dev/md/1 has been started with 2 drives.
mdadm: /dev/md/2 has been started with 2 drives.
and verify:
root@grml ~ # cat /proc/mdstat
Personalities : [raid1]
md0 : active (auto-read-only) raid1 sda1[0] sdb1[1]
8392576 blocks super 1.0 [2/2] [UU]
md1 : active (auto-read-only) raid1 sda2[0] sdb2[1]
2047936 blocks super 1.0 [2/2] [UU]
md2 : active (auto-read-only) raid1 sda3[0] sdb3[1]
22527872 blocks super 1.0 [2/2] [UU]
unused devices: <none>
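"active (auto-read-only)" means the kernel keeps a freshly assembled array read-only until the first write. To switch the arrays to read-write immediately, a sketch:
# clear the auto-read-only state right away
mdadm --readwrite /dev/md0
mdadm --readwrite /dev/md1
mdadm --readwrite /dev/md2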
Also found this comment elsewhere:
You can activate Raid md0 with the below command
mdadm -A /dev/md0
and this command to update mdadm.conf file
mdadm --examine --scan >> /etc/mdadm/mdadm.conf
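On a Debian-style system, updating mdadm.conf is typically followed by rebuilding the initramfs, so the arrays are assembled with the new UUIDs at boot; a sketch (paths assumed):
mdadm --examine --scan >> /etc/mdadm/mdadm.conf
update-initramfs -u    # make the new array UUIDs known to the initramfs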
.
The current state on the Musterserver (the reference server)
[xen9-muster-server - root] ~ $ blkid -o list
device fs_type label mount point UUID
------------------------------------------------------------------------------------------------
/dev/sda1 linux_raid_member any:0 (in use) e298c15c-28a5-786c-c9af-f6ba512da00d
/dev/sda2 linux_raid_member any:1 (in use) 408cb03d-9048-fa54-d49d-e72538e59231
/dev/sda3 linux_raid_member any:2 (in use) 8f6e045d-6fa1-55cf-6a7d-30084fa7e575
/dev/sdb1 linux_raid_member any:0 (in use) e298c15c-28a5-786c-c9af-f6ba512da00d
/dev/sdb2 linux_raid_member any:1 (in use) 408cb03d-9048-fa54-d49d-e72538e59231
/dev/sdb3 linux_raid_member any:2 (in use) 8f6e045d-6fa1-55cf-6a7d-30084fa7e575
/dev/md0 swap any:0 <swap> a48e718f-2539-4498-a509-0c4d55718b63
/dev/md1 ext4 any:1 /boot dcdbd2f1-6e0c-4337-aaef-3d9f88c4935d
/dev/md2 ext4 any:2 / 91d9f94b-3208-49b8-a573-9460b24a587e
/dev/sdc1 vfat desinfDATA (not mounted) 090A-B0A5
/dev/sdc2 vfat desinfSYS (not mounted) 0733-3C7E
/dev/sdc3 ext4 desinfSIGS (not mounted) f1e534f6-2a60-4109-9d45-807ebaf7776e
/dev/sdc4 vfat (not mounted) 0933-2B9A
[xen9-muster-server - root] ~ $ mdadm --detail --scan
ARRAY /dev/md/any:0 metadata=1.0 name=any:0 UUID=e298c15c:28a5786c:c9aff6ba:512da00d
ARRAY /dev/md/any:1 metadata=1.0 name=any:1 UUID=408cb03d:9048fa54:d49de725:38e59231
ARRAY /dev/md/any:2 metadata=1.0 name=any:2 UUID=8f6e045d:6fa155cf:6a7d3008:4fa7e575
[xen9-muster-server - root] ~ $ cat /proc/mdstat
Personalities : [raid1]
md0 : active raid1 sdb1[1] sda1[0]
8384448 blocks super 1.0 [2/2] [UU]
bitmap: 0/1 pages [0KB], 65536KB chunk
md1 : active raid1 sda2[0] sdb2[1]
2097088 blocks super 1.0 [2/2] [UU]
bitmap: 0/1 pages [0KB], 65536KB chunk
md2 : active raid1 sda3[0] sdb3[1]
23068544 blocks super 1.0 [2/2] [UU]
bitmap: 1/1 pages [4KB], 65536KB chunk
unused devices: <none>
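One difference from the freshly created arrays above: the Musterserver arrays carry an internal write-intent bitmap (the "bitmap: ..." lines), which speeds up resyncs after a crash; the new arrays were created without one. A minimal sketch to match that, assuming md0-md2 as above:
# add an internal write-intent bitmap to each array, as on the Musterserver
mdadm --grow --bitmap=internal /dev/md0
mdadm --grow --bitmap=internal /dev/md1
mdadm --grow --bitmap=internal /dev/md2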
.