Heartbeat and no end: Failover



Huhn Hur Tu
25.03.10, 13:46
Status so far: Heartbeat resource migration works as long as both nodes are active and neither is in standby.
However, as soon as I put a server into standby, Server2 does not take over. The same happens when I pull the plug on Server2. The odd part is that after pulling the plug, Heartbeat does not start.
What am I doing wrong this time?
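
For reference, this is roughly how I trigger the test and check the result (plain crm shell and crm_mon calls; swap the node name as needed):

# put server1 into standby and watch whether the t3 group moves to server2
crm node standby server1
crm_mon -1

# bring the node back online afterwards
crm node online server1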

Heartbeat3 & Pacemaker

Config:


server1:~# crm
crm(live)# configure
crm(live)configure# show
node $id="3e20966a-ed64-4972-8f5a-88be0977f759" server1 \
attributes standby="off"
node $id="5262f929-1082-4a85-aa05-7bd1992f15be" server2 \
attributes standby="off"
primitive pri_apache2 ocf:heartbeat:apache \
op monitor interval="15s" \
params configfile="/etc/apache2/apache2.conf" httpd="/usr/sbin/apache2" port="80" \
meta target-role="started"
primitive pri_drbd_service ocf:linbit:drbd \
params drbd_resource="t3" \
op monitor interval="15s" \
meta target-role="started" is-managed="true"
primitive pri_fs_drbd_t3 ocf:heartbeat:Filesystem \
params device="/dev/drbd0" directory="/mnt/drbd_daten" fstype="ext3" \
meta target-role="started"
primitive pri_gemeinsame_IP ocf:heartbeat:IPaddr2 \
params ip="192.168.1.253" cidr_netmask="24" nic="eth1" \
meta is-managed="true"
group t3 pri_fs_drbd_t3 pri_gemeinsame_IP pri_apache2
ms ms_drbd_service pri_drbd_service \
meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
location cli-prefer-pri_gemeinsame_IP t3 \
rule $id="cli-prefer-rule-pri_gemeinsame_IP" inf: #uname eq server1
location cli-standby-t3 t3 \
rule $id="cli-standby-rule-t3" -inf: #uname eq server2
colocation apache_on_drbd inf: t3 ms_drbd_service:Master
order apache_after_drbd inf: ms_drbd_service:promote t3:start
property $id="cib-bootstrap-options" \
dc-version="1.0.7-54d7869bfe3691eb723b1d47810e5585d8246b58" \
cluster-infrastructure="Heartbeat" \
startup-fencing="false" \
stonith-enabled="false" \
no-quorum-policy="ignore" \
expected-quorum-votes="0" \
last-lrm-refresh="1269443761" \
default-resource-stickiness="infinity"


and here is the whole thing again in XML


<cib validate-with="pacemaker-1.0" crm_feature_set="3.0.1" have-quorum="1" admin_epoch="0" epoch="220" num_updates="0" cib-last-written="Wed Mar 24 17:11:45 2010" dc-uuid="5262f929-1082-4a85-aa05-7bd1992f15be">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.0.7-54d7869bfe3691eb723b1d47810e5585d8246b58"/>
<nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="Heartbeat"/>
<nvpair id="cib-bootstrap-options-startup-fencing" name="startup-fencing" value="false"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-expected-quorum-votes" name="expected-quorum-votes" value="0"/>
<nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1269443761"/>
<nvpair id="cib-bootstrap-options-default-resource-stickiness" name="default-resource-stickiness" value="infinity"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="3e20966a-ed64-4972-8f5a-88be0977f759" type="normal" uname="server1">
<instance_attributes id="nodes-3e20966a-ed64-4972-8f5a-88be0977f759">
<nvpair id="nodes-3e20966a-ed64-4972-8f5a-88be0977f759-standby" name="standby" value="off"/>
</instance_attributes>
</node>
<node uname="server2" type="normal" id="5262f929-1082-4a85-aa05-7bd1992f15be">
<instance_attributes id="nodes-5262f929-1082-4a85-aa05-7bd1992f15be">
<nvpair name="standby" id="nodes-5262f929-1082-4a85-aa05-7bd1992f15be-standby" value="off"/>
</instance_attributes>
</node>
</nodes>
<resources>
<master id="ms_drbd_service">
<meta_attributes id="ms_drbd_service-meta_attributes">
<nvpair id="ms_drbd_service-meta_attributes-master-max" name="master-max" value="1"/>
<nvpair id="ms_drbd_service-meta_attributes-master-node-max" name="master-node-max" value="1"/>
<nvpair id="ms_drbd_service-meta_attributes-clone-max" name="clone-max" value="2"/>
<nvpair id="ms_drbd_service-meta_attributes-clone-node-max" name="clone-node-max" value="1"/>
<nvpair id="ms_drbd_service-meta_attributes-notify" name="notify" value="true"/>
</meta_attributes>
<primitive class="ocf" id="pri_drbd_service" provider="linbit" type="drbd">
<instance_attributes id="pri_drbd_service-instance_attributes">
<nvpair id="pri_drbd_service-instance_attributes-drbd_resource" name="drbd_resource" value="t3"/>
</instance_attributes>
<operations>
<op id="pri_drbd_service-monitor-15s" interval="15s" name="monitor"/>
</operations>
<meta_attributes id="pri_drbd_service-meta_attributes">
<nvpair id="pri_drbd_service-meta_attributes-target-role" name="target-role" value="started"/>
<nvpair id="pri_drbd_service-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</primitive>
</master>
<group id="t3">
<primitive class="ocf" id="pri_fs_drbd_t3" provider="heartbeat" type="Filesystem">
<instance_attributes id="pri_fs_drbd_t3-instance_attributes">
<nvpair id="pri_fs_drbd_t3-instance_attributes-device" name="device" value="/dev/drbd0"/>
<nvpair id="pri_fs_drbd_t3-instance_attributes-directory" name="directory" value="/mnt/drbd_daten"/>
<nvpair id="pri_fs_drbd_t3-instance_attributes-fstype" name="fstype" value="ext3"/>
</instance_attributes>
<meta_attributes id="pri_fs_drbd_t3-meta_attributes">
<nvpair id="pri_fs_drbd_t3-meta_attributes-target-role" name="target-role" value="started"/>
</meta_attributes>
</primitive>
<primitive class="ocf" id="pri_gemeinsame_IP" provider="heartbeat" type="IPaddr2">
<instance_attributes id="pri_gemeinsame_IP-instance_attributes">
<nvpair id="pri_gemeinsame_IP-instance_attributes-ip" name="ip" value="192.168.1.253"/>
<nvpair id="pri_gemeinsame_IP-instance_attributes-cidr_netmask" name="cidr_netmask" value="24"/>
<nvpair id="pri_gemeinsame_IP-instance_attributes-nic" name="nic" value="eth1"/>
</instance_attributes>
<meta_attributes id="pri_gemeinsame_IP-meta_attributes">
<nvpair id="pri_gemeinsame_IP-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</primitive>
<primitive class="ocf" id="pri_apache2" provider="heartbeat" type="apache">
<operations>
<op id="pri_apache2-monitor-15s" interval="15s" name="monitor"/>
</operations>
<instance_attributes id="pri_apache2-instance_attributes">
<nvpair id="pri_apache2-instance_attributes-configfile" name="configfile" value="/etc/apache2/apache2.conf"/>
<nvpair id="pri_apache2-instance_attributes-httpd" name="httpd" value="/usr/sbin/apache2"/>
<nvpair id="pri_apache2-instance_attributes-port" name="port" value="80"/>
</instance_attributes>
<meta_attributes id="pri_apache2-meta_attributes">
<nvpair id="pri_apache2-meta_attributes-target-role" name="target-role" value="started"/>
</meta_attributes>
</primitive>
</group>
</resources>
<constraints>
<rsc_location id="cli-prefer-pri_gemeinsame_IP" rsc="t3">
<rule id="cli-prefer-rule-pri_gemeinsame_IP" score="INFINITY">
<expression attribute="#uname" id="cli-prefer-expr-pri_gemeinsame_IP" operation="eq" value="server1"/>
</rule>
</rsc_location>
<rsc_colocation id="apache_on_drbd" rsc="t3" score="INFINITY" with-rsc="ms_drbd_service" with-rsc-role="Master"/>
<rsc_order first="ms_drbd_service" first-action="promote" id="apache_after_drbd" score="INFINITY" then="t3" then-action="start"/>
<rsc_location id="cli-standby-t3" rsc="t3">
<rule id="cli-standby-rule-t3" score="-INFINITY" boolean-op="and">
<expression id="cli-standby-expr-t3" attribute="#uname" operation="eq" value="server2" type="string"/>
</rule>
</rsc_location>
</constraints>
<rsc_defaults/>
<op_defaults>
<meta_attributes id="op_defaults-options"/>
</op_defaults>
</configuration>
</cib>
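
Side note on the two cli-* location constraints in the dump above: as far as I know these are the leftovers that crm resource migrate/move commands create, and the -inf rule in cli-standby-t3 keeps t3 off server2 for as long as it exists. If that is not intended, a sketch for clearing them (ids taken from the config above) would be:

# remove the leftover constraints explicitly by id
crm configure delete cli-prefer-pri_gemeinsame_IP cli-standby-t3
# (crm resource unmigrate t3 would also clear a cli-standby-t3 left by migrate)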


Regards, Stefan

Huhn Hur Tu
29.03.10, 13:20
I think I have it. While messing around in the config I killed the node IDs. They were recreated, but not quite correctly, it seems :-)
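
In case someone wants to verify the same thing: a quick way to compare the node IDs stored in the CIB with what Heartbeat itself has cached (sketch; the hostcache path is the Heartbeat default as far as I know):

# node entries (id + uname) currently stored in the CIB
cibadmin -Q -o nodes
# Heartbeat's own uname <-> uuid mapping
cat /var/lib/heartbeat/hostcache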

Regards, Stefan

Huhn Hur Tu
30.06.10, 14:12
Right up to the end I had problems with the failover via pingd; this is what came of it:

http://pf-lug.de/v8/showtopic.php?id=24&page=1#p95
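
The usual pingd pattern for this kind of connectivity-based failover looks roughly like the sketch below (host_list and multiplier are placeholder values; the actual outcome is in the linked thread):

primitive pri_pingd ocf:pacemaker:pingd \
params host_list="192.168.1.1" multiplier="100" \
op monitor interval="15s"
clone clo_pingd pri_pingd
location loc_t3_on_connected_node t3 \
rule -inf: not_defined pingd or pingd lte 0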

Regards, Stefan