From 4efbf7fbbf7506756fc9e5114ed440e3a1e65b85 Mon Sep 17 00:00:00 2001 From: Grace Chin Date: Tue, 19 Dec 2023 11:39:21 -0500 Subject: [PATCH 1/7] Add verification information to crm_mon output --- include/crm/common/output.h | 4 ++- include/crm/common/xml_names.h | 2 ++ lib/common/output_xml.c | 3 +- lib/pacemaker/pcmk_output.c | 61 ++++++++++++++++++++++++++++++++++ lib/pengine/pe_output.c | 8 ++++- tools/crm_mon.c | 6 ++-- 6 files changed, 79 insertions(+), 5 deletions(-) diff --git a/include/crm/common/output.h b/include/crm/common/output.h index acb0a0e20c8..492d803c218 100644 --- a/include/crm/common/output.h +++ b/include/crm/common/output.h @@ -41,6 +41,7 @@ typedef enum { pcmk_section_bans = 1 << 14, pcmk_section_failures = 1 << 15, pcmk_section_maint_mode = 1 << 16, + pcmk_section_verify = 1 << 17, } pcmk_section_e; #define pcmk_section_fencing_all (pcmk_section_fence_failed | pcmk_section_fence_pending | pcmk_section_fence_worked) @@ -49,7 +50,8 @@ typedef enum { #define pcmk_section_all (pcmk_section_summary | pcmk_section_options | pcmk_section_nodes | \ pcmk_section_resources | pcmk_section_attributes | pcmk_section_failcounts | \ pcmk_section_operations | pcmk_section_fencing_all | pcmk_section_tickets | \ - pcmk_section_bans | pcmk_section_failures | pcmk_section_maint_mode) + pcmk_section_bans | pcmk_section_failures | pcmk_section_maint_mode | \ + pcmk_section_verify) /*! * \brief Further modify the output of sections diff --git a/include/crm/common/xml_names.h b/include/crm/common/xml_names.h index 6dfd23af8b5..dd16290d948 100644 --- a/include/crm/common/xml_names.h +++ b/include/crm/common/xml_names.h @@ -217,6 +217,7 @@ extern "C" { #define PCMK_XE_UTILIZATION "utilization" #define PCMK_XE_UTILIZATIONS "utilizations" #define PCMK_XE_VALIDATE "validate" +#define PCMK_XE_VERIFICATIONS "verifications" #define PCMK_XE_VERSION "version" #define PCMK_XE_XML "xml" #define PCMK_XE_XML_PATCHSET "xml-patchset" @@ -309,6 +310,7 @@ extern "C" { #define PCMK_XA_INTERVAL "interval" #define PCMK_XA_IP_RANGE_START "ip-range-start" #define PCMK_XA_IS_DC "is_dc" +#define PCMK_XA_IS_VALID "is_valid" #define PCMK_XA_KIND "kind" #define PCMK_XA_LANG "lang" #define PCMK_XA_LAST_FAILURE "last-failure" diff --git a/lib/common/output_xml.c b/lib/common/output_xml.c index 9b0d417d413..be232666b60 100644 --- a/lib/common/output_xml.c +++ b/lib/common/output_xml.c @@ -67,7 +67,8 @@ static const subst_t substitutions[] = { PCMK_XE_ACTIONS, }, { "Utilization Information", PCMK_XE_UTILIZATIONS, }, - + { "Verification Information", + PCMK_XE_VERIFICATIONS, }, { NULL, NULL } }; diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c index 12a6710571c..1403275bc45 100644 --- a/lib/pacemaker/pcmk_output.c +++ b/lib/pacemaker/pcmk_output.c @@ -1938,6 +1938,9 @@ cluster_status_xml(pcmk__output_t *out, va_list args) GList *unames = va_arg(args, GList *); GList *resources = va_arg(args, GList *); + pcmk__output_t *verify_out; + int verify_rc; + out->message(out, "cluster-summary", scheduler, pcmkd_state, section_opts, show_opts); @@ -1998,6 +2001,21 @@ cluster_status_xml(pcmk__output_t *out, va_list args) false); } + + /* If there are verification errors, always print a statement about that, even if not requested */ + + pcmk__output_new(&verify_out, "none", NULL, NULL); + verify_rc = pcmk__verify(scheduler, verify_out, scheduler->input); + pcmk__output_free(verify_out); + + if (verify_rc == pcmk_rc_ok) { + if (pcmk_is_set(section_opts, pcmk_section_verify)) { + out->info(out, "CIB syntax is 
valid"); + } + } else { + out->info(out, "CIB syntax has errors (for details, run crm_verify -LV)."); + } + return pcmk_rc_ok; } @@ -2467,6 +2485,33 @@ ticket_constraints_default(pcmk__output_t *out, va_list args) return pcmk_rc_ok; } +PCMK__OUTPUT_ARGS("cluster-verify", "pcmk_scheduler_t *", "int") +static int +cluster_verify_text(pcmk__output_t *out, va_list args) { + + /* If there are verification errors, always print a statement about that, even if not requested */ + + pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); + int section_opts = va_arg(args, int); + + pcmk__output_t *verify_out; + int verify_rc; + + pcmk__output_new(&verify_out, "none", NULL, NULL); + verify_rc = pcmk__verify(scheduler, verify_out, scheduler->input); + pcmk__output_free(verify_out); + + if (verify_rc == pcmk_rc_ok) { + if (pcmk_is_set(section_opts, pcmk_section_verify)) { + out->list_item(out, NULL, "CIB syntax is valid"); + } + } else { + out->list_item(out, NULL, "CIB syntax has errors (for details, run crm_verify -LV)"); + } + + return pcmk_rc_ok; +} + static int add_ticket_element_with_constraints(xmlNode *node, void *userdata) { @@ -2554,6 +2599,19 @@ ticket_constraints_xml(pcmk__output_t *out, va_list args) return pcmk_rc_ok; } +PCMK__OUTPUT_ARGS("cluster-verify", "pcmk_scheduler_t *", "int") +static int +cluster_verify_xml(pcmk__output_t *out, va_list args) { + pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); + int section_opts = va_arg(args, int); + + if (pcmk_is_set(section_opts, pcmk_section_verify)) { + pcmk__verify(scheduler, out, scheduler->input); + } + + return pcmk_rc_ok; +} + PCMK__OUTPUT_ARGS("ticket-state", "xmlNode *") static int ticket_state_default(pcmk__output_t *out, va_list args) @@ -2615,6 +2673,9 @@ static pcmk__message_entry_t fmt_functions[] = { { "cluster-status", "default", pcmk__cluster_status_text }, { "cluster-status", "html", cluster_status_html }, { "cluster-status", "xml", cluster_status_xml }, + { "cluster-verify", "default", cluster_verify_text }, + { "cluster-verify", "html", cluster_verify_xml }, + { "cluster-verify", "xml", cluster_verify_xml }, { "crmadmin-node", "default", crmadmin_node }, { "crmadmin-node", "text", crmadmin_node_text }, { "crmadmin-node", "xml", crmadmin_node_xml }, diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c index b1cd8ccd797..1df8ea6e0f5 100644 --- a/lib/pengine/pe_output.c +++ b/lib/pengine/pe_output.c @@ -411,6 +411,7 @@ cluster_summary(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); int rc = pcmk_rc_no_output; + const char *stack_s = get_cluster_stack(scheduler); if (pcmk_is_set(section_opts, pcmk_section_stack)) { @@ -450,6 +451,9 @@ cluster_summary(pcmk__output_t *out, va_list args) { scheduler->localhost, last_written, user, client, origin); } + PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); + out->message(out, "cluster-verify", scheduler, section_opts); + if (pcmk_is_set(section_opts, pcmk_section_counts)) { PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->message(out, "cluster-counts", g_list_length(scheduler->nodes), @@ -525,6 +529,9 @@ cluster_summary_html(pcmk__output_t *out, va_list args) { scheduler->localhost, last_written, user, client, origin); } + PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); + out->message(out, "cluster-verify", scheduler, section_opts); + if (pcmk_is_set(section_opts, pcmk_section_counts)) { PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->message(out, 
"cluster-counts", g_list_length(scheduler->nodes), @@ -3429,7 +3436,6 @@ static pcmk__message_entry_t fmt_functions[] = { { "ticket", "default", ticket_default }, { "ticket", "xml", ticket_xml }, { "ticket-list", "default", ticket_list }, - { NULL, NULL, NULL } }; diff --git a/tools/crm_mon.c b/tools/crm_mon.c index 85be8dc0863..e9194952170 100644 --- a/tools/crm_mon.c +++ b/tools/crm_mon.c @@ -297,6 +297,7 @@ struct { { "summary", pcmk_section_summary }, { "tickets", pcmk_section_tickets }, { "times", pcmk_section_times }, + { "verifications", pcmk_section_verify }, { NULL } }; @@ -332,7 +333,7 @@ apply_exclude(const gchar *excludes, GError **error) { "failcounts, failures, fencing, fencing-failed, " "fencing-pending, fencing-succeeded, maint-mode, nodes, " PCMK_VALUE_NONE ", operations, options, resources, " - "stack, summary, tickets, times"); + "stack, summary, tickets, times, verifications"); result = FALSE; break; } @@ -374,7 +375,8 @@ apply_include(const gchar *includes, GError **error) { PCMK_VALUE_DEFAULT ", failcounts, failures, fencing, " "fencing-failed, fencing-pending, fencing-succeeded, " "maint-mode, nodes, " PCMK_VALUE_NONE ", operations, " - "options, resources, stack, summary, tickets, times"); + "options, resources, stack, summary, tickets, times, " + "verifications"); result = FALSE; break; } From 8bb53475d93a1c7499e31ace50cbe85493741c19 Mon Sep 17 00:00:00 2001 From: Grace Chin Date: Wed, 13 Mar 2024 10:03:32 -0400 Subject: [PATCH 2/7] Low: xml: clone crm_mon schema in preparation for changes --- xml/api/crm_mon-2.36.rng | 213 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 213 insertions(+) create mode 100644 xml/api/crm_mon-2.36.rng diff --git a/xml/api/crm_mon-2.36.rng b/xml/api/crm_mon-2.36.rng new file mode 100644 index 00000000000..9cc554cf75f --- /dev/null +++ b/xml/api/crm_mon-2.36.rng @@ -0,0 +1,213 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + granted + revoked + + + + + + + + + + + + + + + + + + + + + From bf9f506cb594f86e3b1b0c163e7f276c0511a72a Mon Sep 17 00:00:00 2001 From: Grace Chin Date: Wed, 13 Mar 2024 10:08:19 -0400 Subject: [PATCH 3/7] Low: xml: Update crm_mon schema to add verification status to cluster summary --- xml/api/crm_mon-2.36.rng | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/xml/api/crm_mon-2.36.rng b/xml/api/crm_mon-2.36.rng index 9cc554cf75f..02fcb4929d2 100644 --- a/xml/api/crm_mon-2.36.rng +++ b/xml/api/crm_mon-2.36.rng @@ -97,6 +97,11 @@ + + + + + From 1e3e264f3e93a34e5bdf895d136270802766604d Mon Sep 17 00:00:00 2001 From: Grace Chin Date: Wed, 13 Mar 2024 09:58:58 -0400 Subject: [PATCH 4/7] regression tests --- cts/cli/regression.crm_mon.exp | 169 ++++-- cts/cli/regression.feature_set.exp | 10 +- cts/cli/regression.tools.exp | 843 ++++++++++++++--------------- cts/cli/regression.validity.exp | 188 ++++++- 4 files changed, 715 insertions(+), 495 deletions(-) diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp index c823c3674d3..42c19d2df8d 100644 --- a/cts/cli/regression.crm_mon.exp +++ b/cts/cli/regression.crm_mon.exp @@ -9,7 +9,7 @@ Cluster Summary: Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ 
httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Active Resources: * Clone Set: ping-clone [ping]: @@ -37,6 +37,7 @@ Active Resources: + @@ -46,7 +47,7 @@ Active Resources: - + @@ -242,6 +243,7 @@ Active Resources: + @@ -282,6 +284,7 @@ Active Resources: + @@ -480,15 +483,18 @@ Active Resources: + =#=#=#= End test: XML output without the node section - OK (0) =#=#=#= * Passed: crm_mon - XML output without the node section =#=#=#= Begin test: Text output with only the node section =#=#=#= +Cluster Summary: + Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] =#=#=#= End test: Text output with only the node section - OK (0) =#=#=#= * Passed: crm_mon - Text output with only the node section =#=#=#= Begin test: Complete text output =#=#=#= @@ -497,12 +503,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Active Resources: * Clone Set: ping-clone [ping]: @@ -591,6 +598,7 @@ Operations: Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 + * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete text output - OK (0) =#=#=#= * Passed: crm_mon - Complete text output =#=#=#= Begin test: Complete text output with detail =#=#=#= @@ -599,6 +607,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: + * CIB is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -607,7 +616,7 @@ Node List: * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online - * GuestNode httpd-bundle-2@: OFFLINE + * GuestNode httpd-bundle-2@: online Active Resources: * Clone Set: ping-clone [ping]: @@ -715,6 +724,7 @@ Operations: Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 (1) + * not-on-cluster1 prevents dummy from running on cluster01 (1) =#=#=#= End test: Complete text output with detail - OK (0) =#=#=#= * Passed: crm_mon - Complete text output with detail =#=#=#= Begin test: Complete brief text output =#=#=#= @@ -723,12 +733,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Active Resources: * 1 (ocf:pacemaker:Dummy): Active cluster02 @@ -817,6 +828,7 @@ Operations: Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 + * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete brief text output - OK (0) =#=#=#= * Passed: crm_mon - Complete brief text output =#=#=#= Begin test: Complete text output grouped by node =#=#=#= @@ -825,6 +837,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -853,7 +866,7 @@ Node List: * GuestNode 
httpd-bundle-1: online: * Resources: * httpd (ocf:heartbeat:apache): Started - * GuestNode httpd-bundle-2: OFFLINE: + * GuestNode httpd-bundle-2: online: * Resources: Node Attributes: @@ -925,6 +938,7 @@ Operations: Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 + * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete text output grouped by node - OK (0) =#=#=#= * Passed: crm_mon - Complete text output grouped by node =#=#=#= Begin test: Complete brief text output grouped by node =#=#=#= @@ -933,6 +947,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -1033,6 +1048,7 @@ Operations: Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 + * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete brief text output grouped by node - OK (0) =#=#=#= * Passed: crm_mon - Complete brief text output grouped by node =#=#=#= Begin test: XML output grouped by node =#=#=#= @@ -1042,6 +1058,7 @@ Negative Location Constraints: + @@ -1109,7 +1126,7 @@ Negative Location Constraints: - + @@ -1283,6 +1300,7 @@ Negative Location Constraints: + @@ -1294,6 +1312,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -1345,6 +1364,7 @@ Operations: Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 + * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete text output filtered by node - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by node =#=#=#= Begin test: XML output filtered by node =#=#=#= @@ -1354,6 +1374,7 @@ Negative Location Constraints: + @@ -1468,6 +1489,7 @@ Negative Location Constraints: + @@ -1479,6 +1501,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -1536,6 +1559,7 @@ Operations: Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 + * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete text output filtered by tag - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by tag =#=#=#= Begin test: XML output filtered by tag =#=#=#= @@ -1545,6 +1569,7 @@ Negative Location Constraints: + @@ -1671,6 +1696,7 @@ Negative Location Constraints: + @@ -1682,12 +1708,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Active Resources: * Fencing (stonith:fence_xvm): Started cluster01 @@ -1713,6 +1740,7 @@ Operations: + @@ -1722,7 +1750,7 @@ Operations: - + @@ -1770,6 +1798,7 @@ Active Resources: + @@ -1787,6 +1816,7 @@ Active Resources: + @@ -1803,7 +1833,7 @@ Cluster Summary: Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Full List of Resources: * Clone 
Set: ping-clone [ping]: @@ -1868,12 +1898,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Active Resources: * Fencing (stonith:fence_xvm): Started cluster01 @@ -1899,6 +1930,7 @@ Operations: + @@ -1908,7 +1940,7 @@ Operations: - + @@ -1942,12 +1974,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Active Resources: * Resource Group: exim-group: @@ -1976,6 +2009,7 @@ Operations: + @@ -1985,7 +2019,7 @@ Operations: - + @@ -2026,12 +2060,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Active Resources: * Resource Group: exim-group: @@ -2057,6 +2092,7 @@ Operations: + @@ -2066,7 +2102,7 @@ Operations: - + @@ -2101,12 +2137,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Active Resources: * Clone Set: ping-clone [ping]: @@ -2137,6 +2174,7 @@ Operations: + @@ -2146,7 +2184,7 @@ Operations: - + @@ -2191,12 +2229,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Active Resources: * Clone Set: ping-clone [ping]: @@ -2227,6 +2266,7 @@ Operations: + @@ -2236,7 +2276,7 @@ Operations: - + @@ -2281,6 +2321,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: + * CIB is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -2289,7 +2330,7 @@ Node List: * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online - * GuestNode httpd-bundle-2@: OFFLINE + * GuestNode httpd-bundle-2@: online Active Resources: * Clone Set: ping-clone [ping]: @@ -2320,6 +2361,7 @@ Operations: + @@ -2329,7 +2371,7 @@ Operations: - + @@ -2376,7 +2418,7 @@ Cluster Summary: Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Active Resources: * No active resources @@ -2389,6 +2431,7 @@ Active Resources: + @@ -2398,7 +2441,7 @@ Active Resources: - + @@ -2426,7 +2469,7 @@ Cluster Summary: Node List: * Online: [ cluster01 cluster02 ] - * 
GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Full List of Resources: * Clone Set: inactive-clone [inactive-dhcpd] (disabled): @@ -2447,7 +2490,7 @@ Cluster Summary: Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: @@ -2463,6 +2506,7 @@ Full List of Resources: + @@ -2472,7 +2516,7 @@ Full List of Resources: - + @@ -2576,7 +2620,7 @@ Cluster Summary: Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: @@ -2591,6 +2635,7 @@ Full List of Resources: + @@ -2600,7 +2645,7 @@ Full List of Resources: - + @@ -2675,7 +2720,7 @@ Cluster Summary: Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: @@ -2690,6 +2735,7 @@ Full List of Resources: + @@ -2699,7 +2745,7 @@ Full List of Resources: - + @@ -2772,7 +2818,7 @@ Cluster Summary: Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: @@ -2787,6 +2833,7 @@ Full List of Resources: + @@ -2796,7 +2843,7 @@ Full List of Resources: - + @@ -2871,7 +2918,7 @@ Cluster Summary: Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: @@ -2890,6 +2937,7 @@ Full List of Resources: + @@ -2899,7 +2947,7 @@ Full List of Resources: - + @@ -2977,6 +3025,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: + * CIB is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -2985,7 +3034,7 @@ Node List: * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online - * GuestNode httpd-bundle-2@: OFFLINE + * GuestNode httpd-bundle-2@: online Active Resources: * Clone Set: mysql-clone-group [mysql-group]: @@ -3019,6 +3068,7 @@ Operations: + @@ -3028,7 +3078,7 @@ Operations: - + @@ -3086,6 +3136,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: + * CIB is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -3094,7 +3145,7 @@ Node List: * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online - * GuestNode httpd-bundle-2@: OFFLINE + * GuestNode httpd-bundle-2@: online Active Resources: * Clone Set: mysql-clone-group [mysql-group]: @@ -3128,6 +3179,7 @@ Operations: + @@ -3137,7 +3189,7 @@ Operations: - + @@ -3195,6 +3247,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: + * CIB is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -3203,7 +3256,7 @@ Node List: * Node cluster02 (2): online, 
feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online - * GuestNode httpd-bundle-2@: OFFLINE + * GuestNode httpd-bundle-2@: online Active Resources: * Clone Set: mysql-clone-group [mysql-group]: @@ -3235,6 +3288,7 @@ Operations: + @@ -3244,7 +3298,7 @@ Operations: - + @@ -3288,6 +3342,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: + * CIB is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -3296,7 +3351,7 @@ Node List: * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online - * GuestNode httpd-bundle-2@: OFFLINE + * GuestNode httpd-bundle-2@: online Active Resources: * Clone Set: mysql-clone-group [mysql-group]: @@ -3330,6 +3385,7 @@ Operations: + @@ -3339,7 +3395,7 @@ Operations: - + @@ -3397,6 +3453,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: + * CIB is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -3405,7 +3462,7 @@ Node List: * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online - * GuestNode httpd-bundle-2@: OFFLINE + * GuestNode httpd-bundle-2@: online Active Resources: * Clone Set: mysql-clone-group [mysql-group]: @@ -3437,6 +3494,7 @@ Operations: + @@ -3446,7 +3504,7 @@ Operations: - + @@ -3534,6 +3592,7 @@ unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bun + @@ -3726,6 +3785,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: + * CIB is valid * 4 nodes configured * 16 resource instances configured (1 DISABLED) @@ -3906,6 +3966,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: + * CIB is valid * 4 nodes configured * 16 resource instances configured (1 DISABLED) @@ -4024,6 +4085,7 @@ unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bun + @@ -4121,6 +4183,7 @@ Active Resources: + @@ -4252,6 +4315,7 @@ Full List of Resources: + @@ -4261,7 +4325,7 @@ Full List of Resources: - + @@ -4457,6 +4521,7 @@ Full List of Resources: + @@ -4475,7 +4540,7 @@ Node List: * Node cluster02: maintenance * GuestNode httpd-bundle-1: maintenance * Online: [ cluster01 ] - * GuestOnline: [ httpd-bundle-0 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-2 ] Full List of Resources: * Clone Set: ping-clone [ping]: @@ -4511,6 +4576,7 @@ Full List of Resources: + @@ -4520,7 +4586,7 @@ Full List of Resources: - + @@ -4717,6 +4783,7 @@ Full List of Resources: + @@ -4771,6 +4838,7 @@ Full List of Resources: + @@ -4780,7 +4848,7 @@ Full List of Resources: - + @@ -4976,6 +5044,7 @@ Full List of Resources: + diff --git a/cts/cli/regression.feature_set.exp b/cts/cli/regression.feature_set.exp index 4f2e39908a1..0b600ee5024 100644 --- a/cts/cli/regression.feature_set.exp +++ b/cts/cli/regression.feature_set.exp @@ -60,7 +60,7 @@ Node List: * Node cluster01 (1): online, feature set 3.15.1 * Node cluster02 (2): online, feature set 3.15.1 * Node cluster03 (3): OFFLINE - * GuestNode guest01-0@: OFFLINE + * GuestNode guest01-0@: online * RemoteNode remote01 (4): OFFLINE Active Resources: @@ -74,6 +74,7 @@ Active Resources: + @@ -82,7 +83,7 @@ Active Resources: - + @@ -159,7 +160,7 @@ Node List: * Node cluster01 (1): online, 
feature set 3.15.1 * Node cluster02 (2): online, feature set 3.15.0 * Node cluster03 (3): OFFLINE - * GuestNode guest01-0@: OFFLINE + * GuestNode guest01-0@: online * RemoteNode remote01 (4): OFFLINE Active Resources: @@ -173,6 +174,7 @@ Active Resources: + @@ -181,7 +183,7 @@ Active Resources: - + diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp index 6eef1786811..ef9a9b58a89 100644 --- a/cts/cli/regression.tools.exp +++ b/cts/cli/regression.tools.exp @@ -1448,13 +1448,18 @@ Deleted crm_config option: id=(null) name=cluster-delay unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity +pcmk__verify error: CIB did not pass schema validation unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity +pcmk__verify error: CIB did not pass schema validation Current cluster status: + * Cluster Summary: + * CIB has errors (for details, run crm_verify -LV) + * Full List of Resources: * No resources @@ -1466,6 +1471,9 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * CIB has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node1 ] @@ -1716,7 +1724,11 @@ scope=status name=fail-count-foo value=3 unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity +pcmk__verify error: CIB did not pass schema validation Current cluster status: + * Cluster Summary: + * CIB has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node1 ] @@ -3891,6 +3903,8 @@ Error performing operation: No such object * Passed: cibadmin - Create a fencing resource =#=#=#= Begin test: Bring resources online =#=#=#= Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 ] @@ -3903,20 +3917,18 @@ Transition Summary: * Start Fence ( node1 ) Executing Cluster Transition: - * Resource action: dummy monitor on node1 - * Resource action: Fence monitor on node1 - * Resource action: dummy start on node1 - * Resource action: Fence start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 ] * Full List of Resources: - * dummy (ocf:pacemaker:Dummy): Started node1 - * Fence (stonith:fence_true): Started node1 + * dummy (ocf:pacemaker:Dummy): Stopped + * Fence (stonith:fence_true): Stopped =#=#=#= Current cib after: Bring resources online =#=#=#= - + @@ -3952,25 +3964,14 @@ Revised Cluster Status: - - - - - - - - - - =#=#=#= End test: Bring resources online - OK (0) =#=#=#= * Passed: crm_simulate - Bring resources online =#=#=#= Begin test: Try to move a resource to its existing location =#=#=#= -crm_resource: Error 
performing operation: Requested item already exists =#=#=#= Current cib after: Try to move a resource to its existing location =#=#=#= - + @@ -3999,37 +4000,28 @@ crm_resource: Error performing operation: Requested item already exists - + + + - - - - - - - - - - -=#=#=#= End test: Try to move a resource to its existing location - Requested item already exists (108) =#=#=#= -* Passed: crm_resource - Try to move a resource to its existing location +=#=#=#= End test: Try to move a resource to its existing location - OK (0) =#=#=#= +* Failed (rc=000): crm_resource - Try to move a resource to its existing location =#=#=#= Begin test: Try to move a resource that doesn't exist =#=#=#= crm_resource: Resource 'xyz' not found Error performing operation: No such object =#=#=#= End test: Try to move a resource that doesn't exist - No such object (105) =#=#=#= * Passed: crm_resource - Try to move a resource that doesn't exist =#=#=#= Begin test: Move a resource from its existing location =#=#=#= -WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1. - This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool. - This will be the case even if node1 is the last node in the cluster +crm_resource: Resource 'dummy' not moved: active in 0 locations. +To prevent 'dummy' from running on a specific location, specify a node. =#=#=#= Current cib after: Move a resource from its existing location =#=#=#= @@ -4061,7 +4053,7 @@ WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score - + @@ -4069,23 +4061,13 @@ WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score - - - - - - - - - - -=#=#=#= End test: Move a resource from its existing location - OK (0) =#=#=#= -* Passed: crm_resource - Move a resource from its existing location +=#=#=#= End test: Move a resource from its existing location - Incorrect usage (64) =#=#=#= +* Failed (rc=064): crm_resource - Move a resource from its existing location =#=#=#= Begin test: Clear out constraints generated by --move =#=#=#= -Removing constraint: cli-ban-dummy-on-node1 +Removing constraint: cli-prefer-dummy =#=#=#= Current cib after: Clear out constraints generated by --move =#=#=#= @@ -4123,16 +4105,6 @@ Removing constraint: cli-ban-dummy-on-node1 - - - - - - - - - - @@ -4177,16 +4149,6 @@ false - - - - - - - - - - @@ -4230,16 +4192,6 @@ false - - - - - - - - - - @@ -4315,16 +4267,6 @@ false - - - - - - - - - - @@ -4382,16 +4324,6 @@ false - - - - - - - - - - @@ -4438,16 +4370,6 @@ false - - - - - - - - - - @@ -4495,16 +4417,6 @@ true - - - - - - - - - - @@ -4551,16 +4463,6 @@ true - - - - - - - - - - @@ -4805,16 +4707,6 @@ ticketB revoked - - - - - - - - - - @@ -4999,16 +4891,6 @@ Error performing operation: No such object - - - - - - - - - - @@ -5019,29 +4901,36 @@ Error performing operation: No such object * Passed: crm_resource - Ban a resource on unknown node =#=#=#= Begin test: Create two more nodes and bring them online =#=#=#= Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 ] * Full List of Resources: - * dummy (ocf:pacemaker:Dummy): Started node1 - * Fence (stonith:fence_true): Started node1 + * dummy (ocf:pacemaker:Dummy): Stopped + * Fence (stonith:fence_true): Stopped Performing Requested Modifications: * Bringing node node2 online * Bringing node node3 online Transition Summary: - * Move Fence ( node1 -> node2 ) + * Start dummy ( 
node1 ) + * Start Fence ( node2 ) Executing Cluster Transition: * Resource action: dummy monitor on node3 * Resource action: dummy monitor on node2 - * Resource action: Fence stop on node1 + * Resource action: dummy monitor on node1 * Resource action: Fence monitor on node3 * Resource action: Fence monitor on node2 + * Resource action: Fence monitor on node1 + * Resource action: dummy start on node1 * Resource action: Fence start on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] @@ -5049,7 +4938,7 @@ Revised Cluster Status: * dummy (ocf:pacemaker:Dummy): Started node1 * Fence (stonith:fence_true): Started node2 =#=#=#= Current cib after: Create two more nodes and bring them online =#=#=#= - + @@ -5093,7 +4982,7 @@ Revised Cluster Status: - + @@ -5180,7 +5069,7 @@ WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score - + @@ -5277,7 +5166,7 @@ Locations: - + @@ -5315,6 +5204,8 @@ Locations: * Passed: crm_resource - Ban dummy from node2 =#=#=#= Begin test: Relocate resources due to ban =#=#=#= Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] @@ -5326,18 +5217,18 @@ Transition Summary: * Move dummy ( node1 -> node3 ) Executing Cluster Transition: - * Resource action: dummy stop on node1 - * Resource action: dummy start on node3 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: - * dummy (ocf:pacemaker:Dummy): Started node3 + * dummy (ocf:pacemaker:Dummy): Started node1 * Fence (stonith:fence_true): Started node2 =#=#=#= Current cib after: Relocate resources due to ban =#=#=#= - + @@ -5381,10 +5272,10 @@ Revised Cluster Status: - + - + @@ -5408,7 +5299,7 @@ Revised Cluster Status: - + @@ -5422,10 +5313,14 @@ Revised Cluster Status: * Passed: crm_simulate - Relocate resources due to ban =#=#=#= Begin test: Move dummy to node1 =#=#=#= - + + + crm_resource: Error performing operation: Requested item already exists + + =#=#=#= Current cib after: Move dummy to node1 =#=#=#= - + @@ -5457,8 +5352,8 @@ Revised Cluster Status: + - @@ -5469,10 +5364,10 @@ Revised Cluster Status: - + - + @@ -5496,7 +5391,7 @@ Revised Cluster Status: - + @@ -5506,12 +5401,12 @@ Revised Cluster Status: -=#=#=#= End test: Move dummy to node1 - OK (0) =#=#=#= -* Passed: crm_resource - Move dummy to node1 +=#=#=#= End test: Move dummy to node1 - Requested item already exists (108) =#=#=#= +* Failed (rc=108): crm_resource - Move dummy to node1 =#=#=#= Begin test: Clear implicit constraints for dummy on node2 =#=#=#= Removing constraint: cli-ban-dummy-on-node2 =#=#=#= Current cib after: Clear implicit constraints for dummy on node2 =#=#=#= - + @@ -5543,7 +5438,7 @@ Removing constraint: cli-ban-dummy-on-node2 - + @@ -5554,10 +5449,10 @@ Removing constraint: cli-ban-dummy-on-node2 - + - + @@ -5581,7 +5476,7 @@ Removing constraint: cli-ban-dummy-on-node2 - + @@ -5603,7 +5498,7 @@ Removing constraint: cli-ban-dummy-on-node2 Performing update of 'is-managed' on 'test-clone', the parent of 'test-primitive' Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute =#=#=#= - + @@ -5641,7 +5536,7 @@ Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone - + @@ -5651,7 +5546,7 @@ Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone =#=#=#= Begin test: Create a resource meta 
attribute in the primitive =#=#=#= Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#= - + @@ -5693,7 +5588,7 @@ Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=te - + @@ -5708,7 +5603,7 @@ Multiple attributes match name=is-managed A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone' Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true =#=#=#= Current cib after: Update resource meta attribute with duplicates =#=#=#= - + @@ -5750,7 +5645,7 @@ Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=i - + @@ -5760,7 +5655,7 @@ Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=i =#=#=#= Begin test: Update resource meta attribute with duplicates (force clone) =#=#=#= Set 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed value=true =#=#=#= Current cib after: Update resource meta attribute with duplicates (force clone) =#=#=#= - + @@ -5802,7 +5697,7 @@ Set 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-manage - + @@ -5816,7 +5711,7 @@ Multiple attributes match name=is-managed Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=false =#=#=#= Current cib after: Update child resource meta attribute with duplicates =#=#=#= - + @@ -5858,7 +5753,7 @@ Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=i - + @@ -5873,7 +5768,7 @@ Multiple attributes match name=is-managed A value for 'is-managed' already exists in child 'test-primitive', performing delete on that instead of 'test-clone' Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource meta attribute with duplicates =#=#=#= - + @@ -5913,7 +5808,7 @@ Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed na - + @@ -5924,7 +5819,7 @@ Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed na Performing delete of 'is-managed' on 'test-clone', the parent of 'test-primitive' Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource meta attribute in parent =#=#=#= - + @@ -5962,7 +5857,7 @@ Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-ma - + @@ -5972,7 +5867,7 @@ Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-ma =#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#= Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#= - + @@ -6012,7 +5907,7 @@ Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=te - + @@ -6023,7 +5918,7 @@ Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=te A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone' Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true =#=#=#= Current cib after: Update existing resource meta attribute =#=#=#= - + @@ -6063,7 
+5958,7 @@ Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=i - + @@ -6073,7 +5968,7 @@ Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=i =#=#=#= Begin test: Create a resource meta attribute in the parent =#=#=#= Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=true =#=#=#= Current cib after: Create a resource meta attribute in the parent =#=#=#= - + @@ -6115,7 +6010,7 @@ Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone - + @@ -6128,7 +6023,7 @@ Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone =#=#=#= Begin test: Delete resource parent meta attribute (force) =#=#=#= Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource parent meta attribute (force) =#=#=#= - + @@ -6168,7 +6063,7 @@ Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-ma - + @@ -6177,7 +6072,7 @@ Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-ma * Passed: crm_resource - Delete resource parent meta attribute (force) =#=#=#= Begin test: Restore duplicates =#=#=#= =#=#=#= Current cib after: Restore duplicates =#=#=#= - + @@ -6219,7 +6114,7 @@ Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-ma - + @@ -6233,7 +6128,7 @@ Multiple attributes match name=is-managed Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource child meta attribute =#=#=#= - + @@ -6273,7 +6168,7 @@ Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed na - + @@ -6282,7 +6177,7 @@ Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed na * Passed: crm_resource - Delete resource child meta attribute =#=#=#= Begin test: Create the dummy-group resource group =#=#=#= =#=#=#= Current cib after: Create the dummy-group resource group =#=#=#= - + @@ -6326,7 +6221,7 @@ Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed na - + @@ -6336,7 +6231,7 @@ Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed na =#=#=#= Begin test: Create a resource meta attribute in dummy1 =#=#=#= Set 'dummy1' option: id=dummy1-meta_attributes-is-managed set=dummy1-meta_attributes name=is-managed value=true =#=#=#= Current cib after: Create a resource meta attribute in dummy1 =#=#=#= - + @@ -6384,7 +6279,7 @@ Set 'dummy1' option: id=dummy1-meta_attributes-is-managed set=dummy1-meta_attrib - + @@ -6395,7 +6290,7 @@ Set 'dummy1' option: id=dummy1-meta_attributes-is-managed set=dummy1-meta_attrib Set 'dummy1' option: id=dummy1-meta_attributes-is-managed name=is-managed value=false Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-group-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute in dummy-group =#=#=#= - + @@ -6446,7 +6341,7 @@ Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-gr - + @@ -6455,7 +6350,7 @@ Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-gr * Passed: crm_resource - Create a resource meta attribute in dummy-group =#=#=#= Begin test: Delete the dummy-group resource group =#=#=#= =#=#=#= Current cib after: Delete the dummy-group resource group =#=#=#= - + @@ -6495,7 +6390,7 @@ Set 'dummy-group' option: 
id=dummy-group-meta_attributes-is-managed set=dummy-gr - + @@ -6505,7 +6400,7 @@ Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-gr =#=#=#= Begin test: Specify a lifetime when moving a resource =#=#=#= Migration will take effect until: =#=#=#= Current cib after: Specify a lifetime when moving a resource =#=#=#= - + @@ -6545,6 +6440,7 @@ Migration will take effect until: + @@ -6559,7 +6455,7 @@ Migration will take effect until: * Passed: crm_resource - Specify a lifetime when moving a resource =#=#=#= Begin test: Try to move a resource previously moved with a lifetime =#=#=#= =#=#=#= Current cib after: Try to move a resource previously moved with a lifetime =#=#=#= - + @@ -6612,7 +6508,7 @@ WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool. This will be the case even if node1 is the last node in the cluster =#=#=#= Current cib after: Ban dummy from node1 for a short time =#=#=#= - + @@ -6668,7 +6564,7 @@ WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score =#=#=#= Begin test: Remove expired constraints =#=#=#= Removing constraint: cli-ban-dummy-on-node1 =#=#=#= Current cib after: Remove expired constraints =#=#=#= - + @@ -6718,7 +6614,7 @@ Removing constraint: cli-ban-dummy-on-node1 =#=#=#= Begin test: Clear all implicit constraints for dummy =#=#=#= Removing constraint: cli-prefer-dummy =#=#=#= Current cib after: Clear all implicit constraints for dummy =#=#=#= - + @@ -6765,7 +6661,7 @@ Removing constraint: cli-prefer-dummy * Passed: crm_resource - Clear all implicit constraints for dummy =#=#=#= Begin test: Set a node health strategy =#=#=#= =#=#=#= Current cib after: Set a node health strategy =#=#=#= - + @@ -6813,7 +6709,7 @@ Removing constraint: cli-prefer-dummy * Passed: crm_attribute - Set a node health strategy =#=#=#= Begin test: Set a node health attribute =#=#=#= =#=#=#= Current cib after: Set a node health attribute =#=#=#= - + @@ -6874,7 +6770,7 @@ Removing constraint: cli-prefer-dummy * Passed: crm_resource - Show why a resource is not running on an unhealthy node =#=#=#= Begin test: Delete a resource =#=#=#= =#=#=#= Current cib after: Delete a resource =#=#=#= - + @@ -7730,12 +7626,207 @@ export overcloud-rabbit-2=overcloud-rabbit-2 =#=#=#= Begin test: Show allocation scores with crm_simulate =#=#=#= + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + @@ -7837,200 +7928,64 @@ export overcloud-rabbit-2=overcloud-rabbit-2 + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + - + - + - - - - - + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + @@ -8038,30 +7993,15 @@ export overcloud-rabbit-2=overcloud-rabbit-2 -=#=#=#= End test: Show allocation scores with crm_simulate - OK (0) =#=#=#= -* 
Passed: crm_simulate - Show allocation scores with crm_simulate +/tmp/cts-cli.ta_outfile.H4yI62EbXj:1: element pacemaker-result: Relax-NG validity error : Expecting element status, got cluster_status +/tmp/cts-cli.ta_outfile.H4yI62EbXj:1: element pacemaker-result: Relax-NG validity error : Element pacemaker-result failed to validate content +/tmp/cts-cli.ta_outfile.H4yI62EbXj fails to validate +=#=#=#= End test: Show allocation scores with crm_simulate - Failed to validate (3) =#=#=#= +* Failed (rc=003): crm_simulate - Show allocation scores with crm_simulate =#=#=#= Begin test: Show utilization with crm_simulate =#=#=#= 4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure -[ cluster01 cluster02 ] -[ httpd-bundle-0 httpd-bundle-1 ] - -Started: [ cluster01 cluster02 ] -Fencing (stonith:fence_xvm): Started cluster01 -dummy (ocf:pacemaker:Dummy): Started cluster02 -Stopped (disabled): [ cluster01 cluster02 ] -inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) -inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) -httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 -httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 -httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped -Public-IP (ocf:heartbeat:IPaddr): Started cluster02 -Email (lsb:exim): Started cluster02 -Started: [ cluster01 cluster02 ] -Promoted: [ cluster02 ] -Unpromoted: [ cluster01 ] - -Only 'private' parameters to 1m-interval monitor for dummy on cluster02 changed: 0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4 +Only 'private' parameters to 60s-interval monitor for dummy on cluster02 changed: 0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4 Original: cluster01 capacity: Original: cluster02 capacity: Original: httpd-bundle-0 capacity: @@ -8093,6 +8033,36 @@ Remaining: httpd-bundle-0 capacity: Remaining: httpd-bundle-1 capacity: Remaining: httpd-bundle-2 capacity: +[ cluster01 cluster02 ] +[ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] + +Started: [ cluster01 cluster02 ] +Fencing (stonith:fence_xvm): Started cluster01 +dummy (ocf:pacemaker:Dummy): Started cluster02 +Stopped (disabled): [ cluster01 cluster02 ] +inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) +inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) +httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 +httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 +httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped +Public-IP (ocf:heartbeat:IPaddr): Started cluster02 +Email (lsb:exim): Started cluster02 +Started: [ cluster01 cluster02 ] +Promoted: [ cluster02 ] +Unpromoted: [ cluster01 ] + +Only 'private' parameters to 60s-interval monitor for dummy on cluster02 changed: 0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4 +Original: cluster01 capacity: +Original: cluster02 capacity: +Original: httpd-bundle-0 capacity: +Original: httpd-bundle-1 capacity: +Original: httpd-bundle-2 capacity: +Remaining: cluster01 capacity: +Remaining: cluster02 capacity: +Remaining: httpd-bundle-0 capacity: +Remaining: httpd-bundle-1 capacity: +Remaining: httpd-bundle-2 capacity: + Start httpd-bundle-2 ( cluster01 ) due to unrunnable httpd-bundle-docker-2 start (blocked) Start httpd:2 ( httpd-bundle-2 ) due to unrunnable httpd-bundle-docker-2 start (blocked) =#=#=#= End test: Show utilization with crm_simulate - OK (0) =#=#=#= @@ -8101,9 +8071,12 @@ Start httpd:2 ( httpd-bundle-2 ) due to unrunnable httpd-bundle 4 of 32 resource 
instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: +Only 'private' parameters to 60s-interval monitor for dummy on cluster02 changed: 0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4 + * Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] * Full List of Resources: * Clone Set: ping-clone [ping]: @@ -8153,9 +8126,11 @@ Executing Cluster Transition: * Pseudo action: httpd-bundle_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] * Full List of Resources: * Clone Set: ping-clone [ping]: @@ -8185,9 +8160,12 @@ Revised Cluster Status: 4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: +Only 'private' parameters to 60s-interval monitor for dummy on cluster02 changed: 0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4 + * Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] * Full List of Resources: * Clone Set: ping-clone [ping]: @@ -8238,10 +8216,12 @@ Executing Cluster Transition: * Pseudo action: httpd-bundle_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ cluster02 ] * OFFLINE: [ cluster01 ] - * GuestOnline: [ httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] * Full List of Resources: * Clone Set: ping-clone [ping]: @@ -8273,9 +8253,12 @@ Revised Cluster Status: 4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: +Only 'private' parameters to 60s-interval monitor for dummy on cluster02 changed: 0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4 + * Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] * Full List of Resources: * Clone Set: ping-clone [ping]: @@ -8359,10 +8342,12 @@ Executing Cluster Transition: * Pseudo action: httpd-bundle_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ cluster01 ] * OFFLINE: [ cluster02 ] - * GuestOnline: [ httpd-bundle-0 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] * Full List of Resources: * Clone Set: ping-clone [ping]: @@ -8591,7 +8576,7 @@ Cluster Summary: Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Active Resources: * Clone Set: ping-clone [ping]: diff --git a/cts/cli/regression.validity.exp b/cts/cli/regression.validity.exp index f020b206354..d943f4e06b4 100644 --- a/cts/cli/regression.validity.exp +++ b/cts/cli/regression.validity.exp @@ -218,22 +218,166 @@ pcmk__update_schema info: Transformed the configuration schema to pacemaker-3.1 unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity +update_validation debug: Testing 'pacemaker-1.2' validation (1 of X) +update_validation debug: 
pacemaker-1.2-style configuration is also valid for pacemaker-1.3 +update_validation debug: Testing 'pacemaker-1.3' validation (2 of X) +update_validation debug: Configuration valid for schema: pacemaker-1.3 +update_validation debug: pacemaker-1.3-style configuration is also valid for pacemaker-2.0 +update_validation debug: Testing 'pacemaker-2.0' validation (3 of X) +update_validation debug: Configuration valid for schema: pacemaker-2.0 +update_validation debug: pacemaker-2.0-style configuration is also valid for pacemaker-2.1 +update_validation debug: Testing 'pacemaker-2.1' validation (4 of X) +update_validation debug: Configuration valid for schema: pacemaker-2.1 +update_validation debug: pacemaker-2.1-style configuration is also valid for pacemaker-2.2 +update_validation debug: Testing 'pacemaker-2.2' validation (5 of X) +update_validation debug: Configuration valid for schema: pacemaker-2.2 +update_validation debug: pacemaker-2.2-style configuration is also valid for pacemaker-2.3 +update_validation debug: Testing 'pacemaker-2.3' validation (6 of X) +update_validation debug: Configuration valid for schema: pacemaker-2.3 +update_validation debug: pacemaker-2.3-style configuration is also valid for pacemaker-2.4 +update_validation debug: Testing 'pacemaker-2.4' validation (7 of X) +update_validation debug: Configuration valid for schema: pacemaker-2.4 +update_validation debug: pacemaker-2.4-style configuration is also valid for pacemaker-2.5 +update_validation debug: Testing 'pacemaker-2.5' validation (8 of X) +update_validation debug: Configuration valid for schema: pacemaker-2.5 +update_validation debug: pacemaker-2.5-style configuration is also valid for pacemaker-2.6 +update_validation debug: Testing 'pacemaker-2.6' validation (9 of X) +update_validation debug: Configuration valid for schema: pacemaker-2.6 +update_validation debug: pacemaker-2.6-style configuration is also valid for pacemaker-2.7 +update_validation debug: Testing 'pacemaker-2.7' validation (10 of X) +update_validation debug: Configuration valid for schema: pacemaker-2.7 +update_validation debug: pacemaker-2.7-style configuration is also valid for pacemaker-2.8 +update_validation debug: Testing 'pacemaker-2.8' validation (11 of X) +update_validation debug: Configuration valid for schema: pacemaker-2.8 +update_validation debug: pacemaker-2.8-style configuration is also valid for pacemaker-2.9 +update_validation debug: Testing 'pacemaker-2.9' validation (12 of X) +update_validation debug: Configuration valid for schema: pacemaker-2.9 +update_validation debug: pacemaker-2.9-style configuration is also valid for pacemaker-2.10 +update_validation debug: Testing 'pacemaker-2.10' validation (13 of X) +update_validation debug: Configuration valid for schema: pacemaker-2.10 +update_validation debug: pacemaker-2.10-style configuration is also valid for pacemaker-3.0 +update_validation debug: Testing 'pacemaker-3.0' validation (14 of X) +update_validation debug: Configuration valid for schema: pacemaker-3.0 +update_validation debug: pacemaker-3.0-style configuration is also valid for pacemaker-3.1 +update_validation debug: Testing 'pacemaker-3.1' validation (15 of X) +update_validation debug: Configuration valid for schema: pacemaker-3.1 +update_validation debug: pacemaker-3.1-style configuration is also valid for pacemaker-3.2 +update_validation debug: Testing 'pacemaker-3.2' validation (16 of X) +update_validation debug: Configuration valid for schema: pacemaker-3.2 +update_validation debug: pacemaker-3.2-style configuration 
is also valid for pacemaker-3.3 +update_validation debug: Testing 'pacemaker-3.3' validation (17 of X) +update_validation debug: Configuration valid for schema: pacemaker-3.3 +update_validation debug: pacemaker-3.3-style configuration is also valid for pacemaker-3.4 +update_validation debug: Testing 'pacemaker-3.4' validation (18 of X) +update_validation debug: Configuration valid for schema: pacemaker-3.4 +update_validation debug: pacemaker-3.4-style configuration is also valid for pacemaker-3.5 +update_validation debug: Testing 'pacemaker-3.5' validation (19 of X) +update_validation debug: Configuration valid for schema: pacemaker-3.5 +update_validation debug: pacemaker-3.5-style configuration is also valid for pacemaker-3.6 +update_validation debug: Testing 'pacemaker-3.6' validation (20 of X) +update_validation debug: Configuration valid for schema: pacemaker-3.6 +update_validation debug: pacemaker-3.6-style configuration is also valid for pacemaker-3.7 +update_validation debug: Testing 'pacemaker-3.7' validation (21 of X) +update_validation debug: Configuration valid for schema: pacemaker-3.7 +update_validation debug: pacemaker-3.7-style configuration is also valid for pacemaker-3.8 +update_validation debug: Testing 'pacemaker-3.8' validation (22 of X) +update_validation debug: Configuration valid for schema: pacemaker-3.8 +update_validation debug: pacemaker-3.8-style configuration is also valid for pacemaker-3.9 +update_validation debug: Testing 'pacemaker-3.9' validation (23 of X) +update_validation debug: Configuration valid for schema: pacemaker-3.9 +update_validation trace: Stopping at pacemaker-3.9 +update_validation info: Transformed the configuration from pacemaker-1.2 to pacemaker-3.9 +pcmk__verify error: CIB did not pass schema validation unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity +update_validation debug: Testing 'pacemaker-1.2' validation (1 of X) +update_validation debug: pacemaker-1.2-style configuration is also valid for pacemaker-1.3 +update_validation debug: Testing 'pacemaker-1.3' validation (2 of X) +update_validation debug: Configuration valid for schema: pacemaker-1.3 +update_validation debug: pacemaker-1.3-style configuration is also valid for pacemaker-2.0 +update_validation debug: Testing 'pacemaker-2.0' validation (3 of X) +update_validation debug: Configuration valid for schema: pacemaker-2.0 +update_validation debug: pacemaker-2.0-style configuration is also valid for pacemaker-2.1 +update_validation debug: Testing 'pacemaker-2.1' validation (4 of X) +update_validation debug: Configuration valid for schema: pacemaker-2.1 +update_validation debug: pacemaker-2.1-style configuration is also valid for pacemaker-2.2 +update_validation debug: Testing 'pacemaker-2.2' validation (5 of X) +update_validation debug: Configuration valid for schema: pacemaker-2.2 +update_validation debug: pacemaker-2.2-style configuration is also valid for pacemaker-2.3 +update_validation debug: Testing 'pacemaker-2.3' validation (6 of X) +update_validation debug: Configuration valid for schema: pacemaker-2.3 +update_validation debug: pacemaker-2.3-style configuration is also valid for pacemaker-2.4 +update_validation debug: Testing 'pacemaker-2.4' validation (7 of X) +update_validation debug: Configuration valid for schema: pacemaker-2.4 
+update_validation debug: pacemaker-2.4-style configuration is also valid for pacemaker-2.5 +update_validation debug: Testing 'pacemaker-2.5' validation (8 of X) +update_validation debug: Configuration valid for schema: pacemaker-2.5 +update_validation debug: pacemaker-2.5-style configuration is also valid for pacemaker-2.6 +update_validation debug: Testing 'pacemaker-2.6' validation (9 of X) +update_validation debug: Configuration valid for schema: pacemaker-2.6 +update_validation debug: pacemaker-2.6-style configuration is also valid for pacemaker-2.7 +update_validation debug: Testing 'pacemaker-2.7' validation (10 of X) +update_validation debug: Configuration valid for schema: pacemaker-2.7 +update_validation debug: pacemaker-2.7-style configuration is also valid for pacemaker-2.8 +update_validation debug: Testing 'pacemaker-2.8' validation (11 of X) +update_validation debug: Configuration valid for schema: pacemaker-2.8 +update_validation debug: pacemaker-2.8-style configuration is also valid for pacemaker-2.9 +update_validation debug: Testing 'pacemaker-2.9' validation (12 of X) +update_validation debug: Configuration valid for schema: pacemaker-2.9 +update_validation debug: pacemaker-2.9-style configuration is also valid for pacemaker-2.10 +update_validation debug: Testing 'pacemaker-2.10' validation (13 of X) +update_validation debug: Configuration valid for schema: pacemaker-2.10 +update_validation debug: pacemaker-2.10-style configuration is also valid for pacemaker-3.0 +update_validation debug: Testing 'pacemaker-3.0' validation (14 of X) +update_validation debug: Configuration valid for schema: pacemaker-3.0 +update_validation debug: pacemaker-3.0-style configuration is also valid for pacemaker-3.1 +update_validation debug: Testing 'pacemaker-3.1' validation (15 of X) +update_validation debug: Configuration valid for schema: pacemaker-3.1 +update_validation debug: pacemaker-3.1-style configuration is also valid for pacemaker-3.2 +update_validation debug: Testing 'pacemaker-3.2' validation (16 of X) +update_validation debug: Configuration valid for schema: pacemaker-3.2 +update_validation debug: pacemaker-3.2-style configuration is also valid for pacemaker-3.3 +update_validation debug: Testing 'pacemaker-3.3' validation (17 of X) +update_validation debug: Configuration valid for schema: pacemaker-3.3 +update_validation debug: pacemaker-3.3-style configuration is also valid for pacemaker-3.4 +update_validation debug: Testing 'pacemaker-3.4' validation (18 of X) +update_validation debug: Configuration valid for schema: pacemaker-3.4 +update_validation debug: pacemaker-3.4-style configuration is also valid for pacemaker-3.5 +update_validation debug: Testing 'pacemaker-3.5' validation (19 of X) +update_validation debug: Configuration valid for schema: pacemaker-3.5 +update_validation debug: pacemaker-3.5-style configuration is also valid for pacemaker-3.6 +update_validation debug: Testing 'pacemaker-3.6' validation (20 of X) +update_validation debug: Configuration valid for schema: pacemaker-3.6 +update_validation debug: pacemaker-3.6-style configuration is also valid for pacemaker-3.7 +update_validation debug: Testing 'pacemaker-3.7' validation (21 of X) +update_validation debug: Configuration valid for schema: pacemaker-3.7 +update_validation debug: pacemaker-3.7-style configuration is also valid for pacemaker-3.8 +update_validation debug: Testing 'pacemaker-3.8' validation (22 of X) +update_validation debug: Configuration valid for schema: pacemaker-3.8 +update_validation debug: 
pacemaker-3.8-style configuration is also valid for pacemaker-3.9 +update_validation debug: Testing 'pacemaker-3.9' validation (23 of X) +update_validation debug: Configuration valid for schema: pacemaker-3.9 +update_validation trace: Stopping at pacemaker-3.9 +update_validation info: Transformed the configuration from pacemaker-1.2 to pacemaker-3.9 +pcmk__verify error: CIB did not pass schema validation Current cluster status: + * Cluster Summary: + * CIB has errors (for details, run crm_verify -LV) + * Full List of Resources: - * dummy1 (ocf:pacemaker:Dummy): Stopped - * dummy2 (ocf:pacemaker:Dummy): Stopped + * No resources Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * CIB has errors (for details, run crm_verify -LV) + * Full List of Resources: - * dummy1 (ocf:pacemaker:Dummy): Stopped - * dummy2 (ocf:pacemaker:Dummy): Stopped + * No resources =#=#=#= End test: Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1) - OK (0) =#=#=#= * Passed: crm_simulate - Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1) =#=#=#= Begin test: Make resulting CIB valid, although without validate-with attribute =#=#=#= @@ -259,22 +403,32 @@ Schema validation of configuration is disabled (support for validate-with set to unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity +Schema validation of configuration is disabled (enabling is encouraged and prevents common misconfigurations) +pcmk__verify error: CIB did not pass schema validation unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity +Schema validation of configuration is disabled (enabling is encouraged and prevents common misconfigurations) +pcmk__verify error: CIB did not pass schema validation Current cluster status: + * Cluster Summary: + * CIB has errors (for details, run crm_verify -LV) + * Full List of Resources: - * dummy1 (ocf:pacemaker:Dummy): Stopped - * dummy2 (ocf:pacemaker:Dummy): Stopped + * dummy1 (ocf:pacemaker:Dummy): Stopped (unmanaged) + * dummy2 (ocf:pacemaker:Dummy): Stopped (unmanaged) Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * CIB has errors (for details, run crm_verify -LV) + * Full List of Resources: - * dummy1 (ocf:pacemaker:Dummy): Stopped - * dummy2 (ocf:pacemaker:Dummy): Stopped + * dummy1 (ocf:pacemaker:Dummy): Stopped (unmanaged) + * dummy2 (ocf:pacemaker:Dummy): Stopped (unmanaged) =#=#=#= End test: Run crm_simulate with valid CIB, but without validate-with attribute - OK (0) =#=#=#= * Passed: crm_simulate - Run crm_simulate with valid CIB, but without validate-with attribute =#=#=#= Begin test: Make resulting CIB invalid, and without validate-with attribute =#=#=#= @@ -404,21 +558,31 @@ validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constr unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with 
shared data need STONITH to ensure data integrity +Schema validation of configuration is disabled (enabling is encouraged and prevents common misconfigurations) +pcmk__verify error: CIB did not pass schema validation unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity +Schema validation of configuration is disabled (enabling is encouraged and prevents common misconfigurations) +pcmk__verify error: CIB did not pass schema validation Current cluster status: + * Cluster Summary: + * CIB has errors (for details, run crm_verify -LV) + * Full List of Resources: - * dummy1 (ocf:pacemaker:Dummy): Stopped - * dummy2 (ocf:pacemaker:Dummy): Stopped + * dummy1 (ocf:pacemaker:Dummy): Stopped (unmanaged) + * dummy2 (ocf:pacemaker:Dummy): Stopped (unmanaged) Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * CIB has errors (for details, run crm_verify -LV) + * Full List of Resources: - * dummy1 (ocf:pacemaker:Dummy): Stopped - * dummy2 (ocf:pacemaker:Dummy): Stopped + * dummy1 (ocf:pacemaker:Dummy): Stopped (unmanaged) + * dummy2 (ocf:pacemaker:Dummy): Stopped (unmanaged) =#=#=#= End test: Run crm_simulate with invalid CIB, also without validate-with attribute - OK (0) =#=#=#= * Passed: crm_simulate - Run crm_simulate with invalid CIB, also without validate-with attribute From d2765843dc887ef0077b92379be5d0126b1d6cb5 Mon Sep 17 00:00:00 2001 From: Grace Chin Date: Wed, 13 Mar 2024 10:47:22 -0400 Subject: [PATCH 5/7] Update regression test output --- cts/cli/regression.crm_mon.exp | 72 +-- cts/cli/regression.feature_set.exp | 2 - cts/cli/regression.tools.exp | 408 +++++++------- cts/cli/regression.validity.exp | 12 +- .../summary/1-a-then-bm-move-b.summary | 11 +- .../10-a-then-bm-b-move-a-clone.summary | 20 +- ...-a-then-bm-b-move-a-clone-starting.summary | 21 +- cts/scheduler/summary/1360.summary | 12 +- cts/scheduler/summary/1484.summary | 7 +- cts/scheduler/summary/1494.summary | 11 +- .../summary/2-am-then-b-move-a.summary | 11 +- .../summary/3-am-then-bm-both-migrate.summary | 18 +- .../4-am-then-bm-b-not-migratable.summary | 16 +- .../5-am-then-bm-a-not-migratable.summary | 14 +- cts/scheduler/summary/594.summary | 33 +- cts/scheduler/summary/6-migrate-group.summary | 29 +- cts/scheduler/summary/662.summary | 40 +- cts/scheduler/summary/696.summary | 37 +- .../7-migrate-group-one-unmigratable.summary | 25 +- cts/scheduler/summary/726.summary | 63 +-- cts/scheduler/summary/735.summary | 27 +- cts/scheduler/summary/764.summary | 27 +- cts/scheduler/summary/797.summary | 49 +- ...-am-then-bm-a-migrating-b-stopping.summary | 14 +- cts/scheduler/summary/829.summary | 36 +- ...-am-then-bm-b-migrating-a-stopping.summary | 10 +- cts/scheduler/summary/994-2.summary | 15 +- cts/scheduler/summary/994.summary | 13 +- .../summary/a-demote-then-b-migrate.summary | 41 +- .../summary/a-promote-then-b-migrate.summary | 27 +- .../summary/allow-unhealthy-nodes.summary | 9 +- .../summary/anon-instance-pending.summary | 151 +---- .../summary/anti-colocation-order.summary | 28 +- .../summary/anti-colocation-promoted.summary | 18 +- .../anti-colocation-unpromoted.summary | 18 +- cts/scheduler/summary/asymmetric.summary | 8 +- .../summary/asymmetrical-order-move.summary | 7 +- .../asymmetrical-order-restart.summary | 9 +-
cts/scheduler/summary/attrs1.summary | 9 +- cts/scheduler/summary/attrs2.summary | 9 +- cts/scheduler/summary/attrs3.summary | 9 +- cts/scheduler/summary/attrs4.summary | 9 +- cts/scheduler/summary/attrs5.summary | 6 +- cts/scheduler/summary/attrs6.summary | 9 +- cts/scheduler/summary/attrs7.summary | 9 +- cts/scheduler/summary/attrs8.summary | 9 +- cts/scheduler/summary/balanced.summary | 16 +- cts/scheduler/summary/base-score.summary | 11 +- cts/scheduler/summary/bnc-515172.summary | 10 +- cts/scheduler/summary/bug-1572-1.summary | 59 +- cts/scheduler/summary/bug-1572-2.summary | 45 +- cts/scheduler/summary/bug-1573.summary | 12 +- cts/scheduler/summary/bug-1685.summary | 23 +- cts/scheduler/summary/bug-1718.summary | 17 +- cts/scheduler/summary/bug-1765.summary | 15 +- cts/scheduler/summary/bug-1820-1.summary | 28 +- cts/scheduler/summary/bug-1820.summary | 23 +- cts/scheduler/summary/bug-1822.summary | 25 +- .../summary/bug-5014-A-start-B-start.summary | 14 +- .../summary/bug-5014-A-stop-B-started.summary | 7 +- .../bug-5014-A-stopped-B-stopped.summary | 6 +- .../bug-5014-CLONE-A-start-B-start.summary | 18 +- .../bug-5014-CLONE-A-stop-B-started.summary | 9 +- .../bug-5014-CthenAthenB-C-stopped.summary | 7 +- .../bug-5014-GROUP-A-start-B-start.summary | 16 +- ...bug-5014-GROUP-A-stopped-B-started.summary | 11 +- ...bug-5014-GROUP-A-stopped-B-stopped.summary | 4 + ...5014-ordered-set-symmetrical-false.summary | 7 +- ...-5014-ordered-set-symmetrical-true.summary | 12 +- cts/scheduler/summary/bug-5025-1.summary | 7 +- cts/scheduler/summary/bug-5025-2.summary | 4 + cts/scheduler/summary/bug-5025-3.summary | 8 +- cts/scheduler/summary/bug-5025-4.summary | 10 +- cts/scheduler/summary/bug-5028-bottom.summary | 11 +- cts/scheduler/summary/bug-5028-detach.summary | 7 +- cts/scheduler/summary/bug-5028.summary | 8 +- cts/scheduler/summary/bug-5038.summary | 4 + cts/scheduler/summary/bug-5059.summary | 53 +- .../summary/bug-5069-op-disabled.summary | 6 +- .../summary/bug-5069-op-enabled.summary | 4 + .../bug-5140-require-all-false.summary | 23 +- .../summary/bug-5143-ms-shuffle.summary | 20 +- .../summary/bug-5186-partial-migrate.summary | 50 +- cts/scheduler/summary/bug-cl-5168.summary | 20 +- cts/scheduler/summary/bug-cl-5170.summary | 12 +- cts/scheduler/summary/bug-cl-5212.summary | 24 +- cts/scheduler/summary/bug-cl-5213.summary | 5 +- cts/scheduler/summary/bug-cl-5219.summary | 9 +- cts/scheduler/summary/bug-cl-5247.summary | 55 +- cts/scheduler/summary/bug-lf-1852.summary | 15 +- cts/scheduler/summary/bug-lf-1920.summary | 5 +- cts/scheduler/summary/bug-lf-2106.summary | 16 +- cts/scheduler/summary/bug-lf-2153.summary | 26 +- cts/scheduler/summary/bug-lf-2160.summary | 6 +- cts/scheduler/summary/bug-lf-2171.summary | 20 +- cts/scheduler/summary/bug-lf-2213.summary | 15 +- cts/scheduler/summary/bug-lf-2317.summary | 20 +- cts/scheduler/summary/bug-lf-2358.summary | 15 +- cts/scheduler/summary/bug-lf-2361.summary | 21 +- cts/scheduler/summary/bug-lf-2422.summary | 54 +- cts/scheduler/summary/bug-lf-2435.summary | 16 +- cts/scheduler/summary/bug-lf-2445.summary | 12 +- cts/scheduler/summary/bug-lf-2453.summary | 21 +- cts/scheduler/summary/bug-lf-2474.summary | 10 +- cts/scheduler/summary/bug-lf-2493.summary | 44 +- cts/scheduler/summary/bug-lf-2508.summary | 69 +-- cts/scheduler/summary/bug-lf-2544.summary | 10 +- cts/scheduler/summary/bug-lf-2551.summary | 248 ++++---- cts/scheduler/summary/bug-lf-2574.summary | 17 +- cts/scheduler/summary/bug-lf-2581.summary | 29 +- 
cts/scheduler/summary/bug-lf-2606.summary | 27 +- cts/scheduler/summary/bug-lf-2619.summary | 70 +-- cts/scheduler/summary/bug-n-385265-2.summary | 18 +- cts/scheduler/summary/bug-n-385265.summary | 11 +- cts/scheduler/summary/bug-n-387749.summary | 34 +- cts/scheduler/summary/bug-pm-11.summary | 27 +- cts/scheduler/summary/bug-pm-12.summary | 32 +- cts/scheduler/summary/bug-rh-1097457.summary | 56 +- cts/scheduler/summary/bug-rh-880249.summary | 13 +- cts/scheduler/summary/bug-suse-707150.summary | 42 +- .../bundle-connection-with-container.summary | 26 +- .../summary/bundle-interleave-promote.summary | 22 +- .../summary/bundle-interleave-start.summary | 131 +---- .../summary/bundle-nested-colocation.summary | 66 +-- .../summary/bundle-order-fencing.summary | 173 +----- .../bundle-order-partial-start-2.summary | 57 +- .../bundle-order-partial-start.summary | 56 +- .../summary/bundle-order-partial-stop.summary | 107 +--- .../bundle-order-startup-clone-2.summary | 168 +----- .../bundle-order-startup-clone.summary | 46 +- .../summary/bundle-order-startup.summary | 103 +--- .../summary/bundle-order-stop-clone.summary | 55 +- .../bundle-order-stop-on-remote.summary | 151 +---- .../summary/bundle-order-stop.summary | 107 +--- .../summary/bundle-probe-order-1.summary | 13 +- .../summary/bundle-probe-order-2.summary | 11 +- .../summary/bundle-probe-order-3.summary | 12 +- .../summary/bundle-probe-remotes.summary | 134 +---- .../bundle-promoted-anticolocation-1.summary | 9 +- .../bundle-promoted-anticolocation-2.summary | 9 +- .../bundle-promoted-anticolocation-3.summary | 22 +- .../bundle-promoted-anticolocation-4.summary | 22 +- .../bundle-promoted-anticolocation-5.summary | 24 +- .../bundle-promoted-anticolocation-6.summary | 24 +- .../bundle-promoted-colocation-1.summary | 9 +- .../bundle-promoted-colocation-2.summary | 9 +- .../bundle-promoted-colocation-3.summary | 22 +- .../bundle-promoted-colocation-4.summary | 22 +- .../bundle-promoted-colocation-5.summary | 24 +- .../bundle-promoted-colocation-6.summary | 24 +- .../bundle-promoted-location-1.summary | 4 + .../bundle-promoted-location-2.summary | 35 +- .../bundle-promoted-location-3.summary | 4 + .../bundle-promoted-location-4.summary | 4 + .../bundle-promoted-location-5.summary | 4 + .../bundle-promoted-location-6.summary | 22 +- .../summary/bundle-replicas-change.summary | 54 +- .../cancel-behind-moving-remote.summary | 115 ++-- .../clbz5007-promotable-colocation.summary | 12 +- cts/scheduler/summary/clone-anon-dup.summary | 22 +- .../summary/clone-anon-failcount.summary | 56 +- .../summary/clone-anon-probe-1.summary | 12 +- .../summary/clone-anon-probe-2.summary | 10 +- .../clone-fail-block-colocation.summary | 28 +- .../summary/clone-interleave-1.summary | 28 +- .../summary/clone-interleave-2.summary | 24 +- .../summary/clone-interleave-3.summary | 21 +- cts/scheduler/summary/clone-max-zero.summary | 24 +- .../summary/clone-no-shuffle.summary | 44 +- .../summary/clone-order-16instances.summary | 37 +- .../summary/clone-order-primitive.summary | 13 +- .../clone-recover-no-shuffle-1.summary | 11 +- .../clone-recover-no-shuffle-10.summary | 11 +- .../clone-recover-no-shuffle-11.summary | 15 +- .../clone-recover-no-shuffle-12.summary | 25 +- .../clone-recover-no-shuffle-2.summary | 15 +- .../clone-recover-no-shuffle-3.summary | 20 +- .../clone-recover-no-shuffle-4.summary | 11 +- .../clone-recover-no-shuffle-5.summary | 15 +- .../clone-recover-no-shuffle-6.summary | 20 +- .../clone-recover-no-shuffle-7.summary | 21 +- 
.../clone-recover-no-shuffle-8.summary | 33 +- .../clone-recover-no-shuffle-9.summary | 35 +- .../summary/clone-require-all-1.summary | 15 +- .../summary/clone-require-all-2.summary | 22 +- .../summary/clone-require-all-3.summary | 29 +- .../summary/clone-require-all-4.summary | 22 +- .../summary/clone-require-all-5.summary | 26 +- .../summary/clone-require-all-6.summary | 16 +- .../summary/clone-require-all-7.summary | 29 +- .../clone-require-all-no-interleave-1.summary | 32 +- .../clone-require-all-no-interleave-2.summary | 32 +- .../clone-require-all-no-interleave-3.summary | 42 +- .../clone-requires-quorum-recovery.summary | 20 +- .../summary/clone-requires-quorum.summary | 15 +- .../clone_min_interleave_start_one.summary | 11 +- .../clone_min_interleave_start_two.summary | 34 +- .../clone_min_interleave_stop_one.summary | 12 +- .../clone_min_interleave_stop_two.summary | 27 +- .../summary/clone_min_start_one.summary | 18 +- .../summary/clone_min_start_two.summary | 18 +- .../summary/clone_min_stop_all.summary | 26 +- .../summary/clone_min_stop_one.summary | 15 +- .../summary/clone_min_stop_two.summary | 26 +- .../summary/cloned-group-stop.summary | 52 +- cts/scheduler/summary/cloned-group.summary | 29 +- .../summary/cloned_start_one.summary | 20 +- .../summary/cloned_start_two.summary | 25 +- cts/scheduler/summary/cloned_stop_one.summary | 24 +- cts/scheduler/summary/cloned_stop_two.summary | 25 +- .../summary/cluster-specific-params.summary | 10 +- .../summary/colo_promoted_w_native.summary | 32 +- .../summary/colo_unpromoted_w_native.summary | 37 +- cts/scheduler/summary/coloc-attr.summary | 14 +- .../summary/coloc-clone-stays-active.summary | 13 +- .../coloc-dependee-should-move.summary | 28 +- .../coloc-dependee-should-stay.summary | 4 + cts/scheduler/summary/coloc-group.summary | 20 +- cts/scheduler/summary/coloc-intra-set.summary | 19 +- cts/scheduler/summary/coloc-list.summary | 24 +- cts/scheduler/summary/coloc-loop.summary | 22 +- cts/scheduler/summary/coloc-many-one.summary | 22 +- .../summary/coloc-negative-group.summary | 7 +- .../summary/coloc-unpromoted-anti.summary | 18 +- .../coloc-with-inner-group-member.summary | 21 +- cts/scheduler/summary/coloc_fp_logic.summary | 9 +- .../colocate-primitive-with-clone.summary | 24 +- .../summary/colocate-unmanaged-group.summary | 6 +- .../colocated-utilization-clone.summary | 48 +- .../colocated-utilization-group.summary | 35 +- .../colocated-utilization-primitive-1.summary | 21 +- .../colocated-utilization-primitive-2.summary | 18 +- .../summary/colocation-influence.summary | 85 +-- .../summary/colocation-priority-group.summary | 39 +- .../summary/colocation-vs-stickiness.summary | 4 + ...location_constraint_stops_promoted.summary | 24 +- ...cation_constraint_stops_unpromoted.summary | 17 +- cts/scheduler/summary/comments.summary | 14 +- .../summary/complex_enforce_colo.summary | 343 ++++-------- .../summary/concurrent-fencing.summary | 11 +- cts/scheduler/summary/container-1.summary | 18 +- cts/scheduler/summary/container-2.summary | 17 +- cts/scheduler/summary/container-3.summary | 16 +- cts/scheduler/summary/container-4.summary | 19 +- .../summary/container-group-1.summary | 20 +- .../summary/container-group-2.summary | 21 +- .../summary/container-group-3.summary | 19 +- .../summary/container-group-4.summary | 23 +- .../summary/container-is-remote-node.summary | 10 +- cts/scheduler/summary/date-1.summary | 9 +- cts/scheduler/summary/date-2.summary | 4 + cts/scheduler/summary/date-3.summary | 4 + 
.../summary/dc-fence-ordering.summary | 34 +- cts/scheduler/summary/enforce-colo1.summary | 17 +- .../expire-non-blocked-failure.summary | 7 +- .../expired-failed-probe-primitive.summary | 10 +- cts/scheduler/summary/expired-stop-1.summary | 8 +- cts/scheduler/summary/failcount-block.summary | 18 +- cts/scheduler/summary/failcount.summary | 8 +- .../failed-demote-recovery-promoted.summary | 38 +- .../summary/failed-demote-recovery.summary | 28 +- .../summary/failed-probe-clone.summary | 21 +- .../summary/failed-probe-primitive.summary | 10 +- .../failed-sticky-anticolocated-group.summary | 18 +- .../summary/failed-sticky-group.summary | 61 +- .../summary/force-anon-clone-max.summary | 47 +- .../summary/group-anticolocation-2.summary | 18 +- .../summary/group-anticolocation-3.summary | 11 +- .../summary/group-anticolocation-4.summary | 18 +- .../summary/group-anticolocation-5.summary | 18 +- .../summary/group-anticolocation.summary | 32 +- .../summary/group-colocation-failure.summary | 27 +- .../summary/group-dependents.summary | 155 +---- cts/scheduler/summary/group-fail.summary | 22 +- .../summary/group-stop-ordering.summary | 4 + .../summary/group-unmanaged-stopped.summary | 11 +- cts/scheduler/summary/group-unmanaged.summary | 4 + cts/scheduler/summary/group1.summary | 21 +- cts/scheduler/summary/group10.summary | 28 +- cts/scheduler/summary/group11.summary | 13 +- cts/scheduler/summary/group13.summary | 9 +- cts/scheduler/summary/group14.summary | 19 +- cts/scheduler/summary/group15.summary | 27 +- cts/scheduler/summary/group2.summary | 31 +- cts/scheduler/summary/group3.summary | 38 +- cts/scheduler/summary/group4.summary | 9 +- cts/scheduler/summary/group5.summary | 33 +- cts/scheduler/summary/group6.summary | 42 +- cts/scheduler/summary/group7.summary | 50 +- cts/scheduler/summary/group8.summary | 25 +- cts/scheduler/summary/group9.summary | 40 +- .../summary/guest-host-not-fenceable.summary | 39 +- .../summary/guest-node-cleanup.summary | 30 +- .../summary/guest-node-host-dies.summary | 60 +- cts/scheduler/summary/history-1.summary | 4 + .../summary/honor_stonith_rsc_order1.summary | 20 +- .../summary/honor_stonith_rsc_order2.summary | 28 +- .../summary/honor_stonith_rsc_order3.summary | 26 +- .../summary/honor_stonith_rsc_order4.summary | 16 +- .../summary/ignore_stonith_rsc_order1.summary | 12 +- .../summary/ignore_stonith_rsc_order2.summary | 18 +- .../summary/ignore_stonith_rsc_order3.summary | 20 +- .../summary/ignore_stonith_rsc_order4.summary | 20 +- cts/scheduler/summary/inc0.summary | 28 +- cts/scheduler/summary/inc1.summary | 38 +- cts/scheduler/summary/inc10.summary | 31 +- cts/scheduler/summary/inc11.summary | 27 +- cts/scheduler/summary/inc12.summary | 86 +-- cts/scheduler/summary/inc2.summary | 24 +- cts/scheduler/summary/inc3.summary | 41 +- cts/scheduler/summary/inc4.summary | 41 +- cts/scheduler/summary/inc5.summary | 68 +-- cts/scheduler/summary/inc6.summary | 50 +- cts/scheduler/summary/inc7.summary | 74 +-- cts/scheduler/summary/inc8.summary | 37 +- cts/scheduler/summary/inc9.summary | 16 +- cts/scheduler/summary/interleave-0.summary | 184 +----- cts/scheduler/summary/interleave-1.summary | 184 +----- cts/scheduler/summary/interleave-2.summary | 184 +----- cts/scheduler/summary/interleave-3.summary | 184 +----- .../summary/interleave-pseudo-stop.summary | 53 +- .../summary/interleave-restart.summary | 69 +-- cts/scheduler/summary/interleave-stop.summary | 53 +- cts/scheduler/summary/intervals.summary | 28 +- .../summary/leftover-pending-monitor.summary | 6 +- 
.../summary/load-stopped-loop-2.summary | 86 +-- .../summary/load-stopped-loop.summary | 243 ++++---- .../summary/location-date-rules-1.summary | 16 +- .../summary/location-date-rules-2.summary | 16 +- .../summary/location-sets-templates.summary | 34 +- cts/scheduler/summary/managed-0.summary | 85 +-- cts/scheduler/summary/managed-1.summary | 85 +-- cts/scheduler/summary/managed-2.summary | 119 +--- cts/scheduler/summary/migrate-1.summary | 13 +- cts/scheduler/summary/migrate-2.summary | 4 + cts/scheduler/summary/migrate-3.summary | 11 +- cts/scheduler/summary/migrate-4.summary | 8 +- cts/scheduler/summary/migrate-5.summary | 22 +- cts/scheduler/summary/migrate-begin.summary | 14 +- .../summary/migrate-both-vms.summary | 91 +-- cts/scheduler/summary/migrate-fail-2.summary | 13 +- cts/scheduler/summary/migrate-fail-3.summary | 10 +- cts/scheduler/summary/migrate-fail-4.summary | 12 +- cts/scheduler/summary/migrate-fail-5.summary | 11 +- cts/scheduler/summary/migrate-fail-6.summary | 13 +- cts/scheduler/summary/migrate-fail-7.summary | 10 +- cts/scheduler/summary/migrate-fail-8.summary | 12 +- cts/scheduler/summary/migrate-fail-9.summary | 11 +- cts/scheduler/summary/migrate-fencing.summary | 74 +-- .../summary/migrate-partial-1.summary | 7 +- .../summary/migrate-partial-2.summary | 13 +- .../summary/migrate-partial-3.summary | 13 +- .../summary/migrate-partial-4.summary | 35 +- .../summary/migrate-shutdown.summary | 58 +- .../summary/migrate-start-complex.summary | 30 +- cts/scheduler/summary/migrate-start.summary | 18 +- .../summary/migrate-stop-complex.summary | 32 +- .../migrate-stop-start-complex.summary | 32 +- cts/scheduler/summary/migrate-stop.summary | 22 +- .../summary/migrate-stop_start.summary | 26 +- cts/scheduler/summary/migrate-success.summary | 6 +- .../migration-behind-migrating-remote.summary | 18 +- .../migration-intermediary-cleaned.summary | 39 +- .../summary/migration-ping-pong.summary | 4 + cts/scheduler/summary/minimal.summary | 16 +- cts/scheduler/summary/mon-rsc-1.summary | 10 +- cts/scheduler/summary/mon-rsc-2.summary | 12 +- cts/scheduler/summary/mon-rsc-3.summary | 7 +- cts/scheduler/summary/mon-rsc-4.summary | 10 +- .../summary/monitor-onfail-restart.summary | 9 +- .../summary/monitor-onfail-stop.summary | 7 +- .../summary/monitor-recovery.summary | 10 +- cts/scheduler/summary/multi1.summary | 9 +- .../multiple-active-block-group.summary | 4 + .../multiple-monitor-one-failed.summary | 10 +- .../summary/multiply-active-stonith.summary | 13 +- .../summary/nested-remote-recovery.summary | 54 +- .../no-promote-on-unrunnable-guest.summary | 56 +- .../summary/no_quorum_demote.summary | 20 +- .../summary/node-maintenance-1.summary | 8 +- .../summary/node-maintenance-2.summary | 9 +- .../summary/node-pending-timeout.summary | 11 +- .../summary/not-installed-agent.summary | 14 +- .../summary/not-installed-tools.summary | 9 +- .../not-reschedule-unneeded-monitor.summary | 10 +- .../summary/notifs-for-unrunnable.summary | 38 +- cts/scheduler/summary/notify-0.summary | 16 +- cts/scheduler/summary/notify-1.summary | 27 +- cts/scheduler/summary/notify-2.summary | 27 +- cts/scheduler/summary/notify-3.summary | 41 +- .../notify-behind-stopping-remote.summary | 43 +- cts/scheduler/summary/novell-239079.summary | 16 +- cts/scheduler/summary/novell-239082.summary | 45 +- cts/scheduler/summary/novell-239087.summary | 4 + cts/scheduler/summary/novell-251689.summary | 15 +- cts/scheduler/summary/novell-252693-2.summary | 73 +-- cts/scheduler/summary/novell-252693-3.summary | 82 +-- 
cts/scheduler/summary/novell-252693.summary | 69 +-- .../summary/nvpair-date-rules-1.summary | 16 +- cts/scheduler/summary/nvpair-id-ref.summary | 16 +- .../summary/obsolete-lrm-resource.summary | 11 +- .../summary/ocf_degraded-remap-ocf_ok.summary | 4 + ...ocf_degraded_promoted-remap-ocf_ok.summary | 4 + cts/scheduler/summary/on-fail-ignore.summary | 8 + cts/scheduler/summary/on_fail_demote1.summary | 41 +- cts/scheduler/summary/on_fail_demote2.summary | 18 +- cts/scheduler/summary/on_fail_demote3.summary | 12 +- cts/scheduler/summary/on_fail_demote4.summary | 149 +---- cts/scheduler/summary/one-or-more-0.summary | 21 +- cts/scheduler/summary/one-or-more-1.summary | 8 +- cts/scheduler/summary/one-or-more-2.summary | 18 +- cts/scheduler/summary/one-or-more-3.summary | 11 +- cts/scheduler/summary/one-or-more-4.summary | 18 +- cts/scheduler/summary/one-or-more-5.summary | 24 +- cts/scheduler/summary/one-or-more-6.summary | 7 +- cts/scheduler/summary/one-or-more-7.summary | 7 +- .../one-or-more-unrunnable-instances.summary | 529 +++--------------- cts/scheduler/summary/op-defaults-2.summary | 32 +- cts/scheduler/summary/op-defaults-3.summary | 15 +- cts/scheduler/summary/op-defaults.summary | 32 +- cts/scheduler/summary/order-clone.summary | 7 +- .../summary/order-expired-failure.summary | 28 +- .../summary/order-first-probes.summary | 20 +- cts/scheduler/summary/order-mandatory.summary | 14 +- .../summary/order-optional-keyword.summary | 7 +- cts/scheduler/summary/order-optional.summary | 7 +- cts/scheduler/summary/order-required.summary | 14 +- .../summary/order-serialize-set.summary | 49 +- cts/scheduler/summary/order-serialize.summary | 49 +- cts/scheduler/summary/order-sets.summary | 26 +- .../summary/order-wrong-kind.summary | 17 +- cts/scheduler/summary/order1.summary | 19 +- cts/scheduler/summary/order2.summary | 24 +- cts/scheduler/summary/order3.summary | 24 +- cts/scheduler/summary/order4.summary | 19 +- cts/scheduler/summary/order5.summary | 28 +- cts/scheduler/summary/order6.summary | 30 +- cts/scheduler/summary/order7.summary | 17 +- .../order_constraint_stops_promoted.summary | 29 +- .../order_constraint_stops_unpromoted.summary | 19 +- .../summary/ordered-set-basic-startup.summary | 13 +- .../summary/ordered-set-natural.summary | 6 + cts/scheduler/summary/origin.summary | 5 +- cts/scheduler/summary/orphan-0.summary | 19 +- cts/scheduler/summary/orphan-1.summary | 24 +- cts/scheduler/summary/orphan-2.summary | 26 +- cts/scheduler/summary/params-0.summary | 19 +- cts/scheduler/summary/params-1.summary | 25 +- cts/scheduler/summary/params-2.summary | 21 +- cts/scheduler/summary/params-3.summary | 25 +- cts/scheduler/summary/params-4.summary | 24 +- cts/scheduler/summary/params-5.summary | 25 +- cts/scheduler/summary/params-6.summary | 155 +++-- ...ial-live-migration-multiple-active.summary | 11 +- .../summary/partial-unmanaged-group.summary | 9 +- .../summary/pending-node-no-uname.summary | 5 +- cts/scheduler/summary/per-node-attrs.summary | 10 +- .../summary/per-op-failcount.summary | 16 +- .../summary/placement-capacity.summary | 11 +- .../summary/placement-location.summary | 13 +- .../summary/placement-priority.summary | 11 +- .../summary/placement-stickiness.summary | 13 +- .../primitive-with-group-with-clone.summary | 48 +- ...primitive-with-group-with-promoted.summary | 52 +- .../primitive-with-unrunnable-group.summary | 9 +- .../summary/priority-fencing-delay.summary | 67 +-- cts/scheduler/summary/probe-0.summary | 18 +- cts/scheduler/summary/probe-1.summary | 9 +- 
cts/scheduler/summary/probe-2.summary | 110 +--- cts/scheduler/summary/probe-3.summary | 4 + cts/scheduler/summary/probe-4.summary | 4 + .../summary/probe-pending-node.summary | 4 + ...robe-target-of-failed-migrate_to-1.summary | 6 +- ...robe-target-of-failed-migrate_to-2.summary | 4 + cts/scheduler/summary/probe-timeout.summary | 18 +- cts/scheduler/summary/promoted-0.summary | 28 +- cts/scheduler/summary/promoted-1.summary | 31 +- cts/scheduler/summary/promoted-10.summary | 56 +- cts/scheduler/summary/promoted-11.summary | 24 +- cts/scheduler/summary/promoted-12.summary | 9 +- cts/scheduler/summary/promoted-13.summary | 43 +- cts/scheduler/summary/promoted-2.summary | 52 +- cts/scheduler/summary/promoted-3.summary | 31 +- cts/scheduler/summary/promoted-4.summary | 38 +- cts/scheduler/summary/promoted-5.summary | 31 +- cts/scheduler/summary/promoted-6.summary | 28 +- cts/scheduler/summary/promoted-7.summary | 72 +-- cts/scheduler/summary/promoted-8.summary | 74 +-- cts/scheduler/summary/promoted-9.summary | 31 +- .../summary/promoted-allow-start.summary | 4 + .../promoted-asymmetrical-order.summary | 16 +- .../summary/promoted-colocation.summary | 12 +- .../summary/promoted-demote-2.summary | 38 +- .../summary/promoted-demote-block.summary | 7 +- cts/scheduler/summary/promoted-demote.summary | 23 +- cts/scheduler/summary/promoted-depend.summary | 29 +- .../summary/promoted-dependent-ban.summary | 23 +- .../summary/promoted-failed-demote-2.summary | 29 +- .../summary/promoted-failed-demote.summary | 46 +- cts/scheduler/summary/promoted-group.summary | 15 +- cts/scheduler/summary/promoted-move.summary | 52 +- cts/scheduler/summary/promoted-notify.summary | 22 +- .../summary/promoted-ordering.summary | 57 +- .../promoted-partially-demoted-group.summary | 108 +--- .../summary/promoted-probed-score.summary | 234 +------- .../promoted-promotion-constraint.summary | 12 +- cts/scheduler/summary/promoted-pseudo.summary | 38 +- .../summary/promoted-reattach.summary | 9 +- cts/scheduler/summary/promoted-role.summary | 10 +- .../summary/promoted-score-startup.summary | 38 +- cts/scheduler/summary/promoted-stop.summary | 12 +- .../promoted-unmanaged-monitor.summary | 14 +- .../summary/promoted-with-blocked.summary | 32 +- .../summary/promoted_monitor_restart.summary | 5 +- cts/scheduler/summary/quorum-1.summary | 15 +- cts/scheduler/summary/quorum-2.summary | 12 +- cts/scheduler/summary/quorum-3.summary | 14 +- cts/scheduler/summary/quorum-4.summary | 11 +- cts/scheduler/summary/quorum-5.summary | 18 +- cts/scheduler/summary/quorum-6.summary | 20 +- .../summary/rebalance-unique-clones.summary | 13 +- cts/scheduler/summary/rec-node-1.summary | 12 +- cts/scheduler/summary/rec-node-10.summary | 9 +- cts/scheduler/summary/rec-node-11.summary | 29 +- cts/scheduler/summary/rec-node-12.summary | 68 +-- cts/scheduler/summary/rec-node-13.summary | 15 +- cts/scheduler/summary/rec-node-14.summary | 11 +- cts/scheduler/summary/rec-node-15.summary | 55 +- cts/scheduler/summary/rec-node-2.summary | 39 +- cts/scheduler/summary/rec-node-3.summary | 12 +- cts/scheduler/summary/rec-node-4.summary | 21 +- cts/scheduler/summary/rec-node-5.summary | 14 +- cts/scheduler/summary/rec-node-6.summary | 21 +- cts/scheduler/summary/rec-node-7.summary | 21 +- cts/scheduler/summary/rec-node-8.summary | 10 +- cts/scheduler/summary/rec-node-9.summary | 6 +- cts/scheduler/summary/rec-rsc-0.summary | 8 +- cts/scheduler/summary/rec-rsc-1.summary | 9 +- cts/scheduler/summary/rec-rsc-2.summary | 10 +- 
cts/scheduler/summary/rec-rsc-3.summary | 8 +- cts/scheduler/summary/rec-rsc-4.summary | 5 +- cts/scheduler/summary/rec-rsc-5.summary | 21 +- cts/scheduler/summary/rec-rsc-6.summary | 9 +- cts/scheduler/summary/rec-rsc-7.summary | 8 +- cts/scheduler/summary/rec-rsc-8.summary | 4 + cts/scheduler/summary/rec-rsc-9.summary | 26 +- .../summary/reload-becomes-restart.summary | 33 +- .../remote-connection-shutdown.summary | 97 ++-- .../remote-connection-unrecoverable.summary | 35 +- cts/scheduler/summary/remote-disable.summary | 12 +- .../remote-fence-before-reconnect.summary | 13 +- .../summary/remote-fence-unclean-3.summary | 37 +- .../summary/remote-fence-unclean.summary | 31 +- .../summary/remote-fence-unclean2.summary | 10 +- cts/scheduler/summary/remote-move.summary | 16 +- cts/scheduler/summary/remote-orphaned.summary | 29 +- .../summary/remote-orphaned2.summary | 22 +- .../summary/remote-partial-migrate.summary | 80 +-- .../summary/remote-partial-migrate2.summary | 102 +--- .../summary/remote-probe-disable.summary | 13 +- .../summary/remote-reconnect-delay.summary | 9 +- .../summary/remote-recover-all.summary | 107 +--- .../summary/remote-recover-connection.summary | 81 +-- .../summary/remote-recover-fail.summary | 34 +- .../remote-recover-no-resources.summary | 98 +--- .../summary/remote-recover-unknown.summary | 99 +--- cts/scheduler/summary/remote-recover.summary | 17 +- cts/scheduler/summary/remote-recovery.summary | 81 +-- .../summary/remote-stale-node-entry.summary | 74 +-- .../summary/remote-start-fail.summary | 11 +- .../summary/remote-startup-probes.summary | 24 +- cts/scheduler/summary/remote-startup.summary | 25 +- cts/scheduler/summary/remote-unclean2.summary | 14 +- .../summary/reprobe-target_rc.summary | 4 + .../summary/resource-discovery.summary | 97 +--- .../restart-with-extra-op-params.summary | 7 +- .../summary/route-remote-notify.summary | 77 +-- cts/scheduler/summary/rsc-defaults-2.summary | 13 +- cts/scheduler/summary/rsc-defaults.summary | 20 +- .../summary/rsc-discovery-per-node.summary | 97 +--- cts/scheduler/summary/rsc-maintenance.summary | 6 +- .../summary/rsc-sets-clone-1.summary | 72 +-- cts/scheduler/summary/rsc-sets-clone.summary | 22 +- .../summary/rsc-sets-promoted.summary | 33 +- .../summary/rsc-sets-seq-false.summary | 30 +- .../summary/rsc-sets-seq-true.summary | 30 +- cts/scheduler/summary/rsc_dep1.summary | 14 +- cts/scheduler/summary/rsc_dep10.summary | 11 +- cts/scheduler/summary/rsc_dep2.summary | 16 +- cts/scheduler/summary/rsc_dep3.summary | 14 +- cts/scheduler/summary/rsc_dep4.summary | 20 +- cts/scheduler/summary/rsc_dep5.summary | 16 +- cts/scheduler/summary/rsc_dep7.summary | 19 +- cts/scheduler/summary/rsc_dep8.summary | 16 +- .../rule-dbl-as-auto-number-match.summary | 7 +- .../rule-dbl-as-auto-number-no-match.summary | 4 + .../summary/rule-dbl-as-integer-match.summary | 7 +- .../rule-dbl-as-integer-no-match.summary | 4 + .../summary/rule-dbl-as-number-match.summary | 7 +- .../rule-dbl-as-number-no-match.summary | 4 + ...e-dbl-parse-fail-default-str-match.summary | 7 +- ...bl-parse-fail-default-str-no-match.summary | 4 + .../rule-int-as-auto-integer-match.summary | 7 +- .../rule-int-as-auto-integer-no-match.summary | 4 + .../summary/rule-int-as-integer-match.summary | 7 +- .../rule-int-as-integer-no-match.summary | 4 + .../summary/rule-int-as-number-match.summary | 7 +- .../rule-int-as-number-no-match.summary | 4 + ...e-int-parse-fail-default-str-match.summary | 7 +- ...nt-parse-fail-default-str-no-match.summary | 4 + 
.../summary/shutdown-lock-expiration.summary | 12 +- cts/scheduler/summary/shutdown-lock.summary | 16 +- .../summary/shutdown-maintenance-node.summary | 4 + cts/scheduler/summary/simple1.summary | 4 + cts/scheduler/summary/simple11.summary | 14 +- cts/scheduler/summary/simple12.summary | 14 +- cts/scheduler/summary/simple2.summary | 9 +- cts/scheduler/summary/simple3.summary | 7 +- cts/scheduler/summary/simple4.summary | 7 +- cts/scheduler/summary/simple6.summary | 11 +- cts/scheduler/summary/simple7.summary | 8 +- cts/scheduler/summary/simple8.summary | 8 +- .../summary/site-specific-params.summary | 11 +- cts/scheduler/summary/standby.summary | 56 +- .../start-then-stop-with-unfence.summary | 23 +- cts/scheduler/summary/stonith-0.summary | 41 +- cts/scheduler/summary/stonith-1.summary | 69 +-- cts/scheduler/summary/stonith-2.summary | 11 +- cts/scheduler/summary/stonith-3.summary | 21 +- cts/scheduler/summary/stonith-4.summary | 14 +- .../summary/stop-all-resources.summary | 34 +- .../summary/stop-failure-no-fencing.summary | 10 +- .../summary/stop-failure-no-quorum.summary | 16 +- .../summary/stop-failure-with-fencing.summary | 16 +- .../summary/stop-unexpected-2.summary | 9 +- cts/scheduler/summary/stop-unexpected.summary | 23 +- .../summary/stopped-monitor-00.summary | 11 +- .../summary/stopped-monitor-01.summary | 9 +- .../summary/stopped-monitor-02.summary | 11 +- .../summary/stopped-monitor-03.summary | 8 +- .../summary/stopped-monitor-04.summary | 4 + .../summary/stopped-monitor-05.summary | 4 + .../summary/stopped-monitor-06.summary | 4 + .../summary/stopped-monitor-07.summary | 4 + .../summary/stopped-monitor-08.summary | 13 +- .../summary/stopped-monitor-09.summary | 4 + .../summary/stopped-monitor-10.summary | 4 + .../summary/stopped-monitor-11.summary | 4 + .../summary/stopped-monitor-12.summary | 4 + .../summary/stopped-monitor-20.summary | 8 +- .../summary/stopped-monitor-21.summary | 8 +- .../summary/stopped-monitor-22.summary | 10 +- .../summary/stopped-monitor-23.summary | 9 +- .../summary/stopped-monitor-24.summary | 4 + .../summary/stopped-monitor-25.summary | 6 +- .../summary/stopped-monitor-26.summary | 4 + .../summary/stopped-monitor-27.summary | 6 +- .../summary/stopped-monitor-30.summary | 6 +- .../summary/stopped-monitor-31.summary | 6 +- .../summary/suicide-needed-inquorate.summary | 11 +- .../suicide-not-needed-initial-quorum.summary | 11 +- .../suicide-not-needed-never-quorate.summary | 7 +- .../suicide-not-needed-quorate.summary | 11 +- cts/scheduler/summary/systemhealth1.summary | 9 +- cts/scheduler/summary/systemhealth2.summary | 21 +- cts/scheduler/summary/systemhealth3.summary | 21 +- cts/scheduler/summary/systemhealthm1.summary | 9 +- cts/scheduler/summary/systemhealthm2.summary | 21 +- cts/scheduler/summary/systemhealthm3.summary | 10 +- cts/scheduler/summary/systemhealthn1.summary | 9 +- cts/scheduler/summary/systemhealthn2.summary | 21 +- cts/scheduler/summary/systemhealthn3.summary | 21 +- cts/scheduler/summary/systemhealtho1.summary | 9 +- cts/scheduler/summary/systemhealtho2.summary | 10 +- cts/scheduler/summary/systemhealtho3.summary | 10 +- cts/scheduler/summary/systemhealthp1.summary | 9 +- cts/scheduler/summary/systemhealthp2.summary | 18 +- cts/scheduler/summary/systemhealthp3.summary | 10 +- .../summary/tags-coloc-order-1.summary | 24 +- .../summary/tags-coloc-order-2.summary | 64 +-- cts/scheduler/summary/tags-location.summary | 34 +- cts/scheduler/summary/tags-ticket.summary | 16 +- cts/scheduler/summary/target-0.summary | 19 +- 
cts/scheduler/summary/target-1.summary | 21 +- cts/scheduler/summary/target-2.summary | 24 +- cts/scheduler/summary/template-1.summary | 14 +- cts/scheduler/summary/template-2.summary | 14 +- cts/scheduler/summary/template-3.summary | 20 +- .../summary/template-clone-group.summary | 20 +- .../summary/template-clone-primitive.summary | 12 +- .../summary/template-coloc-1.summary | 24 +- .../summary/template-coloc-2.summary | 24 +- .../summary/template-coloc-3.summary | 36 +- .../summary/template-order-1.summary | 24 +- .../summary/template-order-2.summary | 24 +- .../summary/template-order-3.summary | 34 +- .../summary/template-rsc-sets-1.summary | 29 +- .../summary/template-rsc-sets-2.summary | 29 +- .../summary/template-rsc-sets-3.summary | 29 +- .../summary/template-rsc-sets-4.summary | 10 +- cts/scheduler/summary/template-ticket.summary | 10 +- cts/scheduler/summary/ticket-clone-1.summary | 6 +- cts/scheduler/summary/ticket-clone-10.summary | 6 +- cts/scheduler/summary/ticket-clone-11.summary | 12 +- cts/scheduler/summary/ticket-clone-12.summary | 10 +- cts/scheduler/summary/ticket-clone-13.summary | 4 + cts/scheduler/summary/ticket-clone-14.summary | 10 +- cts/scheduler/summary/ticket-clone-15.summary | 10 +- cts/scheduler/summary/ticket-clone-16.summary | 4 + cts/scheduler/summary/ticket-clone-17.summary | 10 +- cts/scheduler/summary/ticket-clone-18.summary | 10 +- cts/scheduler/summary/ticket-clone-19.summary | 4 + cts/scheduler/summary/ticket-clone-2.summary | 12 +- cts/scheduler/summary/ticket-clone-20.summary | 10 +- cts/scheduler/summary/ticket-clone-21.summary | 21 +- cts/scheduler/summary/ticket-clone-22.summary | 4 + cts/scheduler/summary/ticket-clone-23.summary | 10 +- cts/scheduler/summary/ticket-clone-24.summary | 10 +- cts/scheduler/summary/ticket-clone-3.summary | 10 +- cts/scheduler/summary/ticket-clone-4.summary | 6 +- cts/scheduler/summary/ticket-clone-5.summary | 12 +- cts/scheduler/summary/ticket-clone-6.summary | 10 +- cts/scheduler/summary/ticket-clone-7.summary | 6 +- cts/scheduler/summary/ticket-clone-8.summary | 12 +- cts/scheduler/summary/ticket-clone-9.summary | 21 +- cts/scheduler/summary/ticket-group-1.summary | 8 +- cts/scheduler/summary/ticket-group-10.summary | 8 +- cts/scheduler/summary/ticket-group-11.summary | 14 +- cts/scheduler/summary/ticket-group-12.summary | 12 +- cts/scheduler/summary/ticket-group-13.summary | 4 + cts/scheduler/summary/ticket-group-14.summary | 14 +- cts/scheduler/summary/ticket-group-15.summary | 14 +- cts/scheduler/summary/ticket-group-16.summary | 4 + cts/scheduler/summary/ticket-group-17.summary | 14 +- cts/scheduler/summary/ticket-group-18.summary | 14 +- cts/scheduler/summary/ticket-group-19.summary | 4 + cts/scheduler/summary/ticket-group-2.summary | 14 +- cts/scheduler/summary/ticket-group-20.summary | 14 +- cts/scheduler/summary/ticket-group-21.summary | 20 +- cts/scheduler/summary/ticket-group-22.summary | 4 + cts/scheduler/summary/ticket-group-23.summary | 14 +- cts/scheduler/summary/ticket-group-24.summary | 12 +- cts/scheduler/summary/ticket-group-3.summary | 14 +- cts/scheduler/summary/ticket-group-4.summary | 8 +- cts/scheduler/summary/ticket-group-5.summary | 14 +- cts/scheduler/summary/ticket-group-6.summary | 14 +- cts/scheduler/summary/ticket-group-7.summary | 8 +- cts/scheduler/summary/ticket-group-8.summary | 14 +- cts/scheduler/summary/ticket-group-9.summary | 20 +- .../summary/ticket-primitive-1.summary | 6 +- .../summary/ticket-primitive-10.summary | 6 +- .../summary/ticket-primitive-11.summary | 8 +- 
.../summary/ticket-primitive-12.summary | 8 +- .../summary/ticket-primitive-13.summary | 4 + .../summary/ticket-primitive-14.summary | 7 +- .../summary/ticket-primitive-15.summary | 7 +- .../summary/ticket-primitive-16.summary | 4 + .../summary/ticket-primitive-17.summary | 7 +- .../summary/ticket-primitive-18.summary | 7 +- .../summary/ticket-primitive-19.summary | 4 + .../summary/ticket-primitive-2.summary | 8 +- .../summary/ticket-primitive-20.summary | 7 +- .../summary/ticket-primitive-21.summary | 13 +- .../summary/ticket-primitive-22.summary | 4 + .../summary/ticket-primitive-23.summary | 7 +- .../summary/ticket-primitive-24.summary | 8 +- .../summary/ticket-primitive-3.summary | 7 +- .../summary/ticket-primitive-4.summary | 6 +- .../summary/ticket-primitive-5.summary | 8 +- .../summary/ticket-primitive-6.summary | 7 +- .../summary/ticket-primitive-7.summary | 6 +- .../summary/ticket-primitive-8.summary | 8 +- .../summary/ticket-primitive-9.summary | 13 +- .../summary/ticket-promoted-1.summary | 8 +- .../summary/ticket-promoted-10.summary | 14 +- .../summary/ticket-promoted-11.summary | 12 +- .../summary/ticket-promoted-12.summary | 10 +- .../summary/ticket-promoted-13.summary | 6 + .../summary/ticket-promoted-14.summary | 16 +- .../summary/ticket-promoted-15.summary | 16 +- .../summary/ticket-promoted-16.summary | 6 + .../summary/ticket-promoted-17.summary | 12 +- .../summary/ticket-promoted-18.summary | 12 +- .../summary/ticket-promoted-19.summary | 6 + .../summary/ticket-promoted-2.summary | 16 +- .../summary/ticket-promoted-20.summary | 12 +- .../summary/ticket-promoted-21.summary | 26 +- .../summary/ticket-promoted-22.summary | 6 + .../summary/ticket-promoted-23.summary | 12 +- .../summary/ticket-promoted-24.summary | 10 +- .../summary/ticket-promoted-3.summary | 16 +- .../summary/ticket-promoted-4.summary | 14 +- .../summary/ticket-promoted-5.summary | 12 +- .../summary/ticket-promoted-6.summary | 12 +- .../summary/ticket-promoted-7.summary | 14 +- .../summary/ticket-promoted-8.summary | 12 +- .../summary/ticket-promoted-9.summary | 26 +- .../summary/ticket-rsc-sets-1.summary | 22 +- .../summary/ticket-rsc-sets-10.summary | 31 +- .../summary/ticket-rsc-sets-11.summary | 6 + .../summary/ticket-rsc-sets-12.summary | 19 +- .../summary/ticket-rsc-sets-13.summary | 31 +- .../summary/ticket-rsc-sets-14.summary | 31 +- .../summary/ticket-rsc-sets-2.summary | 34 +- .../summary/ticket-rsc-sets-3.summary | 31 +- .../summary/ticket-rsc-sets-4.summary | 22 +- .../summary/ticket-rsc-sets-5.summary | 20 +- .../summary/ticket-rsc-sets-6.summary | 20 +- .../summary/ticket-rsc-sets-7.summary | 31 +- .../summary/ticket-rsc-sets-8.summary | 6 + .../summary/ticket-rsc-sets-9.summary | 31 +- cts/scheduler/summary/timeout-by-node.summary | 23 +- .../summary/unfence-definition.summary | 54 +- cts/scheduler/summary/unfence-device.summary | 14 +- .../summary/unfence-parameters.summary | 53 +- cts/scheduler/summary/unfence-startup.summary | 38 +- .../summary/unmanaged-block-restart.summary | 8 +- .../summary/unmanaged-promoted.summary | 6 +- .../summary/unmanaged-stop-1.summary | 4 + .../summary/unmanaged-stop-2.summary | 4 + .../summary/unmanaged-stop-3.summary | 6 +- .../summary/unmanaged-stop-4.summary | 6 +- cts/scheduler/summary/unrunnable-1.summary | 21 +- cts/scheduler/summary/unrunnable-2.summary | 16 +- .../summary/use-after-free-merge.summary | 21 +- .../utilization-check-allowed-nodes.summary | 13 +- .../summary/utilization-complex.summary | 123 +--- .../summary/utilization-order1.summary | 
12 +- .../summary/utilization-order2.summary | 24 +- .../summary/utilization-order3.summary | 15 +- .../summary/utilization-order4.summary | 36 +- .../summary/utilization-shuffle.summary | 81 ++- cts/scheduler/summary/utilization.summary | 13 +- cts/scheduler/summary/value-source.summary | 40 +- .../summary/whitebox-asymmetric.summary | 14 + cts/scheduler/summary/whitebox-fail1.summary | 33 +- cts/scheduler/summary/whitebox-fail2.summary | 33 +- cts/scheduler/summary/whitebox-fail3.summary | 14 + .../whitebox-imply-stop-on-fence.summary | 67 +-- .../summary/whitebox-migrate1.summary | 27 +- cts/scheduler/summary/whitebox-move.summary | 22 +- .../summary/whitebox-ms-ordering-move.summary | 43 +- .../summary/whitebox-ms-ordering.summary | 54 +- .../summary/whitebox-nested-group.summary | 73 +-- .../summary/whitebox-orphan-ms.summary | 39 +- .../summary/whitebox-orphaned.summary | 34 +- cts/scheduler/summary/whitebox-start.summary | 30 +- cts/scheduler/summary/whitebox-stop.summary | 26 +- .../whitebox-unexpectedly-running.summary | 18 +- cts/scheduler/summary/year-2038.summary | 28 +- 850 files changed, 7090 insertions(+), 15524 deletions(-) diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp index 42c19d2df8d..7f2f0248223 100644 --- a/cts/cli/regression.crm_mon.exp +++ b/cts/cli/regression.crm_mon.exp @@ -37,7 +37,6 @@ Active Resources: - @@ -284,7 +283,6 @@ Active Resources: - @@ -503,7 +501,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB is valid + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -607,7 +605,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB is valid + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -733,7 +731,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB is valid + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -837,7 +835,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB is valid + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -947,7 +945,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB is valid + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -1058,7 +1056,6 @@ Negative Location Constraints: - @@ -1312,7 +1309,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB is valid + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -1374,7 +1371,6 @@ Negative Location Constraints: - @@ -1501,7 +1497,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB is valid + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -1569,7 +1565,6 @@ Negative Location Constraints: - @@ -1708,7 +1703,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB is valid + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -1740,7 +1735,6 @@ Operations: - @@ -1798,7 +1792,6 @@ Active 
Resources: - @@ -1898,7 +1891,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB is valid + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -1930,7 +1923,6 @@ Operations: - @@ -1974,7 +1966,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB is valid + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -2009,7 +2001,6 @@ Operations: - @@ -2060,7 +2051,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB is valid + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -2092,7 +2083,6 @@ Operations: - @@ -2137,7 +2127,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB is valid + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -2174,7 +2164,6 @@ Operations: - @@ -2229,7 +2218,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB is valid + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -2266,7 +2255,6 @@ Operations: - @@ -2321,7 +2309,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB is valid + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -2361,7 +2349,6 @@ Operations: - @@ -2431,7 +2418,6 @@ Active Resources: - @@ -2506,7 +2492,6 @@ Full List of Resources: - @@ -2635,7 +2620,6 @@ Full List of Resources: - @@ -2735,7 +2719,6 @@ Full List of Resources: - @@ -2833,7 +2816,6 @@ Full List of Resources: - @@ -2937,7 +2919,6 @@ Full List of Resources: - @@ -3025,7 +3006,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB is valid + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -3068,7 +3049,6 @@ Operations: - @@ -3136,7 +3116,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB is valid + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -3179,7 +3159,6 @@ Operations: - @@ -3247,7 +3226,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB is valid + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -3288,7 +3267,6 @@ Operations: - @@ -3342,7 +3320,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB is valid + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -3385,7 +3363,6 @@ Operations: - @@ -3453,7 +3430,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB is valid + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -3494,7 +3471,6 @@ Operations: - @@ -3592,7 +3568,6 @@ unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bun - @@ -3785,7 +3760,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: 
   * Last change:
-  * CIB is valid
+  * CIB syntax is valid
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
@@ -3966,7 +3941,7 @@ Cluster Summary:
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
-  * CIB is valid
+  * CIB syntax is valid
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
@@ -4085,7 +4060,6 @@ unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bun
-
@@ -4183,7 +4157,6 @@ Active Resources:
-
@@ -4315,7 +4288,6 @@ Full List of Resources:
-
@@ -4576,7 +4548,6 @@ Full List of Resources:
-
@@ -4838,7 +4809,6 @@ Full List of Resources:
-
diff --git a/cts/cli/regression.feature_set.exp b/cts/cli/regression.feature_set.exp
index 0b600ee5024..a0428736392 100644
--- a/cts/cli/regression.feature_set.exp
+++ b/cts/cli/regression.feature_set.exp
@@ -74,7 +74,6 @@ Active Resources:
-
@@ -174,7 +173,6 @@ Active Resources:
-
diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp
index ef9a9b58a89..10201580d42 100644
--- a/cts/cli/regression.tools.exp
+++ b/cts/cli/regression.tools.exp
@@ -1458,7 +1458,7 @@ unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure
 pcmk__verify error: CIB did not pass schema validation
 Current cluster status:
   * Cluster Summary:
-    * CIB has errors (for details, run crm_verify -LV)
+    * CIB syntax has errors (for details, run crm_verify -LV)
 
   * Full List of Resources:
     * No resources
@@ -1472,7 +1472,7 @@ Executing Cluster Transition:
 
 Revised Cluster Status:
   * Cluster Summary:
-    * CIB has errors (for details, run crm_verify -LV)
+    * CIB syntax has errors (for details, run crm_verify -LV)
 
   * Node List:
     * Online: [ node1 ]
@@ -1727,7 +1727,7 @@ unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure
 pcmk__verify error: CIB did not pass schema validation
 Current cluster status:
   * Cluster Summary:
-    * CIB has errors (for details, run crm_verify -LV)
+    * CIB syntax has errors (for details, run crm_verify -LV)
 
   * Node List:
     * Online: [ node1 ]
@@ -7626,207 +7626,13 @@ export overcloud-rabbit-2=overcloud-rabbit-2
 =#=#=#= Begin test: Show allocation scores with crm_simulate =#=#=#=
 [~200 removed and ~10 added lines of this hunk are unrecoverable: the XML element content was lost in extraction, leaving only bare "-"/"+" markers]
@@ -7928,34 +7734,198 @@ export overcloud-rabbit-2=overcloud-rabbit-2
 [~30 removed and ~200 added lines of this hunk are unrecoverable: the XML element content was lost in extraction, leaving only bare "-"/"+" markers]
@@ -7993,9 +7963,9 @@ export overcloud-rabbit-2=overcloud-rabbit-2
-
-/tmp/cts-cli.ta_outfile.H4yI62EbXj:1: element pacemaker-result: Relax-NG validity error : Expecting element status, got cluster_status
-/tmp/cts-cli.ta_outfile.H4yI62EbXj:1: element pacemaker-result: Relax-NG validity error : Element pacemaker-result failed to validate content
-/tmp/cts-cli.ta_outfile.H4yI62EbXj fails to validate
+/tmp/cts-cli.ta_outfile.qPkR4CJfAb:1: element pacemaker-result: Relax-NG validity error : Expecting element status, got cluster_status
+/tmp/cts-cli.ta_outfile.qPkR4CJfAb:1: element pacemaker-result: Relax-NG validity error : Element pacemaker-result failed to validate content
+/tmp/cts-cli.ta_outfile.qPkR4CJfAb fails to validate
 =#=#=#= End test: Show allocation scores with crm_simulate - Failed to validate (3) =#=#=#=
 * Failed (rc=003): crm_simulate - Show allocation scores with crm_simulate
 =#=#=#= Begin test: Show utilization with crm_simulate =#=#=#=
diff --git a/cts/cli/regression.validity.exp b/cts/cli/regression.validity.exp
index d943f4e06b4..4b898731c17 100644
--- a/cts/cli/regression.validity.exp
+++ b/cts/cli/regression.validity.exp
@@ -363,7 +363,7 @@ update_validation info: Transformed the configuration from pacemaker-1.2 to pac
 pcmk__verify error: CIB did not pass schema validation
 Current cluster status:
   * Cluster Summary:
-    * CIB has errors (for details, run crm_verify -LV)
+    * CIB syntax has errors (for details, run crm_verify -LV)
 
   * Full List of Resources:
     * No resources
@@ -374,7 +374,7 @@ Executing Cluster Transition:
 
 Revised Cluster Status:
   * Cluster Summary:
-    * CIB has errors (for details, run crm_verify -LV)
+    * CIB syntax has errors (for details, run crm_verify -LV)
 
   * Full List of Resources:
     * No resources
@@ -412,7 +412,7 @@ Schema validation of configuration is disabled (enabling is encouraged and preve
 pcmk__verify error: CIB did not pass schema validation
 Current cluster status:
   * Cluster Summary:
-    * CIB has errors (for details, run crm_verify -LV)
+    * CIB syntax has errors (for details, run crm_verify -LV)
 
   * Full List of Resources:
     * dummy1 (ocf:pacemaker:Dummy): Stopped (unmanaged)
@@ -424,7 +424,7 @@ Executing Cluster Transition:
 
 Revised Cluster Status:
   * Cluster Summary:
-    * CIB has errors (for details, run crm_verify -LV)
+    * CIB syntax has errors (for details, run crm_verify -LV)
 
   * Full List of Resources:
     * dummy1 (ocf:pacemaker:Dummy): Stopped (unmanaged)
@@ -567,7 +567,7 @@ Schema validation of configuration is disabled (enabling is encouraged and preve
 pcmk__verify error: CIB did not pass schema validation
 Current cluster status:
   * Cluster Summary:
-    * CIB has errors (for details, run crm_verify -LV)
+    * CIB syntax has errors (for details, run crm_verify -LV)
 
   * Full List of Resources:
     * dummy1 (ocf:pacemaker:Dummy): Stopped (unmanaged)
@@ -579,7 +579,7 @@ Executing Cluster Transition:
 
 Revised Cluster Status:
   * Cluster Summary:
-    * CIB has errors (for details, run crm_verify -LV)
+    * CIB syntax has errors (for details, run crm_verify -LV)
 
   * Full List of Resources:
     * dummy1 (ocf:pacemaker:Dummy): Stopped (unmanaged)
diff --git a/cts/scheduler/summary/1-a-then-bm-move-b.summary b/cts/scheduler/summary/1-a-then-bm-move-b.summary
index b2615785cc2..20f026d475e 100644
--- a/cts/scheduler/summary/1-a-then-bm-move-b.summary
+++ b/cts/scheduler/summary/1-a-then-bm-move-b.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ 18node1 18node2 18node3 ]
@@ -10,16 +12,13 @@ Transition Summary:
   * Migrate B ( 18node2 -> 18node1 )
 
 Executing Cluster Transition:
-  * Resource action: B migrate_to on 18node2
-  * Resource action: B migrate_from on 18node1
-  * Resource action: B stop on 18node2
-  * Pseudo action: B_start_0
-  * Resource action: B monitor=60000 on 18node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ 18node1 18node2 18node3 ]
 
   * Full List of Resources:
     * A (ocf:heartbeat:Dummy): Started 18node1
-    * B (ocf:heartbeat:Dummy): Started 18node1
+    * B (ocf:heartbeat:Dummy): Started 18node2
diff --git a/cts/scheduler/summary/10-a-then-bm-b-move-a-clone.summary b/cts/scheduler/summary/10-a-then-bm-b-move-a-clone.summary
index dd14d65e3c4..d3ca8bc272d 100644
--- a/cts/scheduler/summary/10-a-then-bm-b-move-a-clone.summary
+++ b/cts/scheduler/summary/10-a-then-bm-b-move-a-clone.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Node f20node1: standby (with active resources)
     * Online: [ f20node2 ]
@@ -11,23 +13,19 @@ Current cluster status:
 Transition Summary:
   * Stop myclone:1 ( f20node1 ) due to node availability
   * Migrate vm ( f20node1 -> f20node2 )
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Resource action: vm migrate_to on f20node1
-  * Resource action: vm migrate_from on f20node2
-  * Resource action: vm stop on f20node1
-  * Pseudo action: myclone-clone_stop_0
-  * Pseudo action: vm_start_0
-  * Resource action: myclone stop on f20node1
-  * Pseudo action: myclone-clone_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
-    * Node f20node1: standby
+    * Node f20node1: standby (with active resources)
     * Online: [ f20node2 ]
 
   * Full List of Resources:
     * Clone Set: myclone-clone [myclone]:
-      * Started: [ f20node2 ]
-      * Stopped: [ f20node1 ]
-    * vm (ocf:heartbeat:Dummy): Started f20node2
+      * Started: [ f20node1 f20node2 ]
+    * vm (ocf:heartbeat:Dummy): Started f20node1
diff --git a/cts/scheduler/summary/11-a-then-bm-b-move-a-clone-starting.summary b/cts/scheduler/summary/11-a-then-bm-b-move-a-clone-starting.summary
index 73886441ac2..d0578725ec9 100644
--- a/cts/scheduler/summary/11-a-then-bm-b-move-a-clone-starting.summary
+++ b/cts/scheduler/summary/11-a-then-bm-b-move-a-clone-starting.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Node f20node1: standby (with active resources)
     * Online: [ f20node2 ]
@@ -14,23 +16,16 @@ Transition Summary:
   * Move vm ( f20node1 -> f20node2 ) due to unmigrateable myclone-clone stop
 
 Executing Cluster Transition:
-  * Resource action: myclone monitor on f20node2
-  * Resource action: vm stop on f20node1
-  * Pseudo action: myclone-clone_stop_0
-  * Resource action: myclone stop on f20node1
-  * Pseudo action: myclone-clone_stopped_0
-  * Pseudo action: myclone-clone_start_0
-  * Resource action: myclone start on f20node2
-  * Pseudo action: myclone-clone_running_0
-  * Resource action: vm start on f20node2
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
-    * Node f20node1: standby
+    * Node f20node1: standby (with active resources)
     * Online: [ f20node2 ]
 
   * Full List of Resources:
     * Clone Set: myclone-clone [myclone]:
-      * Started: [ f20node2 ]
-      * Stopped: [ f20node1 ]
-    * vm (ocf:heartbeat:Dummy): Started f20node2
+      * Started: [ f20node1 ]
+      * Stopped: [ f20node2 ]
+    * vm (ocf:heartbeat:Dummy): Started f20node1
diff --git a/cts/scheduler/summary/1360.summary b/cts/scheduler/summary/1360.summary
index 6a08320524d..e07a4f82e8f 100644
--- a/cts/scheduler/summary/1360.summary
+++ b/cts/scheduler/summary/1360.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ ssgtest1a ssgtest1b ]
@@ -12,14 +14,10 @@ Transition Summary:
   * Move dollies:0 ( ssgtest1a -> ssgtest1b )
 
 Executing Cluster Transition:
-  * Pseudo action: dolly_stop_0
-  * Resource action: dollies:0 stop on ssgtest1a
-  * Pseudo action: dolly_stopped_0
-  * Pseudo action: dolly_start_0
-  * Resource action: dollies:0 start on ssgtest1b
-  * Pseudo action: dolly_running_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ ssgtest1a ssgtest1b ]
@@ -27,4 +25,4 @@ Revised Cluster Status:
     * Resource Group: ClusterAlias:
       * VIP (ocf:testing:VIP-RIP.sh): Started ssgtest1a
     * Clone Set: dolly [dollies]:
-      * Started: [ ssgtest1b ]
+      * Started: [ ssgtest1a ]
diff --git a/cts/scheduler/summary/1484.summary b/cts/scheduler/summary/1484.summary
index 92b6f090b72..43afe29e18e 100644
--- a/cts/scheduler/summary/1484.summary
+++ b/cts/scheduler/summary/1484.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ hb1 hb2 ]
     * OFFLINE: [ hb3 ]
@@ -10,12 +12,13 @@ Transition Summary:
   * Stop the-future-of-vaj ( hb2 ) due to node availability
 
 Executing Cluster Transition:
-  * Resource action: the-future-of-vaj stop on hb2
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ hb1 hb2 ]
     * OFFLINE: [ hb3 ]
 
   * Full List of Resources:
-    * the-future-of-vaj (ocf:heartbeat:Dummy): Stopped
+    * the-future-of-vaj (ocf:heartbeat:Dummy): FAILED hb2
diff --git a/cts/scheduler/summary/1494.summary b/cts/scheduler/summary/1494.summary
index f0792c3c36d..459ea4c9f77 100644
--- a/cts/scheduler/summary/1494.summary
+++ b/cts/scheduler/summary/1494.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ hb1 hb2 ]
     * OFFLINE: [ hb3 ]
@@ -10,18 +12,19 @@ Current cluster status:
 Transition Summary:
   * Stop ima_rscid:0 ( hb1 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Pseudo action: ima_cloneid_stop_0
-  * Resource action: ima_rscid:0 stop on hb1
-  * Pseudo action: ima_cloneid_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ hb1 hb2 ]
     * OFFLINE: [ hb3 ]
 
   * Full List of Resources:
     * Clone Set: ima_cloneid [ima_rscid] (unique):
-      * ima_rscid:0 (ocf:heartbeat:Dummy): Stopped
+      * ima_rscid:0 (ocf:heartbeat:Dummy): Started hb1
       * ima_rscid:1 (ocf:heartbeat:Dummy): Started hb2
diff --git a/cts/scheduler/summary/2-am-then-b-move-a.summary b/cts/scheduler/summary/2-am-then-b-move-a.summary
index 4fb45d7fcda..2a6c87f41d1 100644
--- a/cts/scheduler/summary/2-am-then-b-move-a.summary
+++ b/cts/scheduler/summary/2-am-then-b-move-a.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ 18node1 18node2 18node3 ]
@@ -10,16 +12,13 @@ Transition Summary:
   * Migrate A ( 18node1 -> 18node2 )
 
 Executing Cluster Transition:
-  * Resource action: A migrate_to on 18node1
-  * Resource action: A migrate_from on 18node2
-  * Resource action: A stop on 18node1
-  * Pseudo action: A_start_0
-  * Resource action: A monitor=60000 on 18node2
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ 18node1 18node2 18node3 ]
 
   * Full List of Resources:
-    * A (ocf:heartbeat:Dummy): Started 18node2
+    * A (ocf:heartbeat:Dummy): Started 18node1
     * B (ocf:heartbeat:Dummy): Started 18node2
diff --git a/cts/scheduler/summary/3-am-then-bm-both-migrate.summary b/cts/scheduler/summary/3-am-then-bm-both-migrate.summary
index 4498194df10..16e394b51a2 100644
--- a/cts/scheduler/summary/3-am-then-bm-both-migrate.summary
+++ b/cts/scheduler/summary/3-am-then-bm-both-migrate.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ 18node1 18node2 18node3 ]
@@ -11,21 +13,13 @@ Transition Summary:
   * Migrate B ( 18node2 -> 18node1 )
 
 Executing Cluster Transition:
-  * Resource action: A migrate_to on 18node1
-  * Resource action: A migrate_from on 18node2
-  * Resource action: B migrate_to on 18node2
-  * Resource action: B migrate_from on 18node1
-  * Resource action: B stop on 18node2
-  * Resource action: A stop on 18node1
-  * Pseudo action: A_start_0
-  * Pseudo action: B_start_0
-  * Resource action: A monitor=60000 on 18node2
-  * Resource action: B monitor=60000 on 18node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ 18node1 18node2 18node3 ]
 
   * Full List of Resources:
-    * A (ocf:heartbeat:Dummy): Started 18node2
-    * B (ocf:heartbeat:Dummy): Started 18node1
+    * A (ocf:heartbeat:Dummy): Started 18node1
+    * B (ocf:heartbeat:Dummy): Started 18node2
diff --git a/cts/scheduler/summary/4-am-then-bm-b-not-migratable.summary b/cts/scheduler/summary/4-am-then-bm-b-not-migratable.summary
index 6459c746bac..9aa47ab3c1e 100644
--- a/cts/scheduler/summary/4-am-then-bm-b-not-migratable.summary
+++ b/cts/scheduler/summary/4-am-then-bm-b-not-migratable.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ 18node1 18node2 18node3 ]
@@ -11,19 +13,13 @@ Transition Summary:
   * Move B ( 18node2 -> 18node1 )
 
 Executing Cluster Transition:
-  * Resource action: B stop on 18node2
-  * Resource action: A migrate_to on 18node1
-  * Resource action: A migrate_from on 18node2
-  * Resource action: A stop on 18node1
-  * Pseudo action: A_start_0
-  * Resource action: B start on 18node1
-  * Resource action: A monitor=60000 on 18node2
-  * Resource action: B monitor=60000 on 18node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ 18node1 18node2 18node3 ]
 
   * Full List of Resources:
-    * A (ocf:heartbeat:Dummy): Started 18node2
-    * B (ocf:heartbeat:Dummy): Started 18node1
+    * A (ocf:heartbeat:Dummy): Started 18node1
+    * B (ocf:heartbeat:Dummy): Started 18node2
diff --git a/cts/scheduler/summary/5-am-then-bm-a-not-migratable.summary b/cts/scheduler/summary/5-am-then-bm-a-not-migratable.summary
index 2a755e1bbff..9c6881a1269 100644
--- a/cts/scheduler/summary/5-am-then-bm-a-not-migratable.summary
+++ b/cts/scheduler/summary/5-am-then-bm-a-not-migratable.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ 18node1 18node2 18node3 ]
@@ -11,17 +13,13 @@ Transition Summary:
   * Move B ( 18node2 -> 18node1 ) due to unmigrateable A stop
 
 Executing Cluster Transition:
-  * Resource action: B stop on 18node2
-  * Resource action: A stop on 18node1
-  * Resource action: A start on 18node2
-  * Resource action: B start on 18node1
-  * Resource action: A monitor=60000 on 18node2
-  * Resource action: B monitor=60000 on 18node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ 18node1 18node2 18node3 ]
 
   * Full List of Resources:
-    * A (ocf:heartbeat:Dummy): Started 18node2
-    * B (ocf:heartbeat:Dummy): Started 18node1
+    * A (ocf:heartbeat:Dummy): Started 18node1
+    * B (ocf:heartbeat:Dummy): Started 18node2
diff --git a/cts/scheduler/summary/594.summary b/cts/scheduler/summary/594.summary
index dc6db75e6e5..091c010a968 100644
--- a/cts/scheduler/summary/594.summary
+++ b/cts/scheduler/summary/594.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Node hadev3: UNCLEAN (offline)
     * Online: [ hadev1 hadev2 ]
@@ -19,37 +21,24 @@ Transition Summary:
   * Move rsc_hadev2 ( hadev2 -> hadev1 )
   * Stop child_DoFencing:0 ( hadev2 ) due to node availability
   * Stop child_DoFencing:2 ( hadev1 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Resource action: DcIPaddr stop on hadev2
-  * Resource action: DcIPaddr monitor on hadev1
-  * Resource action: rsc_hadev3 monitor on hadev2
-  * Resource action: rsc_hadev2 stop on hadev2
-  * Resource action: rsc_hadev2 monitor on hadev1
-  * Resource action: child_DoFencing:0 monitor on hadev1
-  * Resource action: child_DoFencing:2 monitor on hadev2
-  * Pseudo action: DoFencing_stop_0
-  * Fencing hadev3 (reboot)
-  * Resource action: DcIPaddr start on hadev1
-  * Resource action: rsc_hadev2 start on hadev1
-  * Resource action: child_DoFencing:0 stop on hadev2
-  * Resource action: child_DoFencing:2 stop on hadev1
-  * Pseudo action: DoFencing_stopped_0
-  * Cluster action: do_shutdown on hadev2
-  * Resource action: DcIPaddr monitor=5000 on hadev1
-  * Resource action: rsc_hadev2 monitor=5000 on hadev1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
+    * Node hadev3: UNCLEAN (offline)
     * Online: [ hadev1 hadev2 ]
-    * OFFLINE: [ hadev3 ]
 
   * Full List of Resources:
-    * DcIPaddr (ocf:heartbeat:IPaddr): Started hadev1
+    * DcIPaddr (ocf:heartbeat:IPaddr): Started hadev2
     * rsc_hadev3 (ocf:heartbeat:IPaddr): Started hadev1
-    * rsc_hadev2 (ocf:heartbeat:IPaddr): Started hadev1
+    * rsc_hadev2 (ocf:heartbeat:IPaddr): Started hadev2
     * rsc_hadev1 (ocf:heartbeat:IPaddr): Started hadev1
     * Clone Set: DoFencing [child_DoFencing] (unique):
-      * child_DoFencing:0 (stonith:ssh): Stopped
+      * child_DoFencing:0 (stonith:ssh): Started hadev2
       * child_DoFencing:1 (stonith:ssh): Started hadev1
-      * child_DoFencing:2 (stonith:ssh): Stopped
+      * child_DoFencing:2 (stonith:ssh): Started hadev1
diff --git a/cts/scheduler/summary/6-migrate-group.summary b/cts/scheduler/summary/6-migrate-group.summary
index bfa374bcd19..1df9329d0db 100644
--- a/cts/scheduler/summary/6-migrate-group.summary
+++ b/cts/scheduler/summary/6-migrate-group.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ 18node1 18node2 18node3 ]
@@ -14,32 +16,15 @@ Transition Summary:
   * Migrate C ( 18node1 -> 18node2 )
 
 Executing Cluster Transition:
-  * Pseudo action: thegroup_stop_0
-  * Resource action: A migrate_to on 18node1
-  * Resource action: A migrate_from on 18node2
-  * Resource action: B migrate_to on 18node1
-  * Resource action: B migrate_from on 18node2
-  * Resource action: C migrate_to on 18node1
-  * Resource action: C migrate_from on 18node2
-  * Resource action: C stop on 18node1
-  * Resource action: B stop on 18node1
-  * Resource action: A stop on 18node1
-  * Pseudo action: thegroup_stopped_0
-  * Pseudo action: thegroup_start_0
-  * Pseudo action: A_start_0
-  * Pseudo action: B_start_0
-  * Pseudo action: C_start_0
-  * Pseudo action: thegroup_running_0
-  * Resource action: A monitor=60000 on 18node2
-  * Resource action: B monitor=60000 on 18node2
-  * Resource action: C monitor=60000 on 18node2
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ 18node1 18node2 18node3 ]
 
   * Full List of Resources:
     * Resource Group: thegroup:
-      * A (ocf:heartbeat:Dummy): Started 18node2
-      * B (ocf:heartbeat:Dummy): Started 18node2
-      * C (ocf:heartbeat:Dummy): Started 18node2
+      * A (ocf:heartbeat:Dummy): Started 18node1
+      * B (ocf:heartbeat:Dummy): Started 18node1
+      * C (ocf:heartbeat:Dummy): Started 18node1
diff --git a/cts/scheduler/summary/662.summary b/cts/scheduler/summary/662.summary
index 1ad51a498b5..dc5570bf46a 100644
--- a/cts/scheduler/summary/662.summary
+++ b/cts/scheduler/summary/662.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ c001n02 c001n03 c001n04 c001n09 ]
@@ -17,51 +19,25 @@ Current cluster status:
 Transition Summary:
   * Move rsc_c001n02 ( c001n02 -> c001n03 )
   * Stop child_DoFencing:0 ( c001n02 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Resource action: DcIPaddr monitor on c001n04
-  * Resource action: DcIPaddr monitor on c001n03
-  * Resource action: DcIPaddr monitor on c001n02
-  * Resource action: rsc_c001n09 monitor on c001n04
-  * Resource action: rsc_c001n09 monitor on c001n03
-  * Resource action: rsc_c001n09 monitor on c001n02
-  * Resource action: rsc_c001n02 stop on c001n02
-  * Resource action: rsc_c001n02 monitor on c001n09
-  * Resource action: rsc_c001n02 monitor on c001n04
-  * Resource action: rsc_c001n02 monitor on c001n03
-  * Resource action: rsc_c001n03 monitor on c001n09
-  * Resource action: rsc_c001n03 monitor on c001n04
-  * Resource action: rsc_c001n03 monitor on c001n02
-  * Resource action: rsc_c001n04 monitor on c001n09
-  * Resource action: rsc_c001n04 monitor on c001n03
-  * Resource action: child_DoFencing:0 monitor on c001n09
-  * Resource action: child_DoFencing:0 monitor on c001n04
-  * Resource action: child_DoFencing:1 monitor on c001n04
-  * Resource action: child_DoFencing:1 monitor on c001n02
-  * Resource action: child_DoFencing:2 monitor on c001n09
-  * Resource action: child_DoFencing:2 monitor on c001n03
-  * Resource action: child_DoFencing:3 monitor on c001n04
-  * Resource action: child_DoFencing:3 monitor on c001n03
-  * Resource action: child_DoFencing:3 monitor on c001n02
-  * Pseudo action: DoFencing_stop_0
-  * Resource action: rsc_c001n02 start on c001n03
-  * Resource action: child_DoFencing:0 stop on c001n02
-  * Pseudo action: DoFencing_stopped_0
-  * Cluster action: do_shutdown on c001n02
-  * Resource action: rsc_c001n02 monitor=5000 on c001n03
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ c001n02 c001n03 c001n04 c001n09 ]
 
   * Full List of Resources:
     * DcIPaddr (ocf:heartbeat:IPaddr): Started c001n09
     * rsc_c001n09 (ocf:heartbeat:IPaddr): Started c001n09
-    * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n03
+    * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
     * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03
     * rsc_c001n04 (ocf:heartbeat:IPaddr): Started c001n04
     * Clone Set: DoFencing [child_DoFencing] (unique):
-      * child_DoFencing:0 (stonith:ssh): Stopped
+      * child_DoFencing:0 (stonith:ssh): Started c001n02
       * child_DoFencing:1 (stonith:ssh): Started c001n03
       * child_DoFencing:2 (stonith:ssh): Started c001n04
       * child_DoFencing:3 (stonith:ssh): Started c001n09
diff --git a/cts/scheduler/summary/696.summary b/cts/scheduler/summary/696.summary
index 3090caec3ab..95f5764db39 100644
--- a/cts/scheduler/summary/696.summary
+++ b/cts/scheduler/summary/696.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ hadev1 hadev2 hadev3 ]
@@ -17,46 +19,19 @@ Transition Summary:
   * Start child_DoFencing:2 ( hadev1 )
 
 Executing Cluster Transition:
-  * Resource action: DcIPaddr monitor on hadev3
-  * Resource action: DcIPaddr monitor on hadev1
-  * Resource action: rsc_hadev1 stop on hadev3
-  * Resource action: rsc_hadev1 monitor on hadev2
-  * Resource action: rsc_hadev1 monitor on hadev1
-  * Resource action: rsc_hadev2 monitor on hadev3
-  * Resource action: rsc_hadev2 monitor on hadev1
-  * Resource action: rsc_hadev3 monitor=5000 on hadev3
-  * Resource action: rsc_hadev3 monitor on hadev2
-  * Resource action: rsc_hadev3 monitor on hadev1
-  * Resource action: child_DoFencing:0 monitor=5000 on hadev2
-  * Resource action: child_DoFencing:0 monitor on hadev3
-  * Resource action: child_DoFencing:0 monitor on hadev1
-  * Resource action: child_DoFencing:1 monitor=5000 on hadev3
-  * Resource action: child_DoFencing:1 monitor on hadev2
-  * Resource action: child_DoFencing:1 monitor on hadev1
-  * Resource action: child_DoFencing:2 monitor on hadev3
-  * Resource action: child_DoFencing:2 monitor on hadev2
-  * Resource action: child_DoFencing:2 monitor on hadev1
-  * Pseudo action: DoFencing_start_0
-  * Resource action: DcIPaddr start on hadev2
-  * Resource action: rsc_hadev1 start on hadev1
-  * Resource action: rsc_hadev2 start on hadev2
-  * Resource action: child_DoFencing:2 start on hadev1
-  * Pseudo action: DoFencing_running_0
-  * Resource action: DcIPaddr monitor=5000 on hadev2
-  * Resource action: rsc_hadev1 monitor=5000 on hadev1
-  * Resource action: rsc_hadev2 monitor=5000 on hadev2
-  * Resource action: child_DoFencing:2 monitor=5000 on hadev1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ hadev1 hadev2 hadev3 ]
 
   * Full List of Resources:
     * DcIPaddr (ocf:heartbeat:IPaddr): Started hadev2
-    * rsc_hadev1 (ocf:heartbeat:IPaddr): Started hadev1
+    * rsc_hadev1 (ocf:heartbeat:IPaddr): Started hadev3
     * rsc_hadev2 (ocf:heartbeat:IPaddr): Started hadev2
     * rsc_hadev3 (ocf:heartbeat:IPaddr): Started hadev3
     * Clone Set: DoFencing [child_DoFencing] (unique):
       * child_DoFencing:0 (stonith:ssh): Started hadev2
       * child_DoFencing:1 (stonith:ssh): Started hadev3
-      * child_DoFencing:2 (stonith:ssh): Started hadev1
+      * child_DoFencing:2 (stonith:ssh): Stopped
diff --git a/cts/scheduler/summary/7-migrate-group-one-unmigratable.summary b/cts/scheduler/summary/7-migrate-group-one-unmigratable.summary
index 92eecafd684..31718b5f471 100644
--- a/cts/scheduler/summary/7-migrate-group-one-unmigratable.summary
+++ b/cts/scheduler/summary/7-migrate-group-one-unmigratable.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ 18node1 18node2 18node3 ]
@@ -14,28 +16,15 @@ Transition Summary:
   * Move C ( 18node1 -> 18node2 ) due to unmigrateable B stop
 
 Executing Cluster Transition:
-  * Pseudo action: thegroup_stop_0
-  * Resource action: C stop on 18node1
-  * Resource action: B stop on 18node1
-  * Resource action: A migrate_to on 18node1
-  * Resource action: A migrate_from on 18node2
-  * Resource action: A stop on 18node1
-  * Pseudo action: thegroup_stopped_0
-  * Pseudo action: thegroup_start_0
-  * Pseudo action: A_start_0
-  * Resource action: B start on 18node2
-  * Resource action: C start on 18node2
-  * Pseudo action: thegroup_running_0
-  * Resource action: A monitor=60000 on 18node2
-  * Resource action: B monitor=60000 on 18node2
-  * Resource action: C monitor=60000 on 18node2
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ 18node1 18node2 18node3 ]
 
   * Full List of Resources:
     * Resource Group: thegroup:
-      * A (ocf:heartbeat:Dummy): Started 18node2
-      * B (ocf:heartbeat:Dummy): Started 18node2
-      * C (ocf:heartbeat:Dummy): Started 18node2
+      * A (ocf:heartbeat:Dummy): Started 18node1
+      * B (ocf:heartbeat:Dummy): Started 18node1
+      * C (ocf:heartbeat:Dummy): Started 18node1
diff --git a/cts/scheduler/summary/726.summary b/cts/scheduler/summary/726.summary
index 4bd880ecd27..120ba286e5a 100644
--- a/cts/scheduler/summary/726.summary
+++ b/cts/scheduler/summary/726.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ ibm1 sgi2 test02 test03 ]
@@ -22,68 +24,21 @@ Transition Summary:
   * Start child_DoFencing:3 ( sgi2 )
 
 Executing Cluster Transition:
-  * Resource action: DcIPaddr monitor=5000 on test03
-  * Resource action: DcIPaddr monitor on test02
-  * Resource action: DcIPaddr monitor on sgi2
-  * Resource action: DcIPaddr monitor on ibm1
-  * Resource action: rsc_sgi2 monitor on test03
-  * Resource action: rsc_sgi2 monitor on test02
-  * Resource action: rsc_sgi2 monitor on sgi2
-  * Resource action: rsc_sgi2 monitor on ibm1
-  * Resource action: rsc_ibm1 stop on test03
-  * Resource action: rsc_ibm1 monitor on test02
-  * Resource action: rsc_ibm1 monitor on sgi2
-  * Resource action: rsc_ibm1 monitor on ibm1
-  * Resource action: rsc_test02 monitor on test03
-  * Resource action: rsc_test02 monitor on test02
-  * Resource action: rsc_test02 monitor on sgi2
-  * Resource action: rsc_test02 monitor on ibm1
-  * Resource action: rsc_test03 monitor=5000 on test03
-  * Resource action: rsc_test03 monitor on test02
-  * Resource action: rsc_test03 monitor on sgi2
-  * Resource action: rsc_test03 monitor on ibm1
-  * Resource action: child_DoFencing:0 monitor on sgi2
-  * Resource action: child_DoFencing:0 monitor on ibm1
-  * Resource action: child_DoFencing:1 monitor on test02
-  * Resource action: child_DoFencing:1 monitor on sgi2
-  * Resource action: child_DoFencing:1 monitor on ibm1
-  * Resource action: child_DoFencing:2 monitor on test03
-  * Resource action: child_DoFencing:2 monitor on test02
-  * Resource action: child_DoFencing:2 monitor on sgi2
-  * Resource action: child_DoFencing:2 monitor on ibm1
-  * Resource action: child_DoFencing:3 monitor on test03
-  * Resource action: child_DoFencing:3 monitor on test02
-  * Resource action: child_DoFencing:3 monitor on sgi2
-  * Resource action: child_DoFencing:3 monitor on ibm1
-  * Pseudo action: DoFencing_start_0
-  * Resource action: rsc_sgi2 start on sgi2
-  * Resource action: rsc_ibm1 start on ibm1
-  * Resource action: rsc_test02 start on test02
-  * Resource action: child_DoFencing:0 start on test02
-  * Resource action: child_DoFencing:1 start on test03
-  * Resource action: child_DoFencing:2 start on ibm1
-  * Resource action: child_DoFencing:3 start on sgi2
-  * Pseudo action: DoFencing_running_0
-  * Resource action: rsc_sgi2 monitor=5000 on sgi2
-  * Resource action: rsc_ibm1 monitor=5000 on ibm1
-  * Resource action: rsc_test02 monitor=5000 on test02
-  * Resource action: child_DoFencing:0 monitor=5000 on test02
-  * Resource action: child_DoFencing:1 monitor=5000 on test03
-  * Resource action: child_DoFencing:2 monitor=5000 on ibm1
-  * Resource action: child_DoFencing:3 monitor=5000 on sgi2
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ ibm1 sgi2 test02 test03 ]
 
   * Full List of Resources:
     * DcIPaddr (ocf:heartbeat:IPaddr): Started test03
-    * rsc_sgi2 (ocf:heartbeat:IPaddr): Started sgi2
-    * rsc_ibm1 (ocf:heartbeat:IPaddr): Started ibm1
-    * rsc_test02 (ocf:heartbeat:IPaddr): Started test02
+    * rsc_sgi2 (ocf:heartbeat:IPaddr): Stopped
+    * rsc_ibm1 (ocf:heartbeat:IPaddr): Started test03
+    * rsc_test02 (ocf:heartbeat:IPaddr): Stopped
     * rsc_test03 (ocf:heartbeat:IPaddr): Started test03
     * Clone Set: DoFencing [child_DoFencing] (unique):
       * child_DoFencing:0 (stonith:ssh): Started test02
       * child_DoFencing:1 (stonith:ssh): Started test03
-      * child_DoFencing:2 (stonith:ssh): Started ibm1
-      * child_DoFencing:3 (stonith:ssh): Started sgi2
+      * child_DoFencing:2 (stonith:ssh): Stopped
+      * child_DoFencing:3 (stonith:ssh): Stopped
diff --git a/cts/scheduler/summary/735.summary b/cts/scheduler/summary/735.summary
index 8489a21d3cb..6402b259662 100644
--- a/cts/scheduler/summary/735.summary
+++ b/cts/scheduler/summary/735.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ hadev2 hadev3 ]
     * OFFLINE: [ hadev1 ]
@@ -20,33 +22,20 @@ Transition Summary:
   * Start child_DoFencing:1 ( hadev3 )
 
 Executing Cluster Transition:
-  * Resource action: DcIPaddr monitor on hadev3
-  * Resource action: rsc_hadev1 stop on hadev2
-  * Resource action: rsc_hadev1 start on hadev3
-  * Resource action: rsc_hadev2 monitor on hadev3
-  * Resource action: rsc_hadev3 start on hadev3
-  * Resource action: child_DoFencing:0 monitor on hadev3
-  * Resource action: child_DoFencing:2 monitor on hadev3
-  * Pseudo action: DoFencing_start_0
-  * Resource action: rsc_hadev1 monitor=5000 on hadev3
-  * Resource action: rsc_hadev3 monitor=5000 on hadev3
-  * Resource action: child_DoFencing:0 start on hadev2
-  * Resource action: child_DoFencing:1 start on hadev3
-  * Pseudo action: DoFencing_running_0
-  * Resource action: child_DoFencing:0 monitor=5000 on hadev2
-  * Resource action: child_DoFencing:1 monitor=5000 on hadev3
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ hadev2 hadev3 ]
     * OFFLINE: [ hadev1 ]
 
   * Full List of Resources:
     * DcIPaddr (ocf:heartbeat:IPaddr): Started hadev2
-    * rsc_hadev1 (ocf:heartbeat:IPaddr): Started hadev3
+    * rsc_hadev1 (ocf:heartbeat:IPaddr): Started hadev2
     * rsc_hadev2 (ocf:heartbeat:IPaddr): Started hadev2
-    * rsc_hadev3 (ocf:heartbeat:IPaddr): Started hadev3
+    * rsc_hadev3 (ocf:heartbeat:IPaddr): Stopped hadev2
     * Clone Set: DoFencing [child_DoFencing] (unique):
-      * child_DoFencing:0 (stonith:ssh): Started hadev2
-      * child_DoFencing:1 (stonith:ssh): Started hadev3
+      * child_DoFencing:0 (stonith:ssh): Stopped hadev2
+      * child_DoFencing:1 (stonith:ssh): Stopped
       * child_DoFencing:2 (stonith:ssh): Stopped
diff --git a/cts/scheduler/summary/764.summary b/cts/scheduler/summary/764.summary
index 158a064fb56..92a6d5b75ef 100644
--- a/cts/scheduler/summary/764.summary
+++ b/cts/scheduler/summary/764.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ posic041 posic043 ]
     * OFFLINE: [ posic042 posic044 ]
@@ -23,32 +25,19 @@ Transition Summary:
   * Stop rsc_posic044 ( posic041 ) due to no quorum
 
 Executing Cluster Transition:
-  * Resource action: DcIPaddr stop on posic043
-  * Resource action: DcIPaddr monitor on posic041
-  * Resource action: rsc_posic041 stop on posic041
-  * Resource action: rsc_posic041 monitor on posic043
-  * Resource action: rsc_posic042 stop on posic041
-  * Resource action: rsc_posic042 monitor on posic043
-  * Resource action: rsc_posic043 stop on posic043
-  * Resource action: rsc_posic043 monitor on posic041
-  * Resource action: rsc_posic044 stop on posic041
-  * Resource action: rsc_posic044 monitor on posic043
-  * Resource action: child_DoFencing:0 monitor=5000 on posic043
-  * Resource action: child_DoFencing:1 monitor=5000 on posic041
-  * Resource action: child_DoFencing:1 monitor on posic043
-  * Resource action: child_DoFencing:2 monitor on posic041
-  * Resource action: child_DoFencing:3 monitor on posic041
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ posic041 posic043 ]
     * OFFLINE: [ posic042 posic044 ]
 
   * Full List of Resources:
-    * DcIPaddr (ocf:heartbeat:IPaddr): Stopped
-    * rsc_posic041 (ocf:heartbeat:IPaddr): Stopped
-    * rsc_posic042 (ocf:heartbeat:IPaddr): Stopped
-    * rsc_posic043 (ocf:heartbeat:IPaddr): Stopped
+    * DcIPaddr (ocf:heartbeat:IPaddr): Started posic043
+    * rsc_posic041 (ocf:heartbeat:IPaddr): Started posic041
+    * rsc_posic042 (ocf:heartbeat:IPaddr): Started posic041
+    * rsc_posic043 (ocf:heartbeat:IPaddr): Started posic043
     * rsc_posic044 (ocf:heartbeat:IPaddr): Started posic041
     * Clone Set: DoFencing [child_DoFencing] (unique):
       * child_DoFencing:0 (stonith:ssh): Started posic043
diff --git a/cts/scheduler/summary/797.summary b/cts/scheduler/summary/797.summary
index d31572ba3db..a3a62c31ea0 100644
--- a/cts/scheduler/summary/797.summary
+++ b/cts/scheduler/summary/797.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Node c001n08: UNCLEAN (offline)
     * Online: [ c001n01 c001n02 c001n03 ]
@@ -25,49 +28,23 @@ Transition Summary:
   * Stop child_DoFencing:1 ( c001n02 ) due to node availability
 
 Executing Cluster Transition:
-  * Resource action: DcIPaddr monitor on c001n02
-  * Resource action: DcIPaddr monitor on c001n01
-  * Resource action: DcIPaddr stop on c001n03
-  * Resource action: rsc_c001n08 stop on c001n02
-  * Resource action: rsc_c001n08 monitor on c001n03
-  * Resource action: rsc_c001n08 monitor on c001n01
-  * Resource action: rsc_c001n02 stop on c001n02
-  * Resource action: rsc_c001n02 monitor on c001n03
-  * Resource action: rsc_c001n02 monitor on c001n01
-  * Resource action: rsc_c001n03 stop on c001n03
-  * Resource action: rsc_c001n03 monitor on c001n02
-  * Resource action: rsc_c001n03 monitor on c001n01
-  * Resource action: rsc_c001n01 stop on c001n01
-  * Resource action: rsc_c001n01 monitor on c001n03
-  * Resource action: child_DoFencing:2 monitor on c001n01
-  * Resource action: child_DoFencing:3 monitor on c001n03
-  * Resource action: child_DoFencing:3 monitor on c001n02
-  * Resource action: child_DoFencing:3 monitor on c001n01
-  * Pseudo action: DoFencing_stop_0
-  * Resource action: DcIPaddr delete on c001n03
-  * Resource action: child_DoFencing:0 stop on c001n03
-  * Resource action: child_DoFencing:0 stop on c001n01
-  * Resource action: child_DoFencing:1 stop on c001n02
-  * Pseudo action: DoFencing_stopped_0
-  * Pseudo action: DoFencing_start_0
-  * Cluster action: do_shutdown on c001n02
-  * Resource action: child_DoFencing:0 start on c001n01
-  * Resource action: child_DoFencing:0 monitor=5000 on c001n01
-  * Pseudo action: DoFencing_running_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Node c001n08: UNCLEAN (offline)
     * Online: [ c001n01 c001n02 c001n03 ]
 
   * Full List of Resources:
-    * DcIPaddr (ocf:heartbeat:IPaddr): Stopped
-    * rsc_c001n08 (ocf:heartbeat:IPaddr): Stopped
-    * rsc_c001n02 (ocf:heartbeat:IPaddr): Stopped
-    * rsc_c001n03 (ocf:heartbeat:IPaddr): Stopped
-    * rsc_c001n01 (ocf:heartbeat:IPaddr): Stopped
+    * DcIPaddr (ocf:heartbeat:IPaddr): Started c001n03
+    * rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n02
+    * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
+    * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03
+    * rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01
     * Clone Set: DoFencing [child_DoFencing] (unique):
-      * child_DoFencing:0 (stonith:ssh): Started c001n01
-      * child_DoFencing:1 (stonith:ssh): Stopped
+      * child_DoFencing:0 (stonith:ssh): Started [ c001n01 c001n03 ]
+      * child_DoFencing:1 (stonith:ssh): Started c001n02
       * child_DoFencing:2 (stonith:ssh): Started c001n03
       * child_DoFencing:3 (stonith:ssh): Stopped
diff --git a/cts/scheduler/summary/8-am-then-bm-a-migrating-b-stopping.summary b/cts/scheduler/summary/8-am-then-bm-a-migrating-b-stopping.summary
index 54c19eb706d..f3b7ca68382 100644
--- a/cts/scheduler/summary/8-am-then-bm-a-migrating-b-stopping.summary
+++ b/cts/scheduler/summary/8-am-then-bm-a-migrating-b-stopping.summary
@@ -1,6 +1,8 @@
 1 of 2 resource instances DISABLED and 0 BLOCKED from further action due to failure
 
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ 18node1 18node2 18node3 ]
@@ -13,17 +15,13 @@ Transition Summary:
   * Stop B ( 18node2 ) due to node availability
 
 Executing Cluster Transition:
-  * Resource action: B stop on 18node2
-  * Resource action: A migrate_to on 18node1
-  * Resource action: A migrate_from on 18node2
-  * Resource action: A stop on 18node1
-  * Pseudo action: A_start_0
-  * Resource action: A monitor=60000 on 18node2
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ 18node1 18node2 18node3 ]
 
   * Full List of Resources:
-    * A (ocf:heartbeat:Dummy): Started 18node2
-    * B (ocf:heartbeat:Dummy): Stopped (disabled)
+    * A (ocf:heartbeat:Dummy): Started 18node1
+    * B (ocf:heartbeat:Dummy): Started 18node2 (disabled)
diff --git a/cts/scheduler/summary/829.summary b/cts/scheduler/summary/829.summary
index f51849ea909..12529fc824d 100644
--- a/cts/scheduler/summary/829.summary
+++ b/cts/scheduler/summary/829.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Node c001n02: UNCLEAN (offline)
     * Online: [ c001n01 c001n03 c001n08 ]
@@ -19,46 +21,26 @@ Transition Summary:
   * Fence (reboot) c001n02 'peer is no longer part of the cluster'
   * Move rsc_c001n02 ( c001n02 -> c001n01 )
   * Stop child_DoFencing:0 ( c001n02 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Resource action: DcIPaddr monitor on c001n03
-  * Resource action: DcIPaddr monitor on c001n01
-  * Resource action: rsc_c001n08 monitor on c001n03
-  * Resource action: rsc_c001n08 monitor on c001n01
-  * Resource action: rsc_c001n02 monitor on c001n08
-  * Resource action: rsc_c001n02 monitor on c001n03
-  * Resource action: rsc_c001n02 monitor on c001n01
-  * Resource action: rsc_c001n03 monitor on c001n08
-  * Resource action: rsc_c001n03 monitor on c001n01
-  * Resource action: rsc_c001n01 monitor on c001n08
-  * Resource action: rsc_c001n01 monitor on c001n03
-  * Resource action: child_DoFencing:0 monitor on c001n01
-  * Resource action: child_DoFencing:1 monitor on c001n01
-  * Resource action: child_DoFencing:2 monitor on c001n08
-  * Resource action: child_DoFencing:2 monitor on c001n03
-  * Resource action: child_DoFencing:3 monitor on c001n03
-  * Resource action: child_DoFencing:3 monitor on c001n01
-  * Pseudo action: DoFencing_stop_0
-  * Fencing c001n02 (reboot)
-  * Pseudo action: rsc_c001n02_stop_0
-  * Pseudo action: child_DoFencing:0_stop_0
-  * Pseudo action: DoFencing_stopped_0
-  * Resource action: rsc_c001n02 start on c001n01
-  * Resource action: rsc_c001n02 monitor=5000 on c001n01
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
+    * Node c001n02: UNCLEAN (offline)
     * Online: [ c001n01 c001n03 c001n08 ]
-    * OFFLINE: [ c001n02 ]
 
   * Full List of Resources:
     * DcIPaddr (ocf:heartbeat:IPaddr): Started c001n08
     * rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08
-    * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n01
+    * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02 (UNCLEAN)
     * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03
     * rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01
     * Clone Set: DoFencing [child_DoFencing] (unique):
-      * child_DoFencing:0 (stonith:ssh): Stopped
+      * child_DoFencing:0 (stonith:ssh): Started c001n02 (UNCLEAN)
       * child_DoFencing:1 (stonith:ssh): Started c001n03
       * child_DoFencing:2 (stonith:ssh): Started c001n01
       * child_DoFencing:3 (stonith:ssh): Started c001n08
diff --git a/cts/scheduler/summary/9-am-then-bm-b-migrating-a-stopping.summary b/cts/scheduler/summary/9-am-then-bm-b-migrating-a-stopping.summary
index e37689e44a0..97083ccce55 100644
--- a/cts/scheduler/summary/9-am-then-bm-b-migrating-a-stopping.summary
+++ b/cts/scheduler/summary/9-am-then-bm-b-migrating-a-stopping.summary
@@ -1,6 +1,8 @@
 1 of 2 resource instances DISABLED and 0 BLOCKED from further action due to failure
 
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ 18node1 18node2 18node3 ]
@@ -13,13 +15,13 @@ Transition Summary:
   * Stop B ( 18node2 ) due to unrunnable A start
 
 Executing Cluster Transition:
-  * Resource action: B stop on 18node2
-  * Resource action: A stop on 18node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ 18node1 18node2 18node3 ]
 
   * Full List of Resources:
-    * A (ocf:heartbeat:Dummy): Stopped (disabled)
-    * B (ocf:heartbeat:Dummy): Stopped
+    * A (ocf:heartbeat:Dummy): Started 18node1 (disabled)
+    * B (ocf:heartbeat:Dummy): Started 18node2
diff --git a/cts/scheduler/summary/994-2.summary b/cts/scheduler/summary/994-2.summary
index cac43b9bee4..adb78ebed7b 100644
--- a/cts/scheduler/summary/994-2.summary
+++ b/cts/scheduler/summary/994-2.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ paul ]
@@ -15,17 +17,10 @@ Transition Summary:
   * Restart depends ( paul ) due to required group_1 running
 
 Executing Cluster Transition:
-  * Resource action: depends stop on paul
-  * Pseudo action: group_1_stop_0
-  * Resource action: postfix_9 stop on paul
-  * Pseudo action: group_1_stopped_0
-  * Pseudo action: group_1_start_0
-  * Resource action: postfix_9 start on paul
-  * Resource action: postfix_9 monitor=120000 on paul
-  * Pseudo action: group_1_running_0
-  * Resource action: depends start on paul
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ paul ]
@@ -34,5 +29,5 @@ Revised Cluster Status:
     * datadisk_1 (ocf:heartbeat:datadisk): Started paul
     * Filesystem_2 (ocf:heartbeat:Filesystem): Started paul
     * IPaddr_5 (ocf:heartbeat:IPaddr): Started paul
-    * postfix_9 (lsb:postfix): Started paul
+    * postfix_9 (lsb:postfix): FAILED paul
     * depends (lsb:postfix): Started paul
diff --git a/cts/scheduler/summary/994.summary b/cts/scheduler/summary/994.summary
index 5d8efdf2433..c1e91cefb3f 100644
--- a/cts/scheduler/summary/994.summary
+++ b/cts/scheduler/summary/994.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ paul ]
@@ -13,15 +15,10 @@ Transition Summary:
   * Recover postfix_9 ( paul )
 
 Executing Cluster Transition:
-  * Pseudo action: group_1_stop_0
-  * Resource action: postfix_9 stop on paul
-  * Pseudo action: group_1_stopped_0
-  * Pseudo action: group_1_start_0
-  * Resource action: postfix_9 start on paul
-  * Resource action: postfix_9 monitor=120000 on paul
-  * Pseudo action: group_1_running_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ paul ]
@@ -30,4 +27,4 @@ Revised Cluster Status:
     * datadisk_1 (ocf:heartbeat:datadisk): Started paul
     * Filesystem_2 (ocf:heartbeat:Filesystem): Started paul
     * IPaddr_5 (ocf:heartbeat:IPaddr): Started paul
-    * postfix_9 (lsb:postfix): Started paul
+    * postfix_9 (lsb:postfix): FAILED paul
diff --git a/cts/scheduler/summary/a-demote-then-b-migrate.summary b/cts/scheduler/summary/a-demote-then-b-migrate.summary
index 32c136e777e..4e8f7e0cf91 100644
--- a/cts/scheduler/summary/a-demote-then-b-migrate.summary
+++ b/cts/scheduler/summary/a-demote-then-b-migrate.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
@@ -14,44 +16,15 @@ Transition Summary:
   * Migrate rsc2 ( node1 -> node2 )
 
 Executing Cluster Transition:
-  * Resource action: rsc1:1 cancel=5000 on node1
-  * Resource action: rsc1:0 cancel=10000 on node2
-  * Pseudo action: ms1_pre_notify_demote_0
-  * Resource action: rsc1:1 notify on node1
-  * Resource action: rsc1:0 notify on node2
-  * Pseudo action: ms1_confirmed-pre_notify_demote_0
-  * Pseudo action: ms1_demote_0
-  * Resource action: rsc1:1 demote on node1
-  * Pseudo action: ms1_demoted_0
-  * Pseudo action: ms1_post_notify_demoted_0
-  * Resource action: rsc1:1 notify on node1
-  * Resource action: rsc1:0 notify on node2
-  * Pseudo action: ms1_confirmed-post_notify_demoted_0
-  * Pseudo action: ms1_pre_notify_promote_0
-  * Resource action: rsc2 migrate_to on node1
-  * Resource action: rsc1:1 notify on node1
-  * Resource action: rsc1:0 notify on node2
-  * Pseudo action: ms1_confirmed-pre_notify_promote_0
-  * Resource action: rsc2 migrate_from on node2
-  * Resource action: rsc2 stop on node1
-  * Pseudo action: rsc2_start_0
-  * Pseudo action: ms1_promote_0
-  * Resource action: rsc2 monitor=5000 on node2
-  * Resource action: rsc1:0 promote on node2
-  * Pseudo action: ms1_promoted_0
-  * Pseudo action: ms1_post_notify_promoted_0
-  * Resource action: rsc1:1 notify on node1
-  * Resource action: rsc1:0 notify on node2
-  * Pseudo action: ms1_confirmed-post_notify_promoted_0
-  * Resource action: rsc1:1 monitor=10000 on node1
-  * Resource action: rsc1:0 monitor=5000 on node2
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * Clone Set: ms1 [rsc1] (promotable):
-      * Promoted: [ node2 ]
-      * Unpromoted: [ node1 ]
-    * rsc2 (ocf:pacemaker:Dummy): Started node2
+      * Promoted: [ node1 ]
+      * Unpromoted: [ node2 ]
+    * rsc2 (ocf:pacemaker:Dummy): Started node1
diff --git a/cts/scheduler/summary/a-promote-then-b-migrate.summary b/cts/scheduler/summary/a-promote-then-b-migrate.summary
index 6489a4ff8e3..1bce891c4ab 100644
--- a/cts/scheduler/summary/a-promote-then-b-migrate.summary
+++ b/cts/scheduler/summary/a-promote-then-b-migrate.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
@@ -13,30 +15,15 @@ Transition Summary:
   * Migrate rsc2 ( node1 -> node2 )
 
 Executing Cluster Transition:
-  * Resource action: rsc1:1 cancel=10000 on node2
-  * Pseudo action: ms1_pre_notify_promote_0
-  * Resource action: rsc1:0 notify on node1
-  * Resource action: rsc1:1 notify on node2
-  * Pseudo action: ms1_confirmed-pre_notify_promote_0
-  * Pseudo action: ms1_promote_0
-  * Resource action: rsc1:1 promote on node2
-  * Pseudo action: ms1_promoted_0
-  * Pseudo action: ms1_post_notify_promoted_0
-  * Resource action: rsc1:0 notify on node1
-  * Resource action: rsc1:1 notify on node2
-  * Pseudo action: ms1_confirmed-post_notify_promoted_0
-  * Resource action: rsc2 migrate_to on node1
-  * Resource action: rsc1:1 monitor=5000 on node2
-  * Resource action: rsc2 migrate_from on node2
-  * Resource action: rsc2 stop on node1
-  * Pseudo action: rsc2_start_0
-  * Resource action: rsc2 monitor=5000 on node2
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * Clone Set: ms1 [rsc1] (promotable):
-      * Promoted: [ node1 node2 ]
-    * rsc2 (ocf:pacemaker:Dummy): Started node2
+      * Promoted: [ node1 ]
+      * Unpromoted: [ node2 ]
+    * rsc2 (ocf:pacemaker:Dummy): Started node1
diff --git a/cts/scheduler/summary/allow-unhealthy-nodes.summary b/cts/scheduler/summary/allow-unhealthy-nodes.summary
index 5d7ac0ba5a5..40cfbbce792 100644
--- a/cts/scheduler/summary/allow-unhealthy-nodes.summary
+++ b/cts/scheduler/summary/allow-unhealthy-nodes.summary
@@ -1,5 +1,7 @@
 Using the original execution date of: 2022-04-01 17:57:38Z
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Node rhel8-5: online (health is RED)
     * Online: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 ]
@@ -16,12 +18,11 @@ Transition Summary:
   * Move dummy ( rhel8-5 -> rhel8-3 )
 
 Executing Cluster Transition:
-  * Resource action: dummy stop on rhel8-5
-  * Resource action: dummy start on rhel8-3
-  * Resource action: dummy monitor=10000 on rhel8-3
 Using the original execution date of: 2022-04-01 17:57:38Z
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Node rhel8-5: online (health is RED)
     * Online: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 ]
@@ -30,6 +31,6 @@ Revised Cluster Status:
     * Fencing (stonith:fence_xvm): Started rhel8-1
     * FencingPass (stonith:fence_dummy): Started rhel8-2
     * FencingFail (stonith:fence_dummy): Started rhel8-3
-    * dummy (ocf:pacemaker:Dummy): Started rhel8-3
+    * dummy (ocf:pacemaker:Dummy): Started rhel8-5
     * Clone Set: health-clone [health]:
       * Started: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 rhel8-5 ]
diff --git a/cts/scheduler/summary/anon-instance-pending.summary b/cts/scheduler/summary/anon-instance-pending.summary
index 379fbce6124..c150ceed160 100644
--- a/cts/scheduler/summary/anon-instance-pending.summary
+++ b/cts/scheduler/summary/anon-instance-pending.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 node3 node4 node5 node6 node7 node8 node9 node10 node11 ]
@@ -76,136 +79,15 @@ Transition Summary:
   * Start clone5rsc1:10 ( node8 )
   * Start clone5rsc2:10 ( node8 )
   * Start clone5rsc3:10 ( node8 )
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Pseudo action: clone1_start_0
-  * Pseudo action: clone2_start_0
-  * Resource action: clone3rsc monitor on node2
-  * Pseudo action: clone3_start_0
-  * Pseudo action: clone4_stop_0
-  * Pseudo action: clone5_start_0
-  * Resource action: clone1rsc start on node4
-  * Resource action: clone1rsc start on node9
-  * Resource action: clone1rsc start on node10
-  * Resource action: clone1rsc start on node11
-  * Resource action: clone1rsc start on node5
-  * Resource action: clone1rsc start on node6
-  * Resource action: clone1rsc start on node7
-  * Resource action: clone1rsc start on node8
-  * Pseudo action: clone1_running_0
-  * Resource action: clone2rsc start on node4
-  * Resource action: clone2rsc start on node10
-  * Resource action: clone2rsc start on node11
-  * Resource action: clone2rsc start on node3
-  * Pseudo action: clone2_running_0
-  * Resource action: clone3rsc start on node5
-  * Resource action: clone3rsc start on node6
-  * Resource action: clone3rsc start on node7
-  * Resource action: clone3rsc start on node8
-  * Resource action: clone3rsc start on node9
-  * Resource action: clone3rsc start on node1
-  * Resource action: clone3rsc start on node10
-  * Resource action: clone3rsc start on node11
-  * Resource action: clone3rsc start on node2
-  * Resource action: clone3rsc start on node4
-  * Pseudo action: clone3_running_0
-  * Resource action: clone4rsc stop on node9
-  * Pseudo action: clone4_stopped_0
-  * Pseudo action: clone5group:2_start_0
-  * Resource action: clone5rsc2 start on node3
-  * Resource action: clone5rsc3 start on node3
-  * Pseudo action: clone5group:3_start_0
-  * Resource action: clone5rsc1 start on node9
-  * Resource action: clone5rsc2 start on node9
-  * Resource action: clone5rsc3 start on node9
-  * Pseudo action: clone5group:4_start_0
-  * Resource action: clone5rsc1 start on node10
-  * Resource action: clone5rsc2 start on node10
-  * Resource action: clone5rsc3 start on node10
-  * Pseudo action: clone5group:5_start_0
-  * Resource action: clone5rsc1 start on node11
-  * Resource action: clone5rsc2 start on node11
-  * Resource action: clone5rsc3 start on node11
-  * Pseudo action: clone5group:6_start_0
-  * Resource action: clone5rsc1 start on node4
-  * Resource action: clone5rsc2 start on node4
-  * Resource action: clone5rsc3 start on node4
-  * Pseudo action: clone5group:7_start_0
-  * Resource action: clone5rsc1 start on node5
-  * Resource action: clone5rsc2 start on node5
-  * Resource action: clone5rsc3 start on node5
-  * Pseudo action: clone5group:8_start_0
-  * Resource action: clone5rsc1 start on node6
-  * Resource action: clone5rsc2 start on node6
-  * Resource action: clone5rsc3 start on node6
-  * Pseudo action: clone5group:9_start_0
-  * Resource action: clone5rsc1 start on node7
-  * Resource action: clone5rsc2 start on node7
-  * Resource action: clone5rsc3 start on node7
-  * Pseudo action: clone5group:10_start_0
-  * Resource action: clone5rsc1 start on node8
-  * Resource action: clone5rsc2 start on node8
-  * Resource action: clone5rsc3 start on node8
-  * Resource action: clone1rsc monitor=10000 on node4
-  * Resource action: clone1rsc monitor=10000 on node9
-  * Resource action: clone1rsc monitor=10000 on node10
-  * Resource action: clone1rsc monitor=10000 on node11
-  * Resource action: clone1rsc monitor=10000 on node5
-  * Resource action: clone1rsc monitor=10000 on node6
-  * Resource action: clone1rsc monitor=10000 on node7
-  * Resource action: clone1rsc monitor=10000 on node8
-  * Resource action: clone2rsc monitor=10000 on node4
-  * Resource action: clone2rsc monitor=10000 on node10
-  * Resource action: clone2rsc monitor=10000 on node11
-  * Resource action: clone2rsc monitor=10000 on node3
-  * Resource action: clone3rsc monitor=10000 on node5
-  * Resource action: clone3rsc monitor=10000 on node6
-  * Resource action: clone3rsc monitor=10000 on node7
-  * Resource action: clone3rsc monitor=10000 on node8
-  * Resource action: clone3rsc monitor=10000 on node9
-  * Resource action: clone3rsc monitor=10000 on node1
-  * Resource action: clone3rsc monitor=10000 on node10
-  * Resource action: clone3rsc monitor=10000 on node11
-  * Resource action: clone3rsc monitor=10000 on node2
-  * Resource action: clone3rsc monitor=10000 on node4
-  * Pseudo action: clone5group:2_running_0
-  * Resource action: clone5rsc2 monitor=10000 on node3
-  * Resource action: clone5rsc3 monitor=10000 on node3
-  * Pseudo action: clone5group:3_running_0
-  * Resource action: clone5rsc1 monitor=10000 on node9
-  * Resource action: clone5rsc2 monitor=10000 on node9
-  * Resource action: clone5rsc3 monitor=10000 on node9
-  * Pseudo action: clone5group:4_running_0
-  * Resource action: clone5rsc1 monitor=10000 on node10
-  * Resource action: clone5rsc2 monitor=10000 on node10
-  * Resource action: clone5rsc3 monitor=10000 on node10
-  * Pseudo action: clone5group:5_running_0
-  * Resource action: clone5rsc1 monitor=10000 on node11
-  * Resource action: clone5rsc2 monitor=10000 on node11
-  * Resource action: clone5rsc3 monitor=10000 on node11
-  * Pseudo action: clone5group:6_running_0
-  * Resource action: clone5rsc1 monitor=10000 on node4
-  * Resource action: clone5rsc2 monitor=10000 on node4
-  * Resource action: clone5rsc3 monitor=10000 on node4
-  * Pseudo action: clone5group:7_running_0
-  * Resource action: clone5rsc1 monitor=10000 on node5
-  * Resource action: clone5rsc2 monitor=10000 on node5
-  * Resource action: clone5rsc3 monitor=10000 on node5
-  * Pseudo action: clone5group:8_running_0
-  * Resource action: clone5rsc1 monitor=10000 on node6
-  * Resource action: clone5rsc2 monitor=10000 on node6
-  * Resource action: clone5rsc3 monitor=10000 on node6
-  * Pseudo action: clone5group:9_running_0
-  * Resource action: clone5rsc1 monitor=10000 on node7
-  * Resource action: clone5rsc2 monitor=10000 on node7
-  * Resource action: clone5rsc3 monitor=10000 on node7
-  * Pseudo action: clone5group:10_running_0
-  * Resource action: clone5rsc1 monitor=10000 on node8
-  * Resource action: clone5rsc2 monitor=10000 on node8
-  * Resource action: clone5rsc3 monitor=10000 on node8
-  * Pseudo action: clone5_running_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 node3 node4 node5 node6 node7 node8 node9 node10 node11 ]
@@ -213,12 +95,21 @@ Revised Cluster Status:
     * Fencing (stonith:fence_imaginary): Started node1
     * Clone Set: clone1 [clone1rsc] (promotable):
       * Promoted: [ node3 ]
-      * Unpromoted: [ node1 node2 node4 node5 node6 node7 node8 node9 node10 node11 ]
+      * Unpromoted: [ node1 node2 node4 ]
+      * Stopped: [ node5 node6 node7 node8 node9 node10 node11 ]
     * Clone Set: clone2 [clone2rsc]:
-      * Started: [ node2 node3 node4 node10 node11 ]
+      * Started: [ node2 node4 ]
+      * Stopped: [ node1 node3 node5 node6 node7 node8 node9 node10 node11 ]
     * Clone Set: clone3 [clone3rsc]:
-      * Started: [ node1 node2 node3 node4 node5 node6 node7 node8 node9 node10 node11 ]
+      * Started: [ node3 ]
+      * Stopped: [ node1 node2 node4 node5 node6 node7 node8 node9 node10 node11 ]
     * Clone Set: clone4 [clone4rsc]:
+      * clone4rsc (ocf:pacemaker:Dummy): ORPHANED Started node9
       * Started: [ node1 node5 node6 node7 node8 ]
     * Clone Set: clone5 [clone5group]:
-      * Started: [ node1 node2 node3 node4 node5 node6 node7 node8 node9 node10 node11 ]
+      * Resource Group: clone5group:2:
+        * clone5rsc1 (ocf:pacemaker:Dummy): Started node3
+        * clone5rsc2 (ocf:pacemaker:Dummy): Started node3
+        * clone5rsc3 (ocf:pacemaker:Dummy): Stopped
+      * Started: [ node1 node2 ]
+      * Stopped: [ node4 node5 node6 node7 node8 node9 node10 node11 ]
diff --git a/cts/scheduler/summary/anti-colocation-order.summary b/cts/scheduler/summary/anti-colocation-order.summary
index 774942d5faa..354f6da7b6c 100644
--- a/cts/scheduler/summary/anti-colocation-order.summary
+++ b/cts/scheduler/summary/anti-colocation-order.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Node node1: standby (with active resources)
     * Online: [ node2 ]
@@ -16,30 +18,22 @@ Transition Summary:
   * Move rsc2 ( node1 -> node2 )
   * Stop rsc3 ( node2 ) due to node availability
   * Stop rsc4 ( node2 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Pseudo action: group1_stop_0
-  * Resource action: rsc2 stop on node1
-  * Pseudo action: group2_stop_0
group2_stop_0 - * Resource action: rsc4 stop on node2 - * Resource action: rsc1 stop on node1 - * Resource action: rsc3 stop on node2 - * Pseudo action: group1_stopped_0 - * Pseudo action: group2_stopped_0 - * Pseudo action: group1_start_0 - * Resource action: rsc1 start on node2 - * Resource action: rsc2 start on node2 - * Pseudo action: group1_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: - * Node node1: standby + * Node node1: standby (with active resources) * Online: [ node2 ] * Full List of Resources: * Resource Group: group1: - * rsc1 (ocf:pacemaker:Dummy): Started node2 - * rsc2 (ocf:pacemaker:Dummy): Started node2 + * rsc1 (ocf:pacemaker:Dummy): Started node1 + * rsc2 (ocf:pacemaker:Dummy): Started node1 * Resource Group: group2: - * rsc3 (ocf:pacemaker:Dummy): Stopped - * rsc4 (ocf:pacemaker:Dummy): Stopped + * rsc3 (ocf:pacemaker:Dummy): Started node2 + * rsc4 (ocf:pacemaker:Dummy): Started node2 diff --git a/cts/scheduler/summary/anti-colocation-promoted.summary b/cts/scheduler/summary/anti-colocation-promoted.summary index 2348f76f324..0026196bb6a 100644 --- a/cts/scheduler/summary/anti-colocation-promoted.summary +++ b/cts/scheduler/summary/anti-colocation-promoted.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2016-04-29 09:06:59Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ sle12sp2-1 sle12sp2-2 ] @@ -16,23 +18,17 @@ Transition Summary: * Demote state1:1 ( Promoted -> Unpromoted sle12sp2-1 ) Executing Cluster Transition: - * Resource action: dummy1 stop on sle12sp2-2 - * Pseudo action: ms1_demote_0 - * Resource action: state1 demote on sle12sp2-1 - * Pseudo action: ms1_demoted_0 - * Pseudo action: ms1_promote_0 - * Resource action: dummy1 start on sle12sp2-1 - * Resource action: state1 promote on sle12sp2-2 - * Pseudo action: ms1_promoted_0 Using the original execution date of: 2016-04-29 09:06:59Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ sle12sp2-1 sle12sp2-2 ] * Full List of Resources: * st_sbd (stonith:external/sbd): Started sle12sp2-2 - * dummy1 (ocf:pacemaker:Dummy): Started sle12sp2-1 + * dummy1 (ocf:pacemaker:Dummy): Started sle12sp2-2 * Clone Set: ms1 [state1] (promotable): - * Promoted: [ sle12sp2-2 ] - * Unpromoted: [ sle12sp2-1 ] + * Promoted: [ sle12sp2-1 ] + * Unpromoted: [ sle12sp2-2 ] diff --git a/cts/scheduler/summary/anti-colocation-unpromoted.summary b/cts/scheduler/summary/anti-colocation-unpromoted.summary index a7087bc8192..4828b776b25 100644 --- a/cts/scheduler/summary/anti-colocation-unpromoted.summary +++ b/cts/scheduler/summary/anti-colocation-unpromoted.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ sle12sp2-1 sle12sp2-2 ] @@ -15,22 +17,16 @@ Transition Summary: * Move dummy1 ( sle12sp2-1 -> sle12sp2-2 ) Executing Cluster Transition: - * Resource action: dummy1 stop on sle12sp2-1 - * Pseudo action: ms1_demote_0 - * Resource action: state1 demote on sle12sp2-1 - * Pseudo action: ms1_demoted_0 - * Pseudo action: ms1_promote_0 - * Resource action: state1 promote on sle12sp2-2 - * Pseudo action: ms1_promoted_0 - * Resource action: dummy1 start on sle12sp2-2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ sle12sp2-1 sle12sp2-2 ] * Full List of Resources: * st_sbd (stonith:external/sbd): Started sle12sp2-1 * Clone Set: ms1 [state1] (promotable): - * Promoted: [ sle12sp2-2 ] - * Unpromoted: [ sle12sp2-1 ] - * dummy1 (ocf:pacemaker:Dummy): Started sle12sp2-2 + * Promoted: [ sle12sp2-1 ] + * 
Unpromoted: [ sle12sp2-2 ] + * dummy1 (ocf:pacemaker:Dummy): Started sle12sp2-1 diff --git a/cts/scheduler/summary/asymmetric.summary b/cts/scheduler/summary/asymmetric.summary index f9c8f7e202f..d050685fe7e 100644 --- a/cts/scheduler/summary/asymmetric.summary +++ b/cts/scheduler/summary/asymmetric.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ puma1 puma3 ] @@ -12,12 +14,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: ebe3fb6e-7778-426e-be58-190ab1ff3dd3:1 monitor=19000 on puma1 - * Resource action: ebe3fb6e-7778-426e-be58-190ab1ff3dd3:0 monitor=20000 on puma3 - * Resource action: drbd_target_poolA monitor on puma3 - * Resource action: drbd_target_poolA monitor on puma1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ puma1 puma3 ] diff --git a/cts/scheduler/summary/asymmetrical-order-move.summary b/cts/scheduler/summary/asymmetrical-order-move.summary index dc72e4309b7..4595cd469aa 100644 --- a/cts/scheduler/summary/asymmetrical-order-move.summary +++ b/cts/scheduler/summary/asymmetrical-order-move.summary @@ -2,6 +2,8 @@ Using the original execution date of: 2016-04-28 11:50:29Z 1 of 3 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ sle12sp2-1 sle12sp2-2 ] @@ -14,14 +16,15 @@ Transition Summary: * Stop dummy2 ( sle12sp2-1 ) due to unrunnable dummy1 start Executing Cluster Transition: - * Resource action: dummy2 stop on sle12sp2-1 Using the original execution date of: 2016-04-28 11:50:29Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ sle12sp2-1 sle12sp2-2 ] * Full List of Resources: * st_sbd (stonith:external/sbd): Started sle12sp2-2 * dummy1 (ocf:pacemaker:Dummy): Stopped (disabled) - * dummy2 (ocf:pacemaker:Dummy): Stopped + * dummy2 (ocf:pacemaker:Dummy): Started sle12sp2-1 diff --git a/cts/scheduler/summary/asymmetrical-order-restart.summary b/cts/scheduler/summary/asymmetrical-order-restart.summary index fe55c526f00..60fcee2f7f3 100644 --- a/cts/scheduler/summary/asymmetrical-order-restart.summary +++ b/cts/scheduler/summary/asymmetrical-order-restart.summary @@ -2,6 +2,10 @@ Using the original execution date of: 2018-08-09 18:55:41Z 1 of 3 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: +Only 'private' parameters to 0s-interval start for cesr104ipmi on cesr105-p16 changed: 0:0;1166:0:0:540ff5bf-81ee-4648-97cb-e922b82b370c +Only 'private' parameters to 60s-interval monitor for cesr104ipmi on cesr105-p16 changed: 0:0;1167:0:0:540ff5bf-81ee-4648-97cb-e922b82b370c + * Node List: * Online: [ cesr105-p16 cesr109-p16 ] @@ -14,14 +18,15 @@ Transition Summary: * Stop sleep_b ( cesr109-p16 ) due to unrunnable sleep_a start Executing Cluster Transition: - * Resource action: sleep_b stop on cesr109-p16 Using the original execution date of: 2018-08-09 18:55:41Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ cesr105-p16 cesr109-p16 ] * Full List of Resources: * cesr104ipmi (stonith:fence_ipmilan): Started cesr105-p16 * sleep_a (ocf:classe:anything): Stopped (disabled) - * sleep_b (ocf:classe:anything): Stopped + * sleep_b (ocf:classe:anything): FAILED cesr109-p16 diff --git a/cts/scheduler/summary/attrs1.summary b/cts/scheduler/summary/attrs1.summary index 794b3c667bd..62a706cdc0e 100644 --- a/cts/scheduler/summary/attrs1.summary +++ 
b/cts/scheduler/summary/attrs1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -9,13 +11,12 @@ Transition Summary: * Start rsc1 ( node2 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc1 start on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node2 + * rsc1 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/attrs2.summary b/cts/scheduler/summary/attrs2.summary index 794b3c667bd..62a706cdc0e 100644 --- a/cts/scheduler/summary/attrs2.summary +++ b/cts/scheduler/summary/attrs2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -9,13 +11,12 @@ Transition Summary: * Start rsc1 ( node2 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc1 start on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node2 + * rsc1 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/attrs3.summary b/cts/scheduler/summary/attrs3.summary index 7d133a85949..f9ff5deedd4 100644 --- a/cts/scheduler/summary/attrs3.summary +++ b/cts/scheduler/summary/attrs3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -9,13 +11,12 @@ Transition Summary: * Start rsc1 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc1 start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node1 + * rsc1 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/attrs4.summary b/cts/scheduler/summary/attrs4.summary index 7d133a85949..f9ff5deedd4 100644 --- a/cts/scheduler/summary/attrs4.summary +++ b/cts/scheduler/summary/attrs4.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -9,13 +11,12 @@ Transition Summary: * Start rsc1 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc1 start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node1 + * rsc1 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/attrs5.summary b/cts/scheduler/summary/attrs5.summary index 7209be2fcd0..63f23776319 100644 --- a/cts/scheduler/summary/attrs5.summary +++ b/cts/scheduler/summary/attrs5.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -8,10 +10,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/attrs6.summary b/cts/scheduler/summary/attrs6.summary index 7d133a85949..f9ff5deedd4 100644 --- a/cts/scheduler/summary/attrs6.summary +++ b/cts/scheduler/summary/attrs6.summary 
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ node1 node2 ]
@@ -9,13 +11,12 @@ Transition Summary:
 * Start rsc1 ( node1 )
 Executing Cluster Transition:
- * Resource action: rsc1 monitor on node2
- * Resource action: rsc1 monitor on node1
- * Resource action: rsc1 start on node1
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ node1 node2 ]
 * Full List of Resources:
- * rsc1 (ocf:heartbeat:apache): Started node1
+ * rsc1 (ocf:heartbeat:apache): Stopped
diff --git a/cts/scheduler/summary/attrs7.summary b/cts/scheduler/summary/attrs7.summary
index 794b3c667bd..62a706cdc0e 100644
--- a/cts/scheduler/summary/attrs7.summary
+++ b/cts/scheduler/summary/attrs7.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ node1 node2 ]
@@ -9,13 +11,12 @@ Transition Summary:
 * Start rsc1 ( node2 )
 Executing Cluster Transition:
- * Resource action: rsc1 monitor on node2
- * Resource action: rsc1 monitor on node1
- * Resource action: rsc1 start on node2
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ node1 node2 ]
 * Full List of Resources:
- * rsc1 (ocf:heartbeat:apache): Started node2
+ * rsc1 (ocf:heartbeat:apache): Stopped
diff --git a/cts/scheduler/summary/attrs8.summary b/cts/scheduler/summary/attrs8.summary
index 794b3c667bd..62a706cdc0e 100644
--- a/cts/scheduler/summary/attrs8.summary
+++ b/cts/scheduler/summary/attrs8.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ node1 node2 ]
@@ -9,13 +11,12 @@ Transition Summary:
 * Start rsc1 ( node2 )
 Executing Cluster Transition:
- * Resource action: rsc1 monitor on node2
- * Resource action: rsc1 monitor on node1
- * Resource action: rsc1 start on node2
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ node1 node2 ]
 * Full List of Resources:
- * rsc1 (ocf:heartbeat:apache): Started node2
+ * rsc1 (ocf:heartbeat:apache): Stopped
diff --git a/cts/scheduler/summary/balanced.summary b/cts/scheduler/summary/balanced.summary
index 78d7ab31f9f..b298e87a424 100644
--- a/cts/scheduler/summary/balanced.summary
+++ b/cts/scheduler/summary/balanced.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ host1 host2 ]
@@ -11,19 +13,13 @@ Transition Summary:
 * Start rsc2 ( host1 )
 Executing Cluster Transition:
- * Resource action: rsc1 monitor on host2
- * Resource action: rsc1 monitor on host1
- * Resource action: rsc2 monitor on host2
- * Resource action: rsc2 monitor on host1
- * Pseudo action: load_stopped_host2
- * Pseudo action: load_stopped_host1
- * Resource action: rsc1 start on host2
- * Resource action: rsc2 start on host1
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ host1 host2 ]
 * Full List of Resources:
- * rsc1 (ocf:pacemaker:Dummy): Started host2
- * rsc2 (ocf:pacemaker:Dummy): Started host1
+ * rsc1 (ocf:pacemaker:Dummy): Stopped
+ * rsc2 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/base-score.summary b/cts/scheduler/summary/base-score.summary
index aeec6c6bb30..da07aa25e64 100644
--- a/cts/scheduler/summary/base-score.summary
+++ b/cts/scheduler/summary/base-score.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ puma1 puma2 puma3 puma4 ]
@@ -9,15 +11,12 @@ Transition Summary:
 * Start Dummy ( puma1 )
 Executing Cluster Transition:
- * Resource action: Dummy monitor on puma4
- * Resource action: Dummy monitor on puma3
- * Resource action: Dummy monitor on puma2
- * Resource action: Dummy monitor on puma1
- * Resource action: Dummy start on puma1
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ puma1 puma2 puma3 puma4 ]
 * Full List of Resources:
- * Dummy (ocf:heartbeat:Dummy): Started puma1
+ * Dummy (ocf:heartbeat:Dummy): Stopped
diff --git a/cts/scheduler/summary/bnc-515172.summary b/cts/scheduler/summary/bnc-515172.summary
index b7583386f1a..d08735c7930 100644
--- a/cts/scheduler/summary/bnc-515172.summary
+++ b/cts/scheduler/summary/bnc-515172.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ sles11-ha1 sles11-ha2 sles11-ha3 ]
@@ -16,12 +18,10 @@ Transition Summary:
 * Start PRIM_Web_IP1 ( sles11-ha2 )
 Executing Cluster Transition:
- * Pseudo action: GRP_Web_Server_start_0
- * Resource action: PRIM_Web_IP1 start on sles11-ha2
- * Pseudo action: GRP_Web_Server_running_0
- * Resource action: PRIM_Web_IP1 monitor=5000 on sles11-ha2
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ sles11-ha1 sles11-ha2 sles11-ha3 ]
@@ -29,7 +29,7 @@ Revised Cluster Status:
 * Clone Set: Stinith_Clone_Resource [Stonith_Resource]:
 * Started: [ sles11-ha1 sles11-ha2 sles11-ha3 ]
 * Resource Group: GRP_Web_Server:
- * PRIM_Web_IP1 (ocf:heartbeat:IPaddr): Started sles11-ha2
+ * PRIM_Web_IP1 (ocf:heartbeat:IPaddr): Stopped
 * Clone Set: pingd_Gateway [Res_Pingd_Gateway]:
 * Started: [ sles11-ha1 sles11-ha2 sles11-ha3 ]
 * Clone Set: Pingd_Public [Res_Pingd_Public]:
diff --git a/cts/scheduler/summary/bug-1572-1.summary b/cts/scheduler/summary/bug-1572-1.summary
index 16870b2286e..c60ec01cacc 100644
--- a/cts/scheduler/summary/bug-1572-1.summary
+++ b/cts/scheduler/summary/bug-1572-1.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+ * Cluster Summary:
+ * CIB syntax has errors (for details, run crm_verify -LV)
+
 * Node List:
 * Online: [ arc-dknightlx arc-tkincaidlx.wsicorp.com ]
@@ -19,66 +22,18 @@ Transition Summary:
 * Restart IPaddr_147_81_84_133 ( arc-tkincaidlx.wsicorp.com ) due to required pgsql_5555 start
 Executing Cluster Transition:
- * Pseudo action: ms_drbd_7788_pre_notify_demote_0
- * Pseudo action: grp_pgsql_mirror_stop_0
- * Resource action: IPaddr_147_81_84_133 stop on arc-tkincaidlx.wsicorp.com
- * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx
- * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
- * Pseudo action: ms_drbd_7788_confirmed-pre_notify_demote_0
- * Resource action: pgsql_5555 stop on arc-tkincaidlx.wsicorp.com
- * Resource action: fs_mirror stop on arc-tkincaidlx.wsicorp.com
- * Pseudo action: grp_pgsql_mirror_stopped_0
- * Pseudo action: ms_drbd_7788_demote_0
- * Resource action: rsc_drbd_7788:1 demote on arc-tkincaidlx.wsicorp.com
- * Pseudo action: ms_drbd_7788_demoted_0
- * Pseudo action: ms_drbd_7788_post_notify_demoted_0
- * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx
- * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
- * Pseudo action: ms_drbd_7788_confirmed-post_notify_demoted_0
- * Pseudo action: ms_drbd_7788_pre_notify_stop_0
- * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx
- * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
- * Pseudo action: ms_drbd_7788_confirmed-pre_notify_stop_0
- * Pseudo action: ms_drbd_7788_stop_0
- * Resource action: rsc_drbd_7788:0 stop on arc-dknightlx
- * Resource action: rsc_drbd_7788:1 stop on arc-tkincaidlx.wsicorp.com
- * Pseudo action: ms_drbd_7788_stopped_0
- * Cluster action: do_shutdown on arc-dknightlx
- * Pseudo action: ms_drbd_7788_post_notify_stopped_0
- * Pseudo action: ms_drbd_7788_confirmed-post_notify_stopped_0
- * Pseudo action: ms_drbd_7788_pre_notify_start_0
- * Pseudo action: ms_drbd_7788_confirmed-pre_notify_start_0
- * Pseudo action: ms_drbd_7788_start_0
- * Resource action: rsc_drbd_7788:1 start on arc-tkincaidlx.wsicorp.com
- * Pseudo action: ms_drbd_7788_running_0
- * Pseudo action: ms_drbd_7788_post_notify_running_0
- * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
- * Pseudo action: ms_drbd_7788_confirmed-post_notify_running_0
- * Pseudo action: ms_drbd_7788_pre_notify_promote_0
- * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
- * Pseudo action: ms_drbd_7788_confirmed-pre_notify_promote_0
- * Pseudo action: ms_drbd_7788_promote_0
- * Resource action: rsc_drbd_7788:1 promote on arc-tkincaidlx.wsicorp.com
- * Pseudo action: ms_drbd_7788_promoted_0
- * Pseudo action: ms_drbd_7788_post_notify_promoted_0
- * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
- * Pseudo action: ms_drbd_7788_confirmed-post_notify_promoted_0
- * Pseudo action: grp_pgsql_mirror_start_0
- * Resource action: fs_mirror start on arc-tkincaidlx.wsicorp.com
- * Resource action: pgsql_5555 start on arc-tkincaidlx.wsicorp.com
- * Resource action: pgsql_5555 monitor=30000 on arc-tkincaidlx.wsicorp.com
- * Resource action: IPaddr_147_81_84_133 start on arc-tkincaidlx.wsicorp.com
- * Resource action: IPaddr_147_81_84_133 monitor=25000 on arc-tkincaidlx.wsicorp.com
- * Pseudo action: grp_pgsql_mirror_running_0
 Revised Cluster Status:
+ * Cluster Summary:
+ * CIB syntax has errors (for details, run crm_verify -LV)
+
 * Node List:
 * Online: [ arc-dknightlx arc-tkincaidlx.wsicorp.com ]
 * Full List of Resources:
 * Clone Set: ms_drbd_7788 [rsc_drbd_7788] (promotable):
 * Promoted: [ arc-tkincaidlx.wsicorp.com ]
- * Stopped: [ arc-dknightlx ]
+ * Unpromoted: [ arc-dknightlx ]
 * Resource Group: grp_pgsql_mirror:
 * fs_mirror (ocf:heartbeat:Filesystem): Started arc-tkincaidlx.wsicorp.com
 * pgsql_5555 (ocf:heartbeat:pgsql): Started arc-tkincaidlx.wsicorp.com
diff --git a/cts/scheduler/summary/bug-1572-2.summary b/cts/scheduler/summary/bug-1572-2.summary
index c161239be2c..5db2a45b953 100644
--- a/cts/scheduler/summary/bug-1572-2.summary
+++ b/cts/scheduler/summary/bug-1572-2.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+ * Cluster Summary:
+ * CIB syntax has errors (for details, run crm_verify -LV)
+
 * Node List:
 * Online: [ arc-dknightlx arc-tkincaidlx.wsicorp.com ]
@@ -17,45 +20,23 @@ Transition Summary:
 * Stop fs_mirror ( arc-tkincaidlx.wsicorp.com ) due to node availability
 * Stop pgsql_5555 ( arc-tkincaidlx.wsicorp.com ) due to node availability
 * Stop IPaddr_147_81_84_133 ( arc-tkincaidlx.wsicorp.com ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 Executing Cluster Transition:
- * Pseudo action: ms_drbd_7788_pre_notify_demote_0
- * Pseudo action: grp_pgsql_mirror_stop_0
- * Resource action: IPaddr_147_81_84_133 stop on arc-tkincaidlx.wsicorp.com
- * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx
- * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
- * Pseudo action: ms_drbd_7788_confirmed-pre_notify_demote_0
- * Resource action: pgsql_5555 stop on arc-tkincaidlx.wsicorp.com
- * Resource action: fs_mirror stop on arc-tkincaidlx.wsicorp.com
- * Pseudo action: grp_pgsql_mirror_stopped_0
- * Pseudo action: ms_drbd_7788_demote_0
- * Resource action: rsc_drbd_7788:1 demote on arc-tkincaidlx.wsicorp.com
- * Pseudo action: ms_drbd_7788_demoted_0
- * Pseudo action: ms_drbd_7788_post_notify_demoted_0
- * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx
- * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
- * Pseudo action: ms_drbd_7788_confirmed-post_notify_demoted_0
- * Pseudo action: ms_drbd_7788_pre_notify_stop_0
- * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx
- * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
- * Pseudo action: ms_drbd_7788_confirmed-pre_notify_stop_0
- * Pseudo action: ms_drbd_7788_stop_0
- * Resource action: rsc_drbd_7788:0 stop on arc-dknightlx
- * Pseudo action: ms_drbd_7788_stopped_0
- * Cluster action: do_shutdown on arc-dknightlx
- * Pseudo action: ms_drbd_7788_post_notify_stopped_0
- * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
- * Pseudo action: ms_drbd_7788_confirmed-post_notify_stopped_0
 Revised Cluster Status:
+ * Cluster Summary:
+ * CIB syntax has errors (for details, run crm_verify -LV)
+
 * Node List:
 * Online: [ arc-dknightlx arc-tkincaidlx.wsicorp.com ]
 * Full List of Resources:
 * Clone Set: ms_drbd_7788 [rsc_drbd_7788] (promotable):
- * Unpromoted: [ arc-tkincaidlx.wsicorp.com ]
- * Stopped: [ arc-dknightlx ]
+ * Promoted: [ arc-tkincaidlx.wsicorp.com ]
+ * Unpromoted: [ arc-dknightlx ]
 * Resource Group: grp_pgsql_mirror:
- * fs_mirror (ocf:heartbeat:Filesystem): Stopped
- * pgsql_5555 (ocf:heartbeat:pgsql): Stopped
- * IPaddr_147_81_84_133 (ocf:heartbeat:IPaddr): Stopped
+ * fs_mirror (ocf:heartbeat:Filesystem): Started arc-tkincaidlx.wsicorp.com
+ * pgsql_5555 (ocf:heartbeat:pgsql): Started arc-tkincaidlx.wsicorp.com
+ * IPaddr_147_81_84_133 (ocf:heartbeat:IPaddr): Started arc-tkincaidlx.wsicorp.com
diff --git a/cts/scheduler/summary/bug-1573.summary b/cts/scheduler/summary/bug-1573.summary
index c40d96bca64..7b7c3236057 100644
--- a/cts/scheduler/summary/bug-1573.summary
+++ b/cts/scheduler/summary/bug-1573.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ xen-b ]
 * OFFLINE: [ xen-c ]
@@ -13,14 +15,14 @@ Current cluster status:
 Transition Summary:
 * Stop IPaddr_192_168_1_102 ( xen-b ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 Executing Cluster Transition:
- * Pseudo action: group_11_stop_0
- * Resource action: IPaddr_192_168_1_102 stop on xen-b
- * Cluster action: do_shutdown on xen-b
- * Pseudo action: group_11_stopped_0
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ xen-b ]
 * OFFLINE: [ xen-c ]
@@ -30,5 +32,5 @@ Revised Cluster Status:
 * IPaddr_192_168_1_101 (ocf:heartbeat:IPaddr): Stopped
 * apache_2 (ocf:heartbeat:apache): Stopped
 * Resource Group: group_11:
- * IPaddr_192_168_1_102 (ocf:heartbeat:IPaddr): Stopped
+ * IPaddr_192_168_1_102 (ocf:heartbeat:IPaddr): Started xen-b
 * apache_6 (ocf:heartbeat:apache): Stopped
diff --git a/cts/scheduler/summary/bug-1685.summary b/cts/scheduler/summary/bug-1685.summary
index 2ed29bc0e1b..8fa5542fe38 100644
--- a/cts/scheduler/summary/bug-1685.summary
+++ b/cts/scheduler/summary/bug-1685.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ redun1 redun2 ]
@@ -12,27 +14,14 @@ Transition Summary:
 * Start shared_filesystem ( redun2 )
 Executing Cluster Transition:
- * Pseudo action: shared_storage_pre_notify_promote_0
- * Resource action: prim_shared_storage:0 notify on redun2
- * Resource action: prim_shared_storage:1 notify on redun1
- * Pseudo action: shared_storage_confirmed-pre_notify_promote_0
- * Pseudo action: shared_storage_promote_0
- * Resource action: prim_shared_storage:0 promote on redun2
- * Pseudo action: shared_storage_promoted_0
- * Pseudo action: shared_storage_post_notify_promoted_0
- * Resource action: prim_shared_storage:0 notify on redun2
- * Resource action: prim_shared_storage:1 notify on redun1
- * Pseudo action: shared_storage_confirmed-post_notify_promoted_0
- * Resource action: shared_filesystem start on redun2
- * Resource action: prim_shared_storage:1 monitor=120000 on redun1
- * Resource action: shared_filesystem monitor=120000 on redun2
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ redun1 redun2 ]
 * Full List of Resources:
 * Clone Set: shared_storage [prim_shared_storage] (promotable):
- * Promoted: [ redun2 ]
- * Unpromoted: [ redun1 ]
- * shared_filesystem (ocf:heartbeat:Filesystem): Started redun2
+ * Unpromoted: [ redun1 redun2 ]
+ * shared_filesystem (ocf:heartbeat:Filesystem): Stopped
diff --git a/cts/scheduler/summary/bug-1718.summary b/cts/scheduler/summary/bug-1718.summary
index 76beca04cef..22296ab39c8 100644
--- a/cts/scheduler/summary/bug-1718.summary
+++ b/cts/scheduler/summary/bug-1718.summary
@@ -1,6 +1,8 @@
 1 of 5 resource instances DISABLED and 0 BLOCKED from further action due to failure
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ biggame.ds9 heartbeat.ds9 ops.ds9 ]
 * OFFLINE: [ defiant.ds9 warbird.ds9 ]
@@ -19,17 +21,10 @@ Transition Summary:
 * Stop resource_dummy ( ops.ds9 ) due to required resource_IP3 start
 Executing Cluster Transition:
- * Pseudo action: group_fUN_stop_0
- * Resource action: resource_dummy stop on ops.ds9
- * Resource action: OpenVPN_IP delete on ops.ds9
- * Resource action: OpenVPN_IP delete on heartbeat.ds9
- * Resource action: Apache delete on ops.ds9
- * Resource action: Apache delete on heartbeat.ds9
- * Resource action: Apache delete on biggame.ds9
- * Resource action: resource_IP3 stop on ops.ds9
- * Pseudo action: group_fUN_stopped_0
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ biggame.ds9 heartbeat.ds9 ops.ds9 ]
 * OFFLINE: [ defiant.ds9 warbird.ds9 ]
@@ -40,5 +35,5 @@ Revised Cluster Status:
 * resource_IP2 (ocf:heartbeat:IPaddr): Stopped (disabled)
 * resource_dummyweb (ocf:heartbeat:Dummy): Stopped
 * Resource Group: group_fUN:
- * resource_IP3 (ocf:heartbeat:IPaddr): Stopped
- * resource_dummy (ocf:heartbeat:Dummy): Stopped
+ * resource_IP3 (ocf:heartbeat:IPaddr): Started ops.ds9
+ * resource_dummy (ocf:heartbeat:Dummy): Started ops.ds9
diff --git a/cts/scheduler/summary/bug-1765.summary b/cts/scheduler/summary/bug-1765.summary
index ae851fe922e..537544105a5 100644
--- a/cts/scheduler/summary/bug-1765.summary
+++ b/cts/scheduler/summary/bug-1765.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ sles236 sles238 ]
@@ -14,25 +16,18 @@ Transition Summary:
 * Start drbd0:1 ( sles238 )
 Executing Cluster Transition:
- * Pseudo action: ms-drbd0_pre_notify_start_0
- * Resource action: drbd0:0 notify on sles236
- * Pseudo action: ms-drbd0_confirmed-pre_notify_start_0
- * Pseudo action: ms-drbd0_start_0
- * Resource action: drbd0:1 start on sles238
- * Pseudo action: ms-drbd0_running_0
- * Pseudo action: ms-drbd0_post_notify_running_0
 * Resource action: drbd0:0 notify on sles236
- * Resource action: drbd0:1 notify on sles238
- * Pseudo action: ms-drbd0_confirmed-post_notify_running_0
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ sles236 sles238 ]
 * Full List of Resources:
 * Clone Set: ms-drbd0 [drbd0] (promotable):
 * Promoted: [ sles236 ]
- * Unpromoted: [ sles238 ]
+ * Stopped: [ sles238 ]
 * Clone Set: ms-drbd1 [drbd1] (promotable):
 * Promoted: [ sles236 ]
 * Unpromoted: [ sles238 ]
diff --git a/cts/scheduler/summary/bug-1820-1.summary b/cts/scheduler/summary/bug-1820-1.summary
index 5142348de0d..1a35682b54e 100644
--- a/cts/scheduler/summary/bug-1820-1.summary
+++ b/cts/scheduler/summary/bug-1820-1.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ star world ]
@@ -14,31 +16,15 @@ Transition Summary:
 * Migrate test2 ( star -> world )
 Executing Cluster Transition:
- * Resource action: p1 monitor on world
- * Resource action: p1 monitor on star
- * Pseudo action: gr1_stop_0
- * Resource action: test1 migrate_to on star
- * Resource action: p1 start on world
- * Resource action: test1 migrate_from on world
- * Resource action: test2 migrate_to on star
- * Resource action: test2 migrate_from on world
- * Resource action: test2 stop on star
- * Resource action: test1 stop on star
- * Cluster action: do_shutdown on star
- * Pseudo action: gr1_stopped_0
- * Pseudo action: gr1_start_0
- * Pseudo action: test1_start_0
- * Pseudo action: test2_start_0
- * Pseudo action: gr1_running_0
- * Resource action: test1 monitor=10000 on world
- * Resource action: test2 monitor=10000 on world
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ star world ]
 * Full List of Resources:
- * p1 (ocf:heartbeat:Xen): Started world
+ * p1 (ocf:heartbeat:Xen): Stopped
 * Resource Group: gr1:
- * test1 (ocf:heartbeat:Xen): Started world
- * test2 (ocf:heartbeat:Xen): Started world
+ * test1 (ocf:heartbeat:Xen): Started star
+ * test2 (ocf:heartbeat:Xen): Started star
diff --git a/cts/scheduler/summary/bug-1820.summary b/cts/scheduler/summary/bug-1820.summary
index 1862ac15560..0558645ba94 100644
--- a/cts/scheduler/summary/bug-1820.summary
+++ b/cts/scheduler/summary/bug-1820.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ star world ]
@@ -12,27 +14,14 @@ Transition Summary:
 * Migrate test2 ( star -> world )
 Executing Cluster Transition:
- * Pseudo action: gr1_stop_0
- * Resource action: test1 migrate_to on star
- * Resource action: test1 migrate_from on world
- * Resource action: test2 migrate_to on star
- * Resource action: test2 migrate_from on world
- * Resource action: test2 stop on star
- * Resource action: test1 stop on star
- * Cluster action: do_shutdown on star
- * Pseudo action: gr1_stopped_0
- * Pseudo action: gr1_start_0
- * Pseudo action: test1_start_0
- * Pseudo action: test2_start_0
- * Pseudo action: gr1_running_0
- * Resource action: test1 monitor=10000 on world
- * Resource action: test2 monitor=10000 on world
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ star world ]
 * Full List of Resources:
 * Resource Group: gr1:
- * test1 (ocf:heartbeat:Xen): Started world
- * test2 (ocf:heartbeat:Xen): Started world
+ * test1 (ocf:heartbeat:Xen): Started star
+ * test2 (ocf:heartbeat:Xen): Started star
diff --git a/cts/scheduler/summary/bug-1822.summary b/cts/scheduler/summary/bug-1822.summary
index 3890a02730a..50d969f288d 100644
--- a/cts/scheduler/summary/bug-1822.summary
+++ b/cts/scheduler/summary/bug-1822.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+ * Cluster Summary:
+ * CIB syntax has errors (for details, run crm_verify -LV)
+
 * Node List:
 * Online: [ process1a process2b ]
@@ -14,23 +17,15 @@ Current cluster status:
 Transition Summary:
 * Stop promotable_Stateful:1 ( Promoted process1a ) due to node availability
 * Stop promotable_procdctl:1 ( Promoted process1a ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 Executing Cluster Transition:
- * Pseudo action: ms-sf_demote_0
- * Pseudo action: ms-sf_group:1_demote_0
- * Resource action: promotable_Stateful:1 demote on process1a
- * Resource action: promotable_procdctl:1 demote on process1a
- * Pseudo action: ms-sf_group:1_demoted_0
- * Pseudo action: ms-sf_demoted_0
- * Pseudo action: ms-sf_stop_0
- * Pseudo action: ms-sf_group:1_stop_0
- * Resource action: promotable_Stateful:1 stop on process1a
- * Resource action: promotable_procdctl:1 stop on process1a
- * Cluster action: do_shutdown on process1a
- * Pseudo action: ms-sf_group:1_stopped_0
- * Pseudo action: ms-sf_stopped_0
 Revised Cluster Status:
+ * Cluster Summary:
+ * CIB syntax has errors (for details, run crm_verify -LV)
+
 * Node List:
 * Online: [ process1a process2b ]
@@ -40,5 +35,5 @@ Revised Cluster Status:
 * promotable_Stateful:0 (ocf:heartbeat:Dummy-statful): Unpromoted process2b
 * promotable_procdctl:0 (ocf:heartbeat:procdctl): Stopped
 * Resource Group: ms-sf_group:1:
- * promotable_Stateful:1 (ocf:heartbeat:Dummy-statful): Stopped
- * promotable_procdctl:1 (ocf:heartbeat:procdctl): Stopped
+ * promotable_Stateful:1 (ocf:heartbeat:Dummy-statful): Promoted process1a
+ * promotable_procdctl:1 (ocf:heartbeat:procdctl): Promoted process1a
diff --git a/cts/scheduler/summary/bug-5014-A-start-B-start.summary b/cts/scheduler/summary/bug-5014-A-start-B-start.summary
index fdc06b0f5bf..5b81cfcb38c 100644
--- a/cts/scheduler/summary/bug-5014-A-start-B-start.summary
+++ b/cts/scheduler/summary/bug-5014-A-start-B-start.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
@@ -11,17 +13,13 @@ Transition Summary:
 * Start ClusterIP2 ( fc16-builder )
 Executing Cluster Transition:
- * Resource action: ClusterIP monitor on fc16-builder
- * Resource action: ClusterIP2 monitor on fc16-builder
- * Resource action: ClusterIP start on fc16-builder
- * Resource action: ClusterIP2 start on fc16-builder
- * Resource action: ClusterIP monitor=30000 on fc16-builder
- * Resource action: ClusterIP2 monitor=30000 on fc16-builder
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
 * Full List of Resources:
- * ClusterIP (ocf:heartbeat:IPaddr2): Started fc16-builder
- * ClusterIP2 (ocf:heartbeat:IPaddr2): Started fc16-builder
+ * ClusterIP (ocf:heartbeat:IPaddr2): Stopped
+ * ClusterIP2 (ocf:heartbeat:IPaddr2): Stopped
diff --git a/cts/scheduler/summary/bug-5014-A-stop-B-started.summary b/cts/scheduler/summary/bug-5014-A-stop-B-started.summary
index 025fc67b3dd..cd3d5b01174 100644
--- a/cts/scheduler/summary/bug-5014-A-stop-B-started.summary
+++ b/cts/scheduler/summary/bug-5014-A-stop-B-started.summary
@@ -1,6 +1,8 @@
 1 of 2 resource instances DISABLED and 0 BLOCKED from further action due to failure
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
@@ -12,12 +14,13 @@ Transition Summary:
 * Stop ClusterIP ( fc16-builder ) due to node availability
 Executing Cluster Transition:
- * Resource action: ClusterIP stop on fc16-builder
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
 * Full List of Resources:
- * ClusterIP (ocf:heartbeat:IPaddr2): Stopped (disabled)
+ * ClusterIP (ocf:heartbeat:IPaddr2): Started fc16-builder (disabled)
 * ClusterIP2 (ocf:heartbeat:IPaddr2): Started fc16-builder
diff --git a/cts/scheduler/summary/bug-5014-A-stopped-B-stopped.summary b/cts/scheduler/summary/bug-5014-A-stopped-B-stopped.summary
index ced70e77f1c..fa85f1b0524 100644
--- a/cts/scheduler/summary/bug-5014-A-stopped-B-stopped.summary
+++ b/cts/scheduler/summary/bug-5014-A-stopped-B-stopped.summary
@@ -1,6 +1,8 @@
 1 of 2 resource instances DISABLED and 0 BLOCKED from further action due to failure
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
@@ -12,10 +14,10 @@ Transition Summary:
 * Start ClusterIP2 ( fc16-builder ) due to unrunnable ClusterIP start (blocked)
 Executing Cluster Transition:
- * Resource action: ClusterIP monitor on fc16-builder
- * Resource action: ClusterIP2 monitor on fc16-builder
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
diff --git a/cts/scheduler/summary/bug-5014-CLONE-A-start-B-start.summary b/cts/scheduler/summary/bug-5014-CLONE-A-start-B-start.summary
index fc93e4c33ed..c023412e1eb 100644
--- a/cts/scheduler/summary/bug-5014-CLONE-A-start-B-start.summary
+++ b/cts/scheduler/summary/bug-5014-CLONE-A-start-B-start.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
@@ -13,23 +15,15 @@ Transition Summary:
 * Start ClusterIP2:0 ( fc16-builder )
 Executing Cluster Transition:
- * Resource action: ClusterIP:0 monitor on fc16-builder
- * Pseudo action: clone1_start_0
- * Resource action: ClusterIP2:0 monitor on fc16-builder
- * Resource action: ClusterIP:0 start on fc16-builder
- * Pseudo action: clone1_running_0
- * Pseudo action: clone2_start_0
- * Resource action: ClusterIP:0 monitor=30000 on fc16-builder
- * Resource action: ClusterIP2:0 start on fc16-builder
- * Pseudo action: clone2_running_0
- * Resource action: ClusterIP2:0 monitor=30000 on fc16-builder
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
 * Full List of Resources:
 * Clone Set: clone1 [ClusterIP]:
- * Started: [ fc16-builder ]
+ * Stopped: [ fc16-builder ]
 * Clone Set: clone2 [ClusterIP2]:
- * Started: [ fc16-builder ]
+ * Stopped: [ fc16-builder ]
diff --git a/cts/scheduler/summary/bug-5014-CLONE-A-stop-B-started.summary b/cts/scheduler/summary/bug-5014-CLONE-A-stop-B-started.summary
index a0c5e5404fd..fda44eac181 100644
--- a/cts/scheduler/summary/bug-5014-CLONE-A-stop-B-started.summary
+++ b/cts/scheduler/summary/bug-5014-CLONE-A-stop-B-started.summary
@@ -1,6 +1,8 @@
 1 of 2 resource instances DISABLED and 0 BLOCKED from further action due to failure
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
@@ -14,16 +16,15 @@ Transition Summary:
 * Stop ClusterIP:0 ( fc16-builder ) due to node availability
 Executing Cluster Transition:
- * Pseudo action: clone1_stop_0
- * Resource action: ClusterIP:0 stop on fc16-builder
- * Pseudo action: clone1_stopped_0
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
 * Full List of Resources:
 * Clone Set: clone1 [ClusterIP] (disabled):
- * Stopped (disabled): [ fc16-builder ]
+ * Started: [ fc16-builder ]
 * Clone Set: clone2 [ClusterIP2]:
 * Started: [ fc16-builder ]
diff --git a/cts/scheduler/summary/bug-5014-CthenAthenB-C-stopped.summary b/cts/scheduler/summary/bug-5014-CthenAthenB-C-stopped.summary
index b1663777b3b..65df52c2d8e 100644
--- a/cts/scheduler/summary/bug-5014-CthenAthenB-C-stopped.summary
+++ b/cts/scheduler/summary/bug-5014-CthenAthenB-C-stopped.summary
@@ -1,6 +1,8 @@
 1 of 3 resource instances DISABLED and 0 BLOCKED from further action due to failure
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
@@ -14,11 +16,10 @@ Transition Summary:
 * Start ClusterIP2 ( fc16-builder ) due to unrunnable ClusterIP start (blocked)
 Executing Cluster Transition:
- * Resource action: ClusterIP monitor on fc16-builder
- * Resource action: ClusterIP2 monitor on fc16-builder
- * Resource action: ClusterIP3 monitor on fc16-builder
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
diff --git a/cts/scheduler/summary/bug-5014-GROUP-A-start-B-start.summary b/cts/scheduler/summary/bug-5014-GROUP-A-start-B-start.summary
index 7fd15686f61..2753c16be6a 100644
--- a/cts/scheduler/summary/bug-5014-GROUP-A-start-B-start.summary
+++ b/cts/scheduler/summary/bug-5014-GROUP-A-start-B-start.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
@@ -13,21 +15,15 @@ Transition Summary:
 * Start ClusterIP2 ( fc16-builder )
 Executing Cluster Transition:
- * Pseudo action: group1_start_0
- * Resource action: ClusterIP start on fc16-builder
- * Pseudo action: group1_running_0
- * Resource action: ClusterIP monitor=30000 on fc16-builder
- * Pseudo action: group2_start_0
- * Resource action: ClusterIP2 start on fc16-builder
- * Pseudo action: group2_running_0
- * Resource action: ClusterIP2 monitor=30000 on fc16-builder
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
 * Full List of Resources:
 * Resource Group: group1:
- * ClusterIP (ocf:heartbeat:IPaddr2): Started fc16-builder
+ * ClusterIP (ocf:heartbeat:IPaddr2): Stopped
 * Resource Group: group2:
- * ClusterIP2 (ocf:heartbeat:IPaddr2): Started fc16-builder
+ * ClusterIP2 (ocf:heartbeat:IPaddr2): Stopped
diff --git a/cts/scheduler/summary/bug-5014-GROUP-A-stopped-B-started.summary b/cts/scheduler/summary/bug-5014-GROUP-A-stopped-B-started.summary
index 7bf976c79c3..74dfd320e98 100644
--- a/cts/scheduler/summary/bug-5014-GROUP-A-stopped-B-started.summary
+++ b/cts/scheduler/summary/bug-5014-GROUP-A-stopped-B-started.summary
@@ -1,6 +1,8 @@
 1 of 2 resource instances DISABLED and 0 BLOCKED from further action due to failure
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
@@ -12,18 +14,19 @@ Current cluster status:
 Transition Summary:
 * Stop ClusterIP ( fc16-builder ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 Executing Cluster Transition:
- * Pseudo action: group1_stop_0
- * Resource action: ClusterIP stop on fc16-builder
- * Pseudo action: group1_stopped_0
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
 * Full List of Resources:
 * Resource Group: group1 (disabled):
- * ClusterIP (ocf:heartbeat:IPaddr2): Stopped (disabled)
+ * ClusterIP (ocf:heartbeat:IPaddr2): Started fc16-builder (disabled)
 * Resource Group: group2:
 * ClusterIP2 (ocf:heartbeat:IPaddr2): Started fc16-builder
diff --git a/cts/scheduler/summary/bug-5014-GROUP-A-stopped-B-stopped.summary b/cts/scheduler/summary/bug-5014-GROUP-A-stopped-B-stopped.summary
index 426b576c6a8..a3a9e03a0ad 100644
--- a/cts/scheduler/summary/bug-5014-GROUP-A-stopped-B-stopped.summary
+++ b/cts/scheduler/summary/bug-5014-GROUP-A-stopped-B-stopped.summary
@@ -1,6 +1,8 @@
 1 of 2 resource instances DISABLED and 0 BLOCKED from further action due to failure
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
@@ -16,6 +18,8 @@ Transition Summary:
 Executing Cluster Transition:
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
diff --git a/cts/scheduler/summary/bug-5014-ordered-set-symmetrical-false.summary b/cts/scheduler/summary/bug-5014-ordered-set-symmetrical-false.summary
index a25c6188fd3..5023c17e8be 100644
--- a/cts/scheduler/summary/bug-5014-ordered-set-symmetrical-false.summary
+++ b/cts/scheduler/summary/bug-5014-ordered-set-symmetrical-false.summary
@@ -1,6 +1,8 @@
 1 of 3 resource instances DISABLED and 0 BLOCKED from further action due to failure
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
 * OFFLINE: [ fc16-builder2 ]
@@ -14,9 +16,10 @@ Transition Summary:
 * Stop C ( fc16-builder ) due to node availability
 Executing Cluster Transition:
- * Resource action: C stop on fc16-builder
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
 * OFFLINE: [ fc16-builder2 ]
@@ -24,4 +27,4 @@ Revised Cluster Status:
 * Full List of Resources:
 * A (ocf:pacemaker:Dummy): Started fc16-builder
 * B (ocf:pacemaker:Dummy): Started fc16-builder
- * C (ocf:pacemaker:Dummy): Stopped (disabled)
+ * C (ocf:pacemaker:Dummy): Started fc16-builder (disabled)
diff --git a/cts/scheduler/summary/bug-5014-ordered-set-symmetrical-true.summary b/cts/scheduler/summary/bug-5014-ordered-set-symmetrical-true.summary
index 70159d1ceb5..f8d78f046a5 100644
--- a/cts/scheduler/summary/bug-5014-ordered-set-symmetrical-true.summary
+++ b/cts/scheduler/summary/bug-5014-ordered-set-symmetrical-true.summary
@@ -1,6 +1,8 @@
 1 of 3 resource instances DISABLED and 0 BLOCKED from further action due to failure
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
 * OFFLINE: [ fc16-builder2 ]
@@ -11,19 +13,19 @@ Current cluster status:
 * C (ocf:pacemaker:Dummy): Started fc16-builder (disabled)
 Transition Summary:
- * Stop A ( fc16-builder ) due to required C start
+ * Stop A ( fc16-builder ) due to unrunnable C start
 * Stop C ( fc16-builder ) due to node availability
 Executing Cluster Transition:
- * Resource action: A stop on fc16-builder
- * Resource action: C stop on fc16-builder
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
 * OFFLINE: [ fc16-builder2 ]
 * Full List of Resources:
- * A (ocf:pacemaker:Dummy): Stopped
+ * A (ocf:pacemaker:Dummy): Started fc16-builder
 * B (ocf:pacemaker:Dummy): Started fc16-builder
- * C (ocf:pacemaker:Dummy): Stopped (disabled)
+ * C (ocf:pacemaker:Dummy): Started fc16-builder (disabled)
diff --git a/cts/scheduler/summary/bug-5025-1.summary b/cts/scheduler/summary/bug-5025-1.summary
index f83116e44bb..3da2e503989 100644
--- a/cts/scheduler/summary/bug-5025-1.summary
+++ b/cts/scheduler/summary/bug-5025-1.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
 * OFFLINE: [ fc16-builder2 fc16-builder3 ]
@@ -11,11 +13,10 @@ Transition Summary:
 * Reload A ( fc16-builder )
 Executing Cluster Transition:
- * Cluster action: clear_failcount for A on fc16-builder
- * Resource action: A reload-agent on fc16-builder
- * Resource action: A monitor=30000 on fc16-builder
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
 * OFFLINE: [ fc16-builder2 fc16-builder3 ]
diff --git a/cts/scheduler/summary/bug-5025-2.summary b/cts/scheduler/summary/bug-5025-2.summary
index 9e0bdfd4510..6062b107a4d 100644
--- a/cts/scheduler/summary/bug-5025-2.summary
+++ b/cts/scheduler/summary/bug-5025-2.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
 * OFFLINE: [ fc16-builder2 fc16-builder3 ]
@@ -13,6 +15,8 @@ Transition Summary:
 Executing Cluster Transition:
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
 * OFFLINE: [ fc16-builder2 fc16-builder3 ]
diff --git a/cts/scheduler/summary/bug-5025-3.summary b/cts/scheduler/summary/bug-5025-3.summary
index 68a471ab6b9..506154ca211 100644
--- a/cts/scheduler/summary/bug-5025-3.summary
+++ b/cts/scheduler/summary/bug-5025-3.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
 * OFFLINE: [ fc16-builder2 fc16-builder3 ]
@@ -12,12 +14,10 @@ Transition Summary:
 * Restart A ( fc16-builder ) due to resource definition change
 Executing Cluster Transition:
- * Resource action: A stop on fc16-builder
- * Cluster action: clear_failcount for A on fc16-builder
- * Resource action: A start on fc16-builder
- * Resource action: A monitor=30000 on fc16-builder
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder ]
 * OFFLINE: [ fc16-builder2 fc16-builder3 ]
diff --git a/cts/scheduler/summary/bug-5025-4.summary b/cts/scheduler/summary/bug-5025-4.summary
index 245601803d1..06719e08fb6 100644
--- a/cts/scheduler/summary/bug-5025-4.summary
+++ b/cts/scheduler/summary/bug-5025-4.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ 18builder ]
 * OFFLINE: [ 18node1 18node2 18node3 ]
@@ -10,15 +12,13 @@ Transition Summary:
 * Start remote-node ( 18builder )
 Executing Cluster Transition:
- * Resource action: remote-node delete on 18builder
- * Cluster action: clear_failcount for remote-node on 18builder
- * Resource action: remote-node start on 18builder
- * Resource action: remote-node monitor=30000 on 18builder
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ 18builder ]
 * OFFLINE: [ 18node1 18node2 18node3 ]
 * Full List of Resources:
- * remote-node (ocf:pacemaker:Dummy): Started 18builder
+ * remote-node (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/bug-5028-bottom.summary b/cts/scheduler/summary/bug-5028-bottom.summary
index 060b1331fb1..6fd45eb48cf 100644
--- a/cts/scheduler/summary/bug-5028-bottom.summary
+++ b/cts/scheduler/summary/bug-5028-bottom.summary
@@ -1,6 +1,9 @@
 0 of 2 resource instances DISABLED and 1 BLOCKED from further action due to failure
 Current cluster status:
+ * Cluster Summary:
+ * CIB syntax has errors (for details, run crm_verify -LV)
+
 * Node List:
 * Online: [ bl460g6a bl460g6b ]
@@ -13,14 +16,16 @@ Transition Summary:
 * Stop dummy02 ( bl460g6a ) due to node availability
 Executing Cluster Transition:
- * Pseudo action: dummy-g_stop_0
- * Resource action: dummy02 stop on bl460g6a
+ * Pseudo action: dummy-g_start_0
 Revised Cluster Status:
+ * Cluster Summary:
+ * CIB syntax has errors (for details, run crm_verify -LV)
+
 * Node List:
 * Online: [ bl460g6a bl460g6b ]
 * Full List of Resources:
 * Resource Group: dummy-g:
 * dummy01 (ocf:heartbeat:Dummy): FAILED bl460g6a (blocked)
- * dummy02 (ocf:heartbeat:Dummy-stop-NG): Stopped
+ * dummy02 (ocf:heartbeat:Dummy-stop-NG): Started bl460g6a
diff --git a/cts/scheduler/summary/bug-5028-detach.summary b/cts/scheduler/summary/bug-5028-detach.summary
index ab5a278c102..7409556dcb9 100644
--- a/cts/scheduler/summary/bug-5028-detach.summary
+++ b/cts/scheduler/summary/bug-5028-detach.summary
@@ -5,6 +5,9 @@
 0 of 2 resource instances DISABLED and 1 BLOCKED from further action due to failure
 Current cluster status:
+ * Cluster Summary:
+ * CIB syntax has errors (for details, run crm_verify -LV)
+
 * Node List:
 * Online: [ bl460g6a bl460g6b ]
@@ -16,9 +19,11 @@ Current cluster status:
 Transition Summary:
 Executing Cluster Transition:
- * Cluster action: do_shutdown on bl460g6a
 Revised Cluster Status:
+ * Cluster Summary:
+ * CIB syntax has errors (for details, run crm_verify -LV)
+
 * Node List:
 * Online: [ bl460g6a bl460g6b ]
diff --git a/cts/scheduler/summary/bug-5028.summary b/cts/scheduler/summary/bug-5028.summary
index b8eb46aba2d..815403155ea 100644
--- a/cts/scheduler/summary/bug-5028.summary
+++ b/cts/scheduler/summary/bug-5028.summary
@@ -1,6 +1,9 @@
 0 of 2 resource instances DISABLED and 1 BLOCKED from further action due to failure
 Current cluster status:
+ * Cluster Summary:
+ * CIB syntax has errors (for details, run crm_verify -LV)
+
 * Node List:
 * Online: [ bl460g6a bl460g6b ]
@@ -13,10 +16,11 @@ Transition Summary:
 * Stop dummy01 ( bl460g6a ) due to unrunnable dummy02 stop (blocked)
 Executing Cluster Transition:
- * Pseudo action: dummy-g_stop_0
- * Pseudo action: dummy-g_start_0
 Revised Cluster Status:
+ * Cluster Summary:
+ * CIB syntax has errors (for details, run crm_verify -LV)
+
 * Node List:
 * Online: [ bl460g6a bl460g6b ]
diff --git a/cts/scheduler/summary/bug-5038.summary b/cts/scheduler/summary/bug-5038.summary
index f7f8a7bff0f..e68efb37022 100644
--- a/cts/scheduler/summary/bug-5038.summary
+++ b/cts/scheduler/summary/bug-5038.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ node-0 node-2 ]
@@ -14,6 +16,8 @@ Transition Summary:
 Executing Cluster Transition:
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ node-0 node-2 ]
diff --git a/cts/scheduler/summary/bug-5059.summary b/cts/scheduler/summary/bug-5059.summary
index b3661e0ad6c..a55f140d4b5 100644
--- a/cts/scheduler/summary/bug-5059.summary
+++ b/cts/scheduler/summary/bug-5059.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Node gluster03.h: standby
 * Online: [ gluster01.h gluster02.h ]
@@ -22,48 +24,10 @@ Transition Summary:
 * Start p_stateful2:1 ( gluster02.h )
 Executing Cluster Transition:
- * Pseudo action: ms_stateful_pre_notify_start_0
- * Resource action: iptest delete on gluster02.h
- * Resource action: ipsrc2 delete on gluster02.h
- * Resource action: p_stateful1:0 notify on gluster01.h
- * Resource action: p_stateful1:1 notify on gluster02.h
- * Pseudo action: ms_stateful_confirmed-pre_notify_start_0
- * Pseudo action: ms_stateful_start_0
- * Pseudo action: g_stateful:0_start_0
- * Resource action: p_stateful2:0 start on gluster01.h
- * Pseudo action: g_stateful:1_start_0
- * Resource action: p_stateful2:1 start on gluster02.h
- * Pseudo action: g_stateful:0_running_0
- * Pseudo action: g_stateful:1_running_0
- * Pseudo action: ms_stateful_running_0
- * Pseudo action: ms_stateful_post_notify_running_0
- * Resource action: p_stateful1:0 notify on gluster01.h
- * Resource action: p_stateful2:0 notify on gluster01.h
- * Resource action: p_stateful1:1 notify on gluster02.h
- * Resource action: p_stateful2:1 notify on gluster02.h
- * Pseudo action: ms_stateful_confirmed-post_notify_running_0
- * Pseudo action: ms_stateful_pre_notify_promote_0
- * Resource action: p_stateful1:0 notify on gluster01.h
- * Resource action: p_stateful2:0 notify on gluster01.h
- * Resource action: p_stateful1:1 notify on gluster02.h
- * Resource action: p_stateful2:1 notify on gluster02.h
- * Pseudo action: ms_stateful_confirmed-pre_notify_promote_0
- * Pseudo action: ms_stateful_promote_0
- * Pseudo action: g_stateful:0_promote_0
- * Resource action: p_stateful1:0 promote on gluster01.h
- * Resource action: p_stateful2:0 promote on gluster01.h
- * Pseudo action: g_stateful:0_promoted_0
- * Pseudo action: ms_stateful_promoted_0
- * Pseudo action: ms_stateful_post_notify_promoted_0
- * Resource action: p_stateful1:0 notify on gluster01.h
- * Resource action: p_stateful2:0 notify on gluster01.h
- * Resource action: p_stateful1:1 notify on gluster02.h
- * Resource action: p_stateful2:1 notify on gluster02.h
- * Pseudo action: ms_stateful_confirmed-post_notify_promoted_0
- * Resource action: p_stateful1:1 monitor=10000 on gluster02.h
- * Resource action: p_stateful2:1 monitor=10000 on gluster02.h
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Node gluster03.h: standby
 * Online: [ gluster01.h gluster02.h ]
@@ -71,7 +35,12 @@ Revised Cluster Status:
 * Full List of Resources:
 * Clone Set: ms_stateful [g_stateful] (promotable):
- * Promoted: [ gluster01.h ]
- * Unpromoted: [ gluster02.h ]
+ * Resource Group: g_stateful:0:
+ * p_stateful1 (ocf:pacemaker:Stateful): Unpromoted gluster01.h
+ * p_stateful2 (ocf:pacemaker:Stateful): Stopped
+ * Resource Group: g_stateful:1:
+ * p_stateful1 (ocf:pacemaker:Stateful): Unpromoted gluster02.h
+ * p_stateful2 (ocf:pacemaker:Stateful): Stopped
+ * Stopped: [ gluster03.h gluster04.h ]
 * Clone Set: c_dummy [p_dummy1]:
 * Started: [ gluster01.h gluster02.h ]
diff --git a/cts/scheduler/summary/bug-5069-op-disabled.summary b/cts/scheduler/summary/bug-5069-op-disabled.summary
index f77b9cca56a..dc746fc86b7 100644
--- a/cts/scheduler/summary/bug-5069-op-disabled.summary
+++ b/cts/scheduler/summary/bug-5069-op-disabled.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder2 ]
 * OFFLINE: [ fc16-builder fc16-builder3 ]
@@ -9,10 +11,10 @@ Current cluster status:
 Transition Summary:
 Executing Cluster Transition:
- * Cluster action: clear_failcount for A on fc16-builder2
- * Resource action: A cancel=10000 on fc16-builder2
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder2 ]
 * OFFLINE: [ fc16-builder fc16-builder3 ]
diff --git a/cts/scheduler/summary/bug-5069-op-enabled.summary b/cts/scheduler/summary/bug-5069-op-enabled.summary
index ec1dde3187e..dc746fc86b7 100644
--- a/cts/scheduler/summary/bug-5069-op-enabled.summary
+++ b/cts/scheduler/summary/bug-5069-op-enabled.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder2 ]
 * OFFLINE: [ fc16-builder fc16-builder3 ]
@@ -11,6 +13,8 @@ Transition Summary:
 Executing Cluster Transition:
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ fc16-builder2 ]
 * OFFLINE: [ fc16-builder fc16-builder3 ]
diff --git a/cts/scheduler/summary/bug-5140-require-all-false.summary b/cts/scheduler/summary/bug-5140-require-all-false.summary
index a56fe6d6cc9..85e4b0e0c6d 100644
--- a/cts/scheduler/summary/bug-5140-require-all-false.summary
+++ b/cts/scheduler/summary/bug-5140-require-all-false.summary
@@ -1,6 +1,8 @@
 4 of 35 resource instances DISABLED and 0 BLOCKED from further action due to failure
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Node hex-1: standby
 * Node hex-2: standby (with active resources)
 * Node hex-3: OFFLINE (standby)
@@ -41,26 +43,29 @@ Transition Summary:
 * Stop dlm:0 ( hex-2 ) due to node availability
 * Stop clvmd:0 ( hex-2 ) due to node availability
 * Stop o2cb:0 ( hex-2 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 Executing Cluster Transition:
- * Pseudo action: baseclone_stop_0
- * Pseudo action: basegrp:0_stop_0
- * Resource action: o2cb stop on hex-2
- * Resource action: clvmd stop on hex-2
- * Resource action: dlm stop on hex-2
- * Pseudo action: basegrp:0_stopped_0
- * Pseudo action: baseclone_stopped_0
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Node hex-1: standby
- * Node hex-2: standby
+ * Node hex-2: standby (with active resources)
 * Node hex-3: OFFLINE (standby)
 * Full List of Resources:
 * fencing (stonith:external/sbd): Stopped
 * Clone Set: baseclone [basegrp]:
- * Stopped: [ hex-1 hex-2 hex-3 ]
+ * Resource Group: basegrp:0:
+ * dlm (ocf:pacemaker:controld): Started hex-2
+ * clvmd (ocf:lvm2:clvmd): Started hex-2
+ * o2cb (ocf:ocfs2:o2cb): Started hex-2
+ * vg1 (ocf:heartbeat:LVM): Stopped
+ * fs-ocfs-1 (ocf:heartbeat:Filesystem): Stopped
+ * Stopped: [ hex-1 hex-3 ]
 * fs-xfs-1 (ocf:heartbeat:Filesystem): Stopped
 * Clone Set: fs2 [fs-ocfs-2]:
 * Stopped: [ hex-1 hex-2 hex-3 ]
diff --git a/cts/scheduler/summary/bug-5143-ms-shuffle.summary b/cts/scheduler/summary/bug-5143-ms-shuffle.summary
index 18f2566aa5c..a41dc1fe93d 100644
--- a/cts/scheduler/summary/bug-5143-ms-shuffle.summary
+++ b/cts/scheduler/summary/bug-5143-ms-shuffle.summary
@@ -1,6 +1,8 @@
 1 of 34 resource instances DISABLED and 0 BLOCKED from further action due to failure
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ hex-1 hex-2 hex-3 ]
@@ -33,21 +35,10 @@ Transition Summary:
 * Promote drbd-r1:1 ( Unpromoted -> Promoted hex-3 )
 Executing Cluster Transition:
- * Pseudo action: ms-r1_pre_notify_promote_0
- * Resource action: drbd-r1 notify on hex-2
- * Resource action: drbd-r1 notify on hex-3
- * Pseudo action: ms-r1_confirmed-pre_notify_promote_0
- * Pseudo action: ms-r1_promote_0
- * Resource action: drbd-r1 promote on hex-3
- * Pseudo action: ms-r1_promoted_0
- * Pseudo action: ms-r1_post_notify_promoted_0
- * Resource action: drbd-r1 notify on hex-2
- * Resource action: drbd-r1 notify on hex-3
- * Pseudo action: ms-r1_confirmed-post_notify_promoted_0
- * Resource action: drbd-r1 monitor=29000 on hex-2
- * Resource action: drbd-r1 monitor=31000 on hex-3
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ hex-1 hex-2 hex-3 ]
@@ -62,8 +53,7 @@ Revised Cluster Status:
 * Promoted: [ hex-1 ]
 * Unpromoted: [ hex-2 ]
 * Clone Set: ms-r1 [drbd-r1] (promotable):
- * Promoted: [ hex-3 ]
- * Unpromoted: [ hex-2 ]
+ * Unpromoted: [ hex-2 hex-3 ]
 * Resource Group: md0-group:
 * md0 (ocf:heartbeat:Raid1): Started hex-3
 * vg-md0 (ocf:heartbeat:LVM): Started hex-3
diff --git a/cts/scheduler/summary/bug-5186-partial-migrate.summary b/cts/scheduler/summary/bug-5186-partial-migrate.summary
index daa64e37dd3..44d55d4f6d4 100644
--- a/cts/scheduler/summary/bug-5186-partial-migrate.summary
+++ b/cts/scheduler/summary/bug-5186-partial-migrate.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+ * Cluster Summary:
+ * CIB syntax has errors (for details, run crm_verify -LV)
+
 * Node List:
 * Node bl460g1n7: UNCLEAN (offline)
 * Online: [ bl460g1n6 bl460g1n8 ]
@@ -34,43 +37,22 @@ Transition Summary:
 * Stop prmDiskd1:0 ( bl460g1n7 ) due to node availability
 * Stop prmDiskd2:0 ( bl460g1n7 ) due to node availability
 * Stop prmPing:0 ( bl460g1n7 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 Executing Cluster Transition:
- * Resource action: prmVM2 stop on bl460g1n6
- * Pseudo action: grpStonith8_stop_0
- * Pseudo action: prmStonith8-2_stop_0
- * Fencing bl460g1n7 (reboot)
- * Pseudo action: prmDummy_stop_0
- * Pseudo action: prmVM2_stop_0
- * Pseudo action: prmStonith8-1_stop_0
- * Pseudo action: clnDiskd1_stop_0
- * Pseudo action: clnDiskd2_stop_0
- * Pseudo action: clnPing_stop_0
- * Resource action: prmDummy start on bl460g1n6
- * Resource action: prmVM2 start on bl460g1n8
- * Pseudo action: grpStonith8_stopped_0
- * Pseudo action: grpStonith8_start_0
- * Resource action: prmStonith8-1 start on bl460g1n6
- * Resource action: prmStonith8-2 start on bl460g1n6
- * Pseudo action: prmDiskd1_stop_0
- * Pseudo action: clnDiskd1_stopped_0
- * Pseudo action: prmDiskd2_stop_0
- * Pseudo action: clnDiskd2_stopped_0
- * Pseudo action: prmPing_stop_0
- * Pseudo action: clnPing_stopped_0
- * Resource action: prmVM2 monitor=10000 on bl460g1n8
- * Pseudo action: grpStonith8_running_0
- * Resource action: prmStonith8-1 monitor=10000 on bl460g1n6
- * Resource action: prmStonith8-2 monitor=3600000 on bl460g1n6
 Revised Cluster Status:
+ * Cluster Summary:
+ * CIB syntax has errors (for details, run crm_verify -LV)
+
 * Node List:
+ * Node bl460g1n7: UNCLEAN (offline)
 * Online: [ bl460g1n6 bl460g1n8 ]
- * OFFLINE: [ bl460g1n7 ]
 * Full List of Resources:
- * prmDummy (ocf:pacemaker:Dummy): Started bl460g1n6
- * prmVM2 (ocf:heartbeat:VirtualDomain): Started bl460g1n8
+ * prmDummy (ocf:pacemaker:Dummy): Started bl460g1n7 (UNCLEAN)
+ * prmVM2 (ocf:heartbeat:VirtualDomain): Started bl460g1n7 (UNCLEAN)
 * Resource Group: grpStonith6:
 * prmStonith6-1 (stonith:external/stonith-helper): Started bl460g1n8
 * prmStonith6-2 (stonith:external/ipmi): Started bl460g1n8
@@ -78,14 +60,14 @@ Revised Cluster Status:
 * prmStonith7-1 (stonith:external/stonith-helper): Started bl460g1n6
 * prmStonith7-2 (stonith:external/ipmi): Started bl460g1n6
 * Resource Group: grpStonith8:
- * prmStonith8-1 (stonith:external/stonith-helper): Started bl460g1n6
- * prmStonith8-2 (stonith:external/ipmi): Started bl460g1n6
+ * prmStonith8-1 (stonith:external/stonith-helper): Started bl460g1n7 (UNCLEAN)
+ * prmStonith8-2 (stonith:external/ipmi): Started bl460g1n7 (UNCLEAN)
 * Clone Set: clnDiskd1 [prmDiskd1]:
+ * prmDiskd1 (ocf:pacemaker:diskd): Started bl460g1n7 (UNCLEAN)
 * Started: [ bl460g1n6 bl460g1n8 ]
- * Stopped: [ bl460g1n7 ]
 * Clone Set: clnDiskd2 [prmDiskd2]:
+ * prmDiskd2 (ocf:pacemaker:diskd): Started bl460g1n7 (UNCLEAN)
 * Started: [ bl460g1n6 bl460g1n8 ]
- * Stopped: [ bl460g1n7 ]
 * Clone Set: clnPing [prmPing]:
+ * prmPing (ocf:pacemaker:ping): Started bl460g1n7 (UNCLEAN)
 * Started: [ bl460g1n6 bl460g1n8 ]
- * Stopped: [ bl460g1n7 ]
diff --git a/cts/scheduler/summary/bug-cl-5168.summary b/cts/scheduler/summary/bug-cl-5168.summary
index 11064b0f4e2..a8f2290bf61 100644
--- a/cts/scheduler/summary/bug-cl-5168.summary
+++ b/cts/scheduler/summary/bug-cl-5168.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ hex-1 hex-2 hex-3 ]
@@ -31,21 +33,10 @@ Transition Summary:
 * Promote drbd-r1:1 ( Unpromoted -> Promoted hex-3 )
 Executing Cluster Transition:
- * Pseudo action: ms-r1_pre_notify_promote_0
- * Resource action: drbd-r1 notify on hex-2
- * Resource action:
drbd-r1 notify on hex-3 - * Pseudo action: ms-r1_confirmed-pre_notify_promote_0 - * Pseudo action: ms-r1_promote_0 - * Resource action: drbd-r1 promote on hex-3 - * Pseudo action: ms-r1_promoted_0 - * Pseudo action: ms-r1_post_notify_promoted_0 - * Resource action: drbd-r1 notify on hex-2 - * Resource action: drbd-r1 notify on hex-3 - * Pseudo action: ms-r1_confirmed-post_notify_promoted_0 - * Resource action: drbd-r1 monitor=29000 on hex-2 - * Resource action: drbd-r1 monitor=31000 on hex-3 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ hex-1 hex-2 hex-3 ] @@ -72,5 +63,4 @@ Revised Cluster Status: * fs-r0 (ocf:heartbeat:Filesystem): Started hex-1 * dummy2 (ocf:heartbeat:Delay): Started hex-1 * Clone Set: ms-r1 [drbd-r1] (promotable): - * Promoted: [ hex-3 ] - * Unpromoted: [ hex-2 ] + * Unpromoted: [ hex-2 hex-3 ] diff --git a/cts/scheduler/summary/bug-cl-5170.summary b/cts/scheduler/summary/bug-cl-5170.summary index 31293765b58..497cf66ab61 100644 --- a/cts/scheduler/summary/bug-cl-5170.summary +++ b/cts/scheduler/summary/bug-cl-5170.summary @@ -1,6 +1,8 @@ 0 of 4 resource instances DISABLED and 1 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Node TCS-1: OFFLINE (standby) * Online: [ TCS-2 ] @@ -18,20 +20,18 @@ Transition Summary: * Stop ip_mgmt ( TCS-2 ) due to node availability Executing Cluster Transition: - * Pseudo action: svc_stop_0 - * Resource action: ip_mgmt stop on TCS-2 - * Resource action: ip_trf stop on TCS-2 - * Pseudo action: svc_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Node TCS-1: OFFLINE (standby) * Online: [ TCS-2 ] * Full List of Resources: * Resource Group: svc: - * ip_trf (ocf:heartbeat:IPaddr2): Stopped - * ip_mgmt (ocf:heartbeat:IPaddr2): Stopped + * ip_trf (ocf:heartbeat:IPaddr2): Started TCS-2 + * ip_mgmt (ocf:heartbeat:IPaddr2): Started TCS-2 * Clone Set: cl_tomcat_nms [d_tomcat_nms]: * d_tomcat_nms (ocf:ntc:tomcat): FAILED TCS-2 (blocked) * Stopped: [ TCS-1 ] diff --git a/cts/scheduler/summary/bug-cl-5212.summary b/cts/scheduler/summary/bug-cl-5212.summary index 7cbe97558b6..9788c73d8e4 100644 --- a/cts/scheduler/summary/bug-cl-5212.summary +++ b/cts/scheduler/summary/bug-cl-5212.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Node srv01: UNCLEAN (offline) * Node srv02: UNCLEAN (offline) @@ -30,23 +33,14 @@ Transition Summary: * Stop prmPingd:1 ( srv01 ) due to node availability (blocked) Executing Cluster Transition: - * Pseudo action: grpStonith1_stop_0 - * Pseudo action: grpStonith1_start_0 - * Pseudo action: grpStonith2_stop_0 - * Pseudo action: grpStonith2_start_0 - * Pseudo action: grpStonith3_stop_0 - * Pseudo action: msPostgresql_pre_notify_stop_0 - * Pseudo action: clnPingd_stop_0 - * Resource action: pgsql notify on srv03 - * Pseudo action: msPostgresql_confirmed-pre_notify_stop_0 - * Pseudo action: msPostgresql_stop_0 - * Pseudo action: clnPingd_stopped_0 - * Pseudo action: msPostgresql_stopped_0 - * Pseudo action: msPostgresql_post_notify_stopped_0 - * Resource action: pgsql notify on srv03 - * Pseudo action: msPostgresql_confirmed-post_notify_stopped_0 + * Pseudo action: grpStonith3_start_0 +Transition failed: terminated +An invalid transition was produced Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Node srv01: UNCLEAN (offline) * Node srv02: UNCLEAN (offline) 
diff --git a/cts/scheduler/summary/bug-cl-5213.summary b/cts/scheduler/summary/bug-cl-5213.summary index 047f75d48d9..5d6480ad2b5 100644 --- a/cts/scheduler/summary/bug-cl-5213.summary +++ b/cts/scheduler/summary/bug-cl-5213.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ srv01 srv02 ] @@ -10,9 +12,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: pgsql monitor=10000 on srv01 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ srv01 srv02 ] diff --git a/cts/scheduler/summary/bug-cl-5219.summary b/cts/scheduler/summary/bug-cl-5219.summary index c5935e1465b..bef659f913f 100644 --- a/cts/scheduler/summary/bug-cl-5219.summary +++ b/cts/scheduler/summary/bug-cl-5219.summary @@ -1,6 +1,9 @@ 1 of 9 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ ha1.test.anchor.net.au ha2.test.anchor.net.au ] @@ -22,14 +25,16 @@ Transition Summary: * Stop child1-service ( ha2.test.anchor.net.au ) due to node availability Executing Cluster Transition: - * Resource action: child1-service stop on ha2.test.anchor.net.au Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ ha1.test.anchor.net.au ha2.test.anchor.net.au ] * Full List of Resources: - * child1-service (ocf:pacemaker:Dummy): Stopped (disabled) + * child1-service (ocf:pacemaker:Dummy): Started ha2.test.anchor.net.au (disabled) * child2-service (ocf:pacemaker:Dummy): Started ha2.test.anchor.net.au * parent-service (ocf:pacemaker:Dummy): Started ha2.test.anchor.net.au * Clone Set: child1 [stateful-child1] (promotable): diff --git a/cts/scheduler/summary/bug-cl-5247.summary b/cts/scheduler/summary/bug-cl-5247.summary index b18bdd8b919..8ce102c8389 100644 --- a/cts/scheduler/summary/bug-cl-5247.summary +++ b/cts/scheduler/summary/bug-cl-5247.summary @@ -1,5 +1,11 @@ Using the original execution date of: 2015-08-12 02:53:40Z Current cluster status: + * Cluster Summary: +Only 'private' parameters to 0s-interval start for prmStonith2-2 on bl460g8n3 changed: 0:0;30:3:0:6cacb40a-dbbb-49b0-bac7-1794a61d2910 +Only 'private' parameters to 60m-interval monitor for prmStonith2-2 on bl460g8n3 changed: 0:0;19:4:0:6cacb40a-dbbb-49b0-bac7-1794a61d2910 +Only 'private' parameters to 0s-interval start for prmStonith1-2 on bl460g8n4 changed: 0:0;24:3:0:6cacb40a-dbbb-49b0-bac7-1794a61d2910 +Only 'private' parameters to 60m-interval monitor for prmStonith1-2 on bl460g8n4 changed: 0:0;12:4:0:6cacb40a-dbbb-49b0-bac7-1794a61d2910 + * Node List: * Online: [ bl460g8n3 bl460g8n4 ] * GuestOnline: [ pgsr01 ] @@ -19,69 +25,36 @@ Current cluster status: * Stopped: [ bl460g8n3 bl460g8n4 ] Transition Summary: + * Fence (reboot) pgsr02 (resource: prmDB2) 'guest is unclean' * Fence (off) pgsr02 (resource: prmDB2) 'guest is unclean' * Stop prmDB2 ( bl460g8n4 ) due to node availability * Recover vip-master ( pgsr02 -> pgsr01 ) * Recover vip-rep ( pgsr02 -> pgsr01 ) * Stop pgsql:0 ( Promoted pgsr02 ) due to node availability * Stop pgsr02 ( bl460g8n4 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Resource action: vip-master monitor on pgsr01 - * Resource action: vip-rep monitor on pgsr01 - * Pseudo action: msPostgresql_pre_notify_demote_0 - * Resource action: pgsr01 
monitor on bl460g8n4 - * Resource action: pgsr02 stop on bl460g8n4 - * Resource action: pgsr02 monitor on bl460g8n3 - * Resource action: prmDB2 stop on bl460g8n4 - * Resource action: pgsql notify on pgsr01 - * Pseudo action: msPostgresql_confirmed-pre_notify_demote_0 - * Pseudo action: msPostgresql_demote_0 - * Pseudo action: stonith-pgsr02-off on pgsr02 - * Pseudo action: pgsql_post_notify_stop_0 - * Pseudo action: pgsql_demote_0 - * Pseudo action: msPostgresql_demoted_0 - * Pseudo action: msPostgresql_post_notify_demoted_0 - * Resource action: pgsql notify on pgsr01 - * Pseudo action: msPostgresql_confirmed-post_notify_demoted_0 - * Pseudo action: msPostgresql_pre_notify_stop_0 - * Pseudo action: master-group_stop_0 - * Pseudo action: vip-rep_stop_0 - * Resource action: pgsql notify on pgsr01 - * Pseudo action: msPostgresql_confirmed-pre_notify_stop_0 - * Pseudo action: msPostgresql_stop_0 - * Pseudo action: vip-master_stop_0 - * Pseudo action: pgsql_stop_0 - * Pseudo action: msPostgresql_stopped_0 - * Pseudo action: master-group_stopped_0 - * Pseudo action: master-group_start_0 - * Resource action: vip-master start on pgsr01 - * Resource action: vip-rep start on pgsr01 - * Pseudo action: msPostgresql_post_notify_stopped_0 - * Pseudo action: master-group_running_0 - * Resource action: vip-master monitor=10000 on pgsr01 - * Resource action: vip-rep monitor=10000 on pgsr01 - * Resource action: pgsql notify on pgsr01 - * Pseudo action: msPostgresql_confirmed-post_notify_stopped_0 - * Pseudo action: pgsql_notified_0 - * Resource action: pgsql monitor=9000 on pgsr01 Using the original execution date of: 2015-08-12 02:53:40Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ bl460g8n3 bl460g8n4 ] * GuestOnline: [ pgsr01 ] * Full List of Resources: * prmDB1 (ocf:heartbeat:VirtualDomain): Started bl460g8n3 - * prmDB2 (ocf:heartbeat:VirtualDomain): FAILED + * prmDB2 (ocf:heartbeat:VirtualDomain): FAILED bl460g8n4 * Resource Group: grpStonith1: * prmStonith1-2 (stonith:external/ipmi): Started bl460g8n4 * Resource Group: grpStonith2: * prmStonith2-2 (stonith:external/ipmi): Started bl460g8n3 * Resource Group: master-group: - * vip-master (ocf:heartbeat:Dummy): FAILED [ pgsr01 pgsr02 ] - * vip-rep (ocf:heartbeat:Dummy): FAILED [ pgsr01 pgsr02 ] + * vip-master (ocf:heartbeat:Dummy): FAILED pgsr02 + * vip-rep (ocf:heartbeat:Dummy): FAILED pgsr02 * Clone Set: msPostgresql [pgsql] (promotable): * Promoted: [ pgsr01 ] * Stopped: [ bl460g8n3 bl460g8n4 ] diff --git a/cts/scheduler/summary/bug-lf-1852.summary b/cts/scheduler/summary/bug-lf-1852.summary index 26c73e166a5..0af5929cadd 100644 --- a/cts/scheduler/summary/bug-lf-1852.summary +++ b/cts/scheduler/summary/bug-lf-1852.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ mysql-01 mysql-02 ] @@ -15,25 +17,18 @@ Transition Summary: * Start drbd0:1 ( mysql-01 ) Executing Cluster Transition: - * Pseudo action: ms-drbd0_pre_notify_start_0 - * Resource action: drbd0:0 notify on mysql-02 - * Pseudo action: ms-drbd0_confirmed-pre_notify_start_0 - * Pseudo action: ms-drbd0_start_0 - * Resource action: drbd0:1 start on mysql-01 - * Pseudo action: ms-drbd0_running_0 - * Pseudo action: ms-drbd0_post_notify_running_0 * Resource action: drbd0:0 notify on mysql-02 - * Resource action: drbd0:1 notify on mysql-01 - * Pseudo action: ms-drbd0_confirmed-post_notify_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ mysql-01 mysql-02 ] * Full List of Resources: * Clone Set: 
ms-drbd0 [drbd0] (promotable): * Promoted: [ mysql-02 ] - * Unpromoted: [ mysql-01 ] + * Stopped: [ mysql-01 ] * Resource Group: fs_mysql_ip: * fs0 (ocf:heartbeat:Filesystem): Started mysql-02 * mysqlid (lsb:mysql): Started mysql-02 diff --git a/cts/scheduler/summary/bug-lf-1920.summary b/cts/scheduler/summary/bug-lf-1920.summary index e8dd985bd3f..5d5ba4ddf60 100644 --- a/cts/scheduler/summary/bug-lf-1920.summary +++ b/cts/scheduler/summary/bug-lf-1920.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ dktest1sles10 dktest2sles10 ] @@ -8,9 +10,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: mysql-bin monitor=30000 on dktest2sles10 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ dktest1sles10 dktest2sles10 ] diff --git a/cts/scheduler/summary/bug-lf-2106.summary b/cts/scheduler/summary/bug-lf-2106.summary index 391b5fb10b1..bff78738349 100644 --- a/cts/scheduler/summary/bug-lf-2106.summary +++ b/cts/scheduler/summary/bug-lf-2106.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ cl-virt-1 cl-virt-2 ] @@ -40,20 +42,10 @@ Transition Summary: * Restart pingd:1 ( cl-virt-2 ) due to resource definition change Executing Cluster Transition: - * Cluster action: clear_failcount for pingd on cl-virt-1 - * Cluster action: clear_failcount for pingd on cl-virt-2 - * Pseudo action: pingdclone_stop_0 - * Resource action: pingd:0 stop on cl-virt-1 - * Resource action: pingd:0 stop on cl-virt-2 - * Pseudo action: pingdclone_stopped_0 - * Pseudo action: pingdclone_start_0 - * Resource action: pingd:0 start on cl-virt-1 - * Resource action: pingd:0 monitor=30000 on cl-virt-1 - * Resource action: pingd:0 start on cl-virt-2 - * Resource action: pingd:0 monitor=30000 on cl-virt-2 - * Pseudo action: pingdclone_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ cl-virt-1 cl-virt-2 ] diff --git a/cts/scheduler/summary/bug-lf-2153.summary b/cts/scheduler/summary/bug-lf-2153.summary index 631e73ac9be..1a31d79e0bc 100644 --- a/cts/scheduler/summary/bug-lf-2153.summary +++ b/cts/scheduler/summary/bug-lf-2153.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node bob: standby (with active resources) * Online: [ alice ] @@ -21,34 +23,24 @@ Current cluster status: Transition Summary: * Stop res_drbd_iscsivg01:0 ( Unpromoted bob ) due to node availability * Stop res_tgtd:0 ( bob ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Pseudo action: ms_drbd_iscsivg01_pre_notify_stop_0 - * Pseudo action: cl_tgtd_stop_0 - * Resource action: res_drbd_iscsivg01:0 notify on bob - * Resource action: res_drbd_iscsivg01:1 notify on alice - * Pseudo action: ms_drbd_iscsivg01_confirmed-pre_notify_stop_0 - * Pseudo action: ms_drbd_iscsivg01_stop_0 - * Resource action: res_tgtd:0 stop on bob - * Pseudo action: cl_tgtd_stopped_0 - * Resource action: res_drbd_iscsivg01:0 stop on bob - * Pseudo action: ms_drbd_iscsivg01_stopped_0 - * Pseudo action: ms_drbd_iscsivg01_post_notify_stopped_0 - * Resource action: res_drbd_iscsivg01:1 notify on alice - * Pseudo action: ms_drbd_iscsivg01_confirmed-post_notify_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: - * Node bob: standby + * Node bob: standby (with active resources) * Online: [ alice ] * Full List of Resources: * Clone Set: ms_drbd_iscsivg01 [res_drbd_iscsivg01] 
(promotable): * Promoted: [ alice ] - * Stopped: [ bob ] + * Unpromoted: [ bob ] * Clone Set: cl_tgtd [res_tgtd]: - * Started: [ alice ] - * Stopped: [ bob ] + * Started: [ alice bob ] * Resource Group: rg_iscsivg01: * res_portblock_iscsivg01_block (ocf:heartbeat:portblock): Started alice * res_lvm_iscsivg01 (ocf:heartbeat:LVM): Started alice diff --git a/cts/scheduler/summary/bug-lf-2160.summary b/cts/scheduler/summary/bug-lf-2160.summary index f7fb9ed2bf8..ab82166ca3f 100644 --- a/cts/scheduler/summary/bug-lf-2160.summary +++ b/cts/scheduler/summary/bug-lf-2160.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ cardhu dualamd1 dualamd3 ] @@ -10,10 +12,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: domU-test01 monitor on cardhu - * Resource action: dom0-iscsi1-cnx1:0 monitor on cardhu Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ cardhu dualamd1 dualamd3 ] diff --git a/cts/scheduler/summary/bug-lf-2171.summary b/cts/scheduler/summary/bug-lf-2171.summary index 5117608a20c..281ad1e928f 100644 --- a/cts/scheduler/summary/bug-lf-2171.summary +++ b/cts/scheduler/summary/bug-lf-2171.summary @@ -1,6 +1,9 @@ 2 of 4 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ xenserver1 xenserver2 ] @@ -18,22 +21,17 @@ Transition Summary: * Stop res_Dummy3 ( xenserver1 ) due to unrunnable cl_res_Dummy1 running Executing Cluster Transition: - * Pseudo action: gr_Dummy_stop_0 - * Resource action: res_Dummy2 stop on xenserver1 - * Resource action: res_Dummy3 stop on xenserver1 - * Pseudo action: gr_Dummy_stopped_0 - * Pseudo action: cl_res_Dummy1_stop_0 - * Resource action: res_Dummy1:1 stop on xenserver1 - * Resource action: res_Dummy1:0 stop on xenserver2 - * Pseudo action: cl_res_Dummy1_stopped_0 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ xenserver1 xenserver2 ] * Full List of Resources: * Clone Set: cl_res_Dummy1 [res_Dummy1] (disabled): - * Stopped (disabled): [ xenserver1 xenserver2 ] + * Started: [ xenserver1 xenserver2 ] * Resource Group: gr_Dummy (disabled): - * res_Dummy2 (ocf:heartbeat:Dummy): Stopped - * res_Dummy3 (ocf:heartbeat:Dummy): Stopped + * res_Dummy2 (ocf:heartbeat:Dummy): Started xenserver1 + * res_Dummy3 (ocf:heartbeat:Dummy): Started xenserver1 diff --git a/cts/scheduler/summary/bug-lf-2213.summary b/cts/scheduler/summary/bug-lf-2213.summary index 83b0f176bef..79b6698a5ad 100644 --- a/cts/scheduler/summary/bug-lf-2213.summary +++ b/cts/scheduler/summary/bug-lf-2213.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ fs1 fs2 web1 web2 ] @@ -11,20 +13,13 @@ Transition Summary: * Start test:1 ( web2 ) Executing Cluster Transition: - * Pseudo action: cl-test_start_0 - * Pseudo action: gr-test:0_start_0 - * Resource action: test:0 start on web1 - * Pseudo action: gr-test:1_start_0 - * Resource action: test:1 start on web2 - * Pseudo action: gr-test:0_running_0 - * Pseudo action: gr-test:1_running_0 - * Pseudo action: cl-test_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ fs1 fs2 web1 web2 ] * Full List of Resources: * Clone Set: cl-test [gr-test]: - * Started: [ web1 web2 ] - * Stopped: [ fs1 fs2 ] + * Stopped: [ fs1 fs2 web1 web2 ] diff --git 
a/cts/scheduler/summary/bug-lf-2317.summary b/cts/scheduler/summary/bug-lf-2317.summary index 96603fdee7b..d41e6ba87c1 100644 --- a/cts/scheduler/summary/bug-lf-2317.summary +++ b/cts/scheduler/summary/bug-lf-2317.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ ibm1.isg.si ibm2.isg.si ] @@ -12,25 +14,15 @@ Transition Summary: * Promote drbd_r0:1 ( Unpromoted -> Promoted ibm1.isg.si ) Executing Cluster Transition: - * Resource action: drbd_r0:0 cancel=30000 on ibm1.isg.si - * Pseudo action: ms_drbd_r0_pre_notify_promote_0 - * Resource action: drbd_r0:1 notify on ibm2.isg.si - * Resource action: drbd_r0:0 notify on ibm1.isg.si - * Pseudo action: ms_drbd_r0_confirmed-pre_notify_promote_0 - * Pseudo action: ms_drbd_r0_promote_0 - * Resource action: drbd_r0:0 promote on ibm1.isg.si - * Pseudo action: ms_drbd_r0_promoted_0 - * Pseudo action: ms_drbd_r0_post_notify_promoted_0 - * Resource action: drbd_r0:1 notify on ibm2.isg.si - * Resource action: drbd_r0:0 notify on ibm1.isg.si - * Pseudo action: ms_drbd_r0_confirmed-post_notify_promoted_0 - * Resource action: drbd_r0:0 monitor=15000 on ibm1.isg.si Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ ibm1.isg.si ibm2.isg.si ] * Full List of Resources: * HostingIsg (ocf:heartbeat:Xen): Started ibm2.isg.si * Clone Set: ms_drbd_r0 [drbd_r0] (promotable): - * Promoted: [ ibm1.isg.si ibm2.isg.si ] + * Promoted: [ ibm2.isg.si ] + * Unpromoted: [ ibm1.isg.si ] diff --git a/cts/scheduler/summary/bug-lf-2358.summary b/cts/scheduler/summary/bug-lf-2358.summary index b89aadc3172..376346d7e55 100644 --- a/cts/scheduler/summary/bug-lf-2358.summary +++ b/cts/scheduler/summary/bug-lf-2358.summary @@ -1,6 +1,8 @@ 2 of 15 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ alice.demo bob.demo ] @@ -30,18 +32,11 @@ Transition Summary: * Start res_drbd_mysql1:1 ( alice.demo ) Executing Cluster Transition: - * Pseudo action: ms_drbd_mysql1_pre_notify_start_0 - * Resource action: res_drbd_mysql1:0 notify on bob.demo - * Pseudo action: ms_drbd_mysql1_confirmed-pre_notify_start_0 - * Pseudo action: ms_drbd_mysql1_start_0 - * Resource action: res_drbd_mysql1:1 start on alice.demo - * Pseudo action: ms_drbd_mysql1_running_0 - * Pseudo action: ms_drbd_mysql1_post_notify_running_0 * Resource action: res_drbd_mysql1:0 notify on bob.demo - * Resource action: res_drbd_mysql1:1 notify on alice.demo - * Pseudo action: ms_drbd_mysql1_confirmed-post_notify_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ alice.demo bob.demo ] @@ -58,7 +53,7 @@ Revised Cluster Status: * res_mysql1 (ocf:heartbeat:mysql): Started bob.demo * Clone Set: ms_drbd_mysql1 [res_drbd_mysql1] (promotable): * Promoted: [ bob.demo ] - * Unpromoted: [ alice.demo ] + * Stopped: [ alice.demo ] * Clone Set: ms_drbd_mysql2 [res_drbd_mysql2] (promotable): * Promoted: [ alice.demo ] * Unpromoted: [ bob.demo ] diff --git a/cts/scheduler/summary/bug-lf-2361.summary b/cts/scheduler/summary/bug-lf-2361.summary index 4ea272df0d2..5f94fa5daa0 100644 --- a/cts/scheduler/summary/bug-lf-2361.summary +++ b/cts/scheduler/summary/bug-lf-2361.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ alice.demo bob.demo ] @@ -16,29 +18,16 @@ Transition Summary: * Start dummy2:1 ( bob.demo ) due to unrunnable dummy1 start (blocked) Executing Cluster Transition: - * Pseudo action: 
ms_stateful_pre_notify_start_0 - * Resource action: service2:0 delete on bob.demo - * Resource action: service2:0 delete on alice.demo - * Resource action: service2:1 delete on bob.demo - * Resource action: service1 delete on bob.demo - * Resource action: service1 delete on alice.demo - * Pseudo action: ms_stateful_confirmed-pre_notify_start_0 - * Pseudo action: ms_stateful_start_0 - * Resource action: stateful:0 start on alice.demo - * Resource action: stateful:1 start on bob.demo - * Pseudo action: ms_stateful_running_0 - * Pseudo action: ms_stateful_post_notify_running_0 - * Resource action: stateful:0 notify on alice.demo - * Resource action: stateful:1 notify on bob.demo - * Pseudo action: ms_stateful_confirmed-post_notify_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ alice.demo bob.demo ] * Full List of Resources: * dummy1 (ocf:heartbeat:Dummy): Stopped * Clone Set: ms_stateful [stateful] (promotable): - * Unpromoted: [ alice.demo bob.demo ] + * Stopped: [ alice.demo bob.demo ] * Clone Set: cl_dummy2 [dummy2]: * Stopped: [ alice.demo bob.demo ] diff --git a/cts/scheduler/summary/bug-lf-2422.summary b/cts/scheduler/summary/bug-lf-2422.summary index 023d07dba50..35d0e8cd4e6 100644 --- a/cts/scheduler/summary/bug-lf-2422.summary +++ b/cts/scheduler/summary/bug-lf-2422.summary @@ -1,6 +1,8 @@ 4 of 21 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ qa-suse-1 qa-suse-2 qa-suse-3 qa-suse-4 ] @@ -24,60 +26,20 @@ Transition Summary: * Stop ocfs:1 ( qa-suse-4 ) due to node availability * Stop ocfs:2 ( qa-suse-3 ) due to node availability * Stop ocfs:3 ( qa-suse-2 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Resource action: sbd_stonith monitor=15000 on qa-suse-2 - * Pseudo action: c-ocfs_stop_0 - * Resource action: ocfs:3 stop on qa-suse-2 - * Resource action: ocfs:2 stop on qa-suse-3 - * Resource action: ocfs:0 stop on qa-suse-4 - * Resource action: ocfs:1 stop on qa-suse-1 - * Pseudo action: c-ocfs_stopped_0 - * Pseudo action: c-o2stage_stop_0 - * Pseudo action: o2stage:0_stop_0 - * Resource action: cmirror:1 stop on qa-suse-1 - * Pseudo action: o2stage:1_stop_0 - * Resource action: cmirror:0 stop on qa-suse-4 - * Pseudo action: o2stage:2_stop_0 - * Resource action: cmirror:2 stop on qa-suse-3 - * Pseudo action: o2stage:3_stop_0 - * Resource action: cmirror:3 stop on qa-suse-2 - * Resource action: o2cb:1 stop on qa-suse-1 - * Resource action: o2cb:0 stop on qa-suse-4 - * Resource action: o2cb:2 stop on qa-suse-3 - * Resource action: o2cb:3 stop on qa-suse-2 - * Pseudo action: o2stage:0_stopped_0 - * Pseudo action: o2stage:1_stopped_0 - * Pseudo action: o2stage:2_stopped_0 - * Pseudo action: o2stage:3_stopped_0 - * Pseudo action: c-o2stage_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ qa-suse-1 qa-suse-2 qa-suse-3 qa-suse-4 ] * Full List of Resources: * sbd_stonith (stonith:external/sbd): Started qa-suse-2 * Clone Set: c-o2stage [o2stage]: - * Resource Group: o2stage:0: - * dlm (ocf:pacemaker:controld): Started qa-suse-1 - * clvm (ocf:lvm2:clvmd): Started qa-suse-1 - * o2cb (ocf:ocfs2:o2cb): Stopped (disabled) - * cmirror (ocf:lvm2:cmirrord): Stopped - * Resource Group: o2stage:1: - * dlm (ocf:pacemaker:controld): Started qa-suse-4 - * clvm (ocf:lvm2:clvmd): Started qa-suse-4 - * o2cb (ocf:ocfs2:o2cb): Stopped (disabled) - * cmirror 
(ocf:lvm2:cmirrord): Stopped - * Resource Group: o2stage:2: - * dlm (ocf:pacemaker:controld): Started qa-suse-3 - * clvm (ocf:lvm2:clvmd): Started qa-suse-3 - * o2cb (ocf:ocfs2:o2cb): Stopped (disabled) - * cmirror (ocf:lvm2:cmirrord): Stopped - * Resource Group: o2stage:3: - * dlm (ocf:pacemaker:controld): Started qa-suse-2 - * clvm (ocf:lvm2:clvmd): Started qa-suse-2 - * o2cb (ocf:ocfs2:o2cb): Stopped (disabled) - * cmirror (ocf:lvm2:cmirrord): Stopped + * Started: [ qa-suse-1 qa-suse-2 qa-suse-3 qa-suse-4 ] * Clone Set: c-ocfs [ocfs]: - * Stopped: [ qa-suse-1 qa-suse-2 qa-suse-3 qa-suse-4 ] + * Started: [ qa-suse-1 qa-suse-2 qa-suse-3 qa-suse-4 ] diff --git a/cts/scheduler/summary/bug-lf-2435.summary b/cts/scheduler/summary/bug-lf-2435.summary index 2077c2d19f9..b2a4dfb6f8c 100644 --- a/cts/scheduler/summary/bug-lf-2435.summary +++ b/cts/scheduler/summary/bug-lf-2435.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node c20.chepkov.lan: standby (with active resources) * Online: [ c19.chepkov.lan c21.chepkov.lan ] @@ -14,20 +16,16 @@ Transition Summary: * Stop dummy3 ( c21.chepkov.lan ) due to node availability Executing Cluster Transition: - * Resource action: dummy2 stop on c20.chepkov.lan - * Resource action: dummy4 monitor on c21.chepkov.lan - * Resource action: dummy4 monitor on c20.chepkov.lan - * Resource action: dummy4 monitor on c19.chepkov.lan - * Resource action: dummy3 stop on c21.chepkov.lan - * Resource action: dummy2 start on c21.chepkov.lan Revised Cluster Status: + * Cluster Summary: + * Node List: - * Node c20.chepkov.lan: standby + * Node c20.chepkov.lan: standby (with active resources) * Online: [ c19.chepkov.lan c21.chepkov.lan ] * Full List of Resources: * dummy1 (ocf:pacemaker:Dummy): Started c19.chepkov.lan - * dummy2 (ocf:pacemaker:Dummy): Started c21.chepkov.lan + * dummy2 (ocf:pacemaker:Dummy): Started c20.chepkov.lan * dummy4 (ocf:pacemaker:Dummy): Stopped - * dummy3 (ocf:pacemaker:Dummy): Stopped + * dummy3 (ocf:pacemaker:Dummy): Started c21.chepkov.lan diff --git a/cts/scheduler/summary/bug-lf-2445.summary b/cts/scheduler/summary/bug-lf-2445.summary index 6888938cf8d..b429d1790c7 100644 --- a/cts/scheduler/summary/bug-lf-2445.summary +++ b/cts/scheduler/summary/bug-lf-2445.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -11,18 +13,14 @@ Transition Summary: * Move P:1 ( node1 -> node2 ) Executing Cluster Transition: - * Pseudo action: C_stop_0 - * Resource action: P:1 stop on node1 - * Pseudo action: C_stopped_0 - * Pseudo action: C_start_0 - * Resource action: P:1 start on node2 - * Pseudo action: C_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: C [P] (unique): * P:0 (ocf:pacemaker:Dummy): Started node1 - * P:1 (ocf:pacemaker:Dummy): Started node2 + * P:1 (ocf:pacemaker:Dummy): Started node1 diff --git a/cts/scheduler/summary/bug-lf-2453.summary b/cts/scheduler/summary/bug-lf-2453.summary index c8d1bdf43ab..c648bd94368 100644 --- a/cts/scheduler/summary/bug-lf-2453.summary +++ b/cts/scheduler/summary/bug-lf-2453.summary @@ -1,6 +1,8 @@ 2 of 5 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ domu1 domu2 ] @@ -12,30 +14,23 @@ Current cluster status: * Started: [ domu1 domu2 ] Transition Summary: - * Stop PrimitiveResource1 ( domu1 ) due to required CloneResource2 running + * 
Stop PrimitiveResource1 ( domu1 ) due to unrunnable CloneResource2 running * Stop apache:0 ( domu1 ) due to node availability * Stop apache:1 ( domu2 ) due to node availability * Stop DummyResource:0 ( domu1 ) due to unrunnable CloneResource1 running * Stop DummyResource:1 ( domu2 ) due to unrunnable CloneResource1 running Executing Cluster Transition: - * Resource action: PrimitiveResource1 stop on domu1 - * Pseudo action: CloneResource2_stop_0 - * Resource action: DummyResource:1 stop on domu1 - * Resource action: DummyResource:0 stop on domu2 - * Pseudo action: CloneResource2_stopped_0 - * Pseudo action: CloneResource1_stop_0 - * Resource action: apache:1 stop on domu1 - * Resource action: apache:0 stop on domu2 - * Pseudo action: CloneResource1_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ domu1 domu2 ] * Full List of Resources: - * PrimitiveResource1 (ocf:heartbeat:IPaddr2): Stopped + * PrimitiveResource1 (ocf:heartbeat:IPaddr2): Started domu1 * Clone Set: CloneResource1 [apache] (disabled): - * Stopped (disabled): [ domu1 domu2 ] + * Started: [ domu1 domu2 ] * Clone Set: CloneResource2 [DummyResource]: - * Stopped: [ domu1 domu2 ] + * Started: [ domu1 domu2 ] diff --git a/cts/scheduler/summary/bug-lf-2474.summary b/cts/scheduler/summary/bug-lf-2474.summary index 6e2a0720169..057ccde5f22 100644 --- a/cts/scheduler/summary/bug-lf-2474.summary +++ b/cts/scheduler/summary/bug-lf-2474.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ hex-14 ] @@ -11,13 +13,13 @@ Transition Summary: * Start dummy-default-timeout ( hex-14 ) Executing Cluster Transition: - * Resource action: dummy-10s-timeout start on hex-14 - * Resource action: dummy-default-timeout start on hex-14 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ hex-14 ] * Full List of Resources: - * dummy-10s-timeout (ocf:pacemaker:Dummy): Started hex-14 - * dummy-default-timeout (ocf:pacemaker:Dummy): Started hex-14 + * dummy-10s-timeout (ocf:pacemaker:Dummy): Stopped + * dummy-default-timeout (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/bug-lf-2493.summary b/cts/scheduler/summary/bug-lf-2493.summary index 35749b2e8bc..7be35086b54 100644 --- a/cts/scheduler/summary/bug-lf-2493.summary +++ b/cts/scheduler/summary/bug-lf-2493.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ hpn07 hpn08 ] @@ -17,50 +19,18 @@ Transition Summary: * Start p_dummy3 ( hpn08 ) Executing Cluster Transition: - * Resource action: p_dummy2 start on hpn08 - * Resource action: p_dummy3 start on hpn08 - * Resource action: res_Filesystem_nfs_fs1 delete on hpn08 - * Resource action: res_Filesystem_nfs_fs1 delete on hpn07 - * Resource action: res_drbd_nfs:0 delete on hpn08 - * Resource action: res_drbd_nfs:0 delete on hpn07 - * Resource action: res_Filesystem_nfs_fs2 delete on hpn08 - * Resource action: res_Filesystem_nfs_fs2 delete on hpn07 - * Resource action: res_Filesystem_nfs_fs3 delete on hpn08 - * Resource action: res_Filesystem_nfs_fs3 delete on hpn07 - * Resource action: res_exportfs_fs1 delete on hpn08 - * Resource action: res_exportfs_fs1 delete on hpn07 - * Resource action: res_exportfs_fs2 delete on hpn08 - * Resource action: res_exportfs_fs2 delete on hpn07 - * Resource action: res_exportfs_fs3 delete on hpn08 - * Resource action: res_exportfs_fs3 delete on hpn07 - * Resource action: res_drbd_nfs:1 delete on hpn08 - * Resource action: res_drbd_nfs:1 delete on hpn07 - * Resource action: 
res_LVM_nfs delete on hpn08 - * Resource action: res_LVM_nfs delete on hpn07 - * Resource action: res_LVM_p_vg-sap delete on hpn08 - * Resource action: res_LVM_p_vg-sap delete on hpn07 - * Resource action: res_exportfs_rootfs:0 delete on hpn07 - * Resource action: res_IPaddr2_nfs delete on hpn08 - * Resource action: res_IPaddr2_nfs delete on hpn07 - * Resource action: res_drbd_hpn78:0 delete on hpn08 - * Resource action: res_drbd_hpn78:0 delete on hpn07 - * Resource action: res_Filesystem_sap_db delete on hpn08 - * Resource action: res_Filesystem_sap_db delete on hpn07 - * Resource action: res_Filesystem_sap_ci delete on hpn08 - * Resource action: res_Filesystem_sap_ci delete on hpn07 - * Resource action: res_exportfs_rootfs:1 delete on hpn08 - * Resource action: res_drbd_hpn78:1 delete on hpn08 - * Resource action: p_dummy4 start on hpn07 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ hpn07 hpn08 ] * Full List of Resources: * p_dummy1 (ocf:pacemaker:Dummy): Started hpn07 - * p_dummy2 (ocf:pacemaker:Dummy): Started hpn08 - * p_dummy4 (ocf:pacemaker:Dummy): Started hpn07 - * p_dummy3 (ocf:pacemaker:Dummy): Started hpn08 + * p_dummy2 (ocf:pacemaker:Dummy): Stopped + * p_dummy4 (ocf:pacemaker:Dummy): Stopped + * p_dummy3 (ocf:pacemaker:Dummy): Stopped * Clone Set: ms_stateful1 [p_stateful1] (promotable): * Promoted: [ hpn07 ] * Unpromoted: [ hpn08 ] diff --git a/cts/scheduler/summary/bug-lf-2508.summary b/cts/scheduler/summary/bug-lf-2508.summary index 0563f737059..e9090e49b64 100644 --- a/cts/scheduler/summary/bug-lf-2508.summary +++ b/cts/scheduler/summary/bug-lf-2508.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node srv02: UNCLEAN (offline) * Online: [ srv01 srv03 srv04 ] @@ -46,67 +48,46 @@ Transition Summary: * Start prmStonith3-3:1 ( srv01 ) * Stop prmStonith4-1:1 ( srv02 ) due to node availability * Stop prmStonith4-3:1 ( srv02 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Pseudo action: Group01_start_0 - * Pseudo action: clnStonith1_stop_0 - * Resource action: prmStonith3-1:1 monitor=3600000 on srv01 - * Pseudo action: clnStonith3_stop_0 - * Pseudo action: clnStonith4_stop_0 - * Fencing srv02 (reboot) - * Resource action: Dummy01 start on srv01 - * Pseudo action: Group02_stop_0 - * Pseudo action: Dummy02_stop_0 - * Pseudo action: grpStonith1:1_stop_0 - * Pseudo action: prmStonith1-3:1_stop_0 - * Pseudo action: grpStonith3:0_stop_0 - * Pseudo action: prmStonith3-3:1_stop_0 - * Pseudo action: grpStonith4:1_stop_0 - * Pseudo action: prmStonith4-3:1_stop_0 - * Pseudo action: Group01_running_0 - * Resource action: Dummy01 monitor=10000 on srv01 - * Pseudo action: Group02_stopped_0 - * Pseudo action: Group02_start_0 - * Resource action: Dummy02 start on srv04 - * Pseudo action: prmStonith1-1:1_stop_0 - * Pseudo action: prmStonith3-1:1_stop_0 - * Pseudo action: prmStonith4-1:1_stop_0 - * Pseudo action: Group02_running_0 - * Resource action: Dummy02 monitor=10000 on srv04 - * Pseudo action: grpStonith1:1_stopped_0 - * Pseudo action: clnStonith1_stopped_0 - * Pseudo action: grpStonith3:0_stopped_0 - * Pseudo action: clnStonith3_stopped_0 - * Pseudo action: clnStonith3_start_0 - * Pseudo action: grpStonith4:1_stopped_0 - * Pseudo action: clnStonith4_stopped_0 - * Pseudo action: grpStonith3:1_start_0 - * Resource action: prmStonith3-3:1 start on srv01 - * Pseudo action: grpStonith3:1_running_0 - * Resource action: prmStonith3-3:1 monitor=3600000 on 
srv01 - * Pseudo action: clnStonith3_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node srv02: UNCLEAN (offline) * Online: [ srv01 srv03 srv04 ] - * OFFLINE: [ srv02 ] * Full List of Resources: * Resource Group: Group01: - * Dummy01 (ocf:heartbeat:Dummy): Started srv01 + * Dummy01 (ocf:heartbeat:Dummy): Stopped * Resource Group: Group02: - * Dummy02 (ocf:heartbeat:Dummy): Started srv04 + * Dummy02 (ocf:heartbeat:Dummy): Started srv02 (UNCLEAN) * Resource Group: Group03: * Dummy03 (ocf:heartbeat:Dummy): Started srv03 * Clone Set: clnStonith1 [grpStonith1]: + * Resource Group: grpStonith1:1: + * prmStonith1-1 (stonith:external/stonith-helper): Started srv02 (UNCLEAN) + * prmStonith1-3 (stonith:external/ssh): Started srv02 (UNCLEAN) * Started: [ srv03 srv04 ] - * Stopped: [ srv01 srv02 ] + * Stopped: [ srv01 ] * Clone Set: clnStonith2 [grpStonith2]: * Started: [ srv01 srv03 srv04 ] * Stopped: [ srv02 ] * Clone Set: clnStonith3 [grpStonith3]: - * Started: [ srv01 srv04 ] - * Stopped: [ srv02 srv03 ] + * Resource Group: grpStonith3:0: + * prmStonith3-1 (stonith:external/stonith-helper): Started srv02 (UNCLEAN) + * prmStonith3-3 (stonith:external/ssh): Started srv02 (UNCLEAN) + * Resource Group: grpStonith3:1: + * prmStonith3-1 (stonith:external/stonith-helper): Started srv01 + * prmStonith3-3 (stonith:external/ssh): Stopped + * Started: [ srv04 ] + * Stopped: [ srv03 ] * Clone Set: clnStonith4 [grpStonith4]: + * Resource Group: grpStonith4:1: + * prmStonith4-1 (stonith:external/stonith-helper): Started srv02 (UNCLEAN) + * prmStonith4-3 (stonith:external/ssh): Started srv02 (UNCLEAN) * Started: [ srv01 srv03 ] - * Stopped: [ srv02 srv04 ] + * Stopped: [ srv04 ] diff --git a/cts/scheduler/summary/bug-lf-2544.summary b/cts/scheduler/summary/bug-lf-2544.summary index b21de80b409..57264cf4e29 100644 --- a/cts/scheduler/summary/bug-lf-2544.summary +++ b/cts/scheduler/summary/bug-lf-2544.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node-0 node-1 ] @@ -10,15 +12,13 @@ Transition Summary: * Promote s0:1 ( Unpromoted -> Promoted node-1 ) Executing Cluster Transition: - * Pseudo action: ms0_promote_0 - * Resource action: s0:1 promote on node-1 - * Pseudo action: ms0_promoted_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node-0 node-1 ] * Full List of Resources: * Clone Set: ms0 [s0] (promotable): - * Promoted: [ node-1 ] - * Unpromoted: [ node-0 ] + * Unpromoted: [ node-0 node-1 ] diff --git a/cts/scheduler/summary/bug-lf-2551.summary b/cts/scheduler/summary/bug-lf-2551.summary index ebfe1add8dd..9d95ed432cd 100644 --- a/cts/scheduler/summary/bug-lf-2551.summary +++ b/cts/scheduler/summary/bug-lf-2551.summary @@ -1,10 +1,59 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node hex-9: UNCLEAN (offline) * Online: [ hex-0 hex-7 hex-8 ] * Full List of Resources: * vm-00 (ocf:heartbeat:Xen): Started hex-0 + * vm-10 (ocf:heartbeat:Xen): Started hex-0 + * vm-14 (ocf:heartbeat:Xen): Started hex-0 + * vm-18 (ocf:heartbeat:Xen): Started hex-0 + * vm-22 (ocf:heartbeat:Xen): Started hex-0 + * vm-26 (ocf:heartbeat:Xen): Started hex-0 + * vm-30 (ocf:heartbeat:Xen): Started hex-0 + * vm-34 (ocf:heartbeat:Xen): Started hex-0 + * vm-38 (ocf:heartbeat:Xen): Started hex-0 + * vm-42 (ocf:heartbeat:Xen): Started hex-0 + * vm-46 (ocf:heartbeat:Xen): Started hex-0 + * vm-50 (ocf:heartbeat:Xen): Started hex-0 + * vm-54 (ocf:heartbeat:Xen): Started hex-0 + * vm-58 (ocf:heartbeat:Xen): Started hex-0 + * vm-01 
(ocf:heartbeat:Xen): Started hex-7 + * vm-04 (ocf:heartbeat:Xen): Started hex-7 + * vm-07 (ocf:heartbeat:Xen): Started hex-7 + * vm-11 (ocf:heartbeat:Xen): Started hex-7 + * vm-15 (ocf:heartbeat:Xen): Started hex-7 + * vm-19 (ocf:heartbeat:Xen): Started hex-7 + * vm-23 (ocf:heartbeat:Xen): Started hex-7 + * vm-27 (ocf:heartbeat:Xen): Started hex-7 + * vm-31 (ocf:heartbeat:Xen): Started hex-7 + * vm-35 (ocf:heartbeat:Xen): Started hex-7 + * vm-39 (ocf:heartbeat:Xen): Started hex-7 + * vm-43 (ocf:heartbeat:Xen): Started hex-7 + * vm-47 (ocf:heartbeat:Xen): Started hex-7 + * vm-51 (ocf:heartbeat:Xen): Started hex-7 + * vm-55 (ocf:heartbeat:Xen): Started hex-7 + * vm-59 (ocf:heartbeat:Xen): Started hex-7 + * vm-02 (ocf:heartbeat:Xen): Started hex-8 + * vm-05 (ocf:heartbeat:Xen): Started hex-8 + * vm-08 (ocf:heartbeat:Xen): Started hex-8 + * vm-12 (ocf:heartbeat:Xen): Started hex-8 + * vm-16 (ocf:heartbeat:Xen): Started hex-8 + * vm-20 (ocf:heartbeat:Xen): Started hex-8 + * vm-24 (ocf:heartbeat:Xen): Started hex-8 + * vm-28 (ocf:heartbeat:Xen): Started hex-8 + * vm-32 (ocf:heartbeat:Xen): Started hex-8 + * vm-36 (ocf:heartbeat:Xen): Started hex-8 + * vm-40 (ocf:heartbeat:Xen): Started hex-8 + * vm-44 (ocf:heartbeat:Xen): Started hex-8 + * vm-48 (ocf:heartbeat:Xen): Started hex-8 + * vm-52 (ocf:heartbeat:Xen): Started hex-8 + * vm-56 (ocf:heartbeat:Xen): Started hex-8 + * vm-60 (ocf:heartbeat:Xen): Started hex-8 + * fencing-sbd (stonith:external/sbd): Started hex-9 (UNCLEAN) + * dummy1 (ocf:heartbeat:Dummy): Started hex-9 (UNCLEAN) * Clone Set: base-clone [base-group]: * Resource Group: base-group:3: * dlm (ocf:pacemaker:controld): Started hex-9 (UNCLEAN) @@ -14,68 +63,21 @@ Current cluster status: * vg1 (ocf:heartbeat:LVM): Started hex-9 (UNCLEAN) * ocfs2-1 (ocf:heartbeat:Filesystem): Started hex-9 (UNCLEAN) * Started: [ hex-0 hex-7 hex-8 ] - * vm-01 (ocf:heartbeat:Xen): Started hex-7 - * vm-02 (ocf:heartbeat:Xen): Started hex-8 * vm-03 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) - * vm-04 (ocf:heartbeat:Xen): Started hex-7 - * vm-05 (ocf:heartbeat:Xen): Started hex-8 - * fencing-sbd (stonith:external/sbd): Started hex-9 (UNCLEAN) * vm-06 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) - * vm-07 (ocf:heartbeat:Xen): Started hex-7 - * vm-08 (ocf:heartbeat:Xen): Started hex-8 * vm-09 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) - * vm-10 (ocf:heartbeat:Xen): Started hex-0 - * vm-11 (ocf:heartbeat:Xen): Started hex-7 - * vm-12 (ocf:heartbeat:Xen): Started hex-8 * vm-13 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) - * vm-14 (ocf:heartbeat:Xen): Started hex-0 - * vm-15 (ocf:heartbeat:Xen): Started hex-7 - * vm-16 (ocf:heartbeat:Xen): Started hex-8 * vm-17 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) - * vm-18 (ocf:heartbeat:Xen): Started hex-0 - * vm-19 (ocf:heartbeat:Xen): Started hex-7 - * vm-20 (ocf:heartbeat:Xen): Started hex-8 * vm-21 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) - * vm-22 (ocf:heartbeat:Xen): Started hex-0 - * vm-23 (ocf:heartbeat:Xen): Started hex-7 - * vm-24 (ocf:heartbeat:Xen): Started hex-8 * vm-25 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) - * vm-26 (ocf:heartbeat:Xen): Started hex-0 - * vm-27 (ocf:heartbeat:Xen): Started hex-7 - * vm-28 (ocf:heartbeat:Xen): Started hex-8 * vm-29 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) - * vm-30 (ocf:heartbeat:Xen): Started hex-0 - * vm-31 (ocf:heartbeat:Xen): Started hex-7 - * vm-32 (ocf:heartbeat:Xen): Started hex-8 - * dummy1 (ocf:heartbeat:Dummy): Started hex-9 (UNCLEAN) * vm-33 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) - * 
vm-34 (ocf:heartbeat:Xen): Started hex-0 - * vm-35 (ocf:heartbeat:Xen): Started hex-7 - * vm-36 (ocf:heartbeat:Xen): Started hex-8 * vm-37 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) - * vm-38 (ocf:heartbeat:Xen): Started hex-0 - * vm-39 (ocf:heartbeat:Xen): Started hex-7 - * vm-40 (ocf:heartbeat:Xen): Started hex-8 * vm-41 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) - * vm-42 (ocf:heartbeat:Xen): Started hex-0 - * vm-43 (ocf:heartbeat:Xen): Started hex-7 - * vm-44 (ocf:heartbeat:Xen): Started hex-8 * vm-45 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) - * vm-46 (ocf:heartbeat:Xen): Started hex-0 - * vm-47 (ocf:heartbeat:Xen): Started hex-7 - * vm-48 (ocf:heartbeat:Xen): Started hex-8 * vm-49 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) - * vm-50 (ocf:heartbeat:Xen): Started hex-0 - * vm-51 (ocf:heartbeat:Xen): Started hex-7 - * vm-52 (ocf:heartbeat:Xen): Started hex-8 * vm-53 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) - * vm-54 (ocf:heartbeat:Xen): Started hex-0 - * vm-55 (ocf:heartbeat:Xen): Started hex-7 - * vm-56 (ocf:heartbeat:Xen): Started hex-8 * vm-57 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) - * vm-58 (ocf:heartbeat:Xen): Started hex-0 - * vm-59 (ocf:heartbeat:Xen): Started hex-7 - * vm-60 (ocf:heartbeat:Xen): Started hex-8 * vm-61 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) * vm-62 (ocf:heartbeat:Xen): Stopped * vm-63 (ocf:heartbeat:Xen): Stopped @@ -83,14 +85,14 @@ Current cluster status: Transition Summary: * Fence (reboot) hex-9 'peer is no longer part of the cluster' - * Move fencing-sbd ( hex-9 -> hex-0 ) - * Move dummy1 ( hex-9 -> hex-0 ) * Stop dlm:3 ( hex-9 ) due to node availability * Stop o2cb:3 ( hex-9 ) due to node availability * Stop clvm:3 ( hex-9 ) due to node availability * Stop cmirrord:3 ( hex-9 ) due to node availability * Stop vg1:3 ( hex-9 ) due to node availability * Stop ocfs2-1:3 ( hex-9 ) due to node availability + * Move fencing-sbd ( hex-9 -> hex-0 ) + * Move dummy1 ( hex-9 -> hex-0 ) * Stop vm-03 ( hex-9 ) due to node availability * Stop vm-06 ( hex-9 ) due to node availability * Stop vm-09 ( hex-9 ) due to node availability @@ -107,120 +109,92 @@ Transition Summary: * Stop vm-53 ( hex-9 ) due to node availability * Stop vm-57 ( hex-9 ) due to node availability * Stop vm-61 ( hex-9 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Pseudo action: fencing-sbd_stop_0 - * Resource action: dummy1 monitor=300000 on hex-8 - * Resource action: dummy1 monitor=300000 on hex-7 - * Pseudo action: load_stopped_hex-8 - * Pseudo action: load_stopped_hex-7 - * Pseudo action: load_stopped_hex-0 - * Fencing hex-9 (reboot) - * Resource action: fencing-sbd start on hex-0 - * Pseudo action: dummy1_stop_0 - * Pseudo action: vm-03_stop_0 - * Pseudo action: vm-06_stop_0 - * Pseudo action: vm-09_stop_0 - * Pseudo action: vm-13_stop_0 - * Pseudo action: vm-17_stop_0 - * Pseudo action: vm-21_stop_0 - * Pseudo action: vm-25_stop_0 - * Pseudo action: vm-29_stop_0 - * Pseudo action: vm-33_stop_0 - * Pseudo action: vm-37_stop_0 - * Pseudo action: vm-41_stop_0 - * Pseudo action: vm-45_stop_0 - * Pseudo action: vm-49_stop_0 - * Pseudo action: vm-53_stop_0 - * Pseudo action: vm-57_stop_0 - * Pseudo action: vm-61_stop_0 - * Pseudo action: load_stopped_hex-9 - * Resource action: dummy1 start on hex-0 - * Pseudo action: base-clone_stop_0 - * Resource action: dummy1 monitor=30000 on hex-0 - * Pseudo action: base-group:3_stop_0 - * Pseudo action: ocfs2-1:3_stop_0 - * Pseudo action: vg1:3_stop_0 - * Pseudo 
action: cmirrord:3_stop_0 - * Pseudo action: clvm:3_stop_0 - * Pseudo action: o2cb:3_stop_0 - * Pseudo action: dlm:3_stop_0 - * Pseudo action: base-group:3_stopped_0 - * Pseudo action: base-clone_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node hex-9: UNCLEAN (offline) * Online: [ hex-0 hex-7 hex-8 ] - * OFFLINE: [ hex-9 ] * Full List of Resources: * vm-00 (ocf:heartbeat:Xen): Started hex-0 - * Clone Set: base-clone [base-group]: - * Started: [ hex-0 hex-7 hex-8 ] - * Stopped: [ hex-9 ] + * vm-10 (ocf:heartbeat:Xen): Started hex-0 + * vm-14 (ocf:heartbeat:Xen): Started hex-0 + * vm-18 (ocf:heartbeat:Xen): Started hex-0 + * vm-22 (ocf:heartbeat:Xen): Started hex-0 + * vm-26 (ocf:heartbeat:Xen): Started hex-0 + * vm-30 (ocf:heartbeat:Xen): Started hex-0 + * vm-34 (ocf:heartbeat:Xen): Started hex-0 + * vm-38 (ocf:heartbeat:Xen): Started hex-0 + * vm-42 (ocf:heartbeat:Xen): Started hex-0 + * vm-46 (ocf:heartbeat:Xen): Started hex-0 + * vm-50 (ocf:heartbeat:Xen): Started hex-0 + * vm-54 (ocf:heartbeat:Xen): Started hex-0 + * vm-58 (ocf:heartbeat:Xen): Started hex-0 * vm-01 (ocf:heartbeat:Xen): Started hex-7 - * vm-02 (ocf:heartbeat:Xen): Started hex-8 - * vm-03 (ocf:heartbeat:Xen): Stopped * vm-04 (ocf:heartbeat:Xen): Started hex-7 - * vm-05 (ocf:heartbeat:Xen): Started hex-8 - * fencing-sbd (stonith:external/sbd): Started hex-0 - * vm-06 (ocf:heartbeat:Xen): Stopped * vm-07 (ocf:heartbeat:Xen): Started hex-7 - * vm-08 (ocf:heartbeat:Xen): Started hex-8 - * vm-09 (ocf:heartbeat:Xen): Stopped - * vm-10 (ocf:heartbeat:Xen): Started hex-0 * vm-11 (ocf:heartbeat:Xen): Started hex-7 - * vm-12 (ocf:heartbeat:Xen): Started hex-8 - * vm-13 (ocf:heartbeat:Xen): Stopped - * vm-14 (ocf:heartbeat:Xen): Started hex-0 * vm-15 (ocf:heartbeat:Xen): Started hex-7 - * vm-16 (ocf:heartbeat:Xen): Started hex-8 - * vm-17 (ocf:heartbeat:Xen): Stopped - * vm-18 (ocf:heartbeat:Xen): Started hex-0 * vm-19 (ocf:heartbeat:Xen): Started hex-7 - * vm-20 (ocf:heartbeat:Xen): Started hex-8 - * vm-21 (ocf:heartbeat:Xen): Stopped - * vm-22 (ocf:heartbeat:Xen): Started hex-0 * vm-23 (ocf:heartbeat:Xen): Started hex-7 - * vm-24 (ocf:heartbeat:Xen): Started hex-8 - * vm-25 (ocf:heartbeat:Xen): Stopped - * vm-26 (ocf:heartbeat:Xen): Started hex-0 * vm-27 (ocf:heartbeat:Xen): Started hex-7 - * vm-28 (ocf:heartbeat:Xen): Started hex-8 - * vm-29 (ocf:heartbeat:Xen): Stopped - * vm-30 (ocf:heartbeat:Xen): Started hex-0 * vm-31 (ocf:heartbeat:Xen): Started hex-7 - * vm-32 (ocf:heartbeat:Xen): Started hex-8 - * dummy1 (ocf:heartbeat:Dummy): Started hex-0 - * vm-33 (ocf:heartbeat:Xen): Stopped - * vm-34 (ocf:heartbeat:Xen): Started hex-0 * vm-35 (ocf:heartbeat:Xen): Started hex-7 - * vm-36 (ocf:heartbeat:Xen): Started hex-8 - * vm-37 (ocf:heartbeat:Xen): Stopped - * vm-38 (ocf:heartbeat:Xen): Started hex-0 * vm-39 (ocf:heartbeat:Xen): Started hex-7 - * vm-40 (ocf:heartbeat:Xen): Started hex-8 - * vm-41 (ocf:heartbeat:Xen): Stopped - * vm-42 (ocf:heartbeat:Xen): Started hex-0 * vm-43 (ocf:heartbeat:Xen): Started hex-7 - * vm-44 (ocf:heartbeat:Xen): Started hex-8 - * vm-45 (ocf:heartbeat:Xen): Stopped - * vm-46 (ocf:heartbeat:Xen): Started hex-0 * vm-47 (ocf:heartbeat:Xen): Started hex-7 - * vm-48 (ocf:heartbeat:Xen): Started hex-8 - * vm-49 (ocf:heartbeat:Xen): Stopped - * vm-50 (ocf:heartbeat:Xen): Started hex-0 * vm-51 (ocf:heartbeat:Xen): Started hex-7 - * vm-52 (ocf:heartbeat:Xen): Started hex-8 - * vm-53 (ocf:heartbeat:Xen): Stopped - * vm-54 (ocf:heartbeat:Xen): Started hex-0 * vm-55 
(ocf:heartbeat:Xen): Started hex-7 - * vm-56 (ocf:heartbeat:Xen): Started hex-8 - * vm-57 (ocf:heartbeat:Xen): Stopped - * vm-58 (ocf:heartbeat:Xen): Started hex-0 * vm-59 (ocf:heartbeat:Xen): Started hex-7 + * vm-02 (ocf:heartbeat:Xen): Started hex-8 + * vm-05 (ocf:heartbeat:Xen): Started hex-8 + * vm-08 (ocf:heartbeat:Xen): Started hex-8 + * vm-12 (ocf:heartbeat:Xen): Started hex-8 + * vm-16 (ocf:heartbeat:Xen): Started hex-8 + * vm-20 (ocf:heartbeat:Xen): Started hex-8 + * vm-24 (ocf:heartbeat:Xen): Started hex-8 + * vm-28 (ocf:heartbeat:Xen): Started hex-8 + * vm-32 (ocf:heartbeat:Xen): Started hex-8 + * vm-36 (ocf:heartbeat:Xen): Started hex-8 + * vm-40 (ocf:heartbeat:Xen): Started hex-8 + * vm-44 (ocf:heartbeat:Xen): Started hex-8 + * vm-48 (ocf:heartbeat:Xen): Started hex-8 + * vm-52 (ocf:heartbeat:Xen): Started hex-8 + * vm-56 (ocf:heartbeat:Xen): Started hex-8 * vm-60 (ocf:heartbeat:Xen): Started hex-8 - * vm-61 (ocf:heartbeat:Xen): Stopped + * fencing-sbd (stonith:external/sbd): Started hex-9 (UNCLEAN) + * dummy1 (ocf:heartbeat:Dummy): Started hex-9 (UNCLEAN) + * Clone Set: base-clone [base-group]: + * Resource Group: base-group:3: + * dlm (ocf:pacemaker:controld): Started hex-9 (UNCLEAN) + * o2cb (ocf:ocfs2:o2cb): Started hex-9 (UNCLEAN) + * clvm (ocf:lvm2:clvmd): Started hex-9 (UNCLEAN) + * cmirrord (ocf:lvm2:cmirrord): Started hex-9 (UNCLEAN) + * vg1 (ocf:heartbeat:LVM): Started hex-9 (UNCLEAN) + * ocfs2-1 (ocf:heartbeat:Filesystem): Started hex-9 (UNCLEAN) + * Started: [ hex-0 hex-7 hex-8 ] + * vm-03 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) + * vm-06 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) + * vm-09 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) + * vm-13 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) + * vm-17 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) + * vm-21 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) + * vm-25 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) + * vm-29 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) + * vm-33 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) + * vm-37 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) + * vm-41 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) + * vm-45 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) + * vm-49 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) + * vm-53 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) + * vm-57 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) + * vm-61 (ocf:heartbeat:Xen): Started hex-9 (UNCLEAN) * vm-62 (ocf:heartbeat:Xen): Stopped * vm-63 (ocf:heartbeat:Xen): Stopped * vm-64 (ocf:heartbeat:Xen): Stopped diff --git a/cts/scheduler/summary/bug-lf-2574.summary b/cts/scheduler/summary/bug-lf-2574.summary index fb01cde7275..eb419493229 100644 --- a/cts/scheduler/summary/bug-lf-2574.summary +++ b/cts/scheduler/summary/bug-lf-2574.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ srv01 srv02 srv03 ] @@ -14,25 +16,22 @@ Current cluster status: Transition Summary: * Move main_rsc ( srv01 -> srv03 ) * Stop prmPingd:0 ( srv01 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Resource action: main_rsc stop on srv01 - * Pseudo action: clnPingd_stop_0 - * Resource action: main_rsc start on srv03 - * Resource action: prmPingd:0 stop on srv01 - * Pseudo action: clnPingd_stopped_0 - * Resource action: main_rsc monitor=10000 on srv03 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ srv01 srv02 srv03 ] * Full List of Resources: - * main_rsc (ocf:pacemaker:Dummy): Started srv03 + * main_rsc 
(ocf:pacemaker:Dummy): Started srv01
     * main_rsc2 (ocf:pacemaker:Dummy): Started srv02
     * Clone Set: clnDummy1 [prmDummy1]:
       * Started: [ srv02 srv03 ]
       * Stopped: [ srv01 ]
     * Clone Set: clnPingd [prmPingd]:
-      * Started: [ srv02 srv03 ]
-      * Stopped: [ srv01 ]
+      * Started: [ srv01 srv02 srv03 ]
diff --git a/cts/scheduler/summary/bug-lf-2581.summary b/cts/scheduler/summary/bug-lf-2581.summary
index dbcf54553f3..b05677e9d50 100644
--- a/cts/scheduler/summary/bug-lf-2581.summary
+++ b/cts/scheduler/summary/bug-lf-2581.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ elvis queen ]
@@ -22,33 +24,17 @@ Transition Summary:
   * Start stonith-l2network:1 ( queen )

 Executing Cluster Transition:
-  * Resource action: A:1 monitor on queen
-  * Resource action: Z:1 monitor on queen
-  * Pseudo action: AZ-clone_start_0
-  * Resource action: B-1 monitor on queen
-  * Resource action: C-1 monitor on queen
-  * Resource action: B-2 monitor on queen
-  * Resource action: C-2 monitor on queen
-  * Resource action: stonith-l2network:1 monitor on queen
-  * Pseudo action: stonith-l2network-set_start_0
-  * Pseudo action: AZ-group:1_start_0
-  * Resource action: A:1 start on queen
-  * Resource action: Z:1 start on queen
-  * Resource action: stonith-l2network:1 start on queen
-  * Pseudo action: stonith-l2network-set_running_0
-  * Pseudo action: AZ-group:1_running_0
-  * Resource action: A:1 monitor=120000 on queen
-  * Resource action: Z:1 monitor=120000 on queen
-  * Pseudo action: AZ-clone_running_0
-  * Resource action: stonith-l2network:1 monitor=300000 on queen

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ elvis queen ]

   * Full List of Resources:
     * Clone Set: AZ-clone [AZ-group]:
-      * Started: [ elvis queen ]
+      * Started: [ elvis ]
+      * Stopped: [ queen ]
     * Resource Group: BC-group-1:
       * B-1 (ocf:rgk:typeB): Started elvis
       * C-1 (ocf:rgk:typeC): Started elvis
@@ -56,4 +42,5 @@ Revised Cluster Status:
       * B-2 (ocf:rgk:typeB): Started elvis
       * C-2 (ocf:rgk:typeC): Started elvis
     * Clone Set: stonith-l2network-set [stonith-l2network]:
-      * Started: [ elvis queen ]
+      * Started: [ elvis ]
+      * Stopped: [ queen ]
diff --git a/cts/scheduler/summary/bug-lf-2606.summary b/cts/scheduler/summary/bug-lf-2606.summary
index e0b7ebf0e68..9f9c82c9cec 100644
--- a/cts/scheduler/summary/bug-lf-2606.summary
+++ b/cts/scheduler/summary/bug-lf-2606.summary
@@ -1,6 +1,9 @@
 1 of 5 resource instances DISABLED and 0 BLOCKED from further action due to failure

 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Node node2: UNCLEAN (online)
     * Online: [ node1 ]
@@ -18,29 +21,23 @@ Transition Summary:
   * Stop rsc1 ( node2 ) due to node availability
   * Move rsc2 ( node2 -> node1 )
   * Stop rsc3:1 ( Promoted node2 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced

 Executing Cluster Transition:
-  * Pseudo action: ms3_demote_0
-  * Fencing node2 (reboot)
-  * Pseudo action: rsc1_stop_0
-  * Pseudo action: rsc2_stop_0
-  * Pseudo action: rsc3:1_demote_0
-  * Pseudo action: ms3_demoted_0
-  * Pseudo action: ms3_stop_0
-  * Resource action: rsc2 start on node1
-  * Pseudo action: rsc3:1_stop_0
-  * Pseudo action: ms3_stopped_0
-  * Resource action: rsc2 monitor=10000 on node1

 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
+    * Node node2: UNCLEAN (online)
     * Online: [ node1 ]
-    * OFFLINE: [ node2 ]

   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
-    * rsc1 (ocf:pacemaker:Dummy): Stopped (disabled)
-    * rsc2 (ocf:pacemaker:Dummy): Started node1
+    * rsc1 (ocf:pacemaker:Dummy): FAILED node2 (disabled)
+    * rsc2 (ocf:pacemaker:Dummy): Started node2
     * Clone Set: ms3 [rsc3] (promotable):
+      * Promoted: [ node2 ]
       * Unpromoted: [ node1 ]
-      * Stopped: [ node2 ]
diff --git a/cts/scheduler/summary/bug-lf-2619.summary b/cts/scheduler/summary/bug-lf-2619.summary
index 5eeb72e7a6b..4bd552e90d1 100644
--- a/cts/scheduler/summary/bug-lf-2619.summary
+++ b/cts/scheduler/summary/bug-lf-2619.summary
@@ -1,15 +1,11 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ act1 act2 act3 sby1 sby2 ]

   * Full List of Resources:
-    * Resource Group: grpPostgreSQLDB1:
-      * prmExPostgreSQLDB1 (ocf:pacemaker:Dummy): Started act1
-      * prmFsPostgreSQLDB1-1 (ocf:pacemaker:Dummy): Started act1
-      * prmFsPostgreSQLDB1-2 (ocf:pacemaker:Dummy): Started act1
-      * prmFsPostgreSQLDB1-3 (ocf:pacemaker:Dummy): Started act1
-      * prmIpPostgreSQLDB1 (ocf:pacemaker:Dummy): Started act1
-      * prmApPostgreSQLDB1 (ocf:pacemaker:Dummy): Started act1
     * Resource Group: grpPostgreSQLDB2:
       * prmExPostgreSQLDB2 (ocf:pacemaker:Dummy): Started act2
       * prmFsPostgreSQLDB2-1 (ocf:pacemaker:Dummy): Started act2
@@ -24,63 +20,38 @@ Current cluster status:
       * prmFsPostgreSQLDB3-3 (ocf:pacemaker:Dummy): Started act3
       * prmIpPostgreSQLDB3 (ocf:pacemaker:Dummy): Started act3
       * prmApPostgreSQLDB3 (ocf:pacemaker:Dummy): Started act3
+    * Resource Group: grpPostgreSQLDB1:
+      * prmExPostgreSQLDB1 (ocf:pacemaker:Dummy): Started act1
+      * prmFsPostgreSQLDB1-1 (ocf:pacemaker:Dummy): Started act1
+      * prmFsPostgreSQLDB1-2 (ocf:pacemaker:Dummy): Started act1
+      * prmFsPostgreSQLDB1-3 (ocf:pacemaker:Dummy): Started act1
+      * prmIpPostgreSQLDB1 (ocf:pacemaker:Dummy): Started act1
+      * prmApPostgreSQLDB1 (ocf:pacemaker:Dummy): Started act1
     * Clone Set: clnPingd [prmPingd]:
       * prmPingd (ocf:pacemaker:ping): FAILED act1
       * Started: [ act2 act3 sby1 sby2 ]

 Transition Summary:
+  * Stop prmPingd:0 ( act1 ) due to node availability
   * Move prmExPostgreSQLDB1 ( act1 -> sby1 )
   * Move prmFsPostgreSQLDB1-1 ( act1 -> sby1 )
   * Move prmFsPostgreSQLDB1-2 ( act1 -> sby1 )
   * Move prmFsPostgreSQLDB1-3 ( act1 -> sby1 )
   * Move prmIpPostgreSQLDB1 ( act1 -> sby1 )
   * Move prmApPostgreSQLDB1 ( act1 -> sby1 )
-  * Stop prmPingd:0 ( act1 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced

 Executing Cluster Transition:
-  * Pseudo action: grpPostgreSQLDB1_stop_0
-  * Resource action: prmApPostgreSQLDB1 stop on act1
-  * Pseudo action: load_stopped_sby2
-  * Pseudo action: load_stopped_sby1
-  * Pseudo action: load_stopped_act3
-  * Pseudo action: load_stopped_act2
-  * Resource action: prmIpPostgreSQLDB1 stop on act1
-  * Resource action: prmFsPostgreSQLDB1-3 stop on act1
-  * Resource action: prmFsPostgreSQLDB1-2 stop on act1
-  * Resource action: prmFsPostgreSQLDB1-1 stop on act1
-  * Resource action: prmExPostgreSQLDB1 stop on act1
-  * Pseudo action: load_stopped_act1
-  * Pseudo action: grpPostgreSQLDB1_stopped_0
-  * Pseudo action: grpPostgreSQLDB1_start_0
-  * Resource action: prmExPostgreSQLDB1 start on sby1
-  * Resource action: prmFsPostgreSQLDB1-1 start on sby1
-  * Resource action: prmFsPostgreSQLDB1-2 start on sby1
-  * Resource action: prmFsPostgreSQLDB1-3 start on sby1
-  * Resource action: prmIpPostgreSQLDB1 start on sby1
-  * Resource action: prmApPostgreSQLDB1 start on sby1
-  * Pseudo action: clnPingd_stop_0
-  * Pseudo action: grpPostgreSQLDB1_running_0
-  * Resource action: prmExPostgreSQLDB1 monitor=5000 on sby1
-  * Resource action: prmFsPostgreSQLDB1-1 monitor=5000 on sby1
-  * Resource action: prmFsPostgreSQLDB1-2 monitor=5000 on sby1
-  * Resource action: prmFsPostgreSQLDB1-3 monitor=5000 on sby1
-  * Resource action: prmIpPostgreSQLDB1 monitor=5000 on sby1
-  * Resource action: prmApPostgreSQLDB1 monitor=5000 on sby1
-  * Resource action: prmPingd:0 stop on act1
-  * Pseudo action: clnPingd_stopped_0

 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ act1 act2 act3 sby1 sby2 ]

   * Full List of Resources:
-    * Resource Group: grpPostgreSQLDB1:
-      * prmExPostgreSQLDB1 (ocf:pacemaker:Dummy): Started sby1
-      * prmFsPostgreSQLDB1-1 (ocf:pacemaker:Dummy): Started sby1
-      * prmFsPostgreSQLDB1-2 (ocf:pacemaker:Dummy): Started sby1
-      * prmFsPostgreSQLDB1-3 (ocf:pacemaker:Dummy): Started sby1
-      * prmIpPostgreSQLDB1 (ocf:pacemaker:Dummy): Started sby1
-      * prmApPostgreSQLDB1 (ocf:pacemaker:Dummy): Started sby1
     * Resource Group: grpPostgreSQLDB2:
       * prmExPostgreSQLDB2 (ocf:pacemaker:Dummy): Started act2
       * prmFsPostgreSQLDB2-1 (ocf:pacemaker:Dummy): Started act2
@@ -95,6 +66,13 @@ Revised Cluster Status:
       * prmFsPostgreSQLDB3-3 (ocf:pacemaker:Dummy): Started act3
       * prmIpPostgreSQLDB3 (ocf:pacemaker:Dummy): Started act3
       * prmApPostgreSQLDB3 (ocf:pacemaker:Dummy): Started act3
+    * Resource Group: grpPostgreSQLDB1:
+      * prmExPostgreSQLDB1 (ocf:pacemaker:Dummy): Started act1
+      * prmFsPostgreSQLDB1-1 (ocf:pacemaker:Dummy): Started act1
+      * prmFsPostgreSQLDB1-2 (ocf:pacemaker:Dummy): Started act1
+      * prmFsPostgreSQLDB1-3 (ocf:pacemaker:Dummy): Started act1
+      * prmIpPostgreSQLDB1 (ocf:pacemaker:Dummy): Started act1
+      * prmApPostgreSQLDB1 (ocf:pacemaker:Dummy): Started act1
     * Clone Set: clnPingd [prmPingd]:
+      * prmPingd (ocf:pacemaker:ping): FAILED act1
       * Started: [ act2 act3 sby1 sby2 ]
-      * Stopped: [ act1 ]
diff --git a/cts/scheduler/summary/bug-n-385265-2.summary b/cts/scheduler/summary/bug-n-385265-2.summary
index 8fe5130933f..4a096fd2a22 100644
--- a/cts/scheduler/summary/bug-n-385265-2.summary
+++ b/cts/scheduler/summary/bug-n-385265-2.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ ih01 ih02 ]
@@ -12,22 +14,14 @@ Transition Summary:
   * Move resource_idvscommon ( ih02 -> ih01 )

 Executing Cluster Transition:
-  * Pseudo action: group_common_stop_0
-  * Resource action: resource_idvscommon stop on ih02
-  * Resource action: resource_ip_common stop on ih02
-  * Pseudo action: group_common_stopped_0
-  * Pseudo action: group_common_start_0
-  * Resource action: resource_ip_common start on ih01
-  * Resource action: resource_idvscommon start on ih01
-  * Pseudo action: group_common_running_0
-  * Resource action: resource_ip_common monitor=30000 on ih01
-  * Resource action: resource_idvscommon monitor=30000 on ih01

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ ih01 ih02 ]

   * Full List of Resources:
     * Resource Group: group_common:
-      * resource_ip_common (ocf:heartbeat:IPaddr2): Started ih01
-      * resource_idvscommon (ocf:dfs:idvs): Started ih01
+      * resource_ip_common (ocf:heartbeat:IPaddr2): FAILED ih02
+      * resource_idvscommon (ocf:dfs:idvs): Started ih02
diff --git a/cts/scheduler/summary/bug-n-385265.summary b/cts/scheduler/summary/bug-n-385265.summary
index 56b39240552..80dc5a588c7 100644
--- a/cts/scheduler/summary/bug-n-385265.summary
+++ b/cts/scheduler/summary/bug-n-385265.summary
@@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ ih01 ih02 ] @@ -9,17 +11,18 @@ Current cluster status: Transition Summary: * Stop resource_idvscommon ( ih02 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Pseudo action: group_common_stop_0 - * Resource action: resource_idvscommon stop on ih02 - * Pseudo action: group_common_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ ih01 ih02 ] * Full List of Resources: * Resource Group: group_common: * resource_ip_common (ocf:heartbeat:IPaddr2): Started ih02 - * resource_idvscommon (ocf:dfs:idvs): Stopped + * resource_idvscommon (ocf:dfs:idvs): FAILED ih02 diff --git a/cts/scheduler/summary/bug-n-387749.summary b/cts/scheduler/summary/bug-n-387749.summary index 17275a12205..1df96aa175d 100644 --- a/cts/scheduler/summary/bug-n-387749.summary +++ b/cts/scheduler/summary/bug-n-387749.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ power720-1 power720-2 ] * OFFLINE: [ power720-4 ] @@ -18,42 +20,20 @@ Transition Summary: * Move resource_nfsserver_single ( power720-2 -> power720-1 ) Executing Cluster Transition: - * Resource action: export_home_ocfs2:0 monitor on power720-1 - * Resource action: export_home_ocfs2:1 monitor on power720-1 - * Resource action: export_home_ocfs2:2 monitor on power720-1 - * Pseudo action: export_home_ocfs2_clone_set_pre_notify_start_0 - * Pseudo action: group_nfs_stop_0 - * Resource action: resource_ipaddr1_single monitor on power720-1 - * Resource action: resource_nfsserver_single stop on power720-2 - * Resource action: resource_nfsserver_single monitor on power720-1 - * Resource action: export_home_ocfs2:1 notify on power720-2 - * Pseudo action: export_home_ocfs2_clone_set_confirmed-pre_notify_start_0 - * Pseudo action: export_home_ocfs2_clone_set_start_0 - * Resource action: resource_ipaddr1_single stop on power720-2 - * Resource action: export_home_ocfs2:0 start on power720-1 - * Pseudo action: export_home_ocfs2_clone_set_running_0 - * Pseudo action: group_nfs_stopped_0 - * Pseudo action: export_home_ocfs2_clone_set_post_notify_running_0 - * Resource action: export_home_ocfs2:0 notify on power720-1 * Resource action: export_home_ocfs2:1 notify on power720-2 - * Pseudo action: export_home_ocfs2_clone_set_confirmed-post_notify_running_0 - * Pseudo action: group_nfs_start_0 - * Resource action: resource_ipaddr1_single start on power720-1 - * Resource action: resource_nfsserver_single start on power720-1 - * Pseudo action: group_nfs_running_0 - * Resource action: resource_ipaddr1_single monitor=5000 on power720-1 - * Resource action: resource_nfsserver_single monitor=15000 on power720-1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ power720-1 power720-2 ] * OFFLINE: [ power720-4 ] * Full List of Resources: * Clone Set: export_home_ocfs2_clone_set [export_home_ocfs2] (unique): - * export_home_ocfs2:0 (ocf:heartbeat:Filesystem): Started power720-1 + * export_home_ocfs2:0 (ocf:heartbeat:Filesystem): Stopped * export_home_ocfs2:1 (ocf:heartbeat:Filesystem): Started power720-2 * export_home_ocfs2:2 (ocf:heartbeat:Filesystem): Stopped * Resource Group: group_nfs: - * resource_ipaddr1_single (ocf:heartbeat:IPaddr): Started power720-1 - * resource_nfsserver_single (lsb:nfsserver): Started power720-1 + * resource_ipaddr1_single (ocf:heartbeat:IPaddr): Started power720-2 + * resource_nfsserver_single 
(lsb:nfsserver): Started power720-2 diff --git a/cts/scheduler/summary/bug-pm-11.summary b/cts/scheduler/summary/bug-pm-11.summary index c3f8f5b3af0..c5295dcad43 100644 --- a/cts/scheduler/summary/bug-pm-11.summary +++ b/cts/scheduler/summary/bug-pm-11.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node-a node-b ] @@ -16,25 +19,11 @@ Transition Summary: * Promote stateful-2:1 ( Stopped -> Promoted node-a ) Executing Cluster Transition: - * Resource action: stateful-2:0 monitor on node-b - * Resource action: stateful-2:0 monitor on node-a - * Resource action: stateful-2:1 monitor on node-b - * Resource action: stateful-2:1 monitor on node-a - * Pseudo action: ms-sf_start_0 - * Pseudo action: group:0_start_0 - * Resource action: stateful-2:0 start on node-b - * Pseudo action: group:1_start_0 - * Resource action: stateful-2:1 start on node-a - * Pseudo action: group:0_running_0 - * Pseudo action: group:1_running_0 - * Pseudo action: ms-sf_running_0 - * Pseudo action: ms-sf_promote_0 - * Pseudo action: group:1_promote_0 - * Resource action: stateful-2:1 promote on node-a - * Pseudo action: group:1_promoted_0 - * Pseudo action: ms-sf_promoted_0 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node-a node-b ] @@ -42,7 +31,7 @@ Revised Cluster Status: * Clone Set: ms-sf [group] (promotable, unique): * Resource Group: group:0: * stateful-1:0 (ocf:heartbeat:Stateful): Unpromoted node-b - * stateful-2:0 (ocf:heartbeat:Stateful): Unpromoted node-b + * stateful-2:0 (ocf:heartbeat:Stateful): Stopped * Resource Group: group:1: * stateful-1:1 (ocf:heartbeat:Stateful): Promoted node-a - * stateful-2:1 (ocf:heartbeat:Stateful): Promoted node-a + * stateful-2:1 (ocf:heartbeat:Stateful): Stopped diff --git a/cts/scheduler/summary/bug-pm-12.summary b/cts/scheduler/summary/bug-pm-12.summary index 8defffe8d68..bf5ea99a4ae 100644 --- a/cts/scheduler/summary/bug-pm-12.summary +++ b/cts/scheduler/summary/bug-pm-12.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node-a node-b ] @@ -16,34 +19,11 @@ Transition Summary: * Restart stateful-2:1 ( Promoted node-a ) due to resource definition change Executing Cluster Transition: - * Pseudo action: ms-sf_demote_0 - * Pseudo action: group:1_demote_0 - * Resource action: stateful-2:1 demote on node-a - * Pseudo action: group:1_demoted_0 - * Pseudo action: ms-sf_demoted_0 - * Pseudo action: ms-sf_stop_0 - * Pseudo action: group:0_stop_0 - * Resource action: stateful-2:0 stop on node-b - * Pseudo action: group:1_stop_0 - * Resource action: stateful-2:1 stop on node-a - * Pseudo action: group:0_stopped_0 - * Pseudo action: group:1_stopped_0 - * Pseudo action: ms-sf_stopped_0 - * Pseudo action: ms-sf_start_0 - * Pseudo action: group:0_start_0 - * Resource action: stateful-2:0 start on node-b - * Pseudo action: group:1_start_0 - * Resource action: stateful-2:1 start on node-a - * Pseudo action: group:0_running_0 - * Pseudo action: group:1_running_0 - * Pseudo action: ms-sf_running_0 - * Pseudo action: ms-sf_promote_0 - * Pseudo action: group:1_promote_0 - * Resource action: stateful-2:1 promote on node-a - * Pseudo action: group:1_promoted_0 - * Pseudo action: ms-sf_promoted_0 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * 
Node List: * Online: [ node-a node-b ] diff --git a/cts/scheduler/summary/bug-rh-1097457.summary b/cts/scheduler/summary/bug-rh-1097457.summary index f68a509609b..e0527c25272 100644 --- a/cts/scheduler/summary/bug-rh-1097457.summary +++ b/cts/scheduler/summary/bug-rh-1097457.summary @@ -1,6 +1,9 @@ 2 of 26 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ lama2 lama3 ] * GuestOnline: [ lamaVM1 lamaVM2 lamaVM3 ] @@ -43,56 +46,11 @@ Transition Summary: * Restart lamaVM2 ( lama3 ) due to required VM2 start Executing Cluster Transition: - * Resource action: FSlun1 monitor on lamaVM3 - * Resource action: FSlun2 monitor on lamaVM3 - * Resource action: FSlun3 monitor on lamaVM3 - * Resource action: FSlun3 monitor on lamaVM1 - * Resource action: FSlun4 monitor on lamaVM1 - * Resource action: FAKE5-IP monitor on lamaVM3 - * Resource action: FAKE5-IP monitor on lamaVM1 - * Resource action: FAKE6-IP monitor on lamaVM3 - * Resource action: FAKE6-IP monitor on lamaVM1 - * Resource action: FAKE5 monitor on lamaVM1 - * Resource action: FAKE1 monitor on lamaVM3 - * Resource action: FAKE1-IP monitor on lamaVM3 - * Resource action: FAKE2 monitor on lamaVM3 - * Resource action: FAKE2-IP monitor on lamaVM3 - * Resource action: FAKE3 monitor on lamaVM3 - * Resource action: FAKE3-IP monitor on lamaVM3 - * Resource action: FAKE4 monitor on lamaVM3 - * Resource action: FAKE4 monitor on lamaVM1 - * Resource action: FAKE4-IP monitor on lamaVM3 - * Resource action: FAKE4-IP monitor on lamaVM1 - * Resource action: lamaVM2 stop on lama3 - * Resource action: VM2 stop on lama3 - * Pseudo action: stonith-lamaVM2-reboot on lamaVM2 - * Resource action: VM2 start on lama3 - * Resource action: VM2 monitor=10000 on lama3 - * Pseudo action: lamaVM2-G4_stop_0 - * Pseudo action: FAKE4-IP_stop_0 - * Pseudo action: FAKE6-clone_stop_0 - * Resource action: lamaVM2 start on lama3 - * Resource action: lamaVM2 monitor=30000 on lama3 - * Resource action: FSlun3 monitor=10000 on lamaVM2 - * Pseudo action: FAKE4_stop_0 - * Pseudo action: FAKE6_stop_0 - * Pseudo action: FAKE6-clone_stopped_0 - * Pseudo action: FAKE6-clone_start_0 - * Pseudo action: lamaVM2-G4_stopped_0 - * Resource action: FAKE6 start on lamaVM2 - * Resource action: FAKE6 monitor=30000 on lamaVM2 - * Pseudo action: FAKE6-clone_running_0 - * Pseudo action: FSlun3_stop_0 - * Resource action: FSlun3 start on lama2 - * Pseudo action: lamaVM2-G4_start_0 - * Resource action: FAKE4 start on lamaVM2 - * Resource action: FAKE4 monitor=30000 on lamaVM2 - * Resource action: FAKE4-IP start on lamaVM2 - * Resource action: FAKE4-IP monitor=30000 on lamaVM2 - * Resource action: FSlun3 monitor=10000 on lama2 - * Pseudo action: lamaVM2-G4_running_0 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ lama2 lama3 ] * GuestOnline: [ lamaVM1 lamaVM2 lamaVM3 ] @@ -105,7 +63,7 @@ Revised Cluster Status: * FSlun2 (ocf:heartbeat:Filesystem): Started lamaVM1 * VM2 (ocf:heartbeat:VirtualDomain): FAILED lama3 * VM3 (ocf:heartbeat:VirtualDomain): Started lama3 - * FSlun3 (ocf:heartbeat:Filesystem): FAILED [ lama2 lamaVM2 ] + * FSlun3 (ocf:heartbeat:Filesystem): FAILED lamaVM2 * FSlun4 (ocf:heartbeat:Filesystem): Started lamaVM3 * FAKE5-IP (ocf:heartbeat:IPaddr2): Stopped (disabled) * FAKE6-IP (ocf:heartbeat:IPaddr2): Stopped (disabled) diff --git 
a/cts/scheduler/summary/bug-rh-880249.summary b/cts/scheduler/summary/bug-rh-880249.summary index 4cf3fe8bffa..24ed5e50db7 100644 --- a/cts/scheduler/summary/bug-rh-880249.summary +++ b/cts/scheduler/summary/bug-rh-880249.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ 18node1 18node2 18node3 ] @@ -11,19 +13,14 @@ Transition Summary: * Move dummystateful ( Promoted 18node2 -> Started 18node3 ) Executing Cluster Transition: - * Resource action: dummystateful demote on 18node3 - * Resource action: dummystateful demote on 18node1 - * Resource action: dummystateful demote on 18node2 - * Resource action: dummystateful stop on 18node3 - * Resource action: dummystateful stop on 18node1 - * Resource action: dummystateful stop on 18node2 - * Resource action: dummystateful start on 18node3 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ 18node1 18node2 18node3 ] * Full List of Resources: * shoot1 (stonith:fence_xvm): Started 18node1 * shoot2 (stonith:fence_xvm): Started 18node2 - * dummystateful (ocf:pacemaker:Stateful): Started 18node3 + * dummystateful (ocf:pacemaker:Stateful): Promoted [ 18node2 18node1 18node3 ] diff --git a/cts/scheduler/summary/bug-suse-707150.summary b/cts/scheduler/summary/bug-suse-707150.summary index 37e9f5b36f9..9e0bbd70189 100644 --- a/cts/scheduler/summary/bug-suse-707150.summary +++ b/cts/scheduler/summary/bug-suse-707150.summary @@ -1,12 +1,16 @@ 5 of 28 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ hex-0 hex-9 ] * OFFLINE: [ hex-7 hex-8 ] * Full List of Resources: * vm-00 (ocf:heartbeat:Xen): Stopped (disabled) + * dummy1 (ocf:heartbeat:Dummy): Started hex-0 + * fencing-sbd (stonith:external/sbd): Started hex-9 * Clone Set: base-clone [base-group]: * Resource Group: base-group:0: * dlm (ocf:pacemaker:controld): Started hex-0 @@ -17,8 +21,6 @@ Current cluster status: * ocfs2-1 (ocf:heartbeat:Filesystem): Stopped * Stopped: [ hex-7 hex-8 hex-9 ] * vm-01 (ocf:heartbeat:Xen): Stopped - * fencing-sbd (stonith:external/sbd): Started hex-9 - * dummy1 (ocf:heartbeat:Dummy): Started hex-0 Transition Summary: * Start o2cb:0 ( hex-0 ) @@ -31,45 +33,25 @@ Transition Summary: * Start vm-01 ( hex-9 ) due to unrunnable base-clone running (blocked) Executing Cluster Transition: - * Resource action: vg1:1 monitor on hex-9 - * Pseudo action: base-clone_start_0 - * Pseudo action: load_stopped_hex-9 - * Pseudo action: load_stopped_hex-8 - * Pseudo action: load_stopped_hex-7 - * Pseudo action: load_stopped_hex-0 - * Pseudo action: base-group:0_start_0 - * Resource action: o2cb:0 start on hex-0 - * Resource action: clvm:0 start on hex-0 - * Resource action: cmirrord:0 start on hex-0 - * Pseudo action: base-group:1_start_0 - * Resource action: dlm:1 start on hex-9 - * Resource action: o2cb:1 start on hex-9 - * Resource action: clvm:1 start on hex-9 - * Resource action: cmirrord:1 start on hex-9 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ hex-0 hex-9 ] * OFFLINE: [ hex-7 hex-8 ] * Full List of Resources: * vm-00 (ocf:heartbeat:Xen): Stopped (disabled) + * dummy1 (ocf:heartbeat:Dummy): Started hex-0 + * fencing-sbd (stonith:external/sbd): Started hex-9 * Clone Set: base-clone [base-group]: * Resource Group: base-group:0: * dlm (ocf:pacemaker:controld): Started hex-0 - * o2cb (ocf:ocfs2:o2cb): Started hex-0 - * clvm (ocf:lvm2:clvmd): Started hex-0 - * cmirrord (ocf:lvm2:cmirrord): Started 
hex-0 - * vg1 (ocf:heartbeat:LVM): Stopped (disabled) - * ocfs2-1 (ocf:heartbeat:Filesystem): Stopped - * Resource Group: base-group:1: - * dlm (ocf:pacemaker:controld): Started hex-9 - * o2cb (ocf:ocfs2:o2cb): Started hex-9 - * clvm (ocf:lvm2:clvmd): Started hex-9 - * cmirrord (ocf:lvm2:cmirrord): Started hex-9 + * o2cb (ocf:ocfs2:o2cb): Stopped + * clvm (ocf:lvm2:clvmd): Stopped + * cmirrord (ocf:lvm2:cmirrord): Stopped * vg1 (ocf:heartbeat:LVM): Stopped (disabled) * ocfs2-1 (ocf:heartbeat:Filesystem): Stopped - * Stopped: [ hex-7 hex-8 ] + * Stopped: [ hex-7 hex-8 hex-9 ] * vm-01 (ocf:heartbeat:Xen): Stopped - * fencing-sbd (stonith:external/sbd): Started hex-9 - * dummy1 (ocf:heartbeat:Dummy): Started hex-0 diff --git a/cts/scheduler/summary/bundle-connection-with-container.summary b/cts/scheduler/summary/bundle-connection-with-container.summary index 62e0ec683ce..9d887a9376a 100644 --- a/cts/scheduler/summary/bundle-connection-with-container.summary +++ b/cts/scheduler/summary/bundle-connection-with-container.summary @@ -1,10 +1,12 @@ Using the original execution date of: 2022-07-13 22:13:26Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ rhel8-1 rhel8-3 rhel8-4 rhel8-5 ] * OFFLINE: [ rhel8-2 ] * RemoteOnline: [ remote-rhel8-2 ] - * GuestOnline: [ httpd-bundle-1 httpd-bundle-2 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel8-3 @@ -24,27 +26,11 @@ Transition Summary: * Recover httpd:0 ( httpd-bundle-0 ) Executing Cluster Transition: - * Resource action: httpd-bundle-0 stop on rhel8-1 - * Pseudo action: httpd-bundle_stop_0 - * Pseudo action: httpd-bundle_start_0 - * Resource action: httpd-bundle-podman-0 stop on rhel8-1 - * Pseudo action: stonith-httpd-bundle-0-reboot on httpd-bundle-0 - * Pseudo action: httpd-bundle-clone_stop_0 - * Resource action: httpd-bundle-podman-0 start on rhel8-1 - * Resource action: httpd-bundle-podman-0 monitor=60000 on rhel8-1 - * Resource action: httpd-bundle-0 start on rhel8-1 - * Resource action: httpd-bundle-0 monitor=30000 on rhel8-1 - * Pseudo action: httpd_stop_0 - * Pseudo action: httpd-bundle-clone_stopped_0 - * Pseudo action: httpd-bundle-clone_start_0 - * Pseudo action: httpd-bundle_stopped_0 - * Resource action: httpd start on httpd-bundle-0 - * Pseudo action: httpd-bundle-clone_running_0 - * Pseudo action: httpd-bundle_running_0 - * Resource action: httpd monitor=15000 on httpd-bundle-0 Using the original execution date of: 2022-07-13 22:13:26Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ rhel8-1 rhel8-3 rhel8-4 rhel8-5 ] * OFFLINE: [ rhel8-2 ] @@ -58,6 +44,6 @@ Revised Cluster Status: * remote-rhel8-2 (ocf:pacemaker:remote): Started rhel8-1 * remote-rsc (ocf:pacemaker:Dummy): Started remote-rhel8-2 * Container bundle set: httpd-bundle [localhost/pcmktest:http]: - * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started rhel8-1 + * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): FAILED rhel8-1 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started rhel8-3 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Started remote-rhel8-2 diff --git a/cts/scheduler/summary/bundle-interleave-promote.summary b/cts/scheduler/summary/bundle-interleave-promote.summary index 8e8725e81eb..b652682d588 100644 --- a/cts/scheduler/summary/bundle-interleave-promote.summary +++ b/cts/scheduler/summary/bundle-interleave-promote.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster 
Summary: + * Node List: * Online: [ node1 node2 node3 node4 node5 ] * GuestOnline: [ app-bundle-0 app-bundle-1 app-bundle-2 base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -19,22 +21,10 @@ Transition Summary: * Promote app:2 ( Unpromoted -> Promoted app-bundle-2 ) Executing Cluster Transition: - * Resource action: base cancel=16000 on base-bundle-2 - * Resource action: app cancel=16000 on app-bundle-2 - * Pseudo action: base-bundle_promote_0 - * Pseudo action: base-bundle-clone_promote_0 - * Resource action: base promote on base-bundle-2 - * Pseudo action: base-bundle-clone_promoted_0 - * Pseudo action: base-bundle_promoted_0 - * Resource action: base monitor=15000 on base-bundle-2 - * Pseudo action: app-bundle_promote_0 - * Pseudo action: app-bundle-clone_promote_0 - * Resource action: app promote on app-bundle-2 - * Pseudo action: app-bundle-clone_promoted_0 - * Pseudo action: app-bundle_promoted_0 - * Resource action: app monitor=15000 on app-bundle-2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 node4 node5 ] * GuestOnline: [ app-bundle-0 app-bundle-1 app-bundle-2 base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -44,8 +34,8 @@ Revised Cluster Status: * Container bundle set: base-bundle [localhost/pcmktest:base]: * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node2 * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node3 - * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node4 + * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node4 * Container bundle set: app-bundle [localhost/pcmktest:app]: * app-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node2 * app-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node3 - * app-bundle-2 (ocf:pacemaker:Stateful): Promoted node4 + * app-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node4 diff --git a/cts/scheduler/summary/bundle-interleave-start.summary b/cts/scheduler/summary/bundle-interleave-start.summary index 5a59847f7c1..39e533a57a0 100644 --- a/cts/scheduler/summary/bundle-interleave-start.summary +++ b/cts/scheduler/summary/bundle-interleave-start.summary @@ -1,6 +1,9 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 node4 node5 ] + * GuestOnline: [ app-bundle-0 app-bundle-1 app-bundle-2 base-bundle-0 base-bundle-1 base-bundle-2 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started node1 @@ -34,122 +37,10 @@ Transition Summary: * Promote app:2 ( Stopped -> Promoted app-bundle-2 ) Executing Cluster Transition: - * Resource action: base-bundle-podman-0 monitor on node5 - * Resource action: base-bundle-podman-0 monitor on node4 - * Resource action: base-bundle-podman-0 monitor on node3 - * Resource action: base-bundle-podman-0 monitor on node2 - * Resource action: base-bundle-podman-0 monitor on node1 - * Resource action: base-bundle-podman-1 monitor on node5 - * Resource action: base-bundle-podman-1 monitor on node4 - * Resource action: base-bundle-podman-1 monitor on node3 - * Resource action: base-bundle-podman-1 monitor on node2 - * Resource action: base-bundle-podman-1 monitor on node1 - * Resource action: base-bundle-podman-2 monitor on node5 - * Resource action: base-bundle-podman-2 monitor on node4 - * Resource action: base-bundle-podman-2 monitor on node3 - * Resource action: base-bundle-podman-2 monitor on node2 - * Resource action: base-bundle-podman-2 monitor on node1 - * Resource action: app-bundle-podman-0 monitor on node5 - * Resource action: app-bundle-podman-0 monitor on node4 - * Resource action: app-bundle-podman-0 
monitor on node3 - * Resource action: app-bundle-podman-0 monitor on node2 - * Resource action: app-bundle-podman-0 monitor on node1 - * Resource action: app-bundle-podman-1 monitor on node5 - * Resource action: app-bundle-podman-1 monitor on node4 - * Resource action: app-bundle-podman-1 monitor on node3 - * Resource action: app-bundle-podman-1 monitor on node2 - * Resource action: app-bundle-podman-1 monitor on node1 - * Resource action: app-bundle-podman-2 monitor on node5 - * Resource action: app-bundle-podman-2 monitor on node4 - * Resource action: app-bundle-podman-2 monitor on node3 - * Resource action: app-bundle-podman-2 monitor on node2 - * Resource action: app-bundle-podman-2 monitor on node1 - * Pseudo action: base-bundle_start_0 - * Pseudo action: base-bundle-clone_start_0 - * Resource action: base-bundle-podman-0 start on node2 - * Resource action: base-bundle-0 monitor on node5 - * Resource action: base-bundle-0 monitor on node4 - * Resource action: base-bundle-0 monitor on node3 - * Resource action: base-bundle-0 monitor on node2 - * Resource action: base-bundle-0 monitor on node1 - * Resource action: base-bundle-podman-1 start on node3 - * Resource action: base-bundle-1 monitor on node5 - * Resource action: base-bundle-1 monitor on node4 - * Resource action: base-bundle-1 monitor on node3 - * Resource action: base-bundle-1 monitor on node2 - * Resource action: base-bundle-1 monitor on node1 - * Resource action: base-bundle-podman-2 start on node4 - * Resource action: base-bundle-2 monitor on node5 - * Resource action: base-bundle-2 monitor on node4 - * Resource action: base-bundle-2 monitor on node3 - * Resource action: base-bundle-2 monitor on node2 - * Resource action: base-bundle-2 monitor on node1 - * Resource action: base-bundle-podman-0 monitor=60000 on node2 - * Resource action: base-bundle-0 start on node2 - * Resource action: base-bundle-podman-1 monitor=60000 on node3 - * Resource action: base-bundle-1 start on node3 - * Resource action: base-bundle-podman-2 monitor=60000 on node4 - * Resource action: base-bundle-2 start on node4 - * Resource action: base:0 start on base-bundle-0 - * Resource action: base-bundle-0 monitor=30000 on node2 - * Resource action: base-bundle-1 monitor=30000 on node3 - * Resource action: base-bundle-2 monitor=30000 on node4 - * Resource action: base:1 start on base-bundle-1 - * Resource action: base:0 monitor=16000 on base-bundle-0 - * Resource action: base:2 start on base-bundle-2 - * Resource action: base:1 monitor=16000 on base-bundle-1 - * Pseudo action: base-bundle-clone_running_0 - * Pseudo action: base-bundle_running_0 - * Pseudo action: app-bundle_start_0 - * Pseudo action: base-bundle_promote_0 - * Pseudo action: base-bundle-clone_promote_0 - * Pseudo action: app-bundle-clone_start_0 - * Resource action: app-bundle-podman-0 start on node2 - * Resource action: app-bundle-0 monitor on node5 - * Resource action: app-bundle-0 monitor on node4 - * Resource action: app-bundle-0 monitor on node3 - * Resource action: app-bundle-0 monitor on node2 - * Resource action: app-bundle-0 monitor on node1 - * Resource action: app-bundle-podman-1 start on node3 - * Resource action: app-bundle-1 monitor on node5 - * Resource action: app-bundle-1 monitor on node4 - * Resource action: app-bundle-1 monitor on node3 - * Resource action: app-bundle-1 monitor on node2 - * Resource action: app-bundle-1 monitor on node1 - * Resource action: app-bundle-podman-2 start on node4 - * Resource action: app-bundle-2 monitor on node5 - * Resource action: 
app-bundle-2 monitor on node4 - * Resource action: app-bundle-2 monitor on node3 - * Resource action: app-bundle-2 monitor on node2 - * Resource action: app-bundle-2 monitor on node1 - * Resource action: base:2 promote on base-bundle-2 - * Pseudo action: base-bundle-clone_promoted_0 - * Resource action: app-bundle-podman-0 monitor=60000 on node2 - * Resource action: app-bundle-0 start on node2 - * Resource action: app-bundle-podman-1 monitor=60000 on node3 - * Resource action: app-bundle-1 start on node3 - * Resource action: app-bundle-podman-2 monitor=60000 on node4 - * Resource action: app-bundle-2 start on node4 - * Pseudo action: base-bundle_promoted_0 - * Resource action: base:2 monitor=15000 on base-bundle-2 - * Resource action: app:0 start on app-bundle-0 - * Resource action: app-bundle-0 monitor=30000 on node2 - * Resource action: app-bundle-1 monitor=30000 on node3 - * Resource action: app-bundle-2 monitor=30000 on node4 - * Resource action: app:1 start on app-bundle-1 - * Resource action: app:0 monitor=16000 on app-bundle-0 - * Resource action: app:2 start on app-bundle-2 - * Resource action: app:1 monitor=16000 on app-bundle-1 - * Pseudo action: app-bundle-clone_running_0 - * Pseudo action: app-bundle_running_0 - * Pseudo action: app-bundle_promote_0 - * Pseudo action: app-bundle-clone_promote_0 - * Resource action: app:2 promote on app-bundle-2 - * Pseudo action: app-bundle-clone_promoted_0 - * Pseudo action: app-bundle_promoted_0 - * Resource action: app:2 monitor=15000 on app-bundle-2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 node4 node5 ] * GuestOnline: [ app-bundle-0 app-bundle-1 app-bundle-2 base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -157,10 +48,10 @@ Revised Cluster Status: * Full List of Resources: * Fencing (stonith:fence_xvm): Started node1 * Container bundle set: base-bundle [localhost/pcmktest:base]: - * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node2 - * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node3 - * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node4 + * base-bundle-0 (ocf:pacemaker:Stateful): Stopped + * base-bundle-1 (ocf:pacemaker:Stateful): Stopped + * base-bundle-2 (ocf:pacemaker:Stateful): Stopped * Container bundle set: app-bundle [localhost/pcmktest:app]: - * app-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node2 - * app-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node3 - * app-bundle-2 (ocf:pacemaker:Stateful): Promoted node4 + * app-bundle-0 (ocf:pacemaker:Stateful): Stopped + * app-bundle-1 (ocf:pacemaker:Stateful): Stopped + * app-bundle-2 (ocf:pacemaker:Stateful): Stopped diff --git a/cts/scheduler/summary/bundle-nested-colocation.summary b/cts/scheduler/summary/bundle-nested-colocation.summary index 194909647d6..979fcc266e4 100644 --- a/cts/scheduler/summary/bundle-nested-colocation.summary +++ b/cts/scheduler/summary/bundle-nested-colocation.summary @@ -1,8 +1,11 @@ Using the original execution date of: 2017-07-14 08:50:25Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 overcloud-galera-0 overcloud-galera-1 overcloud-galera-2 ] * RemoteOnline: [ overcloud-rabbit-0 overcloud-rabbit-1 overcloud-rabbit-2 ] + * GuestOnline: [ rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 ] * Full List of Resources: * overcloud-rabbit-0 (ocf:pacemaker:remote): Started overcloud-controller-0 @@ -29,64 +32,11 @@ Transition Summary: * Start rabbitmq:2 ( rabbitmq-bundle-2 ) Executing Cluster 
Transition: - * Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0 - * Pseudo action: rabbitmq-bundle_stop_0 - * Pseudo action: rabbitmq-bundle_start_0 - * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0 - * Resource action: rabbitmq-bundle-docker-0 stop on overcloud-rabbit-0 - * Resource action: rabbitmq-bundle-docker-0 start on overcloud-rabbit-0 - * Resource action: rabbitmq-bundle-docker-0 monitor=60000 on overcloud-rabbit-0 - * Resource action: rabbitmq-bundle-0 monitor on overcloud-galera-2 - * Resource action: rabbitmq-bundle-0 monitor on overcloud-galera-1 - * Resource action: rabbitmq-bundle-0 monitor on overcloud-galera-0 - * Resource action: rabbitmq-bundle-0 monitor on overcloud-controller-2 - * Resource action: rabbitmq-bundle-0 monitor on overcloud-controller-1 - * Resource action: rabbitmq-bundle-0 monitor on overcloud-controller-0 - * Resource action: rabbitmq-bundle-docker-1 stop on overcloud-rabbit-1 - * Resource action: rabbitmq-bundle-docker-1 start on overcloud-rabbit-1 - * Resource action: rabbitmq-bundle-docker-1 monitor=60000 on overcloud-rabbit-1 - * Resource action: rabbitmq-bundle-1 monitor on overcloud-galera-2 - * Resource action: rabbitmq-bundle-1 monitor on overcloud-galera-1 - * Resource action: rabbitmq-bundle-1 monitor on overcloud-galera-0 - * Resource action: rabbitmq-bundle-1 monitor on overcloud-controller-2 - * Resource action: rabbitmq-bundle-1 monitor on overcloud-controller-1 - * Resource action: rabbitmq-bundle-1 monitor on overcloud-controller-0 - * Resource action: rabbitmq-bundle-docker-2 stop on overcloud-rabbit-2 - * Resource action: rabbitmq-bundle-docker-2 start on overcloud-rabbit-2 - * Resource action: rabbitmq-bundle-docker-2 monitor=60000 on overcloud-rabbit-2 - * Resource action: rabbitmq-bundle-2 monitor on overcloud-galera-2 - * Resource action: rabbitmq-bundle-2 monitor on overcloud-galera-1 - * Resource action: rabbitmq-bundle-2 monitor on overcloud-galera-0 - * Resource action: rabbitmq-bundle-2 monitor on overcloud-controller-2 - * Resource action: rabbitmq-bundle-2 monitor on overcloud-controller-1 - * Resource action: rabbitmq-bundle-2 monitor on overcloud-controller-0 - * Pseudo action: rabbitmq-bundle_stopped_0 - * Resource action: rabbitmq-bundle-0 start on overcloud-controller-0 - * Resource action: rabbitmq-bundle-1 start on overcloud-controller-1 - * Resource action: rabbitmq-bundle-2 start on overcloud-controller-2 - * Resource action: rabbitmq:0 monitor on rabbitmq-bundle-0 - * Resource action: rabbitmq:1 monitor on rabbitmq-bundle-1 - * Resource action: rabbitmq:2 monitor on rabbitmq-bundle-2 - * Pseudo action: rabbitmq-bundle-clone_start_0 - * Resource action: rabbitmq-bundle-0 monitor=30000 on overcloud-controller-0 - * Resource action: rabbitmq-bundle-1 monitor=30000 on overcloud-controller-1 - * Resource action: rabbitmq-bundle-2 monitor=30000 on overcloud-controller-2 - * Resource action: rabbitmq:0 start on rabbitmq-bundle-0 - * Resource action: rabbitmq:1 start on rabbitmq-bundle-1 - * Resource action: rabbitmq:2 start on rabbitmq-bundle-2 - * Pseudo action: rabbitmq-bundle-clone_running_0 - * Pseudo action: rabbitmq-bundle-clone_post_notify_running_0 - * Resource action: rabbitmq:0 notify on rabbitmq-bundle-0 - * Resource action: rabbitmq:1 notify on rabbitmq-bundle-1 - * Resource action: rabbitmq:2 notify on rabbitmq-bundle-2 - * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0 - * Pseudo action: rabbitmq-bundle_running_0 - * Resource action: rabbitmq:0 monitor=10000 
on rabbitmq-bundle-0 - * Resource action: rabbitmq:1 monitor=10000 on rabbitmq-bundle-1 - * Resource action: rabbitmq:2 monitor=10000 on rabbitmq-bundle-2 Using the original execution date of: 2017-07-14 08:50:25Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 overcloud-galera-0 overcloud-galera-1 overcloud-galera-2 ] * RemoteOnline: [ overcloud-rabbit-0 overcloud-rabbit-1 overcloud-rabbit-2 ] @@ -97,9 +47,9 @@ Revised Cluster Status: * overcloud-rabbit-1 (ocf:pacemaker:remote): Started overcloud-controller-1 * overcloud-rabbit-2 (ocf:pacemaker:remote): Started overcloud-controller-2 * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]: - * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started overcloud-rabbit-0 - * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started overcloud-rabbit-1 - * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started overcloud-rabbit-2 + * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped overcloud-rabbit-0 + * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Stopped overcloud-rabbit-1 + * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Stopped overcloud-rabbit-2 * Container bundle set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]: * galera-bundle-docker-0 (ocf:heartbeat:docker): Started overcloud-galera-0 * galera-bundle-docker-1 (ocf:heartbeat:docker): Started overcloud-galera-1 diff --git a/cts/scheduler/summary/bundle-order-fencing.summary b/cts/scheduler/summary/bundle-order-fencing.summary index 4088c1565b1..d336e7207cc 100644 --- a/cts/scheduler/summary/bundle-order-fencing.summary +++ b/cts/scheduler/summary/bundle-order-fencing.summary @@ -1,9 +1,11 @@ Using the original execution date of: 2017-09-12 10:51:59Z Current cluster status: + * Cluster Summary: + * Node List: * Node controller-0: UNCLEAN (offline) * Online: [ controller-1 controller-2 ] - * GuestOnline: [ galera-bundle-1 galera-bundle-2 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-1 redis-bundle-2 ] + * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ] * Full List of Resources: * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp12/openstack-rabbitmq-docker:pcmklatest]: @@ -34,6 +36,9 @@ Current cluster status: * stonith-fence_ipmilan-5254000dcb3f (stonith:fence_ipmilan): Started controller-0 (UNCLEAN) Transition Summary: + * Fence (reboot) redis-bundle-0 (resource: redis-bundle-docker-0) 'guest is unclean' + * Fence (reboot) rabbitmq-bundle-0 (resource: rabbitmq-bundle-docker-0) 'guest is unclean' + * Fence (reboot) galera-bundle-0 (resource: galera-bundle-docker-0) 'guest is unclean' * Fence (off) redis-bundle-0 (resource: redis-bundle-docker-0) 'guest is unclean' * Fence (off) rabbitmq-bundle-0 (resource: rabbitmq-bundle-docker-0) 'guest is unclean' * Fence (off) galera-bundle-0 (resource: galera-bundle-docker-0) 'guest is unclean' @@ -56,173 +61,45 @@ Transition Summary: * Move stonith-fence_ipmilan-5254000dcb3f ( controller-0 -> controller-2 ) Executing Cluster Transition: - * Pseudo action: rabbitmq-bundle-clone_pre_notify_stop_0 - * Pseudo action: rabbitmq-bundle-0_stop_0 - * Resource action: rabbitmq-bundle-0 monitor on controller-2 - * Resource action: rabbitmq-bundle-0 monitor on controller-1 - * Resource action: rabbitmq-bundle-1 monitor on 
controller-2 - * Resource action: rabbitmq-bundle-2 monitor on controller-1 - * Pseudo action: galera-bundle-0_stop_0 - * Resource action: galera-bundle-0 monitor on controller-2 - * Resource action: galera-bundle-0 monitor on controller-1 - * Resource action: galera-bundle-1 monitor on controller-2 - * Resource action: galera-bundle-2 monitor on controller-1 - * Resource action: redis cancel=45000 on redis-bundle-1 - * Resource action: redis cancel=60000 on redis-bundle-1 - * Pseudo action: redis-bundle-master_pre_notify_demote_0 - * Pseudo action: redis-bundle-0_stop_0 - * Resource action: redis-bundle-0 monitor on controller-2 - * Resource action: redis-bundle-0 monitor on controller-1 - * Resource action: redis-bundle-1 monitor on controller-2 - * Resource action: redis-bundle-2 monitor on controller-1 - * Pseudo action: stonith-fence_ipmilan-5254003e8e97_stop_0 - * Pseudo action: stonith-fence_ipmilan-5254000dcb3f_stop_0 - * Pseudo action: haproxy-bundle_stop_0 - * Pseudo action: redis-bundle_demote_0 - * Pseudo action: galera-bundle_demote_0 - * Pseudo action: rabbitmq-bundle_stop_0 - * Pseudo action: rabbitmq-bundle_start_0 - * Fencing controller-0 (reboot) - * Resource action: rabbitmq notify on rabbitmq-bundle-1 - * Resource action: rabbitmq notify on rabbitmq-bundle-2 - * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_stop_0 - * Pseudo action: rabbitmq-bundle-docker-0_post_notify_stonith_0 - * Pseudo action: rabbitmq-bundle-docker-0_stop_0 - * Pseudo action: rabbitmq-bundle-0_post_notify_stonith_0 - * Pseudo action: galera-bundle-master_demote_0 - * Resource action: redis notify on redis-bundle-1 - * Resource action: redis notify on redis-bundle-2 - * Pseudo action: redis-bundle-master_confirmed-pre_notify_demote_0 - * Pseudo action: redis-bundle-master_demote_0 - * Pseudo action: redis-bundle-docker-0_post_notify_stonith_0 - * Pseudo action: redis-bundle-0_post_notify_stonith_0 - * Pseudo action: haproxy-bundle-docker-0_stop_0 - * Resource action: stonith-fence_ipmilan-5254003e8e97 start on controller-1 - * Resource action: stonith-fence_ipmilan-5254000dcb3f start on controller-2 - * Pseudo action: stonith-redis-bundle-0-off on redis-bundle-0 - * Pseudo action: stonith-rabbitmq-bundle-0-off on rabbitmq-bundle-0 - * Pseudo action: stonith-galera-bundle-0-off on galera-bundle-0 - * Pseudo action: haproxy-bundle_stopped_0 - * Pseudo action: rabbitmq_post_notify_stop_0 - * Pseudo action: rabbitmq-bundle-clone_stop_0 - * Pseudo action: rabbitmq-bundle-docker-0_confirmed-post_notify_stonith_0 - * Pseudo action: rabbitmq-bundle-0_confirmed-post_notify_stonith_0 - * Pseudo action: galera_demote_0 - * Pseudo action: galera-bundle-master_demoted_0 - * Pseudo action: redis_post_notify_stop_0 - * Pseudo action: redis_demote_0 - * Pseudo action: redis-bundle-master_demoted_0 - * Pseudo action: redis-bundle-docker-0_confirmed-post_notify_stonith_0 - * Pseudo action: redis-bundle-0_confirmed-post_notify_stonith_0 - * Pseudo action: ip-192.168.24.7_stop_0 - * Pseudo action: ip-10.0.0.109_stop_0 - * Pseudo action: ip-172.17.4.11_stop_0 - * Resource action: stonith-fence_ipmilan-5254003e8e97 monitor=60000 on controller-1 - * Resource action: stonith-fence_ipmilan-5254000dcb3f monitor=60000 on controller-2 - * Pseudo action: galera-bundle_demoted_0 - * Pseudo action: galera-bundle_stop_0 - * Pseudo action: rabbitmq_stop_0 - * Pseudo action: rabbitmq-bundle-clone_stopped_0 - * Pseudo action: galera-bundle-master_stop_0 - * Pseudo action: galera-bundle-docker-0_stop_0 - * Pseudo action: 
redis-bundle-master_post_notify_demoted_0 - * Resource action: ip-192.168.24.7 start on controller-2 - * Resource action: ip-10.0.0.109 start on controller-1 - * Resource action: ip-172.17.4.11 start on controller-1 - * Pseudo action: rabbitmq-bundle-clone_post_notify_stopped_0 - * Pseudo action: galera_stop_0 - * Pseudo action: galera-bundle-master_stopped_0 - * Pseudo action: galera-bundle-master_start_0 - * Resource action: redis notify on redis-bundle-1 - * Resource action: redis notify on redis-bundle-2 - * Pseudo action: redis-bundle-master_confirmed-post_notify_demoted_0 - * Pseudo action: redis-bundle-master_pre_notify_stop_0 - * Resource action: ip-192.168.24.7 monitor=10000 on controller-2 - * Resource action: ip-10.0.0.109 monitor=10000 on controller-1 - * Resource action: ip-172.17.4.11 monitor=10000 on controller-1 - * Pseudo action: redis-bundle_demoted_0 - * Pseudo action: redis-bundle_stop_0 - * Pseudo action: galera-bundle_stopped_0 - * Resource action: rabbitmq notify on rabbitmq-bundle-1 - * Resource action: rabbitmq notify on rabbitmq-bundle-2 - * Pseudo action: rabbitmq_notified_0 - * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_stopped_0 - * Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0 - * Pseudo action: galera-bundle-master_running_0 - * Resource action: redis notify on redis-bundle-1 - * Resource action: redis notify on redis-bundle-2 - * Pseudo action: redis-bundle-master_confirmed-pre_notify_stop_0 - * Pseudo action: redis-bundle-master_stop_0 - * Pseudo action: redis-bundle-docker-0_stop_0 - * Pseudo action: galera-bundle_running_0 - * Pseudo action: rabbitmq-bundle_stopped_0 - * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0 - * Pseudo action: rabbitmq-bundle-clone_start_0 - * Pseudo action: redis_stop_0 - * Pseudo action: redis-bundle-master_stopped_0 - * Pseudo action: rabbitmq-bundle-clone_running_0 - * Pseudo action: redis-bundle-master_post_notify_stopped_0 - * Pseudo action: rabbitmq-bundle-clone_post_notify_running_0 - * Resource action: redis notify on redis-bundle-1 - * Resource action: redis notify on redis-bundle-2 - * Pseudo action: redis_notified_0 - * Pseudo action: redis-bundle-master_confirmed-post_notify_stopped_0 - * Pseudo action: redis-bundle-master_pre_notify_start_0 - * Pseudo action: redis-bundle_stopped_0 - * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0 - * Pseudo action: redis-bundle-master_confirmed-pre_notify_start_0 - * Pseudo action: redis-bundle-master_start_0 - * Pseudo action: rabbitmq-bundle_running_0 - * Pseudo action: redis-bundle-master_running_0 - * Pseudo action: redis-bundle-master_post_notify_running_0 - * Pseudo action: redis-bundle-master_confirmed-post_notify_running_0 - * Pseudo action: redis-bundle_running_0 - * Pseudo action: redis-bundle-master_pre_notify_promote_0 - * Pseudo action: redis-bundle_promote_0 - * Resource action: redis notify on redis-bundle-1 - * Resource action: redis notify on redis-bundle-2 - * Pseudo action: redis-bundle-master_confirmed-pre_notify_promote_0 - * Pseudo action: redis-bundle-master_promote_0 - * Resource action: redis promote on redis-bundle-1 - * Pseudo action: redis-bundle-master_promoted_0 - * Pseudo action: redis-bundle-master_post_notify_promoted_0 - * Resource action: redis notify on redis-bundle-1 - * Resource action: redis notify on redis-bundle-2 - * Pseudo action: redis-bundle-master_confirmed-post_notify_promoted_0 - * Pseudo action: redis-bundle_promoted_0 - * Resource action: redis monitor=20000 on 
redis-bundle-1 + * Pseudo action: galera-bundle-master_promoted_0 + * Pseudo action: haproxy-bundle_running_0 + * Pseudo action: galera-bundle_promoted_0 +Transition failed: terminated +An invalid transition was produced Using the original execution date of: 2017-09-12 10:51:59Z Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node controller-0: UNCLEAN (offline) * Online: [ controller-1 controller-2 ] - * OFFLINE: [ controller-0 ] - * GuestOnline: [ galera-bundle-1 galera-bundle-2 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-1 redis-bundle-2 ] + * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ] * Full List of Resources: * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp12/openstack-rabbitmq-docker:pcmklatest]: - * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): FAILED + * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): FAILED controller-0 (UNCLEAN) * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-1 * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started controller-2 * Container bundle set: galera-bundle [192.168.24.1:8787/rhosp12/openstack-mariadb-docker:pcmklatest]: - * galera-bundle-0 (ocf:heartbeat:galera): FAILED Promoted + * galera-bundle-0 (ocf:heartbeat:galera): FAILED Promoted controller-0 (UNCLEAN) * galera-bundle-1 (ocf:heartbeat:galera): Promoted controller-1 * galera-bundle-2 (ocf:heartbeat:galera): Promoted controller-2 * Container bundle set: redis-bundle [192.168.24.1:8787/rhosp12/openstack-redis-docker:pcmklatest]: - * redis-bundle-0 (ocf:heartbeat:redis): FAILED Promoted - * redis-bundle-1 (ocf:heartbeat:redis): Promoted controller-1 + * redis-bundle-0 (ocf:heartbeat:redis): FAILED Promoted controller-0 (UNCLEAN) + * redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-1 * redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-2 - * ip-192.168.24.7 (ocf:heartbeat:IPaddr2): Started controller-2 - * ip-10.0.0.109 (ocf:heartbeat:IPaddr2): Started controller-1 + * ip-192.168.24.7 (ocf:heartbeat:IPaddr2): Started controller-0 (UNCLEAN) + * ip-10.0.0.109 (ocf:heartbeat:IPaddr2): Started controller-0 (UNCLEAN) * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.1.19 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.3.19 (ocf:heartbeat:IPaddr2): Started controller-2 - * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-1 + * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-0 (UNCLEAN) * Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp12/openstack-haproxy-docker:pcmklatest]: - * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped + * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started controller-0 (UNCLEAN) * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-2 * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-1 * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-2 * stonith-fence_ipmilan-525400efba5c (stonith:fence_ipmilan): Started controller-2 - * stonith-fence_ipmilan-5254003e8e97 (stonith:fence_ipmilan): Started controller-1 - * stonith-fence_ipmilan-5254000dcb3f (stonith:fence_ipmilan): Started controller-2 + * stonith-fence_ipmilan-5254003e8e97 (stonith:fence_ipmilan): Started controller-0 (UNCLEAN) + * stonith-fence_ipmilan-5254000dcb3f (stonith:fence_ipmilan): Started controller-0 (UNCLEAN) diff --git 
a/cts/scheduler/summary/bundle-order-partial-start-2.summary b/cts/scheduler/summary/bundle-order-partial-start-2.summary index 1e2ca2cdf9f..3736822e99f 100644 --- a/cts/scheduler/summary/bundle-order-partial-start-2.summary +++ b/cts/scheduler/summary/bundle-order-partial-start-2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ undercloud ] * GuestOnline: [ galera-bundle-0 rabbitmq-bundle-0 redis-bundle-0 ] @@ -30,64 +32,21 @@ Transition Summary: * Start haproxy-bundle-docker-0 ( undercloud ) Executing Cluster Transition: - * Resource action: rabbitmq:0 monitor on rabbitmq-bundle-0 - * Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0 - * Resource action: galera-bundle-0 stop on undercloud - * Pseudo action: redis-bundle-master_pre_notify_promote_0 - * Resource action: haproxy-bundle-docker-0 monitor on undercloud - * Pseudo action: haproxy-bundle_start_0 - * Pseudo action: redis-bundle_promote_0 - * Pseudo action: galera-bundle_stop_0 - * Pseudo action: rabbitmq-bundle_start_0 - * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0 - * Pseudo action: rabbitmq-bundle-clone_start_0 - * Resource action: galera-bundle-docker-0 stop on undercloud - * Resource action: redis notify on redis-bundle-0 - * Pseudo action: redis-bundle-master_confirmed-pre_notify_promote_0 - * Pseudo action: redis-bundle-master_promote_0 - * Resource action: haproxy-bundle-docker-0 start on undercloud - * Pseudo action: haproxy-bundle_running_0 - * Pseudo action: galera-bundle_stopped_0 - * Pseudo action: galera-bundle_start_0 - * Resource action: rabbitmq:0 start on rabbitmq-bundle-0 - * Pseudo action: rabbitmq-bundle-clone_running_0 - * Resource action: galera-bundle-docker-0 start on undercloud - * Resource action: galera-bundle-docker-0 monitor=60000 on undercloud - * Resource action: galera-bundle-0 start on undercloud - * Resource action: galera-bundle-0 monitor=30000 on undercloud - * Resource action: redis promote on redis-bundle-0 - * Pseudo action: redis-bundle-master_promoted_0 - * Resource action: haproxy-bundle-docker-0 monitor=60000 on undercloud - * Pseudo action: rabbitmq-bundle-clone_post_notify_running_0 - * Resource action: galera:0 monitor on galera-bundle-0 - * Pseudo action: galera-bundle-master_start_0 - * Pseudo action: redis-bundle-master_post_notify_promoted_0 - * Resource action: rabbitmq:0 notify on rabbitmq-bundle-0 - * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0 - * Resource action: galera:0 start on galera-bundle-0 - * Pseudo action: galera-bundle-master_running_0 - * Resource action: redis notify on redis-bundle-0 - * Pseudo action: redis-bundle-master_confirmed-post_notify_promoted_0 - * Pseudo action: redis-bundle_promoted_0 - * Pseudo action: galera-bundle_running_0 - * Pseudo action: rabbitmq-bundle_running_0 - * Resource action: rabbitmq:0 monitor=10000 on rabbitmq-bundle-0 - * Resource action: galera:0 monitor=30000 on galera-bundle-0 - * Resource action: galera:0 monitor=20000 on galera-bundle-0 - * Resource action: redis monitor=20000 on redis-bundle-0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ undercloud ] * GuestOnline: [ galera-bundle-0 rabbitmq-bundle-0 redis-bundle-0 ] * Full List of Resources: * Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]: - * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started undercloud + * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped undercloud * 
diff --git a/cts/scheduler/summary/bundle-order-partial-start.summary b/cts/scheduler/summary/bundle-order-partial-start.summary
index 79eb7b5935c..7f985ebd7d9 100644
--- a/cts/scheduler/summary/bundle-order-partial-start.summary
+++ b/cts/scheduler/summary/bundle-order-partial-start.summary
@@ -1,7 +1,9 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ undercloud ]
-    * GuestOnline: [ rabbitmq-bundle-0 redis-bundle-0 ]
+    * GuestOnline: [ galera-bundle-0 rabbitmq-bundle-0 redis-bundle-0 ]
 
   * Full List of Resources:
     * Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
@@ -30,61 +32,21 @@ Transition Summary:
   * Start haproxy-bundle-docker-0 ( undercloud )
 
 Executing Cluster Transition:
-  * Resource action: rabbitmq:0 monitor on rabbitmq-bundle-0
-  * Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0
-  * Resource action: galera-bundle-docker-0 monitor on undercloud
-  * Pseudo action: redis-bundle-master_pre_notify_promote_0
-  * Resource action: haproxy-bundle-docker-0 monitor on undercloud
-  * Pseudo action: haproxy-bundle_start_0
-  * Pseudo action: redis-bundle_promote_0
-  * Pseudo action: rabbitmq-bundle_start_0
-  * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0
-  * Pseudo action: rabbitmq-bundle-clone_start_0
-  * Resource action: redis notify on redis-bundle-0
-  * Pseudo action: redis-bundle-master_confirmed-pre_notify_promote_0
-  * Pseudo action: redis-bundle-master_promote_0
-  * Resource action: haproxy-bundle-docker-0 start on undercloud
-  * Pseudo action: haproxy-bundle_running_0
-  * Pseudo action: galera-bundle_start_0
-  * Resource action: rabbitmq:0 start on rabbitmq-bundle-0
-  * Pseudo action: rabbitmq-bundle-clone_running_0
-  * Pseudo action: galera-bundle-master_start_0
-  * Resource action: galera-bundle-docker-0 start on undercloud
-  * Resource action: galera-bundle-0 monitor on undercloud
-  * Resource action: redis promote on redis-bundle-0
-  * Pseudo action: redis-bundle-master_promoted_0
-  * Resource action: haproxy-bundle-docker-0 monitor=60000 on undercloud
-  * Pseudo action: rabbitmq-bundle-clone_post_notify_running_0
-  * Resource action: galera-bundle-docker-0 monitor=60000 on undercloud
-  * Resource action: galera-bundle-0 start on undercloud
-  * Pseudo action: redis-bundle-master_post_notify_promoted_0
-  * Resource action: rabbitmq:0 notify on rabbitmq-bundle-0
-  * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0
-  * Resource action: galera:0 start on galera-bundle-0
-  * Pseudo action: galera-bundle-master_running_0
-  * Resource action: galera-bundle-0 monitor=30000 on undercloud
-  * Resource action: redis notify on redis-bundle-0
-  * Pseudo action: redis-bundle-master_confirmed-post_notify_promoted_0
-  * Pseudo action: redis-bundle_promoted_0
-  * Pseudo action: galera-bundle_running_0
-  * Pseudo action: rabbitmq-bundle_running_0
-  * Resource action: rabbitmq:0 monitor=10000 on rabbitmq-bundle-0
-  * Resource action: galera:0 monitor=30000 on galera-bundle-0
-  * Resource action: galera:0 monitor=20000 on galera-bundle-0
-  * Resource action: redis monitor=20000 on redis-bundle-0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ undercloud ]
     * GuestOnline: [ galera-bundle-0 rabbitmq-bundle-0 redis-bundle-0 ]
 
   * Full List of Resources:
     * Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
-      * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started undercloud
+      * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped undercloud
     * Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
-      * galera-bundle-0 (ocf:heartbeat:galera): Unpromoted undercloud
+      * galera-bundle-0 (ocf:heartbeat:galera): Stopped
     * Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
-      * redis-bundle-0 (ocf:heartbeat:redis): Promoted undercloud
+      * redis-bundle-0 (ocf:heartbeat:redis): Unpromoted undercloud
     * ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Started undercloud
     * ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Started undercloud
     * ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Started undercloud
@@ -92,6 +54,6 @@ Revised Cluster Status:
     * ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Started undercloud
     * ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Started undercloud
     * Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
-      * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started undercloud
+      * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped
     * Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]:
       * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started undercloud
diff --git a/cts/scheduler/summary/bundle-order-partial-stop.summary b/cts/scheduler/summary/bundle-order-partial-stop.summary
index 5fc2efe04c1..4e9aff05d3b 100644
--- a/cts/scheduler/summary/bundle-order-partial-stop.summary
+++ b/cts/scheduler/summary/bundle-order-partial-stop.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ undercloud ]
     * GuestOnline: [ galera-bundle-0 rabbitmq-bundle-0 redis-bundle-0 ]
@@ -20,108 +22,3 @@ Current cluster status:
       * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started undercloud
     * Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]:
       * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started undercloud
-
-Transition Summary:
-  * Stop rabbitmq-bundle-docker-0 ( undercloud ) due to node availability
-  * Stop rabbitmq-bundle-0 ( undercloud ) due to node availability
-  * Stop rabbitmq:0 ( rabbitmq-bundle-0 ) due to colocation with haproxy-bundle-docker-0
-  * Stop galera-bundle-docker-0 ( undercloud ) due to node availability
-  * Stop galera-bundle-0 ( undercloud ) due to node availability
-  * Stop galera:0 ( Promoted galera-bundle-0 ) due to unrunnable galera-bundle-0 start
-  * Stop redis-bundle-docker-0 ( undercloud ) due to node availability
-  * Stop redis-bundle-0 ( undercloud ) due to node availability
-  * Stop redis:0 ( Promoted redis-bundle-0 ) due to unrunnable redis-bundle-0 start
-  * Stop ip-192.168.122.254 ( undercloud ) due to node availability
-  * Stop ip-192.168.122.250 ( undercloud ) due to node availability
-  * Stop ip-192.168.122.249 ( undercloud ) due to node availability
-  * Stop ip-192.168.122.253 ( undercloud ) due to node availability
-  * Stop ip-192.168.122.247 ( undercloud ) due to node availability
-  * Stop ip-192.168.122.248 ( undercloud ) due to node availability
-  * Stop haproxy-bundle-docker-0 ( undercloud ) due to node availability
-  * Stop openstack-cinder-volume-docker-0 ( undercloud ) due to node availability
-
-Executing Cluster Transition:
-  * Pseudo action: rabbitmq-bundle-clone_pre_notify_stop_0
-  * Resource action: galera cancel=10000 on galera-bundle-0
-  * Resource action: redis cancel=20000 on redis-bundle-0
-  * Pseudo action: redis-bundle-master_pre_notify_demote_0
-  * Pseudo action: openstack-cinder-volume_stop_0
-  * Pseudo action: haproxy-bundle_stop_0
-  * Pseudo action: redis-bundle_demote_0
-  * Pseudo action: galera-bundle_demote_0
-  * Pseudo action: rabbitmq-bundle_stop_0
-  * Resource action: rabbitmq notify on rabbitmq-bundle-0
-  * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_stop_0
-  * Pseudo action: rabbitmq-bundle-clone_stop_0
-  * Pseudo action: galera-bundle-master_demote_0
-  * Resource action: redis notify on redis-bundle-0
-  * Pseudo action: redis-bundle-master_confirmed-pre_notify_demote_0
-  * Pseudo action: redis-bundle-master_demote_0
-  * Resource action: haproxy-bundle-docker-0 stop on undercloud
-  * Resource action: openstack-cinder-volume-docker-0 stop on undercloud
-  * Pseudo action: openstack-cinder-volume_stopped_0
-  * Pseudo action: haproxy-bundle_stopped_0
-  * Resource action: rabbitmq stop on rabbitmq-bundle-0
-  * Pseudo action: rabbitmq-bundle-clone_stopped_0
-  * Resource action: rabbitmq-bundle-0 stop on undercloud
-  * Resource action: galera demote on galera-bundle-0
-  * Pseudo action: galera-bundle-master_demoted_0
-  * Resource action: redis demote on redis-bundle-0
-  * Pseudo action: redis-bundle-master_demoted_0
-  * Resource action: ip-192.168.122.254 stop on undercloud
-  * Resource action: ip-192.168.122.250 stop on undercloud
-  * Resource action: ip-192.168.122.249 stop on undercloud
-  * Resource action: ip-192.168.122.253 stop on undercloud
-  * Resource action: ip-192.168.122.247 stop on undercloud
-  * Resource action: ip-192.168.122.248 stop on undercloud
-  * Pseudo action: galera-bundle_demoted_0
-  * Pseudo action: galera-bundle_stop_0
-  * Pseudo action: rabbitmq-bundle-clone_post_notify_stopped_0
-  * Resource action: rabbitmq-bundle-docker-0 stop on undercloud
-  * Pseudo action: galera-bundle-master_stop_0
-  * Pseudo action: redis-bundle-master_post_notify_demoted_0
-  * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_stopped_0
-  * Resource action: galera stop on galera-bundle-0
-  * Pseudo action: galera-bundle-master_stopped_0
-  * Resource action: galera-bundle-0 stop on undercloud
-  * Resource action: redis notify on redis-bundle-0
-  * Pseudo action: redis-bundle-master_confirmed-post_notify_demoted_0
-  * Pseudo action: redis-bundle-master_pre_notify_stop_0
-  * Pseudo action: redis-bundle_demoted_0
-  * Pseudo action: rabbitmq-bundle_stopped_0
-  * Resource action: galera-bundle-docker-0 stop on undercloud
-  * Resource action: redis notify on redis-bundle-0
-  * Pseudo action: redis-bundle-master_confirmed-pre_notify_stop_0
-  * Pseudo action: galera-bundle_stopped_0
-  * Pseudo action: redis-bundle_stop_0
-  * Pseudo action: redis-bundle-master_stop_0
-  * Resource action: redis stop on redis-bundle-0
-  * Pseudo action: redis-bundle-master_stopped_0
-  * Resource action: redis-bundle-0 stop on undercloud
-  * Pseudo action: redis-bundle-master_post_notify_stopped_0
-  * Resource action: redis-bundle-docker-0 stop on undercloud
-  * Cluster action: do_shutdown on undercloud
-  * Pseudo action: redis-bundle-master_confirmed-post_notify_stopped_0
-  * Pseudo action: redis-bundle_stopped_0
-
-Revised Cluster Status:
-  * Node List:
-    * Online: [ undercloud ]
-
-  * Full List of Resources:
-    * Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
-      * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped
-    * Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
-      * galera-bundle-0 (ocf:heartbeat:galera): Stopped
-    * Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
-      * redis-bundle-0 (ocf:heartbeat:redis): Stopped
-    * ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Stopped
-    * ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Stopped
-    * ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Stopped
-    * ip-192.168.122.253 (ocf:heartbeat:IPaddr2): Stopped
-    * ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Stopped
-    * ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Stopped
-    * Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
-      * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped
-    * Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]:
-      * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Stopped
diff --git a/cts/scheduler/summary/bundle-order-startup-clone-2.summary b/cts/scheduler/summary/bundle-order-startup-clone-2.summary
index 2d7cd9b52fa..ae1df58388a 100644
--- a/cts/scheduler/summary/bundle-order-startup-clone-2.summary
+++ b/cts/scheduler/summary/bundle-order-startup-clone-2.summary
@@ -1,7 +1,10 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ metal-1 metal-2 metal-3 ]
     * RemoteOFFLINE: [ rabbitmq-bundle-0 ]
+    * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
 
   * Full List of Resources:
     * Clone Set: storage-clone [storage]:
@@ -46,150 +49,10 @@ Transition Summary:
   * Promote redis:2 ( Stopped -> Promoted redis-bundle-2 )
 
 Executing Cluster Transition:
-  * Resource action: storage:0 monitor on metal-1
-  * Resource action: storage:1 monitor on metal-2
-  * Resource action: storage:2 monitor on metal-3
-  * Pseudo action: storage-clone_pre_notify_start_0
-  * Resource action: galera-bundle-docker-0 monitor on metal-3
-  * Resource action: galera-bundle-docker-0 monitor on metal-2
-  * Resource action: galera-bundle-docker-0 monitor on metal-1
-  * Resource action: galera-bundle-docker-1 monitor on metal-3
-  * Resource action: galera-bundle-docker-1 monitor on metal-2
-  * Resource action: galera-bundle-docker-1 monitor on metal-1
-  * Resource action: galera-bundle-docker-2 monitor on metal-3
-  * Resource action: galera-bundle-docker-2 monitor on metal-2
-  * Resource action: galera-bundle-docker-2 monitor on metal-1
-  * Resource action: haproxy-bundle-docker-0 monitor on metal-3
-  * Resource action: haproxy-bundle-docker-0 monitor on metal-2
-  * Resource action: haproxy-bundle-docker-0 monitor on metal-1
-  * Resource action: haproxy-bundle-docker-1 monitor on metal-3
-  * Resource action: haproxy-bundle-docker-1 monitor on metal-2
-  * Resource action: haproxy-bundle-docker-1 monitor on metal-1
-  * Resource action: haproxy-bundle-docker-2 monitor on metal-3
-  * Resource action: haproxy-bundle-docker-2 monitor on metal-2
-  * Resource action: haproxy-bundle-docker-2 monitor on metal-1
-  * Pseudo action: redis-bundle-master_pre_notify_start_0
-  * Resource action: redis-bundle-docker-0 monitor on metal-3
-  * Resource action: redis-bundle-docker-0 monitor on metal-2
-  * Resource action: redis-bundle-docker-0 monitor on metal-1
-  * Resource action: redis-bundle-docker-1 monitor on metal-3
-  * Resource action: redis-bundle-docker-1 monitor on metal-2
-  * Resource action: redis-bundle-docker-1 monitor on metal-1
-  * Resource action: redis-bundle-docker-2 monitor on metal-3
-  * Resource action: redis-bundle-docker-2 monitor on metal-2
-  * Resource action: redis-bundle-docker-2 monitor on metal-1
-  * Pseudo action: redis-bundle_start_0
-  * Pseudo action: haproxy-bundle_start_0
-  * Pseudo action: storage-clone_confirmed-pre_notify_start_0
-  * Resource action: haproxy-bundle-docker-0 start on metal-1
-  * Resource action: haproxy-bundle-docker-1 start on metal-2
-  * Resource action: haproxy-bundle-docker-2 start on metal-3
-  * Pseudo action: redis-bundle-master_confirmed-pre_notify_start_0
-  * Pseudo action: redis-bundle-master_start_0
-  * Resource action: redis-bundle-docker-0 start on metal-1
-  * Resource action: redis-bundle-0 monitor on metal-3
-  * Resource action: redis-bundle-0 monitor on metal-2
-  * Resource action: redis-bundle-0 monitor on metal-1
-  * Resource action: redis-bundle-docker-1 start on metal-2
-  * Resource action: redis-bundle-1 monitor on metal-3
-  * Resource action: redis-bundle-1 monitor on metal-2
-  * Resource action: redis-bundle-1 monitor on metal-1
-  * Resource action: redis-bundle-docker-2 start on metal-3
-  * Resource action: redis-bundle-2 monitor on metal-3
-  * Resource action: redis-bundle-2 monitor on metal-2
-  * Resource action: redis-bundle-2 monitor on metal-1
-  * Pseudo action: haproxy-bundle_running_0
-  * Resource action: haproxy-bundle-docker-0 monitor=60000 on metal-1
-  * Resource action: haproxy-bundle-docker-1 monitor=60000 on metal-2
-  * Resource action: haproxy-bundle-docker-2 monitor=60000 on metal-3
-  * Resource action: redis-bundle-docker-0 monitor=60000 on metal-1
-  * Resource action: redis-bundle-0 start on metal-1
-  * Resource action: redis-bundle-docker-1 monitor=60000 on metal-2
-  * Resource action: redis-bundle-1 start on metal-2
-  * Resource action: redis-bundle-docker-2 monitor=60000 on metal-3
-  * Resource action: redis-bundle-2 start on metal-3
-  * Resource action: redis:0 start on redis-bundle-0
-  * Resource action: redis:1 start on redis-bundle-1
-  * Resource action: redis:2 start on redis-bundle-2
-  * Pseudo action: redis-bundle-master_running_0
-  * Resource action: redis-bundle-0 monitor=30000 on metal-1
-  * Resource action: redis-bundle-1 monitor=30000 on metal-2
-  * Resource action: redis-bundle-2 monitor=30000 on metal-3
-  * Pseudo action: redis-bundle-master_post_notify_running_0
-  * Resource action: redis:0 notify on redis-bundle-0
-  * Resource action: redis:1 notify on redis-bundle-1
-  * Resource action: redis:2 notify on redis-bundle-2
-  * Pseudo action: redis-bundle-master_confirmed-post_notify_running_0
-  * Pseudo action: redis-bundle_running_0
-  * Pseudo action: redis-bundle-master_pre_notify_promote_0
-  * Pseudo action: redis-bundle_promote_0
-  * Pseudo action: storage-clone_start_0
-  * Resource action: redis:0 notify on redis-bundle-0
-  * Resource action: redis:1 notify on redis-bundle-1
-  * Resource action: redis:2 notify on redis-bundle-2
-  * Pseudo action: redis-bundle-master_confirmed-pre_notify_promote_0
-  * Pseudo action: redis-bundle-master_promote_0
-  * Resource action: storage:0 start on metal-1
-  * Resource action: storage:1 start on metal-2
-  * Resource action: storage:2 start on metal-3
-  * Pseudo action: storage-clone_running_0
-  * Resource action: redis:0 promote on redis-bundle-0
-  * Resource action: redis:1 promote on redis-bundle-1
-  * Resource action: redis:2 promote on redis-bundle-2
-  * Pseudo action: redis-bundle-master_promoted_0
-  * Pseudo action: storage-clone_post_notify_running_0
-  * Pseudo action: redis-bundle-master_post_notify_promoted_0
-  * Resource action: storage:0 notify on metal-1
-  * Resource action: storage:1 notify on metal-2
-  * Resource action: storage:2 notify on metal-3
-  * Pseudo action: storage-clone_confirmed-post_notify_running_0
-  * Resource action: redis:0 notify on redis-bundle-0
-  * Resource action: redis:1 notify on redis-bundle-1
-  * Resource action: redis:2 notify on redis-bundle-2
-  * Pseudo action: redis-bundle-master_confirmed-post_notify_promoted_0
-  * Pseudo action: redis-bundle_promoted_0
-  * Pseudo action: galera-bundle_start_0
-  * Resource action: storage:0 monitor=30000 on metal-1
-  * Resource action: storage:1 monitor=30000 on metal-2
-  * Resource action: storage:2 monitor=30000 on metal-3
-  * Pseudo action: galera-bundle-master_start_0
-  * Resource action: galera-bundle-docker-0 start on metal-1
-  * Resource action: galera-bundle-0 monitor on metal-3
-  * Resource action: galera-bundle-0 monitor on metal-2
-  * Resource action: galera-bundle-0 monitor on metal-1
-  * Resource action: galera-bundle-docker-1 start on metal-2
-  * Resource action: galera-bundle-1 monitor on metal-3
-  * Resource action: galera-bundle-1 monitor on metal-2
-  * Resource action: galera-bundle-1 monitor on metal-1
-  * Resource action: galera-bundle-docker-2 start on metal-3
-  * Resource action: galera-bundle-2 monitor on metal-3
-  * Resource action: galera-bundle-2 monitor on metal-2
-  * Resource action: galera-bundle-2 monitor on metal-1
-  * Resource action: redis:0 monitor=20000 on redis-bundle-0
-  * Resource action: redis:1 monitor=20000 on redis-bundle-1
-  * Resource action: redis:2 monitor=20000 on redis-bundle-2
-  * Resource action: galera-bundle-docker-0 monitor=60000 on metal-1
-  * Resource action: galera-bundle-0 start on metal-1
-  * Resource action: galera-bundle-docker-1 monitor=60000 on metal-2
-  * Resource action: galera-bundle-1 start on metal-2
-  * Resource action: galera-bundle-docker-2 monitor=60000 on metal-3
-  * Resource action: galera-bundle-2 start on metal-3
-  * Resource action: galera:0 start on galera-bundle-0
-  * Resource action: galera:1 start on galera-bundle-1
-  * Resource action: galera:2 start on galera-bundle-2
-  * Pseudo action: galera-bundle-master_running_0
-  * Resource action: galera-bundle-0 monitor=30000 on metal-1
-  * Resource action: galera-bundle-1 monitor=30000 on metal-2
-  * Resource action: galera-bundle-2 monitor=30000 on metal-3
-  * Pseudo action: galera-bundle_running_0
-  * Resource action: galera:0 monitor=30000 on galera-bundle-0
-  * Resource action: galera:0 monitor=20000 on galera-bundle-0
-  * Resource action: galera:1 monitor=30000 on galera-bundle-1
-  * Resource action: galera:1 monitor=20000 on galera-bundle-1
-  * Resource action: galera:2 monitor=30000 on galera-bundle-2
-  * Resource action: galera:2 monitor=20000 on galera-bundle-2
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ metal-1 metal-2 metal-3 ]
     * RemoteOFFLINE: [ rabbitmq-bundle-0 ]
@@ -197,17 +60,16 @@ Revised Cluster Status:
 
   * Full List of Resources:
     * Clone Set: storage-clone [storage]:
-      * Started: [ metal-1 metal-2 metal-3 ]
-      * Stopped: [ rabbitmq-bundle-0 ]
+      * Stopped: [ metal-1 metal-2 metal-3 rabbitmq-bundle-0 ]
     * Container bundle set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
-      * galera-bundle-0 (ocf:heartbeat:galera): Unpromoted metal-1
-      * galera-bundle-1 (ocf:heartbeat:galera): Unpromoted metal-2
-      * galera-bundle-2 (ocf:heartbeat:galera): Unpromoted metal-3
+      * galera-bundle-0 (ocf:heartbeat:galera): Stopped
+      * galera-bundle-1 (ocf:heartbeat:galera): Stopped
+      * galera-bundle-2 (ocf:heartbeat:galera): Stopped
     * Container bundle set: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
-      * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started metal-1
-      * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started metal-2
-      * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started metal-3
+      * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped
+      * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Stopped
+      * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Stopped
     * Container bundle set: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
-      * redis-bundle-0 (ocf:heartbeat:redis): Promoted metal-1
-      * redis-bundle-1 (ocf:heartbeat:redis): Promoted metal-2
-      * redis-bundle-2 (ocf:heartbeat:redis): Promoted metal-3
+      * redis-bundle-0 (ocf:heartbeat:redis): Stopped
+      * redis-bundle-1 (ocf:heartbeat:redis): Stopped
+      * redis-bundle-2 (ocf:heartbeat:redis): Stopped
diff --git a/cts/scheduler/summary/bundle-order-startup-clone.summary b/cts/scheduler/summary/bundle-order-startup-clone.summary
index 67ee8014e0f..b5f11260c62 100644
--- a/cts/scheduler/summary/bundle-order-startup-clone.summary
+++ b/cts/scheduler/summary/bundle-order-startup-clone.summary
@@ -1,7 +1,10 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ metal-1 metal-2 metal-3 ]
     * RemoteOFFLINE: [ rabbitmq-bundle-0 ]
+    * GuestOnline: [ galera-bundle-0 redis-bundle-0 ]
 
   * Full List of Resources:
     * Clone Set: storage-clone [storage]:
@@ -26,47 +29,14 @@ Transition Summary:
   * Start redis:0 ( redis-bundle-0 )
 
 Executing Cluster Transition:
-  * Resource action: storage:0 monitor on metal-1
-  * Resource action: storage:1 monitor on metal-2
-  * Resource action: storage:2 monitor on metal-3
-  * Resource action: galera-bundle-docker-0 monitor on metal-3
-  * Resource action: galera-bundle-docker-0 monitor on metal-2
-  * Resource action: galera-bundle-docker-0 monitor on metal-1
-  * Resource action: haproxy-bundle-docker-0 monitor on metal-3
-  * Resource action: haproxy-bundle-docker-0 monitor on metal-2
-  * Resource action: haproxy-bundle-docker-0 monitor on metal-1
-  * Pseudo action: redis-bundle-master_pre_notify_start_0
-  * Resource action: redis-bundle-docker-0 monitor on metal-3
-  * Resource action: redis-bundle-docker-0 monitor on metal-2
-  * Resource action: redis-bundle-docker-0 monitor on metal-1
-  * Pseudo action: redis-bundle_start_0
-  * Pseudo action: haproxy-bundle_start_0
-  * Resource action: haproxy-bundle-docker-0 start on metal-2
-  * Pseudo action: redis-bundle-master_confirmed-pre_notify_start_0
-  * Pseudo action: redis-bundle-master_start_0
-  * Resource action: redis-bundle-docker-0 start on metal-2
-  * Resource action: redis-bundle-0 monitor on metal-3
-  * Resource action: redis-bundle-0 monitor on metal-2
-  * Resource action: redis-bundle-0 monitor on metal-1
-  * Pseudo action: haproxy-bundle_running_0
-  * Resource action: haproxy-bundle-docker-0 monitor=60000 on metal-2
-  * Resource action: redis-bundle-docker-0 monitor=60000 on metal-2
-  * Resource action: redis-bundle-0 start on metal-2
-  * Resource action: redis:0 start on redis-bundle-0
-  * Pseudo action: redis-bundle-master_running_0
-  * Resource action: redis-bundle-0 monitor=30000 on metal-2
-  * Pseudo action: redis-bundle-master_post_notify_running_0
-  * Resource action: redis:0 notify on redis-bundle-0
-  * Pseudo action: redis-bundle-master_confirmed-post_notify_running_0
-  * Pseudo action: redis-bundle_running_0
-  * Resource action: redis:0 monitor=60000 on redis-bundle-0
-  * Resource action: redis:0 monitor=45000 on redis-bundle-0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ metal-1 metal-2 metal-3 ]
     * RemoteOFFLINE: [ rabbitmq-bundle-0 ]
-    * GuestOnline: [ redis-bundle-0 ]
+    * GuestOnline: [ galera-bundle-0 redis-bundle-0 ]
 
   * Full List of Resources:
     * Clone Set: storage-clone [storage]:
@@ -74,6 +44,6 @@ Revised Cluster Status:
     * Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
       * galera-bundle-0 (ocf:heartbeat:galera): Stopped
     * Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
-      * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started metal-2
+      * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped
     * Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
-      * redis-bundle-0 (ocf:heartbeat:redis): Unpromoted metal-2
+      * redis-bundle-0 (ocf:heartbeat:redis): Stopped
diff --git a/cts/scheduler/summary/bundle-order-startup.summary b/cts/scheduler/summary/bundle-order-startup.summary
index 72048900757..eee33945bc4 100644
--- a/cts/scheduler/summary/bundle-order-startup.summary
+++ b/cts/scheduler/summary/bundle-order-startup.summary
@@ -1,6 +1,9 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ undercloud ]
+    * GuestOnline: [ galera-bundle-0 rabbitmq-bundle-0 redis-bundle-0 ]
 
   * Full List of Resources:
     * Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
@@ -40,102 +43,28 @@ Transition Summary:
   * Start openstack-cinder-volume-docker-0 ( undercloud )
 
 Executing Cluster Transition:
-  * Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0
-  * Resource action: rabbitmq-bundle-docker-0 monitor on undercloud
-  * Resource action: galera-bundle-docker-0 monitor on undercloud
-  * Pseudo action: redis-bundle-master_pre_notify_start_0
-  * Resource action: redis-bundle-docker-0 monitor on undercloud
-  * Resource action: ip-192.168.122.254 monitor on undercloud
-  * Resource action: ip-192.168.122.250 monitor on undercloud
-  * Resource action: ip-192.168.122.249 monitor on undercloud
-  * Resource action: ip-192.168.122.253 monitor on undercloud
-  * Resource action: ip-192.168.122.247 monitor on undercloud
-  * Resource action: ip-192.168.122.248 monitor on undercloud
-  * Resource action: haproxy-bundle-docker-0 monitor on undercloud
-  * Resource action: openstack-cinder-volume-docker-0 monitor on undercloud
-  * Pseudo action: openstack-cinder-volume_start_0
-  * Pseudo action: rabbitmq-bundle_start_0
-  * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0
-  * Pseudo action: rabbitmq-bundle-clone_start_0
-  * Resource action: rabbitmq-bundle-docker-0 start on undercloud
-  * Resource action: rabbitmq-bundle-0 monitor on undercloud
-  * Pseudo action: redis-bundle-master_confirmed-pre_notify_start_0
-  * Resource action: ip-192.168.122.254 start on undercloud
-  * Resource action: ip-192.168.122.250 start on undercloud
-  * Resource action: ip-192.168.122.249 start on undercloud
-  * Resource action: ip-192.168.122.253 start on undercloud
-  * Resource action: ip-192.168.122.247 start on undercloud
-  * Resource action: ip-192.168.122.248 start on undercloud
-  * Resource action: openstack-cinder-volume-docker-0 start on undercloud
-  * Pseudo action: openstack-cinder-volume_running_0
-  * Pseudo action: haproxy-bundle_start_0
-  * Resource action: rabbitmq-bundle-docker-0 monitor=60000 on undercloud
-  * Resource action: rabbitmq-bundle-0 start on undercloud
-  * Resource action: ip-192.168.122.254 monitor=10000 on undercloud
-  * Resource action: ip-192.168.122.250 monitor=10000 on undercloud
-  * Resource action: ip-192.168.122.249 monitor=10000 on undercloud
-  * Resource action: ip-192.168.122.253 monitor=10000 on undercloud
-  * Resource action: ip-192.168.122.247 monitor=10000 on undercloud
-  * Resource action: ip-192.168.122.248 monitor=10000 on undercloud
-  * Resource action: haproxy-bundle-docker-0 start on undercloud
-  * Resource action: openstack-cinder-volume-docker-0 monitor=60000 on undercloud
-  * Pseudo action: haproxy-bundle_running_0
-  * Pseudo action: redis-bundle_start_0
-  * Pseudo action: galera-bundle_start_0
-  * Resource action: rabbitmq:0 start on rabbitmq-bundle-0
-  * Pseudo action: rabbitmq-bundle-clone_running_0
-  * Resource action: rabbitmq-bundle-0 monitor=30000 on undercloud
-  * Pseudo action: galera-bundle-master_start_0
-  * Resource action: galera-bundle-docker-0 start on undercloud
-  * Resource action: galera-bundle-0 monitor on undercloud
-  * Pseudo action: redis-bundle-master_start_0
-  * Resource action: redis-bundle-docker-0 start on undercloud
-  * Resource action: redis-bundle-0 monitor on undercloud
-  * Resource action: haproxy-bundle-docker-0 monitor=60000 on undercloud
-  * Pseudo action: rabbitmq-bundle-clone_post_notify_running_0
-  * Resource action: galera-bundle-docker-0 monitor=60000 on undercloud
-  * Resource action: galera-bundle-0 start on undercloud
-  * Resource action: redis-bundle-docker-0 monitor=60000 on undercloud
-  * Resource action: redis-bundle-0 start on undercloud
-  * Resource action: rabbitmq:0 notify on rabbitmq-bundle-0
-  * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0
-  * Resource action: galera:0 start on galera-bundle-0
-  * Pseudo action: galera-bundle-master_running_0
-  * Resource action: galera-bundle-0 monitor=30000 on undercloud
-  * Resource action: redis:0 start on redis-bundle-0
-  * Pseudo action: redis-bundle-master_running_0
-  * Resource action: redis-bundle-0 monitor=30000 on undercloud
-  * Pseudo action: galera-bundle_running_0
-  * Pseudo action: rabbitmq-bundle_running_0
-  * Resource action: rabbitmq:0 monitor=10000 on rabbitmq-bundle-0
-  * Resource action: galera:0 monitor=30000 on galera-bundle-0
-  * Resource action: galera:0 monitor=20000 on galera-bundle-0
-  * Pseudo action: redis-bundle-master_post_notify_running_0
-  * Resource action: redis:0 notify on redis-bundle-0
-  * Pseudo action: redis-bundle-master_confirmed-post_notify_running_0
-  * Pseudo action: redis-bundle_running_0
-  * Resource action: redis:0 monitor=60000 on redis-bundle-0
-  * Resource action: redis:0 monitor=45000 on redis-bundle-0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ undercloud ]
     * GuestOnline: [ galera-bundle-0 rabbitmq-bundle-0 redis-bundle-0 ]
 
   * Full List of Resources:
     * Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
-      * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started undercloud
+      * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped
     * Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
-      * galera-bundle-0 (ocf:heartbeat:galera): Unpromoted undercloud
+      * galera-bundle-0 (ocf:heartbeat:galera): Stopped
     * Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
-      * redis-bundle-0 (ocf:heartbeat:redis): Unpromoted undercloud
-    * ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Started undercloud
-    * ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Started undercloud
-    * ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Started undercloud
-    * ip-192.168.122.253 (ocf:heartbeat:IPaddr2): Started undercloud
-    * ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Started undercloud
-    * ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Started undercloud
+      * redis-bundle-0 (ocf:heartbeat:redis): Stopped
+    * ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Stopped
+    * ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Stopped
+    * ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Stopped
+    * ip-192.168.122.253 (ocf:heartbeat:IPaddr2): Stopped
+    * ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Stopped
+    * ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Stopped
     * Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
-      * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started undercloud
+      * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped
     * Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]:
-      * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started undercloud
+      * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Stopped
diff --git a/cts/scheduler/summary/bundle-order-stop-clone.summary b/cts/scheduler/summary/bundle-order-stop-clone.summary
index 46708d06e98..3cba9d81ce2 100644
--- a/cts/scheduler/summary/bundle-order-stop-clone.summary
+++ b/cts/scheduler/summary/bundle-order-stop-clone.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ metal-1 metal-2 metal-3 ]
     * RemoteOFFLINE: [ rabbitmq-bundle-0 ]
@@ -26,56 +28,31 @@ Transition Summary:
   * Stop galera-bundle-docker-0 ( metal-1 ) due to node availability
   * Stop galera-bundle-0 ( metal-1 ) due to unrunnable galera-bundle-docker-0 start
   * Stop galera:0 ( Unpromoted galera-bundle-0 ) due to unrunnable galera-bundle-docker-0 start
+  * Restart galera-bundle-docker-1 ( metal-2 ) due to required storage-clone notified
+  * Restart galera-bundle-1 ( metal-2 ) due to required galera-bundle-docker-1 start
+  * Restart galera:1 ( Unpromoted galera-bundle-1 ) due to required galera-bundle-docker-1 start
+  * Restart galera-bundle-docker-2 ( metal-3 ) due to required storage-clone notified
+  * Restart galera-bundle-2 ( metal-3 ) due to required galera-bundle-docker-2 start
+  * Restart galera:2 ( Unpromoted galera-bundle-2 ) due to required galera-bundle-docker-2 start
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Pseudo action: storage-clone_pre_notify_stop_0
-  * Resource action: galera-bundle-0 monitor on metal-3
-  * Resource action: galera-bundle-0 monitor on metal-2
-  * Resource action: galera-bundle-1 monitor on metal-3
-  * Resource action: galera-bundle-1 monitor on metal-1
-  * Resource action: galera-bundle-2 monitor on metal-2
-  * Resource action: galera-bundle-2 monitor on metal-1
-  * Resource action: redis-bundle-0 monitor on metal-3
-  * Resource action: redis-bundle-0 monitor on metal-2
-  * Resource action: redis-bundle-1 monitor on metal-3
-  * Resource action: redis-bundle-1 monitor on metal-1
-  * Resource action: redis-bundle-2 monitor on metal-2
-  * Resource action: redis-bundle-2 monitor on metal-1
-  * Pseudo action: galera-bundle_stop_0
-  * Resource action: storage:0 notify on metal-1
-  * Resource action: storage:1 notify on metal-2
-  * Resource action: storage:2 notify on metal-3
-  * Pseudo action: storage-clone_confirmed-pre_notify_stop_0
-  * Pseudo action: galera-bundle-master_stop_0
-  * Resource action: galera:0 stop on galera-bundle-0
-  * Pseudo action: galera-bundle-master_stopped_0
-  * Resource action: galera-bundle-0 stop on metal-1
-  * Resource action: galera-bundle-docker-0 stop on metal-1
-  * Pseudo action: galera-bundle_stopped_0
-  * Pseudo action: galera-bundle_start_0
-  * Pseudo action: storage-clone_stop_0
-  * Pseudo action: galera-bundle-master_start_0
-  * Resource action: storage:0 stop on metal-1
-  * Pseudo action: storage-clone_stopped_0
-  * Pseudo action: galera-bundle-master_running_0
-  * Pseudo action: galera-bundle_running_0
-  * Pseudo action: storage-clone_post_notify_stopped_0
-  * Resource action: storage:1 notify on metal-2
-  * Resource action: storage:2 notify on metal-3
-  * Pseudo action: storage-clone_confirmed-post_notify_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ metal-1 metal-2 metal-3 ]
     * RemoteOFFLINE: [ rabbitmq-bundle-0 ]
-    * GuestOnline: [ galera-bundle-1 galera-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
+    * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
 
   * Full List of Resources:
     * Clone Set: storage-clone [storage]:
-      * Started: [ metal-2 metal-3 ]
-      * Stopped: [ metal-1 rabbitmq-bundle-0 ]
+      * Started: [ metal-1 metal-2 metal-3 ]
+      * Stopped: [ rabbitmq-bundle-0 ]
     * Container bundle set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
-      * galera-bundle-0 (ocf:heartbeat:galera): Stopped
+      * galera-bundle-0 (ocf:heartbeat:galera): Unpromoted metal-1
       * galera-bundle-1 (ocf:heartbeat:galera): Unpromoted metal-2
       * galera-bundle-2 (ocf:heartbeat:galera): Unpromoted metal-3
     * Container bundle set: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
diff --git a/cts/scheduler/summary/bundle-order-stop-on-remote.summary b/cts/scheduler/summary/bundle-order-stop-on-remote.summary
index 612e7015da2..5fffe0c21df 100644
--- a/cts/scheduler/summary/bundle-order-stop-on-remote.summary
+++ b/cts/scheduler/summary/bundle-order-stop-on-remote.summary
@@ -1,10 +1,10 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
-    * RemoteNode database-0: UNCLEAN (offline)
-    * RemoteNode database-2: UNCLEAN (offline)
     * Online: [ controller-0 controller-1 controller-2 ]
-    * RemoteOnline: [ database-1 messaging-0 messaging-1 messaging-2 ]
-    * GuestOnline: [ galera-bundle-1 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-2 ]
+    * RemoteOnline: [ database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ]
+    * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
 
   * Full List of Resources:
     * database-0 (ocf:pacemaker:remote): Stopped
@@ -18,9 +18,9 @@ Current cluster status:
     * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started messaging-1
     * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started messaging-2
     * Container bundle set: galera-bundle [192.168.24.1:8787/rhosp12/openstack-mariadb-docker:pcmklatest]:
-      * galera-bundle-0 (ocf:heartbeat:galera): FAILED Promoted database-0 (UNCLEAN)
+      * galera-bundle-0 (ocf:heartbeat:galera): FAILED Promoted database-0
       * galera-bundle-1 (ocf:heartbeat:galera): Promoted database-1
-      * galera-bundle-2 (ocf:heartbeat:galera): FAILED Promoted database-2 (UNCLEAN)
+      * galera-bundle-2 (ocf:heartbeat:galera): FAILED Promoted database-2
     * Container bundle set: redis-bundle [192.168.24.1:8787/rhosp12/openstack-redis-docker:pcmklatest]:
       * redis-bundle-0 (ocf:heartbeat:redis): Unpromoted controller-0
       * redis-bundle-1 (ocf:heartbeat:redis): Stopped
@@ -71,122 +71,23 @@ Transition Summary:
   * Start stonith-fence_ipmilan-5254005ea387 ( controller-1 )
   * Start stonith-fence_ipmilan-525400542c06 ( controller-0 )
   * Start stonith-fence_ipmilan-525400498d34 ( controller-1 )
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Resource action: database-0 start on controller-0
-  * Resource action: database-2 start on controller-1
-  * Pseudo action: redis-bundle-master_pre_notify_start_0
-  * Resource action: stonith-fence_ipmilan-525400c709f7 start on controller-1
-  * Resource action: stonith-fence_ipmilan-5254005ea387 start on controller-1
-  * Resource action: stonith-fence_ipmilan-525400542c06 start on controller-0
-  * Resource action: stonith-fence_ipmilan-525400498d34 start on controller-1
-  * Pseudo action: redis-bundle_start_0
-  * Pseudo action: galera-bundle_demote_0
-  * Resource action: database-0 monitor=20000 on controller-0
-  * Resource action: database-2 monitor=20000 on controller-1
-  * Pseudo action: galera-bundle-master_demote_0
-  * Resource action: redis notify on redis-bundle-0
-  * Resource action: redis notify on redis-bundle-2
-  * Pseudo action: redis-bundle-master_confirmed-pre_notify_start_0
-  * Pseudo action: redis-bundle-master_start_0
-  * Resource action: stonith-fence_ipmilan-525400c709f7 monitor=60000 on controller-1
-  * Resource action: stonith-fence_ipmilan-5254005ea387 monitor=60000 on controller-1
-  * Resource action: stonith-fence_ipmilan-525400542c06 monitor=60000 on controller-0
-  * Resource action: stonith-fence_ipmilan-525400498d34 monitor=60000 on controller-1
-  * Pseudo action: galera_demote_0
-  * Pseudo action: galera_demote_0
-  * Pseudo action: galera-bundle-master_demoted_0
-  * Pseudo action: galera-bundle_demoted_0
-  * Pseudo action: galera-bundle_stop_0
-  * Resource action: galera-bundle-docker-0 stop on database-0
-  * Resource action: galera-bundle-docker-2 stop on database-2
-  * Pseudo action: stonith-galera-bundle-2-reboot on galera-bundle-2
-  * Pseudo action: stonith-galera-bundle-0-reboot on galera-bundle-0
-  * Pseudo action: galera-bundle-master_stop_0
-  * Resource action: redis-bundle-docker-1 start on controller-1
-  * Resource action: redis-bundle-1 monitor on controller-1
-  * Resource action: ip-192.168.24.11 start on controller-0
-  * Resource action: ip-10.0.0.104 start on controller-1
-  * Resource action: ip-172.17.1.11 start on controller-0
-  * Resource action: ip-172.17.3.13 start on controller-1
-  * Resource action: openstack-cinder-volume start on controller-0
-  * Pseudo action: haproxy-bundle_start_0
-  * Pseudo action: galera_stop_0
-  * Resource action: redis-bundle-docker-1 monitor=60000 on controller-1
-  * Resource action: redis-bundle-1 start on controller-1
-  * Resource action: ip-192.168.24.11 monitor=10000 on controller-0
-  * Resource action: ip-10.0.0.104 monitor=10000 on controller-1
-  * Resource action: ip-172.17.1.11 monitor=10000 on controller-0
-  * Resource action: ip-172.17.3.13 monitor=10000 on controller-1
-  * Resource action: haproxy-bundle-docker-1 start on controller-1
-  * Resource action: openstack-cinder-volume monitor=60000 on controller-0
-  * Pseudo action: haproxy-bundle_running_0
-  * Pseudo action: galera_stop_0
-  * Pseudo action: galera-bundle-master_stopped_0
-  * Resource action: redis start on redis-bundle-1
-  * Pseudo action: redis-bundle-master_running_0
-  * Resource action: redis-bundle-1 monitor=30000 on controller-1
-  * Resource action: haproxy-bundle-docker-1 monitor=60000 on controller-1
-  * Pseudo action: galera-bundle_stopped_0
-  * Pseudo action: galera-bundle_start_0
-  * Pseudo action: galera-bundle-master_start_0
-  * Resource action: galera-bundle-docker-0 start on database-0
-  * Resource action: galera-bundle-0 monitor on controller-1
-  * Resource action: galera-bundle-docker-2 start on database-2
-  * Resource action: galera-bundle-2 monitor on controller-1
-  * Pseudo action: redis-bundle-master_post_notify_running_0
-  * Resource action: galera-bundle-docker-0 monitor=60000 on database-0
-  * Resource action: galera-bundle-0 start on controller-0
-  * Resource action: galera-bundle-docker-2 monitor=60000 on database-2
-  * Resource action: galera-bundle-2 start on controller-1
-  * Resource action: redis notify on redis-bundle-0
-  * Resource action: redis notify on redis-bundle-2
-  * Resource action: redis notify on redis-bundle-1
-  * Pseudo action: redis-bundle-master_confirmed-post_notify_running_0
-  * Pseudo action: redis-bundle_running_0
-  * Resource action: galera start on galera-bundle-0
-  * Resource action: galera start on galera-bundle-2
-  * Pseudo action: galera-bundle-master_running_0
-  * Resource action: galera-bundle-0 monitor=30000 on controller-0
-  * Resource action: galera-bundle-2 monitor=30000 on controller-1
-  * Pseudo action: redis-bundle-master_pre_notify_promote_0
-  * Pseudo action: redis-bundle_promote_0
-  * Pseudo action: galera-bundle_running_0
-  * Resource action: redis notify on redis-bundle-0
-  * Resource action: redis notify on redis-bundle-2
-  * Resource action: redis notify on redis-bundle-1
-  * Pseudo action: redis-bundle-master_confirmed-pre_notify_promote_0
-  * Pseudo action: redis-bundle-master_promote_0
-  * Pseudo action: galera-bundle_promote_0
-  * Pseudo action: galera-bundle-master_promote_0
-  * Resource action: redis promote on redis-bundle-0
-  * Pseudo action: redis-bundle-master_promoted_0
-  * Resource action: galera promote on galera-bundle-0
-  * Resource action: galera promote on galera-bundle-2
-  * Pseudo action: galera-bundle-master_promoted_0
-  * Pseudo action: redis-bundle-master_post_notify_promoted_0
-  * Pseudo action: galera-bundle_promoted_0
-  * Resource action: galera monitor=10000 on galera-bundle-0
-  * Resource action: galera monitor=10000 on galera-bundle-2
-  * Resource action: redis notify on redis-bundle-0
-  * Resource action: redis notify on redis-bundle-2
-  * Resource action: redis notify on redis-bundle-1
-  * Pseudo action: redis-bundle-master_confirmed-post_notify_promoted_0
-  * Pseudo action: redis-bundle_promoted_0
-  * Resource action: redis monitor=20000 on redis-bundle-0
-  * Resource action: redis monitor=60000 on redis-bundle-1
-  * Resource action: redis monitor=45000 on redis-bundle-1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ controller-0 controller-1 controller-2 ]
     * RemoteOnline: [ database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ]
     * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
 
   * Full List of Resources:
-    * database-0 (ocf:pacemaker:remote): Started controller-0
+    * database-0 (ocf:pacemaker:remote): Stopped
     * database-1 (ocf:pacemaker:remote): Started controller-2
-    * database-2 (ocf:pacemaker:remote): Started controller-1
+    * database-2 (ocf:pacemaker:remote): Stopped
     * messaging-0 (ocf:pacemaker:remote): Started controller-2
     * messaging-1 (ocf:pacemaker:remote): Started controller-2
     * messaging-2 (ocf:pacemaker:remote): Started controller-2
@@ -195,30 +96,30 @@ Revised Cluster Status:
     * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started messaging-1
     * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started messaging-2
     * Container bundle set: galera-bundle [192.168.24.1:8787/rhosp12/openstack-mariadb-docker:pcmklatest]:
-      * galera-bundle-0 (ocf:heartbeat:galera): Promoted database-0
+      * galera-bundle-0 (ocf:heartbeat:galera): FAILED Promoted database-0
       * galera-bundle-1 (ocf:heartbeat:galera): Promoted database-1
-      * galera-bundle-2 (ocf:heartbeat:galera): Promoted database-2
+      * galera-bundle-2 (ocf:heartbeat:galera): FAILED Promoted database-2
     * Container bundle set: redis-bundle [192.168.24.1:8787/rhosp12/openstack-redis-docker:pcmklatest]:
-      * redis-bundle-0 (ocf:heartbeat:redis): Promoted controller-0
-      * redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-1
+      * redis-bundle-0 (ocf:heartbeat:redis): Unpromoted controller-0
+      * redis-bundle-1 (ocf:heartbeat:redis): Stopped
       * redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-2
-    * ip-192.168.24.11 (ocf:heartbeat:IPaddr2): Started controller-0
-    * ip-10.0.0.104 (ocf:heartbeat:IPaddr2): Started controller-1
+    * ip-192.168.24.11 (ocf:heartbeat:IPaddr2): Stopped
+    * ip-10.0.0.104 (ocf:heartbeat:IPaddr2): Stopped
     * ip-172.17.1.19 (ocf:heartbeat:IPaddr2): Started controller-2
-    * ip-172.17.1.11 (ocf:heartbeat:IPaddr2): Started controller-0
-    * ip-172.17.3.13 (ocf:heartbeat:IPaddr2): Started controller-1
+    * ip-172.17.1.11 (ocf:heartbeat:IPaddr2): Stopped
+    * ip-172.17.3.13 (ocf:heartbeat:IPaddr2): Stopped
     * ip-172.17.4.19 (ocf:heartbeat:IPaddr2): Started controller-2
     * Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp12/openstack-haproxy-docker:pcmklatest]:
       * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started controller-0
-      * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-1
+      * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Stopped
       * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-2
-    * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0
+    * openstack-cinder-volume (systemd:openstack-cinder-volume): Stopped
     * stonith-fence_ipmilan-525400244e09 (stonith:fence_ipmilan): Started controller-2
     * stonith-fence_ipmilan-525400cdec10 (stonith:fence_ipmilan): Started controller-2
-    * stonith-fence_ipmilan-525400c709f7 (stonith:fence_ipmilan): Started controller-1
+    * stonith-fence_ipmilan-525400c709f7 (stonith:fence_ipmilan): Stopped
     * stonith-fence_ipmilan-525400a7f9e0 (stonith:fence_ipmilan): Started controller-0
     * stonith-fence_ipmilan-525400a25787 (stonith:fence_ipmilan): Started controller-2
-    * stonith-fence_ipmilan-5254005ea387 (stonith:fence_ipmilan): Started controller-1
-    * stonith-fence_ipmilan-525400542c06 (stonith:fence_ipmilan): Started controller-0
+    * stonith-fence_ipmilan-5254005ea387 (stonith:fence_ipmilan): Stopped
+    * stonith-fence_ipmilan-525400542c06 (stonith:fence_ipmilan): Stopped
     * stonith-fence_ipmilan-525400aac413 (stonith:fence_ipmilan): Started controller-2
-    * stonith-fence_ipmilan-525400498d34 (stonith:fence_ipmilan): Started controller-1
+    * stonith-fence_ipmilan-525400498d34 (stonith:fence_ipmilan): Stopped
diff --git a/cts/scheduler/summary/bundle-order-stop.summary b/cts/scheduler/summary/bundle-order-stop.summary
index 5fc2efe04c1..4e9aff05d3b 100644
--- a/cts/scheduler/summary/bundle-order-stop.summary
+++ b/cts/scheduler/summary/bundle-order-stop.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ undercloud ]
     * GuestOnline: [ galera-bundle-0 rabbitmq-bundle-0 redis-bundle-0 ]
@@ -20,108 +22,3 @@ Current cluster status:
       * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started undercloud
     * Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]:
       * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started undercloud
-
-Transition Summary:
-  * Stop rabbitmq-bundle-docker-0 ( undercloud ) due to node availability
-  * Stop rabbitmq-bundle-0 ( undercloud ) due to node availability
-  * Stop rabbitmq:0 ( rabbitmq-bundle-0 ) due to colocation with haproxy-bundle-docker-0
-  * Stop galera-bundle-docker-0 ( undercloud ) due to node availability
-  * Stop galera-bundle-0 ( undercloud ) due to node availability
-  * Stop galera:0 ( Promoted galera-bundle-0 ) due to unrunnable galera-bundle-0 start
-  * Stop redis-bundle-docker-0 ( undercloud ) due to node availability
-  * Stop redis-bundle-0 ( undercloud ) due to node availability
-  * Stop redis:0 ( Promoted redis-bundle-0 ) due to unrunnable redis-bundle-0 start
-  * Stop ip-192.168.122.254 ( undercloud ) due to node availability
-  * Stop ip-192.168.122.250 ( undercloud ) due to node availability
-  * Stop ip-192.168.122.249 ( undercloud ) due to node availability
-  * Stop ip-192.168.122.253 ( undercloud ) due to node availability
-  * Stop ip-192.168.122.247 ( undercloud ) due to node availability
-  * Stop ip-192.168.122.248 ( undercloud ) due to node availability
-  * Stop haproxy-bundle-docker-0 ( undercloud ) due to node availability
-  * Stop openstack-cinder-volume-docker-0 ( undercloud ) due to node availability
-
-Executing Cluster Transition:
-  * Pseudo action: rabbitmq-bundle-clone_pre_notify_stop_0
-  * Resource action: galera cancel=10000 on galera-bundle-0
-  * Resource action: redis cancel=20000 on redis-bundle-0
-  * Pseudo action: redis-bundle-master_pre_notify_demote_0
-  * Pseudo action: openstack-cinder-volume_stop_0
-  * Pseudo action: haproxy-bundle_stop_0
-  * Pseudo action: redis-bundle_demote_0
-  * Pseudo action: galera-bundle_demote_0
-  * Pseudo action: rabbitmq-bundle_stop_0
-  * Resource action: rabbitmq notify on rabbitmq-bundle-0
-  * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_stop_0
-  * Pseudo action: rabbitmq-bundle-clone_stop_0
-  * Pseudo action: galera-bundle-master_demote_0
-  * Resource action: redis notify on redis-bundle-0
-  * Pseudo action: redis-bundle-master_confirmed-pre_notify_demote_0
-  * Pseudo action: redis-bundle-master_demote_0
-  * Resource action: haproxy-bundle-docker-0 stop on undercloud
-  * Resource action: openstack-cinder-volume-docker-0 stop on undercloud
-  * Pseudo action: openstack-cinder-volume_stopped_0
-  * Pseudo action: haproxy-bundle_stopped_0
-  * Resource action: rabbitmq stop on rabbitmq-bundle-0
-  * Pseudo action: rabbitmq-bundle-clone_stopped_0
-  * Resource action: rabbitmq-bundle-0 stop on undercloud
-  * Resource action: galera demote on galera-bundle-0
-  * Pseudo action: galera-bundle-master_demoted_0
-  * Resource action: redis demote on redis-bundle-0
-  * Pseudo action: redis-bundle-master_demoted_0
-  * Resource action: ip-192.168.122.254 stop on undercloud
-  * Resource action: ip-192.168.122.250 stop on undercloud
-  * Resource action: ip-192.168.122.249 stop on undercloud
-  * Resource action: ip-192.168.122.253 stop on undercloud
-  * Resource action: ip-192.168.122.247 stop on undercloud
-  * Resource action: ip-192.168.122.248 stop on undercloud
-  * Pseudo action: galera-bundle_demoted_0
-  * Pseudo action: galera-bundle_stop_0
-  * Pseudo action: rabbitmq-bundle-clone_post_notify_stopped_0
-  * Resource action: rabbitmq-bundle-docker-0 stop on undercloud
-  * Pseudo action: galera-bundle-master_stop_0
-  * Pseudo action: redis-bundle-master_post_notify_demoted_0
-  * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_stopped_0
-  * Resource action: galera stop on galera-bundle-0
-  * Pseudo action: galera-bundle-master_stopped_0
-  * Resource action: galera-bundle-0 stop on undercloud
-  * Resource action: redis notify on redis-bundle-0
-  * Pseudo action: redis-bundle-master_confirmed-post_notify_demoted_0
-  * Pseudo action: redis-bundle-master_pre_notify_stop_0
-  * Pseudo action: redis-bundle_demoted_0
-  * Pseudo action: rabbitmq-bundle_stopped_0
-  * Resource action: galera-bundle-docker-0 stop on undercloud
-  * Resource action: redis notify on redis-bundle-0
-  * Pseudo action: redis-bundle-master_confirmed-pre_notify_stop_0
-  * Pseudo action: galera-bundle_stopped_0
-  * Pseudo action: redis-bundle_stop_0
-  * Pseudo action: redis-bundle-master_stop_0
-  * Resource action: redis stop on redis-bundle-0
-  * Pseudo action: redis-bundle-master_stopped_0
-  * Resource action: redis-bundle-0 stop on undercloud
-  * Pseudo action: redis-bundle-master_post_notify_stopped_0
-  * Resource action: redis-bundle-docker-0 stop on undercloud
-  * Cluster action: do_shutdown on undercloud
-  * Pseudo action: redis-bundle-master_confirmed-post_notify_stopped_0
-  * Pseudo action: redis-bundle_stopped_0
-
-Revised Cluster Status:
-  * Node List:
-    * Online: [ undercloud ]
-
-  * Full List of Resources:
-    * Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
-      * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped
-    * Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
-      * galera-bundle-0 (ocf:heartbeat:galera): Stopped
-    * Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
-      * redis-bundle-0 (ocf:heartbeat:redis): Stopped
-    * ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Stopped
-    * ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Stopped
-    * ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Stopped
-    * ip-192.168.122.253 (ocf:heartbeat:IPaddr2): Stopped
-    * ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Stopped
-    * ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Stopped
-    * Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
-      * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped
-    * Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]:
-      * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Stopped
diff --git a/cts/scheduler/summary/bundle-probe-order-1.summary b/cts/scheduler/summary/bundle-probe-order-1.summary
index c885e438655..8c0b98dd510 100644
--- a/cts/scheduler/summary/bundle-probe-order-1.summary
+++ b/cts/scheduler/summary/bundle-probe-order-1.summary
@@ -1,5 +1,7 @@
 Using the original execution date of: 2017-10-12 07:31:56Z
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ centos1 centos2 centos3 ]
 
@@ -12,18 +14,11 @@ Current cluster status:
 Transition Summary:
 
 Executing Cluster Transition:
-  * Resource action: galera-bundle-docker-0 monitor on centos3
-  * Resource action: galera-bundle-docker-0 monitor on centos2
-  * Resource action: galera-bundle-docker-0 monitor on centos1
-  * Resource action: galera-bundle-docker-1 monitor on centos3
-  * Resource action: galera-bundle-docker-1 monitor on centos2
-  * Resource action: galera-bundle-docker-1 monitor on centos1
-  * Resource action: galera-bundle-docker-2 monitor on centos3
-  * Resource action: galera-bundle-docker-2 monitor on centos2
-  * Resource action: galera-bundle-docker-2 monitor on centos1
 Using the original execution date of: 2017-10-12 07:31:56Z
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ centos1 centos2 centos3 ]
 
diff --git a/cts/scheduler/summary/bundle-probe-order-2.summary b/cts/scheduler/summary/bundle-probe-order-2.summary
index aecc2a498bd..2a19d60eb75 100644
--- a/cts/scheduler/summary/bundle-probe-order-2.summary
+++ b/cts/scheduler/summary/bundle-probe-order-2.summary
@@ -1,5 +1,7 @@
 Using the original execution date of: 2017-10-12 07:31:57Z
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * GuestNode galera-bundle-0: maintenance
     * Online: [ centos1 centos2 centos3 ]
@@ -13,16 +15,11 @@ Current cluster status:
 Transition Summary:
 
 Executing Cluster Transition:
-  * Resource action: galera:0 monitor on galera-bundle-0
-  * Resource action: galera-bundle-docker-0 monitor=60000 on centos2
-  * Resource action: galera-bundle-0 monitor=30000 on centos2
-  * Resource action: galera-bundle-docker-1 monitor on centos2
-  * Resource action: galera-bundle-docker-2 monitor on centos3
-  * Resource action: galera-bundle-docker-2 monitor on centos2
-  * Resource action: galera-bundle-docker-2 monitor on centos1
 Using the original execution date of: 2017-10-12 07:31:57Z
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * GuestNode galera-bundle-0: maintenance
     * Online: [ centos1 centos2 centos3 ]
diff --git a/cts/scheduler/summary/bundle-probe-order-3.summary b/cts/scheduler/summary/bundle-probe-order-3.summary
index 331bd87f629..dd058134490 100644
--- a/cts/scheduler/summary/bundle-probe-order-3.summary
+++ b/cts/scheduler/summary/bundle-probe-order-3.summary
@@ -1,5 +1,7 @@
 Using the original execution date of: 2017-10-12 07:31:57Z
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ centos1 centos2 centos3 ]
 
@@ -12,17 +14,11 @@ Current cluster status:
 Transition Summary:
 
 Executing Cluster Transition:
-  * Resource action: galera-bundle-docker-0 monitor=60000 on centos2
-  * Resource action: galera-bundle-0 monitor on centos3
-  * Resource action: galera-bundle-0 monitor on centos2
-  * Resource action: galera-bundle-0 monitor on centos1
-  * Resource action: galera-bundle-docker-1 monitor on centos2
-  * Resource action: galera-bundle-docker-2 monitor on centos3
-  * Resource action: galera-bundle-docker-2 monitor on centos2
-  * Resource action: galera-bundle-docker-2 monitor on centos1
 Using the original execution date of: 2017-10-12 07:31:57Z
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ centos1 centos2 centos3 ]
 
action: scale1-bundle-docker-0 monitor on c09-h10-r630 - * Resource action: scale1-bundle-docker-0 monitor on c09-h09-r630 - * Resource action: scale1-bundle-docker-0 monitor on c09-h08-r630 - * Resource action: scale1-bundle-docker-1 monitor on c09-h10-r630 - * Resource action: scale1-bundle-docker-1 monitor on c09-h09-r630 - * Resource action: scale1-bundle-docker-1 monitor on c09-h08-r630 - * Resource action: scale1-bundle-docker-2 monitor on c09-h10-r630 - * Resource action: scale1-bundle-docker-2 monitor on c09-h09-r630 - * Resource action: scale1-bundle-docker-2 monitor on c09-h08-r630 - * Resource action: scale1-bundle-docker-3 monitor on c09-h10-r630 - * Resource action: scale1-bundle-docker-3 monitor on c09-h09-r630 - * Resource action: scale1-bundle-docker-3 monitor on c09-h08-r630 - * Resource action: scale1-bundle-docker-4 monitor on c09-h10-r630 - * Resource action: scale1-bundle-docker-4 monitor on c09-h09-r630 - * Resource action: scale1-bundle-docker-4 monitor on c09-h08-r630 - * Resource action: scale1-bundle-docker-5 monitor on c09-h10-r630 - * Resource action: scale1-bundle-docker-5 monitor on c09-h09-r630 - * Resource action: scale1-bundle-docker-5 monitor on c09-h08-r630 - * Resource action: c09-h08-r630 monitor=60000 on c09-h05-r630 - * Resource action: c09-h09-r630 monitor=60000 on c09-h06-r630 - * Resource action: c09-h10-r630 monitor=60000 on c09-h07-r630 - * Pseudo action: scale1-bundle-clone_start_0 - * Resource action: scale1-bundle-docker-0 start on c09-h05-r630 - * Resource action: scale1-bundle-0 monitor on c09-h07-r630 - * Resource action: scale1-bundle-0 monitor on c09-h06-r630 - * Resource action: scale1-bundle-0 monitor on c09-h05-r630 - * Resource action: scale1-bundle-docker-1 start on c09-h06-r630 - * Resource action: scale1-bundle-1 monitor on c09-h07-r630 - * Resource action: scale1-bundle-1 monitor on c09-h06-r630 - * Resource action: scale1-bundle-1 monitor on c09-h05-r630 - * Resource action: scale1-bundle-docker-2 start on c09-h07-r630 - * Resource action: scale1-bundle-2 monitor on c09-h07-r630 - * Resource action: scale1-bundle-2 monitor on c09-h06-r630 - * Resource action: scale1-bundle-2 monitor on c09-h05-r630 - * Resource action: scale1-bundle-docker-3 start on c09-h08-r630 - * Resource action: scale1-bundle-3 monitor on c09-h07-r630 - * Resource action: scale1-bundle-3 monitor on c09-h06-r630 - * Resource action: scale1-bundle-3 monitor on c09-h05-r630 - * Resource action: scale1-bundle-docker-4 start on c09-h09-r630 - * Resource action: scale1-bundle-4 monitor on c09-h07-r630 - * Resource action: scale1-bundle-4 monitor on c09-h06-r630 - * Resource action: scale1-bundle-4 monitor on c09-h05-r630 - * Resource action: scale1-bundle-docker-5 start on c09-h10-r630 - * Resource action: scale1-bundle-5 monitor on c09-h07-r630 - * Resource action: scale1-bundle-5 monitor on c09-h06-r630 - * Resource action: scale1-bundle-5 monitor on c09-h05-r630 - * Resource action: scale1-bundle-docker-0 monitor=60000 on c09-h05-r630 - * Resource action: scale1-bundle-0 start on c09-h05-r630 - * Resource action: scale1-bundle-docker-1 monitor=60000 on c09-h06-r630 - * Resource action: scale1-bundle-1 start on c09-h06-r630 - * Resource action: scale1-bundle-docker-2 monitor=60000 on c09-h07-r630 - * Resource action: scale1-bundle-2 start on c09-h07-r630 - * Resource action: scale1-bundle-docker-3 monitor=60000 on c09-h08-r630 - * Resource action: scale1-bundle-3 start on c09-h05-r630 - * Resource action: scale1-bundle-docker-4 monitor=60000 on c09-h09-r630 - * 
Resource action: scale1-bundle-4 start on c09-h06-r630 - * Resource action: scale1-bundle-docker-5 monitor=60000 on c09-h10-r630 - * Resource action: scale1-bundle-5 start on c09-h07-r630 - * Resource action: dummy1:0 start on scale1-bundle-0 - * Resource action: dummy1:1 start on scale1-bundle-1 - * Resource action: dummy1:2 start on scale1-bundle-2 - * Resource action: dummy1:3 start on scale1-bundle-3 - * Resource action: dummy1:4 start on scale1-bundle-4 - * Resource action: dummy1:5 start on scale1-bundle-5 - * Pseudo action: scale1-bundle-clone_running_0 - * Resource action: scale1-bundle-0 monitor=30000 on c09-h05-r630 - * Resource action: scale1-bundle-1 monitor=30000 on c09-h06-r630 - * Resource action: scale1-bundle-2 monitor=30000 on c09-h07-r630 - * Resource action: scale1-bundle-3 monitor=30000 on c09-h05-r630 - * Resource action: scale1-bundle-4 monitor=30000 on c09-h06-r630 - * Resource action: scale1-bundle-5 monitor=30000 on c09-h07-r630 - * Pseudo action: scale1-bundle_running_0 - * Resource action: dummy1:0 monitor=10000 on scale1-bundle-0 - * Resource action: dummy1:1 monitor=10000 on scale1-bundle-1 - * Resource action: dummy1:2 monitor=10000 on scale1-bundle-2 - * Resource action: dummy1:3 monitor=10000 on scale1-bundle-3 - * Resource action: dummy1:4 monitor=10000 on scale1-bundle-4 - * Resource action: dummy1:5 monitor=10000 on scale1-bundle-5 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ c09-h05-r630 c09-h06-r630 c09-h07-r630 ] * RemoteOnline: [ c09-h08-r630 c09-h09-r630 c09-h10-r630 ] * GuestOnline: [ scale1-bundle-0 scale1-bundle-1 scale1-bundle-2 scale1-bundle-3 scale1-bundle-4 scale1-bundle-5 ] * Full List of Resources: - * c09-h08-r630 (ocf:pacemaker:remote): Started c09-h05-r630 - * c09-h09-r630 (ocf:pacemaker:remote): Started c09-h06-r630 - * c09-h10-r630 (ocf:pacemaker:remote): Started c09-h07-r630 + * c09-h08-r630 (ocf:pacemaker:remote): Stopped + * c09-h09-r630 (ocf:pacemaker:remote): Stopped + * c09-h10-r630 (ocf:pacemaker:remote): Stopped * Container bundle set: scale1-bundle [beekhof:remote]: - * scale1-bundle-0 (ocf:pacemaker:Dummy): Started c09-h05-r630 - * scale1-bundle-1 (ocf:pacemaker:Dummy): Started c09-h06-r630 - * scale1-bundle-2 (ocf:pacemaker:Dummy): Started c09-h07-r630 - * scale1-bundle-3 (ocf:pacemaker:Dummy): Started c09-h08-r630 - * scale1-bundle-4 (ocf:pacemaker:Dummy): Started c09-h09-r630 - * scale1-bundle-5 (ocf:pacemaker:Dummy): Started c09-h10-r630 + * scale1-bundle-0 (ocf:pacemaker:Dummy): Stopped + * scale1-bundle-1 (ocf:pacemaker:Dummy): Stopped + * scale1-bundle-2 (ocf:pacemaker:Dummy): Stopped + * scale1-bundle-3 (ocf:pacemaker:Dummy): Stopped + * scale1-bundle-4 (ocf:pacemaker:Dummy): Stopped + * scale1-bundle-5 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/bundle-promoted-anticolocation-1.summary b/cts/scheduler/summary/bundle-promoted-anticolocation-1.summary index ec6cf2b14b8..936718bf127 100644 --- a/cts/scheduler/summary/bundle-promoted-anticolocation-1.summary +++ b/cts/scheduler/summary/bundle-promoted-anticolocation-1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -15,11 +17,10 @@ Transition Summary: * Move vip ( node3 -> node1 ) Executing Cluster Transition: - * Resource action: vip stop on node3 - * Resource action: vip start on node1 - * Resource action: vip monitor=10000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: 
* Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -30,4 +31,4 @@ Revised Cluster Status: * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1 * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3 - * vip (ocf:heartbeat:IPaddr2): Started node1 + * vip (ocf:heartbeat:IPaddr2): Started node3 diff --git a/cts/scheduler/summary/bundle-promoted-anticolocation-2.summary b/cts/scheduler/summary/bundle-promoted-anticolocation-2.summary index ec6cf2b14b8..936718bf127 100644 --- a/cts/scheduler/summary/bundle-promoted-anticolocation-2.summary +++ b/cts/scheduler/summary/bundle-promoted-anticolocation-2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -15,11 +17,10 @@ Transition Summary: * Move vip ( node3 -> node1 ) Executing Cluster Transition: - * Resource action: vip stop on node3 - * Resource action: vip start on node1 - * Resource action: vip monitor=10000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -30,4 +31,4 @@ Revised Cluster Status: * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1 * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3 - * vip (ocf:heartbeat:IPaddr2): Started node1 + * vip (ocf:heartbeat:IPaddr2): Started node3 diff --git a/cts/scheduler/summary/bundle-promoted-anticolocation-3.summary b/cts/scheduler/summary/bundle-promoted-anticolocation-3.summary index e9db462f463..002fc49d2f6 100644 --- a/cts/scheduler/summary/bundle-promoted-anticolocation-3.summary +++ b/cts/scheduler/summary/bundle-promoted-anticolocation-3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -16,22 +18,10 @@ Transition Summary: * Demote base:2 ( Promoted -> Unpromoted base-bundle-2 ) Executing Cluster Transition: - * Resource action: base cancel=16000 on base-bundle-1 - * Resource action: base cancel=15000 on base-bundle-2 - * Pseudo action: base-bundle_demote_0 - * Pseudo action: base-bundle-clone_demote_0 - * Resource action: base demote on base-bundle-2 - * Pseudo action: base-bundle-clone_demoted_0 - * Pseudo action: base-bundle_demoted_0 - * Pseudo action: base-bundle_promote_0 - * Resource action: base monitor=16000 on base-bundle-2 - * Pseudo action: base-bundle-clone_promote_0 - * Resource action: base promote on base-bundle-1 - * Pseudo action: base-bundle-clone_promoted_0 - * Pseudo action: base-bundle_promoted_0 - * Resource action: base monitor=15000 on base-bundle-1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -40,6 +30,6 @@ Revised Cluster Status: * Fencing (stonith:fence_xvm): Started node2 * Container bundle set: base-bundle [localhost/pcmktest]: * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1 - * base-bundle-1 (ocf:pacemaker:Stateful): Promoted node2 - * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3 * vip (ocf:heartbeat:IPaddr2): Started node3 diff --git 
a/cts/scheduler/summary/bundle-promoted-anticolocation-4.summary b/cts/scheduler/summary/bundle-promoted-anticolocation-4.summary index e9db462f463..002fc49d2f6 100644 --- a/cts/scheduler/summary/bundle-promoted-anticolocation-4.summary +++ b/cts/scheduler/summary/bundle-promoted-anticolocation-4.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -16,22 +18,10 @@ Transition Summary: * Demote base:2 ( Promoted -> Unpromoted base-bundle-2 ) Executing Cluster Transition: - * Resource action: base cancel=16000 on base-bundle-1 - * Resource action: base cancel=15000 on base-bundle-2 - * Pseudo action: base-bundle_demote_0 - * Pseudo action: base-bundle-clone_demote_0 - * Resource action: base demote on base-bundle-2 - * Pseudo action: base-bundle-clone_demoted_0 - * Pseudo action: base-bundle_demoted_0 - * Pseudo action: base-bundle_promote_0 - * Resource action: base monitor=16000 on base-bundle-2 - * Pseudo action: base-bundle-clone_promote_0 - * Resource action: base promote on base-bundle-1 - * Pseudo action: base-bundle-clone_promoted_0 - * Pseudo action: base-bundle_promoted_0 - * Resource action: base monitor=15000 on base-bundle-1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -40,6 +30,6 @@ Revised Cluster Status: * Fencing (stonith:fence_xvm): Started node2 * Container bundle set: base-bundle [localhost/pcmktest]: * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1 - * base-bundle-1 (ocf:pacemaker:Stateful): Promoted node2 - * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3 * vip (ocf:heartbeat:IPaddr2): Started node3 diff --git a/cts/scheduler/summary/bundle-promoted-anticolocation-5.summary b/cts/scheduler/summary/bundle-promoted-anticolocation-5.summary index c35f2e0ec83..32ca25a0f07 100644 --- a/cts/scheduler/summary/bundle-promoted-anticolocation-5.summary +++ b/cts/scheduler/summary/bundle-promoted-anticolocation-5.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ bundle-a-0 bundle-a-1 bundle-a-2 bundle-b-0 bundle-b-1 bundle-b-2 ] @@ -19,22 +22,11 @@ Transition Summary: * Promote bundle-a-rsc:2 ( Unpromoted -> Promoted bundle-a-2 ) Executing Cluster Transition: - * Resource action: bundle-a-rsc cancel=16000 on bundle-a-2 - * Resource action: bundle-a-rsc cancel=15000 on bundle-a-1 - * Pseudo action: bundle-a_demote_0 - * Pseudo action: bundle-a-clone_demote_0 - * Resource action: bundle-a-rsc demote on bundle-a-1 - * Pseudo action: bundle-a-clone_demoted_0 - * Pseudo action: bundle-a_demoted_0 - * Pseudo action: bundle-a_promote_0 - * Resource action: bundle-a-rsc monitor=16000 on bundle-a-1 - * Pseudo action: bundle-a-clone_promote_0 - * Resource action: bundle-a-rsc promote on bundle-a-2 - * Pseudo action: bundle-a-clone_promoted_0 - * Pseudo action: bundle-a_promoted_0 - * Resource action: bundle-a-rsc monitor=15000 on bundle-a-2 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ bundle-a-0 bundle-a-1 bundle-a-2 bundle-b-0 bundle-b-1 bundle-b-2 ] @@ 
-43,8 +35,8 @@ Revised Cluster Status: * Fencing (stonith:fence_xvm): Started node2 * Container bundle set: bundle-a [localhost/pcmktest]: * bundle-a-0 (ocf:pacemaker:Stateful): Unpromoted node1 - * bundle-a-1 (ocf:pacemaker:Stateful): Unpromoted node3 - * bundle-a-2 (ocf:pacemaker:Stateful): Promoted node2 + * bundle-a-1 (ocf:pacemaker:Stateful): Promoted node3 + * bundle-a-2 (ocf:pacemaker:Stateful): Unpromoted node2 * Container bundle set: bundle-b [localhost/pcmktest]: * bundle-b-0 (ocf:pacemaker:Stateful): Unpromoted node1 * bundle-b-1 (ocf:pacemaker:Stateful): Promoted node3 diff --git a/cts/scheduler/summary/bundle-promoted-anticolocation-6.summary b/cts/scheduler/summary/bundle-promoted-anticolocation-6.summary index c35f2e0ec83..32ca25a0f07 100644 --- a/cts/scheduler/summary/bundle-promoted-anticolocation-6.summary +++ b/cts/scheduler/summary/bundle-promoted-anticolocation-6.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ bundle-a-0 bundle-a-1 bundle-a-2 bundle-b-0 bundle-b-1 bundle-b-2 ] @@ -19,22 +22,11 @@ Transition Summary: * Promote bundle-a-rsc:2 ( Unpromoted -> Promoted bundle-a-2 ) Executing Cluster Transition: - * Resource action: bundle-a-rsc cancel=16000 on bundle-a-2 - * Resource action: bundle-a-rsc cancel=15000 on bundle-a-1 - * Pseudo action: bundle-a_demote_0 - * Pseudo action: bundle-a-clone_demote_0 - * Resource action: bundle-a-rsc demote on bundle-a-1 - * Pseudo action: bundle-a-clone_demoted_0 - * Pseudo action: bundle-a_demoted_0 - * Pseudo action: bundle-a_promote_0 - * Resource action: bundle-a-rsc monitor=16000 on bundle-a-1 - * Pseudo action: bundle-a-clone_promote_0 - * Resource action: bundle-a-rsc promote on bundle-a-2 - * Pseudo action: bundle-a-clone_promoted_0 - * Pseudo action: bundle-a_promoted_0 - * Resource action: bundle-a-rsc monitor=15000 on bundle-a-2 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ bundle-a-0 bundle-a-1 bundle-a-2 bundle-b-0 bundle-b-1 bundle-b-2 ] @@ -43,8 +35,8 @@ Revised Cluster Status: * Fencing (stonith:fence_xvm): Started node2 * Container bundle set: bundle-a [localhost/pcmktest]: * bundle-a-0 (ocf:pacemaker:Stateful): Unpromoted node1 - * bundle-a-1 (ocf:pacemaker:Stateful): Unpromoted node3 - * bundle-a-2 (ocf:pacemaker:Stateful): Promoted node2 + * bundle-a-1 (ocf:pacemaker:Stateful): Promoted node3 + * bundle-a-2 (ocf:pacemaker:Stateful): Unpromoted node2 * Container bundle set: bundle-b [localhost/pcmktest]: * bundle-b-0 (ocf:pacemaker:Stateful): Unpromoted node1 * bundle-b-1 (ocf:pacemaker:Stateful): Promoted node3 diff --git a/cts/scheduler/summary/bundle-promoted-colocation-1.summary b/cts/scheduler/summary/bundle-promoted-colocation-1.summary index 61cc9745ca7..0a0f4b64167 100644 --- a/cts/scheduler/summary/bundle-promoted-colocation-1.summary +++ b/cts/scheduler/summary/bundle-promoted-colocation-1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -15,11 +17,10 @@ Transition Summary: * Move vip ( node1 -> node3 ) Executing Cluster Transition: - * Resource action: vip stop on node1 - * Resource action: vip start on node3 - * Resource action: vip monitor=10000 on node3 Revised Cluster Status: + * Cluster Summary: + * Node 
List: * Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -30,4 +31,4 @@ Revised Cluster Status: * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1 * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3 - * vip (ocf:heartbeat:IPaddr2): Started node3 + * vip (ocf:heartbeat:IPaddr2): Started node1 diff --git a/cts/scheduler/summary/bundle-promoted-colocation-2.summary b/cts/scheduler/summary/bundle-promoted-colocation-2.summary index 61cc9745ca7..0a0f4b64167 100644 --- a/cts/scheduler/summary/bundle-promoted-colocation-2.summary +++ b/cts/scheduler/summary/bundle-promoted-colocation-2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -15,11 +17,10 @@ Transition Summary: * Move vip ( node1 -> node3 ) Executing Cluster Transition: - * Resource action: vip stop on node1 - * Resource action: vip start on node3 - * Resource action: vip monitor=10000 on node3 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -30,4 +31,4 @@ Revised Cluster Status: * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1 * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3 - * vip (ocf:heartbeat:IPaddr2): Started node3 + * vip (ocf:heartbeat:IPaddr2): Started node1 diff --git a/cts/scheduler/summary/bundle-promoted-colocation-3.summary b/cts/scheduler/summary/bundle-promoted-colocation-3.summary index 64b41579a6b..efb810ef8ed 100644 --- a/cts/scheduler/summary/bundle-promoted-colocation-3.summary +++ b/cts/scheduler/summary/bundle-promoted-colocation-3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -16,22 +18,10 @@ Transition Summary: * Demote base:2 ( Promoted -> Unpromoted base-bundle-2 ) Executing Cluster Transition: - * Resource action: base cancel=16000 on base-bundle-0 - * Resource action: base cancel=15000 on base-bundle-2 - * Pseudo action: base-bundle_demote_0 - * Pseudo action: base-bundle-clone_demote_0 - * Resource action: base demote on base-bundle-2 - * Pseudo action: base-bundle-clone_demoted_0 - * Pseudo action: base-bundle_demoted_0 - * Pseudo action: base-bundle_promote_0 - * Resource action: base monitor=16000 on base-bundle-2 - * Pseudo action: base-bundle-clone_promote_0 - * Resource action: base promote on base-bundle-0 - * Pseudo action: base-bundle-clone_promoted_0 - * Pseudo action: base-bundle_promoted_0 - * Resource action: base monitor=15000 on base-bundle-0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -39,7 +29,7 @@ Revised Cluster Status: * Full List of Resources: * Fencing (stonith:fence_xvm): Started node2 * Container bundle set: base-bundle [localhost/pcmktest]: - * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node1 + * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1 * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 - * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node3 + * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3 * vip (ocf:heartbeat:IPaddr2): Started node1 diff --git 
a/cts/scheduler/summary/bundle-promoted-colocation-4.summary b/cts/scheduler/summary/bundle-promoted-colocation-4.summary index 64b41579a6b..efb810ef8ed 100644 --- a/cts/scheduler/summary/bundle-promoted-colocation-4.summary +++ b/cts/scheduler/summary/bundle-promoted-colocation-4.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -16,22 +18,10 @@ Transition Summary: * Demote base:2 ( Promoted -> Unpromoted base-bundle-2 ) Executing Cluster Transition: - * Resource action: base cancel=16000 on base-bundle-0 - * Resource action: base cancel=15000 on base-bundle-2 - * Pseudo action: base-bundle_demote_0 - * Pseudo action: base-bundle-clone_demote_0 - * Resource action: base demote on base-bundle-2 - * Pseudo action: base-bundle-clone_demoted_0 - * Pseudo action: base-bundle_demoted_0 - * Pseudo action: base-bundle_promote_0 - * Resource action: base monitor=16000 on base-bundle-2 - * Pseudo action: base-bundle-clone_promote_0 - * Resource action: base promote on base-bundle-0 - * Pseudo action: base-bundle-clone_promoted_0 - * Pseudo action: base-bundle_promoted_0 - * Resource action: base monitor=15000 on base-bundle-0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -39,7 +29,7 @@ Revised Cluster Status: * Full List of Resources: * Fencing (stonith:fence_xvm): Started node2 * Container bundle set: base-bundle [localhost/pcmktest]: - * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node1 + * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1 * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 - * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node3 + * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3 * vip (ocf:heartbeat:IPaddr2): Started node1 diff --git a/cts/scheduler/summary/bundle-promoted-colocation-5.summary b/cts/scheduler/summary/bundle-promoted-colocation-5.summary index dbcf940b12a..db9c9658cbb 100644 --- a/cts/scheduler/summary/bundle-promoted-colocation-5.summary +++ b/cts/scheduler/summary/bundle-promoted-colocation-5.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ bundle-a-0 bundle-a-1 bundle-a-2 bundle-b-0 bundle-b-1 bundle-b-2 ] @@ -19,22 +22,11 @@ Transition Summary: * Promote bundle-a-rsc:2 ( Unpromoted -> Promoted bundle-a-2 ) Executing Cluster Transition: - * Resource action: bundle-a-rsc cancel=16000 on bundle-a-2 - * Resource action: bundle-a-rsc cancel=15000 on bundle-a-1 - * Pseudo action: bundle-a_demote_0 - * Pseudo action: bundle-a-clone_demote_0 - * Resource action: bundle-a-rsc demote on bundle-a-1 - * Pseudo action: bundle-a-clone_demoted_0 - * Pseudo action: bundle-a_demoted_0 - * Pseudo action: bundle-a_promote_0 - * Resource action: bundle-a-rsc monitor=16000 on bundle-a-1 - * Pseudo action: bundle-a-clone_promote_0 - * Resource action: bundle-a-rsc promote on bundle-a-2 - * Pseudo action: bundle-a-clone_promoted_0 - * Pseudo action: bundle-a_promoted_0 - * Resource action: bundle-a-rsc monitor=15000 on bundle-a-2 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ bundle-a-0 bundle-a-1 bundle-a-2 bundle-b-0 bundle-b-1 bundle-b-2 ] @@ -43,8 
+35,8 @@ Revised Cluster Status: * Fencing (stonith:fence_xvm): Started node2 * Container bundle set: bundle-a [localhost/pcmktest]: * bundle-a-0 (ocf:pacemaker:Stateful): Unpromoted node1 - * bundle-a-1 (ocf:pacemaker:Stateful): Unpromoted node3 - * bundle-a-2 (ocf:pacemaker:Stateful): Promoted node2 + * bundle-a-1 (ocf:pacemaker:Stateful): Promoted node3 + * bundle-a-2 (ocf:pacemaker:Stateful): Unpromoted node2 * Container bundle set: bundle-b [localhost/pcmktest]: * bundle-b-0 (ocf:pacemaker:Stateful): Unpromoted node1 * bundle-b-1 (ocf:pacemaker:Stateful): Unpromoted node3 diff --git a/cts/scheduler/summary/bundle-promoted-colocation-6.summary b/cts/scheduler/summary/bundle-promoted-colocation-6.summary index dbcf940b12a..db9c9658cbb 100644 --- a/cts/scheduler/summary/bundle-promoted-colocation-6.summary +++ b/cts/scheduler/summary/bundle-promoted-colocation-6.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ bundle-a-0 bundle-a-1 bundle-a-2 bundle-b-0 bundle-b-1 bundle-b-2 ] @@ -19,22 +22,11 @@ Transition Summary: * Promote bundle-a-rsc:2 ( Unpromoted -> Promoted bundle-a-2 ) Executing Cluster Transition: - * Resource action: bundle-a-rsc cancel=16000 on bundle-a-2 - * Resource action: bundle-a-rsc cancel=15000 on bundle-a-1 - * Pseudo action: bundle-a_demote_0 - * Pseudo action: bundle-a-clone_demote_0 - * Resource action: bundle-a-rsc demote on bundle-a-1 - * Pseudo action: bundle-a-clone_demoted_0 - * Pseudo action: bundle-a_demoted_0 - * Pseudo action: bundle-a_promote_0 - * Resource action: bundle-a-rsc monitor=16000 on bundle-a-1 - * Pseudo action: bundle-a-clone_promote_0 - * Resource action: bundle-a-rsc promote on bundle-a-2 - * Pseudo action: bundle-a-clone_promoted_0 - * Pseudo action: bundle-a_promoted_0 - * Resource action: bundle-a-rsc monitor=15000 on bundle-a-2 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ bundle-a-0 bundle-a-1 bundle-a-2 bundle-b-0 bundle-b-1 bundle-b-2 ] @@ -43,8 +35,8 @@ Revised Cluster Status: * Fencing (stonith:fence_xvm): Started node2 * Container bundle set: bundle-a [localhost/pcmktest]: * bundle-a-0 (ocf:pacemaker:Stateful): Unpromoted node1 - * bundle-a-1 (ocf:pacemaker:Stateful): Unpromoted node3 - * bundle-a-2 (ocf:pacemaker:Stateful): Promoted node2 + * bundle-a-1 (ocf:pacemaker:Stateful): Promoted node3 + * bundle-a-2 (ocf:pacemaker:Stateful): Unpromoted node2 * Container bundle set: bundle-b [localhost/pcmktest]: * bundle-b-0 (ocf:pacemaker:Stateful): Unpromoted node1 * bundle-b-1 (ocf:pacemaker:Stateful): Unpromoted node3 diff --git a/cts/scheduler/summary/bundle-promoted-location-1.summary b/cts/scheduler/summary/bundle-promoted-location-1.summary index 4c0a0ab2fa5..af5f4404dc5 100644 --- a/cts/scheduler/summary/bundle-promoted-location-1.summary +++ b/cts/scheduler/summary/bundle-promoted-location-1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -15,6 +17,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] diff --git a/cts/scheduler/summary/bundle-promoted-location-2.summary 
b/cts/scheduler/summary/bundle-promoted-location-2.summary index bd3b3a92b7c..bd65a91b1c3 100644 --- a/cts/scheduler/summary/bundle-promoted-location-2.summary +++ b/cts/scheduler/summary/bundle-promoted-location-2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -15,40 +17,21 @@ Transition Summary: * Stop base-bundle-0 ( node3 ) due to unrunnable base-bundle-podman-0 start * Stop base:0 ( Promoted base-bundle-0 ) due to unrunnable base-bundle-podman-0 start * Promote base:1 ( Unpromoted -> Promoted base-bundle-1 ) +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Resource action: base cancel=16000 on base-bundle-1 - * Resource action: base cancel=15000 on base-bundle-0 - * Pseudo action: base-bundle_demote_0 - * Pseudo action: base-bundle-clone_demote_0 - * Resource action: base demote on base-bundle-0 - * Pseudo action: base-bundle-clone_demoted_0 - * Pseudo action: base-bundle_demoted_0 - * Pseudo action: base-bundle_stop_0 - * Pseudo action: base-bundle-clone_stop_0 - * Resource action: base stop on base-bundle-0 - * Pseudo action: base-bundle-clone_stopped_0 - * Pseudo action: base-bundle-clone_start_0 - * Resource action: base-bundle-0 stop on node3 - * Pseudo action: base-bundle-clone_running_0 - * Resource action: base-bundle-podman-0 stop on node3 - * Pseudo action: base-bundle_stopped_0 - * Pseudo action: base-bundle_running_0 - * Pseudo action: base-bundle_promote_0 - * Pseudo action: base-bundle-clone_promote_0 - * Resource action: base promote on base-bundle-1 - * Pseudo action: base-bundle-clone_promoted_0 - * Pseudo action: base-bundle_promoted_0 - * Resource action: base monitor=15000 on base-bundle-1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] - * GuestOnline: [ base-bundle-1 base-bundle-2 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started node2 * Container bundle set: base-bundle [localhost/pcmktest]: - * base-bundle-0 (ocf:pacemaker:Stateful): Stopped - * base-bundle-1 (ocf:pacemaker:Stateful): Promoted node2 + * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1 diff --git a/cts/scheduler/summary/bundle-promoted-location-3.summary b/cts/scheduler/summary/bundle-promoted-location-3.summary index 4c0a0ab2fa5..af5f4404dc5 100644 --- a/cts/scheduler/summary/bundle-promoted-location-3.summary +++ b/cts/scheduler/summary/bundle-promoted-location-3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -15,6 +17,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] diff --git a/cts/scheduler/summary/bundle-promoted-location-4.summary b/cts/scheduler/summary/bundle-promoted-location-4.summary index 4c0a0ab2fa5..af5f4404dc5 100644 --- a/cts/scheduler/summary/bundle-promoted-location-4.summary +++ b/cts/scheduler/summary/bundle-promoted-location-4.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ 
base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -15,6 +17,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] diff --git a/cts/scheduler/summary/bundle-promoted-location-5.summary b/cts/scheduler/summary/bundle-promoted-location-5.summary index 4c0a0ab2fa5..af5f4404dc5 100644 --- a/cts/scheduler/summary/bundle-promoted-location-5.summary +++ b/cts/scheduler/summary/bundle-promoted-location-5.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -15,6 +17,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] diff --git a/cts/scheduler/summary/bundle-promoted-location-6.summary b/cts/scheduler/summary/bundle-promoted-location-6.summary index 5e1cce23a31..9d4444b437b 100644 --- a/cts/scheduler/summary/bundle-promoted-location-6.summary +++ b/cts/scheduler/summary/bundle-promoted-location-6.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] @@ -16,25 +18,21 @@ Transition Summary: * Stop base:1 ( Unpromoted base-bundle-1 ) due to unrunnable base-bundle-podman-1 start Executing Cluster Transition: - * Pseudo action: base-bundle_stop_0 - * Pseudo action: base-bundle-clone_stop_0 - * Resource action: base stop on base-bundle-1 - * Pseudo action: base-bundle-clone_stopped_0 - * Pseudo action: base-bundle-clone_start_0 - * Resource action: base-bundle-1 stop on node2 - * Pseudo action: base-bundle-clone_running_0 - * Resource action: base-bundle-podman-1 stop on node2 - * Pseudo action: base-bundle_stopped_0 - * Pseudo action: base-bundle_running_0 + * Pseudo action: base-bundle-clone_promoted_0 + * Pseudo action: base-bundle_promoted_0 +Transition failed: terminated +An invalid transition was produced Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] - * GuestOnline: [ base-bundle-0 base-bundle-2 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started node2 * Container bundle set: base-bundle [localhost/pcmktest]: * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node3 - * base-bundle-1 (ocf:pacemaker:Stateful): Stopped + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1 diff --git a/cts/scheduler/summary/bundle-replicas-change.summary b/cts/scheduler/summary/bundle-replicas-change.summary index 5cc92f3ead8..61f1b0aa059 100644 --- a/cts/scheduler/summary/bundle-replicas-change.summary +++ b/cts/scheduler/summary/bundle-replicas-change.summary @@ -1,7 +1,10 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ rh74-test ] - * GuestOnline: [ httpd-bundle-0 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] * Full List of Resources: * Container bundle set: httpd-bundle [pcmktest:http] (unique): @@ -25,53 +28,18 @@ Transition Summary: * Stop httpd ( httpd-bundle-0 ) due to node availability Executing Cluster Transition: - * Resource action: httpd-bundle-ip-192.168.20.189 
monitor on rh74-test - * Resource action: httpd-bundle-docker-1 monitor on rh74-test - * Resource action: httpd-bundle-ip-192.168.20.190 monitor on rh74-test - * Resource action: httpd-bundle-docker-2 monitor on rh74-test - * Resource action: httpd stop on httpd-bundle-0 - * Pseudo action: httpd-bundle_stop_0 - * Pseudo action: httpd-bundle_start_0 - * Resource action: httpd-bundle-0 stop on rh74-test - * Resource action: httpd-bundle-ip-192.168.20.189 start on rh74-test - * Resource action: httpd-bundle-docker-1 start on rh74-test - * Resource action: httpd-bundle-1 monitor on rh74-test - * Resource action: httpd-bundle-ip-192.168.20.190 start on rh74-test - * Resource action: httpd-bundle-docker-2 start on rh74-test - * Resource action: httpd-bundle-2 monitor on rh74-test - * Resource action: httpd-bundle-docker-0 stop on rh74-test - * Resource action: httpd-bundle-docker-0 start on rh74-test - * Resource action: httpd-bundle-docker-0 monitor=60000 on rh74-test - * Resource action: httpd-bundle-0 start on rh74-test - * Resource action: httpd-bundle-0 monitor=30000 on rh74-test - * Resource action: httpd-bundle-ip-192.168.20.189 monitor=60000 on rh74-test - * Resource action: httpd-bundle-docker-1 monitor=60000 on rh74-test - * Resource action: httpd-bundle-1 start on rh74-test - * Resource action: httpd-bundle-ip-192.168.20.190 monitor=60000 on rh74-test - * Resource action: httpd-bundle-docker-2 monitor=60000 on rh74-test - * Resource action: httpd-bundle-2 start on rh74-test - * Resource action: httpd delete on httpd-bundle-0 - * Pseudo action: httpd-bundle_stopped_0 - * Resource action: httpd:0 monitor on httpd-bundle-0 - * Pseudo action: httpd-bundle-clone_start_0 - * Resource action: httpd-bundle-1 monitor=30000 on rh74-test - * Resource action: httpd-bundle-2 monitor=30000 on rh74-test - * Resource action: httpd:0 start on httpd-bundle-0 - * Resource action: httpd:1 start on httpd-bundle-1 - * Resource action: httpd:2 start on httpd-bundle-2 - * Pseudo action: httpd-bundle-clone_running_0 - * Pseudo action: httpd-bundle_running_0 - * Resource action: httpd:0 monitor=10000 on httpd-bundle-0 - * Resource action: httpd:1 monitor=10000 on httpd-bundle-1 - * Resource action: httpd:2 monitor=10000 on httpd-bundle-2 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ rh74-test ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] * Full List of Resources: * Container bundle set: httpd-bundle [pcmktest:http] (unique): - * httpd-bundle-0 (192.168.20.188) (ocf:heartbeat:apache): Started rh74-test - * httpd-bundle-1 (192.168.20.189) (ocf:heartbeat:apache): Started rh74-test - * httpd-bundle-2 (192.168.20.190) (ocf:heartbeat:apache): Started rh74-test + * httpd-bundle-0 (192.168.20.188) (ocf:heartbeat:apache): Stopped rh74-test + * httpd-bundle-1 (192.168.20.189) (ocf:heartbeat:apache): Stopped + * httpd-bundle-2 (192.168.20.190) (ocf:heartbeat:apache): Stopped + * httpd (ocf:heartbeat:apache): ORPHANED Started httpd-bundle-0 diff --git a/cts/scheduler/summary/cancel-behind-moving-remote.summary b/cts/scheduler/summary/cancel-behind-moving-remote.summary index 945f3c81da5..b1c5c581759 100644 --- a/cts/scheduler/summary/cancel-behind-moving-remote.summary +++ b/cts/scheduler/summary/cancel-behind-moving-remote.summary @@ -1,10 +1,39 @@ Using the original execution date of: 2021-02-15 01:40:51Z Current cluster status: + * Cluster Summary: +Only 'private' parameters to 0s-interval start for 
stonith-fence_ipmilan-52540078fb07 on database-0 changed: 0:0;294:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-52540078fb07 on database-0 changed: 0:0;296:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400e018b6 on database-0 changed: 0:0;311:56:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400e018b6 on database-0 changed: 0:0;312:56:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 0s-interval start for stonith-fence_compute-fence-nova on database-1 changed: 0:0;273:48:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 60s-interval monitor for stonith-fence_compute-fence-nova on database-1 changed: 0:0;275:48:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400ea59b0 on database-1 changed: 0:0;298:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400ea59b0 on database-1 changed: 0:0;299:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400e1534e on database-1 changed: 0:0;301:39:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400e1534e on database-1 changed: 0:0;302:39:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 0s-interval start for nova-evacuate on database-2 changed: 0:0;279:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 10s-interval monitor for nova-evacuate on database-2 changed: 0:0;280:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400066e50 on database-2 changed: 0:0;304:56:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400066e50 on database-2 changed: 0:0;305:56:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-52540060dbba on database-2 changed: 0:0;306:51:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-52540060dbba on database-2 changed: 0:0;307:51:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400aa1373 on messaging-0 changed: 0:0;282:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400aa1373 on messaging-0 changed: 0:0;284:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400addd38 on messaging-0 changed: 0:0;295:48:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400addd38 on messaging-0 changed: 0:0;297:48:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400c87cdb on messaging-0 changed: 0:0;310:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400c87cdb on messaging-0 changed: 0:0;311:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400dc23e0 on 
messaging-2 changed: 0:0;286:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400dc23e0 on messaging-2 changed: 0:0;288:58:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-52540040bb56 on messaging-2 changed: 0:0;293:56:0:ef0c178d-d0fc-4118-9005-2571eab8a55d +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-52540040bb56 on messaging-2 changed: 0:0;295:56:0:ef0c178d-d0fc-4118-9005-2571eab8a55d + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-2 ] * OFFLINE: [ messaging-1 ] * RemoteOnline: [ compute-0 compute-1 ] - * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ] + * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ] * Full List of Resources: * compute-0 (ocf:pacemaker:remote): Started controller-1 @@ -39,7 +68,7 @@ Current cluster status: * stonith-fence_compute-fence-nova (stonith:fence_compute): Started database-1 * Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]: * Started: [ compute-0 compute-1 ] - * Stopped: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ] + * Stopped: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ] * nova-evacuate (ocf:openstack:NovaEvacuate): Started database-2 * stonith-fence_ipmilan-525400aa1373 (stonith:fence_ipmilan): Started messaging-0 * stonith-fence_ipmilan-525400dc23e0 (stonith:fence_ipmilan): Started messaging-2 @@ -67,77 +96,19 @@ Transition Summary: * Move stonith-fence_ipmilan-525400e1534e ( database-1 -> messaging-2 ) Executing Cluster Transition: - * Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0 - * Resource action: ovndb_servers cancel=30000 on ovn-dbs-bundle-2 - * Pseudo action: ovn-dbs-bundle-master_pre_notify_start_0 - * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on messaging-0 - * Cluster action: clear_failcount for nova-evacuate on messaging-0 - * Cluster action: clear_failcount for stonith-fence_ipmilan-525400aa1373 on database-0 - * Cluster action: clear_failcount for stonith-fence_ipmilan-525400dc23e0 on database-2 - * Resource action: stonith-fence_ipmilan-52540040bb56 stop on messaging-2 - * Cluster action: clear_failcount for stonith-fence_ipmilan-52540078fb07 on messaging-2 - * Cluster action: clear_failcount for stonith-fence_ipmilan-525400ea59b0 on database-0 - * Cluster action: clear_failcount for stonith-fence_ipmilan-525400066e50 on messaging-2 - * Resource action: stonith-fence_ipmilan-525400e1534e stop on database-1 - * Cluster action: clear_failcount for stonith-fence_ipmilan-525400e1534e on database-2 - * Cluster action: clear_failcount for stonith-fence_ipmilan-52540060dbba on messaging-0 - * Cluster action: clear_failcount for stonith-fence_ipmilan-525400e018b6 on database-0 
- * Cluster action: clear_failcount for stonith-fence_ipmilan-525400c87cdb on database-2 - * Pseudo action: ovn-dbs-bundle_start_0 - * Pseudo action: rabbitmq-bundle_start_0 - * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0 - * Pseudo action: rabbitmq-bundle-clone_start_0 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 - * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_start_0 - * Pseudo action: ovn-dbs-bundle-master_start_0 - * Resource action: ovn-dbs-bundle-podman-0 start on controller-0 - * Resource action: ovn-dbs-bundle-0 start on controller-0 - * Resource action: stonith-fence_ipmilan-52540040bb56 start on database-0 - * Resource action: stonith-fence_ipmilan-525400e1534e start on messaging-2 - * Pseudo action: rabbitmq-bundle-clone_running_0 - * Resource action: ovndb_servers start on ovn-dbs-bundle-0 - * Pseudo action: ovn-dbs-bundle-master_running_0 - * Resource action: ovn-dbs-bundle-podman-0 monitor=60000 on controller-0 - * Resource action: ovn-dbs-bundle-0 monitor=30000 on controller-0 - * Resource action: stonith-fence_ipmilan-52540040bb56 monitor=60000 on database-0 - * Resource action: stonith-fence_ipmilan-525400e1534e monitor=60000 on messaging-2 - * Pseudo action: rabbitmq-bundle-clone_post_notify_running_0 - * Pseudo action: ovn-dbs-bundle-master_post_notify_running_0 - * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-0 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 - * Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_running_0 - * Pseudo action: ovn-dbs-bundle_running_0 - * Pseudo action: rabbitmq-bundle_running_0 - * Pseudo action: ovn-dbs-bundle-master_pre_notify_promote_0 - * Pseudo action: ovn-dbs-bundle_promote_0 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-0 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 - * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_promote_0 - * Pseudo action: ovn-dbs-bundle-master_promote_0 - * Resource action: ip-172.17.1.87 start on controller-1 - * Resource action: ovndb_servers promote on ovn-dbs-bundle-2 - * Pseudo action: ovn-dbs-bundle-master_promoted_0 - * Resource action: ip-172.17.1.87 monitor=10000 on controller-1 - * Pseudo action: ovn-dbs-bundle-master_post_notify_promoted_0 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-0 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 - * Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_promoted_0 - * Pseudo action: ovn-dbs-bundle_promoted_0 - * Resource action: ovndb_servers monitor=10000 on ovn-dbs-bundle-2 - * Resource action: ovndb_servers monitor=30000 on ovn-dbs-bundle-0 + * Resource action: rabbitmq notify on rabbitmq-bundle-0 + * Resource action: rabbitmq notify on rabbitmq-bundle-2 Using the original execution date of: 2021-02-15 01:40:51Z Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-2 ] * OFFLINE: [ messaging-1 ] * RemoteOnline: [ compute-0 compute-1 ] - * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 
ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ] + * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ] * Full List of Resources: * compute-0 (ocf:pacemaker:remote): Started controller-1 @@ -165,23 +136,23 @@ Revised Cluster Status: * haproxy-bundle-podman-1 (ocf:heartbeat:podman): Started controller-0 * haproxy-bundle-podman-2 (ocf:heartbeat:podman): Started controller-1 * Container bundle set: ovn-dbs-bundle [cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest]: - * ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Unpromoted controller-0 + * ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Stopped * ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Unpromoted controller-2 - * ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Promoted controller-1 - * ip-172.17.1.87 (ocf:heartbeat:IPaddr2): Started controller-1 + * ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Unpromoted controller-1 + * ip-172.17.1.87 (ocf:heartbeat:IPaddr2): Stopped * stonith-fence_compute-fence-nova (stonith:fence_compute): Started database-1 * Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]: * Started: [ compute-0 compute-1 ] - * Stopped: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ] + * Stopped: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ] * nova-evacuate (ocf:openstack:NovaEvacuate): Started database-2 * stonith-fence_ipmilan-525400aa1373 (stonith:fence_ipmilan): Started messaging-0 * stonith-fence_ipmilan-525400dc23e0 (stonith:fence_ipmilan): Started messaging-2 - * stonith-fence_ipmilan-52540040bb56 (stonith:fence_ipmilan): Started database-0 + * stonith-fence_ipmilan-52540040bb56 (stonith:fence_ipmilan): Started messaging-2 * stonith-fence_ipmilan-525400addd38 (stonith:fence_ipmilan): Started messaging-0 * stonith-fence_ipmilan-52540078fb07 (stonith:fence_ipmilan): Started database-0 * stonith-fence_ipmilan-525400ea59b0 (stonith:fence_ipmilan): Started database-1 * stonith-fence_ipmilan-525400066e50 (stonith:fence_ipmilan): Started database-2 - * stonith-fence_ipmilan-525400e1534e (stonith:fence_ipmilan): Started messaging-2 + * stonith-fence_ipmilan-525400e1534e (stonith:fence_ipmilan): Started database-1 * stonith-fence_ipmilan-52540060dbba (stonith:fence_ipmilan): Started database-2 * stonith-fence_ipmilan-525400e018b6 (stonith:fence_ipmilan): Started database-0 * stonith-fence_ipmilan-525400c87cdb (stonith:fence_ipmilan): Started messaging-0 diff --git a/cts/scheduler/summary/clbz5007-promotable-colocation.summary b/cts/scheduler/summary/clbz5007-promotable-colocation.summary index 58348bc77fe..26175a90368 100644 --- a/cts/scheduler/summary/clbz5007-promotable-colocation.summary +++ b/cts/scheduler/summary/clbz5007-promotable-colocation.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder fc16-builder2 ] @@ -14,12 +16,10 @@ Transition Summary: * Move PROMOTED_IP ( fc16-builder2 -> fc16-builder ) Executing Cluster Transition: - * Resource action: UNPROMOTED_IP stop on fc16-builder - * Resource action: 
PROMOTED_IP stop on fc16-builder2
-  * Resource action: UNPROMOTED_IP start on fc16-builder2
-  * Resource action: PROMOTED_IP start on fc16-builder
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ fc16-builder fc16-builder2 ]
@@ -27,5 +27,5 @@ Revised Cluster Status:
     * Clone Set: MS_DUMMY [DUMMY] (promotable):
       * Promoted: [ fc16-builder ]
       * Unpromoted: [ fc16-builder2 ]
-    * UNPROMOTED_IP (ocf:pacemaker:Dummy): Started fc16-builder2
-    * PROMOTED_IP (ocf:pacemaker:Dummy): Started fc16-builder
+    * UNPROMOTED_IP (ocf:pacemaker:Dummy): Started fc16-builder
+    * PROMOTED_IP (ocf:pacemaker:Dummy): Started fc16-builder2
diff --git a/cts/scheduler/summary/clone-anon-dup.summary b/cts/scheduler/summary/clone-anon-dup.summary
index 1e807b751fd..434aa070c03 100644
--- a/cts/scheduler/summary/clone-anon-dup.summary
+++ b/cts/scheduler/summary/clone-anon-dup.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ wc01 wc02 wc03 ]
 
@@ -13,23 +16,22 @@ Current cluster status:
 Transition Summary:
   * Start stonith-1 ( wc01 )
   * Stop apache2:2 ( wc02 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Resource action: stonith-1 monitor on wc03
-  * Resource action: stonith-1 monitor on wc02
-  * Resource action: stonith-1 monitor on wc01
-  * Pseudo action: clone_webservice_stop_0
-  * Resource action: stonith-1 start on wc01
-  * Pseudo action: group_webservice:2_stop_0
-  * Resource action: apache2:0 stop on wc02
-  * Pseudo action: group_webservice:2_stopped_0
-  * Pseudo action: clone_webservice_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ wc01 wc02 wc03 ]
 
   * Full List of Resources:
-    * stonith-1 (stonith:dummy): Started wc01
+    * stonith-1 (stonith:dummy): Stopped
     * Clone Set: clone_webservice [group_webservice]:
+      * Resource Group: group_webservice:2:
+        * fs_www (ocf:heartbeat:Filesystem): ORPHANED Stopped
+        * apache2 (ocf:heartbeat:apache): ORPHANED Started wc02
       * Started: [ wc01 wc02 ]
diff --git a/cts/scheduler/summary/clone-anon-failcount.summary b/cts/scheduler/summary/clone-anon-failcount.summary
index 8d4f369e3e1..70c5f310a83 100644
--- a/cts/scheduler/summary/clone-anon-failcount.summary
+++ b/cts/scheduler/summary/clone-anon-failcount.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ srv01 srv02 srv03 srv04 ]
 
@@ -46,52 +49,24 @@ Transition Summary:
   * Restart clnUMdummy02:0 ( srv04 ) due to required clnUMdummy01:0 start
   * Stop clnUMdummy01:1 ( srv01 ) due to node availability
   * Stop clnUMdummy02:1 ( srv01 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Pseudo action: UMgroup01_stop_0
-  * Resource action: UmDummy02 stop on srv01
-  * Resource action: UmDummy01 stop on srv01
-  * Resource action: UmIPaddr stop on srv01
-  * Resource action: UmVIPcheck stop on srv01
-  * Pseudo action: UMgroup01_stopped_0
-  * Pseudo action: clnUMgroup01_stop_0
-  * Pseudo action: clnUmResource:0_stop_0
-  * Resource action: clnUMdummy02:1 stop on srv04
-  * Pseudo action: clnUmResource:1_stop_0
-  * Resource action: clnUMdummy02:0 stop on srv01
-  * Resource action: clnUMdummy01:1 stop on srv04
-  * Resource action: clnUMdummy01:0 stop on srv01
-  * Pseudo action: clnUmResource:0_stopped_0
-  * Pseudo action: clnUmResource:1_stopped_0
-  * Pseudo action: clnUMgroup01_stopped_0
-  * Pseudo action: clnUMgroup01_start_0
-  * Pseudo action: clnUmResource:0_start_0
-  * Resource action: clnUMdummy01:1 start on srv04
-  * Resource action: clnUMdummy01:1 monitor=10000 on srv04
-  * Resource action: clnUMdummy02:1 start on srv04
-  * Resource action: clnUMdummy02:1 monitor=10000 on srv04
-  * Pseudo action: clnUmResource:0_running_0
-  * Pseudo action: clnUMgroup01_running_0
-  * Pseudo action: UMgroup01_start_0
-  * Resource action: UmVIPcheck start on srv04
-  * Resource action: UmIPaddr start on srv04
-  * Resource action: UmDummy01 start on srv04
-  * Resource action: UmDummy02 start on srv04
-  * Pseudo action: UMgroup01_running_0
-  * Resource action: UmIPaddr monitor=10000 on srv04
-  * Resource action: UmDummy01 monitor=10000 on srv04
-  * Resource action: UmDummy02 monitor=10000 on srv04
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ srv01 srv02 srv03 srv04 ]
 
   * Full List of Resources:
     * Resource Group: UMgroup01:
-      * UmVIPcheck (ocf:pacemaker:Dummy): Started srv04
-      * UmIPaddr (ocf:pacemaker:Dummy): Started srv04
-      * UmDummy01 (ocf:pacemaker:Dummy): Started srv04
-      * UmDummy02 (ocf:pacemaker:Dummy): Started srv04
+      * UmVIPcheck (ocf:pacemaker:Dummy): Started srv01
+      * UmIPaddr (ocf:pacemaker:Dummy): Started srv01
+      * UmDummy01 (ocf:pacemaker:Dummy): Started srv01
+      * UmDummy02 (ocf:pacemaker:Dummy): Started srv01
     * Resource Group: OVDBgroup02-1:
       * prmExPostgreSQLDB1 (ocf:pacemaker:Dummy): Started srv01
     * Resource Group: OVDBgroup02-2:
@@ -107,8 +82,11 @@ Revised Cluster Status:
     * Resource Group: grpStonith4:
       * prmStonithN4 (stonith:external/ssh): Started srv03
     * Clone Set: clnUMgroup01 [clnUmResource]:
-      * Started: [ srv04 ]
-      * Stopped: [ srv01 srv02 srv03 ]
+      * Resource Group: clnUmResource:0:
+        * clnUMdummy01 (ocf:pacemaker:Dummy): FAILED srv04
+        * clnUMdummy02 (ocf:pacemaker:Dummy): Started srv04
+      * Started: [ srv01 ]
+      * Stopped: [ srv02 srv03 ]
     * Clone Set: clnPingd [clnPrmPingd]:
       * Started: [ srv01 srv02 srv03 srv04 ]
     * Clone Set: clnDiskd1 [clnPrmDiskd1]:
diff --git a/cts/scheduler/summary/clone-anon-probe-1.summary b/cts/scheduler/summary/clone-anon-probe-1.summary
index 51cf914a004..de252d03f45 100644
--- a/cts/scheduler/summary/clone-anon-probe-1.summary
+++ b/cts/scheduler/summary/clone-anon-probe-1.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ mysql-01 mysql-02 ]
 
@@ -11,17 +13,13 @@ Transition Summary:
   * Start drbd0:1 ( mysql-02 )
 
 Executing Cluster Transition:
-  * Resource action: drbd0:0 monitor on mysql-01
-  * Resource action: drbd0:1 monitor on mysql-02
-  * Pseudo action: ms-drbd0_start_0
-  * Resource action: drbd0:0 start on mysql-01
-  * Resource action: drbd0:1 start on mysql-02
-  * Pseudo action: ms-drbd0_running_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ mysql-01 mysql-02 ]
 
   * Full List of Resources:
     * Clone Set: ms-drbd0 [drbd0]:
-      * Started: [ mysql-01 mysql-02 ]
+      * Stopped: [ mysql-01 mysql-02 ]
diff --git a/cts/scheduler/summary/clone-anon-probe-2.summary b/cts/scheduler/summary/clone-anon-probe-2.summary
index 79a2fb8785e..4a7a7bee760 100644
--- a/cts/scheduler/summary/clone-anon-probe-2.summary
+++ b/cts/scheduler/summary/clone-anon-probe-2.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ mysql-01 mysql-02 ]
 
@@ -11,14 +13,14 @@ Transition Summary:
   * Start drbd0:1 ( mysql-01 )
 
 Executing Cluster Transition:
-  * Pseudo action: ms-drbd0_start_0
-  * Resource action: drbd0:1 start on mysql-01
-  * Pseudo action: ms-drbd0_running_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ mysql-01 mysql-02 ]
 
   * Full List of Resources:
     * Clone Set: ms-drbd0 [drbd0]:
-      * Started: [ mysql-01 mysql-02 ]
+      * Started: [ mysql-02 ]
+      * Stopped: [ mysql-01 ]
diff --git a/cts/scheduler/summary/clone-fail-block-colocation.summary b/cts/scheduler/summary/clone-fail-block-colocation.summary
index eab40782889..d93227c83f7 100644
--- a/cts/scheduler/summary/clone-fail-block-colocation.summary
+++ b/cts/scheduler/summary/clone-fail-block-colocation.summary
@@ -1,6 +1,8 @@
 0 of 10 resource instances DISABLED and 1 BLOCKED from further action due to failure
 
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ DEM-1 DEM-2 ]
 
@@ -25,33 +27,19 @@ Transition Summary:
   * Move ip_trf_tas ( DEM-1 -> DEM-2 )
 
 Executing Cluster Transition:
-  * Pseudo action: svc_stop_0
-  * Resource action: ip_trf_tas stop on DEM-1
-  * Resource action: ip_mgmt stop on DEM-1
-  * Resource action: d_bird_subnet_state stop on DEM-1
-  * Resource action: ipv6_dem_tas_dns stop on DEM-1
-  * Pseudo action: svc_stopped_0
-  * Pseudo action: svc_start_0
-  * Resource action: ipv6_dem_tas_dns start on DEM-2
-  * Resource action: d_bird_subnet_state start on DEM-2
-  * Resource action: ip_mgmt start on DEM-2
-  * Resource action: ip_trf_tas start on DEM-2
-  * Pseudo action: svc_running_0
-  * Resource action: ipv6_dem_tas_dns monitor=10000 on DEM-2
-  * Resource action: d_bird_subnet_state monitor=10000 on DEM-2
-  * Resource action: ip_mgmt monitor=10000 on DEM-2
-  * Resource action: ip_trf_tas monitor=10000 on DEM-2
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ DEM-1 DEM-2 ]
 
   * Full List of Resources:
     * Resource Group: svc:
-      * ipv6_dem_tas_dns (ocf:heartbeat:IPv6addr): Started DEM-2
-      * d_bird_subnet_state (lsb:bird_subnet_state): Started DEM-2
-      * ip_mgmt (ocf:heartbeat:IPaddr2): Started DEM-2
-      * ip_trf_tas (ocf:heartbeat:IPaddr2): Started DEM-2
+      * ipv6_dem_tas_dns (ocf:heartbeat:IPv6addr): Started DEM-1
+      * d_bird_subnet_state (lsb:bird_subnet_state): Started DEM-1
+      * ip_mgmt (ocf:heartbeat:IPaddr2): Started DEM-1
+      * ip_trf_tas (ocf:heartbeat:IPaddr2): Started DEM-1
     * Clone Set: cl_bird [d_bird]:
       * Started: [ DEM-1 DEM-2 ]
     * Clone Set: cl_bird6 [d_bird6]:
diff --git a/cts/scheduler/summary/clone-interleave-1.summary b/cts/scheduler/summary/clone-interleave-1.summary
index ddb153dfee0..96fd7c40579 100644
--- a/cts/scheduler/summary/clone-interleave-1.summary
+++ b/cts/scheduler/summary/clone-interleave-1.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ pcmk-1 pcmk-2 pcmk-3 ]
 
@@ -22,32 +24,18 @@ Transition Summary:
   * Start child-3:2 ( pcmk-3 )
 
 Executing Cluster Transition:
-  * Pseudo action: clone-1_start_0
-  * Resource action: child-1:0 start on pcmk-2
-  * Resource action: child-1:1 start on pcmk-3
-  * Resource action: child-1:2 start on pcmk-1
-  * Pseudo action: clone-1_running_0
-  * Pseudo action: clone-2_start_0
-  * Resource action: child-2:0 start on pcmk-2
-  * Resource action: child-2:1 start on pcmk-3
-  * Pseudo action: clone-2_running_0
-  * Pseudo action: clone-3_start_0
-  * Resource action: child-3:1 start on pcmk-2
-  * Resource action: child-3:2 start on pcmk-3
-  * Pseudo action: clone-3_running_0
-  * Resource action: dummy start on pcmk-1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ pcmk-1 pcmk-2 pcmk-3 ]
 
   * Full List of Resources:
-    * dummy (ocf:pacemaker:Dummy): Started pcmk-1
+    * dummy (ocf:pacemaker:Dummy): Stopped
     * Clone Set: clone-1 [child-1]:
-      * Started: [ pcmk-1 pcmk-2 pcmk-3 ]
+      * Stopped: [ pcmk-1 pcmk-2 pcmk-3 ]
     * Clone Set: clone-2 [child-2]:
-      * Started: [ pcmk-2 pcmk-3 ]
-      * Stopped: [ pcmk-1 ]
+      * Stopped: [ pcmk-1 pcmk-2 pcmk-3 ]
     * Clone Set: clone-3 [child-3]:
-      * Started: [ pcmk-2 pcmk-3 ]
-      * Stopped: [ pcmk-1 ]
+      * Stopped: [ pcmk-1 pcmk-2 pcmk-3 ]
diff --git a/cts/scheduler/summary/clone-interleave-2.summary b/cts/scheduler/summary/clone-interleave-2.summary
index 5817b101ac8..28497c6cc1f 100644
--- a/cts/scheduler/summary/clone-interleave-2.summary
+++ b/cts/scheduler/summary/clone-interleave-2.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ pcmk-1 pcmk-2 pcmk-3 ]
 
@@ -14,21 +16,15 @@ Current cluster status:
 Transition Summary:
   * Restart dummy ( pcmk-1 ) due to required clone-3 running
   * Stop child-2:0 ( pcmk-1 ) due to node availability
-  * Stop child-3:0 ( pcmk-1 )
+  * Stop child-3:0 ( pcmk-1 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Resource action: dummy stop on pcmk-1
-  * Pseudo action: clone-3_stop_0
-  * Resource action: child-3:2 stop on pcmk-1
-  * Pseudo action: clone-3_stopped_0
-  * Pseudo action: clone-3_start_0
-  * Pseudo action: clone-2_stop_0
-  * Pseudo action: clone-3_running_0
-  * Resource action: dummy start on pcmk-1
-  * Resource action: child-2:2 stop on pcmk-1
-  * Pseudo action: clone-2_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ pcmk-1 pcmk-2 pcmk-3 ]
 
@@ -37,8 +33,6 @@ Revised Cluster Status:
     * Clone Set: clone-1 [child-1]:
       * Started: [ pcmk-1 pcmk-2 pcmk-3 ]
     * Clone Set: clone-2 [child-2]:
-      * Started: [ pcmk-2 pcmk-3 ]
-      * Stopped: [ pcmk-1 ]
+      * Started: [ pcmk-1 pcmk-2 pcmk-3 ]
     * Clone Set: clone-3 [child-3]:
-      * Started: [ pcmk-2 pcmk-3 ]
-      * Stopped: [ pcmk-1 ]
+      * Started: [ pcmk-1 pcmk-2 pcmk-3 ]
diff --git a/cts/scheduler/summary/clone-interleave-3.summary b/cts/scheduler/summary/clone-interleave-3.summary
index 4bac5ee36ef..3dde32d80d6 100644
--- a/cts/scheduler/summary/clone-interleave-3.summary
+++ b/cts/scheduler/summary/clone-interleave-3.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ pcmk-1 pcmk-2 pcmk-3 ]
 
@@ -18,22 +20,10 @@ Transition Summary:
   * Restart child-3:0 ( pcmk-1 ) due to required child-2:0 start
 
 Executing Cluster Transition:
-  * Resource action: dummy stop on pcmk-1
-  * Pseudo action: clone-3_stop_0
-  * Resource action: child-3:2 stop on pcmk-1
-  * Pseudo action: clone-3_stopped_0
-  * Pseudo action: clone-2_stop_0
-  * Resource action: child-2:2 stop on pcmk-1
-  * Pseudo action: clone-2_stopped_0
-  * Pseudo action: clone-2_start_0
-  * Resource action: child-2:2 start on pcmk-1
-  * Pseudo action: clone-2_running_0
-  * Pseudo action: clone-3_start_0
-  * Resource action: child-3:2 start on pcmk-1
-  * Pseudo action: clone-3_running_0
-  * Resource action: dummy start on pcmk-1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ pcmk-1 pcmk-2 pcmk-3 ]
 
@@ -42,6 +32,7 @@ Revised Cluster Status:
     * Clone Set: clone-1 [child-1]:
       * Started: [ pcmk-1 pcmk-2 pcmk-3 ]
     * Clone Set: clone-2 [child-2]:
-      * Started: [ pcmk-1 pcmk-2 pcmk-3 ]
+      * child-2 (ocf:pacemaker:Dummy): FAILED pcmk-1
+      * Started: [ pcmk-2 pcmk-3 ]
     * Clone Set: clone-3 [child-3]:
       * Started: [ pcmk-1 pcmk-2 pcmk-3 ]
diff --git a/cts/scheduler/summary/clone-max-zero.summary b/cts/scheduler/summary/clone-max-zero.summary
index b5f4ec70862..b677f0ceb8f 100644
--- a/cts/scheduler/summary/clone-max-zero.summary
+++ b/cts/scheduler/summary/clone-max-zero.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ c001n11 c001n12 ]
 
@@ -23,29 +26,22 @@ Transition Summary:
   * Stop ocfs2-1:1 ( c001n11 ) due to node availability
 
 Executing Cluster Transition:
-  * Pseudo action: c-ocfs2-1_stop_0
-  * Resource action: ocfs2-1:1 stop on c001n12
-  * Resource action: ocfs2-1:0 stop on c001n11
-  * Pseudo action: c-ocfs2-1_stopped_0
-  * Pseudo action: o2cb-clone_stop_0
-  * Resource action: o2cb:1 stop on c001n12
-  * Resource action: o2cb:0 stop on c001n11
-  * Pseudo action: o2cb-clone_stopped_0
-  * Pseudo action: dlm-clone_stop_0
-  * Resource action: dlm:1 stop on c001n12
-  * Resource action: dlm:0 stop on c001n11
-  * Pseudo action: dlm-clone_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ c001n11 c001n12 ]
 
   * Full List of Resources:
     * fencing (stonith:external/ssh): Started c001n11
     * Clone Set: dlm-clone [dlm]:
+      * dlm (ocf:pacemaker:controld): ORPHANED Started c001n12
+      * dlm (ocf:pacemaker:controld): ORPHANED Started c001n11
     * Clone Set: o2cb-clone [o2cb]:
-      * Stopped: [ c001n11 c001n12 ]
+      * Started: [ c001n11 c001n12 ]
     * Clone Set: clone-drbd0 [drbd0]:
       * Started: [ c001n11 c001n12 ]
     * Clone Set: c-ocfs2-1 [ocfs2-1]:
-      * Stopped: [ c001n11 c001n12 ]
+      * Started: [ c001n11 c001n12 ]
diff --git a/cts/scheduler/summary/clone-no-shuffle.summary b/cts/scheduler/summary/clone-no-shuffle.summary
index e9b61b6f5f4..e3018b81189 100644
--- a/cts/scheduler/summary/clone-no-shuffle.summary
+++ b/cts/scheduler/summary/clone-no-shuffle.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ dktest1sles10 dktest2sles10 ]
 
@@ -16,46 +18,16 @@ Transition Summary:
   * Stop testip ( dktest2sles10 ) due to node availability
 
 Executing Cluster Transition:
-  * Resource action: stonith-1 monitor on dktest2sles10
-  * Resource action: stonith-1 monitor on dktest1sles10
-  * Resource action: drbd1:1 monitor on dktest1sles10
-  * Pseudo action: ms-drbd1_pre_notify_demote_0
-  * Resource action: testip stop on dktest2sles10
-  * Resource action: testip monitor on dktest1sles10
-  * Resource action: stonith-1 start on dktest1sles10
-  * Resource action: drbd1:0 notify on dktest2sles10
-  * Pseudo action: ms-drbd1_confirmed-pre_notify_demote_0
-  * Pseudo action: ms-drbd1_demote_0
-  * Resource action: drbd1:0 demote on dktest2sles10
-  * Pseudo action: ms-drbd1_demoted_0
-  * Pseudo action: ms-drbd1_post_notify_demoted_0
-  * Resource action: drbd1:0 notify on dktest2sles10
-  * Pseudo action: ms-drbd1_confirmed-post_notify_demoted_0
-  * Pseudo action: ms-drbd1_pre_notify_stop_0
-  * Resource action: drbd1:0 notify on dktest2sles10
-  * Pseudo action: ms-drbd1_confirmed-pre_notify_stop_0
-  * Pseudo action: ms-drbd1_stop_0
-  * Resource action: drbd1:0 stop on dktest2sles10
-  * Pseudo action: ms-drbd1_stopped_0
-  * Pseudo action: ms-drbd1_post_notify_stopped_0
-  * Pseudo action: ms-drbd1_confirmed-post_notify_stopped_0
-  * Pseudo action: ms-drbd1_pre_notify_start_0
-  * Pseudo action: ms-drbd1_confirmed-pre_notify_start_0
-  * Pseudo action: ms-drbd1_start_0
-  * Resource action: drbd1:1 start on dktest1sles10
-  * Pseudo action: ms-drbd1_running_0
-  * Pseudo action: ms-drbd1_post_notify_running_0
-  * Resource action: drbd1:1 notify on dktest1sles10
-  * Pseudo action: ms-drbd1_confirmed-post_notify_running_0
-  * Resource action: drbd1:1 monitor=11000 on dktest1sles10
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ dktest1sles10 dktest2sles10 ]
 
   * Full List of Resources:
-    * stonith-1 (stonith:dummy): Started dktest1sles10
+    * stonith-1 (stonith:dummy): Stopped
     * Clone Set: ms-drbd1 [drbd1] (promotable):
-      * Unpromoted: [ dktest1sles10 ]
-      * Stopped: [ dktest2sles10 ]
-    * testip (ocf:heartbeat:IPaddr2): Stopped
+      * Promoted: [ dktest2sles10 ]
+      * Stopped: [ dktest1sles10 ]
+    * testip (ocf:heartbeat:IPaddr2): Started dktest2sles10
diff --git a/cts/scheduler/summary/clone-order-16instances.summary b/cts/scheduler/summary/clone-order-16instances.summary
index 52cf880a39c..f61f061d6c7 100644
--- a/cts/scheduler/summary/clone-order-16instances.summary
+++ b/cts/scheduler/summary/clone-order-16instances.summary
@@ -1,6 +1,8 @@
 16 of 33 resource instances DISABLED and 0 BLOCKED from further action due to failure
 
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ virt-009.cluster-qe.lab.eng.brq.redhat.com virt-010.cluster-qe.lab.eng.brq.redhat.com virt-012.cluster-qe.lab.eng.brq.redhat.com virt-013.cluster-qe.lab.eng.brq.redhat.com virt-014.cluster-qe.lab.eng.brq.redhat.com virt-015.cluster-qe.lab.eng.brq.redhat.com virt-016.cluster-qe.lab.eng.brq.redhat.com virt-020.cluster-qe.lab.eng.brq.redhat.com virt-027.cluster-qe.lab.eng.brq.redhat.com virt-028.cluster-qe.lab.eng.brq.redhat.com virt-029.cluster-qe.lab.eng.brq.redhat.com virt-030.cluster-qe.lab.eng.brq.redhat.com virt-031.cluster-qe.lab.eng.brq.redhat.com virt-032.cluster-qe.lab.eng.brq.redhat.com virt-033.cluster-qe.lab.eng.brq.redhat.com virt-034.cluster-qe.lab.eng.brq.redhat.com ]
 
@@ -29,44 +31,17 @@ Transition Summary:
   * Start dlm:15 ( virt-034.cluster-qe.lab.eng.brq.redhat.com )
 
 Executing Cluster Transition:
-  * Pseudo action: dlm-clone_start_0
-  * Resource action: dlm start on virt-009.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm start on virt-013.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm start on virt-014.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm start on virt-015.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm start on virt-016.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm start on virt-020.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm start on virt-027.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm start on virt-028.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm start on virt-029.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm start on virt-030.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm start on virt-031.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm start on virt-032.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm start on virt-033.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm start on virt-034.cluster-qe.lab.eng.brq.redhat.com
-  * Pseudo action: dlm-clone_running_0
-  * Resource action: dlm monitor=30000 on virt-009.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm monitor=30000 on virt-013.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm monitor=30000 on virt-014.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm monitor=30000 on virt-015.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm monitor=30000 on virt-016.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm monitor=30000 on virt-020.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm monitor=30000 on virt-027.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm monitor=30000 on virt-028.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm monitor=30000 on virt-029.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm monitor=30000 on virt-030.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm monitor=30000 on virt-031.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm monitor=30000 on virt-032.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm monitor=30000 on virt-033.cluster-qe.lab.eng.brq.redhat.com
-  * Resource action: dlm monitor=30000 on virt-034.cluster-qe.lab.eng.brq.redhat.com
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ virt-009.cluster-qe.lab.eng.brq.redhat.com virt-010.cluster-qe.lab.eng.brq.redhat.com virt-012.cluster-qe.lab.eng.brq.redhat.com virt-013.cluster-qe.lab.eng.brq.redhat.com virt-014.cluster-qe.lab.eng.brq.redhat.com virt-015.cluster-qe.lab.eng.brq.redhat.com virt-016.cluster-qe.lab.eng.brq.redhat.com virt-020.cluster-qe.lab.eng.brq.redhat.com virt-027.cluster-qe.lab.eng.brq.redhat.com virt-028.cluster-qe.lab.eng.brq.redhat.com virt-029.cluster-qe.lab.eng.brq.redhat.com virt-030.cluster-qe.lab.eng.brq.redhat.com virt-031.cluster-qe.lab.eng.brq.redhat.com virt-032.cluster-qe.lab.eng.brq.redhat.com virt-033.cluster-qe.lab.eng.brq.redhat.com virt-034.cluster-qe.lab.eng.brq.redhat.com ]
 
   * Full List of Resources:
     * virt-fencing (stonith:fence_xvm): Started virt-010.cluster-qe.lab.eng.brq.redhat.com
     * Clone Set: dlm-clone [dlm]:
-      * Started: [ virt-009.cluster-qe.lab.eng.brq.redhat.com virt-010.cluster-qe.lab.eng.brq.redhat.com virt-012.cluster-qe.lab.eng.brq.redhat.com virt-013.cluster-qe.lab.eng.brq.redhat.com virt-014.cluster-qe.lab.eng.brq.redhat.com virt-015.cluster-qe.lab.eng.brq.redhat.com virt-016.cluster-qe.lab.eng.brq.redhat.com virt-020.cluster-qe.lab.eng.brq.redhat.com virt-027.cluster-qe.lab.eng.brq.redhat.com virt-028.cluster-qe.lab.eng.brq.redhat.com virt-029.cluster-qe.lab.eng.brq.redhat.com virt-030.cluster-qe.lab.eng.brq.redhat.com virt-031.cluster-qe.lab.eng.brq.redhat.com virt-032.cluster-qe.lab.eng.brq.redhat.com virt-033.cluster-qe.lab.eng.brq.redhat.com virt-034.cluster-qe.lab.eng.brq.redhat.com ]
+      * Started: [ virt-010.cluster-qe.lab.eng.brq.redhat.com virt-012.cluster-qe.lab.eng.brq.redhat.com ]
+      * Stopped: [ virt-009.cluster-qe.lab.eng.brq.redhat.com virt-013.cluster-qe.lab.eng.brq.redhat.com virt-014.cluster-qe.lab.eng.brq.redhat.com virt-015.cluster-qe.lab.eng.brq.redhat.com virt-016.cluster-qe.lab.eng.brq.redhat.com virt-020.cluster-qe.lab.eng.brq.redhat.com virt-027.cluster-qe.lab.eng.brq.redhat.com virt-028.cluster-qe.lab.eng.brq.redhat.com virt-029.cluster-qe.lab.eng.brq.redhat.com virt-030.cluster-qe.lab.eng.brq.redhat.com virt-031.cluster-qe.lab.eng.brq.redhat.com virt-032.cluster-qe.lab.eng.brq.redhat.com virt-033.cluster-qe.lab.eng.brq.redhat.com virt-034.cluster-qe.lab.eng.brq.redhat.com ]
     * Clone Set: clvmd-clone [clvmd] (disabled):
       * Stopped (disabled): [ virt-009.cluster-qe.lab.eng.brq.redhat.com virt-010.cluster-qe.lab.eng.brq.redhat.com virt-012.cluster-qe.lab.eng.brq.redhat.com virt-013.cluster-qe.lab.eng.brq.redhat.com virt-014.cluster-qe.lab.eng.brq.redhat.com virt-015.cluster-qe.lab.eng.brq.redhat.com virt-016.cluster-qe.lab.eng.brq.redhat.com virt-020.cluster-qe.lab.eng.brq.redhat.com virt-027.cluster-qe.lab.eng.brq.redhat.com virt-028.cluster-qe.lab.eng.brq.redhat.com virt-029.cluster-qe.lab.eng.brq.redhat.com virt-030.cluster-qe.lab.eng.brq.redhat.com virt-031.cluster-qe.lab.eng.brq.redhat.com virt-032.cluster-qe.lab.eng.brq.redhat.com virt-033.cluster-qe.lab.eng.brq.redhat.com virt-034.cluster-qe.lab.eng.brq.redhat.com ]
diff --git a/cts/scheduler/summary/clone-order-primitive.summary b/cts/scheduler/summary/clone-order-primitive.summary
index 33f613e7ba5..2b2112ec616 100644
--- a/cts/scheduler/summary/clone-order-primitive.summary
+++ b/cts/scheduler/summary/clone-order-primitive.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ pcw2058.see.ed.ac.uk pcw2059.see.ed.ac.uk pcw2688.see.ed.ac.uk pcw2709.see.ed.ac.uk ]
 
@@ -13,17 +15,14 @@ Transition Summary:
   * Start smb_lsb ( pcw2688.see.ed.ac.uk )
 
 Executing Cluster Transition:
-  * Resource action: smb_lsb start on pcw2688.see.ed.ac.uk
-  * Pseudo action: cups_clone_start_0
-  * Resource action: cups_lsb:0 start on pcw2058.see.ed.ac.uk
-  * Resource action: cups_lsb:1 start on pcw2059.see.ed.ac.uk
-  * Pseudo action: cups_clone_running_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ pcw2058.see.ed.ac.uk pcw2059.see.ed.ac.uk pcw2688.see.ed.ac.uk pcw2709.see.ed.ac.uk ]
 
   * Full List of Resources:
     * Clone Set: cups_clone [cups_lsb]:
-      * Started: [ pcw2058.see.ed.ac.uk pcw2059.see.ed.ac.uk ]
-    * smb_lsb (lsb:smb): Started pcw2688.see.ed.ac.uk
+      * Stopped: [ pcw2058.see.ed.ac.uk pcw2059.see.ed.ac.uk pcw2688.see.ed.ac.uk pcw2709.see.ed.ac.uk ]
+    * smb_lsb (lsb:smb): Stopped
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-1.summary b/cts/scheduler/summary/clone-recover-no-shuffle-1.summary
index 0b6866ec16c..08d34b84e0d 100644
--- a/cts/scheduler/summary/clone-recover-no-shuffle-1.summary
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-1.summary
@@ -1,5 +1,7 @@
 Using the original execution date of: 2023-06-21 00:59:59Z
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 ]
 
@@ -13,17 +15,16 @@ Transition Summary:
   * Start dummy:2 ( node1 )
 
 Executing Cluster Transition:
-  * Pseudo action: dummy-clone_start_0
-  * Resource action: dummy start on node1
-  * Pseudo action: dummy-clone_running_0
-  * Resource action: dummy monitor=10000 on node1
 Using the original execution date of: 2023-06-21 00:59:59Z
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 ]
 
   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Started node2
     * Clone Set: dummy-clone [dummy]:
-      * Started: [ node1 node2 node3 ]
+      * Started: [ node2 node3 ]
+      * Stopped: [ node1 ]
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-10.summary b/cts/scheduler/summary/clone-recover-no-shuffle-10.summary
index 5b0f9b6d685..ce86e289258 100644
--- a/cts/scheduler/summary/clone-recover-no-shuffle-10.summary
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-10.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 ]
 
@@ -13,12 +15,10 @@ Transition Summary:
   * Start dummy:2 ( node1 )
 
 Executing Cluster Transition:
-  * Pseudo action: dummy-clone_start_0
-  * Resource action: dummy start on node1
-  * Pseudo action: dummy-clone_running_0
-  * Resource action: dummy monitor=11000 on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 ]
 
@@ -26,4 +26,5 @@ Revised Cluster Status:
     * Fencing (stonith:fence_xvm): Started node2
     * Clone Set: dummy-clone [dummy] (promotable):
       * Promoted: [ node2 ]
-      * Unpromoted: [ node1 node3 ]
+      * Unpromoted: [ node3 ]
+      * Stopped: [ node1 ]
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-11.summary b/cts/scheduler/summary/clone-recover-no-shuffle-11.summary
index e0bdb61d605..20ac2a3d644 100644
--- a/cts/scheduler/summary/clone-recover-no-shuffle-11.summary
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-11.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 ]
 
@@ -14,16 +16,10 @@ Transition Summary:
   * Start rsc2:2 ( node1 )
 
 Executing Cluster Transition:
-  * Pseudo action: grp-clone_start_0
-  * Pseudo action: grp:2_start_0
-  * Resource action: rsc1 start on node1
-  * Resource action: rsc2 start on node1
-  * Pseudo action: grp:2_running_0
-  * Resource action: rsc1 monitor=11000 on node1
-  * Resource action: rsc2 monitor=11000 on node1
-  * Pseudo action: grp-clone_running_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 ]
 
@@ -31,4 +27,5 @@ Revised Cluster Status:
     * Fencing (stonith:fence_xvm): Started node2
     * Clone Set: grp-clone [grp] (promotable):
       * Promoted: [ node2 ]
-      * Unpromoted: [ node1 node3 ]
+      * Unpromoted: [ node3 ]
+      * Stopped: [ node1 ]
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-12.summary b/cts/scheduler/summary/clone-recover-no-shuffle-12.summary
index 6e55a0b7f2f..184e2b76555 100644
--- a/cts/scheduler/summary/clone-recover-no-shuffle-12.summary
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-12.summary
@@ -1,7 +1,9 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 ]
-    * GuestOnline: [ base-bundle-0 base-bundle-1 ]
+    * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
 
   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Started node2
@@ -16,21 +18,14 @@ Transition Summary:
   * Start base:2 ( base-bundle-2 )
 
 Executing Cluster Transition:
-  * Pseudo action: base-bundle_start_0
-  * Pseudo action: base-bundle-clone_start_0
-  * Resource action: base-bundle-podman-2 start on node1
-  * Resource action: base-bundle-2 monitor on node3
-  * Resource action: base-bundle-2 monitor on node2
-  * Resource action: base-bundle-2 monitor on node1
-  * Resource action: base-bundle-podman-2 monitor=60000 on node1
-  * Resource action: base-bundle-2 start on node1
-  * Resource action: base start on base-bundle-2
-  * Pseudo action: base-bundle-clone_running_0
-  * Resource action: base-bundle-2 monitor=30000 on node1
-  * Pseudo action: base-bundle_running_0
-  * Resource action: base monitor=16000 on base-bundle-2
+  * Pseudo action: base-bundle-clone_promoted_0
+  * Pseudo action: base-bundle_promoted_0
+Transition failed: terminated
+An invalid transition was produced
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 ]
     * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
@@ -40,4 +35,4 @@ Revised Cluster Status:
     * Container bundle set: base-bundle [localhost/pcmktest]:
       * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node3
       * base-bundle-1 (ocf:pacemaker:Stateful): Promoted node2
-      * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1
+      * base-bundle-2 (ocf:pacemaker:Stateful): Stopped
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-2.summary b/cts/scheduler/summary/clone-recover-no-shuffle-2.summary
index 8b18120ad8d..3ebdbc6d953 100644
--- a/cts/scheduler/summary/clone-recover-no-shuffle-2.summary
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-2.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 ]
 
@@ -13,20 +15,15 @@ Transition Summary:
   * Start rsc2:2 ( node1 )
 
 Executing Cluster Transition:
-  * Pseudo action: grp-clone_start_0
-  * Pseudo action: grp:2_start_0
-  * Resource action: rsc1 start on node1
-  * Resource action: rsc2 start on node1
-  * Pseudo action: grp:2_running_0
-  * Resource action: rsc1 monitor=10000 on node1
-  * Resource action: rsc2 monitor=10000 on node1
-  * Pseudo action: grp-clone_running_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 ]
 
   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Started node2
     * Clone Set: grp-clone [grp]:
-      * Started: [ node1 node2 node3 ]
+      * Started: [ node2 node3 ]
+      * Stopped: [ node1 ]
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-3.summary b/cts/scheduler/summary/clone-recover-no-shuffle-3.summary
index 5702177e33d..5358bada4bf 100644
--- a/cts/scheduler/summary/clone-recover-no-shuffle-3.summary
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-3.summary
@@ -1,7 +1,9 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 ]
-    * GuestOnline: [ base-bundle-0 base-bundle-1 ]
+    * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
 
   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Started node2
@@ -16,20 +18,10 @@ Transition Summary:
   * Start base:2 ( base-bundle-2 )
 
 Executing Cluster Transition:
-  * Pseudo action: base-bundle_start_0
-  * Pseudo action: base-bundle-clone_start_0
-  * Resource action: base-bundle-podman-2 start on node1
-  * Resource action: base-bundle-2 monitor on node3
-  * Resource action: base-bundle-2 monitor on node2
-  * Resource action: base-bundle-2 monitor on node1
-  * Resource action: base-bundle-podman-2 monitor=60000 on node1
-  * Resource action: base-bundle-2 start on node1
-  * Resource action: base start on base-bundle-2
-  * Pseudo action: base-bundle-clone_running_0
-  * Resource action: base-bundle-2 monitor=30000 on node1
-  * Pseudo action: base-bundle_running_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 ]
     * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
@@ -39,4 +31,4 @@ Revised Cluster Status:
     * Container bundle set: base-bundle [localhost/pcmktest]:
       * base-bundle-0 (ocf:pacemaker:Stateful): Started node3
       * base-bundle-1 (ocf:pacemaker:Stateful): Started node2
-      * base-bundle-2 (ocf:pacemaker:Stateful): Started node1
+      * base-bundle-2 (ocf:pacemaker:Stateful): Stopped
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-4.summary b/cts/scheduler/summary/clone-recover-no-shuffle-4.summary
index 0b6866ec16c..08d34b84e0d 100644
--- a/cts/scheduler/summary/clone-recover-no-shuffle-4.summary
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-4.summary
@@ -1,5 +1,7 @@
 Using the original execution date of: 2023-06-21 00:59:59Z
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 ]
 
@@ -13,17 +15,16 @@ Transition Summary:
   * Start dummy:2 ( node1 )
 
 Executing Cluster Transition:
-  * Pseudo action: dummy-clone_start_0
-  * Resource action: dummy start on node1
-  * Pseudo action: dummy-clone_running_0
-  * Resource action: dummy monitor=10000 on node1
 Using the original execution date of: 2023-06-21 00:59:59Z
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 ]
 
   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Started node2
     * Clone Set: dummy-clone [dummy]:
-      * Started: [ node1 node2 node3 ]
+      * Started: [ node2 node3 ]
+      * Stopped: [ node1 ]
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-5.summary b/cts/scheduler/summary/clone-recover-no-shuffle-5.summary
index 8b18120ad8d..3ebdbc6d953 100644
--- a/cts/scheduler/summary/clone-recover-no-shuffle-5.summary
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-5.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 ]
 
@@ -13,20 +15,15 @@ Transition Summary:
   * Start rsc2:2 ( node1 )
 
 Executing Cluster Transition:
-  * Pseudo action: grp-clone_start_0
-  * Pseudo action: grp:2_start_0
-  * Resource action: rsc1 start on node1
-  * Resource action: rsc2 start on node1
-  * Pseudo action: grp:2_running_0
-  * Resource action: rsc1 monitor=10000 on node1
-  * Resource action: rsc2 monitor=10000 on node1
-  * Pseudo action: grp-clone_running_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 ]
 
   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Started node2
     * Clone Set: grp-clone [grp]:
-      * Started: [ node1 node2 node3 ]
+      * Started: [ node2 node3 ]
+      * Stopped: [ node1 ]
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-6.summary b/cts/scheduler/summary/clone-recover-no-shuffle-6.summary
index 5702177e33d..5358bada4bf 100644
--- a/cts/scheduler/summary/clone-recover-no-shuffle-6.summary
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-6.summary
@@ -1,7 +1,9 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 ]
-    * GuestOnline: [ base-bundle-0 base-bundle-1 ]
+    * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
 
   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Started node2
@@ -16,20 +18,10 @@ Transition Summary:
   * Start base:2 ( base-bundle-2 )
 
 Executing Cluster Transition:
-  * Pseudo action: base-bundle_start_0
-  * Pseudo action: base-bundle-clone_start_0
-  * Resource action: base-bundle-podman-2 start on node1
-  * Resource action: base-bundle-2 monitor on node3
-  * Resource action: base-bundle-2 monitor on node2
-  * Resource action: base-bundle-2 monitor on node1
-  * Resource action: base-bundle-podman-2 monitor=60000 on node1
-  * Resource action: base-bundle-2 start on node1
-  * Resource action: base start on base-bundle-2
-  * Pseudo action: base-bundle-clone_running_0
-  * Resource action: base-bundle-2 monitor=30000 on node1
-  * Pseudo action: base-bundle_running_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 ]
     * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
@@ -39,4 +31,4 @@ Revised Cluster Status:
     * Container bundle set: base-bundle [localhost/pcmktest]:
       * base-bundle-0 (ocf:pacemaker:Stateful): Started node3
       * base-bundle-1 (ocf:pacemaker:Stateful): Started node2
-      * base-bundle-2 (ocf:pacemaker:Stateful): Started node1
+      * base-bundle-2 (ocf:pacemaker:Stateful): Stopped
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-7.summary b/cts/scheduler/summary/clone-recover-no-shuffle-7.summary
index 77445700f04..8cf399b714b 100644
--- a/cts/scheduler/summary/clone-recover-no-shuffle-7.summary
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-7.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 ]
 
@@ -14,25 +16,16 @@ Transition Summary:
   * Promote dummy:2 ( Stopped -> Promoted node1 )
 
 Executing Cluster Transition:
-  * Resource action: dummy cancel=10000 on node2
-  * Pseudo action: dummy-clone_demote_0
-  * Resource action: dummy demote on node2
-  * Pseudo action: dummy-clone_demoted_0
-  * Pseudo action: dummy-clone_start_0
-  * Resource action: dummy monitor=11000 on node2
-  * Resource action: dummy start on node1
-  * Pseudo action: dummy-clone_running_0
-  * Pseudo action: dummy-clone_promote_0
-  * Resource action: dummy promote on node1
-  * Pseudo action: dummy-clone_promoted_0
-  * Resource action: dummy monitor=10000 on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 ]
 
   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Started node2
     * Clone Set: dummy-clone [dummy] (promotable):
-      * Promoted: [ node1 ]
-      * Unpromoted: [ node2 node3 ]
+      * Promoted: [ node2 ]
+      * Unpromoted: [ node3 ]
+      * Stopped: [ node1 ]
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-8.summary b/cts/scheduler/summary/clone-recover-no-shuffle-8.summary
index 878f24801dd..eb39c879854 100644
--- a/cts/scheduler/summary/clone-recover-no-shuffle-8.summary
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-8.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 ]
 
@@ -16,37 +18,16 @@ Transition Summary:
   * Promote rsc2:2 ( Stopped -> Promoted node1 )
 
 Executing Cluster Transition:
-  * Resource action: rsc1 cancel=10000 on node2
-  * Resource action: rsc2 cancel=10000 on node2
-  * Pseudo action: grp-clone_demote_0
-  * Pseudo action: grp:1_demote_0
-  * Resource action: rsc2 demote on node2
-  * Resource action: rsc1 demote on node2
-  * Resource action: rsc2 monitor=11000 on node2
-  * Pseudo action: grp:1_demoted_0
-  * Resource action: rsc1 monitor=11000 on node2
-  * Pseudo action: grp-clone_demoted_0
-  * Pseudo action: grp-clone_start_0
-  * Pseudo action: grp:2_start_0
-  * Resource action: rsc1 start on node1
-  * Resource action: rsc2 start on node1
-  * Pseudo action: grp:2_running_0
-  * Pseudo action: grp-clone_running_0
-  * Pseudo action: grp-clone_promote_0
-  * Pseudo action: grp:2_promote_0
-  * Resource action: rsc1 promote on node1
-  * Resource action: rsc2 promote on node1
-  * Pseudo action: grp:2_promoted_0
-  * Resource action: rsc1 monitor=10000 on node1
-  * Resource action: rsc2 monitor=10000 on node1
-  * Pseudo action: grp-clone_promoted_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 ]
 
   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Started node2
     * Clone Set: grp-clone [grp] (promotable):
-      * Promoted: [ node1 ]
-      * Unpromoted: [ node2 node3 ]
+      * Promoted: [ node2 ]
+      * Unpromoted: [ node3 ]
+      * Stopped: [ node1 ]
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-9.summary b/cts/scheduler/summary/clone-recover-no-shuffle-9.summary
index 7ede39a6e58..f9cabee9ede 100644
--- a/cts/scheduler/summary/clone-recover-no-shuffle-9.summary
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-9.summary
@@ -1,7 +1,9 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 ]
-    * GuestOnline: [ base-bundle-0 base-bundle-1 ]
+    * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
 
   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Started node2
@@ -17,33 +19,10 @@ Transition Summary:
   * Promote base:2 ( Stopped -> Promoted base-bundle-2 )
 
 Executing Cluster Transition:
-  * Resource action: base cancel=15000 on base-bundle-1
-  * Pseudo action: base-bundle_demote_0
-  * Pseudo action: base-bundle-clone_demote_0
-  * Resource action: base demote on base-bundle-1
-  * Pseudo action: base-bundle-clone_demoted_0
-  * Pseudo action: base-bundle_demoted_0
-  * Pseudo action: base-bundle_start_0
-  * Resource action: base monitor=16000 on base-bundle-1
-  * Pseudo action: base-bundle-clone_start_0
-  * Resource action: base-bundle-podman-2 start on node1
-  * Resource action: base-bundle-2 monitor on node3
-  * Resource action: base-bundle-2 monitor on node2
-  * Resource action: base-bundle-2 monitor on node1
-  * Resource action: base-bundle-podman-2 monitor=60000 on node1
-  * Resource action: base-bundle-2 start on node1
-  * Resource action: base start on base-bundle-2
-  * Pseudo action: base-bundle-clone_running_0
-  * Resource action: base-bundle-2 monitor=30000 on node1
-  * Pseudo action: base-bundle_running_0
-  * Pseudo action: base-bundle_promote_0
-  * Pseudo action: base-bundle-clone_promote_0
-  * Resource action: base promote on base-bundle-2
-  * Pseudo action: base-bundle-clone_promoted_0
-  * Pseudo action: base-bundle_promoted_0
-  * Resource action: base monitor=15000 on base-bundle-2
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 ]
     * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
@@ -52,5 +31,5 @@ Revised Cluster Status:
     * Fencing (stonith:fence_xvm): Started node2
     * Container bundle set: base-bundle [localhost/pcmktest]:
       * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node3
-      * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
-      * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node1
+      * base-bundle-1 (ocf:pacemaker:Stateful): Promoted node2
+      * base-bundle-2 (ocf:pacemaker:Stateful): Stopped
diff --git a/cts/scheduler/summary/clone-require-all-1.summary b/cts/scheduler/summary/clone-require-all-1.summary
index 7037eb8caa1..ac4a1937f71 100644
--- a/cts/scheduler/summary/clone-require-all-1.summary
+++ b/cts/scheduler/summary/clone-require-all-1.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
 
@@ -15,14 +18,11 @@ Transition Summary:
   * Start B:1 ( rhel7-auto4 )
 
 Executing Cluster Transition:
-  * Pseudo action: B-clone_start_0
-  * Resource action: B start on rhel7-auto3
-  * Resource action: B start on rhel7-auto4
-  * Pseudo action: B-clone_running_0
-  * Resource action: B monitor=10000 on rhel7-auto3
-  * Resource action: B monitor=10000 on rhel7-auto4
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
 
@@ -32,5 +32,4 @@ Revised Cluster Status:
       * Started: [ rhel7-auto1 rhel7-auto2 ]
       * Stopped: [ rhel7-auto3 rhel7-auto4 ]
     * Clone Set: B-clone [B]:
-      * Started: [ rhel7-auto3 rhel7-auto4 ]
-      * Stopped: [ rhel7-auto1 rhel7-auto2 ]
+      * Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
diff --git a/cts/scheduler/summary/clone-require-all-2.summary b/cts/scheduler/summary/clone-require-all-2.summary
index 72d6f243f65..90b585217e4 100644
--- a/cts/scheduler/summary/clone-require-all-2.summary
+++ b/cts/scheduler/summary/clone-require-all-2.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Node rhel7-auto1: standby (with active resources)
     * Node rhel7-auto2: standby (with active resources)
@@ -20,23 +23,20 @@ Transition Summary:
   * Start B:1 ( rhel7-auto3 ) due to unrunnable clone-one-or-more:order-A-clone-B-clone-mandatory (blocked)
 
 Executing Cluster Transition:
-  * Resource action: shooter stop on rhel7-auto1
-  * Pseudo action: A-clone_stop_0
-  * Resource action: shooter start on rhel7-auto3
-  * Resource action: A stop on rhel7-auto1
-  * Resource action: A stop on rhel7-auto2
-  * Pseudo action: A-clone_stopped_0
-  * Resource action: shooter monitor=60000 on rhel7-auto3
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
-    * Node rhel7-auto1: standby
-    * Node rhel7-auto2: standby
+    * Node rhel7-auto1: standby (with active resources)
+    * Node rhel7-auto2: standby (with active resources)
     * Online: [ rhel7-auto3 rhel7-auto4 ]
 
   * Full List of Resources:
-    * shooter (stonith:fence_xvm): Started rhel7-auto3
+    * shooter (stonith:fence_xvm): Started rhel7-auto1
     * Clone Set: A-clone [A]:
-      * Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
+      * Started: [ rhel7-auto1 rhel7-auto2 ]
+      * Stopped: [ rhel7-auto3 rhel7-auto4 ]
     * Clone Set: B-clone [B]:
       * Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
diff --git a/cts/scheduler/summary/clone-require-all-3.summary b/cts/scheduler/summary/clone-require-all-3.summary
index b828bffce28..c3ebf26571b 100644
--- a/cts/scheduler/summary/clone-require-all-3.summary
+++ b/cts/scheduler/summary/clone-require-all-3.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Node rhel7-auto1: standby (with active resources)
     * Node rhel7-auto2: standby (with active resources)
@@ -21,27 +24,21 @@ Transition Summary:
   * Stop B:1 ( rhel7-auto4 ) due to unrunnable clone-one-or-more:order-A-clone-B-clone-mandatory
 
 Executing Cluster Transition:
-  * Resource action: shooter stop on rhel7-auto1
-  * Pseudo action: B-clone_stop_0
-  * Resource action: shooter start on rhel7-auto3
-  * Resource action: B stop on rhel7-auto3
-  * Resource action: B stop on rhel7-auto4
-  * Pseudo action: B-clone_stopped_0
-  * Resource action: shooter monitor=60000 on rhel7-auto3
-  * Pseudo action: A-clone_stop_0
-  * Resource action: A stop on rhel7-auto1
-  * Resource action: A stop on rhel7-auto2
-  * Pseudo action: A-clone_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
-    * Node rhel7-auto1: standby
-    * Node rhel7-auto2: standby
+    * Node rhel7-auto1: standby (with active resources)
+    * Node rhel7-auto2: standby (with active resources)
    * Online: [ rhel7-auto3 rhel7-auto4 ]
 
   * Full List of Resources:
-    * shooter (stonith:fence_xvm): Started rhel7-auto3
+    * shooter (stonith:fence_xvm): Started rhel7-auto1
     * Clone Set: A-clone [A]:
-      * Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
+      * Started: [ rhel7-auto1 rhel7-auto2 ]
+      * Stopped: [ rhel7-auto3 rhel7-auto4 ]
     * Clone Set: B-clone [B]:
-      * Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
+      * Started: [ rhel7-auto3 rhel7-auto4 ]
+      * Stopped: [ rhel7-auto1 rhel7-auto2 ]
diff --git a/cts/scheduler/summary/clone-require-all-4.summary b/cts/scheduler/summary/clone-require-all-4.summary
index ebd7b6bb467..ae2474be2f0 100644
--- a/cts/scheduler/summary/clone-require-all-4.summary
+++ b/cts/scheduler/summary/clone-require-all-4.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Node rhel7-auto1: standby (with active resources)
     * Online: [ rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
@@ -17,25 +20,20 @@ Transition Summary:
   * Stop A:0 ( rhel7-auto1 ) due to node availability
 
 Executing Cluster Transition:
-  * Resource action: shooter stop on rhel7-auto1
-  * Pseudo action: A-clone_stop_0
-  * Resource action: shooter start on rhel7-auto2
-  * Resource action: A stop on rhel7-auto1
-  * Pseudo action: A-clone_stopped_0
-  * Pseudo action: A-clone_start_0
-  * Resource action: shooter monitor=60000 on rhel7-auto2
-  * Pseudo action: A-clone_running_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
-    * Node rhel7-auto1: standby
+    * Node rhel7-auto1: standby (with active resources)
     * Online: [ rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
 
   * Full List of Resources:
-    * shooter (stonith:fence_xvm): Started rhel7-auto2
+    * shooter (stonith:fence_xvm): Started rhel7-auto1
     * Clone Set: A-clone [A]:
-      * Started: [ rhel7-auto2 ]
-      * Stopped: [ rhel7-auto1 rhel7-auto3 rhel7-auto4 ]
+      * Started: [ rhel7-auto1 rhel7-auto2 ]
+      * Stopped: [ rhel7-auto3 rhel7-auto4 ]
     * Clone Set: B-clone [B]:
       * Started: [ rhel7-auto3 rhel7-auto4 ]
       * Stopped: [ rhel7-auto1 rhel7-auto2 ]
diff --git a/cts/scheduler/summary/clone-require-all-5.summary b/cts/scheduler/summary/clone-require-all-5.summary
index b47049e8831..d76b5594406 100644
--- a/cts/scheduler/summary/clone-require-all-5.summary
+++ b/cts/scheduler/summary/clone-require-all-5.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
 
@@ -17,29 +20,18 @@ Transition Summary:
   * Start B:2 ( rhel7-auto1 )
 
 Executing Cluster Transition:
-  * Pseudo action: A-clone_start_0
-  * Resource action: A start on rhel7-auto3
-  * Pseudo action: A-clone_running_0
-  * Pseudo action: clone-one-or-more:order-A-clone-B-clone-mandatory
-  * Resource action: A monitor=10000 on rhel7-auto3
-  * Pseudo action: B-clone_start_0
-  * Resource action: B start on rhel7-auto4
-  * Resource action: B start on rhel7-auto3
-  * Resource action: B start on rhel7-auto1
-  * Pseudo action: B-clone_running_0
-  * Resource action: B monitor=10000 on rhel7-auto4
-  * Resource action: B monitor=10000 on rhel7-auto3
-  * Resource action: B monitor=10000 on rhel7-auto1
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
 
   * Full List of Resources:
     * shooter (stonith:fence_xvm): Started rhel7-auto1
     * Clone Set: A-clone [A]:
-      * Started: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
-      * Stopped: [ rhel7-auto4 ]
+      * Started: [ rhel7-auto1 rhel7-auto2 ]
+      * Stopped: [ rhel7-auto3 rhel7-auto4 ]
     * Clone Set: B-clone [B]:
-      * Started: [ rhel7-auto1 rhel7-auto3 rhel7-auto4 ]
-      * Stopped: [ rhel7-auto2 ]
+      * Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
diff --git a/cts/scheduler/summary/clone-require-all-6.summary b/cts/scheduler/summary/clone-require-all-6.summary
index 5bae20c7285..8eb79234a24 100644
--- a/cts/scheduler/summary/clone-require-all-6.summary
+++ b/cts/scheduler/summary/clone-require-all-6.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
 
@@ -16,22 +19,19 @@ Transition Summary:
   * Stop A:2 ( rhel7-auto3 ) due to node availability
 
 Executing Cluster Transition:
-  * Pseudo action: A-clone_stop_0
-  * Resource action: A stop on rhel7-auto1
-  * Resource action: A stop on rhel7-auto3
-  * Pseudo action: A-clone_stopped_0
-  * Pseudo action: A-clone_start_0
-  * Pseudo action: A-clone_running_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
 
   * Full List of Resources:
     * shooter (stonith:fence_xvm): Started rhel7-auto1
     * Clone Set: A-clone [A]:
-      * Started: [ rhel7-auto2 ]
-      * Stopped: [ rhel7-auto1 rhel7-auto3 rhel7-auto4 ]
+      * Started: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
+      * Stopped: [ rhel7-auto4 ]
     * Clone Set: B-clone [B]:
       * Started: [ rhel7-auto1 rhel7-auto3 rhel7-auto4 ]
       * Stopped: [ rhel7-auto2 ]
diff --git a/cts/scheduler/summary/clone-require-all-7.summary b/cts/scheduler/summary/clone-require-all-7.summary
index f0f2820c26d..78c2e998851 100644
--- a/cts/scheduler/summary/clone-require-all-7.summary
+++ b/cts/scheduler/summary/clone-require-all-7.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
 
@@ -16,33 +19,17 @@ Transition Summary:
   * Start B:1 ( rhel7-auto4 )
 
 Executing Cluster Transition:
-  * Resource action: A:0 monitor on rhel7-auto4
-  * Resource action: A:0 monitor on rhel7-auto3
-  * Resource action: A:0 monitor on rhel7-auto2
-  * Resource action: A:1 monitor on rhel7-auto1
-  * Pseudo action: A-clone_start_0
-  * Resource action: A:0 start on rhel7-auto2
-  * Resource action: A:1 start on rhel7-auto1
-  * Pseudo action: A-clone_running_0
-  * Pseudo action: clone-one-or-more:order-A-clone-B-clone-mandatory
-  * Resource action: A:0 monitor=10000 on rhel7-auto2
-  * Resource action: A:1 monitor=10000 on rhel7-auto1
-  * Pseudo action: B-clone_start_0
-  * Resource action: B start on rhel7-auto3
-  * Resource action: B start on rhel7-auto4
-  * Pseudo action: B-clone_running_0
-  * Resource action: B monitor=10000 on rhel7-auto3
-  * Resource action: B monitor=10000 on rhel7-auto4
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
 
   * Full List of Resources:
     * shooter (stonith:fence_xvm): Started rhel7-auto1
     * Clone Set: A-clone [A]:
-      * Started: [ rhel7-auto1 rhel7-auto2 ]
-      * Stopped: [ rhel7-auto3 rhel7-auto4 ]
+      * Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
     * Clone Set: B-clone [B]:
-      * Started: [ rhel7-auto3 rhel7-auto4 ]
-      * Stopped: [ rhel7-auto1 rhel7-auto2 ]
+      * Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
diff --git a/cts/scheduler/summary/clone-require-all-no-interleave-1.summary b/cts/scheduler/summary/clone-require-all-no-interleave-1.summary
index 646bfa3ef5c..f33e8b1c8bb 100644
--- a/cts/scheduler/summary/clone-require-all-no-interleave-1.summary
+++ b/cts/scheduler/summary/clone-require-all-no-interleave-1.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Node rhel7-auto4: standby
     * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
@@ -20,25 +23,11 @@ Transition Summary:
   * Start C:2 ( rhel7-auto3 )
 
 Executing Cluster Transition:
-  * Pseudo action: A-clone_start_0
-  * Resource action: A start on rhel7-auto3
-  * Pseudo action: A-clone_running_0
-  * Pseudo action: B-clone_start_0
-  * Resource action: A monitor=10000 on rhel7-auto3
-  * Resource action: B start on rhel7-auto3
-  * Pseudo action: B-clone_running_0
-  * Pseudo action: clone-one-or-more:order-B-clone-C-clone-mandatory
-  * Resource action: B monitor=10000 on rhel7-auto3
-  * Pseudo action: C-clone_start_0
-  * Resource action: C start on rhel7-auto2
-  * Resource action: C start on rhel7-auto1
-  * Resource action: C start on rhel7-auto3
-  * Pseudo action: C-clone_running_0
-  * Resource action: C monitor=10000 on rhel7-auto2
-  * Resource action: C monitor=10000 on rhel7-auto1
-  * Resource action: C monitor=10000 on rhel7-auto3
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Node rhel7-auto4: standby
     * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
@@ -46,11 +35,8 @@ Revised Cluster Status:
   * Full List of Resources:
     * shooter (stonith:fence_xvm): Started rhel7-auto1
     * Clone Set: A-clone [A]:
-      * Started: [ rhel7-auto3 ]
-      * Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto4 ]
+      * Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
     * Clone Set: B-clone [B]:
-      * Started: [ rhel7-auto3 ]
-      * Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto4 ]
+      * Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
     * Clone Set: C-clone [C]:
-      * Started: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
-      * Stopped: [ rhel7-auto4 ]
+      * Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
diff --git a/cts/scheduler/summary/clone-require-all-no-interleave-2.summary b/cts/scheduler/summary/clone-require-all-no-interleave-2.summary
index e40230cb527..8d273fe98f3 100644
--- a/cts/scheduler/summary/clone-require-all-no-interleave-2.summary
+++ b/cts/scheduler/summary/clone-require-all-no-interleave-2.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Node rhel7-auto3: standby
     * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto4 ]
@@ -20,25 +23,11 @@ Transition Summary:
   * Start C:2 ( rhel7-auto4 )
 
 Executing Cluster Transition:
-  * Pseudo action: A-clone_start_0
-  * Resource action: A start on rhel7-auto4
-  * Pseudo action: A-clone_running_0
-  * Pseudo action: B-clone_start_0
-  * Resource action: A monitor=10000 on rhel7-auto4
-  * Resource action: B start on rhel7-auto4
-  * Pseudo action: B-clone_running_0
-  * Pseudo action: clone-one-or-more:order-B-clone-C-clone-mandatory
-  * Resource action: B monitor=10000 on rhel7-auto4
-  * Pseudo action: C-clone_start_0
-  * Resource action: C start on rhel7-auto2
-  * Resource action: C start on rhel7-auto1
-  * Resource action: C start on rhel7-auto4
-  * Pseudo action: C-clone_running_0
-  * Resource action: C monitor=10000 on rhel7-auto2
-  * Resource action: C monitor=10000 on rhel7-auto1
-  * Resource action: C monitor=10000 on rhel7-auto4
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Node rhel7-auto3: standby
     * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto4 ]
@@ -46,11 +35,8 @@ Revised Cluster Status:
   * Full List of Resources:
     * shooter (stonith:fence_xvm): Started rhel7-auto1
     * Clone Set: A-clone [A]:
-      * Started: [ rhel7-auto4 ]
-      * Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
+      * Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
     * Clone Set: B-clone [B]:
-      * Started: [ rhel7-auto4 ]
-      * Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
+      * Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
     * Clone Set: C-clone [C]:
-      * Started: [ rhel7-auto1 rhel7-auto2 rhel7-auto4 ]
-      * Stopped: [ rhel7-auto3 ]
+      * Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
diff --git a/cts/scheduler/summary/clone-require-all-no-interleave-3.summary b/cts/scheduler/summary/clone-require-all-no-interleave-3.summary
index a22bf455b6a..be42eae130c 100644
--- a/cts/scheduler/summary/clone-require-all-no-interleave-3.summary
+++ b/cts/scheduler/summary/clone-require-all-no-interleave-3.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Node rhel7-auto4: standby (with active resources)
     * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
@@ -21,42 +24,23 @@ Transition Summary:
   * Move C:0 ( rhel7-auto4 -> rhel7-auto3 )
 
 Executing Cluster Transition:
-  * Pseudo action: C-clone_stop_0
-  * Resource action: C stop on rhel7-auto4
-  * Pseudo action: C-clone_stopped_0
-  * Pseudo action: B-clone_stop_0
-  * Resource action: B stop on rhel7-auto4
-  * Pseudo action: B-clone_stopped_0
-  * Pseudo action: A-clone_stop_0
-  * Resource action: A stop on rhel7-auto4
-  * Pseudo action: A-clone_stopped_0
-  * Pseudo action: A-clone_start_0
-  * Resource action: A start on rhel7-auto3
-  * Pseudo action: A-clone_running_0
-  * Pseudo action: B-clone_start_0
-  * Resource action: A monitor=10000 on rhel7-auto3
-  * Resource action: B start on rhel7-auto3
-  * Pseudo action: B-clone_running_0
-  * Pseudo action: clone-one-or-more:order-B-clone-C-clone-mandatory
-  * Resource action: B monitor=10000 on rhel7-auto3
-  * Pseudo action: C-clone_start_0
-  * Resource action: C start on rhel7-auto3
-  * Pseudo action: C-clone_running_0
-  * Resource action: C monitor=10000 on rhel7-auto3
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
-    * Node rhel7-auto4: standby
+    * Node rhel7-auto4: standby (with active resources)
     * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
 
   * Full List of Resources:
     * shooter (stonith:fence_xvm): Started rhel7-auto1
     * Clone Set: A-clone [A]:
-      * Started: [ rhel7-auto3 ]
-      * Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto4 ]
+      * Started: [ rhel7-auto4 ]
+      * Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
     * Clone Set: B-clone [B]:
-      * Started: [ rhel7-auto3 ]
-      * Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto4 ]
+      * Started: [ rhel7-auto4 ]
+      * Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
     * Clone Set: C-clone [C]:
-      * Started: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
-      * Stopped: [ rhel7-auto4 ]
+      * Started: [ rhel7-auto1 rhel7-auto2 rhel7-auto4 ]
+      * Stopped: [ rhel7-auto3 ]
diff --git a/cts/scheduler/summary/clone-requires-quorum-recovery.summary b/cts/scheduler/summary/clone-requires-quorum-recovery.summary
index 364dabec821..b40c33a21f5 100644
--- a/cts/scheduler/summary/clone-requires-quorum-recovery.summary
+++ b/cts/scheduler/summary/clone-requires-quorum-recovery.summary
@@ -1,5 +1,8 @@
 Using the original execution date of: 2018-05-24 15:29:56Z
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Node rhel7-5: UNCLEAN (offline)
     * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 ]
@@ -22,27 +25,24 @@ Transition Summary:
   * Stop dummy-crowd:3 ( rhel7-5 ) due to node availability
 
 Executing Cluster Transition:
-  * Pseudo action: dummy-crowd-clone_stop_0
-  * Fencing rhel7-5 (reboot)
-  * Pseudo action: dummy-crowd_stop_0
-  * Pseudo action: dummy-crowd-clone_stopped_0
-  * Pseudo action: dummy-crowd-clone_start_0
-  * Resource action: dummy-crowd start on rhel7-2
-  * Pseudo action: dummy-crowd-clone_running_0
-  * Resource action: dummy-crowd monitor=10000 on rhel7-2
 Using the original execution date of: 2018-05-24 15:29:56Z
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
+    * Node rhel7-5: UNCLEAN (offline)
     * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 ]
-    * OFFLINE: [ rhel7-5 ]
 
   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Started rhel7-1
     * FencingFail (stonith:fence_dummy): Started rhel7-2
     * dummy-solo (ocf:pacemaker:Dummy): Started rhel7-3
     * Clone Set: dummy-crowd-clone [dummy-crowd]:
-      * Started: [ rhel7-1 rhel7-2 rhel7-4 ]
+      * dummy-crowd (ocf:pacemaker:Dummy): ORPHANED Started rhel7-5 (UNCLEAN)
+      * Started: [ rhel7-1 rhel7-4 ]
+      * Stopped: [ rhel7-2 rhel7-3 ]
     * Clone Set: dummy-boss-clone [dummy-boss] (promotable):
       * Promoted: [ rhel7-3 ]
       * Unpromoted: [ rhel7-2 rhel7-4 ]
diff --git a/cts/scheduler/summary/clone-requires-quorum.summary b/cts/scheduler/summary/clone-requires-quorum.summary
index e45b0312ebc..bbba5159f52 100644
--- a/cts/scheduler/summary/clone-requires-quorum.summary
+++ b/cts/scheduler/summary/clone-requires-quorum.summary
@@ -1,5 +1,8 @@
 Using the original execution date of: 2018-05-24 15:30:29Z
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Node rhel7-5: UNCLEAN (offline)
     * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 ]
@@ -18,24 +21,26 @@ Current cluster status:
 Transition Summary:
   * Fence (reboot) rhel7-5 'peer is no longer part of the cluster'
   * Stop dummy-crowd:3 ( rhel7-5 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Pseudo action: dummy-crowd-clone_stop_0
-  * Fencing rhel7-5 (reboot)
-  * Pseudo action: dummy-crowd_stop_0
-  * Pseudo action: dummy-crowd-clone_stopped_0
 Using the original execution date of: 2018-05-24 15:30:29Z
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
+    * Node rhel7-5: UNCLEAN (offline)
     * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 ]
-    * OFFLINE: [ rhel7-5 ]
 
   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Started rhel7-1
     * FencingFail (stonith:fence_dummy): Started rhel7-2
     * dummy-solo (ocf:pacemaker:Dummy): Started rhel7-3
     * Clone Set: dummy-crowd-clone [dummy-crowd]:
+      * dummy-crowd (ocf:pacemaker:Dummy): ORPHANED Started rhel7-5 (UNCLEAN)
       * Started: [ rhel7-1 rhel7-2 rhel7-4 ]
     * Clone Set: dummy-boss-clone [dummy-boss] (promotable):
       * Promoted: [ rhel7-3 ]
diff --git a/cts/scheduler/summary/clone_min_interleave_start_one.summary b/cts/scheduler/summary/clone_min_interleave_start_one.summary
index 026d688fc0d..715976f7062 100644
--- a/cts/scheduler/summary/clone_min_interleave_start_one.summary
+++ b/cts/scheduler/summary/clone_min_interleave_start_one.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ c7auto1 c7auto2 c7auto3 ]
 
@@ -21,20 +23,17 @@ Transition Summary:
   * Start FAKE3:2 ( c7auto1 ) due to unrunnable FAKE2:2 start (blocked)
 
 Executing Cluster Transition:
-  * Pseudo action: FAKE1-clone_start_0
-  * Resource action: FAKE1 start on c7auto1
-  * Pseudo action: FAKE1-clone_running_0
-  * Resource action: FAKE1 monitor=10000 on c7auto1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ c7auto1 c7auto2 c7auto3 ]
 
   * Full List of Resources:
     * shooter (stonith:fence_phd_kvm): Started c7auto1
     * Clone Set: FAKE1-clone [FAKE1]:
-      * Started: [ c7auto1 ]
-      * Stopped: [ c7auto2 c7auto3 ]
+      * Stopped: [ c7auto1 c7auto2 c7auto3 ]
     * Clone Set: FAKE2-clone [FAKE2]:
       * Stopped: [ c7auto1 c7auto2 c7auto3 ]
     * Clone Set: FAKE3-clone [FAKE3]:
diff --git a/cts/scheduler/summary/clone_min_interleave_start_two.summary b/cts/scheduler/summary/clone_min_interleave_start_two.summary
index 74c5a4594d6..c177bbc50f0 100644
--- a/cts/scheduler/summary/clone_min_interleave_start_two.summary
+++ b/cts/scheduler/summary/clone_min_interleave_start_two.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ c7auto1 c7auto2 c7auto3 ]
 
@@ -22,40 +24,18 @@ Transition Summary:
   * Start FAKE3:2 ( c7auto1 )
 
 Executing Cluster Transition:
-  * Pseudo action: FAKE1-clone_start_0
-  * Resource action: FAKE1 start on c7auto2
-  * Resource action: FAKE1 start on c7auto1
-  * Pseudo action: FAKE1-clone_running_0
-  * Pseudo action: clone-one-or-more:order-FAKE1-clone-FAKE2-clone-mandatory
-  * Resource action: FAKE1 monitor=10000 on c7auto2
-  * Resource action: FAKE1 monitor=10000 on c7auto1
-  * Pseudo action: FAKE2-clone_start_0
-  * Resource action: FAKE2 start on c7auto3
-  * Resource action: FAKE2 start on c7auto2
-  * Resource action: FAKE2 start on c7auto1
-  * Pseudo action: FAKE2-clone_running_0
-  * Pseudo action: FAKE3-clone_start_0
-  * Resource action: FAKE2 monitor=10000 on c7auto3
-  * Resource action: FAKE2 monitor=10000 on c7auto2
-  * Resource action: FAKE2 monitor=10000 on c7auto1
-  * Resource action: FAKE3 start on c7auto3
-  * Resource action: FAKE3 start on c7auto2
-  * Resource action: FAKE3 start on c7auto1
-  * Pseudo action: FAKE3-clone_running_0
-  * Resource action: FAKE3 monitor=10000 on c7auto3
-  * Resource action: FAKE3 monitor=10000 on c7auto2
-  * Resource action: FAKE3 monitor=10000 on c7auto1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ c7auto1 c7auto2 c7auto3 ]
 
   * Full List of Resources:
     * shooter (stonith:fence_phd_kvm): Started c7auto1
     * Clone Set: FAKE1-clone [FAKE1]:
-      * Started: [ c7auto1 c7auto2 ]
-      * Stopped: [ c7auto3 ]
+      * Stopped: [ c7auto1 c7auto2 c7auto3 ]
     * Clone Set: FAKE2-clone [FAKE2]:
-      * Started: [ c7auto1 c7auto2 c7auto3 ]
+      * Stopped: [ c7auto1 c7auto2 c7auto3 ]
     * Clone Set: FAKE3-clone [FAKE3]:
-      * Started: [ c7auto1 c7auto2 c7auto3 ]
+      * Stopped: [ c7auto1 c7auto2 c7auto3 ]
diff --git a/cts/scheduler/summary/clone_min_interleave_stop_one.summary b/cts/scheduler/summary/clone_min_interleave_stop_one.summary
index ac1f40b547f..012518cf489 100644
--- a/cts/scheduler/summary/clone_min_interleave_stop_one.summary
+++ b/cts/scheduler/summary/clone_min_interleave_stop_one.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ c7auto1 c7auto2 c7auto3 ]
 
@@ -15,21 +17,17 @@ Transition Summary:
   * Stop FAKE1:0 ( c7auto3 ) due to node availability
 
 Executing Cluster Transition:
-  * Pseudo action: FAKE1-clone_stop_0
-  * Resource action: FAKE1 stop on c7auto3
-  * Pseudo action: FAKE1-clone_stopped_0
-  * Pseudo action: FAKE1-clone_start_0
-  * Pseudo action: FAKE1-clone_running_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ c7auto1 c7auto2 c7auto3 ]
 
   * Full List of Resources:
     * shooter (stonith:fence_phd_kvm): Started c7auto1
     * Clone Set: FAKE1-clone [FAKE1]:
-      * Started: [ c7auto1 c7auto2 ]
-      * Stopped: [ c7auto3 ]
+      * Started: [ c7auto1 c7auto2 c7auto3 ]
     * Clone Set: FAKE2-clone [FAKE2]:
       * Started: [ c7auto1 c7auto2 c7auto3 ]
     * Clone Set: FAKE3-clone [FAKE3]:
diff --git a/cts/scheduler/summary/clone_min_interleave_stop_two.summary b/cts/scheduler/summary/clone_min_interleave_stop_two.summary
index d5d63fb3985..1a695f8404a 100644
--- a/cts/scheduler/summary/clone_min_interleave_stop_two.summary
+++ b/cts/scheduler/summary/clone_min_interleave_stop_two.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ c7auto1 c7auto2 c7auto3 ]
 
@@ -22,33 +24,18 @@ Transition Summary:
   * Stop FAKE3:2 ( c7auto2 ) due to required FAKE2:2 start
 
 Executing Cluster Transition:
-  * Pseudo action: FAKE3-clone_stop_0
-  * Resource action: FAKE3 stop on c7auto3
-  * Resource action: FAKE3 stop on c7auto1
-  * Resource action: FAKE3 stop on c7auto2
-  * Pseudo action: FAKE3-clone_stopped_0
-  * Pseudo action: FAKE2-clone_stop_0
-  * Resource action: FAKE2 stop on c7auto3
-  * Resource action: FAKE2 stop on c7auto1
-  * Resource action: FAKE2 stop on c7auto2
-  * Pseudo action: FAKE2-clone_stopped_0
-  * Pseudo action: FAKE1-clone_stop_0
-  * Resource action: FAKE1 stop on c7auto3
-  * Resource action: FAKE1 stop on c7auto2
-  * Pseudo action: FAKE1-clone_stopped_0
-  * Pseudo action: FAKE1-clone_start_0
-  * Pseudo action: FAKE1-clone_running_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ c7auto1 c7auto2 c7auto3 ]
 
   * Full List of Resources:
     * shooter (stonith:fence_phd_kvm): Started c7auto1
     * Clone Set: FAKE1-clone [FAKE1]:
-      * Started: [ c7auto1 ]
-      * Stopped: [ c7auto2 c7auto3 ]
+      * Started: [ c7auto1 c7auto2 c7auto3 ]
     * Clone Set: FAKE2-clone [FAKE2]:
-      * Stopped: [ c7auto1 c7auto2 c7auto3 ]
+      * Started: [ c7auto1 c7auto2 c7auto3 ]
    * Clone Set: FAKE3-clone [FAKE3]:
-      * Stopped: [ c7auto1 c7auto2 c7auto3 ]
+      * Started: [ c7auto1 c7auto2 c7auto3 ]
diff --git a/cts/scheduler/summary/clone_min_start_one.summary b/cts/scheduler/summary/clone_min_start_one.summary
index 395b1310668..5dc1d35e690 100644
--- a/cts/scheduler/summary/clone_min_start_one.summary
+++ b/cts/scheduler/summary/clone_min_start_one.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Node c7auto1: standby (with active resources)
     * Node c7auto2: standby
@@ -16,23 +18,17 @@ Transition Summary:
   * Start FAKE ( c7auto4 ) due to unrunnable clone-one-or-more:order-FAKECLONE-clone-FAKE-mandatory (blocked)
 
 Executing Cluster Transition:
-  * Resource action: shooter stop on c7auto1
-  * Pseudo action: FAKECLONE-clone_start_0
-  * Resource action: shooter start on c7auto3
-  * Resource action: FAKECLONE start on c7auto3
-  * Pseudo action: FAKECLONE-clone_running_0
-  * Resource action: shooter monitor=60000 on c7auto3
-  * Resource action: FAKECLONE monitor=10000 on c7auto3
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
-    * Node c7auto1: standby
+    * Node c7auto1: standby (with active resources)
     * Node c7auto2: standby
     * Online: [ c7auto3 c7auto4 ]
 
   * Full List of Resources:
-    * shooter (stonith:fence_phd_kvm): Started c7auto3
+    * shooter (stonith:fence_phd_kvm): Started c7auto1
     * Clone Set: FAKECLONE-clone [FAKECLONE]:
-      * Started: [ c7auto3 ]
-      * Stopped: [ c7auto1 c7auto2 c7auto4 ]
+      * Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ]
     * FAKE (ocf:heartbeat:Dummy): Stopped
diff --git a/cts/scheduler/summary/clone_min_start_two.summary b/cts/scheduler/summary/clone_min_start_two.summary
index 43eb34dc2e6..6371575e414 100644
--- a/cts/scheduler/summary/clone_min_start_two.summary
+++ b/cts/scheduler/summary/clone_min_start_two.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
    * Node
c7auto2: standby * Online: [ c7auto1 c7auto3 c7auto4 ] @@ -15,17 +17,10 @@ Transition Summary: * Start FAKE ( c7auto4 ) Executing Cluster Transition: - * Pseudo action: FAKECLONE-clone_start_0 - * Resource action: FAKECLONE start on c7auto3 - * Resource action: FAKECLONE start on c7auto1 - * Pseudo action: FAKECLONE-clone_running_0 - * Pseudo action: clone-one-or-more:order-FAKECLONE-clone-FAKE-mandatory - * Resource action: FAKECLONE monitor=10000 on c7auto3 - * Resource action: FAKECLONE monitor=10000 on c7auto1 - * Resource action: FAKE start on c7auto4 - * Resource action: FAKE monitor=10000 on c7auto4 Revised Cluster Status: + * Cluster Summary: + * Node List: * Node c7auto2: standby * Online: [ c7auto1 c7auto3 c7auto4 ] @@ -33,6 +28,5 @@ Revised Cluster Status: * Full List of Resources: * shooter (stonith:fence_phd_kvm): Started c7auto1 * Clone Set: FAKECLONE-clone [FAKECLONE]: - * Started: [ c7auto1 c7auto3 ] - * Stopped: [ c7auto2 c7auto4 ] - * FAKE (ocf:heartbeat:Dummy): Started c7auto4 + * Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ] + * FAKE (ocf:heartbeat:Dummy): Stopped diff --git a/cts/scheduler/summary/clone_min_stop_all.summary b/cts/scheduler/summary/clone_min_stop_all.summary index 9f52aa90182..c63153f39a1 100644 --- a/cts/scheduler/summary/clone_min_stop_all.summary +++ b/cts/scheduler/summary/clone_min_stop_all.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node c7auto1: standby (with active resources) * Node c7auto2: standby (with active resources) @@ -20,25 +22,19 @@ Transition Summary: * Stop FAKE ( c7auto4 ) due to unrunnable clone-one-or-more:order-FAKECLONE-clone-FAKE-mandatory Executing Cluster Transition: - * Resource action: shooter stop on c7auto1 - * Resource action: FAKE stop on c7auto4 - * Resource action: shooter start on c7auto4 - * Pseudo action: FAKECLONE-clone_stop_0 - * Resource action: shooter monitor=60000 on c7auto4 - * Resource action: FAKECLONE stop on c7auto1 - * Resource action: FAKECLONE stop on c7auto2 - * Resource action: FAKECLONE stop on c7auto3 - * Pseudo action: FAKECLONE-clone_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: - * Node c7auto1: standby - * Node c7auto2: standby - * Node c7auto3: standby + * Node c7auto1: standby (with active resources) + * Node c7auto2: standby (with active resources) + * Node c7auto3: standby (with active resources) * Online: [ c7auto4 ] * Full List of Resources: - * shooter (stonith:fence_phd_kvm): Started c7auto4 + * shooter (stonith:fence_phd_kvm): Started c7auto1 * Clone Set: FAKECLONE-clone [FAKECLONE]: - * Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ] - * FAKE (ocf:heartbeat:Dummy): Stopped + * Started: [ c7auto1 c7auto2 c7auto3 ] + * Stopped: [ c7auto4 ] + * FAKE (ocf:heartbeat:Dummy): Started c7auto4 diff --git a/cts/scheduler/summary/clone_min_stop_one.summary b/cts/scheduler/summary/clone_min_stop_one.summary index ec2b9aef551..9ddf582ba6c 100644 --- a/cts/scheduler/summary/clone_min_stop_one.summary +++ b/cts/scheduler/summary/clone_min_stop_one.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node c7auto2: standby (with active resources) * Online: [ c7auto1 c7auto3 c7auto4 ] @@ -14,20 +16,17 @@ Transition Summary: * Stop FAKECLONE:1 ( c7auto2 ) due to node availability Executing Cluster Transition: - * Pseudo action: FAKECLONE-clone_stop_0 - * Resource action: FAKECLONE stop on c7auto2 - * Pseudo action: FAKECLONE-clone_stopped_0 - * Pseudo action: FAKECLONE-clone_start_0 - * Pseudo action: 
FAKECLONE-clone_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: - * Node c7auto2: standby + * Node c7auto2: standby (with active resources) * Online: [ c7auto1 c7auto3 c7auto4 ] * Full List of Resources: * shooter (stonith:fence_phd_kvm): Started c7auto1 * Clone Set: FAKECLONE-clone [FAKECLONE]: - * Started: [ c7auto1 c7auto3 ] - * Stopped: [ c7auto2 c7auto4 ] + * Started: [ c7auto1 c7auto2 c7auto3 ] + * Stopped: [ c7auto4 ] * FAKE (ocf:heartbeat:Dummy): Started c7auto4 diff --git a/cts/scheduler/summary/clone_min_stop_two.summary b/cts/scheduler/summary/clone_min_stop_two.summary index bdf8025dfce..5fbacb9dcfd 100644 --- a/cts/scheduler/summary/clone_min_stop_two.summary +++ b/cts/scheduler/summary/clone_min_stop_two.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node c7auto1: standby (with active resources) * Node c7auto2: standby (with active resources) @@ -18,26 +20,18 @@ Transition Summary: * Stop FAKE ( c7auto4 ) due to unrunnable clone-one-or-more:order-FAKECLONE-clone-FAKE-mandatory Executing Cluster Transition: - * Resource action: shooter stop on c7auto1 - * Resource action: FAKE stop on c7auto4 - * Resource action: shooter start on c7auto3 - * Pseudo action: FAKECLONE-clone_stop_0 - * Resource action: shooter monitor=60000 on c7auto3 - * Resource action: FAKECLONE stop on c7auto1 - * Resource action: FAKECLONE stop on c7auto2 - * Pseudo action: FAKECLONE-clone_stopped_0 - * Pseudo action: FAKECLONE-clone_start_0 - * Pseudo action: FAKECLONE-clone_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: - * Node c7auto1: standby - * Node c7auto2: standby + * Node c7auto1: standby (with active resources) + * Node c7auto2: standby (with active resources) * Online: [ c7auto3 c7auto4 ] * Full List of Resources: - * shooter (stonith:fence_phd_kvm): Started c7auto3 + * shooter (stonith:fence_phd_kvm): Started c7auto1 * Clone Set: FAKECLONE-clone [FAKECLONE]: - * Started: [ c7auto3 ] - * Stopped: [ c7auto1 c7auto2 c7auto4 ] - * FAKE (ocf:heartbeat:Dummy): Stopped + * Started: [ c7auto1 c7auto2 c7auto3 ] + * Stopped: [ c7auto4 ] + * FAKE (ocf:heartbeat:Dummy): Started c7auto4 diff --git a/cts/scheduler/summary/cloned-group-stop.summary b/cts/scheduler/summary/cloned-group-stop.summary index 9ba97be6dc6..18d333bff53 100644 --- a/cts/scheduler/summary/cloned-group-stop.summary +++ b/cts/scheduler/summary/cloned-group-stop.summary @@ -1,6 +1,8 @@ 2 of 20 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ rhos4-node3 rhos4-node4 ] @@ -25,53 +27,23 @@ Transition Summary: * Stop keystone:0 ( rhos4-node4 ) due to unrunnable qpidd-clone running * Stop keystone:1 ( rhos4-node3 ) due to unrunnable qpidd-clone running * Stop glance-fs:0 ( rhos4-node4 ) due to required keystone-clone running - * Stop glance-registry:0 ( rhos4-node4 ) due to required glance-fs:0 stop + * Stop glance-registry:0 ( rhos4-node4 ) due to required glance-fs:0 start * Stop glance-api:0 ( rhos4-node4 ) due to required glance-registry:0 start * Stop glance-fs:1 ( rhos4-node3 ) due to required keystone-clone running - * Stop glance-registry:1 ( rhos4-node3 ) due to required glance-fs:1 stop + * Stop glance-registry:1 ( rhos4-node3 ) due to required glance-fs:1 start * Stop glance-api:1 ( rhos4-node3 ) due to required glance-registry:1 start * Stop cinder-api:0 ( rhos4-node4 ) due to required glance-clone running - * Stop cinder-scheduler:0 ( rhos4-node4 ) 
due to required cinder-api:0 stop + * Stop cinder-scheduler:0 ( rhos4-node4 ) due to required cinder-api:0 start * Stop cinder-volume:0 ( rhos4-node4 ) due to required cinder-scheduler:0 start * Stop cinder-api:1 ( rhos4-node3 ) due to required glance-clone running - * Stop cinder-scheduler:1 ( rhos4-node3 ) due to required cinder-api:1 stop + * Stop cinder-scheduler:1 ( rhos4-node3 ) due to required cinder-api:1 start * Stop cinder-volume:1 ( rhos4-node3 ) due to required cinder-scheduler:1 start Executing Cluster Transition: - * Pseudo action: cinder-clone_stop_0 - * Pseudo action: cinder:0_stop_0 - * Resource action: cinder-volume stop on rhos4-node4 - * Pseudo action: cinder:1_stop_0 - * Resource action: cinder-volume stop on rhos4-node3 - * Resource action: cinder-scheduler stop on rhos4-node4 - * Resource action: cinder-scheduler stop on rhos4-node3 - * Resource action: cinder-api stop on rhos4-node4 - * Resource action: cinder-api stop on rhos4-node3 - * Pseudo action: cinder:0_stopped_0 - * Pseudo action: cinder:1_stopped_0 - * Pseudo action: cinder-clone_stopped_0 - * Pseudo action: glance-clone_stop_0 - * Pseudo action: glance:0_stop_0 - * Resource action: glance-api stop on rhos4-node4 - * Pseudo action: glance:1_stop_0 - * Resource action: glance-api stop on rhos4-node3 - * Resource action: glance-registry stop on rhos4-node4 - * Resource action: glance-registry stop on rhos4-node3 - * Resource action: glance-fs stop on rhos4-node4 - * Resource action: glance-fs stop on rhos4-node3 - * Pseudo action: glance:0_stopped_0 - * Pseudo action: glance:1_stopped_0 - * Pseudo action: glance-clone_stopped_0 - * Pseudo action: keystone-clone_stop_0 - * Resource action: keystone stop on rhos4-node4 - * Resource action: keystone stop on rhos4-node3 - * Pseudo action: keystone-clone_stopped_0 - * Pseudo action: qpidd-clone_stop_0 - * Resource action: qpidd stop on rhos4-node4 - * Resource action: qpidd stop on rhos4-node3 - * Pseudo action: qpidd-clone_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ rhos4-node3 rhos4-node4 ] @@ -82,10 +54,10 @@ Revised Cluster Status: * mysql-fs (ocf:heartbeat:Filesystem): Started rhos4-node3 * mysql-db (ocf:heartbeat:mysql): Started rhos4-node3 * Clone Set: qpidd-clone [qpidd] (disabled): - * Stopped (disabled): [ rhos4-node3 rhos4-node4 ] + * Started: [ rhos4-node3 rhos4-node4 ] * Clone Set: keystone-clone [keystone]: - * Stopped: [ rhos4-node3 rhos4-node4 ] + * Started: [ rhos4-node3 rhos4-node4 ] * Clone Set: glance-clone [glance]: - * Stopped: [ rhos4-node3 rhos4-node4 ] + * Started: [ rhos4-node3 rhos4-node4 ] * Clone Set: cinder-clone [cinder]: - * Stopped: [ rhos4-node3 rhos4-node4 ] + * Started: [ rhos4-node3 rhos4-node4 ] diff --git a/cts/scheduler/summary/cloned-group.summary b/cts/scheduler/summary/cloned-group.summary index c584972b6d0..7ad9d78dd41 100644 --- a/cts/scheduler/summary/cloned-group.summary +++ b/cts/scheduler/summary/cloned-group.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ webcluster01 ] * OFFLINE: [ webcluster02 ] @@ -16,33 +19,23 @@ Transition Summary: * Restart mysql-proxy:0 ( webcluster01 ) due to required apache2:0 start * Stop apache2:2 ( webcluster01 ) due to node availability * Stop mysql-proxy:2 ( webcluster01 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Pseudo action: apache2_clone_stop_0 - * 
Pseudo action: grrr:0_stop_0 - * Resource action: mysql-proxy:1 stop on webcluster01 - * Pseudo action: grrr:2_stop_0 - * Resource action: mysql-proxy:0 stop on webcluster01 - * Resource action: apache2:1 stop on webcluster01 - * Resource action: apache2:0 stop on webcluster01 - * Pseudo action: grrr:0_stopped_0 - * Pseudo action: grrr:2_stopped_0 - * Pseudo action: apache2_clone_stopped_0 - * Pseudo action: apache2_clone_start_0 - * Pseudo action: grrr:0_start_0 - * Resource action: apache2:1 start on webcluster01 - * Resource action: apache2:1 monitor=10000 on webcluster01 - * Resource action: mysql-proxy:1 start on webcluster01 - * Resource action: mysql-proxy:1 monitor=10000 on webcluster01 - * Pseudo action: grrr:0_running_0 - * Pseudo action: apache2_clone_running_0 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ webcluster01 ] * OFFLINE: [ webcluster02 ] * Full List of Resources: * Clone Set: apache2_clone [grrr]: + * Resource Group: grrr:2: + * apache2 (ocf:heartbeat:apache): ORPHANED Started webcluster01 + * mysql-proxy (lsb:mysql-proxy): ORPHANED Started webcluster01 * Started: [ webcluster01 ] * Stopped: [ webcluster02 ] diff --git a/cts/scheduler/summary/cloned_start_one.summary b/cts/scheduler/summary/cloned_start_one.summary index f3bed715c45..fbde7c829ad 100644 --- a/cts/scheduler/summary/cloned_start_one.summary +++ b/cts/scheduler/summary/cloned_start_one.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node c7auto2: standby * Node c7auto3: standby (with active resources) @@ -18,25 +20,19 @@ Transition Summary: * Stop FAKECLONE2:1 ( c7auto4 ) due to unrunnable clone-one-or-more:order-FAKECLONE-clone-FAKECLONE2-clone-mandatory Executing Cluster Transition: - * Pseudo action: FAKECLONE-clone_start_0 - * Pseudo action: FAKECLONE2-clone_stop_0 - * Resource action: FAKECLONE start on c7auto1 - * Pseudo action: FAKECLONE-clone_running_0 - * Resource action: FAKECLONE2 stop on c7auto3 - * Resource action: FAKECLONE2 stop on c7auto4 - * Pseudo action: FAKECLONE2-clone_stopped_0 - * Resource action: FAKECLONE monitor=10000 on c7auto1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Node c7auto2: standby - * Node c7auto3: standby + * Node c7auto3: standby (with active resources) * Online: [ c7auto1 c7auto4 ] * Full List of Resources: * shooter (stonith:fence_phd_kvm): Started c7auto1 * Clone Set: FAKECLONE-clone [FAKECLONE]: - * Started: [ c7auto1 ] - * Stopped: [ c7auto2 c7auto3 c7auto4 ] - * Clone Set: FAKECLONE2-clone [FAKECLONE2]: * Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ] + * Clone Set: FAKECLONE2-clone [FAKECLONE2]: + * Started: [ c7auto3 c7auto4 ] + * Stopped: [ c7auto1 c7auto2 ] diff --git a/cts/scheduler/summary/cloned_start_two.summary b/cts/scheduler/summary/cloned_start_two.summary index d863fb2d953..c60e6aec65e 100644 --- a/cts/scheduler/summary/cloned_start_two.summary +++ b/cts/scheduler/summary/cloned_start_two.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node c7auto3: standby (with active resources) * Online: [ c7auto1 c7auto2 c7auto4 ] @@ -15,29 +17,22 @@ Transition Summary: * Start FAKECLONE:0 ( c7auto2 ) * Start FAKECLONE:1 ( c7auto1 ) * Stop FAKECLONE2:0 ( c7auto3 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Pseudo action: FAKECLONE-clone_start_0 - * Pseudo action: FAKECLONE2-clone_stop_0 - * Resource 
action: FAKECLONE start on c7auto2 - * Resource action: FAKECLONE start on c7auto1 - * Pseudo action: FAKECLONE-clone_running_0 - * Resource action: FAKECLONE2 stop on c7auto3 - * Pseudo action: FAKECLONE2-clone_stopped_0 - * Pseudo action: clone-one-or-more:order-FAKECLONE-clone-FAKECLONE2-clone-mandatory - * Resource action: FAKECLONE monitor=10000 on c7auto2 - * Resource action: FAKECLONE monitor=10000 on c7auto1 Revised Cluster Status: + * Cluster Summary: + * Node List: - * Node c7auto3: standby + * Node c7auto3: standby (with active resources) * Online: [ c7auto1 c7auto2 c7auto4 ] * Full List of Resources: * shooter (stonith:fence_phd_kvm): Started c7auto1 * Clone Set: FAKECLONE-clone [FAKECLONE]: - * Started: [ c7auto1 c7auto2 ] - * Stopped: [ c7auto3 c7auto4 ] + * Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ] * Clone Set: FAKECLONE2-clone [FAKECLONE2]: - * Started: [ c7auto4 ] - * Stopped: [ c7auto1 c7auto2 c7auto3 ] + * Started: [ c7auto3 c7auto4 ] + * Stopped: [ c7auto1 c7auto2 ] diff --git a/cts/scheduler/summary/cloned_stop_one.summary b/cts/scheduler/summary/cloned_stop_one.summary index 539016fed35..a35579bfdcd 100644 --- a/cts/scheduler/summary/cloned_stop_one.summary +++ b/cts/scheduler/summary/cloned_stop_one.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node c7auto3: standby (with active resources) * Online: [ c7auto1 c7auto2 c7auto4 ] @@ -15,27 +17,23 @@ Current cluster status: Transition Summary: * Stop FAKECLONE:2 ( c7auto3 ) due to node availability * Stop FAKECLONE2:0 ( c7auto3 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Pseudo action: FAKECLONE2-clone_stop_0 - * Resource action: FAKECLONE2 stop on c7auto3 - * Pseudo action: FAKECLONE2-clone_stopped_0 - * Pseudo action: FAKECLONE-clone_stop_0 - * Resource action: FAKECLONE stop on c7auto3 - * Pseudo action: FAKECLONE-clone_stopped_0 - * Pseudo action: FAKECLONE-clone_start_0 - * Pseudo action: FAKECLONE-clone_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: - * Node c7auto3: standby + * Node c7auto3: standby (with active resources) * Online: [ c7auto1 c7auto2 c7auto4 ] * Full List of Resources: * shooter (stonith:fence_phd_kvm): Started c7auto1 * Clone Set: FAKECLONE-clone [FAKECLONE]: - * Started: [ c7auto1 c7auto2 ] - * Stopped: [ c7auto3 c7auto4 ] + * Started: [ c7auto1 c7auto2 c7auto3 ] + * Stopped: [ c7auto4 ] * Clone Set: FAKECLONE2-clone [FAKECLONE2]: - * Started: [ c7auto4 ] - * Stopped: [ c7auto1 c7auto2 c7auto3 ] + * Started: [ c7auto3 c7auto4 ] + * Stopped: [ c7auto1 c7auto2 ] diff --git a/cts/scheduler/summary/cloned_stop_two.summary b/cts/scheduler/summary/cloned_stop_two.summary index 53795f5488b..f016c77d38c 100644 --- a/cts/scheduler/summary/cloned_stop_two.summary +++ b/cts/scheduler/summary/cloned_stop_two.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node c7auto2: standby (with active resources) * Node c7auto3: standby (with active resources) @@ -20,27 +22,20 @@ Transition Summary: * Stop FAKECLONE2:1 ( c7auto4 ) due to unrunnable clone-one-or-more:order-FAKECLONE-clone-FAKECLONE2-clone-mandatory Executing Cluster Transition: - * Pseudo action: FAKECLONE2-clone_stop_0 - * Resource action: FAKECLONE2 stop on c7auto3 - * Resource action: FAKECLONE2 stop on c7auto4 - * Pseudo action: FAKECLONE2-clone_stopped_0 - * Pseudo action: FAKECLONE-clone_stop_0 - * Resource action: FAKECLONE stop on c7auto2 - * Resource action: 
FAKECLONE stop on c7auto3 - * Pseudo action: FAKECLONE-clone_stopped_0 - * Pseudo action: FAKECLONE-clone_start_0 - * Pseudo action: FAKECLONE-clone_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: - * Node c7auto2: standby - * Node c7auto3: standby + * Node c7auto2: standby (with active resources) + * Node c7auto3: standby (with active resources) * Online: [ c7auto1 c7auto4 ] * Full List of Resources: * shooter (stonith:fence_phd_kvm): Started c7auto1 * Clone Set: FAKECLONE-clone [FAKECLONE]: - * Started: [ c7auto1 ] - * Stopped: [ c7auto2 c7auto3 c7auto4 ] + * Started: [ c7auto1 c7auto2 c7auto3 ] + * Stopped: [ c7auto4 ] * Clone Set: FAKECLONE2-clone [FAKECLONE2]: - * Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ] + * Started: [ c7auto3 c7auto4 ] + * Stopped: [ c7auto1 c7auto2 ] diff --git a/cts/scheduler/summary/cluster-specific-params.summary b/cts/scheduler/summary/cluster-specific-params.summary index 8a1d5e43dea..6013c0ad580 100644 --- a/cts/scheduler/summary/cluster-specific-params.summary +++ b/cts/scheduler/summary/cluster-specific-params.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -10,15 +12,13 @@ Transition Summary: * Start rsc1 ( node2 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc1 start on node2 - * Resource action: rsc1 monitor=10000 on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 - * rsc1 (ocf:pacemaker:Dummy): Started node2 + * rsc1 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/colo_promoted_w_native.summary b/cts/scheduler/summary/colo_promoted_w_native.summary index ad67078d880..55aea9fe701 100644 --- a/cts/scheduler/summary/colo_promoted_w_native.summary +++ b/cts/scheduler/summary/colo_promoted_w_native.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -13,37 +15,15 @@ Transition Summary: * Promote MS_RSC_NATIVE:1 ( Unpromoted -> Promoted node1 ) Executing Cluster Transition: - * Resource action: MS_RSC_NATIVE:1 cancel=15000 on node1 - * Pseudo action: MS_RSC_pre_notify_demote_0 - * Resource action: MS_RSC_NATIVE:0 notify on node2 - * Resource action: MS_RSC_NATIVE:1 notify on node1 - * Pseudo action: MS_RSC_confirmed-pre_notify_demote_0 - * Pseudo action: MS_RSC_demote_0 - * Resource action: MS_RSC_NATIVE:0 demote on node2 - * Pseudo action: MS_RSC_demoted_0 - * Pseudo action: MS_RSC_post_notify_demoted_0 - * Resource action: MS_RSC_NATIVE:0 notify on node2 - * Resource action: MS_RSC_NATIVE:1 notify on node1 - * Pseudo action: MS_RSC_confirmed-post_notify_demoted_0 - * Pseudo action: MS_RSC_pre_notify_promote_0 - * Resource action: MS_RSC_NATIVE:0 notify on node2 - * Resource action: MS_RSC_NATIVE:1 notify on node1 - * Pseudo action: MS_RSC_confirmed-pre_notify_promote_0 - * Pseudo action: MS_RSC_promote_0 - * Resource action: MS_RSC_NATIVE:1 promote on node1 - * Pseudo action: MS_RSC_promoted_0 - * Pseudo action: MS_RSC_post_notify_promoted_0 - * Resource action: MS_RSC_NATIVE:0 notify on node2 - * Resource action: MS_RSC_NATIVE:1 notify on node1 - * Pseudo action: MS_RSC_confirmed-post_notify_promoted_0 - * Resource action: MS_RSC_NATIVE:0 monitor=15000 on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * A 
(ocf:pacemaker:Dummy): Started node1 * Clone Set: MS_RSC [MS_RSC_NATIVE] (promotable): - * Promoted: [ node1 ] - * Unpromoted: [ node2 ] + * Promoted: [ node2 ] + * Unpromoted: [ node1 ] diff --git a/cts/scheduler/summary/colo_unpromoted_w_native.summary b/cts/scheduler/summary/colo_unpromoted_w_native.summary index 42df383b82d..2e597b79697 100644 --- a/cts/scheduler/summary/colo_unpromoted_w_native.summary +++ b/cts/scheduler/summary/colo_unpromoted_w_native.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -14,40 +16,15 @@ Transition Summary: * Promote MS_RSC_NATIVE:1 ( Unpromoted -> Promoted node1 ) Executing Cluster Transition: - * Resource action: A stop on node1 - * Resource action: MS_RSC_NATIVE:1 cancel=15000 on node1 - * Pseudo action: MS_RSC_pre_notify_demote_0 - * Resource action: A start on node2 - * Resource action: MS_RSC_NATIVE:0 notify on node2 - * Resource action: MS_RSC_NATIVE:1 notify on node1 - * Pseudo action: MS_RSC_confirmed-pre_notify_demote_0 - * Pseudo action: MS_RSC_demote_0 - * Resource action: A monitor=10000 on node2 - * Resource action: MS_RSC_NATIVE:0 demote on node2 - * Pseudo action: MS_RSC_demoted_0 - * Pseudo action: MS_RSC_post_notify_demoted_0 - * Resource action: MS_RSC_NATIVE:0 notify on node2 - * Resource action: MS_RSC_NATIVE:1 notify on node1 - * Pseudo action: MS_RSC_confirmed-post_notify_demoted_0 - * Pseudo action: MS_RSC_pre_notify_promote_0 - * Resource action: MS_RSC_NATIVE:0 notify on node2 - * Resource action: MS_RSC_NATIVE:1 notify on node1 - * Pseudo action: MS_RSC_confirmed-pre_notify_promote_0 - * Pseudo action: MS_RSC_promote_0 - * Resource action: MS_RSC_NATIVE:1 promote on node1 - * Pseudo action: MS_RSC_promoted_0 - * Pseudo action: MS_RSC_post_notify_promoted_0 - * Resource action: MS_RSC_NATIVE:0 notify on node2 - * Resource action: MS_RSC_NATIVE:1 notify on node1 - * Pseudo action: MS_RSC_confirmed-post_notify_promoted_0 - * Resource action: MS_RSC_NATIVE:0 monitor=15000 on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * A (ocf:pacemaker:Dummy): Started node2 + * A (ocf:pacemaker:Dummy): Started node1 * Clone Set: MS_RSC [MS_RSC_NATIVE] (promotable): - * Promoted: [ node1 ] - * Unpromoted: [ node2 ] + * Promoted: [ node2 ] + * Unpromoted: [ node1 ] diff --git a/cts/scheduler/summary/coloc-attr.summary b/cts/scheduler/summary/coloc-attr.summary index db3fd8e0495..89389eb43ed 100644 --- a/cts/scheduler/summary/coloc-attr.summary +++ b/cts/scheduler/summary/coloc-attr.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ power720-1 power720-2 power720-3 power720-4 ] @@ -13,19 +15,15 @@ Transition Summary: * Start resource_t21 ( power720-4 ) Executing Cluster Transition: - * Pseudo action: group_test1_start_0 - * Resource action: resource_t11 start on power720-3 - * Pseudo action: group_test1_running_0 - * Pseudo action: group_test2_start_0 - * Resource action: resource_t21 start on power720-4 - * Pseudo action: group_test2_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ power720-1 power720-2 power720-3 power720-4 ] * Full List of Resources: * Resource Group: group_test1: - * resource_t11 (lsb:nfsserver): Started power720-3 + * resource_t11 (lsb:nfsserver): Stopped * Resource Group: group_test2: - * resource_t21 (ocf:heartbeat:Dummy): Started power720-4 + * resource_t21 (ocf:heartbeat:Dummy): Stopped diff --git 
a/cts/scheduler/summary/coloc-clone-stays-active.summary b/cts/scheduler/summary/coloc-clone-stays-active.summary index cb212e1cde9..a21018775f9 100644 --- a/cts/scheduler/summary/coloc-clone-stays-active.summary +++ b/cts/scheduler/summary/coloc-clone-stays-active.summary @@ -1,6 +1,9 @@ 9 of 87 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ s01-0 s01-1 ] @@ -103,13 +106,11 @@ Transition Summary: * Migrate mgmt-vm ( s01-0 -> s01-1 ) Executing Cluster Transition: - * Resource action: mgmt-vm migrate_to on s01-0 - * Resource action: mgmt-vm migrate_from on s01-1 - * Resource action: mgmt-vm stop on s01-0 - * Pseudo action: mgmt-vm_start_0 - * Resource action: mgmt-vm monitor=10000 on s01-1 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ s01-0 s01-1 ] @@ -200,7 +201,7 @@ Revised Cluster Status: * Started: [ s01-0 s01-1 ] * Clone Set: cl-vg-s01-vm-data [vg-s01-vm-data]: * Started: [ s01-0 s01-1 ] - * mgmt-vm (ocf:vds-ok:VirtualDomain): Started s01-1 + * mgmt-vm (ocf:vds-ok:VirtualDomain): Started s01-0 * Clone Set: cl-drbdlinks-s01-service [drbdlinks-s01-service]: * Started: [ s01-0 s01-1 ] * Clone Set: cl-libvirtd [libvirtd]: diff --git a/cts/scheduler/summary/coloc-dependee-should-move.summary b/cts/scheduler/summary/coloc-dependee-should-move.summary index 7df3f6edaf7..799075675dc 100644 --- a/cts/scheduler/summary/coloc-dependee-should-move.summary +++ b/cts/scheduler/summary/coloc-dependee-should-move.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2019-10-22 20:53:06Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] @@ -24,25 +26,11 @@ Transition Summary: * Move dummy2d ( rhel7-4 -> rhel7-3 ) Executing Cluster Transition: - * Pseudo action: dummy2_stop_0 - * Resource action: dummy2d stop on rhel7-4 - * Resource action: dummy2c stop on rhel7-4 - * Resource action: dummy2b stop on rhel7-4 - * Resource action: dummy2a stop on rhel7-4 - * Pseudo action: dummy2_stopped_0 - * Pseudo action: dummy2_start_0 - * Resource action: dummy2a start on rhel7-3 - * Resource action: dummy2b start on rhel7-3 - * Resource action: dummy2c start on rhel7-3 - * Resource action: dummy2d start on rhel7-3 - * Pseudo action: dummy2_running_0 - * Resource action: dummy2a monitor=10000 on rhel7-3 - * Resource action: dummy2b monitor=10000 on rhel7-3 - * Resource action: dummy2c monitor=10000 on rhel7-3 - * Resource action: dummy2d monitor=10000 on rhel7-3 Using the original execution date of: 2019-10-22 20:53:06Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] @@ -55,7 +43,7 @@ Revised Cluster Status: * dummy1c (ocf:heartbeat:Dummy): Started rhel7-3 * dummy1d (ocf:heartbeat:Dummy): Started rhel7-3 * Resource Group: dummy2: - * dummy2a (ocf:pacemaker:Dummy): Started rhel7-3 - * dummy2b (ocf:heartbeat:Dummy): Started rhel7-3 - * dummy2c (ocf:heartbeat:Dummy): Started rhel7-3 - * dummy2d (ocf:heartbeat:Dummy): Started rhel7-3 + * dummy2a (ocf:pacemaker:Dummy): Started rhel7-4 + * dummy2b (ocf:heartbeat:Dummy): Started rhel7-4 + * dummy2c (ocf:heartbeat:Dummy): Started rhel7-4 + * dummy2d (ocf:heartbeat:Dummy): Started rhel7-4 diff --git a/cts/scheduler/summary/coloc-dependee-should-stay.summary 
b/cts/scheduler/summary/coloc-dependee-should-stay.summary index 38eb64d6c17..b414dd3894a 100644 --- a/cts/scheduler/summary/coloc-dependee-should-stay.summary +++ b/cts/scheduler/summary/coloc-dependee-should-stay.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2019-10-22 20:53:06Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] @@ -23,6 +25,8 @@ Executing Cluster Transition: Using the original execution date of: 2019-10-22 20:53:06Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] diff --git a/cts/scheduler/summary/coloc-group.summary b/cts/scheduler/summary/coloc-group.summary index 94163e2f5eb..8be571abacd 100644 --- a/cts/scheduler/summary/coloc-group.summary +++ b/cts/scheduler/summary/coloc-group.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] @@ -14,26 +16,16 @@ Transition Summary: * Start rsc2 ( node2 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node3 - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Pseudo action: group1_start_0 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc3 monitor on node3 - * Resource action: rsc3 monitor on node1 - * Resource action: rsc4 monitor on node3 - * Resource action: rsc4 monitor on node2 - * Resource action: rsc4 monitor on node1 - * Resource action: rsc1 start on node2 - * Resource action: rsc2 start on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node2 + * rsc1 (ocf:heartbeat:apache): Stopped * Resource Group: group1: - * rsc2 (ocf:heartbeat:apache): Started node2 + * rsc2 (ocf:heartbeat:apache): Stopped * rsc3 (ocf:heartbeat:apache): Stopped * rsc4 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/coloc-intra-set.summary b/cts/scheduler/summary/coloc-intra-set.summary index fa95daba01d..ccc7f6438cd 100644 --- a/cts/scheduler/summary/coloc-intra-set.summary +++ b/cts/scheduler/summary/coloc-intra-set.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ hex-13 hex-14 ] @@ -14,25 +16,16 @@ Transition Summary: * Move dummy3 ( hex-13 -> hex-14 ) Executing Cluster Transition: - * Resource action: dummy1 stop on hex-13 - * Resource action: dummy3 stop on hex-13 - * Resource action: d0:0 delete on hex-13 - * Resource action: o2cb:0 delete on hex-13 - * Resource action: dummy4 delete on hex-13 - * Resource action: dlm:0 delete on hex-13 - * Resource action: ocfs2-3:0 delete on hex-13 - * Resource action: dummy1 start on hex-14 - * Resource action: dummy3 start on hex-14 - * Resource action: dummy1 monitor=15000 on hex-14 - * Resource action: dummy3 monitor=15000 on hex-14 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ hex-13 hex-14 ] * Full List of Resources: * fencing-sbd (stonith:external/sbd): Started hex-13 * dummy0 (ocf:heartbeat:Dummy): Started hex-14 - * dummy1 (ocf:heartbeat:Dummy): Started hex-14 + * dummy1 (ocf:heartbeat:Dummy): Started hex-13 * dummy2 (ocf:heartbeat:Dummy): Started hex-14 - * dummy3 (ocf:heartbeat:Dummy): Started hex-14 + * dummy3 (ocf:heartbeat:Dummy): Started hex-13 diff --git a/cts/scheduler/summary/coloc-list.summary b/cts/scheduler/summary/coloc-list.summary index e3ac574edea..f997611ea05 100644 --- 
a/cts/scheduler/summary/coloc-list.summary +++ b/cts/scheduler/summary/coloc-list.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] @@ -15,28 +17,16 @@ Transition Summary: * Start rsc4 ( node2 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node3 - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc3 monitor on node3 - * Resource action: rsc3 monitor on node1 - * Resource action: rsc4 monitor on node3 - * Resource action: rsc4 monitor on node2 - * Resource action: rsc4 monitor on node1 - * Resource action: rsc5 monitor on node3 - * Resource action: rsc5 monitor on node1 - * Resource action: rsc1 start on node2 - * Resource action: rsc2 start on node2 - * Resource action: rsc4 start on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node2 - * rsc2 (ocf:heartbeat:apache): Started node2 + * rsc1 (ocf:heartbeat:apache): Stopped + * rsc2 (ocf:heartbeat:apache): Stopped * rsc3 (ocf:heartbeat:apache): Stopped - * rsc4 (ocf:heartbeat:apache): Started node2 + * rsc4 (ocf:heartbeat:apache): Stopped * rsc5 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/coloc-loop.summary b/cts/scheduler/summary/coloc-loop.summary index 9d11ab0a99a..27c824c4608 100644 --- a/cts/scheduler/summary/coloc-loop.summary +++ b/cts/scheduler/summary/coloc-loop.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] @@ -13,24 +15,14 @@ Transition Summary: * Start rsc3 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node3 - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc2 monitor on node3 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc2 monitor on node1 - * Resource action: rsc3 monitor on node3 - * Resource action: rsc3 monitor on node2 - * Resource action: rsc3 monitor on node1 - * Resource action: rsc1 start on node1 - * Resource action: rsc2 start on node1 - * Resource action: rsc3 start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node1 - * rsc2 (ocf:heartbeat:apache): Started node1 - * rsc3 (ocf:heartbeat:apache): Started node1 + * rsc1 (ocf:heartbeat:apache): Stopped + * rsc2 (ocf:heartbeat:apache): Stopped + * rsc3 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/coloc-many-one.summary b/cts/scheduler/summary/coloc-many-one.summary index d83f2c10626..7b20a1f1a92 100644 --- a/cts/scheduler/summary/coloc-many-one.summary +++ b/cts/scheduler/summary/coloc-many-one.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] @@ -14,25 +16,15 @@ Transition Summary: * Start rsc4 ( node2 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node3 - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc3 monitor on node3 - * Resource action: rsc3 monitor on node1 - * Resource action: rsc4 monitor on node3 - * Resource action: rsc4 monitor on node2 - * Resource action: rsc4 monitor on node1 - * Resource action: rsc1 start on node2 - * 
Resource action: rsc2 start on node2 - * Resource action: rsc4 start on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node2 - * rsc2 (ocf:heartbeat:apache): Started node2 + * rsc1 (ocf:heartbeat:apache): Stopped + * rsc2 (ocf:heartbeat:apache): Stopped * rsc3 (ocf:heartbeat:apache): Stopped - * rsc4 (ocf:heartbeat:apache): Started node2 + * rsc4 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/coloc-negative-group.summary b/cts/scheduler/summary/coloc-negative-group.summary index ed8faa2206c..5f56f7a267d 100644 --- a/cts/scheduler/summary/coloc-negative-group.summary +++ b/cts/scheduler/summary/coloc-negative-group.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ lenny-a lenny-b ] @@ -11,11 +13,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: res_Dummy_1 cancel=10000 on lenny-b - * Resource action: res_Dummy_2 cancel=10000 on lenny-b - * Resource action: res_Dummy_3 cancel=10000 on lenny-a Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ lenny-a lenny-b ] diff --git a/cts/scheduler/summary/coloc-unpromoted-anti.summary b/cts/scheduler/summary/coloc-unpromoted-anti.summary index a8518d37198..5e830ef3cc4 100644 --- a/cts/scheduler/summary/coloc-unpromoted-anti.summary +++ b/cts/scheduler/summary/coloc-unpromoted-anti.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ pollux sirius ] @@ -21,16 +23,10 @@ Transition Summary: * Start apache ( pollux ) Executing Cluster Transition: - * Pseudo action: group-1_start_0 - * Resource action: fs-1 start on pollux - * Resource action: ip-198 start on pollux - * Resource action: apache start on pollux - * Pseudo action: group-1_running_0 - * Resource action: fs-1 monitor=20000 on pollux - * Resource action: ip-198 monitor=30000 on pollux - * Resource action: apache monitor=60000 on pollux Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ pollux sirius ] @@ -41,8 +37,8 @@ Revised Cluster Status: * Promoted: [ pollux ] * Unpromoted: [ sirius ] * Resource Group: group-1: - * fs-1 (ocf:heartbeat:Filesystem): Started pollux - * ip-198 (ocf:heartbeat:IPaddr2): Started pollux - * apache (ocf:custom:apache2): Started pollux + * fs-1 (ocf:heartbeat:Filesystem): Stopped + * ip-198 (ocf:heartbeat:IPaddr2): Stopped + * apache (ocf:custom:apache2): Stopped * pollux-fencing (stonith:external/ipmi-soft): Started sirius * sirius-fencing (stonith:external/ipmi-soft): Started pollux diff --git a/cts/scheduler/summary/coloc-with-inner-group-member.summary b/cts/scheduler/summary/coloc-with-inner-group-member.summary index 6659721a79c..b5a66c39d00 100644 --- a/cts/scheduler/summary/coloc-with-inner-group-member.summary +++ b/cts/scheduler/summary/coloc-with-inner-group-member.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2023-06-20 20:45:06Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 rhel8-5 ] @@ -17,22 +19,11 @@ Transition Summary: * Restart vip ( rhel8-3 ) due to required bar start Executing Cluster Transition: - * Pseudo action: grp_stop_0 - * Resource action: vip stop on rhel8-3 - * Resource action: bar stop on rhel8-4 - * Resource action: foo stop on rhel8-4 - * Pseudo action: grp_stopped_0 - * Pseudo action: grp_start_0 - * Resource action: foo start on rhel8-3 - * 
Resource action: bar start on rhel8-3 - * Resource action: vip start on rhel8-3 - * Resource action: vip monitor=10000 on rhel8-3 - * Pseudo action: grp_running_0 - * Resource action: foo monitor=10000 on rhel8-3 - * Resource action: bar monitor=10000 on rhel8-3 Using the original execution date of: 2023-06-20 20:45:06Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 rhel8-5 ] @@ -40,6 +31,6 @@ Revised Cluster Status: * Fencing (stonith:fence_xvm): Started rhel8-1 * vip-dep (ocf:pacemaker:Dummy): Started rhel8-3 * Resource Group: grp: - * foo (ocf:pacemaker:Dummy): Started rhel8-3 - * bar (ocf:pacemaker:Dummy): Started rhel8-3 + * foo (ocf:pacemaker:Dummy): Started rhel8-4 + * bar (ocf:pacemaker:Dummy): Started rhel8-4 * vip (ocf:pacemaker:Dummy): Started rhel8-3 diff --git a/cts/scheduler/summary/coloc_fp_logic.summary b/cts/scheduler/summary/coloc_fp_logic.summary index 7826b882604..823bd5b4e84 100644 --- a/cts/scheduler/summary/coloc_fp_logic.summary +++ b/cts/scheduler/summary/coloc_fp_logic.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -10,14 +12,13 @@ Transition Summary: * Move A ( node1 -> node2 ) Executing Cluster Transition: - * Resource action: A stop on node1 - * Resource action: A start on node2 - * Resource action: A monitor=10000 on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * A (ocf:pacemaker:Dummy): Started node2 + * A (ocf:pacemaker:Dummy): Started node1 * B (ocf:pacemaker:Dummy): Started node2 diff --git a/cts/scheduler/summary/colocate-primitive-with-clone.summary b/cts/scheduler/summary/colocate-primitive-with-clone.summary index e884428ee40..285ed4182eb 100644 --- a/cts/scheduler/summary/colocate-primitive-with-clone.summary +++ b/cts/scheduler/summary/colocate-primitive-with-clone.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ srv01 srv02 srv03 srv04 ] @@ -60,27 +63,20 @@ Transition Summary: * Start UmDummy02 ( srv04 ) Executing Cluster Transition: - * Pseudo action: UMgroup01_start_0 - * Resource action: UmVIPcheck start on srv04 - * Resource action: UmIPaddr start on srv04 - * Resource action: UmDummy01 start on srv04 - * Resource action: UmDummy02 start on srv04 - * Cluster action: do_shutdown on srv01 - * Pseudo action: UMgroup01_running_0 - * Resource action: UmIPaddr monitor=10000 on srv04 - * Resource action: UmDummy01 monitor=10000 on srv04 - * Resource action: UmDummy02 monitor=10000 on srv04 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ srv01 srv02 srv03 srv04 ] * Full List of Resources: * Resource Group: UMgroup01: - * UmVIPcheck (ocf:heartbeat:Dummy): Started srv04 - * UmIPaddr (ocf:heartbeat:Dummy): Started srv04 - * UmDummy01 (ocf:heartbeat:Dummy): Started srv04 - * UmDummy02 (ocf:heartbeat:Dummy): Started srv04 + * UmVIPcheck (ocf:heartbeat:Dummy): Stopped + * UmIPaddr (ocf:heartbeat:Dummy): Stopped + * UmDummy01 (ocf:heartbeat:Dummy): Stopped + * UmDummy02 (ocf:heartbeat:Dummy): Stopped * Resource Group: OVDBgroup02-1: * prmExPostgreSQLDB1 (ocf:heartbeat:Dummy): Started srv04 * prmFsPostgreSQLDB1-1 (ocf:heartbeat:Dummy): Started srv04 diff --git a/cts/scheduler/summary/colocate-unmanaged-group.summary b/cts/scheduler/summary/colocate-unmanaged-group.summary index 
f29452bfb40..6c8dc935f62 100644 --- a/cts/scheduler/summary/colocate-unmanaged-group.summary +++ b/cts/scheduler/summary/colocate-unmanaged-group.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2020-02-26 05:50:16Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ rh80-test01 rh80-test02 ] @@ -14,11 +16,11 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: prmDummy1 monitor=10000 on rh80-test01 - * Resource action: prmDummy3 monitor on rh80-test01 Using the original execution date of: 2020-02-26 05:50:16Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ rh80-test01 rh80-test02 ] diff --git a/cts/scheduler/summary/colocated-utilization-clone.summary b/cts/scheduler/summary/colocated-utilization-clone.summary index d303bdc1a26..7ab162ddf66 100644 --- a/cts/scheduler/summary/colocated-utilization-clone.summary +++ b/cts/scheduler/summary/colocated-utilization-clone.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] @@ -22,52 +24,18 @@ Transition Summary: * Start rsc5 ( node3 ) Executing Cluster Transition: - * Resource action: rsc1:0 monitor on node2 - * Resource action: rsc1:0 monitor on node1 - * Resource action: rsc1:1 monitor on node3 - * Pseudo action: clone1_start_0 - * Resource action: rsc2:0 monitor on node3 - * Resource action: rsc2:0 monitor on node1 - * Resource action: rsc3:0 monitor on node3 - * Resource action: rsc3:0 monitor on node1 - * Resource action: rsc2:1 monitor on node2 - * Resource action: rsc3:1 monitor on node2 - * Resource action: rsc4 monitor on node3 - * Resource action: rsc4 monitor on node2 - * Resource action: rsc4 monitor on node1 - * Resource action: rsc5 monitor on node3 - * Resource action: rsc5 monitor on node2 - * Resource action: rsc5 monitor on node1 - * Pseudo action: load_stopped_node3 - * Pseudo action: load_stopped_node2 - * Pseudo action: load_stopped_node1 - * Resource action: rsc1:0 start on node2 - * Resource action: rsc1:1 start on node3 - * Pseudo action: clone1_running_0 - * Pseudo action: clone2_start_0 - * Pseudo action: group1:0_start_0 - * Resource action: rsc2:0 start on node3 - * Resource action: rsc3:0 start on node3 - * Pseudo action: group1:1_start_0 - * Resource action: rsc2:1 start on node2 - * Resource action: rsc3:1 start on node2 - * Pseudo action: group1:0_running_0 - * Pseudo action: group1:1_running_0 - * Pseudo action: clone2_running_0 - * Pseudo action: group2_start_0 - * Resource action: rsc4 start on node3 - * Resource action: rsc5 start on node3 - * Pseudo action: group2_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: * Clone Set: clone1 [rsc1]: - * Started: [ node2 node3 ] + * Stopped: [ node1 node2 node3 ] * Clone Set: clone2 [group1]: - * Started: [ node2 node3 ] + * Stopped: [ node1 node2 node3 ] * Resource Group: group2: - * rsc4 (ocf:pacemaker:Dummy): Started node3 - * rsc5 (ocf:pacemaker:Dummy): Started node3 + * rsc4 (ocf:pacemaker:Dummy): Stopped + * rsc5 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/colocated-utilization-group.summary b/cts/scheduler/summary/colocated-utilization-group.summary index b76d9136be8..eff951525f4 100644 --- a/cts/scheduler/summary/colocated-utilization-group.summary +++ b/cts/scheduler/summary/colocated-utilization-group.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ 
node1 node2 ]
@@ -19,37 +21,18 @@ Transition Summary:
   * Start rsc5 ( node2 )

 Executing Cluster Transition:
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Pseudo action: group1_start_0
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
-  * Resource action: rsc3 monitor on node2
-  * Resource action: rsc3 monitor on node1
-  * Pseudo action: group2_start_0
-  * Resource action: rsc4 monitor on node2
-  * Resource action: rsc4 monitor on node1
-  * Resource action: rsc5 monitor on node2
-  * Resource action: rsc5 monitor on node1
-  * Pseudo action: load_stopped_node2
-  * Pseudo action: load_stopped_node1
-  * Resource action: rsc1 start on node2
-  * Resource action: rsc2 start on node2
-  * Resource action: rsc3 start on node2
-  * Resource action: rsc4 start on node2
-  * Resource action: rsc5 start on node2
-  * Pseudo action: group1_running_0
-  * Pseudo action: group2_running_0

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

   * Full List of Resources:
-    * rsc1 (ocf:pacemaker:Dummy): Started node2
+    * rsc1 (ocf:pacemaker:Dummy): Stopped
     * Resource Group: group1:
-      * rsc2 (ocf:pacemaker:Dummy): Started node2
-      * rsc3 (ocf:pacemaker:Dummy): Started node2
+      * rsc2 (ocf:pacemaker:Dummy): Stopped
+      * rsc3 (ocf:pacemaker:Dummy): Stopped
     * Resource Group: group2:
-      * rsc4 (ocf:pacemaker:Dummy): Started node2
-      * rsc5 (ocf:pacemaker:Dummy): Started node2
+      * rsc4 (ocf:pacemaker:Dummy): Stopped
+      * rsc5 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/colocated-utilization-primitive-1.summary b/cts/scheduler/summary/colocated-utilization-primitive-1.summary
index dc4d9a632d8..4248c73e06c 100644
--- a/cts/scheduler/summary/colocated-utilization-primitive-1.summary
+++ b/cts/scheduler/summary/colocated-utilization-primitive-1.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

@@ -13,23 +15,14 @@ Transition Summary:
   * Start rsc3 ( node2 )

 Executing Cluster Transition:
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
-  * Resource action: rsc3 monitor on node2
-  * Resource action: rsc3 monitor on node1
-  * Pseudo action: load_stopped_node2
-  * Pseudo action: load_stopped_node1
-  * Resource action: rsc1 start on node2
-  * Resource action: rsc2 start on node2
-  * Resource action: rsc3 start on node2

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

   * Full List of Resources:
-    * rsc1 (ocf:pacemaker:Dummy): Started node2
-    * rsc2 (ocf:pacemaker:Dummy): Started node2
-    * rsc3 (ocf:pacemaker:Dummy): Started node2
+    * rsc1 (ocf:pacemaker:Dummy): Stopped
+    * rsc2 (ocf:pacemaker:Dummy): Stopped
+    * rsc3 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/colocated-utilization-primitive-2.summary b/cts/scheduler/summary/colocated-utilization-primitive-2.summary
index 001d647865d..b634b13f723 100644
--- a/cts/scheduler/summary/colocated-utilization-primitive-2.summary
+++ b/cts/scheduler/summary/colocated-utilization-primitive-2.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

@@ -12,22 +14,14 @@ Transition Summary:
   * Start rsc1 ( node2 )

 Executing Cluster Transition:
-  * Resource action: rsc3 monitor on node2
-  * Resource action: rsc3 monitor on node1
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
-  * Pseudo action: load_stopped_node2
-  * Pseudo action: load_stopped_node1
-  * Resource action: rsc3 start on node2
-  * Resource action: rsc1 start on node2

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

   * Full List of Resources:
-    * rsc3 (ocf:pacemaker:Dummy): Started node2
-    * rsc1 (ocf:pacemaker:Dummy): Started node2
+    * rsc3 (ocf:pacemaker:Dummy): Stopped
+    * rsc1 (ocf:pacemaker:Dummy): Stopped
     * rsc2 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/colocation-influence.summary b/cts/scheduler/summary/colocation-influence.summary
index e240003d929..464285c4e55 100644
--- a/cts/scheduler/summary/colocation-influence.summary
+++ b/cts/scheduler/summary/colocation-influence.summary
@@ -1,7 +1,10 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
-    * GuestOnline: [ bundle10-0 bundle10-1 bundle11-0 ]
+    * GuestOnline: [ bundle10-0 bundle10-1 bundle11-0 bundle11-1 ]

   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Started rhel7-1
@@ -72,86 +75,51 @@ Transition Summary:
   * Start bundle11a:1 ( bundle11-1 ) due to unrunnable bundle11-docker-1 start (blocked)
   * Stop rsc13a ( rhel7-3 ) due to node availability
   * Stop rsc14a:1 ( Promoted rhel7-4 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced

 Executing Cluster Transition:
-  * Resource action: rsc1a stop on rhel7-2
-  * Resource action: rsc1b stop on rhel7-2
-  * Resource action: rsc2a stop on rhel7-4
-  * Resource action: rsc3a start on rhel7-2
-  * Resource action: rsc3b start on rhel7-2
-  * Resource action: rsc4a stop on rhel7-3
-  * Resource action: rsc5a stop on rhel7-1
-  * Pseudo action: group6a_stop_0
-  * Resource action: rsc6a2 stop on rhel7-2
-  * Pseudo action: group7a_stop_0
-  * Resource action: rsc7a2 stop on rhel7-3
-  * Pseudo action: rsc8a-clone_stop_0
-  * Resource action: rsc9c stop on rhel7-4
-  * Resource action: rsc10a stop on rhel7-2
-  * Resource action: rsc12b stop on rhel7-1
-  * Resource action: rsc13a stop on rhel7-3
-  * Pseudo action: rsc14a-clone_demote_0
-  * Pseudo action: bundle11_start_0
-  * Resource action: rsc1a start on rhel7-3
-  * Resource action: rsc1b start on rhel7-3
-  * Resource action: rsc3a monitor=10000 on rhel7-2
-  * Resource action: rsc3b monitor=10000 on rhel7-2
-  * Resource action: rsc6a1 stop on rhel7-2
-  * Pseudo action: group7a_stopped_0
-  * Resource action: rsc8a stop on rhel7-4
-  * Pseudo action: rsc8a-clone_stopped_0
-  * Resource action: rsc10a start on rhel7-3
-  * Pseudo action: bundle11-clone_start_0
-  * Resource action: rsc14a demote on rhel7-4
-  * Pseudo action: rsc14a-clone_demoted_0
-  * Pseudo action: rsc14a-clone_stop_0
-  * Resource action: rsc1a monitor=10000 on rhel7-3
-  * Resource action: rsc1b monitor=10000 on rhel7-3
-  * Pseudo action: group6a_stopped_0
-  * Resource action: rsc10a monitor=10000 on rhel7-3
-  * Pseudo action: bundle11-clone_running_0
-  * Resource action: rsc14a stop on rhel7-4
-  * Pseudo action: rsc14a-clone_stopped_0
-  * Pseudo action: bundle11_running_0

 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
-    * GuestOnline: [ bundle10-0 bundle10-1 bundle11-0 ]
+    * GuestOnline: [ bundle10-0 bundle10-1 bundle11-0 bundle11-1 ]

   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Started rhel7-1
-    * rsc1a (ocf:pacemaker:Dummy): Started rhel7-3
-    * rsc1b (ocf:pacemaker:Dummy): Started rhel7-3
-    * rsc2a (ocf:pacemaker:Dummy): Stopped
+    * rsc1a (ocf:pacemaker:Dummy): Started rhel7-2
+    * rsc1b (ocf:pacemaker:Dummy): Started rhel7-2
+    * rsc2a (ocf:pacemaker:Dummy): Started rhel7-4
     * rsc2b (ocf:pacemaker:Dummy): Started rhel7-4
-    * rsc3a (ocf:pacemaker:Dummy): Started rhel7-2
-    * rsc3b (ocf:pacemaker:Dummy): Started rhel7-2
-    * rsc4a (ocf:pacemaker:Dummy): Stopped
+    * rsc3a (ocf:pacemaker:Dummy): Stopped
+    * rsc3b (ocf:pacemaker:Dummy): Stopped
+    * rsc4a (ocf:pacemaker:Dummy): Started rhel7-3
     * rsc4b (ocf:pacemaker:Dummy): Started rhel7-3
-    * rsc5a (ocf:pacemaker:Dummy): Stopped
+    * rsc5a (ocf:pacemaker:Dummy): Started rhel7-1
     * Resource Group: group5a:
       * rsc5a1 (ocf:pacemaker:Dummy): Started rhel7-1
       * rsc5a2 (ocf:pacemaker:Dummy): Started rhel7-1
     * Resource Group: group6a:
-      * rsc6a1 (ocf:pacemaker:Dummy): Stopped
-      * rsc6a2 (ocf:pacemaker:Dummy): Stopped
+      * rsc6a1 (ocf:pacemaker:Dummy): Started rhel7-2
+      * rsc6a2 (ocf:pacemaker:Dummy): Started rhel7-2
     * rsc6a (ocf:pacemaker:Dummy): Started rhel7-2
     * Resource Group: group7a:
       * rsc7a1 (ocf:pacemaker:Dummy): Started rhel7-3
-      * rsc7a2 (ocf:pacemaker:Dummy): Stopped
+      * rsc7a2 (ocf:pacemaker:Dummy): Started rhel7-3
     * Clone Set: rsc8a-clone [rsc8a]:
-      * Started: [ rhel7-1 rhel7-3 ]
-      * Stopped: [ rhel7-2 rhel7-4 rhel7-5 ]
+      * Started: [ rhel7-1 rhel7-3 rhel7-4 ]
     * Clone Set: rsc8b-clone [rsc8b]:
       * Started: [ rhel7-1 rhel7-3 rhel7-4 ]
     * rsc9a (ocf:pacemaker:Dummy): Started rhel7-4
     * rsc9b (ocf:pacemaker:Dummy): Started rhel7-4
-    * rsc9c (ocf:pacemaker:Dummy): Stopped
-    * rsc10a (ocf:pacemaker:Dummy): Started rhel7-3
+    * rsc9c (ocf:pacemaker:Dummy): Started rhel7-4
+    * rsc10a (ocf:pacemaker:Dummy): Started rhel7-2
     * rsc11a (ocf:pacemaker:Dummy): Started rhel7-1
     * rsc12a (ocf:pacemaker:Dummy): Started rhel7-1
-    * rsc12b (ocf:pacemaker:Dummy): Stopped
+    * rsc12b (ocf:pacemaker:Dummy): Started rhel7-1
     * rsc12c (ocf:pacemaker:Dummy): Started rhel7-1
     * Container bundle set: bundle10 [pcmktest:http]:
       * bundle10-0 (192.168.122.131) (ocf:heartbeat:apache): Started rhel7-2
@@ -159,12 +127,13 @@ Revised Cluster Status:
     * Container bundle set: bundle11 [pcmktest:http]:
       * bundle11-0 (192.168.122.134) (ocf:pacemaker:Dummy): Started rhel7-1
       * bundle11-1 (192.168.122.135) (ocf:pacemaker:Dummy): Stopped
-    * rsc13a (ocf:pacemaker:Dummy): Stopped
+    * rsc13a (ocf:pacemaker:Dummy): Started rhel7-3
     * Clone Set: rsc13b-clone [rsc13b] (promotable):
       * Promoted: [ rhel7-3 ]
       * Unpromoted: [ rhel7-1 rhel7-2 rhel7-4 ]
       * Stopped: [ rhel7-5 ]
     * rsc14b (ocf:pacemaker:Dummy): Started rhel7-4
     * Clone Set: rsc14a-clone [rsc14a] (promotable):
+      * Promoted: [ rhel7-4 ]
       * Unpromoted: [ rhel7-1 rhel7-2 rhel7-3 ]
-      * Stopped: [ rhel7-4 rhel7-5 ]
+      * Stopped: [ rhel7-5 ]
diff --git a/cts/scheduler/summary/colocation-priority-group.summary b/cts/scheduler/summary/colocation-priority-group.summary
index 3a7cf2aee5c..efba48ff9d8 100644
--- a/cts/scheduler/summary/colocation-priority-group.summary
+++ b/cts/scheduler/summary/colocation-priority-group.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

@@ -19,41 +21,18 @@ Transition Summary:
   * Start rsc4 ( node2 )

 Executing Cluster Transition:
-  * Pseudo action: group1_start_0
-  * Resource action: member1a monitor on node2
-  * Resource action: member1a monitor on node1
-  * Resource action: member1b monitor on node2
-  * Resource action: member1b monitor on node1
-  * Resource action: rsc3 monitor on node2
-  * Resource action: rsc3 monitor on node1
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
-  * Resource action: Fencing stop on node1
-  * Resource action: rsc4 monitor on node2
-  * Resource action: rsc4 monitor on node1
-  * Pseudo action: load_stopped_node2
-  * Pseudo action: load_stopped_node1
-  * Resource action: member1a start on node1
-  * Resource action: member1b start on node1
-  * Resource action: rsc3 start on node1
-  * Resource action: Fencing start on node2
-  * Resource action: rsc4 start on node2
-  * Pseudo action: group1_running_0
-  * Resource action: member1a monitor=10000 on node1
-  * Resource action: member1b monitor=10000 on node1
-  * Resource action: rsc3 monitor=10000 on node1
-  * Resource action: Fencing monitor=120000 on node2
-  * Resource action: rsc4 monitor=10000 on node2

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

   * Full List of Resources:
     * Resource Group: group1:
-      * member1a (ocf:pacemaker:Dummy): Started node1
-      * member1b (ocf:pacemaker:Dummy): Started node1
-      * rsc3 (ocf:pacemaker:Dummy): Started node1
+      * member1a (ocf:pacemaker:Dummy): Stopped
+      * member1b (ocf:pacemaker:Dummy): Stopped
+      * rsc3 (ocf:pacemaker:Dummy): Stopped
     * rsc2 (ocf:pacemaker:Dummy): Stopped
-    * Fencing (stonith:fence_xvm): Started node2
-    * rsc4 (ocf:pacemaker:Dummy): Started node2
+    * Fencing (stonith:fence_xvm): Started node1
+    * rsc4 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/colocation-vs-stickiness.summary b/cts/scheduler/summary/colocation-vs-stickiness.summary
index 8bfb8b0b1bc..be014ba677f 100644
--- a/cts/scheduler/summary/colocation-vs-stickiness.summary
+++ b/cts/scheduler/summary/colocation-vs-stickiness.summary
@@ -1,5 +1,7 @@
 Using the original execution date of: 2018-09-26 16:40:38Z
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Node rhel7-1: standby
     * Node rhel7-2: standby
@@ -25,6 +27,8 @@ Executing Cluster Transition:
 Using the original execution date of: 2018-09-26 16:40:38Z

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Node rhel7-1: standby
     * Node rhel7-2: standby
diff --git a/cts/scheduler/summary/colocation_constraint_stops_promoted.summary b/cts/scheduler/summary/colocation_constraint_stops_promoted.summary
index 5d330ebfff9..22b735e37ce 100644
--- a/cts/scheduler/summary/colocation_constraint_stops_promoted.summary
+++ b/cts/scheduler/summary/colocation_constraint_stops_promoted.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ fc16-builder fc16-builder2 ]

@@ -10,29 +12,13 @@ Transition Summary:
   * Stop NATIVE_RSC_A:0 ( Promoted fc16-builder ) due to node availability

 Executing Cluster Transition:
-  * Pseudo action: PROMOTABLE_RSC_A_pre_notify_demote_0
-  * Resource action: NATIVE_RSC_A:0 notify on fc16-builder
-  * Pseudo action: PROMOTABLE_RSC_A_confirmed-pre_notify_demote_0
-  * Pseudo action: PROMOTABLE_RSC_A_demote_0
-  * Resource action: NATIVE_RSC_A:0 demote on fc16-builder
-  * Pseudo action: PROMOTABLE_RSC_A_demoted_0
-  * Pseudo action: PROMOTABLE_RSC_A_post_notify_demoted_0
-  * Resource action: NATIVE_RSC_A:0 notify on fc16-builder
-  * Pseudo action: PROMOTABLE_RSC_A_confirmed-post_notify_demoted_0
-  * Pseudo action: PROMOTABLE_RSC_A_pre_notify_stop_0
-  * Resource action: NATIVE_RSC_A:0 notify on fc16-builder
-  * Pseudo action: PROMOTABLE_RSC_A_confirmed-pre_notify_stop_0
-  * Pseudo action: PROMOTABLE_RSC_A_stop_0
-  * Resource action: NATIVE_RSC_A:0 stop on fc16-builder
-  * Resource action: NATIVE_RSC_A:0 delete on fc16-builder2
-  * Pseudo action: PROMOTABLE_RSC_A_stopped_0
-  * Pseudo action: PROMOTABLE_RSC_A_post_notify_stopped_0
-  * Pseudo action: PROMOTABLE_RSC_A_confirmed-post_notify_stopped_0

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ fc16-builder fc16-builder2 ]

   * Full List of Resources:
     * Clone Set: PROMOTABLE_RSC_A [NATIVE_RSC_A] (promotable):
-      * Stopped: [ fc16-builder fc16-builder2 ]
+      * Promoted: [ fc16-builder ]
diff --git a/cts/scheduler/summary/colocation_constraint_stops_unpromoted.summary b/cts/scheduler/summary/colocation_constraint_stops_unpromoted.summary
index 32047e9b49e..7dc530de0d6 100644
--- a/cts/scheduler/summary/colocation_constraint_stops_unpromoted.summary
+++ b/cts/scheduler/summary/colocation_constraint_stops_unpromoted.summary
@@ -1,6 +1,8 @@
 1 of 2 resource instances DISABLED and 0 BLOCKED from further action due to failure

 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ fc16-builder ]
     * OFFLINE: [ fc16-builder2 ]
@@ -15,22 +17,15 @@ Transition Summary:
   * Stop NATIVE_RSC_B ( fc16-builder ) due to node availability

 Executing Cluster Transition:
-  * Pseudo action: PROMOTABLE_RSC_A_pre_notify_stop_0
-  * Resource action: NATIVE_RSC_B stop on fc16-builder
-  * Resource action: NATIVE_RSC_A:0 notify on fc16-builder
-  * Pseudo action: PROMOTABLE_RSC_A_confirmed-pre_notify_stop_0
-  * Pseudo action: PROMOTABLE_RSC_A_stop_0
-  * Resource action: NATIVE_RSC_A:0 stop on fc16-builder
-  * Pseudo action: PROMOTABLE_RSC_A_stopped_0
-  * Pseudo action: PROMOTABLE_RSC_A_post_notify_stopped_0
-  * Pseudo action: PROMOTABLE_RSC_A_confirmed-post_notify_stopped_0

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ fc16-builder ]
     * OFFLINE: [ fc16-builder2 ]

   * Full List of Resources:
     * Clone Set: PROMOTABLE_RSC_A [NATIVE_RSC_A] (promotable):
-      * Stopped: [ fc16-builder fc16-builder2 ]
-    * NATIVE_RSC_B (ocf:pacemaker:Dummy): Stopped (disabled)
+      * Unpromoted: [ fc16-builder ]
+    * NATIVE_RSC_B (ocf:pacemaker:Dummy): Started fc16-builder (disabled)
diff --git a/cts/scheduler/summary/comments.summary b/cts/scheduler/summary/comments.summary
index e9bcaf56265..2887a41ba7f 100644
--- a/cts/scheduler/summary/comments.summary
+++ b/cts/scheduler/summary/comments.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

@@ -11,17 +13,13 @@ Transition Summary:
   * Start rsc2 ( node2 )

 Executing Cluster Transition:
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
-  * Resource action: rsc1 start on node1
-  * Resource action: rsc2 start on node2

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

   * Full List of Resources:
-    * rsc1 (ocf:heartbeat:apache): Started node1
-    * rsc2 (ocf:heartbeat:apache): Started node2
+    * rsc1 (ocf:heartbeat:apache): Stopped
+    * rsc2 (ocf:heartbeat:apache): Stopped
diff --git a/cts/scheduler/summary/complex_enforce_colo.summary b/cts/scheduler/summary/complex_enforce_colo.summary
index 195ad856eea..181b0d950ae 100644
--- a/cts/scheduler/summary/complex_enforce_colo.summary
+++ b/cts/scheduler/summary/complex_enforce_colo.summary
@@ -1,6 +1,8 @@
 3 of 132 resource instances DISABLED and 0 BLOCKED from further action due to failure

 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ rhos6-node1 rhos6-node2 rhos6-node3 ]

@@ -107,67 +109,67 @@ Transition Summary:
   * Stop keystone:0 ( rhos6-node1 ) due to node availability
   * Stop keystone:1 ( rhos6-node2 ) due to node availability
   * Stop keystone:2 ( rhos6-node3 ) due to node availability
-  * Stop glance-registry:0 ( rhos6-node1 )
-  * Stop glance-registry:1 ( rhos6-node2 )
-  * Stop glance-registry:2 ( rhos6-node3 )
-  * Stop glance-api:0 ( rhos6-node1 )
-  * Stop glance-api:1 ( rhos6-node2 )
-  * Stop glance-api:2 ( rhos6-node3 )
+  * Stop glance-registry:0 ( rhos6-node1 ) due to node availability
+  * Stop glance-registry:1 ( rhos6-node2 ) due to node availability
+  * Stop glance-registry:2 ( rhos6-node3 ) due to node availability
+  * Stop glance-api:0 ( rhos6-node1 ) due to node availability
+  * Stop glance-api:1 ( rhos6-node2 ) due to node availability
+  * Stop glance-api:2 ( rhos6-node3 ) due to node availability
   * Stop cinder-api ( rhos6-node1 ) due to unrunnable keystone-clone running
   * Stop cinder-scheduler ( rhos6-node1 ) due to required cinder-api start
   * Stop cinder-volume ( rhos6-node1 ) due to colocation with cinder-scheduler
-  * Stop swift-account:0 ( rhos6-node1 )
-  * Stop swift-account:1 ( rhos6-node2 )
-  * Stop swift-account:2 ( rhos6-node3 )
-  * Stop swift-container:0 ( rhos6-node1 )
-  * Stop swift-container:1 ( rhos6-node2 )
-  * Stop swift-container:2 ( rhos6-node3 )
-  * Stop swift-object:0 ( rhos6-node1 )
-  * Stop swift-object:1 ( rhos6-node2 )
-  * Stop swift-object:2 ( rhos6-node3 )
-  * Stop swift-proxy:0 ( rhos6-node1 )
-  * Stop swift-proxy:1 ( rhos6-node2 )
-  * Stop swift-proxy:2 ( rhos6-node3 )
+  * Stop swift-account:0 ( rhos6-node1 ) due to node availability
+  * Stop swift-account:1 ( rhos6-node2 ) due to node availability
+  * Stop swift-account:2 ( rhos6-node3 ) due to node availability
+  * Stop swift-container:0 ( rhos6-node1 ) due to node availability
+  * Stop swift-container:1 ( rhos6-node2 ) due to node availability
+  * Stop swift-container:2 ( rhos6-node3 ) due to node availability
+  * Stop swift-object:0 ( rhos6-node1 ) due to node availability
+  * Stop swift-object:1 ( rhos6-node2 ) due to node availability
+  * Stop swift-object:2 ( rhos6-node3 ) due to node availability
+  * Stop swift-proxy:0 ( rhos6-node1 ) due to node availability
+  * Stop swift-proxy:1 ( rhos6-node2 ) due to node availability
+  * Stop swift-proxy:2 ( rhos6-node3 ) due to node availability
   * Stop swift-object-expirer ( rhos6-node2 ) due to required swift-proxy-clone running
-  * Stop neutron-server:0 ( rhos6-node1 )
-  * Stop neutron-server:1 ( rhos6-node2 )
-  * Stop neutron-server:2 ( rhos6-node3 )
-  * Stop neutron-scale:0 ( rhos6-node3 )
-  * Stop neutron-scale:1 ( rhos6-node2 )
-  * Stop neutron-scale:2 ( rhos6-node1 )
-  * Stop neutron-ovs-cleanup:0 ( rhos6-node1 )
-  * Stop neutron-ovs-cleanup:1 ( rhos6-node2 )
-  * Stop neutron-ovs-cleanup:2 ( rhos6-node3 )
-  * Stop neutron-netns-cleanup:0 ( rhos6-node1 )
-  * Stop neutron-netns-cleanup:1 ( rhos6-node2 )
-  * Stop neutron-netns-cleanup:2 ( rhos6-node3 )
-  * Stop neutron-openvswitch-agent:0 ( rhos6-node1 )
-  * Stop neutron-openvswitch-agent:1 ( rhos6-node2 )
-  * Stop neutron-openvswitch-agent:2 ( rhos6-node3 )
-  * Stop neutron-dhcp-agent:0 ( rhos6-node1 )
-  * Stop neutron-dhcp-agent:1 ( rhos6-node2 )
-  * Stop neutron-dhcp-agent:2 ( rhos6-node3 )
-  * Stop neutron-l3-agent:0 ( rhos6-node1 )
-  * Stop neutron-l3-agent:1 ( rhos6-node2 )
-  * Stop neutron-l3-agent:2 ( rhos6-node3 )
-  * Stop neutron-metadata-agent:0 ( rhos6-node1 )
-  * Stop neutron-metadata-agent:1 ( rhos6-node2 )
-  * Stop neutron-metadata-agent:2 ( rhos6-node3 )
-  * Stop nova-consoleauth:0 ( rhos6-node1 )
-  * Stop nova-consoleauth:1 ( rhos6-node2 )
-  * Stop nova-consoleauth:2 ( rhos6-node3 )
-  * Stop nova-novncproxy:0 ( rhos6-node1 )
-  * Stop nova-novncproxy:1 ( rhos6-node2 )
-  * Stop nova-novncproxy:2 ( rhos6-node3 )
-  * Stop nova-api:0 ( rhos6-node1 )
-  * Stop nova-api:1 ( rhos6-node2 )
-  * Stop nova-api:2 ( rhos6-node3 )
-  * Stop nova-scheduler:0 ( rhos6-node1 )
-  * Stop nova-scheduler:1 ( rhos6-node2 )
-  * Stop nova-scheduler:2 ( rhos6-node3 )
-  * Stop nova-conductor:0 ( rhos6-node1 )
-  * Stop nova-conductor:1 ( rhos6-node2 )
-  * Stop nova-conductor:2 ( rhos6-node3 )
+  * Stop neutron-server:0 ( rhos6-node1 ) due to node availability
+  * Stop neutron-server:1 ( rhos6-node2 ) due to node availability
+  * Stop neutron-server:2 ( rhos6-node3 ) due to node availability
+  * Stop neutron-scale:0 ( rhos6-node3 ) due to node availability
+  * Stop neutron-scale:1 ( rhos6-node2 ) due to node availability
+  * Stop neutron-scale:2 ( rhos6-node1 ) due to node availability
+  * Stop neutron-ovs-cleanup:0 ( rhos6-node1 ) due to node availability
+  * Stop neutron-ovs-cleanup:1 ( rhos6-node2 ) due to node availability
+  * Stop neutron-ovs-cleanup:2 ( rhos6-node3 ) due to node availability
+  * Stop neutron-netns-cleanup:0 ( rhos6-node1 ) due to node availability
+  * Stop neutron-netns-cleanup:1 ( rhos6-node2 ) due to node availability
+  * Stop neutron-netns-cleanup:2 ( rhos6-node3 ) due to node availability
+  * Stop neutron-openvswitch-agent:0 ( rhos6-node1 ) due to node availability
+  * Stop neutron-openvswitch-agent:1 ( rhos6-node2 ) due to node availability
+  * Stop neutron-openvswitch-agent:2 ( rhos6-node3 ) due to node availability
+  * Stop neutron-dhcp-agent:0 ( rhos6-node1 ) due to node availability
+  * Stop neutron-dhcp-agent:1 ( rhos6-node2 ) due to node availability
+  * Stop neutron-dhcp-agent:2 ( rhos6-node3 ) due to node availability
+  * Stop neutron-l3-agent:0 ( rhos6-node1 ) due to node availability
+  * Stop neutron-l3-agent:1 ( rhos6-node2 ) due to node availability
+  * Stop neutron-l3-agent:2 ( rhos6-node3 ) due to node availability
+  * Stop neutron-metadata-agent:0 ( rhos6-node1 ) due to node availability
+  * Stop neutron-metadata-agent:1 ( rhos6-node2 ) due to node availability
+  * Stop neutron-metadata-agent:2 ( rhos6-node3 ) due to node availability
+  * Stop nova-consoleauth:0 ( rhos6-node1 ) due to node availability
+  * Stop nova-consoleauth:1 ( rhos6-node2 ) due to node availability
+  * Stop nova-consoleauth:2 ( rhos6-node3 ) due to node availability
+  * Stop nova-novncproxy:0 ( rhos6-node1 ) due to node availability
+  * Stop nova-novncproxy:1 ( rhos6-node2 ) due to node availability
+  * Stop nova-novncproxy:2 ( rhos6-node3 ) due to node availability
+  * Stop nova-api:0 ( rhos6-node1 ) due to node availability
+  * Stop nova-api:1 ( rhos6-node2 ) due to node availability
+  * Stop nova-api:2 ( rhos6-node3 ) due to node availability
+  * Stop nova-scheduler:0 ( rhos6-node1 ) due to node availability
+  * Stop nova-scheduler:1 ( rhos6-node2 ) due to node availability
+  * Stop nova-scheduler:2 ( rhos6-node3 ) due to node availability
+  * Stop nova-conductor:0 ( rhos6-node1 ) due to node availability
+  * Stop nova-conductor:1 ( rhos6-node2 ) due to node availability
+  * Stop nova-conductor:2 ( rhos6-node3 ) due to node availability
   * Stop ceilometer-central ( rhos6-node3 ) due to unrunnable keystone-clone running
   * Stop ceilometer-collector:0 ( rhos6-node1 ) due to required ceilometer-central start
   * Stop ceilometer-collector:1 ( rhos6-node2 ) due to required ceilometer-central start
@@ -199,159 +201,10 @@ Transition Summary:
   * Stop heat-engine ( rhos6-node2 ) due to colocation with heat-api-cloudwatch-clone

 Executing Cluster Transition:
-  * Pseudo action: glance-api-clone_stop_0
-  * Resource action: cinder-volume stop on rhos6-node1
-  * Pseudo action: swift-object-clone_stop_0
-  * Resource action: swift-object-expirer stop on rhos6-node2
-  * Pseudo action: neutron-metadata-agent-clone_stop_0
-  * Pseudo action: nova-conductor-clone_stop_0
-  * Resource action: heat-engine stop on rhos6-node2
-  * Resource action: glance-api stop on rhos6-node1
-  * Resource action: glance-api stop on rhos6-node2
-  * Resource action: glance-api stop on rhos6-node3
-  * Pseudo action: glance-api-clone_stopped_0
-  * Resource action: cinder-scheduler stop on rhos6-node1
-  * Resource action: swift-object stop on rhos6-node1
-  * Resource action: swift-object stop on rhos6-node2
-  * Resource action: swift-object stop on rhos6-node3
-  * Pseudo action: swift-object-clone_stopped_0
-  * Pseudo action: swift-proxy-clone_stop_0
-  * Resource action: neutron-metadata-agent stop on rhos6-node1
-  * Resource action: neutron-metadata-agent stop on rhos6-node2
-  * Resource action: neutron-metadata-agent stop on rhos6-node3
-  * Pseudo action: neutron-metadata-agent-clone_stopped_0
-  * Resource action: nova-conductor stop on rhos6-node1
-  * Resource action: nova-conductor stop on rhos6-node2
-  * Resource action: nova-conductor stop on rhos6-node3
-  * Pseudo action: nova-conductor-clone_stopped_0
-  * Pseudo action: heat-api-cloudwatch-clone_stop_0
-  * Pseudo action: glance-registry-clone_stop_0
-  * Resource action: cinder-api stop on rhos6-node1
-  * Pseudo action: swift-container-clone_stop_0
-  * Resource action: swift-proxy stop on rhos6-node1
-  * Resource action: swift-proxy stop on rhos6-node2
-  * Resource action: swift-proxy stop on rhos6-node3
-  * Pseudo action: swift-proxy-clone_stopped_0
-  * Pseudo action: neutron-l3-agent-clone_stop_0
-  * Pseudo action: nova-scheduler-clone_stop_0
-  * Resource action: heat-api-cloudwatch stop on rhos6-node1
-  * Resource action: heat-api-cloudwatch stop on rhos6-node2
-  * Resource action: heat-api-cloudwatch stop on rhos6-node3
-  * Pseudo action: heat-api-cloudwatch-clone_stopped_0
-  * Resource action: glance-registry stop on rhos6-node1
-  * Resource action: glance-registry stop on rhos6-node2
-  * Resource action: glance-registry stop on rhos6-node3
-  * Pseudo action: glance-registry-clone_stopped_0
-  * Resource action: swift-container stop on rhos6-node1
-  * Resource action: swift-container stop on rhos6-node2
-  * Resource action: swift-container stop on rhos6-node3
-  * Pseudo action: swift-container-clone_stopped_0
-  * Resource action: neutron-l3-agent stop on rhos6-node1
-  * Resource action: neutron-l3-agent stop on rhos6-node2
-  * Resource action: neutron-l3-agent stop on rhos6-node3
-  * Pseudo action: neutron-l3-agent-clone_stopped_0
-  * Resource action: nova-scheduler stop on rhos6-node1
-  * Resource action: nova-scheduler stop on rhos6-node2
-  * Resource action: nova-scheduler stop on rhos6-node3
-  * Pseudo action: nova-scheduler-clone_stopped_0
-  * Pseudo action: heat-api-cfn-clone_stop_0
-  * Pseudo action: swift-account-clone_stop_0
-  * Pseudo action: neutron-dhcp-agent-clone_stop_0
-  * Pseudo action: nova-api-clone_stop_0
-  * Resource action: heat-api-cfn stop on rhos6-node1
-  * Resource action: heat-api-cfn stop on rhos6-node2
-  * Resource action: heat-api-cfn stop on rhos6-node3
-  * Pseudo action: heat-api-cfn-clone_stopped_0
-  * Resource action: swift-account stop on rhos6-node1
-  * Resource action: swift-account stop on rhos6-node2
-  * Resource action: swift-account stop on rhos6-node3
-  * Pseudo action: swift-account-clone_stopped_0
-  * Resource action: neutron-dhcp-agent stop on rhos6-node1
-  * Resource action: neutron-dhcp-agent stop on rhos6-node2
-  * Resource action: neutron-dhcp-agent stop on rhos6-node3
-  * Pseudo action: neutron-dhcp-agent-clone_stopped_0
-  * Resource action: nova-api stop on rhos6-node1
-  * Resource action: nova-api stop on rhos6-node2
-  * Resource action: nova-api stop on rhos6-node3
-  * Pseudo action: nova-api-clone_stopped_0
-  * Pseudo action: heat-api-clone_stop_0
-  * Pseudo action: neutron-openvswitch-agent-clone_stop_0
-  * Pseudo action: nova-novncproxy-clone_stop_0
-  * Resource action: heat-api stop on rhos6-node1
-  * Resource action: heat-api stop on rhos6-node2
-  * Resource action: heat-api stop on rhos6-node3
-  * Pseudo action: heat-api-clone_stopped_0
-  * Resource action: neutron-openvswitch-agent stop on rhos6-node1
-  * Resource action: neutron-openvswitch-agent stop on rhos6-node2
-  * Resource action: neutron-openvswitch-agent stop on rhos6-node3
-  * Pseudo action: neutron-openvswitch-agent-clone_stopped_0
-  * Resource action: nova-novncproxy stop on rhos6-node1
-  * Resource action: nova-novncproxy stop on rhos6-node2
-  * Resource action: nova-novncproxy stop on rhos6-node3
-  * Pseudo action: nova-novncproxy-clone_stopped_0
-  * Pseudo action: ceilometer-notification-clone_stop_0
-  * Pseudo action: neutron-netns-cleanup-clone_stop_0
-  * Pseudo action: nova-consoleauth-clone_stop_0
-  * Resource action: ceilometer-notification stop on rhos6-node1
-  * Resource action: ceilometer-notification stop on rhos6-node2
-  * Resource action: ceilometer-notification stop on rhos6-node3
-  * Pseudo action: ceilometer-notification-clone_stopped_0
-  * Resource action: neutron-netns-cleanup stop on rhos6-node1
-  * Resource action: neutron-netns-cleanup stop on rhos6-node2
-  * Resource action: neutron-netns-cleanup stop on rhos6-node3
-  * Pseudo action: neutron-netns-cleanup-clone_stopped_0
-  * Resource action: nova-consoleauth stop on rhos6-node1
-  * Resource action: nova-consoleauth stop on rhos6-node2
-  * Resource action: nova-consoleauth stop on rhos6-node3
-  * Pseudo action: nova-consoleauth-clone_stopped_0
-  * Pseudo action: ceilometer-alarm-notifier-clone_stop_0
-  * Pseudo action: neutron-ovs-cleanup-clone_stop_0
-  * Resource action: ceilometer-alarm-notifier stop on rhos6-node1
-  * Resource action: ceilometer-alarm-notifier stop on rhos6-node2
-  * Resource action: ceilometer-alarm-notifier stop on rhos6-node3
-  * Pseudo action: ceilometer-alarm-notifier-clone_stopped_0
-  * Resource action: neutron-ovs-cleanup stop on rhos6-node1
-  * Resource action: neutron-ovs-cleanup stop on rhos6-node2
-  * Resource action: neutron-ovs-cleanup stop on rhos6-node3
-  * Pseudo action: neutron-ovs-cleanup-clone_stopped_0
-  * Pseudo action: ceilometer-alarm-evaluator-clone_stop_0
-  * Pseudo action: neutron-scale-clone_stop_0
-  * Resource action: ceilometer-alarm-evaluator stop on rhos6-node1
-  * Resource action: ceilometer-alarm-evaluator stop on rhos6-node2
-  * Resource action: ceilometer-alarm-evaluator stop on rhos6-node3
-  * Pseudo action: ceilometer-alarm-evaluator-clone_stopped_0
-  * Resource action: neutron-scale:0 stop on rhos6-node3
-  * Resource action: neutron-scale:1 stop on rhos6-node2
-  * Resource action: neutron-scale:2 stop on rhos6-node1
-  * Pseudo action: neutron-scale-clone_stopped_0
-  * Pseudo action: ceilometer-delay-clone_stop_0
-  * Pseudo action: neutron-server-clone_stop_0
-  * Resource action: ceilometer-delay stop on rhos6-node1
-  * Resource action: ceilometer-delay stop on rhos6-node2
-  * Resource action: ceilometer-delay stop on rhos6-node3
-  * Pseudo action: ceilometer-delay-clone_stopped_0
-  * Resource action: neutron-server stop on rhos6-node1
-  * Resource action: neutron-server stop on rhos6-node2
-  * Resource action: neutron-server stop on rhos6-node3
-  * Pseudo action: neutron-server-clone_stopped_0
-  * Pseudo action: ceilometer-api-clone_stop_0
-  * Resource action: ceilometer-api stop on rhos6-node1
-  * Resource action: ceilometer-api stop on rhos6-node2
-  * Resource action: ceilometer-api stop on rhos6-node3
-  * Pseudo action: ceilometer-api-clone_stopped_0
-  * Pseudo action: ceilometer-collector-clone_stop_0
-  * Resource action: ceilometer-collector stop on rhos6-node1
-  * Resource action: ceilometer-collector stop on rhos6-node2
-  * Resource action: ceilometer-collector stop on rhos6-node3
-  * Pseudo action: ceilometer-collector-clone_stopped_0
-  * Resource action: ceilometer-central stop on rhos6-node3
-  * Pseudo action: keystone-clone_stop_0
-  * Resource action: keystone stop on rhos6-node1
-  * Resource action: keystone stop on rhos6-node2
-  * Resource action: keystone stop on rhos6-node3
-  * Pseudo action: keystone-clone_stopped_0

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ rhos6-node1 rhos6-node2 rhos6-node3 ]

@@ -382,74 +235,74 @@ Revised Cluster Status:
     * Clone Set: mongodb-clone [mongodb]:
       * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: keystone-clone [keystone]:
-      * Stopped (disabled): [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: glance-fs-clone [glance-fs]:
       * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: glance-registry-clone [glance-registry]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: glance-api-clone [glance-api]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
-    * cinder-api (systemd:openstack-cinder-api): Stopped
-    * cinder-scheduler (systemd:openstack-cinder-scheduler): Stopped
-    * cinder-volume (systemd:openstack-cinder-volume): Stopped
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+    * cinder-api (systemd:openstack-cinder-api): Started rhos6-node1
+    * cinder-scheduler (systemd:openstack-cinder-scheduler): Started rhos6-node1
+    * cinder-volume (systemd:openstack-cinder-volume): Started rhos6-node1
     * Clone Set: swift-fs-clone [swift-fs]:
       * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: swift-account-clone [swift-account]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: swift-container-clone [swift-container]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: swift-object-clone [swift-object]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: swift-proxy-clone [swift-proxy]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
-    * swift-object-expirer (systemd:openstack-swift-object-expirer): Stopped
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+    * swift-object-expirer (systemd:openstack-swift-object-expirer): Started rhos6-node2
     * Clone Set: neutron-server-clone [neutron-server]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: neutron-scale-clone [neutron-scale] (unique):
-      * neutron-scale:0 (ocf:neutron:NeutronScale): Stopped
-      * neutron-scale:1 (ocf:neutron:NeutronScale): Stopped
-      * neutron-scale:2 (ocf:neutron:NeutronScale): Stopped
+      * neutron-scale:0 (ocf:neutron:NeutronScale): Started rhos6-node3
+      * neutron-scale:1 (ocf:neutron:NeutronScale): Started rhos6-node2
+      * neutron-scale:2 (ocf:neutron:NeutronScale): Started rhos6-node1
     * Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: neutron-l3-agent-clone [neutron-l3-agent]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: nova-consoleauth-clone [nova-consoleauth]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: nova-novncproxy-clone [nova-novncproxy]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: nova-api-clone [nova-api]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: nova-scheduler-clone [nova-scheduler]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: nova-conductor-clone [nova-conductor]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
-    * ceilometer-central (systemd:openstack-ceilometer-central): Stopped
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+    * ceilometer-central (systemd:openstack-ceilometer-central): Started rhos6-node3
     * Clone Set: ceilometer-collector-clone [ceilometer-collector]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: ceilometer-api-clone [ceilometer-api]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: ceilometer-delay-clone [ceilometer-delay]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: ceilometer-alarm-evaluator-clone [ceilometer-alarm-evaluator]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: ceilometer-alarm-notifier-clone [ceilometer-alarm-notifier]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: ceilometer-notification-clone [ceilometer-notification]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: heat-api-clone [heat-api]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: heat-api-cfn-clone [heat-api-cfn]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
     * Clone Set: heat-api-cloudwatch-clone [heat-api-cloudwatch]:
-      * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
-    * heat-engine (systemd:openstack-heat-engine): Stopped
+      * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
+    * heat-engine (systemd:openstack-heat-engine): Started rhos6-node2
     * Clone Set: horizon-clone [horizon]:
       * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
diff --git a/cts/scheduler/summary/concurrent-fencing.summary b/cts/scheduler/summary/concurrent-fencing.summary
index 18cbcfdb7fb..598ac84c340 100644
--- a/cts/scheduler/summary/concurrent-fencing.summary
+++ b/cts/scheduler/summary/concurrent-fencing.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Node node1: UNCLEAN (offline)
     * Node node2: UNCLEAN (offline)
@@ -14,13 +16,14 @@ Transition Summary:
   * Fence (reboot) node1 'peer is no longer part of the cluster'

 Executing Cluster Transition:
-  * Fencing node3 (reboot)
-  * Fencing node1 (reboot)
-  * Fencing node2 (reboot)

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
-    * OFFLINE: [ node1 node2 node3 ]
+    * Node node1: UNCLEAN (offline)
+    * Node node2: UNCLEAN (offline)
+    * Node node3: UNCLEAN (offline)

   * Full List of Resources:
     * stonith-1 (stonith:dummy): Stopped
diff --git a/cts/scheduler/summary/container-1.summary b/cts/scheduler/summary/container-1.summary
index 366ebff1e54..871dd41f7ac 100644
--- a/cts/scheduler/summary/container-1.summary
+++ b/cts/scheduler/summary/container-1.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

@@ -13,20 +15,14 @@ Transition Summary:
   * Start rsc2 ( node1 )

 Executing Cluster Transition:
-  * Resource action: container1 monitor on node2
-  * Resource action: container1 monitor on node1
-  * Resource action: container1 start on node1
-  * Resource action: rsc1 start on node1
-  * Resource action: rsc2 start on node1
-  * Resource action: container1 monitor=20000 on node1
-  * Resource action: rsc1 monitor=10000 on node1
-  * Resource action: rsc2 monitor=5000 on node1

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

   * Full List of Resources:
-    * container1 (ocf:pacemaker:Dummy): Started node1
-    * rsc1 (ocf:pacemaker:Dummy): Started node1
-    * rsc2 (ocf:pacemaker:Dummy): Started node1
+    * container1 (ocf:pacemaker:Dummy): Stopped
+    * rsc1 (ocf:pacemaker:Dummy): Stopped
+    * rsc2 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/container-2.summary b/cts/scheduler/summary/container-2.summary
index 29c69a47984..61222a5eb79 100644
--- a/cts/scheduler/summary/container-2.summary
+++ b/cts/scheduler/summary/container-2.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

@@ -10,24 +12,17 @@ Current cluster status:
 Transition Summary:
   * Restart container1 ( node1 )
   * Recover rsc1 ( node1 )
-  * Restart rsc2 ( node1 ) due to required container1 start
+  * Restart rsc2 ( node1 )

 Executing Cluster Transition:
-  * Resource action: rsc1 stop on node1
-  * Resource action: rsc2 stop on node1
-  * Resource action: container1 stop on node1
-  * Resource action: container1 start on node1
-  * Resource action: container1 monitor=20000 on node1
-  * Resource action: rsc1 start on node1
-  * Resource action: rsc1 monitor=10000 on node1
-  * Resource action: rsc2 start on node1
-  * Resource action: rsc2 monitor=5000 on node1

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

   * Full List of Resources:
     * container1 (ocf:pacemaker:Dummy): Started node1
-    * rsc1 (ocf:pacemaker:Dummy): Started node1
+    * rsc1 (ocf:pacemaker:Dummy): FAILED node1
     * rsc2 (ocf:pacemaker:Dummy): Started node1
diff --git a/cts/scheduler/summary/container-3.summary b/cts/scheduler/summary/container-3.summary
index f579f1f694a..5024e65a938 100644
--- a/cts/scheduler/summary/container-3.summary
+++ b/cts/scheduler/summary/container-3.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

@@ -10,23 +12,17 @@ Current cluster status:
 Transition Summary:
   * Restart container1 ( node1 )
   * Start rsc1 ( node1 )
-  * Restart rsc2 ( node1 ) due to required container1 start
+  * Restart rsc2 ( node1 )

 Executing Cluster Transition:
-  * Resource action: rsc2 stop on node1
-  * Resource action: container1 stop on node1
-  * Resource action: container1 start on node1
-  * Resource action: container1 monitor=20000 on node1
-  * Resource action: rsc1 start on node1
-  * Resource action: rsc2 start on node1
-  * Resource action: rsc2 monitor=5000 on node1
-  * Resource action: rsc1 monitor=10000 on node1

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

   * Full List of Resources:
     * container1 (ocf:pacemaker:Dummy): Started node1
-    * rsc1 (ocf:pacemaker:Dummy): Started node1 (failure ignored)
+    * rsc1 (ocf:pacemaker:Dummy): FAILED (failure ignored)
     * rsc2 (ocf:pacemaker:Dummy): Started node1
diff --git a/cts/scheduler/summary/container-4.summary b/cts/scheduler/summary/container-4.summary
index a60393e92ba..61e6119a4d1 100644
--- a/cts/scheduler/summary/container-4.summary
+++ b/cts/scheduler/summary/container-4.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

@@ -13,21 +15,14 @@ Transition Summary:
   * Move rsc2 ( node1 -> node2 )

 Executing Cluster Transition:
-  * Resource action: rsc1 stop on node1
-  * Resource action: rsc2 stop on node1
-  * Resource action: container1 stop on node1
-  * Resource action: container1 start on node2
-  * Resource action: rsc1 start on node2
-  * Resource action: rsc2 start on node2
-  * Resource action: container1 monitor=20000 on node2
-  * Resource action: rsc1 monitor=10000 on node2
-  * Resource action: rsc2 monitor=5000 on node2

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

   * Full List of Resources:
-    * container1 (ocf:pacemaker:Dummy): Started node2
-    * rsc1 (ocf:pacemaker:Dummy): Started node2
-    * rsc2 (ocf:pacemaker:Dummy): Started node2
+    * container1 (ocf:pacemaker:Dummy): Started node1
+    * rsc1 (ocf:pacemaker:Dummy): FAILED node1
+    * rsc2 (ocf:pacemaker:Dummy): Started node1
diff --git a/cts/scheduler/summary/container-group-1.summary b/cts/scheduler/summary/container-group-1.summary
index 955f8658ec5..b64e92548a7 100644
--- a/cts/scheduler/summary/container-group-1.summary
+++ b/cts/scheduler/summary/container-group-1.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

@@ -14,23 +16,15 @@ Transition Summary:
   * Start rsc2 ( node1 )

 Executing Cluster Transition:
-  * Pseudo action: container-group_start_0
-  * Resource action: container1 monitor on node2
-  * Resource action: container1 monitor on node1
-  * Resource action: container1 start on node1
-  * Resource action: rsc1 start on node1
-  * Resource action: rsc2 start on node1
-  * Pseudo action: container-group_running_0
-  * Resource action: container1 monitor=20000 on node1
-  * Resource action: rsc1 monitor=10000 on node1
-  * Resource action: rsc2 monitor=5000 on node1

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

   * Full List of Resources:
     * Resource Group: container-group:
-      * container1 (ocf:pacemaker:Dummy): Started node1
-      * rsc1 (ocf:pacemaker:Dummy): Started node1
-      * rsc2 (ocf:pacemaker:Dummy): Started node1
+      * container1 (ocf:pacemaker:Dummy): Stopped
+      * rsc1 (ocf:pacemaker:Dummy): Stopped
+      * rsc2 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/container-group-2.summary b/cts/scheduler/summary/container-group-2.summary
index a3af18c81f3..4cff9abe6ad 100644
--- a/cts/scheduler/summary/container-group-2.summary
+++ b/cts/scheduler/summary/container-group-2.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

@@ -11,29 +13,18 @@ Current cluster status:
 Transition Summary:
   * Restart container1 ( node1 )
   * Recover rsc1 ( node1 )
-  * Restart rsc2 ( node1 ) due to required rsc1 start
+  * Restart rsc2 ( node1 )

 Executing Cluster Transition:
-  * Pseudo action: container-group_stop_0
-  * Resource action: rsc2 stop on node1
-  * Resource action: rsc1 stop on node1
-  * Resource action: container1 stop on node1
-  * Pseudo action: container-group_stopped_0
-  * Pseudo action: container-group_start_0
-  * Resource action: container1 start on node1
-  * Resource action: container1 monitor=20000 on node1
-  * Resource action: rsc1 start on node1
-  * Resource action: rsc1 monitor=10000 on node1
-  * Resource action: rsc2 start on node1
-  * Resource action: rsc2 monitor=5000 on node1
-  * Pseudo action: container-group_running_0

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

   * Full List of Resources:
     * Resource Group: container-group:
       * container1 (ocf:pacemaker:Dummy): Started node1
-      * rsc1 (ocf:pacemaker:Dummy): Started node1
+      * rsc1 (ocf:pacemaker:Dummy): FAILED node1
       * rsc2 (ocf:pacemaker:Dummy): Started node1
diff --git a/cts/scheduler/summary/container-group-3.summary b/cts/scheduler/summary/container-group-3.summary
index 0859a2335d6..c0336277d2e 100644
--- a/cts/scheduler/summary/container-group-3.summary
+++ b/cts/scheduler/summary/container-group-3.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

@@ -14,24 +16,15 @@ Transition Summary:
   * Start rsc2 ( node1 )

 Executing Cluster Transition:
-  * Pseudo action: container-group_stop_0
-  * Resource action: container1 stop on node1
-  * Pseudo action: container-group_stopped_0
-  * Pseudo action: container-group_start_0
-  * Resource action: container1 start on node1
-  * Resource action: container1 monitor=20000 on node1
-  * Resource action: rsc1 start on node1
-  * Resource action: rsc2 start on node1
-  * Pseudo action: container-group_running_0
-  * Resource action: rsc1 monitor=10000 on node1
-  * Resource action: rsc2 monitor=5000 on node1

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

   * Full List of Resources:
     * Resource Group: container-group:
       * container1 (ocf:pacemaker:Dummy): Started node1
-      * rsc1 (ocf:pacemaker:Dummy): Started node1 (failure ignored)
-      * rsc2 (ocf:pacemaker:Dummy): Started node1
+      * rsc1 (ocf:pacemaker:Dummy): FAILED (failure ignored)
+      * rsc2 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/container-group-4.summary b/cts/scheduler/summary/container-group-4.summary
index 4ad9f2ec1a4..7c57efb92f0 100644
--- a/cts/scheduler/summary/container-group-4.summary
+++ b/cts/scheduler/summary/container-group-4.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

@@ -14,26 +16,15 @@ Transition Summary:
   * Move rsc2 ( node1 -> node2 )

 Executing Cluster Transition:
-  * Pseudo action: container-group_stop_0
-  * Resource action: rsc2 stop on node1
-  * Resource action: rsc1 stop on node1
-  * Resource action: container1 stop on node1
-  * Pseudo action: container-group_stopped_0
-  * Pseudo action: container-group_start_0
-  * Resource action: container1 start on node2
-  * Resource action: rsc1 start on node2
-  * Resource action: rsc2 start on node2
-  * Pseudo action: container-group_running_0
-  * Resource action: container1 monitor=20000 on node2
-  * Resource action: rsc1 monitor=10000 on node2
-  * Resource action: rsc2 monitor=5000 on node2

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

   * Full List of Resources:
     * Resource Group: container-group:
-      * container1 (ocf:pacemaker:Dummy): Started node2
-      * rsc1 (ocf:pacemaker:Dummy): Started node2
-      * rsc2 (ocf:pacemaker:Dummy): Started node2
+      * container1 (ocf:pacemaker:Dummy): Started node1
+      * rsc1 (ocf:pacemaker:Dummy): FAILED node1
+      * rsc2 (ocf:pacemaker:Dummy): Started node1
diff --git a/cts/scheduler/summary/container-is-remote-node.summary b/cts/scheduler/summary/container-is-remote-node.summary
index c022e896f44..bd2d8e0aca9 100644
--- a/cts/scheduler/summary/container-is-remote-node.summary
+++ b/cts/scheduler/summary/container-is-remote-node.summary
@@ -1,6 +1,9 @@
 3 of 19 resource instances DISABLED and 0 BLOCKED from further action due to failure

 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ lama2 lama3 ]
     * GuestOnline: [ RNVM1 ]
@@ -28,12 +31,11 @@ Current cluster status:
 Transition Summary:

 Executing Cluster Transition:
-  * Resource action: dlm monitor on RNVM1
-  * Resource action: clvmd monitor on RNVM1
-  * Resource action: gfs2-lv_1_1 monitor on RNVM1
-  * Resource action: gfs2-lv_1_2 monitor on RNVM1

 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ lama2 lama3 ]
     * GuestOnline: [ RNVM1 ]
diff --git a/cts/scheduler/summary/date-1.summary b/cts/scheduler/summary/date-1.summary
index 794b3c667bd..62a706cdc0e 100644
--- a/cts/scheduler/summary/date-1.summary
+++ b/cts/scheduler/summary/date-1.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

@@ -9,13 +11,12 @@ Transition Summary:
   * Start rsc1 ( node2 )

 Executing Cluster Transition:
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Resource action: rsc1 start on node2

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

   * Full List of Resources:
-    * rsc1 (ocf:heartbeat:apache): Started node2
+    * rsc1 (ocf:heartbeat:apache): Stopped
diff --git a/cts/scheduler/summary/date-2.summary b/cts/scheduler/summary/date-2.summary
index 3f99a1898bc..c82bf2475be 100644
--- a/cts/scheduler/summary/date-2.summary
+++ b/cts/scheduler/summary/date-2.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * OFFLINE: [ router1 router2 ]

@@ -12,6 +14,8 @@ Transition Summary:

 Executing Cluster Transition:

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * OFFLINE: [ router1 router2 ]
diff --git a/cts/scheduler/summary/date-3.summary b/cts/scheduler/summary/date-3.summary
index 3f99a1898bc..c82bf2475be 100644
--- a/cts/scheduler/summary/date-3.summary
+++ b/cts/scheduler/summary/date-3.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * OFFLINE: [ router1 router2 ]

@@ -12,6 +14,8 @@ Transition Summary:

 Executing Cluster Transition:

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * OFFLINE: [ router1 router2 ]
diff --git a/cts/scheduler/summary/dc-fence-ordering.summary b/cts/scheduler/summary/dc-fence-ordering.summary
index 0261cad5978..0a1efc8bd91 100644
--- a/cts/scheduler/summary/dc-fence-ordering.summary
+++ b/cts/scheduler/summary/dc-fence-ordering.summary
@@ -1,5 +1,8 @@
 Using the original execution date of: 2018-11-28 18:37:16Z
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Node rhel7-1: UNCLEAN (online)
     * Online: [ rhel7-2 rhel7-4 rhel7-5 ]
@@ -37,29 +40,16 @@ Transition Summary:
   * Stop petulant ( rhel7-1 ) due to node availability

 Executing Cluster Transition:
-  * Fencing rhel7-1 (reboot)
-  * Pseudo action: group-1_stop_0
-  * Pseudo action: petulant_stop_0
-  * Pseudo action: r192.168.122.207_stop_0
-  * Pseudo action: group-1_stopped_0
-  * Pseudo action: promotable-1_demote_0
-  * Pseudo action: stateful-1_demote_0
-  * Pseudo action: promotable-1_demoted_0
-  * Pseudo action: promotable-1_stop_0
-  * Resource action: stateful-1 stop on rhel7-5
-  * Pseudo action: stateful-1_stop_0
-  * Resource action: stateful-1 stop on rhel7-2
-  * Resource action: stateful-1 stop on rhel7-4
-  * Pseudo action: promotable-1_stopped_0
-  * Cluster action: do_shutdown on rhel7-5
-  * Cluster action: do_shutdown on rhel7-4
-  * Cluster action: do_shutdown on rhel7-2
 Using the original execution date of: 2018-11-28 18:37:16Z

 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
+    * Node rhel7-1: UNCLEAN (online)
     * Online: [ rhel7-2 rhel7-4 rhel7-5 ]
-    * OFFLINE: [ rhel7-1 rhel7-3 ]
+    * OFFLINE: [ rhel7-3 ]

   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Stopped
@@ -74,9 +64,11 @@ Revised Cluster Status:
     * Clone Set: Connectivity [ping-1]:
       * Stopped: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
     * Clone Set: promotable-1 [stateful-1] (promotable):
-      * Stopped: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
+      * Promoted: [ rhel7-1 ]
+      * Unpromoted: [ rhel7-2 rhel7-4 rhel7-5 ]
+      * Stopped: [ rhel7-3 ]
     * Resource Group: group-1:
-      * r192.168.122.207 (ocf:heartbeat:IPaddr2): Stopped
-      * petulant (service:pacemaker-cts-dummyd@10): Stopped
+      * r192.168.122.207 (ocf:heartbeat:IPaddr2): Started rhel7-1
+      * petulant (service:pacemaker-cts-dummyd@10): FAILED rhel7-1
       * r192.168.122.208 (ocf:heartbeat:IPaddr2): Stopped
     * lsb-dummy (lsb:LSBDummy): Stopped
diff --git a/cts/scheduler/summary/enforce-colo1.summary b/cts/scheduler/summary/enforce-colo1.summary
index 5bc9aa0c859..4cabaab4283 100644
--- a/cts/scheduler/summary/enforce-colo1.summary
+++ b/cts/scheduler/summary/enforce-colo1.summary
@@ -1,6 +1,8 @@
 3 of 6 resource instances DISABLED and 0 BLOCKED from further action due to failure

 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]

@@ -19,21 +21,16 @@ Transition Summary:
   * Stop central ( rhel7-auto3 ) due to unrunnable keystone-clone running

 Executing Cluster Transition:
-  * Resource action: engine stop on rhel7-auto3
-  * Resource action: central stop on rhel7-auto3
-  * Pseudo action: keystone-clone_stop_0
-  * Resource action: keystone stop on rhel7-auto2
-  * Resource action: keystone stop on rhel7-auto3
-  * Resource action: keystone stop on rhel7-auto1
-  * Pseudo action: keystone-clone_stopped_0

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]

   * Full List of Resources:
     * shooter (stonith:fence_xvm): Started rhel7-auto2
-    * engine (ocf:heartbeat:Dummy): Stopped
+    * engine (ocf:heartbeat:Dummy): Started rhel7-auto3
     * Clone Set: keystone-clone [keystone] (disabled):
-      * Stopped (disabled): [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
-    * central (ocf:heartbeat:Dummy): Stopped
+      * Started: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
+    * central (ocf:heartbeat:Dummy): Started rhel7-auto3
diff --git a/cts/scheduler/summary/expire-non-blocked-failure.summary b/cts/scheduler/summary/expire-non-blocked-failure.summary
index 0ca6c540468..c1291578c36 100644
--- a/cts/scheduler/summary/expire-non-blocked-failure.summary
+++ b/cts/scheduler/summary/expire-non-blocked-failure.summary
@@ -1,6 +1,9 @@
 0 of 3 resource instances DISABLED and 1 BLOCKED from further action due to failure

 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]

@@ -12,9 +15,11 @@ Current cluster status:
 Transition Summary:

 Executing Cluster Transition:
-  * Cluster action: clear_failcount for rsc2 on node1

 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/expired-failed-probe-primitive.summary b/cts/scheduler/summary/expired-failed-probe-primitive.summary
index ac0604e84ff..67ced9117b3 100644
--- a/cts/scheduler/summary/expired-failed-probe-primitive.summary
+++ b/cts/scheduler/summary/expired-failed-probe-primitive.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ cluster01 cluster02 ]

@@ -11,16 +13,14 @@ Transition Summary:
   * Start dummy-1 ( cluster02 )

 Executing Cluster Transition:
-  * Resource action: dummy-1 monitor on cluster02
-  * Resource action: dummy-1 monitor on cluster01
-  * Resource action: dummy-2 monitor on cluster01
-  * Resource action: dummy-1 start on cluster02

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ cluster01 cluster02 ]

   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Started cluster01
-    * dummy-1 (ocf:pacemaker:Dummy): Started cluster02
+    * dummy-1 (ocf:pacemaker:Dummy): Stopped
     * dummy-2 (ocf:pacemaker:Dummy): Started cluster02
diff --git a/cts/scheduler/summary/expired-stop-1.summary b/cts/scheduler/summary/expired-stop-1.summary
index 9e94257ed43..c1ebbc97f03 100644
--- a/cts/scheduler/summary/expired-stop-1.summary
+++ b/cts/scheduler/summary/expired-stop-1.summary
@@ -1,6 +1,8 @@
 1 of 1 resource instances DISABLED and 0 BLOCKED from further action due to failure

 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 node4 node5 ]

@@ -11,12 +13,12 @@ Transition Summary:
   * Stop rsc1 ( node2 ) due to node availability

 Executing Cluster Transition:
-  * Resource action: rsc1 stop on node2
-  * Cluster action: clear_failcount for rsc1 on node2

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 node4 node5 ]

   * Full List of Resources:
-    * rsc1 (ocf:pacemaker:Dummy): Stopped (disabled)
+    * rsc1 (ocf:pacemaker:Dummy): Started node2 (disabled)
diff --git a/cts/scheduler/summary/failcount-block.summary b/cts/scheduler/summary/failcount-block.summary
index 646f76b400b..5cc39429619 100644
--- a/cts/scheduler/summary/failcount-block.summary
+++ b/cts/scheduler/summary/failcount-block.summary
@@ -1,6 +1,9 @@
 0 of 5 resource instances DISABLED and 1 BLOCKED from further action due to failure

 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ pcmk-1 ]
     * OFFLINE: [ pcmk-4 ]

@@ -17,16 +20,11 @@ Transition Summary:
   * Start rsc_pcmk-4 ( pcmk-1 )

 Executing Cluster Transition:
-  * Resource action: rsc_pcmk-1 monitor=5000 on pcmk-1
-  * Cluster action: clear_failcount for rsc_pcmk-1 on pcmk-1
-  * Resource action: rsc_pcmk-3 start on pcmk-1
-  * Cluster action: clear_failcount for rsc_pcmk-3 on pcmk-1
-  * Resource action: rsc_pcmk-4 start on pcmk-1
-  * Cluster action: clear_failcount for rsc_pcmk-5 on pcmk-1
-  * Resource action: rsc_pcmk-3 monitor=5000 on pcmk-1
-  * Resource action: rsc_pcmk-4 monitor=5000 on pcmk-1

 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ pcmk-1 ]
     * OFFLINE: [ pcmk-4 ]

@@ -34,6 +32,6 @@ Revised Cluster Status:
   * Full List of Resources:
     * rsc_pcmk-1 (ocf:heartbeat:IPaddr2): Started pcmk-1
     * rsc_pcmk-2 (ocf:heartbeat:IPaddr2): FAILED pcmk-1 (blocked)
-    * rsc_pcmk-3 (ocf:heartbeat:IPaddr2): Started pcmk-1
-    * rsc_pcmk-4 (ocf:heartbeat:IPaddr2): Started pcmk-1
+    * rsc_pcmk-3 (ocf:heartbeat:IPaddr2): Stopped
+    * rsc_pcmk-4 (ocf:heartbeat:IPaddr2): Stopped
     * rsc_pcmk-5 (ocf:heartbeat:IPaddr2): Started pcmk-1
diff --git a/cts/scheduler/summary/failcount.summary b/cts/scheduler/summary/failcount.summary
index 02268c395e2..e3b7889797d 100644
--- a/cts/scheduler/summary/failcount.summary
+++ b/cts/scheduler/summary/failcount.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ dresproddns01 dresproddns02 ]

@@ -29,12 +31,10 @@ Current cluster status:
 Transition Summary:

 Executing Cluster Transition:
-  * Cluster action: clear_failcount for re-openfire-lsb on dresproddns01
-  * Cluster action: clear_failcount for re-openfire-lsb on dresproddns02
-  * Resource action: re-named-lsb:1 monitor=10000 on dresproddns01
-  * Resource action: re-named-lsb:0 monitor=10000 on dresproddns02

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ dresproddns01 dresproddns02 ]
diff --git a/cts/scheduler/summary/failed-demote-recovery-promoted.summary b/cts/scheduler/summary/failed-demote-recovery-promoted.summary
index 2d11c460508..5f1040687e7 100644
--- a/cts/scheduler/summary/failed-demote-recovery-promoted.summary
+++ b/cts/scheduler/summary/failed-demote-recovery-promoted.summary
@@ -1,5 +1,7 @@
 Using the original execution date of: 2017-11-30 12:37:50Z
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ fastvm-rhel-7-4-95 fastvm-rhel-7-4-96 ]

@@ -14,41 +16,11 @@ Transition Summary:
   * Recover DB2_HADR:1 ( Unpromoted -> Promoted fastvm-rhel-7-4-96 )

 Executing Cluster Transition:
-  * Pseudo action: DB2_HADR-master_pre_notify_stop_0
-  * Resource action: DB2_HADR notify on fastvm-rhel-7-4-95
-  * Resource action: DB2_HADR notify on fastvm-rhel-7-4-96
-  * Pseudo action: DB2_HADR-master_confirmed-pre_notify_stop_0
-  * Pseudo action: DB2_HADR-master_stop_0
-  * Resource action: DB2_HADR stop on fastvm-rhel-7-4-96
-  * Pseudo action: DB2_HADR-master_stopped_0
-  * Pseudo action: DB2_HADR-master_post_notify_stopped_0
-  * Resource action: DB2_HADR notify on fastvm-rhel-7-4-95
-  * Pseudo action: DB2_HADR-master_confirmed-post_notify_stopped_0
-  * Pseudo action: DB2_HADR-master_pre_notify_start_0
-  * Resource action: DB2_HADR notify on fastvm-rhel-7-4-95
-  * Pseudo action: DB2_HADR-master_confirmed-pre_notify_start_0
-  * Pseudo action: DB2_HADR-master_start_0
-  * Resource action: DB2_HADR start on fastvm-rhel-7-4-96
-  * Pseudo action: DB2_HADR-master_running_0
-  * Pseudo action: DB2_HADR-master_post_notify_running_0
-  * Resource action: DB2_HADR notify on fastvm-rhel-7-4-95
-  * Resource action: DB2_HADR notify on fastvm-rhel-7-4-96
-  * Pseudo action: DB2_HADR-master_confirmed-post_notify_running_0
-  * Pseudo action: DB2_HADR-master_pre_notify_promote_0
-  * Resource action: DB2_HADR notify on fastvm-rhel-7-4-95
-  * Resource action: DB2_HADR notify on fastvm-rhel-7-4-96
-  * Pseudo action: DB2_HADR-master_confirmed-pre_notify_promote_0
-  * Pseudo action: DB2_HADR-master_promote_0
-  * Resource action: DB2_HADR promote on fastvm-rhel-7-4-96
-  * Pseudo action: DB2_HADR-master_promoted_0
-  * Pseudo action: DB2_HADR-master_post_notify_promoted_0
-  * Resource action: DB2_HADR notify on fastvm-rhel-7-4-95
-  * Resource action: DB2_HADR notify on fastvm-rhel-7-4-96
-  * Pseudo action: DB2_HADR-master_confirmed-post_notify_promoted_0
-  * Resource action: DB2_HADR monitor=22000 on fastvm-rhel-7-4-96
 Using the original execution date of: 2017-11-30 12:37:50Z

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ fastvm-rhel-7-4-95 fastvm-rhel-7-4-96 ]

@@ -56,5 +28,5 @@ Revised Cluster Status:
     * fence-fastvm-rhel-7-4-95 (stonith:fence_xvm): Started fastvm-rhel-7-4-96
     * fence-fastvm-rhel-7-4-96 (stonith:fence_xvm): Started fastvm-rhel-7-4-95
     * Clone Set: DB2_HADR-master [DB2_HADR] (promotable):
-      * Promoted: [ fastvm-rhel-7-4-96 ]
+      * DB2_HADR (ocf:heartbeat:db2): FAILED fastvm-rhel-7-4-96
       * Unpromoted: [ fastvm-rhel-7-4-95 ]
diff --git a/cts/scheduler/summary/failed-demote-recovery.summary b/cts/scheduler/summary/failed-demote-recovery.summary
index 8c91259cd97..f5e75b47619 100644
--- a/cts/scheduler/summary/failed-demote-recovery.summary
+++ b/cts/scheduler/summary/failed-demote-recovery.summary
@@ -1,5 +1,7 @@
 Using the original execution date of: 2017-11-30 12:37:50Z
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ fastvm-rhel-7-4-95 fastvm-rhel-7-4-96 ]

@@ -14,30 +16,11 @@ Transition Summary:
   * Recover DB2_HADR:1 ( Unpromoted fastvm-rhel-7-4-96 )

 Executing Cluster Transition:
-  * Pseudo action: DB2_HADR-master_pre_notify_stop_0
-  * Resource action: DB2_HADR notify on fastvm-rhel-7-4-95
-  * Resource action: DB2_HADR notify on fastvm-rhel-7-4-96
-  * Pseudo action: DB2_HADR-master_confirmed-pre_notify_stop_0
-  * Pseudo action: DB2_HADR-master_stop_0
-  * Resource action: DB2_HADR stop on fastvm-rhel-7-4-96
-  * Pseudo action: DB2_HADR-master_stopped_0
-  * Pseudo action: DB2_HADR-master_post_notify_stopped_0
-  * Resource action: DB2_HADR notify on fastvm-rhel-7-4-95
-  * Pseudo action: DB2_HADR-master_confirmed-post_notify_stopped_0
-  * Pseudo action: DB2_HADR-master_pre_notify_start_0
-  * Resource action: DB2_HADR notify on fastvm-rhel-7-4-95
-  * Pseudo action: DB2_HADR-master_confirmed-pre_notify_start_0
-  * Pseudo action: DB2_HADR-master_start_0
-  * Resource action: DB2_HADR start on fastvm-rhel-7-4-96
-  * Pseudo action: DB2_HADR-master_running_0
-  * Pseudo action: DB2_HADR-master_post_notify_running_0
-  * Resource action: DB2_HADR notify on fastvm-rhel-7-4-95
-  * Resource action: DB2_HADR notify on fastvm-rhel-7-4-96
-  * Pseudo action: DB2_HADR-master_confirmed-post_notify_running_0
-  * Resource action: DB2_HADR monitor=5000 on fastvm-rhel-7-4-96
 Using the original execution date of: 2017-11-30 12:37:50Z

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ fastvm-rhel-7-4-95 fastvm-rhel-7-4-96 ]

@@ -45,4 +28,5 @@ Revised Cluster Status:
     * fence-fastvm-rhel-7-4-95 (stonith:fence_xvm): Started fastvm-rhel-7-4-96
     * fence-fastvm-rhel-7-4-96 (stonith:fence_xvm): Started fastvm-rhel-7-4-95
     * Clone Set: DB2_HADR-master [DB2_HADR] (promotable):
-      * Unpromoted: [ fastvm-rhel-7-4-95 fastvm-rhel-7-4-96 ]
+      * DB2_HADR (ocf:heartbeat:db2): FAILED fastvm-rhel-7-4-96
+      * Unpromoted: [ fastvm-rhel-7-4-95 ]
diff --git a/cts/scheduler/summary/failed-probe-clone.summary b/cts/scheduler/summary/failed-probe-clone.summary
index febee144000..67e52f2442a 100644
--- a/cts/scheduler/summary/failed-probe-clone.summary
+++ b/cts/scheduler/summary/failed-probe-clone.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ cluster01 cluster02 ]

@@ -18,21 +20,10 @@ Transition Summary:
   * Stop ping-3:0 ( cluster01 ) due to node availability

 Executing Cluster Transition:
-  * Cluster action: clear_failcount for ping-1 on cluster02
-  * Cluster action: clear_failcount for ping-1 on cluster01
-  * Cluster action: clear_failcount for ping-2 on cluster02
-  * Cluster action: clear_failcount for ping-2 on cluster01
-  * Pseudo action: ping-2-clone_start_0
-  * Cluster action: clear_failcount for ping-3 on cluster01
-  * Cluster action: clear_failcount for ping-3 on cluster02
-  * Pseudo action: ping-3-clone_stop_0
-  * Resource action: ping-2 start on cluster02
-  * Pseudo action: ping-2-clone_running_0
-  * Resource action: ping-3 stop on cluster01
-  * Pseudo action: ping-3-clone_stopped_0
-  * Resource action: ping-2 monitor=10000 on cluster02

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ cluster01 cluster02 ]

@@ -41,8 +32,8 @@ Revised Cluster Status:
     * Clone Set: ping-1-clone [ping-1]:
       * Stopped (not installed): [ cluster01 cluster02 ]
     * Clone Set: ping-2-clone [ping-2]:
-      * Started: [ cluster02 ]
+      * Stopped: [ cluster02 ]
       * Stopped (not installed): [ cluster01 ]
     * Clone Set: ping-3-clone [ping-3]:
-      * Stopped: [ cluster01 ]
+      * ping-3 (ocf:pacemaker:ping): FAILED cluster01
       * Stopped (not installed): [ cluster02 ]
diff --git a/cts/scheduler/summary/failed-probe-primitive.summary b/cts/scheduler/summary/failed-probe-primitive.summary
index ea8edae4944..946f64325b6 100644
--- a/cts/scheduler/summary/failed-probe-primitive.summary
+++ b/cts/scheduler/summary/failed-probe-primitive.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ cluster01 cluster02 ]

@@ -13,15 +15,15 @@ Transition Summary:
   * Stop dummy-3 ( cluster01 ) due to node availability

 Executing Cluster Transition:
-  * Resource action: dummy-2 start on cluster02
-  * Resource action: dummy-3 stop on cluster01

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ cluster01 cluster02 ]

   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Started cluster01
     * dummy-1 (ocf:pacemaker:Dummy): Stopped (not installed)
-    * dummy-2 (ocf:pacemaker:Dummy): Started cluster02
-    * dummy-3 (ocf:pacemaker:Dummy): Stopped (not installed)
+    * dummy-2 (ocf:pacemaker:Dummy): Stopped (not installed)
+    * dummy-3 (ocf:pacemaker:Dummy): FAILED cluster01
diff --git a/cts/scheduler/summary/failed-sticky-anticolocated-group.summary b/cts/scheduler/summary/failed-sticky-anticolocated-group.summary
index 3ecb0560294..90d2ea7f84d 100644
--- a/cts/scheduler/summary/failed-sticky-anticolocated-group.summary
+++ b/cts/scheduler/summary/failed-sticky-anticolocated-group.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

@@ -16,18 +18,10 @@ Transition Summary:
   * Recover member2b ( node1 -> node2 )

 Executing Cluster Transition:
-  * Pseudo action: group2_stop_0
-  * Resource action: member2b stop on node1
-  * Resource action: member2a stop on node1
-  * Pseudo action: group2_stopped_0
-  * Pseudo action: group2_start_0
-  * Resource action: member2a start on node2
-  * Resource action: member2b start on node2
-  * Pseudo action: group2_running_0
-  * Resource action: member2a monitor=10000 on node2
-  * Resource action: member2b monitor=10000 on node2

 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]

@@ -37,5 +31,5 @@ Revised Cluster Status:
       * member1a (ocf:pacemaker:Dummy): Started node2
       * member1b (ocf:pacemaker:Dummy): Started node2
     * Resource Group: group2:
-      * member2a (ocf:pacemaker:Dummy): Started node2
-      * member2b (ocf:pacemaker:Dummy): Started node2
+      * member2a (ocf:pacemaker:Dummy): Started node1
+      * member2b (ocf:pacemaker:Dummy): FAILED node1
diff --git a/cts/scheduler/summary/failed-sticky-group.summary b/cts/scheduler/summary/failed-sticky-group.summary
index 2114be7cec1..49ce083d5ba 100644
--- a/cts/scheduler/summary/failed-sticky-group.summary
+++ b/cts/scheduler/summary/failed-sticky-group.summary
@@ -1,15 +1,11 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ act1 act2 act3 sby1 sby2 ]

   * Full List of Resources:
-    * Resource Group: grpPostgreSQLDB1:
-      * prmExPostgreSQLDB1 (ocf:pacemaker:Dummy): Started act1
-      * prmFsPostgreSQLDB1-1 (ocf:pacemaker:Dummy): Started act1
-      * prmFsPostgreSQLDB1-2 (ocf:pacemaker:Dummy): Started act1
-      * prmFsPostgreSQLDB1-3 (ocf:pacemaker:Dummy): Started act1
-      * prmIpPostgreSQLDB1 (ocf:pacemaker:Dummy): Started act1
-      * prmApPostgreSQLDB1 (ocf:pacemaker:Dummy): FAILED act1
     * Resource Group: grpPostgreSQLDB2:
       * prmExPostgreSQLDB2 (ocf:pacemaker:Dummy): Started act2
       * prmFsPostgreSQLDB2-1 (ocf:pacemaker:Dummy): Started act2
@@ -24,6 +20,13 @@ Current cluster status:
       * prmFsPostgreSQLDB3-3 (ocf:pacemaker:Dummy): Started act3
       * prmIpPostgreSQLDB3 (ocf:pacemaker:Dummy): Started act3
       * prmApPostgreSQLDB3 (ocf:pacemaker:Dummy): Started act3
+    * Resource Group: grpPostgreSQLDB1:
+      * prmExPostgreSQLDB1 (ocf:pacemaker:Dummy): Started act1
+      * prmFsPostgreSQLDB1-1 (ocf:pacemaker:Dummy): Started act1
+      * prmFsPostgreSQLDB1-2 (ocf:pacemaker:Dummy): Started act1
+      * prmFsPostgreSQLDB1-3 (ocf:pacemaker:Dummy): Started act1
+      * prmIpPostgreSQLDB1 (ocf:pacemaker:Dummy): Started act1
+      * prmApPostgreSQLDB1 (ocf:pacemaker:Dummy): FAILED act1

 Transition Summary:
   * Move prmExPostgreSQLDB1 ( act1 -> sby1 )
@@ -34,46 +37,15 @@ Transition Summary:
   * Recover prmApPostgreSQLDB1 ( act1 -> sby1 )

 Executing Cluster Transition:
-  * Pseudo action: grpPostgreSQLDB1_stop_0
-  *
Resource action: prmApPostgreSQLDB1 stop on act1 - * Pseudo action: load_stopped_sby2 - * Pseudo action: load_stopped_sby1 - * Pseudo action: load_stopped_act3 - * Pseudo action: load_stopped_act2 - * Resource action: prmIpPostgreSQLDB1 stop on act1 - * Resource action: prmFsPostgreSQLDB1-3 stop on act1 - * Resource action: prmFsPostgreSQLDB1-2 stop on act1 - * Resource action: prmFsPostgreSQLDB1-1 stop on act1 - * Resource action: prmExPostgreSQLDB1 stop on act1 - * Pseudo action: load_stopped_act1 - * Pseudo action: grpPostgreSQLDB1_stopped_0 - * Pseudo action: grpPostgreSQLDB1_start_0 - * Resource action: prmExPostgreSQLDB1 start on sby1 - * Resource action: prmFsPostgreSQLDB1-1 start on sby1 - * Resource action: prmFsPostgreSQLDB1-2 start on sby1 - * Resource action: prmFsPostgreSQLDB1-3 start on sby1 - * Resource action: prmIpPostgreSQLDB1 start on sby1 - * Resource action: prmApPostgreSQLDB1 start on sby1 - * Pseudo action: grpPostgreSQLDB1_running_0 - * Resource action: prmExPostgreSQLDB1 monitor=5000 on sby1 - * Resource action: prmFsPostgreSQLDB1-1 monitor=5000 on sby1 - * Resource action: prmFsPostgreSQLDB1-2 monitor=5000 on sby1 - * Resource action: prmFsPostgreSQLDB1-3 monitor=5000 on sby1 - * Resource action: prmIpPostgreSQLDB1 monitor=5000 on sby1 - * Resource action: prmApPostgreSQLDB1 monitor=5000 on sby1 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ act1 act2 act3 sby1 sby2 ] * Full List of Resources: - * Resource Group: grpPostgreSQLDB1: - * prmExPostgreSQLDB1 (ocf:pacemaker:Dummy): Started sby1 - * prmFsPostgreSQLDB1-1 (ocf:pacemaker:Dummy): Started sby1 - * prmFsPostgreSQLDB1-2 (ocf:pacemaker:Dummy): Started sby1 - * prmFsPostgreSQLDB1-3 (ocf:pacemaker:Dummy): Started sby1 - * prmIpPostgreSQLDB1 (ocf:pacemaker:Dummy): Started sby1 - * prmApPostgreSQLDB1 (ocf:pacemaker:Dummy): Started sby1 * Resource Group: grpPostgreSQLDB2: * prmExPostgreSQLDB2 (ocf:pacemaker:Dummy): Started act2 * prmFsPostgreSQLDB2-1 (ocf:pacemaker:Dummy): Started act2 @@ -88,3 +60,10 @@ Revised Cluster Status: * prmFsPostgreSQLDB3-3 (ocf:pacemaker:Dummy): Started act3 * prmIpPostgreSQLDB3 (ocf:pacemaker:Dummy): Started act3 * prmApPostgreSQLDB3 (ocf:pacemaker:Dummy): Started act3 + * Resource Group: grpPostgreSQLDB1: + * prmExPostgreSQLDB1 (ocf:pacemaker:Dummy): Started act1 + * prmFsPostgreSQLDB1-1 (ocf:pacemaker:Dummy): Started act1 + * prmFsPostgreSQLDB1-2 (ocf:pacemaker:Dummy): Started act1 + * prmFsPostgreSQLDB1-3 (ocf:pacemaker:Dummy): Started act1 + * prmIpPostgreSQLDB1 (ocf:pacemaker:Dummy): Started act1 + * prmApPostgreSQLDB1 (ocf:pacemaker:Dummy): FAILED act1 diff --git a/cts/scheduler/summary/force-anon-clone-max.summary b/cts/scheduler/summary/force-anon-clone-max.summary index d2320e9c571..cc29cba7634 100644 --- a/cts/scheduler/summary/force-anon-clone-max.summary +++ b/cts/scheduler/summary/force-anon-clone-max.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node1 node2 node3 ] @@ -26,49 +29,19 @@ Transition Summary: * Start lsb3:1 ( node2 ) Executing Cluster Transition: - * Resource action: Fencing start on node1 - * Pseudo action: clone1_start_0 - * Pseudo action: clone2_start_0 - * Pseudo action: clone3_start_0 - * Resource action: lsb1:0 start on node2 - * Resource action: lsb1:1 start on node3 - * Pseudo action: clone1_running_0 - * Resource action: lsb2:0 start on node1 - * Resource action: 
lsb2:1 start on node2 - * Resource action: lsb2:2 start on node3 - * Pseudo action: clone2_running_0 - * Pseudo action: group1:0_start_0 - * Resource action: dummy1:0 start on node1 - * Resource action: dummy2:0 start on node1 - * Resource action: lsb3:0 start on node1 - * Pseudo action: group1:1_start_0 - * Resource action: dummy1:1 start on node2 - * Resource action: dummy2:1 start on node2 - * Resource action: lsb3:1 start on node2 - * Resource action: lsb1:0 monitor=5000 on node2 - * Resource action: lsb1:1 monitor=5000 on node3 - * Resource action: lsb2:0 monitor=5000 on node1 - * Resource action: lsb2:1 monitor=5000 on node2 - * Resource action: lsb2:2 monitor=5000 on node3 - * Pseudo action: group1:0_running_0 - * Resource action: dummy1:0 monitor=5000 on node1 - * Resource action: dummy2:0 monitor=5000 on node1 - * Resource action: lsb3:0 monitor=5000 on node1 - * Pseudo action: group1:1_running_0 - * Resource action: dummy1:1 monitor=5000 on node2 - * Resource action: dummy2:1 monitor=5000 on node2 - * Resource action: lsb3:1 monitor=5000 on node2 - * Pseudo action: clone3_running_0 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: - * Fencing (stonith:fence_imaginary): Started node1 + * Fencing (stonith:fence_imaginary): Stopped * Clone Set: clone1 [lsb1]: - * Started: [ node2 node3 ] + * Stopped: [ node1 node2 node3 ] * Clone Set: clone2 [lsb2]: - * Started: [ node1 node2 node3 ] + * Stopped: [ node1 node2 node3 ] * Clone Set: clone3 [group1]: - * Started: [ node1 node2 ] + * Stopped: [ node1 node2 node3 ] diff --git a/cts/scheduler/summary/group-anticolocation-2.summary b/cts/scheduler/summary/group-anticolocation-2.summary index 3ecb0560294..90d2ea7f84d 100644 --- a/cts/scheduler/summary/group-anticolocation-2.summary +++ b/cts/scheduler/summary/group-anticolocation-2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -16,18 +18,10 @@ Transition Summary: * Recover member2b ( node1 -> node2 ) Executing Cluster Transition: - * Pseudo action: group2_stop_0 - * Resource action: member2b stop on node1 - * Resource action: member2a stop on node1 - * Pseudo action: group2_stopped_0 - * Pseudo action: group2_start_0 - * Resource action: member2a start on node2 - * Resource action: member2b start on node2 - * Pseudo action: group2_running_0 - * Resource action: member2a monitor=10000 on node2 - * Resource action: member2b monitor=10000 on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -37,5 +31,5 @@ Revised Cluster Status: * member1a (ocf:pacemaker:Dummy): Started node2 * member1b (ocf:pacemaker:Dummy): Started node2 * Resource Group: group2: - * member2a (ocf:pacemaker:Dummy): Started node2 - * member2b (ocf:pacemaker:Dummy): Started node2 + * member2a (ocf:pacemaker:Dummy): Started node1 + * member2b (ocf:pacemaker:Dummy): FAILED node1 diff --git a/cts/scheduler/summary/group-anticolocation-3.summary b/cts/scheduler/summary/group-anticolocation-3.summary index c9d4321330e..35f438f2f58 100644 --- a/cts/scheduler/summary/group-anticolocation-3.summary +++ b/cts/scheduler/summary/group-anticolocation-3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -13,13 +15,14 @@ Current cluster status: Transition Summary: * Stop member2b ( node1 ) due to node availability +Transition failed: terminated 
+An invalid transition was produced Executing Cluster Transition: - * Pseudo action: group2_stop_0 - * Resource action: member2b stop on node1 - * Pseudo action: group2_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -30,4 +33,4 @@ Revised Cluster Status: * member1b (ocf:pacemaker:Dummy): Started node2 * Resource Group: group2: * member2a (ocf:pacemaker:Dummy): Started node1 - * member2b (ocf:pacemaker:Dummy): Stopped + * member2b (ocf:pacemaker:Dummy): FAILED node1 diff --git a/cts/scheduler/summary/group-anticolocation-4.summary b/cts/scheduler/summary/group-anticolocation-4.summary index 3ecb0560294..90d2ea7f84d 100644 --- a/cts/scheduler/summary/group-anticolocation-4.summary +++ b/cts/scheduler/summary/group-anticolocation-4.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -16,18 +18,10 @@ Transition Summary: * Recover member2b ( node1 -> node2 ) Executing Cluster Transition: - * Pseudo action: group2_stop_0 - * Resource action: member2b stop on node1 - * Resource action: member2a stop on node1 - * Pseudo action: group2_stopped_0 - * Pseudo action: group2_start_0 - * Resource action: member2a start on node2 - * Resource action: member2b start on node2 - * Pseudo action: group2_running_0 - * Resource action: member2a monitor=10000 on node2 - * Resource action: member2b monitor=10000 on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -37,5 +31,5 @@ Revised Cluster Status: * member1a (ocf:pacemaker:Dummy): Started node2 * member1b (ocf:pacemaker:Dummy): Started node2 * Resource Group: group2: - * member2a (ocf:pacemaker:Dummy): Started node2 - * member2b (ocf:pacemaker:Dummy): Started node2 + * member2a (ocf:pacemaker:Dummy): Started node1 + * member2b (ocf:pacemaker:Dummy): FAILED node1 diff --git a/cts/scheduler/summary/group-anticolocation-5.summary b/cts/scheduler/summary/group-anticolocation-5.summary index 6f83538dc4f..9965c959b82 100644 --- a/cts/scheduler/summary/group-anticolocation-5.summary +++ b/cts/scheduler/summary/group-anticolocation-5.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] @@ -16,18 +18,10 @@ Transition Summary: * Recover member2b ( node1 -> node3 ) Executing Cluster Transition: - * Pseudo action: group2_stop_0 - * Resource action: member2b stop on node1 - * Resource action: member2a stop on node1 - * Pseudo action: group2_stopped_0 - * Pseudo action: group2_start_0 - * Resource action: member2a start on node3 - * Resource action: member2b start on node3 - * Pseudo action: group2_running_0 - * Resource action: member2a monitor=10000 on node3 - * Resource action: member2b monitor=10000 on node3 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] @@ -37,5 +31,5 @@ Revised Cluster Status: * member1a (ocf:pacemaker:Dummy): Started node2 * member1b (ocf:pacemaker:Dummy): Started node2 * Resource Group: group2: - * member2a (ocf:pacemaker:Dummy): Started node3 - * member2b (ocf:pacemaker:Dummy): Started node3 + * member2a (ocf:pacemaker:Dummy): Started node1 + * member2b (ocf:pacemaker:Dummy): FAILED node1 diff --git a/cts/scheduler/summary/group-anticolocation.summary b/cts/scheduler/summary/group-anticolocation.summary index 93d2e73c31e..33185835676 100644 --- a/cts/scheduler/summary/group-anticolocation.summary +++ b/cts/scheduler/summary/group-anticolocation.summary @@ -1,4 +1,6 @@ Current cluster 
status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -18,36 +20,18 @@ Transition Summary: * Recover member2b ( node1 -> node2 ) Executing Cluster Transition: - * Pseudo action: group1_stop_0 - * Resource action: member1b stop on node2 - * Pseudo action: group2_stop_0 - * Resource action: member2b stop on node1 - * Resource action: member1a stop on node2 - * Resource action: member2a stop on node1 - * Pseudo action: group1_stopped_0 - * Pseudo action: group1_start_0 - * Resource action: member1a start on node1 - * Resource action: member1b start on node1 - * Pseudo action: group2_stopped_0 - * Pseudo action: group2_start_0 - * Resource action: member2a start on node2 - * Resource action: member2b start on node2 - * Pseudo action: group1_running_0 - * Resource action: member1a monitor=10000 on node1 - * Resource action: member1b monitor=10000 on node1 - * Pseudo action: group2_running_0 - * Resource action: member2a monitor=10000 on node2 - * Resource action: member2b monitor=10000 on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started node1 * Resource Group: group1: - * member1a (ocf:pacemaker:Dummy): Started node1 - * member1b (ocf:pacemaker:Dummy): Started node1 + * member1a (ocf:pacemaker:Dummy): Started node2 + * member1b (ocf:pacemaker:Dummy): Started node2 * Resource Group: group2: - * member2a (ocf:pacemaker:Dummy): Started node2 - * member2b (ocf:pacemaker:Dummy): Started node2 + * member2a (ocf:pacemaker:Dummy): Started node1 + * member2b (ocf:pacemaker:Dummy): FAILED node1 diff --git a/cts/scheduler/summary/group-colocation-failure.summary b/cts/scheduler/summary/group-colocation-failure.summary index fed71c80b68..ce54e16b176 100644 --- a/cts/scheduler/summary/group-colocation-failure.summary +++ b/cts/scheduler/summary/group-colocation-failure.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -16,32 +18,17 @@ Transition Summary: * Recover member2a ( node2 -> node1 ) Executing Cluster Transition: - * Pseudo action: group1_stop_0 - * Resource action: member1b stop on node2 - * Pseudo action: group2_stop_0 - * Resource action: member2a stop on node2 - * Resource action: member1a stop on node2 - * Pseudo action: group2_stopped_0 - * Pseudo action: group2_start_0 - * Resource action: member2a start on node1 - * Pseudo action: group1_stopped_0 - * Pseudo action: group1_start_0 - * Resource action: member1a start on node1 - * Resource action: member1b start on node1 - * Pseudo action: group2_running_0 - * Resource action: member2a monitor=10000 on node1 - * Pseudo action: group1_running_0 - * Resource action: member1a monitor=10000 on node1 - * Resource action: member1b monitor=10000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started node1 * Resource Group: group1: - * member1a (ocf:pacemaker:Dummy): Started node1 - * member1b (ocf:pacemaker:Dummy): Started node1 + * member1a (ocf:pacemaker:Dummy): Started node2 + * member1b (ocf:pacemaker:Dummy): Started node2 * Resource Group: group2: - * member2a (ocf:pacemaker:Dummy): Started node1 + * member2a (ocf:pacemaker:Dummy): FAILED node2 diff --git a/cts/scheduler/summary/group-dependents.summary b/cts/scheduler/summary/group-dependents.summary index 33652555477..9d64f09bea5 100644 --- a/cts/scheduler/summary/group-dependents.summary +++ 
b/cts/scheduler/summary/group-dependents.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ asttest1 asttest2 ] @@ -53,144 +56,38 @@ Transition Summary: * Promote drbd:1 ( Unpromoted -> Promoted asttest2 ) Executing Cluster Transition: - * Pseudo action: voip_stop_0 - * Resource action: mysqld migrate_to on asttest1 - * Resource action: ip_voip_route_test1 migrate_to on asttest1 - * Resource action: ip_voip_route_test2 migrate_to on asttest1 - * Resource action: ip_voip_vlan850 migrate_to on asttest1 - * Resource action: ip_voip_vlan998 migrate_to on asttest1 - * Resource action: ip_voip_vlan851 migrate_to on asttest1 - * Resource action: ip_voip_vlan852 migrate_to on asttest1 - * Resource action: ip_voip_vlan853 migrate_to on asttest1 - * Resource action: ip_voip_vlan854 migrate_to on asttest1 - * Resource action: ip_voip_vlan855 migrate_to on asttest1 - * Resource action: ip_voip_vlan856 migrate_to on asttest1 - * Resource action: drbd:1 cancel=31000 on asttest2 - * Pseudo action: ms_drbd_pre_notify_demote_0 - * Resource action: mysqld migrate_from on asttest2 - * Resource action: dahdi migrate_to on asttest1 - * Resource action: ip_voip_route_test1 migrate_from on asttest2 - * Resource action: ip_voip_route_test2 migrate_from on asttest2 - * Resource action: ip_voip_vlan850 migrate_from on asttest2 - * Resource action: ip_voip_vlan998 migrate_from on asttest2 - * Resource action: ip_voip_vlan851 migrate_from on asttest2 - * Resource action: ip_voip_vlan852 migrate_from on asttest2 - * Resource action: ip_voip_vlan853 migrate_from on asttest2 - * Resource action: ip_voip_vlan854 migrate_from on asttest2 - * Resource action: ip_voip_vlan855 migrate_from on asttest2 - * Resource action: ip_voip_vlan856 migrate_from on asttest2 - * Resource action: drbd:0 notify on asttest1 - * Resource action: drbd:1 notify on asttest2 - * Pseudo action: ms_drbd_confirmed-pre_notify_demote_0 - * Resource action: dahdi migrate_from on asttest2 - * Resource action: dahdi stop on asttest1 - * Resource action: mysqld stop on asttest1 - * Pseudo action: voip_stopped_0 - * Pseudo action: ip_voip_routes_stop_0 - * Resource action: ip_voip_route_test1 stop on asttest1 - * Resource action: ip_voip_route_test2 stop on asttest1 - * Pseudo action: ip_voip_routes_stopped_0 - * Pseudo action: ip_voip_addresses_p_stop_0 - * Resource action: ip_voip_vlan850 stop on asttest1 - * Resource action: ip_voip_vlan998 stop on asttest1 - * Resource action: ip_voip_vlan851 stop on asttest1 - * Resource action: ip_voip_vlan852 stop on asttest1 - * Resource action: ip_voip_vlan853 stop on asttest1 - * Resource action: ip_voip_vlan854 stop on asttest1 - * Resource action: ip_voip_vlan855 stop on asttest1 - * Resource action: ip_voip_vlan856 stop on asttest1 - * Pseudo action: ip_voip_addresses_p_stopped_0 - * Resource action: fs_drbd stop on asttest1 - * Pseudo action: ms_drbd_demote_0 - * Resource action: drbd:0 demote on asttest1 - * Pseudo action: ms_drbd_demoted_0 - * Pseudo action: ms_drbd_post_notify_demoted_0 - * Resource action: drbd:0 notify on asttest1 - * Resource action: drbd:1 notify on asttest2 - * Pseudo action: ms_drbd_confirmed-post_notify_demoted_0 - * Pseudo action: ms_drbd_pre_notify_promote_0 - * Resource action: drbd:0 notify on asttest1 - * Resource action: drbd:1 notify on asttest2 - * Pseudo action: ms_drbd_confirmed-pre_notify_promote_0 - * Pseudo action: ms_drbd_promote_0 - * Resource action: drbd:1 
promote on asttest2 - * Pseudo action: ms_drbd_promoted_0 - * Pseudo action: ms_drbd_post_notify_promoted_0 - * Resource action: drbd:0 notify on asttest1 - * Resource action: drbd:1 notify on asttest2 - * Pseudo action: ms_drbd_confirmed-post_notify_promoted_0 - * Resource action: fs_drbd start on asttest2 - * Resource action: drbd:0 monitor=31000 on asttest1 - * Pseudo action: ip_voip_addresses_p_start_0 - * Pseudo action: ip_voip_vlan850_start_0 - * Pseudo action: ip_voip_vlan998_start_0 - * Pseudo action: ip_voip_vlan851_start_0 - * Pseudo action: ip_voip_vlan852_start_0 - * Pseudo action: ip_voip_vlan853_start_0 - * Pseudo action: ip_voip_vlan854_start_0 - * Pseudo action: ip_voip_vlan855_start_0 - * Pseudo action: ip_voip_vlan856_start_0 - * Resource action: fs_drbd monitor=1000 on asttest2 - * Pseudo action: ip_voip_addresses_p_running_0 - * Resource action: ip_voip_vlan850 monitor=1000 on asttest2 - * Resource action: ip_voip_vlan998 monitor=1000 on asttest2 - * Resource action: ip_voip_vlan851 monitor=1000 on asttest2 - * Resource action: ip_voip_vlan852 monitor=1000 on asttest2 - * Resource action: ip_voip_vlan853 monitor=1000 on asttest2 - * Resource action: ip_voip_vlan854 monitor=1000 on asttest2 - * Resource action: ip_voip_vlan855 monitor=1000 on asttest2 - * Resource action: ip_voip_vlan856 monitor=1000 on asttest2 - * Pseudo action: ip_voip_routes_start_0 - * Pseudo action: ip_voip_route_test1_start_0 - * Pseudo action: ip_voip_route_test2_start_0 - * Pseudo action: ip_voip_routes_running_0 - * Resource action: ip_voip_route_test1 monitor=1000 on asttest2 - * Resource action: ip_voip_route_test2 monitor=1000 on asttest2 - * Pseudo action: voip_start_0 - * Pseudo action: mysqld_start_0 - * Pseudo action: dahdi_start_0 - * Resource action: fonulator start on asttest2 - * Resource action: asterisk start on asttest2 - * Resource action: iax2_mon start on asttest2 - * Resource action: httpd start on asttest2 - * Resource action: tftp start on asttest2 - * Pseudo action: voip_running_0 - * Resource action: mysqld monitor=1000 on asttest2 - * Resource action: dahdi monitor=1000 on asttest2 - * Resource action: fonulator monitor=1000 on asttest2 - * Resource action: asterisk monitor=1000 on asttest2 - * Resource action: iax2_mon monitor=60000 on asttest2 - * Resource action: httpd monitor=1000 on asttest2 - * Resource action: tftp monitor=60000 on asttest2 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ asttest1 asttest2 ] * Full List of Resources: * Resource Group: voip: - * mysqld (lsb:mysql): Started asttest2 - * dahdi (lsb:dahdi): Started asttest2 - * fonulator (lsb:fonulator): Started asttest2 - * asterisk (lsb:asterisk-11.0.1): Started asttest2 - * iax2_mon (lsb:iax2_mon): Started asttest2 - * httpd (lsb:apache2): Started asttest2 - * tftp (lsb:tftp-srce): Started asttest2 + * mysqld (lsb:mysql): Started asttest1 + * dahdi (lsb:dahdi): Started asttest1 + * fonulator (lsb:fonulator): Stopped + * asterisk (lsb:asterisk-11.0.1): Stopped + * iax2_mon (lsb:iax2_mon): Stopped + * httpd (lsb:apache2): Stopped + * tftp (lsb:tftp-srce): Stopped * Resource Group: ip_voip_routes: - * ip_voip_route_test1 (ocf:heartbeat:Route): Started asttest2 - * ip_voip_route_test2 (ocf:heartbeat:Route): Started asttest2 + * ip_voip_route_test1 (ocf:heartbeat:Route): Started asttest1 + * ip_voip_route_test2 (ocf:heartbeat:Route): Started asttest1 * Resource Group: ip_voip_addresses_p: - * ip_voip_vlan850 
(ocf:heartbeat:IPaddr2): Started asttest2 - * ip_voip_vlan998 (ocf:heartbeat:IPaddr2): Started asttest2 - * ip_voip_vlan851 (ocf:heartbeat:IPaddr2): Started asttest2 - * ip_voip_vlan852 (ocf:heartbeat:IPaddr2): Started asttest2 - * ip_voip_vlan853 (ocf:heartbeat:IPaddr2): Started asttest2 - * ip_voip_vlan854 (ocf:heartbeat:IPaddr2): Started asttest2 - * ip_voip_vlan855 (ocf:heartbeat:IPaddr2): Started asttest2 - * ip_voip_vlan856 (ocf:heartbeat:IPaddr2): Started asttest2 + * ip_voip_vlan850 (ocf:heartbeat:IPaddr2): Started asttest1 + * ip_voip_vlan998 (ocf:heartbeat:IPaddr2): Started asttest1 + * ip_voip_vlan851 (ocf:heartbeat:IPaddr2): Started asttest1 + * ip_voip_vlan852 (ocf:heartbeat:IPaddr2): Started asttest1 + * ip_voip_vlan853 (ocf:heartbeat:IPaddr2): Started asttest1 + * ip_voip_vlan854 (ocf:heartbeat:IPaddr2): Started asttest1 + * ip_voip_vlan855 (ocf:heartbeat:IPaddr2): Started asttest1 + * ip_voip_vlan856 (ocf:heartbeat:IPaddr2): Started asttest1 * Clone Set: cl_route [ip_voip_route_default]: * Started: [ asttest1 asttest2 ] - * fs_drbd (ocf:heartbeat:Filesystem): Started asttest2 + * fs_drbd (ocf:heartbeat:Filesystem): Started asttest1 * Clone Set: ms_drbd [drbd] (promotable): - * Promoted: [ asttest2 ] - * Unpromoted: [ asttest1 ] + * Promoted: [ asttest1 ] + * Unpromoted: [ asttest2 ] diff --git a/cts/scheduler/summary/group-fail.summary b/cts/scheduler/summary/group-fail.summary index ab29ea9ceb6..075c6038388 100644 --- a/cts/scheduler/summary/group-fail.summary +++ b/cts/scheduler/summary/group-fail.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -11,29 +13,21 @@ Current cluster status: Transition Summary: * Start rsc1 ( node1 ) - * Restart rsc2 ( node1 ) due to required rsc1 start + * Restart rsc2 ( node1 ) * Start rsc3 ( node1 ) - * Restart rsc4 ( node1 ) due to required rsc3 start + * Restart rsc4 ( node1 ) Executing Cluster Transition: - * Pseudo action: group1_stop_0 - * Resource action: rsc4 stop on node1 - * Resource action: rsc2 stop on node1 - * Pseudo action: group1_stopped_0 - * Pseudo action: group1_start_0 - * Resource action: rsc1 start on node1 - * Resource action: rsc2 start on node1 - * Resource action: rsc3 start on node1 - * Resource action: rsc4 start on node1 - * Pseudo action: group1_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Resource Group: group1: - * rsc1 (ocf:heartbeat:apache): Started node1 + * rsc1 (ocf:heartbeat:apache): Stopped * rsc2 (ocf:heartbeat:apache): Started node1 - * rsc3 (ocf:heartbeat:apache): Started node1 + * rsc3 (ocf:heartbeat:apache): Stopped * rsc4 (ocf:heartbeat:apache): Started node1 diff --git a/cts/scheduler/summary/group-stop-ordering.summary b/cts/scheduler/summary/group-stop-ordering.summary index 35b4cd1e72e..b6f1aaa9a70 100644 --- a/cts/scheduler/summary/group-stop-ordering.summary +++ b/cts/scheduler/summary/group-stop-ordering.summary @@ -1,6 +1,8 @@ 0 of 5 resource instances DISABLED and 1 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ fastvm-rhel-7-5-73 fastvm-rhel-7-5-74 ] @@ -17,6 +19,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ fastvm-rhel-7-5-73 fastvm-rhel-7-5-74 ] diff --git a/cts/scheduler/summary/group-unmanaged-stopped.summary b/cts/scheduler/summary/group-unmanaged-stopped.summary index 5164f92b337..c1a9e28d482 
100644 --- a/cts/scheduler/summary/group-unmanaged-stopped.summary +++ b/cts/scheduler/summary/group-unmanaged-stopped.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ pcmk-1 pcmk-2 ] @@ -10,13 +12,14 @@ Current cluster status: Transition Summary: * Stop r192.168.122.115 ( pcmk-1 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Pseudo action: group-1_stop_0 - * Resource action: r192.168.122.115 stop on pcmk-1 - * Pseudo action: group-1_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ pcmk-1 pcmk-2 ] @@ -24,4 +27,4 @@ Revised Cluster Status: * Resource Group: group-1: * r192.168.122.113 (ocf:heartbeat:IPaddr2): Started pcmk-1 * r192.168.122.114 (ocf:heartbeat:IPaddr2): Stopped (unmanaged) - * r192.168.122.115 (ocf:heartbeat:IPaddr2): Stopped + * r192.168.122.115 (ocf:heartbeat:IPaddr2): Started pcmk-1 diff --git a/cts/scheduler/summary/group-unmanaged.summary b/cts/scheduler/summary/group-unmanaged.summary index 7eac1468b4e..68ef66ae39d 100644 --- a/cts/scheduler/summary/group-unmanaged.summary +++ b/cts/scheduler/summary/group-unmanaged.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ pcmk-1 pcmk-2 ] @@ -13,6 +15,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ pcmk-1 pcmk-2 ] diff --git a/cts/scheduler/summary/group1.summary b/cts/scheduler/summary/group1.summary index ef9bd92f1f8..4b0e88c8937 100644 --- a/cts/scheduler/summary/group1.summary +++ b/cts/scheduler/summary/group1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -14,24 +16,15 @@ Transition Summary: * Start child_rsc3 ( node1 ) Executing Cluster Transition: - * Pseudo action: rsc1_start_0 - * Resource action: child_rsc1 monitor on node2 - * Resource action: child_rsc1 monitor on node1 - * Resource action: child_rsc2 monitor on node2 - * Resource action: child_rsc2 monitor on node1 - * Resource action: child_rsc3 monitor on node2 - * Resource action: child_rsc3 monitor on node1 - * Resource action: child_rsc1 start on node1 - * Resource action: child_rsc2 start on node1 - * Resource action: child_rsc3 start on node1 - * Pseudo action: rsc1_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Resource Group: rsc1: - * child_rsc1 (ocf:heartbeat:apache): Started node1 - * child_rsc2 (ocf:heartbeat:apache): Started node1 - * child_rsc3 (ocf:heartbeat:apache): Started node1 + * child_rsc1 (ocf:heartbeat:apache): Stopped + * child_rsc2 (ocf:heartbeat:apache): Stopped + * child_rsc3 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/group10.summary b/cts/scheduler/summary/group10.summary index 35890d1e47d..4726be780b5 100644 --- a/cts/scheduler/summary/group10.summary +++ b/cts/scheduler/summary/group10.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] @@ -24,37 +26,17 @@ Transition Summary: * Restart child_192.168.100.183 ( c001n01 ) due to required child_192.168.100.182 start Executing Cluster Transition: - * Pseudo action: group-1_stop_0 - * Resource action: child_192.168.100.183 stop on c001n01 - * Resource action: child_DoFencing:1 monitor on c001n08 - * Resource action: child_DoFencing:1 monitor on c001n03 - * Resource 
action: child_DoFencing:1 monitor on c001n01 - * Resource action: child_DoFencing:2 monitor on c001n08 - * Resource action: child_DoFencing:2 monitor on c001n02 - * Resource action: child_DoFencing:2 monitor on c001n01 - * Resource action: child_DoFencing:3 monitor on c001n03 - * Resource action: child_DoFencing:3 monitor on c001n02 - * Resource action: child_DoFencing:3 monitor on c001n01 - * Resource action: child_192.168.100.182 stop on c001n01 - * Resource action: child_192.168.100.181 stop on c001n01 - * Pseudo action: group-1_stopped_0 - * Pseudo action: group-1_start_0 - * Resource action: child_192.168.100.181 start on c001n01 - * Resource action: child_192.168.100.181 monitor=5000 on c001n01 - * Resource action: child_192.168.100.182 start on c001n01 - * Resource action: child_192.168.100.182 monitor=5000 on c001n01 - * Resource action: child_192.168.100.183 start on c001n01 - * Resource action: child_192.168.100.183 monitor=5000 on c001n01 - * Pseudo action: group-1_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] * Full List of Resources: * DcIPaddr (ocf:heartbeat:IPaddr): Started c001n08 * Resource Group: group-1: - * child_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n01 + * child_192.168.100.181 (ocf:heartbeat:IPaddr): FAILED c001n01 * child_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n01 * child_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n01 * rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08 diff --git a/cts/scheduler/summary/group11.summary b/cts/scheduler/summary/group11.summary index 4ba5c9da054..35d3fdc9694 100644 --- a/cts/scheduler/summary/group11.summary +++ b/cts/scheduler/summary/group11.summary @@ -1,6 +1,8 @@ 1 of 3 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 ] @@ -15,18 +17,15 @@ Transition Summary: * Stop rsc3 ( node1 ) due to node availability Executing Cluster Transition: - * Pseudo action: group1_stop_0 - * Resource action: rsc3 stop on node1 - * Resource action: rsc2 stop on node1 - * Pseudo action: group1_stopped_0 - * Pseudo action: group1_start_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 ] * Full List of Resources: * Resource Group: group1: * rsc1 (ocf:heartbeat:apache): Started node1 - * rsc2 (ocf:heartbeat:apache): Stopped (disabled) - * rsc3 (ocf:heartbeat:apache): Stopped + * rsc2 (ocf:heartbeat:apache): Started node1 (disabled) + * rsc3 (ocf:heartbeat:apache): Started node1 diff --git a/cts/scheduler/summary/group13.summary b/cts/scheduler/summary/group13.summary index 7f8fad1e468..b741f8dcecc 100644 --- a/cts/scheduler/summary/group13.summary +++ b/cts/scheduler/summary/group13.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ jamesltc ] @@ -12,16 +14,15 @@ Transition Summary: * Stop resource_nfs ( jamesltc ) due to node availability Executing Cluster Transition: - * Pseudo action: nfs_stop_0 - * Resource action: resource_nfs stop on jamesltc - * Pseudo action: nfs_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ jamesltc ] * Full List of Resources: * Resource Group: nfs: - * resource_nfs (lsb:nfs): Stopped + * resource_nfs (lsb:nfs): Started jamesltc * Resource Group: fs: * resource_fs (ocf:heartbeat:Filesystem): Stopped diff --git a/cts/scheduler/summary/group14.summary b/cts/scheduler/summary/group14.summary index 
80ded38d788..01325c7f0a3 100644 --- a/cts/scheduler/summary/group14.summary +++ b/cts/scheduler/summary/group14.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ c001n06 c001n07 ] * OFFLINE: [ c001n02 c001n03 c001n04 c001n05 ] @@ -54,18 +56,10 @@ Transition Summary: * Start ocf_msdummy:3 ( c001n07 ) due to no quorum (blocked) Executing Cluster Transition: - * Pseudo action: group-1_stop_0 - * Resource action: r192.168.100.181 stop on c001n06 - * Pseudo action: DoFencing_start_0 - * Pseudo action: group-1_stopped_0 - * Pseudo action: group-1_start_0 - * Resource action: child_DoFencing:0 start on c001n06 - * Resource action: child_DoFencing:1 start on c001n07 - * Pseudo action: DoFencing_running_0 - * Resource action: child_DoFencing:0 monitor=20000 on c001n06 - * Resource action: child_DoFencing:1 monitor=20000 on c001n07 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ c001n06 c001n07 ] * OFFLINE: [ c001n02 c001n03 c001n04 c001n05 ] @@ -73,7 +67,7 @@ Revised Cluster Status: * Full List of Resources: * DcIPaddr (ocf:heartbeat:IPaddr): Stopped * Resource Group: group-1: - * r192.168.100.181 (ocf:heartbeat:IPaddr): Stopped + * r192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n06 * r192.168.100.182 (ocf:heartbeat:IPaddr): Stopped * r192.168.100.183 (ocf:heartbeat:IPaddr): Stopped * lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Stopped @@ -85,8 +79,7 @@ Revised Cluster Status: * rsc_c001n06 (ocf:heartbeat:IPaddr): Stopped * rsc_c001n07 (ocf:heartbeat:IPaddr): Stopped * Clone Set: DoFencing [child_DoFencing]: - * Started: [ c001n06 c001n07 ] - * Stopped: [ c001n02 c001n03 c001n04 c001n05 ] + * Stopped: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ] * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique): * ocf_msdummy:0 (ocf:heartbeat:Stateful): Stopped * ocf_msdummy:1 (ocf:heartbeat:Stateful): Stopped diff --git a/cts/scheduler/summary/group15.summary b/cts/scheduler/summary/group15.summary index 82a32ca0492..f6c92601f50 100644 --- a/cts/scheduler/summary/group15.summary +++ b/cts/scheduler/summary/group15.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -18,25 +20,10 @@ Transition Summary: * Start rsc8 ( node1 ) Executing Cluster Transition: - * Resource action: rsc3 monitor on node2 - * Resource action: rsc3 monitor on node1 - * Resource action: rsc4 monitor on node2 - * Resource action: rsc4 monitor on node1 - * Resource action: rsc5 monitor on node2 - * Resource action: rsc5 monitor on node1 - * Pseudo action: bar_start_0 - * Resource action: rsc6 monitor on node2 - * Resource action: rsc6 monitor on node1 - * Resource action: rsc7 monitor on node2 - * Resource action: rsc7 monitor on node1 - * Resource action: rsc8 monitor on node2 - * Resource action: rsc8 monitor on node1 - * Resource action: rsc6 start on node1 - * Resource action: rsc7 start on node1 - * Resource action: rsc8 start on node1 - * Pseudo action: bar_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -46,6 +33,6 @@ Revised Cluster Status: * rsc4 (ocf:heartbeat:apache): Stopped * rsc5 (ocf:heartbeat:apache): Stopped * Resource Group: bar: - * rsc6 (ocf:heartbeat:apache): Started node1 - * rsc7 (ocf:heartbeat:apache): Started node1 - * rsc8 (ocf:heartbeat:apache): Started node1 + * rsc6 (ocf:heartbeat:apache): Stopped + * rsc7 (ocf:heartbeat:apache): Stopped + * rsc8 (ocf:heartbeat:apache): Stopped diff --git 
a/cts/scheduler/summary/group2.summary b/cts/scheduler/summary/group2.summary index f71faf484d4..0f3afc87a7e 100644 --- a/cts/scheduler/summary/group2.summary +++ b/cts/scheduler/summary/group2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -18,32 +20,17 @@ Transition Summary: * Start rsc3 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: child_rsc1 monitor on node2 - * Resource action: child_rsc1 monitor on node1 - * Resource action: child_rsc2 monitor on node2 - * Resource action: child_rsc2 monitor on node1 - * Resource action: child_rsc3 monitor on node2 - * Resource action: child_rsc3 monitor on node1 - * Resource action: rsc3 monitor on node2 - * Resource action: rsc3 monitor on node1 - * Resource action: rsc1 start on node1 - * Pseudo action: rsc2_start_0 - * Resource action: child_rsc1 start on node2 - * Resource action: child_rsc2 start on node2 - * Resource action: child_rsc3 start on node2 - * Pseudo action: rsc2_running_0 - * Resource action: rsc3 start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node1 + * rsc1 (ocf:heartbeat:apache): Stopped * Resource Group: rsc2: - * child_rsc1 (ocf:heartbeat:apache): Started node2 - * child_rsc2 (ocf:heartbeat:apache): Started node2 - * child_rsc3 (ocf:heartbeat:apache): Started node2 - * rsc3 (ocf:heartbeat:apache): Started node1 + * child_rsc1 (ocf:heartbeat:apache): Stopped + * child_rsc2 (ocf:heartbeat:apache): Stopped + * child_rsc3 (ocf:heartbeat:apache): Stopped + * rsc3 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/group3.summary b/cts/scheduler/summary/group3.summary index e1bdce41b22..768ec4a9d4e 100644 --- a/cts/scheduler/summary/group3.summary +++ b/cts/scheduler/summary/group3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -21,39 +23,19 @@ Transition Summary: * Start child_rsc6 ( node2 ) Executing Cluster Transition: - * Pseudo action: rsc1_start_0 - * Resource action: child_rsc1 monitor on node2 - * Resource action: child_rsc1 monitor on node1 - * Resource action: child_rsc2 monitor on node2 - * Resource action: child_rsc2 monitor on node1 - * Resource action: child_rsc3 monitor on node2 - * Resource action: child_rsc3 monitor on node1 - * Resource action: child_rsc4 monitor on node2 - * Resource action: child_rsc4 monitor on node1 - * Resource action: child_rsc5 monitor on node2 - * Resource action: child_rsc5 monitor on node1 - * Resource action: child_rsc6 monitor on node2 - * Resource action: child_rsc6 monitor on node1 - * Resource action: child_rsc1 start on node1 - * Resource action: child_rsc2 start on node1 - * Resource action: child_rsc3 start on node1 - * Pseudo action: rsc1_running_0 - * Pseudo action: rsc2_start_0 - * Resource action: child_rsc4 start on node2 - * Resource action: child_rsc5 start on node2 - * Resource action: child_rsc6 start on node2 - * Pseudo action: rsc2_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Resource Group: rsc1: - * child_rsc1 (ocf:heartbeat:apache): Started node1 - * child_rsc2 (ocf:heartbeat:apache): Started node1 - * child_rsc3 (ocf:heartbeat:apache): Started node1 + * child_rsc1 (ocf:heartbeat:apache): Stopped + * child_rsc2 
(ocf:heartbeat:apache): Stopped + * child_rsc3 (ocf:heartbeat:apache): Stopped * Resource Group: rsc2: - * child_rsc4 (ocf:heartbeat:apache): Started node2 - * child_rsc5 (ocf:heartbeat:apache): Started node2 - * child_rsc6 (ocf:heartbeat:apache): Started node2 + * child_rsc4 (ocf:heartbeat:apache): Stopped + * child_rsc5 (ocf:heartbeat:apache): Stopped + * child_rsc6 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/group4.summary b/cts/scheduler/summary/group4.summary index 386925f8262..42935ce301f 100644 --- a/cts/scheduler/summary/group4.summary +++ b/cts/scheduler/summary/group4.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -13,13 +15,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: rsc1 monitor on node1 - * Resource action: child_rsc1 monitor on node1 - * Resource action: child_rsc2 monitor on node1 - * Resource action: child_rsc3 monitor on node1 - * Resource action: rsc3 monitor on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/group5.summary b/cts/scheduler/summary/group5.summary index a95ec3f22f9..a61592bab7d 100644 --- a/cts/scheduler/summary/group5.summary +++ b/cts/scheduler/summary/group5.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -18,34 +20,17 @@ Transition Summary: * Move rsc3 ( node1 -> node2 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: child_rsc1 monitor on node2 - * Resource action: child_rsc2 monitor on node2 - * Resource action: child_rsc3 monitor on node2 - * Resource action: rsc3 stop on node1 - * Resource action: rsc3 monitor on node2 - * Pseudo action: rsc2_stop_0 - * Resource action: child_rsc3 stop on node1 - * Resource action: child_rsc2 stop on node1 - * Resource action: child_rsc1 stop on node1 - * Pseudo action: rsc2_stopped_0 - * Resource action: rsc1 stop on node1 - * Resource action: rsc1 start on node2 - * Pseudo action: rsc2_start_0 - * Resource action: child_rsc1 start on node2 - * Resource action: child_rsc2 start on node2 - * Resource action: child_rsc3 start on node2 - * Pseudo action: rsc2_running_0 - * Resource action: rsc3 start on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node2 + * rsc1 (ocf:heartbeat:apache): Started node1 * Resource Group: rsc2: - * child_rsc1 (ocf:heartbeat:apache): Started node2 - * child_rsc2 (ocf:heartbeat:apache): Started node2 - * child_rsc3 (ocf:heartbeat:apache): Started node2 - * rsc3 (ocf:heartbeat:apache): Started node2 + * child_rsc1 (ocf:heartbeat:apache): Started node1 + * child_rsc2 (ocf:heartbeat:apache): Started node1 + * child_rsc3 (ocf:heartbeat:apache): Started node1 + * rsc3 (ocf:heartbeat:apache): Started node1 diff --git a/cts/scheduler/summary/group6.summary b/cts/scheduler/summary/group6.summary index 7ad48238366..366c3b0469f 100644 --- a/cts/scheduler/summary/group6.summary +++ b/cts/scheduler/summary/group6.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -21,43 +23,19 @@ Transition Summary: * Move child_rsc6 ( node1 -> node2 ) Executing Cluster Transition: - * Resource action: child_rsc1 monitor on node2 - * Resource action: child_rsc2 monitor on node2 - * Resource action: child_rsc3 monitor 
on node2 - * Pseudo action: rsc2_stop_0 - * Resource action: child_rsc4 monitor on node2 - * Resource action: child_rsc5 monitor on node2 - * Resource action: child_rsc6 stop on node1 - * Resource action: child_rsc6 monitor on node2 - * Resource action: child_rsc5 stop on node1 - * Resource action: child_rsc4 stop on node1 - * Pseudo action: rsc2_stopped_0 - * Pseudo action: rsc1_stop_0 - * Resource action: child_rsc3 stop on node1 - * Resource action: child_rsc2 stop on node1 - * Resource action: child_rsc1 stop on node1 - * Pseudo action: rsc1_stopped_0 - * Pseudo action: rsc1_start_0 - * Resource action: child_rsc1 start on node2 - * Resource action: child_rsc2 start on node2 - * Resource action: child_rsc3 start on node2 - * Pseudo action: rsc1_running_0 - * Pseudo action: rsc2_start_0 - * Resource action: child_rsc4 start on node2 - * Resource action: child_rsc5 start on node2 - * Resource action: child_rsc6 start on node2 - * Pseudo action: rsc2_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Resource Group: rsc1: - * child_rsc1 (ocf:heartbeat:apache): Started node2 - * child_rsc2 (ocf:heartbeat:apache): Started node2 - * child_rsc3 (ocf:heartbeat:apache): Started node2 + * child_rsc1 (ocf:heartbeat:apache): Started node1 + * child_rsc2 (ocf:heartbeat:apache): Started node1 + * child_rsc3 (ocf:heartbeat:apache): Started node1 * Resource Group: rsc2: - * child_rsc4 (ocf:heartbeat:apache): Started node2 - * child_rsc5 (ocf:heartbeat:apache): Started node2 - * child_rsc6 (ocf:heartbeat:apache): Started node2 + * child_rsc4 (ocf:heartbeat:apache): Started node1 + * child_rsc5 (ocf:heartbeat:apache): Started node1 + * child_rsc6 (ocf:heartbeat:apache): Started node1 diff --git a/cts/scheduler/summary/group7.summary b/cts/scheduler/summary/group7.summary index 79ce76bd814..1c73f309785 100644 --- a/cts/scheduler/summary/group7.summary +++ b/cts/scheduler/summary/group7.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] @@ -23,50 +25,20 @@ Transition Summary: * Start child_rsc6 ( node2 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node3 - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Pseudo action: rsc2_start_0 - * Resource action: child_rsc1 monitor on node3 - * Resource action: child_rsc1 monitor on node2 - * Resource action: child_rsc1 monitor on node1 - * Resource action: child_rsc2 monitor on node3 - * Resource action: child_rsc2 monitor on node2 - * Resource action: child_rsc2 monitor on node1 - * Resource action: child_rsc3 monitor on node3 - * Resource action: child_rsc3 monitor on node2 - * Resource action: child_rsc3 monitor on node1 - * Resource action: child_rsc4 monitor on node3 - * Resource action: child_rsc4 monitor on node2 - * Resource action: child_rsc4 monitor on node1 - * Resource action: child_rsc5 monitor on node3 - * Resource action: child_rsc5 monitor on node2 - * Resource action: child_rsc5 monitor on node1 - * Resource action: child_rsc6 monitor on node3 - * Resource action: child_rsc6 monitor on node2 - * Resource action: child_rsc6 monitor on node1 - * Resource action: rsc1 start on node1 - * Resource action: child_rsc1 start on node2 - * Resource action: child_rsc2 start on node2 - * Resource action: child_rsc3 start on node2 - * Pseudo action: rsc2_running_0 - * Pseudo action: rsc3_start_0 - * Resource action: child_rsc4 start on node2 - * Resource action: 
child_rsc5 start on node2 - * Resource action: child_rsc6 start on node2 - * Pseudo action: rsc3_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node1 + * rsc1 (ocf:heartbeat:apache): Stopped * Resource Group: rsc2: - * child_rsc1 (ocf:heartbeat:apache): Started node2 - * child_rsc2 (ocf:heartbeat:apache): Started node2 - * child_rsc3 (ocf:heartbeat:apache): Started node2 + * child_rsc1 (ocf:heartbeat:apache): Stopped + * child_rsc2 (ocf:heartbeat:apache): Stopped + * child_rsc3 (ocf:heartbeat:apache): Stopped * Resource Group: rsc3: - * child_rsc4 (ocf:heartbeat:apache): Started node2 - * child_rsc5 (ocf:heartbeat:apache): Started node2 - * child_rsc6 (ocf:heartbeat:apache): Started node2 + * child_rsc4 (ocf:heartbeat:apache): Stopped + * child_rsc5 (ocf:heartbeat:apache): Stopped + * child_rsc6 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/group8.summary b/cts/scheduler/summary/group8.summary index 37ef66cc7e8..a3315a7227d 100644 --- a/cts/scheduler/summary/group8.summary +++ b/cts/scheduler/summary/group8.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 ] * OFFLINE: [ node2 node3 ] @@ -21,31 +23,20 @@ Transition Summary: * Start child_rsc3 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node1 - * Pseudo action: rsc2_start_0 - * Resource action: child_rsc1 monitor on node1 - * Resource action: child_rsc2 monitor on node1 - * Resource action: child_rsc3 monitor on node1 - * Resource action: child_rsc4 monitor on node1 - * Resource action: child_rsc5 monitor on node1 - * Resource action: child_rsc6 monitor on node1 - * Resource action: rsc1 start on node1 - * Resource action: child_rsc1 start on node1 - * Resource action: child_rsc2 start on node1 - * Resource action: child_rsc3 start on node1 - * Pseudo action: rsc2_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 ] * OFFLINE: [ node2 node3 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node1 + * rsc1 (ocf:heartbeat:apache): Stopped * Resource Group: rsc2: - * child_rsc1 (ocf:heartbeat:apache): Started node1 - * child_rsc2 (ocf:heartbeat:apache): Started node1 - * child_rsc3 (ocf:heartbeat:apache): Started node1 + * child_rsc1 (ocf:heartbeat:apache): Stopped + * child_rsc2 (ocf:heartbeat:apache): Stopped + * child_rsc3 (ocf:heartbeat:apache): Stopped * Resource Group: rsc3: * child_rsc4 (ocf:heartbeat:apache): Stopped * child_rsc5 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/group9.summary b/cts/scheduler/summary/group9.summary index 57cd1445d3a..16182dacf3e 100644 --- a/cts/scheduler/summary/group9.summary +++ b/cts/scheduler/summary/group9.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -16,40 +18,16 @@ Current cluster status: Transition Summary: * Recover rsc4 ( node1 ) - * Restart rsc5 ( node1 ) due to required rsc4 start + * Restart rsc5 ( node1 ) * Move rsc6 ( node1 -> node2 ) * Recover rsc7 ( node1 -> node2 ) * Move rsc8 ( node1 -> node2 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc2 monitor on node2 - * Pseudo action: foo_stop_0 - * Resource action: rsc3 monitor on node2 - * Resource action: rsc4 monitor on node2 - * Resource action: rsc5 stop on node1 - * Resource action: rsc5 monitor on node2 - * Pseudo action: 
bar_stop_0 - * Resource action: rsc6 monitor on node2 - * Resource action: rsc7 monitor on node2 - * Resource action: rsc8 stop on node1 - * Resource action: rsc8 monitor on node2 - * Resource action: rsc4 stop on node1 - * Resource action: rsc7 stop on node1 - * Pseudo action: foo_stopped_0 - * Pseudo action: foo_start_0 - * Resource action: rsc4 start on node1 - * Resource action: rsc5 start on node1 - * Resource action: rsc6 stop on node1 - * Pseudo action: foo_running_0 - * Pseudo action: bar_stopped_0 - * Pseudo action: bar_start_0 - * Resource action: rsc6 start on node2 - * Resource action: rsc7 start on node2 - * Resource action: rsc8 start on node2 - * Pseudo action: bar_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -58,9 +36,9 @@ Revised Cluster Status: * rsc2 (ocf:heartbeat:apache): Started node1 * Resource Group: foo: * rsc3 (ocf:heartbeat:apache): Started node1 - * rsc4 (ocf:heartbeat:apache): Started node1 + * rsc4 (ocf:heartbeat:apache): FAILED node1 * rsc5 (ocf:heartbeat:apache): Started node1 * Resource Group: bar: - * rsc6 (ocf:heartbeat:apache): Started node2 - * rsc7 (ocf:heartbeat:apache): Started node2 - * rsc8 (ocf:heartbeat:apache): Started node2 + * rsc6 (ocf:heartbeat:apache): Started node1 + * rsc7 (ocf:heartbeat:apache): FAILED node1 + * rsc8 (ocf:heartbeat:apache): Started node1 diff --git a/cts/scheduler/summary/guest-host-not-fenceable.summary b/cts/scheduler/summary/guest-host-not-fenceable.summary index 9e3b5db405c..cf78cc2d846 100644 --- a/cts/scheduler/summary/guest-host-not-fenceable.summary +++ b/cts/scheduler/summary/guest-host-not-fenceable.summary @@ -1,10 +1,13 @@ Using the original execution date of: 2019-08-26 04:52:42Z Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Node node2: UNCLEAN (offline) * Node node3: UNCLEAN (offline) * Online: [ node1 ] - * GuestOnline: [ galera-bundle-0 rabbitmq-bundle-0 ] + * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 ] * Full List of Resources: * Container bundle set: rabbitmq-bundle [192.168.122.139:8787/rhosp13/openstack-rabbitmq:pcmklatest]: @@ -43,47 +46,25 @@ Transition Summary: * Stop stonith-fence_ipmilan-node2 ( node3 ) due to no quorum (blocked) Executing Cluster Transition: - * Pseudo action: rabbitmq-bundle-clone_pre_notify_stop_0 - * Pseudo action: galera-bundle_demote_0 - * Pseudo action: rabbitmq-bundle_stop_0 - * Resource action: rabbitmq notify on rabbitmq-bundle-0 - * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_stop_0 - * Pseudo action: rabbitmq-bundle-clone_stop_0 - * Pseudo action: galera-bundle-master_demote_0 - * Resource action: rabbitmq stop on rabbitmq-bundle-0 - * Pseudo action: rabbitmq-bundle-clone_stopped_0 - * Resource action: rabbitmq-bundle-0 stop on node1 - * Resource action: rabbitmq-bundle-0 cancel=60000 on node1 - * Resource action: galera demote on galera-bundle-0 - * Pseudo action: galera-bundle-master_demoted_0 - * Pseudo action: galera-bundle_demoted_0 - * Pseudo action: galera-bundle_stop_0 - * Pseudo action: rabbitmq-bundle-clone_post_notify_stopped_0 - * Resource action: rabbitmq-bundle-docker-0 stop on node1 - * Pseudo action: galera-bundle-master_stop_0 - * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_stopped_0 - * Resource action: galera stop on galera-bundle-0 - * Pseudo action: galera-bundle-master_stopped_0 - * Resource action: galera-bundle-0 
stop on node1 - * Resource action: galera-bundle-0 cancel=60000 on node1 - * Pseudo action: rabbitmq-bundle_stopped_0 - * Resource action: galera-bundle-docker-0 stop on node1 - * Pseudo action: galera-bundle_stopped_0 Using the original execution date of: 2019-08-26 04:52:42Z Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Node node2: UNCLEAN (offline) * Node node3: UNCLEAN (offline) * Online: [ node1 ] + * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 ] * Full List of Resources: * Container bundle set: rabbitmq-bundle [192.168.122.139:8787/rhosp13/openstack-rabbitmq:pcmklatest]: - * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped + * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started node1 * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): FAILED node2 (UNCLEAN) * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): FAILED node3 (UNCLEAN) * Container bundle set: galera-bundle [192.168.122.139:8787/rhosp13/openstack-mariadb:pcmklatest]: - * galera-bundle-0 (ocf:heartbeat:galera): Stopped + * galera-bundle-0 (ocf:heartbeat:galera): FAILED Promoted node1 * galera-bundle-1 (ocf:heartbeat:galera): FAILED Promoted node2 (UNCLEAN) * galera-bundle-2 (ocf:heartbeat:galera): FAILED Promoted node3 (UNCLEAN) * stonith-fence_ipmilan-node1 (stonith:fence_ipmilan): Started node2 (UNCLEAN) diff --git a/cts/scheduler/summary/guest-node-cleanup.summary b/cts/scheduler/summary/guest-node-cleanup.summary index f68fb4fa446..e8bc2153f61 100644 --- a/cts/scheduler/summary/guest-node-cleanup.summary +++ b/cts/scheduler/summary/guest-node-cleanup.summary @@ -1,8 +1,10 @@ Using the original execution date of: 2018-10-15 16:02:04Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] - * GuestOnline: [ lxc2 ] + * GuestOnline: [ lxc1 lxc2 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-2 @@ -10,6 +12,7 @@ Current cluster status: * container1 (ocf:heartbeat:VirtualDomain): FAILED * container2 (ocf:heartbeat:VirtualDomain): Started rhel7-1 * Clone Set: lxc-ms-master [lxc-ms] (promotable): + * lxc-ms (ocf:pacemaker:Stateful): FAILED Promoted lxc1 * Unpromoted: [ lxc2 ] * Stopped: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] @@ -20,27 +23,11 @@ Transition Summary: * Restart lxc1 ( rhel7-1 ) due to required container1 start Executing Cluster Transition: - * Resource action: container1 monitor on rhel7-1 - * Pseudo action: lxc-ms-master_demote_0 - * Resource action: lxc1 stop on rhel7-1 - * Pseudo action: stonith-lxc1-reboot on lxc1 - * Resource action: container1 start on rhel7-1 - * Pseudo action: lxc-ms_demote_0 - * Pseudo action: lxc-ms-master_demoted_0 - * Pseudo action: lxc-ms-master_stop_0 - * Resource action: lxc1 start on rhel7-1 - * Resource action: lxc1 monitor=30000 on rhel7-1 - * Pseudo action: lxc-ms_stop_0 - * Pseudo action: lxc-ms-master_stopped_0 - * Pseudo action: lxc-ms-master_start_0 - * Resource action: lxc-ms start on lxc1 - * Pseudo action: lxc-ms-master_running_0 - * Pseudo action: lxc-ms-master_promote_0 - * Resource action: lxc-ms promote on lxc1 - * Pseudo action: lxc-ms-master_promoted_0 Using the original execution date of: 2018-10-15 16:02:04Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] * GuestOnline: [ lxc1 lxc2 ] @@ -48,8 +35,9 @@ Revised Cluster Status: * Full List of 
Resources: * Fencing (stonith:fence_xvm): Started rhel7-2 * FencingPass (stonith:fence_dummy): Started rhel7-3 - * container1 (ocf:heartbeat:VirtualDomain): Started rhel7-1 + * container1 (ocf:heartbeat:VirtualDomain): FAILED * container2 (ocf:heartbeat:VirtualDomain): Started rhel7-1 * Clone Set: lxc-ms-master [lxc-ms] (promotable): - * Promoted: [ lxc1 ] + * lxc-ms (ocf:pacemaker:Stateful): FAILED Promoted lxc1 * Unpromoted: [ lxc2 ] + * Stopped: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] diff --git a/cts/scheduler/summary/guest-node-host-dies.summary b/cts/scheduler/summary/guest-node-host-dies.summary index 84074c1f0a8..35572fb6f82 100644 --- a/cts/scheduler/summary/guest-node-host-dies.summary +++ b/cts/scheduler/summary/guest-node-host-dies.summary @@ -1,7 +1,10 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node rhel7-1: UNCLEAN (offline) * Online: [ rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] + * GuestOnline: [ lxc1 lxc2 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-4 @@ -9,6 +12,8 @@ Current cluster status: * container1 (ocf:heartbeat:VirtualDomain): FAILED rhel7-1 (UNCLEAN) * container2 (ocf:heartbeat:VirtualDomain): FAILED rhel7-1 (UNCLEAN) * Clone Set: lxc-ms-master [lxc-ms] (promotable): + * lxc-ms (ocf:pacemaker:Stateful): FAILED Promoted lxc1 + * lxc-ms (ocf:pacemaker:Stateful): FAILED lxc2 * Stopped: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] Transition Summary: @@ -25,58 +30,21 @@ Transition Summary: * Move lxc2 ( rhel7-1 -> rhel7-3 ) Executing Cluster Transition: - * Resource action: Fencing stop on rhel7-4 - * Pseudo action: lxc-ms-master_demote_0 - * Pseudo action: lxc1_stop_0 - * Resource action: lxc1 monitor on rhel7-5 - * Resource action: lxc1 monitor on rhel7-4 - * Resource action: lxc1 monitor on rhel7-3 - * Pseudo action: lxc2_stop_0 - * Resource action: lxc2 monitor on rhel7-5 - * Resource action: lxc2 monitor on rhel7-4 - * Resource action: lxc2 monitor on rhel7-2 - * Fencing rhel7-1 (reboot) - * Pseudo action: rsc_rhel7-1_stop_0 - * Pseudo action: container1_stop_0 - * Pseudo action: container2_stop_0 - * Pseudo action: stonith-lxc2-reboot on lxc2 - * Pseudo action: stonith-lxc1-reboot on lxc1 - * Resource action: Fencing start on rhel7-4 - * Resource action: Fencing monitor=120000 on rhel7-4 - * Resource action: rsc_rhel7-1 start on rhel7-5 - * Resource action: container1 start on rhel7-2 - * Resource action: container2 start on rhel7-3 - * Pseudo action: lxc-ms_demote_0 - * Pseudo action: lxc-ms-master_demoted_0 - * Pseudo action: lxc-ms-master_stop_0 - * Resource action: lxc1 start on rhel7-2 - * Resource action: lxc2 start on rhel7-3 - * Resource action: rsc_rhel7-1 monitor=5000 on rhel7-5 - * Pseudo action: lxc-ms_stop_0 - * Pseudo action: lxc-ms_stop_0 - * Pseudo action: lxc-ms-master_stopped_0 - * Pseudo action: lxc-ms-master_start_0 - * Resource action: lxc1 monitor=30000 on rhel7-2 - * Resource action: lxc2 monitor=30000 on rhel7-3 - * Resource action: lxc-ms start on lxc1 - * Resource action: lxc-ms start on lxc2 - * Pseudo action: lxc-ms-master_running_0 - * Resource action: lxc-ms monitor=10000 on lxc2 - * Pseudo action: lxc-ms-master_promote_0 - * Resource action: lxc-ms promote on lxc1 - * Pseudo action: lxc-ms-master_promoted_0 Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node rhel7-1: UNCLEAN (offline) * Online: [ rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] - * OFFLINE: [ rhel7-1 ] * GuestOnline: [ lxc1 lxc2 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-4 - * rsc_rhel7-1 
(ocf:heartbeat:IPaddr2): Started rhel7-5 - * container1 (ocf:heartbeat:VirtualDomain): Started rhel7-2 - * container2 (ocf:heartbeat:VirtualDomain): Started rhel7-3 + * rsc_rhel7-1 (ocf:heartbeat:IPaddr2): Started rhel7-1 (UNCLEAN) + * container1 (ocf:heartbeat:VirtualDomain): FAILED rhel7-1 (UNCLEAN) + * container2 (ocf:heartbeat:VirtualDomain): FAILED rhel7-1 (UNCLEAN) * Clone Set: lxc-ms-master [lxc-ms] (promotable): - * Promoted: [ lxc1 ] - * Unpromoted: [ lxc2 ] + * lxc-ms (ocf:pacemaker:Stateful): FAILED Promoted lxc1 + * lxc-ms (ocf:pacemaker:Stateful): FAILED lxc2 + * Stopped: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] diff --git a/cts/scheduler/summary/history-1.summary b/cts/scheduler/summary/history-1.summary index 74d31ec281a..6d47c0ddb0a 100644 --- a/cts/scheduler/summary/history-1.summary +++ b/cts/scheduler/summary/history-1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ pcmk-1 pcmk-2 pcmk-3 ] * OFFLINE: [ pcmk-4 ] @@ -29,6 +31,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ pcmk-1 pcmk-2 pcmk-3 ] * OFFLINE: [ pcmk-4 ] diff --git a/cts/scheduler/summary/honor_stonith_rsc_order1.summary b/cts/scheduler/summary/honor_stonith_rsc_order1.summary index 392cebc98f0..c565dcd391b 100644 --- a/cts/scheduler/summary/honor_stonith_rsc_order1.summary +++ b/cts/scheduler/summary/honor_stonith_rsc_order1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] @@ -15,24 +17,16 @@ Transition Summary: * Start A ( fc16-builder ) Executing Cluster Transition: - * Resource action: S_A:0 monitor on fc16-builder - * Pseudo action: S_GROUP_start_0 - * Resource action: S_B monitor on fc16-builder - * Resource action: A monitor on fc16-builder - * Resource action: S_B start on fc16-builder - * Resource action: A start on fc16-builder - * Pseudo action: S_GROUP_running_0 - * Pseudo action: S_CLONE_start_0 - * Resource action: S_A:0 start on fc16-builder - * Pseudo action: S_CLONE_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * Full List of Resources: * Clone Set: S_CLONE [S_A]: - * Started: [ fc16-builder ] + * Stopped: [ fc16-builder ] * Resource Group: S_GROUP: - * S_B (stonith:fence_xvm): Started fc16-builder - * A (ocf:pacemaker:Dummy): Started fc16-builder + * S_B (stonith:fence_xvm): Stopped + * A (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/honor_stonith_rsc_order2.summary b/cts/scheduler/summary/honor_stonith_rsc_order2.summary index 281178f57a8..123b2498837 100644 --- a/cts/scheduler/summary/honor_stonith_rsc_order2.summary +++ b/cts/scheduler/summary/honor_stonith_rsc_order2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] @@ -19,30 +21,18 @@ Transition Summary: * Start A ( fc16-builder ) Executing Cluster Transition: - * Resource action: S_A:0 monitor on fc16-builder - * Pseudo action: S_GROUP_start_0 - * Resource action: S_B monitor on fc16-builder - * Resource action: S_C monitor on fc16-builder - * Resource action: S_D monitor on fc16-builder - * Resource action: A monitor on fc16-builder - * Resource action: S_B start on fc16-builder - * Resource action: S_C start on fc16-builder - * Resource action: S_D start on fc16-builder - * Resource action: A start on fc16-builder - * Pseudo action: S_GROUP_running_0 - * Pseudo action: S_CLONE_start_0 - * Resource action: S_A:0 start 
on fc16-builder - * Pseudo action: S_CLONE_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * Full List of Resources: * Clone Set: S_CLONE [S_A]: - * Started: [ fc16-builder ] + * Stopped: [ fc16-builder ] * Resource Group: S_GROUP: - * S_B (stonith:fence_xvm): Started fc16-builder - * S_C (stonith:fence_xvm): Started fc16-builder - * S_D (stonith:fence_xvm): Started fc16-builder - * A (ocf:pacemaker:Dummy): Started fc16-builder + * S_B (stonith:fence_xvm): Stopped + * S_C (stonith:fence_xvm): Stopped + * S_D (stonith:fence_xvm): Stopped + * A (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/honor_stonith_rsc_order3.summary b/cts/scheduler/summary/honor_stonith_rsc_order3.summary index 3366a6b2fad..4018215d12f 100644 --- a/cts/scheduler/summary/honor_stonith_rsc_order3.summary +++ b/cts/scheduler/summary/honor_stonith_rsc_order3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] @@ -17,30 +19,16 @@ Transition Summary: * Start A ( fc16-builder ) Executing Cluster Transition: - * Resource action: S_A:0 monitor on fc16-builder - * Resource action: S_B:0 monitor on fc16-builder - * Resource action: S_C:0 monitor on fc16-builder - * Resource action: S_D:0 monitor on fc16-builder - * Pseudo action: S_CLONE2_start_0 - * Resource action: A monitor on fc16-builder - * Pseudo action: S_GROUP:0_start_0 - * Resource action: S_B:0 start on fc16-builder - * Resource action: S_C:0 start on fc16-builder - * Resource action: S_D:0 start on fc16-builder - * Resource action: A start on fc16-builder - * Pseudo action: S_GROUP:0_running_0 - * Pseudo action: S_CLONE2_running_0 - * Pseudo action: S_CLONE_start_0 - * Resource action: S_A:0 start on fc16-builder - * Pseudo action: S_CLONE_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * Full List of Resources: * Clone Set: S_CLONE [S_A]: - * Started: [ fc16-builder ] + * Stopped: [ fc16-builder ] * Clone Set: S_CLONE2 [S_GROUP]: - * Started: [ fc16-builder ] - * A (ocf:pacemaker:Dummy): Started fc16-builder + * Stopped: [ fc16-builder ] + * A (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/honor_stonith_rsc_order4.summary b/cts/scheduler/summary/honor_stonith_rsc_order4.summary index d93ffdf3e10..9a4a904ed81 100644 --- a/cts/scheduler/summary/honor_stonith_rsc_order4.summary +++ b/cts/scheduler/summary/honor_stonith_rsc_order4.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] @@ -13,18 +15,14 @@ Transition Summary: * Start A ( fc16-builder ) Executing Cluster Transition: - * Resource action: S_A monitor on fc16-builder - * Resource action: S_B monitor on fc16-builder - * Resource action: A monitor on fc16-builder - * Resource action: S_B start on fc16-builder - * Resource action: A start on fc16-builder - * Resource action: S_A start on fc16-builder Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * Full List of Resources: - * S_A (stonith:fence_xvm): Started fc16-builder - * S_B (stonith:fence_xvm): Started fc16-builder - * A (ocf:pacemaker:Dummy): Started fc16-builder + * S_A (stonith:fence_xvm): Stopped + * S_B (stonith:fence_xvm): Stopped + * A (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/ignore_stonith_rsc_order1.summary b/cts/scheduler/summary/ignore_stonith_rsc_order1.summary index 0331f1200e7..cb5f6385456 100644 --- 
a/cts/scheduler/summary/ignore_stonith_rsc_order1.summary +++ b/cts/scheduler/summary/ignore_stonith_rsc_order1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] @@ -11,15 +13,13 @@ Transition Summary: * Start A ( fc16-builder ) Executing Cluster Transition: - * Resource action: S_A monitor on fc16-builder - * Resource action: A monitor on fc16-builder - * Resource action: A start on fc16-builder - * Resource action: S_A start on fc16-builder Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * Full List of Resources: - * S_A (stonith:fence_xvm): Started fc16-builder - * A (ocf:pacemaker:Dummy): Started fc16-builder + * S_A (stonith:fence_xvm): Stopped + * A (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/ignore_stonith_rsc_order2.summary b/cts/scheduler/summary/ignore_stonith_rsc_order2.summary index cd37f0b98b9..4411411d3db 100644 --- a/cts/scheduler/summary/ignore_stonith_rsc_order2.summary +++ b/cts/scheduler/summary/ignore_stonith_rsc_order2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] @@ -14,21 +16,15 @@ Transition Summary: * Start S_B ( fc16-builder ) Executing Cluster Transition: - * Resource action: S_A monitor on fc16-builder - * Pseudo action: MIXED_GROUP_start_0 - * Resource action: A monitor on fc16-builder - * Resource action: S_B monitor on fc16-builder - * Resource action: A start on fc16-builder - * Resource action: S_B start on fc16-builder - * Pseudo action: MIXED_GROUP_running_0 - * Resource action: S_A start on fc16-builder Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * Full List of Resources: - * S_A (stonith:fence_xvm): Started fc16-builder + * S_A (stonith:fence_xvm): Stopped * Resource Group: MIXED_GROUP: - * A (ocf:pacemaker:Dummy): Started fc16-builder - * S_B (stonith:fence_xvm): Started fc16-builder + * A (ocf:pacemaker:Dummy): Stopped + * S_B (stonith:fence_xvm): Stopped diff --git a/cts/scheduler/summary/ignore_stonith_rsc_order3.summary b/cts/scheduler/summary/ignore_stonith_rsc_order3.summary index 36b5bf551ba..e152d7cc6ff 100644 --- a/cts/scheduler/summary/ignore_stonith_rsc_order3.summary +++ b/cts/scheduler/summary/ignore_stonith_rsc_order3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] @@ -15,24 +17,16 @@ Transition Summary: * Start S_B ( fc16-builder ) Executing Cluster Transition: - * Resource action: S_A:0 monitor on fc16-builder - * Pseudo action: MIXED_GROUP_start_0 - * Resource action: A monitor on fc16-builder - * Resource action: S_B monitor on fc16-builder - * Resource action: A start on fc16-builder - * Resource action: S_B start on fc16-builder - * Pseudo action: MIXED_GROUP_running_0 - * Pseudo action: S_CLONE_start_0 - * Resource action: S_A:0 start on fc16-builder - * Pseudo action: S_CLONE_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * Full List of Resources: * Clone Set: S_CLONE [S_A]: - * Started: [ fc16-builder ] + * Stopped: [ fc16-builder ] * Resource Group: MIXED_GROUP: - * A (ocf:pacemaker:Dummy): Started fc16-builder - * S_B (stonith:fence_xvm): Started fc16-builder + * A (ocf:pacemaker:Dummy): Stopped + * S_B (stonith:fence_xvm): Stopped diff --git a/cts/scheduler/summary/ignore_stonith_rsc_order4.summary b/cts/scheduler/summary/ignore_stonith_rsc_order4.summary index e56f65caf6b..e0496efa7a4 
100644 --- a/cts/scheduler/summary/ignore_stonith_rsc_order4.summary +++ b/cts/scheduler/summary/ignore_stonith_rsc_order4.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] @@ -14,25 +16,15 @@ Transition Summary: * Start S_B:0 ( fc16-builder ) Executing Cluster Transition: - * Resource action: S_A:0 monitor on fc16-builder - * Resource action: A:0 monitor on fc16-builder - * Resource action: S_B:0 monitor on fc16-builder - * Pseudo action: S_CLONE2_start_0 - * Pseudo action: MIXED_GROUP:0_start_0 - * Resource action: A:0 start on fc16-builder - * Resource action: S_B:0 start on fc16-builder - * Pseudo action: MIXED_GROUP:0_running_0 - * Pseudo action: S_CLONE2_running_0 - * Pseudo action: S_CLONE_start_0 - * Resource action: S_A:0 start on fc16-builder - * Pseudo action: S_CLONE_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * Full List of Resources: * Clone Set: S_CLONE [S_A]: - * Started: [ fc16-builder ] + * Stopped: [ fc16-builder ] * Clone Set: S_CLONE2 [MIXED_GROUP]: - * Started: [ fc16-builder ] + * Stopped: [ fc16-builder ] diff --git a/cts/scheduler/summary/inc0.summary b/cts/scheduler/summary/inc0.summary index 947d5e5861e..02c938ad2ab 100644 --- a/cts/scheduler/summary/inc0.summary +++ b/cts/scheduler/summary/inc0.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -17,31 +19,17 @@ Transition Summary: * Start child_rsc1:3 ( node2 ) Executing Cluster Transition: - * Resource action: child_rsc1:0 monitor on node2 - * Resource action: child_rsc1:0 monitor on node1 - * Resource action: child_rsc1:1 monitor on node2 - * Resource action: child_rsc1:1 monitor on node1 - * Resource action: child_rsc1:2 monitor on node2 - * Resource action: child_rsc1:2 monitor on node1 - * Resource action: child_rsc1:3 monitor on node2 - * Resource action: child_rsc1:3 monitor on node1 - * Resource action: child_rsc1:4 monitor on node2 - * Resource action: child_rsc1:4 monitor on node1 - * Pseudo action: rsc1_start_0 - * Resource action: child_rsc1:0 start on node1 - * Resource action: child_rsc1:1 start on node2 - * Resource action: child_rsc1:2 start on node1 - * Resource action: child_rsc1:3 start on node2 - * Pseudo action: rsc1_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: rsc1 [child_rsc1] (unique): - * child_rsc1:0 (ocf:heartbeat:apache): Started node1 - * child_rsc1:1 (ocf:heartbeat:apache): Started node2 - * child_rsc1:2 (ocf:heartbeat:apache): Started node1 - * child_rsc1:3 (ocf:heartbeat:apache): Started node2 + * child_rsc1:0 (ocf:heartbeat:apache): Stopped + * child_rsc1:1 (ocf:heartbeat:apache): Stopped + * child_rsc1:2 (ocf:heartbeat:apache): Stopped + * child_rsc1:3 (ocf:heartbeat:apache): Stopped * child_rsc1:4 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/inc1.summary b/cts/scheduler/summary/inc1.summary index 5201a448961..844446be8e5 100644 --- a/cts/scheduler/summary/inc1.summary +++ b/cts/scheduler/summary/inc1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -21,39 +23,19 @@ Transition Summary: * Start rsc3 ( node2 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: child_rsc2:0 monitor on node2 - * Resource action: child_rsc2:0 monitor on node1 - * 
Resource action: child_rsc2:1 monitor on node2 - * Resource action: child_rsc2:1 monitor on node1 - * Resource action: child_rsc2:2 monitor on node2 - * Resource action: child_rsc2:2 monitor on node1 - * Resource action: child_rsc2:3 monitor on node2 - * Resource action: child_rsc2:3 monitor on node1 - * Resource action: child_rsc2:4 monitor on node2 - * Resource action: child_rsc2:4 monitor on node1 - * Resource action: rsc3 monitor on node2 - * Resource action: rsc3 monitor on node1 - * Resource action: rsc1 start on node1 - * Pseudo action: rsc2_start_0 - * Resource action: child_rsc2:0 start on node2 - * Resource action: child_rsc2:1 start on node1 - * Resource action: child_rsc2:2 start on node2 - * Resource action: child_rsc2:3 start on node1 - * Pseudo action: rsc2_running_0 - * Resource action: rsc3 start on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node1 + * rsc1 (ocf:heartbeat:apache): Stopped * Clone Set: rsc2 [child_rsc2] (unique): - * child_rsc2:0 (ocf:heartbeat:apache): Started node2 - * child_rsc2:1 (ocf:heartbeat:apache): Started node1 - * child_rsc2:2 (ocf:heartbeat:apache): Started node2 - * child_rsc2:3 (ocf:heartbeat:apache): Started node1 + * child_rsc2:0 (ocf:heartbeat:apache): Stopped + * child_rsc2:1 (ocf:heartbeat:apache): Stopped + * child_rsc2:2 (ocf:heartbeat:apache): Stopped + * child_rsc2:3 (ocf:heartbeat:apache): Stopped * child_rsc2:4 (ocf:heartbeat:apache): Stopped - * rsc3 (ocf:heartbeat:apache): Started node2 + * rsc3 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/inc10.summary b/cts/scheduler/summary/inc10.summary index 1037e6c5a67..f5f658951b9 100644 --- a/cts/scheduler/summary/inc10.summary +++ b/cts/scheduler/summary/inc10.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node xen-2: standby (with active resources) * Online: [ xen-1 xen-3 xen-4 ] @@ -12,35 +14,20 @@ Current cluster status: Transition Summary: * Stop child_DoFencing:1 ( xen-2 ) due to node availability * Stop ocfs2:1 ( xen-2 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Pseudo action: DoFencing_stop_0 - * Pseudo action: ocfs2-clone_pre_notify_stop_0 - * Resource action: child_DoFencing:2 stop on xen-2 - * Pseudo action: DoFencing_stopped_0 - * Resource action: ocfs2:1 notify on xen-3 - * Resource action: ocfs2:1 notify on xen-2 - * Resource action: ocfs2:3 notify on xen-1 - * Resource action: ocfs2:0 notify on xen-4 - * Pseudo action: ocfs2-clone_confirmed-pre_notify_stop_0 - * Pseudo action: ocfs2-clone_stop_0 - * Resource action: ocfs2:1 stop on xen-2 - * Pseudo action: ocfs2-clone_stopped_0 - * Pseudo action: ocfs2-clone_post_notify_stopped_0 - * Resource action: ocfs2:1 notify on xen-3 - * Resource action: ocfs2:3 notify on xen-1 - * Resource action: ocfs2:0 notify on xen-4 - * Pseudo action: ocfs2-clone_confirmed-post_notify_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: - * Node xen-2: standby + * Node xen-2: standby (with active resources) * Online: [ xen-1 xen-3 xen-4 ] * Full List of Resources: * Clone Set: DoFencing [child_DoFencing]: - * Started: [ xen-1 xen-3 xen-4 ] - * Stopped: [ xen-2 ] + * Started: [ xen-1 xen-2 xen-3 xen-4 ] * Clone Set: ocfs2-clone [ocfs2]: - * Started: [ xen-1 xen-3 xen-4 ] - * Stopped: [ xen-2 ] + * Started: [ xen-1 xen-2 xen-3 xen-4 ] diff --git 
a/cts/scheduler/summary/inc11.summary b/cts/scheduler/summary/inc11.summary index 1149123210f..d1c2d73f125 100644 --- a/cts/scheduler/summary/inc11.summary +++ b/cts/scheduler/summary/inc11.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node0 node1 node2 ] @@ -14,30 +16,15 @@ Transition Summary: * Promote child_rsc1:1 ( Stopped -> Promoted node2 ) Executing Cluster Transition: - * Resource action: simple-rsc monitor on node2 - * Resource action: simple-rsc monitor on node1 - * Resource action: simple-rsc monitor on node0 - * Resource action: child_rsc1:0 monitor on node2 - * Resource action: child_rsc1:0 monitor on node1 - * Resource action: child_rsc1:0 monitor on node0 - * Resource action: child_rsc1:1 monitor on node2 - * Resource action: child_rsc1:1 monitor on node1 - * Resource action: child_rsc1:1 monitor on node0 - * Pseudo action: rsc1_start_0 - * Resource action: simple-rsc start on node2 - * Resource action: child_rsc1:0 start on node1 - * Resource action: child_rsc1:1 start on node2 - * Pseudo action: rsc1_running_0 - * Pseudo action: rsc1_promote_0 - * Resource action: child_rsc1:1 promote on node2 - * Pseudo action: rsc1_promoted_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node0 node1 node2 ] * Full List of Resources: - * simple-rsc (ocf:heartbeat:apache): Started node2 + * simple-rsc (ocf:heartbeat:apache): Stopped * Clone Set: rsc1 [child_rsc1] (promotable, unique): - * child_rsc1:0 (ocf:heartbeat:apache): Unpromoted node1 - * child_rsc1:1 (ocf:heartbeat:apache): Promoted node2 + * child_rsc1:0 (ocf:heartbeat:apache): Stopped + * child_rsc1:1 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/inc12.summary b/cts/scheduler/summary/inc12.summary index 36ffffad8fb..a891741f939 100644 --- a/cts/scheduler/summary/inc12.summary +++ b/cts/scheduler/summary/inc12.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ] @@ -58,75 +60,43 @@ Transition Summary: * Stop ocf_msdummy:9 ( Unpromoted c001n07 ) due to node availability * Stop ocf_msdummy:10 ( Unpromoted c001n02 ) due to node availability * Stop ocf_msdummy:11 ( Unpromoted c001n02 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Pseudo action: group-1_stop_0 - * Resource action: ocf_192.168.100.183 stop on c001n02 - * Resource action: lsb_dummy stop on c001n04 - * Resource action: rsc_c001n03 stop on c001n05 - * Resource action: rsc_c001n02 stop on c001n02 - * Resource action: rsc_c001n04 stop on c001n04 - * Resource action: rsc_c001n05 stop on c001n05 - * Resource action: rsc_c001n06 stop on c001n06 - * Resource action: rsc_c001n07 stop on c001n07 - * Pseudo action: DoFencing_stop_0 - * Pseudo action: master_rsc_1_stop_0 - * Resource action: heartbeat_192.168.100.182 stop on c001n02 - * Resource action: child_DoFencing:1 stop on c001n02 - * Resource action: child_DoFencing:2 stop on c001n04 - * Resource action: child_DoFencing:3 stop on c001n05 - * Resource action: child_DoFencing:4 stop on c001n06 - * Resource action: child_DoFencing:5 stop on c001n07 - * Pseudo action: DoFencing_stopped_0 - * Resource action: ocf_msdummy:2 stop on c001n04 - * Resource action: ocf_msdummy:3 stop on c001n04 - * Resource action: ocf_msdummy:4 stop on c001n05 - * Resource action: ocf_msdummy:5 stop on c001n05 - * Resource action: ocf_msdummy:6 stop on c001n06 - * Resource action: 
ocf_msdummy:7 stop on c001n06 - * Resource action: ocf_msdummy:8 stop on c001n07 - * Resource action: ocf_msdummy:9 stop on c001n07 - * Resource action: ocf_msdummy:10 stop on c001n02 - * Resource action: ocf_msdummy:11 stop on c001n02 - * Pseudo action: master_rsc_1_stopped_0 - * Cluster action: do_shutdown on c001n07 - * Cluster action: do_shutdown on c001n06 - * Cluster action: do_shutdown on c001n05 - * Cluster action: do_shutdown on c001n04 - * Resource action: ocf_192.168.100.181 stop on c001n02 - * Cluster action: do_shutdown on c001n02 - * Pseudo action: group-1_stopped_0 - * Cluster action: do_shutdown on c001n03 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ] * Full List of Resources: * DcIPaddr (ocf:heartbeat:IPaddr): Stopped * Resource Group: group-1: - * ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Stopped - * heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Stopped - * ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Stopped - * lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Stopped - * rsc_c001n03 (ocf:heartbeat:IPaddr): Stopped - * rsc_c001n02 (ocf:heartbeat:IPaddr): Stopped - * rsc_c001n04 (ocf:heartbeat:IPaddr): Stopped - * rsc_c001n05 (ocf:heartbeat:IPaddr): Stopped - * rsc_c001n06 (ocf:heartbeat:IPaddr): Stopped - * rsc_c001n07 (ocf:heartbeat:IPaddr): Stopped + * ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n02 + * heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n02 + * ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n02 + * lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n04 + * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n05 + * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02 + * rsc_c001n04 (ocf:heartbeat:IPaddr): Started c001n04 + * rsc_c001n05 (ocf:heartbeat:IPaddr): Started c001n05 + * rsc_c001n06 (ocf:heartbeat:IPaddr): Started c001n06 + * rsc_c001n07 (ocf:heartbeat:IPaddr): Started c001n07 * Clone Set: DoFencing [child_DoFencing]: - * Stopped: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ] + * Started: [ c001n02 c001n04 c001n05 c001n06 c001n07 ] + * Stopped: [ c001n03 ] * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique): * ocf_msdummy:0 (ocf:heartbeat:Stateful): Stopped * ocf_msdummy:1 (ocf:heartbeat:Stateful): Stopped - * ocf_msdummy:2 (ocf:heartbeat:Stateful): Stopped - * ocf_msdummy:3 (ocf:heartbeat:Stateful): Stopped - * ocf_msdummy:4 (ocf:heartbeat:Stateful): Stopped - * ocf_msdummy:5 (ocf:heartbeat:Stateful): Stopped - * ocf_msdummy:6 (ocf:heartbeat:Stateful): Stopped - * ocf_msdummy:7 (ocf:heartbeat:Stateful): Stopped - * ocf_msdummy:8 (ocf:heartbeat:Stateful): Stopped - * ocf_msdummy:9 (ocf:heartbeat:Stateful): Stopped - * ocf_msdummy:10 (ocf:heartbeat:Stateful): Stopped - * ocf_msdummy:11 (ocf:heartbeat:Stateful): Stopped + * ocf_msdummy:2 (ocf:heartbeat:Stateful): Unpromoted c001n04 + * ocf_msdummy:3 (ocf:heartbeat:Stateful): Unpromoted c001n04 + * ocf_msdummy:4 (ocf:heartbeat:Stateful): Unpromoted c001n05 + * ocf_msdummy:5 (ocf:heartbeat:Stateful): Unpromoted c001n05 + * ocf_msdummy:6 (ocf:heartbeat:Stateful): Unpromoted c001n06 + * ocf_msdummy:7 (ocf:heartbeat:Stateful): Unpromoted c001n06 + * ocf_msdummy:8 (ocf:heartbeat:Stateful): Unpromoted c001n07 + * ocf_msdummy:9 (ocf:heartbeat:Stateful): Unpromoted c001n07 + * ocf_msdummy:10 (ocf:heartbeat:Stateful): Unpromoted c001n02 + * ocf_msdummy:11 (ocf:heartbeat:Stateful): Unpromoted c001n02 diff --git a/cts/scheduler/summary/inc2.summary 
b/cts/scheduler/summary/inc2.summary index bf90e78eeaa..8062338c309 100644 --- a/cts/scheduler/summary/inc2.summary +++ b/cts/scheduler/summary/inc2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -16,22 +18,10 @@ Transition Summary: * Stop child_rsc1:4 ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: child_rsc1:0 monitor on node2 - * Resource action: child_rsc1:1 monitor on node2 - * Resource action: child_rsc1:2 monitor on node2 - * Resource action: child_rsc1:3 monitor on node2 - * Resource action: child_rsc1:4 monitor on node2 - * Pseudo action: rsc1_stop_0 - * Resource action: child_rsc1:2 stop on node1 - * Resource action: child_rsc1:3 stop on node1 - * Resource action: child_rsc1:4 stop on node1 - * Pseudo action: rsc1_stopped_0 - * Pseudo action: rsc1_start_0 - * Resource action: child_rsc1:2 start on node2 - * Resource action: child_rsc1:3 start on node2 - * Pseudo action: rsc1_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -39,6 +29,6 @@ Revised Cluster Status: * Clone Set: rsc1 [child_rsc1] (unique): * child_rsc1:0 (ocf:heartbeat:apache): Started node1 * child_rsc1:1 (ocf:heartbeat:apache): Started node1 - * child_rsc1:2 (ocf:heartbeat:apache): Started node2 - * child_rsc1:3 (ocf:heartbeat:apache): Started node2 - * child_rsc1:4 (ocf:heartbeat:apache): Stopped + * child_rsc1:2 (ocf:heartbeat:apache): Started node1 + * child_rsc1:3 (ocf:heartbeat:apache): Started node1 + * child_rsc1:4 (ocf:heartbeat:apache): Started node1 diff --git a/cts/scheduler/summary/inc3.summary b/cts/scheduler/summary/inc3.summary index 72564463b69..b2993403980 100644 --- a/cts/scheduler/summary/inc3.summary +++ b/cts/scheduler/summary/inc3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -24,35 +26,10 @@ Transition Summary: * Move child_rsc2:4 ( node2 -> node1 ) Executing Cluster Transition: - * Resource action: child_rsc1:0 monitor on node2 - * Resource action: child_rsc1:1 monitor on node2 - * Resource action: child_rsc1:2 monitor on node2 - * Resource action: child_rsc1:3 monitor on node2 - * Resource action: child_rsc1:4 monitor on node2 - * Resource action: child_rsc2:0 monitor on node1 - * Resource action: child_rsc2:1 monitor on node1 - * Resource action: child_rsc2:2 monitor on node1 - * Resource action: child_rsc2:3 monitor on node1 - * Resource action: child_rsc2:4 monitor on node1 - * Pseudo action: rsc2_stop_0 - * Resource action: child_rsc2:3 stop on node2 - * Resource action: child_rsc2:4 stop on node2 - * Pseudo action: rsc2_stopped_0 - * Pseudo action: rsc1_stop_0 - * Resource action: child_rsc1:2 stop on node1 - * Resource action: child_rsc1:3 stop on node1 - * Resource action: child_rsc1:4 stop on node1 - * Pseudo action: rsc1_stopped_0 - * Pseudo action: rsc1_start_0 - * Resource action: child_rsc1:2 start on node2 - * Resource action: child_rsc1:3 start on node2 - * Pseudo action: rsc1_running_0 - * Pseudo action: rsc2_start_0 - * Resource action: child_rsc2:3 start on node1 - * Resource action: child_rsc2:4 start on node1 - * Pseudo action: rsc2_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -60,12 +37,12 @@ Revised Cluster Status: * Clone Set: rsc1 [child_rsc1] (unique): * child_rsc1:0 (ocf:heartbeat:apache): Started node1 * child_rsc1:1 (ocf:heartbeat:apache): Started node1 - * child_rsc1:2 (ocf:heartbeat:apache): 
Started node2 - * child_rsc1:3 (ocf:heartbeat:apache): Started node2 - * child_rsc1:4 (ocf:heartbeat:apache): Stopped + * child_rsc1:2 (ocf:heartbeat:apache): Started node1 + * child_rsc1:3 (ocf:heartbeat:apache): Started node1 + * child_rsc1:4 (ocf:heartbeat:apache): Started node1 * Clone Set: rsc2 [child_rsc2] (unique): * child_rsc2:0 (ocf:heartbeat:apache): Started node2 * child_rsc2:1 (ocf:heartbeat:apache): Started node2 * child_rsc2:2 (ocf:heartbeat:apache): Started node2 - * child_rsc2:3 (ocf:heartbeat:apache): Started node1 - * child_rsc2:4 (ocf:heartbeat:apache): Started node1 + * child_rsc2:3 (ocf:heartbeat:apache): Started node2 + * child_rsc2:4 (ocf:heartbeat:apache): Started node2 diff --git a/cts/scheduler/summary/inc4.summary b/cts/scheduler/summary/inc4.summary index e71cea65b56..b2993403980 100644 --- a/cts/scheduler/summary/inc4.summary +++ b/cts/scheduler/summary/inc4.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -24,35 +26,10 @@ Transition Summary: * Move child_rsc2:4 ( node2 -> node1 ) Executing Cluster Transition: - * Resource action: child_rsc1:0 monitor on node2 - * Resource action: child_rsc1:1 monitor on node2 - * Resource action: child_rsc1:2 monitor on node2 - * Resource action: child_rsc1:3 monitor on node2 - * Resource action: child_rsc1:4 monitor on node2 - * Resource action: child_rsc2:0 monitor on node1 - * Resource action: child_rsc2:1 monitor on node1 - * Resource action: child_rsc2:2 monitor on node1 - * Resource action: child_rsc2:3 monitor on node1 - * Resource action: child_rsc2:4 monitor on node1 - * Pseudo action: rsc2_stop_0 - * Resource action: child_rsc2:4 stop on node2 - * Resource action: child_rsc2:3 stop on node2 - * Pseudo action: rsc2_stopped_0 - * Pseudo action: rsc1_stop_0 - * Resource action: child_rsc1:4 stop on node1 - * Resource action: child_rsc1:3 stop on node1 - * Resource action: child_rsc1:2 stop on node1 - * Pseudo action: rsc1_stopped_0 - * Pseudo action: rsc1_start_0 - * Resource action: child_rsc1:2 start on node2 - * Resource action: child_rsc1:3 start on node2 - * Pseudo action: rsc1_running_0 - * Pseudo action: rsc2_start_0 - * Resource action: child_rsc2:3 start on node1 - * Resource action: child_rsc2:4 start on node1 - * Pseudo action: rsc2_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -60,12 +37,12 @@ Revised Cluster Status: * Clone Set: rsc1 [child_rsc1] (unique): * child_rsc1:0 (ocf:heartbeat:apache): Started node1 * child_rsc1:1 (ocf:heartbeat:apache): Started node1 - * child_rsc1:2 (ocf:heartbeat:apache): Started node2 - * child_rsc1:3 (ocf:heartbeat:apache): Started node2 - * child_rsc1:4 (ocf:heartbeat:apache): Stopped + * child_rsc1:2 (ocf:heartbeat:apache): Started node1 + * child_rsc1:3 (ocf:heartbeat:apache): Started node1 + * child_rsc1:4 (ocf:heartbeat:apache): Started node1 * Clone Set: rsc2 [child_rsc2] (unique): * child_rsc2:0 (ocf:heartbeat:apache): Started node2 * child_rsc2:1 (ocf:heartbeat:apache): Started node2 * child_rsc2:2 (ocf:heartbeat:apache): Started node2 - * child_rsc2:3 (ocf:heartbeat:apache): Started node1 - * child_rsc2:4 (ocf:heartbeat:apache): Started node1 + * child_rsc2:3 (ocf:heartbeat:apache): Started node2 + * child_rsc2:4 (ocf:heartbeat:apache): Started node2 diff --git a/cts/scheduler/summary/inc5.summary b/cts/scheduler/summary/inc5.summary index 3b971156b22..d36e9f115f4 100644 --- a/cts/scheduler/summary/inc5.summary +++ b/cts/scheduler/summary/inc5.summary 
@@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -43,64 +45,10 @@ Transition Summary: * Move child_rsc7:1 ( node2 -> node1 ) Executing Cluster Transition: - * Resource action: child_rsc1:0 monitor on node2 - * Resource action: child_rsc1:1 monitor on node1 - * Resource action: child_rsc1:2 monitor on node2 - * Resource action: child_rsc1:2 monitor on node1 - * Resource action: child_rsc2:0 monitor on node2 - * Resource action: child_rsc2:1 monitor on node2 - * Resource action: child_rsc2:2 monitor on node2 - * Resource action: child_rsc2:2 monitor on node1 - * Pseudo action: rsc2_stop_0 - * Resource action: child_rsc3:0 monitor on node2 - * Resource action: child_rsc3:1 monitor on node1 - * Resource action: child_rsc3:2 monitor on node2 - * Resource action: child_rsc3:2 monitor on node1 - * Resource action: child_rsc4:0 monitor on node2 - * Resource action: child_rsc4:1 monitor on node2 - * Resource action: child_rsc4:2 monitor on node2 - * Resource action: child_rsc4:2 monitor on node1 - * Pseudo action: rsc4_stop_0 - * Resource action: child_rsc5:0 monitor on node1 - * Resource action: child_rsc5:1 monitor on node1 - * Resource action: child_rsc5:2 monitor on node2 - * Resource action: child_rsc5:2 monitor on node1 - * Pseudo action: rsc5_stop_0 - * Resource action: child_rsc6:0 monitor on node2 - * Resource action: child_rsc6:1 monitor on node1 - * Resource action: child_rsc6:2 monitor on node2 - * Resource action: child_rsc6:2 monitor on node1 - * Resource action: child_rsc7:0 monitor on node1 - * Resource action: child_rsc7:1 monitor on node1 - * Resource action: child_rsc7:2 monitor on node2 - * Resource action: child_rsc7:2 monitor on node1 - * Pseudo action: rsc7_stop_0 - * Resource action: child_rsc8:0 monitor on node2 - * Resource action: child_rsc8:1 monitor on node1 - * Resource action: child_rsc8:2 monitor on node2 - * Resource action: child_rsc8:2 monitor on node1 - * Resource action: child_rsc2:1 stop on node1 - * Pseudo action: rsc2_stopped_0 - * Pseudo action: rsc2_start_0 - * Resource action: child_rsc4:1 stop on node1 - * Pseudo action: rsc4_stopped_0 - * Pseudo action: rsc4_start_0 - * Resource action: child_rsc5:1 stop on node2 - * Pseudo action: rsc5_stopped_0 - * Pseudo action: rsc5_start_0 - * Resource action: child_rsc7:1 stop on node2 - * Pseudo action: rsc7_stopped_0 - * Pseudo action: rsc7_start_0 - * Resource action: child_rsc2:1 start on node2 - * Pseudo action: rsc2_running_0 - * Resource action: child_rsc4:1 start on node2 - * Pseudo action: rsc4_running_0 - * Resource action: child_rsc5:1 start on node1 - * Pseudo action: rsc5_running_0 - * Resource action: child_rsc7:1 start on node1 - * Pseudo action: rsc7_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -111,7 +59,7 @@ Revised Cluster Status: * child_rsc1:2 (ocf:heartbeat:apache): Stopped * Clone Set: rsc2 [child_rsc2] (unique): * child_rsc2:0 (ocf:heartbeat:apache): Started node1 - * child_rsc2:1 (ocf:heartbeat:apache): Started node2 + * child_rsc2:1 (ocf:heartbeat:apache): Started node1 * child_rsc2:2 (ocf:heartbeat:apache): Stopped * Clone Set: rsc3 [child_rsc3] (unique): * child_rsc3:0 (ocf:heartbeat:apache): Started node1 @@ -119,11 +67,11 @@ Revised Cluster Status: * child_rsc3:2 (ocf:heartbeat:apache): Stopped * Clone Set: rsc4 [child_rsc4] (unique): * child_rsc4:0 (ocf:heartbeat:apache): Started node1 - * child_rsc4:1 (ocf:heartbeat:apache): Started node2 + * child_rsc4:1 
(ocf:heartbeat:apache): Started node1 * child_rsc4:2 (ocf:heartbeat:apache): Stopped * Clone Set: rsc5 [child_rsc5] (unique): * child_rsc5:0 (ocf:heartbeat:apache): Started node2 - * child_rsc5:1 (ocf:heartbeat:apache): Started node1 + * child_rsc5:1 (ocf:heartbeat:apache): Started node2 * child_rsc5:2 (ocf:heartbeat:apache): Stopped * Clone Set: rsc6 [child_rsc6] (unique): * child_rsc6:0 (ocf:heartbeat:apache): Started node1 @@ -131,7 +79,7 @@ Revised Cluster Status: * child_rsc6:2 (ocf:heartbeat:apache): Stopped * Clone Set: rsc7 [child_rsc7] (unique): * child_rsc7:0 (ocf:heartbeat:apache): Started node2 - * child_rsc7:1 (ocf:heartbeat:apache): Started node1 + * child_rsc7:1 (ocf:heartbeat:apache): Started node2 * child_rsc7:2 (ocf:heartbeat:apache): Stopped * Clone Set: rsc8 [child_rsc8] (unique): * child_rsc8:0 (ocf:heartbeat:apache): Started node1 diff --git a/cts/scheduler/summary/inc6.summary b/cts/scheduler/summary/inc6.summary index 74daaa6fa90..8be4fee93e1 100644 --- a/cts/scheduler/summary/inc6.summary +++ b/cts/scheduler/summary/inc6.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node1 node2 ] @@ -32,45 +35,16 @@ Transition Summary: * Move child_rsc2:1 ( node1 -> node2 ) * Move child_rsc4:1 ( node1 -> node2 ) * Move child_rsc5:1 ( node2 -> node1 ) - * Restart child_rsc6:0 ( node1 ) due to required rsc5 running - * Restart child_rsc6:1 ( node2 ) due to required rsc5 running + * Restart child_rsc6:0 ( node1 ) + * Restart child_rsc6:1 ( node2 ) * Move child_rsc7:1 ( node2 -> node1 ) Executing Cluster Transition: - * Pseudo action: rsc2_stop_0 - * Pseudo action: rsc4_stop_0 - * Pseudo action: rsc6_stop_0 - * Pseudo action: rsc7_stop_0 - * Resource action: child_rsc2:1 stop on node1 - * Pseudo action: rsc2_stopped_0 - * Pseudo action: rsc2_start_0 - * Resource action: child_rsc4:1 stop on node1 - * Pseudo action: rsc4_stopped_0 - * Pseudo action: rsc4_start_0 - * Resource action: child_rsc6:0 stop on node1 - * Resource action: child_rsc6:1 stop on node2 - * Pseudo action: rsc6_stopped_0 - * Resource action: child_rsc7:1 stop on node2 - * Pseudo action: rsc7_stopped_0 - * Pseudo action: rsc7_start_0 - * Resource action: child_rsc2:1 start on node2 - * Pseudo action: rsc2_running_0 - * Resource action: child_rsc4:1 start on node2 - * Pseudo action: rsc4_running_0 - * Pseudo action: rsc5_stop_0 - * Resource action: child_rsc7:1 start on node1 - * Pseudo action: rsc7_running_0 - * Resource action: child_rsc5:1 stop on node2 - * Pseudo action: rsc5_stopped_0 - * Pseudo action: rsc5_start_0 - * Resource action: child_rsc5:1 start on node1 - * Pseudo action: rsc5_running_0 - * Pseudo action: rsc6_start_0 - * Resource action: child_rsc6:0 start on node1 - * Resource action: child_rsc6:1 start on node2 - * Pseudo action: rsc6_running_0 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node1 node2 ] @@ -79,23 +53,23 @@ Revised Cluster Status: * Started: [ node1 node2 ] * Clone Set: rsc2 [child_rsc2] (unique): * child_rsc2:0 (ocf:heartbeat:apache): Started node1 - * child_rsc2:1 (ocf:heartbeat:apache): Started [ node1 node2 ] + * child_rsc2:1 (ocf:heartbeat:apache): Started node1 * child_rsc2:2 (ocf:heartbeat:apache): Stopped * Clone Set: rsc3 [child_rsc3]: * Started: [ node1 node2 ] * Clone Set: rsc4 [child_rsc4] (unique): * child_rsc4:0 (ocf:heartbeat:apache): Started node1 - * child_rsc4:1 
(ocf:heartbeat:apache): Started [ node1 node2 ] + * child_rsc4:1 (ocf:heartbeat:apache): Started node1 * child_rsc4:2 (ocf:heartbeat:apache): Stopped * Clone Set: rsc5 [child_rsc5] (unique): * child_rsc5:0 (ocf:heartbeat:apache): Started node2 - * child_rsc5:1 (ocf:heartbeat:apache): Started node1 + * child_rsc5:1 (ocf:heartbeat:apache): Started node2 * child_rsc5:2 (ocf:heartbeat:apache): Stopped * Clone Set: rsc6 [child_rsc6]: * Started: [ node1 node2 ] * Clone Set: rsc7 [child_rsc7] (unique): * child_rsc7:0 (ocf:heartbeat:apache): Started node2 - * child_rsc7:1 (ocf:heartbeat:apache): Started node1 + * child_rsc7:1 (ocf:heartbeat:apache): Started node2 * child_rsc7:2 (ocf:heartbeat:apache): Stopped * Clone Set: rsc8 [child_rsc8]: * Started: [ node1 node2 ] diff --git a/cts/scheduler/summary/inc7.summary b/cts/scheduler/summary/inc7.summary index 71cca236634..e52c5b79be4 100644 --- a/cts/scheduler/summary/inc7.summary +++ b/cts/scheduler/summary/inc7.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] @@ -31,70 +33,24 @@ Transition Summary: * Start child_rsc2:4 ( node3 ) Executing Cluster Transition: - * Resource action: rsc0 monitor on node3 - * Resource action: rsc0 monitor on node2 - * Resource action: rsc0 monitor on node1 - * Resource action: child_rsc1:0 monitor on node3 - * Resource action: child_rsc1:0 monitor on node2 - * Resource action: child_rsc1:0 monitor on node1 - * Resource action: child_rsc1:1 monitor on node3 - * Resource action: child_rsc1:1 monitor on node2 - * Resource action: child_rsc1:1 monitor on node1 - * Resource action: child_rsc1:2 monitor on node3 - * Resource action: child_rsc1:2 monitor on node2 - * Resource action: child_rsc1:2 monitor on node1 - * Resource action: child_rsc1:3 monitor on node3 - * Resource action: child_rsc1:3 monitor on node2 - * Resource action: child_rsc1:3 monitor on node1 - * Resource action: child_rsc1:4 monitor on node3 - * Resource action: child_rsc1:4 monitor on node2 - * Resource action: child_rsc1:4 monitor on node1 - * Pseudo action: rsc1_start_0 - * Resource action: child_rsc2:0 monitor on node3 - * Resource action: child_rsc2:0 monitor on node2 - * Resource action: child_rsc2:0 monitor on node1 - * Resource action: child_rsc2:1 monitor on node3 - * Resource action: child_rsc2:1 monitor on node2 - * Resource action: child_rsc2:1 monitor on node1 - * Resource action: child_rsc2:2 monitor on node3 - * Resource action: child_rsc2:2 monitor on node2 - * Resource action: child_rsc2:2 monitor on node1 - * Resource action: child_rsc2:3 monitor on node3 - * Resource action: child_rsc2:3 monitor on node2 - * Resource action: child_rsc2:3 monitor on node1 - * Resource action: child_rsc2:4 monitor on node3 - * Resource action: child_rsc2:4 monitor on node2 - * Resource action: child_rsc2:4 monitor on node1 - * Resource action: rsc0 start on node1 - * Resource action: child_rsc1:0 start on node1 - * Resource action: child_rsc1:1 start on node2 - * Resource action: child_rsc1:2 start on node3 - * Resource action: child_rsc1:3 start on node1 - * Resource action: child_rsc1:4 start on node2 - * Pseudo action: rsc1_running_0 - * Pseudo action: rsc2_start_0 - * Resource action: child_rsc2:0 start on node2 - * Resource action: child_rsc2:1 start on node3 - * Resource action: child_rsc2:2 start on node1 - * Resource action: child_rsc2:3 start on node2 - * Resource action: child_rsc2:4 start on node3 - * Pseudo action: rsc2_running_0 Revised Cluster Status: + * Cluster Summary: + * 
Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: - * rsc0 (ocf:heartbeat:apache): Started node1 + * rsc0 (ocf:heartbeat:apache): Stopped * Clone Set: rsc1 [child_rsc1] (unique): - * child_rsc1:0 (ocf:heartbeat:apache): Started node1 - * child_rsc1:1 (ocf:heartbeat:apache): Started node2 - * child_rsc1:2 (ocf:heartbeat:apache): Started node3 - * child_rsc1:3 (ocf:heartbeat:apache): Started node1 - * child_rsc1:4 (ocf:heartbeat:apache): Started node2 + * child_rsc1:0 (ocf:heartbeat:apache): Stopped + * child_rsc1:1 (ocf:heartbeat:apache): Stopped + * child_rsc1:2 (ocf:heartbeat:apache): Stopped + * child_rsc1:3 (ocf:heartbeat:apache): Stopped + * child_rsc1:4 (ocf:heartbeat:apache): Stopped * Clone Set: rsc2 [child_rsc2] (unique): - * child_rsc2:0 (ocf:heartbeat:apache): Started node2 - * child_rsc2:1 (ocf:heartbeat:apache): Started node3 - * child_rsc2:2 (ocf:heartbeat:apache): Started node1 - * child_rsc2:3 (ocf:heartbeat:apache): Started node2 - * child_rsc2:4 (ocf:heartbeat:apache): Started node3 + * child_rsc2:0 (ocf:heartbeat:apache): Stopped + * child_rsc2:1 (ocf:heartbeat:apache): Stopped + * child_rsc2:2 (ocf:heartbeat:apache): Stopped + * child_rsc2:3 (ocf:heartbeat:apache): Stopped + * child_rsc2:4 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/inc8.summary b/cts/scheduler/summary/inc8.summary index 9a88b44e9e9..d91d5129f78 100644 --- a/cts/scheduler/summary/inc8.summary +++ b/cts/scheduler/summary/inc8.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -23,40 +25,15 @@ Transition Summary: * Start child_rsc2:1 ( node1 ) Executing Cluster Transition: - * Resource action: rsc0 monitor on node2 - * Resource action: rsc0 monitor on node1 - * Resource action: child_rsc1:0 monitor on node2 - * Resource action: child_rsc1:0 monitor on node1 - * Resource action: child_rsc1:1 monitor on node2 - * Resource action: child_rsc1:1 monitor on node1 - * Resource action: child_rsc1:2 monitor on node2 - * Resource action: child_rsc1:2 monitor on node1 - * Resource action: child_rsc1:3 monitor on node2 - * Resource action: child_rsc1:3 monitor on node1 - * Resource action: child_rsc1:4 monitor on node2 - * Resource action: child_rsc1:4 monitor on node1 - * Resource action: child_rsc2:0 monitor on node2 - * Resource action: child_rsc2:0 monitor on node1 - * Resource action: child_rsc2:1 monitor on node2 - * Resource action: child_rsc2:1 monitor on node1 - * Resource action: child_rsc2:2 monitor on node2 - * Resource action: child_rsc2:2 monitor on node1 - * Resource action: child_rsc2:3 monitor on node2 - * Resource action: child_rsc2:3 monitor on node1 - * Resource action: child_rsc2:4 monitor on node2 - * Resource action: child_rsc2:4 monitor on node1 - * Pseudo action: rsc2_start_0 - * Resource action: rsc0 start on node1 - * Resource action: child_rsc2:0 start on node2 - * Resource action: child_rsc2:1 start on node1 - * Pseudo action: rsc2_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc0 (ocf:heartbeat:apache): Started node1 + * rsc0 (ocf:heartbeat:apache): Stopped * Clone Set: rsc1 [child_rsc1] (unique): * child_rsc1:0 (ocf:heartbeat:apache): Stopped * child_rsc1:1 (ocf:heartbeat:apache): Stopped @@ -64,8 +41,8 @@ Revised Cluster Status: * child_rsc1:3 (ocf:heartbeat:apache): Stopped * child_rsc1:4 (ocf:heartbeat:apache): Stopped * Clone Set: rsc2 [child_rsc2] (unique): - * child_rsc2:0 (ocf:heartbeat:apache): 
Started node2 - * child_rsc2:1 (ocf:heartbeat:apache): Started node1 + * child_rsc2:0 (ocf:heartbeat:apache): Stopped + * child_rsc2:1 (ocf:heartbeat:apache): Stopped * child_rsc2:2 (ocf:heartbeat:apache): Stopped * child_rsc2:3 (ocf:heartbeat:apache): Stopped * child_rsc2:4 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/inc9.summary b/cts/scheduler/summary/inc9.summary index 0e91a2ea06b..f89a5615d35 100644 --- a/cts/scheduler/summary/inc9.summary +++ b/cts/scheduler/summary/inc9.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node1 node2 ] @@ -13,18 +16,21 @@ Transition Summary: * Stop child_rsc1:5 ( node1 ) due to node availability * Stop child_rsc1:6 ( node1 ) due to node availability * Stop child_rsc1:7 ( node2 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Pseudo action: rsc1_stop_0 - * Resource action: child_rsc1:1 stop on node1 - * Resource action: child_rsc1:2 stop on node1 - * Resource action: child_rsc1:1 stop on node2 - * Pseudo action: rsc1_stopped_0 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: rsc1 [child_rsc1]: + * child_rsc1 (ocf:heartbeat:apache): ORPHANED Started node1 + * child_rsc1 (ocf:heartbeat:apache): ORPHANED Started node1 + * child_rsc1 (ocf:heartbeat:apache): ORPHANED Started node2 * Started: [ node1 node2 ] diff --git a/cts/scheduler/summary/interleave-0.summary b/cts/scheduler/summary/interleave-0.summary index fe16667be3a..cefdfad0dcd 100644 --- a/cts/scheduler/summary/interleave-0.summary +++ b/cts/scheduler/summary/interleave-0.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 c001n08 c001n09 ] @@ -42,172 +44,10 @@ Transition Summary: * Start child_CloneSet:7 ( c001n09 ) Executing Cluster Transition: - * Resource action: DcIPaddr monitor on c001n08 - * Resource action: DcIPaddr monitor on c001n07 - * Resource action: DcIPaddr monitor on c001n06 - * Resource action: DcIPaddr monitor on c001n05 - * Resource action: DcIPaddr monitor on c001n04 - * Resource action: DcIPaddr monitor on c001n03 - * Resource action: DcIPaddr monitor on c001n02 - * Resource action: rsc_c001n09 monitor on c001n09 - * Resource action: rsc_c001n09 monitor on c001n08 - * Resource action: rsc_c001n09 monitor on c001n07 - * Resource action: rsc_c001n09 monitor on c001n05 - * Resource action: rsc_c001n09 monitor on c001n04 - * Resource action: rsc_c001n09 monitor on c001n03 - * Resource action: rsc_c001n09 monitor on c001n02 - * Resource action: rsc_c001n02 monitor on c001n09 - * Resource action: rsc_c001n02 monitor on c001n08 - * Resource action: rsc_c001n02 monitor on c001n07 - * Resource action: rsc_c001n02 monitor on c001n05 - * Resource action: rsc_c001n02 monitor on c001n04 - * Resource action: rsc_c001n03 monitor on c001n09 - * Resource action: rsc_c001n03 monitor on c001n08 - * Resource action: rsc_c001n03 monitor on c001n07 - * Resource action: rsc_c001n03 monitor on c001n05 - * Resource action: rsc_c001n03 monitor on c001n04 - * Resource action: rsc_c001n03 monitor on c001n02 - * Resource action: rsc_c001n04 monitor on c001n09 - * Resource action: rsc_c001n04 monitor on c001n08 - * Resource action: rsc_c001n04 monitor on c001n07 - * Resource 
action: rsc_c001n04 monitor on c001n05 - * Resource action: rsc_c001n04 monitor on c001n03 - * Resource action: rsc_c001n04 monitor on c001n02 - * Resource action: rsc_c001n05 monitor on c001n09 - * Resource action: rsc_c001n05 monitor on c001n08 - * Resource action: rsc_c001n05 monitor on c001n07 - * Resource action: rsc_c001n05 monitor on c001n06 - * Resource action: rsc_c001n05 monitor on c001n04 - * Resource action: rsc_c001n05 monitor on c001n03 - * Resource action: rsc_c001n05 monitor on c001n02 - * Resource action: rsc_c001n06 monitor on c001n09 - * Resource action: rsc_c001n06 monitor on c001n08 - * Resource action: rsc_c001n06 monitor on c001n07 - * Resource action: rsc_c001n06 monitor on c001n05 - * Resource action: rsc_c001n06 monitor on c001n04 - * Resource action: rsc_c001n06 monitor on c001n03 - * Resource action: rsc_c001n07 monitor on c001n09 - * Resource action: rsc_c001n07 monitor on c001n08 - * Resource action: rsc_c001n07 monitor on c001n06 - * Resource action: rsc_c001n07 monitor on c001n05 - * Resource action: rsc_c001n07 monitor on c001n04 - * Resource action: rsc_c001n08 monitor on c001n09 - * Resource action: rsc_c001n08 monitor on c001n07 - * Resource action: rsc_c001n08 monitor on c001n05 - * Resource action: child_DoFencing:0 monitor on c001n09 - * Resource action: child_DoFencing:0 monitor on c001n08 - * Resource action: child_DoFencing:0 monitor on c001n07 - * Resource action: child_DoFencing:1 monitor on c001n08 - * Resource action: child_DoFencing:1 monitor on c001n07 - * Resource action: child_DoFencing:1 monitor on c001n02 - * Resource action: child_DoFencing:2 monitor on c001n09 - * Resource action: child_DoFencing:2 monitor on c001n08 - * Resource action: child_DoFencing:2 monitor on c001n07 - * Resource action: child_DoFencing:2 monitor on c001n03 - * Resource action: child_DoFencing:3 monitor on c001n08 - * Resource action: child_DoFencing:3 monitor on c001n04 - * Resource action: child_DoFencing:3 monitor on c001n02 - * Resource action: child_DoFencing:4 monitor on c001n09 - * Resource action: child_DoFencing:4 monitor on c001n05 - * Resource action: child_DoFencing:4 monitor on c001n03 - * Resource action: child_DoFencing:5 monitor on c001n08 - * Resource action: child_DoFencing:5 monitor on c001n05 - * Resource action: child_DoFencing:5 monitor on c001n04 - * Resource action: child_DoFencing:5 monitor on c001n02 - * Resource action: child_DoFencing:6 monitor on c001n09 - * Resource action: child_DoFencing:6 monitor on c001n07 - * Resource action: child_DoFencing:6 monitor on c001n05 - * Resource action: child_DoFencing:6 monitor on c001n04 - * Resource action: child_DoFencing:7 monitor on c001n08 - * Resource action: child_DoFencing:7 monitor on c001n07 - * Resource action: child_DoFencing:7 monitor on c001n05 - * Resource action: child_DoFencing:7 monitor on c001n04 - * Resource action: child_DoFencing:7 monitor on c001n03 - * Resource action: child_DoFencing:7 monitor on c001n02 - * Resource action: child_CloneSet:0 monitor on c001n09 - * Resource action: child_CloneSet:0 monitor on c001n08 - * Resource action: child_CloneSet:0 monitor on c001n07 - * Resource action: child_CloneSet:0 monitor on c001n06 - * Resource action: child_CloneSet:0 monitor on c001n05 - * Resource action: child_CloneSet:0 monitor on c001n04 - * Resource action: child_CloneSet:0 monitor on c001n03 - * Resource action: child_CloneSet:0 monitor on c001n02 - * Resource action: child_CloneSet:1 monitor on c001n09 - * Resource action: child_CloneSet:1 monitor on c001n08 - * 
Resource action: child_CloneSet:1 monitor on c001n07 - * Resource action: child_CloneSet:1 monitor on c001n06 - * Resource action: child_CloneSet:1 monitor on c001n05 - * Resource action: child_CloneSet:1 monitor on c001n04 - * Resource action: child_CloneSet:1 monitor on c001n03 - * Resource action: child_CloneSet:1 monitor on c001n02 - * Resource action: child_CloneSet:2 monitor on c001n09 - * Resource action: child_CloneSet:2 monitor on c001n08 - * Resource action: child_CloneSet:2 monitor on c001n07 - * Resource action: child_CloneSet:2 monitor on c001n06 - * Resource action: child_CloneSet:2 monitor on c001n05 - * Resource action: child_CloneSet:2 monitor on c001n04 - * Resource action: child_CloneSet:2 monitor on c001n03 - * Resource action: child_CloneSet:2 monitor on c001n02 - * Resource action: child_CloneSet:3 monitor on c001n09 - * Resource action: child_CloneSet:3 monitor on c001n08 - * Resource action: child_CloneSet:3 monitor on c001n07 - * Resource action: child_CloneSet:3 monitor on c001n06 - * Resource action: child_CloneSet:3 monitor on c001n05 - * Resource action: child_CloneSet:3 monitor on c001n04 - * Resource action: child_CloneSet:3 monitor on c001n03 - * Resource action: child_CloneSet:3 monitor on c001n02 - * Resource action: child_CloneSet:4 monitor on c001n09 - * Resource action: child_CloneSet:4 monitor on c001n08 - * Resource action: child_CloneSet:4 monitor on c001n07 - * Resource action: child_CloneSet:4 monitor on c001n06 - * Resource action: child_CloneSet:4 monitor on c001n05 - * Resource action: child_CloneSet:4 monitor on c001n04 - * Resource action: child_CloneSet:4 monitor on c001n03 - * Resource action: child_CloneSet:4 monitor on c001n02 - * Resource action: child_CloneSet:5 monitor on c001n09 - * Resource action: child_CloneSet:5 monitor on c001n08 - * Resource action: child_CloneSet:5 monitor on c001n07 - * Resource action: child_CloneSet:5 monitor on c001n06 - * Resource action: child_CloneSet:5 monitor on c001n05 - * Resource action: child_CloneSet:5 monitor on c001n04 - * Resource action: child_CloneSet:5 monitor on c001n03 - * Resource action: child_CloneSet:5 monitor on c001n02 - * Resource action: child_CloneSet:6 monitor on c001n09 - * Resource action: child_CloneSet:6 monitor on c001n08 - * Resource action: child_CloneSet:6 monitor on c001n07 - * Resource action: child_CloneSet:6 monitor on c001n06 - * Resource action: child_CloneSet:6 monitor on c001n05 - * Resource action: child_CloneSet:6 monitor on c001n04 - * Resource action: child_CloneSet:6 monitor on c001n03 - * Resource action: child_CloneSet:6 monitor on c001n02 - * Resource action: child_CloneSet:7 monitor on c001n09 - * Resource action: child_CloneSet:7 monitor on c001n08 - * Resource action: child_CloneSet:7 monitor on c001n07 - * Resource action: child_CloneSet:7 monitor on c001n06 - * Resource action: child_CloneSet:7 monitor on c001n05 - * Resource action: child_CloneSet:7 monitor on c001n04 - * Resource action: child_CloneSet:7 monitor on c001n03 - * Resource action: child_CloneSet:7 monitor on c001n02 - * Pseudo action: CloneSet_start_0 - * Resource action: child_CloneSet:0 start on c001n02 - * Resource action: child_CloneSet:1 start on c001n03 - * Resource action: child_CloneSet:2 start on c001n04 - * Resource action: child_CloneSet:3 start on c001n05 - * Resource action: child_CloneSet:4 start on c001n06 - * Resource action: child_CloneSet:5 start on c001n07 - * Resource action: child_CloneSet:6 start on c001n08 - * Resource action: child_CloneSet:7 start on c001n09 - * 
Pseudo action: CloneSet_running_0 - * Resource action: child_CloneSet:0 monitor=5000 on c001n02 - * Resource action: child_CloneSet:1 monitor=5000 on c001n03 - * Resource action: child_CloneSet:2 monitor=5000 on c001n04 - * Resource action: child_CloneSet:3 monitor=5000 on c001n05 - * Resource action: child_CloneSet:4 monitor=5000 on c001n06 - * Resource action: child_CloneSet:5 monitor=5000 on c001n07 - * Resource action: child_CloneSet:6 monitor=5000 on c001n08 - * Resource action: child_CloneSet:7 monitor=5000 on c001n09 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 c001n08 c001n09 ] @@ -231,11 +71,11 @@ Revised Cluster Status: * child_DoFencing:6 (stonith:ssh): Started c001n08 * child_DoFencing:7 (stonith:ssh): Started c001n09 * Clone Set: CloneSet [child_CloneSet] (unique): - * child_CloneSet:0 (stonith:ssh): Started c001n02 - * child_CloneSet:1 (stonith:ssh): Started c001n03 - * child_CloneSet:2 (stonith:ssh): Started c001n04 - * child_CloneSet:3 (stonith:ssh): Started c001n05 - * child_CloneSet:4 (stonith:ssh): Started c001n06 - * child_CloneSet:5 (stonith:ssh): Started c001n07 - * child_CloneSet:6 (stonith:ssh): Started c001n08 - * child_CloneSet:7 (stonith:ssh): Started c001n09 + * child_CloneSet:0 (stonith:ssh): Stopped + * child_CloneSet:1 (stonith:ssh): Stopped + * child_CloneSet:2 (stonith:ssh): Stopped + * child_CloneSet:3 (stonith:ssh): Stopped + * child_CloneSet:4 (stonith:ssh): Stopped + * child_CloneSet:5 (stonith:ssh): Stopped + * child_CloneSet:6 (stonith:ssh): Stopped + * child_CloneSet:7 (stonith:ssh): Stopped diff --git a/cts/scheduler/summary/interleave-1.summary b/cts/scheduler/summary/interleave-1.summary index fe16667be3a..cefdfad0dcd 100644 --- a/cts/scheduler/summary/interleave-1.summary +++ b/cts/scheduler/summary/interleave-1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 c001n08 c001n09 ] @@ -42,172 +44,10 @@ Transition Summary: * Start child_CloneSet:7 ( c001n09 ) Executing Cluster Transition: - * Resource action: DcIPaddr monitor on c001n08 - * Resource action: DcIPaddr monitor on c001n07 - * Resource action: DcIPaddr monitor on c001n06 - * Resource action: DcIPaddr monitor on c001n05 - * Resource action: DcIPaddr monitor on c001n04 - * Resource action: DcIPaddr monitor on c001n03 - * Resource action: DcIPaddr monitor on c001n02 - * Resource action: rsc_c001n09 monitor on c001n09 - * Resource action: rsc_c001n09 monitor on c001n08 - * Resource action: rsc_c001n09 monitor on c001n07 - * Resource action: rsc_c001n09 monitor on c001n05 - * Resource action: rsc_c001n09 monitor on c001n04 - * Resource action: rsc_c001n09 monitor on c001n03 - * Resource action: rsc_c001n09 monitor on c001n02 - * Resource action: rsc_c001n02 monitor on c001n09 - * Resource action: rsc_c001n02 monitor on c001n08 - * Resource action: rsc_c001n02 monitor on c001n07 - * Resource action: rsc_c001n02 monitor on c001n05 - * Resource action: rsc_c001n02 monitor on c001n04 - * Resource action: rsc_c001n03 monitor on c001n09 - * Resource action: rsc_c001n03 monitor on c001n08 - * Resource action: rsc_c001n03 monitor on c001n07 - * Resource action: rsc_c001n03 monitor on c001n05 - * Resource action: rsc_c001n03 monitor on c001n04 - * Resource action: rsc_c001n03 monitor on c001n02 - * Resource action: rsc_c001n04 monitor on c001n09 - * Resource action: rsc_c001n04 monitor on c001n08 - * Resource action: rsc_c001n04 monitor 
on c001n07 - * Resource action: rsc_c001n04 monitor on c001n05 - * Resource action: rsc_c001n04 monitor on c001n03 - * Resource action: rsc_c001n04 monitor on c001n02 - * Resource action: rsc_c001n05 monitor on c001n09 - * Resource action: rsc_c001n05 monitor on c001n08 - * Resource action: rsc_c001n05 monitor on c001n07 - * Resource action: rsc_c001n05 monitor on c001n06 - * Resource action: rsc_c001n05 monitor on c001n04 - * Resource action: rsc_c001n05 monitor on c001n03 - * Resource action: rsc_c001n05 monitor on c001n02 - * Resource action: rsc_c001n06 monitor on c001n09 - * Resource action: rsc_c001n06 monitor on c001n08 - * Resource action: rsc_c001n06 monitor on c001n07 - * Resource action: rsc_c001n06 monitor on c001n05 - * Resource action: rsc_c001n06 monitor on c001n04 - * Resource action: rsc_c001n06 monitor on c001n03 - * Resource action: rsc_c001n07 monitor on c001n09 - * Resource action: rsc_c001n07 monitor on c001n08 - * Resource action: rsc_c001n07 monitor on c001n06 - * Resource action: rsc_c001n07 monitor on c001n05 - * Resource action: rsc_c001n07 monitor on c001n04 - * Resource action: rsc_c001n08 monitor on c001n09 - * Resource action: rsc_c001n08 monitor on c001n07 - * Resource action: rsc_c001n08 monitor on c001n05 - * Resource action: child_DoFencing:0 monitor on c001n09 - * Resource action: child_DoFencing:0 monitor on c001n08 - * Resource action: child_DoFencing:0 monitor on c001n07 - * Resource action: child_DoFencing:1 monitor on c001n08 - * Resource action: child_DoFencing:1 monitor on c001n07 - * Resource action: child_DoFencing:1 monitor on c001n02 - * Resource action: child_DoFencing:2 monitor on c001n09 - * Resource action: child_DoFencing:2 monitor on c001n08 - * Resource action: child_DoFencing:2 monitor on c001n07 - * Resource action: child_DoFencing:2 monitor on c001n03 - * Resource action: child_DoFencing:3 monitor on c001n08 - * Resource action: child_DoFencing:3 monitor on c001n04 - * Resource action: child_DoFencing:3 monitor on c001n02 - * Resource action: child_DoFencing:4 monitor on c001n09 - * Resource action: child_DoFencing:4 monitor on c001n05 - * Resource action: child_DoFencing:4 monitor on c001n03 - * Resource action: child_DoFencing:5 monitor on c001n08 - * Resource action: child_DoFencing:5 monitor on c001n05 - * Resource action: child_DoFencing:5 monitor on c001n04 - * Resource action: child_DoFencing:5 monitor on c001n02 - * Resource action: child_DoFencing:6 monitor on c001n09 - * Resource action: child_DoFencing:6 monitor on c001n07 - * Resource action: child_DoFencing:6 monitor on c001n05 - * Resource action: child_DoFencing:6 monitor on c001n04 - * Resource action: child_DoFencing:7 monitor on c001n08 - * Resource action: child_DoFencing:7 monitor on c001n07 - * Resource action: child_DoFencing:7 monitor on c001n05 - * Resource action: child_DoFencing:7 monitor on c001n04 - * Resource action: child_DoFencing:7 monitor on c001n03 - * Resource action: child_DoFencing:7 monitor on c001n02 - * Resource action: child_CloneSet:0 monitor on c001n09 - * Resource action: child_CloneSet:0 monitor on c001n08 - * Resource action: child_CloneSet:0 monitor on c001n07 - * Resource action: child_CloneSet:0 monitor on c001n06 - * Resource action: child_CloneSet:0 monitor on c001n05 - * Resource action: child_CloneSet:0 monitor on c001n04 - * Resource action: child_CloneSet:0 monitor on c001n03 - * Resource action: child_CloneSet:0 monitor on c001n02 - * Resource action: child_CloneSet:1 monitor on c001n09 - * Resource action: child_CloneSet:1 
monitor on c001n08 - * Resource action: child_CloneSet:1 monitor on c001n07 - * Resource action: child_CloneSet:1 monitor on c001n06 - * Resource action: child_CloneSet:1 monitor on c001n05 - * Resource action: child_CloneSet:1 monitor on c001n04 - * Resource action: child_CloneSet:1 monitor on c001n03 - * Resource action: child_CloneSet:1 monitor on c001n02 - * Resource action: child_CloneSet:2 monitor on c001n09 - * Resource action: child_CloneSet:2 monitor on c001n08 - * Resource action: child_CloneSet:2 monitor on c001n07 - * Resource action: child_CloneSet:2 monitor on c001n06 - * Resource action: child_CloneSet:2 monitor on c001n05 - * Resource action: child_CloneSet:2 monitor on c001n04 - * Resource action: child_CloneSet:2 monitor on c001n03 - * Resource action: child_CloneSet:2 monitor on c001n02 - * Resource action: child_CloneSet:3 monitor on c001n09 - * Resource action: child_CloneSet:3 monitor on c001n08 - * Resource action: child_CloneSet:3 monitor on c001n07 - * Resource action: child_CloneSet:3 monitor on c001n06 - * Resource action: child_CloneSet:3 monitor on c001n05 - * Resource action: child_CloneSet:3 monitor on c001n04 - * Resource action: child_CloneSet:3 monitor on c001n03 - * Resource action: child_CloneSet:3 monitor on c001n02 - * Resource action: child_CloneSet:4 monitor on c001n09 - * Resource action: child_CloneSet:4 monitor on c001n08 - * Resource action: child_CloneSet:4 monitor on c001n07 - * Resource action: child_CloneSet:4 monitor on c001n06 - * Resource action: child_CloneSet:4 monitor on c001n05 - * Resource action: child_CloneSet:4 monitor on c001n04 - * Resource action: child_CloneSet:4 monitor on c001n03 - * Resource action: child_CloneSet:4 monitor on c001n02 - * Resource action: child_CloneSet:5 monitor on c001n09 - * Resource action: child_CloneSet:5 monitor on c001n08 - * Resource action: child_CloneSet:5 monitor on c001n07 - * Resource action: child_CloneSet:5 monitor on c001n06 - * Resource action: child_CloneSet:5 monitor on c001n05 - * Resource action: child_CloneSet:5 monitor on c001n04 - * Resource action: child_CloneSet:5 monitor on c001n03 - * Resource action: child_CloneSet:5 monitor on c001n02 - * Resource action: child_CloneSet:6 monitor on c001n09 - * Resource action: child_CloneSet:6 monitor on c001n08 - * Resource action: child_CloneSet:6 monitor on c001n07 - * Resource action: child_CloneSet:6 monitor on c001n06 - * Resource action: child_CloneSet:6 monitor on c001n05 - * Resource action: child_CloneSet:6 monitor on c001n04 - * Resource action: child_CloneSet:6 monitor on c001n03 - * Resource action: child_CloneSet:6 monitor on c001n02 - * Resource action: child_CloneSet:7 monitor on c001n09 - * Resource action: child_CloneSet:7 monitor on c001n08 - * Resource action: child_CloneSet:7 monitor on c001n07 - * Resource action: child_CloneSet:7 monitor on c001n06 - * Resource action: child_CloneSet:7 monitor on c001n05 - * Resource action: child_CloneSet:7 monitor on c001n04 - * Resource action: child_CloneSet:7 monitor on c001n03 - * Resource action: child_CloneSet:7 monitor on c001n02 - * Pseudo action: CloneSet_start_0 - * Resource action: child_CloneSet:0 start on c001n02 - * Resource action: child_CloneSet:1 start on c001n03 - * Resource action: child_CloneSet:2 start on c001n04 - * Resource action: child_CloneSet:3 start on c001n05 - * Resource action: child_CloneSet:4 start on c001n06 - * Resource action: child_CloneSet:5 start on c001n07 - * Resource action: child_CloneSet:6 start on c001n08 - * Resource action: 
child_CloneSet:7 start on c001n09 - * Pseudo action: CloneSet_running_0 - * Resource action: child_CloneSet:0 monitor=5000 on c001n02 - * Resource action: child_CloneSet:1 monitor=5000 on c001n03 - * Resource action: child_CloneSet:2 monitor=5000 on c001n04 - * Resource action: child_CloneSet:3 monitor=5000 on c001n05 - * Resource action: child_CloneSet:4 monitor=5000 on c001n06 - * Resource action: child_CloneSet:5 monitor=5000 on c001n07 - * Resource action: child_CloneSet:6 monitor=5000 on c001n08 - * Resource action: child_CloneSet:7 monitor=5000 on c001n09 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 c001n08 c001n09 ] @@ -231,11 +71,11 @@ Revised Cluster Status: * child_DoFencing:6 (stonith:ssh): Started c001n08 * child_DoFencing:7 (stonith:ssh): Started c001n09 * Clone Set: CloneSet [child_CloneSet] (unique): - * child_CloneSet:0 (stonith:ssh): Started c001n02 - * child_CloneSet:1 (stonith:ssh): Started c001n03 - * child_CloneSet:2 (stonith:ssh): Started c001n04 - * child_CloneSet:3 (stonith:ssh): Started c001n05 - * child_CloneSet:4 (stonith:ssh): Started c001n06 - * child_CloneSet:5 (stonith:ssh): Started c001n07 - * child_CloneSet:6 (stonith:ssh): Started c001n08 - * child_CloneSet:7 (stonith:ssh): Started c001n09 + * child_CloneSet:0 (stonith:ssh): Stopped + * child_CloneSet:1 (stonith:ssh): Stopped + * child_CloneSet:2 (stonith:ssh): Stopped + * child_CloneSet:3 (stonith:ssh): Stopped + * child_CloneSet:4 (stonith:ssh): Stopped + * child_CloneSet:5 (stonith:ssh): Stopped + * child_CloneSet:6 (stonith:ssh): Stopped + * child_CloneSet:7 (stonith:ssh): Stopped diff --git a/cts/scheduler/summary/interleave-2.summary b/cts/scheduler/summary/interleave-2.summary index fe16667be3a..cefdfad0dcd 100644 --- a/cts/scheduler/summary/interleave-2.summary +++ b/cts/scheduler/summary/interleave-2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 c001n08 c001n09 ] @@ -42,172 +44,10 @@ Transition Summary: * Start child_CloneSet:7 ( c001n09 ) Executing Cluster Transition: - * Resource action: DcIPaddr monitor on c001n08 - * Resource action: DcIPaddr monitor on c001n07 - * Resource action: DcIPaddr monitor on c001n06 - * Resource action: DcIPaddr monitor on c001n05 - * Resource action: DcIPaddr monitor on c001n04 - * Resource action: DcIPaddr monitor on c001n03 - * Resource action: DcIPaddr monitor on c001n02 - * Resource action: rsc_c001n09 monitor on c001n09 - * Resource action: rsc_c001n09 monitor on c001n08 - * Resource action: rsc_c001n09 monitor on c001n07 - * Resource action: rsc_c001n09 monitor on c001n05 - * Resource action: rsc_c001n09 monitor on c001n04 - * Resource action: rsc_c001n09 monitor on c001n03 - * Resource action: rsc_c001n09 monitor on c001n02 - * Resource action: rsc_c001n02 monitor on c001n09 - * Resource action: rsc_c001n02 monitor on c001n08 - * Resource action: rsc_c001n02 monitor on c001n07 - * Resource action: rsc_c001n02 monitor on c001n05 - * Resource action: rsc_c001n02 monitor on c001n04 - * Resource action: rsc_c001n03 monitor on c001n09 - * Resource action: rsc_c001n03 monitor on c001n08 - * Resource action: rsc_c001n03 monitor on c001n07 - * Resource action: rsc_c001n03 monitor on c001n05 - * Resource action: rsc_c001n03 monitor on c001n04 - * Resource action: rsc_c001n03 monitor on c001n02 - * Resource action: rsc_c001n04 monitor on c001n09 - * Resource action: rsc_c001n04 monitor on c001n08 - 
* Resource action: rsc_c001n04 monitor on c001n07 - * Resource action: rsc_c001n04 monitor on c001n05 - * Resource action: rsc_c001n04 monitor on c001n03 - * Resource action: rsc_c001n04 monitor on c001n02 - * Resource action: rsc_c001n05 monitor on c001n09 - * Resource action: rsc_c001n05 monitor on c001n08 - * Resource action: rsc_c001n05 monitor on c001n07 - * Resource action: rsc_c001n05 monitor on c001n06 - * Resource action: rsc_c001n05 monitor on c001n04 - * Resource action: rsc_c001n05 monitor on c001n03 - * Resource action: rsc_c001n05 monitor on c001n02 - * Resource action: rsc_c001n06 monitor on c001n09 - * Resource action: rsc_c001n06 monitor on c001n08 - * Resource action: rsc_c001n06 monitor on c001n07 - * Resource action: rsc_c001n06 monitor on c001n05 - * Resource action: rsc_c001n06 monitor on c001n04 - * Resource action: rsc_c001n06 monitor on c001n03 - * Resource action: rsc_c001n07 monitor on c001n09 - * Resource action: rsc_c001n07 monitor on c001n08 - * Resource action: rsc_c001n07 monitor on c001n06 - * Resource action: rsc_c001n07 monitor on c001n05 - * Resource action: rsc_c001n07 monitor on c001n04 - * Resource action: rsc_c001n08 monitor on c001n09 - * Resource action: rsc_c001n08 monitor on c001n07 - * Resource action: rsc_c001n08 monitor on c001n05 - * Resource action: child_DoFencing:0 monitor on c001n09 - * Resource action: child_DoFencing:0 monitor on c001n08 - * Resource action: child_DoFencing:0 monitor on c001n07 - * Resource action: child_DoFencing:1 monitor on c001n08 - * Resource action: child_DoFencing:1 monitor on c001n07 - * Resource action: child_DoFencing:1 monitor on c001n02 - * Resource action: child_DoFencing:2 monitor on c001n09 - * Resource action: child_DoFencing:2 monitor on c001n08 - * Resource action: child_DoFencing:2 monitor on c001n07 - * Resource action: child_DoFencing:2 monitor on c001n03 - * Resource action: child_DoFencing:3 monitor on c001n08 - * Resource action: child_DoFencing:3 monitor on c001n04 - * Resource action: child_DoFencing:3 monitor on c001n02 - * Resource action: child_DoFencing:4 monitor on c001n09 - * Resource action: child_DoFencing:4 monitor on c001n05 - * Resource action: child_DoFencing:4 monitor on c001n03 - * Resource action: child_DoFencing:5 monitor on c001n08 - * Resource action: child_DoFencing:5 monitor on c001n05 - * Resource action: child_DoFencing:5 monitor on c001n04 - * Resource action: child_DoFencing:5 monitor on c001n02 - * Resource action: child_DoFencing:6 monitor on c001n09 - * Resource action: child_DoFencing:6 monitor on c001n07 - * Resource action: child_DoFencing:6 monitor on c001n05 - * Resource action: child_DoFencing:6 monitor on c001n04 - * Resource action: child_DoFencing:7 monitor on c001n08 - * Resource action: child_DoFencing:7 monitor on c001n07 - * Resource action: child_DoFencing:7 monitor on c001n05 - * Resource action: child_DoFencing:7 monitor on c001n04 - * Resource action: child_DoFencing:7 monitor on c001n03 - * Resource action: child_DoFencing:7 monitor on c001n02 - * Resource action: child_CloneSet:0 monitor on c001n09 - * Resource action: child_CloneSet:0 monitor on c001n08 - * Resource action: child_CloneSet:0 monitor on c001n07 - * Resource action: child_CloneSet:0 monitor on c001n06 - * Resource action: child_CloneSet:0 monitor on c001n05 - * Resource action: child_CloneSet:0 monitor on c001n04 - * Resource action: child_CloneSet:0 monitor on c001n03 - * Resource action: child_CloneSet:0 monitor on c001n02 - * Resource action: child_CloneSet:1 monitor on c001n09 - * 
Resource action: child_CloneSet:1 monitor on c001n08 - * Resource action: child_CloneSet:1 monitor on c001n07 - * Resource action: child_CloneSet:1 monitor on c001n06 - * Resource action: child_CloneSet:1 monitor on c001n05 - * Resource action: child_CloneSet:1 monitor on c001n04 - * Resource action: child_CloneSet:1 monitor on c001n03 - * Resource action: child_CloneSet:1 monitor on c001n02 - * Resource action: child_CloneSet:2 monitor on c001n09 - * Resource action: child_CloneSet:2 monitor on c001n08 - * Resource action: child_CloneSet:2 monitor on c001n07 - * Resource action: child_CloneSet:2 monitor on c001n06 - * Resource action: child_CloneSet:2 monitor on c001n05 - * Resource action: child_CloneSet:2 monitor on c001n04 - * Resource action: child_CloneSet:2 monitor on c001n03 - * Resource action: child_CloneSet:2 monitor on c001n02 - * Resource action: child_CloneSet:3 monitor on c001n09 - * Resource action: child_CloneSet:3 monitor on c001n08 - * Resource action: child_CloneSet:3 monitor on c001n07 - * Resource action: child_CloneSet:3 monitor on c001n06 - * Resource action: child_CloneSet:3 monitor on c001n05 - * Resource action: child_CloneSet:3 monitor on c001n04 - * Resource action: child_CloneSet:3 monitor on c001n03 - * Resource action: child_CloneSet:3 monitor on c001n02 - * Resource action: child_CloneSet:4 monitor on c001n09 - * Resource action: child_CloneSet:4 monitor on c001n08 - * Resource action: child_CloneSet:4 monitor on c001n07 - * Resource action: child_CloneSet:4 monitor on c001n06 - * Resource action: child_CloneSet:4 monitor on c001n05 - * Resource action: child_CloneSet:4 monitor on c001n04 - * Resource action: child_CloneSet:4 monitor on c001n03 - * Resource action: child_CloneSet:4 monitor on c001n02 - * Resource action: child_CloneSet:5 monitor on c001n09 - * Resource action: child_CloneSet:5 monitor on c001n08 - * Resource action: child_CloneSet:5 monitor on c001n07 - * Resource action: child_CloneSet:5 monitor on c001n06 - * Resource action: child_CloneSet:5 monitor on c001n05 - * Resource action: child_CloneSet:5 monitor on c001n04 - * Resource action: child_CloneSet:5 monitor on c001n03 - * Resource action: child_CloneSet:5 monitor on c001n02 - * Resource action: child_CloneSet:6 monitor on c001n09 - * Resource action: child_CloneSet:6 monitor on c001n08 - * Resource action: child_CloneSet:6 monitor on c001n07 - * Resource action: child_CloneSet:6 monitor on c001n06 - * Resource action: child_CloneSet:6 monitor on c001n05 - * Resource action: child_CloneSet:6 monitor on c001n04 - * Resource action: child_CloneSet:6 monitor on c001n03 - * Resource action: child_CloneSet:6 monitor on c001n02 - * Resource action: child_CloneSet:7 monitor on c001n09 - * Resource action: child_CloneSet:7 monitor on c001n08 - * Resource action: child_CloneSet:7 monitor on c001n07 - * Resource action: child_CloneSet:7 monitor on c001n06 - * Resource action: child_CloneSet:7 monitor on c001n05 - * Resource action: child_CloneSet:7 monitor on c001n04 - * Resource action: child_CloneSet:7 monitor on c001n03 - * Resource action: child_CloneSet:7 monitor on c001n02 - * Pseudo action: CloneSet_start_0 - * Resource action: child_CloneSet:0 start on c001n02 - * Resource action: child_CloneSet:1 start on c001n03 - * Resource action: child_CloneSet:2 start on c001n04 - * Resource action: child_CloneSet:3 start on c001n05 - * Resource action: child_CloneSet:4 start on c001n06 - * Resource action: child_CloneSet:5 start on c001n07 - * Resource action: child_CloneSet:6 start on c001n08 - 
* Resource action: child_CloneSet:7 start on c001n09 - * Pseudo action: CloneSet_running_0 - * Resource action: child_CloneSet:0 monitor=5000 on c001n02 - * Resource action: child_CloneSet:1 monitor=5000 on c001n03 - * Resource action: child_CloneSet:2 monitor=5000 on c001n04 - * Resource action: child_CloneSet:3 monitor=5000 on c001n05 - * Resource action: child_CloneSet:4 monitor=5000 on c001n06 - * Resource action: child_CloneSet:5 monitor=5000 on c001n07 - * Resource action: child_CloneSet:6 monitor=5000 on c001n08 - * Resource action: child_CloneSet:7 monitor=5000 on c001n09 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 c001n08 c001n09 ] @@ -231,11 +71,11 @@ Revised Cluster Status: * child_DoFencing:6 (stonith:ssh): Started c001n08 * child_DoFencing:7 (stonith:ssh): Started c001n09 * Clone Set: CloneSet [child_CloneSet] (unique): - * child_CloneSet:0 (stonith:ssh): Started c001n02 - * child_CloneSet:1 (stonith:ssh): Started c001n03 - * child_CloneSet:2 (stonith:ssh): Started c001n04 - * child_CloneSet:3 (stonith:ssh): Started c001n05 - * child_CloneSet:4 (stonith:ssh): Started c001n06 - * child_CloneSet:5 (stonith:ssh): Started c001n07 - * child_CloneSet:6 (stonith:ssh): Started c001n08 - * child_CloneSet:7 (stonith:ssh): Started c001n09 + * child_CloneSet:0 (stonith:ssh): Stopped + * child_CloneSet:1 (stonith:ssh): Stopped + * child_CloneSet:2 (stonith:ssh): Stopped + * child_CloneSet:3 (stonith:ssh): Stopped + * child_CloneSet:4 (stonith:ssh): Stopped + * child_CloneSet:5 (stonith:ssh): Stopped + * child_CloneSet:6 (stonith:ssh): Stopped + * child_CloneSet:7 (stonith:ssh): Stopped diff --git a/cts/scheduler/summary/interleave-3.summary b/cts/scheduler/summary/interleave-3.summary index fe16667be3a..cefdfad0dcd 100644 --- a/cts/scheduler/summary/interleave-3.summary +++ b/cts/scheduler/summary/interleave-3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 c001n08 c001n09 ] @@ -42,172 +44,10 @@ Transition Summary: * Start child_CloneSet:7 ( c001n09 ) Executing Cluster Transition: - * Resource action: DcIPaddr monitor on c001n08 - * Resource action: DcIPaddr monitor on c001n07 - * Resource action: DcIPaddr monitor on c001n06 - * Resource action: DcIPaddr monitor on c001n05 - * Resource action: DcIPaddr monitor on c001n04 - * Resource action: DcIPaddr monitor on c001n03 - * Resource action: DcIPaddr monitor on c001n02 - * Resource action: rsc_c001n09 monitor on c001n09 - * Resource action: rsc_c001n09 monitor on c001n08 - * Resource action: rsc_c001n09 monitor on c001n07 - * Resource action: rsc_c001n09 monitor on c001n05 - * Resource action: rsc_c001n09 monitor on c001n04 - * Resource action: rsc_c001n09 monitor on c001n03 - * Resource action: rsc_c001n09 monitor on c001n02 - * Resource action: rsc_c001n02 monitor on c001n09 - * Resource action: rsc_c001n02 monitor on c001n08 - * Resource action: rsc_c001n02 monitor on c001n07 - * Resource action: rsc_c001n02 monitor on c001n05 - * Resource action: rsc_c001n02 monitor on c001n04 - * Resource action: rsc_c001n03 monitor on c001n09 - * Resource action: rsc_c001n03 monitor on c001n08 - * Resource action: rsc_c001n03 monitor on c001n07 - * Resource action: rsc_c001n03 monitor on c001n05 - * Resource action: rsc_c001n03 monitor on c001n04 - * Resource action: rsc_c001n03 monitor on c001n02 - * Resource action: rsc_c001n04 monitor on c001n09 - * Resource action: rsc_c001n04 
monitor on c001n08 - * Resource action: rsc_c001n04 monitor on c001n07 - * Resource action: rsc_c001n04 monitor on c001n05 - * Resource action: rsc_c001n04 monitor on c001n03 - * Resource action: rsc_c001n04 monitor on c001n02 - * Resource action: rsc_c001n05 monitor on c001n09 - * Resource action: rsc_c001n05 monitor on c001n08 - * Resource action: rsc_c001n05 monitor on c001n07 - * Resource action: rsc_c001n05 monitor on c001n06 - * Resource action: rsc_c001n05 monitor on c001n04 - * Resource action: rsc_c001n05 monitor on c001n03 - * Resource action: rsc_c001n05 monitor on c001n02 - * Resource action: rsc_c001n06 monitor on c001n09 - * Resource action: rsc_c001n06 monitor on c001n08 - * Resource action: rsc_c001n06 monitor on c001n07 - * Resource action: rsc_c001n06 monitor on c001n05 - * Resource action: rsc_c001n06 monitor on c001n04 - * Resource action: rsc_c001n06 monitor on c001n03 - * Resource action: rsc_c001n07 monitor on c001n09 - * Resource action: rsc_c001n07 monitor on c001n08 - * Resource action: rsc_c001n07 monitor on c001n06 - * Resource action: rsc_c001n07 monitor on c001n05 - * Resource action: rsc_c001n07 monitor on c001n04 - * Resource action: rsc_c001n08 monitor on c001n09 - * Resource action: rsc_c001n08 monitor on c001n07 - * Resource action: rsc_c001n08 monitor on c001n05 - * Resource action: child_DoFencing:0 monitor on c001n09 - * Resource action: child_DoFencing:0 monitor on c001n08 - * Resource action: child_DoFencing:0 monitor on c001n07 - * Resource action: child_DoFencing:1 monitor on c001n08 - * Resource action: child_DoFencing:1 monitor on c001n07 - * Resource action: child_DoFencing:1 monitor on c001n02 - * Resource action: child_DoFencing:2 monitor on c001n09 - * Resource action: child_DoFencing:2 monitor on c001n08 - * Resource action: child_DoFencing:2 monitor on c001n07 - * Resource action: child_DoFencing:2 monitor on c001n03 - * Resource action: child_DoFencing:3 monitor on c001n08 - * Resource action: child_DoFencing:3 monitor on c001n04 - * Resource action: child_DoFencing:3 monitor on c001n02 - * Resource action: child_DoFencing:4 monitor on c001n09 - * Resource action: child_DoFencing:4 monitor on c001n05 - * Resource action: child_DoFencing:4 monitor on c001n03 - * Resource action: child_DoFencing:5 monitor on c001n08 - * Resource action: child_DoFencing:5 monitor on c001n05 - * Resource action: child_DoFencing:5 monitor on c001n04 - * Resource action: child_DoFencing:5 monitor on c001n02 - * Resource action: child_DoFencing:6 monitor on c001n09 - * Resource action: child_DoFencing:6 monitor on c001n07 - * Resource action: child_DoFencing:6 monitor on c001n05 - * Resource action: child_DoFencing:6 monitor on c001n04 - * Resource action: child_DoFencing:7 monitor on c001n08 - * Resource action: child_DoFencing:7 monitor on c001n07 - * Resource action: child_DoFencing:7 monitor on c001n05 - * Resource action: child_DoFencing:7 monitor on c001n04 - * Resource action: child_DoFencing:7 monitor on c001n03 - * Resource action: child_DoFencing:7 monitor on c001n02 - * Resource action: child_CloneSet:0 monitor on c001n09 - * Resource action: child_CloneSet:0 monitor on c001n08 - * Resource action: child_CloneSet:0 monitor on c001n07 - * Resource action: child_CloneSet:0 monitor on c001n06 - * Resource action: child_CloneSet:0 monitor on c001n05 - * Resource action: child_CloneSet:0 monitor on c001n04 - * Resource action: child_CloneSet:0 monitor on c001n03 - * Resource action: child_CloneSet:0 monitor on c001n02 - * Resource action: child_CloneSet:1 
monitor on c001n09 - * Resource action: child_CloneSet:1 monitor on c001n08 - * Resource action: child_CloneSet:1 monitor on c001n07 - * Resource action: child_CloneSet:1 monitor on c001n06 - * Resource action: child_CloneSet:1 monitor on c001n05 - * Resource action: child_CloneSet:1 monitor on c001n04 - * Resource action: child_CloneSet:1 monitor on c001n03 - * Resource action: child_CloneSet:1 monitor on c001n02 - * Resource action: child_CloneSet:2 monitor on c001n09 - * Resource action: child_CloneSet:2 monitor on c001n08 - * Resource action: child_CloneSet:2 monitor on c001n07 - * Resource action: child_CloneSet:2 monitor on c001n06 - * Resource action: child_CloneSet:2 monitor on c001n05 - * Resource action: child_CloneSet:2 monitor on c001n04 - * Resource action: child_CloneSet:2 monitor on c001n03 - * Resource action: child_CloneSet:2 monitor on c001n02 - * Resource action: child_CloneSet:3 monitor on c001n09 - * Resource action: child_CloneSet:3 monitor on c001n08 - * Resource action: child_CloneSet:3 monitor on c001n07 - * Resource action: child_CloneSet:3 monitor on c001n06 - * Resource action: child_CloneSet:3 monitor on c001n05 - * Resource action: child_CloneSet:3 monitor on c001n04 - * Resource action: child_CloneSet:3 monitor on c001n03 - * Resource action: child_CloneSet:3 monitor on c001n02 - * Resource action: child_CloneSet:4 monitor on c001n09 - * Resource action: child_CloneSet:4 monitor on c001n08 - * Resource action: child_CloneSet:4 monitor on c001n07 - * Resource action: child_CloneSet:4 monitor on c001n06 - * Resource action: child_CloneSet:4 monitor on c001n05 - * Resource action: child_CloneSet:4 monitor on c001n04 - * Resource action: child_CloneSet:4 monitor on c001n03 - * Resource action: child_CloneSet:4 monitor on c001n02 - * Resource action: child_CloneSet:5 monitor on c001n09 - * Resource action: child_CloneSet:5 monitor on c001n08 - * Resource action: child_CloneSet:5 monitor on c001n07 - * Resource action: child_CloneSet:5 monitor on c001n06 - * Resource action: child_CloneSet:5 monitor on c001n05 - * Resource action: child_CloneSet:5 monitor on c001n04 - * Resource action: child_CloneSet:5 monitor on c001n03 - * Resource action: child_CloneSet:5 monitor on c001n02 - * Resource action: child_CloneSet:6 monitor on c001n09 - * Resource action: child_CloneSet:6 monitor on c001n08 - * Resource action: child_CloneSet:6 monitor on c001n07 - * Resource action: child_CloneSet:6 monitor on c001n06 - * Resource action: child_CloneSet:6 monitor on c001n05 - * Resource action: child_CloneSet:6 monitor on c001n04 - * Resource action: child_CloneSet:6 monitor on c001n03 - * Resource action: child_CloneSet:6 monitor on c001n02 - * Resource action: child_CloneSet:7 monitor on c001n09 - * Resource action: child_CloneSet:7 monitor on c001n08 - * Resource action: child_CloneSet:7 monitor on c001n07 - * Resource action: child_CloneSet:7 monitor on c001n06 - * Resource action: child_CloneSet:7 monitor on c001n05 - * Resource action: child_CloneSet:7 monitor on c001n04 - * Resource action: child_CloneSet:7 monitor on c001n03 - * Resource action: child_CloneSet:7 monitor on c001n02 - * Pseudo action: CloneSet_start_0 - * Resource action: child_CloneSet:0 start on c001n02 - * Resource action: child_CloneSet:1 start on c001n03 - * Resource action: child_CloneSet:2 start on c001n04 - * Resource action: child_CloneSet:3 start on c001n05 - * Resource action: child_CloneSet:4 start on c001n06 - * Resource action: child_CloneSet:5 start on c001n07 - * Resource action: 
child_CloneSet:6 start on c001n08 - * Resource action: child_CloneSet:7 start on c001n09 - * Pseudo action: CloneSet_running_0 - * Resource action: child_CloneSet:0 monitor=5000 on c001n02 - * Resource action: child_CloneSet:1 monitor=5000 on c001n03 - * Resource action: child_CloneSet:2 monitor=5000 on c001n04 - * Resource action: child_CloneSet:3 monitor=5000 on c001n05 - * Resource action: child_CloneSet:4 monitor=5000 on c001n06 - * Resource action: child_CloneSet:5 monitor=5000 on c001n07 - * Resource action: child_CloneSet:6 monitor=5000 on c001n08 - * Resource action: child_CloneSet:7 monitor=5000 on c001n09 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 c001n08 c001n09 ] @@ -231,11 +71,11 @@ Revised Cluster Status: * child_DoFencing:6 (stonith:ssh): Started c001n08 * child_DoFencing:7 (stonith:ssh): Started c001n09 * Clone Set: CloneSet [child_CloneSet] (unique): - * child_CloneSet:0 (stonith:ssh): Started c001n02 - * child_CloneSet:1 (stonith:ssh): Started c001n03 - * child_CloneSet:2 (stonith:ssh): Started c001n04 - * child_CloneSet:3 (stonith:ssh): Started c001n05 - * child_CloneSet:4 (stonith:ssh): Started c001n06 - * child_CloneSet:5 (stonith:ssh): Started c001n07 - * child_CloneSet:6 (stonith:ssh): Started c001n08 - * child_CloneSet:7 (stonith:ssh): Started c001n09 + * child_CloneSet:0 (stonith:ssh): Stopped + * child_CloneSet:1 (stonith:ssh): Stopped + * child_CloneSet:2 (stonith:ssh): Stopped + * child_CloneSet:3 (stonith:ssh): Stopped + * child_CloneSet:4 (stonith:ssh): Stopped + * child_CloneSet:5 (stonith:ssh): Stopped + * child_CloneSet:6 (stonith:ssh): Stopped + * child_CloneSet:7 (stonith:ssh): Stopped diff --git a/cts/scheduler/summary/interleave-pseudo-stop.summary b/cts/scheduler/summary/interleave-pseudo-stop.summary index 619e40dc388..edefff89724 100644 --- a/cts/scheduler/summary/interleave-pseudo-stop.summary +++ b/cts/scheduler/summary/interleave-pseudo-stop.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node1: UNCLEAN (offline) * Online: [ node2 ] @@ -23,61 +25,28 @@ Transition Summary: * Stop evmsclone:1 ( node1 ) due to node availability * Stop imagestoreclone:1 ( node1 ) due to node availability * Stop configstoreclone:1 ( node1 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Pseudo action: stonithcloneset_stop_0 - * Pseudo action: evmscloneset_pre_notify_stop_0 - * Pseudo action: imagestorecloneset_pre_notify_stop_0 - * Pseudo action: configstorecloneset_pre_notify_stop_0 - * Fencing node1 (reboot) - * Pseudo action: stonithclone:0_stop_0 - * Pseudo action: stonithcloneset_stopped_0 - * Resource action: evmsclone:1 notify on node2 - * Pseudo action: evmsclone:0_post_notify_stop_0 - * Pseudo action: evmscloneset_confirmed-pre_notify_stop_0 - * Resource action: imagestoreclone:1 notify on node2 - * Pseudo action: imagestoreclone:0_post_notify_stop_0 - * Pseudo action: imagestorecloneset_confirmed-pre_notify_stop_0 - * Pseudo action: imagestorecloneset_stop_0 - * Resource action: configstoreclone:1 notify on node2 - * Pseudo action: configstoreclone:0_post_notify_stop_0 - * Pseudo action: configstorecloneset_confirmed-pre_notify_stop_0 - * Pseudo action: configstorecloneset_stop_0 - * Pseudo action: imagestoreclone:0_stop_0 - * Pseudo action: imagestorecloneset_stopped_0 - * Pseudo action: configstoreclone:0_stop_0 - * Pseudo action: configstorecloneset_stopped_0 - 
* Pseudo action: imagestorecloneset_post_notify_stopped_0 - * Pseudo action: configstorecloneset_post_notify_stopped_0 - * Resource action: imagestoreclone:1 notify on node2 - * Pseudo action: imagestoreclone:0_notified_0 - * Pseudo action: imagestorecloneset_confirmed-post_notify_stopped_0 - * Resource action: configstoreclone:1 notify on node2 - * Pseudo action: configstoreclone:0_notified_0 - * Pseudo action: configstorecloneset_confirmed-post_notify_stopped_0 - * Pseudo action: evmscloneset_stop_0 - * Pseudo action: evmsclone:0_stop_0 - * Pseudo action: evmscloneset_stopped_0 - * Pseudo action: evmscloneset_post_notify_stopped_0 - * Resource action: evmsclone:1 notify on node2 - * Pseudo action: evmsclone:0_notified_0 - * Pseudo action: evmscloneset_confirmed-post_notify_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node node1: UNCLEAN (offline) * Online: [ node2 ] - * OFFLINE: [ node1 ] * Full List of Resources: * Clone Set: stonithcloneset [stonithclone]: + * stonithclone (stonith:external/ssh): Started node1 (UNCLEAN) * Started: [ node2 ] - * Stopped: [ node1 ] * Clone Set: evmscloneset [evmsclone]: + * evmsclone (ocf:heartbeat:EvmsSCC): Started node1 (UNCLEAN) * Started: [ node2 ] - * Stopped: [ node1 ] * Clone Set: imagestorecloneset [imagestoreclone] (disabled): + * imagestoreclone (ocf:heartbeat:Filesystem): Started node1 (UNCLEAN) * Started: [ node2 ] - * Stopped (disabled): [ node1 ] * Clone Set: configstorecloneset [configstoreclone]: + * configstoreclone (ocf:heartbeat:Filesystem): Started node1 (UNCLEAN) * Started: [ node2 ] - * Stopped: [ node1 ] diff --git a/cts/scheduler/summary/interleave-restart.summary b/cts/scheduler/summary/interleave-restart.summary index 8862aacbb08..c3c68d277ba 100644 --- a/cts/scheduler/summary/interleave-restart.summary +++ b/cts/scheduler/summary/interleave-restart.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -19,70 +21,10 @@ Transition Summary: * Restart configstoreclone:1 ( node1 ) due to required evmsclone:1 start Executing Cluster Transition: - * Pseudo action: evmscloneset_pre_notify_stop_0 - * Pseudo action: imagestorecloneset_pre_notify_stop_0 - * Pseudo action: configstorecloneset_pre_notify_stop_0 - * Resource action: evmsclone:1 notify on node2 - * Resource action: evmsclone:0 notify on node1 - * Pseudo action: evmscloneset_confirmed-pre_notify_stop_0 - * Resource action: imagestoreclone:1 notify on node2 - * Resource action: imagestoreclone:0 notify on node1 - * Pseudo action: imagestorecloneset_confirmed-pre_notify_stop_0 - * Pseudo action: imagestorecloneset_stop_0 - * Resource action: configstoreclone:1 notify on node2 - * Resource action: configstoreclone:0 notify on node1 - * Pseudo action: configstorecloneset_confirmed-pre_notify_stop_0 - * Pseudo action: configstorecloneset_stop_0 - * Resource action: imagestoreclone:0 stop on node1 - * Pseudo action: imagestorecloneset_stopped_0 - * Resource action: configstoreclone:0 stop on node1 - * Pseudo action: configstorecloneset_stopped_0 - * Pseudo action: imagestorecloneset_post_notify_stopped_0 - * Pseudo action: configstorecloneset_post_notify_stopped_0 - * Resource action: imagestoreclone:1 notify on node2 - * Pseudo action: imagestorecloneset_confirmed-post_notify_stopped_0 - * Pseudo action: imagestorecloneset_pre_notify_start_0 - * Resource action: configstoreclone:1 notify on node2 - * Pseudo action: configstorecloneset_confirmed-post_notify_stopped_0 - * Pseudo action: 
configstorecloneset_pre_notify_start_0 - * Pseudo action: evmscloneset_stop_0 - * Resource action: imagestoreclone:1 notify on node2 - * Pseudo action: imagestorecloneset_confirmed-pre_notify_start_0 - * Resource action: configstoreclone:1 notify on node2 - * Pseudo action: configstorecloneset_confirmed-pre_notify_start_0 - * Resource action: evmsclone:0 stop on node1 - * Pseudo action: evmscloneset_stopped_0 - * Pseudo action: evmscloneset_post_notify_stopped_0 - * Resource action: evmsclone:1 notify on node2 - * Pseudo action: evmscloneset_confirmed-post_notify_stopped_0 - * Pseudo action: evmscloneset_pre_notify_start_0 - * Resource action: evmsclone:1 notify on node2 - * Pseudo action: evmscloneset_confirmed-pre_notify_start_0 - * Pseudo action: evmscloneset_start_0 - * Resource action: evmsclone:0 start on node1 - * Pseudo action: evmscloneset_running_0 - * Pseudo action: evmscloneset_post_notify_running_0 - * Resource action: evmsclone:1 notify on node2 - * Resource action: evmsclone:0 notify on node1 - * Pseudo action: evmscloneset_confirmed-post_notify_running_0 - * Pseudo action: imagestorecloneset_start_0 - * Pseudo action: configstorecloneset_start_0 - * Resource action: imagestoreclone:0 start on node1 - * Pseudo action: imagestorecloneset_running_0 - * Resource action: configstoreclone:0 start on node1 - * Pseudo action: configstorecloneset_running_0 - * Pseudo action: imagestorecloneset_post_notify_running_0 - * Pseudo action: configstorecloneset_post_notify_running_0 - * Resource action: imagestoreclone:1 notify on node2 - * Resource action: imagestoreclone:0 notify on node1 - * Pseudo action: imagestorecloneset_confirmed-post_notify_running_0 - * Resource action: configstoreclone:1 notify on node2 - * Resource action: configstoreclone:0 notify on node1 - * Pseudo action: configstorecloneset_confirmed-post_notify_running_0 - * Resource action: imagestoreclone:0 monitor=20000 on node1 - * Resource action: configstoreclone:0 monitor=20000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -90,7 +32,8 @@ Revised Cluster Status: * Clone Set: stonithcloneset [stonithclone]: * Started: [ node1 node2 ] * Clone Set: evmscloneset [evmsclone]: - * Started: [ node1 node2 ] + * evmsclone (ocf:heartbeat:EvmsSCC): FAILED node1 + * Started: [ node2 ] * Clone Set: imagestorecloneset [imagestoreclone] (disabled): * Started: [ node1 node2 ] * Clone Set: configstorecloneset [configstoreclone]: diff --git a/cts/scheduler/summary/interleave-stop.summary b/cts/scheduler/summary/interleave-stop.summary index 560c540d6a3..94cf6bfe389 100644 --- a/cts/scheduler/summary/interleave-stop.summary +++ b/cts/scheduler/summary/interleave-stop.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node1: standby (with active resources) * Online: [ node2 ] @@ -18,57 +20,24 @@ Transition Summary: * Stop evmsclone:1 ( node1 ) due to node availability * Stop imagestoreclone:1 ( node1 ) due to node availability * Stop configstoreclone:1 ( node1 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Pseudo action: stonithcloneset_stop_0 - * Pseudo action: evmscloneset_pre_notify_stop_0 - * Pseudo action: imagestorecloneset_pre_notify_stop_0 - * Pseudo action: configstorecloneset_pre_notify_stop_0 - * Resource action: stonithclone:0 stop on node1 - * Pseudo action: stonithcloneset_stopped_0 - * Resource action: evmsclone:1 notify on node2 - * Resource action: 
evmsclone:0 notify on node1 - * Pseudo action: evmscloneset_confirmed-pre_notify_stop_0 - * Resource action: imagestoreclone:1 notify on node2 - * Resource action: imagestoreclone:0 notify on node1 - * Pseudo action: imagestorecloneset_confirmed-pre_notify_stop_0 - * Pseudo action: imagestorecloneset_stop_0 - * Resource action: configstoreclone:1 notify on node2 - * Resource action: configstoreclone:0 notify on node1 - * Pseudo action: configstorecloneset_confirmed-pre_notify_stop_0 - * Pseudo action: configstorecloneset_stop_0 - * Resource action: imagestoreclone:0 stop on node1 - * Pseudo action: imagestorecloneset_stopped_0 - * Resource action: configstoreclone:0 stop on node1 - * Pseudo action: configstorecloneset_stopped_0 - * Pseudo action: imagestorecloneset_post_notify_stopped_0 - * Pseudo action: configstorecloneset_post_notify_stopped_0 - * Resource action: imagestoreclone:1 notify on node2 - * Pseudo action: imagestorecloneset_confirmed-post_notify_stopped_0 - * Resource action: configstoreclone:1 notify on node2 - * Pseudo action: configstorecloneset_confirmed-post_notify_stopped_0 - * Pseudo action: evmscloneset_stop_0 - * Resource action: evmsclone:0 stop on node1 - * Pseudo action: evmscloneset_stopped_0 - * Pseudo action: evmscloneset_post_notify_stopped_0 - * Resource action: evmsclone:1 notify on node2 - * Pseudo action: evmscloneset_confirmed-post_notify_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: - * Node node1: standby + * Node node1: standby (with active resources) * Online: [ node2 ] * Full List of Resources: * Clone Set: stonithcloneset [stonithclone]: - * Started: [ node2 ] - * Stopped: [ node1 ] + * Started: [ node1 node2 ] * Clone Set: evmscloneset [evmsclone]: - * Started: [ node2 ] - * Stopped: [ node1 ] + * Started: [ node1 node2 ] * Clone Set: imagestorecloneset [imagestoreclone] (disabled): - * Started: [ node2 ] - * Stopped (disabled): [ node1 ] + * Started: [ node1 node2 ] * Clone Set: configstorecloneset [configstoreclone]: - * Started: [ node2 ] - * Stopped: [ node1 ] + * Started: [ node1 node2 ] diff --git a/cts/scheduler/summary/intervals.summary b/cts/scheduler/summary/intervals.summary index f6dc2e4b7fb..68e97fd692d 100644 --- a/cts/scheduler/summary/intervals.summary +++ b/cts/scheduler/summary/intervals.summary @@ -2,6 +2,9 @@ Using the original execution date of: 2018-03-21 23:12:42Z 0 of 7 resource instances DISABLED and 1 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] @@ -20,33 +23,20 @@ Transition Summary: * Move rsc6 ( rhel7-2 -> rhel7-1 ) Executing Cluster Transition: - * Resource action: rsc2 monitor on rhel7-5 - * Resource action: rsc2 monitor on rhel7-4 - * Resource action: rsc2 monitor on rhel7-3 - * Resource action: rsc2 monitor on rhel7-2 - * Resource action: rsc2 monitor on rhel7-1 - * Resource action: rsc5 stop on rhel7-1 - * Resource action: rsc5 cancel=25000 on rhel7-2 - * Resource action: rsc6 stop on rhel7-2 - * Resource action: rsc2 start on rhel7-3 - * Resource action: rsc5 monitor=25000 on rhel7-1 - * Resource action: rsc5 start on rhel7-2 - * Resource action: rsc6 start on rhel7-1 - * Resource action: rsc2 monitor=90000 on rhel7-3 - * Resource action: rsc2 monitor=40000 on rhel7-3 - * Resource action: rsc5 monitor=20000 on rhel7-2 - * Resource action: rsc6 monitor=28000 on rhel7-1 Using the original execution date of: 2018-03-21 
23:12:42Z Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-1 * rsc1 (ocf:pacemaker:Dummy): Started rhel7-2 - * rsc2 (ocf:pacemaker:Dummy): Started rhel7-3 + * rsc2 (ocf:pacemaker:Dummy): Stopped * rsc3 (ocf:pacemaker:Dummy): Started rhel7-4 * rsc4 (ocf:pacemaker:Dummy): FAILED rhel7-5 (blocked) - * rsc5 (ocf:pacemaker:Dummy): Started rhel7-2 - * rsc6 (ocf:pacemaker:Dummy): Started rhel7-1 + * rsc5 (ocf:pacemaker:Dummy): Started rhel7-1 + * rsc6 (ocf:pacemaker:Dummy): Started rhel7-2 diff --git a/cts/scheduler/summary/leftover-pending-monitor.summary b/cts/scheduler/summary/leftover-pending-monitor.summary index 04b03f29d85..1ed4dbec86c 100644 --- a/cts/scheduler/summary/leftover-pending-monitor.summary +++ b/cts/scheduler/summary/leftover-pending-monitor.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2022-12-02 17:04:52Z Current cluster status: + * Cluster Summary: + * Node List: * Node node-2: pending * Online: [ node-1 node-3 ] @@ -14,11 +16,11 @@ Transition Summary: * Start stateful-1:1 ( node-1 ) due to unrunnable stateful-1:0 monitor (blocked) Executing Cluster Transition: - * Pseudo action: promotable-1_start_0 - * Pseudo action: promotable-1_running_0 Using the original execution date of: 2022-12-02 17:04:52Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Node node-2: pending * Online: [ node-1 node-3 ] diff --git a/cts/scheduler/summary/load-stopped-loop-2.summary b/cts/scheduler/summary/load-stopped-loop-2.summary index eb22c5aef6a..dd6d5d13d30 100644 --- a/cts/scheduler/summary/load-stopped-loop-2.summary +++ b/cts/scheduler/summary/load-stopped-loop-2.summary @@ -1,24 +1,21 @@ 4 of 25 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ xfc0 xfc1 xfc2 xfc3 ] * Full List of Resources: + * xu-test8 (ocf:heartbeat:Xen): Started xfc3 * Clone Set: cl_glusterd [p_glusterd]: * Started: [ xfc0 xfc1 xfc2 xfc3 ] * Clone Set: cl_p_bl_glusterfs [p_bl_glusterfs]: * Started: [ xfc0 xfc1 xfc2 xfc3 ] - * xu-test8 (ocf:heartbeat:Xen): Started xfc3 * xu-test1 (ocf:heartbeat:Xen): Started xfc3 * xu-test10 (ocf:heartbeat:Xen): Started xfc3 * xu-test11 (ocf:heartbeat:Xen): Started xfc3 * xu-test12 (ocf:heartbeat:Xen): Started xfc2 - * xu-test13 (ocf:heartbeat:Xen): Stopped - * xu-test14 (ocf:heartbeat:Xen): Stopped (disabled) - * xu-test15 (ocf:heartbeat:Xen): Stopped (disabled) - * xu-test16 (ocf:heartbeat:Xen): Stopped (disabled) - * xu-test17 (ocf:heartbeat:Xen): Stopped (disabled) * xu-test2 (ocf:heartbeat:Xen): Started xfc3 * xu-test3 (ocf:heartbeat:Xen): Started xfc1 * xu-test4 (ocf:heartbeat:Xen): Started xfc0 @@ -26,89 +23,50 @@ Current cluster status: * xu-test6 (ocf:heartbeat:Xen): Started xfc3 * xu-test7 (ocf:heartbeat:Xen): Started xfc1 * xu-test9 (ocf:heartbeat:Xen): Started xfc0 + * xu-test13 (ocf:heartbeat:Xen): Stopped + * xu-test14 (ocf:heartbeat:Xen): Stopped (disabled) + * xu-test15 (ocf:heartbeat:Xen): Stopped (disabled) + * xu-test16 (ocf:heartbeat:Xen): Stopped (disabled) + * xu-test17 (ocf:heartbeat:Xen): Stopped (disabled) Transition Summary: - * Migrate xu-test12 ( xfc2 -> xfc3 ) * Migrate xu-test2 ( xfc3 -> xfc1 ) + * Migrate xu-test6 ( xfc3 -> xfc1 ) + * Migrate xu-test12 ( xfc2 -> xfc3 ) * Migrate xu-test3 ( xfc1 -> xfc0 ) * Migrate xu-test4 ( xfc0 -> xfc2 
) * Migrate xu-test5 ( xfc2 -> xfc3 ) - * Migrate xu-test6 ( xfc3 -> xfc1 ) * Migrate xu-test7 ( xfc1 -> xfc0 ) * Migrate xu-test9 ( xfc0 -> xfc2 ) * Start xu-test13 ( xfc3 ) Executing Cluster Transition: - * Resource action: xu-test4 migrate_to on xfc0 - * Resource action: xu-test5 migrate_to on xfc2 - * Resource action: xu-test6 migrate_to on xfc3 - * Resource action: xu-test7 migrate_to on xfc1 - * Resource action: xu-test9 migrate_to on xfc0 - * Resource action: xu-test4 migrate_from on xfc2 - * Resource action: xu-test4 stop on xfc0 - * Resource action: xu-test5 migrate_from on xfc3 - * Resource action: xu-test5 stop on xfc2 - * Resource action: xu-test6 migrate_from on xfc1 - * Resource action: xu-test6 stop on xfc3 - * Resource action: xu-test7 migrate_from on xfc0 - * Resource action: xu-test7 stop on xfc1 - * Resource action: xu-test9 migrate_from on xfc2 - * Resource action: xu-test9 stop on xfc0 - * Pseudo action: load_stopped_xfc0 - * Resource action: xu-test3 migrate_to on xfc1 - * Pseudo action: xu-test7_start_0 - * Resource action: xu-test3 migrate_from on xfc0 - * Resource action: xu-test3 stop on xfc1 - * Resource action: xu-test7 monitor=10000 on xfc0 - * Pseudo action: load_stopped_xfc1 - * Resource action: xu-test2 migrate_to on xfc3 - * Pseudo action: xu-test3_start_0 - * Pseudo action: xu-test6_start_0 - * Resource action: xu-test2 migrate_from on xfc1 - * Resource action: xu-test2 stop on xfc3 - * Resource action: xu-test3 monitor=10000 on xfc0 - * Resource action: xu-test6 monitor=10000 on xfc1 - * Pseudo action: load_stopped_xfc3 - * Resource action: xu-test12 migrate_to on xfc2 - * Pseudo action: xu-test2_start_0 - * Pseudo action: xu-test5_start_0 - * Resource action: xu-test13 start on xfc3 - * Resource action: xu-test12 migrate_from on xfc3 - * Resource action: xu-test12 stop on xfc2 - * Resource action: xu-test2 monitor=10000 on xfc1 - * Resource action: xu-test5 monitor=10000 on xfc3 - * Resource action: xu-test13 monitor=10000 on xfc3 - * Pseudo action: load_stopped_xfc2 - * Pseudo action: xu-test12_start_0 - * Pseudo action: xu-test4_start_0 - * Pseudo action: xu-test9_start_0 - * Resource action: xu-test12 monitor=10000 on xfc3 - * Resource action: xu-test4 monitor=10000 on xfc2 - * Resource action: xu-test9 monitor=10000 on xfc2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ xfc0 xfc1 xfc2 xfc3 ] * Full List of Resources: + * xu-test8 (ocf:heartbeat:Xen): Started xfc3 * Clone Set: cl_glusterd [p_glusterd]: * Started: [ xfc0 xfc1 xfc2 xfc3 ] * Clone Set: cl_p_bl_glusterfs [p_bl_glusterfs]: * Started: [ xfc0 xfc1 xfc2 xfc3 ] - * xu-test8 (ocf:heartbeat:Xen): Started xfc3 * xu-test1 (ocf:heartbeat:Xen): Started xfc3 * xu-test10 (ocf:heartbeat:Xen): Started xfc3 * xu-test11 (ocf:heartbeat:Xen): Started xfc3 - * xu-test12 (ocf:heartbeat:Xen): Started xfc3 - * xu-test13 (ocf:heartbeat:Xen): Started xfc3 + * xu-test12 (ocf:heartbeat:Xen): Started xfc2 + * xu-test2 (ocf:heartbeat:Xen): Started xfc3 + * xu-test3 (ocf:heartbeat:Xen): Started xfc1 + * xu-test4 (ocf:heartbeat:Xen): Started xfc0 + * xu-test5 (ocf:heartbeat:Xen): Started xfc2 + * xu-test6 (ocf:heartbeat:Xen): Started xfc3 + * xu-test7 (ocf:heartbeat:Xen): Started xfc1 + * xu-test9 (ocf:heartbeat:Xen): Started xfc0 + * xu-test13 (ocf:heartbeat:Xen): Stopped * xu-test14 (ocf:heartbeat:Xen): Stopped (disabled) * xu-test15 (ocf:heartbeat:Xen): Stopped (disabled) * xu-test16 (ocf:heartbeat:Xen): Stopped (disabled) * xu-test17 (ocf:heartbeat:Xen): Stopped (disabled) - * xu-test2 
(ocf:heartbeat:Xen): Started xfc1 - * xu-test3 (ocf:heartbeat:Xen): Started xfc0 - * xu-test4 (ocf:heartbeat:Xen): Started xfc2 - * xu-test5 (ocf:heartbeat:Xen): Started xfc3 - * xu-test6 (ocf:heartbeat:Xen): Started xfc1 - * xu-test7 (ocf:heartbeat:Xen): Started xfc0 - * xu-test9 (ocf:heartbeat:Xen): Started xfc2 diff --git a/cts/scheduler/summary/load-stopped-loop.summary b/cts/scheduler/summary/load-stopped-loop.summary index f3f2473a4f5..ab973a23329 100644 --- a/cts/scheduler/summary/load-stopped-loop.summary +++ b/cts/scheduler/summary/load-stopped-loop.summary @@ -1,31 +1,39 @@ 32 of 308 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ mgmt01 v03-a v03-b ] * Full List of Resources: - * stonith-v02-a (stonith:fence_ipmilan): Stopped (disabled) - * stonith-v02-b (stonith:fence_ipmilan): Stopped (disabled) - * stonith-v02-c (stonith:fence_ipmilan): Stopped (disabled) - * stonith-v02-d (stonith:fence_ipmilan): Stopped (disabled) - * stonith-mgmt01 (stonith:fence_xvm): Started v03-b * stonith-mgmt02 (stonith:meatware): Started mgmt01 - * stonith-v03-c (stonith:fence_ipmilan): Stopped (disabled) - * stonith-v03-a (stonith:fence_ipmilan): Started v03-b - * stonith-v03-b (stonith:fence_ipmilan): Started v03-a - * stonith-v03-d (stonith:fence_ipmilan): Stopped (disabled) * Clone Set: cl-clvmd [clvmd]: * Started: [ mgmt01 v03-a v03-b ] * Clone Set: cl-dlm [dlm]: * Started: [ mgmt01 v03-a v03-b ] * Clone Set: cl-iscsid [iscsid]: * Started: [ mgmt01 v03-a v03-b ] + * Clone Set: cl-multipathd [multipathd]: + * Started: [ mgmt01 v03-a v03-b ] + * Clone Set: cl-libvirt-images-fs [libvirt-images-fs]: + * Started: [ mgmt01 v03-a v03-b ] + * Clone Set: cl-libvirt-install-fs [libvirt-install-fs]: + * Started: [ mgmt01 v03-a v03-b ] + * Clone Set: cl-vds-ok-pool-0-iscsi [vds-ok-pool-0-iscsi]: + * Started: [ mgmt01 v03-a v03-b ] + * Clone Set: cl-vds-ok-pool-0-vg [vds-ok-pool-0-vg]: + * Started: [ mgmt01 v03-a v03-b ] + * Clone Set: cl-vds-ok-pool-1-iscsi [vds-ok-pool-1-iscsi]: + * Started: [ mgmt01 v03-a v03-b ] + * Clone Set: cl-vds-ok-pool-1-vg [vds-ok-pool-1-vg]: + * Started: [ mgmt01 v03-a v03-b ] + * stonith-v03-b (stonith:fence_ipmilan): Started v03-a + * stonith-v03-a (stonith:fence_ipmilan): Started v03-b + * stonith-mgmt01 (stonith:fence_xvm): Started v03-b * Clone Set: cl-libvirtd [libvirtd]: * Started: [ v03-a v03-b ] * Stopped: [ mgmt01 ] - * Clone Set: cl-multipathd [multipathd]: - * Started: [ mgmt01 v03-a v03-b ] * Clone Set: cl-node-params [node-params]: * Started: [ v03-a v03-b ] * Stopped: [ mgmt01 ] @@ -59,18 +67,6 @@ Current cluster status: * Clone Set: cl-vlan909-if [vlan909-if]: * Started: [ v03-a v03-b ] * Stopped: [ mgmt01 ] - * Clone Set: cl-libvirt-images-fs [libvirt-images-fs]: - * Started: [ mgmt01 v03-a v03-b ] - * Clone Set: cl-libvirt-install-fs [libvirt-install-fs]: - * Started: [ mgmt01 v03-a v03-b ] - * Clone Set: cl-vds-ok-pool-0-iscsi [vds-ok-pool-0-iscsi]: - * Started: [ mgmt01 v03-a v03-b ] - * Clone Set: cl-vds-ok-pool-0-vg [vds-ok-pool-0-vg]: - * Started: [ mgmt01 v03-a v03-b ] - * Clone Set: cl-vds-ok-pool-1-iscsi [vds-ok-pool-1-iscsi]: - * Started: [ mgmt01 v03-a v03-b ] - * Clone Set: cl-vds-ok-pool-1-vg [vds-ok-pool-1-vg]: - * Started: [ mgmt01 v03-a v03-b ] * Clone Set: cl-libvirt-images-pool [libvirt-images-pool]: * Started: [ v03-a v03-b ] * Stopped: [ mgmt01 ] @@ -80,42 +76,18 @@ Current cluster status: * Clone Set: cl-vds-ok-pool-1-pool 
 * Started: [ v03-a v03-b ]
 * Stopped: [ mgmt01 ]
- * git.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
 * vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
 * vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
 * vd01-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
- * vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped
- * vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * vd02-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * vd03-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * vd03-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * vd03-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * vd03-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * vd04-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
 * f13-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
- * eu2.ca-pages.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * zakaz.transferrus.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
 * Clone Set: cl-vlan200-if [vlan200-if]:
 * Started: [ v03-a v03-b ]
 * Stopped: [ mgmt01 ]
 * lenny-x32-devel-vm (ocf:vds-ok:VirtualDomain): Started v03-a
- * dist.express-consult.org-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * eu1.ca-pages.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * gotin-bbb-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * maxb-c55-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * metae.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * rodovoepomestie.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * ubuntu9.10-gotin-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
 * c5-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
 * Clone Set: cl-mcast-test-net [mcast-test-net]:
 * Started: [ v03-a v03-b ]
 * Stopped: [ mgmt01 ]
- * dist.fly-uni.org-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
 * ktstudio.net-vm (ocf:vds-ok:VirtualDomain): Started v03-a
 * cloudsrv.credo-dialogue.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
 * c6-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
@@ -130,19 +102,49 @@ Current cluster status:
 * Stopped: [ mgmt01 ]
 * gw.anbriz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
 * license.anbriz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
- * terminal.anbriz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
 * lustre01-left.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
 * lustre02-left.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
 * test-01.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
 * Clone Set: cl-libvirt-qpid [libvirt-qpid]:
 * Started: [ v03-a v03-b ]
 * Stopped: [ mgmt01 ]
- * gw.gleb.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * gw.gotin.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
 * terminal0.anbriz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
 * Clone Set: cl-mcast-gleb-net [mcast-gleb-net]:
 * Started: [ v03-a v03-b ]
 * Stopped: [ mgmt01 ]
+ * stonith-v02-a (stonith:fence_ipmilan): Stopped (disabled)
+ * stonith-v02-b (stonith:fence_ipmilan): Stopped (disabled)
+ * stonith-v02-c (stonith:fence_ipmilan): Stopped (disabled)
+ * stonith-v02-d (stonith:fence_ipmilan): Stopped (disabled)
+ * stonith-v03-c (stonith:fence_ipmilan): Stopped (disabled)
+ * stonith-v03-d (stonith:fence_ipmilan): Stopped (disabled)
+ * git.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped
+ * vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * vd02-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * vd03-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * vd03-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * vd03-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * vd03-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * vd04-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * eu2.ca-pages.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * zakaz.transferrus.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * dist.express-consult.org-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * eu1.ca-pages.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * gotin-bbb-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * maxb-c55-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * metae.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * rodovoepomestie.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * ubuntu9.10-gotin-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * dist.fly-uni.org-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * terminal.anbriz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * gw.gleb.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * gw.gotin.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
 Transition Summary:
 * Reload vds-ok-pool-0-iscsi:0 ( mgmt01 )
@@ -151,73 +153,48 @@ Transition Summary:
 * Reload vds-ok-pool-1-iscsi:0 ( mgmt01 )
 * Reload vds-ok-pool-1-iscsi:1 ( v03-b )
 * Reload vds-ok-pool-1-iscsi:2 ( v03-a )
- * Restart stonith-v03-b ( v03-a ) due to resource definition change
 * Restart stonith-v03-a ( v03-b ) due to resource definition change
+ * Restart stonith-v03-b ( v03-a ) due to resource definition change
 * Migrate license.anbriz.vds-ok.com-vm ( v03-b -> v03-a )
 * Migrate terminal0.anbriz.vds-ok.com-vm ( v03-a -> v03-b )
 * Start vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm ( v03-a )
 Executing Cluster Transition:
- * Resource action: vds-ok-pool-0-iscsi:1 reload-agent on mgmt01
- * Resource action: vds-ok-pool-0-iscsi:1 monitor=30000 on mgmt01
- * Resource action: vds-ok-pool-0-iscsi:0 reload-agent on v03-b
- * Resource action: vds-ok-pool-0-iscsi:0 monitor=30000 on v03-b
- * Resource action: vds-ok-pool-0-iscsi:2 reload-agent on v03-a
- * Resource action: vds-ok-pool-0-iscsi:2 monitor=30000 on v03-a
- * Resource action: vds-ok-pool-1-iscsi:1 reload-agent on mgmt01
- * Resource action: vds-ok-pool-1-iscsi:1 monitor=30000 on mgmt01
- * Resource action: vds-ok-pool-1-iscsi:0 reload-agent on v03-b
- * Resource action: vds-ok-pool-1-iscsi:0 monitor=30000 on v03-b
- * Resource action: vds-ok-pool-1-iscsi:2 reload-agent on v03-a
- * Resource action: vds-ok-pool-1-iscsi:2 monitor=30000 on v03-a
- * Resource action: stonith-v03-b stop on v03-a
- * Resource action: stonith-v03-b start on v03-a
- * Resource action: stonith-v03-b monitor=60000 on v03-a
- * Resource action: stonith-v03-a stop on v03-b
- * Resource action: stonith-v03-a start on v03-b
- * Resource action: stonith-v03-a monitor=60000 on v03-b
- * Resource action: terminal0.anbriz.vds-ok.com-vm migrate_to on v03-a
- * Pseudo action: load_stopped_mgmt01
- * Resource action: terminal0.anbriz.vds-ok.com-vm migrate_from on v03-b
- * Resource action: terminal0.anbriz.vds-ok.com-vm stop on v03-a
- * Pseudo action: load_stopped_v03-a
- * Resource action: license.anbriz.vds-ok.com-vm migrate_to on v03-b
- * Resource action: vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm start on v03-a
- * Resource action: license.anbriz.vds-ok.com-vm migrate_from on v03-a
- * Resource action: license.anbriz.vds-ok.com-vm stop on v03-b
- * Resource action: vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm monitor=10000 on v03-a
- * Pseudo action: load_stopped_v03-b
- * Pseudo action: license.anbriz.vds-ok.com-vm_start_0
- * Pseudo action: terminal0.anbriz.vds-ok.com-vm_start_0
- * Resource action: license.anbriz.vds-ok.com-vm monitor=10000 on v03-a
- * Resource action: terminal0.anbriz.vds-ok.com-vm monitor=10000 on v03-b
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ mgmt01 v03-a v03-b ]
 * Full List of Resources:
- * stonith-v02-a (stonith:fence_ipmilan): Stopped (disabled)
- * stonith-v02-b (stonith:fence_ipmilan): Stopped (disabled)
- * stonith-v02-c (stonith:fence_ipmilan): Stopped (disabled)
- * stonith-v02-d (stonith:fence_ipmilan): Stopped (disabled)
- * stonith-mgmt01 (stonith:fence_xvm): Started v03-b
 * stonith-mgmt02 (stonith:meatware): Started mgmt01
- * stonith-v03-c (stonith:fence_ipmilan): Stopped (disabled)
- * stonith-v03-a (stonith:fence_ipmilan): Started v03-b
- * stonith-v03-b (stonith:fence_ipmilan): Started v03-a
- * stonith-v03-d (stonith:fence_ipmilan): Stopped (disabled)
 * Clone Set: cl-clvmd [clvmd]:
 * Started: [ mgmt01 v03-a v03-b ]
 * Clone Set: cl-dlm [dlm]:
 * Started: [ mgmt01 v03-a v03-b ]
 * Clone Set: cl-iscsid [iscsid]:
 * Started: [ mgmt01 v03-a v03-b ]
+ * Clone Set: cl-multipathd [multipathd]:
+ * Started: [ mgmt01 v03-a v03-b ]
+ * Clone Set: cl-libvirt-images-fs [libvirt-images-fs]:
+ * Started: [ mgmt01 v03-a v03-b ]
+ * Clone Set: cl-libvirt-install-fs [libvirt-install-fs]:
+ * Started: [ mgmt01 v03-a v03-b ]
+ * Clone Set: cl-vds-ok-pool-0-iscsi [vds-ok-pool-0-iscsi]:
+ * Started: [ mgmt01 v03-a v03-b ]
+ * Clone Set: cl-vds-ok-pool-0-vg [vds-ok-pool-0-vg]:
+ * Started: [ mgmt01 v03-a v03-b ]
+ * Clone Set: cl-vds-ok-pool-1-iscsi [vds-ok-pool-1-iscsi]:
+ * Started: [ mgmt01 v03-a v03-b ]
+ * Clone Set: cl-vds-ok-pool-1-vg [vds-ok-pool-1-vg]:
+ * Started: [ mgmt01 v03-a v03-b ]
+ * stonith-v03-b (stonith:fence_ipmilan): Started v03-a
+ * stonith-v03-a (stonith:fence_ipmilan): Started v03-b
+ * stonith-mgmt01 (stonith:fence_xvm): Started v03-b
 * Clone Set: cl-libvirtd [libvirtd]:
 * Started: [ v03-a v03-b ]
 * Stopped: [ mgmt01 ]
- * Clone Set: cl-multipathd [multipathd]:
- * Started: [ mgmt01 v03-a v03-b ]
 * Clone Set: cl-node-params [node-params]:
 * Started: [ v03-a v03-b ]
 * Stopped: [ mgmt01 ]
@@ -251,18 +228,6 @@ Revised Cluster Status:
 * Clone Set: cl-vlan909-if [vlan909-if]:
 * Started: [ v03-a v03-b ]
 * Stopped: [ mgmt01 ]
- * Clone Set: cl-libvirt-images-fs [libvirt-images-fs]:
- * Started: [ mgmt01 v03-a v03-b ]
- * Clone Set: cl-libvirt-install-fs [libvirt-install-fs]:
- * Started: [ mgmt01 v03-a v03-b ]
- * Clone Set: cl-vds-ok-pool-0-iscsi [vds-ok-pool-0-iscsi]:
- * Started: [ mgmt01 v03-a v03-b ]
- * Clone Set: cl-vds-ok-pool-0-vg [vds-ok-pool-0-vg]:
- * Started: [ mgmt01 v03-a v03-b ]
- * Clone Set: cl-vds-ok-pool-1-iscsi [vds-ok-pool-1-iscsi]:
- * Started: [ mgmt01 v03-a v03-b ]
- * Clone Set: cl-vds-ok-pool-1-vg [vds-ok-pool-1-vg]:
- * Started: [ mgmt01 v03-a v03-b ]
 * Clone Set: cl-libvirt-images-pool [libvirt-images-pool]:
 * Started: [ v03-a v03-b ]
 * Stopped: [ mgmt01 ]
@@ -272,42 +237,18 @@ Revised Cluster Status:
 * Clone Set: cl-vds-ok-pool-1-pool [vds-ok-pool-1-pool]:
 * Started: [ v03-a v03-b ]
 * Stopped: [ mgmt01 ]
- * git.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
 * vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
 * vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
 * vd01-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
- * vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
- * vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * vd02-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * vd03-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * vd03-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * vd03-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * vd03-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * vd04-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
 * f13-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
- * eu2.ca-pages.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * zakaz.transferrus.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
 * Clone Set: cl-vlan200-if [vlan200-if]:
 * Started: [ v03-a v03-b ]
 * Stopped: [ mgmt01 ]
 * lenny-x32-devel-vm (ocf:vds-ok:VirtualDomain): Started v03-a
- * dist.express-consult.org-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * eu1.ca-pages.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * gotin-bbb-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * maxb-c55-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * metae.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * rodovoepomestie.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * ubuntu9.10-gotin-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
 * c5-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
 * Clone Set: cl-mcast-test-net [mcast-test-net]:
 * Started: [ v03-a v03-b ]
 * Stopped: [ mgmt01 ]
- * dist.fly-uni.org-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
 * ktstudio.net-vm (ocf:vds-ok:VirtualDomain): Started v03-a
 * cloudsrv.credo-dialogue.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
 * c6-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
@@ -321,17 +262,47 @@ Revised Cluster Status:
 * Started: [ v03-a v03-b ]
 * Stopped: [ mgmt01 ]
 * gw.anbriz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
- * license.anbriz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
- * terminal.anbriz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * license.anbriz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
 * lustre01-left.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
 * lustre02-left.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
 * test-01.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
 * Clone Set: cl-libvirt-qpid [libvirt-qpid]:
 * Started: [ v03-a v03-b ]
 * Stopped: [ mgmt01 ]
- * gw.gleb.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * gw.gotin.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
- * terminal0.anbriz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
+ * terminal0.anbriz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
 * Clone Set: cl-mcast-gleb-net [mcast-gleb-net]:
 * Started: [ v03-a v03-b ]
 * Stopped: [ mgmt01 ]
+ * stonith-v02-a (stonith:fence_ipmilan): Stopped (disabled)
+ * stonith-v02-b (stonith:fence_ipmilan): Stopped (disabled)
+ * stonith-v02-c (stonith:fence_ipmilan): Stopped (disabled)
+ * stonith-v02-d (stonith:fence_ipmilan): Stopped (disabled)
+ * stonith-v03-c (stonith:fence_ipmilan): Stopped (disabled)
+ * stonith-v03-d (stonith:fence_ipmilan): Stopped (disabled)
+ * git.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped
+ * vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * vd02-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * vd03-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * vd03-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * vd03-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * vd03-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * vd04-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * eu2.ca-pages.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * zakaz.transferrus.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * dist.express-consult.org-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * eu1.ca-pages.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * gotin-bbb-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * maxb-c55-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * metae.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * rodovoepomestie.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * ubuntu9.10-gotin-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * dist.fly-uni.org-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * terminal.anbriz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * gw.gleb.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
+ * gw.gotin.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
diff --git a/cts/scheduler/summary/location-date-rules-1.summary b/cts/scheduler/summary/location-date-rules-1.summary
index b1afba40354..2b97f944551 100644
--- a/cts/scheduler/summary/location-date-rules-1.summary
+++ b/cts/scheduler/summary/location-date-rules-1.summary
@@ -1,5 +1,7 @@
 Using the original execution date of: 2019-09-20 15:10:52Z
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
@@ -16,21 +18,17 @@ Transition Summary:
 * Start rsc3 ( rhel7-4 )
 Executing Cluster Transition:
- * Resource action: rsc1 start on rhel7-3
- * Resource action: rsc2 start on rhel7-4
- * Resource action: rsc3 start on rhel7-4
- * Resource action: rsc1 monitor=10000 on rhel7-3
- * Resource action: rsc2 monitor=10000 on rhel7-4
- * Resource action: rsc3 monitor=10000 on rhel7-4
 Using the original execution date of: 2019-09-20 15:10:52Z
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
 * Full List of Resources:
 * Fencing (stonith:fence_xvm): Started rhel7-1
 * FencingPass (stonith:fence_dummy): Started rhel7-2
- * rsc1 (ocf:pacemaker:Dummy): Started rhel7-3
- * rsc2 (ocf:pacemaker:Dummy): Started rhel7-4
- * rsc3 (ocf:pacemaker:Dummy): Started rhel7-4
+ * rsc1 (ocf:pacemaker:Dummy): Stopped
+ * rsc2 (ocf:pacemaker:Dummy): Stopped
+ * rsc3 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/location-date-rules-2.summary b/cts/scheduler/summary/location-date-rules-2.summary
index 3f27c03922e..8c09b155ebd 100644
--- a/cts/scheduler/summary/location-date-rules-2.summary
+++ b/cts/scheduler/summary/location-date-rules-2.summary
@@ -1,5 +1,7 @@
 Using the original execution date of: 2019-09-20 15:10:52Z
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
@@ -16,21 +18,17 @@ Transition Summary:
 * Start rsc3 ( rhel7-4 )
 Executing Cluster Transition:
- * Resource action: rsc1 start on rhel7-3
- * Resource action: rsc2 start on rhel7-3
- * Resource action: rsc3 start on rhel7-4
- * Resource action: rsc1 monitor=10000 on rhel7-3
- * Resource action: rsc2 monitor=10000 on rhel7-3
- * Resource action: rsc3 monitor=10000 on rhel7-4
 Using the original execution date of: 2019-09-20 15:10:52Z
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
 * Full List of Resources:
 * Fencing (stonith:fence_xvm): Started rhel7-1
 * FencingPass (stonith:fence_dummy): Started rhel7-2
- * rsc1 (ocf:pacemaker:Dummy): Started rhel7-3
- * rsc2 (ocf:pacemaker:Dummy): Started rhel7-3
- * rsc3 (ocf:pacemaker:Dummy): Started rhel7-4
+ * rsc1 (ocf:pacemaker:Dummy): Stopped
+ * rsc2 (ocf:pacemaker:Dummy): Stopped
+ * rsc3 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/location-sets-templates.summary b/cts/scheduler/summary/location-sets-templates.summary
index e6047113c5f..7484f6f19af 100644
--- a/cts/scheduler/summary/location-sets-templates.summary
+++ b/cts/scheduler/summary/location-sets-templates.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ node1 node2 ]
@@ -19,33 +21,17 @@ Transition Summary:
 * Start rsc6 ( node2 )
 Executing Cluster Transition:
- * Resource action: rsc1 monitor on node2
- * Resource action: rsc1 monitor on node1
- * Resource action: rsc2 monitor on node2
- * Resource action: rsc2 monitor on node1
- * Resource action: rsc3 monitor on node2
- * Resource action: rsc3 monitor on node1
- * Resource action: rsc4 monitor on node2
- * Resource action: rsc4 monitor on node1
- * Resource action: rsc5 monitor on node2
- * Resource action: rsc5 monitor on node1
- * Resource action: rsc6 monitor on node2
- * Resource action: rsc6 monitor on node1
- * Resource action: rsc1 start on node2
- * Resource action: rsc2 start on node2
- * Resource action: rsc3 start on node2
- * Resource action: rsc4 start on node2
- * Resource action: rsc5 start on node2
- * Resource action: rsc6 start on node2
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ node1 node2 ]
 * Full List of Resources:
- * rsc1 (ocf:pacemaker:Dummy): Started node2
- * rsc2 (ocf:pacemaker:Dummy): Started node2
- * rsc3 (ocf:pacemaker:Dummy): Started node2
- * rsc4 (ocf:pacemaker:Dummy): Started node2
- * rsc5 (ocf:pacemaker:Dummy): Started node2
- * rsc6 (ocf:pacemaker:Dummy): Started node2
+ * rsc1 (ocf:pacemaker:Dummy): Stopped
+ * rsc2 (ocf:pacemaker:Dummy): Stopped
+ * rsc3 (ocf:pacemaker:Dummy): Stopped
+ * rsc4 (ocf:pacemaker:Dummy): Stopped
+ * rsc5 (ocf:pacemaker:Dummy): Stopped
+ * rsc6 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/managed-0.summary b/cts/scheduler/summary/managed-0.summary
index 39d715bc20c..eb0c5092c9e 100644
--- a/cts/scheduler/summary/managed-0.summary
+++ b/cts/scheduler/summary/managed-0.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 c001n08 c001n09 ]
@@ -25,89 +27,10 @@ Current cluster status:
 Transition Summary:
 Executing Cluster Transition:
- * Resource action: DcIPaddr monitor on c001n08
- * Resource action: DcIPaddr monitor on c001n07
- * Resource action: DcIPaddr monitor on c001n06
- * Resource action: DcIPaddr monitor on c001n05
- * Resource action: DcIPaddr monitor on c001n04
- * Resource action: DcIPaddr monitor on c001n03
- * Resource action: DcIPaddr monitor on c001n02
- * Resource action: rsc_c001n09 monitor on c001n08
- * Resource action: rsc_c001n09 monitor on c001n07
- * Resource action: rsc_c001n09 monitor on c001n05
- * Resource action: rsc_c001n09 monitor on c001n04
- * Resource action: rsc_c001n09 monitor on c001n03
- * Resource action: rsc_c001n09 monitor on c001n02
- * Resource action: rsc_c001n02 monitor on c001n09
- * Resource action: rsc_c001n02 monitor on c001n08
- * Resource action: rsc_c001n02 monitor on c001n07
- * Resource action: rsc_c001n02 monitor on c001n05
- * Resource action: rsc_c001n02 monitor on c001n04
- * Resource action: rsc_c001n03 monitor on c001n09
- * Resource action: rsc_c001n03 monitor on c001n08
- * Resource action: rsc_c001n03 monitor on c001n07
- * Resource action: rsc_c001n03 monitor on c001n05
- * Resource action: rsc_c001n03 monitor on c001n04
- * Resource action: rsc_c001n03 monitor on c001n02
- * Resource action: rsc_c001n04 monitor on c001n09
- * Resource action: rsc_c001n04 monitor on c001n08
- * Resource action: rsc_c001n04 monitor on c001n07
- * Resource action: rsc_c001n04 monitor on c001n05
- * Resource action: rsc_c001n04 monitor on c001n03
- * Resource action: rsc_c001n04 monitor on c001n02
- * Resource action: rsc_c001n05 monitor on c001n09
- * Resource action: rsc_c001n05 monitor on c001n08
- * Resource action: rsc_c001n05 monitor on c001n07
- * Resource action: rsc_c001n05 monitor on c001n06
- * Resource action: rsc_c001n05 monitor on c001n04
- * Resource action: rsc_c001n05 monitor on c001n03
- * Resource action: rsc_c001n05 monitor on c001n02
- * Resource action: rsc_c001n06 monitor on c001n09
- * Resource action: rsc_c001n06 monitor on c001n08
- * Resource action: rsc_c001n06 monitor on c001n07
- * Resource action: rsc_c001n06 monitor on c001n05
- * Resource action: rsc_c001n06 monitor on c001n04
- * Resource action: rsc_c001n06 monitor on c001n03
- * Resource action: rsc_c001n07 monitor on c001n09
- * Resource action: rsc_c001n07 monitor on c001n08
- * Resource action: rsc_c001n07 monitor on c001n06
- * Resource action: rsc_c001n07 monitor on c001n05
- * Resource action: rsc_c001n07 monitor on c001n04
- * Resource action: rsc_c001n08 monitor on c001n09
- * Resource action: rsc_c001n08 monitor on c001n07
- * Resource action: rsc_c001n08 monitor on c001n05
- * Resource action: child_DoFencing:0 monitor on c001n09
- * Resource action: child_DoFencing:0 monitor on c001n08
- * Resource action: child_DoFencing:0 monitor on c001n07
- * Resource action: child_DoFencing:1 monitor on c001n08
- * Resource action: child_DoFencing:1 monitor on c001n07
- * Resource action: child_DoFencing:1 monitor on c001n02
- * Resource action: child_DoFencing:2 monitor on c001n09
- * Resource action: child_DoFencing:2 monitor on c001n08
- * Resource action: child_DoFencing:2 monitor on c001n07
- * Resource action: child_DoFencing:2 monitor on c001n03
- * Resource action: child_DoFencing:3 monitor on c001n08
- * Resource action: child_DoFencing:3 monitor on c001n04
- * Resource action: child_DoFencing:3 monitor on c001n02
- * Resource action: child_DoFencing:4 monitor on c001n09
- * Resource action: child_DoFencing:4 monitor on c001n05
- * Resource action: child_DoFencing:4 monitor on c001n03
- * Resource action: child_DoFencing:5 monitor on c001n08
- * Resource action: child_DoFencing:5 monitor on c001n05
- * Resource action: child_DoFencing:5 monitor on c001n04
- * Resource action: child_DoFencing:5 monitor on c001n02
- * Resource action: child_DoFencing:6 monitor on c001n09
- * Resource action: child_DoFencing:6 monitor on c001n07
- * Resource action: child_DoFencing:6 monitor on c001n05
- * Resource action: child_DoFencing:6 monitor on c001n04
- * Resource action: child_DoFencing:7 monitor on c001n08
- * Resource action: child_DoFencing:7 monitor on c001n07
- * Resource action: child_DoFencing:7 monitor on c001n05
- * Resource action: child_DoFencing:7 monitor on c001n04
- * Resource action: child_DoFencing:7 monitor on c001n03
- * Resource action: child_DoFencing:7 monitor on c001n02
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 c001n08 c001n09 ]
diff --git a/cts/scheduler/summary/managed-1.summary b/cts/scheduler/summary/managed-1.summary
index 9c250802372..df3d64ffc28 100644
--- a/cts/scheduler/summary/managed-1.summary
+++ b/cts/scheduler/summary/managed-1.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 c001n08 c001n09 ]
@@ -25,89 +27,10 @@ Current cluster status:
 Transition Summary:
 Executing Cluster Transition:
- * Resource action: DcIPaddr monitor on c001n08
- * Resource action: DcIPaddr monitor on c001n07
- * Resource action: DcIPaddr monitor on c001n06
- * Resource action: DcIPaddr monitor on c001n05
- * Resource action: DcIPaddr monitor on c001n04
- * Resource action: DcIPaddr monitor on c001n03
- * Resource action: DcIPaddr monitor on c001n02
- * Resource action: rsc_c001n09 monitor on c001n08
- * Resource action: rsc_c001n09 monitor on c001n07
- * Resource action: rsc_c001n09 monitor on c001n05
- * Resource action: rsc_c001n09 monitor on c001n04
- * Resource action: rsc_c001n09 monitor on c001n03
- * Resource action: rsc_c001n09 monitor on c001n02
- * Resource action: rsc_c001n02 monitor on c001n09
- * Resource action: rsc_c001n02 monitor on c001n08
- * Resource action: rsc_c001n02 monitor on c001n07
- * Resource action: rsc_c001n02 monitor on c001n05
- * Resource action: rsc_c001n02 monitor on c001n04
- * Resource action: rsc_c001n03 monitor on c001n09
- * Resource action: rsc_c001n03 monitor on c001n08
- * Resource action: rsc_c001n03 monitor on c001n07
- * Resource action: rsc_c001n03 monitor on c001n05
- * Resource action: rsc_c001n03 monitor on c001n04
- * Resource action: rsc_c001n03 monitor on c001n02
- * Resource action: rsc_c001n04 monitor on c001n09
- * Resource action: rsc_c001n04 monitor on c001n08
- * Resource action: rsc_c001n04 monitor on c001n07
- * Resource action: rsc_c001n04 monitor on c001n05
- * Resource action: rsc_c001n04 monitor on c001n03
- * Resource action: rsc_c001n04 monitor on c001n02
- * Resource action: rsc_c001n05 monitor on c001n09
- * Resource action: rsc_c001n05 monitor on c001n08
- * Resource action: rsc_c001n05 monitor on c001n07
- * Resource action: rsc_c001n05 monitor on c001n06
- * Resource action: rsc_c001n05 monitor on c001n04
- * Resource action: rsc_c001n05 monitor on c001n03
- * Resource action: rsc_c001n05 monitor on c001n02
- * Resource action: rsc_c001n06 monitor on c001n09
- * Resource action: rsc_c001n06 monitor on c001n08
- * Resource action: rsc_c001n06 monitor on c001n07
- * Resource action: rsc_c001n06 monitor on c001n05
- * Resource action: rsc_c001n06 monitor on c001n04
- * Resource action: rsc_c001n06 monitor on c001n03
- * Resource action: rsc_c001n07 monitor on c001n09
- * Resource action: rsc_c001n07 monitor on c001n08
- * Resource action: rsc_c001n07 monitor on c001n06
- * Resource action: rsc_c001n07 monitor on c001n05
- * Resource action: rsc_c001n07 monitor on c001n04
- * Resource action: rsc_c001n08 monitor on c001n09
- * Resource action: rsc_c001n08 monitor on c001n07
- * Resource action: rsc_c001n08 monitor on c001n05
- * Resource action: child_DoFencing:0 monitor on c001n09
- * Resource action: child_DoFencing:0 monitor on c001n08
- * Resource action: child_DoFencing:0 monitor on c001n07
- * Resource action: child_DoFencing:1 monitor on c001n08
- * Resource action: child_DoFencing:1 monitor on c001n07
- * Resource action: child_DoFencing:1 monitor on c001n02
- * Resource action: child_DoFencing:2 monitor on c001n09
- * Resource action: child_DoFencing:2 monitor on c001n08
- * Resource action: child_DoFencing:2 monitor on c001n07
- * Resource action: child_DoFencing:2 monitor on c001n03
- * Resource action: child_DoFencing:3 monitor on c001n08
- * Resource action: child_DoFencing:3 monitor on c001n04
- * Resource action: child_DoFencing:3 monitor on c001n02
- * Resource action: child_DoFencing:4 monitor on c001n09
- * Resource action: child_DoFencing:4 monitor on c001n05
- * Resource action: child_DoFencing:4 monitor on c001n03
- * Resource action: child_DoFencing:5 monitor on c001n08
- * Resource action: child_DoFencing:5 monitor on c001n05
- * Resource action: child_DoFencing:5 monitor on c001n04
- * Resource action: child_DoFencing:5 monitor on c001n02
- * Resource action: child_DoFencing:6 monitor on c001n09
- * Resource action: child_DoFencing:6 monitor on c001n07
- * Resource action: child_DoFencing:6 monitor on c001n05
- * Resource action: child_DoFencing:6 monitor on c001n04
- * Resource action: child_DoFencing:7 monitor on c001n08
- * Resource action: child_DoFencing:7 monitor on c001n07
- * Resource action: child_DoFencing:7 monitor on c001n05
- * Resource action: child_DoFencing:7 monitor on c001n04
- * Resource action: child_DoFencing:7 monitor on c001n03
- * Resource action: child_DoFencing:7 monitor on c001n02
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 c001n08 c001n09 ]
diff --git a/cts/scheduler/summary/managed-2.summary b/cts/scheduler/summary/managed-2.summary
index dd0a1870b8a..aa78ad7770f 100644
--- a/cts/scheduler/summary/managed-2.summary
+++ b/cts/scheduler/summary/managed-2.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 c001n08 c001n09 ]
@@ -25,123 +27,10 @@ Current cluster status:
 Transition Summary:
 Executing Cluster Transition:
- * Resource action: DcIPaddr monitor on c001n08
- * Resource action: DcIPaddr monitor on c001n07
- * Resource action: DcIPaddr monitor on c001n06
- * Resource action: DcIPaddr monitor on c001n05
- * Resource action: DcIPaddr monitor on c001n04
- * Resource action: DcIPaddr monitor on c001n03
- * Resource action: DcIPaddr monitor on c001n02
- * Resource action: rsc_c001n09 monitor on c001n08
- * Resource action: rsc_c001n09 monitor on c001n07
- * Resource action: rsc_c001n09 monitor on c001n05
- * Resource action: rsc_c001n09 monitor on c001n04
- * Resource action: rsc_c001n09 monitor on c001n03
- * Resource action: rsc_c001n09 monitor on c001n02
- * Resource action: rsc_c001n02 monitor on c001n09
- * Resource action: rsc_c001n02 monitor on c001n08
- * Resource action: rsc_c001n02 monitor on c001n07
- * Resource action: rsc_c001n02 monitor on c001n05
- * Resource action: rsc_c001n02 monitor on c001n04
- * Resource action: rsc_c001n03 monitor on c001n09
- * Resource action: rsc_c001n03 monitor on c001n08
- * Resource action: rsc_c001n03 monitor on c001n07
- * Resource action: rsc_c001n03 monitor on c001n05
- * Resource action: rsc_c001n03 monitor on c001n04
- * Resource action: rsc_c001n03 monitor on c001n02
- * Resource action: rsc_c001n04 monitor on c001n09
- * Resource action: rsc_c001n04 monitor on c001n08
- * Resource action: rsc_c001n04 monitor on c001n07
- * Resource action: rsc_c001n04 monitor on c001n05
- * Resource action: rsc_c001n04 monitor on c001n03
- * Resource action: rsc_c001n04 monitor on c001n02
- * Resource action: rsc_c001n05 monitor on c001n09
- * Resource action: rsc_c001n05 monitor on c001n08
- * Resource action: rsc_c001n05 monitor on c001n07
- * Resource action: rsc_c001n05 monitor on c001n06
- * Resource action: rsc_c001n05 monitor on c001n04
- * Resource action: rsc_c001n05 monitor on c001n03
- * Resource action: rsc_c001n05 monitor on c001n02
- * Resource action: rsc_c001n06 monitor on c001n09
- * Resource action: rsc_c001n06 monitor on c001n08
- * Resource action: rsc_c001n06 monitor on c001n07
- * Resource action: rsc_c001n06 monitor on c001n05
- * Resource action: rsc_c001n06 monitor on c001n04
- * Resource action: rsc_c001n06 monitor on c001n03
- * Resource action: rsc_c001n07 monitor on c001n09
- * Resource action: rsc_c001n07 monitor on c001n08
- * Resource action: rsc_c001n07 monitor on c001n06
- * Resource action: rsc_c001n07 monitor on c001n05
- * Resource action: rsc_c001n07 monitor on c001n04
- * Resource action: rsc_c001n08 monitor on c001n09
- * Resource action: rsc_c001n08 monitor on c001n07
- * Resource action: rsc_c001n08 monitor on c001n05
- * Resource action: child_DoFencing:0 monitor on c001n09
- * Resource action: child_DoFencing:0 monitor on c001n08
- * Resource action: child_DoFencing:0 monitor on c001n07
- * Resource action: child_DoFencing:0 monitor on c001n06
- * Resource action: child_DoFencing:0 monitor on c001n05
- * Resource action: child_DoFencing:0 monitor on c001n04
- * Resource action: child_DoFencing:0 monitor on c001n03
- * Resource action: child_DoFencing:0 monitor on c001n02
- * Resource action: child_DoFencing:1 monitor on c001n09
- * Resource action: child_DoFencing:1 monitor on c001n08
- * Resource action: child_DoFencing:1 monitor on c001n07
- * Resource action: child_DoFencing:1 monitor on c001n06
- * Resource action: child_DoFencing:1 monitor on c001n05
- * Resource action: child_DoFencing:1 monitor on c001n04
- * Resource action: child_DoFencing:1 monitor on c001n03
- * Resource action: child_DoFencing:1 monitor on c001n02
- * Resource action: child_DoFencing:2 monitor on c001n09
- * Resource action: child_DoFencing:2 monitor on c001n08
- * Resource action: child_DoFencing:2 monitor on c001n07
- * Resource action: child_DoFencing:2 monitor on c001n06
- * Resource action: child_DoFencing:2 monitor on c001n05
- * Resource action: child_DoFencing:2 monitor on c001n04
- * Resource action: child_DoFencing:2 monitor on c001n03
- * Resource action: child_DoFencing:2 monitor on c001n02
- * Resource action: child_DoFencing:3 monitor on c001n09
- * Resource action: child_DoFencing:3 monitor on c001n08
- * Resource action: child_DoFencing:3 monitor on c001n07
- * Resource action: child_DoFencing:3 monitor on c001n06
- * Resource action: child_DoFencing:3 monitor on c001n05
- * Resource action: child_DoFencing:3 monitor on c001n04
- * Resource action: child_DoFencing:3 monitor on c001n03
- * Resource action: child_DoFencing:3 monitor on c001n02
- * Resource action: child_DoFencing:4 monitor on c001n09
- * Resource action: child_DoFencing:4 monitor on c001n08
- * Resource action: child_DoFencing:4 monitor on c001n07
- * Resource action: child_DoFencing:4 monitor on c001n06
- * Resource action: child_DoFencing:4 monitor on c001n05
- * Resource action: child_DoFencing:4 monitor on c001n04
- * Resource action: child_DoFencing:4 monitor on c001n03
- * Resource action: child_DoFencing:4 monitor on c001n02
- * Resource action: child_DoFencing:5 monitor on c001n09
- * Resource action: child_DoFencing:5 monitor on c001n08
- * Resource action: child_DoFencing:5 monitor on c001n07
- * Resource action: child_DoFencing:5 monitor on c001n06
- * Resource action: child_DoFencing:5 monitor on c001n05
- * Resource action: child_DoFencing:5 monitor on c001n04
- * Resource action: child_DoFencing:5 monitor on c001n03
- * Resource action: child_DoFencing:5 monitor on c001n02
- * Resource action: child_DoFencing:6 monitor on c001n09
- * Resource action: child_DoFencing:6 monitor on c001n08
- * Resource action: child_DoFencing:6 monitor on c001n07
- * Resource action: child_DoFencing:6 monitor on c001n06
- * Resource action: child_DoFencing:6 monitor on c001n05
- * Resource action: child_DoFencing:6 monitor on c001n04
- * Resource action: child_DoFencing:6 monitor on c001n03
- * Resource action: child_DoFencing:6 monitor on c001n02
- * Resource action: child_DoFencing:7 monitor on c001n09
- * Resource action: child_DoFencing:7 monitor on c001n08
- * Resource action: child_DoFencing:7 monitor on c001n07
- * Resource action: child_DoFencing:7 monitor on c001n06
- * Resource action: child_DoFencing:7 monitor on c001n05
- * Resource action: child_DoFencing:7 monitor on c001n04
- * Resource action: child_DoFencing:7 monitor on c001n03
- * Resource action: child_DoFencing:7 monitor on c001n02
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 c001n08 c001n09 ]
diff --git a/cts/scheduler/summary/migrate-1.summary b/cts/scheduler/summary/migrate-1.summary
index 13a5c6b456c..03d9aa51b7d 100644
--- a/cts/scheduler/summary/migrate-1.summary
+++ b/cts/scheduler/summary/migrate-1.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Node node1: standby (with active resources)
 * Online: [ node2 ]
@@ -10,16 +12,13 @@ Transition Summary:
 * Migrate rsc3 ( node1 -> node2 )
 Executing Cluster Transition:
- * Resource action: rsc3 monitor on node2
- * Resource action: rsc3 migrate_to on node1
- * Resource action: rsc3 migrate_from on node2
- * Resource action: rsc3 stop on node1
- * Pseudo action: rsc3_start_0
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
- * Node node1: standby
+ * Node node1: standby (with active resources)
 * Online: [ node2 ]
 * Full List of Resources:
- * rsc3 (ocf:heartbeat:apache): Started node2
+ * rsc3 (ocf:heartbeat:apache): Started node1
diff --git a/cts/scheduler/summary/migrate-2.summary b/cts/scheduler/summary/migrate-2.summary
index e7723b15edf..557148db7f6 100644
--- a/cts/scheduler/summary/migrate-2.summary
+++ b/cts/scheduler/summary/migrate-2.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Node node1: standby
 * Online: [ node2 ]
@@ -11,6 +13,8 @@ Transition Summary:
 Executing Cluster Transition:
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Node node1: standby
 * Online: [ node2 ]
diff --git a/cts/scheduler/summary/migrate-3.summary b/cts/scheduler/summary/migrate-3.summary
index 5190069380a..e4c351fa3c1 100644
--- a/cts/scheduler/summary/migrate-3.summary
+++ b/cts/scheduler/summary/migrate-3.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Node node1: standby (with active resources)
 * Online: [ node2 ]
@@ -10,14 +12,13 @@ Transition Summary:
 * Recover rsc3 ( node1 -> node2 )
 Executing Cluster Transition:
- * Resource action: rsc3 monitor on node2
- * Resource action: rsc3 stop on node1
- * Resource action: rsc3 start on node2
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
- * Node node1: standby
+ * Node node1: standby (with active resources)
 * Online: [ node2 ]
 * Full List of Resources:
- * rsc3 (ocf:heartbeat:apache): Started node2
+ * rsc3 (ocf:heartbeat:apache): FAILED node1
diff --git a/cts/scheduler/summary/migrate-4.summary b/cts/scheduler/summary/migrate-4.summary
index 366fc2247e6..25d7298d01c 100644
--- a/cts/scheduler/summary/migrate-4.summary
+++ b/cts/scheduler/summary/migrate-4.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Node node1: standby
 * Online: [ node2 ]
@@ -10,13 +12,13 @@ Transition Summary:
 * Recover rsc3 ( node2 )
 Executing Cluster Transition:
- * Resource action: rsc3 stop on node2
- * Resource action: rsc3 start on node2
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Node node1: standby
 * Online: [ node2 ]
 * Full List of Resources:
- * rsc3 (ocf:heartbeat:apache): Started node2
+ * rsc3 (ocf:heartbeat:apache): FAILED node2
diff --git a/cts/scheduler/summary/migrate-5.summary b/cts/scheduler/summary/migrate-5.summary
index f66986559a3..dca71545c36 100644
--- a/cts/scheduler/summary/migrate-5.summary
+++ b/cts/scheduler/summary/migrate-5.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Node dom0-02: standby (with active resources)
 * Online: [ dom0-01 ]
@@ -11,25 +13,19 @@ Current cluster status:
 Transition Summary:
 * Migrate domU-test01 ( dom0-02 -> dom0-01 )
 * Stop dom0-iscsi1-cnx1:1 ( dom0-02 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 Executing Cluster Transition:
- * Resource action: domU-test01 migrate_to on dom0-02
- * Pseudo action: clone-dom0-iscsi1_stop_0
- * Resource action: domU-test01 migrate_from on dom0-01
- * Resource action: domU-test01 stop on dom0-02
- * Pseudo action: dom0-iscsi1:1_stop_0
- * Resource action: dom0-iscsi1-cnx1:0 stop on dom0-02
- * Pseudo action: domU-test01_start_0
- * Pseudo action: dom0-iscsi1:1_stopped_0
- * Pseudo action: clone-dom0-iscsi1_stopped_0
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
- * Node dom0-02: standby
+ * Node dom0-02: standby (with active resources)
 * Online: [ dom0-01 ]
 * Full List of Resources:
- * domU-test01 (ocf:heartbeat:Xen): Started dom0-01
+ * domU-test01 (ocf:heartbeat:Xen): Started dom0-02
 * Clone Set: clone-dom0-iscsi1 [dom0-iscsi1]:
- * Started: [ dom0-01 ]
- * Stopped: [ dom0-02 ]
+ * Started: [ dom0-01 dom0-02 ]
diff --git a/cts/scheduler/summary/migrate-begin.summary b/cts/scheduler/summary/migrate-begin.summary
index 3c673029a7d..6502eff9fc1 100644
--- a/cts/scheduler/summary/migrate-begin.summary
+++ b/cts/scheduler/summary/migrate-begin.summary
@@ -1,28 +1,26 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ hex-13 hex-14 ]
 * Full List of Resources:
- * test-vm (ocf:heartbeat:Xen): Started hex-14
 * Clone Set: c-clusterfs [dlm]:
 * Started: [ hex-13 hex-14 ]
+ * test-vm (ocf:heartbeat:Xen): Started hex-14
 Transition Summary:
 * Migrate test-vm ( hex-14 -> hex-13 )
 Executing Cluster Transition:
- * Pseudo action: load_stopped_hex-13
- * Resource action: test-vm migrate_to on hex-14
- * Resource action: test-vm migrate_from on hex-13
- * Resource action: test-vm stop on hex-14
- * Pseudo action: load_stopped_hex-14
- * Pseudo action: test-vm_start_0
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ hex-13 hex-14 ]
 * Full List of Resources:
- * test-vm (ocf:heartbeat:Xen): Started hex-13
 * Clone Set: c-clusterfs [dlm]:
 * Started: [ hex-13 hex-14 ]
+ * test-vm (ocf:heartbeat:Xen): Started hex-14
diff --git a/cts/scheduler/summary/migrate-both-vms.summary b/cts/scheduler/summary/migrate-both-vms.summary
index 0edd108dafa..32461bf2b76 100644
--- a/cts/scheduler/summary/migrate-both-vms.summary
+++ b/cts/scheduler/summary/migrate-both-vms.summary
@@ -1,24 +1,26 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Node cvmh03: standby (with active resources)
 * Node cvmh04: standby (with active resources)
 * Online: [ cvmh01 cvmh02 ]
 * Full List of Resources:
- * fence-cvmh01 (stonith:fence_ipmilan): Started cvmh02
- * fence-cvmh02 (stonith:fence_ipmilan): Started cvmh01
- * fence-cvmh03 (stonith:fence_ipmilan): Started cvmh01
- * fence-cvmh04 (stonith:fence_ipmilan): Started cvmh02
 * Clone Set: c-fs-libvirt-VM-xcm [fs-libvirt-VM-xcm]:
 * Started: [ cvmh01 cvmh02 cvmh03 cvmh04 ]
- * Clone Set: c-p-libvirtd [p-libvirtd]:
- * Started: [ cvmh01 cvmh02 cvmh03 cvmh04 ]
- * Clone Set: c-fs-bind-libvirt-VM-cvmh [fs-bind-libvirt-VM-cvmh]:
- * Started: [ cvmh01 cvmh02 cvmh03 cvmh04 ]
 * Clone Set: c-watch-ib0 [p-watch-ib0]:
 * Started: [ cvmh01 cvmh02 cvmh03 cvmh04 ]
 * Clone Set: c-fs-gpfs [p-fs-gpfs]:
 * Started: [ cvmh01 cvmh02 cvmh03 cvmh04 ]
+ * fence-cvmh04 (stonith:fence_ipmilan): Started cvmh02
+ * fence-cvmh03 (stonith:fence_ipmilan): Started cvmh01
+ * fence-cvmh02 (stonith:fence_ipmilan): Started cvmh01
+ * fence-cvmh01 (stonith:fence_ipmilan): Started cvmh02
+ * Clone Set: c-p-libvirtd [p-libvirtd]:
+ * Started: [ cvmh01 cvmh02 cvmh03 cvmh04 ]
+ * Clone Set: c-fs-bind-libvirt-VM-cvmh [fs-bind-libvirt-VM-cvmh]:
+ * Started: [ cvmh01 cvmh02 cvmh03 cvmh04 ]
 * vm-compute-test (ocf:ccni:xcatVirtualDomain): Started cvmh03
 * vm-swbuildsl6 (ocf:ccni:xcatVirtualDomain): Started cvmh04
@@ -35,68 +37,33 @@ Transition Summary:
 * Stop fs-bind-libvirt-VM-cvmh:2 ( cvmh03 ) due to node availability
 * Migrate vm-compute-test ( cvmh03 -> cvmh01 )
 * Migrate vm-swbuildsl6 ( cvmh04 -> cvmh02 )
+Transition failed: terminated
+An invalid transition was produced
 Executing Cluster Transition:
- * Pseudo action: c-watch-ib0_stop_0
- * Pseudo action: load_stopped_cvmh02
- * Pseudo action: load_stopped_cvmh01
- * Resource action: p-watch-ib0 stop on cvmh03
- * Resource action: vm-compute-test migrate_to on cvmh03
- * Resource action: p-watch-ib0 stop on cvmh04
- * Pseudo action: c-watch-ib0_stopped_0
- * Resource action: vm-compute-test migrate_from on cvmh01
- * Resource action: vm-swbuildsl6 migrate_to on cvmh04
- * Resource action: vm-swbuildsl6 migrate_from on cvmh02
- * Resource action: vm-swbuildsl6 stop on cvmh04
- * Pseudo action: load_stopped_cvmh04
- * Resource action: vm-compute-test stop on cvmh03
- * Pseudo action: load_stopped_cvmh03
- * Pseudo action: c-p-libvirtd_stop_0
- * Pseudo action: vm-compute-test_start_0
- * Pseudo action: vm-swbuildsl6_start_0
- * Resource action: p-libvirtd stop on cvmh03
- * Resource action: vm-compute-test monitor=45000 on cvmh01
- * Resource action: vm-swbuildsl6 monitor=45000 on cvmh02
- * Resource action: p-libvirtd stop on cvmh04
- * Pseudo action: c-p-libvirtd_stopped_0
- * Pseudo action: c-fs-bind-libvirt-VM-cvmh_stop_0
- * Pseudo action: c-fs-libvirt-VM-xcm_stop_0
- * Resource action: fs-bind-libvirt-VM-cvmh stop on cvmh03
- * Resource action: fs-libvirt-VM-xcm stop on cvmh03
- * Resource action: fs-bind-libvirt-VM-cvmh stop on cvmh04
- * Pseudo action: c-fs-bind-libvirt-VM-cvmh_stopped_0
- * Resource action: fs-libvirt-VM-xcm stop on cvmh04
- * Pseudo action: c-fs-libvirt-VM-xcm_stopped_0
- * Pseudo action: c-fs-gpfs_stop_0
- * Resource action: p-fs-gpfs stop on cvmh03
- * Resource action: p-fs-gpfs stop on cvmh04
- * Pseudo action: c-fs-gpfs_stopped_0
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
- * Node cvmh03: standby
- * Node cvmh04: standby
+ * Node cvmh03: standby (with active resources)
+ * Node cvmh04: standby (with active resources)
 * Online: [ cvmh01 cvmh02 ]
 * Full List of Resources:
- * fence-cvmh01 (stonith:fence_ipmilan): Started cvmh02
- * fence-cvmh02 (stonith:fence_ipmilan): Started cvmh01
- * fence-cvmh03 (stonith:fence_ipmilan): Started cvmh01
- * fence-cvmh04 (stonith:fence_ipmilan): Started cvmh02
 * Clone Set: c-fs-libvirt-VM-xcm [fs-libvirt-VM-xcm]:
- * Started: [ cvmh01 cvmh02 ]
- * Stopped: [ cvmh03 cvmh04 ]
- * Clone Set: c-p-libvirtd [p-libvirtd]:
- * Started: [ cvmh01 cvmh02 ]
- * Stopped: [ cvmh03 cvmh04 ]
- * Clone Set: c-fs-bind-libvirt-VM-cvmh [fs-bind-libvirt-VM-cvmh]:
- * Started: [ cvmh01 cvmh02 ]
- * Stopped: [ cvmh03 cvmh04 ]
+ * Started: [ cvmh01 cvmh02 cvmh03 cvmh04 ]
 * Clone Set: c-watch-ib0 [p-watch-ib0]:
- * Started: [ cvmh01 cvmh02 ]
- * Stopped: [ cvmh03 cvmh04 ]
+ * Started: [ cvmh01 cvmh02 cvmh03 cvmh04 ]
 * Clone Set: c-fs-gpfs [p-fs-gpfs]:
- * Started: [ cvmh01 cvmh02 ]
- * Stopped: [ cvmh03 cvmh04 ]
- * vm-compute-test (ocf:ccni:xcatVirtualDomain): Started cvmh01
- * vm-swbuildsl6 (ocf:ccni:xcatVirtualDomain): Started cvmh02
+ * Started: [ cvmh01 cvmh02 cvmh03 cvmh04 ]
+ * fence-cvmh04 (stonith:fence_ipmilan): Started cvmh02
+ * fence-cvmh03 (stonith:fence_ipmilan): Started cvmh01
+ * fence-cvmh02 (stonith:fence_ipmilan): Started cvmh01
+ * fence-cvmh01 (stonith:fence_ipmilan): Started cvmh02
+ * Clone Set: c-p-libvirtd [p-libvirtd]:
+ * Started: [ cvmh01 cvmh02 cvmh03 cvmh04 ]
+ * Clone Set: c-fs-bind-libvirt-VM-cvmh [fs-bind-libvirt-VM-cvmh]:
+ * Started: [ cvmh01 cvmh02 cvmh03 cvmh04 ]
+ * vm-compute-test (ocf:ccni:xcatVirtualDomain): Started cvmh03
+ * vm-swbuildsl6 (ocf:ccni:xcatVirtualDomain): Started cvmh04
diff --git a/cts/scheduler/summary/migrate-fail-2.summary b/cts/scheduler/summary/migrate-fail-2.summary
index 278b2c02818..aa2717ef2ae 100644
--- a/cts/scheduler/summary/migrate-fail-2.summary
+++ b/cts/scheduler/summary/migrate-fail-2.summary
@@ -1,27 +1,26 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ hex-13 hex-14 ]
 * Full List of Resources:
- * test-vm (ocf:heartbeat:Xen): FAILED [ hex-13 hex-14 ]
 * Clone Set: c-clusterfs [dlm]:
 * Started: [ hex-13 hex-14 ]
+ * test-vm (ocf:heartbeat:Xen): FAILED [ hex-13 hex-14 ]
 Transition Summary:
 * Recover test-vm ( hex-13 )
 Executing Cluster Transition:
- * Resource action: test-vm stop on hex-14
- * Resource action: test-vm stop on hex-13
- * Pseudo action: load_stopped_hex-14
- * Pseudo action: load_stopped_hex-13
- * Resource action: test-vm start on hex-13
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ hex-13 hex-14 ]
 * Full List of Resources:
- * test-vm (ocf:heartbeat:Xen): Started hex-13
 * Clone Set: c-clusterfs [dlm]:
 * Started: [ hex-13 hex-14 ]
+ * test-vm (ocf:heartbeat:Xen): FAILED [ hex-13 hex-14 ]
diff --git a/cts/scheduler/summary/migrate-fail-3.summary b/cts/scheduler/summary/migrate-fail-3.summary
index f0283966304..76b5fc90950 100644
--- a/cts/scheduler/summary/migrate-fail-3.summary
+++ b/cts/scheduler/summary/migrate-fail-3.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ hex-13 hex-14 ]
@@ -11,16 +13,14 @@ Transition Summary:
 * Recover test-vm ( hex-13 )
 Executing Cluster Transition:
- * Resource action: test-vm stop on hex-13
- * Pseudo action: load_stopped_hex-14
- * Pseudo action: load_stopped_hex-13
- * Resource action: test-vm start on hex-13
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ hex-13 hex-14 ]
 * Full List of Resources:
- * test-vm (ocf:heartbeat:Xen): Started hex-13
+ * test-vm (ocf:heartbeat:Xen): FAILED hex-13
 * Clone Set: c-clusterfs [dlm]:
 * Started: [ hex-13 hex-14 ]
diff --git a/cts/scheduler/summary/migrate-fail-4.summary b/cts/scheduler/summary/migrate-fail-4.summary
index 0d155f4ed67..6990b28bb5a 100644
--- a/cts/scheduler/summary/migrate-fail-4.summary
+++ b/cts/scheduler/summary/migrate-fail-4.summary
@@ -1,26 +1,26 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ hex-13 hex-14 ]
 * Full List of Resources:
- * test-vm (ocf:heartbeat:Xen): FAILED hex-14
 * Clone Set: c-clusterfs [dlm]:
 * Started: [ hex-13 hex-14 ]
+ * test-vm (ocf:heartbeat:Xen): FAILED hex-14
 Transition Summary:
 * Recover test-vm ( hex-14 -> hex-13 )
 Executing Cluster Transition:
- * Resource action: test-vm stop on hex-14
- * Pseudo action: load_stopped_hex-13
- * Pseudo action: load_stopped_hex-14
- * Resource action: test-vm start on hex-13
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ hex-13 hex-14 ]
 * Full List of Resources:
- * test-vm (ocf:heartbeat:Xen): Started hex-13
 * Clone Set: c-clusterfs [dlm]:
 * Started: [ hex-13 hex-14 ]
+ * test-vm (ocf:heartbeat:Xen): FAILED hex-14
diff --git a/cts/scheduler/summary/migrate-fail-5.summary b/cts/scheduler/summary/migrate-fail-5.summary
index 4200e29fcbb..44575e5036e 100644
--- a/cts/scheduler/summary/migrate-fail-5.summary
+++ b/cts/scheduler/summary/migrate-fail-5.summary
@@ -1,25 +1,26 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ hex-13 hex-14 ]
 * Full List of Resources:
- * test-vm (ocf:heartbeat:Xen): Stopped
 * Clone Set: c-clusterfs [dlm]:
 * Started: [ hex-13 hex-14 ]
+ * test-vm (ocf:heartbeat:Xen): Stopped
 Transition Summary:
 * Start test-vm ( hex-13 )
 Executing Cluster Transition:
- * Pseudo action: load_stopped_hex-14
- * Pseudo action: load_stopped_hex-13
- * Resource action: test-vm start on hex-13
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ hex-13 hex-14 ]
 * Full List of Resources:
- * test-vm (ocf:heartbeat:Xen): Started hex-13
 * Clone Set: c-clusterfs [dlm]:
 * Started: [ hex-13 hex-14 ]
+ * test-vm (ocf:heartbeat:Xen): Stopped
diff --git a/cts/scheduler/summary/migrate-fail-6.summary b/cts/scheduler/summary/migrate-fail-6.summary
index da1ccb0ee7b..aa2717ef2ae 100644
--- a/cts/scheduler/summary/migrate-fail-6.summary
+++ b/cts/scheduler/summary/migrate-fail-6.summary
@@ -1,27 +1,26 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ hex-13 hex-14 ]
 * Full List of Resources:
- * test-vm (ocf:heartbeat:Xen): FAILED [ hex-13 hex-14 ]
 * Clone Set: c-clusterfs [dlm]:
 * Started: [ hex-13 hex-14 ]
+ * test-vm (ocf:heartbeat:Xen): FAILED [ hex-13 hex-14 ]
 Transition Summary:
 * Recover test-vm ( hex-13 )
 Executing Cluster Transition:
- * Resource action: test-vm stop on hex-13
- * Resource action: test-vm stop on hex-14
- * Pseudo action: load_stopped_hex-14
- * Pseudo action: load_stopped_hex-13
- * Resource action: test-vm start on hex-13
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ hex-13 hex-14 ]
 * Full List of Resources:
- * test-vm (ocf:heartbeat:Xen): Started hex-13
 * Clone Set: c-clusterfs [dlm]:
 * Started: [ hex-13 hex-14 ]
+ * test-vm (ocf:heartbeat:Xen): FAILED [ hex-13 hex-14 ]
diff --git a/cts/scheduler/summary/migrate-fail-7.summary b/cts/scheduler/summary/migrate-fail-7.summary
index 9a8222d73ff..8a8cc95d641 100644
--- a/cts/scheduler/summary/migrate-fail-7.summary
+++ b/cts/scheduler/summary/migrate-fail-7.summary
@@ -1,9 +1,11 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ hex-13 hex-14 ]
 * Full List of Resources:
- * test-vm (ocf:heartbeat:Xen): Stopped hex-13
+ * test-vm (ocf:heartbeat:Xen): Started hex-13
 * Clone Set: c-clusterfs [dlm]:
 * Started: [ hex-13 hex-14 ]
@@ -11,11 +13,11 @@ Transition Summary:
 * Restart test-vm ( hex-13 )
 Executing Cluster Transition:
- * Pseudo action: load_stopped_hex-14
- * Pseudo action: load_stopped_hex-13
- * Resource action: test-vm start on hex-13
+ * Resource action: test-vm stop on hex-13
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ hex-13 hex-14 ]
diff --git a/cts/scheduler/summary/migrate-fail-8.summary b/cts/scheduler/summary/migrate-fail-8.summary
index 0d155f4ed67..6990b28bb5a 100644
--- a/cts/scheduler/summary/migrate-fail-8.summary
+++ b/cts/scheduler/summary/migrate-fail-8.summary
@@ -1,26 +1,26 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ hex-13 hex-14 ]
 * Full List of Resources:
- * test-vm (ocf:heartbeat:Xen): FAILED hex-14
 * Clone Set: c-clusterfs [dlm]:
 * Started: [ hex-13 hex-14 ]
+ * test-vm (ocf:heartbeat:Xen): FAILED hex-14
 Transition Summary:
 * Recover test-vm ( hex-14 -> hex-13 )
 Executing Cluster Transition:
- * Resource action: test-vm stop on hex-14
- * Pseudo action: load_stopped_hex-13
- * Pseudo action: load_stopped_hex-14
- * Resource action: test-vm start on hex-13
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ hex-13 hex-14 ]
 * Full List of Resources:
- * test-vm (ocf:heartbeat:Xen): Started hex-13
 * Clone Set: c-clusterfs [dlm]:
 * Started: [ hex-13 hex-14 ]
+ * test-vm (ocf:heartbeat:Xen): FAILED hex-14
diff --git a/cts/scheduler/summary/migrate-fail-9.summary b/cts/scheduler/summary/migrate-fail-9.summary
index 4200e29fcbb..44575e5036e 100644
--- a/cts/scheduler/summary/migrate-fail-9.summary
+++ b/cts/scheduler/summary/migrate-fail-9.summary
@@ -1,25 +1,26 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ hex-13 hex-14 ]
 * Full List of Resources:
- * test-vm (ocf:heartbeat:Xen): Stopped
 * Clone Set: c-clusterfs [dlm]:
 * Started: [ hex-13 hex-14 ]
+ * test-vm (ocf:heartbeat:Xen): Stopped
 Transition Summary:
 * Start test-vm ( hex-13 )
 Executing Cluster Transition:
- * Pseudo action: load_stopped_hex-14
- * Pseudo action: load_stopped_hex-13
- * Resource action: test-vm start on hex-13
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ hex-13 hex-14 ]
 * Full List of Resources:
- * test-vm (ocf:heartbeat:Xen): Started hex-13
 * Clone Set: c-clusterfs [dlm]:
 * Started: [ hex-13 hex-14 ]
+ * test-vm (ocf:heartbeat:Xen): Stopped
diff --git a/cts/scheduler/summary/migrate-fencing.summary b/cts/scheduler/summary/migrate-fencing.summary
index 500c78a3b53..bae5b73e908 100644
--- a/cts/scheduler/summary/migrate-fencing.summary
+++ b/cts/scheduler/summary/migrate-fencing.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Node pcmk-4: UNCLEAN (online)
 * Online: [ pcmk-1 pcmk-2 pcmk-3 ]
@@ -34,75 +36,33 @@ Transition Summary:
 * Stop ping-1:0 ( pcmk-4 ) due to node availability
 * Stop stateful-1:0 ( Promoted pcmk-4 ) due to node availability
 * Promote stateful-1:1 ( Unpromoted -> Promoted pcmk-1 )
+Transition failed: terminated
+An invalid transition was produced
 Executing Cluster Transition:
- * Pseudo action: Fencing_stop_0
- * Resource action: stateful-1:3 monitor=15000 on pcmk-3
- * Resource action: stateful-1:2 monitor=15000 on pcmk-2
- * Fencing pcmk-4 (reboot)
- * Pseudo action: FencingChild:0_stop_0
- * Pseudo action: Fencing_stopped_0
- * Pseudo action: rsc_pcmk-4_stop_0
- * Pseudo action: lsb-dummy_stop_0
action: lsb-dummy_stop_0 - * Resource action: migrator migrate_to on pcmk-1 - * Pseudo action: Connectivity_stop_0 - * Pseudo action: group-1_stop_0 - * Pseudo action: r192.168.101.183_stop_0 - * Resource action: rsc_pcmk-4 start on pcmk-2 - * Resource action: migrator migrate_from on pcmk-3 - * Resource action: migrator stop on pcmk-1 - * Pseudo action: ping-1:0_stop_0 - * Pseudo action: Connectivity_stopped_0 - * Pseudo action: r192.168.101.182_stop_0 - * Resource action: rsc_pcmk-4 monitor=5000 on pcmk-2 - * Pseudo action: migrator_start_0 - * Pseudo action: r192.168.101.181_stop_0 - * Resource action: migrator monitor=10000 on pcmk-3 - * Pseudo action: group-1_stopped_0 - * Pseudo action: master-1_demote_0 - * Pseudo action: stateful-1:0_demote_0 - * Pseudo action: master-1_demoted_0 - * Pseudo action: master-1_stop_0 - * Pseudo action: stateful-1:0_stop_0 - * Pseudo action: master-1_stopped_0 - * Pseudo action: master-1_promote_0 - * Resource action: stateful-1:1 promote on pcmk-1 - * Pseudo action: master-1_promoted_0 - * Pseudo action: group-1_start_0 - * Resource action: r192.168.101.181 start on pcmk-1 - * Resource action: r192.168.101.182 start on pcmk-1 - * Resource action: r192.168.101.183 start on pcmk-1 - * Resource action: stateful-1:1 monitor=16000 on pcmk-1 - * Pseudo action: group-1_running_0 - * Resource action: r192.168.101.181 monitor=5000 on pcmk-1 - * Resource action: r192.168.101.182 monitor=5000 on pcmk-1 - * Resource action: r192.168.101.183 monitor=5000 on pcmk-1 - * Resource action: lsb-dummy start on pcmk-1 - * Resource action: lsb-dummy monitor=5000 on pcmk-1 Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node pcmk-4: UNCLEAN (online) * Online: [ pcmk-1 pcmk-2 pcmk-3 ] - * OFFLINE: [ pcmk-4 ] * Full List of Resources: * Clone Set: Fencing [FencingChild]: - * Started: [ pcmk-1 pcmk-2 pcmk-3 ] - * Stopped: [ pcmk-4 ] + * Started: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] * Resource Group: group-1: - * r192.168.101.181 (ocf:heartbeat:IPaddr): Started pcmk-1 - * r192.168.101.182 (ocf:heartbeat:IPaddr): Started pcmk-1 - * r192.168.101.183 (ocf:heartbeat:IPaddr): Started pcmk-1 + * r192.168.101.181 (ocf:heartbeat:IPaddr): Started pcmk-4 + * r192.168.101.182 (ocf:heartbeat:IPaddr): Started pcmk-4 + * r192.168.101.183 (ocf:heartbeat:IPaddr): Started pcmk-4 * rsc_pcmk-1 (ocf:heartbeat:IPaddr): Started pcmk-1 * rsc_pcmk-2 (ocf:heartbeat:IPaddr): Started pcmk-2 * rsc_pcmk-3 (ocf:heartbeat:IPaddr): Started pcmk-3 - * rsc_pcmk-4 (ocf:heartbeat:IPaddr): Started pcmk-2 - * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-1 - * migrator (ocf:pacemaker:Dummy): Started pcmk-3 + * rsc_pcmk-4 (ocf:heartbeat:IPaddr): Started pcmk-4 + * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-4 + * migrator (ocf:pacemaker:Dummy): Started pcmk-1 * Clone Set: Connectivity [ping-1]: - * Started: [ pcmk-1 pcmk-2 pcmk-3 ] - * Stopped: [ pcmk-4 ] + * Started: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] * Clone Set: master-1 [stateful-1] (promotable): - * Promoted: [ pcmk-1 ] - * Unpromoted: [ pcmk-2 pcmk-3 ] - * Stopped: [ pcmk-4 ] + * Promoted: [ pcmk-4 ] + * Unpromoted: [ pcmk-1 pcmk-2 pcmk-3 ] diff --git a/cts/scheduler/summary/migrate-partial-1.summary b/cts/scheduler/summary/migrate-partial-1.summary index f65b708d263..d0db430bbbf 100644 --- a/cts/scheduler/summary/migrate-partial-1.summary +++ b/cts/scheduler/summary/migrate-partial-1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ hex-13 hex-14 ] @@ 
-10,11 +12,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: test-vm stop on hex-14 - * Pseudo action: load_stopped_hex-14 - * Pseudo action: load_stopped_hex-13 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ hex-13 hex-14 ] diff --git a/cts/scheduler/summary/migrate-partial-2.summary b/cts/scheduler/summary/migrate-partial-2.summary index 3a423593e7f..33e2797d52e 100644 --- a/cts/scheduler/summary/migrate-partial-2.summary +++ b/cts/scheduler/summary/migrate-partial-2.summary @@ -1,27 +1,26 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ hex-13 hex-14 ] * Full List of Resources: - * test-vm (ocf:heartbeat:Xen): Started [ hex-13 hex-14 ] * Clone Set: c-clusterfs [dlm]: * Started: [ hex-13 hex-14 ] + * test-vm (ocf:heartbeat:Xen): Started [ hex-13 hex-14 ] Transition Summary: * Migrate test-vm ( hex-14 -> hex-13 ) Executing Cluster Transition: - * Resource action: test-vm migrate_from on hex-13 - * Resource action: test-vm stop on hex-14 - * Pseudo action: load_stopped_hex-14 - * Pseudo action: load_stopped_hex-13 - * Pseudo action: test-vm_start_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ hex-13 hex-14 ] * Full List of Resources: - * test-vm (ocf:heartbeat:Xen): Started hex-13 * Clone Set: c-clusterfs [dlm]: * Started: [ hex-13 hex-14 ] + * test-vm (ocf:heartbeat:Xen): Started [ hex-13 hex-14 ] diff --git a/cts/scheduler/summary/migrate-partial-3.summary b/cts/scheduler/summary/migrate-partial-3.summary index a674cf7a27a..7b0b4f6d5f6 100644 --- a/cts/scheduler/summary/migrate-partial-3.summary +++ b/cts/scheduler/summary/migrate-partial-3.summary @@ -1,31 +1,30 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ hex-13 hex-14 ] * OFFLINE: [ hex-15 ] * Full List of Resources: - * test-vm (ocf:heartbeat:Xen): FAILED hex-14 * Clone Set: c-clusterfs [dlm]: * Started: [ hex-13 hex-14 ] * Stopped: [ hex-15 ] + * test-vm (ocf:heartbeat:Xen): FAILED hex-14 Transition Summary: * Recover test-vm ( hex-14 -> hex-13 ) Executing Cluster Transition: - * Resource action: test-vm stop on hex-14 - * Pseudo action: load_stopped_hex-15 - * Pseudo action: load_stopped_hex-13 - * Pseudo action: load_stopped_hex-14 - * Resource action: test-vm start on hex-13 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ hex-13 hex-14 ] * OFFLINE: [ hex-15 ] * Full List of Resources: - * test-vm (ocf:heartbeat:Xen): Started hex-13 * Clone Set: c-clusterfs [dlm]: * Started: [ hex-13 hex-14 ] * Stopped: [ hex-15 ] + * test-vm (ocf:heartbeat:Xen): FAILED hex-14 diff --git a/cts/scheduler/summary/migrate-partial-4.summary b/cts/scheduler/summary/migrate-partial-4.summary index abb31f1e6f9..34a5e47ec38 100644 --- a/cts/scheduler/summary/migrate-partial-4.summary +++ b/cts/scheduler/summary/migrate-partial-4.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ lustre01-left lustre02-left lustre03-left lustre04-left ] @@ -55,45 +57,26 @@ Transition Summary: * Start drbd-mgs:1 ( lustre02-left ) Executing Cluster Transition: - * Resource action: drbd-stacked start on lustre02-left - * Resource action: drbd-testfs-local start on lustre03-left - * Resource action: lustre migrate_to on lustre03-left - * Resource action: testfs stop on lustre02-left - * Resource action: testfs stop on lustre01-left - * Pseudo action: ms-drbd-mgs_pre_notify_start_0 - * Resource action: lustre migrate_from on lustre04-left - * Resource 
action: lustre stop on lustre03-left - * Resource action: testfs start on lustre03-left - * Pseudo action: ms-drbd-mgs_confirmed-pre_notify_start_0 - * Pseudo action: ms-drbd-mgs_start_0 - * Pseudo action: lustre_start_0 - * Resource action: drbd-mgs:0 start on lustre01-left - * Resource action: drbd-mgs:1 start on lustre02-left - * Pseudo action: ms-drbd-mgs_running_0 - * Pseudo action: ms-drbd-mgs_post_notify_running_0 - * Resource action: drbd-mgs:0 notify on lustre01-left - * Resource action: drbd-mgs:1 notify on lustre02-left - * Pseudo action: ms-drbd-mgs_confirmed-post_notify_running_0 - * Resource action: drbd-mgs:0 monitor=30000 on lustre01-left - * Resource action: drbd-mgs:1 monitor=30000 on lustre02-left Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ lustre01-left lustre02-left lustre03-left lustre04-left ] * Full List of Resources: * drbd-local (ocf:vds-ok:Ticketer): Started lustre01-left - * drbd-stacked (ocf:vds-ok:Ticketer): Started lustre02-left - * drbd-testfs-local (ocf:vds-ok:Ticketer): Started lustre03-left + * drbd-stacked (ocf:vds-ok:Ticketer): Stopped + * drbd-testfs-local (ocf:vds-ok:Ticketer): Stopped * drbd-testfs-stacked (ocf:vds-ok:Ticketer): Stopped * ip-testfs-mdt0000-left (ocf:heartbeat:IPaddr2): Stopped * ip-testfs-ost0000-left (ocf:heartbeat:IPaddr2): Stopped * ip-testfs-ost0001-left (ocf:heartbeat:IPaddr2): Stopped * ip-testfs-ost0002-left (ocf:heartbeat:IPaddr2): Stopped * ip-testfs-ost0003-left (ocf:heartbeat:IPaddr2): Stopped - * lustre (ocf:vds-ok:Ticketer): Started lustre04-left + * lustre (ocf:vds-ok:Ticketer): Started lustre03-left * mgs (ocf:vds-ok:lustre-server): Stopped - * testfs (ocf:vds-ok:Ticketer): Started lustre03-left + * testfs (ocf:vds-ok:Ticketer): Started lustre02-left * testfs-mdt0000 (ocf:vds-ok:lustre-server): Stopped * testfs-ost0000 (ocf:vds-ok:lustre-server): Stopped * testfs-ost0001 (ocf:vds-ok:lustre-server): Stopped @@ -103,7 +86,7 @@ Revised Cluster Status: * ip-booth (ocf:heartbeat:IPaddr2): Started lustre02-left * boothd (ocf:pacemaker:booth-site): Started lustre02-left * Clone Set: ms-drbd-mgs [drbd-mgs] (promotable): - * Unpromoted: [ lustre01-left lustre02-left ] + * Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] * Clone Set: ms-drbd-testfs-mdt0000 [drbd-testfs-mdt0000] (promotable): * Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] * Clone Set: ms-drbd-testfs-mdt0000-left [drbd-testfs-mdt0000-left] (promotable): diff --git a/cts/scheduler/summary/migrate-shutdown.summary b/cts/scheduler/summary/migrate-shutdown.summary index 985b554c227..16a2b962842 100644 --- a/cts/scheduler/summary/migrate-shutdown.summary +++ b/cts/scheduler/summary/migrate-shutdown.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] @@ -40,53 +42,29 @@ Transition Summary: * Stop stateful-1:2 ( Unpromoted pcmk-4 ) due to node availability Executing Cluster Transition: - * Resource action: Fencing stop on pcmk-1 - * Resource action: rsc_pcmk-1 stop on pcmk-1 - * Resource action: rsc_pcmk-2 stop on pcmk-2 - * Resource action: rsc_pcmk-4 stop on pcmk-4 - * Resource action: lsb-dummy stop on pcmk-2 - * Resource action: migrator stop on pcmk-1 - * Resource action: migrator stop on pcmk-3 - * Pseudo action: Connectivity_stop_0 - * Cluster action: do_shutdown on pcmk-3 - * Pseudo action: group-1_stop_0 - * Resource action: r192.168.122.107 stop on pcmk-2 - * Resource action: ping-1:0 stop on pcmk-1 - * Resource 
action: ping-1:1 stop on pcmk-2 - * Resource action: ping-1:3 stop on pcmk-4 - * Pseudo action: Connectivity_stopped_0 - * Resource action: r192.168.122.106 stop on pcmk-2 - * Resource action: r192.168.122.105 stop on pcmk-2 - * Pseudo action: group-1_stopped_0 - * Pseudo action: master-1_demote_0 - * Resource action: stateful-1:0 demote on pcmk-2 - * Pseudo action: master-1_demoted_0 - * Pseudo action: master-1_stop_0 - * Resource action: stateful-1:2 stop on pcmk-1 - * Resource action: stateful-1:0 stop on pcmk-2 - * Resource action: stateful-1:3 stop on pcmk-4 - * Pseudo action: master-1_stopped_0 - * Cluster action: do_shutdown on pcmk-4 - * Cluster action: do_shutdown on pcmk-2 - * Cluster action: do_shutdown on pcmk-1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] * Full List of Resources: - * Fencing (stonith:fence_xvm): Stopped + * Fencing (stonith:fence_xvm): Started pcmk-1 * Resource Group: group-1: - * r192.168.122.105 (ocf:heartbeat:IPaddr): Stopped - * r192.168.122.106 (ocf:heartbeat:IPaddr): Stopped - * r192.168.122.107 (ocf:heartbeat:IPaddr): Stopped - * rsc_pcmk-1 (ocf:heartbeat:IPaddr): Stopped - * rsc_pcmk-2 (ocf:heartbeat:IPaddr): Stopped + * r192.168.122.105 (ocf:heartbeat:IPaddr): Started pcmk-2 + * r192.168.122.106 (ocf:heartbeat:IPaddr): Started pcmk-2 + * r192.168.122.107 (ocf:heartbeat:IPaddr): Started pcmk-2 + * rsc_pcmk-1 (ocf:heartbeat:IPaddr): Started pcmk-1 + * rsc_pcmk-2 (ocf:heartbeat:IPaddr): Started pcmk-2 * rsc_pcmk-3 (ocf:heartbeat:IPaddr): Stopped - * rsc_pcmk-4 (ocf:heartbeat:IPaddr): Stopped - * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Stopped - * migrator (ocf:pacemaker:Dummy): Stopped + * rsc_pcmk-4 (ocf:heartbeat:IPaddr): Started pcmk-4 + * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-2 + * migrator (ocf:pacemaker:Dummy): Started pcmk-1 * Clone Set: Connectivity [ping-1]: - * Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] + * Started: [ pcmk-1 pcmk-2 pcmk-4 ] + * Stopped: [ pcmk-3 ] * Clone Set: master-1 [stateful-1] (promotable): - * Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] + * Promoted: [ pcmk-2 ] + * Unpromoted: [ pcmk-1 pcmk-4 ] + * Stopped: [ pcmk-3 ] diff --git a/cts/scheduler/summary/migrate-start-complex.summary b/cts/scheduler/summary/migrate-start-complex.summary index 78a408b0145..92e783fcc31 100644 --- a/cts/scheduler/summary/migrate-start-complex.summary +++ b/cts/scheduler/summary/migrate-start-complex.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ dom0-01 dom0-02 ] @@ -19,32 +21,18 @@ Transition Summary: * Start bottom:1 ( dom0-02 ) Executing Cluster Transition: - * Resource action: top stop on dom0-02 - * Pseudo action: clone-dom0-iscsi1_start_0 - * Resource action: bottom:0 monitor on dom0-01 - * Resource action: bottom:1 monitor on dom0-02 - * Pseudo action: clone-bottom_start_0 - * Pseudo action: dom0-iscsi1:1_start_0 - * Resource action: dom0-iscsi1-cnx1:1 start on dom0-01 - * Resource action: bottom:0 start on dom0-01 - * Resource action: bottom:1 start on dom0-02 - * Pseudo action: clone-bottom_running_0 - * Pseudo action: dom0-iscsi1:1_running_0 - * Pseudo action: clone-dom0-iscsi1_running_0 - * Resource action: domU-test01 migrate_to on dom0-02 - * Resource action: domU-test01 migrate_from on dom0-01 - * Resource action: domU-test01 stop on dom0-02 - * Pseudo action: domU-test01_start_0 - * Resource action: top start on dom0-01 Revised Cluster Status: + * Cluster Summary: + * Node List: * 
Online: [ dom0-01 dom0-02 ] * Full List of Resources: - * top (ocf:heartbeat:Dummy): Started dom0-01 - * domU-test01 (ocf:heartbeat:Xen): Started dom0-01 + * top (ocf:heartbeat:Dummy): Started dom0-02 + * domU-test01 (ocf:heartbeat:Xen): Started dom0-02 * Clone Set: clone-dom0-iscsi1 [dom0-iscsi1]: - * Started: [ dom0-01 dom0-02 ] + * Started: [ dom0-02 ] + * Stopped: [ dom0-01 ] * Clone Set: clone-bottom [bottom]: - * Started: [ dom0-01 dom0-02 ] + * Stopped: [ dom0-01 dom0-02 ] diff --git a/cts/scheduler/summary/migrate-start.summary b/cts/scheduler/summary/migrate-start.summary index 9aa1831ab3a..e896316cc02 100644 --- a/cts/scheduler/summary/migrate-start.summary +++ b/cts/scheduler/summary/migrate-start.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ dom0-01 dom0-02 ] @@ -13,21 +15,15 @@ Transition Summary: * Start dom0-iscsi1-cnx1:1 ( dom0-01 ) Executing Cluster Transition: - * Pseudo action: clone-dom0-iscsi1_start_0 - * Pseudo action: dom0-iscsi1:1_start_0 - * Resource action: dom0-iscsi1-cnx1:1 start on dom0-01 - * Pseudo action: dom0-iscsi1:1_running_0 - * Pseudo action: clone-dom0-iscsi1_running_0 - * Resource action: domU-test01 migrate_to on dom0-02 - * Resource action: domU-test01 migrate_from on dom0-01 - * Resource action: domU-test01 stop on dom0-02 - * Pseudo action: domU-test01_start_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ dom0-01 dom0-02 ] * Full List of Resources: - * domU-test01 (ocf:heartbeat:Xen): Started dom0-01 + * domU-test01 (ocf:heartbeat:Xen): Started dom0-02 * Clone Set: clone-dom0-iscsi1 [dom0-iscsi1]: - * Started: [ dom0-01 dom0-02 ] + * Started: [ dom0-02 ] + * Stopped: [ dom0-01 ] diff --git a/cts/scheduler/summary/migrate-stop-complex.summary b/cts/scheduler/summary/migrate-stop-complex.summary index 7cc68b0929f..b9e0f73120f 100644 --- a/cts/scheduler/summary/migrate-stop-complex.summary +++ b/cts/scheduler/summary/migrate-stop-complex.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node dom0-02: standby (with active resources) * Online: [ dom0-01 ] @@ -16,34 +18,22 @@ Transition Summary: * Migrate domU-test01 ( dom0-02 -> dom0-01 ) * Stop dom0-iscsi1-cnx1:1 ( dom0-02 ) due to node availability * Stop bottom:1 ( dom0-02 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Resource action: top stop on dom0-02 - * Resource action: domU-test01 migrate_to on dom0-02 - * Pseudo action: clone-dom0-iscsi1_stop_0 - * Pseudo action: clone-bottom_stop_0 - * Resource action: domU-test01 migrate_from on dom0-01 - * Resource action: domU-test01 stop on dom0-02 - * Pseudo action: dom0-iscsi1:1_stop_0 - * Resource action: dom0-iscsi1-cnx1:0 stop on dom0-02 - * Resource action: bottom:0 stop on dom0-02 - * Pseudo action: clone-bottom_stopped_0 - * Pseudo action: domU-test01_start_0 - * Pseudo action: dom0-iscsi1:1_stopped_0 - * Pseudo action: clone-dom0-iscsi1_stopped_0 - * Resource action: top start on dom0-01 Revised Cluster Status: + * Cluster Summary: + * Node List: - * Node dom0-02: standby + * Node dom0-02: standby (with active resources) * Online: [ dom0-01 ] * Full List of Resources: - * top (ocf:heartbeat:Dummy): Started dom0-01 - * domU-test01 (ocf:heartbeat:Xen): Started dom0-01 + * top (ocf:heartbeat:Dummy): Started dom0-02 + * domU-test01 (ocf:heartbeat:Xen): Started dom0-02 * Clone Set: clone-dom0-iscsi1 [dom0-iscsi1]: - * Started: [ dom0-01 ] - * Stopped: [ 
dom0-02 ] + * Started: [ dom0-01 dom0-02 ] * Clone Set: clone-bottom [bottom]: - * Started: [ dom0-01 ] - * Stopped: [ dom0-02 ] + * Started: [ dom0-01 dom0-02 ] diff --git a/cts/scheduler/summary/migrate-stop-start-complex.summary b/cts/scheduler/summary/migrate-stop-start-complex.summary index b317383d3c2..069455a4ff6 100644 --- a/cts/scheduler/summary/migrate-stop-start-complex.summary +++ b/cts/scheduler/summary/migrate-stop-start-complex.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node dom0-02: standby (with active resources) * Online: [ dom0-01 ] @@ -16,35 +18,23 @@ Transition Summary: * Migrate domU-test01 ( dom0-02 -> dom0-01 ) * Stop dom0-iscsi1-cnx1:1 ( dom0-02 ) due to node availability * Move bottom:0 ( dom0-02 -> dom0-01 ) +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Resource action: domU-test01 migrate_to on dom0-02 - * Pseudo action: clone-dom0-iscsi1_stop_0 - * Resource action: domU-test01 migrate_from on dom0-01 - * Resource action: domU-test01 stop on dom0-02 - * Pseudo action: dom0-iscsi1:1_stop_0 - * Resource action: dom0-iscsi1-cnx1:0 stop on dom0-02 - * Pseudo action: domU-test01_start_0 - * Pseudo action: dom0-iscsi1:1_stopped_0 - * Pseudo action: clone-dom0-iscsi1_stopped_0 - * Pseudo action: clone-bottom_stop_0 - * Resource action: bottom:0 stop on dom0-02 - * Pseudo action: clone-bottom_stopped_0 - * Pseudo action: clone-bottom_start_0 - * Resource action: bottom:0 start on dom0-01 - * Pseudo action: clone-bottom_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: - * Node dom0-02: standby + * Node dom0-02: standby (with active resources) * Online: [ dom0-01 ] * Full List of Resources: * top (ocf:heartbeat:Dummy): Started dom0-01 - * domU-test01 (ocf:heartbeat:Xen): Started dom0-01 + * domU-test01 (ocf:heartbeat:Xen): Started dom0-02 * Clone Set: clone-dom0-iscsi1 [dom0-iscsi1]: - * Started: [ dom0-01 ] - * Stopped: [ dom0-02 ] + * Started: [ dom0-01 dom0-02 ] * Clone Set: clone-bottom [bottom]: - * Started: [ dom0-01 ] - * Stopped: [ dom0-02 ] + * Started: [ dom0-02 ] + * Stopped: [ dom0-01 ] diff --git a/cts/scheduler/summary/migrate-stop.summary b/cts/scheduler/summary/migrate-stop.summary index f66986559a3..dca71545c36 100644 --- a/cts/scheduler/summary/migrate-stop.summary +++ b/cts/scheduler/summary/migrate-stop.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node dom0-02: standby (with active resources) * Online: [ dom0-01 ] @@ -11,25 +13,19 @@ Current cluster status: Transition Summary: * Migrate domU-test01 ( dom0-02 -> dom0-01 ) * Stop dom0-iscsi1-cnx1:1 ( dom0-02 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Resource action: domU-test01 migrate_to on dom0-02 - * Pseudo action: clone-dom0-iscsi1_stop_0 - * Resource action: domU-test01 migrate_from on dom0-01 - * Resource action: domU-test01 stop on dom0-02 - * Pseudo action: dom0-iscsi1:1_stop_0 - * Resource action: dom0-iscsi1-cnx1:0 stop on dom0-02 - * Pseudo action: domU-test01_start_0 - * Pseudo action: dom0-iscsi1:1_stopped_0 - * Pseudo action: clone-dom0-iscsi1_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: - * Node dom0-02: standby + * Node dom0-02: standby (with active resources) * Online: [ dom0-01 ] * Full List of Resources: - * domU-test01 (ocf:heartbeat:Xen): Started dom0-01 + * domU-test01 (ocf:heartbeat:Xen): Started dom0-02 * Clone 
Set: clone-dom0-iscsi1 [dom0-iscsi1]: - * Started: [ dom0-01 ] - * Stopped: [ dom0-02 ] + * Started: [ dom0-01 dom0-02 ] diff --git a/cts/scheduler/summary/migrate-stop_start.summary b/cts/scheduler/summary/migrate-stop_start.summary index 13cb1c95ff7..1145d3a2987 100644 --- a/cts/scheduler/summary/migrate-stop_start.summary +++ b/cts/scheduler/summary/migrate-stop_start.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node dom0-02: standby (with active resources) * Online: [ dom0-01 ] @@ -14,28 +16,16 @@ Transition Summary: * Move dom0-iscsi1-cnx1:0 ( dom0-02 -> dom0-01 ) Executing Cluster Transition: - * Pseudo action: clone-dom0-iscsi1_stop_0 - * Pseudo action: dom0-iscsi1:0_stop_0 - * Resource action: dom0-iscsi1-cnx1:0 stop on dom0-02 - * Pseudo action: dom0-iscsi1:0_stopped_0 - * Pseudo action: clone-dom0-iscsi1_stopped_0 - * Pseudo action: clone-dom0-iscsi1_start_0 - * Pseudo action: dom0-iscsi1:0_start_0 - * Resource action: dom0-iscsi1-cnx1:0 start on dom0-01 - * Pseudo action: dom0-iscsi1:0_running_0 - * Pseudo action: clone-dom0-iscsi1_running_0 - * Resource action: domU-test01 migrate_to on dom0-02 - * Resource action: domU-test01 migrate_from on dom0-01 - * Resource action: domU-test01 stop on dom0-02 - * Pseudo action: domU-test01_start_0 Revised Cluster Status: + * Cluster Summary: + * Node List: - * Node dom0-02: standby + * Node dom0-02: standby (with active resources) * Online: [ dom0-01 ] * Full List of Resources: - * domU-test01 (ocf:heartbeat:Xen): Started dom0-01 + * domU-test01 (ocf:heartbeat:Xen): Started dom0-02 * Clone Set: clone-dom0-iscsi1 [dom0-iscsi1]: - * Started: [ dom0-01 ] - * Stopped: [ dom0-02 ] + * Started: [ dom0-02 ] + * Stopped: [ dom0-01 ] diff --git a/cts/scheduler/summary/migrate-success.summary b/cts/scheduler/summary/migrate-success.summary index cf9a0002cdd..d0db430bbbf 100644 --- a/cts/scheduler/summary/migrate-success.summary +++ b/cts/scheduler/summary/migrate-success.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ hex-13 hex-14 ] @@ -10,10 +12,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Pseudo action: load_stopped_hex-14 - * Pseudo action: load_stopped_hex-13 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ hex-13 hex-14 ] diff --git a/cts/scheduler/summary/migration-behind-migrating-remote.summary b/cts/scheduler/summary/migration-behind-migrating-remote.summary index 5529819e046..e4bbbc32d1a 100644 --- a/cts/scheduler/summary/migration-behind-migrating-remote.summary +++ b/cts/scheduler/summary/migration-behind-migrating-remote.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2017-08-21 17:12:54Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * RemoteOnline: [ remote1 remote2 ] @@ -15,25 +17,17 @@ Transition Summary: * Migrate remote1 ( node1 -> node2 ) Executing Cluster Transition: - * Resource action: rsc1 migrate_to on remote1 - * Resource action: remote1 migrate_to on node1 - * Resource action: rsc1 migrate_from on remote2 - * Resource action: rsc1 stop on remote1 - * Resource action: remote1 migrate_from on node2 - * Resource action: remote1 stop on node1 - * Pseudo action: rsc1_start_0 - * Pseudo action: remote1_start_0 - * Resource action: rsc1 monitor=10000 on remote2 - * Resource action: remote1 monitor=60000 on node2 Using the original execution date of: 2017-08-21 17:12:54Z Revised Cluster Status: + * Cluster Summary: + * Node 
List: * Online: [ node1 node2 ] * RemoteOnline: [ remote1 remote2 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started node2 - * rsc1 (ocf:pacemaker:Dummy): Started remote2 - * remote1 (ocf:pacemaker:remote): Started node2 + * rsc1 (ocf:pacemaker:Dummy): Started remote1 + * remote1 (ocf:pacemaker:remote): Started node1 * remote2 (ocf:pacemaker:remote): Started node2 diff --git a/cts/scheduler/summary/migration-intermediary-cleaned.summary b/cts/scheduler/summary/migration-intermediary-cleaned.summary index dd127a848cd..32ab763ec9c 100644 --- a/cts/scheduler/summary/migration-intermediary-cleaned.summary +++ b/cts/scheduler/summary/migration-intermediary-cleaned.summary @@ -1,5 +1,8 @@ Using the original execution date of: 2023-01-19 21:05:59Z Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ rhel8-2 rhel8-3 rhel8-4 rhel8-5 ] * OFFLINE: [ rhel8-1 ] @@ -33,34 +36,12 @@ Transition Summary: * Start ping-1:3 ( rhel8-2 ) Executing Cluster Transition: - * Resource action: Fencing monitor on rhel8-2 - * Resource action: FencingPass monitor on rhel8-2 - * Resource action: FencingFail monitor on rhel8-2 - * Resource action: rsc_rhel8-1 stop on rhel8-3 - * Resource action: rsc_rhel8-1 monitor on rhel8-2 - * Resource action: rsc_rhel8-2 stop on rhel8-4 - * Resource action: rsc_rhel8-2 monitor on rhel8-2 - * Resource action: rsc_rhel8-3 monitor on rhel8-2 - * Resource action: rsc_rhel8-4 monitor on rhel8-2 - * Resource action: rsc_rhel8-5 monitor on rhel8-2 - * Resource action: migrator monitor on rhel8-2 - * Resource action: ping-1 monitor on rhel8-2 - * Pseudo action: Connectivity_start_0 - * Resource action: stateful-1 monitor on rhel8-2 - * Resource action: r192.168.122.207 monitor on rhel8-2 - * Resource action: petulant monitor on rhel8-2 - * Resource action: r192.168.122.208 monitor on rhel8-2 - * Resource action: lsb-dummy monitor on rhel8-2 - * Resource action: rsc_rhel8-1 start on rhel8-2 - * Resource action: rsc_rhel8-2 start on rhel8-2 - * Resource action: ping-1 start on rhel8-2 - * Pseudo action: Connectivity_running_0 - * Resource action: rsc_rhel8-1 monitor=5000 on rhel8-2 - * Resource action: rsc_rhel8-2 monitor=5000 on rhel8-2 - * Resource action: ping-1 monitor=60000 on rhel8-2 Using the original execution date of: 2023-01-19 21:05:59Z Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ rhel8-2 rhel8-3 rhel8-4 rhel8-5 ] * OFFLINE: [ rhel8-1 ] @@ -69,15 +50,15 @@ Revised Cluster Status: * Fencing (stonith:fence_xvm): Started rhel8-3 * FencingPass (stonith:fence_dummy): Started rhel8-4 * FencingFail (stonith:fence_dummy): Started rhel8-5 - * rsc_rhel8-1 (ocf:heartbeat:IPaddr2): Started rhel8-2 - * rsc_rhel8-2 (ocf:heartbeat:IPaddr2): Started rhel8-2 + * rsc_rhel8-1 (ocf:heartbeat:IPaddr2): Started rhel8-3 + * rsc_rhel8-2 (ocf:heartbeat:IPaddr2): Started rhel8-4 * rsc_rhel8-3 (ocf:heartbeat:IPaddr2): Started rhel8-3 * rsc_rhel8-4 (ocf:heartbeat:IPaddr2): Started rhel8-4 * rsc_rhel8-5 (ocf:heartbeat:IPaddr2): Started rhel8-5 * migrator (ocf:pacemaker:Dummy): Started rhel8-5 * Clone Set: Connectivity [ping-1]: - * Started: [ rhel8-2 rhel8-3 rhel8-4 rhel8-5 ] - * Stopped: [ rhel8-1 ] + * Started: [ rhel8-3 rhel8-4 rhel8-5 ] + * Stopped: [ rhel8-1 rhel8-2 ] * Clone Set: promotable-1 [stateful-1] (promotable): * Promoted: [ rhel8-3 ] * Unpromoted: [ rhel8-4 rhel8-5 ] diff --git 
a/cts/scheduler/summary/migration-ping-pong.summary b/cts/scheduler/summary/migration-ping-pong.summary index 0891fbf9a46..253f2d4f62e 100644 --- a/cts/scheduler/summary/migration-ping-pong.summary +++ b/cts/scheduler/summary/migration-ping-pong.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2019-06-06 13:56:45Z Current cluster status: + * Cluster Summary: + * Node List: * Node ha-idg-2: standby * Online: [ ha-idg-1 ] @@ -16,6 +18,8 @@ Executing Cluster Transition: Using the original execution date of: 2019-06-06 13:56:45Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Node ha-idg-2: standby * Online: [ ha-idg-1 ] diff --git a/cts/scheduler/summary/minimal.summary b/cts/scheduler/summary/minimal.summary index 3886b9e8596..f8e98906ad3 100644 --- a/cts/scheduler/summary/minimal.summary +++ b/cts/scheduler/summary/minimal.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ host1 host2 ] @@ -11,19 +13,13 @@ Transition Summary: * Start rsc2 ( host1 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on host2 - * Resource action: rsc1 monitor on host1 - * Resource action: rsc2 monitor on host2 - * Resource action: rsc2 monitor on host1 - * Pseudo action: load_stopped_host2 - * Pseudo action: load_stopped_host1 - * Resource action: rsc1 start on host1 - * Resource action: rsc2 start on host1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ host1 host2 ] * Full List of Resources: - * rsc1 (ocf:pacemaker:Dummy): Started host1 - * rsc2 (ocf:pacemaker:Dummy): Started host1 + * rsc1 (ocf:pacemaker:Dummy): Stopped + * rsc2 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/mon-rsc-1.summary b/cts/scheduler/summary/mon-rsc-1.summary index 92229e31ae5..f9ff5deedd4 100644 --- a/cts/scheduler/summary/mon-rsc-1.summary +++ b/cts/scheduler/summary/mon-rsc-1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -9,14 +11,12 @@ Transition Summary: * Start rsc1 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc1 start on node1 - * Resource action: rsc1 monitor=5000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node1 + * rsc1 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/mon-rsc-2.summary b/cts/scheduler/summary/mon-rsc-2.summary index 3d605e87832..bd3ed2a1d28 100644 --- a/cts/scheduler/summary/mon-rsc-2.summary +++ b/cts/scheduler/summary/mon-rsc-2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node2: standby (with active resources) * Online: [ node1 ] @@ -10,15 +12,13 @@ Transition Summary: * Move rsc1 ( node2 -> node1 ) Executing Cluster Transition: - * Resource action: rsc1 stop on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc1 start on node1 - * Resource action: rsc1 monitor=5000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: - * Node node2: standby + * Node node2: standby (with active resources) * Online: [ node1 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node1 + * rsc1 (ocf:heartbeat:apache): Started node2 diff --git a/cts/scheduler/summary/mon-rsc-3.summary b/cts/scheduler/summary/mon-rsc-3.summary index b60e20f71c7..b611f0137dd 100644 --- 
a/cts/scheduler/summary/mon-rsc-3.summary +++ b/cts/scheduler/summary/mon-rsc-3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -8,11 +10,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 start on node1 - * Resource action: rsc1 monitor=5000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/mon-rsc-4.summary b/cts/scheduler/summary/mon-rsc-4.summary index 8ee662865d7..ed58269e820 100644 --- a/cts/scheduler/summary/mon-rsc-4.summary +++ b/cts/scheduler/summary/mon-rsc-4.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node2: standby (with active resources) * Online: [ node1 ] @@ -10,15 +12,13 @@ Transition Summary: * Move rsc1 ( node2 -> node1 ) Executing Cluster Transition: - * Resource action: rsc1 stop on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc1 start on node1 - * Resource action: rsc1 monitor=5000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Node node2: standby (with active resources) * Online: [ node1 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started [ node1 node2 ] + * rsc1 (ocf:heartbeat:apache): Started node2 diff --git a/cts/scheduler/summary/monitor-onfail-restart.summary b/cts/scheduler/summary/monitor-onfail-restart.summary index 5f409fc7438..0da852997c6 100644 --- a/cts/scheduler/summary/monitor-onfail-restart.summary +++ b/cts/scheduler/summary/monitor-onfail-restart.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * OFFLINE: [ fc16-builder2 ] @@ -10,14 +12,13 @@ Transition Summary: * Recover A ( fc16-builder ) Executing Cluster Transition: - * Resource action: A stop on fc16-builder - * Resource action: A start on fc16-builder - * Resource action: A monitor=20000 on fc16-builder Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * OFFLINE: [ fc16-builder2 ] * Full List of Resources: - * A (ocf:pacemaker:Dummy): Started fc16-builder + * A (ocf:pacemaker:Dummy): FAILED fc16-builder diff --git a/cts/scheduler/summary/monitor-onfail-stop.summary b/cts/scheduler/summary/monitor-onfail-stop.summary index 2633efd16c8..a5b31878d14 100644 --- a/cts/scheduler/summary/monitor-onfail-stop.summary +++ b/cts/scheduler/summary/monitor-onfail-stop.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * OFFLINE: [ fc16-builder2 ] @@ -10,12 +12,13 @@ Transition Summary: * Stop A ( fc16-builder ) due to node availability Executing Cluster Transition: - * Resource action: A stop on fc16-builder Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * OFFLINE: [ fc16-builder2 ] * Full List of Resources: - * A (ocf:pacemaker:Dummy): Stopped + * A (ocf:pacemaker:Dummy): FAILED fc16-builder diff --git a/cts/scheduler/summary/monitor-recovery.summary b/cts/scheduler/summary/monitor-recovery.summary index 1a7ff74a4cd..c9d07be6598 100644 --- a/cts/scheduler/summary/monitor-recovery.summary +++ b/cts/scheduler/summary/monitor-recovery.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ CSE-1 ] * OFFLINE: [ CSE-2 ] @@ -15,11 +17,10 @@ Transition Summary: * Stop d_tomcat:0 ( CSE-1 ) due to node availability Executing 
Cluster Transition: - * Pseudo action: cl_tomcat_stop_0 - * Resource action: d_tomcat stop on CSE-1 - * Pseudo action: cl_tomcat_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ CSE-1 ] * OFFLINE: [ CSE-2 ] @@ -29,4 +30,5 @@ Revised Cluster Status: * ip_19 (ocf:heartbeat:IPaddr2): Stopped * ip_11 (ocf:heartbeat:IPaddr2): Stopped * Clone Set: cl_tomcat [d_tomcat]: - * Stopped: [ CSE-1 CSE-2 ] + * Started: [ CSE-1 ] + * Stopped: [ CSE-2 ] diff --git a/cts/scheduler/summary/multi1.summary b/cts/scheduler/summary/multi1.summary index a4ea1496c00..91e143ab53b 100644 --- a/cts/scheduler/summary/multi1.summary +++ b/cts/scheduler/summary/multi1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -9,13 +11,12 @@ Transition Summary: * Restart rsc1 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 stop on node2 - * Resource action: rsc1 stop on node1 - * Resource action: rsc1 start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node1 + * rsc1 (ocf:heartbeat:apache): Started [ node1 node2 ] diff --git a/cts/scheduler/summary/multiple-active-block-group.summary b/cts/scheduler/summary/multiple-active-block-group.summary index 923ce5592a5..3fc534d087e 100644 --- a/cts/scheduler/summary/multiple-active-block-group.summary +++ b/cts/scheduler/summary/multiple-active-block-group.summary @@ -1,6 +1,8 @@ 0 of 4 resource instances DISABLED and 3 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node2 node3 ] @@ -16,6 +18,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node2 node3 ] diff --git a/cts/scheduler/summary/multiple-monitor-one-failed.summary b/cts/scheduler/summary/multiple-monitor-one-failed.summary index f6c872c4d87..d8c72f34cd1 100644 --- a/cts/scheduler/summary/multiple-monitor-one-failed.summary +++ b/cts/scheduler/summary/multiple-monitor-one-failed.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ dhcp69 dhcp180 ] @@ -9,14 +11,12 @@ Transition Summary: * Recover Dummy-test2 ( dhcp180 ) Executing Cluster Transition: - * Resource action: Dummy-test2 stop on dhcp180 - * Resource action: Dummy-test2 start on dhcp180 - * Resource action: Dummy-test2 monitor=30000 on dhcp180 - * Resource action: Dummy-test2 monitor=10000 on dhcp180 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ dhcp69 dhcp180 ] * Full List of Resources: - * Dummy-test2 (ocf:test:Dummy): Started dhcp180 + * Dummy-test2 (ocf:test:Dummy): FAILED dhcp180 diff --git a/cts/scheduler/summary/multiply-active-stonith.summary b/cts/scheduler/summary/multiply-active-stonith.summary index ec37de03b0e..57d7f90c377 100644 --- a/cts/scheduler/summary/multiply-active-stonith.summary +++ b/cts/scheduler/summary/multiply-active-stonith.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2018-05-09 09:54:39Z Current cluster status: + * Cluster Summary: + * Node List: * Node node2: UNCLEAN (online) * Online: [ node1 node3 ] @@ -13,16 +15,15 @@ Transition Summary: * Stop rsc1 ( node2 ) due to node availability Executing Cluster Transition: - * Resource action: fencer monitor=60000 on node3 - * Fencing node2 (reboot) - * Pseudo action: rsc1_stop_0 Using the original execution date of: 2018-05-09 09:54:39Z 
Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node node2: UNCLEAN (online) * Online: [ node1 node3 ] - * OFFLINE: [ node2 ] * Full List of Resources: - * fencer (stonith:fence_ipmilan): Started node3 - * rsc1 (lsb:rsc1): Stopped (not installed) + * fencer (stonith:fence_ipmilan): Started [ node2 node3 ] + * rsc1 (lsb:rsc1): FAILED node2 diff --git a/cts/scheduler/summary/nested-remote-recovery.summary b/cts/scheduler/summary/nested-remote-recovery.summary index fd3ccd76135..2aeacdca0e5 100644 --- a/cts/scheduler/summary/nested-remote-recovery.summary +++ b/cts/scheduler/summary/nested-remote-recovery.summary @@ -1,9 +1,29 @@ Using the original execution date of: 2018-09-11 21:23:25Z Current cluster status: + * Cluster Summary: +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-5254002f6d57 on controller-1 changed: 0:0;237:1:0:79eb6bb3-23ce-41d1-863c-4f68a738af58 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-5254002f6d57 on controller-1 changed: 0:0;229:2:0:79eb6bb3-23ce-41d1-863c-4f68a738af58 +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-52540066e27e on controller-1 changed: 0:0;227:1:0:79eb6bb3-23ce-41d1-863c-4f68a738af58 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-52540066e27e on controller-1 changed: 0:0;219:2:0:79eb6bb3-23ce-41d1-863c-4f68a738af58 +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400a16c0d on controller-1 changed: 0:0;235:1:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400a16c0d on controller-1 changed: 0:0;229:2:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-5254003296a5 on controller-1 changed: 0:0;224:1:0:79eb6bb3-23ce-41d1-863c-4f68a738af58 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-5254003296a5 on controller-1 changed: 0:0;216:2:0:79eb6bb3-23ce-41d1-863c-4f68a738af58 +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-52540098c9ff on controller-1 changed: 0:0;219:1:0:79eb6bb3-23ce-41d1-863c-4f68a738af58 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-52540098c9ff on controller-1 changed: 0:0;211:2:0:79eb6bb3-23ce-41d1-863c-4f68a738af58 +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400aab9d9 on controller-2 changed: 0:0;232:1:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400aab9d9 on controller-2 changed: 0:0;226:2:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-5254000203a2 on controller-2 changed: 0:0;222:1:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-5254000203a2 on controller-2 changed: 0:0;216:2:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-52540065418e on controller-2 changed: 0:0;229:1:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-52540065418e on controller-2 changed: 0:0;223:2:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-5254005f9a33 on controller-2 changed: 0:0;217:1:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 +Only 'private' parameters to 
60s-interval monitor for stonith-fence_ipmilan-5254005f9a33 on controller-2 changed: 0:0;211:2:0:018a4c7f-d5cb-4ef8-85a4-031ed2cffd23 + * Node List: * Online: [ controller-0 controller-1 controller-2 ] * RemoteOnline: [ database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ] - * GuestOnline: [ galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ] + * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ] * Full List of Resources: * database-0 (ocf:pacemaker:remote): Started controller-0 @@ -53,37 +73,11 @@ Transition Summary: * Recover galera:0 ( Promoted galera-bundle-0 ) Executing Cluster Transition: - * Resource action: galera-bundle-0 stop on controller-0 - * Pseudo action: galera-bundle_demote_0 - * Pseudo action: galera-bundle-master_demote_0 - * Pseudo action: galera_demote_0 - * Pseudo action: galera-bundle-master_demoted_0 - * Pseudo action: galera-bundle_demoted_0 - * Pseudo action: galera-bundle_stop_0 - * Resource action: galera-bundle-docker-0 stop on database-0 - * Pseudo action: stonith-galera-bundle-0-reboot on galera-bundle-0 - * Pseudo action: galera-bundle-master_stop_0 - * Pseudo action: galera_stop_0 - * Pseudo action: galera-bundle-master_stopped_0 - * Pseudo action: galera-bundle_stopped_0 - * Pseudo action: galera-bundle_start_0 - * Pseudo action: galera-bundle-master_start_0 - * Resource action: galera-bundle-docker-0 start on database-0 - * Resource action: galera-bundle-docker-0 monitor=60000 on database-0 - * Resource action: galera-bundle-0 start on controller-0 - * Resource action: galera-bundle-0 monitor=30000 on controller-0 - * Resource action: galera start on galera-bundle-0 - * Pseudo action: galera-bundle-master_running_0 - * Pseudo action: galera-bundle_running_0 - * Pseudo action: galera-bundle_promote_0 - * Pseudo action: galera-bundle-master_promote_0 - * Resource action: galera promote on galera-bundle-0 - * Pseudo action: galera-bundle-master_promoted_0 - * Pseudo action: galera-bundle_promoted_0 - * Resource action: galera monitor=10000 on galera-bundle-0 Using the original execution date of: 2018-09-11 21:23:25Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ controller-0 controller-1 controller-2 ] * RemoteOnline: [ database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ] @@ -97,7 +91,7 @@ Revised Cluster Status: * messaging-1 (ocf:pacemaker:remote): Started controller-1 * messaging-2 (ocf:pacemaker:remote): Started controller-1 * Container bundle set: galera-bundle [192.168.24.1:8787/rhosp13/openstack-mariadb:pcmklatest]: - * galera-bundle-0 (ocf:heartbeat:galera): Promoted database-0 + * galera-bundle-0 (ocf:heartbeat:galera): FAILED Promoted database-0 * galera-bundle-1 (ocf:heartbeat:galera): Promoted database-1 * galera-bundle-2 (ocf:heartbeat:galera): Promoted database-2 * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp13/openstack-rabbitmq:pcmklatest]: diff --git a/cts/scheduler/summary/no-promote-on-unrunnable-guest.summary b/cts/scheduler/summary/no-promote-on-unrunnable-guest.summary index ab8f8ff4506..42ae5e2f09c 100644 --- a/cts/scheduler/summary/no-promote-on-unrunnable-guest.summary +++ b/cts/scheduler/summary/no-promote-on-unrunnable-guest.summary @@ -1,5 +1,13 @@ Using the original execution date of: 2020-05-14 10:49:31Z Current cluster status: + * Cluster Summary: +Only 
'private' parameters to 0s-interval start for stonith-fence_ipmilan-5254005e097a on controller-0 changed: 0:0;217:60:0:515fab44-df8e-4e73-a22c-ed4886e03330 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-5254005e097a on controller-0 changed: 0:0;218:60:0:515fab44-df8e-4e73-a22c-ed4886e03330 +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400985679 on controller-1 changed: 0:0;223:64:0:515fab44-df8e-4e73-a22c-ed4886e03330 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400985679 on controller-1 changed: 0:0;224:64:0:515fab44-df8e-4e73-a22c-ed4886e03330 +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400afe30e on controller-2 changed: 0:0;219:60:0:515fab44-df8e-4e73-a22c-ed4886e03330 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400afe30e on controller-2 changed: 0:0;220:60:0:515fab44-df8e-4e73-a22c-ed4886e03330 + * Node List: * Online: [ controller-0 controller-1 controller-2 ] * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ] @@ -32,52 +40,18 @@ Transition Summary: * Stop ovn-dbs-bundle-0 ( controller-0 ) due to unrunnable ovn-dbs-bundle-podman-0 start * Stop ovndb_servers:0 ( Unpromoted ovn-dbs-bundle-0 ) due to unrunnable ovn-dbs-bundle-podman-0 start * Promote ovndb_servers:1 ( Unpromoted -> Promoted ovn-dbs-bundle-1 ) +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Resource action: ovndb_servers cancel=30000 on ovn-dbs-bundle-1 - * Pseudo action: ovn-dbs-bundle-master_pre_notify_stop_0 - * Pseudo action: ovn-dbs-bundle_stop_0 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-0 - * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_stop_0 - * Pseudo action: ovn-dbs-bundle-master_stop_0 - * Resource action: ovndb_servers stop on ovn-dbs-bundle-0 - * Pseudo action: ovn-dbs-bundle-master_stopped_0 - * Resource action: ovn-dbs-bundle-0 stop on controller-0 - * Pseudo action: ovn-dbs-bundle-master_post_notify_stopped_0 - * Resource action: ovn-dbs-bundle-podman-0 stop on controller-0 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 - * Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_stopped_0 - * Pseudo action: ovn-dbs-bundle-master_pre_notify_start_0 - * Pseudo action: ovn-dbs-bundle_stopped_0 - * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_start_0 - * Pseudo action: ovn-dbs-bundle-master_start_0 - * Pseudo action: ovn-dbs-bundle-master_running_0 - * Pseudo action: ovn-dbs-bundle-master_post_notify_running_0 - * Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_running_0 - * Pseudo action: ovn-dbs-bundle_running_0 - * Pseudo action: ovn-dbs-bundle-master_pre_notify_promote_0 - * Pseudo action: ovn-dbs-bundle_promote_0 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 - * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_promote_0 - * Pseudo action: ovn-dbs-bundle-master_promote_0 - * Resource action: ovndb_servers promote on ovn-dbs-bundle-1 - * Pseudo action: ovn-dbs-bundle-master_promoted_0 - * 
Pseudo action: ovn-dbs-bundle-master_post_notify_promoted_0 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 - * Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_promoted_0 - * Pseudo action: ovn-dbs-bundle_promoted_0 - * Resource action: ovndb_servers monitor=10000 on ovn-dbs-bundle-1 Using the original execution date of: 2020-05-14 10:49:31Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ controller-0 controller-1 controller-2 ] - * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ] + * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ] * Full List of Resources: * Container bundle set: galera-bundle [cluster.common.tag/rhosp16-openstack-mariadb:pcmklatest]: @@ -93,8 +67,8 @@ Revised Cluster Status: * redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-1 * redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-2 * Container bundle set: ovn-dbs-bundle [cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest]: - * ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Stopped - * ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Promoted controller-1 + * ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Unpromoted controller-0 + * ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Unpromoted controller-1 * ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Unpromoted controller-2 * stonith-fence_ipmilan-5254005e097a (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-525400afe30e (stonith:fence_ipmilan): Started controller-2 diff --git a/cts/scheduler/summary/no_quorum_demote.summary b/cts/scheduler/summary/no_quorum_demote.summary index 7de1658048e..73d3384f9d7 100644 --- a/cts/scheduler/summary/no_quorum_demote.summary +++ b/cts/scheduler/summary/no_quorum_demote.summary @@ -1,5 +1,8 @@ Using the original execution date of: 2020-06-17 17:26:35Z Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ rhel7-1 rhel7-2 ] * OFFLINE: [ rhel7-3 rhel7-4 rhel7-5 ] @@ -18,23 +21,20 @@ Transition Summary: * Stop rsc2 ( rhel7-2 ) due to no quorum Executing Cluster Transition: - * Resource action: Fencing stop on rhel7-1 - * Resource action: rsc1 cancel=10000 on rhel7-1 - * Pseudo action: rsc1-clone_demote_0 - * Resource action: rsc2 stop on rhel7-2 - * Resource action: rsc1 demote on rhel7-1 - * Pseudo action: rsc1-clone_demoted_0 - * Resource action: rsc1 monitor=11000 on rhel7-1 Using the original execution date of: 2020-06-17 17:26:35Z Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ rhel7-1 rhel7-2 ] * OFFLINE: [ rhel7-3 rhel7-4 rhel7-5 ] * Full List of Resources: - * Fencing (stonith:fence_xvm): Stopped + * Fencing (stonith:fence_xvm): Started rhel7-1 * Clone Set: rsc1-clone [rsc1] (promotable): - * Unpromoted: [ rhel7-1 rhel7-2 ] + * Promoted: [ rhel7-1 ] + * Unpromoted: [ rhel7-2 ] * Stopped: [ rhel7-3 rhel7-4 rhel7-5 ] - * rsc2 (ocf:pacemaker:Dummy): Stopped + * rsc2 (ocf:pacemaker:Dummy): Started rhel7-2 diff --git a/cts/scheduler/summary/node-maintenance-1.summary b/cts/scheduler/summary/node-maintenance-1.summary index 
eb75567721c..5dd3195b577 100644 --- a/cts/scheduler/summary/node-maintenance-1.summary +++ b/cts/scheduler/summary/node-maintenance-1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node2: maintenance * Online: [ node1 ] @@ -12,15 +14,15 @@ Transition Summary: * Stop rsc1 ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: rsc1 stop on node1 - * Resource action: rsc2 cancel=10000 on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Node node2: maintenance * Online: [ node1 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 - * rsc1 (ocf:pacemaker:Dummy): Stopped + * rsc1 (ocf:pacemaker:Dummy): Started node1 * rsc2 (ocf:pacemaker:Dummy): Started node2 (maintenance) diff --git a/cts/scheduler/summary/node-maintenance-2.summary b/cts/scheduler/summary/node-maintenance-2.summary index b21e5dbad17..f2d73e85b16 100644 --- a/cts/scheduler/summary/node-maintenance-2.summary +++ b/cts/scheduler/summary/node-maintenance-2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -11,15 +13,14 @@ Transition Summary: * Start rsc1 ( node2 ) Executing Cluster Transition: - * Resource action: rsc1 start on node2 - * Resource action: rsc2 monitor=10000 on node2 - * Resource action: rsc1 monitor=10000 on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 - * rsc1 (ocf:pacemaker:Dummy): Started node2 + * rsc1 (ocf:pacemaker:Dummy): Stopped * rsc2 (ocf:pacemaker:Dummy): Started node2 diff --git a/cts/scheduler/summary/node-pending-timeout.summary b/cts/scheduler/summary/node-pending-timeout.summary index 0fef9823ea9..3f21df83dc4 100644 --- a/cts/scheduler/summary/node-pending-timeout.summary +++ b/cts/scheduler/summary/node-pending-timeout.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2023-02-21 12:19:57Z Current cluster status: + * Cluster Summary: + * Node List: * Node node-2: UNCLEAN (online) * Online: [ node-1 ] @@ -12,15 +14,14 @@ Transition Summary: * Start st-sbd ( node-1 ) Executing Cluster Transition: - * Resource action: st-sbd monitor on node-1 - * Fencing node-2 (reboot) - * Resource action: st-sbd start on node-1 Using the original execution date of: 2023-02-21 12:19:57Z Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node node-2: UNCLEAN (online) * Online: [ node-1 ] - * OFFLINE: [ node-2 ] * Full List of Resources: - * st-sbd (stonith:external/sbd): Started node-1 + * st-sbd (stonith:external/sbd): Stopped diff --git a/cts/scheduler/summary/not-installed-agent.summary b/cts/scheduler/summary/not-installed-agent.summary index 3e4fb6bfc4d..ffcd8899ee9 100644 --- a/cts/scheduler/summary/not-installed-agent.summary +++ b/cts/scheduler/summary/not-installed-agent.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ sles11-1 sles11-2 ] @@ -12,18 +14,14 @@ Transition Summary: * Recover rsc2 ( sles11-1 -> sles11-2 ) Executing Cluster Transition: - * Resource action: rsc1 stop on sles11-1 - * Resource action: rsc2 stop on sles11-1 - * Resource action: rsc1 start on sles11-2 - * Resource action: rsc2 start on sles11-2 - * Resource action: rsc1 monitor=10000 on sles11-2 - * Resource action: rsc2 monitor=10000 on sles11-2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ sles11-1 sles11-2 ] * Full List of Resources: * st_sbd 
(stonith:external/sbd): Started sles11-1 - * rsc1 (ocf:pacemaker:Dummy): Started sles11-2 - * rsc2 (ocf:pacemaker:Dummy): Started sles11-2 + * rsc1 (ocf:pacemaker:Dummy): FAILED sles11-1 + * rsc2 (ocf:pacemaker:Dummy): FAILED sles11-1 diff --git a/cts/scheduler/summary/not-installed-tools.summary b/cts/scheduler/summary/not-installed-tools.summary index 7481216fcec..c7d81e47b41 100644 --- a/cts/scheduler/summary/not-installed-tools.summary +++ b/cts/scheduler/summary/not-installed-tools.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ sles11-1 sles11-2 ] @@ -11,15 +13,14 @@ Transition Summary: * Recover rsc1 ( sles11-1 -> sles11-2 ) Executing Cluster Transition: - * Resource action: rsc1 stop on sles11-1 - * Resource action: rsc1 start on sles11-2 - * Resource action: rsc1 monitor=10000 on sles11-2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ sles11-1 sles11-2 ] * Full List of Resources: * st_sbd (stonith:external/sbd): Started sles11-1 - * rsc1 (ocf:pacemaker:Dummy): Started sles11-2 + * rsc1 (ocf:pacemaker:Dummy): FAILED sles11-1 * rsc2 (ocf:pacemaker:Dummy): Started sles11-1 (failure ignored) diff --git a/cts/scheduler/summary/not-reschedule-unneeded-monitor.summary b/cts/scheduler/summary/not-reschedule-unneeded-monitor.summary index 44965355cfb..fef23ce87c7 100644 --- a/cts/scheduler/summary/not-reschedule-unneeded-monitor.summary +++ b/cts/scheduler/summary/not-reschedule-unneeded-monitor.summary @@ -1,6 +1,8 @@ 1 of 11 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ castor kimball ] @@ -19,12 +21,10 @@ Transition Summary: * Recover sle12-kvm ( kimball ) Executing Cluster Transition: - * Resource action: sle12-kvm stop on kimball - * Resource action: sle12-kvm stop on castor - * Resource action: sle12-kvm start on kimball - * Resource action: sle12-kvm monitor=10000 on kimball Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ castor kimball ] @@ -35,6 +35,6 @@ Revised Cluster Status: * Clone Set: c-vm-fs [vm1]: * Started: [ castor kimball ] * xen-f (ocf:heartbeat:VirtualDomain): Stopped (disabled) - * sle12-kvm (ocf:heartbeat:VirtualDomain): Started kimball + * sle12-kvm (ocf:heartbeat:VirtualDomain): FAILED [ kimball castor ] * Clone Set: cl_sgdisk [sgdisk]: * Started: [ castor kimball ] diff --git a/cts/scheduler/summary/notifs-for-unrunnable.summary b/cts/scheduler/summary/notifs-for-unrunnable.summary index a9503b46b28..c61740bd36b 100644 --- a/cts/scheduler/summary/notifs-for-unrunnable.summary +++ b/cts/scheduler/summary/notifs-for-unrunnable.summary @@ -1,9 +1,17 @@ Using the original execution date of: 2018-02-13 23:40:47Z Current cluster status: + * Cluster Summary: +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400fec0c8 on controller-1 changed: 0:0;178:2:0:8b283351-71e8-4848-b470-8664f73af1e9 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400fec0c8 on controller-1 changed: 0:0;179:2:0:8b283351-71e8-4848-b470-8664f73af1e9 +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-5254008f971a on controller-1 changed: 0:0;182:2:0:8b283351-71e8-4848-b470-8664f73af1e9 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-5254008f971a on controller-1 changed: 0:0;183:2:0:8b283351-71e8-4848-b470-8664f73af1e9 +Only 'private' parameters to 0s-interval start for 
stonith-fence_ipmilan-5254002ff217 on controller-2 changed: 0:0;180:2:0:8b283351-71e8-4848-b470-8664f73af1e9 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-5254002ff217 on controller-2 changed: 0:0;181:2:0:8b283351-71e8-4848-b470-8664f73af1e9 + * Node List: * Online: [ controller-1 controller-2 ] * OFFLINE: [ controller-0 ] - * GuestOnline: [ galera-bundle-1 galera-bundle-2 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-1 redis-bundle-2 ] + * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ] * Full List of Resources: * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp12/openstack-rabbitmq:pcmklatest]: @@ -42,33 +50,19 @@ Transition Summary: * Start redis:0 ( redis-bundle-0 ) due to unrunnable redis-bundle-docker-0 start (blocked) Executing Cluster Transition: - * Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0 - * Pseudo action: redis-bundle-master_pre_notify_start_0 - * Pseudo action: redis-bundle_start_0 - * Pseudo action: galera-bundle_start_0 - * Pseudo action: rabbitmq-bundle_start_0 - * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0 - * Pseudo action: rabbitmq-bundle-clone_start_0 - * Pseudo action: galera-bundle-master_start_0 - * Pseudo action: redis-bundle-master_confirmed-pre_notify_start_0 - * Pseudo action: redis-bundle-master_start_0 - * Pseudo action: rabbitmq-bundle-clone_running_0 - * Pseudo action: galera-bundle-master_running_0 - * Pseudo action: redis-bundle-master_running_0 - * Pseudo action: galera-bundle_running_0 - * Pseudo action: rabbitmq-bundle-clone_post_notify_running_0 - * Pseudo action: redis-bundle-master_post_notify_running_0 - * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0 - * Pseudo action: redis-bundle-master_confirmed-post_notify_running_0 - * Pseudo action: redis-bundle_running_0 - * Pseudo action: rabbitmq-bundle_running_0 + * Resource action: rabbitmq notify on rabbitmq-bundle-1 + * Resource action: rabbitmq notify on rabbitmq-bundle-2 + * Resource action: redis notify on redis-bundle-1 + * Resource action: redis notify on redis-bundle-2 Using the original execution date of: 2018-02-13 23:40:47Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ controller-1 controller-2 ] * OFFLINE: [ controller-0 ] - * GuestOnline: [ galera-bundle-1 galera-bundle-2 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-1 redis-bundle-2 ] + * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ] * Full List of Resources: * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp12/openstack-rabbitmq:pcmklatest]: diff --git a/cts/scheduler/summary/notify-0.summary b/cts/scheduler/summary/notify-0.summary index f39ea946471..03eafaf3365 100644 --- a/cts/scheduler/summary/notify-0.summary +++ b/cts/scheduler/summary/notify-0.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 ] * OFFLINE: [ node2 ] @@ -16,16 +18,10 @@ Transition Summary: * Stop child_rsc2:0 ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: child_rsc1:1 monitor on node1 - * Pseudo action: rsc1_start_0 - * Resource action: child_rsc2:1 monitor on node1 - * Pseudo action: rsc2_stop_0 - * Resource action: child_rsc1:1 start on node1 - * Pseudo action: rsc1_running_0 - * Resource action: 
child_rsc2:0 stop on node1 - * Pseudo action: rsc2_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 ] * OFFLINE: [ node2 ] @@ -33,7 +29,7 @@ Revised Cluster Status: * Full List of Resources: * Clone Set: rsc1 [child_rsc1] (unique): * child_rsc1:0 (ocf:heartbeat:apache): Started node1 - * child_rsc1:1 (ocf:heartbeat:apache): Started node1 + * child_rsc1:1 (ocf:heartbeat:apache): Stopped * Clone Set: rsc2 [child_rsc2] (unique): - * child_rsc2:0 (ocf:heartbeat:apache): Stopped + * child_rsc2:0 (ocf:heartbeat:apache): Started node1 * child_rsc2:1 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/notify-1.summary b/cts/scheduler/summary/notify-1.summary index 8ce4b25ae04..c89106dceed 100644 --- a/cts/scheduler/summary/notify-1.summary +++ b/cts/scheduler/summary/notify-1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 ] * OFFLINE: [ node2 ] @@ -16,28 +18,11 @@ Transition Summary: * Stop child_rsc2:0 ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: child_rsc1:1 monitor on node1 - * Pseudo action: rsc1_pre_notify_start_0 - * Resource action: child_rsc2:1 monitor on node1 - * Pseudo action: rsc2_pre_notify_stop_0 - * Resource action: child_rsc1:0 notify on node1 - * Pseudo action: rsc1_confirmed-pre_notify_start_0 - * Pseudo action: rsc1_start_0 - * Resource action: child_rsc2:0 notify on node1 - * Pseudo action: rsc2_confirmed-pre_notify_stop_0 - * Pseudo action: rsc2_stop_0 - * Resource action: child_rsc1:1 start on node1 - * Pseudo action: rsc1_running_0 - * Resource action: child_rsc2:0 stop on node1 - * Pseudo action: rsc2_stopped_0 - * Pseudo action: rsc1_post_notify_running_0 - * Pseudo action: rsc2_post_notify_stopped_0 * Resource action: child_rsc1:0 notify on node1 - * Resource action: child_rsc1:1 notify on node1 - * Pseudo action: rsc1_confirmed-post_notify_running_0 - * Pseudo action: rsc2_confirmed-post_notify_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 ] * OFFLINE: [ node2 ] @@ -45,7 +30,7 @@ Revised Cluster Status: * Full List of Resources: * Clone Set: rsc1 [child_rsc1] (unique): * child_rsc1:0 (ocf:heartbeat:apache): Started node1 - * child_rsc1:1 (ocf:heartbeat:apache): Started node1 + * child_rsc1:1 (ocf:heartbeat:apache): Stopped * Clone Set: rsc2 [child_rsc2] (unique): - * child_rsc2:0 (ocf:heartbeat:apache): Stopped + * child_rsc2:0 (ocf:heartbeat:apache): Started node1 * child_rsc2:1 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/notify-2.summary b/cts/scheduler/summary/notify-2.summary index 8ce4b25ae04..c89106dceed 100644 --- a/cts/scheduler/summary/notify-2.summary +++ b/cts/scheduler/summary/notify-2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 ] * OFFLINE: [ node2 ] @@ -16,28 +18,11 @@ Transition Summary: * Stop child_rsc2:0 ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: child_rsc1:1 monitor on node1 - * Pseudo action: rsc1_pre_notify_start_0 - * Resource action: child_rsc2:1 monitor on node1 - * Pseudo action: rsc2_pre_notify_stop_0 - * Resource action: child_rsc1:0 notify on node1 - * Pseudo action: rsc1_confirmed-pre_notify_start_0 - * Pseudo action: rsc1_start_0 - * Resource action: child_rsc2:0 notify on node1 - * Pseudo action: rsc2_confirmed-pre_notify_stop_0 - * Pseudo action: rsc2_stop_0 - * Resource action: child_rsc1:1 start on node1 - * Pseudo action: 
rsc1_running_0 - * Resource action: child_rsc2:0 stop on node1 - * Pseudo action: rsc2_stopped_0 - * Pseudo action: rsc1_post_notify_running_0 - * Pseudo action: rsc2_post_notify_stopped_0 * Resource action: child_rsc1:0 notify on node1 - * Resource action: child_rsc1:1 notify on node1 - * Pseudo action: rsc1_confirmed-post_notify_running_0 - * Pseudo action: rsc2_confirmed-post_notify_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 ] * OFFLINE: [ node2 ] @@ -45,7 +30,7 @@ Revised Cluster Status: * Full List of Resources: * Clone Set: rsc1 [child_rsc1] (unique): * child_rsc1:0 (ocf:heartbeat:apache): Started node1 - * child_rsc1:1 (ocf:heartbeat:apache): Started node1 + * child_rsc1:1 (ocf:heartbeat:apache): Stopped * Clone Set: rsc2 [child_rsc2] (unique): - * child_rsc2:0 (ocf:heartbeat:apache): Stopped + * child_rsc2:0 (ocf:heartbeat:apache): Started node1 * child_rsc2:1 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/notify-3.summary b/cts/scheduler/summary/notify-3.summary index 56586923e4c..de4edba0660 100644 --- a/cts/scheduler/summary/notify-3.summary +++ b/cts/scheduler/summary/notify-3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -15,48 +17,17 @@ Transition Summary: * Stop child_rsc2:0 ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: child_rsc1:0 monitor on node2 - * Resource action: child_rsc1:1 monitor on node1 - * Pseudo action: rsc1_pre_notify_stop_0 - * Resource action: child_rsc2:0 monitor on node2 - * Resource action: child_rsc2:1 monitor on node2 - * Resource action: child_rsc2:1 monitor on node1 - * Pseudo action: rsc2_pre_notify_stop_0 - * Resource action: child_rsc1:0 notify on node1 - * Resource action: child_rsc1:1 notify on node2 - * Pseudo action: rsc1_confirmed-pre_notify_stop_0 - * Pseudo action: rsc1_stop_0 - * Resource action: child_rsc2:0 notify on node1 - * Pseudo action: rsc2_confirmed-pre_notify_stop_0 - * Pseudo action: rsc2_stop_0 - * Resource action: child_rsc1:1 stop on node2 - * Pseudo action: rsc1_stopped_0 - * Resource action: child_rsc2:0 stop on node1 - * Pseudo action: rsc2_stopped_0 - * Pseudo action: rsc1_post_notify_stopped_0 - * Pseudo action: rsc2_post_notify_stopped_0 - * Resource action: child_rsc1:0 notify on node1 - * Pseudo action: rsc1_confirmed-post_notify_stopped_0 - * Pseudo action: rsc1_pre_notify_start_0 - * Pseudo action: rsc2_confirmed-post_notify_stopped_0 - * Resource action: child_rsc1:0 notify on node1 - * Pseudo action: rsc1_confirmed-pre_notify_start_0 - * Pseudo action: rsc1_start_0 - * Resource action: child_rsc1:1 start on node1 - * Pseudo action: rsc1_running_0 - * Pseudo action: rsc1_post_notify_running_0 - * Resource action: child_rsc1:0 notify on node1 - * Resource action: child_rsc1:1 notify on node1 - * Pseudo action: rsc1_confirmed-post_notify_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: rsc1 [child_rsc1] (unique): * child_rsc1:0 (ocf:heartbeat:apache): Started node1 - * child_rsc1:1 (ocf:heartbeat:apache): Started node1 + * child_rsc1:1 (ocf:heartbeat:apache): Started node2 * Clone Set: rsc2 [child_rsc2] (unique): - * child_rsc2:0 (ocf:heartbeat:apache): Stopped + * child_rsc2:0 (ocf:heartbeat:apache): Started node1 * child_rsc2:1 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/notify-behind-stopping-remote.summary 
b/cts/scheduler/summary/notify-behind-stopping-remote.summary index 257e445274c..ab78b3297a3 100644 --- a/cts/scheduler/summary/notify-behind-stopping-remote.summary +++ b/cts/scheduler/summary/notify-behind-stopping-remote.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2018-11-22 20:36:07Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ ra1 ra2 ra3 ] * GuestOnline: [ redis-bundle-0 redis-bundle-1 redis-bundle-2 ] @@ -17,48 +19,17 @@ Transition Summary: * Start redis:1 ( redis-bundle-1 ) due to unrunnable redis-bundle-docker-1 start (blocked) Executing Cluster Transition: - * Resource action: redis cancel=45000 on redis-bundle-0 - * Resource action: redis cancel=60000 on redis-bundle-0 - * Pseudo action: redis-bundle-master_pre_notify_start_0 - * Resource action: redis-bundle-0 monitor=30000 on ra1 - * Resource action: redis-bundle-0 cancel=60000 on ra1 - * Resource action: redis-bundle-1 stop on ra2 - * Resource action: redis-bundle-1 cancel=60000 on ra2 - * Resource action: redis-bundle-2 monitor=30000 on ra3 - * Resource action: redis-bundle-2 cancel=60000 on ra3 - * Pseudo action: redis-bundle_stop_0 - * Pseudo action: redis-bundle-master_confirmed-pre_notify_start_0 - * Resource action: redis-bundle-docker-1 stop on ra2 - * Pseudo action: redis-bundle_stopped_0 - * Pseudo action: redis-bundle_start_0 - * Pseudo action: redis-bundle-master_start_0 - * Pseudo action: redis-bundle-master_running_0 - * Pseudo action: redis-bundle-master_post_notify_running_0 - * Pseudo action: redis-bundle-master_confirmed-post_notify_running_0 - * Pseudo action: redis-bundle_running_0 - * Pseudo action: redis-bundle-master_pre_notify_promote_0 - * Pseudo action: redis-bundle_promote_0 - * Resource action: redis notify on redis-bundle-0 - * Resource action: redis notify on redis-bundle-2 - * Pseudo action: redis-bundle-master_confirmed-pre_notify_promote_0 - * Pseudo action: redis-bundle-master_promote_0 - * Resource action: redis promote on redis-bundle-0 - * Pseudo action: redis-bundle-master_promoted_0 - * Pseudo action: redis-bundle-master_post_notify_promoted_0 - * Resource action: redis notify on redis-bundle-0 - * Resource action: redis notify on redis-bundle-2 - * Pseudo action: redis-bundle-master_confirmed-post_notify_promoted_0 - * Pseudo action: redis-bundle_promoted_0 - * Resource action: redis monitor=20000 on redis-bundle-0 Using the original execution date of: 2018-11-22 20:36:07Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ ra1 ra2 ra3 ] - * GuestOnline: [ redis-bundle-0 redis-bundle-2 ] + * GuestOnline: [ redis-bundle-0 redis-bundle-1 redis-bundle-2 ] * Full List of Resources: * Container bundle set: redis-bundle [docker.io/tripleoqueens/centos-binary-redis:current-tripleo-rdo]: - * redis-bundle-0 (ocf:heartbeat:redis): Promoted ra1 - * redis-bundle-1 (ocf:heartbeat:redis): Stopped + * redis-bundle-0 (ocf:heartbeat:redis): Unpromoted ra1 + * redis-bundle-1 (ocf:heartbeat:redis): Stopped ra2 * redis-bundle-2 (ocf:heartbeat:redis): Unpromoted ra3 diff --git a/cts/scheduler/summary/novell-239079.summary b/cts/scheduler/summary/novell-239079.summary index 0afbba57970..07b2ab282b8 100644 --- a/cts/scheduler/summary/novell-239079.summary +++ b/cts/scheduler/summary/novell-239079.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ xen-1 xen-2 ] @@ -12,22 +14,14 @@ Transition Summary: * Start drbd0:1 ( xen-2 ) Executing Cluster Transition: - * Pseudo action: ms-drbd0_pre_notify_start_0 - 
* Pseudo action: ms-drbd0_confirmed-pre_notify_start_0 - * Pseudo action: ms-drbd0_start_0 - * Resource action: drbd0:0 start on xen-1 - * Resource action: drbd0:1 start on xen-2 - * Pseudo action: ms-drbd0_running_0 - * Pseudo action: ms-drbd0_post_notify_running_0 - * Resource action: drbd0:0 notify on xen-1 - * Resource action: drbd0:1 notify on xen-2 - * Pseudo action: ms-drbd0_confirmed-post_notify_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ xen-1 xen-2 ] * Full List of Resources: * fs_1 (ocf:heartbeat:Filesystem): Stopped * Clone Set: ms-drbd0 [drbd0] (promotable): - * Unpromoted: [ xen-1 xen-2 ] + * Stopped: [ xen-1 xen-2 ] diff --git a/cts/scheduler/summary/novell-239082.summary b/cts/scheduler/summary/novell-239082.summary index 051c0220e01..54d1e3a85f0 100644 --- a/cts/scheduler/summary/novell-239082.summary +++ b/cts/scheduler/summary/novell-239082.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ xen-1 xen-2 ] @@ -12,48 +14,19 @@ Transition Summary: * Move fs_1 ( xen-1 -> xen-2 ) * Promote drbd0:0 ( Unpromoted -> Promoted xen-2 ) * Stop drbd0:1 ( Promoted xen-1 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Resource action: fs_1 stop on xen-1 - * Pseudo action: ms-drbd0_pre_notify_demote_0 - * Resource action: drbd0:0 notify on xen-2 - * Resource action: drbd0:1 notify on xen-1 - * Pseudo action: ms-drbd0_confirmed-pre_notify_demote_0 - * Pseudo action: ms-drbd0_demote_0 - * Resource action: drbd0:1 demote on xen-1 - * Pseudo action: ms-drbd0_demoted_0 - * Pseudo action: ms-drbd0_post_notify_demoted_0 - * Resource action: drbd0:0 notify on xen-2 - * Resource action: drbd0:1 notify on xen-1 - * Pseudo action: ms-drbd0_confirmed-post_notify_demoted_0 - * Pseudo action: ms-drbd0_pre_notify_stop_0 - * Resource action: drbd0:0 notify on xen-2 - * Resource action: drbd0:1 notify on xen-1 - * Pseudo action: ms-drbd0_confirmed-pre_notify_stop_0 - * Pseudo action: ms-drbd0_stop_0 - * Resource action: drbd0:1 stop on xen-1 - * Pseudo action: ms-drbd0_stopped_0 - * Cluster action: do_shutdown on xen-1 - * Pseudo action: ms-drbd0_post_notify_stopped_0 - * Resource action: drbd0:0 notify on xen-2 - * Pseudo action: ms-drbd0_confirmed-post_notify_stopped_0 - * Pseudo action: ms-drbd0_pre_notify_promote_0 - * Resource action: drbd0:0 notify on xen-2 - * Pseudo action: ms-drbd0_confirmed-pre_notify_promote_0 - * Pseudo action: ms-drbd0_promote_0 - * Resource action: drbd0:0 promote on xen-2 - * Pseudo action: ms-drbd0_promoted_0 - * Pseudo action: ms-drbd0_post_notify_promoted_0 - * Resource action: drbd0:0 notify on xen-2 - * Pseudo action: ms-drbd0_confirmed-post_notify_promoted_0 - * Resource action: fs_1 start on xen-2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ xen-1 xen-2 ] * Full List of Resources: - * fs_1 (ocf:heartbeat:Filesystem): Started xen-2 + * fs_1 (ocf:heartbeat:Filesystem): Started xen-1 * Clone Set: ms-drbd0 [drbd0] (promotable): - * Promoted: [ xen-2 ] - * Stopped: [ xen-1 ] + * Promoted: [ xen-1 ] + * Unpromoted: [ xen-2 ] diff --git a/cts/scheduler/summary/novell-239087.summary b/cts/scheduler/summary/novell-239087.summary index 0c158d3873d..cbbc80b6c83 100644 --- a/cts/scheduler/summary/novell-239087.summary +++ b/cts/scheduler/summary/novell-239087.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ xen-1 xen-2 ] @@ -13,6 +15,8 @@ 
Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ xen-1 xen-2 ] diff --git a/cts/scheduler/summary/novell-251689.summary b/cts/scheduler/summary/novell-251689.summary index 51a4beacdf7..9f73699fac4 100644 --- a/cts/scheduler/summary/novell-251689.summary +++ b/cts/scheduler/summary/novell-251689.summary @@ -1,6 +1,8 @@ 1 of 11 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -21,17 +23,10 @@ Transition Summary: * Stop sles10 ( node2 ) due to node availability Executing Cluster Transition: - * Resource action: stonithclone:0 monitor=5000 on node2 - * Resource action: stonithclone:1 monitor=5000 on node1 - * Resource action: evmsdclone:0 monitor=5000 on node2 - * Resource action: evmsdclone:1 monitor=5000 on node1 - * Resource action: imagestoreclone:0 monitor=20000 on node2 - * Resource action: imagestoreclone:1 monitor=20000 on node1 - * Resource action: configstoreclone:0 monitor=20000 on node2 - * Resource action: configstoreclone:1 monitor=20000 on node1 - * Resource action: sles10 stop on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -46,4 +41,4 @@ Revised Cluster Status: * Started: [ node1 node2 ] * Clone Set: configstorecloneset [configstoreclone]: * Started: [ node1 node2 ] - * sles10 (ocf:heartbeat:Xen): Stopped (disabled) + * sles10 (ocf:heartbeat:Xen): Started node2 (disabled) diff --git a/cts/scheduler/summary/novell-252693-2.summary b/cts/scheduler/summary/novell-252693-2.summary index 45ee46d2e97..87471fd6ce3 100644 --- a/cts/scheduler/summary/novell-252693-2.summary +++ b/cts/scheduler/summary/novell-252693-2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -29,75 +31,30 @@ Transition Summary: * Migrate sles10 ( node2 -> node1 ) Executing Cluster Transition: - * Resource action: stonithclone:0 monitor=5000 on node2 - * Resource action: stonithclone:1 monitor on node1 - * Pseudo action: stonithcloneset_start_0 - * Resource action: evmsdclone:0 monitor=5000 on node2 - * Resource action: evmsdclone:1 monitor on node1 - * Pseudo action: evmsdcloneset_start_0 - * Resource action: evmsclone:1 monitor on node1 - * Pseudo action: evmscloneset_pre_notify_start_0 - * Resource action: imagestoreclone:1 monitor on node1 - * Pseudo action: imagestorecloneset_pre_notify_start_0 - * Resource action: configstoreclone:1 monitor on node1 - * Pseudo action: configstorecloneset_pre_notify_start_0 - * Resource action: sles10 monitor on node1 - * Resource action: stonithclone:1 start on node1 - * Pseudo action: stonithcloneset_running_0 - * Resource action: evmsdclone:1 start on node1 - * Pseudo action: evmsdcloneset_running_0 - * Resource action: evmsclone:0 notify on node2 - * Pseudo action: evmscloneset_confirmed-pre_notify_start_0 - * Pseudo action: evmscloneset_start_0 - * Resource action: imagestoreclone:0 notify on node2 - * Pseudo action: imagestorecloneset_confirmed-pre_notify_start_0 - * Resource action: configstoreclone:0 notify on node2 - * Pseudo action: configstorecloneset_confirmed-pre_notify_start_0 - * Resource action: stonithclone:1 monitor=5000 on node1 - * Resource action: evmsdclone:1 monitor=5000 on node1 - * Resource action: evmsclone:1 start on node1 - * Pseudo action: evmscloneset_running_0 - * Pseudo action: evmscloneset_post_notify_running_0 * Resource action: evmsclone:0 
notify on node2 - * Resource action: evmsclone:1 notify on node1 - * Pseudo action: evmscloneset_confirmed-post_notify_running_0 - * Pseudo action: imagestorecloneset_start_0 - * Pseudo action: configstorecloneset_start_0 - * Resource action: imagestoreclone:1 start on node1 - * Pseudo action: imagestorecloneset_running_0 - * Resource action: configstoreclone:1 start on node1 - * Pseudo action: configstorecloneset_running_0 - * Pseudo action: imagestorecloneset_post_notify_running_0 - * Pseudo action: configstorecloneset_post_notify_running_0 * Resource action: imagestoreclone:0 notify on node2 - * Resource action: imagestoreclone:1 notify on node1 - * Pseudo action: imagestorecloneset_confirmed-post_notify_running_0 * Resource action: configstoreclone:0 notify on node2 - * Resource action: configstoreclone:1 notify on node1 - * Pseudo action: configstorecloneset_confirmed-post_notify_running_0 - * Resource action: sles10 migrate_to on node2 - * Resource action: imagestoreclone:0 monitor=20000 on node2 - * Resource action: imagestoreclone:1 monitor=20000 on node1 - * Resource action: configstoreclone:0 monitor=20000 on node2 - * Resource action: configstoreclone:1 monitor=20000 on node1 - * Resource action: sles10 migrate_from on node1 - * Resource action: sles10 stop on node2 - * Pseudo action: sles10_start_0 - * Resource action: sles10 monitor=10000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: stonithcloneset [stonithclone]: - * Started: [ node1 node2 ] + * Started: [ node2 ] + * Stopped: [ node1 ] * Clone Set: evmsdcloneset [evmsdclone]: - * Started: [ node1 node2 ] + * Started: [ node2 ] + * Stopped: [ node1 ] * Clone Set: evmscloneset [evmsclone]: - * Started: [ node1 node2 ] + * Started: [ node2 ] + * Stopped: [ node1 ] * Clone Set: imagestorecloneset [imagestoreclone]: - * Started: [ node1 node2 ] + * Started: [ node2 ] + * Stopped: [ node1 ] * Clone Set: configstorecloneset [configstoreclone]: - * Started: [ node1 node2 ] - * sles10 (ocf:heartbeat:Xen): Started node1 + * Started: [ node2 ] + * Stopped: [ node1 ] + * sles10 (ocf:heartbeat:Xen): Started node2 diff --git a/cts/scheduler/summary/novell-252693-3.summary b/cts/scheduler/summary/novell-252693-3.summary index 246969d6bd5..8fc32ce77f7 100644 --- a/cts/scheduler/summary/novell-252693-3.summary +++ b/cts/scheduler/summary/novell-252693-3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -30,83 +32,29 @@ Transition Summary: * Migrate sles10 ( node2 -> node1 ) Executing Cluster Transition: - * Resource action: stonithclone:0 monitor=5000 on node2 - * Resource action: stonithclone:1 monitor on node1 - * Pseudo action: stonithcloneset_start_0 - * Resource action: evmsdclone:0 monitor=5000 on node2 - * Resource action: evmsdclone:1 monitor on node1 - * Pseudo action: evmsdcloneset_start_0 - * Resource action: evmsclone:1 monitor on node1 - * Pseudo action: evmscloneset_pre_notify_start_0 - * Resource action: imagestoreclone:0 monitor on node1 - * Pseudo action: imagestorecloneset_pre_notify_stop_0 - * Resource action: configstoreclone:1 monitor on node1 - * Pseudo action: configstorecloneset_pre_notify_start_0 - * Resource action: sles10 monitor on node1 - * Resource action: stonithclone:1 start on node1 - * Pseudo action: stonithcloneset_running_0 - * Resource action: evmsdclone:1 start on node1 - * Pseudo action: evmsdcloneset_running_0 - * Resource action: evmsclone:0 notify on 
node2 - * Pseudo action: evmscloneset_confirmed-pre_notify_start_0 - * Pseudo action: evmscloneset_start_0 - * Resource action: imagestoreclone:0 notify on node2 - * Pseudo action: imagestorecloneset_confirmed-pre_notify_stop_0 - * Pseudo action: imagestorecloneset_stop_0 - * Resource action: configstoreclone:0 notify on node2 - * Pseudo action: configstorecloneset_confirmed-pre_notify_start_0 - * Resource action: stonithclone:1 monitor=5000 on node1 - * Resource action: evmsdclone:1 monitor=5000 on node1 - * Resource action: evmsclone:1 start on node1 - * Pseudo action: evmscloneset_running_0 - * Resource action: imagestoreclone:0 stop on node2 - * Pseudo action: imagestorecloneset_stopped_0 - * Pseudo action: evmscloneset_post_notify_running_0 - * Pseudo action: imagestorecloneset_post_notify_stopped_0 * Resource action: evmsclone:0 notify on node2 - * Resource action: evmsclone:1 notify on node1 - * Pseudo action: evmscloneset_confirmed-post_notify_running_0 - * Pseudo action: imagestorecloneset_confirmed-post_notify_stopped_0 - * Pseudo action: imagestorecloneset_pre_notify_start_0 - * Pseudo action: configstorecloneset_start_0 - * Pseudo action: imagestorecloneset_confirmed-pre_notify_start_0 - * Pseudo action: imagestorecloneset_start_0 - * Resource action: configstoreclone:1 start on node1 - * Pseudo action: configstorecloneset_running_0 - * Resource action: imagestoreclone:0 start on node1 - * Resource action: imagestoreclone:1 start on node2 - * Pseudo action: imagestorecloneset_running_0 - * Pseudo action: configstorecloneset_post_notify_running_0 - * Pseudo action: imagestorecloneset_post_notify_running_0 * Resource action: configstoreclone:0 notify on node2 - * Resource action: configstoreclone:1 notify on node1 - * Pseudo action: configstorecloneset_confirmed-post_notify_running_0 - * Resource action: imagestoreclone:0 notify on node1 - * Resource action: imagestoreclone:1 notify on node2 - * Pseudo action: imagestorecloneset_confirmed-post_notify_running_0 - * Resource action: configstoreclone:0 monitor=20000 on node2 - * Resource action: configstoreclone:1 monitor=20000 on node1 - * Resource action: sles10 migrate_to on node2 - * Resource action: imagestoreclone:0 monitor=20000 on node1 - * Resource action: imagestoreclone:1 monitor=20000 on node2 - * Resource action: sles10 migrate_from on node1 - * Resource action: sles10 stop on node2 - * Pseudo action: sles10_start_0 - * Resource action: sles10 monitor=10000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: stonithcloneset [stonithclone]: - * Started: [ node1 node2 ] + * Started: [ node2 ] + * Stopped: [ node1 ] * Clone Set: evmsdcloneset [evmsdclone]: - * Started: [ node1 node2 ] + * Started: [ node2 ] + * Stopped: [ node1 ] * Clone Set: evmscloneset [evmsclone]: - * Started: [ node1 node2 ] + * Started: [ node2 ] + * Stopped: [ node1 ] * Clone Set: imagestorecloneset [imagestoreclone]: - * Started: [ node1 node2 ] + * imagestoreclone (ocf:heartbeat:Filesystem): FAILED node2 + * Stopped: [ node1 ] * Clone Set: configstorecloneset [configstoreclone]: - * Started: [ node1 node2 ] - * sles10 (ocf:heartbeat:Xen): Started node1 + * Started: [ node2 ] + * Stopped: [ node1 ] + * sles10 (ocf:heartbeat:Xen): Started node2 diff --git a/cts/scheduler/summary/novell-252693.summary b/cts/scheduler/summary/novell-252693.summary index 82fce77ee43..f74033ede33 100644 --- a/cts/scheduler/summary/novell-252693.summary +++ 
b/cts/scheduler/summary/novell-252693.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -22,73 +24,26 @@ Transition Summary: * Stop imagestoreclone:1 ( node1 ) due to node availability * Stop configstoreclone:1 ( node1 ) due to node availability * Migrate sles10 ( node1 -> node2 ) +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Resource action: stonithclone:0 monitor=5000 on node2 - * Pseudo action: stonithcloneset_stop_0 - * Resource action: evmsdclone:0 monitor=5000 on node2 - * Pseudo action: evmscloneset_pre_notify_stop_0 - * Pseudo action: imagestorecloneset_pre_notify_stop_0 - * Pseudo action: configstorecloneset_pre_notify_stop_0 - * Resource action: sles10 migrate_to on node1 - * Resource action: stonithclone:1 stop on node1 - * Pseudo action: stonithcloneset_stopped_0 - * Resource action: evmsclone:0 notify on node2 - * Resource action: evmsclone:1 notify on node1 - * Pseudo action: evmscloneset_confirmed-pre_notify_stop_0 - * Resource action: imagestoreclone:0 notify on node2 - * Resource action: imagestoreclone:0 notify on node1 - * Pseudo action: imagestorecloneset_confirmed-pre_notify_stop_0 - * Pseudo action: imagestorecloneset_stop_0 - * Resource action: configstoreclone:0 notify on node2 - * Resource action: configstoreclone:0 notify on node1 - * Pseudo action: configstorecloneset_confirmed-pre_notify_stop_0 - * Pseudo action: configstorecloneset_stop_0 - * Resource action: sles10 migrate_from on node2 - * Resource action: sles10 stop on node1 - * Resource action: imagestoreclone:0 stop on node1 - * Pseudo action: imagestorecloneset_stopped_0 - * Resource action: configstoreclone:0 stop on node1 - * Pseudo action: configstorecloneset_stopped_0 - * Pseudo action: sles10_start_0 - * Pseudo action: imagestorecloneset_post_notify_stopped_0 - * Pseudo action: configstorecloneset_post_notify_stopped_0 - * Resource action: sles10 monitor=10000 on node2 - * Resource action: imagestoreclone:0 notify on node2 - * Pseudo action: imagestorecloneset_confirmed-post_notify_stopped_0 - * Resource action: configstoreclone:0 notify on node2 - * Pseudo action: configstorecloneset_confirmed-post_notify_stopped_0 - * Pseudo action: evmscloneset_stop_0 - * Resource action: imagestoreclone:0 monitor=20000 on node2 - * Resource action: configstoreclone:0 monitor=20000 on node2 - * Resource action: evmsclone:1 stop on node1 - * Pseudo action: evmscloneset_stopped_0 - * Pseudo action: evmscloneset_post_notify_stopped_0 - * Resource action: evmsclone:0 notify on node2 - * Pseudo action: evmscloneset_confirmed-post_notify_stopped_0 - * Pseudo action: evmsdcloneset_stop_0 - * Resource action: evmsdclone:1 stop on node1 - * Pseudo action: evmsdcloneset_stopped_0 - * Cluster action: do_shutdown on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: stonithcloneset [stonithclone]: - * Started: [ node2 ] - * Stopped: [ node1 ] + * Started: [ node1 node2 ] * Clone Set: evmsdcloneset [evmsdclone]: - * Started: [ node2 ] - * Stopped: [ node1 ] + * Started: [ node1 node2 ] * Clone Set: evmscloneset [evmsclone]: - * Started: [ node2 ] - * Stopped: [ node1 ] + * Started: [ node1 node2 ] * Clone Set: imagestorecloneset [imagestoreclone]: - * Started: [ node2 ] - * Stopped: [ node1 ] + * Started: [ node1 node2 ] * Clone Set: configstorecloneset [configstoreclone]: - * Started: [ node2 ] - * Stopped: [ node1 ] - * sles10 
(ocf:heartbeat:Xen): Started node2 + * Started: [ node1 node2 ] + * sles10 (ocf:heartbeat:Xen): Started node1 diff --git a/cts/scheduler/summary/nvpair-date-rules-1.summary b/cts/scheduler/summary/nvpair-date-rules-1.summary index 145ff4a7202..c8cef6076c2 100644 --- a/cts/scheduler/summary/nvpair-date-rules-1.summary +++ b/cts/scheduler/summary/nvpair-date-rules-1.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2019-09-23 17:00:00Z Current cluster status: + * Cluster Summary: + * Node List: * Node rhel7-3: standby * Online: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ] @@ -17,15 +19,11 @@ Transition Summary: * Start rsc3 ( rhel7-4 ) Executing Cluster Transition: - * Resource action: rsc1 start on rhel7-5 - * Resource action: rsc2 start on rhel7-2 - * Resource action: rsc3 start on rhel7-4 - * Resource action: rsc1 monitor=10000 on rhel7-5 - * Resource action: rsc2 monitor=10000 on rhel7-2 - * Resource action: rsc3 monitor=10000 on rhel7-4 Using the original execution date of: 2019-09-23 17:00:00Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Node rhel7-3: standby * Online: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ] @@ -33,6 +31,6 @@ Revised Cluster Status: * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-1 * FencingPass (stonith:fence_dummy): Started rhel7-2 - * rsc1 (ocf:pacemaker:Dummy): Started rhel7-5 - * rsc2 (ocf:pacemaker:Dummy): Started rhel7-2 - * rsc3 (ocf:pacemaker:Dummy): Started rhel7-4 + * rsc1 (ocf:pacemaker:Dummy): Stopped + * rsc2 (ocf:pacemaker:Dummy): Stopped + * rsc3 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/nvpair-id-ref.summary b/cts/scheduler/summary/nvpair-id-ref.summary index 4f058610255..b38a66196f2 100644 --- a/cts/scheduler/summary/nvpair-id-ref.summary +++ b/cts/scheduler/summary/nvpair-id-ref.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -12,20 +14,14 @@ Transition Summary: * Start rsc2 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc2 monitor on node1 - * Resource action: rsc1 start on node2 - * Resource action: rsc2 start on node1 - * Resource action: rsc1 monitor=10000 on node2 - * Resource action: rsc2 monitor=10000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 - * rsc1 (ocf:pacemaker:Dummy): Started node2 - * rsc2 (ocf:pacemaker:Dummy): Started node1 + * rsc1 (ocf:pacemaker:Dummy): Stopped + * rsc2 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/obsolete-lrm-resource.summary b/cts/scheduler/summary/obsolete-lrm-resource.summary index 3d8889e67c2..b714fc95873 100644 --- a/cts/scheduler/summary/obsolete-lrm-resource.summary +++ b/cts/scheduler/summary/obsolete-lrm-resource.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ yingying.site ] @@ -10,16 +12,13 @@ Transition Summary: * Start rsc1_child:0 ( yingying.site ) Executing Cluster Transition: - * Resource action: rsc1_child:0 monitor on yingying.site - * Pseudo action: rsc1_start_0 - * Resource action: rsc1 delete on yingying.site - * Resource action: rsc1_child:0 start on yingying.site - * Pseudo action: rsc1_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ yingying.site ] * Full List of Resources: * Clone Set: rsc1 
[rsc1_child]: - * Started: [ yingying.site ] + * Stopped: [ yingying.site ] diff --git a/cts/scheduler/summary/ocf_degraded-remap-ocf_ok.summary b/cts/scheduler/summary/ocf_degraded-remap-ocf_ok.summary index 7cfb040729f..c6a097b455b 100644 --- a/cts/scheduler/summary/ocf_degraded-remap-ocf_ok.summary +++ b/cts/scheduler/summary/ocf_degraded-remap-ocf_ok.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2020-09-30 10:24:21Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ rhel8-1 rhel8-2 ] @@ -13,6 +15,8 @@ Executing Cluster Transition: Using the original execution date of: 2020-09-30 10:24:21Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ rhel8-1 rhel8-2 ] diff --git a/cts/scheduler/summary/ocf_degraded_promoted-remap-ocf_ok.summary b/cts/scheduler/summary/ocf_degraded_promoted-remap-ocf_ok.summary index f2970425930..4c0aa858137 100644 --- a/cts/scheduler/summary/ocf_degraded_promoted-remap-ocf_ok.summary +++ b/cts/scheduler/summary/ocf_degraded_promoted-remap-ocf_ok.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2020-09-30 14:23:26Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ rhel8-1 rhel8-2 ] @@ -15,6 +17,8 @@ Executing Cluster Transition: Using the original execution date of: 2020-09-30 14:23:26Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ rhel8-1 rhel8-2 ] diff --git a/cts/scheduler/summary/on-fail-ignore.summary b/cts/scheduler/summary/on-fail-ignore.summary index 2558f6040d5..82b2e189e71 100644 --- a/cts/scheduler/summary/on-fail-ignore.summary +++ b/cts/scheduler/summary/on-fail-ignore.summary @@ -1,5 +1,11 @@ Using the original execution date of: 2017-10-26 14:23:50Z Current cluster status: + * Cluster Summary: +Only 'private' parameters to 0s-interval start for fence_db2 on 407888-db1 changed: 0:0;7:4:0:dcc10f17-10a1-4e5d-89af-ef7ff033520d +Only 'private' parameters to 60s-interval monitor for fence_db2 on 407888-db1 changed: 0:0;8:4:0:dcc10f17-10a1-4e5d-89af-ef7ff033520d +Only 'private' parameters to 0s-interval start for fence_db1 on 407892-db2 changed: 0:0;4:3:0:dcc10f17-10a1-4e5d-89af-ef7ff033520d +Only 'private' parameters to 60s-interval monitor for fence_db1 on 407892-db2 changed: 0:0;5:3:0:dcc10f17-10a1-4e5d-89af-ef7ff033520d + * Node List: * Online: [ 407888-db1 407892-db2 ] @@ -14,6 +20,8 @@ Executing Cluster Transition: Using the original execution date of: 2017-10-26 14:23:50Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ 407888-db1 407892-db2 ] diff --git a/cts/scheduler/summary/on_fail_demote1.summary b/cts/scheduler/summary/on_fail_demote1.summary index a386da096ad..a77a49c2d49 100644 --- a/cts/scheduler/summary/on_fail_demote1.summary +++ b/cts/scheduler/summary/on_fail_demote1.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2020-06-16 19:23:21Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] * RemoteOnline: [ remote-rhel7-2 ] @@ -32,37 +34,11 @@ Transition Summary: * Re-promote bundled:0 ( Promoted stateful-bundle-0 ) Executing Cluster Transition: - * Pseudo action: rsc1-clone_demote_0 - * Pseudo action: rsc2-master_demote_0 - * Pseudo action: lxc-ms-master_demote_0 - * Pseudo action: stateful-bundle_demote_0 - * Resource action: rsc1 demote on rhel7-4 - * Pseudo action: rsc1-clone_demoted_0 - * Pseudo action: rsc1-clone_promote_0 - * Resource action: rsc2 demote on remote-rhel7-2 - * Pseudo action: rsc2-master_demoted_0 - * Pseudo 
action: rsc2-master_promote_0 - * Resource action: lxc-ms demote on lxc2 - * Pseudo action: lxc-ms-master_demoted_0 - * Pseudo action: lxc-ms-master_promote_0 - * Pseudo action: stateful-bundle-master_demote_0 - * Resource action: rsc1 promote on rhel7-4 - * Pseudo action: rsc1-clone_promoted_0 - * Resource action: rsc2 promote on remote-rhel7-2 - * Pseudo action: rsc2-master_promoted_0 - * Resource action: lxc-ms promote on lxc2 - * Pseudo action: lxc-ms-master_promoted_0 - * Resource action: bundled demote on stateful-bundle-0 - * Pseudo action: stateful-bundle-master_demoted_0 - * Pseudo action: stateful-bundle_demoted_0 - * Pseudo action: stateful-bundle_promote_0 - * Pseudo action: stateful-bundle-master_promote_0 - * Resource action: bundled promote on stateful-bundle-0 - * Pseudo action: stateful-bundle-master_promoted_0 - * Pseudo action: stateful-bundle_promoted_0 Using the original execution date of: 2020-06-16 19:23:21Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] * RemoteOnline: [ remote-rhel7-2 ] @@ -71,18 +47,19 @@ Revised Cluster Status: * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-4 * Clone Set: rsc1-clone [rsc1] (promotable): - * Promoted: [ rhel7-4 ] + * rsc1 (ocf:pacemaker:Stateful): FAILED Promoted rhel7-4 * Unpromoted: [ lxc1 lxc2 remote-rhel7-2 rhel7-1 rhel7-3 rhel7-5 ] * Clone Set: rsc2-master [rsc2] (promotable): - * Promoted: [ remote-rhel7-2 ] + * rsc2 (ocf:pacemaker:Stateful): FAILED Promoted remote-rhel7-2 * Unpromoted: [ lxc1 lxc2 rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] * remote-rhel7-2 (ocf:pacemaker:remote): Started rhel7-1 * container1 (ocf:heartbeat:VirtualDomain): Started rhel7-3 * container2 (ocf:heartbeat:VirtualDomain): Started rhel7-3 * Clone Set: lxc-ms-master [lxc-ms] (promotable): - * Promoted: [ lxc2 ] + * lxc-ms (ocf:pacemaker:Stateful): FAILED Promoted lxc2 * Unpromoted: [ lxc1 ] + * Stopped: [ remote-rhel7-2 rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] * Container bundle set: stateful-bundle [pcmktest:http]: - * stateful-bundle-0 (192.168.122.131) (ocf:pacemaker:Stateful): Promoted rhel7-5 + * stateful-bundle-0 (192.168.122.131) (ocf:pacemaker:Stateful): FAILED Promoted rhel7-5 * stateful-bundle-1 (192.168.122.132) (ocf:pacemaker:Stateful): Unpromoted rhel7-1 * stateful-bundle-2 (192.168.122.133) (ocf:pacemaker:Stateful): Unpromoted rhel7-4 diff --git a/cts/scheduler/summary/on_fail_demote2.summary b/cts/scheduler/summary/on_fail_demote2.summary index 0ec0ea35fd0..329f9dfbe3c 100644 --- a/cts/scheduler/summary/on_fail_demote2.summary +++ b/cts/scheduler/summary/on_fail_demote2.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2020-06-16 19:23:21Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] @@ -17,27 +19,19 @@ Transition Summary: * Promote rsc1:1 ( Unpromoted -> Promoted rhel7-3 ) Executing Cluster Transition: - * Resource action: rsc1 cancel=10000 on rhel7-4 - * Resource action: rsc1 cancel=11000 on rhel7-3 - * Pseudo action: rsc1-clone_demote_0 - * Resource action: rsc1 demote on rhel7-4 - * Pseudo action: rsc1-clone_demoted_0 - * Pseudo action: rsc1-clone_promote_0 - * Resource action: rsc1 monitor=11000 on rhel7-4 - * Resource action: rsc1 promote on rhel7-3 - * Pseudo action: rsc1-clone_promoted_0 - * Resource action: rsc1 monitor=10000 on rhel7-3 Using the original execution date of: 2020-06-16 19:23:21Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ rhel7-1 rhel7-2 
rhel7-3 rhel7-4 rhel7-5 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-1 * Clone Set: rsc1-clone [rsc1] (promotable): - * Promoted: [ rhel7-3 ] - * Unpromoted: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ] + * rsc1 (ocf:pacemaker:Stateful): FAILED Promoted rhel7-4 + * Unpromoted: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] * Clone Set: rsc2-master [rsc2] (promotable): * Promoted: [ rhel7-4 ] * Unpromoted: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] diff --git a/cts/scheduler/summary/on_fail_demote3.summary b/cts/scheduler/summary/on_fail_demote3.summary index 793804af2fa..09387b2ef53 100644 --- a/cts/scheduler/summary/on_fail_demote3.summary +++ b/cts/scheduler/summary/on_fail_demote3.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2020-06-16 19:23:21Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] @@ -16,21 +18,19 @@ Transition Summary: * Demote rsc1:0 ( Promoted -> Unpromoted rhel7-4 ) Executing Cluster Transition: - * Resource action: rsc1 cancel=10000 on rhel7-4 - * Pseudo action: rsc1-clone_demote_0 - * Resource action: rsc1 demote on rhel7-4 - * Pseudo action: rsc1-clone_demoted_0 - * Resource action: rsc1 monitor=11000 on rhel7-4 Using the original execution date of: 2020-06-16 19:23:21Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-1 * Clone Set: rsc1-clone [rsc1] (promotable): - * Unpromoted: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] + * rsc1 (ocf:pacemaker:Stateful): FAILED Promoted rhel7-4 + * Unpromoted: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] * Clone Set: rsc2-master [rsc2] (promotable): * Promoted: [ rhel7-4 ] * Unpromoted: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] diff --git a/cts/scheduler/summary/on_fail_demote4.summary b/cts/scheduler/summary/on_fail_demote4.summary index 30826511980..cfe17fa8109 100644 --- a/cts/scheduler/summary/on_fail_demote4.summary +++ b/cts/scheduler/summary/on_fail_demote4.summary @@ -1,25 +1,29 @@ Using the original execution date of: 2020-06-16 19:23:21Z Current cluster status: + * Cluster Summary: + * Node List: - * RemoteNode remote-rhel7-2: UNCLEAN (offline) + * RemoteNode remote-rhel7-2: UNCLEAN (online) * Node rhel7-4: UNCLEAN (offline) * Online: [ rhel7-1 rhel7-3 rhel7-5 ] - * GuestOnline: [ lxc1 stateful-bundle-1 ] + * GuestOnline: [ lxc1 lxc2 stateful-bundle-0 stateful-bundle-1 stateful-bundle-2 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-4 (UNCLEAN) * Clone Set: rsc1-clone [rsc1] (promotable): * rsc1 (ocf:pacemaker:Stateful): Promoted rhel7-4 (UNCLEAN) - * rsc1 (ocf:pacemaker:Stateful): Unpromoted remote-rhel7-2 (UNCLEAN) - * Unpromoted: [ lxc1 rhel7-1 rhel7-3 rhel7-5 ] + * rsc1 (ocf:pacemaker:Stateful): FAILED lxc2 + * Unpromoted: [ lxc1 remote-rhel7-2 rhel7-1 rhel7-3 rhel7-5 ] * Clone Set: rsc2-master [rsc2] (promotable): * rsc2 (ocf:pacemaker:Stateful): Unpromoted rhel7-4 (UNCLEAN) - * rsc2 (ocf:pacemaker:Stateful): Promoted remote-rhel7-2 (UNCLEAN) + * rsc2 (ocf:pacemaker:Stateful): FAILED lxc2 + * Promoted: [ remote-rhel7-2 ] * Unpromoted: [ lxc1 rhel7-1 rhel7-3 rhel7-5 ] * remote-rhel7-2 (ocf:pacemaker:remote): FAILED rhel7-1 * container1 (ocf:heartbeat:VirtualDomain): Started rhel7-3 * container2 (ocf:heartbeat:VirtualDomain): FAILED rhel7-3 * Clone Set: lxc-ms-master [lxc-ms] (promotable): + * lxc-ms (ocf:pacemaker:Stateful): FAILED Promoted lxc2 * Unpromoted: [ lxc1 ] * Stopped: [ 
remote-rhel7-2 rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] * Container bundle set: stateful-bundle [pcmktest:http]: @@ -55,135 +59,36 @@ Transition Summary: * Restart lxc2 ( rhel7-3 ) due to required container2 start Executing Cluster Transition: - * Pseudo action: Fencing_stop_0 - * Resource action: rsc1 cancel=11000 on rhel7-3 - * Pseudo action: rsc1-clone_demote_0 - * Resource action: rsc2 cancel=11000 on rhel7-3 - * Pseudo action: rsc2-master_demote_0 - * Pseudo action: lxc-ms-master_demote_0 - * Resource action: stateful-bundle-0 stop on rhel7-5 - * Pseudo action: stateful-bundle-2_stop_0 - * Resource action: lxc2 stop on rhel7-3 - * Pseudo action: stateful-bundle_demote_0 - * Fencing remote-rhel7-2 (reboot) - * Fencing rhel7-4 (reboot) - * Pseudo action: rsc1_demote_0 - * Pseudo action: rsc1-clone_demoted_0 - * Pseudo action: rsc2_demote_0 - * Pseudo action: rsc2-master_demoted_0 - * Resource action: container2 stop on rhel7-3 - * Pseudo action: stateful-bundle-master_demote_0 - * Pseudo action: stonith-stateful-bundle-2-reboot on stateful-bundle-2 - * Pseudo action: stonith-lxc2-reboot on lxc2 - * Resource action: Fencing start on rhel7-5 - * Pseudo action: rsc1-clone_stop_0 - * Pseudo action: rsc2-master_stop_0 - * Pseudo action: lxc-ms_demote_0 - * Pseudo action: lxc-ms-master_demoted_0 - * Pseudo action: lxc-ms-master_stop_0 - * Pseudo action: bundled_demote_0 - * Pseudo action: stateful-bundle-master_demoted_0 - * Pseudo action: stateful-bundle_demoted_0 - * Pseudo action: stateful-bundle_stop_0 - * Resource action: Fencing monitor=120000 on rhel7-5 - * Pseudo action: rsc1_stop_0 - * Pseudo action: rsc1_stop_0 - * Pseudo action: rsc1_stop_0 - * Pseudo action: rsc1-clone_stopped_0 - * Pseudo action: rsc1-clone_start_0 - * Pseudo action: rsc2_stop_0 - * Pseudo action: rsc2_stop_0 - * Pseudo action: rsc2_stop_0 - * Pseudo action: rsc2-master_stopped_0 - * Pseudo action: rsc2-master_start_0 - * Resource action: remote-rhel7-2 stop on rhel7-1 - * Pseudo action: lxc-ms_stop_0 - * Pseudo action: lxc-ms-master_stopped_0 - * Pseudo action: lxc-ms-master_start_0 - * Resource action: stateful-bundle-docker-0 stop on rhel7-5 - * Pseudo action: stateful-bundle-docker-2_stop_0 - * Pseudo action: stonith-stateful-bundle-0-reboot on stateful-bundle-0 - * Resource action: remote-rhel7-2 start on rhel7-1 - * Resource action: remote-rhel7-2 monitor=60000 on rhel7-1 - * Resource action: container2 start on rhel7-3 - * Resource action: container2 monitor=20000 on rhel7-3 - * Pseudo action: stateful-bundle-master_stop_0 - * Pseudo action: stateful-bundle-ip-192.168.122.133_stop_0 - * Resource action: lxc2 start on rhel7-3 - * Resource action: lxc2 monitor=30000 on rhel7-3 - * Resource action: rsc1 start on lxc2 - * Pseudo action: rsc1-clone_running_0 - * Resource action: rsc2 start on lxc2 - * Pseudo action: rsc2-master_running_0 - * Resource action: lxc-ms start on lxc2 - * Pseudo action: lxc-ms-master_running_0 - * Pseudo action: bundled_stop_0 - * Resource action: stateful-bundle-ip-192.168.122.133 start on rhel7-3 - * Resource action: rsc1 monitor=11000 on lxc2 - * Pseudo action: rsc1-clone_promote_0 - * Resource action: rsc2 monitor=11000 on lxc2 - * Pseudo action: rsc2-master_promote_0 - * Pseudo action: lxc-ms-master_promote_0 - * Pseudo action: bundled_stop_0 - * Pseudo action: stateful-bundle-master_stopped_0 - * Resource action: stateful-bundle-ip-192.168.122.133 monitor=60000 on rhel7-3 - * Pseudo action: stateful-bundle_stopped_0 - * Pseudo action: stateful-bundle_start_0 - * Resource action: rsc1 
promote on rhel7-3 - * Pseudo action: rsc1-clone_promoted_0 - * Resource action: rsc2 promote on rhel7-3 - * Pseudo action: rsc2-master_promoted_0 - * Resource action: lxc-ms promote on lxc2 - * Pseudo action: lxc-ms-master_promoted_0 - * Pseudo action: stateful-bundle-master_start_0 - * Resource action: stateful-bundle-docker-0 start on rhel7-5 - * Resource action: stateful-bundle-docker-0 monitor=60000 on rhel7-5 - * Resource action: stateful-bundle-0 start on rhel7-5 - * Resource action: stateful-bundle-0 monitor=30000 on rhel7-5 - * Resource action: stateful-bundle-docker-2 start on rhel7-3 - * Resource action: stateful-bundle-2 start on rhel7-3 - * Resource action: rsc1 monitor=10000 on rhel7-3 - * Resource action: rsc2 monitor=10000 on rhel7-3 - * Resource action: lxc-ms monitor=10000 on lxc2 - * Resource action: bundled start on stateful-bundle-0 - * Resource action: bundled start on stateful-bundle-2 - * Pseudo action: stateful-bundle-master_running_0 - * Resource action: stateful-bundle-docker-2 monitor=60000 on rhel7-3 - * Resource action: stateful-bundle-2 monitor=30000 on rhel7-3 - * Pseudo action: stateful-bundle_running_0 - * Resource action: bundled monitor=11000 on stateful-bundle-2 - * Pseudo action: stateful-bundle_promote_0 - * Pseudo action: stateful-bundle-master_promote_0 - * Resource action: bundled promote on stateful-bundle-0 - * Pseudo action: stateful-bundle-master_promoted_0 - * Pseudo action: stateful-bundle_promoted_0 - * Resource action: bundled monitor=10000 on stateful-bundle-0 Using the original execution date of: 2020-06-16 19:23:21Z Revised Cluster Status: + * Cluster Summary: + * Node List: + * RemoteNode remote-rhel7-2: UNCLEAN (online) + * Node rhel7-4: UNCLEAN (offline) * Online: [ rhel7-1 rhel7-3 rhel7-5 ] - * OFFLINE: [ rhel7-4 ] - * RemoteOnline: [ remote-rhel7-2 ] * GuestOnline: [ lxc1 lxc2 stateful-bundle-0 stateful-bundle-1 stateful-bundle-2 ] * Full List of Resources: - * Fencing (stonith:fence_xvm): Started rhel7-5 + * Fencing (stonith:fence_xvm): Started rhel7-4 (UNCLEAN) * Clone Set: rsc1-clone [rsc1] (promotable): - * Promoted: [ rhel7-3 ] - * Unpromoted: [ lxc1 lxc2 rhel7-1 rhel7-5 ] - * Stopped: [ remote-rhel7-2 rhel7-4 ] + * rsc1 (ocf:pacemaker:Stateful): Promoted rhel7-4 (UNCLEAN) + * rsc1 (ocf:pacemaker:Stateful): FAILED lxc2 + * Unpromoted: [ lxc1 remote-rhel7-2 rhel7-1 rhel7-3 rhel7-5 ] * Clone Set: rsc2-master [rsc2] (promotable): - * Promoted: [ rhel7-3 ] - * Unpromoted: [ lxc1 lxc2 rhel7-1 rhel7-5 ] - * Stopped: [ remote-rhel7-2 rhel7-4 ] - * remote-rhel7-2 (ocf:pacemaker:remote): Started rhel7-1 + * rsc2 (ocf:pacemaker:Stateful): Unpromoted rhel7-4 (UNCLEAN) + * rsc2 (ocf:pacemaker:Stateful): FAILED lxc2 + * Promoted: [ remote-rhel7-2 ] + * Unpromoted: [ lxc1 rhel7-1 rhel7-3 rhel7-5 ] + * remote-rhel7-2 (ocf:pacemaker:remote): FAILED rhel7-1 * container1 (ocf:heartbeat:VirtualDomain): Started rhel7-3 - * container2 (ocf:heartbeat:VirtualDomain): Started rhel7-3 + * container2 (ocf:heartbeat:VirtualDomain): FAILED rhel7-3 * Clone Set: lxc-ms-master [lxc-ms] (promotable): - * Promoted: [ lxc2 ] + * lxc-ms (ocf:pacemaker:Stateful): FAILED Promoted lxc2 * Unpromoted: [ lxc1 ] + * Stopped: [ remote-rhel7-2 rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] * Container bundle set: stateful-bundle [pcmktest:http]: - * stateful-bundle-0 (192.168.122.131) (ocf:pacemaker:Stateful): Promoted rhel7-5 + * stateful-bundle-0 (192.168.122.131) (ocf:pacemaker:Stateful): FAILED Promoted rhel7-5 * stateful-bundle-1 (192.168.122.132) (ocf:pacemaker:Stateful): 
Unpromoted rhel7-1 - * stateful-bundle-2 (192.168.122.133) (ocf:pacemaker:Stateful): Unpromoted rhel7-3 + * stateful-bundle-2 (192.168.122.133) (ocf:pacemaker:Stateful): FAILED rhel7-4 (UNCLEAN) diff --git a/cts/scheduler/summary/one-or-more-0.summary b/cts/scheduler/summary/one-or-more-0.summary index 100e42c54a1..4e15e13689a 100644 --- a/cts/scheduler/summary/one-or-more-0.summary +++ b/cts/scheduler/summary/one-or-more-0.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * OFFLINE: [ fc16-builder2 ] @@ -16,23 +18,16 @@ Transition Summary: * Start D ( fc16-builder ) Executing Cluster Transition: - * Resource action: A monitor on fc16-builder - * Resource action: B monitor on fc16-builder - * Resource action: C monitor on fc16-builder - * Resource action: D monitor on fc16-builder - * Resource action: A start on fc16-builder - * Resource action: B start on fc16-builder - * Resource action: C start on fc16-builder - * Pseudo action: one-or-more:require-all-set-1 - * Resource action: D start on fc16-builder Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * OFFLINE: [ fc16-builder2 ] * Full List of Resources: - * A (ocf:pacemaker:Dummy): Started fc16-builder - * B (ocf:pacemaker:Dummy): Started fc16-builder - * C (ocf:pacemaker:Dummy): Started fc16-builder - * D (ocf:pacemaker:Dummy): Started fc16-builder + * A (ocf:pacemaker:Dummy): Stopped + * B (ocf:pacemaker:Dummy): Stopped + * C (ocf:pacemaker:Dummy): Stopped + * D (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/one-or-more-1.summary b/cts/scheduler/summary/one-or-more-1.summary index cdbdc69621e..f30c56e4378 100644 --- a/cts/scheduler/summary/one-or-more-1.summary +++ b/cts/scheduler/summary/one-or-more-1.summary @@ -1,6 +1,8 @@ 1 of 4 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * OFFLINE: [ fc16-builder2 ] @@ -17,12 +19,10 @@ Transition Summary: * Start D ( fc16-builder ) due to unrunnable one-or-more:require-all-set-1 (blocked) Executing Cluster Transition: - * Resource action: A monitor on fc16-builder - * Resource action: B monitor on fc16-builder - * Resource action: C monitor on fc16-builder - * Resource action: D monitor on fc16-builder Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * OFFLINE: [ fc16-builder2 ] diff --git a/cts/scheduler/summary/one-or-more-2.summary b/cts/scheduler/summary/one-or-more-2.summary index eb40a7f14e9..61a3631e9fc 100644 --- a/cts/scheduler/summary/one-or-more-2.summary +++ b/cts/scheduler/summary/one-or-more-2.summary @@ -1,6 +1,8 @@ 1 of 4 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * OFFLINE: [ fc16-builder2 ] @@ -17,22 +19,16 @@ Transition Summary: * Start D ( fc16-builder ) Executing Cluster Transition: - * Resource action: A monitor on fc16-builder - * Resource action: B monitor on fc16-builder - * Resource action: C monitor on fc16-builder - * Resource action: D monitor on fc16-builder - * Resource action: A start on fc16-builder - * Resource action: C start on fc16-builder - * Pseudo action: one-or-more:require-all-set-1 - * Resource action: D start on fc16-builder Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * OFFLINE: [ fc16-builder2 ] * Full List of 
Resources: - * A (ocf:pacemaker:Dummy): Started fc16-builder + * A (ocf:pacemaker:Dummy): Stopped * B (ocf:pacemaker:Dummy): Stopped (disabled) - * C (ocf:pacemaker:Dummy): Started fc16-builder - * D (ocf:pacemaker:Dummy): Started fc16-builder + * C (ocf:pacemaker:Dummy): Stopped + * D (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/one-or-more-3.summary b/cts/scheduler/summary/one-or-more-3.summary index 92358702da3..ed591ba5263 100644 --- a/cts/scheduler/summary/one-or-more-3.summary +++ b/cts/scheduler/summary/one-or-more-3.summary @@ -1,6 +1,8 @@ 2 of 4 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * OFFLINE: [ fc16-builder2 ] @@ -16,19 +18,16 @@ Transition Summary: * Start D ( fc16-builder ) due to unrunnable one-or-more:require-all-set-1 (blocked) Executing Cluster Transition: - * Resource action: A monitor on fc16-builder - * Resource action: B monitor on fc16-builder - * Resource action: C monitor on fc16-builder - * Resource action: D monitor on fc16-builder - * Resource action: A start on fc16-builder Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * OFFLINE: [ fc16-builder2 ] * Full List of Resources: - * A (ocf:pacemaker:Dummy): Started fc16-builder + * A (ocf:pacemaker:Dummy): Stopped * B (ocf:pacemaker:Dummy): Stopped (disabled) * C (ocf:pacemaker:Dummy): Stopped (disabled) * D (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/one-or-more-4.summary b/cts/scheduler/summary/one-or-more-4.summary index 828f6a5df1f..b51c27566ff 100644 --- a/cts/scheduler/summary/one-or-more-4.summary +++ b/cts/scheduler/summary/one-or-more-4.summary @@ -1,6 +1,8 @@ 1 of 4 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * OFFLINE: [ fc16-builder2 ] @@ -17,22 +19,16 @@ Transition Summary: * Start C ( fc16-builder ) Executing Cluster Transition: - * Resource action: A monitor on fc16-builder - * Resource action: B monitor on fc16-builder - * Resource action: C monitor on fc16-builder - * Resource action: D monitor on fc16-builder - * Resource action: A start on fc16-builder - * Resource action: B start on fc16-builder - * Resource action: C start on fc16-builder - * Pseudo action: one-or-more:require-all-set-1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * OFFLINE: [ fc16-builder2 ] * Full List of Resources: - * A (ocf:pacemaker:Dummy): Started fc16-builder - * B (ocf:pacemaker:Dummy): Started fc16-builder - * C (ocf:pacemaker:Dummy): Started fc16-builder + * A (ocf:pacemaker:Dummy): Stopped + * B (ocf:pacemaker:Dummy): Stopped + * C (ocf:pacemaker:Dummy): Stopped * D (ocf:pacemaker:Dummy): Stopped (disabled) diff --git a/cts/scheduler/summary/one-or-more-5.summary b/cts/scheduler/summary/one-or-more-5.summary index 607566b6f9f..3eafd9b6d09 100644 --- a/cts/scheduler/summary/one-or-more-5.summary +++ b/cts/scheduler/summary/one-or-more-5.summary @@ -1,6 +1,8 @@ 2 of 6 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * OFFLINE: [ fc16-builder2 ] @@ -20,28 +22,18 @@ Transition Summary: * Start F ( fc16-builder ) Executing Cluster Transition: - * Resource action: A monitor on fc16-builder - * Resource action: B monitor on fc16-builder - * Resource 
-  * Resource action: D monitor on fc16-builder
-  * Resource action: E monitor on fc16-builder
-  * Resource action: F monitor on fc16-builder
-  * Resource action: B start on fc16-builder
-  * Pseudo action: one-or-more:require-all-set-1
-  * Resource action: A start on fc16-builder
-  * Resource action: E start on fc16-builder
-  * Pseudo action: one-or-more:require-all-set-3
-  * Resource action: F start on fc16-builder
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ fc16-builder ]
     * OFFLINE: [ fc16-builder2 ]
 
   * Full List of Resources:
-    * A (ocf:pacemaker:Dummy): Started fc16-builder
-    * B (ocf:pacemaker:Dummy): Started fc16-builder
+    * A (ocf:pacemaker:Dummy): Stopped
+    * B (ocf:pacemaker:Dummy): Stopped
     * C (ocf:pacemaker:Dummy): Stopped (disabled)
     * D (ocf:pacemaker:Dummy): Stopped (disabled)
-    * E (ocf:pacemaker:Dummy): Started fc16-builder
-    * F (ocf:pacemaker:Dummy): Started fc16-builder
+    * E (ocf:pacemaker:Dummy): Stopped
+    * F (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/one-or-more-6.summary b/cts/scheduler/summary/one-or-more-6.summary
index 79dc8914af0..068984f1d5f 100644
--- a/cts/scheduler/summary/one-or-more-6.summary
+++ b/cts/scheduler/summary/one-or-more-6.summary
@@ -1,6 +1,8 @@
 1 of 3 resource instances DISABLED and 0 BLOCKED from further action due to failure
 
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ fc16-builder ]
     * OFFLINE: [ fc16-builder2 ]
@@ -14,14 +16,15 @@ Transition Summary:
   * Stop B ( fc16-builder ) due to node availability
 
 Executing Cluster Transition:
-  * Resource action: B stop on fc16-builder
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ fc16-builder ]
     * OFFLINE: [ fc16-builder2 ]
 
   * Full List of Resources:
     * A (ocf:pacemaker:Dummy): Started fc16-builder
-    * B (ocf:pacemaker:Dummy): Stopped (disabled)
+    * B (ocf:pacemaker:Dummy): Started fc16-builder (disabled)
     * C (ocf:pacemaker:Dummy): Started fc16-builder
diff --git a/cts/scheduler/summary/one-or-more-7.summary b/cts/scheduler/summary/one-or-more-7.summary
index a25c6188fd3..5023c17e8be 100644
--- a/cts/scheduler/summary/one-or-more-7.summary
+++ b/cts/scheduler/summary/one-or-more-7.summary
@@ -1,6 +1,8 @@
 1 of 3 resource instances DISABLED and 0 BLOCKED from further action due to failure
 
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ fc16-builder ]
     * OFFLINE: [ fc16-builder2 ]
@@ -14,9 +16,10 @@ Transition Summary:
   * Stop C ( fc16-builder ) due to node availability
 
 Executing Cluster Transition:
-  * Resource action: C stop on fc16-builder
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ fc16-builder ]
     * OFFLINE: [ fc16-builder2 ]
@@ -24,4 +27,4 @@ Revised Cluster Status:
   * Full List of Resources:
     * A (ocf:pacemaker:Dummy): Started fc16-builder
     * B (ocf:pacemaker:Dummy): Started fc16-builder
-    * C (ocf:pacemaker:Dummy): Stopped (disabled)
+    * C (ocf:pacemaker:Dummy): Started fc16-builder (disabled)
diff --git a/cts/scheduler/summary/one-or-more-unrunnable-instances.summary b/cts/scheduler/summary/one-or-more-unrunnable-instances.summary
index 58c572d199e..39633a10593 100644
--- a/cts/scheduler/summary/one-or-more-unrunnable-instances.summary
+++ b/cts/scheduler/summary/one-or-more-unrunnable-instances.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * RemoteOnline: [ mrg-07 mrg-08 mrg-09 ]
@@ -9,7 +12,6 @@ Current cluster status:
     * fence3 (stonith:fence_xvm): Started rdo7-node3
     * Clone Set: lb-haproxy-clone [lb-haproxy]:
       * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * vip-db (ocf:heartbeat:IPaddr2): Started rdo7-node3
     * vip-rabbitmq (ocf:heartbeat:IPaddr2): Started rdo7-node1
     * vip-keystone (ocf:heartbeat:IPaddr2): Started rdo7-node2
@@ -25,108 +27,100 @@ Current cluster status:
     * vip-node (ocf:heartbeat:IPaddr2): Started rdo7-node1
     * Clone Set: galera-master [galera] (promotable):
       * Promoted: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * Clone Set: rabbitmq-server-clone [rabbitmq-server]:
       * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * Clone Set: memcached-clone [memcached]:
       * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * Clone Set: mongodb-clone [mongodb]:
       * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * Clone Set: keystone-clone [keystone]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: glance-fs-clone [glance-fs]:
       * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * Clone Set: glance-registry-clone [glance-registry]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: glance-api-clone [glance-api]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: cinder-api-clone [cinder-api]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: cinder-scheduler-clone [cinder-scheduler]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * cinder-volume (systemd:openstack-cinder-volume): Stopped
     * Clone Set: swift-fs-clone [swift-fs]:
       * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * Clone Set: swift-account-clone [swift-account]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: swift-container-clone [swift-container]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: swift-object-clone [swift-object]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: swift-proxy-clone [swift-proxy]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * swift-object-expirer (systemd:openstack-swift-object-expirer): Stopped
     * Clone Set: neutron-server-clone [neutron-server]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: neutron-scale-clone [neutron-scale] (unique):
       * neutron-scale:0 (ocf:neutron:NeutronScale): Stopped
       * neutron-scale:1 (ocf:neutron:NeutronScale): Stopped
       * neutron-scale:2 (ocf:neutron:NeutronScale): Stopped
     * Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: neutron-l3-agent-clone [neutron-l3-agent]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: nova-consoleauth-clone [nova-consoleauth]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: nova-novncproxy-clone [nova-novncproxy]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: nova-api-clone [nova-api]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: nova-scheduler-clone [nova-scheduler]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: nova-conductor-clone [nova-conductor]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: redis-master [redis] (promotable):
       * Promoted: [ rdo7-node1 ]
       * Unpromoted: [ rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * vip-redis (ocf:heartbeat:IPaddr2): Started rdo7-node1
     * Clone Set: ceilometer-central-clone [ceilometer-central]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: ceilometer-collector-clone [ceilometer-collector]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: ceilometer-api-clone [ceilometer-api]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: ceilometer-delay-clone [ceilometer-delay]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: ceilometer-alarm-evaluator-clone [ceilometer-alarm-evaluator]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: ceilometer-alarm-notifier-clone [ceilometer-alarm-notifier]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: ceilometer-notification-clone [ceilometer-notification]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: heat-api-clone [heat-api]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: heat-api-cfn-clone [heat-api-cfn]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: heat-api-cloudwatch-clone [heat-api-cloudwatch]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: heat-engine-clone [heat-engine]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: horizon-clone [horizon]:
       * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * Clone Set: neutron-openvswitch-agent-compute-clone [neutron-openvswitch-agent-compute]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * Clone Set: libvirtd-compute-clone [libvirtd-compute]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * Clone Set: ceilometer-compute-clone [ceilometer-compute]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * Clone Set: nova-compute-clone [nova-compute]:
-      * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * fence-nova (stonith:fence_compute): Stopped
     * fence-compute (stonith:fence_apc_snmp): Started rdo7-node3
     * mrg-07 (ocf:pacemaker:remote): Started rdo7-node1
@@ -250,321 +244,11 @@ Transition Summary:
   * Start fence-nova ( rdo7-node2 )
 
 Executing Cluster Transition:
-  * Resource action: galera monitor=10000 on rdo7-node2
-  * Pseudo action: keystone-clone_start_0
-  * Pseudo action: nova-compute-clone_pre_notify_start_0
-  * Resource action: keystone start on rdo7-node2
-  * Resource action: keystone start on rdo7-node3
-  * Resource action: keystone start on rdo7-node1
-  * Pseudo action: keystone-clone_running_0
-  * Pseudo action: glance-registry-clone_start_0
-  * Pseudo action: cinder-api-clone_start_0
-  * Pseudo action: swift-account-clone_start_0
-  * Pseudo action: neutron-server-clone_start_0
-  * Pseudo action: nova-consoleauth-clone_start_0
-  * Pseudo action: ceilometer-central-clone_start_0
-  * Pseudo action: nova-compute-clone_confirmed-pre_notify_start_0
-  * Resource action: keystone monitor=60000 on rdo7-node2
-  * Resource action: keystone monitor=60000 on rdo7-node3
-  * Resource action: keystone monitor=60000 on rdo7-node1
-  * Resource action: glance-registry start on rdo7-node2
-  * Resource action: glance-registry start on rdo7-node3
-  * Resource action: glance-registry start on rdo7-node1
-  * Pseudo action: glance-registry-clone_running_0
-  * Pseudo action: glance-api-clone_start_0
-  * Resource action: cinder-api start on rdo7-node2
-  * Resource action: cinder-api start on rdo7-node3
-  * Resource action: cinder-api start on rdo7-node1
-  * Pseudo action: cinder-api-clone_running_0
-  * Pseudo action: cinder-scheduler-clone_start_0
-  * Resource action: swift-account start on rdo7-node3
-  * Resource action: swift-account start on rdo7-node1
-  * Resource action: swift-account start on rdo7-node2
-  * Pseudo action: swift-account-clone_running_0
-  * Pseudo action: swift-container-clone_start_0
-  * Pseudo action: swift-proxy-clone_start_0
-  * Resource action: neutron-server start on rdo7-node1
-  * Resource action: neutron-server start on rdo7-node2
-  * Resource action: neutron-server start on rdo7-node3
-  * Pseudo action: neutron-server-clone_running_0
-  * Pseudo action: neutron-scale-clone_start_0
-  * Resource action: nova-consoleauth start on rdo7-node1
-  * Resource action: nova-consoleauth start on rdo7-node2
-  * Resource action: nova-consoleauth start on rdo7-node3
-  * Pseudo action: nova-consoleauth-clone_running_0
-  * Pseudo action: nova-novncproxy-clone_start_0
-  * Resource action: ceilometer-central start on rdo7-node2
-  * Resource action: ceilometer-central start on rdo7-node3
-  * Resource action: ceilometer-central start on rdo7-node1
-  * Pseudo action: ceilometer-central-clone_running_0
-  * Pseudo action: ceilometer-collector-clone_start_0
-  * Pseudo action: clone-one-or-more:order-neutron-server-clone-neutron-openvswitch-agent-compute-clone-mandatory
-  * Resource action: glance-registry monitor=60000 on rdo7-node2
-  * Resource action: glance-registry monitor=60000 on rdo7-node3
-  * Resource action: glance-registry monitor=60000 on rdo7-node1
-  * Resource action: glance-api start on rdo7-node2
-  * Resource action: glance-api start on rdo7-node3
-  * Resource action: glance-api start on rdo7-node1
-  * Pseudo action: glance-api-clone_running_0
-  * Resource action: cinder-api monitor=60000 on rdo7-node2
-  * Resource action: cinder-api monitor=60000 on rdo7-node3
-  * Resource action: cinder-api monitor=60000 on rdo7-node1
-  * Resource action: cinder-scheduler start on rdo7-node2
-  * Resource action: cinder-scheduler start on rdo7-node3
-  * Resource action: cinder-scheduler start on rdo7-node1
-  * Pseudo action: cinder-scheduler-clone_running_0
-  * Resource action: cinder-volume start on rdo7-node2
-  * Resource action: swift-account monitor=60000 on rdo7-node3
-  * Resource action: swift-account monitor=60000 on rdo7-node1
-  * Resource action: swift-account monitor=60000 on rdo7-node2
-  * Resource action: swift-container start on rdo7-node3
-  * Resource action: swift-container start on rdo7-node1
-  * Resource action: swift-container start on rdo7-node2
-  * Pseudo action: swift-container-clone_running_0
-  * Pseudo action: swift-object-clone_start_0
-  * Resource action: swift-proxy start on rdo7-node3
-  * Resource action: swift-proxy start on rdo7-node1
-  * Resource action: swift-proxy start on rdo7-node2
-  * Pseudo action: swift-proxy-clone_running_0
-  * Resource action: swift-object-expirer start on rdo7-node3
-  * Resource action: neutron-server monitor=60000 on rdo7-node1
-  * Resource action: neutron-server monitor=60000 on rdo7-node2
-  * Resource action: neutron-server monitor=60000 on rdo7-node3
-  * Resource action: neutron-scale:0 start on rdo7-node1
-  * Resource action: neutron-scale:1 start on rdo7-node2
-  * Resource action: neutron-scale:2 start on rdo7-node3
-  * Pseudo action: neutron-scale-clone_running_0
-  * Pseudo action: neutron-ovs-cleanup-clone_start_0
-  * Resource action: nova-consoleauth monitor=60000 on rdo7-node1
-  * Resource action: nova-consoleauth monitor=60000 on rdo7-node2
-  * Resource action: nova-consoleauth monitor=60000 on rdo7-node3
-  * Resource action: nova-novncproxy start on rdo7-node1
-  * Resource action: nova-novncproxy start on rdo7-node2
-  * Resource action: nova-novncproxy start on rdo7-node3
-  * Pseudo action: nova-novncproxy-clone_running_0
-  * Pseudo action: nova-api-clone_start_0
-  * Resource action: ceilometer-central monitor=60000 on rdo7-node2
-  * Resource action: ceilometer-central monitor=60000 on rdo7-node3
-  * Resource action: ceilometer-central monitor=60000 on rdo7-node1
-  * Resource action: ceilometer-collector start on rdo7-node2
-  * Resource action: ceilometer-collector start on rdo7-node3
-  * Resource action: ceilometer-collector start on rdo7-node1
-  * Pseudo action: ceilometer-collector-clone_running_0
-  * Pseudo action: ceilometer-api-clone_start_0
-  * Pseudo action: neutron-openvswitch-agent-compute-clone_start_0
-  * Resource action: glance-api monitor=60000 on rdo7-node2
-  * Resource action: glance-api monitor=60000 on rdo7-node3
-  * Resource action: glance-api monitor=60000 on rdo7-node1
-  * Resource action: cinder-scheduler monitor=60000 on rdo7-node2
-  * Resource action: cinder-scheduler monitor=60000 on rdo7-node3
-  * Resource action: cinder-scheduler monitor=60000 on rdo7-node1
-  * Resource action: cinder-volume monitor=60000 on rdo7-node2
-  * Resource action: swift-container monitor=60000 on rdo7-node3
-  * Resource action: swift-container monitor=60000 on rdo7-node1
-  * Resource action: swift-container monitor=60000 on rdo7-node2
-  * Resource action: swift-object start on rdo7-node3
-  * Resource action: swift-object start on rdo7-node1
-  * Resource action: swift-object start on rdo7-node2
-  * Pseudo action: swift-object-clone_running_0
-  * Resource action: swift-proxy monitor=60000 on rdo7-node3
-  * Resource action: swift-proxy monitor=60000 on rdo7-node1
-  * Resource action: swift-proxy monitor=60000 on rdo7-node2
-  * Resource action: swift-object-expirer monitor=60000 on rdo7-node3
-  * Resource action: neutron-scale:0 monitor=10000 on rdo7-node1
-  * Resource action: neutron-scale:1 monitor=10000 on rdo7-node2
-  * Resource action: neutron-scale:2 monitor=10000 on rdo7-node3
-  * Resource action: neutron-ovs-cleanup start on rdo7-node1
-  * Resource action: neutron-ovs-cleanup start on rdo7-node2
-  * Resource action: neutron-ovs-cleanup start on rdo7-node3
-  * Pseudo action: neutron-ovs-cleanup-clone_running_0
-  * Pseudo action: neutron-netns-cleanup-clone_start_0
-  * Resource action: nova-novncproxy monitor=60000 on rdo7-node1
-  * Resource action: nova-novncproxy monitor=60000 on rdo7-node2
-  * Resource action: nova-novncproxy monitor=60000 on rdo7-node3
-  * Resource action: nova-api start on rdo7-node1
-  * Resource action: nova-api start on rdo7-node2
-  * Resource action: nova-api start on rdo7-node3
-  * Pseudo action: nova-api-clone_running_0
-  * Pseudo action: nova-scheduler-clone_start_0
-  * Resource action: ceilometer-collector monitor=60000 on rdo7-node2
-  * Resource action: ceilometer-collector monitor=60000 on rdo7-node3
-  * Resource action: ceilometer-collector monitor=60000 on rdo7-node1
-  * Resource action: ceilometer-api start on rdo7-node2
-  * Resource action: ceilometer-api start on rdo7-node3
-  * Resource action: ceilometer-api start on rdo7-node1
-  * Pseudo action: ceilometer-api-clone_running_0
-  * Pseudo action: ceilometer-delay-clone_start_0
-  * Resource action: neutron-openvswitch-agent-compute start on mrg-07
-  * Resource action: neutron-openvswitch-agent-compute start on mrg-08
-  * Resource action: neutron-openvswitch-agent-compute start on mrg-09
-  * Pseudo action: neutron-openvswitch-agent-compute-clone_running_0
-  * Pseudo action: libvirtd-compute-clone_start_0
-  * Resource action: swift-object monitor=60000 on rdo7-node3
-  * Resource action: swift-object monitor=60000 on rdo7-node1
-  * Resource action: swift-object monitor=60000 on rdo7-node2
-  * Resource action: neutron-ovs-cleanup monitor=10000 on rdo7-node1
-  * Resource action: neutron-ovs-cleanup monitor=10000 on rdo7-node2
-  * Resource action: neutron-ovs-cleanup monitor=10000 on rdo7-node3
-  * Resource action: neutron-netns-cleanup start on rdo7-node1
-  * Resource action: neutron-netns-cleanup start on rdo7-node2
-  * Resource action: neutron-netns-cleanup start on rdo7-node3
-  * Pseudo action: neutron-netns-cleanup-clone_running_0
-  * Pseudo action: neutron-openvswitch-agent-clone_start_0
-  * Resource action: nova-api monitor=60000 on rdo7-node1
-  * Resource action: nova-api monitor=60000 on rdo7-node2
-  * Resource action: nova-api monitor=60000 on rdo7-node3
-  * Resource action: nova-scheduler start on rdo7-node1
-  * Resource action: nova-scheduler start on rdo7-node2
-  * Resource action: nova-scheduler start on rdo7-node3
-  * Pseudo action: nova-scheduler-clone_running_0
-  * Pseudo action: nova-conductor-clone_start_0
-  * Resource action: ceilometer-api monitor=60000 on rdo7-node2
-  * Resource action: ceilometer-api monitor=60000 on rdo7-node3
-  * Resource action: ceilometer-api monitor=60000 on rdo7-node1
-  * Resource action: ceilometer-delay start on rdo7-node2
-  * Resource action: ceilometer-delay start on rdo7-node3
-  * Resource action: ceilometer-delay start on rdo7-node1
-  * Pseudo action: ceilometer-delay-clone_running_0
-  * Pseudo action: ceilometer-alarm-evaluator-clone_start_0
-  * Resource action: neutron-openvswitch-agent-compute monitor=60000 on mrg-07
-  * Resource action: neutron-openvswitch-agent-compute monitor=60000 on mrg-08
-  * Resource action: neutron-openvswitch-agent-compute monitor=60000 on mrg-09
-  * Resource action: libvirtd-compute start on mrg-07
-  * Resource action: libvirtd-compute start on mrg-08
-  * Resource action: libvirtd-compute start on mrg-09
-  * Pseudo action: libvirtd-compute-clone_running_0
-  * Resource action: neutron-netns-cleanup monitor=10000 on rdo7-node1
-  * Resource action: neutron-netns-cleanup monitor=10000 on rdo7-node2
-  * Resource action: neutron-netns-cleanup monitor=10000 on rdo7-node3
-  * Resource action: neutron-openvswitch-agent start on rdo7-node1
-  * Resource action: neutron-openvswitch-agent start on rdo7-node2
-  * Resource action: neutron-openvswitch-agent start on rdo7-node3
-  * Pseudo action: neutron-openvswitch-agent-clone_running_0
-  * Pseudo action: neutron-dhcp-agent-clone_start_0
-  * Resource action: nova-scheduler monitor=60000 on rdo7-node1
-  * Resource action: nova-scheduler monitor=60000 on rdo7-node2
-  * Resource action: nova-scheduler monitor=60000 on rdo7-node3
-  * Resource action: nova-conductor start on rdo7-node1
-  * Resource action: nova-conductor start on rdo7-node2
-  * Resource action: nova-conductor start on rdo7-node3
-  * Pseudo action: nova-conductor-clone_running_0
-  * Resource action: ceilometer-delay monitor=10000 on rdo7-node2
-  * Resource action: ceilometer-delay monitor=10000 on rdo7-node3
-  * Resource action: ceilometer-delay monitor=10000 on rdo7-node1
-  * Resource action: ceilometer-alarm-evaluator start on rdo7-node2
-  * Resource action: ceilometer-alarm-evaluator start on rdo7-node3
-  * Resource action: ceilometer-alarm-evaluator start on rdo7-node1
-  * Pseudo action: ceilometer-alarm-evaluator-clone_running_0
-  * Pseudo action: ceilometer-alarm-notifier-clone_start_0
-  * Resource action: libvirtd-compute monitor=60000 on mrg-07
-  * Resource action: libvirtd-compute monitor=60000 on mrg-08
-  * Resource action: libvirtd-compute monitor=60000 on mrg-09
-  * Resource action: fence-nova start on rdo7-node2
-  * Pseudo action: clone-one-or-more:order-nova-conductor-clone-nova-compute-clone-mandatory
-  * Resource action: neutron-openvswitch-agent monitor=60000 on rdo7-node1
-  * Resource action: neutron-openvswitch-agent monitor=60000 on rdo7-node2
-  * Resource action: neutron-openvswitch-agent monitor=60000 on rdo7-node3
-  * Resource action: neutron-dhcp-agent start on rdo7-node1
-  * Resource action: neutron-dhcp-agent start on rdo7-node2
-  * Resource action: neutron-dhcp-agent start on rdo7-node3
-  * Pseudo action: neutron-dhcp-agent-clone_running_0
-  * Pseudo action: neutron-l3-agent-clone_start_0
-  * Resource action: nova-conductor monitor=60000 on rdo7-node1
-  * Resource action: nova-conductor monitor=60000 on rdo7-node2
-  * Resource action: nova-conductor monitor=60000 on rdo7-node3
-  * Resource action: ceilometer-alarm-evaluator monitor=60000 on rdo7-node2
-  * Resource action: ceilometer-alarm-evaluator monitor=60000 on rdo7-node3
-  * Resource action: ceilometer-alarm-evaluator monitor=60000 on rdo7-node1
-  * Resource action: ceilometer-alarm-notifier start on rdo7-node2
-  * Resource action: ceilometer-alarm-notifier start on rdo7-node3
-  * Resource action: ceilometer-alarm-notifier start on rdo7-node1
-  * Pseudo action: ceilometer-alarm-notifier-clone_running_0
-  * Pseudo action: ceilometer-notification-clone_start_0
-  * Resource action: fence-nova monitor=60000 on rdo7-node2
-  * Resource action: neutron-dhcp-agent monitor=60000 on rdo7-node1
-  * Resource action: neutron-dhcp-agent monitor=60000 on rdo7-node2
-  * Resource action: neutron-dhcp-agent monitor=60000 on rdo7-node3
-  * Resource action: neutron-l3-agent start on rdo7-node1
-  * Resource action: neutron-l3-agent start on rdo7-node2
-  * Resource action: neutron-l3-agent start on rdo7-node3
-  * Pseudo action: neutron-l3-agent-clone_running_0
-  * Pseudo action: neutron-metadata-agent-clone_start_0
-  * Resource action: ceilometer-alarm-notifier monitor=60000 on rdo7-node2
-  * Resource action: ceilometer-alarm-notifier monitor=60000 on rdo7-node3
-  * Resource action: ceilometer-alarm-notifier monitor=60000 on rdo7-node1
-  * Resource action: ceilometer-notification start on rdo7-node2
-  * Resource action: ceilometer-notification start on rdo7-node3
-  * Resource action: ceilometer-notification start on rdo7-node1
-  * Pseudo action: ceilometer-notification-clone_running_0
-  * Pseudo action: heat-api-clone_start_0
-  * Pseudo action: clone-one-or-more:order-ceilometer-notification-clone-ceilometer-compute-clone-mandatory
-  * Resource action: neutron-l3-agent monitor=60000 on rdo7-node1
-  * Resource action: neutron-l3-agent monitor=60000 on rdo7-node2
-  * Resource action: neutron-l3-agent monitor=60000 on rdo7-node3
-  * Resource action: neutron-metadata-agent start on rdo7-node1
-  * Resource action: neutron-metadata-agent start on rdo7-node2
-  * Resource action: neutron-metadata-agent start on rdo7-node3
-  * Pseudo action: neutron-metadata-agent-clone_running_0
-  * Resource action: ceilometer-notification monitor=60000 on rdo7-node2
-  * Resource action: ceilometer-notification monitor=60000 on rdo7-node3
-  * Resource action: ceilometer-notification monitor=60000 on rdo7-node1
-  * Resource action: heat-api start on rdo7-node2
-  * Resource action: heat-api start on rdo7-node3
-  * Resource action: heat-api start on rdo7-node1
-  * Pseudo action: heat-api-clone_running_0
-  * Pseudo action: heat-api-cfn-clone_start_0
-  * Pseudo action: ceilometer-compute-clone_start_0
-  * Resource action: neutron-metadata-agent monitor=60000 on rdo7-node1
-  * Resource action: neutron-metadata-agent monitor=60000 on rdo7-node2
-  * Resource action: neutron-metadata-agent monitor=60000 on rdo7-node3
-  * Resource action: heat-api monitor=60000 on rdo7-node2
-  * Resource action: heat-api monitor=60000 on rdo7-node3
-  * Resource action: heat-api monitor=60000 on rdo7-node1
-  * Resource action: heat-api-cfn start on rdo7-node2
-  * Resource action: heat-api-cfn start on rdo7-node3
-  * Resource action: heat-api-cfn start on rdo7-node1
-  * Pseudo action: heat-api-cfn-clone_running_0
-  * Pseudo action: heat-api-cloudwatch-clone_start_0
-  * Resource action: ceilometer-compute start on mrg-07
-  * Resource action: ceilometer-compute start on mrg-08
-  * Resource action: ceilometer-compute start on mrg-09
-  * Pseudo action: ceilometer-compute-clone_running_0
-  * Pseudo action: nova-compute-clone_start_0
-  * Resource action: heat-api-cfn monitor=60000 on rdo7-node2
-  * Resource action: heat-api-cfn monitor=60000 on rdo7-node3
-  * Resource action: heat-api-cfn monitor=60000 on rdo7-node1
-  * Resource action: heat-api-cloudwatch start on rdo7-node2
-  * Resource action: heat-api-cloudwatch start on rdo7-node3
-  * Resource action: heat-api-cloudwatch start on rdo7-node1
-  * Pseudo action: heat-api-cloudwatch-clone_running_0
-  * Pseudo action: heat-engine-clone_start_0
-  * Resource action: ceilometer-compute monitor=60000 on mrg-07
-  * Resource action: ceilometer-compute monitor=60000 on mrg-08
-  * Resource action: ceilometer-compute monitor=60000 on mrg-09
-  * Resource action: nova-compute start on mrg-07
-  * Resource action: nova-compute start on mrg-08
-  * Resource action: nova-compute start on mrg-09
-  * Pseudo action: nova-compute-clone_running_0
-  * Resource action: heat-api-cloudwatch monitor=60000 on rdo7-node2
-  * Resource action: heat-api-cloudwatch monitor=60000 on rdo7-node3
-  * Resource action: heat-api-cloudwatch monitor=60000 on rdo7-node1
-  * Resource action: heat-engine start on rdo7-node2
-  * Resource action: heat-engine start on rdo7-node3
-  * Resource action: heat-engine start on rdo7-node1
-  * Pseudo action: heat-engine-clone_running_0
-  * Pseudo action: nova-compute-clone_post_notify_running_0
-  * Resource action: heat-engine monitor=60000 on rdo7-node2
-  * Resource action: heat-engine monitor=60000 on rdo7-node3
-  * Resource action: heat-engine monitor=60000 on rdo7-node1
-  * Resource action: nova-compute notify on mrg-07
-  * Resource action: nova-compute notify on mrg-08
-  * Resource action: nova-compute notify on mrg-09
-  * Pseudo action: nova-compute-clone_confirmed-post_notify_running_0
-  * Resource action: nova-compute monitor=10000 on mrg-07
-  * Resource action: nova-compute monitor=10000 on mrg-08
-  * Resource action: nova-compute monitor=10000 on mrg-09
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * RemoteOnline: [ mrg-07 mrg-08 mrg-09 ]
@@ -575,7 +259,6 @@ Revised Cluster Status:
     * fence3 (stonith:fence_xvm): Started rdo7-node3
     * Clone Set: lb-haproxy-clone [lb-haproxy]:
       * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * vip-db (ocf:heartbeat:IPaddr2): Started rdo7-node3
     * vip-rabbitmq (ocf:heartbeat:IPaddr2): Started rdo7-node1
     * vip-keystone (ocf:heartbeat:IPaddr2): Started rdo7-node2
@@ -591,145 +274,101 @@ Revised Cluster Status:
     * vip-node (ocf:heartbeat:IPaddr2): Started rdo7-node1
     * Clone Set: galera-master [galera] (promotable):
       * Promoted: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * Clone Set: rabbitmq-server-clone [rabbitmq-server]:
       * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * Clone Set: memcached-clone [memcached]:
       * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * Clone Set: mongodb-clone [mongodb]:
       * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * Clone Set: keystone-clone [keystone]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: glance-fs-clone [glance-fs]:
       * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * Clone Set: glance-registry-clone [glance-registry]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: glance-api-clone [glance-api]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: cinder-api-clone [cinder-api]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: cinder-scheduler-clone [cinder-scheduler]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
-    * cinder-volume (systemd:openstack-cinder-volume): Started rdo7-node2
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
+    * cinder-volume (systemd:openstack-cinder-volume): Stopped
     * Clone Set: swift-fs-clone [swift-fs]:
       * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * Clone Set: swift-account-clone [swift-account]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: swift-container-clone [swift-container]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: swift-object-clone [swift-object]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: swift-proxy-clone [swift-proxy]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
-    * swift-object-expirer (systemd:openstack-swift-object-expirer): Started rdo7-node3
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
+    * swift-object-expirer (systemd:openstack-swift-object-expirer): Stopped
     * Clone Set: neutron-server-clone [neutron-server]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: neutron-scale-clone [neutron-scale] (unique):
-      * neutron-scale:0 (ocf:neutron:NeutronScale): Started rdo7-node1
-      * neutron-scale:1 (ocf:neutron:NeutronScale): Started rdo7-node2
-      * neutron-scale:2 (ocf:neutron:NeutronScale): Started rdo7-node3
+      * neutron-scale:0 (ocf:neutron:NeutronScale): Stopped
+      * neutron-scale:1 (ocf:neutron:NeutronScale): Stopped
+      * neutron-scale:2 (ocf:neutron:NeutronScale): Stopped
     * Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: neutron-l3-agent-clone [neutron-l3-agent]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: nova-consoleauth-clone [nova-consoleauth]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: nova-novncproxy-clone [nova-novncproxy]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: nova-api-clone [nova-api]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: nova-scheduler-clone [nova-scheduler]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: nova-conductor-clone [nova-conductor]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: redis-master [redis] (promotable):
       * Promoted: [ rdo7-node1 ]
       * Unpromoted: [ rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * vip-redis (ocf:heartbeat:IPaddr2): Started rdo7-node1
     * Clone Set: ceilometer-central-clone [ceilometer-central]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: ceilometer-collector-clone [ceilometer-collector]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: ceilometer-api-clone [ceilometer-api]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: ceilometer-delay-clone [ceilometer-delay]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: ceilometer-alarm-evaluator-clone [ceilometer-alarm-evaluator]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: ceilometer-alarm-notifier-clone [ceilometer-alarm-notifier]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: ceilometer-notification-clone [ceilometer-notification]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: heat-api-clone [heat-api]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: heat-api-cfn-clone [heat-api-cfn]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: heat-api-cloudwatch-clone [heat-api-cloudwatch]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: heat-engine-clone [heat-engine]:
-      * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
     * Clone Set: horizon-clone [horizon]:
       * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * Clone Set: neutron-openvswitch-agent-compute-clone [neutron-openvswitch-agent-compute]:
-      * Started: [ mrg-07 mrg-08 mrg-09 ]
-      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * Clone Set: libvirtd-compute-clone [libvirtd-compute]:
-      * Started: [ mrg-07 mrg-08 mrg-09 ]
-      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * Clone Set: ceilometer-compute-clone [ceilometer-compute]:
-      * Started: [ mrg-07 mrg-08 mrg-09 ]
-      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
+      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
     * Clone Set: nova-compute-clone [nova-compute]:
-      * Started: [ mrg-07 mrg-08 mrg-09 ]
-      * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
-    * fence-nova (stonith:fence_compute): Started rdo7-node2
+      * Stopped: [ mrg-07 mrg-08 mrg-09 ]
+    * fence-nova (stonith:fence_compute): Stopped
     * fence-compute (stonith:fence_apc_snmp): Started rdo7-node3
     * mrg-07 (ocf:pacemaker:remote): Started rdo7-node1
     * mrg-08 (ocf:pacemaker:remote): Started rdo7-node2
diff --git a/cts/scheduler/summary/op-defaults-2.summary b/cts/scheduler/summary/op-defaults-2.summary
index c42da11d72e..357d0f96d20 100644
--- a/cts/scheduler/summary/op-defaults-2.summary
+++ b/cts/scheduler/summary/op-defaults-2.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ cluster01 cluster02 ]
 
@@ -17,32 +19,16 @@ Transition Summary:
   * Start ping-rsc-ping ( cluster01 )
 
 Executing Cluster Transition:
-  * Resource action: fencing monitor on cluster02
-  * Resource action: fencing monitor on cluster01
-  * Resource action: ip-rsc monitor on cluster02
-  * Resource action: ip-rsc monitor on cluster01
-  * Resource action: rsc-passes monitor on cluster02
-  * Resource action: rsc-passes monitor on cluster01
-  * Resource action: dummy-rsc monitor on cluster02
-  * Resource action: dummy-rsc monitor on cluster01
-  * Resource action: ping-rsc-ping monitor on cluster02
-  * Resource action: ping-rsc-ping monitor on cluster01
-  * Resource action: fencing start on cluster01
-  * Resource action: ip-rsc start on cluster02
-  * Resource action: rsc-passes start on cluster01
-  * Resource action: dummy-rsc start on cluster02
-  * Resource action: ping-rsc-ping start on cluster01
-  * Resource action: ip-rsc monitor=20000 on cluster02
-  * Resource action: rsc-passes monitor=10000 on cluster01
-  * Resource action: dummy-rsc monitor=10000 on cluster02
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ cluster01 cluster02 ]
 
   * Full List of Resources:
-    * fencing (stonith:fence_xvm): Started cluster01
-    * ip-rsc (ocf:heartbeat:IPaddr2): Started cluster02
-    * rsc-passes (ocf:heartbeat:IPaddr2): Started cluster01
-    * dummy-rsc (ocf:pacemaker:Dummy): Started cluster02
-    * ping-rsc-ping (ocf:pacemaker:ping): Started cluster01
+    * fencing (stonith:fence_xvm): Stopped
+    * ip-rsc (ocf:heartbeat:IPaddr2): Stopped
+    * rsc-passes (ocf:heartbeat:IPaddr2): Stopped
+    * dummy-rsc (ocf:pacemaker:Dummy): Stopped
+    * ping-rsc-ping (ocf:pacemaker:ping): Stopped
diff --git a/cts/scheduler/summary/op-defaults-3.summary b/cts/scheduler/summary/op-defaults-3.summary
index 4e22be762e6..5ce457caf0b 100644
--- a/cts/scheduler/summary/op-defaults-3.summary
+++ b/cts/scheduler/summary/op-defaults-3.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ cluster01 cluster02 ]
 
@@ -11,18 +13,13 @@ Transition Summary:
   * Start dummy-rsc ( cluster02 )
 
 Executing Cluster Transition:
-  * Resource action: fencing monitor on cluster02
-  * Resource action: fencing monitor on cluster01
-  * Resource action: dummy-rsc monitor on cluster02
-  * Resource action: dummy-rsc monitor on cluster01
-  * Resource action: fencing start on cluster01
-  * Resource action: dummy-rsc start on cluster02
-  * Resource action: dummy-rsc monitor=10000 on cluster02
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ cluster01 cluster02 ]
 
   * Full List of Resources:
-    * fencing (stonith:fence_xvm): Started cluster01
-    * dummy-rsc (ocf:pacemaker:Dummy): Started cluster02
+    * fencing (stonith:fence_xvm): Stopped
+    * dummy-rsc (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/op-defaults.summary b/cts/scheduler/summary/op-defaults.summary
index 7e4830e5dbf..84cc5996544 100644
--- a/cts/scheduler/summary/op-defaults.summary
+++ b/cts/scheduler/summary/op-defaults.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ cluster01 cluster02 ]
 
@@ -17,32 +19,16 @@ Transition Summary:
   * Start ping-rsc-ping ( cluster01 )
 
 Executing Cluster Transition:
-  * Resource action: fencing monitor on cluster02
-  * Resource action: fencing monitor on cluster01
-  * Resource action: ip-rsc monitor on cluster02
-  * Resource action: ip-rsc monitor on cluster01
-  * Resource action: ip-rsc2 monitor on cluster02
-  * Resource action: ip-rsc2 monitor on cluster01
-  * Resource action: dummy-rsc monitor on cluster02
-  * Resource action: dummy-rsc monitor on cluster01
-  * Resource action: ping-rsc-ping monitor on cluster02
-  * Resource action: ping-rsc-ping monitor on cluster01
-  * Resource action: fencing start on cluster01
-  * Resource action: ip-rsc start on cluster02
-  * Resource action: ip-rsc2 start on cluster01
-  * Resource action: dummy-rsc start on cluster02
-  * Resource action: ping-rsc-ping start on cluster01
-  * Resource action: ip-rsc monitor=20000 on cluster02
-  * Resource action: ip-rsc2 monitor=10000 on cluster01
-  * Resource action: dummy-rsc monitor=60000 on cluster02
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ cluster01 cluster02 ]
 
   * Full List of Resources:
-    * fencing (stonith:fence_xvm): Started cluster01
-    * ip-rsc (ocf:heartbeat:IPaddr2): Started cluster02
-    * ip-rsc2 (ocf:heartbeat:IPaddr2): Started cluster01
-    * dummy-rsc (ocf:pacemaker:Dummy): Started cluster02
-    * ping-rsc-ping (ocf:pacemaker:ping): Started cluster01
+    * fencing (stonith:fence_xvm): Stopped
+    * ip-rsc (ocf:heartbeat:IPaddr2): Stopped
+    * ip-rsc2 (ocf:heartbeat:IPaddr2): Stopped
+    * dummy-rsc (ocf:pacemaker:Dummy): Stopped
+    * ping-rsc-ping (ocf:pacemaker:ping): Stopped
diff --git a/cts/scheduler/summary/order-clone.summary b/cts/scheduler/summary/order-clone.summary
index d60aa2edb4f..78031c5add7 100644
--- a/cts/scheduler/summary/order-clone.summary
+++ b/cts/scheduler/summary/order-clone.summary
@@ -1,6 +1,8 @@
 4 of 25 resource instances DISABLED and 0 BLOCKED from further action due to failure
 
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ hex-0 hex-7 hex-8 hex-9 ]
 
@@ -23,14 +25,15 @@ Transition Summary:
   * Start fencing-sbd ( hex-0 )
 
 Executing Cluster Transition:
-  * Resource action: fencing-sbd start on hex-0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ hex-0 hex-7 hex-8 hex-9 ]
 
   * Full List of Resources:
-    * fencing-sbd (stonith:external/sbd): Started hex-0
+    * fencing-sbd (stonith:external/sbd): Stopped
     * Clone Set: o2cb-clone [o2cb]:
       * Stopped: [ hex-0 hex-7 hex-8 hex-9 ]
     * Clone Set: vg1-clone [vg1]:
diff --git a/cts/scheduler/summary/order-expired-failure.summary b/cts/scheduler/summary/order-expired-failure.summary
index 7ec061757c5..723d4b34447 100644
--- a/cts/scheduler/summary/order-expired-failure.summary
+++ b/cts/scheduler/summary/order-expired-failure.summary
@@ -1,5 +1,7 @@
 Using the original execution date of: 2018-04-09 07:55:35Z
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * RemoteNode overcloud-novacompute-1: UNCLEAN (offline)
     * Online: [ controller-0 controller-1 controller-2 ]
@@ -35,7 +37,7 @@ Current cluster status:
     * Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]:
       * compute-unfence-trigger (ocf:pacemaker:Dummy): Started overcloud-novacompute-1 (UNCLEAN)
       * Started: [ overcloud-novacompute-0 ]
-      * Stopped: [ controller-0 controller-1 controller-2 ]
+      * Stopped: [ controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
     * nova-evacuate (ocf:openstack:NovaEvacuate): Started controller-0
     * stonith-fence_ipmilan-5254008be2cc (stonith:fence_ipmilan): Started controller-1
     * stonith-fence_ipmilan-525400803f9e (stonith:fence_ipmilan): Started controller-0
@@ -51,22 +53,15 @@ Transition Summary:
   * Start ip-10.0.0.110 ( controller-1 )
   * Recover stonith-fence_compute-fence-nova ( controller-2 )
   * Stop compute-unfence-trigger:1 ( overcloud-novacompute-1 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Resource action: overcloud-novacompute-1 stop on controller-1
-  * Resource action: stonith-fence_compute-fence-nova stop on controller-2
-  * Fencing overcloud-novacompute-1 (reboot)
-  * Cluster action: clear_failcount for overcloud-novacompute-1 on controller-1
-  * Resource action: ip-10.0.0.110 start on controller-1
-  * Resource action: stonith-fence_compute-fence-nova start on controller-2
-  * Resource action: stonith-fence_compute-fence-nova monitor=60000 on controller-2
-  * Pseudo action: compute-unfence-trigger-clone_stop_0
-  * Resource action: ip-10.0.0.110 monitor=10000 on controller-1
-  * Pseudo action: compute-unfence-trigger_stop_0
-  * Pseudo action: compute-unfence-trigger-clone_stopped_0
 Using the original execution date of: 2018-04-09 07:55:35Z
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * RemoteNode overcloud-novacompute-1: UNCLEAN (offline)
     * Online: [ controller-0 controller-1 controller-2 ]
@@ -75,7 +70,7 @@ Revised Cluster Status:
   * Full List of Resources:
     * overcloud-novacompute-0 (ocf:pacemaker:remote): Started controller-0
-    * overcloud-novacompute-1 (ocf:pacemaker:remote): FAILED
+    * overcloud-novacompute-1 (ocf:pacemaker:remote): FAILED controller-1
     * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp13/openstack-rabbitmq:pcmklatest]:
       * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started controller-2
       * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-0
@@ -89,7 +84,7 @@ Revised Cluster Status:
       * redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-0
      * redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-1
     * ip-192.168.24.11 (ocf:heartbeat:IPaddr2): Started controller-2
-    * ip-10.0.0.110 (ocf:heartbeat:IPaddr2): Started controller-1
+    * ip-10.0.0.110 (ocf:heartbeat:IPaddr2): Stopped
     * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1
     * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-2
     * ip-172.17.3.11 (ocf:heartbeat:IPaddr2): Started controller-0
@@ -98,10 +93,11 @@ Revised Cluster Status:
       * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started controller-2
       * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-0
       * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-1
-    * stonith-fence_compute-fence-nova (stonith:fence_compute): Started controller-2
+    * stonith-fence_compute-fence-nova (stonith:fence_compute): FAILED controller-2
     * Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]:
+      * compute-unfence-trigger (ocf:pacemaker:Dummy): Started overcloud-novacompute-1 (UNCLEAN)
       * Started: [ overcloud-novacompute-0 ]
-      * Stopped: [ controller-0 controller-1 controller-2 overcloud-novacompute-1 ]
+      * Stopped: [ controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
     * nova-evacuate (ocf:openstack:NovaEvacuate): Started controller-0
     * stonith-fence_ipmilan-5254008be2cc (stonith:fence_ipmilan): Started controller-1
     * stonith-fence_ipmilan-525400803f9e (stonith:fence_ipmilan): Started controller-0
diff --git a/cts/scheduler/summary/order-first-probes.summary b/cts/scheduler/summary/order-first-probes.summary
index 8648abad209..da4ff805eb1 100644
--- a/cts/scheduler/summary/order-first-probes.summary
+++ b/cts/scheduler/summary/order-first-probes.summary
@@ -1,5 +1,7 @@
 Using the original execution date of: 2016-10-05 07:32:34Z
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Node rh72-01: standby (with active resources)
     * Online: [ rh72-02 ]
@@ -14,24 +16,16 @@ Transition Summary:
   * Start prmDummy2 ( rh72-02 )
 
 Executing Cluster Transition:
-  * Pseudo action: grpDummy_stop_0
-  * Resource action: prmDummy2 monitor on rh72-01
-  * Resource action: prmDummy1 stop on rh72-01
-  * Pseudo action: grpDummy_stopped_0
-  * Pseudo action: grpDummy_start_0
-  * Resource action: prmDummy1 start on rh72-02
-  * Resource action: prmDummy2 start on rh72-02
-  * Pseudo action: grpDummy_running_0
-  * Resource action: prmDummy1 monitor=10000 on rh72-02
-  * Resource action: prmDummy2 monitor=10000 on rh72-02
 Using the original execution date of: 2016-10-05 07:32:34Z
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
-    * Node rh72-01: standby
+    * Node rh72-01: standby (with active resources)
     * Online: [ rh72-02 ]
 
   * Full List of Resources:
     * Resource Group: grpDummy:
-      * prmDummy1 (ocf:pacemaker:Dummy1): Started rh72-02
-      * prmDummy2 (ocf:pacemaker:Dummy2): Started rh72-02
+      * prmDummy1 (ocf:pacemaker:Dummy1): Started rh72-01
+      * prmDummy2 (ocf:pacemaker:Dummy2): Stopped
diff --git a/cts/scheduler/summary/order-mandatory.summary b/cts/scheduler/summary/order-mandatory.summary
index f6856b01fc5..d80b13e11ca 100644
--- a/cts/scheduler/summary/order-mandatory.summary
+++ b/cts/scheduler/summary/order-mandatory.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 ]
 
@@ -10,21 +12,19 @@ Current cluster status:
 
 Transition Summary:
   * Start rsc1 ( node1 )
-  * Restart rsc2 ( node1 ) due to required rsc1 start
+  * Restart rsc2 ( node1 )
   * Stop rsc4 ( node1 ) due to unrunnable rsc3 start
 
 Executing Cluster Transition:
-  * Resource action: rsc1 start on node1
-  * Resource action: rsc2 stop on node1
-  * Resource action: rsc4 stop on node1
-  * Resource action: rsc2 start on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 ]
 
   * Full List of Resources:
-    * rsc1 (ocf:heartbeat:apache): Started node1
+    * rsc1 (ocf:heartbeat:apache): Stopped
     * rsc2 (ocf:heartbeat:apache): Started node1
     * rsc3 (ocf:heartbeat:apache): Stopped
-    * rsc4 (ocf:heartbeat:apache): Stopped
+    * rsc4 (ocf:heartbeat:apache): Started node1
diff --git a/cts/scheduler/summary/order-optional-keyword.summary b/cts/scheduler/summary/order-optional-keyword.summary
index d8a12bf9e7b..1b0acf6bfa8 100644
--- a/cts/scheduler/summary/order-optional-keyword.summary
+++ b/cts/scheduler/summary/order-optional-keyword.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 ]
 
@@ -12,14 +14,15 @@ Transition Summary:
   * Start rsc1 ( node1 )
 
 Executing Cluster Transition:
-  * Resource action: rsc1 start on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 ]
 
   * Full List of Resources:
-    * rsc1 (ocf:heartbeat:apache): Started node1
+    * rsc1 (ocf:heartbeat:apache): Stopped
     * rsc2 (ocf:heartbeat:apache): Started node1
     * rsc3 (ocf:heartbeat:apache): Stopped
     * rsc4 (ocf:heartbeat:apache): Started node1
diff --git a/cts/scheduler/summary/order-optional.summary b/cts/scheduler/summary/order-optional.summary
index d8a12bf9e7b..1b0acf6bfa8 100644
--- a/cts/scheduler/summary/order-optional.summary
+++ b/cts/scheduler/summary/order-optional.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 ]
 
@@ -12,14 +14,15 @@ Transition Summary:
   * Start rsc1 ( node1 )
 
 Executing Cluster Transition:
-  * Resource action: rsc1 start on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 ]
 
   * Full List of Resources:
-    * rsc1 (ocf:heartbeat:apache): Started node1
+    * rsc1 (ocf:heartbeat:apache): Stopped
     * rsc2 (ocf:heartbeat:apache): Started node1
     * rsc3 (ocf:heartbeat:apache): Stopped
     * rsc4 (ocf:heartbeat:apache): Started node1
diff --git a/cts/scheduler/summary/order-required.summary b/cts/scheduler/summary/order-required.summary
index f6856b01fc5..d80b13e11ca 100644
--- a/cts/scheduler/summary/order-required.summary
+++ b/cts/scheduler/summary/order-required.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 ]
 
@@ -10,21 +12,19 @@ Current cluster status:
 
 Transition Summary:
   * Start rsc1 ( node1 )
-  * Restart rsc2 ( node1 ) due to required rsc1 start
+  * Restart rsc2 ( node1 )
   * Stop rsc4 ( node1 ) due to unrunnable rsc3 start
 
 Executing Cluster Transition:
-  * Resource action: rsc1 start on node1
-  * Resource action: rsc2 stop on node1
-  * Resource action: rsc4 stop on node1
-  * Resource action: rsc2 start on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 ]
 
   * Full List of Resources:
-    * rsc1 (ocf:heartbeat:apache): Started node1
+    * rsc1 (ocf:heartbeat:apache): Stopped
     * rsc2 (ocf:heartbeat:apache): Started node1
     * rsc3 (ocf:heartbeat:apache): Stopped
-    * rsc4 (ocf:heartbeat:apache): Stopped
+    * rsc4 (ocf:heartbeat:apache): Started node1
diff --git a/cts/scheduler/summary/order-serialize-set.summary b/cts/scheduler/summary/order-serialize-set.summary
index b0b759b51ce..edb9972f83c 100644
--- a/cts/scheduler/summary/order-serialize-set.summary
+++ b/cts/scheduler/summary/order-serialize-set.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Node xen-a: standby (with active resources)
     * Online: [ xen-b ]
@@ -25,49 +28,23 @@ Transition Summary:
   * Migrate base ( xen-a -> xen-b )
 
 Executing Cluster Transition:
-  * Resource action: xen-a-fencing stop on xen-b
-  * Resource action: xen-a-fencing start on xen-b
-  * Resource action: xen-a-fencing monitor=60000 on xen-b
-  * Resource action: xen-b-fencing stop on xen-a
-  * Resource action: db migrate_to on xen-a
-  * Resource action: db migrate_from on xen-b
-  * Resource action: db stop on xen-a
-  * Resource action: core-101 migrate_to on xen-a
-  * Pseudo action: db_start_0
-  * Resource action: core-101 migrate_from on xen-b
-  * Resource action: core-101 stop on xen-a
-  * Resource action: core-200 migrate_to on xen-a
-  * Resource action: db monitor=10000 on xen-b
-  * Pseudo action: core-101_start_0
-  * Resource action: core-200 migrate_from on xen-b
-  * Resource action: core-200 stop on xen-a
-  * Resource action: edge migrate_to on xen-a
-  * Resource action: core-101 monitor=10000 on xen-b
-  * Pseudo action: core-200_start_0
-  * Resource action: edge migrate_from on xen-b
-  * Resource action: edge stop on xen-a
-  * Resource action: base migrate_to on xen-a
-  * Resource action: core-200 monitor=10000 on xen-b
-  * Pseudo action: edge_start_0
-  * Resource action: base migrate_from on xen-b
-  * Resource action: base stop on xen-a
-  * Resource action: edge monitor=10000 on xen-b
-  * Pseudo action: base_start_0
-  * Resource action: base monitor=10000 on xen-b
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
-    * Node xen-a: standby
+    * Node xen-a: standby (with active resources)
     * Online: [ xen-b ]
 
   * Full List of Resources:
     * xen-a-fencing (stonith:external/ipmi): Started xen-b
-    * xen-b-fencing (stonith:external/ipmi): Stopped
-    * db (ocf:heartbeat:Xen): Started xen-b
+    * xen-b-fencing (stonith:external/ipmi): Started xen-a
+    * db (ocf:heartbeat:Xen): Started xen-a
     * dbreplica (ocf:heartbeat:Xen): Started xen-b
-    * core-101 (ocf:heartbeat:Xen): Started xen-b
-    * core-200 (ocf:heartbeat:Xen): Started xen-b
+    * core-101 (ocf:heartbeat:Xen): Started xen-a
+    * core-200 (ocf:heartbeat:Xen): Started xen-a
     * sysadmin (ocf:heartbeat:Xen): Started xen-b
-    * edge (ocf:heartbeat:Xen): Started xen-b
-    * base (ocf:heartbeat:Xen): Started xen-b
+    * edge (ocf:heartbeat:Xen): Started xen-a
+    * base (ocf:heartbeat:Xen): Started xen-a
     * Email_Alerting (ocf:heartbeat:MailTo): Started xen-b
diff --git a/cts/scheduler/summary/order-serialize.summary b/cts/scheduler/summary/order-serialize.summary
index c7ef3e0e12c..edb9972f83c 100644
--- a/cts/scheduler/summary/order-serialize.summary
+++ b/cts/scheduler/summary/order-serialize.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Node xen-a: standby (with active resources)
     * Online: [ xen-b ]
@@ -25,49 +28,23 @@ Transition Summary:
   * Migrate base ( xen-a -> xen-b )
 
 Executing Cluster Transition:
-  * Resource action: xen-a-fencing stop on xen-b
-  * Resource action: xen-a-fencing start on xen-b
-  * Resource action: xen-a-fencing monitor=60000 on xen-b
-  * Resource action: xen-b-fencing stop on xen-a
xen-b-fencing stop on xen-a - * Resource action: db migrate_to on xen-a - * Resource action: core-101 migrate_to on xen-a - * Resource action: edge migrate_to on xen-a - * Resource action: db migrate_from on xen-b - * Resource action: db stop on xen-a - * Resource action: core-101 migrate_from on xen-b - * Resource action: core-101 stop on xen-a - * Resource action: core-200 migrate_to on xen-a - * Resource action: edge migrate_from on xen-b - * Resource action: edge stop on xen-a - * Resource action: base migrate_to on xen-a - * Pseudo action: db_start_0 - * Pseudo action: core-101_start_0 - * Resource action: core-200 migrate_from on xen-b - * Resource action: core-200 stop on xen-a - * Pseudo action: edge_start_0 - * Resource action: base migrate_from on xen-b - * Resource action: base stop on xen-a - * Resource action: db monitor=10000 on xen-b - * Resource action: core-101 monitor=10000 on xen-b - * Pseudo action: core-200_start_0 - * Resource action: edge monitor=10000 on xen-b - * Pseudo action: base_start_0 - * Resource action: core-200 monitor=10000 on xen-b - * Resource action: base monitor=10000 on xen-b Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: - * Node xen-a: standby + * Node xen-a: standby (with active resources) * Online: [ xen-b ] * Full List of Resources: * xen-a-fencing (stonith:external/ipmi): Started xen-b - * xen-b-fencing (stonith:external/ipmi): Stopped - * db (ocf:heartbeat:Xen): Started xen-b + * xen-b-fencing (stonith:external/ipmi): Started xen-a + * db (ocf:heartbeat:Xen): Started xen-a * dbreplica (ocf:heartbeat:Xen): Started xen-b - * core-101 (ocf:heartbeat:Xen): Started xen-b - * core-200 (ocf:heartbeat:Xen): Started xen-b + * core-101 (ocf:heartbeat:Xen): Started xen-a + * core-200 (ocf:heartbeat:Xen): Started xen-a * sysadmin (ocf:heartbeat:Xen): Started xen-b - * edge (ocf:heartbeat:Xen): Started xen-b - * base (ocf:heartbeat:Xen): Started xen-b + * edge (ocf:heartbeat:Xen): Started xen-a + * base (ocf:heartbeat:Xen): Started xen-a * Email_Alerting (ocf:heartbeat:MailTo): Started xen-b diff --git a/cts/scheduler/summary/order-sets.summary b/cts/scheduler/summary/order-sets.summary index 201ef438300..ca68addc505 100644 --- a/cts/scheduler/summary/order-sets.summary +++ b/cts/scheduler/summary/order-sets.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node ubuntu_2: standby (with active resources) * Online: [ ubuntu_1 ] @@ -16,26 +18,16 @@ Transition Summary: * Move world4 ( ubuntu_2 -> ubuntu_1 ) Executing Cluster Transition: - * Resource action: world4 stop on ubuntu_2 - * Resource action: world3 stop on ubuntu_2 - * Resource action: world2 stop on ubuntu_2 - * Resource action: world1 stop on ubuntu_2 - * Resource action: world1 start on ubuntu_1 - * Resource action: world2 start on ubuntu_1 - * Resource action: world3 start on ubuntu_1 - * Resource action: world4 start on ubuntu_1 - * Resource action: world1 monitor=10000 on ubuntu_1 - * Resource action: world2 monitor=10000 on ubuntu_1 - * Resource action: world3 monitor=10000 on ubuntu_1 - * Resource action: world4 monitor=10000 on ubuntu_1 Revised Cluster Status: + * Cluster Summary: + * Node List: - * Node ubuntu_2: standby + * Node ubuntu_2: standby (with active resources) * Online: [ ubuntu_1 ] * Full List of Resources: - * world1 (ocf:bbnd:world1test): Started ubuntu_1 - * world2 (ocf:bbnd:world2test): Started ubuntu_1 - * world3 (ocf:bbnd:world3test): Started ubuntu_1 - * world4 
(ocf:bbnd:world4test): Started ubuntu_1 + * world1 (ocf:bbnd:world1test): Started ubuntu_2 + * world2 (ocf:bbnd:world2test): Started ubuntu_2 + * world3 (ocf:bbnd:world3test): Started ubuntu_2 + * world4 (ocf:bbnd:world4test): Started ubuntu_2 diff --git a/cts/scheduler/summary/order-wrong-kind.summary b/cts/scheduler/summary/order-wrong-kind.summary index 903a25c7239..359460449ad 100644 --- a/cts/scheduler/summary/order-wrong-kind.summary +++ b/cts/scheduler/summary/order-wrong-kind.summary @@ -1,5 +1,9 @@ -Schema validation of configuration is disabled (support for validate-with set to "none" is deprecated and will be removed in a future release) +Schema validation of configuration is disabled (enabling is encouraged and prevents common misconfigurations) +Schema validation of configuration is disabled (enabling is encouraged and prevents common misconfigurations) Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node1 ] @@ -11,19 +15,20 @@ Current cluster status: Transition Summary: * Start rsc1 ( node1 ) - * Restart rsc2 ( node1 ) due to required rsc1 start + * Restart rsc2 ( node1 ) +Schema validation of configuration is disabled (enabling is encouraged and prevents common misconfigurations) Executing Cluster Transition: - * Resource action: rsc1 start on node1 - * Resource action: rsc2 stop on node1 - * Resource action: rsc2 start on node1 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node1 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node1 + * rsc1 (ocf:heartbeat:apache): Stopped * rsc2 (ocf:heartbeat:apache): Started node1 * rsc3 (ocf:heartbeat:apache): Stopped * rsc4 (ocf:heartbeat:apache): Started node1 diff --git a/cts/scheduler/summary/order1.summary b/cts/scheduler/summary/order1.summary index 59028d7f479..e889b9d6580 100644 --- a/cts/scheduler/summary/order1.summary +++ b/cts/scheduler/summary/order1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -13,21 +15,14 @@ Transition Summary: * Start rsc3 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc2 monitor on node1 - * Resource action: rsc3 monitor on node2 - * Resource action: rsc3 monitor on node1 - * Resource action: rsc1 start on node1 - * Resource action: rsc2 start on node2 - * Resource action: rsc3 start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node1 - * rsc2 (ocf:heartbeat:apache): Started node2 - * rsc3 (ocf:heartbeat:apache): Started node1 + * rsc1 (ocf:heartbeat:apache): Stopped + * rsc2 (ocf:heartbeat:apache): Stopped + * rsc3 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/order2.summary b/cts/scheduler/summary/order2.summary index 285d067df2a..06680d2aaec 100644 --- a/cts/scheduler/summary/order2.summary +++ b/cts/scheduler/summary/order2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -15,25 +17,15 @@ Transition Summary: * Start rsc4 ( node2 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc2 monitor on node2 - * Resource 
action: rsc2 monitor on node1 - * Resource action: rsc3 monitor on node2 - * Resource action: rsc3 monitor on node1 - * Resource action: rsc4 monitor on node2 - * Resource action: rsc4 monitor on node1 - * Resource action: rsc1 start on node1 - * Resource action: rsc4 start on node2 - * Resource action: rsc2 start on node2 - * Resource action: rsc3 start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node1 - * rsc2 (ocf:heartbeat:apache): Started node2 - * rsc3 (ocf:heartbeat:apache): Started node1 - * rsc4 (ocf:heartbeat:apache): Started node2 + * rsc1 (ocf:heartbeat:apache): Stopped + * rsc2 (ocf:heartbeat:apache): Stopped + * rsc3 (ocf:heartbeat:apache): Stopped + * rsc4 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/order3.summary b/cts/scheduler/summary/order3.summary index 9bba0f59459..60abf0a8726 100644 --- a/cts/scheduler/summary/order3.summary +++ b/cts/scheduler/summary/order3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -15,25 +17,15 @@ Transition Summary: * Move rsc4 ( node1 -> node2 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc3 stop on node1 - * Resource action: rsc3 monitor on node2 - * Resource action: rsc4 monitor on node2 - * Resource action: rsc2 stop on node1 - * Resource action: rsc4 stop on node1 - * Resource action: rsc1 stop on node1 - * Resource action: rsc4 start on node2 - * Resource action: rsc1 start on node2 - * Resource action: rsc2 start on node2 - * Resource action: rsc3 start on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node2 - * rsc2 (ocf:heartbeat:apache): Started node2 - * rsc3 (ocf:heartbeat:apache): Started node2 - * rsc4 (ocf:heartbeat:apache): Started node2 + * rsc1 (ocf:heartbeat:apache): Started node1 + * rsc2 (ocf:heartbeat:apache): Started node1 + * rsc3 (ocf:heartbeat:apache): Started node1 + * rsc4 (ocf:heartbeat:apache): Started node1 diff --git a/cts/scheduler/summary/order4.summary b/cts/scheduler/summary/order4.summary index 59028d7f479..e889b9d6580 100644 --- a/cts/scheduler/summary/order4.summary +++ b/cts/scheduler/summary/order4.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -13,21 +15,14 @@ Transition Summary: * Start rsc3 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc2 monitor on node1 - * Resource action: rsc3 monitor on node2 - * Resource action: rsc3 monitor on node1 - * Resource action: rsc1 start on node1 - * Resource action: rsc2 start on node2 - * Resource action: rsc3 start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node1 - * rsc2 (ocf:heartbeat:apache): Started node2 - * rsc3 (ocf:heartbeat:apache): Started node1 + * rsc1 (ocf:heartbeat:apache): Stopped + * rsc2 (ocf:heartbeat:apache): Stopped + * rsc3 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/order5.summary b/cts/scheduler/summary/order5.summary index 6a841e3727d..0a2932c4fd6 100644 --- 
a/cts/scheduler/summary/order5.summary +++ b/cts/scheduler/summary/order5.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -19,33 +21,19 @@ Transition Summary: * Move rsc8 ( node2 -> node1 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc2 stop on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc3 monitor on node2 - * Resource action: rsc4 stop on node1 - * Resource action: rsc4 monitor on node2 - * Resource action: rsc5 monitor on node1 - * Resource action: rsc6 stop on node2 - * Resource action: rsc6 monitor on node1 - * Resource action: rsc7 monitor on node1 - * Resource action: rsc8 stop on node2 - * Resource action: rsc8 monitor on node1 - * Resource action: rsc2 start on node2 - * Resource action: rsc4 start on node2 - * Resource action: rsc6 start on node1 - * Resource action: rsc8 start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc1 (ocf:heartbeat:apache): Started node1 - * rsc2 (ocf:heartbeat:apache): Started node2 + * rsc2 (ocf:heartbeat:apache): Started node1 * rsc3 (ocf:heartbeat:apache): Started node1 - * rsc4 (ocf:heartbeat:apache): Started node2 + * rsc4 (ocf:heartbeat:apache): Started node1 * rsc5 (ocf:heartbeat:apache): Started node2 - * rsc6 (ocf:heartbeat:apache): Started node1 + * rsc6 (ocf:heartbeat:apache): Started node2 * rsc7 (ocf:heartbeat:apache): Started node2 - * rsc8 (ocf:heartbeat:apache): Started node1 + * rsc8 (ocf:heartbeat:apache): Started node2 diff --git a/cts/scheduler/summary/order6.summary b/cts/scheduler/summary/order6.summary index 6a841e3727d..debd506ff23 100644 --- a/cts/scheduler/summary/order6.summary +++ b/cts/scheduler/summary/order6.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node1 node2 ] @@ -19,33 +22,20 @@ Transition Summary: * Move rsc8 ( node2 -> node1 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc2 stop on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc3 monitor on node2 - * Resource action: rsc4 stop on node1 - * Resource action: rsc4 monitor on node2 - * Resource action: rsc5 monitor on node1 - * Resource action: rsc6 stop on node2 - * Resource action: rsc6 monitor on node1 - * Resource action: rsc7 monitor on node1 - * Resource action: rsc8 stop on node2 - * Resource action: rsc8 monitor on node1 - * Resource action: rsc2 start on node2 - * Resource action: rsc4 start on node2 - * Resource action: rsc6 start on node1 - * Resource action: rsc8 start on node1 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc1 (ocf:heartbeat:apache): Started node1 - * rsc2 (ocf:heartbeat:apache): Started node2 + * rsc2 (ocf:heartbeat:apache): Started node1 * rsc3 (ocf:heartbeat:apache): Started node1 - * rsc4 (ocf:heartbeat:apache): Started node2 + * rsc4 (ocf:heartbeat:apache): Started node1 * rsc5 (ocf:heartbeat:apache): Started node2 - * rsc6 (ocf:heartbeat:apache): Started node1 + * rsc6 (ocf:heartbeat:apache): Started node2 * rsc7 (ocf:heartbeat:apache): Started node2 - * rsc8 (ocf:heartbeat:apache): Started node1 + * rsc8 (ocf:heartbeat:apache): Started node2 diff --git a/cts/scheduler/summary/order7.summary 
b/cts/scheduler/summary/order7.summary index 1cc76813c55..9f49c7c4067 100644 --- a/cts/scheduler/summary/order7.summary +++ b/cts/scheduler/summary/order7.summary @@ -1,6 +1,8 @@ 0 of 6 resource instances DISABLED and 1 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 ] @@ -19,22 +21,17 @@ Transition Summary: * Start rscC ( node1 ) due to unrunnable rscA start (blocked) Executing Cluster Transition: - * Resource action: rsc2 monitor on node1 - * Resource action: rsc3 monitor on node1 - * Resource action: rscB monitor on node1 - * Resource action: rscC monitor on node1 - * Resource action: rsc2 start on node1 - * Resource action: rsc3 start on node1 - * Resource action: rscB start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 ] * Full List of Resources: * rsc1 (ocf:heartbeat:apache): Started node1 - * rsc2 (ocf:heartbeat:apache): Started node1 - * rsc3 (ocf:heartbeat:apache): Started node1 + * rsc2 (ocf:heartbeat:apache): Stopped + * rsc3 (ocf:heartbeat:apache): Stopped * rscA (ocf:heartbeat:apache): FAILED node1 (blocked) - * rscB (ocf:heartbeat:apache): Started node1 + * rscB (ocf:heartbeat:apache): Stopped * rscC (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/order_constraint_stops_promoted.summary b/cts/scheduler/summary/order_constraint_stops_promoted.summary index e888be5e0ab..a85fcb78288 100644 --- a/cts/scheduler/summary/order_constraint_stops_promoted.summary +++ b/cts/scheduler/summary/order_constraint_stops_promoted.summary @@ -1,6 +1,8 @@ 1 of 2 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder fc16-builder2 ] @@ -10,35 +12,18 @@ Current cluster status: * NATIVE_RSC_B (ocf:pacemaker:Dummy): Started fc16-builder2 (disabled) Transition Summary: - * Stop NATIVE_RSC_A:0 ( Promoted fc16-builder ) due to required NATIVE_RSC_B start + * Stop NATIVE_RSC_A:0 ( Promoted fc16-builder ) due to unrunnable NATIVE_RSC_B start * Stop NATIVE_RSC_B ( fc16-builder2 ) due to node availability Executing Cluster Transition: - * Pseudo action: PROMOTABLE_RSC_A_pre_notify_demote_0 - * Resource action: NATIVE_RSC_A:0 notify on fc16-builder - * Pseudo action: PROMOTABLE_RSC_A_confirmed-pre_notify_demote_0 - * Pseudo action: PROMOTABLE_RSC_A_demote_0 - * Resource action: NATIVE_RSC_A:0 demote on fc16-builder - * Pseudo action: PROMOTABLE_RSC_A_demoted_0 - * Pseudo action: PROMOTABLE_RSC_A_post_notify_demoted_0 - * Resource action: NATIVE_RSC_A:0 notify on fc16-builder - * Pseudo action: PROMOTABLE_RSC_A_confirmed-post_notify_demoted_0 - * Pseudo action: PROMOTABLE_RSC_A_pre_notify_stop_0 - * Resource action: NATIVE_RSC_A:0 notify on fc16-builder - * Pseudo action: PROMOTABLE_RSC_A_confirmed-pre_notify_stop_0 - * Pseudo action: PROMOTABLE_RSC_A_stop_0 - * Resource action: NATIVE_RSC_A:0 stop on fc16-builder - * Resource action: NATIVE_RSC_A:0 delete on fc16-builder2 - * Pseudo action: PROMOTABLE_RSC_A_stopped_0 - * Pseudo action: PROMOTABLE_RSC_A_post_notify_stopped_0 - * Pseudo action: PROMOTABLE_RSC_A_confirmed-post_notify_stopped_0 - * Resource action: NATIVE_RSC_B stop on fc16-builder2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder fc16-builder2 ] * Full List of Resources: * Clone Set: PROMOTABLE_RSC_A [NATIVE_RSC_A] (promotable): - * Stopped: [ fc16-builder fc16-builder2 ] - * NATIVE_RSC_B (ocf:pacemaker:Dummy): Stopped 
(disabled) + * Promoted: [ fc16-builder ] + * NATIVE_RSC_B (ocf:pacemaker:Dummy): Started fc16-builder2 (disabled) diff --git a/cts/scheduler/summary/order_constraint_stops_unpromoted.summary b/cts/scheduler/summary/order_constraint_stops_unpromoted.summary index 2898d2ec984..b31db0e7cee 100644 --- a/cts/scheduler/summary/order_constraint_stops_unpromoted.summary +++ b/cts/scheduler/summary/order_constraint_stops_unpromoted.summary @@ -1,6 +1,8 @@ 1 of 2 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * OFFLINE: [ fc16-builder2 ] @@ -11,26 +13,19 @@ Current cluster status: * NATIVE_RSC_B (ocf:pacemaker:Dummy): Started fc16-builder (disabled) Transition Summary: - * Stop NATIVE_RSC_A:0 ( Unpromoted fc16-builder ) due to required NATIVE_RSC_B start + * Stop NATIVE_RSC_A:0 ( Unpromoted fc16-builder ) due to unrunnable NATIVE_RSC_B start * Stop NATIVE_RSC_B ( fc16-builder ) due to node availability Executing Cluster Transition: - * Pseudo action: PROMOTABLE_RSC_A_pre_notify_stop_0 - * Resource action: NATIVE_RSC_A:0 notify on fc16-builder - * Pseudo action: PROMOTABLE_RSC_A_confirmed-pre_notify_stop_0 - * Pseudo action: PROMOTABLE_RSC_A_stop_0 - * Resource action: NATIVE_RSC_A:0 stop on fc16-builder - * Pseudo action: PROMOTABLE_RSC_A_stopped_0 - * Pseudo action: PROMOTABLE_RSC_A_post_notify_stopped_0 - * Pseudo action: PROMOTABLE_RSC_A_confirmed-post_notify_stopped_0 - * Resource action: NATIVE_RSC_B stop on fc16-builder Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * OFFLINE: [ fc16-builder2 ] * Full List of Resources: * Clone Set: PROMOTABLE_RSC_A [NATIVE_RSC_A] (promotable): - * Stopped: [ fc16-builder fc16-builder2 ] - * NATIVE_RSC_B (ocf:pacemaker:Dummy): Stopped (disabled) + * Unpromoted: [ fc16-builder ] + * NATIVE_RSC_B (ocf:pacemaker:Dummy): Started fc16-builder (disabled) diff --git a/cts/scheduler/summary/ordered-set-basic-startup.summary b/cts/scheduler/summary/ordered-set-basic-startup.summary index 2554358f7ba..06da0dc177d 100644 --- a/cts/scheduler/summary/ordered-set-basic-startup.summary +++ b/cts/scheduler/summary/ordered-set-basic-startup.summary @@ -1,6 +1,8 @@ 2 of 6 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * OFFLINE: [ fc16-builder2 ] @@ -20,22 +22,17 @@ Transition Summary: * Start F ( fc16-builder ) due to unrunnable D start (blocked) Executing Cluster Transition: - * Resource action: A monitor on fc16-builder - * Resource action: B monitor on fc16-builder - * Resource action: C monitor on fc16-builder - * Resource action: D monitor on fc16-builder - * Resource action: E monitor on fc16-builder - * Resource action: F monitor on fc16-builder - * Resource action: B start on fc16-builder Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ fc16-builder ] * OFFLINE: [ fc16-builder2 ] * Full List of Resources: * A (ocf:pacemaker:Dummy): Stopped - * B (ocf:pacemaker:Dummy): Started fc16-builder + * B (ocf:pacemaker:Dummy): Stopped * C (ocf:pacemaker:Dummy): Stopped (disabled) * D (ocf:pacemaker:Dummy): Stopped (disabled) * E (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/ordered-set-natural.summary b/cts/scheduler/summary/ordered-set-natural.summary index b944e0d6f40..58962f3bc5b 100644 --- a/cts/scheduler/summary/ordered-set-natural.summary +++ 
b/cts/scheduler/summary/ordered-set-natural.summary @@ -1,6 +1,9 @@ 3 of 15 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node1 node2 ] @@ -33,6 +36,9 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/origin.summary b/cts/scheduler/summary/origin.summary index 32514e2704c..726afd72bcd 100644 --- a/cts/scheduler/summary/origin.summary +++ b/cts/scheduler/summary/origin.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 ] @@ -8,9 +10,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: resD monitor=3600000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 ] diff --git a/cts/scheduler/summary/orphan-0.summary b/cts/scheduler/summary/orphan-0.summary index ddab295dac2..9f10847f0f4 100644 --- a/cts/scheduler/summary/orphan-0.summary +++ b/cts/scheduler/summary/orphan-0.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] @@ -12,21 +15,11 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: DcIPaddr monitor on c001n08 - * Resource action: DcIPaddr monitor on c001n03 - * Resource action: DcIPaddr monitor on c001n01 - * Resource action: rsc_c001n02 monitor on c001n08 - * Resource action: rsc_c001n02 monitor on c001n03 - * Resource action: rsc_c001n02 monitor on c001n01 - * Resource action: rsc_c001n03 monitor=6000 on c001n03 - * Resource action: rsc_c001n03 monitor on c001n08 - * Resource action: rsc_c001n03 monitor on c001n02 - * Resource action: rsc_c001n03 monitor on c001n01 - * Resource action: rsc_c001n01 monitor on c001n08 - * Resource action: rsc_c001n01 monitor on c001n03 - * Resource action: rsc_c001n01 monitor on c001n02 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] diff --git a/cts/scheduler/summary/orphan-1.summary b/cts/scheduler/summary/orphan-1.summary index f0774f6e9a9..cd8387673fc 100644 --- a/cts/scheduler/summary/orphan-1.summary +++ b/cts/scheduler/summary/orphan-1.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] @@ -13,25 +16,11 @@ Transition Summary: * Stop rsc_c001n08 ( c001n08 ) due to node availability Executing Cluster Transition: - * Resource action: DcIPaddr monitor on c001n08 - * Resource action: DcIPaddr monitor on c001n03 - * Resource action: DcIPaddr monitor on c001n01 - * Resource action: rsc_c001n02 monitor on c001n08 - * Resource action: rsc_c001n02 monitor on c001n03 - * Resource action: rsc_c001n02 monitor on c001n01 - * Resource action: rsc_c001n02 cancel=5000 on c001n02 - * Resource action: rsc_c001n03 monitor=6000 on c001n03 - * Resource action: rsc_c001n03 monitor on c001n08 - * Resource action: rsc_c001n03 monitor on c001n02 - * Resource action: rsc_c001n03 monitor on c001n01 - * Resource action: rsc_c001n03 cancel=5000 on c001n03 - * Resource 
action: rsc_c001n01 monitor on c001n08 - * Resource action: rsc_c001n01 monitor on c001n03 - * Resource action: rsc_c001n01 monitor on c001n02 - * Resource action: rsc_c001n08 stop on c001n08 - * Resource action: rsc_c001n08 delete on c001n08 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] @@ -40,3 +29,4 @@ Revised Cluster Status: * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02 * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03 * rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01 + * rsc_c001n08 (ocf:heartbeat:IPaddr): ORPHANED Started c001n08 diff --git a/cts/scheduler/summary/orphan-2.summary b/cts/scheduler/summary/orphan-2.summary index 07f7c5ced13..cd8387673fc 100644 --- a/cts/scheduler/summary/orphan-2.summary +++ b/cts/scheduler/summary/orphan-2.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] @@ -13,27 +16,11 @@ Transition Summary: * Stop rsc_c001n08 ( c001n08 ) due to node availability Executing Cluster Transition: - * Resource action: DcIPaddr monitor on c001n08 - * Resource action: DcIPaddr monitor on c001n03 - * Resource action: DcIPaddr monitor on c001n01 - * Resource action: rsc_c001n02 monitor on c001n08 - * Resource action: rsc_c001n02 monitor on c001n03 - * Resource action: rsc_c001n02 monitor on c001n01 - * Resource action: rsc_c001n02 cancel=5000 on c001n02 - * Resource action: rsc_c001n03 monitor=6000 on c001n03 - * Resource action: rsc_c001n03 monitor on c001n08 - * Resource action: rsc_c001n03 monitor on c001n02 - * Resource action: rsc_c001n03 monitor on c001n01 - * Resource action: rsc_c001n03 cancel=5000 on c001n03 - * Resource action: rsc_c001n01 monitor on c001n08 - * Resource action: rsc_c001n01 monitor on c001n03 - * Resource action: rsc_c001n01 monitor on c001n02 - * Cluster action: clear_failcount for rsc_c001n08 on c001n08 - * Cluster action: clear_failcount for rsc_c001n08 on c001n02 - * Resource action: rsc_c001n08 stop on c001n08 - * Resource action: rsc_c001n08 delete on c001n08 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] @@ -42,3 +29,4 @@ Revised Cluster Status: * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02 * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03 * rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01 + * rsc_c001n08 (ocf:heartbeat:IPaddr): ORPHANED Started c001n08 diff --git a/cts/scheduler/summary/params-0.summary b/cts/scheduler/summary/params-0.summary index ee291fc98f2..e4ce3379e92 100644 --- a/cts/scheduler/summary/params-0.summary +++ b/cts/scheduler/summary/params-0.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] @@ -12,23 +14,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: DcIPaddr monitor on c001n08 - * Resource action: DcIPaddr monitor on c001n03 - * Resource action: DcIPaddr monitor on c001n01 - * Resource action: rsc_c001n08 monitor on c001n03 - * Resource action: rsc_c001n08 monitor on c001n02 - * Resource action: rsc_c001n08 monitor on c001n01 - * Resource action: rsc_c001n02 monitor on c001n08 - * Resource action: rsc_c001n02 monitor on c001n03 - * Resource action: rsc_c001n02 monitor on c001n01 - * 
Resource action: rsc_c001n03 monitor on c001n08 - * Resource action: rsc_c001n03 monitor on c001n02 - * Resource action: rsc_c001n03 monitor on c001n01 - * Resource action: rsc_c001n01 monitor on c001n08 - * Resource action: rsc_c001n01 monitor on c001n03 - * Resource action: rsc_c001n01 monitor on c001n02 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] diff --git a/cts/scheduler/summary/params-1.summary b/cts/scheduler/summary/params-1.summary index 7150d36bf24..72741caef9c 100644 --- a/cts/scheduler/summary/params-1.summary +++ b/cts/scheduler/summary/params-1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] @@ -13,29 +15,10 @@ Transition Summary: * Restart DcIPaddr ( c001n02 ) due to resource definition change Executing Cluster Transition: - * Resource action: DcIPaddr stop on c001n02 - * Resource action: DcIPaddr monitor on c001n08 - * Resource action: DcIPaddr monitor on c001n03 - * Resource action: DcIPaddr monitor on c001n01 - * Resource action: DcIPaddr start on c001n02 - * Resource action: DcIPaddr monitor=5000 on c001n02 - * Resource action: rsc_c001n08 monitor on c001n03 - * Resource action: rsc_c001n08 monitor on c001n02 - * Resource action: rsc_c001n08 monitor on c001n01 - * Resource action: rsc_c001n08 monitor=5000 on c001n08 - * Resource action: rsc_c001n02 monitor=6000 on c001n02 - * Resource action: rsc_c001n02 monitor on c001n08 - * Resource action: rsc_c001n02 monitor on c001n03 - * Resource action: rsc_c001n02 monitor on c001n01 - * Resource action: rsc_c001n02 cancel=5000 on c001n02 - * Resource action: rsc_c001n03 monitor on c001n08 - * Resource action: rsc_c001n03 monitor on c001n02 - * Resource action: rsc_c001n03 monitor on c001n01 - * Resource action: rsc_c001n01 monitor on c001n08 - * Resource action: rsc_c001n01 monitor on c001n03 - * Resource action: rsc_c001n01 monitor on c001n02 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] diff --git a/cts/scheduler/summary/params-2.summary b/cts/scheduler/summary/params-2.summary index c43892d9d11..372b264adea 100644 --- a/cts/scheduler/summary/params-2.summary +++ b/cts/scheduler/summary/params-2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] @@ -13,25 +15,14 @@ Transition Summary: * Start rsc3 ( node3 ) Executing Cluster Transition: - * Resource action: rsc1 stop on node1 - * Resource action: rsc1 monitor on node3 - * Resource action: rsc1 monitor on node2 - * Resource action: rsc2 monitor on node3 - * Resource action: rsc2 monitor on node1 - * Resource action: rsc2 stop on node2 - * Resource action: rsc3 monitor on node2 - * Resource action: rsc3 monitor on node1 - * Resource action: rsc3 delete on node3 - * Cluster action: do_shutdown on node1 - * Resource action: rsc2 delete on node2 - * Resource action: rsc3 start on node3 - * Resource action: rsc2 start on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: - * rsc1 (lsb:apache): Stopped + * rsc1 (lsb:apache): Started node1 * rsc2 (lsb:apache): Started node2 - * rsc3 (lsb:apache): Started node3 + * rsc3 (lsb:apache): Stopped diff --git a/cts/scheduler/summary/params-3.summary b/cts/scheduler/summary/params-3.summary index de38fbfa090..6bf3f1fe7b5 100644 --- a/cts/scheduler/summary/params-3.summary +++ 
b/cts/scheduler/summary/params-3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] @@ -13,29 +15,10 @@ Transition Summary: * Restart DcIPaddr ( c001n02 ) Executing Cluster Transition: - * Resource action: DcIPaddr monitor on c001n08 - * Resource action: DcIPaddr monitor on c001n03 - * Resource action: DcIPaddr monitor on c001n01 - * Resource action: DcIPaddr stop on c001n02 - * Resource action: rsc_c001n08 monitor on c001n03 - * Resource action: rsc_c001n08 monitor on c001n02 - * Resource action: rsc_c001n08 monitor on c001n01 - * Resource action: rsc_c001n08 monitor=5000 on c001n08 - * Resource action: rsc_c001n02 monitor=6000 on c001n02 - * Resource action: rsc_c001n02 monitor on c001n08 - * Resource action: rsc_c001n02 monitor on c001n03 - * Resource action: rsc_c001n02 monitor on c001n01 - * Resource action: rsc_c001n02 cancel=5000 on c001n02 - * Resource action: rsc_c001n03 monitor on c001n08 - * Resource action: rsc_c001n03 monitor on c001n02 - * Resource action: rsc_c001n03 monitor on c001n01 - * Resource action: rsc_c001n01 monitor on c001n08 - * Resource action: rsc_c001n01 monitor on c001n03 - * Resource action: rsc_c001n01 monitor on c001n02 - * Resource action: DcIPaddr start on c001n02 - * Resource action: DcIPaddr monitor=5000 on c001n02 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] diff --git a/cts/scheduler/summary/params-4.summary b/cts/scheduler/summary/params-4.summary index d6a71474afb..f0e73d78c85 100644 --- a/cts/scheduler/summary/params-4.summary +++ b/cts/scheduler/summary/params-4.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] @@ -13,28 +15,10 @@ Transition Summary: * Reload DcIPaddr ( c001n02 ) Executing Cluster Transition: - * Resource action: DcIPaddr monitor on c001n08 - * Resource action: DcIPaddr monitor on c001n03 - * Resource action: DcIPaddr monitor on c001n01 - * Resource action: DcIPaddr reload-agent on c001n02 - * Resource action: DcIPaddr monitor=5000 on c001n02 - * Resource action: rsc_c001n08 monitor on c001n03 - * Resource action: rsc_c001n08 monitor on c001n02 - * Resource action: rsc_c001n08 monitor on c001n01 - * Resource action: rsc_c001n08 monitor=5000 on c001n08 - * Resource action: rsc_c001n02 monitor=6000 on c001n02 - * Resource action: rsc_c001n02 monitor on c001n08 - * Resource action: rsc_c001n02 monitor on c001n03 - * Resource action: rsc_c001n02 monitor on c001n01 - * Resource action: rsc_c001n02 cancel=5000 on c001n02 - * Resource action: rsc_c001n03 monitor on c001n08 - * Resource action: rsc_c001n03 monitor on c001n02 - * Resource action: rsc_c001n03 monitor on c001n01 - * Resource action: rsc_c001n01 monitor on c001n08 - * Resource action: rsc_c001n01 monitor on c001n03 - * Resource action: rsc_c001n01 monitor on c001n02 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] diff --git a/cts/scheduler/summary/params-5.summary b/cts/scheduler/summary/params-5.summary index 7150d36bf24..72741caef9c 100644 --- a/cts/scheduler/summary/params-5.summary +++ b/cts/scheduler/summary/params-5.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] @@ -13,29 +15,10 @@ Transition Summary: * Restart DcIPaddr ( c001n02 ) due to resource definition change Executing Cluster Transition: - * Resource action: 
DcIPaddr stop on c001n02 - * Resource action: DcIPaddr monitor on c001n08 - * Resource action: DcIPaddr monitor on c001n03 - * Resource action: DcIPaddr monitor on c001n01 - * Resource action: DcIPaddr start on c001n02 - * Resource action: DcIPaddr monitor=5000 on c001n02 - * Resource action: rsc_c001n08 monitor on c001n03 - * Resource action: rsc_c001n08 monitor on c001n02 - * Resource action: rsc_c001n08 monitor on c001n01 - * Resource action: rsc_c001n08 monitor=5000 on c001n08 - * Resource action: rsc_c001n02 monitor=6000 on c001n02 - * Resource action: rsc_c001n02 monitor on c001n08 - * Resource action: rsc_c001n02 monitor on c001n03 - * Resource action: rsc_c001n02 monitor on c001n01 - * Resource action: rsc_c001n02 cancel=5000 on c001n02 - * Resource action: rsc_c001n03 monitor on c001n08 - * Resource action: rsc_c001n03 monitor on c001n02 - * Resource action: rsc_c001n03 monitor on c001n01 - * Resource action: rsc_c001n01 monitor on c001n08 - * Resource action: rsc_c001n01 monitor on c001n03 - * Resource action: rsc_c001n01 monitor on c001n02 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] diff --git a/cts/scheduler/summary/params-6.summary b/cts/scheduler/summary/params-6.summary index 4b5c4807ae3..1f94a2eec02 100644 --- a/cts/scheduler/summary/params-6.summary +++ b/cts/scheduler/summary/params-6.summary @@ -1,31 +1,39 @@ 90 of 337 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ mgmt01 v03-a v03-b ] * Full List of Resources: - * stonith-v02-a (stonith:fence_ipmilan): Stopped (disabled) - * stonith-v02-b (stonith:fence_ipmilan): Stopped (disabled) - * stonith-v02-c (stonith:fence_ipmilan): Stopped (disabled) - * stonith-v02-d (stonith:fence_ipmilan): Stopped (disabled) - * stonith-mgmt01 (stonith:fence_xvm): Started v03-a * stonith-mgmt02 (stonith:meatware): Started v03-a - * stonith-v03-c (stonith:fence_ipmilan): Stopped (disabled) - * stonith-v03-a (stonith:fence_ipmilan): Started v03-b - * stonith-v03-b (stonith:fence_ipmilan): Started mgmt01 - * stonith-v03-d (stonith:fence_ipmilan): Stopped (disabled) * Clone Set: cl-clvmd [clvmd]: * Started: [ mgmt01 v03-a v03-b ] * Clone Set: cl-dlm [dlm]: * Started: [ mgmt01 v03-a v03-b ] * Clone Set: cl-iscsid [iscsid]: * Started: [ mgmt01 v03-a v03-b ] + * Clone Set: cl-multipathd [multipathd]: + * Started: [ mgmt01 v03-a v03-b ] + * Clone Set: cl-libvirt-images-fs [libvirt-images-fs]: + * Started: [ mgmt01 v03-a v03-b ] + * Clone Set: cl-libvirt-install-fs [libvirt-install-fs]: + * Started: [ mgmt01 v03-a v03-b ] + * Clone Set: cl-vds-ok-pool-0-iscsi [vds-ok-pool-0-iscsi]: + * Started: [ mgmt01 v03-a v03-b ] + * Clone Set: cl-vds-ok-pool-0-vg [vds-ok-pool-0-vg]: + * Started: [ mgmt01 v03-a v03-b ] + * Clone Set: cl-vds-ok-pool-1-iscsi [vds-ok-pool-1-iscsi]: + * Started: [ mgmt01 v03-a v03-b ] + * Clone Set: cl-vds-ok-pool-1-vg [vds-ok-pool-1-vg]: + * Started: [ mgmt01 v03-a v03-b ] + * stonith-v03-b (stonith:fence_ipmilan): Started mgmt01 + * stonith-v03-a (stonith:fence_ipmilan): Started v03-b + * stonith-mgmt01 (stonith:fence_xvm): Started v03-a * Clone Set: cl-libvirtd [libvirtd]: * Started: [ v03-a v03-b ] * Stopped: [ mgmt01 ] - * Clone Set: cl-multipathd [multipathd]: - * Started: [ mgmt01 v03-a v03-b ] * Clone Set: cl-node-params [node-params]: * Started: [ v03-a v03-b ] * Stopped: [ mgmt01 ] @@ -59,18 +67,6 @@ Current cluster status: * Clone Set: cl-vlan909-if 
[vlan909-if]: * Started: [ v03-a v03-b ] * Stopped: [ mgmt01 ] - * Clone Set: cl-libvirt-images-fs [libvirt-images-fs]: - * Started: [ mgmt01 v03-a v03-b ] - * Clone Set: cl-libvirt-install-fs [libvirt-install-fs]: - * Started: [ mgmt01 v03-a v03-b ] - * Clone Set: cl-vds-ok-pool-0-iscsi [vds-ok-pool-0-iscsi]: - * Started: [ mgmt01 v03-a v03-b ] - * Clone Set: cl-vds-ok-pool-0-vg [vds-ok-pool-0-vg]: - * Started: [ mgmt01 v03-a v03-b ] - * Clone Set: cl-vds-ok-pool-1-iscsi [vds-ok-pool-1-iscsi]: - * Started: [ mgmt01 v03-a v03-b ] - * Clone Set: cl-vds-ok-pool-1-vg [vds-ok-pool-1-vg]: - * Started: [ mgmt01 v03-a v03-b ] * Clone Set: cl-libvirt-images-pool [libvirt-images-pool]: * Started: [ v03-a v03-b ] * Stopped: [ mgmt01 ] @@ -80,14 +76,31 @@ Current cluster status: * Clone Set: cl-vds-ok-pool-1-pool [vds-ok-pool-1-pool]: * Started: [ v03-a v03-b ] * Stopped: [ mgmt01 ] - * git.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * Clone Set: cl-libvirt-qpid [libvirt-qpid]: * Started: [ v03-a v03-b ] * Stopped: [ mgmt01 ] - * vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b * vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b * vd01-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a * vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b + * f13-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a + * eu2.ca-pages.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b + * Clone Set: cl-vlan200-if [vlan200-if]: + * Started: [ v03-a v03-b ] + * Stopped: [ mgmt01 ] + * lenny-x32-devel-vm (ocf:vds-ok:VirtualDomain): Started v03-a + * sl6-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b + * c5-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a + * Clone Set: cl-mcast-test-net [mcast-test-net]: + * Started: [ v03-a v03-b ] + * Stopped: [ mgmt01 ] + * vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b + * stonith-v02-a (stonith:fence_ipmilan): Stopped (disabled) + * stonith-v02-b (stonith:fence_ipmilan): Stopped (disabled) + * stonith-v02-c (stonith:fence_ipmilan): Stopped (disabled) + * stonith-v02-d (stonith:fence_ipmilan): Stopped (disabled) + * stonith-v03-c (stonith:fence_ipmilan): Stopped (disabled) + * stonith-v03-d (stonith:fence_ipmilan): Stopped (disabled) + * git.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) @@ -100,15 +113,9 @@ Current cluster status: * vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) - * f13-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a - * eu2.ca-pages.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b * zakaz.transferrus.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) - * Clone Set: cl-vlan200-if [vlan200-if]: - * Started: [ v03-a v03-b ] - * Stopped: [ mgmt01 ] * anbriz-gw-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * anbriz-work-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) - * lenny-x32-devel-vm (ocf:vds-ok:VirtualDomain): Started v03-a * 
vptest1.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * vptest2.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * vptest3.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) @@ -169,7 +176,6 @@ Current cluster status: * vptest58.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * vptest59.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * vptest60.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) - * sl6-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b * dist.express-consult.org-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * eu1.ca-pages.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * gotin-bbb-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) @@ -177,50 +183,47 @@ Current cluster status: * metae.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * rodovoepomestie.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * ubuntu9.10-gotin-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) - * c5-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a - * Clone Set: cl-mcast-test-net [mcast-test-net]: - * Started: [ v03-a v03-b ] - * Stopped: [ mgmt01 ] * dist.fly-uni.org-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) Transition Summary: * Reload c5-x64-devel.vds-ok.com-vm ( v03-a ) Executing Cluster Transition: - * Resource action: vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm monitor=10000 on v03-b - * Resource action: vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm monitor=10000 on v03-b - * Resource action: c5-x64-devel.vds-ok.com-vm reload-agent on v03-a - * Resource action: c5-x64-devel.vds-ok.com-vm monitor=10000 on v03-a - * Pseudo action: load_stopped_v03-b - * Pseudo action: load_stopped_v03-a - * Pseudo action: load_stopped_mgmt01 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ mgmt01 v03-a v03-b ] * Full List of Resources: - * stonith-v02-a (stonith:fence_ipmilan): Stopped (disabled) - * stonith-v02-b (stonith:fence_ipmilan): Stopped (disabled) - * stonith-v02-c (stonith:fence_ipmilan): Stopped (disabled) - * stonith-v02-d (stonith:fence_ipmilan): Stopped (disabled) - * stonith-mgmt01 (stonith:fence_xvm): Started v03-a * stonith-mgmt02 (stonith:meatware): Started v03-a - * stonith-v03-c (stonith:fence_ipmilan): Stopped (disabled) - * stonith-v03-a (stonith:fence_ipmilan): Started v03-b - * stonith-v03-b (stonith:fence_ipmilan): Started mgmt01 - * stonith-v03-d (stonith:fence_ipmilan): Stopped (disabled) * Clone Set: cl-clvmd [clvmd]: * Started: [ mgmt01 v03-a v03-b ] * Clone Set: cl-dlm [dlm]: * Started: [ mgmt01 v03-a v03-b ] * Clone Set: cl-iscsid [iscsid]: * Started: [ mgmt01 v03-a v03-b ] + * Clone Set: cl-multipathd [multipathd]: + * Started: [ mgmt01 v03-a v03-b ] + * Clone Set: cl-libvirt-images-fs [libvirt-images-fs]: + * Started: [ mgmt01 v03-a v03-b ] + * Clone Set: cl-libvirt-install-fs [libvirt-install-fs]: + * Started: [ mgmt01 v03-a v03-b ] + * Clone Set: cl-vds-ok-pool-0-iscsi [vds-ok-pool-0-iscsi]: + * Started: [ mgmt01 v03-a v03-b ] + * Clone Set: cl-vds-ok-pool-0-vg [vds-ok-pool-0-vg]: + * Started: [ mgmt01 v03-a v03-b ] + * Clone Set: cl-vds-ok-pool-1-iscsi [vds-ok-pool-1-iscsi]: + * Started: [ mgmt01 v03-a v03-b ] + * Clone Set: cl-vds-ok-pool-1-vg [vds-ok-pool-1-vg]: + * Started: [ mgmt01 v03-a v03-b ] + * stonith-v03-b (stonith:fence_ipmilan): Started mgmt01 + * stonith-v03-a (stonith:fence_ipmilan): Started v03-b + * stonith-mgmt01 (stonith:fence_xvm): Started v03-a * Clone Set: cl-libvirtd [libvirtd]: * Started: [ v03-a v03-b ] * 
Stopped: [ mgmt01 ] - * Clone Set: cl-multipathd [multipathd]: - * Started: [ mgmt01 v03-a v03-b ] * Clone Set: cl-node-params [node-params]: * Started: [ v03-a v03-b ] * Stopped: [ mgmt01 ] @@ -254,18 +257,6 @@ Revised Cluster Status: * Clone Set: cl-vlan909-if [vlan909-if]: * Started: [ v03-a v03-b ] * Stopped: [ mgmt01 ] - * Clone Set: cl-libvirt-images-fs [libvirt-images-fs]: - * Started: [ mgmt01 v03-a v03-b ] - * Clone Set: cl-libvirt-install-fs [libvirt-install-fs]: - * Started: [ mgmt01 v03-a v03-b ] - * Clone Set: cl-vds-ok-pool-0-iscsi [vds-ok-pool-0-iscsi]: - * Started: [ mgmt01 v03-a v03-b ] - * Clone Set: cl-vds-ok-pool-0-vg [vds-ok-pool-0-vg]: - * Started: [ mgmt01 v03-a v03-b ] - * Clone Set: cl-vds-ok-pool-1-iscsi [vds-ok-pool-1-iscsi]: - * Started: [ mgmt01 v03-a v03-b ] - * Clone Set: cl-vds-ok-pool-1-vg [vds-ok-pool-1-vg]: - * Started: [ mgmt01 v03-a v03-b ] * Clone Set: cl-libvirt-images-pool [libvirt-images-pool]: * Started: [ v03-a v03-b ] * Stopped: [ mgmt01 ] @@ -275,14 +266,31 @@ Revised Cluster Status: * Clone Set: cl-vds-ok-pool-1-pool [vds-ok-pool-1-pool]: * Started: [ v03-a v03-b ] * Stopped: [ mgmt01 ] - * git.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * Clone Set: cl-libvirt-qpid [libvirt-qpid]: * Started: [ v03-a v03-b ] * Stopped: [ mgmt01 ] - * vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b * vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b * vd01-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a * vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b + * f13-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a + * eu2.ca-pages.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b + * Clone Set: cl-vlan200-if [vlan200-if]: + * Started: [ v03-a v03-b ] + * Stopped: [ mgmt01 ] + * lenny-x32-devel-vm (ocf:vds-ok:VirtualDomain): Started v03-a + * sl6-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b + * c5-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a + * Clone Set: cl-mcast-test-net [mcast-test-net]: + * Started: [ v03-a v03-b ] + * Stopped: [ mgmt01 ] + * vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b + * stonith-v02-a (stonith:fence_ipmilan): Stopped (disabled) + * stonith-v02-b (stonith:fence_ipmilan): Stopped (disabled) + * stonith-v02-c (stonith:fence_ipmilan): Stopped (disabled) + * stonith-v02-d (stonith:fence_ipmilan): Stopped (disabled) + * stonith-v03-c (stonith:fence_ipmilan): Stopped (disabled) + * stonith-v03-d (stonith:fence_ipmilan): Stopped (disabled) + * git.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) @@ -295,15 +303,9 @@ Revised Cluster Status: * vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) - * f13-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a - * eu2.ca-pages.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b * zakaz.transferrus.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) - * Clone Set: cl-vlan200-if 
[vlan200-if]: - * Started: [ v03-a v03-b ] - * Stopped: [ mgmt01 ] * anbriz-gw-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * anbriz-work-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) - * lenny-x32-devel-vm (ocf:vds-ok:VirtualDomain): Started v03-a * vptest1.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * vptest2.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * vptest3.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) @@ -364,7 +366,6 @@ Revised Cluster Status: * vptest58.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * vptest59.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * vptest60.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) - * sl6-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b * dist.express-consult.org-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * eu1.ca-pages.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * gotin-bbb-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) @@ -372,8 +373,4 @@ Revised Cluster Status: * metae.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * rodovoepomestie.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) * ubuntu9.10-gotin-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) - * c5-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a - * Clone Set: cl-mcast-test-net [mcast-test-net]: - * Started: [ v03-a v03-b ] - * Stopped: [ mgmt01 ] * dist.fly-uni.org-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled) diff --git a/cts/scheduler/summary/partial-live-migration-multiple-active.summary b/cts/scheduler/summary/partial-live-migration-multiple-active.summary index 41819e017f8..cbb6077cc05 100644 --- a/cts/scheduler/summary/partial-live-migration-multiple-active.summary +++ b/cts/scheduler/summary/partial-live-migration-multiple-active.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2021-03-02 21:28:21Z Current cluster status: + * Cluster Summary: + * Node List: * Node node2: standby (with active resources) * Online: [ node1 ] @@ -8,15 +10,16 @@ Current cluster status: * migrator (ocf:pacemaker:Dummy): Started node2 Transition Summary: - * Move migrator ( node2 -> node1 ) + * Migrate migrator ( node2 -> node1 ) Executing Cluster Transition: - * Resource action: migrator stop on node2 - * Resource action: migrator start on node1 - * Resource action: migrator monitor=10000 on node1 + * Resource action: migrator migrate_to on node2 + * Resource action: migrator migrate_from on node1 Using the original execution date of: 2021-03-02 21:28:21Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] diff --git a/cts/scheduler/summary/partial-unmanaged-group.summary b/cts/scheduler/summary/partial-unmanaged-group.summary index 9cb68bc71ae..a5ea59e2f9f 100644 --- a/cts/scheduler/summary/partial-unmanaged-group.summary +++ b/cts/scheduler/summary/partial-unmanaged-group.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2020-01-20 21:19:17Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ rhel8-1 rhel8-2 rhel8-4 rhel8-5 ] * OFFLINE: [ rhel8-3 ] @@ -18,14 +20,11 @@ Transition Summary: * Start interloper ( rhel8-2 ) due to unrunnable grp1b stop (blocked) Executing Cluster Transition: - * Pseudo action: grp1_start_0 - * Resource action: interloper monitor on rhel8-5 - * Resource action: interloper monitor on rhel8-4 - * Resource action: interloper monitor on rhel8-2 - * Resource action: interloper monitor on rhel8-1 Using the original 
execution date of: 2020-01-20 21:19:17Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ rhel8-1 rhel8-2 rhel8-4 rhel8-5 ] * OFFLINE: [ rhel8-3 ] diff --git a/cts/scheduler/summary/pending-node-no-uname.summary b/cts/scheduler/summary/pending-node-no-uname.summary index 5f04fc64534..e9e424cf9db 100644 --- a/cts/scheduler/summary/pending-node-no-uname.summary +++ b/cts/scheduler/summary/pending-node-no-uname.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2023-02-21 12:19:57Z Current cluster status: + * Cluster Summary: + * Node List: * Node node-2: pending * Online: [ node-1 ] @@ -11,10 +13,11 @@ Transition Summary: * Start st-sbd ( node-1 ) blocked Executing Cluster Transition: - * Resource action: st-sbd monitor on node-1 Using the original execution date of: 2023-02-21 12:19:57Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Node node-2: pending * Online: [ node-1 ] diff --git a/cts/scheduler/summary/per-node-attrs.summary b/cts/scheduler/summary/per-node-attrs.summary index 718a8452918..e4da9dfee46 100644 --- a/cts/scheduler/summary/per-node-attrs.summary +++ b/cts/scheduler/summary/per-node-attrs.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ pcmk-1 pcmk-2 pcmk-3 ] @@ -9,14 +11,12 @@ Transition Summary: * Start dummy ( pcmk-1 ) Executing Cluster Transition: - * Resource action: dummy monitor on pcmk-3 - * Resource action: dummy monitor on pcmk-2 - * Resource action: dummy monitor on pcmk-1 - * Resource action: dummy start on pcmk-1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ pcmk-1 pcmk-2 pcmk-3 ] * Full List of Resources: - * dummy (ocf:heartbeat:IPaddr2): Started pcmk-1 + * dummy (ocf:heartbeat:IPaddr2): Stopped diff --git a/cts/scheduler/summary/per-op-failcount.summary b/cts/scheduler/summary/per-op-failcount.summary index a86c2942c5a..c2105793d80 100644 --- a/cts/scheduler/summary/per-op-failcount.summary +++ b/cts/scheduler/summary/per-op-failcount.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2017-04-06 09:04:22Z Current cluster status: + * Cluster Summary: + * Node List: * Node rh73-01-snmp: UNCLEAN (online) * Online: [ rh73-02-snmp ] @@ -15,20 +17,16 @@ Transition Summary: * Move prmStonith2-1 ( rh73-01-snmp -> rh73-02-snmp ) Executing Cluster Transition: - * Pseudo action: prmStonith2-1_stop_0 - * Fencing rh73-01-snmp (reboot) - * Pseudo action: prmDummy_stop_0 - * Resource action: prmStonith2-1 start on rh73-02-snmp - * Resource action: prmDummy start on rh73-02-snmp - * Resource action: prmDummy monitor=10000 on rh73-02-snmp Using the original execution date of: 2017-04-06 09:04:22Z Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node rh73-01-snmp: UNCLEAN (online) * Online: [ rh73-02-snmp ] - * OFFLINE: [ rh73-01-snmp ] * Full List of Resources: - * prmDummy (ocf:pacemaker:Dummy): Started rh73-02-snmp + * prmDummy (ocf:pacemaker:Dummy): FAILED rh73-01-snmp * prmStonith1-1 (stonith:external/ssh): Started rh73-02-snmp - * prmStonith2-1 (stonith:external/ssh): Started rh73-02-snmp + * prmStonith2-1 (stonith:external/ssh): Started rh73-01-snmp diff --git a/cts/scheduler/summary/placement-capacity.summary b/cts/scheduler/summary/placement-capacity.summary index b17d7f0e60b..39771bddfdc 100644 --- a/cts/scheduler/summary/placement-capacity.summary +++ b/cts/scheduler/summary/placement-capacity.summary @@ -1,23 +1,24 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of 
Resources: - * rsc1 (ocf:pacemaker:Dummy): Started node1 * rsc2 (ocf:pacemaker:Dummy): Started node2 + * rsc1 (ocf:pacemaker:Dummy): Started node1 Transition Summary: * Stop rsc1 ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: rsc1 stop on node1 - * Pseudo action: load_stopped_node2 - * Pseudo action: load_stopped_node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:pacemaker:Dummy): Stopped * rsc2 (ocf:pacemaker:Dummy): Started node2 + * rsc1 (ocf:pacemaker:Dummy): Started node1 diff --git a/cts/scheduler/summary/placement-location.summary b/cts/scheduler/summary/placement-location.summary index f38df747d45..a87698bc4ad 100644 --- a/cts/scheduler/summary/placement-location.summary +++ b/cts/scheduler/summary/placement-location.summary @@ -1,25 +1,26 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node1: standby (with active resources) * Online: [ node2 ] * Full List of Resources: - * rsc1 (ocf:pacemaker:Dummy): Started node1 * rsc2 (ocf:pacemaker:Dummy): Started node2 + * rsc1 (ocf:pacemaker:Dummy): Started node1 Transition Summary: * Stop rsc1 ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: rsc1 stop on node1 - * Pseudo action: load_stopped_node2 - * Pseudo action: load_stopped_node1 Revised Cluster Status: + * Cluster Summary: + * Node List: - * Node node1: standby + * Node node1: standby (with active resources) * Online: [ node2 ] * Full List of Resources: - * rsc1 (ocf:pacemaker:Dummy): Stopped * rsc2 (ocf:pacemaker:Dummy): Started node2 + * rsc1 (ocf:pacemaker:Dummy): Started node1 diff --git a/cts/scheduler/summary/placement-priority.summary b/cts/scheduler/summary/placement-priority.summary index 71843cadef9..a87698bc4ad 100644 --- a/cts/scheduler/summary/placement-priority.summary +++ b/cts/scheduler/summary/placement-priority.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node1: standby (with active resources) * Online: [ node2 ] @@ -11,15 +13,14 @@ Transition Summary: * Stop rsc1 ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: rsc1 stop on node1 - * Pseudo action: load_stopped_node1 - * Pseudo action: load_stopped_node2 Revised Cluster Status: + * Cluster Summary: + * Node List: - * Node node1: standby + * Node node1: standby (with active resources) * Online: [ node2 ] * Full List of Resources: * rsc2 (ocf:pacemaker:Dummy): Started node2 - * rsc1 (ocf:pacemaker:Dummy): Stopped + * rsc1 (ocf:pacemaker:Dummy): Started node1 diff --git a/cts/scheduler/summary/placement-stickiness.summary b/cts/scheduler/summary/placement-stickiness.summary index f38df747d45..a87698bc4ad 100644 --- a/cts/scheduler/summary/placement-stickiness.summary +++ b/cts/scheduler/summary/placement-stickiness.summary @@ -1,25 +1,26 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node1: standby (with active resources) * Online: [ node2 ] * Full List of Resources: - * rsc1 (ocf:pacemaker:Dummy): Started node1 * rsc2 (ocf:pacemaker:Dummy): Started node2 + * rsc1 (ocf:pacemaker:Dummy): Started node1 Transition Summary: * Stop rsc1 ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: rsc1 stop on node1 - * Pseudo action: load_stopped_node2 - * Pseudo action: load_stopped_node1 Revised Cluster Status: + * Cluster Summary: + * Node List: - * Node node1: standby + * Node node1: standby (with active resources) 
     * Online: [ node2 ]
 
   * Full List of Resources:
-    * rsc1 (ocf:pacemaker:Dummy): Stopped
     * rsc2 (ocf:pacemaker:Dummy): Started node2
+    * rsc1 (ocf:pacemaker:Dummy): Started node1
diff --git a/cts/scheduler/summary/primitive-with-group-with-clone.summary b/cts/scheduler/summary/primitive-with-group-with-clone.summary
index aa0b96f43d8..ad731866e19 100644
--- a/cts/scheduler/summary/primitive-with-group-with-clone.summary
+++ b/cts/scheduler/summary/primitive-with-group-with-clone.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 node4 node5 ]
 
@@ -20,52 +22,18 @@ Transition Summary:
   * Start group1rsc2 ( node5 )
 
 Executing Cluster Transition:
-  * Resource action: rsc2:0 monitor on node5
-  * Resource action: rsc2:0 monitor on node4
-  * Resource action: rsc2:0 monitor on node1
-  * Resource action: rsc2:1 monitor on node2
-  * Resource action: rsc2:2 monitor on node3
-  * Pseudo action: rsc2-clone_start_0
-  * Resource action: rsc1 monitor on node5
-  * Resource action: rsc1 monitor on node4
-  * Resource action: rsc1 monitor on node3
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Pseudo action: group1_start_0
-  * Resource action: group1rsc1 monitor on node5
-  * Resource action: group1rsc1 monitor on node4
-  * Resource action: group1rsc1 monitor on node3
-  * Resource action: group1rsc1 monitor on node2
-  * Resource action: group1rsc1 monitor on node1
-  * Resource action: group1rsc2 monitor on node5
-  * Resource action: group1rsc2 monitor on node4
-  * Resource action: group1rsc2 monitor on node3
-  * Resource action: group1rsc2 monitor on node2
-  * Resource action: group1rsc2 monitor on node1
-  * Resource action: rsc2:0 start on node5
-  * Resource action: rsc2:1 start on node2
-  * Resource action: rsc2:2 start on node3
-  * Pseudo action: rsc2-clone_running_0
-  * Resource action: rsc1 start on node5
-  * Resource action: group1rsc1 start on node5
-  * Resource action: group1rsc2 start on node5
-  * Resource action: rsc2:0 monitor=10000 on node5
-  * Resource action: rsc2:1 monitor=10000 on node2
-  * Resource action: rsc2:2 monitor=10000 on node3
-  * Resource action: rsc1 monitor=10000 on node5
-  * Pseudo action: group1_running_0
-  * Resource action: group1rsc1 monitor=10000 on node5
-  * Resource action: group1rsc2 monitor=10000 on node5
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 node4 node5 ]
 
   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Started node1
     * Clone Set: rsc2-clone [rsc2]:
-      * Started: [ node2 node3 node5 ]
-    * rsc1 (ocf:pacemaker:Dummy): Started node5
+      * Stopped: [ node1 node2 node3 node4 node5 ]
+    * rsc1 (ocf:pacemaker:Dummy): Stopped
     * Resource Group: group1:
-      * group1rsc1 (ocf:pacemaker:Dummy): Started node5
-      * group1rsc2 (ocf:pacemaker:Dummy): Started node5
+      * group1rsc1 (ocf:pacemaker:Dummy): Stopped
+      * group1rsc2 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/primitive-with-group-with-promoted.summary b/cts/scheduler/summary/primitive-with-group-with-promoted.summary
index b92ce1e50a6..2cdc64e6f34 100644
--- a/cts/scheduler/summary/primitive-with-group-with-promoted.summary
+++ b/cts/scheduler/summary/primitive-with-group-with-promoted.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 node4 node5 ]
 
@@ -20,56 +22,18 @@ Transition Summary:
   * Start group1rsc2 ( node5 )
 
 Executing Cluster Transition:
-  * Resource action: rsc2:0 monitor on node5
-  * Resource action: rsc2:0 monitor on node4
-  * Resource action: rsc2:0 monitor on node1
-  * Resource action: rsc2:1 monitor on node2
-  * Resource action: rsc2:2 monitor on node3
-  * Pseudo action: rsc2-clone_start_0
-  * Resource action: rsc1 monitor on node5
-  * Resource action: rsc1 monitor on node4
-  * Resource action: rsc1 monitor on node3
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Pseudo action: group1_start_0
-  * Resource action: group1rsc1 monitor on node5
-  * Resource action: group1rsc1 monitor on node4
-  * Resource action: group1rsc1 monitor on node3
-  * Resource action: group1rsc1 monitor on node2
-  * Resource action: group1rsc1 monitor on node1
-  * Resource action: group1rsc2 monitor on node5
-  * Resource action: group1rsc2 monitor on node4
-  * Resource action: group1rsc2 monitor on node3
-  * Resource action: group1rsc2 monitor on node2
-  * Resource action: group1rsc2 monitor on node1
-  * Resource action: rsc2:0 start on node5
-  * Resource action: rsc2:1 start on node2
-  * Resource action: rsc2:2 start on node3
-  * Pseudo action: rsc2-clone_running_0
-  * Resource action: rsc1 start on node5
-  * Resource action: group1rsc1 start on node5
-  * Resource action: group1rsc2 start on node5
-  * Resource action: rsc2:1 monitor=11000 on node2
-  * Resource action: rsc2:2 monitor=11000 on node3
-  * Pseudo action: rsc2-clone_promote_0
-  * Resource action: rsc1 monitor=10000 on node5
-  * Pseudo action: group1_running_0
-  * Resource action: group1rsc1 monitor=10000 on node5
-  * Resource action: group1rsc2 monitor=10000 on node5
-  * Resource action: rsc2:0 promote on node5
-  * Pseudo action: rsc2-clone_promoted_0
-  * Resource action: rsc2:0 monitor=10000 on node5
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 node4 node5 ]
 
   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Started node1
     * Clone Set: rsc2-clone [rsc2] (promotable):
-      * Promoted: [ node5 ]
-      * Unpromoted: [ node2 node3 ]
-    * rsc1 (ocf:pacemaker:Dummy): Started node5
+      * Stopped: [ node1 node2 node3 node4 node5 ]
+    * rsc1 (ocf:pacemaker:Dummy): Stopped
     * Resource Group: group1:
-      * group1rsc1 (ocf:pacemaker:Dummy): Started node5
-      * group1rsc2 (ocf:pacemaker:Dummy): Started node5
+      * group1rsc1 (ocf:pacemaker:Dummy): Stopped
+      * group1rsc2 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/primitive-with-unrunnable-group.summary b/cts/scheduler/summary/primitive-with-unrunnable-group.summary
index 5b6c382a600..32b648b210c 100644
--- a/cts/scheduler/summary/primitive-with-unrunnable-group.summary
+++ b/cts/scheduler/summary/primitive-with-unrunnable-group.summary
@@ -1,6 +1,8 @@
 1 of 5 resource instances DISABLED and 0 BLOCKED from further action due to failure
 
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 node4 node5 ]
 
@@ -18,13 +20,10 @@ Transition Summary:
   * Start group1b ( node2 ) due to unrunnable rsc2 start (blocked)
 
 Executing Cluster Transition:
-  * Resource action: rsc2 monitor on node5
-  * Resource action: rsc2 monitor on node4
-  * Resource action: rsc2 monitor on node3
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 node4 node5 ]
 
diff --git a/cts/scheduler/summary/priority-fencing-delay.summary b/cts/scheduler/summary/priority-fencing-delay.summary
index ce5aff2562f..f4504afbd77 100644
--- a/cts/scheduler/summary/priority-fencing-delay.summary
+++ b/cts/scheduler/summary/priority-fencing-delay.summary
@@ -1,8 +1,11 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Node kiff-01: UNCLEAN (offline)
     * Online: [ kiff-02 ]
-    * GuestOnline: [ lxc-01_kiff-02 lxc-02_kiff-02 ]
+    * GuestOnline: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
 
   * Full List of Resources:
     * vm-fs (ocf:heartbeat:Filesystem): FAILED lxc-01_kiff-01
@@ -38,67 +41,37 @@ Transition Summary:
   * Move R-lxc-02_kiff-01 ( kiff-01 -> kiff-02 )
   * Move lxc-01_kiff-01 ( kiff-01 -> kiff-02 )
   * Move lxc-02_kiff-01 ( kiff-01 -> kiff-02 )
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Resource action: vm-fs monitor on lxc-02_kiff-02
-  * Resource action: vm-fs monitor on lxc-01_kiff-02
-  * Pseudo action: fence-kiff-02_stop_0
-  * Resource action: dlm monitor on lxc-02_kiff-02
-  * Resource action: dlm monitor on lxc-01_kiff-02
-  * Resource action: clvmd monitor on lxc-02_kiff-02
-  * Resource action: clvmd monitor on lxc-01_kiff-02
-  * Resource action: shared0 monitor on lxc-02_kiff-02
-  * Resource action: shared0 monitor on lxc-01_kiff-02
-  * Pseudo action: lxc-01_kiff-01_stop_0
-  * Pseudo action: lxc-02_kiff-01_stop_0
-  * Fencing kiff-01 (reboot)
-  * Pseudo action: R-lxc-01_kiff-01_stop_0
-  * Pseudo action: R-lxc-02_kiff-01_stop_0
-  * Pseudo action: stonith-lxc-02_kiff-01-reboot on lxc-02_kiff-01
-  * Pseudo action: stonith-lxc-01_kiff-01-reboot on lxc-01_kiff-01
-  * Pseudo action: vm-fs_stop_0
-  * Resource action: fence-kiff-02 start on kiff-02
-  * Pseudo action: shared0-clone_stop_0
-  * Resource action: R-lxc-01_kiff-01 start on kiff-02
-  * Resource action: R-lxc-02_kiff-01 start on kiff-02
-  * Resource action: lxc-01_kiff-01 start on kiff-02
-  * Resource action: lxc-02_kiff-01 start on kiff-02
-  * Resource action: vm-fs start on lxc-01_kiff-01
-  * Resource action: fence-kiff-02 monitor=60000 on kiff-02
-  * Pseudo action: shared0_stop_0
-  * Pseudo action: shared0-clone_stopped_0
-  * Resource action: R-lxc-01_kiff-01 monitor=10000 on kiff-02
-  * Resource action: R-lxc-02_kiff-01 monitor=10000 on kiff-02
-  * Resource action: lxc-01_kiff-01 monitor=30000 on kiff-02
-  * Resource action: lxc-02_kiff-01 monitor=30000 on kiff-02
-  * Resource action: vm-fs monitor=20000 on lxc-01_kiff-01
-  * Pseudo action: clvmd-clone_stop_0
-  * Pseudo action: clvmd_stop_0
-  * Pseudo action: clvmd-clone_stopped_0
-  * Pseudo action: dlm-clone_stop_0
-  * Pseudo action: dlm_stop_0
-  * Pseudo action: dlm-clone_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
+    * Node kiff-01: UNCLEAN (offline)
     * Online: [ kiff-02 ]
-    * OFFLINE: [ kiff-01 ]
     * GuestOnline: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
 
   * Full List of Resources:
-    * vm-fs (ocf:heartbeat:Filesystem): Started lxc-01_kiff-01
+    * vm-fs (ocf:heartbeat:Filesystem): FAILED lxc-01_kiff-01
     * R-lxc-01_kiff-02 (ocf:heartbeat:VirtualDomain): Started kiff-02
     * fence-kiff-01 (stonith:fence_ipmilan): Started kiff-02
-    * fence-kiff-02 (stonith:fence_ipmilan): Started kiff-02
+    * fence-kiff-02 (stonith:fence_ipmilan): Started kiff-01 (UNCLEAN)
     * Clone Set: dlm-clone [dlm]:
+      * dlm (ocf:pacemaker:controld): Started kiff-01 (UNCLEAN)
       * Started: [ kiff-02 ]
-      * Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
+      * Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
     * Clone Set: clvmd-clone [clvmd]:
+      * clvmd (ocf:heartbeat:clvm): Started kiff-01 (UNCLEAN)
       * Started: [ kiff-02 ]
-      * Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
+      * Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
     * Clone Set: shared0-clone [shared0]:
+      * shared0 (ocf:heartbeat:Filesystem): Started kiff-01 (UNCLEAN)
       * Started: [ kiff-02 ]
-      * Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
-    * R-lxc-01_kiff-01 (ocf:heartbeat:VirtualDomain): Started kiff-02
-    * R-lxc-02_kiff-01 (ocf:heartbeat:VirtualDomain): Started kiff-02
+      * Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
+    * R-lxc-01_kiff-01 (ocf:heartbeat:VirtualDomain): FAILED kiff-01 (UNCLEAN)
+    * R-lxc-02_kiff-01 (ocf:heartbeat:VirtualDomain): Started kiff-01 (UNCLEAN)
     * R-lxc-02_kiff-02 (ocf:heartbeat:VirtualDomain): Started kiff-02
diff --git a/cts/scheduler/summary/probe-0.summary b/cts/scheduler/summary/probe-0.summary
index c717f0f6e03..3f84cfbc1b8 100644
--- a/cts/scheduler/summary/probe-0.summary
+++ b/cts/scheduler/summary/probe-0.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ x32c47 x32c48 ]
 
@@ -15,20 +17,10 @@ Transition Summary:
   * Start configstoreclone:1 ( x32c48 )
 
 Executing Cluster Transition:
-  * Resource action: configstoreclone:0 monitor on x32c47
-  * Resource action: configstoreclone:1 monitor on x32c48
-  * Pseudo action: configstorecloneset_pre_notify_start_0
-  * Pseudo action: configstorecloneset_confirmed-pre_notify_start_0
-  * Pseudo action: configstorecloneset_start_0
-  * Resource action: configstoreclone:0 start on x32c47
-  * Resource action: configstoreclone:1 start on x32c48
-  * Pseudo action: configstorecloneset_running_0
-  * Pseudo action: configstorecloneset_post_notify_running_0
-  * Resource action: configstoreclone:0 notify on x32c47
-  * Resource action: configstoreclone:1 notify on x32c48
-  * Pseudo action: configstorecloneset_confirmed-post_notify_running_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ x32c47 x32c48 ]
 
@@ -38,4 +30,4 @@ Revised Cluster Status:
     * Clone Set: imagestorecloneset [imagestoreclone]:
       * Started: [ x32c47 x32c48 ]
     * Clone Set: configstorecloneset [configstoreclone]:
-      * Started: [ x32c47 x32c48 ]
+      * Stopped: [ x32c47 x32c48 ]
diff --git a/cts/scheduler/summary/probe-1.summary b/cts/scheduler/summary/probe-1.summary
index 605ea0fe189..13ab59f4d58 100644
--- a/cts/scheduler/summary/probe-1.summary
+++ b/cts/scheduler/summary/probe-1.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ c001n05 ]
 
@@ -9,13 +11,12 @@ Transition Summary:
   * Start DcIPaddr ( c001n05 )
 
 Executing Cluster Transition:
-  * Resource action: DcIPaddr monitor on c001n05
-  * Resource action: DcIPaddr start on c001n05
-  * Resource action: DcIPaddr monitor=5000 on c001n05
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ c001n05 ]
 
   * Full List of Resources:
-    * DcIPaddr (ocf:heartbeat:IPaddr): Started c001n05
+    * DcIPaddr (ocf:heartbeat:IPaddr): Stopped
diff --git a/cts/scheduler/summary/probe-2.summary b/cts/scheduler/summary/probe-2.summary
index f73d5612464..b6b5f1f6a29 100644
--- a/cts/scheduler/summary/probe-2.summary
+++ b/cts/scheduler/summary/probe-2.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Node wc02: standby (with active resources)
     * Online: [ wc01 ]
@@ -44,91 +46,16 @@ Transition Summary:
   * Stop apache2:1 ( wc02 ) due to node availability
   * Restart stonith_rackpdu:0 ( wc01 )
   * Stop stonith_rackpdu:1 ( wc02 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Resource action: drbd_mysql:0 cancel=10000 on wc01
-  * Pseudo action: ms_drbd_mysql_pre_notify_demote_0
-  * Pseudo action: group_mysql_stop_0
-  * Resource action: mysql-server stop on wc02
-  * Pseudo action: ms_drbd_www_pre_notify_stop_0
-  * Pseudo action: clone_mysql-proxy_stop_0
-  * Pseudo action: clone_webservice_stop_0
-  * Pseudo action: DoFencing_stop_0
-  * Resource action: drbd_mysql:0 notify on wc01
-  * Resource action: drbd_mysql:1 notify on wc02
-  * Pseudo action: ms_drbd_mysql_confirmed-pre_notify_demote_0
-  * Resource action: intip_sql stop on wc02
-  * Resource action: drbd_www:0 notify on wc01
-  * Resource action: drbd_www:1 notify on wc02
-  * Pseudo action: ms_drbd_www_confirmed-pre_notify_stop_0
-  * Pseudo action: ms_drbd_www_stop_0
-  * Pseudo action: group_mysql-proxy:1_stop_0
-  * Resource action: mysql-proxy:1 stop on wc02
-  * Pseudo action: group_webservice:1_stop_0
-  * Resource action: apache2:1 stop on wc02
-  * Resource action: stonith_rackpdu:0 stop on wc01
-  * Resource action: stonith_rackpdu:1 stop on wc02
-  * Pseudo action: DoFencing_stopped_0
-  * Pseudo action: DoFencing_start_0
-  * Resource action: fs_mysql stop on wc02
-  * Resource action: drbd_www:1 stop on wc02
-  * Pseudo action: ms_drbd_www_stopped_0
-  * Pseudo action: group_mysql-proxy:1_stopped_0
-  * Pseudo action: clone_mysql-proxy_stopped_0
-  * Resource action: fs_www:1 stop on wc02
-  * Resource action: stonith_rackpdu:0 start on wc01
-  * Pseudo action: DoFencing_running_0
-  * Pseudo action: group_mysql_stopped_0
-  * Pseudo action: ms_drbd_www_post_notify_stopped_0
-  * Pseudo action: group_webservice:1_stopped_0
-  * Pseudo action: clone_webservice_stopped_0
-  * Resource action: stonith_rackpdu:0 monitor=5000 on wc01
-  * Pseudo action: ms_drbd_mysql_demote_0
-  * Resource action: drbd_www:0 notify on wc01
-  * Pseudo action: ms_drbd_www_confirmed-post_notify_stopped_0
-  * Pseudo action: clone_nfs-common_stop_0
-  * Resource action: drbd_mysql:1 demote on wc02
-  * Pseudo action: ms_drbd_mysql_demoted_0
-  * Pseudo action: group_nfs-common:1_stop_0
-  * Resource action: nfs-common:1 stop on wc02
-  * Pseudo action: ms_drbd_mysql_post_notify_demoted_0
-  * Pseudo action: group_nfs-common:1_stopped_0
-  * Pseudo action: clone_nfs-common_stopped_0
-  * Resource action: drbd_mysql:0 notify on wc01
-  * Resource action: drbd_mysql:1 notify on wc02
-  * Pseudo action: ms_drbd_mysql_confirmed-post_notify_demoted_0
-  * Pseudo action: ms_drbd_mysql_pre_notify_stop_0
-  * Resource action: drbd_mysql:0 notify on wc01
-  * Resource action: drbd_mysql:1 notify on wc02
-  * Pseudo action: ms_drbd_mysql_confirmed-pre_notify_stop_0
-  * Pseudo action: ms_drbd_mysql_stop_0
-  * Resource action: drbd_mysql:1 stop on wc02
-  * Pseudo action: ms_drbd_mysql_stopped_0
-  * Pseudo action: ms_drbd_mysql_post_notify_stopped_0
-  * Resource action: drbd_mysql:0 notify on wc01
-  * Pseudo action: ms_drbd_mysql_confirmed-post_notify_stopped_0
-  * Pseudo action: ms_drbd_mysql_pre_notify_promote_0
-  * Resource action: drbd_mysql:0 notify on wc01
-  * Pseudo action: ms_drbd_mysql_confirmed-pre_notify_promote_0
-  * Pseudo action: ms_drbd_mysql_promote_0
-  * Resource action: drbd_mysql:0 promote on wc01
-  * Pseudo action: ms_drbd_mysql_promoted_0
-  * Pseudo action: ms_drbd_mysql_post_notify_promoted_0
-  * Resource action: drbd_mysql:0 notify on wc01
-  * Pseudo action: ms_drbd_mysql_confirmed-post_notify_promoted_0
-  * Pseudo action: group_mysql_start_0
-  * Resource action: fs_mysql start on wc01
-  * Resource action: intip_sql start on wc01
-  * Resource action: mysql-server start on wc01
-  * Resource action: drbd_mysql:0 monitor=5000 on wc01
-  * Pseudo action: group_mysql_running_0
-  * Resource action: fs_mysql monitor=30000 on wc01
-  * Resource action: intip_sql monitor=30000 on wc01
-  * Resource action: mysql-server monitor=30000 on wc01
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
-    * Node wc02: standby
+    * Node wc02: standby (with active resources)
     * Online: [ wc01 ]
 
   * Full List of Resources:
@@ -137,27 +64,24 @@ Revised Cluster Status:
     * nfs-kernel-server (lsb:nfs-kernel-server): Started wc01
     * intip_nfs (ocf:heartbeat:IPaddr2): Started wc01
     * Clone Set: ms_drbd_mysql [drbd_mysql] (promotable):
-      * Promoted: [ wc01 ]
-      * Stopped: [ wc02 ]
+      * Promoted: [ wc02 ]
+      * Unpromoted: [ wc01 ]
     * Resource Group: group_mysql:
-      * fs_mysql (ocf:heartbeat:Filesystem): Started wc01
-      * intip_sql (ocf:heartbeat:IPaddr2): Started wc01
-      * mysql-server (ocf:heartbeat:mysql): Started wc01
+      * fs_mysql (ocf:heartbeat:Filesystem): Started wc02
+      * intip_sql (ocf:heartbeat:IPaddr2): Started wc02
+      * mysql-server (ocf:heartbeat:mysql): Started wc02
     * Clone Set: ms_drbd_www [drbd_www] (promotable):
       * Promoted: [ wc01 ]
-      * Stopped: [ wc02 ]
+      * Unpromoted: [ wc02 ]
     * Clone Set: clone_nfs-common [group_nfs-common]:
-      * Started: [ wc01 ]
-      * Stopped: [ wc02 ]
+      * Started: [ wc01 wc02 ]
     * Clone Set: clone_mysql-proxy [group_mysql-proxy]:
-      * Started: [ wc01 ]
-      * Stopped: [ wc02 ]
+      * Started: [ wc01 wc02 ]
     * Clone Set: clone_webservice [group_webservice]:
-      * Started: [ wc01 ]
-      * Stopped: [ wc02 ]
+      * Started: [ wc01 wc02 ]
     * Resource Group: group_ftpd:
       * extip_ftp (ocf:heartbeat:IPaddr2): Started wc01
       * pure-ftpd (ocf:heartbeat:Pure-FTPd): Started wc01
     * Clone Set: DoFencing [stonith_rackpdu] (unique):
       * stonith_rackpdu:0 (stonith:external/rackpdu): Started wc01
-      * stonith_rackpdu:1 (stonith:external/rackpdu): Stopped
+      * stonith_rackpdu:1 (stonith:external/rackpdu): Started wc02
diff --git a/cts/scheduler/summary/probe-3.summary b/cts/scheduler/summary/probe-3.summary
index 929fb4d7c88..a8c4d097b0c 100644
--- a/cts/scheduler/summary/probe-3.summary
+++ b/cts/scheduler/summary/probe-3.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Node pcmk-4: pending
     * Online: [ pcmk-1 pcmk-2 pcmk-3 ]
@@ -30,6 +32,8 @@ Transition Summary:
 Executing Cluster Transition:
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Node pcmk-4: pending
     * Online: [ pcmk-1 pcmk-2 pcmk-3 ]
diff --git a/cts/scheduler/summary/probe-4.summary b/cts/scheduler/summary/probe-4.summary
index 99005e966b2..cb0159514a3 100644
--- a/cts/scheduler/summary/probe-4.summary
+++ b/cts/scheduler/summary/probe-4.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Node pcmk-4: pending
     * Online: [ pcmk-1 pcmk-2 pcmk-3 ]
@@ -31,6 +33,8 @@ Transition Summary:
 Executing Cluster Transition:
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Node pcmk-4: pending
     * Online: [ pcmk-1 pcmk-2 pcmk-3 ]
diff --git a/cts/scheduler/summary/probe-pending-node.summary b/cts/scheduler/summary/probe-pending-node.summary
index 92153e28f2e..e728631097a 100644
--- a/cts/scheduler/summary/probe-pending-node.summary
+++ b/cts/scheduler/summary/probe-pending-node.summary
@@ -4,6 +4,8 @@ Using the original execution date of: 2021-06-11 13:55:24Z
 The cluster will not attempt to start, stop or recover services
 
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Node gcdoubwap02: pending
     * Online: [ gcdoubwap01 ]
@@ -32,6 +34,8 @@ Executing Cluster Transition:
 Using the original execution date of: 2021-06-11 13:55:24Z
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Node gcdoubwap02: pending
     * Online: [ gcdoubwap01 ]
diff --git a/cts/scheduler/summary/probe-target-of-failed-migrate_to-1.summary b/cts/scheduler/summary/probe-target-of-failed-migrate_to-1.summary
index 1be0ea60368..2d4e89461a6 100644
--- a/cts/scheduler/summary/probe-target-of-failed-migrate_to-1.summary
+++ b/cts/scheduler/summary/probe-target-of-failed-migrate_to-1.summary
@@ -1,5 +1,7 @@
 Using the original execution date of: 2022-05-09 10:28:56Z
 
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
@@ -10,11 +12,11 @@ Current cluster status:
 Transition Summary:
 
 Executing Cluster Transition:
-  * Resource action: st-sbd monitor on node2
-  * Resource action: dummy1 monitor on node2
 
 Using the original execution date of: 2022-05-09 10:28:56Z
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/probe-target-of-failed-migrate_to-2.summary b/cts/scheduler/summary/probe-target-of-failed-migrate_to-2.summary
index 6346e383fa8..36157affb9f 100644
--- a/cts/scheduler/summary/probe-target-of-failed-migrate_to-2.summary
+++ b/cts/scheduler/summary/probe-target-of-failed-migrate_to-2.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -11,6 +13,8 @@ Transition Summary:
 
 Executing Cluster Transition:
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/probe-timeout.summary b/cts/scheduler/summary/probe-timeout.summary
index ca7dc5afec7..3f7a3c40c48 100644
--- a/cts/scheduler/summary/probe-timeout.summary
+++ b/cts/scheduler/summary/probe-timeout.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -11,21 +13,13 @@ Transition Summary:
   * Start rsc2 ( node2 )
 
 Executing Cluster Transition:
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
-  * Resource action: rsc1 start on node1
-  * Resource action: rsc2 start on node2
-  * Resource action: rsc1 monitor=5000 on node1
-  * Resource action: rsc1 monitor=10000 on node1
-  * Resource action: rsc2 monitor=10000 on node2
-  * Resource action: rsc2 monitor=5000 on node2
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * rsc1 (ocf:pacemaker:Dummy): Started node1
-    * rsc2 (ocf:pacemaker:Dummy): Started node2
+    * rsc1 (ocf:pacemaker:Dummy): Stopped
+    * rsc2 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/promoted-0.summary b/cts/scheduler/summary/promoted-0.summary
index 3e724ffdc48..156136f9cdc 100644
--- a/cts/scheduler/summary/promoted-0.summary
+++ b/cts/scheduler/summary/promoted-0.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -17,31 +19,17 @@ Transition Summary:
   * Start child_rsc1:3 ( node2 )
 
 Executing Cluster Transition:
-  * Resource action: child_rsc1:0 monitor on node2
-  * Resource action: child_rsc1:0 monitor on node1
-  * Resource action: child_rsc1:1 monitor on node2
-  * Resource action: child_rsc1:1 monitor on node1
-  * Resource action: child_rsc1:2 monitor on node2
-  * Resource action: child_rsc1:2 monitor on node1
-  * Resource action: child_rsc1:3 monitor on node2
-  * Resource action: child_rsc1:3 monitor on node1
-  * Resource action: child_rsc1:4 monitor on node2
-  * Resource action: child_rsc1:4 monitor on node1
-  * Pseudo action: rsc1_start_0
-  * Resource action: child_rsc1:0 start on node1
-  * Resource action: child_rsc1:1 start on node2
-  * Resource action: child_rsc1:2 start on node1
-  * Resource action: child_rsc1:3 start on node2
-  * Pseudo action: rsc1_running_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * Clone Set: rsc1 [child_rsc1] (promotable, unique):
-      * child_rsc1:0 (ocf:heartbeat:apache): Unpromoted node1
-      * child_rsc1:1 (ocf:heartbeat:apache): Unpromoted node2
-      * child_rsc1:2 (ocf:heartbeat:apache): Unpromoted node1
-      * child_rsc1:3 (ocf:heartbeat:apache): Unpromoted node2
+      * child_rsc1:0 (ocf:heartbeat:apache): Stopped
+      * child_rsc1:1 (ocf:heartbeat:apache): Stopped
+      * child_rsc1:2 (ocf:heartbeat:apache): Stopped
+      * child_rsc1:3 (ocf:heartbeat:apache): Stopped
       * child_rsc1:4 (ocf:heartbeat:apache): Stopped
diff --git a/cts/scheduler/summary/promoted-1.summary b/cts/scheduler/summary/promoted-1.summary
index 839de37f1bb..c7f962534ea 100644
--- a/cts/scheduler/summary/promoted-1.summary
+++ b/cts/scheduler/summary/promoted-1.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -17,34 +19,17 @@ Transition Summary:
   * Start child_rsc1:3 ( node2 )
 
 Executing Cluster Transition:
-  * Resource action: child_rsc1:0 monitor on node2
-  * Resource action: child_rsc1:0 monitor on node1
-  * Resource action: child_rsc1:1 monitor on node2
-  * Resource action: child_rsc1:1 monitor on node1
-  * Resource action: child_rsc1:2 monitor on node2
-  * Resource action: child_rsc1:2 monitor on node1
-  * Resource action: child_rsc1:3 monitor on node2
-  * Resource action: child_rsc1:3 monitor on node1
-  * Resource action: child_rsc1:4 monitor on node2
-  * Resource action: child_rsc1:4 monitor on node1
-  * Pseudo action: rsc1_start_0
-  * Resource action: child_rsc1:0 start on node1
-  * Resource action: child_rsc1:1 start on node2
-  * Resource action: child_rsc1:2 start on node1
-  * Resource action: child_rsc1:3 start on node2
-  * Pseudo action: rsc1_running_0
-  * Pseudo action: rsc1_promote_0
-  * Resource action: child_rsc1:1 promote on node2
-  * Pseudo action: rsc1_promoted_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * Clone Set: rsc1 [child_rsc1] (promotable, unique):
-      * child_rsc1:0 (ocf:heartbeat:apache): Unpromoted node1
-      * child_rsc1:1 (ocf:heartbeat:apache): Promoted node2
-      * child_rsc1:2 (ocf:heartbeat:apache): Unpromoted node1
-      * child_rsc1:3 (ocf:heartbeat:apache): Unpromoted node2
+      * child_rsc1:0 (ocf:heartbeat:apache): Stopped
+      * child_rsc1:1 (ocf:heartbeat:apache): Stopped
+      * child_rsc1:2 (ocf:heartbeat:apache): Stopped
+      * child_rsc1:3 (ocf:heartbeat:apache): Stopped
       * child_rsc1:4 (ocf:heartbeat:apache): Stopped
diff --git a/cts/scheduler/summary/promoted-10.summary b/cts/scheduler/summary/promoted-10.summary
index 7efbce92b6e..984e276b95a 100644
--- a/cts/scheduler/summary/promoted-10.summary
+++ b/cts/scheduler/summary/promoted-10.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -17,59 +19,17 @@ Transition Summary:
   * Promote child_rsc1:3 ( Stopped -> Promoted node2 )
 
 Executing Cluster Transition:
-  * Resource action: child_rsc1:0 monitor on node2
-  * Resource action: child_rsc1:0 monitor on node1
-  * Resource action: child_rsc1:1 monitor on node2
-  * Resource action: child_rsc1:1 monitor on node1
-  * Resource action: child_rsc1:2 monitor on node2
-  * Resource action: child_rsc1:2 monitor on node1
-  * Resource action: child_rsc1:3 monitor on node2
-  * Resource action: child_rsc1:3 monitor on node1
-  * Resource action: child_rsc1:4 monitor on node2
-  * Resource action: child_rsc1:4 monitor on node1
-  * Pseudo action: rsc1_pre_notify_start_0
-  * Pseudo action: rsc1_confirmed-pre_notify_start_0
-  * Pseudo action: rsc1_start_0
-  * Resource action: child_rsc1:0 start on node1
-  * Resource action: child_rsc1:1 start on node2
-  * Resource action: child_rsc1:2 start on node1
-  * Resource action: child_rsc1:3 start on node2
-  * Pseudo action: rsc1_running_0
-  * Pseudo action: rsc1_post_notify_running_0
-  * Resource action: child_rsc1:0 notify on node1
-  * Resource action: child_rsc1:1 notify on node2
-  * Resource action: child_rsc1:2 notify on node1
-  * Resource action: child_rsc1:3 notify on node2
-  * Pseudo action: rsc1_confirmed-post_notify_running_0
-  * Pseudo action: rsc1_pre_notify_promote_0
-  * Resource action: child_rsc1:0 notify on node1
-  * Resource action: child_rsc1:1 notify on node2
-  * Resource action: child_rsc1:2 notify on node1
-  * Resource action: child_rsc1:3 notify on node2
-  * Pseudo action: rsc1_confirmed-pre_notify_promote_0
-  * Pseudo action: rsc1_promote_0
-  * Resource action: child_rsc1:0 promote on node1
-  * Resource action: child_rsc1:3 promote on node2
-  * Pseudo action: rsc1_promoted_0
-  * Pseudo action: rsc1_post_notify_promoted_0
-  * Resource action: child_rsc1:0 notify on node1
-  * Resource action: child_rsc1:1 notify on node2
-  * Resource action: child_rsc1:2 notify on node1
-  * Resource action: child_rsc1:3 notify on node2
-  * Pseudo action: rsc1_confirmed-post_notify_promoted_0
-  * Resource action: child_rsc1:0 monitor=11000 on node1
-  * Resource action: child_rsc1:1 monitor=1000 on node2
-  * Resource action: child_rsc1:2 monitor=1000 on node1
-  * Resource action: child_rsc1:3 monitor=11000 on node2
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * Clone Set: rsc1 [child_rsc1] (promotable, unique):
-      * child_rsc1:0 (ocf:heartbeat:apache): Promoted node1
-      * child_rsc1:1 (ocf:heartbeat:apache): Unpromoted node2
-      * child_rsc1:2 (ocf:heartbeat:apache): Unpromoted node1
-      * child_rsc1:3 (ocf:heartbeat:apache): Promoted node2
+      * child_rsc1:0 (ocf:heartbeat:apache): Stopped
+      * child_rsc1:1 (ocf:heartbeat:apache): Stopped
+      * child_rsc1:2 (ocf:heartbeat:apache): Stopped
+      * child_rsc1:3 (ocf:heartbeat:apache): Stopped
      * child_rsc1:4 (ocf:heartbeat:apache): Stopped
diff --git a/cts/scheduler/summary/promoted-11.summary b/cts/scheduler/summary/promoted-11.summary
index 6999bb1af01..a1470f63525 100644
--- a/cts/scheduler/summary/promoted-11.summary
+++ b/cts/scheduler/summary/promoted-11.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -14,27 +16,15 @@ Transition Summary:
   * Promote child_rsc1:1 ( Stopped -> Promoted node2 )
 
 Executing Cluster Transition:
-  * Resource action: simple-rsc monitor on node2
-  * Resource action: simple-rsc monitor on node1
-  * Resource action: child_rsc1:0 monitor on node2
-  * Resource action: child_rsc1:0 monitor on node1
-  * Resource action: child_rsc1:1 monitor on node2
-  * Resource action: child_rsc1:1 monitor on node1
-  * Pseudo action: rsc1_start_0
-  * Resource action: simple-rsc start on node2
-  * Resource action: child_rsc1:0 start on node1
-  * Resource action: child_rsc1:1 start on node2
-  * Pseudo action: rsc1_running_0
-  * Pseudo action: rsc1_promote_0
-  * Resource action: child_rsc1:1 promote on node2
-  * Pseudo action: rsc1_promoted_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * simple-rsc (ocf:heartbeat:apache): Started node2
+    * simple-rsc (ocf:heartbeat:apache): Stopped
     * Clone Set: rsc1 [child_rsc1] (promotable, unique):
-      * child_rsc1:0 (ocf:heartbeat:apache): Unpromoted node1
-      * child_rsc1:1 (ocf:heartbeat:apache): Promoted node2
+      * child_rsc1:0 (ocf:heartbeat:apache): Stopped
+      * child_rsc1:1 (ocf:heartbeat:apache): Stopped
diff --git a/cts/scheduler/summary/promoted-12.summary b/cts/scheduler/summary/promoted-12.summary
index 9125a9aa28c..6b286e4f1c3 100644
--- a/cts/scheduler/summary/promoted-12.summary
+++ b/cts/scheduler/summary/promoted-12.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ sel3 sel4 ]
 
@@ -15,11 +17,10 @@ Transition Summary:
   * Promote sf:0 ( Unpromoted -> Promoted sel3 )
 
 Executing Cluster Transition:
-  * Pseudo action: ms-sf_promote_0
-  * Resource action: sf:0 promote on sel3
-  * Pseudo action: ms-sf_promoted_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ sel3 sel4 ]
 
@@ -28,6 +29,6 @@ Revised Cluster Status:
       * Promoted: [ sel3 ]
       * Unpromoted: [ sel4 ]
     * Clone Set: ms-sf [sf] (promotable, unique):
-      * sf:0 (ocf:heartbeat:Stateful): Promoted sel3
+      * sf:0 (ocf:heartbeat:Stateful): Unpromoted sel3
       * sf:1 (ocf:heartbeat:Stateful): Unpromoted sel4
     * fs0 (ocf:heartbeat:Filesystem): Started sel3
diff --git a/cts/scheduler/summary/promoted-13.summary b/cts/scheduler/summary/promoted-13.summary
index 5f977c8edbc..eb321b066cf 100644
--- a/cts/scheduler/summary/promoted-13.summary
+++ b/cts/scheduler/summary/promoted-13.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ frigg odin ]
 
@@ -17,46 +19,17 @@ Transition Summary:
   * Start MailTo ( odin )
 
 Executing Cluster Transition:
-  * Resource action: drbd0:1 cancel=12000 on odin
-  * Resource action: drbd0:0 cancel=10000 on frigg
-  * Pseudo action: ms_drbd_pre_notify_demote_0
-  * Resource action: drbd0:1 notify on odin
-  * Resource action: drbd0:0 notify on frigg
-  * Pseudo action: ms_drbd_confirmed-pre_notify_demote_0
-  * Pseudo action: ms_drbd_demote_0
-  * Resource action: drbd0:0 demote on frigg
-  * Pseudo action: ms_drbd_demoted_0
-  * Pseudo action: ms_drbd_post_notify_demoted_0
-  * Resource action: drbd0:1 notify on odin
-  * Resource action: drbd0:0 notify on frigg
-  * Pseudo action: ms_drbd_confirmed-post_notify_demoted_0
-  * Pseudo action: ms_drbd_pre_notify_promote_0
-  * Resource action: drbd0:1 notify on odin
-  * Resource action: drbd0:0 notify on frigg
-  * Pseudo action: ms_drbd_confirmed-pre_notify_promote_0
-  * Pseudo action: ms_drbd_promote_0
-  * Resource action: drbd0:1 promote on odin
-  * Pseudo action: ms_drbd_promoted_0
-  * Pseudo action: ms_drbd_post_notify_promoted_0
-  * Resource action: drbd0:1 notify on odin
-  * Resource action: drbd0:0 notify on frigg
-  * Pseudo action: ms_drbd_confirmed-post_notify_promoted_0
-  * Pseudo action: group_start_0
-  * Resource action: IPaddr0 start on odin
-  * Resource action: MailTo start on odin
-  * Resource action: drbd0:1 monitor=10000 on odin
-  * Resource action: drbd0:0 monitor=12000 on frigg
-  * Pseudo action: group_running_0
-  * Resource action: IPaddr0 monitor=5000 on odin
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ frigg odin ]
 
   * Full List of Resources:
     * Clone Set: ms_drbd [drbd0] (promotable):
-      * Promoted: [ odin ]
-      * Unpromoted: [ frigg ]
+      * Promoted: [ frigg ]
+      * Unpromoted: [ odin ]
     * Resource Group: group:
-      * IPaddr0 (ocf:heartbeat:IPaddr): Started odin
-      * MailTo (ocf:heartbeat:MailTo): Started odin
+      * IPaddr0 (ocf:heartbeat:IPaddr): Stopped
+      * MailTo (ocf:heartbeat:MailTo): Stopped
diff --git a/cts/scheduler/summary/promoted-2.summary b/cts/scheduler/summary/promoted-2.summary
index 58e3e2ec824..984e276b95a 100644
--- a/cts/scheduler/summary/promoted-2.summary
+++ b/cts/scheduler/summary/promoted-2.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -17,55 +19,17 @@ Transition Summary:
   * Promote child_rsc1:3 ( Stopped -> Promoted node2 )
 
 Executing Cluster Transition:
-  * Resource action: child_rsc1:0 monitor on node2
-  * Resource action: child_rsc1:0 monitor on node1
-  * Resource action: child_rsc1:1 monitor on node2
-  * Resource action: child_rsc1:1 monitor on node1
-  * Resource action: child_rsc1:2 monitor on node2
-  * Resource action: child_rsc1:2 monitor on node1
-  * Resource action: child_rsc1:3 monitor on node2
-  * Resource action: child_rsc1:3 monitor on node1
-  * Resource action: child_rsc1:4 monitor on node2
-  * Resource action: child_rsc1:4 monitor on node1
-  * Pseudo action: rsc1_pre_notify_start_0
-  * Pseudo action: rsc1_confirmed-pre_notify_start_0
-  * Pseudo action: rsc1_start_0
-  * Resource action: child_rsc1:0 start on node1
-  * Resource action: child_rsc1:1 start on node2
-  * Resource action: child_rsc1:2 start on node1
-  * Resource action: child_rsc1:3 start on node2
-  * Pseudo action: rsc1_running_0
-  * Pseudo action: rsc1_post_notify_running_0
-  * Resource action: child_rsc1:0 notify on node1
-  * Resource action: child_rsc1:1 notify on node2
-  * Resource action: child_rsc1:2 notify on node1
-  * Resource action: child_rsc1:3 notify on node2
-  * Pseudo action: rsc1_confirmed-post_notify_running_0
-  * Pseudo action: rsc1_pre_notify_promote_0
-  * Resource action: child_rsc1:0 notify on node1
-  * Resource action: child_rsc1:1 notify on node2
-  * Resource action: child_rsc1:2 notify on node1
-  * Resource action: child_rsc1:3 notify on node2
-  * Pseudo action: rsc1_confirmed-pre_notify_promote_0
-  * Pseudo action: rsc1_promote_0
-  * Resource action: child_rsc1:0 promote on node1
-  * Resource action: child_rsc1:3 promote on node2
-  * Pseudo action: rsc1_promoted_0
-  * Pseudo action: rsc1_post_notify_promoted_0
-  * Resource action: child_rsc1:0 notify on node1
-  * Resource action: child_rsc1:1 notify on node2
-  * Resource action: child_rsc1:2 notify on node1
-  * Resource action: child_rsc1:3 notify on node2
-  * Pseudo action: rsc1_confirmed-post_notify_promoted_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * Clone Set: rsc1 [child_rsc1] (promotable, unique):
-      * child_rsc1:0 (ocf:heartbeat:apache): Promoted node1
-      * child_rsc1:1 (ocf:heartbeat:apache): Unpromoted node2
-      * child_rsc1:2 (ocf:heartbeat:apache): Unpromoted node1
-      * child_rsc1:3 (ocf:heartbeat:apache): Promoted node2
+      * child_rsc1:0 (ocf:heartbeat:apache): Stopped
+      * child_rsc1:1 (ocf:heartbeat:apache): Stopped
+      * child_rsc1:2 (ocf:heartbeat:apache): Stopped
+      * child_rsc1:3 (ocf:heartbeat:apache): Stopped
       * child_rsc1:4 (ocf:heartbeat:apache): Stopped
diff --git a/cts/scheduler/summary/promoted-3.summary b/cts/scheduler/summary/promoted-3.summary
index 839de37f1bb..c7f962534ea 100644
--- a/cts/scheduler/summary/promoted-3.summary
+++ b/cts/scheduler/summary/promoted-3.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -17,34 +19,17 @@ Transition Summary:
   * Start child_rsc1:3 ( node2 )
 
 Executing Cluster Transition:
-  * Resource action: child_rsc1:0 monitor on node2
-  * Resource action: child_rsc1:0 monitor on node1
-  * Resource action: child_rsc1:1 monitor on node2
-  * Resource action: child_rsc1:1 monitor on node1
-  * Resource action: child_rsc1:2 monitor on node2
-  * Resource action: child_rsc1:2 monitor on node1
-  * Resource action: child_rsc1:3 monitor on node2
-  * Resource action: child_rsc1:3 monitor on node1
-  * Resource action: child_rsc1:4 monitor on node2
-  * Resource action: child_rsc1:4 monitor on node1
-  * Pseudo action: rsc1_start_0
-  * Resource action: child_rsc1:0 start on node1
-  * Resource action: child_rsc1:1 start on node2
-  * Resource action: child_rsc1:2 start on node1
-  * Resource action: child_rsc1:3 start on node2
-  * Pseudo action: rsc1_running_0
-  * Pseudo action: rsc1_promote_0
-  * Resource action: child_rsc1:1 promote on node2
-  * Pseudo action: rsc1_promoted_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * Clone Set: rsc1 [child_rsc1] (promotable, unique):
-      * child_rsc1:0 (ocf:heartbeat:apache): Unpromoted node1
-      * child_rsc1:1 (ocf:heartbeat:apache): Promoted node2
-      * child_rsc1:2 (ocf:heartbeat:apache): Unpromoted node1
-      * child_rsc1:3 (ocf:heartbeat:apache): Unpromoted node2
+      * child_rsc1:0 (ocf:heartbeat:apache): Stopped
+      * child_rsc1:1 (ocf:heartbeat:apache): Stopped
+      * child_rsc1:2 (ocf:heartbeat:apache): Stopped
+      * child_rsc1:3 (ocf:heartbeat:apache): Stopped
       * child_rsc1:4 (ocf:heartbeat:apache): Stopped
diff --git a/cts/scheduler/summary/promoted-4.summary b/cts/scheduler/summary/promoted-4.summary
index 2bcb25eaf13..b2c81f0ca8e 100644
--- a/cts/scheduler/summary/promoted-4.summary
+++ b/cts/scheduler/summary/promoted-4.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ c001n01 c001n02 c001n03 c001n08 ]
 
@@ -31,40 +33,10 @@ Transition Summary:
   * Promote ocf_msdummy:0 ( Unpromoted -> Promoted c001n08 )
 
 Executing Cluster Transition:
-  * Resource action: child_DoFencing:1 monitor on c001n08
-  * Resource action: child_DoFencing:1 monitor on c001n02
-  * Resource action: child_DoFencing:1 monitor on c001n01
-  * Resource action: child_DoFencing:2 monitor on c001n08
-  * Resource action: child_DoFencing:2 monitor on c001n03
-  * Resource action: child_DoFencing:2 monitor on c001n02
-  * Resource action: child_DoFencing:3 monitor on c001n08
-  * Resource action: child_DoFencing:3 monitor on c001n03
-  * Resource action: child_DoFencing:3 monitor on c001n01
-  * Resource action: ocf_msdummy:0 cancel=5000 on c001n08
-  * Resource action: ocf_msdummy:2 monitor on c001n08
-  * Resource action: ocf_msdummy:2 monitor on c001n03
-  * Resource action: ocf_msdummy:2 monitor on c001n02
-  * Resource action: ocf_msdummy:3 monitor on c001n03
-  * Resource action: ocf_msdummy:3 monitor on c001n02
-  * Resource action: ocf_msdummy:3 monitor on c001n01
-  * Resource action: ocf_msdummy:4 monitor on c001n08
-  * Resource action: ocf_msdummy:4 monitor on c001n02
-  * Resource action: ocf_msdummy:4 monitor on c001n01
-  * Resource action: ocf_msdummy:5 monitor on c001n08
-  * Resource action: ocf_msdummy:5 monitor on c001n03
-  * Resource action: ocf_msdummy:5 monitor on c001n02
-  * Resource action: ocf_msdummy:6 monitor on c001n08
-  * Resource action: ocf_msdummy:6 monitor on c001n03
-  * Resource action: ocf_msdummy:6 monitor on c001n01
-  * Resource action: ocf_msdummy:7 monitor on c001n08
-  * Resource action: ocf_msdummy:7 monitor on c001n03
-  * Resource action: ocf_msdummy:7 monitor on c001n01
-  * Pseudo action: master_rsc_1_promote_0
-  * Resource action: ocf_msdummy:0 promote on c001n08
-  * Pseudo action: master_rsc_1_promoted_0
-  * Resource action: ocf_msdummy:0 monitor=6000 on c001n08
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ c001n01 c001n02 c001n03 c001n08 ]
 
@@ -84,7 +56,7 @@ Revised Cluster Status:
       * child_DoFencing:2 (stonith:ssh): Started c001n01
       * child_DoFencing:3 (stonith:ssh): Started c001n02
     * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
-      * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n08
+      * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08
       * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03
       * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01
       * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08
diff --git a/cts/scheduler/summary/promoted-5.summary b/cts/scheduler/summary/promoted-5.summary
index 8a2f1a232f7..459862569c4 100644
--- a/cts/scheduler/summary/promoted-5.summary
+++ b/cts/scheduler/summary/promoted-5.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ c001n01 c001n02 c001n03 c001n08 ]
 
@@ -30,35 +32,10 @@ Current cluster status:
 
 Transition Summary:
 
 Executing Cluster Transition:
-  * Resource action: child_DoFencing:1 monitor on c001n08
-  * Resource action: child_DoFencing:1 monitor on c001n02
-  * Resource action: child_DoFencing:1 monitor on c001n01
-  * Resource action: child_DoFencing:2 monitor on c001n08
-  * Resource action: child_DoFencing:2 monitor on c001n03
-  * Resource action: child_DoFencing:2 monitor on c001n02
-  * Resource action: child_DoFencing:3 monitor on c001n08
-  * Resource action: child_DoFencing:3 monitor on c001n03
-  * Resource action: child_DoFencing:3 monitor on c001n01
-  * Resource action: ocf_msdummy:2 monitor on c001n08
-  * Resource action: ocf_msdummy:2 monitor on c001n03
-  * Resource action: ocf_msdummy:2 monitor on c001n02
-  * Resource action: ocf_msdummy:3 monitor on c001n03
-  * Resource action: ocf_msdummy:3 monitor on c001n02
-  * Resource action: ocf_msdummy:3 monitor on c001n01
-  * Resource action: ocf_msdummy:4 monitor on c001n08
-  * Resource action: ocf_msdummy:4 monitor on c001n02
-  * Resource action: ocf_msdummy:4 monitor on c001n01
-  * Resource action: ocf_msdummy:5 monitor on c001n08
-  * Resource action: ocf_msdummy:5 monitor on c001n03
-  * Resource action: ocf_msdummy:5 monitor on c001n02
-  * Resource action: ocf_msdummy:6 monitor on c001n08
-  * Resource action: ocf_msdummy:6 monitor on c001n03
-  * Resource action: ocf_msdummy:6 monitor on c001n01
-  * Resource action: ocf_msdummy:7 monitor on c001n08
-  * Resource action: ocf_msdummy:7 monitor on c001n03
-  * Resource action: ocf_msdummy:7 monitor on c001n01
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ c001n01 c001n02 c001n03 c001n08 ]
 
diff --git a/cts/scheduler/summary/promoted-6.summary b/cts/scheduler/summary/promoted-6.summary
index 2d9c953bfa1..5daf2abd720 100644
--- a/cts/scheduler/summary/promoted-6.summary
+++ b/cts/scheduler/summary/promoted-6.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ c001n01 c001n02 c001n03 c001n08 ]
 
@@ -31,32 +33,10 @@ Current cluster status:
 
 Transition Summary:
 
 Executing Cluster Transition:
-  * Resource action: child_DoFencing:1 monitor on c001n08
-  * Resource action: child_DoFencing:1 monitor on c001n03
-  * Resource action: child_DoFencing:1 monitor on c001n01
-  * Resource action: child_DoFencing:2 monitor on c001n08
-  * Resource action: child_DoFencing:2 monitor on c001n01
-  * Resource action: child_DoFencing:3 monitor on c001n08
-  * Resource action: child_DoFencing:3 monitor on c001n03
-  * Resource action: child_DoFencing:3 monitor on c001n02
-  * Resource action: ocf_msdummy:2 monitor on c001n08
-  * Resource action: ocf_msdummy:2 monitor on c001n01
-  * Resource action: ocf_msdummy:3 monitor on c001n03
-  * Resource action: ocf_msdummy:3 monitor on c001n01
-  * Resource action: ocf_msdummy:4 monitor on c001n08
-  * Resource action: ocf_msdummy:4 monitor on c001n03
-  * Resource action: ocf_msdummy:4 monitor on c001n01
-  * Resource action: ocf_msdummy:5 monitor on c001n08
-  * Resource action: ocf_msdummy:5 monitor on c001n02
-  * Resource action: ocf_msdummy:5 monitor on c001n01
-  * Resource action: ocf_msdummy:6 monitor on c001n08
-  * Resource action: ocf_msdummy:6 monitor on c001n03
-  * Resource action: ocf_msdummy:6 monitor on c001n02
-  * Resource action: ocf_msdummy:7 monitor on c001n08
-  * Resource action: ocf_msdummy:7 monitor on c001n03
-  * Resource action: ocf_msdummy:7 monitor on c001n02
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ c001n01 c001n02 c001n03 c001n08 ]
 
diff --git a/cts/scheduler/summary/promoted-7.summary b/cts/scheduler/summary/promoted-7.summary
index a1ddea5d994..05250c80ad2 100644
--- a/cts/scheduler/summary/promoted-7.summary
+++ b/cts/scheduler/summary/promoted-7.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Node c001n01: UNCLEAN (offline)
     * Online: [ c001n02 c001n03 c001n08 ]
@@ -40,82 +42,40 @@ Transition Summary:
   * Stop child_DoFencing:0 ( c001n01 ) due to node availability
   * Stop ocf_msdummy:0 ( Promoted c001n01 ) due to node availability
   * Stop ocf_msdummy:4 ( Unpromoted c001n01 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Pseudo action: group-1_stop_0
-  * Resource action: ocf_192.168.100.183 stop on c001n03
-  * Resource action: lsb_dummy stop on c001n02
-  * Resource action: child_DoFencing:2 monitor on c001n08
-  * Resource action: child_DoFencing:2 monitor on c001n03
-  * Resource action: child_DoFencing:3 monitor on c001n03
-  * Resource action: child_DoFencing:3 monitor on c001n02
-  * Pseudo action: DoFencing_stop_0
-  * Resource action: ocf_msdummy:4 monitor on c001n08
-  * Resource action: ocf_msdummy:4 monitor on c001n03
-  * Resource action: ocf_msdummy:4 monitor on c001n02
-  * Resource action: ocf_msdummy:5 monitor on c001n08
-  * Resource action: ocf_msdummy:5 monitor on c001n02
-  * Resource action: ocf_msdummy:6 monitor on c001n08
-  * Resource action: ocf_msdummy:6 monitor on c001n03
-  * Resource action: ocf_msdummy:7 monitor on c001n03
-  * Resource action: ocf_msdummy:7 monitor on c001n02
-  * Pseudo action: master_rsc_1_demote_0
-  * Fencing c001n01 (reboot)
-  * Pseudo action: DcIPaddr_stop_0
-  * Resource action: heartbeat_192.168.100.182 stop on c001n03
-  * Resource action: lsb_dummy start on c001n08
-  * Pseudo action: rsc_c001n01_stop_0
-  * Pseudo action: child_DoFencing:0_stop_0
-  * Pseudo action: DoFencing_stopped_0
-  * Pseudo action: ocf_msdummy:0_demote_0
-  * Pseudo action: master_rsc_1_demoted_0
-  * Pseudo action: master_rsc_1_stop_0
-  * Resource action: DcIPaddr start on c001n03
-  * Resource action: ocf_192.168.100.181 stop on c001n03
-  * Resource action: lsb_dummy monitor=5000 on c001n08
-  * Resource action: rsc_c001n01 start on c001n03
-  * Pseudo action: ocf_msdummy:0_stop_0
-  * Pseudo action: ocf_msdummy:4_stop_0
-  * Pseudo action: master_rsc_1_stopped_0
-  * Resource action: DcIPaddr monitor=5000 on c001n03
-  * Pseudo action: group-1_stopped_0
-  * Pseudo action: group-1_start_0
-  * Resource action: ocf_192.168.100.181 start on c001n02
-  * Resource action: heartbeat_192.168.100.182 start on c001n02
-  * Resource action: ocf_192.168.100.183 start on c001n02
-  * Resource action: rsc_c001n01 monitor=5000 on c001n03
-  * Pseudo action: group-1_running_0
-  * Resource action: ocf_192.168.100.181 monitor=5000 on c001n02
-  * Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02
-  * Resource action: ocf_192.168.100.183 monitor=5000 on c001n02
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
+    * Node c001n01: UNCLEAN (offline)
     * Online: [ c001n02 c001n03 c001n08 ]
-    * OFFLINE: [ c001n01 ]
 
   * Full List of Resources:
-    * DcIPaddr (ocf:heartbeat:IPaddr): Started c001n03
+    * DcIPaddr (ocf:heartbeat:IPaddr): Started c001n01 (UNCLEAN)
     * Resource Group: group-1:
-      * ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n02
-      * heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n02
-      * ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n02
-    * lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n08
-    * rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n03
+      * ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n03
+      * heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n03
+      * ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n03
+    * lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n02
+    * rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01 (UNCLEAN)
     * rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08
     * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
     * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03
     * Clone Set: DoFencing [child_DoFencing] (unique):
-      * child_DoFencing:0 (stonith:ssh): Stopped
+      * child_DoFencing:0 (stonith:ssh): Started c001n01 (UNCLEAN)
       * child_DoFencing:1 (stonith:ssh): Started c001n03
       * child_DoFencing:2 (stonith:ssh): Started c001n02
       * child_DoFencing:3 (stonith:ssh): Started c001n08
     * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
-      * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
+      * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n01 (UNCLEAN)
       * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03
       * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
       * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08
-      * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
+      * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01 (UNCLEAN)
       * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03
       * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
       * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08
diff --git a/cts/scheduler/summary/promoted-8.summary b/cts/scheduler/summary/promoted-8.summary
index ed646ed589b..4b1aa36379e 100644
--- a/cts/scheduler/summary/promoted-8.summary
+++ b/cts/scheduler/summary/promoted-8.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Node c001n01: UNCLEAN (offline)
     * Online: [ c001n02 c001n03 c001n08 ]
@@ -39,82 +41,36 @@ Transition Summary:
   * Move rsc_c001n01 ( c001n01 -> c001n03 )
   * Stop child_DoFencing:0 ( c001n01 ) due to node availability
   * Move ocf_msdummy:0 ( Promoted c001n01 -> Unpromoted c001n03 )
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Pseudo action: group-1_stop_0
-  * Resource action: ocf_192.168.100.183 stop on c001n03
-  * Resource action: lsb_dummy stop on c001n02
-  * Resource action: child_DoFencing:2 monitor on c001n08
-  * Resource action: child_DoFencing:2 monitor on c001n03
-  * Resource action: child_DoFencing:3 monitor on c001n03
-  * Resource action: child_DoFencing:3 monitor on c001n02
-  * Pseudo action: DoFencing_stop_0
-  * Resource action: ocf_msdummy:4 monitor on c001n08
-  * Resource action: ocf_msdummy:4 monitor on c001n03
-  * Resource action: ocf_msdummy:4 monitor on c001n02
-  * Resource action: ocf_msdummy:5 monitor on c001n08
-  * Resource action: ocf_msdummy:5 monitor on c001n03
-  * Resource action: ocf_msdummy:5 monitor on c001n02
-  * Resource action: ocf_msdummy:6 monitor on c001n08
-  * Resource action: ocf_msdummy:6 monitor on c001n03
-  * Resource action: ocf_msdummy:7 monitor on c001n03
-  * Resource action: ocf_msdummy:7 monitor on c001n02
-  * Pseudo action: master_rsc_1_demote_0
-  * Fencing c001n01 (reboot)
-  * Pseudo action: DcIPaddr_stop_0
-  * Resource action: heartbeat_192.168.100.182 stop on c001n03
-  * Resource action: lsb_dummy start on c001n08
-  * Pseudo action: rsc_c001n01_stop_0
-  * Pseudo action: child_DoFencing:0_stop_0
-  * Pseudo action: DoFencing_stopped_0
-  * Pseudo action: ocf_msdummy:0_demote_0
-  * Pseudo action: master_rsc_1_demoted_0
-  * Pseudo action: master_rsc_1_stop_0
-  * Resource action: DcIPaddr start on c001n03
-  * Resource action: ocf_192.168.100.181 stop on c001n03
-  * Resource action: lsb_dummy monitor=5000 on c001n08
-  * Resource action: rsc_c001n01 start on c001n03
-  * Pseudo action: ocf_msdummy:0_stop_0
-  * Pseudo action: master_rsc_1_stopped_0
-  * Pseudo action: master_rsc_1_start_0
-  * Resource action: DcIPaddr monitor=5000 on c001n03
-  * Pseudo action: group-1_stopped_0
-  * Pseudo action: group-1_start_0
-  * Resource action: ocf_192.168.100.181 start on c001n02
-  * Resource action: heartbeat_192.168.100.182 start on c001n02
-  * Resource action: ocf_192.168.100.183 start on c001n02
-  * Resource action: rsc_c001n01 monitor=5000 on c001n03
-  * Resource action: ocf_msdummy:0 start on c001n03
-  * Pseudo action: master_rsc_1_running_0
-  * Pseudo action: group-1_running_0
-  * Resource action: ocf_192.168.100.181 monitor=5000 on c001n02
-  * Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02
-  * Resource action: ocf_192.168.100.183 monitor=5000 on c001n02
-  * Resource action: ocf_msdummy:0 monitor=5000 on c001n03
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
+    * Node c001n01: UNCLEAN (offline)
     * Online: [ c001n02 c001n03 c001n08 ]
-    * OFFLINE: [ c001n01 ]
 
   * Full List of Resources:
-    * DcIPaddr (ocf:heartbeat:IPaddr): Started c001n03
+    * DcIPaddr (ocf:heartbeat:IPaddr): Started c001n01 (UNCLEAN)
     * Resource Group: group-1:
-      * ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n02
-      * heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n02
-      * ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n02
-    * lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n08
-    * rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n03
+      * ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n03
+      * heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n03
+      * ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n03
+    * lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n02
+    * rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01 (UNCLEAN)
     * rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08
     * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
     * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03
     * Clone Set: DoFencing [child_DoFencing] (unique):
-      * child_DoFencing:0 (stonith:ssh): Stopped
+      * child_DoFencing:0 (stonith:ssh): Started c001n01 (UNCLEAN)
       * child_DoFencing:1 (stonith:ssh): Started c001n03
       * child_DoFencing:2 (stonith:ssh): Started c001n02
       * child_DoFencing:3 (stonith:ssh): Started c001n08
     * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
-      * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03
+      * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n01 (UNCLEAN)
       * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03
       * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
       * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08
diff --git a/cts/scheduler/summary/promoted-9.summary b/cts/scheduler/summary/promoted-9.summary
index 69dab46a2ce..19821689500 100644
--- a/cts/scheduler/summary/promoted-9.summary
+++ b/cts/scheduler/summary/promoted-9.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Node sgi2: UNCLEAN (offline)
     * Node test02: UNCLEAN (offline)
@@ -43,31 +46,15 @@ Transition Summary:
   * Stop child_DoFencing:1 ( ibm1 ) due to node availability
   * Promote ocf_msdummy:0 ( Stopped -> Promoted va1 ) blocked
   * Start ocf_msdummy:1 ( va1 ) due to no quorum (blocked)
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Resource action: child_DoFencing:1 monitor on va1
-  * Resource action: child_DoFencing:2 monitor on va1
-  * Resource action: child_DoFencing:2 monitor on ibm1
-  * Resource action: child_DoFencing:3 monitor on va1
-  * Resource action: child_DoFencing:3 monitor on ibm1
-  * Pseudo action: DoFencing_stop_0
-  * Resource action: ocf_msdummy:2 monitor on va1
-  * Resource action: ocf_msdummy:2 monitor on ibm1
-  * Resource action: ocf_msdummy:3 monitor on va1
-  * Resource action: ocf_msdummy:3 monitor on ibm1
-  * Resource action: ocf_msdummy:4 monitor on va1
-  * Resource action: ocf_msdummy:4 monitor on ibm1
-  * Resource action: ocf_msdummy:5 monitor on va1
-  * Resource action: ocf_msdummy:5 monitor on ibm1
-  * Resource action: ocf_msdummy:6 monitor on va1
-  * Resource action: ocf_msdummy:6 monitor on ibm1
-  * Resource action: ocf_msdummy:7 monitor on va1
-  * Resource action: ocf_msdummy:7 monitor on ibm1
-  * Resource action: child_DoFencing:1 stop on ibm1
-  * Pseudo action: DoFencing_stopped_0
-  * Cluster action: do_shutdown on ibm1
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Node sgi2: UNCLEAN (offline)
     * Node test02: UNCLEAN (offline)
@@ -86,7 +73,7 @@ Revised Cluster Status:
     * rsc_test02 (ocf:heartbeat:IPaddr): Stopped
     * Clone Set: DoFencing [child_DoFencing] (unique):
       * child_DoFencing:0 (stonith:ssh): Started va1
-      * child_DoFencing:1 (stonith:ssh): Stopped
+      * child_DoFencing:1 (stonith:ssh): Started ibm1
       * child_DoFencing:2 (stonith:ssh): Stopped
       * child_DoFencing:3 (stonith:ssh): Stopped
     * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
diff --git a/cts/scheduler/summary/promoted-allow-start.summary b/cts/scheduler/summary/promoted-allow-start.summary
index c9afdaa1055..24387f75a41 100644
--- a/cts/scheduler/summary/promoted-allow-start.summary
+++ b/cts/scheduler/summary/promoted-allow-start.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ sles11-a sles11-b ]
 
@@ -12,6 +14,8 @@ Transition Summary:
 
 Executing Cluster Transition:
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ sles11-a sles11-b ]
diff --git a/cts/scheduler/summary/promoted-asymmetrical-order.summary b/cts/scheduler/summary/promoted-asymmetrical-order.summary
index 591ff18a04f..4ee4929597c 100644
--- a/cts/scheduler/summary/promoted-asymmetrical-order.summary
+++ b/cts/scheduler/summary/promoted-asymmetrical-order.summary
@@ -1,6 +1,9 @@
 2 of 4 resource instances DISABLED and 0 BLOCKED from further action due to failure
 
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -17,21 +20,18 @@ Transition Summary:
   * Stop rsc1:1 ( Unpromoted node2 ) due to node availability
 
 Executing Cluster Transition:
-  * Pseudo action: ms1_demote_0
-  * Resource action: rsc1:0 demote on node1
-  * Pseudo action: ms1_demoted_0
-  * Pseudo action: ms1_stop_0
-  * Resource action: rsc1:0 stop on node1
-  * Resource action: rsc1:1 stop on node2
-  * Pseudo action: ms1_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * Clone Set: ms1 [rsc1] (promotable, disabled):
-      * Stopped (disabled): [ node1 node2 ]
+      * Promoted: [ node1 ]
+      * Unpromoted: [ node2 ]
     * Clone Set: ms2 [rsc2] (promotable):
       * Promoted: [ node2 ]
       * Unpromoted: [ node1 ]
diff --git a/cts/scheduler/summary/promoted-colocation.summary b/cts/scheduler/summary/promoted-colocation.summary
index b3e776bcd9f..b351742471c 100644
--- a/cts/scheduler/summary/promoted-colocation.summary
+++ b/cts/scheduler/summary/promoted-colocation.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ box1 box2 ]
 
@@ -14,20 +16,16 @@ Transition Summary:
   * Promote conntrackd-stateful:1 ( Unpromoted -> Promoted box2 )
 
 Executing Cluster Transition:
-  * Resource action: conntrackd-stateful:0 monitor=29000 on box1
-  * Pseudo action: ms-conntrackd_promote_0
-  * Resource action: conntrackd-stateful:1 promote on box2
-  * Pseudo action: ms-conntrackd_promoted_0
-  * Resource action: conntrackd-stateful:1 monitor=30000 on box2
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ box1 box2 ]
 
   * Full List of Resources:
     * Clone Set: ms-conntrackd [conntrackd-stateful] (promotable):
-      * Promoted: [ box2 ]
-      * Unpromoted: [ box1 ]
+      * Unpromoted: [ box1 box2 ]
     * Resource Group: virtualips:
       * externalip (ocf:heartbeat:IPaddr2): Started box2
       * internalip (ocf:heartbeat:IPaddr2): Started box2
diff --git a/cts/scheduler/summary/promoted-demote-2.summary b/cts/scheduler/summary/promoted-demote-2.summary
index e371d3f1c16..f93d2ab23d9 100644
--- a/cts/scheduler/summary/promoted-demote-2.summary
+++ b/cts/scheduler/summary/promoted-demote-2.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
 
@@ -29,47 +31,27 @@ Transition Summary:
   * Promote stateful-1:1 ( Unpromoted -> Promoted pcmk-2 )
 
 Executing Cluster Transition:
-  * Resource action: stateful-1:0 cancel=15000 on pcmk-2
-  * Pseudo action: master-1_stop_0
-  * Resource action: stateful-1:1 stop on pcmk-1
-  * Pseudo action: master-1_stopped_0
-  * Pseudo action: master-1_start_0
-  * Resource action: stateful-1:1 start on pcmk-1
-  * Pseudo action: master-1_running_0
-  * Resource action: stateful-1:1 monitor=15000 on pcmk-1
-  * Pseudo action: master-1_promote_0
-  * Resource action: stateful-1:0 promote on pcmk-2
-  * Pseudo action: master-1_promoted_0
-  * Pseudo action: group-1_start_0
-  * Resource action: r192.168.122.105 start on pcmk-2
-  * Resource action: r192.168.122.106 start on pcmk-2
-  * Resource action: r192.168.122.107 start on pcmk-2
-  * Resource action: stateful-1:0 monitor=16000 on pcmk-2
-  * Pseudo action: group-1_running_0
-  * Resource action: r192.168.122.105 monitor=5000 on pcmk-2
-  * Resource action: r192.168.122.106 monitor=5000 on pcmk-2
-  * Resource action: r192.168.122.107 monitor=5000 on pcmk-2
-  * Resource action: lsb-dummy start on pcmk-2
-  * Resource action: lsb-dummy monitor=5000 on pcmk-2
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
 
   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Started pcmk-1
     * Resource Group: group-1:
-      * r192.168.122.105 (ocf:heartbeat:IPaddr): Started pcmk-2
-      * r192.168.122.106 (ocf:heartbeat:IPaddr): Started pcmk-2
-      * r192.168.122.107 (ocf:heartbeat:IPaddr): Started pcmk-2
+      * r192.168.122.105 (ocf:heartbeat:IPaddr): Stopped
+      * r192.168.122.106 (ocf:heartbeat:IPaddr): Stopped
+      * r192.168.122.107 (ocf:heartbeat:IPaddr): Stopped
     * rsc_pcmk-1 (ocf:heartbeat:IPaddr): Started pcmk-1
     * rsc_pcmk-2 (ocf:heartbeat:IPaddr): Started pcmk-2
     * rsc_pcmk-3 (ocf:heartbeat:IPaddr): Started pcmk-3
     * rsc_pcmk-4 (ocf:heartbeat:IPaddr): Started pcmk-4
-    * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-2
+    * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Stopped
     * migrator (ocf:pacemaker:Dummy): Started pcmk-4
     * Clone Set: Connectivity [ping-1]:
       * Started: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
     * Clone Set: master-1 [stateful-1] (promotable):
-      * Promoted: [ pcmk-2 ]
-      * Unpromoted: [ pcmk-1 pcmk-3 pcmk-4 ]
+      * stateful-1 (ocf:pacemaker:Stateful): FAILED pcmk-1
+      * Unpromoted: [ pcmk-2 pcmk-3 pcmk-4 ]
diff --git a/cts/scheduler/summary/promoted-demote-block.summary b/cts/scheduler/summary/promoted-demote-block.summary
index e4fc1006510..464c6f2b597 100644
--- a/cts/scheduler/summary/promoted-demote-block.summary
+++ b/cts/scheduler/summary/promoted-demote-block.summary
@@ -1,6 +1,9 @@
 0 of 2 resource instances DISABLED and 1 BLOCKED from further action due to failure
 
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Node dl380g5c: standby (with active resources)
     * Online: [ dl380g5d ]
@@ -13,9 +16,11 @@
Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: dummy:1 monitor=20000 on dl380g5d Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Node dl380g5c: standby (with active resources) * Online: [ dl380g5d ] diff --git a/cts/scheduler/summary/promoted-demote.summary b/cts/scheduler/summary/promoted-demote.summary index 3ba4985afdc..a6ab9e495f5 100644 --- a/cts/scheduler/summary/promoted-demote.summary +++ b/cts/scheduler/summary/promoted-demote.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ cxa1 cxb1 ] @@ -28,22 +30,10 @@ Transition Summary: * Promote named_drbd_node:1 ( Unpromoted -> Promoted cxb1 ) Executing Cluster Transition: - * Resource action: named_address stop on cxa1 - * Pseudo action: named_drbd_pre_notify_promote_0 - * Resource action: named_address start on cxb1 - * Resource action: named_drbd_node:1 notify on cxa1 - * Resource action: named_drbd_node:0 notify on cxb1 - * Pseudo action: named_drbd_confirmed-pre_notify_promote_0 - * Pseudo action: named_drbd_promote_0 - * Resource action: named_drbd_node:0 promote on cxb1 - * Pseudo action: named_drbd_promoted_0 - * Pseudo action: named_drbd_post_notify_promoted_0 - * Resource action: named_drbd_node:1 notify on cxa1 - * Resource action: named_drbd_node:0 notify on cxb1 - * Pseudo action: named_drbd_confirmed-post_notify_promoted_0 - * Resource action: named_drbd_node:0 monitor=10000 on cxb1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ cxa1 cxb1 ] @@ -56,14 +46,13 @@ Revised Cluster Status: * Clone Set: cyrus_drbd [cyrus_drbd_node] (promotable): * Promoted: [ cxa1 ] * Unpromoted: [ cxb1 ] - * named_address (ocf:heartbeat:IPaddr2): Started cxb1 + * named_address (ocf:heartbeat:IPaddr2): Started cxa1 * named_filesys (ocf:heartbeat:Filesystem): Stopped * named_volgroup (ocf:heartbeat:VolGroup): Stopped * named_daemon (ocf:heartbeat:recursor): Stopped * named_syslogd (ocf:heartbeat:syslogd): Stopped * Clone Set: named_drbd [named_drbd_node] (promotable): - * Promoted: [ cxb1 ] - * Unpromoted: [ cxa1 ] + * Unpromoted: [ cxa1 cxb1 ] * Clone Set: pingd_clone [pingd_node]: * Started: [ cxa1 cxb1 ] * Clone Set: fence_clone [fence_node]: diff --git a/cts/scheduler/summary/promoted-depend.summary b/cts/scheduler/summary/promoted-depend.summary index 3df262f90d5..bed66091d84 100644 --- a/cts/scheduler/summary/promoted-depend.summary +++ b/cts/scheduler/summary/promoted-depend.summary @@ -1,6 +1,8 @@ 3 of 10 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ vbox4 ] * OFFLINE: [ vbox3 ] @@ -22,38 +24,19 @@ Transition Summary: * Start cman:0 ( vbox4 ) Executing Cluster Transition: - * Resource action: drbd0:0 monitor on vbox4 - * Pseudo action: drbd_pre_notify_start_0 - * Resource action: cman:0 monitor on vbox4 - * Pseudo action: cman_clone_start_0 - * Resource action: clvmd:0 monitor on vbox4 - * Resource action: vmnci36 monitor on vbox4 - * Resource action: vmnci37 monitor on vbox4 - * Resource action: vmnci38 monitor on vbox4 - * Resource action: vmnci55 monitor on vbox4 - * Pseudo action: drbd_confirmed-pre_notify_start_0 - * Pseudo action: drbd_start_0 - * Resource action: cman:0 start on vbox4 - * Pseudo action: cman_clone_running_0 - * Resource action: drbd0:0 start on vbox4 - * Pseudo action: drbd_running_0 - * Pseudo action: 
drbd_post_notify_running_0 - * Resource action: drbd0:0 notify on vbox4 - * Pseudo action: drbd_confirmed-post_notify_running_0 - * Resource action: drbd0:0 monitor=60000 on vbox4 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ vbox4 ] * OFFLINE: [ vbox3 ] * Full List of Resources: * Clone Set: drbd [drbd0] (promotable): - * Unpromoted: [ vbox4 ] - * Stopped: [ vbox3 ] + * Stopped: [ vbox3 vbox4 ] * Clone Set: cman_clone [cman]: - * Started: [ vbox4 ] - * Stopped: [ vbox3 ] + * Stopped: [ vbox3 vbox4 ] * Clone Set: clvmd_clone [clvmd]: * Stopped: [ vbox3 vbox4 ] * vmnci36 (ocf:heartbeat:vm): Stopped diff --git a/cts/scheduler/summary/promoted-dependent-ban.summary b/cts/scheduler/summary/promoted-dependent-ban.summary index 2b24139acc5..2782609277d 100644 --- a/cts/scheduler/summary/promoted-dependent-ban.summary +++ b/cts/scheduler/summary/promoted-dependent-ban.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ c6 c7 c8 ] @@ -12,27 +14,14 @@ Transition Summary: * Start p_dtest1 ( c7 ) Executing Cluster Transition: - * Pseudo action: ms_drbd-dtest1_pre_notify_promote_0 - * Resource action: p_drbd-dtest1 notify on c7 - * Resource action: p_drbd-dtest1 notify on c6 - * Pseudo action: ms_drbd-dtest1_confirmed-pre_notify_promote_0 - * Pseudo action: ms_drbd-dtest1_promote_0 - * Resource action: p_drbd-dtest1 promote on c7 - * Pseudo action: ms_drbd-dtest1_promoted_0 - * Pseudo action: ms_drbd-dtest1_post_notify_promoted_0 - * Resource action: p_drbd-dtest1 notify on c7 - * Resource action: p_drbd-dtest1 notify on c6 - * Pseudo action: ms_drbd-dtest1_confirmed-post_notify_promoted_0 - * Resource action: p_dtest1 start on c7 - * Resource action: p_drbd-dtest1 monitor=10000 on c7 - * Resource action: p_drbd-dtest1 monitor=20000 on c6 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ c6 c7 c8 ] * Full List of Resources: * Clone Set: ms_drbd-dtest1 [p_drbd-dtest1] (promotable): - * Promoted: [ c7 ] - * Unpromoted: [ c6 ] - * p_dtest1 (ocf:heartbeat:Dummy): Started c7 + * Unpromoted: [ c6 c7 ] + * p_dtest1 (ocf:heartbeat:Dummy): Stopped diff --git a/cts/scheduler/summary/promoted-failed-demote-2.summary b/cts/scheduler/summary/promoted-failed-demote-2.summary index 3f317fabeaa..d4a7c45183a 100644 --- a/cts/scheduler/summary/promoted-failed-demote-2.summary +++ b/cts/scheduler/summary/promoted-failed-demote-2.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ dl380g5a dl380g5b ] @@ -15,33 +18,23 @@ Transition Summary: * Stop stateful-1:0 ( Unpromoted dl380g5b ) due to node availability * Promote stateful-1:1 ( Unpromoted -> Promoted dl380g5a ) * Promote stateful-2:1 ( Unpromoted -> Promoted dl380g5a ) +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Resource action: stateful-1:1 cancel=20000 on dl380g5a - * Resource action: stateful-2:1 cancel=20000 on dl380g5a - * Pseudo action: ms-sf_stop_0 - * Pseudo action: group:0_stop_0 - * Resource action: stateful-1:0 stop on dl380g5b - * Pseudo action: group:0_stopped_0 - * Pseudo action: ms-sf_stopped_0 - * Pseudo action: ms-sf_promote_0 - * Pseudo action: group:1_promote_0 - * Resource action: stateful-1:1 promote on dl380g5a - * Resource action: stateful-2:1 promote on dl380g5a - * Pseudo action: group:1_promoted_0 - * Resource action: stateful-1:1 monitor=10000 on dl380g5a - * Resource action: stateful-2:1 
monitor=10000 on dl380g5a - * Pseudo action: ms-sf_promoted_0 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ dl380g5a dl380g5b ] * Full List of Resources: * Clone Set: ms-sf [group] (promotable, unique): * Resource Group: group:0: - * stateful-1:0 (ocf:heartbeat:Stateful): Stopped + * stateful-1:0 (ocf:heartbeat:Stateful): FAILED dl380g5b * stateful-2:0 (ocf:heartbeat:Stateful): Stopped * Resource Group: group:1: - * stateful-1:1 (ocf:heartbeat:Stateful): Promoted dl380g5a - * stateful-2:1 (ocf:heartbeat:Stateful): Promoted dl380g5a + * stateful-1:1 (ocf:heartbeat:Stateful): Unpromoted dl380g5a + * stateful-2:1 (ocf:heartbeat:Stateful): Unpromoted dl380g5a diff --git a/cts/scheduler/summary/promoted-failed-demote.summary b/cts/scheduler/summary/promoted-failed-demote.summary index 70b3e1b2cff..d4a7c45183a 100644 --- a/cts/scheduler/summary/promoted-failed-demote.summary +++ b/cts/scheduler/summary/promoted-failed-demote.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ dl380g5a dl380g5b ] @@ -15,50 +18,23 @@ Transition Summary: * Stop stateful-1:0 ( Unpromoted dl380g5b ) due to node availability * Promote stateful-1:1 ( Unpromoted -> Promoted dl380g5a ) * Promote stateful-2:1 ( Unpromoted -> Promoted dl380g5a ) +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Resource action: stateful-1:1 cancel=20000 on dl380g5a - * Resource action: stateful-2:1 cancel=20000 on dl380g5a - * Pseudo action: ms-sf_pre_notify_stop_0 - * Resource action: stateful-1:0 notify on dl380g5b - * Resource action: stateful-1:1 notify on dl380g5a - * Resource action: stateful-2:1 notify on dl380g5a - * Pseudo action: ms-sf_confirmed-pre_notify_stop_0 - * Pseudo action: ms-sf_stop_0 - * Pseudo action: group:0_stop_0 - * Resource action: stateful-1:0 stop on dl380g5b - * Pseudo action: group:0_stopped_0 - * Pseudo action: ms-sf_stopped_0 - * Pseudo action: ms-sf_post_notify_stopped_0 - * Resource action: stateful-1:1 notify on dl380g5a - * Resource action: stateful-2:1 notify on dl380g5a - * Pseudo action: ms-sf_confirmed-post_notify_stopped_0 - * Pseudo action: ms-sf_pre_notify_promote_0 - * Resource action: stateful-1:1 notify on dl380g5a - * Resource action: stateful-2:1 notify on dl380g5a - * Pseudo action: ms-sf_confirmed-pre_notify_promote_0 - * Pseudo action: ms-sf_promote_0 - * Pseudo action: group:1_promote_0 - * Resource action: stateful-1:1 promote on dl380g5a - * Resource action: stateful-2:1 promote on dl380g5a - * Pseudo action: group:1_promoted_0 - * Pseudo action: ms-sf_promoted_0 - * Pseudo action: ms-sf_post_notify_promoted_0 - * Resource action: stateful-1:1 notify on dl380g5a - * Resource action: stateful-2:1 notify on dl380g5a - * Pseudo action: ms-sf_confirmed-post_notify_promoted_0 - * Resource action: stateful-1:1 monitor=10000 on dl380g5a - * Resource action: stateful-2:1 monitor=10000 on dl380g5a Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ dl380g5a dl380g5b ] * Full List of Resources: * Clone Set: ms-sf [group] (promotable, unique): * Resource Group: group:0: - * stateful-1:0 (ocf:heartbeat:Stateful): Stopped + * stateful-1:0 (ocf:heartbeat:Stateful): FAILED dl380g5b * stateful-2:0 (ocf:heartbeat:Stateful): Stopped * Resource Group: group:1: - * stateful-1:1 
(ocf:heartbeat:Stateful): Promoted dl380g5a - * stateful-2:1 (ocf:heartbeat:Stateful): Promoted dl380g5a + * stateful-1:1 (ocf:heartbeat:Stateful): Unpromoted dl380g5a + * stateful-2:1 (ocf:heartbeat:Stateful): Unpromoted dl380g5a diff --git a/cts/scheduler/summary/promoted-group.summary b/cts/scheduler/summary/promoted-group.summary index 44b380c25b7..2ab25904a7a 100644 --- a/cts/scheduler/summary/promoted-group.summary +++ b/cts/scheduler/summary/promoted-group.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ rh44-1 rh44-2 ] @@ -15,15 +18,11 @@ Transition Summary: * Promote promotable_Stateful:1 ( Unpromoted -> Promoted rh44-1 ) Executing Cluster Transition: - * Resource action: promotable_Stateful:1 cancel=5000 on rh44-1 - * Pseudo action: ms-sf_promote_0 - * Pseudo action: grp_ms_sf:1_promote_0 - * Resource action: promotable_Stateful:1 promote on rh44-1 - * Pseudo action: grp_ms_sf:1_promoted_0 - * Resource action: promotable_Stateful:1 monitor=6000 on rh44-1 - * Pseudo action: ms-sf_promoted_0 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ rh44-1 rh44-2 ] @@ -34,4 +33,4 @@ Revised Cluster Status: * Resource Group: grp_ms_sf:0: * promotable_Stateful:0 (ocf:heartbeat:Stateful): Unpromoted rh44-2 * Resource Group: grp_ms_sf:1: - * promotable_Stateful:1 (ocf:heartbeat:Stateful): Promoted rh44-1 + * promotable_Stateful:1 (ocf:heartbeat:Stateful): Unpromoted rh44-1 diff --git a/cts/scheduler/summary/promoted-move.summary b/cts/scheduler/summary/promoted-move.summary index 4782edb5510..7b3f7bf9bca 100644 --- a/cts/scheduler/summary/promoted-move.summary +++ b/cts/scheduler/summary/promoted-move.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ bl460g1n13 bl460g1n14 ] @@ -19,54 +21,18 @@ Transition Summary: * Promote prmDRBD:1 ( Unpromoted -> Promoted bl460g1n14 ) Executing Cluster Transition: - * Pseudo action: grpDRBD_stop_0 - * Resource action: dummy02 stop on bl460g1n13 - * Resource action: prmDRBD:0 cancel=10000 on bl460g1n13 - * Resource action: prmDRBD:1 cancel=20000 on bl460g1n14 - * Pseudo action: msDRBD_pre_notify_demote_0 - * Resource action: dummy01 stop on bl460g1n13 - * Resource action: prmDRBD:0 notify on bl460g1n13 - * Resource action: prmDRBD:1 notify on bl460g1n14 - * Pseudo action: msDRBD_confirmed-pre_notify_demote_0 - * Pseudo action: grpDRBD_stopped_0 - * Pseudo action: msDRBD_demote_0 - * Resource action: prmDRBD:0 demote on bl460g1n13 - * Pseudo action: msDRBD_demoted_0 - * Pseudo action: msDRBD_post_notify_demoted_0 - * Resource action: prmDRBD:0 notify on bl460g1n13 - * Resource action: prmDRBD:1 notify on bl460g1n14 - * Pseudo action: msDRBD_confirmed-post_notify_demoted_0 - * Pseudo action: msDRBD_pre_notify_promote_0 - * Resource action: prmDRBD:0 notify on bl460g1n13 - * Resource action: prmDRBD:1 notify on bl460g1n14 - * Pseudo action: msDRBD_confirmed-pre_notify_promote_0 - * Pseudo action: msDRBD_promote_0 - * Resource action: prmDRBD:1 promote on bl460g1n14 - * Pseudo action: msDRBD_promoted_0 - * Pseudo action: msDRBD_post_notify_promoted_0 - * Resource action: prmDRBD:0 notify on bl460g1n13 - * Resource action: prmDRBD:1 notify on bl460g1n14 - * Pseudo action: msDRBD_confirmed-post_notify_promoted_0 - * Pseudo action: grpDRBD_start_0 - * Resource action: dummy01 start on bl460g1n14 - * Resource action: dummy02 start on 
bl460g1n14 - * Resource action: dummy03 start on bl460g1n14 - * Resource action: prmDRBD:0 monitor=20000 on bl460g1n13 - * Resource action: prmDRBD:1 monitor=10000 on bl460g1n14 - * Pseudo action: grpDRBD_running_0 - * Resource action: dummy01 monitor=10000 on bl460g1n14 - * Resource action: dummy02 monitor=10000 on bl460g1n14 - * Resource action: dummy03 monitor=10000 on bl460g1n14 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ bl460g1n13 bl460g1n14 ] * Full List of Resources: * Resource Group: grpDRBD: - * dummy01 (ocf:pacemaker:Dummy): Started bl460g1n14 - * dummy02 (ocf:pacemaker:Dummy): Started bl460g1n14 - * dummy03 (ocf:pacemaker:Dummy): Started bl460g1n14 + * dummy01 (ocf:pacemaker:Dummy): FAILED bl460g1n13 + * dummy02 (ocf:pacemaker:Dummy): Started bl460g1n13 + * dummy03 (ocf:pacemaker:Dummy): Stopped * Clone Set: msDRBD [prmDRBD] (promotable): - * Promoted: [ bl460g1n14 ] - * Unpromoted: [ bl460g1n13 ] + * Promoted: [ bl460g1n13 ] + * Unpromoted: [ bl460g1n14 ] diff --git a/cts/scheduler/summary/promoted-notify.summary b/cts/scheduler/summary/promoted-notify.summary index f0fb04027d8..5cbf7dea917 100644 --- a/cts/scheduler/summary/promoted-notify.summary +++ b/cts/scheduler/summary/promoted-notify.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] @@ -11,26 +14,15 @@ Transition Summary: * Promote fake:0 ( Unpromoted -> Promoted rhel7-auto1 ) Executing Cluster Transition: - * Pseudo action: fake-master_pre_notify_promote_0 - * Resource action: fake notify on rhel7-auto1 - * Resource action: fake notify on rhel7-auto3 - * Resource action: fake notify on rhel7-auto2 - * Pseudo action: fake-master_confirmed-pre_notify_promote_0 - * Pseudo action: fake-master_promote_0 - * Resource action: fake promote on rhel7-auto1 - * Pseudo action: fake-master_promoted_0 - * Pseudo action: fake-master_post_notify_promoted_0 - * Resource action: fake notify on rhel7-auto1 - * Resource action: fake notify on rhel7-auto3 - * Resource action: fake notify on rhel7-auto2 - * Pseudo action: fake-master_confirmed-post_notify_promoted_0 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] * Full List of Resources: * shooter (stonith:fence_xvm): Started rhel7-auto1 * Clone Set: fake-master [fake] (promotable): - * Promoted: [ rhel7-auto1 ] - * Unpromoted: [ rhel7-auto2 rhel7-auto3 ] + * Unpromoted: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] diff --git a/cts/scheduler/summary/promoted-ordering.summary b/cts/scheduler/summary/promoted-ordering.summary index 0ef1bd89e86..3bf881a2dd4 100644 --- a/cts/scheduler/summary/promoted-ordering.summary +++ b/cts/scheduler/summary/promoted-ordering.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ webcluster01 ] * OFFLINE: [ webcluster02 ] @@ -31,66 +34,30 @@ Transition Summary: * Start drbd_mysql:0 ( webcluster01 ) Executing Cluster Transition: - * Resource action: mysql-server monitor on webcluster01 - * Resource action: extip_1 monitor on webcluster01 - * Resource action: extip_2 monitor on webcluster01 - * Resource action: intip_0_main monitor on webcluster01 - * Resource action: intip_1_active monitor on webcluster01 - * Resource action: intip_2_passive monitor on webcluster01 - * 
Resource action: drbd_www:0 monitor on webcluster01 - * Pseudo action: ms_drbd_www_pre_notify_start_0 - * Resource action: ocfs2_www:0 monitor on webcluster01 - * Resource action: ocfs2_www:1 monitor on webcluster01 - * Resource action: apache2:0 monitor on webcluster01 - * Resource action: mysql-proxy:0 monitor on webcluster01 - * Resource action: drbd_mysql:0 monitor on webcluster01 - * Pseudo action: ms_drbd_mysql_pre_notify_start_0 - * Resource action: fs_mysql monitor on webcluster01 - * Resource action: extip_1 start on webcluster01 - * Resource action: extip_2 start on webcluster01 - * Resource action: intip_1_active start on webcluster01 - * Resource action: intip_2_passive start on webcluster01 - * Pseudo action: ms_drbd_www_confirmed-pre_notify_start_0 - * Pseudo action: ms_drbd_www_start_0 - * Pseudo action: ms_drbd_mysql_confirmed-pre_notify_start_0 - * Pseudo action: ms_drbd_mysql_start_0 - * Resource action: extip_1 monitor=30000 on webcluster01 - * Resource action: extip_2 monitor=30000 on webcluster01 - * Resource action: intip_1_active monitor=30000 on webcluster01 - * Resource action: intip_2_passive monitor=30000 on webcluster01 - * Resource action: drbd_www:0 start on webcluster01 - * Pseudo action: ms_drbd_www_running_0 - * Resource action: drbd_mysql:0 start on webcluster01 - * Pseudo action: ms_drbd_mysql_running_0 - * Pseudo action: ms_drbd_www_post_notify_running_0 - * Pseudo action: ms_drbd_mysql_post_notify_running_0 - * Resource action: drbd_www:0 notify on webcluster01 - * Pseudo action: ms_drbd_www_confirmed-post_notify_running_0 - * Resource action: drbd_mysql:0 notify on webcluster01 - * Pseudo action: ms_drbd_mysql_confirmed-post_notify_running_0 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ webcluster01 ] * OFFLINE: [ webcluster02 ] * Full List of Resources: * mysql-server (ocf:heartbeat:mysql): Stopped - * extip_1 (ocf:heartbeat:IPaddr2): Started webcluster01 - * extip_2 (ocf:heartbeat:IPaddr2): Started webcluster01 + * extip_1 (ocf:heartbeat:IPaddr2): Stopped + * extip_2 (ocf:heartbeat:IPaddr2): Stopped * Resource Group: group_main: * intip_0_main (ocf:heartbeat:IPaddr2): Stopped - * intip_1_active (ocf:heartbeat:IPaddr2): Started webcluster01 - * intip_2_passive (ocf:heartbeat:IPaddr2): Started webcluster01 + * intip_1_active (ocf:heartbeat:IPaddr2): Stopped + * intip_2_passive (ocf:heartbeat:IPaddr2): Stopped * Clone Set: ms_drbd_www [drbd_www] (promotable): - * Unpromoted: [ webcluster01 ] - * Stopped: [ webcluster02 ] + * Stopped: [ webcluster01 webcluster02 ] * Clone Set: clone_ocfs2_www [ocfs2_www] (unique): * ocfs2_www:0 (ocf:heartbeat:Filesystem): Stopped * ocfs2_www:1 (ocf:heartbeat:Filesystem): Stopped * Clone Set: clone_webservice [group_webservice]: * Stopped: [ webcluster01 webcluster02 ] * Clone Set: ms_drbd_mysql [drbd_mysql] (promotable): - * Unpromoted: [ webcluster01 ] - * Stopped: [ webcluster02 ] + * Stopped: [ webcluster01 webcluster02 ] * fs_mysql (ocf:heartbeat:Filesystem): Stopped diff --git a/cts/scheduler/summary/promoted-partially-demoted-group.summary b/cts/scheduler/summary/promoted-partially-demoted-group.summary index b85c805711d..30ced6cd28d 100644 --- a/cts/scheduler/summary/promoted-partially-demoted-group.summary +++ b/cts/scheduler/summary/promoted-partially-demoted-group.summary @@ -1,13 +1,22 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ sd01-0 
sd01-1 ] * Full List of Resources: - * stonith-xvm-sd01-0 (stonith:fence_xvm): Started sd01-1 - * stonith-xvm-sd01-1 (stonith:fence_xvm): Started sd01-0 + * Resource Group: cdev-pool-0-iscsi-vips: + * vip-164 (ocf:heartbeat:IPaddr2): Started sd01-1 + * vip-165 (ocf:heartbeat:IPaddr2): Started sd01-1 * Resource Group: cdev-pool-0-iscsi-export: * cdev-pool-0-iscsi-target (ocf:vds-ok:iSCSITarget): Started sd01-1 * cdev-pool-0-iscsi-lun-1 (ocf:vds-ok:iSCSILogicalUnit): Started sd01-1 + * Clone Set: ms-cdev-pool-0-iscsi-vips-fw [cdev-pool-0-iscsi-vips-fw] (promotable): + * Promoted: [ sd01-1 ] + * Unpromoted: [ sd01-0 ] + * stonith-xvm-sd01-1 (stonith:fence_xvm): Started sd01-0 + * stonith-xvm-sd01-0 (stonith:fence_xvm): Started sd01-1 * Clone Set: ms-cdev-pool-0-drbd [cdev-pool-0-drbd] (promotable): * Promoted: [ sd01-1 ] * Unpromoted: [ sd01-0 ] @@ -15,14 +24,10 @@ Current cluster status: * Started: [ sd01-0 sd01-1 ] * Clone Set: cl-vlan1-net [vlan1-net]: * Started: [ sd01-0 sd01-1 ] - * Resource Group: cdev-pool-0-iscsi-vips: - * vip-164 (ocf:heartbeat:IPaddr2): Started sd01-1 - * vip-165 (ocf:heartbeat:IPaddr2): Started sd01-1 - * Clone Set: ms-cdev-pool-0-iscsi-vips-fw [cdev-pool-0-iscsi-vips-fw] (promotable): - * Promoted: [ sd01-1 ] - * Unpromoted: [ sd01-0 ] Transition Summary: + * Demote cdev-pool-0-drbd:0 ( Promoted -> Unpromoted sd01-1 ) + * Promote cdev-pool-0-drbd:1 ( Unpromoted -> Promoted sd01-0 ) * Move vip-164 ( sd01-1 -> sd01-0 ) * Move vip-165 ( sd01-1 -> sd01-0 ) * Move cdev-pool-0-iscsi-target ( sd01-1 -> sd01-0 ) @@ -30,89 +35,32 @@ Transition Summary: * Demote vip-164-fw:0 ( Promoted -> Unpromoted sd01-1 ) * Promote vip-164-fw:1 ( Unpromoted -> Promoted sd01-0 ) * Promote vip-165-fw:1 ( Unpromoted -> Promoted sd01-0 ) - * Demote cdev-pool-0-drbd:0 ( Promoted -> Unpromoted sd01-1 ) - * Promote cdev-pool-0-drbd:1 ( Unpromoted -> Promoted sd01-0 ) Executing Cluster Transition: - * Resource action: vip-165-fw monitor=10000 on sd01-1 - * Pseudo action: ms-cdev-pool-0-iscsi-vips-fw_demote_0 - * Pseudo action: ms-cdev-pool-0-drbd_pre_notify_demote_0 - * Pseudo action: cdev-pool-0-iscsi-vips-fw:0_demote_0 - * Resource action: vip-164-fw demote on sd01-1 - * Resource action: cdev-pool-0-drbd notify on sd01-1 - * Resource action: cdev-pool-0-drbd notify on sd01-0 - * Pseudo action: ms-cdev-pool-0-drbd_confirmed-pre_notify_demote_0 - * Pseudo action: cdev-pool-0-iscsi-vips-fw:0_demoted_0 - * Resource action: vip-164-fw monitor=10000 on sd01-1 - * Pseudo action: ms-cdev-pool-0-iscsi-vips-fw_demoted_0 - * Pseudo action: cdev-pool-0-iscsi-vips_stop_0 - * Resource action: vip-165 stop on sd01-1 - * Resource action: vip-164 stop on sd01-1 - * Pseudo action: cdev-pool-0-iscsi-vips_stopped_0 - * Pseudo action: cdev-pool-0-iscsi-export_stop_0 - * Resource action: cdev-pool-0-iscsi-lun-1 stop on sd01-1 - * Resource action: cdev-pool-0-iscsi-target stop on sd01-1 - * Pseudo action: cdev-pool-0-iscsi-export_stopped_0 - * Pseudo action: ms-cdev-pool-0-drbd_demote_0 - * Resource action: cdev-pool-0-drbd demote on sd01-1 - * Pseudo action: ms-cdev-pool-0-drbd_demoted_0 - * Pseudo action: ms-cdev-pool-0-drbd_post_notify_demoted_0 - * Resource action: cdev-pool-0-drbd notify on sd01-1 - * Resource action: cdev-pool-0-drbd notify on sd01-0 - * Pseudo action: ms-cdev-pool-0-drbd_confirmed-post_notify_demoted_0 - * Pseudo action: ms-cdev-pool-0-drbd_pre_notify_promote_0 - * Resource action: cdev-pool-0-drbd notify on sd01-1 - * Resource action: cdev-pool-0-drbd notify on sd01-0 - * Pseudo action: 
ms-cdev-pool-0-drbd_confirmed-pre_notify_promote_0 - * Pseudo action: ms-cdev-pool-0-drbd_promote_0 - * Resource action: cdev-pool-0-drbd promote on sd01-0 - * Pseudo action: ms-cdev-pool-0-drbd_promoted_0 - * Pseudo action: ms-cdev-pool-0-drbd_post_notify_promoted_0 - * Resource action: cdev-pool-0-drbd notify on sd01-1 - * Resource action: cdev-pool-0-drbd notify on sd01-0 - * Pseudo action: ms-cdev-pool-0-drbd_confirmed-post_notify_promoted_0 - * Pseudo action: cdev-pool-0-iscsi-export_start_0 - * Resource action: cdev-pool-0-iscsi-target start on sd01-0 - * Resource action: cdev-pool-0-iscsi-lun-1 start on sd01-0 - * Resource action: cdev-pool-0-drbd monitor=20000 on sd01-1 - * Resource action: cdev-pool-0-drbd monitor=10000 on sd01-0 - * Pseudo action: cdev-pool-0-iscsi-export_running_0 - * Resource action: cdev-pool-0-iscsi-target monitor=10000 on sd01-0 - * Resource action: cdev-pool-0-iscsi-lun-1 monitor=10000 on sd01-0 - * Pseudo action: cdev-pool-0-iscsi-vips_start_0 - * Resource action: vip-164 start on sd01-0 - * Resource action: vip-165 start on sd01-0 - * Pseudo action: cdev-pool-0-iscsi-vips_running_0 - * Resource action: vip-164 monitor=30000 on sd01-0 - * Resource action: vip-165 monitor=30000 on sd01-0 - * Pseudo action: ms-cdev-pool-0-iscsi-vips-fw_promote_0 - * Pseudo action: cdev-pool-0-iscsi-vips-fw:0_promote_0 - * Pseudo action: cdev-pool-0-iscsi-vips-fw:1_promote_0 - * Resource action: vip-164-fw promote on sd01-0 - * Resource action: vip-165-fw promote on sd01-0 - * Pseudo action: cdev-pool-0-iscsi-vips-fw:1_promoted_0 - * Pseudo action: ms-cdev-pool-0-iscsi-vips-fw_promoted_0 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ sd01-0 sd01-1 ] * Full List of Resources: - * stonith-xvm-sd01-0 (stonith:fence_xvm): Started sd01-1 - * stonith-xvm-sd01-1 (stonith:fence_xvm): Started sd01-0 + * Resource Group: cdev-pool-0-iscsi-vips: + * vip-164 (ocf:heartbeat:IPaddr2): Started sd01-1 + * vip-165 (ocf:heartbeat:IPaddr2): Started sd01-1 * Resource Group: cdev-pool-0-iscsi-export: - * cdev-pool-0-iscsi-target (ocf:vds-ok:iSCSITarget): Started sd01-0 - * cdev-pool-0-iscsi-lun-1 (ocf:vds-ok:iSCSILogicalUnit): Started sd01-0 + * cdev-pool-0-iscsi-target (ocf:vds-ok:iSCSITarget): Started sd01-1 + * cdev-pool-0-iscsi-lun-1 (ocf:vds-ok:iSCSILogicalUnit): Started sd01-1 + * Clone Set: ms-cdev-pool-0-iscsi-vips-fw [cdev-pool-0-iscsi-vips-fw] (promotable): + * Promoted: [ sd01-1 ] + * Unpromoted: [ sd01-0 ] + * stonith-xvm-sd01-1 (stonith:fence_xvm): Started sd01-0 + * stonith-xvm-sd01-0 (stonith:fence_xvm): Started sd01-1 * Clone Set: ms-cdev-pool-0-drbd [cdev-pool-0-drbd] (promotable): - * Promoted: [ sd01-0 ] - * Unpromoted: [ sd01-1 ] + * Promoted: [ sd01-1 ] + * Unpromoted: [ sd01-0 ] * Clone Set: cl-ietd [ietd]: * Started: [ sd01-0 sd01-1 ] * Clone Set: cl-vlan1-net [vlan1-net]: * Started: [ sd01-0 sd01-1 ] - * Resource Group: cdev-pool-0-iscsi-vips: - * vip-164 (ocf:heartbeat:IPaddr2): Started sd01-0 - * vip-165 (ocf:heartbeat:IPaddr2): Started sd01-0 - * Clone Set: ms-cdev-pool-0-iscsi-vips-fw [cdev-pool-0-iscsi-vips-fw] (promotable): - * Promoted: [ sd01-0 ] - * Unpromoted: [ sd01-1 ] diff --git a/cts/scheduler/summary/promoted-probed-score.summary b/cts/scheduler/summary/promoted-probed-score.summary index 52487d48a19..9e314d8a5ff 100644 --- a/cts/scheduler/summary/promoted-probed-score.summary +++ b/cts/scheduler/summary/promoted-probed-score.summary @@ -1,6 +1,8 @@ 1 of 60 resource instances 
DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] @@ -99,231 +101,43 @@ Transition Summary: * Start Proxy ( orestes-corosync.nevis.columbia.edu ) Executing Cluster Transition: - * Pseudo action: AdminClone_pre_notify_start_0 - * Resource action: StonithHypatia start on orestes-corosync.nevis.columbia.edu - * Resource action: StonithOrestes start on hypatia-corosync.nevis.columbia.edu - * Resource action: SymlinkEtcLibvirt:0 monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: Libvirtd:0 monitor on orestes-corosync.nevis.columbia.edu - * Resource action: Libvirtd:0 monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: SymlinkTftp:0 monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: Xinetd:0 monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: SymlinkTftp:1 monitor on orestes-corosync.nevis.columbia.edu - * Resource action: Xinetd:1 monitor on orestes-corosync.nevis.columbia.edu - * Resource action: ExportMail:0 monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: ExportMailInbox:0 monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: ExportMailFolders:0 monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: ExportMailForward:0 monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: ExportMailProcmailrc:0 monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: ExportUsrNevis:0 monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: ExportUsrNevisOffsite:0 monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: ExportWWW:0 monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: ExportMail:1 monitor on orestes-corosync.nevis.columbia.edu - * Resource action: ExportMailInbox:1 monitor on orestes-corosync.nevis.columbia.edu - * Resource action: ExportMailFolders:1 monitor on orestes-corosync.nevis.columbia.edu - * Resource action: ExportMailForward:1 monitor on orestes-corosync.nevis.columbia.edu - * Resource action: ExportMailProcmailrc:1 monitor on orestes-corosync.nevis.columbia.edu - * Resource action: ExportUsrNevis:1 monitor on orestes-corosync.nevis.columbia.edu - * Resource action: ExportUsrNevisOffsite:1 monitor on orestes-corosync.nevis.columbia.edu - * Resource action: ExportWWW:1 monitor on orestes-corosync.nevis.columbia.edu - * Resource action: AdminLvm:0 monitor on orestes-corosync.nevis.columbia.edu - * Resource action: FSUsrNevis:0 monitor on orestes-corosync.nevis.columbia.edu - * Resource action: FSVarNevis:0 monitor on orestes-corosync.nevis.columbia.edu - * Resource action: FSVirtualMachines:0 monitor on orestes-corosync.nevis.columbia.edu - * Resource action: FSMail:0 monitor on orestes-corosync.nevis.columbia.edu - * Resource action: FSWork:0 monitor on orestes-corosync.nevis.columbia.edu - * Resource action: AdminLvm:1 monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: FSUsrNevis:1 monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: FSVarNevis:1 monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: FSVirtualMachines:1 monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: FSMail:1 monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: FSWork:1 monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: KVM-guest monitor on orestes-corosync.nevis.columbia.edu - * Resource action: 
KVM-guest monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: Proxy monitor on orestes-corosync.nevis.columbia.edu - * Resource action: Proxy monitor on hypatia-corosync.nevis.columbia.edu - * Pseudo action: AdminClone_confirmed-pre_notify_start_0 - * Pseudo action: AdminClone_start_0 - * Resource action: AdminDrbd:0 start on orestes-corosync.nevis.columbia.edu - * Resource action: AdminDrbd:1 start on hypatia-corosync.nevis.columbia.edu - * Pseudo action: AdminClone_running_0 - * Pseudo action: AdminClone_post_notify_running_0 - * Resource action: AdminDrbd:0 notify on orestes-corosync.nevis.columbia.edu - * Resource action: AdminDrbd:1 notify on hypatia-corosync.nevis.columbia.edu - * Pseudo action: AdminClone_confirmed-post_notify_running_0 - * Pseudo action: AdminClone_pre_notify_promote_0 - * Resource action: AdminDrbd:0 notify on orestes-corosync.nevis.columbia.edu - * Resource action: AdminDrbd:1 notify on hypatia-corosync.nevis.columbia.edu - * Pseudo action: AdminClone_confirmed-pre_notify_promote_0 - * Pseudo action: AdminClone_promote_0 - * Resource action: AdminDrbd:0 promote on orestes-corosync.nevis.columbia.edu - * Resource action: AdminDrbd:1 promote on hypatia-corosync.nevis.columbia.edu - * Pseudo action: AdminClone_promoted_0 - * Pseudo action: AdminClone_post_notify_promoted_0 - * Resource action: AdminDrbd:0 notify on orestes-corosync.nevis.columbia.edu - * Resource action: AdminDrbd:1 notify on hypatia-corosync.nevis.columbia.edu - * Pseudo action: AdminClone_confirmed-post_notify_promoted_0 - * Pseudo action: FilesystemClone_start_0 - * Resource action: AdminDrbd:0 monitor=59000 on orestes-corosync.nevis.columbia.edu - * Resource action: AdminDrbd:1 monitor=59000 on hypatia-corosync.nevis.columbia.edu - * Pseudo action: FilesystemGroup:0_start_0 - * Resource action: AdminLvm:0 start on orestes-corosync.nevis.columbia.edu - * Resource action: FSUsrNevis:0 start on orestes-corosync.nevis.columbia.edu - * Resource action: FSVarNevis:0 start on orestes-corosync.nevis.columbia.edu - * Resource action: FSVirtualMachines:0 start on orestes-corosync.nevis.columbia.edu - * Resource action: FSMail:0 start on orestes-corosync.nevis.columbia.edu - * Resource action: FSWork:0 start on orestes-corosync.nevis.columbia.edu - * Pseudo action: FilesystemGroup:1_start_0 - * Resource action: AdminLvm:1 start on hypatia-corosync.nevis.columbia.edu - * Resource action: FSUsrNevis:1 start on hypatia-corosync.nevis.columbia.edu - * Resource action: FSVarNevis:1 start on hypatia-corosync.nevis.columbia.edu - * Resource action: FSVirtualMachines:1 start on hypatia-corosync.nevis.columbia.edu - * Resource action: FSMail:1 start on hypatia-corosync.nevis.columbia.edu - * Resource action: FSWork:1 start on hypatia-corosync.nevis.columbia.edu - * Pseudo action: FilesystemGroup:0_running_0 - * Resource action: AdminLvm:0 monitor=30000 on orestes-corosync.nevis.columbia.edu - * Resource action: FSUsrNevis:0 monitor=20000 on orestes-corosync.nevis.columbia.edu - * Resource action: FSVarNevis:0 monitor=20000 on orestes-corosync.nevis.columbia.edu - * Resource action: FSVirtualMachines:0 monitor=20000 on orestes-corosync.nevis.columbia.edu - * Resource action: FSMail:0 monitor=20000 on orestes-corosync.nevis.columbia.edu - * Resource action: FSWork:0 monitor=20000 on orestes-corosync.nevis.columbia.edu - * Pseudo action: FilesystemGroup:1_running_0 - * Resource action: AdminLvm:1 monitor=30000 on hypatia-corosync.nevis.columbia.edu - * Resource action: FSUsrNevis:1 monitor=20000 on 
hypatia-corosync.nevis.columbia.edu - * Resource action: FSVarNevis:1 monitor=20000 on hypatia-corosync.nevis.columbia.edu - * Resource action: FSVirtualMachines:1 monitor=20000 on hypatia-corosync.nevis.columbia.edu - * Resource action: FSMail:1 monitor=20000 on hypatia-corosync.nevis.columbia.edu - * Resource action: FSWork:1 monitor=20000 on hypatia-corosync.nevis.columbia.edu - * Pseudo action: FilesystemClone_running_0 - * Resource action: CronAmbientTemperature start on hypatia-corosync.nevis.columbia.edu - * Pseudo action: DhcpGroup_start_0 - * Resource action: SymlinkDhcpdConf start on orestes-corosync.nevis.columbia.edu - * Resource action: SymlinkSysconfigDhcpd start on orestes-corosync.nevis.columbia.edu - * Resource action: SymlinkDhcpdLeases start on orestes-corosync.nevis.columbia.edu - * Pseudo action: CupsClone_start_0 - * Pseudo action: IPClone_start_0 - * Pseudo action: LibvirtdClone_start_0 - * Pseudo action: TftpClone_start_0 - * Pseudo action: ExportsClone_start_0 - * Resource action: CronAmbientTemperature monitor=60000 on hypatia-corosync.nevis.columbia.edu - * Resource action: SymlinkDhcpdConf monitor=60000 on orestes-corosync.nevis.columbia.edu - * Resource action: SymlinkSysconfigDhcpd monitor=60000 on orestes-corosync.nevis.columbia.edu - * Resource action: SymlinkDhcpdLeases monitor=60000 on orestes-corosync.nevis.columbia.edu - * Pseudo action: CupsGroup:0_start_0 - * Resource action: SymlinkUsrShareCups:0 start on hypatia-corosync.nevis.columbia.edu - * Resource action: SymlinkCupsdConf:0 start on hypatia-corosync.nevis.columbia.edu - * Resource action: Cups:0 start on hypatia-corosync.nevis.columbia.edu - * Pseudo action: CupsGroup:1_start_0 - * Resource action: SymlinkUsrShareCups:1 start on orestes-corosync.nevis.columbia.edu - * Resource action: SymlinkCupsdConf:1 start on orestes-corosync.nevis.columbia.edu - * Resource action: Cups:1 start on orestes-corosync.nevis.columbia.edu - * Pseudo action: IPGroup:0_start_0 - * Resource action: ClusterIP:0 start on hypatia-corosync.nevis.columbia.edu - * Resource action: ClusterIPLocal:0 start on hypatia-corosync.nevis.columbia.edu - * Resource action: ClusterIPSandbox:0 start on hypatia-corosync.nevis.columbia.edu - * Pseudo action: IPGroup:1_start_0 - * Resource action: ClusterIP:1 start on orestes-corosync.nevis.columbia.edu - * Resource action: ClusterIPLocal:1 start on orestes-corosync.nevis.columbia.edu - * Resource action: ClusterIPSandbox:1 start on orestes-corosync.nevis.columbia.edu - * Pseudo action: LibvirtdGroup:0_start_0 - * Resource action: SymlinkEtcLibvirt:0 start on hypatia-corosync.nevis.columbia.edu - * Resource action: Libvirtd:0 start on hypatia-corosync.nevis.columbia.edu - * Pseudo action: LibvirtdGroup:1_start_0 - * Resource action: SymlinkEtcLibvirt:1 start on orestes-corosync.nevis.columbia.edu - * Resource action: Libvirtd:1 start on orestes-corosync.nevis.columbia.edu - * Pseudo action: TftpGroup:0_start_0 - * Resource action: SymlinkTftp:0 start on hypatia-corosync.nevis.columbia.edu - * Resource action: Xinetd:0 start on hypatia-corosync.nevis.columbia.edu - * Pseudo action: TftpGroup:1_start_0 - * Resource action: SymlinkTftp:1 start on orestes-corosync.nevis.columbia.edu - * Resource action: Xinetd:1 start on orestes-corosync.nevis.columbia.edu - * Pseudo action: ExportsGroup:0_start_0 - * Resource action: ExportMail:0 start on hypatia-corosync.nevis.columbia.edu - * Resource action: ExportMailInbox:0 start on hypatia-corosync.nevis.columbia.edu - * Resource action: 
ExportMailFolders:0 start on hypatia-corosync.nevis.columbia.edu - * Resource action: ExportMailForward:0 start on hypatia-corosync.nevis.columbia.edu - * Resource action: ExportMailProcmailrc:0 start on hypatia-corosync.nevis.columbia.edu - * Resource action: ExportUsrNevis:0 start on hypatia-corosync.nevis.columbia.edu - * Resource action: ExportUsrNevisOffsite:0 start on hypatia-corosync.nevis.columbia.edu - * Resource action: ExportWWW:0 start on hypatia-corosync.nevis.columbia.edu - * Pseudo action: ExportsGroup:1_start_0 - * Resource action: ExportMail:1 start on orestes-corosync.nevis.columbia.edu - * Resource action: ExportMailInbox:1 start on orestes-corosync.nevis.columbia.edu - * Resource action: ExportMailFolders:1 start on orestes-corosync.nevis.columbia.edu - * Resource action: ExportMailForward:1 start on orestes-corosync.nevis.columbia.edu - * Resource action: ExportMailProcmailrc:1 start on orestes-corosync.nevis.columbia.edu - * Resource action: ExportUsrNevis:1 start on orestes-corosync.nevis.columbia.edu - * Resource action: ExportUsrNevisOffsite:1 start on orestes-corosync.nevis.columbia.edu - * Resource action: ExportWWW:1 start on orestes-corosync.nevis.columbia.edu - * Pseudo action: CupsGroup:0_running_0 - * Resource action: SymlinkUsrShareCups:0 monitor=60000 on hypatia-corosync.nevis.columbia.edu - * Resource action: SymlinkCupsdConf:0 monitor=60000 on hypatia-corosync.nevis.columbia.edu - * Resource action: Cups:0 monitor=30000 on hypatia-corosync.nevis.columbia.edu - * Pseudo action: CupsGroup:1_running_0 - * Resource action: SymlinkUsrShareCups:1 monitor=60000 on orestes-corosync.nevis.columbia.edu - * Resource action: SymlinkCupsdConf:1 monitor=60000 on orestes-corosync.nevis.columbia.edu - * Resource action: Cups:1 monitor=30000 on orestes-corosync.nevis.columbia.edu - * Pseudo action: CupsClone_running_0 - * Pseudo action: IPGroup:0_running_0 - * Resource action: ClusterIP:0 monitor=30000 on hypatia-corosync.nevis.columbia.edu - * Resource action: ClusterIPLocal:0 monitor=31000 on hypatia-corosync.nevis.columbia.edu - * Resource action: ClusterIPSandbox:0 monitor=32000 on hypatia-corosync.nevis.columbia.edu - * Pseudo action: IPGroup:1_running_0 - * Resource action: ClusterIP:1 monitor=30000 on orestes-corosync.nevis.columbia.edu - * Resource action: ClusterIPLocal:1 monitor=31000 on orestes-corosync.nevis.columbia.edu - * Resource action: ClusterIPSandbox:1 monitor=32000 on orestes-corosync.nevis.columbia.edu - * Pseudo action: IPClone_running_0 - * Pseudo action: LibvirtdGroup:0_running_0 - * Resource action: SymlinkEtcLibvirt:0 monitor=60000 on hypatia-corosync.nevis.columbia.edu - * Resource action: Libvirtd:0 monitor=30000 on hypatia-corosync.nevis.columbia.edu - * Pseudo action: LibvirtdGroup:1_running_0 - * Resource action: SymlinkEtcLibvirt:1 monitor=60000 on orestes-corosync.nevis.columbia.edu - * Resource action: Libvirtd:1 monitor=30000 on orestes-corosync.nevis.columbia.edu - * Pseudo action: LibvirtdClone_running_0 - * Pseudo action: TftpGroup:0_running_0 - * Resource action: SymlinkTftp:0 monitor=60000 on hypatia-corosync.nevis.columbia.edu - * Pseudo action: TftpGroup:1_running_0 - * Resource action: SymlinkTftp:1 monitor=60000 on orestes-corosync.nevis.columbia.edu - * Pseudo action: TftpClone_running_0 - * Pseudo action: ExportsGroup:0_running_0 - * Pseudo action: ExportsGroup:1_running_0 - * Pseudo action: ExportsClone_running_0 - * Resource action: KVM-guest start on hypatia-corosync.nevis.columbia.edu - * Resource action: Proxy start on 
orestes-corosync.nevis.columbia.edu Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] * Full List of Resources: * Clone Set: AdminClone [AdminDrbd] (promotable): - * Promoted: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] - * CronAmbientTemperature (ocf:heartbeat:symlink): Started hypatia-corosync.nevis.columbia.edu - * StonithHypatia (stonith:fence_nut): Started orestes-corosync.nevis.columbia.edu - * StonithOrestes (stonith:fence_nut): Started hypatia-corosync.nevis.columbia.edu + * Stopped: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] + * CronAmbientTemperature (ocf:heartbeat:symlink): Stopped + * StonithHypatia (stonith:fence_nut): Stopped + * StonithOrestes (stonith:fence_nut): Stopped * Resource Group: DhcpGroup: - * SymlinkDhcpdConf (ocf:heartbeat:symlink): Started orestes-corosync.nevis.columbia.edu - * SymlinkSysconfigDhcpd (ocf:heartbeat:symlink): Started orestes-corosync.nevis.columbia.edu - * SymlinkDhcpdLeases (ocf:heartbeat:symlink): Started orestes-corosync.nevis.columbia.edu + * SymlinkDhcpdConf (ocf:heartbeat:symlink): Stopped + * SymlinkSysconfigDhcpd (ocf:heartbeat:symlink): Stopped + * SymlinkDhcpdLeases (ocf:heartbeat:symlink): Stopped * Dhcpd (lsb:dhcpd): Stopped (disabled) * DhcpIP (ocf:heartbeat:IPaddr2): Stopped * Clone Set: CupsClone [CupsGroup]: - * Started: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] + * Stopped: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] * Clone Set: IPClone [IPGroup] (unique): * Resource Group: IPGroup:0: - * ClusterIP:0 (ocf:heartbeat:IPaddr2): Started hypatia-corosync.nevis.columbia.edu - * ClusterIPLocal:0 (ocf:heartbeat:IPaddr2): Started hypatia-corosync.nevis.columbia.edu - * ClusterIPSandbox:0 (ocf:heartbeat:IPaddr2): Started hypatia-corosync.nevis.columbia.edu + * ClusterIP:0 (ocf:heartbeat:IPaddr2): Stopped + * ClusterIPLocal:0 (ocf:heartbeat:IPaddr2): Stopped + * ClusterIPSandbox:0 (ocf:heartbeat:IPaddr2): Stopped * Resource Group: IPGroup:1: - * ClusterIP:1 (ocf:heartbeat:IPaddr2): Started orestes-corosync.nevis.columbia.edu - * ClusterIPLocal:1 (ocf:heartbeat:IPaddr2): Started orestes-corosync.nevis.columbia.edu - * ClusterIPSandbox:1 (ocf:heartbeat:IPaddr2): Started orestes-corosync.nevis.columbia.edu + * ClusterIP:1 (ocf:heartbeat:IPaddr2): Stopped + * ClusterIPLocal:1 (ocf:heartbeat:IPaddr2): Stopped + * ClusterIPSandbox:1 (ocf:heartbeat:IPaddr2): Stopped * Clone Set: LibvirtdClone [LibvirtdGroup]: - * Started: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] + * Stopped: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] * Clone Set: TftpClone [TftpGroup]: - * Started: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] + * Stopped: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] * Clone Set: ExportsClone [ExportsGroup]: - * Started: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] + * Stopped: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] * Clone Set: FilesystemClone [FilesystemGroup]: - * Started: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] - * KVM-guest (ocf:heartbeat:VirtualDomain): Started hypatia-corosync.nevis.columbia.edu - * Proxy (ocf:heartbeat:VirtualDomain): Started orestes-corosync.nevis.columbia.edu + * 
Stopped: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] + * KVM-guest (ocf:heartbeat:VirtualDomain): Stopped + * Proxy (ocf:heartbeat:VirtualDomain): Stopped diff --git a/cts/scheduler/summary/promoted-promotion-constraint.summary b/cts/scheduler/summary/promoted-promotion-constraint.summary index 22bc250311e..0a6b2f9b39c 100644 --- a/cts/scheduler/summary/promoted-promotion-constraint.summary +++ b/cts/scheduler/summary/promoted-promotion-constraint.summary @@ -1,6 +1,8 @@ 2 of 5 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ hex-13 hex-14 ] @@ -17,13 +19,10 @@ Transition Summary: * Demote s0:0 ( Promoted -> Unpromoted hex-14 ) Executing Cluster Transition: - * Resource action: s0:1 cancel=20000 on hex-14 - * Pseudo action: ms0_demote_0 - * Resource action: s0:1 demote on hex-14 - * Pseudo action: ms0_demoted_0 - * Resource action: s0:1 monitor=21000 on hex-14 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ hex-13 hex-14 ] @@ -33,4 +32,5 @@ Revised Cluster Status: * d0 (ocf:pacemaker:Dummy): Stopped (disabled) * d1 (ocf:pacemaker:Dummy): Stopped (disabled) * Clone Set: ms0 [s0] (promotable): - * Unpromoted: [ hex-13 hex-14 ] + * Promoted: [ hex-14 ] + * Unpromoted: [ hex-13 ] diff --git a/cts/scheduler/summary/promoted-pseudo.summary b/cts/scheduler/summary/promoted-pseudo.summary index 92302e773d8..1fdb1ede63a 100644 --- a/cts/scheduler/summary/promoted-pseudo.summary +++ b/cts/scheduler/summary/promoted-pseudo.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node raki.linbit: standby * Online: [ sambuca.linbit ] @@ -17,44 +19,18 @@ Transition Summary: * Start ip_nfs ( sambuca.linbit ) Executing Cluster Transition: - * Resource action: ip_float_right start on sambuca.linbit - * Pseudo action: ms_drbd_float_pre_notify_stop_0 - * Resource action: drbd_float:0 notify on sambuca.linbit - * Pseudo action: ms_drbd_float_confirmed-pre_notify_stop_0 - * Pseudo action: ms_drbd_float_stop_0 - * Resource action: drbd_float:0 stop on sambuca.linbit - * Pseudo action: ms_drbd_float_stopped_0 - * Pseudo action: ms_drbd_float_post_notify_stopped_0 - * Pseudo action: ms_drbd_float_confirmed-post_notify_stopped_0 - * Pseudo action: ms_drbd_float_pre_notify_start_0 - * Pseudo action: ms_drbd_float_confirmed-pre_notify_start_0 - * Pseudo action: ms_drbd_float_start_0 - * Resource action: drbd_float:0 start on sambuca.linbit - * Pseudo action: ms_drbd_float_running_0 - * Pseudo action: ms_drbd_float_post_notify_running_0 - * Resource action: drbd_float:0 notify on sambuca.linbit - * Pseudo action: ms_drbd_float_confirmed-post_notify_running_0 - * Pseudo action: ms_drbd_float_pre_notify_promote_0 - * Resource action: drbd_float:0 notify on sambuca.linbit - * Pseudo action: ms_drbd_float_confirmed-pre_notify_promote_0 - * Pseudo action: ms_drbd_float_promote_0 - * Resource action: drbd_float:0 promote on sambuca.linbit - * Pseudo action: ms_drbd_float_promoted_0 - * Pseudo action: ms_drbd_float_post_notify_promoted_0 - * Resource action: drbd_float:0 notify on sambuca.linbit - * Pseudo action: ms_drbd_float_confirmed-post_notify_promoted_0 - * Pseudo action: nfsexport_start_0 - * Resource action: ip_nfs start on sambuca.linbit Revised Cluster Status: + * Cluster Summary: + * Node List: * Node raki.linbit: standby * Online: [ sambuca.linbit ] * Full List of Resources: - * ip_float_right (ocf:heartbeat:IPaddr2): Started 
sambuca.linbit + * ip_float_right (ocf:heartbeat:IPaddr2): Stopped * Clone Set: ms_drbd_float [drbd_float] (promotable): - * Promoted: [ sambuca.linbit ] + * Unpromoted: [ sambuca.linbit ] * Resource Group: nfsexport: - * ip_nfs (ocf:heartbeat:IPaddr2): Started sambuca.linbit + * ip_nfs (ocf:heartbeat:IPaddr2): Stopped * fs_float (ocf:heartbeat:Filesystem): Stopped diff --git a/cts/scheduler/summary/promoted-reattach.summary b/cts/scheduler/summary/promoted-reattach.summary index 8f07251f2e1..1fc1b2fff45 100644 --- a/cts/scheduler/summary/promoted-reattach.summary +++ b/cts/scheduler/summary/promoted-reattach.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ dktest1 dktest2 ] @@ -14,13 +16,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: drbd1:0 monitor=10000 on dktest1 - * Resource action: drbd1:0 monitor=11000 on dktest2 - * Resource action: apache-vip monitor=60000 on dktest1 - * Resource action: mount monitor=10000 on dktest1 - * Resource action: webserver monitor=30000 on dktest1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ dktest1 dktest2 ] diff --git a/cts/scheduler/summary/promoted-role.summary b/cts/scheduler/summary/promoted-role.summary index 588f5230bbc..3ecbc026b62 100644 --- a/cts/scheduler/summary/promoted-role.summary +++ b/cts/scheduler/summary/promoted-role.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ sles11-a sles11-b ] @@ -10,15 +12,13 @@ Transition Summary: * Demote res_Stateful_1:1 ( Promoted -> Unpromoted sles11-a ) Executing Cluster Transition: - * Pseudo action: ms_res_Stateful_1_demote_0 - * Resource action: res_Stateful_1:0 demote on sles11-a - * Pseudo action: ms_res_Stateful_1_demoted_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ sles11-a sles11-b ] * Full List of Resources: * Clone Set: ms_res_Stateful_1 [res_Stateful_1] (promotable): - * Promoted: [ sles11-b ] - * Unpromoted: [ sles11-a ] + * Promoted: [ sles11-a sles11-b ] diff --git a/cts/scheduler/summary/promoted-score-startup.summary b/cts/scheduler/summary/promoted-score-startup.summary index f9d36405d12..42779a0ca38 100644 --- a/cts/scheduler/summary/promoted-score-startup.summary +++ b/cts/scheduler/summary/promoted-score-startup.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ srv1 srv2 ] @@ -13,42 +15,14 @@ Transition Summary: * Start pgsql-master-ip ( srv1 ) Executing Cluster Transition: - * Resource action: pgsqld:0 monitor on srv1 - * Resource action: pgsqld:1 monitor on srv2 - * Pseudo action: pgsql-ha_pre_notify_start_0 - * Resource action: pgsql-master-ip monitor on srv2 - * Resource action: pgsql-master-ip monitor on srv1 - * Pseudo action: pgsql-ha_confirmed-pre_notify_start_0 - * Pseudo action: pgsql-ha_start_0 - * Resource action: pgsqld:0 start on srv1 - * Resource action: pgsqld:1 start on srv2 - * Pseudo action: pgsql-ha_running_0 - * Pseudo action: pgsql-ha_post_notify_running_0 - * Resource action: pgsqld:0 notify on srv1 - * Resource action: pgsqld:1 notify on srv2 - * Pseudo action: pgsql-ha_confirmed-post_notify_running_0 - * Pseudo action: pgsql-ha_pre_notify_promote_0 - * Resource action: pgsqld:0 notify on srv1 - * Resource action: pgsqld:1 notify on srv2 - * Pseudo action: pgsql-ha_confirmed-pre_notify_promote_0 - * Pseudo action: pgsql-ha_promote_0 - * Resource action: pgsqld:0 promote on srv1 - * Pseudo action: 
pgsql-ha_promoted_0 - * Pseudo action: pgsql-ha_post_notify_promoted_0 - * Resource action: pgsqld:0 notify on srv1 - * Resource action: pgsqld:1 notify on srv2 - * Pseudo action: pgsql-ha_confirmed-post_notify_promoted_0 - * Resource action: pgsql-master-ip start on srv1 - * Resource action: pgsqld:0 monitor=15000 on srv1 - * Resource action: pgsqld:1 monitor=16000 on srv2 - * Resource action: pgsql-master-ip monitor=10000 on srv1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ srv1 srv2 ] * Full List of Resources: * Clone Set: pgsql-ha [pgsqld] (promotable): - * Promoted: [ srv1 ] - * Unpromoted: [ srv2 ] - * pgsql-master-ip (ocf:heartbeat:IPaddr2): Started srv1 + * Stopped: [ srv1 srv2 ] + * pgsql-master-ip (ocf:heartbeat:IPaddr2): Stopped diff --git a/cts/scheduler/summary/promoted-stop.summary b/cts/scheduler/summary/promoted-stop.summary index efc74928306..7fa746ac78d 100644 --- a/cts/scheduler/summary/promoted-stop.summary +++ b/cts/scheduler/summary/promoted-stop.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] @@ -8,17 +10,17 @@ Current cluster status: Transition Summary: * Stop dummy:2 ( Unpromoted node3 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Pseudo action: m_stop_0 - * Resource action: dummy:2 stop on node3 - * Pseudo action: m_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: * Clone Set: m [dummy] (promotable): - * Unpromoted: [ node1 node2 ] - * Stopped: [ node3 ] + * Unpromoted: [ node1 node2 node3 ] diff --git a/cts/scheduler/summary/promoted-unmanaged-monitor.summary b/cts/scheduler/summary/promoted-unmanaged-monitor.summary index 3c5b39aa17c..6bfaa09bab6 100644 --- a/cts/scheduler/summary/promoted-unmanaged-monitor.summary +++ b/cts/scheduler/summary/promoted-unmanaged-monitor.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] @@ -29,18 +31,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: lsb-dummy monitor=5000 on pcmk-3 - * Resource action: migrator monitor=10000 on pcmk-4 - * Resource action: ping-1:0 monitor=60000 on pcmk-2 - * Resource action: ping-1:0 monitor=60000 on pcmk-3 - * Resource action: ping-1:0 monitor=60000 on pcmk-4 - * Resource action: ping-1:0 monitor=60000 on pcmk-1 - * Resource action: stateful-1:0 monitor=15000 on pcmk-2 - * Resource action: stateful-1:0 monitor on pcmk-1 - * Resource action: stateful-1:0 monitor=16000 on pcmk-3 - * Resource action: stateful-1:0 monitor=15000 on pcmk-4 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] diff --git a/cts/scheduler/summary/promoted-with-blocked.summary b/cts/scheduler/summary/promoted-with-blocked.summary index 82177a9a6a2..0a50234015e 100644 --- a/cts/scheduler/summary/promoted-with-blocked.summary +++ b/cts/scheduler/summary/promoted-with-blocked.summary @@ -1,6 +1,8 @@ 1 of 8 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 node4 node5 ] @@ -20,34 +22,10 @@ Transition Summary: * Promote rsc2:4 ( Stopped -> Promoted node2 ) due to colocation with rsc1 (blocked) Executing Cluster Transition: - * Resource action: rsc1 monitor on node5 - * Resource action: rsc1 
monitor on node4 - * Resource action: rsc1 monitor on node3 - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc2:0 monitor on node3 - * Resource action: rsc2:1 monitor on node4 - * Resource action: rsc2:2 monitor on node5 - * Resource action: rsc2:3 monitor on node1 - * Resource action: rsc2:4 monitor on node2 - * Pseudo action: rsc2-clone_start_0 - * Resource action: rsc3 monitor on node5 - * Resource action: rsc3 monitor on node4 - * Resource action: rsc3 monitor on node3 - * Resource action: rsc3 monitor on node2 - * Resource action: rsc3 monitor on node1 - * Resource action: rsc2:0 start on node3 - * Resource action: rsc2:1 start on node4 - * Resource action: rsc2:2 start on node5 - * Resource action: rsc2:3 start on node1 - * Resource action: rsc2:4 start on node2 - * Pseudo action: rsc2-clone_running_0 - * Resource action: rsc2:0 monitor=10000 on node3 - * Resource action: rsc2:1 monitor=10000 on node4 - * Resource action: rsc2:2 monitor=10000 on node5 - * Resource action: rsc2:3 monitor=10000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 node4 node5 ] @@ -55,5 +33,5 @@ Revised Cluster Status: * Fencing (stonith:fence_xvm): Started node1 * rsc1 (ocf:pacemaker:Dummy): Stopped * Clone Set: rsc2-clone [rsc2] (promotable): - * Unpromoted: [ node1 node2 node3 node4 node5 ] + * Stopped: [ node1 node2 node3 node4 node5 ] * rsc3 (ocf:pacemaker:Dummy): Stopped (disabled) diff --git a/cts/scheduler/summary/promoted_monitor_restart.summary b/cts/scheduler/summary/promoted_monitor_restart.summary index be181bd6ac7..6fe361f847b 100644 --- a/cts/scheduler/summary/promoted_monitor_restart.summary +++ b/cts/scheduler/summary/promoted_monitor_restart.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] @@ -11,9 +13,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: MS_RSC_NATIVE:0 monitor=5000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] diff --git a/cts/scheduler/summary/quorum-1.summary b/cts/scheduler/summary/quorum-1.summary index d0a05bd6a7a..bb60d4515a0 100644 --- a/cts/scheduler/summary/quorum-1.summary +++ b/cts/scheduler/summary/quorum-1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -12,19 +14,14 @@ Transition Summary: * Start rsc3 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc2 stop on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc3 monitor on node2 - * Resource action: rsc3 monitor on node1 - * Resource action: rsc2 start on node2 - * Resource action: rsc3 start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc1 (ocf:heartbeat:apache): Started node1 - * rsc2 (ocf:heartbeat:apache): Started node2 - * rsc3 (ocf:heartbeat:apache): Started node1 + * rsc2 (ocf:heartbeat:apache): Started node1 + * rsc3 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/quorum-2.summary b/cts/scheduler/summary/quorum-2.summary index 136a84e2439..0e75ceeb462 100644 --- a/cts/scheduler/summary/quorum-2.summary +++ b/cts/scheduler/summary/quorum-2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -12,18 +14,14 @@ 
Transition Summary: * Start rsc3 ( node1 ) due to quorum freeze (blocked) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc2 stop on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc3 monitor on node2 - * Resource action: rsc3 monitor on node1 - * Resource action: rsc2 start on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc1 (ocf:heartbeat:apache): Started node1 - * rsc2 (ocf:heartbeat:apache): Started node2 + * rsc2 (ocf:heartbeat:apache): Started node1 * rsc3 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/quorum-3.summary b/cts/scheduler/summary/quorum-3.summary index e51f9c434c5..98bd587137c 100644 --- a/cts/scheduler/summary/quorum-3.summary +++ b/cts/scheduler/summary/quorum-3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -13,18 +15,14 @@ Transition Summary: * Start rsc3 ( node1 ) due to no quorum (blocked) Executing Cluster Transition: - * Resource action: rsc1 stop on node1 - * Resource action: rsc1 monitor on node2 - * Resource action: rsc2 stop on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc3 monitor on node2 - * Resource action: rsc3 monitor on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Stopped - * rsc2 (ocf:heartbeat:apache): Stopped + * rsc1 (ocf:heartbeat:apache): Started node1 + * rsc2 (ocf:heartbeat:apache): Started node1 * rsc3 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/quorum-4.summary b/cts/scheduler/summary/quorum-4.summary index 3d0c88e81f6..ab870598c1f 100644 --- a/cts/scheduler/summary/quorum-4.summary +++ b/cts/scheduler/summary/quorum-4.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Node hadev1: UNCLEAN (offline) * Node hadev3: UNCLEAN (offline) @@ -11,15 +14,15 @@ Transition Summary: * Start child_DoFencing ( hadev2 ) Executing Cluster Transition: - * Resource action: child_DoFencing monitor on hadev2 - * Resource action: child_DoFencing start on hadev2 - * Resource action: child_DoFencing monitor=5000 on hadev2 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Node hadev1: UNCLEAN (offline) * Node hadev3: UNCLEAN (offline) * Online: [ hadev2 ] * Full List of Resources: - * child_DoFencing (stonith:ssh): Started hadev2 + * child_DoFencing (stonith:ssh): Stopped diff --git a/cts/scheduler/summary/quorum-5.summary b/cts/scheduler/summary/quorum-5.summary index 1e7abf38ee9..9e2cccf0312 100644 --- a/cts/scheduler/summary/quorum-5.summary +++ b/cts/scheduler/summary/quorum-5.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Node hadev1: UNCLEAN (offline) * Node hadev3: UNCLEAN (offline) @@ -14,16 +17,11 @@ Transition Summary: * Start child_DoFencing_2 ( hadev2 ) Executing Cluster Transition: - * Pseudo action: group1_start_0 - * Resource action: child_DoFencing_1 monitor on hadev2 - * Resource action: child_DoFencing_2 monitor on hadev2 - * Resource action: child_DoFencing_1 start on hadev2 - * Resource action: child_DoFencing_2 start on hadev2 - * Pseudo action: group1_running_0 - * Resource action: child_DoFencing_1 
monitor=5000 on hadev2 - * Resource action: child_DoFencing_2 monitor=5000 on hadev2 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Node hadev1: UNCLEAN (offline) * Node hadev3: UNCLEAN (offline) @@ -31,5 +29,5 @@ Revised Cluster Status: * Full List of Resources: * Resource Group: group1: - * child_DoFencing_1 (stonith:ssh): Started hadev2 - * child_DoFencing_2 (stonith:ssh): Started hadev2 + * child_DoFencing_1 (stonith:ssh): Stopped + * child_DoFencing_2 (stonith:ssh): Stopped diff --git a/cts/scheduler/summary/quorum-6.summary b/cts/scheduler/summary/quorum-6.summary index 321410d5b5e..2812d76d848 100644 --- a/cts/scheduler/summary/quorum-6.summary +++ b/cts/scheduler/summary/quorum-6.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Node hadev1: UNCLEAN (offline) * Node hadev3: UNCLEAN (offline) @@ -19,20 +22,11 @@ Transition Summary: * Start child_DoFencing:0 ( hadev2 ) Executing Cluster Transition: - * Resource action: child_DoFencing:0 monitor on hadev2 - * Resource action: child_DoFencing:1 monitor on hadev2 - * Resource action: child_DoFencing:2 monitor on hadev2 - * Resource action: child_DoFencing:3 monitor on hadev2 - * Resource action: child_DoFencing:4 monitor on hadev2 - * Resource action: child_DoFencing:5 monitor on hadev2 - * Resource action: child_DoFencing:6 monitor on hadev2 - * Resource action: child_DoFencing:7 monitor on hadev2 - * Pseudo action: DoFencing_start_0 - * Resource action: child_DoFencing:0 start on hadev2 - * Pseudo action: DoFencing_running_0 - * Resource action: child_DoFencing:0 monitor=5000 on hadev2 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Node hadev1: UNCLEAN (offline) * Node hadev3: UNCLEAN (offline) @@ -40,7 +34,7 @@ Revised Cluster Status: * Full List of Resources: * Clone Set: DoFencing [child_DoFencing] (unique): - * child_DoFencing:0 (stonith:ssh): Started hadev2 + * child_DoFencing:0 (stonith:ssh): Stopped * child_DoFencing:1 (stonith:ssh): Stopped * child_DoFencing:2 (stonith:ssh): Stopped * child_DoFencing:3 (stonith:ssh): Stopped diff --git a/cts/scheduler/summary/rebalance-unique-clones.summary b/cts/scheduler/summary/rebalance-unique-clones.summary index 2dea83b42ed..30397d2301c 100644 --- a/cts/scheduler/summary/rebalance-unique-clones.summary +++ b/cts/scheduler/summary/rebalance-unique-clones.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -11,19 +13,14 @@ Transition Summary: * Move P:1 ( node1 -> node2 ) Executing Cluster Transition: - * Pseudo action: C_stop_0 - * Resource action: P:1 stop on node1 - * Pseudo action: C_stopped_0 - * Pseudo action: C_start_0 - * Resource action: P:1 start on node2 - * Pseudo action: C_running_0 - * Resource action: P:1 monitor=10000 on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: C [P] (unique): * P:0 (ocf:heartbeat:IPaddr2): Started node1 - * P:1 (ocf:heartbeat:IPaddr2): Started node2 + * P:1 (ocf:heartbeat:IPaddr2): Started node1 diff --git a/cts/scheduler/summary/rec-node-1.summary b/cts/scheduler/summary/rec-node-1.summary index 35d9dd36cd1..918374ebda0 100644 --- a/cts/scheduler/summary/rec-node-1.summary +++ b/cts/scheduler/summary/rec-node-1.summary @@ -1,4 +1,6 @@ Current cluster status: + * 
Cluster Summary: + * Node List: * Online: [ node2 ] * OFFLINE: [ node1 ] @@ -12,16 +14,14 @@ Transition Summary: * Start rsc2 ( node2 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc1 start on node2 - * Resource action: rsc2 start on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node2 ] * OFFLINE: [ node1 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node2 - * rsc2 (ocf:heartbeat:apache): Started node2 + * rsc1 (ocf:heartbeat:apache): Stopped + * rsc2 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/rec-node-10.summary b/cts/scheduler/summary/rec-node-10.summary index a77b2a14eed..f823006352f 100644 --- a/cts/scheduler/summary/rec-node-10.summary +++ b/cts/scheduler/summary/rec-node-10.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Node node1: UNCLEAN (offline) * Online: [ node2 ] @@ -14,11 +17,11 @@ Transition Summary: * Stop rsc2 ( node1 ) due to no quorum (blocked) Executing Cluster Transition: - * Resource action: stonith-1 monitor on node2 - * Resource action: rsc1 monitor on node2 - * Resource action: rsc2 monitor on node2 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Node node1: UNCLEAN (offline) * Online: [ node2 ] diff --git a/cts/scheduler/summary/rec-node-11.summary b/cts/scheduler/summary/rec-node-11.summary index 453dc006e70..db7bac2bbb1 100644 --- a/cts/scheduler/summary/rec-node-11.summary +++ b/cts/scheduler/summary/rec-node-11.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Node node1: UNCLEAN (online) * Online: [ node2 ] @@ -18,30 +21,18 @@ Transition Summary: * Restart rsc3 ( node2 ) Executing Cluster Transition: - * Resource action: stonith-1 monitor on node2 - * Resource action: rsc1 monitor on node2 - * Resource action: rsc2 monitor on node2 - * Fencing node1 (reboot) - * Resource action: stonith-1 start on node2 - * Pseudo action: group1_stop_0 - * Pseudo action: rsc2_stop_0 - * Pseudo action: rsc1_stop_0 - * Pseudo action: group1_stopped_0 - * Resource action: rsc3 stop on node2 - * Resource action: rsc3 start on node2 - * Pseudo action: group1_start_0 - * Resource action: rsc1 start on node2 - * Resource action: rsc2 start on node2 - * Pseudo action: group1_running_0 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: + * Node node1: UNCLEAN (online) * Online: [ node2 ] - * OFFLINE: [ node1 ] * Full List of Resources: - * stonith-1 (stonith:dummy): Started node2 + * stonith-1 (stonith:dummy): Stopped * Resource Group: group1: - * rsc1 (ocf:heartbeat:apache): Started node2 - * rsc2 (ocf:heartbeat:apache): Started node2 + * rsc1 (ocf:heartbeat:apache): Started node1 + * rsc2 (ocf:heartbeat:apache): Started node1 * rsc3 (ocf:heartbeat:apache): Started node2 diff --git a/cts/scheduler/summary/rec-node-12.summary b/cts/scheduler/summary/rec-node-12.summary index 8edeec258a9..d41c0fb43ee 100644 --- a/cts/scheduler/summary/rec-node-12.summary +++ b/cts/scheduler/summary/rec-node-12.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node c001n02: UNCLEAN (offline) * Online: [ c001n01 c001n03 c001n08 ] @@ -27,66 +29,22 @@ Transition Summary: 
* Start child_DoFencing:2 ( c001n08 ) Executing Cluster Transition: - * Resource action: DcIPaddr monitor on c001n08 - * Resource action: DcIPaddr monitor on c001n03 - * Resource action: DcIPaddr monitor on c001n01 - * Resource action: rsc_c001n08 monitor on c001n08 - * Resource action: rsc_c001n08 monitor on c001n03 - * Resource action: rsc_c001n08 monitor on c001n01 - * Resource action: rsc_c001n02 monitor on c001n08 - * Resource action: rsc_c001n02 monitor on c001n03 - * Resource action: rsc_c001n02 monitor on c001n01 - * Resource action: rsc_c001n03 monitor on c001n08 - * Resource action: rsc_c001n03 monitor on c001n03 - * Resource action: rsc_c001n03 monitor on c001n01 - * Resource action: rsc_c001n01 monitor on c001n08 - * Resource action: rsc_c001n01 monitor on c001n03 - * Resource action: rsc_c001n01 monitor on c001n01 - * Resource action: child_DoFencing:0 monitor on c001n08 - * Resource action: child_DoFencing:0 monitor on c001n03 - * Resource action: child_DoFencing:0 monitor on c001n01 - * Resource action: child_DoFencing:1 monitor on c001n08 - * Resource action: child_DoFencing:1 monitor on c001n03 - * Resource action: child_DoFencing:1 monitor on c001n01 - * Resource action: child_DoFencing:2 monitor on c001n08 - * Resource action: child_DoFencing:2 monitor on c001n03 - * Resource action: child_DoFencing:2 monitor on c001n01 - * Resource action: child_DoFencing:3 monitor on c001n08 - * Resource action: child_DoFencing:3 monitor on c001n03 - * Resource action: child_DoFencing:3 monitor on c001n01 - * Pseudo action: DoFencing_start_0 - * Fencing c001n02 (reboot) - * Resource action: DcIPaddr start on c001n08 - * Resource action: rsc_c001n08 start on c001n08 - * Resource action: rsc_c001n02 start on c001n01 - * Resource action: rsc_c001n03 start on c001n03 - * Resource action: rsc_c001n01 start on c001n01 - * Resource action: child_DoFencing:0 start on c001n03 - * Resource action: child_DoFencing:1 start on c001n01 - * Resource action: child_DoFencing:2 start on c001n08 - * Pseudo action: DoFencing_running_0 - * Resource action: DcIPaddr monitor=5000 on c001n08 - * Resource action: rsc_c001n08 monitor=5000 on c001n08 - * Resource action: rsc_c001n02 monitor=5000 on c001n01 - * Resource action: rsc_c001n03 monitor=5000 on c001n03 - * Resource action: rsc_c001n01 monitor=5000 on c001n01 - * Resource action: child_DoFencing:0 monitor=5000 on c001n03 - * Resource action: child_DoFencing:1 monitor=5000 on c001n01 - * Resource action: child_DoFencing:2 monitor=5000 on c001n08 Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node c001n02: UNCLEAN (offline) * Online: [ c001n01 c001n03 c001n08 ] - * OFFLINE: [ c001n02 ] * Full List of Resources: - * DcIPaddr (ocf:heartbeat:IPaddr): Started c001n08 - * rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08 - * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n01 - * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03 - * rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01 + * DcIPaddr (ocf:heartbeat:IPaddr): Stopped + * rsc_c001n08 (ocf:heartbeat:IPaddr): Stopped + * rsc_c001n02 (ocf:heartbeat:IPaddr): Stopped + * rsc_c001n03 (ocf:heartbeat:IPaddr): Stopped + * rsc_c001n01 (ocf:heartbeat:IPaddr): Stopped * Clone Set: DoFencing [child_DoFencing] (unique): - * child_DoFencing:0 (stonith:ssh): Started c001n03 - * child_DoFencing:1 (stonith:ssh): Started c001n01 - * child_DoFencing:2 (stonith:ssh): Started c001n08 + * child_DoFencing:0 (stonith:ssh): Stopped + * child_DoFencing:1 (stonith:ssh): Stopped + * child_DoFencing:2 
(stonith:ssh): Stopped * child_DoFencing:3 (stonith:ssh): Stopped diff --git a/cts/scheduler/summary/rec-node-13.summary b/cts/scheduler/summary/rec-node-13.summary index 72c8e427366..b659c69cb42 100644 --- a/cts/scheduler/summary/rec-node-13.summary +++ b/cts/scheduler/summary/rec-node-13.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node c001n04: UNCLEAN (online) * Online: [ c001n02 c001n06 c001n07 ] @@ -37,17 +39,18 @@ Current cluster status: Transition Summary: * Fence (reboot) c001n04 'ocf_msdummy:6 failed there' * Stop ocf_msdummy:6 ( Unpromoted c001n04 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Fencing c001n04 (reboot) - * Pseudo action: master_rsc_1_stop_0 - * Pseudo action: ocf_msdummy:6_stop_0 - * Pseudo action: master_rsc_1_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node c001n04: UNCLEAN (online) * Online: [ c001n02 c001n06 c001n07 ] - * OFFLINE: [ c001n03 c001n04 c001n05 ] + * OFFLINE: [ c001n03 c001n05 ] * Full List of Resources: * Clone Set: DoFencing [child_DoFencing]: @@ -72,7 +75,7 @@ Revised Cluster Status: * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped - * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped + * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): FAILED c001n04 * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:8 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n06 * ocf_msdummy:9 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n07 diff --git a/cts/scheduler/summary/rec-node-14.summary b/cts/scheduler/summary/rec-node-14.summary index 5c553916e03..598ac84c340 100644 --- a/cts/scheduler/summary/rec-node-14.summary +++ b/cts/scheduler/summary/rec-node-14.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node1: UNCLEAN (offline) * Node node2: UNCLEAN (offline) @@ -14,13 +16,14 @@ Transition Summary: * Fence (reboot) node1 'peer is no longer part of the cluster' Executing Cluster Transition: - * Fencing node1 (reboot) - * Fencing node3 (reboot) - * Fencing node2 (reboot) Revised Cluster Status: + * Cluster Summary: + * Node List: - * OFFLINE: [ node1 node2 node3 ] + * Node node1: UNCLEAN (offline) + * Node node2: UNCLEAN (offline) + * Node node3: UNCLEAN (offline) * Full List of Resources: * stonith-1 (stonith:dummy): Stopped diff --git a/cts/scheduler/summary/rec-node-15.summary b/cts/scheduler/summary/rec-node-15.summary index 39a99646dee..b21dd9806c4 100644 --- a/cts/scheduler/summary/rec-node-15.summary +++ b/cts/scheduler/summary/rec-node-15.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node sapcl02: standby (with active resources) * Node sapcl03: UNCLEAN (offline) @@ -34,55 +36,28 @@ Transition Summary: * Start oralsnr_25 ( sapcl01 ) Executing Cluster Transition: - * Resource action: stonith-1 monitor on sapcl02 - * Resource action: stonith-1 monitor on sapcl01 - * Pseudo action: app02_stop_0 - * Resource action: Filesystem_13 stop on sapcl02 - * Pseudo action: oracle_start_0 - * Fencing sapcl03 (reboot) - * Resource action: stonith-1 start on sapcl01 - * Resource action: LVM_12 stop on sapcl02 - * Resource action: 
IPaddr_192_168_1_104 start on sapcl01 - * Resource action: LVM_22 start on sapcl01 - * Resource action: Filesystem_23 start on sapcl01 - * Resource action: oracle_24 start on sapcl01 - * Resource action: oralsnr_25 start on sapcl01 - * Resource action: IPaddr_192_168_1_102 stop on sapcl02 - * Pseudo action: oracle_running_0 - * Resource action: IPaddr_192_168_1_104 monitor=5000 on sapcl01 - * Resource action: LVM_22 monitor=120000 on sapcl01 - * Resource action: Filesystem_23 monitor=120000 on sapcl01 - * Resource action: oracle_24 monitor=120000 on sapcl01 - * Resource action: oralsnr_25 monitor=120000 on sapcl01 - * Pseudo action: app02_stopped_0 - * Pseudo action: app02_start_0 - * Resource action: IPaddr_192_168_1_102 start on sapcl01 - * Resource action: LVM_12 start on sapcl01 - * Resource action: Filesystem_13 start on sapcl01 - * Pseudo action: app02_running_0 - * Resource action: IPaddr_192_168_1_102 monitor=5000 on sapcl01 - * Resource action: LVM_12 monitor=120000 on sapcl01 - * Resource action: Filesystem_13 monitor=120000 on sapcl01 Revised Cluster Status: + * Cluster Summary: + * Node List: - * Node sapcl02: standby + * Node sapcl02: standby (with active resources) + * Node sapcl03: UNCLEAN (offline) * Online: [ sapcl01 ] - * OFFLINE: [ sapcl03 ] * Full List of Resources: - * stonith-1 (stonith:dummy): Started sapcl01 + * stonith-1 (stonith:dummy): Stopped * Resource Group: app01: * IPaddr_192_168_1_101 (ocf:heartbeat:IPaddr): Started sapcl01 * LVM_2 (ocf:heartbeat:LVM): Started sapcl01 * Filesystem_3 (ocf:heartbeat:Filesystem): Started sapcl01 * Resource Group: app02: - * IPaddr_192_168_1_102 (ocf:heartbeat:IPaddr): Started sapcl01 - * LVM_12 (ocf:heartbeat:LVM): Started sapcl01 - * Filesystem_13 (ocf:heartbeat:Filesystem): Started sapcl01 + * IPaddr_192_168_1_102 (ocf:heartbeat:IPaddr): Started sapcl02 + * LVM_12 (ocf:heartbeat:LVM): Started sapcl02 + * Filesystem_13 (ocf:heartbeat:Filesystem): Started sapcl02 * Resource Group: oracle: - * IPaddr_192_168_1_104 (ocf:heartbeat:IPaddr): Started sapcl01 - * LVM_22 (ocf:heartbeat:LVM): Started sapcl01 - * Filesystem_23 (ocf:heartbeat:Filesystem): Started sapcl01 - * oracle_24 (ocf:heartbeat:oracle): Started sapcl01 - * oralsnr_25 (ocf:heartbeat:oralsnr): Started sapcl01 + * IPaddr_192_168_1_104 (ocf:heartbeat:IPaddr): Stopped + * LVM_22 (ocf:heartbeat:LVM): Stopped + * Filesystem_23 (ocf:heartbeat:Filesystem): Stopped + * oracle_24 (ocf:heartbeat:oracle): Stopped + * oralsnr_25 (ocf:heartbeat:oralsnr): Stopped diff --git a/cts/scheduler/summary/rec-node-2.summary b/cts/scheduler/summary/rec-node-2.summary index 11e818a76d4..9d59981a2b8 100644 --- a/cts/scheduler/summary/rec-node-2.summary +++ b/cts/scheduler/summary/rec-node-2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node1: UNCLEAN (offline) * Online: [ node2 ] @@ -25,38 +27,21 @@ Transition Summary: * Start rsc6 ( node2 ) Executing Cluster Transition: - * Resource action: stonith-1 monitor on node2 - * Resource action: rsc1 monitor on node2 - * Resource action: rsc2 monitor on node2 - * Pseudo action: group1_start_0 - * Resource action: rsc3 monitor on node2 - * Resource action: rsc4 monitor on node2 - * Pseudo action: group2_start_0 - * Resource action: rsc5 monitor on node2 - * Resource action: rsc6 monitor on node2 - * Fencing node1 (reboot) - * Resource action: stonith-1 start on node2 - * Resource action: rsc1 start on node2 - * Resource action: rsc2 start on node2 - * Resource action: rsc3 start on node2 - * Resource 
action: rsc4 start on node2 - * Resource action: rsc5 start on node2 - * Resource action: rsc6 start on node2 - * Pseudo action: group1_running_0 - * Pseudo action: group2_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node node1: UNCLEAN (offline) * Online: [ node2 ] - * OFFLINE: [ node1 ] * Full List of Resources: - * stonith-1 (stonith:dummy): Started node2 - * rsc1 (ocf:heartbeat:apache): Started node2 - * rsc2 (ocf:heartbeat:apache): Started node2 + * stonith-1 (stonith:dummy): Stopped + * rsc1 (ocf:heartbeat:apache): Stopped + * rsc2 (ocf:heartbeat:apache): Stopped * Resource Group: group1: - * rsc3 (ocf:heartbeat:apache): Started node2 - * rsc4 (ocf:heartbeat:apache): Started node2 + * rsc3 (ocf:heartbeat:apache): Stopped + * rsc4 (ocf:heartbeat:apache): Stopped * Resource Group: group2: - * rsc5 (ocf:heartbeat:apache): Started node2 - * rsc6 (ocf:heartbeat:apache): Started node2 + * rsc5 (ocf:heartbeat:apache): Stopped + * rsc6 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/rec-node-3.summary b/cts/scheduler/summary/rec-node-3.summary index 35d9dd36cd1..918374ebda0 100644 --- a/cts/scheduler/summary/rec-node-3.summary +++ b/cts/scheduler/summary/rec-node-3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node2 ] * OFFLINE: [ node1 ] @@ -12,16 +14,14 @@ Transition Summary: * Start rsc2 ( node2 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc1 start on node2 - * Resource action: rsc2 start on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node2 ] * OFFLINE: [ node1 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node2 - * rsc2 (ocf:heartbeat:apache): Started node2 + * rsc1 (ocf:heartbeat:apache): Stopped + * rsc2 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/rec-node-4.summary b/cts/scheduler/summary/rec-node-4.summary index f56c118b9c5..b683d088c01 100644 --- a/cts/scheduler/summary/rec-node-4.summary +++ b/cts/scheduler/summary/rec-node-4.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node1: UNCLEAN (offline) * Online: [ node2 ] @@ -15,22 +17,15 @@ Transition Summary: * Move rsc2 ( node1 -> node2 ) Executing Cluster Transition: - * Resource action: stonith-1 monitor on node2 - * Resource action: rsc1 monitor on node2 - * Resource action: rsc2 monitor on node2 - * Fencing node1 (reboot) - * Resource action: stonith-1 start on node2 - * Pseudo action: rsc1_stop_0 - * Pseudo action: rsc2_stop_0 - * Resource action: rsc1 start on node2 - * Resource action: rsc2 start on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node node1: UNCLEAN (offline) * Online: [ node2 ] - * OFFLINE: [ node1 ] * Full List of Resources: - * stonith-1 (stonith:dummy): Started node2 - * rsc1 (ocf:heartbeat:apache): Started node2 - * rsc2 (ocf:heartbeat:apache): Started node2 + * stonith-1 (stonith:dummy): Stopped + * rsc1 (ocf:heartbeat:apache): Started node1 (UNCLEAN) + * rsc2 (ocf:heartbeat:apache): Started node1 (UNCLEAN) diff --git a/cts/scheduler/summary/rec-node-5.summary b/cts/scheduler/summary/rec-node-5.summary index a4128ca167d..497d4493f85 100644 --- a/cts/scheduler/summary/rec-node-5.summary +++ b/cts/scheduler/summary/rec-node-5.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Node 
node1: UNCLEAN (offline) * Online: [ node2 ] @@ -12,16 +15,15 @@ Transition Summary: * Start rsc2 ( node2 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc1 start on node2 - * Resource action: rsc2 start on node2 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Node node1: UNCLEAN (offline) * Online: [ node2 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node2 - * rsc2 (ocf:heartbeat:apache): Started node2 + * rsc1 (ocf:heartbeat:apache): Stopped + * rsc2 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/rec-node-6.summary b/cts/scheduler/summary/rec-node-6.summary index a7ee902c818..42001837042 100644 --- a/cts/scheduler/summary/rec-node-6.summary +++ b/cts/scheduler/summary/rec-node-6.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node1: UNCLEAN (online) * Online: [ node2 ] @@ -15,22 +17,15 @@ Transition Summary: * Move rsc2 ( node1 -> node2 ) Executing Cluster Transition: - * Resource action: stonith-1 monitor on node2 - * Resource action: rsc1 monitor on node2 - * Resource action: rsc2 monitor on node2 - * Fencing node1 (reboot) - * Resource action: stonith-1 start on node2 - * Pseudo action: rsc1_stop_0 - * Pseudo action: rsc2_stop_0 - * Resource action: rsc1 start on node2 - * Resource action: rsc2 start on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node node1: UNCLEAN (online) * Online: [ node2 ] - * OFFLINE: [ node1 ] * Full List of Resources: - * stonith-1 (stonith:dummy): Started node2 - * rsc1 (ocf:heartbeat:apache): Started node2 - * rsc2 (ocf:heartbeat:apache): Started node2 + * stonith-1 (stonith:dummy): Stopped + * rsc1 (ocf:heartbeat:apache): Started node1 + * rsc2 (ocf:heartbeat:apache): Started node1 diff --git a/cts/scheduler/summary/rec-node-7.summary b/cts/scheduler/summary/rec-node-7.summary index f56c118b9c5..b683d088c01 100644 --- a/cts/scheduler/summary/rec-node-7.summary +++ b/cts/scheduler/summary/rec-node-7.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node1: UNCLEAN (offline) * Online: [ node2 ] @@ -15,22 +17,15 @@ Transition Summary: * Move rsc2 ( node1 -> node2 ) Executing Cluster Transition: - * Resource action: stonith-1 monitor on node2 - * Resource action: rsc1 monitor on node2 - * Resource action: rsc2 monitor on node2 - * Fencing node1 (reboot) - * Resource action: stonith-1 start on node2 - * Pseudo action: rsc1_stop_0 - * Pseudo action: rsc2_stop_0 - * Resource action: rsc1 start on node2 - * Resource action: rsc2 start on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node node1: UNCLEAN (offline) * Online: [ node2 ] - * OFFLINE: [ node1 ] * Full List of Resources: - * stonith-1 (stonith:dummy): Started node2 - * rsc1 (ocf:heartbeat:apache): Started node2 - * rsc2 (ocf:heartbeat:apache): Started node2 + * stonith-1 (stonith:dummy): Stopped + * rsc1 (ocf:heartbeat:apache): Started node1 (UNCLEAN) + * rsc2 (ocf:heartbeat:apache): Started node1 (UNCLEAN) diff --git a/cts/scheduler/summary/rec-node-8.summary b/cts/scheduler/summary/rec-node-8.summary index 226e333dfc1..962b5edba66 100644 --- a/cts/scheduler/summary/rec-node-8.summary +++ b/cts/scheduler/summary/rec-node-8.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * 
Node node1: UNCLEAN (offline) * Online: [ node2 ] @@ -16,12 +19,11 @@ Transition Summary: * Start rsc3 ( node2 ) due to quorum freeze (blocked) Executing Cluster Transition: - * Resource action: stonith-1 monitor on node2 - * Resource action: rsc1 monitor on node2 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc3 monitor on node2 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Node node1: UNCLEAN (offline) * Online: [ node2 ] diff --git a/cts/scheduler/summary/rec-node-9.summary b/cts/scheduler/summary/rec-node-9.summary index edb9d8de65d..256a23ed739 100644 --- a/cts/scheduler/summary/rec-node-9.summary +++ b/cts/scheduler/summary/rec-node-9.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node2 ] * OFFLINE: [ node1 ] @@ -12,10 +14,10 @@ Transition Summary: * Start rsc2 ( node2 ) due to no quorum (blocked) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc2 monitor on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node2 ] * OFFLINE: [ node1 ] diff --git a/cts/scheduler/summary/rec-rsc-0.summary b/cts/scheduler/summary/rec-rsc-0.summary index 9861e82f764..a07b44ece90 100644 --- a/cts/scheduler/summary/rec-rsc-0.summary +++ b/cts/scheduler/summary/rec-rsc-0.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -10,12 +12,12 @@ Transition Summary: * Stop rsc1 ( node2 ) due to node availability Executing Cluster Transition: - * Resource action: rsc1 stop on node2 - * Resource action: rsc1 stop on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Stopped + * rsc1 (ocf:heartbeat:apache): FAILED [ node1 node2 ] diff --git a/cts/scheduler/summary/rec-rsc-1.summary b/cts/scheduler/summary/rec-rsc-1.summary index 95f311f101e..9547af64d4f 100644 --- a/cts/scheduler/summary/rec-rsc-1.summary +++ b/cts/scheduler/summary/rec-rsc-1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -9,13 +11,12 @@ Transition Summary: * Recover rsc1 ( node1 -> node2 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 stop on node1 - * Resource action: rsc1 start on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node2 + * rsc1 (ocf:heartbeat:apache): FAILED node1 diff --git a/cts/scheduler/summary/rec-rsc-2.summary b/cts/scheduler/summary/rec-rsc-2.summary index 27a2eb0b64a..996485c0879 100644 --- a/cts/scheduler/summary/rec-rsc-2.summary +++ b/cts/scheduler/summary/rec-rsc-2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -9,14 +11,12 @@ Transition Summary: * Recover rsc1 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 stop on node1 - * Resource action: rsc1 cancel=1 on node1 - * Resource action: rsc1 start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node1 + * rsc1 (ocf:heartbeat:apache): FAILED node1 diff --git a/cts/scheduler/summary/rec-rsc-3.summary 
b/cts/scheduler/summary/rec-rsc-3.summary index 12ee7b02ba9..f568354407a 100644 --- a/cts/scheduler/summary/rec-rsc-3.summary +++ b/cts/scheduler/summary/rec-rsc-3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -9,12 +11,12 @@ Transition Summary: * Start rsc1 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node1 - * Resource action: rsc1 start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node1 (failure ignored) + * rsc1 (ocf:heartbeat:apache): Stopped (failure ignored) diff --git a/cts/scheduler/summary/rec-rsc-4.summary b/cts/scheduler/summary/rec-rsc-4.summary index 2f5dbdbde2e..b6db0401b33 100644 --- a/cts/scheduler/summary/rec-rsc-4.summary +++ b/cts/scheduler/summary/rec-rsc-4.summary @@ -1,6 +1,8 @@ 0 of 1 resource instances DISABLED and 1 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -10,9 +12,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: rsc1 monitor on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/rec-rsc-5.summary b/cts/scheduler/summary/rec-rsc-5.summary index b045e03e9d4..710069fd82d 100644 --- a/cts/scheduler/summary/rec-rsc-5.summary +++ b/cts/scheduler/summary/rec-rsc-5.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node2: UNCLEAN (online) * Online: [ node1 ] @@ -15,22 +17,15 @@ Transition Summary: * Move rsc2 ( node2 -> node1 ) Executing Cluster Transition: - * Resource action: stonith-1 monitor on node1 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc2 monitor on node1 - * Fencing node2 (reboot) - * Resource action: stonith-1 start on node1 - * Pseudo action: rsc1_stop_0 - * Pseudo action: rsc2_stop_0 - * Resource action: rsc1 start on node1 - * Resource action: rsc2 start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node node2: UNCLEAN (online) * Online: [ node1 ] - * OFFLINE: [ node2 ] * Full List of Resources: - * stonith-1 (stonith:dummy): Started node1 - * rsc1 (ocf:heartbeat:apache): Started node1 - * rsc2 (ocf:heartbeat:apache): Started node1 + * stonith-1 (stonith:dummy): Stopped + * rsc1 (ocf:heartbeat:apache): FAILED node2 + * rsc2 (ocf:heartbeat:apache): Started node2 diff --git a/cts/scheduler/summary/rec-rsc-6.summary b/cts/scheduler/summary/rec-rsc-6.summary index a4ea1496c00..91e143ab53b 100644 --- a/cts/scheduler/summary/rec-rsc-6.summary +++ b/cts/scheduler/summary/rec-rsc-6.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -9,13 +11,12 @@ Transition Summary: * Restart rsc1 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 stop on node2 - * Resource action: rsc1 stop on node1 - * Resource action: rsc1 start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node1 + * rsc1 (ocf:heartbeat:apache): Started [ node1 node2 ] diff --git a/cts/scheduler/summary/rec-rsc-7.summary b/cts/scheduler/summary/rec-rsc-7.summary index bb5cd9863e3..2bdcfd1f4c8 100644 --- a/cts/scheduler/summary/rec-rsc-7.summary +++ b/cts/scheduler/summary/rec-rsc-7.summary @@ -1,4 +1,6 @@ 
Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -10,12 +12,12 @@ Transition Summary: * Stop rsc1 ( node2 ) due to node availability Executing Cluster Transition: - * Resource action: rsc1 stop on node2 - * Resource action: rsc1 stop on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Stopped + * rsc1 (ocf:heartbeat:apache): Started [ node1 node2 ] diff --git a/cts/scheduler/summary/rec-rsc-8.summary b/cts/scheduler/summary/rec-rsc-8.summary index 5ea2de6229e..e3a8795f495 100644 --- a/cts/scheduler/summary/rec-rsc-8.summary +++ b/cts/scheduler/summary/rec-rsc-8.summary @@ -1,6 +1,8 @@ 0 of 1 resource instances DISABLED and 1 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -12,6 +14,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/rec-rsc-9.summary b/cts/scheduler/summary/rec-rsc-9.summary index f3fae63d983..e81df114e48 100644 --- a/cts/scheduler/summary/rec-rsc-9.summary +++ b/cts/scheduler/summary/rec-rsc-9.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node1 ] @@ -10,27 +13,16 @@ Current cluster status: * rsc3 (ocf:heartbeat:apache): FAILED node1 Transition Summary: - * Restart rsc1 ( node1 ) due to required bar running - * Restart rsc2 ( node1 ) due to required bar running + * Restart rsc1 ( node1 ) + * Restart rsc2 ( node1 ) * Recover rsc3 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 stop on node1 - * Pseudo action: foo_stop_0 - * Resource action: rsc2 stop on node1 - * Pseudo action: foo_stopped_0 - * Pseudo action: bar_stop_0 - * Resource action: rsc3 stop on node1 - * Pseudo action: bar_stopped_0 - * Pseudo action: bar_start_0 - * Resource action: rsc3 start on node1 - * Pseudo action: bar_running_0 - * Resource action: rsc1 start on node1 - * Pseudo action: foo_start_0 - * Resource action: rsc2 start on node1 - * Pseudo action: foo_running_0 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ node1 ] @@ -39,4 +31,4 @@ Revised Cluster Status: * Resource Group: foo: * rsc2 (ocf:heartbeat:apache): Started node1 * Resource Group: bar: - * rsc3 (ocf:heartbeat:apache): Started node1 + * rsc3 (ocf:heartbeat:apache): FAILED node1 diff --git a/cts/scheduler/summary/reload-becomes-restart.summary b/cts/scheduler/summary/reload-becomes-restart.summary index a6bd43a849e..3130e0970ba 100644 --- a/cts/scheduler/summary/reload-becomes-restart.summary +++ b/cts/scheduler/summary/reload-becomes-restart.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2016-12-12 20:28:26Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -19,37 +21,18 @@ Transition Summary: * Start rsc2:1 ( node2 ) Executing Cluster Transition: - * Resource action: Fencing monitor on node2 - * Resource action: Fencing monitor on node1 - * Resource action: rsc1:0 monitor on node2 - * Resource action: rsc1:1 monitor on node1 - * Pseudo action: cl-rsc1_start_0 - * Resource action: rsc2 monitor on node2 - * Pseudo action: cl-rsc2_stop_0 - * Resource action: Fencing start on node1 - * Resource action: rsc1:0 start on node2 - * 
Resource action: rsc1:1 start on node1 - * Pseudo action: cl-rsc1_running_0 - * Resource action: rsc2 stop on node1 - * Pseudo action: cl-rsc2_stopped_0 - * Pseudo action: cl-rsc2_start_0 - * Resource action: Fencing monitor=120000 on node1 - * Resource action: rsc1:0 monitor=120000 on node2 - * Resource action: rsc1:1 monitor=120000 on node1 - * Resource action: rsc2 start on node1 - * Resource action: rsc2 monitor=200000 on node1 - * Resource action: rsc2 start on node2 - * Pseudo action: cl-rsc2_running_0 - * Resource action: rsc2 monitor=200000 on node2 Using the original execution date of: 2016-12-12 20:28:26Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * Fencing (stonith:fence_xvm): Started node1 + * Fencing (stonith:fence_xvm): Stopped * Clone Set: cl-rsc1 [rsc1]: - * Started: [ node1 node2 ] + * Stopped: [ node1 node2 ] * Clone Set: cl-rsc2 [rsc2]: - * Started: [ node1 node2 ] + * Started: [ node1 ] + * Stopped: [ node2 ] diff --git a/cts/scheduler/summary/remote-connection-shutdown.summary b/cts/scheduler/summary/remote-connection-shutdown.summary index b8ea5be0465..319c16a5300 100644 --- a/cts/scheduler/summary/remote-connection-shutdown.summary +++ b/cts/scheduler/summary/remote-connection-shutdown.summary @@ -1,5 +1,33 @@ Using the original execution date of: 2020-11-17 07:03:16Z Current cluster status: + * Cluster Summary: +Only 'private' parameters to stonith-fence_compute-fence-nova for unfencing compute-0 changed +Only 'private' parameters to stonith-fence_compute-fence-nova for unfencing compute-1 changed +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-5254009cb549 on database-0 changed: 0:0;322:1375:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-5254009cb549 on database-0 changed: 0:0;323:1375:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to 0s-interval start for nova-evacuate on database-0 changed: 0:0;258:1420:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to 10s-interval monitor for nova-evacuate on database-0 changed: 0:0;259:1420:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400dc0f81 on database-1 changed: 0:0;330:1380:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400dc0f81 on database-1 changed: 0:0;331:1380:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-52540033df9c on database-1 changed: 0:0;261:1420:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-52540033df9c on database-1 changed: 0:0;263:1420:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400d5382b on database-2 changed: 0:0;319:1374:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400d5382b on database-2 changed: 0:0;320:1374:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-5254001f5f3c on database-2 changed: 0:0;263:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-5254001f5f3c on database-2 changed: 0:0;265:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' 
parameters to 0s-interval start for stonith-fence_ipmilan-525400bb150b on messaging-0 changed: 0:0;324:70:0:40f880e4-b328-4380-9703-47856390a1e0 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400bb150b on messaging-0 changed: 0:0;325:70:0:40f880e4-b328-4380-9703-47856390a1e0 +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-5254003f88b4 on messaging-0 changed: 0:0;267:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-5254003f88b4 on messaging-0 changed: 0:0;269:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400e10267 on messaging-1 changed: 0:0;320:1317:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400e10267 on messaging-1 changed: 0:0;326:1318:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-5254007b7920 on messaging-1 changed: 0:0;271:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-5254007b7920 on messaging-1 changed: 0:0;273:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400642894 on messaging-2 changed: 0:0;272:1424:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400642894 on messaging-2 changed: 0:0;274:1424:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400ffc780 on messaging-2 changed: 0:0;321:49:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400ffc780 on messaging-2 changed: 0:0;323:50:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 + * Node List: * Online: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ] * RemoteOnline: [ compute-0 compute-1 ] @@ -38,7 +66,7 @@ Current cluster status: * stonith-fence_compute-fence-nova (stonith:fence_compute): Stopped * Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]: * Started: [ compute-0 compute-1 ] - * Stopped: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ] + * Stopped: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ] * nova-evacuate (ocf:openstack:NovaEvacuate): Started database-0 * stonith-fence_ipmilan-52540033df9c (stonith:fence_ipmilan): Started database-1 * stonith-fence_ipmilan-5254001f5f3c (stonith:fence_ipmilan): Started database-2 @@ -65,55 +93,24 @@ Transition Summary: * Move stonith-fence_ipmilan-5254007b7920 ( messaging-1 -> messaging-2 ) * Move stonith-fence_ipmilan-525400ffc780 ( messaging-2 -> database-0 ) * Move stonith-fence_ipmilan-5254009cb549 ( database-0 -> database-1 ) +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Resource action: stonith-fence_compute-fence-nova start on database-0 - * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on messaging-2 - * Cluster action: 
clear_failcount for stonith-fence_compute-fence-nova on messaging-0
- * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on messaging-1
- * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on controller-2
- * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on controller-1
- * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on controller-0
- * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on database-2
- * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on database-1
- * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on database-0
- * Pseudo action: compute-unfence-trigger-clone_stop_0
- * Resource action: nova-evacuate stop on database-0
- * Resource action: stonith-fence_ipmilan-52540033df9c stop on database-1
- * Resource action: stonith-fence_ipmilan-5254001f5f3c stop on database-2
- * Resource action: stonith-fence_ipmilan-5254003f88b4 stop on messaging-0
- * Resource action: stonith-fence_ipmilan-5254007b7920 stop on messaging-1
- * Resource action: stonith-fence_ipmilan-525400ffc780 stop on messaging-2
- * Resource action: stonith-fence_ipmilan-5254009cb549 stop on database-0
- * Resource action: stonith-fence_compute-fence-nova monitor=60000 on database-0
- * Resource action: compute-unfence-trigger stop on compute-0
- * Pseudo action: compute-unfence-trigger-clone_stopped_0
- * Resource action: nova-evacuate start on database-1
- * Resource action: stonith-fence_ipmilan-52540033df9c start on database-2
- * Resource action: stonith-fence_ipmilan-5254001f5f3c start on messaging-0
- * Resource action: stonith-fence_ipmilan-5254003f88b4 start on messaging-1
- * Resource action: stonith-fence_ipmilan-5254007b7920 start on messaging-2
- * Resource action: stonith-fence_ipmilan-525400ffc780 start on database-0
- * Resource action: stonith-fence_ipmilan-5254009cb549 start on database-1
- * Resource action: compute-0 stop on controller-0
- * Resource action: nova-evacuate monitor=10000 on database-1
- * Resource action: stonith-fence_ipmilan-52540033df9c monitor=60000 on database-2
- * Resource action: stonith-fence_ipmilan-5254001f5f3c monitor=60000 on messaging-0
- * Resource action: stonith-fence_ipmilan-5254003f88b4 monitor=60000 on messaging-1
- * Resource action: stonith-fence_ipmilan-5254007b7920 monitor=60000 on messaging-2
- * Resource action: stonith-fence_ipmilan-525400ffc780 monitor=60000 on database-0
- * Resource action: stonith-fence_ipmilan-5254009cb549 monitor=60000 on database-1
 Using the original execution date of: 2020-11-17 07:03:16Z
 Revised Cluster Status:
+ * Cluster Summary:
+Only 'private' parameters to stonith-fence_compute-fence-nova for unfencing compute-0 changed
+Only 'private' parameters to stonith-fence_compute-fence-nova for unfencing compute-1 changed
+
 * Node List:
 * Online: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ]
- * RemoteOnline: [ compute-1 ]
- * RemoteOFFLINE: [ compute-0 ]
+ * RemoteOnline: [ compute-0 compute-1 ]
 * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
 * Full List of Resources:
- * compute-0 (ocf:pacemaker:remote): Stopped
+ * compute-0 (ocf:pacemaker:remote): Started controller-0
 * compute-1 (ocf:pacemaker:remote): Started controller-1
 * Container bundle set: galera-bundle [cluster.common.tag/mariadb:pcmklatest]:
 * galera-bundle-0 (ocf:heartbeat:galera): Promoted database-0
@@ -142,20 +139,20 @@ Revised Cluster Status:
 * ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Unpromoted controller-0
 * ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Unpromoted controller-1
 * ip-172.17.1.57 (ocf:heartbeat:IPaddr2): Started controller-2
- * stonith-fence_compute-fence-nova (stonith:fence_compute): Started database-0
+ * stonith-fence_compute-fence-nova (stonith:fence_compute): Stopped
 * Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]:
- * Started: [ compute-1 ]
- * Stopped: [ compute-0 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ]
- * nova-evacuate (ocf:openstack:NovaEvacuate): Started database-1
- * stonith-fence_ipmilan-52540033df9c (stonith:fence_ipmilan): Started database-2
- * stonith-fence_ipmilan-5254001f5f3c (stonith:fence_ipmilan): Started messaging-0
- * stonith-fence_ipmilan-5254003f88b4 (stonith:fence_ipmilan): Started messaging-1
- * stonith-fence_ipmilan-5254007b7920 (stonith:fence_ipmilan): Started messaging-2
+ * Started: [ compute-0 compute-1 ]
+ * Stopped: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
+ * nova-evacuate (ocf:openstack:NovaEvacuate): Started database-0
+ * stonith-fence_ipmilan-52540033df9c (stonith:fence_ipmilan): Started database-1
+ * stonith-fence_ipmilan-5254001f5f3c (stonith:fence_ipmilan): Started database-2
+ * stonith-fence_ipmilan-5254003f88b4 (stonith:fence_ipmilan): Started messaging-0
+ * stonith-fence_ipmilan-5254007b7920 (stonith:fence_ipmilan): Started messaging-1
 * stonith-fence_ipmilan-525400642894 (stonith:fence_ipmilan): Started messaging-2
 * stonith-fence_ipmilan-525400d5382b (stonith:fence_ipmilan): Started database-2
 * stonith-fence_ipmilan-525400bb150b (stonith:fence_ipmilan): Started messaging-0
- * stonith-fence_ipmilan-525400ffc780 (stonith:fence_ipmilan): Started database-0
- * stonith-fence_ipmilan-5254009cb549 (stonith:fence_ipmilan): Started database-1
+ * stonith-fence_ipmilan-525400ffc780 (stonith:fence_ipmilan): Started messaging-2
+ * stonith-fence_ipmilan-5254009cb549 (stonith:fence_ipmilan): Started database-0
 * stonith-fence_ipmilan-525400e10267 (stonith:fence_ipmilan): Started messaging-1
 * stonith-fence_ipmilan-525400dc0f81 (stonith:fence_ipmilan): Started database-1
 * Container bundle: openstack-cinder-volume [cluster.common.tag/cinder-volume:pcmklatest]:
diff --git a/cts/scheduler/summary/remote-connection-unrecoverable.summary b/cts/scheduler/summary/remote-connection-unrecoverable.summary
index ad8f353b6a5..10ce3fdbc07 100644
--- a/cts/scheduler/summary/remote-connection-unrecoverable.summary
+++ b/cts/scheduler/summary/remote-connection-unrecoverable.summary
@@ -1,8 +1,10 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Node node1: UNCLEAN (offline)
+ * RemoteNode remote1: UNCLEAN (online)
 * Online: [ node2 ]
- * RemoteOnline: [ remote1 ]
 * Full List of Resources:
 * remote1 (ocf:pacemaker:remote): Started node1 (UNCLEAN)
@@ -11,7 +13,6 @@ Current cluster status:
 * Clone Set: rsc2-master [rsc2] (promotable):
 * rsc2 (ocf:pacemaker:Stateful): Promoted node1 (UNCLEAN)
 * Promoted: [ node2 ]
- * Stopped: [ remote1 ]
 Transition Summary:
 * Fence (reboot) remote1 'resources are active but connection is unrecoverable'
@@ -20,35 +21,23 @@ Transition Summary:
 * Restart killer ( node2 ) due to resource definition change
 * Move rsc1 ( remote1 -> node2 )
 * Stop rsc2:0 ( Promoted node1 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 Executing Cluster Transition:
- * Pseudo action: remote1_stop_0
- * Resource action: killer stop on node2
- * Resource action: rsc1 monitor on node2
- * Fencing node1 (reboot)
- * Fencing remote1 (reboot)
- * Resource action: killer start on node2
- * Resource action: killer monitor=60000 on node2
- * Pseudo action: rsc1_stop_0
- * Pseudo action: rsc2-master_demote_0
- * Resource action: rsc1 start on node2
- * Pseudo action: rsc2_demote_0
- * Pseudo action: rsc2-master_demoted_0
- * Pseudo action: rsc2-master_stop_0
- * Resource action: rsc1 monitor=10000 on node2
- * Pseudo action: rsc2_stop_0
- * Pseudo action: rsc2-master_stopped_0
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
+ * Node node1: UNCLEAN (offline)
+ * RemoteNode remote1: UNCLEAN (online)
 * Online: [ node2 ]
- * OFFLINE: [ node1 ]
- * RemoteOFFLINE: [ remote1 ]
 * Full List of Resources:
- * remote1 (ocf:pacemaker:remote): Stopped
+ * remote1 (ocf:pacemaker:remote): Started node1 (UNCLEAN)
 * killer (stonith:fence_xvm): Started node2
- * rsc1 (ocf:pacemaker:Dummy): Started node2
+ * rsc1 (ocf:pacemaker:Dummy): Started remote1
 * Clone Set: rsc2-master [rsc2] (promotable):
+ * rsc2 (ocf:pacemaker:Stateful): Promoted node1 (UNCLEAN)
 * Promoted: [ node2 ]
- * Stopped: [ node1 remote1 ]
diff --git a/cts/scheduler/summary/remote-disable.summary b/cts/scheduler/summary/remote-disable.summary
index a90cb40d925..d6f902e124a 100644
--- a/cts/scheduler/summary/remote-disable.summary
+++ b/cts/scheduler/summary/remote-disable.summary
@@ -1,6 +1,8 @@
 1 of 6 resource instances DISABLED and 0 BLOCKED from further action due to failure
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ 18builder 18node1 18node2 ]
 * RemoteOnline: [ remote1 ]
@@ -18,18 +20,18 @@ Transition Summary:
 * Stop FAKE2 ( remote1 ) due to node availability
 Executing Cluster Transition:
- * Resource action: FAKE2 stop on remote1
- * Resource action: remote1 stop on 18builder
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ 18builder 18node1 18node2 ]
- * RemoteOFFLINE: [ remote1 ]
+ * RemoteOnline: [ remote1 ]
 * Full List of Resources:
 * shooter (stonith:fence_xvm): Started 18node1
- * remote1 (ocf:pacemaker:remote): Stopped (disabled)
+ * remote1 (ocf:pacemaker:remote): Started 18builder (disabled)
 * FAKE1 (ocf:heartbeat:Dummy): Started 18node2
- * FAKE2 (ocf:heartbeat:Dummy): Stopped
+ * FAKE2 (ocf:heartbeat:Dummy): Started remote1
 * FAKE3 (ocf:heartbeat:Dummy): Started 18builder
 * FAKE4 (ocf:heartbeat:Dummy): Started 18node1
diff --git a/cts/scheduler/summary/remote-fence-before-reconnect.summary b/cts/scheduler/summary/remote-fence-before-reconnect.summary
index ab361efdc72..c6655a9b204 100644
--- a/cts/scheduler/summary/remote-fence-before-reconnect.summary
+++ b/cts/scheduler/summary/remote-fence-before-reconnect.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * RemoteNode c7auto4: UNCLEAN (offline)
 * Online: [ c7auto1 c7auto2 c7auto3 ]
@@ -18,22 +20,19 @@ Transition Summary:
 * Move fake2 ( c7auto4 -> c7auto1 )
 Executing Cluster Transition:
- * Resource action: c7auto4 stop on c7auto1
- * Fencing c7auto4 (reboot)
- * Pseudo action: fake2_stop_0
- * Resource action: fake2 start on c7auto1
- * Resource action: fake2 monitor=10000 on c7auto1
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * RemoteNode c7auto4: UNCLEAN (offline)
 * Online: [ c7auto1 c7auto2 c7auto3 ]
 * Full List of Resources:
 * shooter (stonith:fence_phd_kvm): Started c7auto2
- * c7auto4 (ocf:pacemaker:remote): FAILED
+ * c7auto4 (ocf:pacemaker:remote): FAILED c7auto1
 * fake1 (ocf:heartbeat:Dummy): Started c7auto3
- * fake2 (ocf:heartbeat:Dummy): Started c7auto1
+ * fake2 (ocf:heartbeat:Dummy): Started c7auto4 (UNCLEAN)
 * fake3 (ocf:heartbeat:Dummy): Started c7auto1
 * fake4 (ocf:heartbeat:Dummy): Started c7auto2
 * fake5 (ocf:heartbeat:Dummy): Started c7auto3
diff --git a/cts/scheduler/summary/remote-fence-unclean-3.summary b/cts/scheduler/summary/remote-fence-unclean-3.summary
index af916ed3e55..f2e5bf15cb3 100644
--- a/cts/scheduler/summary/remote-fence-unclean-3.summary
+++ b/cts/scheduler/summary/remote-fence-unclean-3.summary
@@ -1,7 +1,9 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
+ * RemoteNode overcloud-novacompute-0: UNCLEAN (offline)
 * Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
- * RemoteOFFLINE: [ overcloud-novacompute-0 ]
 * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
 * Full List of Resources:
@@ -40,41 +42,18 @@ Transition Summary:
 * Stop overcloud-novacompute-0 ( overcloud-controller-0 ) due to node availability
 Executing Cluster Transition:
- * Resource action: fence1 monitor on overcloud-controller-2
- * Resource action: fence1 monitor on overcloud-controller-1
- * Resource action: fence1 monitor on overcloud-controller-0
- * Resource action: overcloud-novacompute-0 stop on overcloud-controller-0
- * Resource action: rabbitmq-bundle-0 monitor on overcloud-controller-2
- * Resource action: rabbitmq-bundle-0 monitor on overcloud-controller-1
- * Resource action: rabbitmq-bundle-1 monitor on overcloud-controller-2
- * Resource action: rabbitmq-bundle-1 monitor on overcloud-controller-0
- * Resource action: rabbitmq-bundle-2 monitor on overcloud-controller-1
- * Resource action: rabbitmq-bundle-2 monitor on overcloud-controller-0
- * Resource action: galera-bundle-0 monitor on overcloud-controller-2
- * Resource action: galera-bundle-0 monitor on overcloud-controller-1
- * Resource action: galera-bundle-1 monitor on overcloud-controller-2
- * Resource action: galera-bundle-1 monitor on overcloud-controller-0
- * Resource action: galera-bundle-2 monitor on overcloud-controller-1
- * Resource action: galera-bundle-2 monitor on overcloud-controller-0
- * Resource action: redis-bundle-0 monitor on overcloud-controller-2
- * Resource action: redis-bundle-0 monitor on overcloud-controller-1
- * Resource action: redis-bundle-1 monitor on overcloud-controller-2
- * Resource action: redis-bundle-1 monitor on overcloud-controller-0
- * Resource action: redis-bundle-2 monitor on overcloud-controller-1
- * Resource action: redis-bundle-2 monitor on overcloud-controller-0
- * Fencing overcloud-novacompute-0 (reboot)
- * Resource action: fence1 start on overcloud-controller-0
- * Resource action: fence1 monitor=60000 on overcloud-controller-0
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
+ * RemoteNode overcloud-novacompute-0: UNCLEAN (offline)
 * Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
- * RemoteOFFLINE: [ overcloud-novacompute-0 ]
 * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
 * Full List of Resources:
- * fence1 (stonith:fence_xvm): Started overcloud-controller-0
- * overcloud-novacompute-0 (ocf:pacemaker:remote): Stopped
+ * fence1 (stonith:fence_xvm): Stopped
+ * overcloud-novacompute-0 (ocf:pacemaker:remote): FAILED overcloud-controller-0
 * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
 * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started overcloud-controller-0
 * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started overcloud-controller-1
diff --git a/cts/scheduler/summary/remote-fence-unclean.summary b/cts/scheduler/summary/remote-fence-unclean.summary
index a467dc3d9c1..f8c07ef1dec 100644
--- a/cts/scheduler/summary/remote-fence-unclean.summary
+++ b/cts/scheduler/summary/remote-fence-unclean.summary
@@ -1,13 +1,15 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
- * RemoteNode remote1: UNCLEAN (offline)
+ * RemoteNode remote1: UNCLEAN (online)
 * Online: [ 18builder 18node1 18node2 ]
 * Full List of Resources:
 * shooter (stonith:fence_xvm): Started 18builder
 * remote1 (ocf:pacemaker:remote): FAILED 18node1
 * FAKE1 (ocf:heartbeat:Dummy): Started 18node2
- * FAKE2 (ocf:heartbeat:Dummy): Started remote1 (UNCLEAN)
+ * FAKE2 (ocf:heartbeat:Dummy): Started remote1
 * FAKE3 (ocf:heartbeat:Dummy): Started 18builder
 * FAKE4 (ocf:heartbeat:Dummy): Started 18node1
@@ -19,29 +21,18 @@ Transition Summary:
 * Move FAKE4 ( 18node1 -> 18node2 )
 Executing Cluster Transition:
- * Resource action: FAKE3 stop on 18builder
- * Resource action: FAKE4 stop on 18node1
- * Fencing remote1 (reboot)
- * Pseudo action: FAKE2_stop_0
- * Resource action: FAKE3 start on 18node1
- * Resource action: FAKE4 start on 18node2
- * Resource action: remote1 stop on 18node1
- * Resource action: FAKE2 start on 18builder
- * Resource action: FAKE3 monitor=60000 on 18node1
- * Resource action: FAKE4 monitor=60000 on 18node2
- * Resource action: remote1 start on 18node1
- * Resource action: remote1 monitor=60000 on 18node1
- * Resource action: FAKE2 monitor=60000 on 18builder
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
+ * RemoteNode remote1: UNCLEAN (online)
 * Online: [ 18builder 18node1 18node2 ]
- * RemoteOnline: [ remote1 ]
 * Full List of Resources:
 * shooter (stonith:fence_xvm): Started 18builder
- * remote1 (ocf:pacemaker:remote): Started 18node1
+ * remote1 (ocf:pacemaker:remote): FAILED 18node1
 * FAKE1 (ocf:heartbeat:Dummy): Started 18node2
- * FAKE2 (ocf:heartbeat:Dummy): Started 18builder
- * FAKE3 (ocf:heartbeat:Dummy): Started 18node1
- * FAKE4 (ocf:heartbeat:Dummy): Started 18node2
+ * FAKE2 (ocf:heartbeat:Dummy): Started remote1
+ * FAKE3 (ocf:heartbeat:Dummy): Started 18builder
+ * FAKE4 (ocf:heartbeat:Dummy): Started 18node1
diff --git a/cts/scheduler/summary/remote-fence-unclean2.summary b/cts/scheduler/summary/remote-fence-unclean2.summary
index a4251c6adc4..73513d0f513 100644
--- a/cts/scheduler/summary/remote-fence-unclean2.summary
+++ b/cts/scheduler/summary/remote-fence-unclean2.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Node rhel7-alt1: standby
 * Node rhel7-alt2: standby
@@ -15,17 +17,17 @@ Transition Summary:
 * Stop fake ( rhel7-alt4 ) due to node availability
 Executing Cluster Transition:
- * Fencing rhel7-alt4 (reboot)
- * Pseudo action: fake_stop_0
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Node rhel7-alt1: standby
 * Node rhel7-alt2: standby
+ * RemoteNode rhel7-alt4: UNCLEAN (offline)
 * OFFLINE: [ rhel7-alt3 ]
- * RemoteOFFLINE: [ rhel7-alt4 ]
 * Full List of Resources:
 * shooter (stonith:fence_xvm): Stopped
 * rhel7-alt4 (ocf:pacemaker:remote): Stopped
- * fake (ocf:heartbeat:Dummy): Stopped
+ * fake (ocf:heartbeat:Dummy): Started rhel7-alt4 (UNCLEAN)
diff --git a/cts/scheduler/summary/remote-move.summary b/cts/scheduler/summary/remote-move.summary
index 5fc5f09a7f8..bc25e1bdcf1 100644
--- a/cts/scheduler/summary/remote-move.summary
+++ b/cts/scheduler/summary/remote-move.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ 18builder 18node1 18node2 ]
 * RemoteOnline: [ remote1 ]
@@ -16,23 +18,17 @@ Transition Summary:
 * Migrate remote1 ( 18builder -> 18node1 )
 Executing Cluster Transition:
- * Resource action: shooter stop on 18node1
- * Resource action: remote1 migrate_to on 18builder
- * Resource action: shooter start on 18builder
- * Resource action: remote1 migrate_from on 18node1
- * Resource action: remote1 stop on 18builder
- * Resource action: shooter monitor=60000 on 18builder
- * Pseudo action: remote1_start_0
- * Resource action: remote1 monitor=60000 on 18node1
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ 18builder 18node1 18node2 ]
 * RemoteOnline: [ remote1 ]
 * Full List of Resources:
- * shooter (stonith:fence_xvm): Started 18builder
- * remote1 (ocf:pacemaker:remote): Started 18node1
+ * shooter (stonith:fence_xvm): Started 18node1
+ * remote1 (ocf:pacemaker:remote): Started 18builder
 * FAKE1 (ocf:heartbeat:Dummy): Started 18node2
 * FAKE2 (ocf:heartbeat:Dummy): Started remote1
 * FAKE3 (ocf:heartbeat:Dummy): Started 18builder
diff --git a/cts/scheduler/summary/remote-orphaned.summary b/cts/scheduler/summary/remote-orphaned.summary
index 4b5ed6f5876..6d86f31ae16 100644
--- a/cts/scheduler/summary/remote-orphaned.summary
+++ b/cts/scheduler/summary/remote-orphaned.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+ * Cluster Summary:
+ * CIB syntax has errors (for details, run crm_verify -LV)
+
 * Node List:
 * Online: [ 18node1 18node3 ]
 * OFFLINE: [ 18node2 ]
@@ -17,7 +20,7 @@ Current cluster status:
 * Clone Set: master-1 [stateful-1] (promotable):
 * Promoted: [ 18node1 ]
 * Unpromoted: [ 18node3 ]
- * Stopped: [ 18node2 ]
+ * Stopped: [ 18node2 remote1 ]
 * Resource Group: group-1:
 * r192.168.122.87 (ocf:heartbeat:IPaddr2): Started 18node1
 * r192.168.122.88 (ocf:heartbeat:IPaddr2): Started 18node1
@@ -29,41 +32,37 @@ Transition Summary:
 * Move rsc_18node2 ( remote1 -> 18node1 )
 * Stop ping-1:2 ( remote1 ) due to node availability
 * Stop remote1 ( 18node1 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 Executing Cluster Transition:
- * Resource action: rsc_18node2 stop on remote1
- * Pseudo action: Connectivity_stop_0
- * Resource action: rsc_18node2 start on 18node1
- * Resource action: ping-1 stop on remote1
- * Pseudo action: Connectivity_stopped_0
- * Resource action: remote1 stop on 18node1
- * Resource action: remote1 delete on 18node3
- * Resource action: remote1 delete on 18node1
- * Resource action: rsc_18node2 monitor=5000 on 18node1
 Revised Cluster Status:
+ * Cluster Summary:
+ * CIB syntax has errors (for details, run crm_verify -LV)
+
 * Node List:
 * Online: [ 18node1 18node3 ]
 * OFFLINE: [ 18node2 ]
- * RemoteOFFLINE: [ remote1 ]
+ * RemoteOnline: [ remote1 ]
 * Full List of Resources:
 * Fencing (stonith:fence_xvm): Started 18node3
 * FencingPass (stonith:fence_dummy): Started 18node1
 * FencingFail (stonith:fence_dummy): Started 18node3
 * rsc_18node1 (ocf:heartbeat:IPaddr2): Started 18node1
- * rsc_18node2 (ocf:heartbeat:IPaddr2): Started 18node1
+ * rsc_18node2 (ocf:heartbeat:IPaddr2): Started remote1
 * rsc_18node3 (ocf:heartbeat:IPaddr2): Started 18node3
 * migrator (ocf:pacemaker:Dummy): Started 18node1
 * Clone Set: Connectivity [ping-1]:
- * Started: [ 18node1 18node3 ]
- * Stopped: [ 18node2 ]
+ * Started: [ 18node1 18node3 remote1 ]
 * Clone Set: master-1 [stateful-1] (promotable):
 * Promoted: [ 18node1 ]
 * Unpromoted: [ 18node3 ]
- * Stopped: [ 18node2 ]
+ * Stopped: [ 18node2 remote1 ]
 * Resource Group: group-1:
 * r192.168.122.87 (ocf:heartbeat:IPaddr2): Started 18node1
 * r192.168.122.88 (ocf:heartbeat:IPaddr2): Started 18node1
 * r192.168.122.89 (ocf:heartbeat:IPaddr2): Started 18node1
 * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started 18node1
+ * remote1 (ocf:pacemaker:remote): ORPHANED Started 18node1
diff --git a/cts/scheduler/summary/remote-orphaned2.summary b/cts/scheduler/summary/remote-orphaned2.summary
index 9b0091467b0..aeceada851a 100644
--- a/cts/scheduler/summary/remote-orphaned2.summary
+++ b/cts/scheduler/summary/remote-orphaned2.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+ * Cluster Summary:
+ * CIB syntax has errors (for details, run crm_verify -LV)
+
 * Node List:
 * RemoteNode mrg-02: UNCLEAN (offline)
 * RemoteNode mrg-03: UNCLEAN (offline)
@@ -6,16 +9,19 @@ Current cluster status:
 * Online: [ host-026 host-027 host-028 ]
 * Full List of Resources:
- * neutron-openvswitch-agent-compute (ocf:heartbeat:Dummy): ORPHANED Started [ mrg-03 mrg-02 mrg-04 ]
- * libvirtd-compute (systemd:libvirtd): ORPHANED Started [ mrg-03 mrg-02 mrg-04 ]
- * ceilometer-compute (systemd:openstack-ceilometer-compute): ORPHANED Started [ mrg-03 mrg-02 mrg-04 ]
- * nova-compute (systemd:openstack-nova-compute): ORPHANED Started [ mrg-03 mrg-02 mrg-04 ]
+ * neutron-openvswitch-agent-compute (ocf:heartbeat:Dummy): ORPHANED Started (unmanaged) [ mrg-03 mrg-02 mrg-04 ]
+ * libvirtd-compute (systemd:libvirtd): ORPHANED Started (unmanaged) [ mrg-03 mrg-02 mrg-04 ]
+ * ceilometer-compute (systemd:openstack-ceilometer-compute): ORPHANED Started (unmanaged) [ mrg-03 mrg-02 mrg-04 ]
+ * nova-compute (systemd:openstack-nova-compute): ORPHANED Started (unmanaged) [ mrg-03 mrg-02 mrg-04 ]
 Transition Summary:
 Executing Cluster Transition:
 Revised Cluster Status:
+ * Cluster Summary:
+ * CIB syntax has errors (for details, run crm_verify -LV)
+
 * Node List:
 * RemoteNode mrg-02: UNCLEAN (offline)
 * RemoteNode mrg-03: UNCLEAN (offline)
@@ -23,7 +29,7 @@ Revised Cluster Status:
 * Online: [ host-026 host-027 host-028 ]
 * Full List of Resources:
- * neutron-openvswitch-agent-compute (ocf:heartbeat:Dummy): ORPHANED Started [ mrg-03 mrg-02 mrg-04 ]
- * libvirtd-compute (systemd:libvirtd): ORPHANED Started [ mrg-03 mrg-02 mrg-04 ]
- * ceilometer-compute (systemd:openstack-ceilometer-compute): ORPHANED Started [ mrg-03 mrg-02 mrg-04 ]
- * nova-compute (systemd:openstack-nova-compute): ORPHANED Started [ mrg-03 mrg-02 mrg-04 ]
+ * neutron-openvswitch-agent-compute (ocf:heartbeat:Dummy): ORPHANED Started (unmanaged) [ mrg-03 mrg-02 mrg-04 ]
+ * libvirtd-compute (systemd:libvirtd): ORPHANED Started (unmanaged) [ mrg-03 mrg-02 mrg-04 ]
+ * ceilometer-compute (systemd:openstack-ceilometer-compute): ORPHANED Started (unmanaged) [ mrg-03 mrg-02 mrg-04 ]
+ * nova-compute (systemd:openstack-nova-compute): ORPHANED Started (unmanaged) [ mrg-03 mrg-02 mrg-04 ]
diff --git a/cts/scheduler/summary/remote-partial-migrate.summary b/cts/scheduler/summary/remote-partial-migrate.summary
index 2cdf22797fc..dab19463240 100644
--- a/cts/scheduler/summary/remote-partial-migrate.summary
+++ b/cts/scheduler/summary/remote-partial-migrate.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ pcmk1 pcmk2 pcmk3 ]
 * RemoteOnline: [ pcmk_remote1 pcmk_remote2 pcmk_remote3 pcmk_remote4 ]
@@ -82,50 +84,10 @@ Transition Summary:
 * Start FAKE50 ( pcmk1 )
 Executing Cluster Transition:
- * Resource action: pcmk_remote3 migrate_from on pcmk2
- * Resource action: pcmk_remote3 stop on pcmk1
- * Resource action: FAKE10 start on pcmk1
- * Resource action: FAKE13 start on pcmk2
- * Resource action: FAKE15 start on pcmk3
- * Resource action: FAKE22 start on pcmk1
- * Resource action: FAKE23 stop on pcmk1
- * Resource action: FAKE26 start on pcmk1
- * Resource action: FAKE29 start on pcmk2
- * Resource action: FAKE30 stop on pcmk1
- * Resource action: FAKE36 start on pcmk1
- * Resource action: FAKE37 stop on pcmk1
- * Resource action: FAKE43 start on pcmk1
- * Resource action: FAKE44 stop on pcmk1
- * Resource action: FAKE50 start on pcmk1
- * Pseudo action: pcmk_remote3_start_0
- * Resource action: FAKE4 start on pcmk_remote3
- * Resource action: FAKE9 stop on pcmk_remote3
- * Resource action: FAKE10 monitor=10000 on pcmk1
- * Resource action: FAKE13 monitor=10000 on pcmk2
- * Resource action: FAKE15 monitor=10000 on pcmk3
- * Resource action: FAKE16 stop on pcmk1
- * Resource action: FAKE22 monitor=10000 on pcmk1
- * Resource action: FAKE23 start on pcmk_remote1
- * Resource action: FAKE26 monitor=10000 on pcmk1
- * Resource action: FAKE29 monitor=10000 on pcmk2
- * Resource action: FAKE30 start on pcmk_remote2
- * Resource action: FAKE36 monitor=10000 on pcmk1
- * Resource action: FAKE37 start on pcmk2
- * Resource action: FAKE43 monitor=10000 on pcmk1
- * Resource action: FAKE44 start on pcmk2
- * Resource action: FAKE50 monitor=10000 on pcmk1
- * Resource action: pcmk_remote3 monitor=60000 on pcmk2
- * Resource action: FAKE4 monitor=10000 on pcmk_remote3
- * Resource action: FAKE9 start on pcmk1
- * Resource action: FAKE16 start on pcmk_remote3
- * Resource action: FAKE23 monitor=10000 on pcmk_remote1
- * Resource action: FAKE30 monitor=10000 on pcmk_remote2
- * Resource action: FAKE37 monitor=10000 on pcmk2
- * Resource action: FAKE44 monitor=10000 on pcmk2
- * Resource action: FAKE9 monitor=10000 on pcmk1
- * Resource action: FAKE16 monitor=10000 on pcmk_remote3
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ pcmk1 pcmk2 pcmk3 ]
 * RemoteOnline: [ pcmk_remote1 pcmk_remote2 pcmk_remote3 pcmk_remote4 ]
@@ -135,56 +97,56 @@ Revised Cluster Status:
 * shooter (stonith:fence_docker_cts): Started pcmk2
 * pcmk_remote1 (ocf:pacemaker:remote): Started pcmk1
 * pcmk_remote2 (ocf:pacemaker:remote): Started pcmk3
- * pcmk_remote3 (ocf:pacemaker:remote): Started pcmk2
+ * pcmk_remote3 (ocf:pacemaker:remote): Started [ pcmk2 pcmk1 ]
 * pcmk_remote4 (ocf:pacemaker:remote): Started pcmk3
 * pcmk_remote5 (ocf:pacemaker:remote): Stopped
 * FAKE1 (ocf:heartbeat:Dummy): Started pcmk_remote2
 * FAKE2 (ocf:heartbeat:Dummy): Started pcmk_remote1
 * FAKE3 (ocf:heartbeat:Dummy): Started pcmk_remote4
- * FAKE4 (ocf:heartbeat:Dummy): Started pcmk_remote3
+ * FAKE4 (ocf:heartbeat:Dummy): Stopped
 * FAKE5 (ocf:heartbeat:Dummy): Started pcmk_remote3
 * FAKE6 (ocf:heartbeat:Dummy): Started pcmk_remote2
 * FAKE7 (ocf:heartbeat:Dummy): Started pcmk_remote1
 * FAKE8 (ocf:heartbeat:Dummy): Started pcmk_remote4
- * FAKE9 (ocf:heartbeat:Dummy): Started pcmk1
- * FAKE10 (ocf:heartbeat:Dummy): Started pcmk1
+ * FAKE9 (ocf:heartbeat:Dummy): Started pcmk_remote3
+ * FAKE10 (ocf:heartbeat:Dummy): Stopped
 * FAKE11 (ocf:heartbeat:Dummy): Started pcmk_remote1
 * FAKE12 (ocf:heartbeat:Dummy): Started pcmk_remote2
- * FAKE13 (ocf:heartbeat:Dummy): Started pcmk2
+ * FAKE13 (ocf:heartbeat:Dummy): Stopped
 * FAKE14 (ocf:heartbeat:Dummy): Started pcmk_remote4
- * FAKE15 (ocf:heartbeat:Dummy): Started pcmk3
- * FAKE16 (ocf:heartbeat:Dummy): Started pcmk_remote3
+ * FAKE15 (ocf:heartbeat:Dummy): Stopped
+ * FAKE16 (ocf:heartbeat:Dummy): Started pcmk1
 * FAKE17 (ocf:heartbeat:Dummy): Started pcmk3
 * FAKE18 (ocf:heartbeat:Dummy): Started pcmk2
 * FAKE19 (ocf:heartbeat:Dummy): Started pcmk_remote2
 * FAKE20 (ocf:heartbeat:Dummy): Started pcmk_remote3
 * FAKE21 (ocf:heartbeat:Dummy): Started pcmk_remote4
- * FAKE22 (ocf:heartbeat:Dummy): Started pcmk1
- * FAKE23 (ocf:heartbeat:Dummy): Started pcmk_remote1
+ * FAKE22 (ocf:heartbeat:Dummy): Stopped
+ * FAKE23 (ocf:heartbeat:Dummy): Started pcmk1
 * FAKE24 (ocf:heartbeat:Dummy): Started pcmk3
 * FAKE25 (ocf:heartbeat:Dummy): Started pcmk_remote1
- * FAKE26 (ocf:heartbeat:Dummy): Started pcmk1
+ * FAKE26 (ocf:heartbeat:Dummy): Stopped
 * FAKE27 (ocf:heartbeat:Dummy): Started pcmk_remote3
 * FAKE28 (ocf:heartbeat:Dummy): Started pcmk_remote4
- * FAKE29 (ocf:heartbeat:Dummy): Started pcmk2
- * FAKE30 (ocf:heartbeat:Dummy): Started pcmk_remote2
+ * FAKE29 (ocf:heartbeat:Dummy): Stopped
+ * FAKE30 (ocf:heartbeat:Dummy): Started pcmk1
 * FAKE31 (ocf:heartbeat:Dummy): Started pcmk3
 * FAKE32 (ocf:heartbeat:Dummy): Started pcmk_remote1
 * FAKE33 (ocf:heartbeat:Dummy): Started pcmk_remote2
 * FAKE34 (ocf:heartbeat:Dummy): Started pcmk_remote3
 * FAKE35 (ocf:heartbeat:Dummy): Started pcmk_remote4
- * FAKE36 (ocf:heartbeat:Dummy): Started pcmk1
- * FAKE37 (ocf:heartbeat:Dummy): Started pcmk2
+ * FAKE36 (ocf:heartbeat:Dummy): Stopped
+ * FAKE37 (ocf:heartbeat:Dummy): Started pcmk1
 * FAKE38 (ocf:heartbeat:Dummy): Started pcmk3
 * FAKE39 (ocf:heartbeat:Dummy): Started pcmk_remote1
 * FAKE40 (ocf:heartbeat:Dummy): Started pcmk_remote2
 * FAKE41 (ocf:heartbeat:Dummy): Started pcmk_remote3
 * FAKE42 (ocf:heartbeat:Dummy): Started pcmk_remote4
- * FAKE43 (ocf:heartbeat:Dummy): Started pcmk1
- * FAKE44 (ocf:heartbeat:Dummy): Started pcmk2
+ * FAKE43 (ocf:heartbeat:Dummy): Stopped
+ * FAKE44 (ocf:heartbeat:Dummy): Started pcmk1
 * FAKE45 (ocf:heartbeat:Dummy): Started pcmk3
 * FAKE46 (ocf:heartbeat:Dummy): Started pcmk_remote1
 * FAKE47 (ocf:heartbeat:Dummy): Started pcmk_remote2
 * FAKE48 (ocf:heartbeat:Dummy): Started pcmk_remote3
 * FAKE49 (ocf:heartbeat:Dummy): Started pcmk_remote4
- * FAKE50 (ocf:heartbeat:Dummy): Started pcmk1
+ * FAKE50 (ocf:heartbeat:Dummy): Stopped
diff --git a/cts/scheduler/summary/remote-partial-migrate2.summary b/cts/scheduler/summary/remote-partial-migrate2.summary
index f7157c51f55..1ee086d8a8e 100644
--- a/cts/scheduler/summary/remote-partial-migrate2.summary
+++ b/cts/scheduler/summary/remote-partial-migrate2.summary
@@ -1,9 +1,10 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Node pcmk4: UNCLEAN (offline)
 * Online: [ pcmk1 pcmk2 pcmk3 ]
- * RemoteOnline: [ pcmk_remote1 pcmk_remote2 pcmk_remote3 pcmk_remote5 ]
- * RemoteOFFLINE: [ pcmk_remote4 ]
+ * RemoteOnline: [ pcmk_remote1 pcmk_remote2 pcmk_remote3 pcmk_remote4 pcmk_remote5 ]
 * Full List of Resources:
 * shooter (stonith:fence_docker_cts): Started pcmk3
@@ -85,94 +86,39 @@ Transition Summary:
 * Move FAKE49 ( pcmk_remote3 -> pcmk_remote4 )
 Executing Cluster Transition:
- * Resource action: pcmk_remote2 migrate_from on pcmk1
- * Resource action: pcmk_remote2 stop on pcmk3
- * Resource action: pcmk_remote4 start on pcmk2
- * Resource action: pcmk_remote5 migrate_to on pcmk1
- * Resource action: FAKE5 stop on pcmk1
- * Resource action: FAKE9 stop on pcmk2
- * Resource action: FAKE12 stop on pcmk1
- * Resource action: FAKE14 stop on pcmk2
- * Resource action: FAKE17 stop on pcmk_remote1
- * Resource action: FAKE25 stop on pcmk_remote1
- * Resource action: FAKE28 stop on pcmk3
- * Resource action: FAKE30 stop on pcmk1
- * Resource action: FAKE33 stop on pcmk_remote1
- * Resource action: FAKE38 stop on pcmk2
- * Resource action: FAKE48 stop on pcmk1
- * Resource action: FAKE49 stop on pcmk_remote3
- * Fencing pcmk4 (reboot)
- * Pseudo action: pcmk_remote2_start_0
- * Resource action: pcmk_remote4 monitor=60000 on pcmk2
- * Resource action: pcmk_remote5 migrate_from on pcmk2
- * Resource action: pcmk_remote5 stop on pcmk1
- * Resource action: FAKE5 start on pcmk_remote4
- * Resource action: FAKE9 start on pcmk_remote4
- * Resource action: FAKE12 start on pcmk2
- * Resource action: FAKE14 start on pcmk_remote1
- * Resource action: FAKE17 start on pcmk_remote4
- * Resource action: FAKE25 start on pcmk_remote4
- * Resource action: FAKE28 start on pcmk1
- * Resource action: FAKE30 start on pcmk_remote1
- * Resource action: FAKE33 start on pcmk_remote4
- * Resource action: FAKE38 start on pcmk_remote1
- * Resource action: FAKE39 stop on pcmk1
- * Resource action: FAKE41 stop on pcmk_remote2
- * Resource action: FAKE47 stop on pcmk_remote1
- * Resource action: FAKE48 start on pcmk_remote3
- * Resource action: FAKE49 start on pcmk_remote4
- * Resource action: pcmk_remote2 monitor=60000 on pcmk1
- * Pseudo action: pcmk_remote5_start_0
- * Resource action: FAKE5 monitor=10000 on pcmk_remote4
- * Resource action: FAKE9 monitor=10000 on pcmk_remote4
- * Resource action: FAKE12 monitor=10000 on pcmk2
- * Resource action: FAKE14 monitor=10000 on pcmk_remote1
- * Resource action: FAKE17 monitor=10000 on pcmk_remote4
- * Resource action: FAKE25 monitor=10000 on pcmk_remote4
- * Resource action: FAKE28 monitor=10000 on pcmk1
- * Resource action: FAKE30 monitor=10000 on pcmk_remote1
- * Resource action: FAKE33 monitor=10000 on pcmk_remote4
- * Resource action: FAKE38 monitor=10000 on pcmk_remote1
- * Resource action: FAKE39 start on pcmk_remote2
- * Resource action: FAKE41 start on pcmk_remote4
- * Resource action: FAKE47 start on pcmk_remote2
- * Resource action: FAKE48 monitor=10000 on pcmk_remote3
- * Resource action: FAKE49 monitor=10000 on pcmk_remote4
- * Resource action: pcmk_remote5 monitor=60000 on pcmk2
- * Resource action: FAKE39 monitor=10000 on pcmk_remote2
- * Resource action: FAKE41 monitor=10000 on pcmk_remote4
- * Resource action: FAKE47 monitor=10000 on pcmk_remote2
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
+ * Node pcmk4: UNCLEAN (offline)
 * Online: [ pcmk1 pcmk2 pcmk3 ]
- * OFFLINE: [ pcmk4 ]
 * RemoteOnline: [ pcmk_remote1 pcmk_remote2 pcmk_remote3 pcmk_remote4 pcmk_remote5 ]
 * Full List of Resources:
 * shooter (stonith:fence_docker_cts): Started pcmk3
 * pcmk_remote1 (ocf:pacemaker:remote): Started pcmk1
- * pcmk_remote2 (ocf:pacemaker:remote): Started pcmk1
+ * pcmk_remote2 (ocf:pacemaker:remote): Started [ pcmk1 pcmk3 ]
 * pcmk_remote3 (ocf:pacemaker:remote): Started pcmk3
- * pcmk_remote4 (ocf:pacemaker:remote): Started pcmk2
- * pcmk_remote5 (ocf:pacemaker:remote): Started pcmk2
+ * pcmk_remote4 (ocf:pacemaker:remote): Stopped
+ * pcmk_remote5 (ocf:pacemaker:remote): Started pcmk1
 * FAKE1 (ocf:heartbeat:Dummy): Started pcmk_remote2
 * FAKE2 (ocf:heartbeat:Dummy): Started pcmk_remote3
 * FAKE3 (ocf:heartbeat:Dummy): Started pcmk_remote1
 * FAKE4 (ocf:heartbeat:Dummy): Started pcmk_remote5
- * FAKE5 (ocf:heartbeat:Dummy): Started pcmk_remote4
+ * FAKE5 (ocf:heartbeat:Dummy): Started pcmk1
 * FAKE6 (ocf:heartbeat:Dummy): Started pcmk_remote1
 * FAKE7 (ocf:heartbeat:Dummy): Started pcmk_remote2
 * FAKE8 (ocf:heartbeat:Dummy): Started pcmk_remote3
- * FAKE9 (ocf:heartbeat:Dummy): Started pcmk_remote4
+ * FAKE9 (ocf:heartbeat:Dummy): Started pcmk2
 * FAKE10 (ocf:heartbeat:Dummy): Started pcmk_remote5
 * FAKE11 (ocf:heartbeat:Dummy): Started pcmk1
- * FAKE12 (ocf:heartbeat:Dummy): Started pcmk2
+ * FAKE12 (ocf:heartbeat:Dummy): Started pcmk1
 * FAKE13 (ocf:heartbeat:Dummy): Started pcmk3
- * FAKE14 (ocf:heartbeat:Dummy): Started pcmk_remote1
+ * FAKE14 (ocf:heartbeat:Dummy): Started pcmk2
 * FAKE15 (ocf:heartbeat:Dummy): Started pcmk_remote2
 * FAKE16 (ocf:heartbeat:Dummy): Started pcmk_remote3
- * FAKE17 (ocf:heartbeat:Dummy): Started pcmk_remote4
+ * FAKE17 (ocf:heartbeat:Dummy): Started pcmk_remote1
 * FAKE18 (ocf:heartbeat:Dummy): Started pcmk_remote5
 * FAKE19 (ocf:heartbeat:Dummy): Started pcmk3
 * FAKE20 (ocf:heartbeat:Dummy): Started pcmk2
@@ -180,29 +126,29 @@ Revised Cluster Status:
 * FAKE22 (ocf:heartbeat:Dummy): Started pcmk_remote1
 * FAKE23 (ocf:heartbeat:Dummy): Started pcmk_remote2
 * FAKE24 (ocf:heartbeat:Dummy): Started pcmk_remote3
- * FAKE25 (ocf:heartbeat:Dummy): Started pcmk_remote4
+ * FAKE25 (ocf:heartbeat:Dummy): Started pcmk_remote1
 * FAKE26 (ocf:heartbeat:Dummy): Started pcmk_remote5
 * FAKE27 (ocf:heartbeat:Dummy): Started pcmk3
- * FAKE28 (ocf:heartbeat:Dummy): Started pcmk1
+ * FAKE28 (ocf:heartbeat:Dummy): Started pcmk3
 * FAKE29 (ocf:heartbeat:Dummy): Started pcmk2
- * FAKE30 (ocf:heartbeat:Dummy): Started pcmk_remote1
+ * FAKE30 (ocf:heartbeat:Dummy): Started pcmk1
 * FAKE31 (ocf:heartbeat:Dummy): Started pcmk_remote2
 * FAKE32 (ocf:heartbeat:Dummy): Started pcmk_remote3
- * FAKE33 (ocf:heartbeat:Dummy): Started pcmk_remote4
+ * FAKE33 (ocf:heartbeat:Dummy): Started pcmk_remote1
 * FAKE34 (ocf:heartbeat:Dummy): Started pcmk_remote5
 * FAKE35 (ocf:heartbeat:Dummy): Started pcmk1
 * FAKE36 (ocf:heartbeat:Dummy): Started pcmk3
 * FAKE37 (ocf:heartbeat:Dummy): Started pcmk2
- * FAKE38 (ocf:heartbeat:Dummy): Started pcmk_remote1
- * FAKE39 (ocf:heartbeat:Dummy): Started pcmk_remote2
+ * FAKE38 (ocf:heartbeat:Dummy): Started pcmk2
+ * FAKE39 (ocf:heartbeat:Dummy): Started pcmk1
 * FAKE40 (ocf:heartbeat:Dummy): Started pcmk_remote3
- * FAKE41 (ocf:heartbeat:Dummy): Started pcmk_remote4
+ * FAKE41 (ocf:heartbeat:Dummy): Started pcmk_remote2
 * FAKE42 (ocf:heartbeat:Dummy): Started pcmk_remote5
 * FAKE43 (ocf:heartbeat:Dummy): Started pcmk_remote1
 * FAKE44 (ocf:heartbeat:Dummy): Started pcmk2
 * FAKE45 (ocf:heartbeat:Dummy): Started pcmk3
 * FAKE46 (ocf:heartbeat:Dummy): Started pcmk1
- * FAKE47 (ocf:heartbeat:Dummy): Started pcmk_remote2
- * FAKE48 (ocf:heartbeat:Dummy): Started pcmk_remote3
- * FAKE49 (ocf:heartbeat:Dummy): Started pcmk_remote4
+ * FAKE47 (ocf:heartbeat:Dummy): Started pcmk_remote1
+ * FAKE48 (ocf:heartbeat:Dummy): Started pcmk1
+ * FAKE49 (ocf:heartbeat:Dummy): Started pcmk_remote3
 * FAKE50 (ocf:heartbeat:Dummy): Started pcmk_remote5
diff --git a/cts/scheduler/summary/remote-probe-disable.summary b/cts/scheduler/summary/remote-probe-disable.summary
index 34c0d847967..a6b18f073a2 100644
--- a/cts/scheduler/summary/remote-probe-disable.summary
+++ b/cts/scheduler/summary/remote-probe-disable.summary
@@ -1,6 +1,8 @@
 1 of 6 resource instances DISABLED and 0 BLOCKED from further action due to failure
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ 18builder 18node1 18node2 ]
 * RemoteOnline: [ remote1 ]
@@ -17,20 +19,17 @@ Transition Summary:
 * Stop remote1 ( 18builder ) due to node availability
 Executing Cluster Transition:
- * Resource action: FAKE1 monitor on remote1
- * Resource action: FAKE2 monitor on remote1
- * Resource action: FAKE3 monitor on remote1
- * Resource action: FAKE4 monitor on remote1
- * Resource action: remote1 stop on 18builder
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ 18builder 18node1 18node2 ]
- * RemoteOFFLINE: [ remote1 ]
+ * RemoteOnline: [ remote1 ]
 * Full List of Resources:
 * shooter (stonith:fence_xvm): Started 18node1
- * remote1 (ocf:pacemaker:remote): Stopped (disabled)
+ * remote1 (ocf:pacemaker:remote): Started 18builder (disabled)
 * FAKE1 (ocf:heartbeat:Dummy): Started 18node2
 * FAKE2 (ocf:heartbeat:Dummy): Stopped
 * FAKE3 (ocf:heartbeat:Dummy): Started 18builder
diff --git a/cts/scheduler/summary/remote-reconnect-delay.summary b/cts/scheduler/summary/remote-reconnect-delay.summary
index f1959199860..f9039fc7199 100644
--- a/cts/scheduler/summary/remote-reconnect-delay.summary
+++ b/cts/scheduler/summary/remote-reconnect-delay.summary
@@ -1,5 +1,8 @@
 Using the original execution date of: 2017-08-21 17:12:54Z
 Current cluster status:
+ * Cluster Summary:
+ * CIB syntax has errors (for details, run crm_verify -LV)
+
 * Node List:
 * Online: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ]
 * RemoteOFFLINE: [ remote-rhel7-3 ]
@@ -32,12 +35,12 @@ Transition Summary:
 * Restart Fencing ( rhel7-2 ) due to resource definition change
 Executing Cluster Transition:
- * Resource action: Fencing stop on rhel7-2
- * Resource action: Fencing start on rhel7-2
- * Resource action: Fencing monitor=120000 on rhel7-2
 Using the original execution date of: 2017-08-21 17:12:54Z
 Revised Cluster Status:
+ * Cluster Summary:
+ * CIB syntax has errors (for details, run crm_verify -LV)
+
 * Node List:
 * Online: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ]
 * RemoteOFFLINE: [ remote-rhel7-3 ]
diff --git a/cts/scheduler/summary/remote-recover-all.summary b/cts/scheduler/summary/remote-recover-all.summary
index 257301a3d73..c2fa4a59cf3 100644
--- a/cts/scheduler/summary/remote-recover-all.summary
+++ b/cts/scheduler/summary/remote-recover-all.summary
@@ -1,9 +1,17 @@
 Using the original execution date of: 2017-05-03 13:33:24Z
 Current cluster status:
+ * Cluster Summary:
+Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400bbf613 on controller-0 changed: 0:0;124:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea
+Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400bbf613 on controller-0 changed: 0:0;129:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea
+Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400b4f6bd on controller-0 changed: 0:0;126:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea
+Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400b4f6bd on controller-0 changed: 0:0;132:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea
+
 * Node List:
 * Node controller-1: UNCLEAN (offline)
+ * RemoteNode galera-2: UNCLEAN (online)
+ * RemoteNode messaging-1: UNCLEAN (online)
 * Online: [ controller-0 controller-2 ]
- * RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
+ * RemoteOnline: [ galera-0 galera-1 messaging-0 messaging-2 ]
 * Full List of Resources:
 * messaging-0 (ocf:pacemaker:remote): Started controller-0
@@ -14,15 +22,14 @@ Current cluster status:
 * galera-2 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN)
 * Clone Set: rabbitmq-clone [rabbitmq]:
 * Started: [ messaging-0 messaging-1 messaging-2 ]
- * Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ]
+ * Stopped: [ controller-1 ]
 * Clone Set: galera-master [galera] (promotable):
 * Promoted: [ galera-0 galera-1 galera-2 ]
- * Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ]
+ * Stopped: [ controller-1 ]
 * Clone Set: redis-master [redis] (promotable):
 * redis (ocf:heartbeat:redis): Unpromoted controller-1 (UNCLEAN)
 * Promoted: [ controller-0 ]
 * Unpromoted: [ controller-2 ]
- * Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
 * ip-192.168.24.6 (ocf:heartbeat:IPaddr2): Started controller-0
 * ip-10.0.0.102 (ocf:heartbeat:IPaddr2): Started controller-0
 * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
@@ -32,7 +39,6 @@ Current cluster status:
 * Clone Set: haproxy-clone [haproxy]:
 * haproxy (systemd:haproxy): Started controller-1 (UNCLEAN)
 * Started: [ controller-0 controller-2 ]
- * Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
 * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0
 * stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0
 * stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0
@@ -53,100 +59,49 @@ Transition Summary:
 * Move ip-172.17.4.11 ( controller-1 -> controller-2 )
 * Stop haproxy:0 ( controller-1 ) due to node availability
 * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 )
+Transition failed: terminated
+An invalid transition was produced
 Executing Cluster Transition:
- * Pseudo action: messaging-1_stop_0
- * Pseudo action: galera-0_stop_0
- * Pseudo action: galera-2_stop_0
- * Pseudo action: rabbitmq-clone_pre_notify_stop_0
- * Pseudo action: galera-master_demote_0
- * Pseudo action: redis-master_pre_notify_stop_0
- * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0
- * Fencing controller-1 (reboot)
- * Resource action: rabbitmq notify on messaging-2
- * Resource action: rabbitmq notify on messaging-0
- * Pseudo action: rabbitmq-clone_confirmed-pre_notify_stop_0
- * Pseudo action: redis_post_notify_stop_0
- * Resource action: redis notify on controller-0
- * Resource action: redis notify on controller-2
- * Pseudo action: redis-master_confirmed-pre_notify_stop_0
- * Pseudo action: redis-master_stop_0
- * Pseudo action: haproxy-clone_stop_0
- * Fencing galera-2 (reboot)
- * Pseudo action: galera_demote_0
- * Pseudo action: galera-master_demoted_0
- * Pseudo action: galera-master_stop_0
- * Pseudo action: redis_stop_0
- * Pseudo action: redis-master_stopped_0
- * Pseudo action: haproxy_stop_0
- * Pseudo action: haproxy-clone_stopped_0
- * Fencing messaging-1 (reboot)
- * Resource action: galera-0 start on controller-2
- * Pseudo action: rabbitmq_post_notify_stop_0
- * Pseudo action: rabbitmq-clone_stop_0
- * Pseudo action: galera_stop_0
- * Resource action: galera monitor=10000 on galera-0
- * Pseudo action: galera-master_stopped_0
- * Pseudo action: redis-master_post_notify_stopped_0
- * Pseudo action: ip-172.17.1.14_stop_0
- * Pseudo action: ip-172.17.1.17_stop_0
- * Pseudo action: ip-172.17.4.11_stop_0
- * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2
- * Resource action: galera-0 monitor=20000 on controller-2
- * Pseudo action: rabbitmq_stop_0
- * Pseudo action: rabbitmq-clone_stopped_0
- * Resource action: redis notify on controller-0
- * Resource action: redis notify on controller-2
- * Pseudo action: redis-master_confirmed-post_notify_stopped_0
- * Resource action: ip-172.17.1.14 start on controller-2
- * Resource action: ip-172.17.1.17 start on controller-2
- * Resource action: ip-172.17.4.11 start on controller-2
- * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2
- * Pseudo action: rabbitmq-clone_post_notify_stopped_0
- * Pseudo action: redis_notified_0
- * Resource action: ip-172.17.1.14 monitor=10000 on controller-2
- * Resource action: ip-172.17.1.17 monitor=10000 on controller-2
- * Resource action: ip-172.17.4.11 monitor=10000 on controller-2
- * Resource action: rabbitmq notify on messaging-2
- * Resource action: rabbitmq notify on messaging-0
- * Pseudo action: rabbitmq_notified_0
- * Pseudo action: rabbitmq-clone_confirmed-post_notify_stopped_0
 Using the original execution date of: 2017-05-03 13:33:24Z
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
+ * Node controller-1: UNCLEAN (offline)
+ * RemoteNode galera-2: UNCLEAN (online)
+ * RemoteNode messaging-1: UNCLEAN (online)
 * Online: [ controller-0 controller-2 ]
- * OFFLINE: [ controller-1 ]
 * RemoteOnline: [ galera-0 galera-1 messaging-0 messaging-2 ]
- * RemoteOFFLINE: [ galera-2 messaging-1 ]
 * Full List of Resources:
 * messaging-0 (ocf:pacemaker:remote): Started controller-0
- * messaging-1 (ocf:pacemaker:remote): Stopped
+ * messaging-1 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN)
 * messaging-2 (ocf:pacemaker:remote): Started controller-0
- * galera-0 (ocf:pacemaker:remote): Started controller-2
+ * galera-0 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN)
 * galera-1 (ocf:pacemaker:remote): Started controller-0
- * galera-2 (ocf:pacemaker:remote): Stopped
+ * galera-2 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN)
 * Clone Set: rabbitmq-clone [rabbitmq]:
- * Started: [ messaging-0 messaging-2 ]
- * Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 messaging-1 ]
+ * Started: [ messaging-0 messaging-1 messaging-2 ]
+ * Stopped: [ controller-1 ]
 * Clone Set: galera-master [galera] (promotable):
- * Promoted: [ galera-0 galera-1 ]
- * Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ]
+ * Promoted: [ galera-0 galera-1 galera-2 ]
+ * Stopped: [ controller-1 ]
 * Clone Set: redis-master [redis] (promotable):
+ * redis (ocf:heartbeat:redis): Unpromoted controller-1 (UNCLEAN)
 * Promoted: [ controller-0 ]
 * Unpromoted: [ controller-2 ]
- * Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
 * ip-192.168.24.6 (ocf:heartbeat:IPaddr2): Started controller-0
 * ip-10.0.0.102 (ocf:heartbeat:IPaddr2): Started controller-0
- * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-2
- * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-2
+ * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
+ * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
 * ip-172.17.3.15 (ocf:heartbeat:IPaddr2): Started controller-0
- * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-2
+ * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
 * Clone Set: haproxy-clone [haproxy]:
+ * haproxy (systemd:haproxy): Started controller-1 (UNCLEAN)
 * Started: [ controller-0 controller-2 ]
- * Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
 * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0
 * stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0
 * stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0
- * stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2
+ * stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN)
diff --git a/cts/scheduler/summary/remote-recover-connection.summary b/cts/scheduler/summary/remote-recover-connection.summary
index fd6900dd961..bb8828ff263 100644
--- a/cts/scheduler/summary/remote-recover-connection.summary
+++ b/cts/scheduler/summary/remote-recover-connection.summary
@@ -1,5 +1,11 @@
 Using the original execution date of: 2017-05-03 13:33:24Z
 Current cluster status:
+ * Cluster Summary:
+Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400bbf613 on controller-0 changed: 0:0;124:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea
+Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400bbf613 on controller-0 changed: 0:0;129:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea
+Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400b4f6bd on controller-0 changed: 0:0;126:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea
+Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400b4f6bd on controller-0 changed: 0:0;132:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea
+
 * Node List:
 * Node controller-1: UNCLEAN (offline)
 * Online: [ controller-0 controller-2 ]
@@ -14,15 +20,14 @@ Current cluster status:
 * galera-2 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN)
 * Clone Set: rabbitmq-clone [rabbitmq]:
 * Started: [ messaging-0 messaging-1 messaging-2 ]
- * Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ]
+ * Stopped: [ controller-1 ]
 * Clone Set: galera-master [galera] (promotable):
 * Promoted: [ galera-0 galera-1 galera-2 ]
- * Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ]
+ * Stopped: [ controller-1 ]
 * Clone Set: redis-master [redis] (promotable):
 * redis (ocf:heartbeat:redis): Unpromoted controller-1 (UNCLEAN)
 * Promoted: [ controller-0 ]
 * Unpromoted: [ controller-2 ]
- * Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
 * ip-192.168.24.6 (ocf:heartbeat:IPaddr2): Started controller-0
 * ip-10.0.0.102 (ocf:heartbeat:IPaddr2): Started controller-0
 * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
@@ -32,7 +37,6 @@ Current cluster status:
 * Clone Set: haproxy-clone [haproxy]:
 * haproxy (systemd:haproxy): Started controller-1 (UNCLEAN)
 * Started: [ controller-0 controller-2 ]
- * Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
 * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0
 * stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0
 * stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0
@@ -49,84 +53,47 @@ Transition Summary:
 * Move ip-172.17.4.11 ( controller-1 -> controller-2 )
 * Stop haproxy:0 ( controller-1 ) due to node availability
 * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 )
+Transition failed: terminated
+An invalid transition was produced
 Executing Cluster Transition:
- * Pseudo action: messaging-1_stop_0
- * Pseudo action: galera-0_stop_0
- * Pseudo action: galera-2_stop_0
- * Pseudo action: redis-master_pre_notify_stop_0
- * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0
- * Fencing controller-1 (reboot)
- * Resource action: messaging-1 start on controller-2
- * Resource action: galera-0 start on controller-2
- * Resource action: galera-2 start on controller-2
- * Resource action: rabbitmq monitor=10000 on messaging-1
- * Resource action: galera monitor=10000 on galera-2
- * Resource action: galera monitor=10000 on galera-0
- * Pseudo action: redis_post_notify_stop_0
- * Resource action: redis notify on controller-0
- * Resource action: redis notify on controller-2
- * Pseudo action: redis-master_confirmed-pre_notify_stop_0
- * Pseudo action: redis-master_stop_0
- * Pseudo action: haproxy-clone_stop_0
- * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2
- * Resource action: messaging-1 monitor=20000 on controller-2
- * Resource action: galera-0 monitor=20000 on controller-2
- * Resource action: galera-2 monitor=20000 on controller-2
- * Pseudo action: redis_stop_0
- * Pseudo action: redis-master_stopped_0
- * Pseudo action: haproxy_stop_0
- * Pseudo action: haproxy-clone_stopped_0
- * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2
- * Pseudo action: redis-master_post_notify_stopped_0
- * Pseudo action: ip-172.17.1.14_stop_0
- * Pseudo action: ip-172.17.1.17_stop_0
- * Pseudo action: ip-172.17.4.11_stop_0
- * Resource action: redis notify on controller-0
- * Resource action: redis notify on controller-2
- * Pseudo action: redis-master_confirmed-post_notify_stopped_0
- * Resource action: ip-172.17.1.14 start on controller-2
- * Resource action: ip-172.17.1.17 start on controller-2
- * Resource action: ip-172.17.4.11 start on controller-2
- * Pseudo action: redis_notified_0
- * Resource action: ip-172.17.1.14 monitor=10000 on controller-2
- * Resource action: ip-172.17.1.17 monitor=10000 on controller-2
- * Resource action: ip-172.17.4.11 monitor=10000 on controller-2
 Using the original execution date of: 2017-05-03 13:33:24Z
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
+ * Node controller-1: UNCLEAN (offline)
 * Online: [ controller-0 controller-2 ]
- * OFFLINE: [ controller-1 ]
 * RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
 * Full List of Resources:
 * messaging-0 (ocf:pacemaker:remote): Started controller-0
- * messaging-1 (ocf:pacemaker:remote): Started controller-2
+ * messaging-1 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN)
 * messaging-2 (ocf:pacemaker:remote): Started controller-0
- * galera-0 (ocf:pacemaker:remote): Started controller-2
+ * galera-0 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN)
 * galera-1 (ocf:pacemaker:remote): Started controller-0
- * galera-2 (ocf:pacemaker:remote): Started controller-2
+ * galera-2 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN)
 * Clone Set: rabbitmq-clone [rabbitmq]:
 * Started: [ messaging-0 messaging-1 messaging-2 ]
- * Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ]
+ * Stopped: [ controller-1 ]
 * Clone Set: galera-master [galera] (promotable):
 * Promoted: [ galera-0 galera-1 galera-2 ]
- * Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ]
+ * Stopped: [ controller-1 ]
 * Clone Set: redis-master [redis] (promotable):
+ * redis (ocf:heartbeat:redis): Unpromoted controller-1 (UNCLEAN)
 * Promoted: [ controller-0 ]
 * Unpromoted: [ controller-2 ]
- * Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
 * ip-192.168.24.6 (ocf:heartbeat:IPaddr2): Started controller-0
 * ip-10.0.0.102 (ocf:heartbeat:IPaddr2): Started controller-0
- * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-2
- * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-2
+ * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
+ * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
 * ip-172.17.3.15 (ocf:heartbeat:IPaddr2): Started controller-0
- * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-2
+ * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
 * Clone Set: haproxy-clone [haproxy]:
+ * haproxy (systemd:haproxy): Started controller-1 (UNCLEAN)
 * Started: [ controller-0 controller-2 ]
- * Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
 * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0
 * stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0
 * stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0
- * stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2
+ * stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN)
diff --git a/cts/scheduler/summary/remote-recover-fail.summary b/cts/scheduler/summary/remote-recover-fail.summary
index d2399149f80..131e9b7412d 100644
--- a/cts/scheduler/summary/remote-recover-fail.summary
+++ b/cts/scheduler/summary/remote-recover-fail.summary
@@ -1,6 +1,8 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
- * RemoteNode rhel7-auto4: UNCLEAN (offline)
+ * RemoteNode rhel7-auto4: UNCLEAN (online)
 * Online: [ rhel7-auto2 rhel7-auto3 ]
 * OFFLINE: [ rhel7-auto1 ]
@@ -8,11 +10,11 @@ Current cluster status:
 * shooter (stonith:fence_xvm): Started rhel7-auto3
 * rhel7-auto4 (ocf:pacemaker:remote): FAILED rhel7-auto2
 * FAKE1 (ocf:heartbeat:Dummy): Stopped
- * FAKE2 (ocf:heartbeat:Dummy): Started rhel7-auto4 (UNCLEAN)
+ * FAKE2 (ocf:heartbeat:Dummy): Started rhel7-auto4
 * FAKE3 (ocf:heartbeat:Dummy): Started rhel7-auto2
 * FAKE4 (ocf:heartbeat:Dummy): Started rhel7-auto3
 * FAKE5 (ocf:heartbeat:Dummy): Started rhel7-auto3
- * FAKE6 (ocf:heartbeat:Dummy): Started rhel7-auto4 (UNCLEAN)
+ * FAKE6 (ocf:heartbeat:Dummy): Started rhel7-auto4
 Transition Summary:
 * Fence (reboot) rhel7-auto4 'FAKE2 is thought to be active there'
@@ -22,33 +24,21 @@ Transition Summary:
 * Move FAKE6 ( rhel7-auto4 -> rhel7-auto2 )
 Executing Cluster Transition:
- * Resource action: FAKE3 monitor=10000 on rhel7-auto2
- * Resource action: FAKE4 monitor=10000 on rhel7-auto3
- * Fencing rhel7-auto4 (reboot)
- * Resource action: FAKE1 start on rhel7-auto2
- * Pseudo action: FAKE2_stop_0
- * Pseudo action: FAKE6_stop_0
- * Resource action: rhel7-auto4 stop on rhel7-auto2
- * Resource action: FAKE1 monitor=10000 on rhel7-auto2
- * Resource action: FAKE2 start on rhel7-auto3
- * Resource action: FAKE6 start on rhel7-auto2
- * Resource action: rhel7-auto4 start on rhel7-auto2
- * Resource action: FAKE2 monitor=10000 on rhel7-auto3
- * Resource action: FAKE6 monitor=10000 on rhel7-auto2
- * Resource action: rhel7-auto4 monitor=60000 on rhel7-auto2
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
+ * RemoteNode rhel7-auto4: UNCLEAN (online)
 * Online: [ rhel7-auto2 rhel7-auto3 ]
 * OFFLINE: [ rhel7-auto1 ]
- * RemoteOnline: [ rhel7-auto4 ]
 * Full List of Resources:
 * shooter (stonith:fence_xvm): Started rhel7-auto3
- * rhel7-auto4 (ocf:pacemaker:remote): Started rhel7-auto2
- * FAKE1 (ocf:heartbeat:Dummy): Started rhel7-auto2
- * FAKE2 (ocf:heartbeat:Dummy): Started rhel7-auto3
+ * rhel7-auto4 (ocf:pacemaker:remote): FAILED rhel7-auto2
+ * FAKE1 (ocf:heartbeat:Dummy): Stopped
+ * FAKE2 (ocf:heartbeat:Dummy): Started rhel7-auto4
 * FAKE3 (ocf:heartbeat:Dummy): Started rhel7-auto2
 * FAKE4 (ocf:heartbeat:Dummy): Started rhel7-auto3
 * FAKE5 (ocf:heartbeat:Dummy): Started rhel7-auto3
- * FAKE6 (ocf:heartbeat:Dummy): Started rhel7-auto2
+ * FAKE6 (ocf:heartbeat:Dummy): Started rhel7-auto4
diff --git a/cts/scheduler/summary/remote-recover-no-resources.summary b/cts/scheduler/summary/remote-recover-no-resources.summary
index d5978be4496..ad6d4235722 100644
--- a/cts/scheduler/summary/remote-recover-no-resources.summary
+++ b/cts/scheduler/summary/remote-recover-no-resources.summary
@@ -1,9 +1,16 @@
 Using the original execution date of: 2017-05-03 13:33:24Z
 Current cluster status:
+ * Cluster Summary:
+Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400bbf613 on controller-0 changed: 0:0;124:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea
+Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400bbf613 on controller-0 changed: 0:0;129:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea
+Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400b4f6bd on controller-0 changed: 0:0;126:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea
+Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400b4f6bd on controller-0 changed: 0:0;132:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea
+
 * Node List:
 * Node controller-1: UNCLEAN (offline)
+ * RemoteNode messaging-1: UNCLEAN (online)
 * Online: [ controller-0 controller-2 ]
- * RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
+ * RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-2 ]
 * Full List of Resources:
 * messaging-0 (ocf:pacemaker:remote): Started controller-0
@@ -14,15 +21,14 @@ Current cluster status:
 * galera-2 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN)
 * Clone Set: rabbitmq-clone [rabbitmq]:
 * Started: [ messaging-0 messaging-1 messaging-2 ]
- * Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ]
+ * Stopped: [ controller-1 ]
 * Clone Set: galera-master [galera] (promotable):
 * Promoted: [ galera-0 galera-1 ]
- * Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ]
+ * Stopped: [ controller-1 galera-2 ]
 * Clone Set: redis-master [redis] (promotable):
 * redis (ocf:heartbeat:redis): Unpromoted controller-1 (UNCLEAN)
 * Promoted: [ controller-0 ]
 * Unpromoted: [ controller-2 ]
- * Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
 * ip-192.168.24.6 (ocf:heartbeat:IPaddr2): Started controller-0
 * ip-10.0.0.102 (ocf:heartbeat:IPaddr2): Started controller-0
 * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
@@ -32,7 +38,6 @@ Current cluster status:
 * Clone Set: haproxy-clone [haproxy]:
 * haproxy (systemd:haproxy): Started controller-1 (UNCLEAN)
 * Started: [ controller-0 controller-2 ]
- * Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
 * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0
 * stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0
 * stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0
@@ -51,93 +56,48 @@ Transition Summary:
 * Move ip-172.17.4.11 ( controller-1 -> controller-2 )
 * Stop haproxy:0 ( controller-1 ) due to node availability
 * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 )
+Transition failed: terminated
+An invalid transition was produced
 Executing Cluster Transition:
- * Pseudo action: messaging-1_stop_0
- * Pseudo action: galera-0_stop_0
- * Pseudo action: galera-2_stop_0
- * Pseudo action: rabbitmq-clone_pre_notify_stop_0
- * Pseudo action: redis-master_pre_notify_stop_0
- * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0
- * Fencing controller-1 (reboot)
- * Resource action: rabbitmq notify on messaging-2
- * Resource action: rabbitmq notify on messaging-0
- * Pseudo action: rabbitmq-clone_confirmed-pre_notify_stop_0
- * Pseudo action: redis_post_notify_stop_0
- * Resource action: redis notify on controller-0
- * Resource action: redis notify on controller-2
- * Pseudo action: redis-master_confirmed-pre_notify_stop_0
- * Pseudo action: redis-master_stop_0
- * Pseudo action: haproxy-clone_stop_0
- * Fencing messaging-1 (reboot)
- * Resource action: galera-0 start on controller-2
- * Pseudo action: rabbitmq_post_notify_stop_0
- * Pseudo action: rabbitmq-clone_stop_0
- * Resource action: galera monitor=10000 on galera-0
- * Pseudo action: redis_stop_0
- * Pseudo action: redis-master_stopped_0
- * Pseudo action: haproxy_stop_0
- * Pseudo action: haproxy-clone_stopped_0
- * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2
- * Resource action: galera-0 monitor=20000 on controller-2
- * Pseudo action: rabbitmq_stop_0
- * Pseudo action: rabbitmq-clone_stopped_0
- * Pseudo action: redis-master_post_notify_stopped_0
- * Pseudo action: ip-172.17.1.14_stop_0
- * Pseudo action: ip-172.17.1.17_stop_0
- * Pseudo action: ip-172.17.4.11_stop_0
- * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2
- * Pseudo action: rabbitmq-clone_post_notify_stopped_0
- * Resource action: redis notify on controller-0
- * Resource action: redis notify on controller-2
- * Pseudo action: redis-master_confirmed-post_notify_stopped_0
- * Resource action: ip-172.17.1.14 start on controller-2
- * Resource action: ip-172.17.1.17 start on controller-2
- * Resource action: ip-172.17.4.11 start on controller-2
- * Resource action: rabbitmq notify on messaging-2
- * Resource action: rabbitmq notify on messaging-0
- * Pseudo action: rabbitmq_notified_0
- * Pseudo action: rabbitmq-clone_confirmed-post_notify_stopped_0
- * Pseudo action: redis_notified_0
- * Resource action: ip-172.17.1.14 monitor=10000 on controller-2
- * Resource action: ip-172.17.1.17 monitor=10000 on controller-2
- * Resource action: ip-172.17.4.11 monitor=10000 on controller-2
 Using the original execution date of: 2017-05-03 13:33:24Z
 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
+ * Node controller-1: UNCLEAN (offline)
+ * RemoteNode messaging-1: UNCLEAN (online)
 * Online: [ controller-0 controller-2 ]
- * OFFLINE: [ controller-1 ]
- * RemoteOnline: [ galera-0 galera-1 messaging-0 messaging-2 ]
- * RemoteOFFLINE: [ galera-2 messaging-1 ]
+ * RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-2 ]
 * Full List of Resources:
 * messaging-0 (ocf:pacemaker:remote): Started controller-0
- * messaging-1 (ocf:pacemaker:remote): Stopped
+ * messaging-1 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN)
 * messaging-2 (ocf:pacemaker:remote): Started controller-0
- * galera-0 (ocf:pacemaker:remote): Started controller-2
+ * galera-0 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN)
 * galera-1 (ocf:pacemaker:remote): Started controller-0
- * galera-2 (ocf:pacemaker:remote): Stopped
+ * galera-2 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN)
 * Clone Set: rabbitmq-clone [rabbitmq]:
- * Started: [ messaging-0 messaging-2 ]
- * Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 messaging-1 ]
+ * Started: [ messaging-0 messaging-1 messaging-2 ]
+ * Stopped: [ controller-1 ]
 * Clone Set: galera-master [galera] (promotable):
 * Promoted: [ galera-0 galera-1 ]
- * Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ]
+ * Stopped: [ controller-1 galera-2 ]
 * Clone Set: redis-master [redis] (promotable):
+ * redis (ocf:heartbeat:redis): Unpromoted controller-1 (UNCLEAN)
 * Promoted: [ controller-0 ]
 * Unpromoted: [ controller-2 ]
- * Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
 * ip-192.168.24.6 (ocf:heartbeat:IPaddr2): Started controller-0
 * ip-10.0.0.102 (ocf:heartbeat:IPaddr2): Started controller-0
- * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-2
- * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-2
+ * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
+ * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
 * ip-172.17.3.15 (ocf:heartbeat:IPaddr2): Started controller-0
- * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-2
+ * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
 * Clone Set: haproxy-clone [haproxy]:
+ * haproxy (systemd:haproxy): Started controller-1 (UNCLEAN)
 * Started: [ controller-0 controller-2 ]
- * Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
 * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0
 * stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0
 * stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0
- * stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2
+ * stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN)
diff --git a/cts/scheduler/summary/remote-recover-unknown.summary b/cts/scheduler/summary/remote-recover-unknown.summary
index c68915878db..015f6cd699b 100644
--- a/cts/scheduler/summary/remote-recover-unknown.summary
+++ b/cts/scheduler/summary/remote-recover-unknown.summary
@@ -1,9 +1,17 @@
 Using the original execution date of: 2017-05-03 13:33:24Z
 Current cluster status:
+ * Cluster Summary:
+Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400bbf613 on controller-0 changed: 0:0;124:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea
+Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400bbf613 on controller-0 changed: 0:0;129:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea
+Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400b4f6bd on controller-0 changed: 0:0;126:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea
+Only 'private' parameters to 60s-interval
monitor for stonith-fence_ipmilan-525400b4f6bd on controller-0 changed: 0:0;132:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea + * Node List: * Node controller-1: UNCLEAN (offline) + * RemoteNode galera-2: UNCLEAN (online) + * RemoteNode messaging-1: UNCLEAN (online) * Online: [ controller-0 controller-2 ] - * RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] + * RemoteOnline: [ galera-0 galera-1 messaging-0 messaging-2 ] * Full List of Resources: * messaging-0 (ocf:pacemaker:remote): Started controller-0 @@ -14,15 +22,14 @@ Current cluster status: * galera-2 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN) * Clone Set: rabbitmq-clone [rabbitmq]: * Started: [ messaging-0 messaging-1 messaging-2 ] - * Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] + * Stopped: [ controller-1 ] * Clone Set: galera-master [galera] (promotable): * Promoted: [ galera-0 galera-1 ] - * Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ] + * Stopped: [ controller-1 galera-2 ] * Clone Set: redis-master [redis] (promotable): * redis (ocf:heartbeat:redis): Unpromoted controller-1 (UNCLEAN) * Promoted: [ controller-0 ] * Unpromoted: [ controller-2 ] - * Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * ip-192.168.24.6 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-10.0.0.102 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN) @@ -32,7 +39,6 @@ Current cluster status: * Clone Set: haproxy-clone [haproxy]: * haproxy (systemd:haproxy): Started controller-1 (UNCLEAN) * Started: [ controller-0 controller-2 ] - * Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 * stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 @@ -52,94 +58,49 @@ Transition Summary: * Move ip-172.17.4.11 ( controller-1 -> controller-2 ) * Stop haproxy:0 ( controller-1 ) due to node availability * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 ) +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Pseudo action: messaging-1_stop_0 - * Pseudo action: galera-0_stop_0 - * Pseudo action: galera-2_stop_0 - * Pseudo action: rabbitmq-clone_pre_notify_stop_0 - * Pseudo action: redis-master_pre_notify_stop_0 - * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 - * Fencing controller-1 (reboot) - * Resource action: rabbitmq notify on messaging-2 - * Resource action: rabbitmq notify on messaging-0 - * Pseudo action: rabbitmq-clone_confirmed-pre_notify_stop_0 - * Pseudo action: redis_post_notify_stop_0 - * Resource action: redis notify on controller-0 - * Resource action: redis notify on controller-2 - * Pseudo action: redis-master_confirmed-pre_notify_stop_0 - * Pseudo action: redis-master_stop_0 - * Pseudo action: haproxy-clone_stop_0 - * Fencing galera-2 (reboot) - * Fencing messaging-1 (reboot) - * Resource action: galera-0 start on controller-2 - * Pseudo action: rabbitmq_post_notify_stop_0 - * Pseudo action: rabbitmq-clone_stop_0 - * Resource action: galera monitor=10000 on galera-0 - * Pseudo action: redis_stop_0 - * Pseudo action: redis-master_stopped_0 - * Pseudo action: haproxy_stop_0 - * Pseudo action: haproxy-clone_stopped_0 - * Resource action: 
stonith-fence_ipmilan-5254005bdbb5 start on controller-2 - * Resource action: galera-0 monitor=20000 on controller-2 - * Pseudo action: rabbitmq_stop_0 - * Pseudo action: rabbitmq-clone_stopped_0 - * Pseudo action: redis-master_post_notify_stopped_0 - * Pseudo action: ip-172.17.1.14_stop_0 - * Pseudo action: ip-172.17.1.17_stop_0 - * Pseudo action: ip-172.17.4.11_stop_0 - * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 - * Pseudo action: rabbitmq-clone_post_notify_stopped_0 - * Resource action: redis notify on controller-0 - * Resource action: redis notify on controller-2 - * Pseudo action: redis-master_confirmed-post_notify_stopped_0 - * Resource action: ip-172.17.1.14 start on controller-2 - * Resource action: ip-172.17.1.17 start on controller-2 - * Resource action: ip-172.17.4.11 start on controller-2 - * Resource action: rabbitmq notify on messaging-2 - * Resource action: rabbitmq notify on messaging-0 - * Pseudo action: rabbitmq_notified_0 - * Pseudo action: rabbitmq-clone_confirmed-post_notify_stopped_0 - * Pseudo action: redis_notified_0 - * Resource action: ip-172.17.1.14 monitor=10000 on controller-2 - * Resource action: ip-172.17.1.17 monitor=10000 on controller-2 - * Resource action: ip-172.17.4.11 monitor=10000 on controller-2 Using the original execution date of: 2017-05-03 13:33:24Z Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node controller-1: UNCLEAN (offline) + * RemoteNode galera-2: UNCLEAN (online) + * RemoteNode messaging-1: UNCLEAN (online) * Online: [ controller-0 controller-2 ] - * OFFLINE: [ controller-1 ] * RemoteOnline: [ galera-0 galera-1 messaging-0 messaging-2 ] - * RemoteOFFLINE: [ galera-2 messaging-1 ] * Full List of Resources: * messaging-0 (ocf:pacemaker:remote): Started controller-0 - * messaging-1 (ocf:pacemaker:remote): Stopped + * messaging-1 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN) * messaging-2 (ocf:pacemaker:remote): Started controller-0 - * galera-0 (ocf:pacemaker:remote): Started controller-2 + * galera-0 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN) * galera-1 (ocf:pacemaker:remote): Started controller-0 - * galera-2 (ocf:pacemaker:remote): Stopped + * galera-2 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN) * Clone Set: rabbitmq-clone [rabbitmq]: - * Started: [ messaging-0 messaging-2 ] - * Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 messaging-1 ] + * Started: [ messaging-0 messaging-1 messaging-2 ] + * Stopped: [ controller-1 ] * Clone Set: galera-master [galera] (promotable): * Promoted: [ galera-0 galera-1 ] - * Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ] + * Stopped: [ controller-1 galera-2 ] * Clone Set: redis-master [redis] (promotable): + * redis (ocf:heartbeat:redis): Unpromoted controller-1 (UNCLEAN) * Promoted: [ controller-0 ] * Unpromoted: [ controller-2 ] - * Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * ip-192.168.24.6 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-10.0.0.102 (ocf:heartbeat:IPaddr2): Started controller-0 - * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-2 - * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-2 + * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN) + * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN) * ip-172.17.3.15 (ocf:heartbeat:IPaddr2): Started controller-0 - * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started 
+    * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
     * Clone Set: haproxy-clone [haproxy]:
+      * haproxy (systemd:haproxy): Started controller-1 (UNCLEAN)
       * Started: [ controller-0 controller-2 ]
-      * Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
     * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0
     * stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0
     * stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0
-    * stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2
+    * stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN)
diff --git a/cts/scheduler/summary/remote-recover.summary b/cts/scheduler/summary/remote-recover.summary
index 3d956c2fd4f..97d907f2e87 100644
--- a/cts/scheduler/summary/remote-recover.summary
+++ b/cts/scheduler/summary/remote-recover.summary
@@ -1,27 +1,26 @@
 Current cluster status:
+  * Cluster Summary:
+  * Node List:
     * Node rhel7-alt2: standby
-    * RemoteNode rhel7-alt4: UNCLEAN (offline)
     * Online: [ rhel7-alt1 ]
     * OFFLINE: [ rhel7-alt3 ]
+    * RemoteOnline: [ rhel7-alt4 ]
   * Full List of Resources:
     * shooter (stonith:fence_xvm): Stopped
     * rhel7-alt4 (ocf:pacemaker:remote): Stopped
-    * fake (ocf:heartbeat:Dummy): Started rhel7-alt4 (UNCLEAN)
+    * fake (ocf:heartbeat:Dummy): Started rhel7-alt4
 Transition Summary:
   * Start shooter ( rhel7-alt1 )
   * Start rhel7-alt4 ( rhel7-alt1 )
 Executing Cluster Transition:
-  * Resource action: shooter start on rhel7-alt1
-  * Resource action: rhel7-alt4 start on rhel7-alt1
-  * Resource action: fake monitor=10000 on rhel7-alt4
-  * Resource action: shooter monitor=60000 on rhel7-alt1
-  * Resource action: rhel7-alt4 monitor=60000 on rhel7-alt1
 Revised Cluster Status:
+  * Cluster Summary:
+  * Node List:
     * Node rhel7-alt2: standby
     * Online: [ rhel7-alt1 ]
@@ -29,6 +28,6 @@ Revised Cluster Status:
     * RemoteOnline: [ rhel7-alt4 ]
   * Full List of Resources:
-    * shooter (stonith:fence_xvm): Started rhel7-alt1
-    * rhel7-alt4 (ocf:pacemaker:remote): Started rhel7-alt1
+    * shooter (stonith:fence_xvm): Stopped
+    * rhel7-alt4 (ocf:pacemaker:remote): Stopped
     * fake (ocf:heartbeat:Dummy): Started rhel7-alt4
diff --git a/cts/scheduler/summary/remote-recovery.summary b/cts/scheduler/summary/remote-recovery.summary
index fd6900dd961..bb8828ff263 100644
--- a/cts/scheduler/summary/remote-recovery.summary
+++ b/cts/scheduler/summary/remote-recovery.summary
@@ -1,5 +1,11 @@
 Using the original execution date of: 2017-05-03 13:33:24Z
 Current cluster status:
+  * Cluster Summary:
+Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400bbf613 on controller-0 changed: 0:0;124:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea
+Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400bbf613 on controller-0 changed: 0:0;129:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea
+Only 'private' parameters to 0s-interval start for stonith-fence_ipmilan-525400b4f6bd on controller-0 changed: 0:0;126:17:0:e47e0f5b-bac4-432e-9993-f38bc43128ea
+Only 'private' parameters to 60s-interval monitor for stonith-fence_ipmilan-525400b4f6bd on controller-0 changed: 0:0;132:18:0:e47e0f5b-bac4-432e-9993-f38bc43128ea
+  * Node List:
     * Node controller-1: UNCLEAN (offline)
     * Online: [ controller-0 controller-2 ]
@@ -14,15 +20,14 @@ Current cluster status:
     * galera-2 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN)
     * Clone Set: rabbitmq-clone [rabbitmq]:
       * Started: [ messaging-0 messaging-1 messaging-2 ]
-      * Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ]
+      * Stopped: [ controller-1 ]
     * Clone Set: galera-master [galera] (promotable):
       * Promoted: [ galera-0 galera-1 galera-2 ]
-      * Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ]
+      * Stopped: [ controller-1 ]
     * Clone Set: redis-master [redis] (promotable):
       * redis (ocf:heartbeat:redis): Unpromoted controller-1 (UNCLEAN)
       * Promoted: [ controller-0 ]
       * Unpromoted: [ controller-2 ]
-      * Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
     * ip-192.168.24.6 (ocf:heartbeat:IPaddr2): Started controller-0
     * ip-10.0.0.102 (ocf:heartbeat:IPaddr2): Started controller-0
     * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
@@ -32,7 +37,6 @@ Current cluster status:
     * Clone Set: haproxy-clone [haproxy]:
       * haproxy (systemd:haproxy): Started controller-1 (UNCLEAN)
       * Started: [ controller-0 controller-2 ]
-      * Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
     * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0
     * stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0
     * stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0
@@ -49,84 +53,47 @@ Transition Summary:
   * Move ip-172.17.4.11 ( controller-1 -> controller-2 )
   * Stop haproxy:0 ( controller-1 ) due to node availability
   * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 )
+Transition failed: terminated
+An invalid transition was produced
 Executing Cluster Transition:
-  * Pseudo action: messaging-1_stop_0
-  * Pseudo action: galera-0_stop_0
-  * Pseudo action: galera-2_stop_0
-  * Pseudo action: redis-master_pre_notify_stop_0
-  * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0
-  * Fencing controller-1 (reboot)
-  * Resource action: messaging-1 start on controller-2
-  * Resource action: galera-0 start on controller-2
-  * Resource action: galera-2 start on controller-2
-  * Resource action: rabbitmq monitor=10000 on messaging-1
-  * Resource action: galera monitor=10000 on galera-2
-  * Resource action: galera monitor=10000 on galera-0
-  * Pseudo action: redis_post_notify_stop_0
-  * Resource action: redis notify on controller-0
-  * Resource action: redis notify on controller-2
-  * Pseudo action: redis-master_confirmed-pre_notify_stop_0
-  * Pseudo action: redis-master_stop_0
-  * Pseudo action: haproxy-clone_stop_0
-  * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2
-  * Resource action: messaging-1 monitor=20000 on controller-2
-  * Resource action: galera-0 monitor=20000 on controller-2
-  * Resource action: galera-2 monitor=20000 on controller-2
-  * Pseudo action: redis_stop_0
-  * Pseudo action: redis-master_stopped_0
-  * Pseudo action: haproxy_stop_0
-  * Pseudo action: haproxy-clone_stopped_0
-  * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2
-  * Pseudo action: redis-master_post_notify_stopped_0
-  * Pseudo action: ip-172.17.1.14_stop_0
-  * Pseudo action: ip-172.17.1.17_stop_0
-  * Pseudo action: ip-172.17.4.11_stop_0
-  * Resource action: redis notify on controller-0
-  * Resource action: redis notify on controller-2
-  * Pseudo action: redis-master_confirmed-post_notify_stopped_0
-  * Resource action: ip-172.17.1.14 start on controller-2
-  * Resource action: ip-172.17.1.17 start on controller-2
-  * Resource action: ip-172.17.4.11 start on controller-2
-  * Pseudo action: redis_notified_0
-  * Resource action: ip-172.17.1.14 monitor=10000 on controller-2
-  * Resource action: ip-172.17.1.17 monitor=10000 on controller-2
-  * Resource action: ip-172.17.4.11 monitor=10000 on controller-2
 Using the original execution date of: 2017-05-03 13:33:24Z
 Revised Cluster Status:
+  * Cluster Summary:
+  * Node List:
+    * Node controller-1: UNCLEAN (offline)
     * Online: [ controller-0 controller-2 ]
-    * OFFLINE: [ controller-1 ]
     * RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
   * Full List of Resources:
     * messaging-0 (ocf:pacemaker:remote): Started controller-0
-    * messaging-1 (ocf:pacemaker:remote): Started controller-2
+    * messaging-1 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN)
     * messaging-2 (ocf:pacemaker:remote): Started controller-0
-    * galera-0 (ocf:pacemaker:remote): Started controller-2
+    * galera-0 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN)
     * galera-1 (ocf:pacemaker:remote): Started controller-0
-    * galera-2 (ocf:pacemaker:remote): Started controller-2
+    * galera-2 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN)
     * Clone Set: rabbitmq-clone [rabbitmq]:
       * Started: [ messaging-0 messaging-1 messaging-2 ]
-      * Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ]
+      * Stopped: [ controller-1 ]
     * Clone Set: galera-master [galera] (promotable):
       * Promoted: [ galera-0 galera-1 galera-2 ]
-      * Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ]
+      * Stopped: [ controller-1 ]
     * Clone Set: redis-master [redis] (promotable):
+      * redis (ocf:heartbeat:redis): Unpromoted controller-1 (UNCLEAN)
       * Promoted: [ controller-0 ]
       * Unpromoted: [ controller-2 ]
-      * Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
     * ip-192.168.24.6 (ocf:heartbeat:IPaddr2): Started controller-0
     * ip-10.0.0.102 (ocf:heartbeat:IPaddr2): Started controller-0
-    * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-2
-    * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-2
+    * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
+    * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
     * ip-172.17.3.15 (ocf:heartbeat:IPaddr2): Started controller-0
-    * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-2
+    * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN)
     * Clone Set: haproxy-clone [haproxy]:
+      * haproxy (systemd:haproxy): Started controller-1 (UNCLEAN)
       * Started: [ controller-0 controller-2 ]
-      * Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
     * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0
     * stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0
     * stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0
-    * stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2
+    * stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN)
diff --git a/cts/scheduler/summary/remote-stale-node-entry.summary b/cts/scheduler/summary/remote-stale-node-entry.summary
index 77cffc965e1..e761bb0d4e7 100644
--- a/cts/scheduler/summary/remote-stale-node-entry.summary
+++ b/cts/scheduler/summary/remote-stale-node-entry.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ rhel7-node1 rhel7-node2 rhel7-node3 ]
     * RemoteOFFLINE: [ remote1 ]
@@ -32,77 +34,23 @@ Transition Summary:
   * Start ping-1:2 ( rhel7-node3 )
 Executing Cluster Transition:
-  * Resource action: Fencing monitor on rhel7-node3
-  * Resource action: Fencing monitor on rhel7-node2
-  * Resource action: Fencing monitor on rhel7-node1
-  * Resource action: FencingPass monitor on rhel7-node3
-  * Resource action: FencingPass monitor on rhel7-node2
-  * Resource action: FencingPass monitor on rhel7-node1
-  * Resource action: rsc_rhel7-node1 monitor on rhel7-node3
-  * Resource action: rsc_rhel7-node1 monitor on rhel7-node2
-  * Resource action: rsc_rhel7-node1 monitor on rhel7-node1
-  * Resource action: rsc_rhel7-node2 monitor on rhel7-node3
-  * Resource action: rsc_rhel7-node2 monitor on rhel7-node2
-  * Resource action: rsc_rhel7-node2 monitor on rhel7-node1
-  * Resource action: rsc_rhel7-node3 monitor on rhel7-node3
-  * Resource action: rsc_rhel7-node3 monitor on rhel7-node2
-  * Resource action: rsc_rhel7-node3 monitor on rhel7-node1
-  * Resource action: migrator monitor on rhel7-node3
-  * Resource action: migrator monitor on rhel7-node2
-  * Resource action: migrator monitor on rhel7-node1
-  * Resource action: ping-1:0 monitor on rhel7-node1
-  * Resource action: ping-1:1 monitor on rhel7-node2
-  * Resource action: ping-1:2 monitor on rhel7-node3
-  * Pseudo action: Connectivity_start_0
-  * Resource action: stateful-1:0 monitor on rhel7-node3
-  * Resource action: stateful-1:0 monitor on rhel7-node2
-  * Resource action: stateful-1:0 monitor on rhel7-node1
-  * Resource action: r192.168.122.204 monitor on rhel7-node3
-  * Resource action: r192.168.122.204 monitor on rhel7-node2
-  * Resource action: r192.168.122.204 monitor on rhel7-node1
-  * Resource action: r192.168.122.205 monitor on rhel7-node3
-  * Resource action: r192.168.122.205 monitor on rhel7-node2
-  * Resource action: r192.168.122.205 monitor on rhel7-node1
-  * Resource action: r192.168.122.206 monitor on rhel7-node3
-  * Resource action: r192.168.122.206 monitor on rhel7-node2
-  * Resource action: r192.168.122.206 monitor on rhel7-node1
-  * Resource action: lsb-dummy monitor on rhel7-node3
-  * Resource action: lsb-dummy monitor on rhel7-node2
-  * Resource action: lsb-dummy monitor on rhel7-node1
-  * Resource action: Fencing start on rhel7-node1
-  * Resource action: FencingPass start on rhel7-node2
-  * Resource action: rsc_rhel7-node1 start on rhel7-node1
-  * Resource action: rsc_rhel7-node2 start on rhel7-node2
-  * Resource action: rsc_rhel7-node3 start on rhel7-node3
-  * Resource action: migrator start on rhel7-node3
-  * Resource action: ping-1:0 start on rhel7-node1
-  * Resource action: ping-1:1 start on rhel7-node2
-  * Resource action: ping-1:2 start on rhel7-node3
-  * Pseudo action: Connectivity_running_0
-  * Resource action: Fencing monitor=120000 on rhel7-node1
-  * Resource action: rsc_rhel7-node1 monitor=5000 on rhel7-node1
-  * Resource action: rsc_rhel7-node2 monitor=5000 on rhel7-node2
-  * Resource action: rsc_rhel7-node3 monitor=5000 on rhel7-node3
-  * Resource action: migrator monitor=10000 on rhel7-node3
-  * Resource action: ping-1:0 monitor=60000 on rhel7-node1
-  * Resource action: ping-1:1 monitor=60000 on rhel7-node2
-  * Resource action: ping-1:2 monitor=60000 on rhel7-node3
 Revised Cluster Status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ rhel7-node1 rhel7-node2 rhel7-node3 ]
     * RemoteOFFLINE: [ remote1 ]
   * Full List of Resources:
-    * Fencing (stonith:fence_xvm): Started rhel7-node1
-    * FencingPass (stonith:fence_dummy): Started rhel7-node2
-    * rsc_rhel7-node1 (ocf:heartbeat:IPaddr2): Started rhel7-node1
-    * rsc_rhel7-node2 (ocf:heartbeat:IPaddr2): Started rhel7-node2
-    * rsc_rhel7-node3 (ocf:heartbeat:IPaddr2): Started rhel7-node3
-    * migrator (ocf:pacemaker:Dummy): Started rhel7-node3
+    * Fencing (stonith:fence_xvm): Stopped
+    * FencingPass (stonith:fence_dummy): Stopped
+    * rsc_rhel7-node1 (ocf:heartbeat:IPaddr2): Stopped
+    * rsc_rhel7-node2 (ocf:heartbeat:IPaddr2): Stopped
+    * rsc_rhel7-node3 (ocf:heartbeat:IPaddr2): Stopped
+    * migrator (ocf:pacemaker:Dummy): Stopped
     * Clone Set: Connectivity [ping-1]:
-      * Started: [ rhel7-node1 rhel7-node2 rhel7-node3 ]
-      * Stopped: [ remote1 ]
+      * Stopped: [ remote1 rhel7-node1 rhel7-node2 rhel7-node3 ]
     * Clone Set: master-1 [stateful-1] (promotable):
       * Stopped: [ remote1 rhel7-node1 rhel7-node2 rhel7-node3 ]
     * Resource Group: group-1:
diff --git a/cts/scheduler/summary/remote-start-fail.summary b/cts/scheduler/summary/remote-start-fail.summary
index cf83c04e896..9a6edf66867 100644
--- a/cts/scheduler/summary/remote-start-fail.summary
+++ b/cts/scheduler/summary/remote-start-fail.summary
@@ -1,7 +1,9 @@
 Current cluster status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
-    * RemoteOFFLINE: [ rhel7-auto4 ]
+    * RemoteOnline: [ rhel7-auto4 ]
   * Full List of Resources:
     * shooter (stonith:fence_xvm): Started rhel7-auto1
@@ -11,15 +13,14 @@ Transition Summary:
   * Recover rhel7-auto4 ( rhel7-auto2 -> rhel7-auto3 )
 Executing Cluster Transition:
-  * Resource action: rhel7-auto4 stop on rhel7-auto2
-  * Resource action: rhel7-auto4 start on rhel7-auto3
-  * Resource action: rhel7-auto4 monitor=60000 on rhel7-auto3
 Revised Cluster Status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
     * RemoteOnline: [ rhel7-auto4 ]
   * Full List of Resources:
     * shooter (stonith:fence_xvm): Started rhel7-auto1
-    * rhel7-auto4 (ocf:pacemaker:remote): Started rhel7-auto3
+    * rhel7-auto4 (ocf:pacemaker:remote): FAILED rhel7-auto2
diff --git a/cts/scheduler/summary/remote-startup-probes.summary b/cts/scheduler/summary/remote-startup-probes.summary
index b49f5dba9ee..9db785ccace 100644
--- a/cts/scheduler/summary/remote-startup-probes.summary
+++ b/cts/scheduler/summary/remote-startup-probes.summary
@@ -1,7 +1,9 @@
 Current cluster status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ 18builder 18node1 18node2 ]
-    * RemoteOFFLINE: [ remote1 ]
+    * RemoteOnline: [ remote1 ]
   * Full List of Resources:
     * shooter (stonith:fence_xvm): Started 18node1
@@ -17,28 +19,18 @@ Transition Summary:
   * Move FAKE2 ( 18node2 -> remote1 )
 Executing Cluster Transition:
-  * Resource action: remote1 start on 18builder
-  * Resource action: FAKE1 stop on 18builder
-  * Resource action: FAKE1 monitor on remote1
-  * Resource action: FAKE2 stop on 18node2
-  * Resource action: FAKE2 monitor on remote1
-  * Resource action: FAKE3 monitor on remote1
-  * Resource action: FAKE4 monitor on remote1
-  * Resource action: remote1 monitor=60000 on 18builder
-  * Resource action: FAKE1 start on 18node2
-  * Resource action: FAKE2 start on remote1
-  * Resource action: FAKE1 monitor=60000 on 18node2
-  * Resource action: FAKE2 monitor=60000 on remote1
 Revised Cluster Status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ 18builder 18node1 18node2 ]
     * RemoteOnline: [ remote1 ]
   * Full List of Resources:
     * shooter (stonith:fence_xvm): Started 18node1
-    * remote1 (ocf:pacemaker:remote): Started 18builder
-    * FAKE1 (ocf:heartbeat:Dummy): Started 18node2
-    * FAKE2 (ocf:heartbeat:Dummy): Started remote1
+    * remote1 (ocf:pacemaker:remote): Stopped
+    * FAKE1 (ocf:heartbeat:Dummy): Started 18builder
+    * FAKE2 (ocf:heartbeat:Dummy): Started 18node2
     * FAKE3 (ocf:heartbeat:Dummy): Started 18builder
     * FAKE4 (ocf:heartbeat:Dummy): Started 18node1
diff --git a/cts/scheduler/summary/remote-startup.summary b/cts/scheduler/summary/remote-startup.summary
index 00bb311705c..a9b2068bff1 100644
--- a/cts/scheduler/summary/remote-startup.summary
+++ b/cts/scheduler/summary/remote-startup.summary
@@ -1,7 +1,9 @@
 Current cluster status:
+  * Cluster Summary:
+  * Node List:
-    * RemoteNode remote1: UNCLEAN (offline)
     * Online: [ 18builder 18node1 18node2 ]
+    * RemoteOnline: [ remote1 ]
   * Full List of Resources:
     * shooter (stonith:fence_xvm): Started 18builder
@@ -14,26 +16,15 @@ Transition Summary:
   * Start remote1 ( 18builder )
 Executing Cluster Transition:
-  * Resource action: shooter stop on 18builder
-  * Resource action: fake monitor on 18node2
-  * Resource action: fake monitor on 18node1
-  * Resource action: fake monitor on 18builder
-  * Resource action: remote1 monitor on 18node2
-  * Resource action: remote1 monitor on 18node1
-  * Resource action: remote1 monitor on 18builder
-  * Resource action: shooter start on 18node1
-  * Resource action: remote1 start on 18builder
-  * Resource action: shooter monitor=60000 on 18node1
-  * Resource action: fake monitor on remote1
-  * Resource action: remote1 monitor=60000 on 18builder
-  * Resource action: fake start on 18node2
 Revised Cluster Status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ 18builder 18node1 18node2 ]
     * RemoteOnline: [ remote1 ]
   * Full List of Resources:
-    * shooter (stonith:fence_xvm): Started 18node1
-    * fake (ocf:pacemaker:Dummy): Started 18node2
-    * remote1 (ocf:pacemaker:remote): Started 18builder
+    * shooter (stonith:fence_xvm): Started 18builder
+    * fake (ocf:pacemaker:Dummy): Stopped
+    * remote1 (ocf:pacemaker:remote): Stopped
diff --git a/cts/scheduler/summary/remote-unclean2.summary b/cts/scheduler/summary/remote-unclean2.summary
index 3ad98b98abe..6b2838fa7dc 100644
--- a/cts/scheduler/summary/remote-unclean2.summary
+++ b/cts/scheduler/summary/remote-unclean2.summary
@@ -1,6 +1,8 @@
 Current cluster status:
+  * Cluster Summary:
+  * Node List:
-    * RemoteNode rhel7-auto4: UNCLEAN (offline)
+    * RemoteNode rhel7-auto4: UNCLEAN (online)
     * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
   * Full List of Resources:
@@ -12,16 +14,14 @@ Transition Summary:
   * Recover rhel7-auto4 ( rhel7-auto1 )
 Executing Cluster Transition:
-  * Resource action: rhel7-auto4 stop on rhel7-auto1
-  * Fencing rhel7-auto4 (reboot)
-  * Resource action: rhel7-auto4 start on rhel7-auto1
-  * Resource action: rhel7-auto4 monitor=60000 on rhel7-auto1
 Revised Cluster Status:
+  * Cluster Summary:
+  * Node List:
+    * RemoteNode rhel7-auto4: UNCLEAN (online)
     * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
-    * RemoteOnline: [ rhel7-auto4 ]
   * Full List of Resources:
     * shooter (stonith:fence_xvm): Started rhel7-auto2
-    * rhel7-auto4 (ocf:pacemaker:remote): Started rhel7-auto1
+    * rhel7-auto4 (ocf:pacemaker:remote): FAILED rhel7-auto1
diff --git a/cts/scheduler/summary/reprobe-target_rc.summary b/cts/scheduler/summary/reprobe-target_rc.summary
index 7902ce82a46..7b54575d818 100644
--- a/cts/scheduler/summary/reprobe-target_rc.summary
+++ b/cts/scheduler/summary/reprobe-target_rc.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ node-0 node-1 ]
@@ -29,6 +31,8 @@ Transition Summary:
 Executing Cluster Transition:
 Revised Cluster Status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ node-0 node-1 ]
diff --git a/cts/scheduler/summary/resource-discovery.summary b/cts/scheduler/summary/resource-discovery.summary
index 2d6ab7c0595..e5e9312265e 100644
--- a/cts/scheduler/summary/resource-discovery.summary
+++ b/cts/scheduler/summary/resource-discovery.summary
@@ -1,7 +1,9 @@
 Current cluster status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ 18node1 18node2 18node3 18node4 ]
-    * RemoteOFFLINE: [ remote1 ]
+    * RemoteOnline: [ remote1 ]
   * Full List of Resources:
     * shooter (stonith:fence_xvm): Stopped
@@ -12,7 +14,7 @@ Current cluster status:
     * FAKE4 (ocf:heartbeat:Dummy): Stopped
     * FAKE5 (ocf:heartbeat:Dummy): Stopped
     * Clone Set: FAKECLONE1-clone [FAKECLONE1]:
-      * Stopped: [ 18node1 18node2 18node3 18node4 remote1 ]
+      * Stopped: [ 18node1 remote1 ]
     * Clone Set: FAKECLONE2-clone [FAKECLONE2]:
       * Stopped: [ 18node1 18node2 18node3 18node4 remote1 ]
     * Resource Group: FAKEGROUP:
@@ -38,91 +40,26 @@ Transition Summary:
   * Start FAKE7 ( 18node1 )
 Executing Cluster Transition:
-  * Resource action: shooter monitor on 18node4
-  * Resource action: shooter monitor on 18node3
-  * Resource action: shooter monitor on 18node2
-  * Resource action: shooter monitor on 18node1
-  * Resource action: remote1 monitor on 18node4
-  * Resource action: remote1 monitor on 18node3
-  * Resource action: remote1 monitor on 18node2
-  * Resource action: remote1 monitor on 18node1
-  * Resource action: FAKE1 monitor on 18node4
-  * Resource action: FAKE2 monitor on 18node2
-  * Resource action: FAKE2 monitor on 18node1
-  * Resource action: FAKE3 monitor on 18node3
-  * Resource action: FAKE4 monitor on 18node4
-  * Resource action: FAKE5 monitor on 18node4
-  * Resource action: FAKE5 monitor on 18node3
-  * Resource action: FAKE5 monitor on 18node2
-  * Resource action: FAKE5 monitor on 18node1
-  * Resource action: FAKECLONE1:0 monitor on 18node1
-  * Resource action: FAKECLONE2:0 monitor on 18node3
-  * Resource action: FAKECLONE2:1 monitor on 18node1
-  * Resource action: FAKECLONE2:3 monitor on 18node4
-  * Pseudo action: FAKEGROUP_start_0
-  * Resource action: FAKE6 monitor on 18node2
-  * Resource action: FAKE6 monitor on 18node1
-  * Resource action: FAKE7 monitor on 18node2
-  * Resource action: FAKE7 monitor on 18node1
-  * Resource action: shooter start on 18node2
-  * Resource action: remote1 start on 18node1
-  * Resource action: FAKE1 start on 18node4
-  * Resource action: FAKE2 start on 18node2
-  * Resource action: FAKE3 start on 18node3
-  * Resource action: FAKE4 start on 18node4
-  * Resource action: FAKE5 monitor on remote1
-  * Resource action: FAKECLONE1:1 monitor on remote1
-  * Pseudo action: FAKECLONE1-clone_start_0
-  * Resource action: FAKECLONE2:4 monitor on remote1
-  * Pseudo action: FAKECLONE2-clone_start_0
-  * Resource action: FAKE6 start on 18node1
-  * Resource action: FAKE7 start on 18node1
-  * Resource action: shooter monitor=60000 on 18node2
-  * Resource action: remote1 monitor=60000 on 18node1
-  * Resource action: FAKE1 monitor=60000 on 18node4
-  * Resource action: FAKE2 monitor=60000 on 18node2
-  * Resource action: FAKE3 monitor=60000 on 18node3
-  * Resource action: FAKE4 monitor=60000 on 18node4
-  * Resource action: FAKE5 start on remote1
-  * Resource action: FAKECLONE1:0 start on 18node1
-  * Resource action: FAKECLONE1:1 start on remote1
-  * Pseudo action: FAKECLONE1-clone_running_0
-  * Resource action: FAKECLONE2:0 start on 18node3
-  * Resource action: FAKECLONE2:1 start on 18node1
-  * Resource action: FAKECLONE2:2 start on 18node2
-  * Resource action: FAKECLONE2:3 start on 18node4
-  * Resource action: FAKECLONE2:4 start on remote1
-  * Pseudo action: FAKECLONE2-clone_running_0
-  * Pseudo action: FAKEGROUP_running_0
-  * Resource action: FAKE6 monitor=10000 on 18node1
-  * Resource action: FAKE7 monitor=10000 on 18node1
-  * Resource action: FAKE5 monitor=60000 on remote1
-  * Resource action: FAKECLONE1:0 monitor=60000 on 18node1
-  * Resource action: FAKECLONE1:1 monitor=60000 on remote1
-  * Resource action: FAKECLONE2:0 monitor=60000 on 18node3
-  * Resource action: FAKECLONE2:1 monitor=60000 on 18node1
-  * Resource action: FAKECLONE2:2 monitor=60000 on 18node2
-  * Resource action: FAKECLONE2:3 monitor=60000 on 18node4
-  * Resource action: FAKECLONE2:4 monitor=60000 on remote1
 Revised Cluster Status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ 18node1 18node2 18node3 18node4 ]
     * RemoteOnline: [ remote1 ]
   * Full List of Resources:
-    * shooter (stonith:fence_xvm): Started 18node2
-    * remote1 (ocf:pacemaker:remote): Started 18node1
-    * FAKE1 (ocf:heartbeat:Dummy): Started 18node4
-    * FAKE2 (ocf:heartbeat:Dummy): Started 18node2
-    * FAKE3 (ocf:heartbeat:Dummy): Started 18node3
-    * FAKE4 (ocf:heartbeat:Dummy): Started 18node4
-    * FAKE5 (ocf:heartbeat:Dummy): Started remote1
+    * shooter (stonith:fence_xvm): Stopped
+    * remote1 (ocf:pacemaker:remote): Stopped
+    * FAKE1 (ocf:heartbeat:Dummy): Stopped
+    * FAKE2 (ocf:heartbeat:Dummy): Stopped
+    * FAKE3 (ocf:heartbeat:Dummy): Stopped
+    * FAKE4 (ocf:heartbeat:Dummy): Stopped
+    * FAKE5 (ocf:heartbeat:Dummy): Stopped
     * Clone Set: FAKECLONE1-clone [FAKECLONE1]:
-      * Started: [ 18node1 remote1 ]
-      * Stopped: [ 18node2 18node3 18node4 ]
+      * Stopped: [ 18node1 remote1 ]
     * Clone Set: FAKECLONE2-clone [FAKECLONE2]:
-      * Started: [ 18node1 18node2 18node3 18node4 remote1 ]
+      * Stopped: [ 18node1 18node2 18node3 18node4 remote1 ]
     * Resource Group: FAKEGROUP:
-      * FAKE6 (ocf:heartbeat:Dummy): Started 18node1
-      * FAKE7 (ocf:heartbeat:Dummy): Started 18node1
+      * FAKE6 (ocf:heartbeat:Dummy): Stopped
+      * FAKE7 (ocf:heartbeat:Dummy): Stopped
diff --git a/cts/scheduler/summary/restart-with-extra-op-params.summary b/cts/scheduler/summary/restart-with-extra-op-params.summary
index d80e0d9d872..00c8aed00de 100644
--- a/cts/scheduler/summary/restart-with-extra-op-params.summary
+++ b/cts/scheduler/summary/restart-with-extra-op-params.summary
@@ -1,5 +1,7 @@
 Using the original execution date of: 2021-03-31 14:58:18Z
 Current cluster status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ node1 node2 ]
@@ -11,12 +13,11 @@ Transition Summary:
   * Restart dummy1 ( node2 ) due to resource definition change
 Executing Cluster Transition:
-  * Resource action: dummy1 stop on node2
-  * Resource action: dummy1 start on node2
-  * Resource action: dummy1 monitor=10000 on node2
 Using the original execution date of: 2021-03-31 14:58:18Z
 Revised Cluster Status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/route-remote-notify.summary b/cts/scheduler/summary/route-remote-notify.summary
index fb55346f35a..9721166fe5d 100644
--- a/cts/scheduler/summary/route-remote-notify.summary
+++ b/cts/scheduler/summary/route-remote-notify.summary
@@ -1,5 +1,7 @@
 Using the original execution date of: 2018-10-31 11:51:32Z
 Current cluster status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ controller-0 controller-1 controller-2 ]
     * GuestOnline: [ rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 ]
@@ -21,78 +23,3 @@ Current cluster status:
       * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-2
     * Container bundle: openstack-cinder-volume [192.168.24.1:8787/rhosp13/openstack-cinder-volume:pcmklatest]:
       * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started controller-0
-
-Transition Summary:
-  * Stop rabbitmq-bundle-docker-0 ( controller-0 ) due to node availability
-  * Stop rabbitmq-bundle-0 ( controller-0 ) due to unrunnable rabbitmq-bundle-docker-0 start
-  * Stop rabbitmq:0 ( rabbitmq-bundle-0 ) due to unrunnable rabbitmq-bundle-docker-0 start
-  * Move ip-192.168.24.12 ( controller-0 -> controller-2 )
-  * Move ip-172.17.1.11 ( controller-0 -> controller-1 )
-  * Stop haproxy-bundle-docker-0 ( controller-0 ) due to node availability
-  * Move openstack-cinder-volume-docker-0 ( controller-0 -> controller-2 )
-
-Executing Cluster Transition:
-  * Pseudo action: rabbitmq-bundle-clone_pre_notify_stop_0
-  * Pseudo action: openstack-cinder-volume_stop_0
-  * Pseudo action: openstack-cinder-volume_start_0
-  * Pseudo action: haproxy-bundle_stop_0
-  * Pseudo action: rabbitmq-bundle_stop_0
-  * Resource action: rabbitmq notify on rabbitmq-bundle-0
-  * Resource action: rabbitmq notify on rabbitmq-bundle-1
-  * Resource action: rabbitmq notify on rabbitmq-bundle-2
-  * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_stop_0
-  * Pseudo action: rabbitmq-bundle-clone_stop_0
-  * Resource action: haproxy-bundle-docker-0 stop on controller-0
-  * Resource action: openstack-cinder-volume-docker-0 stop on controller-0
-  * Pseudo action: openstack-cinder-volume_stopped_0
-  * Pseudo action: haproxy-bundle_stopped_0
-  * Resource action: rabbitmq stop on rabbitmq-bundle-0
-  * Pseudo action: rabbitmq-bundle-clone_stopped_0
-  * Resource action: rabbitmq-bundle-0 stop on controller-0
-  * Resource action: ip-192.168.24.12 stop on controller-0
-  * Resource action: ip-172.17.1.11 stop on controller-0
-  * Resource action: openstack-cinder-volume-docker-0 start on controller-2
-  * Pseudo action: openstack-cinder-volume_running_0
-  * Pseudo action: rabbitmq-bundle-clone_post_notify_stopped_0
-  * Resource action: rabbitmq-bundle-docker-0 stop on controller-0
-  * Resource action: ip-192.168.24.12 start on controller-2
-  * Resource action: ip-172.17.1.11 start on controller-1
-  * Resource action: openstack-cinder-volume-docker-0 monitor=60000 on controller-2
-  * Cluster action: do_shutdown on controller-0
-  * Resource action: rabbitmq notify on rabbitmq-bundle-1
-  * Resource action: rabbitmq notify on rabbitmq-bundle-2
-  * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_stopped_0
-  * Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0
-  * Resource action: ip-192.168.24.12 monitor=10000 on controller-2
-  * Resource action: ip-172.17.1.11 monitor=10000 on controller-1
-  * Pseudo action: rabbitmq-bundle_stopped_0
-  * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0
-  * Pseudo action: rabbitmq-bundle-clone_start_0
-  * Pseudo action: rabbitmq-bundle-clone_running_0
-  * Pseudo action: rabbitmq-bundle-clone_post_notify_running_0
-  * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0
-  * Pseudo action: rabbitmq-bundle_running_0
-Using the original execution date of: 2018-10-31 11:51:32Z
-
-Revised Cluster Status:
-  * Node List:
-    * Online: [ controller-0 controller-1 controller-2 ]
-    * GuestOnline: [ rabbitmq-bundle-1 rabbitmq-bundle-2 ]
-
-  * Full List of Resources:
-    * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp13/openstack-rabbitmq:pcmklatest]:
-      * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped
-      * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-1
-      * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started controller-2
-    * ip-192.168.24.12 (ocf:heartbeat:IPaddr2): Started controller-2
-    * ip-10.0.0.101 (ocf:heartbeat:IPaddr2): Started controller-1
-    * ip-172.17.1.20 (ocf:heartbeat:IPaddr2): Started controller-2
-    * ip-172.17.1.11 (ocf:heartbeat:IPaddr2): Started controller-1
-    * ip-172.17.3.16 (ocf:heartbeat:IPaddr2): Started controller-1
-    * ip-172.17.4.15 (ocf:heartbeat:IPaddr2): Started controller-2
-    * Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp13/openstack-haproxy:pcmklatest]:
-      * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped
-      * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-1
-      * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-2
-    * Container bundle: openstack-cinder-volume [192.168.24.1:8787/rhosp13/openstack-cinder-volume:pcmklatest]:
-      * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started controller-2
diff --git a/cts/scheduler/summary/rsc-defaults-2.summary b/cts/scheduler/summary/rsc-defaults-2.summary
index b363fe82952..c618a1552d5 100644
--- a/cts/scheduler/summary/rsc-defaults-2.summary
+++ b/cts/scheduler/summary/rsc-defaults-2.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ cluster01 cluster02 ]
@@ -11,19 +13,14 @@ Transition Summary:
   * Start fencing ( cluster01 )
 Executing Cluster Transition:
-  * Resource action: fencing monitor on cluster02
-  * Resource action: fencing monitor on cluster01
-  * Resource action: dummy-rsc monitor on cluster02
-  * Resource action: dummy-rsc monitor on cluster01
-  * Resource action: ping-rsc-ping monitor on cluster02
-  * Resource action: ping-rsc-ping monitor on cluster01
-  * Resource action: fencing start on cluster01
 Revised Cluster Status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ cluster01 cluster02 ]
   * Full List of Resources:
-    * fencing (stonith:fence_xvm): Started cluster01
+    * fencing (stonith:fence_xvm): Stopped
     * dummy-rsc (ocf:pacemaker:Dummy): Stopped (unmanaged)
     * ping-rsc-ping (ocf:pacemaker:ping): Stopped (unmanaged)
diff --git a/cts/scheduler/summary/rsc-defaults.summary b/cts/scheduler/summary/rsc-defaults.summary
index c3657e7459b..9902d1b748b 100644
--- a/cts/scheduler/summary/rsc-defaults.summary
+++ b/cts/scheduler/summary/rsc-defaults.summary
@@ -1,6 +1,8 @@
 2 of 5 resource instances DISABLED and 0 BLOCKED from further action due to failure
 Current cluster status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ cluster01 cluster02 ]
@@ -16,26 +18,16 @@ Transition Summary:
   * Start ping-rsc-ping ( cluster02 )
 Executing Cluster Transition:
-  * Resource action: fencing monitor on cluster02
-  * Resource action: fencing monitor on cluster01
-  * Resource action: ip-rsc monitor on cluster02
-  * Resource action: ip-rsc monitor on cluster01
-  * Resource action: ip-rsc2 monitor on cluster02
-  * Resource action: ip-rsc2 monitor on cluster01
-  * Resource action: dummy-rsc monitor on cluster02
-  * Resource action: dummy-rsc monitor on cluster01
-  * Resource action: ping-rsc-ping monitor on cluster02
-  * Resource action: ping-rsc-ping monitor on cluster01
-  * Resource action: fencing start on cluster01
-  * Resource action: ping-rsc-ping start on cluster02
 Revised Cluster Status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ cluster01 cluster02 ]
   * Full List of Resources:
-    * fencing (stonith:fence_xvm): Started cluster01
+    * fencing (stonith:fence_xvm): Stopped
     * ip-rsc (ocf:heartbeat:IPaddr2): Stopped (disabled)
     * ip-rsc2 (ocf:heartbeat:IPaddr2): Stopped (disabled)
     * dummy-rsc (ocf:pacemaker:Dummy): Stopped (unmanaged)
-    * ping-rsc-ping (ocf:pacemaker:ping): Started cluster02
+    * ping-rsc-ping (ocf:pacemaker:ping): Stopped
diff --git a/cts/scheduler/summary/rsc-discovery-per-node.summary b/cts/scheduler/summary/rsc-discovery-per-node.summary
index 3c34ced4ff1..1b8f2724729 100644
--- a/cts/scheduler/summary/rsc-discovery-per-node.summary
+++ b/cts/scheduler/summary/rsc-discovery-per-node.summary
@@ -1,7 +1,10 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+  * Node List:
     * Online: [ 18builder 18node1 18node2 18node3 18node4 ]
-    * RemoteOFFLINE: [ remote1 ]
+    * RemoteOnline: [ remote1 ]
   * Full List of Resources:
     * shooter (stonith:fence_xvm): Started 18node1
@@ -37,94 +40,24 @@ Transition Summary:
   * Start FAKECLONE2:5 ( 18builder )
 Executing Cluster Transition:
-  * Resource action: shooter monitor on 18node4
-  * Resource action: shooter monitor on 18node3
-  * Resource action: remote1 monitor on 18node4
-  * Resource action: remote1 monitor on 18node3
-  * Resource action: FAKE1 monitor on 18node4
-  * Resource action: FAKE1 monitor on 18node3
-  * Resource action: FAKE1 monitor on 18node2
-  * Resource action: FAKE1 monitor on 18node1
-  * Resource action: FAKE1 monitor on 18builder
-  * Resource action: FAKE2 stop on 18node2
-  * Resource action: FAKE2 monitor on 18node4
-  * Resource action: FAKE2 monitor on 18node3
-  * Resource action: FAKE3 stop on 18builder
-  * Resource action: FAKE3 monitor on 18node4
-  * Resource action: FAKE3 monitor on 18node3
-  * Resource action: FAKE4 monitor on 18node4
-  * Resource action: FAKE4 monitor on 18node3
-  * Resource action: FAKE5 monitor on 18node4
-  * Resource action: FAKE5 monitor on 18node3
-  * Resource action: FAKE5 monitor on 18node2
-  * Resource action: FAKE5 monitor on 18node1
-  * Resource action: FAKE5 monitor on 18builder
-  * Resource action: FAKECLONE1:0 monitor on 18node1
-  * Resource action: FAKECLONE1:1 monitor on 18node2
-  * Resource action: FAKECLONE1:2 monitor on 18node3
-  * Resource action: FAKECLONE1:3 monitor on 18node4
-  * Resource action: FAKECLONE1:5 monitor on 18builder
-  * Pseudo action: FAKECLONE1-clone_start_0
-  * Resource action: FAKECLONE2:0 monitor on 18node1
-  * Resource action: FAKECLONE2:1 monitor on 18node2
-  * Resource action: FAKECLONE2:2 monitor on 18node3
-  * Resource action: FAKECLONE2:3 monitor on 18node4
-  * Resource action: FAKECLONE2:5 monitor on 18builder
-  * Pseudo action: FAKECLONE2-clone_start_0
-  * Resource action: remote1 start on 18builder
-  * Resource action: FAKE1 start on 18node2
-  * Resource action: FAKE2 start on 18node3
-  * Resource action: FAKE3 start on 18node4
-  * Resource action: FAKE4 stop on 18node1
-  * Resource action: FAKE5 start on 18builder
-  * Resource action: FAKECLONE1:0 start on 18node1
-  * Resource action: FAKECLONE1:1 start on 18node2
-  * Resource action: FAKECLONE1:2 start on 18node3
-  * Resource action: FAKECLONE1:3 start on 18node4
-  * Resource action: FAKECLONE1:4 start on remote1
-  * Resource action: FAKECLONE1:5 start on 18builder
-  * Pseudo action: FAKECLONE1-clone_running_0
-  * Resource action: FAKECLONE2:0 start on 18node1
-  * Resource action: FAKECLONE2:1 start on 18node2
-  * Resource action: FAKECLONE2:2 start on 18node3
-  * Resource action: FAKECLONE2:3 start on 18node4
-  * Resource action: FAKECLONE2:4 start on remote1
-  * Resource action: FAKECLONE2:5 start on 18builder
-  * Pseudo action: FAKECLONE2-clone_running_0
-  * Resource action: remote1 monitor=60000 on 18builder
-  * Resource action: FAKE1 monitor=60000 on 18node2
-  * Resource action: FAKE2 monitor=60000 on 18node3
-  * Resource action: FAKE3 monitor=60000 on 18node4
-  * Resource action: FAKE4 start on remote1
-  * Resource action: FAKE5 monitor=60000 on 18builder
-  * Resource action: FAKECLONE1:0 monitor=60000 on 18node1
-  * Resource action: FAKECLONE1:1 monitor=60000 on 18node2
-  * Resource action: FAKECLONE1:2 monitor=60000 on 18node3
-  * Resource action: FAKECLONE1:3 monitor=60000 on 18node4
-  * Resource action: FAKECLONE1:4 monitor=60000 on remote1
-  * Resource action: FAKECLONE1:5 monitor=60000 on 18builder
-  * Resource action: FAKECLONE2:0 monitor=60000 on 18node1
-  * Resource action: FAKECLONE2:1 monitor=60000 on 18node2
-  * Resource action: FAKECLONE2:2 monitor=60000 on 18node3
-  * Resource action: FAKECLONE2:3 monitor=60000 on 18node4
-  * Resource action: FAKECLONE2:4 monitor=60000 on remote1
-  * Resource action: FAKECLONE2:5 monitor=60000 on 18builder
-  * Resource action: FAKE4 monitor=60000 on remote1
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+  * Node List:
     * Online: [ 18builder 18node1 18node2 18node3 18node4 ]
     * RemoteOnline: [ remote1 ]
   * Full List of Resources:
     * shooter (stonith:fence_xvm): Started 18node1
-    * remote1 (ocf:pacemaker:remote): Started 18builder
-    * FAKE1 (ocf:heartbeat:Dummy): Started 18node2
-    * FAKE2 (ocf:heartbeat:Dummy): Started 18node3
-    * FAKE3 (ocf:heartbeat:Dummy): Started 18node4
-    * FAKE4 (ocf:heartbeat:Dummy): Started remote1
-    * FAKE5 (ocf:heartbeat:Dummy): Started 18builder
+    * remote1 (ocf:pacemaker:remote): Stopped
+    * FAKE1 (ocf:heartbeat:Dummy): Stopped
+    * FAKE2 (ocf:heartbeat:Dummy): Started 18node2
+    * FAKE3 (ocf:heartbeat:Dummy): Started 18builder
+    * FAKE4 (ocf:heartbeat:Dummy): Started 18node1
+    * FAKE5 (ocf:heartbeat:Dummy): Stopped
     * Clone Set: FAKECLONE1-clone [FAKECLONE1]:
-      * Started: [ 18builder 18node1 18node2 18node3 18node4 remote1 ]
+      * Stopped: [ 18builder 18node1 18node2 18node3 18node4 remote1 ]
     * Clone Set: FAKECLONE2-clone [FAKECLONE2]:
-      * Started: [ 18builder 18node1 18node2 18node3 18node4 remote1 ]
+      * Stopped: [ 18builder 18node1 18node2 18node3 18node4 remote1 ]
diff --git a/cts/scheduler/summary/rsc-maintenance.summary b/cts/scheduler/summary/rsc-maintenance.summary
index 0525d8cd649..10c8b81d3a4 100644
--- a/cts/scheduler/summary/rsc-maintenance.summary
+++ b/cts/scheduler/summary/rsc-maintenance.summary
@@ -1,6 +1,8 @@
 2 of 4 resource instances DISABLED and 0 BLOCKED from further action due to failure
 Current cluster status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ node1 node2 ]
@@ -15,10 +17,10 @@ Current cluster status:
 Transition Summary:
 Executing Cluster Transition:
-  * Resource action: rsc1 cancel=10000 on node1
-  * Resource action: rsc2 cancel=10000 on node1
 Revised Cluster Status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/rsc-sets-clone-1.summary b/cts/scheduler/summary/rsc-sets-clone-1.summary
index 9f57a8fd66c..21efbd03fcc 100644
--- a/cts/scheduler/summary/rsc-sets-clone-1.summary
+++ b/cts/scheduler/summary/rsc-sets-clone-1.summary
@@ -1,6 +1,8 @@
 5 of 24 resource instances DISABLED and 0 BLOCKED from further action due to failure
 Current cluster status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ sys2 sys3 ]
@@ -9,78 +11,10 @@ Current cluster status:
     * vm2 (ocf:heartbeat:Xen): Stopped (disabled)
     * vm3 (ocf:heartbeat:Xen): Stopped (disabled)
     * vm4 (ocf:heartbeat:Xen): Stopped (disabled)
-    * stonithsys2 (stonith:external/ipmi): Stopped
     * stonithsys3 (stonith:external/ipmi): Started sys2
     * Clone Set: baseclone [basegrp]:
       * Started: [ sys2 ]
       * Stopped: [ sys3 ]
     * Clone Set: fs1 [nfs1] (disabled):
       * Stopped (disabled): [ sys2 sys3 ]
-
-Transition Summary:
-  * Restart stonithsys3 ( sys2 ) due to resource definition change
-  * Start controld:1 ( sys3 )
-  * Start clvmd:1 ( sys3 )
-  * Start o2cb:1 ( sys3 )
-  * Start iscsi1:1 ( sys3 )
-  * Start iscsi2:1 ( sys3 )
-  * Start vg1:1 ( sys3 )
-  * Start vg2:1 ( sys3 )
-  * Start fs2:1 ( sys3 )
-  * Start stonithsys2 ( sys3 )
-
-Executing Cluster Transition:
-  * Resource action: vm1 monitor on sys3
-  * Resource action: vm2 monitor on sys3
-  * Resource action: vm3 monitor on sys3
-  * Resource action: vm4 monitor on sys3
-  * Resource action: stonithsys3 stop on sys2
-  * Resource action: stonithsys3 monitor on sys3
-  * Resource action: stonithsys3 start on sys2
-  * Resource action: stonithsys3 monitor=15000 on sys2
-  * Resource action: controld:1 monitor on sys3
-  * Resource action: clvmd:1 monitor on sys3
-  * Resource action: o2cb:1 monitor on sys3
-  * Resource action: iscsi1:1 monitor on sys3
-  * Resource action: iscsi2:1 monitor on sys3
-  * Resource action: vg1:1 monitor on sys3
-  * Resource action: vg2:1 monitor on sys3
-  * Resource action: fs2:1 monitor on sys3
-  * Pseudo action: baseclone_start_0
-  * Resource action: nfs1:0 monitor on sys3
-  * Resource action: stonithsys2 monitor on sys3
-  * Pseudo action: load_stopped_sys3
-  * Pseudo action: load_stopped_sys2
-  * Pseudo action: basegrp:1_start_0
-  * Resource action: controld:1 start on sys3
-  * Resource action: clvmd:1 start on sys3
-  * Resource action: o2cb:1 start on sys3
-  * Resource action: iscsi1:1 start on sys3
-  * Resource action: iscsi2:1 start on sys3
-  * Resource action: vg1:1 start on sys3
-  * Resource action: vg2:1 start on sys3
-  * Resource action: fs2:1 start on sys3
-  * Resource action: stonithsys2 start on sys3
-  * Pseudo action: basegrp:1_running_0
-  * Resource action: controld:1 monitor=10000 on sys3
-  * Resource action: iscsi1:1 monitor=120000 on sys3
-  * Resource action: iscsi2:1 monitor=120000 on sys3
-  * Resource action: fs2:1 monitor=20000 on sys3
-  * Pseudo action: baseclone_running_0
-  * Resource action: stonithsys2 monitor=15000 on sys3
-
-Revised Cluster Status:
-  * Node List:
-    * Online: [ sys2 sys3 ]
-
-  * Full List of Resources:
-    * vm1 (ocf:heartbeat:Xen): Started sys2
-    * vm2 (ocf:heartbeat:Xen): Stopped (disabled)
-    * vm3 (ocf:heartbeat:Xen): Stopped (disabled)
-    * vm4 (ocf:heartbeat:Xen): Stopped (disabled)
-    * stonithsys2 (stonith:external/ipmi): Started sys3
-    * stonithsys3 (stonith:external/ipmi): Started sys2
-    * Clone Set: baseclone [basegrp]:
-      * Started: [ sys2 sys3 ]
-    * Clone Set: fs1 [nfs1] (disabled):
-      * Stopped (disabled): [ sys2 sys3 ]
+    * stonithsys2 (stonith:external/ipmi): Stopped
diff --git a/cts/scheduler/summary/rsc-sets-clone.summary b/cts/scheduler/summary/rsc-sets-clone.summary
index ac3ad53957a..b6477b34102 100644
--- a/cts/scheduler/summary/rsc-sets-clone.summary
+++ b/cts/scheduler/summary/rsc-sets-clone.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+  * Node List:
     * Node node1: standby (with active resources)
     * Online: [ node2 ]
@@ -14,25 +16,21 @@ Transition Summary:
   * Move rsc1 ( node1 -> node2 )
   * Move rsc3 ( node1 -> node2 )
   * Stop rsc:0 ( node1 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 Executing Cluster Transition:
-  * Resource action: rsc1 stop on node1
-  * Resource action: rsc3 stop on node1
-  * Pseudo action: clone-rsc_stop_0
-  * Resource action: rsc1 start on node2
-  * Resource action: rsc3 start on node2
-  * Resource action: rsc:0 stop on node1
-  * Pseudo action: clone-rsc_stopped_0
 Revised Cluster Status:
+  * Cluster Summary:
+  * Node List:
-    * Node node1: standby
+    * Node node1: standby (with active resources)
     * Online: [ node2 ]
   * Full List of Resources:
-    * rsc1 (ocf:pacemaker:Dummy): Started node2
+    * rsc1 (ocf:pacemaker:Dummy): Started node1
     * rsc2 (ocf:pacemaker:Dummy): Started node2
-    * rsc3 (ocf:pacemaker:Dummy): Started node2
+    * rsc3 (ocf:pacemaker:Dummy): Started node1
     * Clone Set: clone-rsc [rsc]:
-      * Started: [ node2 ]
-      * Stopped: [ node1 ]
+      * Started: [ node1 node2 ]
diff --git a/cts/scheduler/summary/rsc-sets-promoted.summary b/cts/scheduler/summary/rsc-sets-promoted.summary
index af78ecbaa39..5213646b783 100644
--- a/cts/scheduler/summary/rsc-sets-promoted.summary
+++ b/cts/scheduler/summary/rsc-sets-promoted.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+  * Node List:
     * Node node1: standby (with active resources)
     * Online: [ node2 ]
@@ -17,33 +19,22 @@ Transition Summary:
   * Move rsc1 ( node1 -> node2 )
   * Move rsc2 ( node1 -> node2 )
   * Move rsc3 ( node1 -> node2 )
+Transition failed: terminated
+An invalid transition was produced
 Executing Cluster Transition:
-  * Resource action: rsc1 stop on node1
-  * Resource action: rsc2 stop on node1
-  * Resource action: rsc3 stop on node1
-  * Pseudo action: ms-rsc_demote_0
-  * Resource action: rsc:0 demote on node1
-  * Pseudo action: ms-rsc_demoted_0
-  * Pseudo action: ms-rsc_stop_0
-  * Resource action: rsc:0 stop on node1
-  * Pseudo action: ms-rsc_stopped_0
-  * Pseudo action: ms-rsc_promote_0
-  * Resource action: rsc:1 promote on node2
-  * Pseudo action: ms-rsc_promoted_0
-  * Resource action: rsc1 start on node2
-  * Resource action: rsc2 start on node2
-  * Resource action: rsc3 start on node2
 Revised Cluster Status:
+  * Cluster Summary:
+  * Node List:
-    * Node node1: standby
+    * Node node1: standby (with active resources)
     * Online: [ node2 ]
   * Full List of Resources:
     * Clone Set: ms-rsc [rsc] (promotable):
-      * Promoted: [ node2 ]
-      * Stopped: [ node1 ]
-    * rsc1 (ocf:pacemaker:Dummy): Started node2
-    * rsc2 (ocf:pacemaker:Dummy): Started node2
-    * rsc3 (ocf:pacemaker:Dummy): Started node2
+      * Promoted: [ node1 ]
+      * Unpromoted: [ node2 ]
+    * rsc1 (ocf:pacemaker:Dummy): Started node1
+    * rsc2 (ocf:pacemaker:Dummy): Started node1
+    * rsc3 (ocf:pacemaker:Dummy): Started node1
diff --git a/cts/scheduler/summary/rsc-sets-seq-false.summary b/cts/scheduler/summary/rsc-sets-seq-false.summary
index e864c17cadb..8fbdd361817 100644
--- a/cts/scheduler/summary/rsc-sets-seq-false.summary
+++ b/cts/scheduler/summary/rsc-sets-seq-false.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+  * Node List:
     * Node node1: standby (with active resources)
     * Online: [ node2 ]
@@ -20,28 +22,18 @@ Transition Summary:
   * Move rsc6 ( node1 -> node2 )
 Executing Cluster Transition:
-  * Resource action: rsc4 stop on node1
-  * Resource action: rsc5 stop on node1
-  * Resource action: rsc6 stop on node1
-  * Resource action: rsc3 stop on node1
-  * Resource action: rsc2 stop on node1
-  * Resource action: rsc1 stop on node1
-  * Resource action: rsc1 start on node2
-  * Resource action: rsc2 start on node2
-  * Resource action: rsc3 start on node2
-  * Resource action: rsc4 start on node2
-  * Resource action: rsc5 start on node2
-  * Resource action: rsc6 start on node2
 Revised Cluster Status:
+  * Cluster Summary:
+  * Node List:
-    * Node node1: standby
+    * Node node1: standby (with active resources)
     * Online: [ node2 ]
   * Full List of Resources:
-    * rsc1 (ocf:pacemaker:Dummy): Started node2
-    * rsc2 (ocf:pacemaker:Dummy): Started node2
-    * rsc3 (ocf:pacemaker:Dummy): Started node2
-    * rsc4 (ocf:pacemaker:Dummy): Started node2
-    * rsc5 (ocf:pacemaker:Dummy): Started node2
-    * rsc6 (ocf:pacemaker:Dummy): Started node2
+    * rsc1 (ocf:pacemaker:Dummy): Started node1
+    * rsc2 (ocf:pacemaker:Dummy): Started node1
+    * rsc3 (ocf:pacemaker:Dummy): Started node1
+    * rsc4 (ocf:pacemaker:Dummy): Started node1
+    * rsc5 (ocf:pacemaker:Dummy): Started node1
+    * rsc6 (ocf:pacemaker:Dummy): Started node1
diff --git a/cts/scheduler/summary/rsc-sets-seq-true.summary b/cts/scheduler/summary/rsc-sets-seq-true.summary
index fec65c4b4de..8fbdd361817 100644
--- a/cts/scheduler/summary/rsc-sets-seq-true.summary
+++ b/cts/scheduler/summary/rsc-sets-seq-true.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+  * Node List:
     * Node node1: standby (with active resources)
     * Online: [ node2 ]
@@ -20,28 +22,18 @@ Transition Summary:
   * Move rsc6 ( node1 -> node2 )
 Executing Cluster Transition:
-  * Resource action: rsc6 stop on node1
-  * Resource action: rsc5 stop on node1
-  * Resource action: rsc4 stop on node1
-  * Resource action: rsc3 stop on node1
-  * Resource action: rsc2 stop on node1
-  * Resource action: rsc1 stop on node1
-  * Resource action: rsc1 start on node2
-  * Resource action: rsc2 start on node2
-  * Resource action: rsc3 start on node2
-  * Resource action: rsc4 start on node2
-  * Resource action: rsc5 start on node2
-  * Resource action: rsc6 start on node2
 Revised Cluster Status:
+  * Cluster Summary:
+  * Node List:
-    * Node node1: standby
+    * Node node1: standby (with active resources)
     * Online: [ node2 ]
   * Full List of Resources:
-    * rsc1 (ocf:pacemaker:Dummy): Started node2
-    * rsc2 (ocf:pacemaker:Dummy): Started node2
-    * rsc3 (ocf:pacemaker:Dummy): Started node2
-    * rsc4 (ocf:pacemaker:Dummy): Started node2
-    * rsc5 (ocf:pacemaker:Dummy): Started node2
-    * rsc6 (ocf:pacemaker:Dummy): Started node2
+    * rsc1 (ocf:pacemaker:Dummy): Started node1
+    * rsc2 (ocf:pacemaker:Dummy): Started node1
+    * rsc3 (ocf:pacemaker:Dummy): Started node1
+    * rsc4 (ocf:pacemaker:Dummy): Started node1
+    * rsc5 (ocf:pacemaker:Dummy): Started node1
+    * rsc6 (ocf:pacemaker:Dummy): Started node1
diff --git a/cts/scheduler/summary/rsc_dep1.summary b/cts/scheduler/summary/rsc_dep1.summary
index c7d9ebfd877..8d642106c13 100644
--- a/cts/scheduler/summary/rsc_dep1.summary
+++ b/cts/scheduler/summary/rsc_dep1.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ node1 node2 ]
@@ -11,17 +13,13 @@ Transition Summary:
   * Start rsc1 ( node2 )
 Executing Cluster Transition:
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Resource action: rsc2 start on node1
-  * Resource action: rsc1 start on node2
 Revised Cluster Status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ node1 node2 ]
   * Full List of Resources:
-    * rsc2 (ocf:heartbeat:apache): Started node1
-    * rsc1 (ocf:heartbeat:apache): Started node2
+    * rsc2 (ocf:heartbeat:apache): Stopped
+    * rsc1 (ocf:heartbeat:apache): Stopped
diff --git a/cts/scheduler/summary/rsc_dep10.summary b/cts/scheduler/summary/rsc_dep10.summary
index da800c89b59..f510c348403 100644
--- a/cts/scheduler/summary/rsc_dep10.summary
+++ b/cts/scheduler/summary/rsc_dep10.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ node1 node2 ]
@@ -10,16 +12,13 @@ Transition Summary:
   * Start rsc2 ( node1 )
 Executing Cluster Transition:
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
-  * Resource action: rsc2 start on node1
 Revised Cluster Status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ node1 node2 ]
   * Full List of Resources:
     * rsc1 (ocf:heartbeat:apache): Stopped
-    * rsc2 (ocf:heartbeat:apache): Started node1
+    * rsc2 (ocf:heartbeat:apache): Stopped
diff --git a/cts/scheduler/summary/rsc_dep2.summary b/cts/scheduler/summary/rsc_dep2.summary
index d66735a3fe1..b8357487420 100644
--- a/cts/scheduler/summary/rsc_dep2.summary
+++ b/cts/scheduler/summary/rsc_dep2.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ node1 node2 ]
@@ -13,21 +15,15 @@ Transition Summary:
   * Start rsc3 ( node2 )
 Executing Cluster Transition:
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
-  * Resource action: rsc4 monitor on node1
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc3 monitor on node2
-  * Resource action: rsc3 monitor on node1
-  * Resource action: rsc2 start on node1
-  * Resource action: rsc3 start on node2
 Revised Cluster Status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ node1 node2 ]
   * Full List of Resources:
-    * rsc2 (ocf:heartbeat:apache): Started node1
+    * rsc2 (ocf:heartbeat:apache): Stopped
     * rsc4 (ocf:heartbeat:apache): Started node2
     * rsc1 (ocf:heartbeat:apache): Started node1
-    * rsc3 (ocf:heartbeat:apache): Started node2
+    * rsc3 (ocf:heartbeat:apache): Stopped
diff --git a/cts/scheduler/summary/rsc_dep3.summary b/cts/scheduler/summary/rsc_dep3.summary
index e48f5cfa7a7..2b36adb1c6b 100644
--- a/cts/scheduler/summary/rsc_dep3.summary
+++ b/cts/scheduler/summary/rsc_dep3.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ node1 node2 ]
@@ -11,17 +13,13 @@ Transition Summary:
   * Start rsc1 ( node1 )
 Executing Cluster Transition:
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Resource action: rsc2 start on node1
-  * Resource action: rsc1 start on node1
 Revised Cluster Status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ node1 node2 ]
   * Full List of Resources:
-    * rsc2 (ocf:heartbeat:apache): Started node1
-    * rsc1 (ocf:heartbeat:apache): Started node1
+    * rsc2 (ocf:heartbeat:apache): Stopped
+    * rsc1 (ocf:heartbeat:apache): Stopped
diff --git a/cts/scheduler/summary/rsc_dep4.summary b/cts/scheduler/summary/rsc_dep4.summary
index b4f280ddfac..09ce8e40aef 100644
--- a/cts/scheduler/summary/rsc_dep4.summary
+++ b/cts/scheduler/summary/rsc_dep4.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+  * Node List:
     * Online: [ node1 node2 ]
@@ -14,23 +16,15 @@ Transition Summary:
   * Start rsc3 ( node2 )
 Executing Cluster Transition:
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
-  * Resource action: rsc4 stop on node1
-  * Resource action: rsc4 monitor on node2
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc3 monitor on node2
-  * Resource action:
rsc3 monitor on node1 - * Resource action: rsc2 start on node1 - * Resource action: rsc4 start on node2 - * Resource action: rsc3 start on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc2 (ocf:heartbeat:apache): Started node1 - * rsc4 (ocf:heartbeat:apache): Started node2 + * rsc2 (ocf:heartbeat:apache): Stopped + * rsc4 (ocf:heartbeat:apache): Started node1 * rsc1 (ocf:heartbeat:apache): Started node1 - * rsc3 (ocf:heartbeat:apache): Started node2 + * rsc3 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/rsc_dep5.summary b/cts/scheduler/summary/rsc_dep5.summary index cab66530508..9239220c913 100644 --- a/cts/scheduler/summary/rsc_dep5.summary +++ b/cts/scheduler/summary/rsc_dep5.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -12,20 +14,14 @@ Transition Summary: * Start rsc2 ( node2 ) Executing Cluster Transition: - * Resource action: rsc3 monitor on node2 - * Resource action: rsc3 monitor on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc2 monitor on node1 - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc3 start on node1 - * Resource action: rsc2 start on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc3 (ocf:heartbeat:apache): Started node1 - * rsc2 (ocf:heartbeat:apache): Started node2 + * rsc3 (ocf:heartbeat:apache): Stopped + * rsc2 (ocf:heartbeat:apache): Stopped * rsc1 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/rsc_dep7.summary b/cts/scheduler/summary/rsc_dep7.summary index 8d4b6ecfc1e..55edad708ca 100644 --- a/cts/scheduler/summary/rsc_dep7.summary +++ b/cts/scheduler/summary/rsc_dep7.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -13,21 +15,14 @@ Transition Summary: * Start rsc1 ( node1 ) Executing Cluster Transition: - * Resource action: rsc3 monitor on node2 - * Resource action: rsc3 monitor on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc2 monitor on node1 - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc3 start on node1 - * Resource action: rsc2 start on node1 - * Resource action: rsc1 start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc3 (ocf:heartbeat:apache): Started node1 - * rsc2 (ocf:heartbeat:apache): Started node1 - * rsc1 (ocf:heartbeat:apache): Started node1 + * rsc3 (ocf:heartbeat:apache): Stopped + * rsc2 (ocf:heartbeat:apache): Stopped + * rsc1 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/rsc_dep8.summary b/cts/scheduler/summary/rsc_dep8.summary index d66735a3fe1..b8357487420 100644 --- a/cts/scheduler/summary/rsc_dep8.summary +++ b/cts/scheduler/summary/rsc_dep8.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -13,21 +15,15 @@ Transition Summary: * Start rsc3 ( node2 ) Executing Cluster Transition: - * Resource action: rsc2 monitor on node2 - * Resource action: rsc2 monitor on node1 - * Resource action: rsc4 monitor on node1 - * Resource action: rsc1 monitor on node2 - * Resource action: rsc3 monitor on node2 - * Resource action: rsc3 monitor on node1 - * Resource action: rsc2 start on node1 - * 
Resource action: rsc3 start on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc2 (ocf:heartbeat:apache): Started node1 + * rsc2 (ocf:heartbeat:apache): Stopped * rsc4 (ocf:heartbeat:apache): Started node2 * rsc1 (ocf:heartbeat:apache): Started node1 - * rsc3 (ocf:heartbeat:apache): Started node2 + * rsc3 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/rule-dbl-as-auto-number-match.summary b/cts/scheduler/summary/rule-dbl-as-auto-number-match.summary index 32c56454525..49aca3d447f 100644 --- a/cts/scheduler/summary/rule-dbl-as-auto-number-match.summary +++ b/cts/scheduler/summary/rule-dbl-as-auto-number-match.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] @@ -10,12 +12,13 @@ Transition Summary: * Stop dummy ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: dummy stop on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] * Full List of Resources: - * dummy (ocf:heartbeat:Dummy): Stopped + * dummy (ocf:heartbeat:Dummy): Started node1 diff --git a/cts/scheduler/summary/rule-dbl-as-auto-number-no-match.summary b/cts/scheduler/summary/rule-dbl-as-auto-number-no-match.summary index 2bec6ebe50b..f952d0c2f26 100644 --- a/cts/scheduler/summary/rule-dbl-as-auto-number-no-match.summary +++ b/cts/scheduler/summary/rule-dbl-as-auto-number-no-match.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] @@ -11,6 +13,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] diff --git a/cts/scheduler/summary/rule-dbl-as-integer-match.summary b/cts/scheduler/summary/rule-dbl-as-integer-match.summary index 32c56454525..49aca3d447f 100644 --- a/cts/scheduler/summary/rule-dbl-as-integer-match.summary +++ b/cts/scheduler/summary/rule-dbl-as-integer-match.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] @@ -10,12 +12,13 @@ Transition Summary: * Stop dummy ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: dummy stop on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] * Full List of Resources: - * dummy (ocf:heartbeat:Dummy): Stopped + * dummy (ocf:heartbeat:Dummy): Started node1 diff --git a/cts/scheduler/summary/rule-dbl-as-integer-no-match.summary b/cts/scheduler/summary/rule-dbl-as-integer-no-match.summary index 2bec6ebe50b..f952d0c2f26 100644 --- a/cts/scheduler/summary/rule-dbl-as-integer-no-match.summary +++ b/cts/scheduler/summary/rule-dbl-as-integer-no-match.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] @@ -11,6 +13,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] diff --git a/cts/scheduler/summary/rule-dbl-as-number-match.summary b/cts/scheduler/summary/rule-dbl-as-number-match.summary index 32c56454525..49aca3d447f 100644 --- a/cts/scheduler/summary/rule-dbl-as-number-match.summary +++ b/cts/scheduler/summary/rule-dbl-as-number-match.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * 
Node node2: standby * Online: [ node1 ] @@ -10,12 +12,13 @@ Transition Summary: * Stop dummy ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: dummy stop on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] * Full List of Resources: - * dummy (ocf:heartbeat:Dummy): Stopped + * dummy (ocf:heartbeat:Dummy): Started node1 diff --git a/cts/scheduler/summary/rule-dbl-as-number-no-match.summary b/cts/scheduler/summary/rule-dbl-as-number-no-match.summary index 2bec6ebe50b..f952d0c2f26 100644 --- a/cts/scheduler/summary/rule-dbl-as-number-no-match.summary +++ b/cts/scheduler/summary/rule-dbl-as-number-no-match.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] @@ -11,6 +13,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] diff --git a/cts/scheduler/summary/rule-dbl-parse-fail-default-str-match.summary b/cts/scheduler/summary/rule-dbl-parse-fail-default-str-match.summary index 32c56454525..49aca3d447f 100644 --- a/cts/scheduler/summary/rule-dbl-parse-fail-default-str-match.summary +++ b/cts/scheduler/summary/rule-dbl-parse-fail-default-str-match.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] @@ -10,12 +12,13 @@ Transition Summary: * Stop dummy ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: dummy stop on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] * Full List of Resources: - * dummy (ocf:heartbeat:Dummy): Stopped + * dummy (ocf:heartbeat:Dummy): Started node1 diff --git a/cts/scheduler/summary/rule-dbl-parse-fail-default-str-no-match.summary b/cts/scheduler/summary/rule-dbl-parse-fail-default-str-no-match.summary index 2bec6ebe50b..f952d0c2f26 100644 --- a/cts/scheduler/summary/rule-dbl-parse-fail-default-str-no-match.summary +++ b/cts/scheduler/summary/rule-dbl-parse-fail-default-str-no-match.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] @@ -11,6 +13,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] diff --git a/cts/scheduler/summary/rule-int-as-auto-integer-match.summary b/cts/scheduler/summary/rule-int-as-auto-integer-match.summary index 32c56454525..49aca3d447f 100644 --- a/cts/scheduler/summary/rule-int-as-auto-integer-match.summary +++ b/cts/scheduler/summary/rule-int-as-auto-integer-match.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] @@ -10,12 +12,13 @@ Transition Summary: * Stop dummy ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: dummy stop on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] * Full List of Resources: - * dummy (ocf:heartbeat:Dummy): Stopped + * dummy (ocf:heartbeat:Dummy): Started node1 diff --git a/cts/scheduler/summary/rule-int-as-auto-integer-no-match.summary b/cts/scheduler/summary/rule-int-as-auto-integer-no-match.summary index 2bec6ebe50b..f952d0c2f26 100644 --- a/cts/scheduler/summary/rule-int-as-auto-integer-no-match.summary +++ 
b/cts/scheduler/summary/rule-int-as-auto-integer-no-match.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] @@ -11,6 +13,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] diff --git a/cts/scheduler/summary/rule-int-as-integer-match.summary b/cts/scheduler/summary/rule-int-as-integer-match.summary index 32c56454525..49aca3d447f 100644 --- a/cts/scheduler/summary/rule-int-as-integer-match.summary +++ b/cts/scheduler/summary/rule-int-as-integer-match.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] @@ -10,12 +12,13 @@ Transition Summary: * Stop dummy ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: dummy stop on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] * Full List of Resources: - * dummy (ocf:heartbeat:Dummy): Stopped + * dummy (ocf:heartbeat:Dummy): Started node1 diff --git a/cts/scheduler/summary/rule-int-as-integer-no-match.summary b/cts/scheduler/summary/rule-int-as-integer-no-match.summary index 2bec6ebe50b..f952d0c2f26 100644 --- a/cts/scheduler/summary/rule-int-as-integer-no-match.summary +++ b/cts/scheduler/summary/rule-int-as-integer-no-match.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] @@ -11,6 +13,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] diff --git a/cts/scheduler/summary/rule-int-as-number-match.summary b/cts/scheduler/summary/rule-int-as-number-match.summary index 32c56454525..49aca3d447f 100644 --- a/cts/scheduler/summary/rule-int-as-number-match.summary +++ b/cts/scheduler/summary/rule-int-as-number-match.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] @@ -10,12 +12,13 @@ Transition Summary: * Stop dummy ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: dummy stop on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] * Full List of Resources: - * dummy (ocf:heartbeat:Dummy): Stopped + * dummy (ocf:heartbeat:Dummy): Started node1 diff --git a/cts/scheduler/summary/rule-int-as-number-no-match.summary b/cts/scheduler/summary/rule-int-as-number-no-match.summary index 2bec6ebe50b..f952d0c2f26 100644 --- a/cts/scheduler/summary/rule-int-as-number-no-match.summary +++ b/cts/scheduler/summary/rule-int-as-number-no-match.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] @@ -11,6 +13,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] diff --git a/cts/scheduler/summary/rule-int-parse-fail-default-str-match.summary b/cts/scheduler/summary/rule-int-parse-fail-default-str-match.summary index 32c56454525..49aca3d447f 100644 --- a/cts/scheduler/summary/rule-int-parse-fail-default-str-match.summary +++ b/cts/scheduler/summary/rule-int-parse-fail-default-str-match.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] @@ -10,12 
+12,13 @@ Transition Summary: * Stop dummy ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: dummy stop on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] * Full List of Resources: - * dummy (ocf:heartbeat:Dummy): Stopped + * dummy (ocf:heartbeat:Dummy): Started node1 diff --git a/cts/scheduler/summary/rule-int-parse-fail-default-str-no-match.summary b/cts/scheduler/summary/rule-int-parse-fail-default-str-no-match.summary index 2bec6ebe50b..f952d0c2f26 100644 --- a/cts/scheduler/summary/rule-int-parse-fail-default-str-no-match.summary +++ b/cts/scheduler/summary/rule-int-parse-fail-default-str-no-match.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] @@ -11,6 +13,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Node node2: standby * Online: [ node1 ] diff --git a/cts/scheduler/summary/shutdown-lock-expiration.summary b/cts/scheduler/summary/shutdown-lock-expiration.summary index aa6f2e8650f..c1dc4971c8f 100644 --- a/cts/scheduler/summary/shutdown-lock-expiration.summary +++ b/cts/scheduler/summary/shutdown-lock-expiration.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2020-01-06 22:11:40Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node3 node4 node5 ] * OFFLINE: [ node1 node2 ] @@ -14,15 +16,11 @@ Transition Summary: * Start rsc2 ( node4 ) Executing Cluster Transition: - * Resource action: Fencing stop on node3 - * Resource action: Fencing start on node3 - * Resource action: Fencing monitor=120000 on node3 - * Resource action: rsc2 start on node4 - * Cluster action: lrm_delete for rsc2 on node2 - * Resource action: rsc2 monitor=10000 on node4 Using the original execution date of: 2020-01-06 22:11:40Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node3 node4 node5 ] * OFFLINE: [ node1 node2 ] @@ -30,4 +28,4 @@ Revised Cluster Status: * Full List of Resources: * Fencing (stonith:fence_xvm): Started node3 * rsc1 (ocf:pacemaker:Dummy): Stopped node1 (LOCKED) - * rsc2 (ocf:pacemaker:Dummy): Started node4 + * rsc2 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/shutdown-lock.summary b/cts/scheduler/summary/shutdown-lock.summary index e36a005b88a..382385c74e6 100644 --- a/cts/scheduler/summary/shutdown-lock.summary +++ b/cts/scheduler/summary/shutdown-lock.summary @@ -1,12 +1,14 @@ Using the original execution date of: 2020-01-06 21:59:11Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node3 node4 node5 ] * OFFLINE: [ node2 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started node1 - * rsc1 (ocf:pacemaker:Dummy): Started node1 + * rsc1 (ocf:pacemaker:Dummy): Started node1 (LOCKED) * rsc2 (ocf:pacemaker:Dummy): Stopped node2 (LOCKED) Transition Summary: @@ -14,19 +16,17 @@ Transition Summary: * Stop rsc1 ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: Fencing stop on node1 - * Resource action: rsc1 stop on node1 - * Cluster action: do_shutdown on node1 - * Resource action: Fencing start on node3 - * Resource action: Fencing monitor=120000 on node3 + * Cluster action: lrm_delete for rsc1 on node1 Using the original execution date of: 2020-01-06 21:59:11Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node3 node4 node5 ] * OFFLINE: [ node2 ] * Full List of 
Resources: - * Fencing (stonith:fence_xvm): Started node3 - * rsc1 (ocf:pacemaker:Dummy): Stopped + * Fencing (stonith:fence_xvm): Started node1 + * rsc1 (ocf:pacemaker:Dummy): Started node1 (LOCKED) * rsc2 (ocf:pacemaker:Dummy): Stopped node2 (LOCKED) diff --git a/cts/scheduler/summary/shutdown-maintenance-node.summary b/cts/scheduler/summary/shutdown-maintenance-node.summary index b8bca961652..748a3d52ce4 100644 --- a/cts/scheduler/summary/shutdown-maintenance-node.summary +++ b/cts/scheduler/summary/shutdown-maintenance-node.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node sle12sp2-2: OFFLINE (maintenance) * Online: [ sle12sp2-1 ] @@ -12,6 +14,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Node sle12sp2-2: OFFLINE (maintenance) * Online: [ sle12sp2-1 ] diff --git a/cts/scheduler/summary/simple1.summary b/cts/scheduler/summary/simple1.summary index 14afbe24d65..cc9ac2992c3 100644 --- a/cts/scheduler/summary/simple1.summary +++ b/cts/scheduler/summary/simple1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * OFFLINE: [ node1 ] @@ -10,6 +12,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * OFFLINE: [ node1 ] diff --git a/cts/scheduler/summary/simple11.summary b/cts/scheduler/summary/simple11.summary index fc329b8552d..5817f23a89c 100644 --- a/cts/scheduler/summary/simple11.summary +++ b/cts/scheduler/summary/simple11.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -11,17 +13,13 @@ Transition Summary: * Start rsc2 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc2 monitor on node1 - * Resource action: rsc1 start on node1 - * Resource action: rsc2 start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node1 - * rsc2 (ocf:heartbeat:apache): Started node1 + * rsc1 (ocf:heartbeat:apache): Stopped + * rsc2 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/simple12.summary b/cts/scheduler/summary/simple12.summary index 4e654b6c432..e05cb71fbfb 100644 --- a/cts/scheduler/summary/simple12.summary +++ b/cts/scheduler/summary/simple12.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -11,17 +13,13 @@ Transition Summary: * Start rsc2 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc2 monitor on node1 - * Resource action: rsc1 start on node2 - * Resource action: rsc2 start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node2 - * rsc2 (ocf:heartbeat:apache): Started node1 + * rsc1 (ocf:heartbeat:apache): Stopped + * rsc2 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/simple2.summary b/cts/scheduler/summary/simple2.summary index 7d133a85949..f9ff5deedd4 100644 --- a/cts/scheduler/summary/simple2.summary +++ b/cts/scheduler/summary/simple2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * 
Online: [ node1 node2 ] @@ -9,13 +11,12 @@ Transition Summary: * Start rsc1 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc1 start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node1 + * rsc1 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/simple3.summary b/cts/scheduler/summary/simple3.summary index 9ca3dd46e1d..9ad1c74ea11 100644 --- a/cts/scheduler/summary/simple3.summary +++ b/cts/scheduler/summary/simple3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 ] @@ -9,11 +11,12 @@ Transition Summary: * Start rsc1 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Started node1 + * rsc1 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/simple4.summary b/cts/scheduler/summary/simple4.summary index 456e7dccc40..d62b41c52ac 100644 --- a/cts/scheduler/summary/simple4.summary +++ b/cts/scheduler/summary/simple4.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 ] @@ -9,11 +11,12 @@ Transition Summary: * Stop rsc1 ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: rsc1 stop on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Stopped + * rsc1 (ocf:heartbeat:apache): FAILED node1 diff --git a/cts/scheduler/summary/simple6.summary b/cts/scheduler/summary/simple6.summary index 5f6c9ce5dc4..5fbea259a41 100644 --- a/cts/scheduler/summary/simple6.summary +++ b/cts/scheduler/summary/simple6.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 ] @@ -11,14 +13,13 @@ Transition Summary: * Stop rsc1 ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: rsc2 monitor on node1 - * Resource action: rsc1 stop on node1 - * Resource action: rsc2 start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 ] * Full List of Resources: - * rsc2 (ocf:heartbeat:apache): Started node1 - * rsc1 (ocf:heartbeat:apache): Stopped + * rsc2 (ocf:heartbeat:apache): Stopped + * rsc1 (ocf:heartbeat:apache): Started node1 diff --git a/cts/scheduler/summary/simple7.summary b/cts/scheduler/summary/simple7.summary index fa102edc349..da624785354 100644 --- a/cts/scheduler/summary/simple7.summary +++ b/cts/scheduler/summary/simple7.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 ] @@ -9,12 +11,12 @@ Transition Summary: * Stop rsc1 ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: rsc1 stop on node1 - * Cluster action: do_shutdown on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 ] * Full List of Resources: - * rsc1 (ocf:heartbeat:apache): Stopped + * rsc1 (ocf:heartbeat:apache): Started node1 diff --git a/cts/scheduler/summary/simple8.summary b/cts/scheduler/summary/simple8.summary index 24bf53b0ac4..f75269de549 100644 --- a/cts/scheduler/summary/simple8.summary +++ b/cts/scheduler/summary/simple8.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster 
Summary: + * Node List: * Online: [ node1 node2 ] @@ -13,12 +15,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: rsc3 monitor on node2 - * Resource action: rsc4 monitor on node2 - * Resource action: rsc1 monitor on node2 - * Resource action: rsc2 monitor on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/site-specific-params.summary b/cts/scheduler/summary/site-specific-params.summary index 08a1dbb0f77..e689c5a3f14 100644 --- a/cts/scheduler/summary/site-specific-params.summary +++ b/cts/scheduler/summary/site-specific-params.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] @@ -10,16 +12,13 @@ Transition Summary: * Start rsc1 ( node2 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node3 - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc1 start on node2 - * Resource action: rsc1 monitor=10000 on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 - * rsc1 (ocf:pacemaker:Dummy): Started node2 + * rsc1 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/standby.summary b/cts/scheduler/summary/standby.summary index b13326e398e..9b3c57446a5 100644 --- a/cts/scheduler/summary/standby.summary +++ b/cts/scheduler/summary/standby.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node sapcl02: standby (with active resources) * Node sapcl03: standby (with active resources) @@ -31,43 +33,13 @@ Transition Summary: * Move oralsnr_25 ( sapcl03 -> sapcl01 ) Executing Cluster Transition: - * Pseudo action: app02_stop_0 - * Resource action: Filesystem_13 stop on sapcl02 - * Pseudo action: oracle_stop_0 - * Resource action: oralsnr_25 stop on sapcl03 - * Resource action: LVM_12 stop on sapcl02 - * Resource action: oracle_24 stop on sapcl03 - * Resource action: IPaddr_192_168_1_102 stop on sapcl02 - * Resource action: Filesystem_23 stop on sapcl03 - * Pseudo action: app02_stopped_0 - * Pseudo action: app02_start_0 - * Resource action: IPaddr_192_168_1_102 start on sapcl01 - * Resource action: LVM_12 start on sapcl01 - * Resource action: Filesystem_13 start on sapcl01 - * Resource action: LVM_22 stop on sapcl03 - * Pseudo action: app02_running_0 - * Resource action: IPaddr_192_168_1_102 monitor=5000 on sapcl01 - * Resource action: LVM_12 monitor=120000 on sapcl01 - * Resource action: Filesystem_13 monitor=120000 on sapcl01 - * Resource action: IPaddr_192_168_1_104 stop on sapcl03 - * Pseudo action: oracle_stopped_0 - * Pseudo action: oracle_start_0 - * Resource action: IPaddr_192_168_1_104 start on sapcl01 - * Resource action: LVM_22 start on sapcl01 - * Resource action: Filesystem_23 start on sapcl01 - * Resource action: oracle_24 start on sapcl01 - * Resource action: oralsnr_25 start on sapcl01 - * Pseudo action: oracle_running_0 - * Resource action: IPaddr_192_168_1_104 monitor=5000 on sapcl01 - * Resource action: LVM_22 monitor=120000 on sapcl01 - * Resource action: Filesystem_23 monitor=120000 on sapcl01 - * Resource action: oracle_24 monitor=120000 on sapcl01 - * Resource action: oralsnr_25 monitor=120000 on sapcl01 Revised Cluster Status: + * Cluster Summary: + * Node List: - * Node sapcl02: standby - * Node sapcl03: standby + * Node sapcl02: standby (with active 
resources) + * Node sapcl03: standby (with active resources) * Online: [ sapcl01 ] * Full List of Resources: @@ -76,12 +48,12 @@ Revised Cluster Status: * LVM_2 (ocf:heartbeat:LVM): Started sapcl01 * Filesystem_3 (ocf:heartbeat:Filesystem): Started sapcl01 * Resource Group: app02: - * IPaddr_192_168_1_102 (ocf:heartbeat:IPaddr): Started sapcl01 - * LVM_12 (ocf:heartbeat:LVM): Started sapcl01 - * Filesystem_13 (ocf:heartbeat:Filesystem): Started sapcl01 + * IPaddr_192_168_1_102 (ocf:heartbeat:IPaddr): Started sapcl02 + * LVM_12 (ocf:heartbeat:LVM): Started sapcl02 + * Filesystem_13 (ocf:heartbeat:Filesystem): Started sapcl02 * Resource Group: oracle: - * IPaddr_192_168_1_104 (ocf:heartbeat:IPaddr): Started sapcl01 - * LVM_22 (ocf:heartbeat:LVM): Started sapcl01 - * Filesystem_23 (ocf:heartbeat:Filesystem): Started sapcl01 - * oracle_24 (ocf:heartbeat:oracle): Started sapcl01 - * oralsnr_25 (ocf:heartbeat:oralsnr): Started sapcl01 + * IPaddr_192_168_1_104 (ocf:heartbeat:IPaddr): Started sapcl03 + * LVM_22 (ocf:heartbeat:LVM): Started sapcl03 + * Filesystem_23 (ocf:heartbeat:Filesystem): Started sapcl03 + * oracle_24 (ocf:heartbeat:oracle): Started sapcl03 + * oralsnr_25 (ocf:heartbeat:oralsnr): Started sapcl03 diff --git a/cts/scheduler/summary/start-then-stop-with-unfence.summary b/cts/scheduler/summary/start-then-stop-with-unfence.summary index 8d83fcc0b69..6e44f8383a0 100644 --- a/cts/scheduler/summary/start-then-stop-with-unfence.summary +++ b/cts/scheduler/summary/start-then-stop-with-unfence.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ rhel7-node1.example.com rhel7-node2.example.com ] @@ -18,27 +20,18 @@ Transition Summary: * Start jrummy:1 ( rhel7-node1.example.com ) Executing Cluster Transition: - * Pseudo action: jrummy-clone_start_0 - * Fencing rhel7-node1.example.com (on) - * Resource action: mpath-node2 monitor on rhel7-node1.example.com - * Resource action: mpath-node1 monitor on rhel7-node1.example.com - * Resource action: jrummy start on rhel7-node1.example.com - * Pseudo action: jrummy-clone_running_0 - * Resource action: mpath-node1 start on rhel7-node1.example.com - * Resource action: ip1 stop on rhel7-node2.example.com - * Resource action: jrummy monitor=10000 on rhel7-node1.example.com - * Resource action: mpath-node1 monitor=60000 on rhel7-node1.example.com - * Resource action: ip1 start on rhel7-node1.example.com - * Resource action: ip1 monitor=10000 on rhel7-node1.example.com Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ rhel7-node1.example.com rhel7-node2.example.com ] * Full List of Resources: * mpath-node2 (stonith:fence_mpath): Started rhel7-node2.example.com - * mpath-node1 (stonith:fence_mpath): Started rhel7-node1.example.com - * ip1 (ocf:heartbeat:IPaddr2): Started rhel7-node1.example.com + * mpath-node1 (stonith:fence_mpath): Stopped + * ip1 (ocf:heartbeat:IPaddr2): Started rhel7-node2.example.com * ip2 (ocf:heartbeat:IPaddr2): Started rhel7-node2.example.com * Clone Set: jrummy-clone [jrummy]: - * Started: [ rhel7-node1.example.com rhel7-node2.example.com ] + * Started: [ rhel7-node2.example.com ] + * Stopped: [ rhel7-node1.example.com ] diff --git a/cts/scheduler/summary/stonith-0.summary b/cts/scheduler/summary/stonith-0.summary index f9745bd6429..d950c8002a2 100644 --- a/cts/scheduler/summary/stonith-0.summary +++ b/cts/scheduler/summary/stonith-0.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node c001n03: UNCLEAN (online) * Node c001n05: 
UNCLEAN (online) @@ -47,49 +49,28 @@ Transition Summary: * Move rsc_c001n07 ( c001n03 -> c001n07 ) Executing Cluster Transition: - * Resource action: child_DoFencing:4 monitor=20000 on c001n08 - * Fencing c001n05 (reboot) - * Fencing c001n03 (reboot) - * Pseudo action: group-1_stop_0 - * Pseudo action: ocf_192.168.100.183_stop_0 - * Pseudo action: ocf_192.168.100.183_stop_0 - * Pseudo action: rsc_c001n05_stop_0 - * Pseudo action: rsc_c001n07_stop_0 - * Pseudo action: heartbeat_192.168.100.182_stop_0 - * Resource action: rsc_c001n05 start on c001n07 - * Resource action: rsc_c001n07 start on c001n07 - * Pseudo action: ocf_192.168.100.181_stop_0 - * Pseudo action: ocf_192.168.100.181_stop_0 - * Resource action: rsc_c001n05 monitor=5000 on c001n07 - * Resource action: rsc_c001n07 monitor=5000 on c001n07 - * Pseudo action: group-1_stopped_0 - * Pseudo action: group-1_start_0 - * Resource action: ocf_192.168.100.181 start on c001n02 - * Resource action: heartbeat_192.168.100.182 start on c001n02 - * Resource action: ocf_192.168.100.183 start on c001n02 - * Pseudo action: group-1_running_0 - * Resource action: ocf_192.168.100.181 monitor=5000 on c001n02 - * Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02 - * Resource action: ocf_192.168.100.183 monitor=5000 on c001n02 Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node c001n03: UNCLEAN (online) + * Node c001n05: UNCLEAN (online) * Online: [ c001n02 c001n04 c001n06 c001n07 c001n08 ] - * OFFLINE: [ c001n03 c001n05 ] * Full List of Resources: * DcIPaddr (ocf:heartbeat:IPaddr): Stopped * Resource Group: group-1: - * ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n02 - * heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n02 - * ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n02 + * ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started [ c001n03 c001n05 ] + * heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n03 + * ocf_192.168.100.183 (ocf:heartbeat:IPaddr): FAILED [ c001n03 c001n05 ] * lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n04 * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n06 * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02 * rsc_c001n04 (ocf:heartbeat:IPaddr): Started c001n04 - * rsc_c001n05 (ocf:heartbeat:IPaddr): Started c001n07 + * rsc_c001n05 (ocf:heartbeat:IPaddr): Started c001n05 * rsc_c001n06 (ocf:heartbeat:IPaddr): Started c001n06 - * rsc_c001n07 (ocf:heartbeat:IPaddr): Started c001n07 + * rsc_c001n07 (ocf:heartbeat:IPaddr): Started c001n03 * rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08 * Clone Set: DoFencing [child_DoFencing]: * Started: [ c001n02 c001n04 c001n06 c001n07 c001n08 ] diff --git a/cts/scheduler/summary/stonith-1.summary b/cts/scheduler/summary/stonith-1.summary index dfb4be43ee4..3ccf2350aef 100644 --- a/cts/scheduler/summary/stonith-1.summary +++ b/cts/scheduler/summary/stonith-1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node sles-3: UNCLEAN (offline) * Online: [ sles-1 sles-2 sles-4 ] @@ -42,72 +44,35 @@ Transition Summary: * Move ocf_msdummy:5 ( sles-3 -> sles-2 Unpromoted ) Executing Cluster Transition: - * Pseudo action: group-1_start_0 - * Resource action: r192.168.100.182 monitor=5000 on sles-1 - * Resource action: lsb_dummy monitor=5000 on sles-2 - * Resource action: rsc_sles-2 monitor=5000 on sles-2 - * Resource action: rsc_sles-4 monitor=5000 on sles-4 - * Pseudo action: DoFencing_stop_0 - * Fencing sles-3 (reboot) - * Resource action: r192.168.100.183 start on 
sles-1 - * Pseudo action: migrator_stop_0 - * Pseudo action: rsc_sles-3_stop_0 - * Pseudo action: child_DoFencing:2_stop_0 - * Pseudo action: DoFencing_stopped_0 - * Pseudo action: DoFencing_start_0 - * Pseudo action: master_rsc_1_stop_0 - * Pseudo action: group-1_running_0 - * Resource action: r192.168.100.183 monitor=5000 on sles-1 - * Resource action: migrator start on sles-4 - * Resource action: rsc_sles-3 start on sles-4 - * Resource action: child_DoFencing:2 start on sles-4 - * Pseudo action: DoFencing_running_0 - * Pseudo action: ocf_msdummy:2_stop_0 - * Pseudo action: ocf_msdummy:5_stop_0 - * Pseudo action: master_rsc_1_stopped_0 - * Pseudo action: master_rsc_1_start_0 - * Resource action: migrator monitor=10000 on sles-4 - * Resource action: rsc_sles-3 monitor=5000 on sles-4 - * Resource action: child_DoFencing:2 monitor=60000 on sles-4 - * Resource action: ocf_msdummy:0 start on sles-4 - * Resource action: ocf_msdummy:1 start on sles-1 - * Resource action: ocf_msdummy:2 start on sles-2 - * Resource action: ocf_msdummy:3 start on sles-4 - * Resource action: ocf_msdummy:4 start on sles-1 - * Resource action: ocf_msdummy:5 start on sles-2 - * Pseudo action: master_rsc_1_running_0 - * Resource action: ocf_msdummy:0 monitor=5000 on sles-4 - * Resource action: ocf_msdummy:1 monitor=5000 on sles-1 - * Resource action: ocf_msdummy:2 monitor=5000 on sles-2 - * Resource action: ocf_msdummy:3 monitor=5000 on sles-4 - * Resource action: ocf_msdummy:4 monitor=5000 on sles-1 - * Resource action: ocf_msdummy:5 monitor=5000 on sles-2 Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node sles-3: UNCLEAN (offline) * Online: [ sles-1 sles-2 sles-4 ] - * OFFLINE: [ sles-3 ] * Full List of Resources: * Resource Group: group-1: * r192.168.100.181 (ocf:heartbeat:IPaddr): Started sles-1 * r192.168.100.182 (ocf:heartbeat:IPaddr): Started sles-1 - * r192.168.100.183 (ocf:heartbeat:IPaddr): Started sles-1 + * r192.168.100.183 (ocf:heartbeat:IPaddr): Stopped * lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Started sles-2 - * migrator (ocf:heartbeat:Dummy): Started sles-4 + * migrator (ocf:heartbeat:Dummy): Started sles-3 (UNCLEAN) * rsc_sles-1 (ocf:heartbeat:IPaddr): Started sles-1 * rsc_sles-2 (ocf:heartbeat:IPaddr): Started sles-2 - * rsc_sles-3 (ocf:heartbeat:IPaddr): Started sles-4 + * rsc_sles-3 (ocf:heartbeat:IPaddr): Started sles-3 (UNCLEAN) * rsc_sles-4 (ocf:heartbeat:IPaddr): Started sles-4 * Clone Set: DoFencing [child_DoFencing]: - * Started: [ sles-1 sles-2 sles-4 ] - * Stopped: [ sles-3 ] + * child_DoFencing (stonith:external/vmware): Started sles-3 (UNCLEAN) + * Started: [ sles-1 sles-2 ] + * Stopped: [ sles-4 ] * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique): - * ocf_msdummy:0 (ocf:heartbeat:Stateful): Unpromoted sles-4 - * ocf_msdummy:1 (ocf:heartbeat:Stateful): Unpromoted sles-1 - * ocf_msdummy:2 (ocf:heartbeat:Stateful): Unpromoted sles-2 - * ocf_msdummy:3 (ocf:heartbeat:Stateful): Unpromoted sles-4 - * ocf_msdummy:4 (ocf:heartbeat:Stateful): Unpromoted sles-1 - * ocf_msdummy:5 (ocf:heartbeat:Stateful): Unpromoted sles-2 + * ocf_msdummy:0 (ocf:heartbeat:Stateful): Stopped + * ocf_msdummy:1 (ocf:heartbeat:Stateful): Stopped + * ocf_msdummy:2 (ocf:heartbeat:Stateful): Unpromoted sles-3 (UNCLEAN) + * ocf_msdummy:3 (ocf:heartbeat:Stateful): Stopped + * ocf_msdummy:4 (ocf:heartbeat:Stateful): Stopped + * ocf_msdummy:5 (ocf:heartbeat:Stateful): Unpromoted sles-3 (UNCLEAN) * ocf_msdummy:6 (ocf:heartbeat:Stateful): Stopped * ocf_msdummy:7 (ocf:heartbeat:Stateful): 
Stopped diff --git a/cts/scheduler/summary/stonith-2.summary b/cts/scheduler/summary/stonith-2.summary index c6f657193b6..83151460503 100644 --- a/cts/scheduler/summary/stonith-2.summary +++ b/cts/scheduler/summary/stonith-2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node sles-5: UNCLEAN (offline) * Online: [ sles-1 sles-2 sles-3 sles-4 sles-6 ] @@ -38,14 +40,13 @@ Transition Summary: * Start rsc_sles-5 ( sles-6 ) Executing Cluster Transition: - * Fencing sles-5 (reboot) - * Resource action: rsc_sles-5 start on sles-6 - * Resource action: rsc_sles-5 monitor=5000 on sles-6 Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node sles-5: UNCLEAN (offline) * Online: [ sles-1 sles-2 sles-3 sles-4 sles-6 ] - * OFFLINE: [ sles-5 ] * Full List of Resources: * Resource Group: group-1: @@ -58,7 +59,7 @@ Revised Cluster Status: * rsc_sles-2 (ocf:heartbeat:IPaddr): Started sles-2 * rsc_sles-3 (ocf:heartbeat:IPaddr): Started sles-3 * rsc_sles-4 (ocf:heartbeat:IPaddr): Started sles-4 - * rsc_sles-5 (ocf:heartbeat:IPaddr): Started sles-6 + * rsc_sles-5 (ocf:heartbeat:IPaddr): Stopped * rsc_sles-6 (ocf:heartbeat:IPaddr): Started sles-6 * Clone Set: DoFencing [child_DoFencing]: * Started: [ sles-1 sles-2 sles-3 sles-4 sles-6 ] diff --git a/cts/scheduler/summary/stonith-3.summary b/cts/scheduler/summary/stonith-3.summary index d1adf9b96c1..6f5e7690891 100644 --- a/cts/scheduler/summary/stonith-3.summary +++ b/cts/scheduler/summary/stonith-3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node rh5node1: UNCLEAN (offline) * Online: [ rh5node2 ] @@ -14,24 +16,15 @@ Transition Summary: * Start prmStonith:0 ( rh5node2 ) Executing Cluster Transition: - * Resource action: prmIpPostgreSQLDB monitor on rh5node2 - * Resource action: prmStonith:0 monitor on rh5node2 - * Pseudo action: clnStonith_start_0 - * Fencing rh5node1 (reboot) - * Resource action: prmIpPostgreSQLDB start on rh5node2 - * Pseudo action: grpStonith:0_start_0 - * Resource action: prmStonith:0 start on rh5node2 - * Resource action: prmIpPostgreSQLDB monitor=30000 on rh5node2 - * Pseudo action: grpStonith:0_running_0 - * Pseudo action: clnStonith_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node rh5node1: UNCLEAN (offline) * Online: [ rh5node2 ] - * OFFLINE: [ rh5node1 ] * Full List of Resources: - * prmIpPostgreSQLDB (ocf:heartbeat:IPaddr): Started rh5node2 + * prmIpPostgreSQLDB (ocf:heartbeat:IPaddr): Stopped * Clone Set: clnStonith [grpStonith]: - * Started: [ rh5node2 ] - * Stopped: [ rh5node1 ] + * Stopped: [ rh5node1 rh5node2 ] diff --git a/cts/scheduler/summary/stonith-4.summary b/cts/scheduler/summary/stonith-4.summary index 6aa0f4d6d68..2a15f55c0df 100644 --- a/cts/scheduler/summary/stonith-4.summary +++ b/cts/scheduler/summary/stonith-4.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node pcmk-2: pending * Node pcmk-3: pending @@ -22,19 +24,21 @@ Transition Summary: * Start Fencing ( pcmk-1 ) blocked Executing Cluster Transition: - * Fencing pcmk-5 (reboot) - * Fencing pcmk-7 (reboot) - * Fencing pcmk-8 (reboot) - * Fencing pcmk-10 (reboot) Revised Cluster Status: + * Cluster Summary: + * Node List: * Node pcmk-2: pending * Node pcmk-3: pending + * Node pcmk-5: UNCLEAN (offline) + * Node pcmk-7: UNCLEAN (online) + * Node pcmk-8: UNCLEAN (offline) * Node pcmk-9: pending + * Node pcmk-10: UNCLEAN (online) * Node pcmk-11: pending * Online: [ pcmk-1 ] - * OFFLINE: [ pcmk-4 pcmk-5 pcmk-6 
pcmk-7 pcmk-8 pcmk-10 ] + * OFFLINE: [ pcmk-4 pcmk-6 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Stopped diff --git a/cts/scheduler/summary/stop-all-resources.summary b/cts/scheduler/summary/stop-all-resources.summary index da36fb0e881..feafbb0caac 100644 --- a/cts/scheduler/summary/stop-all-resources.summary +++ b/cts/scheduler/summary/stop-all-resources.summary @@ -1,6 +1,8 @@ 4 of 27 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ cluster01 cluster02 ] @@ -27,38 +29,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: ping:0 monitor on cluster02 - * Resource action: ping:0 monitor on cluster01 - * Resource action: Fencing monitor on cluster02 - * Resource action: Fencing monitor on cluster01 - * Resource action: dummy monitor on cluster02 - * Resource action: dummy monitor on cluster01 - * Resource action: inactive-dhcpd:0 monitor on cluster02 - * Resource action: inactive-dhcpd:0 monitor on cluster01 - * Resource action: inactive-dummy-1 monitor on cluster02 - * Resource action: inactive-dummy-1 monitor on cluster01 - * Resource action: inactive-dummy-2 monitor on cluster02 - * Resource action: inactive-dummy-2 monitor on cluster01 - * Resource action: httpd-bundle-ip-192.168.122.131 monitor on cluster02 - * Resource action: httpd-bundle-ip-192.168.122.131 monitor on cluster01 - * Resource action: httpd-bundle-docker-0 monitor on cluster02 - * Resource action: httpd-bundle-docker-0 monitor on cluster01 - * Resource action: httpd-bundle-ip-192.168.122.132 monitor on cluster02 - * Resource action: httpd-bundle-ip-192.168.122.132 monitor on cluster01 - * Resource action: httpd-bundle-docker-1 monitor on cluster02 - * Resource action: httpd-bundle-docker-1 monitor on cluster01 - * Resource action: httpd-bundle-ip-192.168.122.133 monitor on cluster02 - * Resource action: httpd-bundle-ip-192.168.122.133 monitor on cluster01 - * Resource action: httpd-bundle-docker-2 monitor on cluster02 - * Resource action: httpd-bundle-docker-2 monitor on cluster01 - * Resource action: Public-IP monitor on cluster02 - * Resource action: Public-IP monitor on cluster01 - * Resource action: Email monitor on cluster02 - * Resource action: Email monitor on cluster01 - * Resource action: mysql-proxy:0 monitor on cluster02 - * Resource action: mysql-proxy:0 monitor on cluster01 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ cluster01 cluster02 ] diff --git a/cts/scheduler/summary/stop-failure-no-fencing.summary b/cts/scheduler/summary/stop-failure-no-fencing.summary index bb164fd5be4..37983783b64 100644 --- a/cts/scheduler/summary/stop-failure-no-fencing.summary +++ b/cts/scheduler/summary/stop-failure-no-fencing.summary @@ -1,6 +1,9 @@ 0 of 9 resource instances DISABLED and 1 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Node pcmk-3: UNCLEAN (offline) * Node pcmk-4: UNCLEAN (offline) @@ -9,13 +12,16 @@ Current cluster status: * Full List of Resources: * Clone Set: dlm-clone [dlm]: * Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] - * ClusterIP (ocf:heartbeat:IPaddr2): Stopped + * ClusterIP (ocf:heartbeat:IPaddr2): Stopped (unmanaged) Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Node pcmk-3: 
UNCLEAN (offline) * Node pcmk-4: UNCLEAN (offline) @@ -24,4 +30,4 @@ Revised Cluster Status: * Full List of Resources: * Clone Set: dlm-clone [dlm]: * Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] - * ClusterIP (ocf:heartbeat:IPaddr2): Stopped + * ClusterIP (ocf:heartbeat:IPaddr2): Stopped (unmanaged) diff --git a/cts/scheduler/summary/stop-failure-no-quorum.summary b/cts/scheduler/summary/stop-failure-no-quorum.summary index e76827ddfc2..c367e37c41d 100644 --- a/cts/scheduler/summary/stop-failure-no-quorum.summary +++ b/cts/scheduler/summary/stop-failure-no-quorum.summary @@ -1,6 +1,9 @@ 0 of 10 resource instances DISABLED and 1 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Node pcmk-2: UNCLEAN (online) * Node pcmk-3: UNCLEAN (offline) @@ -26,20 +29,23 @@ Transition Summary: * Start Fencing ( pcmk-1 ) due to no quorum (blocked) Executing Cluster Transition: - * Fencing pcmk-2 (reboot) - * Pseudo action: clvm-clone_stop_0 - * Pseudo action: clvm_stop_0 - * Pseudo action: clvm-clone_stopped_0 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: + * Node pcmk-2: UNCLEAN (online) * Node pcmk-3: UNCLEAN (offline) * Node pcmk-4: UNCLEAN (offline) * Online: [ pcmk-1 ] - * OFFLINE: [ pcmk-2 ] * Full List of Resources: * Clone Set: dlm-clone [dlm]: * Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] + * Clone Set: clvm-clone [clvm]: + * clvm (lsb:clvmd): FAILED pcmk-2 + * clvm (lsb:clvmd): FAILED pcmk-3 (UNCLEAN, blocked) + * Stopped: [ pcmk-1 pcmk-3 pcmk-4 ] * ClusterIP (ocf:heartbeat:IPaddr2): Stopped * Fencing (stonith:fence_xvm): Stopped diff --git a/cts/scheduler/summary/stop-failure-with-fencing.summary b/cts/scheduler/summary/stop-failure-with-fencing.summary index 437708ef2e2..723dc9cd6dc 100644 --- a/cts/scheduler/summary/stop-failure-with-fencing.summary +++ b/cts/scheduler/summary/stop-failure-with-fencing.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Node pcmk-2: UNCLEAN (online) * Node pcmk-3: UNCLEAN (offline) @@ -23,23 +26,22 @@ Transition Summary: * Start Fencing ( pcmk-1 ) due to no quorum (blocked) Executing Cluster Transition: - * Resource action: Fencing monitor on pcmk-1 - * Fencing pcmk-2 (reboot) - * Pseudo action: clvm-clone_stop_0 - * Pseudo action: clvm_stop_0 - * Pseudo action: clvm-clone_stopped_0 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: + * Node pcmk-2: UNCLEAN (online) * Node pcmk-3: UNCLEAN (offline) * Node pcmk-4: UNCLEAN (offline) * Online: [ pcmk-1 ] - * OFFLINE: [ pcmk-2 ] * Full List of Resources: * Clone Set: dlm-clone [dlm]: * Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] * Clone Set: clvm-clone [clvm]: - * Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] + * clvm (lsb:clvmd): FAILED pcmk-2 + * Stopped: [ pcmk-1 pcmk-3 pcmk-4 ] * ClusterIP (ocf:heartbeat:IPaddr2): Stopped * Fencing (stonith:fence_xvm): Stopped diff --git a/cts/scheduler/summary/stop-unexpected-2.summary b/cts/scheduler/summary/stop-unexpected-2.summary index d6b0c15dca8..c373c09311c 100644 --- a/cts/scheduler/summary/stop-unexpected-2.summary +++ b/cts/scheduler/summary/stop-unexpected-2.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2022-04-22 14:15:37Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ rhel8-1 rhel8-2 
rhel8-3 rhel8-4 rhel8-5 ] @@ -13,12 +15,11 @@ Transition Summary: * Restart test ( rhel8-4 ) Executing Cluster Transition: - * Resource action: test stop on rhel8-3 - * Pseudo action: test_start_0 - * Resource action: test monitor=10000 on rhel8-4 Using the original execution date of: 2022-04-22 14:15:37Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 rhel8-5 ] @@ -26,4 +27,4 @@ Revised Cluster Status: * Fencing (stonith:fence_xvm): Started rhel8-1 * FencingPass (stonith:fence_dummy): Started rhel8-2 * FencingFail (stonith:fence_dummy): Started rhel8-3 - * test (ocf:pacemaker:Dummy): Started rhel8-4 + * test (ocf:pacemaker:Dummy): Started [ rhel8-4 rhel8-3 ] diff --git a/cts/scheduler/summary/stop-unexpected.summary b/cts/scheduler/summary/stop-unexpected.summary index 7c7fc68b6fd..31db10464e4 100644 --- a/cts/scheduler/summary/stop-unexpected.summary +++ b/cts/scheduler/summary/stop-unexpected.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node2 node3 ] @@ -11,31 +13,20 @@ Current cluster status: Transition Summary: * Recover dummy ( node2 ) due to being multiply active - * Restart dummy2 ( node2 ) due to required dummy start - * Restart dummy3 ( node2 ) due to required dummy2 start + * Restart dummy2 ( node2 ) + * Restart dummy3 ( node2 ) Executing Cluster Transition: - * Pseudo action: dgroup_stop_0 - * Resource action: dummy3 stop on node2 - * Resource action: dummy2 stop on node2 - * Resource action: dummy stop on node3 - * Pseudo action: dgroup_stopped_0 - * Pseudo action: dgroup_start_0 - * Pseudo action: dummy_start_0 - * Resource action: dummy monitor=10000 on node2 - * Resource action: dummy2 start on node2 - * Resource action: dummy2 monitor=10000 on node2 - * Resource action: dummy3 start on node2 - * Resource action: dummy3 monitor=10000 on node2 - * Pseudo action: dgroup_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node2 node3 ] * Full List of Resources: * st-sbd (stonith:external/sbd): Started node2 * Resource Group: dgroup: - * dummy (ocf:heartbeat:DummyTimeout): Started node2 + * dummy (ocf:heartbeat:DummyTimeout): FAILED [ node2 node3 ] * dummy2 (ocf:heartbeat:Dummy): Started node2 * dummy3 (ocf:heartbeat:Dummy): Started node2 diff --git a/cts/scheduler/summary/stopped-monitor-00.summary b/cts/scheduler/summary/stopped-monitor-00.summary index c28cad74968..1f96793d154 100644 --- a/cts/scheduler/summary/stopped-monitor-00.summary +++ b/cts/scheduler/summary/stopped-monitor-00.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -9,15 +11,12 @@ Transition Summary: * Start rsc1 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc1 monitor=20000 on node2 - * Resource action: rsc1 start on node1 - * Resource action: rsc1 monitor=10000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:pacemaker:Dummy): Started node1 + * rsc1 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/stopped-monitor-01.summary b/cts/scheduler/summary/stopped-monitor-01.summary index 0bd04883d6f..4507ade4c86 100644 --- a/cts/scheduler/summary/stopped-monitor-01.summary +++ b/cts/scheduler/summary/stopped-monitor-01.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ 
node1 node2 ] @@ -9,13 +11,12 @@ Transition Summary: * Recover rsc1 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 stop on node1 - * Resource action: rsc1 start on node1 - * Resource action: rsc1 monitor=10000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:pacemaker:Dummy): Started node1 + * rsc1 (ocf:pacemaker:Dummy): FAILED node1 diff --git a/cts/scheduler/summary/stopped-monitor-02.summary b/cts/scheduler/summary/stopped-monitor-02.summary index 93d9286e89e..4813c31d903 100644 --- a/cts/scheduler/summary/stopped-monitor-02.summary +++ b/cts/scheduler/summary/stopped-monitor-02.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -9,15 +11,12 @@ Transition Summary: * Recover rsc1 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 stop on node1 - * Resource action: rsc1 stop on node2 - * Resource action: rsc1 monitor=20000 on node2 - * Resource action: rsc1 start on node1 - * Resource action: rsc1 monitor=10000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:pacemaker:Dummy): Started node1 + * rsc1 (ocf:pacemaker:Dummy): FAILED [ node1 node2 ] diff --git a/cts/scheduler/summary/stopped-monitor-03.summary b/cts/scheduler/summary/stopped-monitor-03.summary index d16e523b2ea..1738e9e8fc3 100644 --- a/cts/scheduler/summary/stopped-monitor-03.summary +++ b/cts/scheduler/summary/stopped-monitor-03.summary @@ -1,6 +1,8 @@ 1 of 1 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -11,12 +13,12 @@ Transition Summary: * Stop rsc1 ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: rsc1 stop on node1 - * Resource action: rsc1 monitor=20000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:pacemaker:Dummy): Stopped (disabled) + * rsc1 (ocf:pacemaker:Dummy): Started node1 (disabled) diff --git a/cts/scheduler/summary/stopped-monitor-04.summary b/cts/scheduler/summary/stopped-monitor-04.summary index 11f4d49b798..0d1057125c0 100644 --- a/cts/scheduler/summary/stopped-monitor-04.summary +++ b/cts/scheduler/summary/stopped-monitor-04.summary @@ -1,6 +1,8 @@ 1 of 1 resource instances DISABLED and 1 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -12,6 +14,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/stopped-monitor-05.summary b/cts/scheduler/summary/stopped-monitor-05.summary index 1ed0d69050c..dcd7a563a12 100644 --- a/cts/scheduler/summary/stopped-monitor-05.summary +++ b/cts/scheduler/summary/stopped-monitor-05.summary @@ -1,6 +1,8 @@ 0 of 1 resource instances DISABLED and 1 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -12,6 +14,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/stopped-monitor-06.summary b/cts/scheduler/summary/stopped-monitor-06.summary index 744994f2927..8489b612d45 
100644 --- a/cts/scheduler/summary/stopped-monitor-06.summary +++ b/cts/scheduler/summary/stopped-monitor-06.summary @@ -1,6 +1,8 @@ 1 of 1 resource instances DISABLED and 1 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -12,6 +14,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/stopped-monitor-07.summary b/cts/scheduler/summary/stopped-monitor-07.summary index 596e6c5c765..3faf1c3b1e5 100644 --- a/cts/scheduler/summary/stopped-monitor-07.summary +++ b/cts/scheduler/summary/stopped-monitor-07.summary @@ -1,6 +1,8 @@ 0 of 1 resource instances DISABLED and 1 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -12,6 +14,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/stopped-monitor-08.summary b/cts/scheduler/summary/stopped-monitor-08.summary index d23f033fa9f..d7e2a1da610 100644 --- a/cts/scheduler/summary/stopped-monitor-08.summary +++ b/cts/scheduler/summary/stopped-monitor-08.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node node1: standby (with active resources) * Online: [ node2 ] @@ -10,16 +12,13 @@ Transition Summary: * Move rsc1 ( node1 -> node2 ) Executing Cluster Transition: - * Resource action: rsc1 stop on node1 - * Resource action: rsc1 cancel=20000 on node2 - * Resource action: rsc1 monitor=20000 on node1 - * Resource action: rsc1 start on node2 - * Resource action: rsc1 monitor=10000 on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: - * Node node1: standby + * Node node1: standby (with active resources) * Online: [ node2 ] * Full List of Resources: - * rsc1 (ocf:pacemaker:Dummy): Started node2 + * rsc1 (ocf:pacemaker:Dummy): Started node1 diff --git a/cts/scheduler/summary/stopped-monitor-09.summary b/cts/scheduler/summary/stopped-monitor-09.summary index 9a11f5abe90..e486fc2b728 100644 --- a/cts/scheduler/summary/stopped-monitor-09.summary +++ b/cts/scheduler/summary/stopped-monitor-09.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -10,6 +12,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/stopped-monitor-10.summary b/cts/scheduler/summary/stopped-monitor-10.summary index 5ca93433362..e1c4ead6b32 100644 --- a/cts/scheduler/summary/stopped-monitor-10.summary +++ b/cts/scheduler/summary/stopped-monitor-10.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -10,6 +12,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/stopped-monitor-11.summary b/cts/scheduler/summary/stopped-monitor-11.summary index 74feb98cb06..749b7ef2905 100644 --- a/cts/scheduler/summary/stopped-monitor-11.summary +++ b/cts/scheduler/summary/stopped-monitor-11.summary @@ -1,6 +1,8 @@ 1 of 1 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -12,6 +14,8 
@@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/stopped-monitor-12.summary b/cts/scheduler/summary/stopped-monitor-12.summary index 9d14834f043..a1f245ebfa5 100644 --- a/cts/scheduler/summary/stopped-monitor-12.summary +++ b/cts/scheduler/summary/stopped-monitor-12.summary @@ -1,6 +1,8 @@ 1 of 1 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -12,6 +14,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/stopped-monitor-20.summary b/cts/scheduler/summary/stopped-monitor-20.summary index b0d44ee9f8e..55a3226feaf 100644 --- a/cts/scheduler/summary/stopped-monitor-20.summary +++ b/cts/scheduler/summary/stopped-monitor-20.summary @@ -1,6 +1,8 @@ 1 of 1 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -10,12 +12,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc1 monitor=20000 on node2 - * Resource action: rsc1 monitor=20000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/stopped-monitor-21.summary b/cts/scheduler/summary/stopped-monitor-21.summary index e3e64c05d94..b910ca53bd9 100644 --- a/cts/scheduler/summary/stopped-monitor-21.summary +++ b/cts/scheduler/summary/stopped-monitor-21.summary @@ -1,6 +1,8 @@ 1 of 1 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -11,12 +13,12 @@ Transition Summary: * Stop rsc1 ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: rsc1 stop on node1 - * Resource action: rsc1 monitor=20000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:pacemaker:Dummy): Stopped (disabled) + * rsc1 (ocf:pacemaker:Dummy): FAILED node1 (disabled) diff --git a/cts/scheduler/summary/stopped-monitor-22.summary b/cts/scheduler/summary/stopped-monitor-22.summary index 8b04d7f2791..733ebcef07a 100644 --- a/cts/scheduler/summary/stopped-monitor-22.summary +++ b/cts/scheduler/summary/stopped-monitor-22.summary @@ -1,6 +1,8 @@ 1 of 1 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -12,14 +14,12 @@ Transition Summary: * Stop rsc1 ( node2 ) due to node availability Executing Cluster Transition: - * Resource action: rsc1 stop on node2 - * Resource action: rsc1 monitor=20000 on node2 - * Resource action: rsc1 stop on node1 - * Resource action: rsc1 monitor=20000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:pacemaker:Dummy): Stopped (disabled) + * rsc1 (ocf:pacemaker:Dummy): FAILED (disabled) [ node1 node2 ] diff --git a/cts/scheduler/summary/stopped-monitor-23.summary b/cts/scheduler/summary/stopped-monitor-23.summary index 3135b99ea0a..1f96793d154 100644 --- 
a/cts/scheduler/summary/stopped-monitor-23.summary +++ b/cts/scheduler/summary/stopped-monitor-23.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -9,13 +11,12 @@ Transition Summary: * Start rsc1 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 cancel=20000 on node1 - * Resource action: rsc1 start on node1 - * Resource action: rsc1 monitor=10000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:pacemaker:Dummy): Started node1 + * rsc1 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/stopped-monitor-24.summary b/cts/scheduler/summary/stopped-monitor-24.summary index abbedf8f786..f80ae9bad9e 100644 --- a/cts/scheduler/summary/stopped-monitor-24.summary +++ b/cts/scheduler/summary/stopped-monitor-24.summary @@ -1,6 +1,8 @@ 1 of 1 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -12,6 +14,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/stopped-monitor-25.summary b/cts/scheduler/summary/stopped-monitor-25.summary index 44e7340dac0..a1f245ebfa5 100644 --- a/cts/scheduler/summary/stopped-monitor-25.summary +++ b/cts/scheduler/summary/stopped-monitor-25.summary @@ -1,6 +1,8 @@ 1 of 1 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -10,10 +12,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: rsc1 monitor=10000 on node1 - * Resource action: rsc1 cancel=20000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/stopped-monitor-26.summary b/cts/scheduler/summary/stopped-monitor-26.summary index c88413d4c11..3b624c04a7f 100644 --- a/cts/scheduler/summary/stopped-monitor-26.summary +++ b/cts/scheduler/summary/stopped-monitor-26.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -10,6 +12,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/stopped-monitor-27.summary b/cts/scheduler/summary/stopped-monitor-27.summary index f38b439318d..e1c4ead6b32 100644 --- a/cts/scheduler/summary/stopped-monitor-27.summary +++ b/cts/scheduler/summary/stopped-monitor-27.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -8,10 +10,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: rsc1 monitor=10000 on node1 - * Resource action: rsc1 cancel=20000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/stopped-monitor-30.summary b/cts/scheduler/summary/stopped-monitor-30.summary index 97f47ad87dd..d18ec7d1c60 100644 --- a/cts/scheduler/summary/stopped-monitor-30.summary +++ b/cts/scheduler/summary/stopped-monitor-30.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] @@ -8,10 +10,10 @@ Current cluster status: Transition Summary: 
Executing Cluster Transition: - * Resource action: rsc1 monitor on node3 - * Resource action: rsc1 monitor=20000 on node3 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] diff --git a/cts/scheduler/summary/stopped-monitor-31.summary b/cts/scheduler/summary/stopped-monitor-31.summary index f3876d473f4..698e5e7fa42 100644 --- a/cts/scheduler/summary/stopped-monitor-31.summary +++ b/cts/scheduler/summary/stopped-monitor-31.summary @@ -1,6 +1,8 @@ 1 of 1 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] @@ -10,10 +12,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: rsc1 monitor on node3 - * Resource action: rsc1 monitor=20000 on node3 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] diff --git a/cts/scheduler/summary/suicide-needed-inquorate.summary b/cts/scheduler/summary/suicide-needed-inquorate.summary index d98152dbd41..d56db7a832c 100644 --- a/cts/scheduler/summary/suicide-needed-inquorate.summary +++ b/cts/scheduler/summary/suicide-needed-inquorate.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2017-08-21 17:12:54Z Current cluster status: + * Cluster Summary: + * Node List: * Node node1: UNCLEAN (online) * Node node2: UNCLEAN (online) @@ -14,14 +16,15 @@ Transition Summary: * Fence (reboot) node1 'cluster does not have quorum' Executing Cluster Transition: - * Fencing node1 (reboot) - * Fencing node3 (reboot) - * Fencing node2 (reboot) Using the original execution date of: 2017-08-21 17:12:54Z Revised Cluster Status: + * Cluster Summary: + * Node List: - * OFFLINE: [ node1 node2 node3 ] + * Node node1: UNCLEAN (online) + * Node node2: UNCLEAN (online) + * Node node3: UNCLEAN (online) * Full List of Resources: * Fencing (stonith:fence_xvm): Stopped diff --git a/cts/scheduler/summary/suicide-not-needed-initial-quorum.summary b/cts/scheduler/summary/suicide-not-needed-initial-quorum.summary index 9865ed3f86d..9735951a4ca 100644 --- a/cts/scheduler/summary/suicide-not-needed-initial-quorum.summary +++ b/cts/scheduler/summary/suicide-not-needed-initial-quorum.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2017-08-21 17:12:54Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] @@ -10,16 +12,13 @@ Transition Summary: * Start Fencing ( node1 ) Executing Cluster Transition: - * Resource action: Fencing monitor on node3 - * Resource action: Fencing monitor on node2 - * Resource action: Fencing monitor on node1 - * Resource action: Fencing start on node1 - * Resource action: Fencing monitor=120000 on node1 Using the original execution date of: 2017-08-21 17:12:54Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: - * Fencing (stonith:fence_xvm): Started node1 + * Fencing (stonith:fence_xvm): Stopped diff --git a/cts/scheduler/summary/suicide-not-needed-never-quorate.summary b/cts/scheduler/summary/suicide-not-needed-never-quorate.summary index 5c1f24809ef..5aa269bd536 100644 --- a/cts/scheduler/summary/suicide-not-needed-never-quorate.summary +++ b/cts/scheduler/summary/suicide-not-needed-never-quorate.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2017-08-21 17:12:54Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] @@ -10,12 +12,11 @@ Transition 
Summary: * Start Fencing ( node1 ) due to no quorum (blocked) Executing Cluster Transition: - * Resource action: Fencing monitor on node3 - * Resource action: Fencing monitor on node2 - * Resource action: Fencing monitor on node1 Using the original execution date of: 2017-08-21 17:12:54Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] diff --git a/cts/scheduler/summary/suicide-not-needed-quorate.summary b/cts/scheduler/summary/suicide-not-needed-quorate.summary index 9865ed3f86d..9735951a4ca 100644 --- a/cts/scheduler/summary/suicide-not-needed-quorate.summary +++ b/cts/scheduler/summary/suicide-not-needed-quorate.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2017-08-21 17:12:54Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] @@ -10,16 +12,13 @@ Transition Summary: * Start Fencing ( node1 ) Executing Cluster Transition: - * Resource action: Fencing monitor on node3 - * Resource action: Fencing monitor on node2 - * Resource action: Fencing monitor on node1 - * Resource action: Fencing start on node1 - * Resource action: Fencing monitor=120000 on node1 Using the original execution date of: 2017-08-21 17:12:54Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: - * Fencing (stonith:fence_xvm): Started node1 + * Fencing (stonith:fence_xvm): Stopped diff --git a/cts/scheduler/summary/systemhealth1.summary b/cts/scheduler/summary/systemhealth1.summary index f47d395f1b4..f68a3ee3882 100644 --- a/cts/scheduler/summary/systemhealth1.summary +++ b/cts/scheduler/summary/systemhealth1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node hs21c: UNCLEAN (offline) * Node hs21d: UNCLEAN (offline) @@ -13,12 +15,13 @@ Transition Summary: * Fence (reboot) hs21c 'node is unclean' Executing Cluster Transition: - * Fencing hs21d (reboot) - * Fencing hs21c (reboot) Revised Cluster Status: + * Cluster Summary: + * Node List: - * OFFLINE: [ hs21c hs21d ] + * Node hs21c: UNCLEAN (offline) + * Node hs21d: UNCLEAN (offline) * Full List of Resources: * stonith-1 (stonith:dummy): Stopped diff --git a/cts/scheduler/summary/systemhealth2.summary b/cts/scheduler/summary/systemhealth2.summary index ec1d7beec05..cd876e9e2d6 100644 --- a/cts/scheduler/summary/systemhealth2.summary +++ b/cts/scheduler/summary/systemhealth2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node hs21d: UNCLEAN (offline) * Online: [ hs21c ] @@ -15,22 +17,15 @@ Transition Summary: * Start nfs_1 ( hs21c ) Executing Cluster Transition: - * Resource action: stonith-1 monitor on hs21c - * Resource action: apache_1 monitor on hs21c - * Resource action: nfs_1 monitor on hs21c - * Fencing hs21d (reboot) - * Resource action: stonith-1 start on hs21c - * Resource action: apache_1 start on hs21c - * Resource action: nfs_1 start on hs21c - * Resource action: apache_1 monitor=10000 on hs21c - * Resource action: nfs_1 monitor=20000 on hs21c Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node hs21d: UNCLEAN (offline) * Online: [ hs21c ] - * OFFLINE: [ hs21d ] * Full List of Resources: - * stonith-1 (stonith:dummy): Started hs21c - * apache_1 (ocf:heartbeat:apache): Started hs21c - * nfs_1 (ocf:heartbeat:Filesystem): Started hs21c + * stonith-1 (stonith:dummy): Stopped + * apache_1 (ocf:heartbeat:apache): Stopped + * nfs_1 (ocf:heartbeat:Filesystem): Stopped diff --git 
a/cts/scheduler/summary/systemhealth3.summary b/cts/scheduler/summary/systemhealth3.summary index ec1d7beec05..cd876e9e2d6 100644 --- a/cts/scheduler/summary/systemhealth3.summary +++ b/cts/scheduler/summary/systemhealth3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node hs21d: UNCLEAN (offline) * Online: [ hs21c ] @@ -15,22 +17,15 @@ Transition Summary: * Start nfs_1 ( hs21c ) Executing Cluster Transition: - * Resource action: stonith-1 monitor on hs21c - * Resource action: apache_1 monitor on hs21c - * Resource action: nfs_1 monitor on hs21c - * Fencing hs21d (reboot) - * Resource action: stonith-1 start on hs21c - * Resource action: apache_1 start on hs21c - * Resource action: nfs_1 start on hs21c - * Resource action: apache_1 monitor=10000 on hs21c - * Resource action: nfs_1 monitor=20000 on hs21c Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node hs21d: UNCLEAN (offline) * Online: [ hs21c ] - * OFFLINE: [ hs21d ] * Full List of Resources: - * stonith-1 (stonith:dummy): Started hs21c - * apache_1 (ocf:heartbeat:apache): Started hs21c - * nfs_1 (ocf:heartbeat:Filesystem): Started hs21c + * stonith-1 (stonith:dummy): Stopped + * apache_1 (ocf:heartbeat:apache): Stopped + * nfs_1 (ocf:heartbeat:Filesystem): Stopped diff --git a/cts/scheduler/summary/systemhealthm1.summary b/cts/scheduler/summary/systemhealthm1.summary index f47d395f1b4..f68a3ee3882 100644 --- a/cts/scheduler/summary/systemhealthm1.summary +++ b/cts/scheduler/summary/systemhealthm1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node hs21c: UNCLEAN (offline) * Node hs21d: UNCLEAN (offline) @@ -13,12 +15,13 @@ Transition Summary: * Fence (reboot) hs21c 'node is unclean' Executing Cluster Transition: - * Fencing hs21d (reboot) - * Fencing hs21c (reboot) Revised Cluster Status: + * Cluster Summary: + * Node List: - * OFFLINE: [ hs21c hs21d ] + * Node hs21c: UNCLEAN (offline) + * Node hs21d: UNCLEAN (offline) * Full List of Resources: * stonith-1 (stonith:dummy): Stopped diff --git a/cts/scheduler/summary/systemhealthm2.summary b/cts/scheduler/summary/systemhealthm2.summary index 41071ff56ed..a6dd55bade4 100644 --- a/cts/scheduler/summary/systemhealthm2.summary +++ b/cts/scheduler/summary/systemhealthm2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node hs21c: online (health is YELLOW) * Node hs21d: UNCLEAN (offline) @@ -15,22 +17,15 @@ Transition Summary: * Start nfs_1 ( hs21c ) Executing Cluster Transition: - * Resource action: stonith-1 monitor on hs21c - * Resource action: apache_1 monitor on hs21c - * Resource action: nfs_1 monitor on hs21c - * Fencing hs21d (reboot) - * Resource action: stonith-1 start on hs21c - * Resource action: apache_1 start on hs21c - * Resource action: nfs_1 start on hs21c - * Resource action: apache_1 monitor=10000 on hs21c - * Resource action: nfs_1 monitor=20000 on hs21c Revised Cluster Status: + * Cluster Summary: + * Node List: * Node hs21c: online (health is YELLOW) - * OFFLINE: [ hs21d ] + * Node hs21d: UNCLEAN (offline) * Full List of Resources: - * stonith-1 (stonith:dummy): Started hs21c - * apache_1 (ocf:heartbeat:apache): Started hs21c - * nfs_1 (ocf:heartbeat:Filesystem): Started hs21c + * stonith-1 (stonith:dummy): Stopped + * apache_1 (ocf:heartbeat:apache): Stopped + * nfs_1 (ocf:heartbeat:Filesystem): Stopped diff --git a/cts/scheduler/summary/systemhealthm3.summary b/cts/scheduler/summary/systemhealthm3.summary index e8c2174e84a..e3f0849c118 
100644 --- a/cts/scheduler/summary/systemhealthm3.summary +++ b/cts/scheduler/summary/systemhealthm3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node hs21c: online (health is RED) * Node hs21d: UNCLEAN (offline) @@ -12,15 +14,13 @@ Transition Summary: * Fence (reboot) hs21d 'node is unclean' Executing Cluster Transition: - * Resource action: stonith-1 monitor on hs21c - * Resource action: apache_1 monitor on hs21c - * Resource action: nfs_1 monitor on hs21c - * Fencing hs21d (reboot) Revised Cluster Status: + * Cluster Summary: + * Node List: * Node hs21c: online (health is RED) - * OFFLINE: [ hs21d ] + * Node hs21d: UNCLEAN (offline) * Full List of Resources: * stonith-1 (stonith:dummy): Stopped diff --git a/cts/scheduler/summary/systemhealthn1.summary b/cts/scheduler/summary/systemhealthn1.summary index f47d395f1b4..f68a3ee3882 100644 --- a/cts/scheduler/summary/systemhealthn1.summary +++ b/cts/scheduler/summary/systemhealthn1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node hs21c: UNCLEAN (offline) * Node hs21d: UNCLEAN (offline) @@ -13,12 +15,13 @@ Transition Summary: * Fence (reboot) hs21c 'node is unclean' Executing Cluster Transition: - * Fencing hs21d (reboot) - * Fencing hs21c (reboot) Revised Cluster Status: + * Cluster Summary: + * Node List: - * OFFLINE: [ hs21c hs21d ] + * Node hs21c: UNCLEAN (offline) + * Node hs21d: UNCLEAN (offline) * Full List of Resources: * stonith-1 (stonith:dummy): Stopped diff --git a/cts/scheduler/summary/systemhealthn2.summary b/cts/scheduler/summary/systemhealthn2.summary index ec1d7beec05..cd876e9e2d6 100644 --- a/cts/scheduler/summary/systemhealthn2.summary +++ b/cts/scheduler/summary/systemhealthn2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node hs21d: UNCLEAN (offline) * Online: [ hs21c ] @@ -15,22 +17,15 @@ Transition Summary: * Start nfs_1 ( hs21c ) Executing Cluster Transition: - * Resource action: stonith-1 monitor on hs21c - * Resource action: apache_1 monitor on hs21c - * Resource action: nfs_1 monitor on hs21c - * Fencing hs21d (reboot) - * Resource action: stonith-1 start on hs21c - * Resource action: apache_1 start on hs21c - * Resource action: nfs_1 start on hs21c - * Resource action: apache_1 monitor=10000 on hs21c - * Resource action: nfs_1 monitor=20000 on hs21c Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node hs21d: UNCLEAN (offline) * Online: [ hs21c ] - * OFFLINE: [ hs21d ] * Full List of Resources: - * stonith-1 (stonith:dummy): Started hs21c - * apache_1 (ocf:heartbeat:apache): Started hs21c - * nfs_1 (ocf:heartbeat:Filesystem): Started hs21c + * stonith-1 (stonith:dummy): Stopped + * apache_1 (ocf:heartbeat:apache): Stopped + * nfs_1 (ocf:heartbeat:Filesystem): Stopped diff --git a/cts/scheduler/summary/systemhealthn3.summary b/cts/scheduler/summary/systemhealthn3.summary index ec1d7beec05..cd876e9e2d6 100644 --- a/cts/scheduler/summary/systemhealthn3.summary +++ b/cts/scheduler/summary/systemhealthn3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node hs21d: UNCLEAN (offline) * Online: [ hs21c ] @@ -15,22 +17,15 @@ Transition Summary: * Start nfs_1 ( hs21c ) Executing Cluster Transition: - * Resource action: stonith-1 monitor on hs21c - * Resource action: apache_1 monitor on hs21c - * Resource action: nfs_1 monitor on hs21c - * Fencing hs21d (reboot) - * Resource action: stonith-1 start on hs21c - * Resource action: apache_1 start on hs21c - 
* Resource action: nfs_1 start on hs21c - * Resource action: apache_1 monitor=10000 on hs21c - * Resource action: nfs_1 monitor=20000 on hs21c Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node hs21d: UNCLEAN (offline) * Online: [ hs21c ] - * OFFLINE: [ hs21d ] * Full List of Resources: - * stonith-1 (stonith:dummy): Started hs21c - * apache_1 (ocf:heartbeat:apache): Started hs21c - * nfs_1 (ocf:heartbeat:Filesystem): Started hs21c + * stonith-1 (stonith:dummy): Stopped + * apache_1 (ocf:heartbeat:apache): Stopped + * nfs_1 (ocf:heartbeat:Filesystem): Stopped diff --git a/cts/scheduler/summary/systemhealtho1.summary b/cts/scheduler/summary/systemhealtho1.summary index f47d395f1b4..f68a3ee3882 100644 --- a/cts/scheduler/summary/systemhealtho1.summary +++ b/cts/scheduler/summary/systemhealtho1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node hs21c: UNCLEAN (offline) * Node hs21d: UNCLEAN (offline) @@ -13,12 +15,13 @@ Transition Summary: * Fence (reboot) hs21c 'node is unclean' Executing Cluster Transition: - * Fencing hs21d (reboot) - * Fencing hs21c (reboot) Revised Cluster Status: + * Cluster Summary: + * Node List: - * OFFLINE: [ hs21c hs21d ] + * Node hs21c: UNCLEAN (offline) + * Node hs21d: UNCLEAN (offline) * Full List of Resources: * stonith-1 (stonith:dummy): Stopped diff --git a/cts/scheduler/summary/systemhealtho2.summary b/cts/scheduler/summary/systemhealtho2.summary index fb951fd9988..096717625be 100644 --- a/cts/scheduler/summary/systemhealtho2.summary +++ b/cts/scheduler/summary/systemhealtho2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node hs21c: online (health is YELLOW) * Node hs21d: UNCLEAN (offline) @@ -12,15 +14,13 @@ Transition Summary: * Fence (reboot) hs21d 'node is unclean' Executing Cluster Transition: - * Resource action: stonith-1 monitor on hs21c - * Resource action: apache_1 monitor on hs21c - * Resource action: nfs_1 monitor on hs21c - * Fencing hs21d (reboot) Revised Cluster Status: + * Cluster Summary: + * Node List: * Node hs21c: online (health is YELLOW) - * OFFLINE: [ hs21d ] + * Node hs21d: UNCLEAN (offline) * Full List of Resources: * stonith-1 (stonith:dummy): Stopped diff --git a/cts/scheduler/summary/systemhealtho3.summary b/cts/scheduler/summary/systemhealtho3.summary index e8c2174e84a..e3f0849c118 100644 --- a/cts/scheduler/summary/systemhealtho3.summary +++ b/cts/scheduler/summary/systemhealtho3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node hs21c: online (health is RED) * Node hs21d: UNCLEAN (offline) @@ -12,15 +14,13 @@ Transition Summary: * Fence (reboot) hs21d 'node is unclean' Executing Cluster Transition: - * Resource action: stonith-1 monitor on hs21c - * Resource action: apache_1 monitor on hs21c - * Resource action: nfs_1 monitor on hs21c - * Fencing hs21d (reboot) Revised Cluster Status: + * Cluster Summary: + * Node List: * Node hs21c: online (health is RED) - * OFFLINE: [ hs21d ] + * Node hs21d: UNCLEAN (offline) * Full List of Resources: * stonith-1 (stonith:dummy): Stopped diff --git a/cts/scheduler/summary/systemhealthp1.summary b/cts/scheduler/summary/systemhealthp1.summary index f47d395f1b4..f68a3ee3882 100644 --- a/cts/scheduler/summary/systemhealthp1.summary +++ b/cts/scheduler/summary/systemhealthp1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node hs21c: UNCLEAN (offline) * Node hs21d: UNCLEAN (offline) @@ -13,12 +15,13 @@ Transition Summary: * 
Fence (reboot) hs21c 'node is unclean' Executing Cluster Transition: - * Fencing hs21d (reboot) - * Fencing hs21c (reboot) Revised Cluster Status: + * Cluster Summary: + * Node List: - * OFFLINE: [ hs21c hs21d ] + * Node hs21c: UNCLEAN (offline) + * Node hs21d: UNCLEAN (offline) * Full List of Resources: * stonith-1 (stonith:dummy): Stopped diff --git a/cts/scheduler/summary/systemhealthp2.summary b/cts/scheduler/summary/systemhealthp2.summary index 9dba00189ee..1d421409aab 100644 --- a/cts/scheduler/summary/systemhealthp2.summary +++ b/cts/scheduler/summary/systemhealthp2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node hs21c: online (health is YELLOW) * Node hs21d: UNCLEAN (offline) @@ -14,21 +16,15 @@ Transition Summary: * Start nfs_1 ( hs21c ) Executing Cluster Transition: - * Resource action: stonith-1 monitor on hs21c - * Resource action: apache_1 monitor on hs21c - * Resource action: nfs_1 monitor on hs21c - * Fencing hs21d (reboot) - * Resource action: apache_1 start on hs21c - * Resource action: nfs_1 start on hs21c - * Resource action: apache_1 monitor=10000 on hs21c - * Resource action: nfs_1 monitor=20000 on hs21c Revised Cluster Status: + * Cluster Summary: + * Node List: * Node hs21c: online (health is YELLOW) - * OFFLINE: [ hs21d ] + * Node hs21d: UNCLEAN (offline) * Full List of Resources: * stonith-1 (stonith:dummy): Stopped - * apache_1 (ocf:heartbeat:apache): Started hs21c - * nfs_1 (ocf:heartbeat:Filesystem): Started hs21c + * apache_1 (ocf:heartbeat:apache): Stopped + * nfs_1 (ocf:heartbeat:Filesystem): Stopped diff --git a/cts/scheduler/summary/systemhealthp3.summary b/cts/scheduler/summary/systemhealthp3.summary index e8c2174e84a..e3f0849c118 100644 --- a/cts/scheduler/summary/systemhealthp3.summary +++ b/cts/scheduler/summary/systemhealthp3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node hs21c: online (health is RED) * Node hs21d: UNCLEAN (offline) @@ -12,15 +14,13 @@ Transition Summary: * Fence (reboot) hs21d 'node is unclean' Executing Cluster Transition: - * Resource action: stonith-1 monitor on hs21c - * Resource action: apache_1 monitor on hs21c - * Resource action: nfs_1 monitor on hs21c - * Fencing hs21d (reboot) Revised Cluster Status: + * Cluster Summary: + * Node List: * Node hs21c: online (health is RED) - * OFFLINE: [ hs21d ] + * Node hs21d: UNCLEAN (offline) * Full List of Resources: * stonith-1 (stonith:dummy): Stopped diff --git a/cts/scheduler/summary/tags-coloc-order-1.summary b/cts/scheduler/summary/tags-coloc-order-1.summary index 9d421dd0651..bc4a4025d0a 100644 --- a/cts/scheduler/summary/tags-coloc-order-1.summary +++ b/cts/scheduler/summary/tags-coloc-order-1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -15,25 +17,15 @@ Transition Summary: * Start rsc4 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc2 monitor on node1 - * Resource action: rsc3 monitor on node2 - * Resource action: rsc3 monitor on node1 - * Resource action: rsc4 monitor on node2 - * Resource action: rsc4 monitor on node1 - * Resource action: rsc1 start on node1 - * Resource action: rsc2 start on node1 - * Resource action: rsc3 start on node1 - * Resource action: rsc4 start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of 
Resources: - * rsc1 (ocf:pacemaker:Dummy): Started node1 - * rsc2 (ocf:pacemaker:Dummy): Started node1 - * rsc3 (ocf:pacemaker:Dummy): Started node1 - * rsc4 (ocf:pacemaker:Dummy): Started node1 + * rsc1 (ocf:pacemaker:Dummy): Stopped + * rsc2 (ocf:pacemaker:Dummy): Stopped + * rsc3 (ocf:pacemaker:Dummy): Stopped + * rsc4 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/tags-coloc-order-2.summary b/cts/scheduler/summary/tags-coloc-order-2.summary index 11e4730c0b1..8bb8d4553ff 100644 --- a/cts/scheduler/summary/tags-coloc-order-2.summary +++ b/cts/scheduler/summary/tags-coloc-order-2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -31,57 +33,23 @@ Transition Summary: * Start rsc12 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc2 monitor on node1 - * Resource action: rsc3 monitor on node2 - * Resource action: rsc3 monitor on node1 - * Resource action: rsc4 monitor on node2 - * Resource action: rsc4 monitor on node1 - * Resource action: rsc5 monitor on node2 - * Resource action: rsc5 monitor on node1 - * Resource action: rsc6 monitor on node2 - * Resource action: rsc6 monitor on node1 - * Resource action: rsc7 monitor on node2 - * Resource action: rsc7 monitor on node1 - * Resource action: rsc8 monitor on node2 - * Resource action: rsc8 monitor on node1 - * Resource action: rsc9 monitor on node2 - * Resource action: rsc9 monitor on node1 - * Resource action: rsc10 monitor on node2 - * Resource action: rsc10 monitor on node1 - * Resource action: rsc11 monitor on node2 - * Resource action: rsc11 monitor on node1 - * Resource action: rsc12 monitor on node2 - * Resource action: rsc12 monitor on node1 - * Resource action: rsc1 start on node1 - * Resource action: rsc2 start on node1 - * Resource action: rsc3 start on node1 - * Resource action: rsc4 start on node1 - * Resource action: rsc5 start on node1 - * Resource action: rsc6 start on node1 - * Resource action: rsc7 start on node1 - * Resource action: rsc8 start on node1 - * Resource action: rsc9 start on node1 - * Resource action: rsc10 start on node1 - * Resource action: rsc11 start on node1 - * Resource action: rsc12 start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:pacemaker:Dummy): Started node1 - * rsc2 (ocf:pacemaker:Dummy): Started node1 - * rsc3 (ocf:pacemaker:Dummy): Started node1 - * rsc4 (ocf:pacemaker:Dummy): Started node1 - * rsc5 (ocf:pacemaker:Dummy): Started node1 - * rsc6 (ocf:pacemaker:Dummy): Started node1 - * rsc7 (ocf:pacemaker:Dummy): Started node1 - * rsc8 (ocf:pacemaker:Dummy): Started node1 - * rsc9 (ocf:pacemaker:Dummy): Started node1 - * rsc10 (ocf:pacemaker:Dummy): Started node1 - * rsc11 (ocf:pacemaker:Dummy): Started node1 - * rsc12 (ocf:pacemaker:Dummy): Started node1 + * rsc1 (ocf:pacemaker:Dummy): Stopped + * rsc2 (ocf:pacemaker:Dummy): Stopped + * rsc3 (ocf:pacemaker:Dummy): Stopped + * rsc4 (ocf:pacemaker:Dummy): Stopped + * rsc5 (ocf:pacemaker:Dummy): Stopped + * rsc6 (ocf:pacemaker:Dummy): Stopped + * rsc7 (ocf:pacemaker:Dummy): Stopped + * rsc8 (ocf:pacemaker:Dummy): Stopped + * rsc9 (ocf:pacemaker:Dummy): Stopped + * rsc10 (ocf:pacemaker:Dummy): Stopped + * rsc11 (ocf:pacemaker:Dummy): Stopped + * rsc12 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/tags-location.summary 
b/cts/scheduler/summary/tags-location.summary index e6047113c5f..7484f6f19af 100644 --- a/cts/scheduler/summary/tags-location.summary +++ b/cts/scheduler/summary/tags-location.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -19,33 +21,17 @@ Transition Summary: * Start rsc6 ( node2 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc2 monitor on node1 - * Resource action: rsc3 monitor on node2 - * Resource action: rsc3 monitor on node1 - * Resource action: rsc4 monitor on node2 - * Resource action: rsc4 monitor on node1 - * Resource action: rsc5 monitor on node2 - * Resource action: rsc5 monitor on node1 - * Resource action: rsc6 monitor on node2 - * Resource action: rsc6 monitor on node1 - * Resource action: rsc1 start on node2 - * Resource action: rsc2 start on node2 - * Resource action: rsc3 start on node2 - * Resource action: rsc4 start on node2 - * Resource action: rsc5 start on node2 - * Resource action: rsc6 start on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:pacemaker:Dummy): Started node2 - * rsc2 (ocf:pacemaker:Dummy): Started node2 - * rsc3 (ocf:pacemaker:Dummy): Started node2 - * rsc4 (ocf:pacemaker:Dummy): Started node2 - * rsc5 (ocf:pacemaker:Dummy): Started node2 - * rsc6 (ocf:pacemaker:Dummy): Started node2 + * rsc1 (ocf:pacemaker:Dummy): Stopped + * rsc2 (ocf:pacemaker:Dummy): Stopped + * rsc3 (ocf:pacemaker:Dummy): Stopped + * rsc4 (ocf:pacemaker:Dummy): Stopped + * rsc5 (ocf:pacemaker:Dummy): Stopped + * rsc6 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/tags-ticket.summary b/cts/scheduler/summary/tags-ticket.summary index 572d2b48c1c..f49364babea 100644 --- a/cts/scheduler/summary/tags-ticket.summary +++ b/cts/scheduler/summary/tags-ticket.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -13,20 +15,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc2 monitor on node1 - * Resource action: rsc3 monitor on node2 - * Resource action: rsc3 monitor on node1 - * Resource action: rsc4 monitor on node2 - * Resource action: rsc4 monitor on node1 - * Resource action: rsc5 monitor on node2 - * Resource action: rsc5 monitor on node1 - * Resource action: rsc6 monitor on node2 - * Resource action: rsc6 monitor on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/target-0.summary b/cts/scheduler/summary/target-0.summary index ee291fc98f2..e4ce3379e92 100644 --- a/cts/scheduler/summary/target-0.summary +++ b/cts/scheduler/summary/target-0.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] @@ -12,23 +14,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: DcIPaddr monitor on c001n08 - * Resource action: DcIPaddr monitor on c001n03 - * Resource action: DcIPaddr monitor on c001n01 - * Resource action: rsc_c001n08 monitor on c001n03 - * Resource action: rsc_c001n08 monitor on c001n02 - * Resource action: rsc_c001n08 monitor on c001n01 - * Resource 
action: rsc_c001n02 monitor on c001n08 - * Resource action: rsc_c001n02 monitor on c001n03 - * Resource action: rsc_c001n02 monitor on c001n01 - * Resource action: rsc_c001n03 monitor on c001n08 - * Resource action: rsc_c001n03 monitor on c001n02 - * Resource action: rsc_c001n03 monitor on c001n01 - * Resource action: rsc_c001n01 monitor on c001n08 - * Resource action: rsc_c001n01 monitor on c001n03 - * Resource action: rsc_c001n01 monitor on c001n02 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] diff --git a/cts/scheduler/summary/target-1.summary b/cts/scheduler/summary/target-1.summary index edc1daf32b5..eda2177abb1 100644 --- a/cts/scheduler/summary/target-1.summary +++ b/cts/scheduler/summary/target-1.summary @@ -1,6 +1,9 @@ 1 of 5 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] @@ -16,27 +19,17 @@ Transition Summary: * Stop rsc_c001n08 ( c001n08 ) due to node availability Executing Cluster Transition: - * Resource action: DcIPaddr monitor on c001n08 - * Resource action: DcIPaddr monitor on c001n03 - * Resource action: DcIPaddr monitor on c001n01 - * Resource action: rsc_c001n08 stop on c001n08 - * Resource action: rsc_c001n08 monitor on c001n03 - * Resource action: rsc_c001n08 monitor on c001n02 - * Resource action: rsc_c001n08 monitor on c001n01 - * Resource action: rsc_c001n02 monitor on c001n08 - * Resource action: rsc_c001n02 monitor on c001n03 - * Resource action: rsc_c001n02 monitor on c001n01 - * Resource action: rsc_c001n01 monitor on c001n08 - * Resource action: rsc_c001n01 monitor on c001n03 - * Resource action: rsc_c001n01 monitor on c001n02 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] * Full List of Resources: * DcIPaddr (ocf:heartbeat:IPaddr): Started c001n02 - * rsc_c001n08 (ocf:heartbeat:IPaddr): Stopped (disabled) + * rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08 (disabled) * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02 * Clone Set: promoteme [rsc_c001n03] (promotable): * Unpromoted: [ c001n03 ] diff --git a/cts/scheduler/summary/target-2.summary b/cts/scheduler/summary/target-2.summary index a6194ae01ef..392d217c3e9 100644 --- a/cts/scheduler/summary/target-2.summary +++ b/cts/scheduler/summary/target-2.summary @@ -1,6 +1,9 @@ 1 of 5 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] @@ -15,30 +18,17 @@ Transition Summary: * Stop rsc_c001n08 ( c001n08 ) due to node availability Executing Cluster Transition: - * Resource action: DcIPaddr monitor on c001n08 - * Resource action: DcIPaddr monitor on c001n03 - * Resource action: DcIPaddr monitor on c001n01 - * Resource action: rsc_c001n08 stop on c001n08 - * Resource action: rsc_c001n08 monitor on c001n03 - * Resource action: rsc_c001n08 monitor on c001n02 - * Resource action: rsc_c001n08 monitor on c001n01 - * Resource action: rsc_c001n02 monitor on c001n08 - * Resource action: rsc_c001n02 monitor on c001n03 - * Resource action: rsc_c001n02 monitor on c001n01 - * Resource action: rsc_c001n03 monitor on c001n08 - * Resource action: rsc_c001n03 monitor on 
c001n02 - * Resource action: rsc_c001n03 monitor on c001n01 - * Resource action: rsc_c001n01 monitor on c001n08 - * Resource action: rsc_c001n01 monitor on c001n03 - * Resource action: rsc_c001n01 monitor on c001n02 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] * Full List of Resources: * DcIPaddr (ocf:heartbeat:IPaddr): Started c001n02 - * rsc_c001n08 (ocf:heartbeat:IPaddr): Stopped (disabled) + * rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08 (disabled) * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02 * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03 * rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01 diff --git a/cts/scheduler/summary/template-1.summary b/cts/scheduler/summary/template-1.summary index eb4493e3532..c16fafb6b55 100644 --- a/cts/scheduler/summary/template-1.summary +++ b/cts/scheduler/summary/template-1.summary @@ -1,6 +1,8 @@ 1 of 2 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -12,19 +14,13 @@ Transition Summary: * Start rsc2 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc2 monitor on node1 - * Pseudo action: load_stopped_node2 - * Pseudo action: load_stopped_node1 - * Resource action: rsc2 start on node1 - * Resource action: rsc2 monitor=10000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc1 (ocf:pacemaker:Dummy): Stopped (disabled) - * rsc2 (ocf:pacemaker:Dummy): Started node1 + * rsc2 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/template-2.summary b/cts/scheduler/summary/template-2.summary index e7d2c11423b..75d1b767190 100644 --- a/cts/scheduler/summary/template-2.summary +++ b/cts/scheduler/summary/template-2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -10,19 +12,13 @@ Transition Summary: * Start rsc2 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc2 monitor on node1 - * Pseudo action: load_stopped_node2 - * Pseudo action: load_stopped_node1 - * Resource action: rsc2 start on node1 - * Resource action: rsc2 monitor=20000 on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc1 (ocf:pacemaker:Dummy): Stopped - * rsc2 (ocf:pacemaker:Dummy): Started node1 + * rsc2 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/template-3.summary b/cts/scheduler/summary/template-3.summary index 4054f1e5bcd..3f7a3c40c48 100644 --- a/cts/scheduler/summary/template-3.summary +++ b/cts/scheduler/summary/template-3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -11,23 +13,13 @@ Transition Summary: * Start rsc2 ( node2 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc2 monitor on node1 - * Pseudo action: load_stopped_node2 - * Pseudo action: load_stopped_node1 - * Resource action: rsc1 start on node1 - * 
Resource action: rsc2 monitor=30000 on node1 - * Resource action: rsc2 start on node2 - * Resource action: rsc1 monitor=20000 on node1 - * Resource action: rsc1 monitor=10000 on node1 - * Resource action: rsc2 monitor=5000 on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:pacemaker:Dummy): Started node1 - * rsc2 (ocf:pacemaker:Dummy): Started node2 + * rsc1 (ocf:pacemaker:Dummy): Stopped + * rsc2 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/template-clone-group.summary b/cts/scheduler/summary/template-clone-group.summary index efc904daad7..0562159e5e4 100644 --- a/cts/scheduler/summary/template-clone-group.summary +++ b/cts/scheduler/summary/template-clone-group.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -13,25 +15,13 @@ Transition Summary: * Start rsc2:1 ( node2 ) Executing Cluster Transition: - * Resource action: rsc1:0 monitor on node1 - * Resource action: rsc2:0 monitor on node1 - * Resource action: rsc1:1 monitor on node2 - * Resource action: rsc2:1 monitor on node2 - * Pseudo action: clone1_start_0 - * Pseudo action: group1:0_start_0 - * Resource action: rsc1:0 start on node1 - * Resource action: rsc2:0 start on node1 - * Pseudo action: group1:1_start_0 - * Resource action: rsc1:1 start on node2 - * Resource action: rsc2:1 start on node2 - * Pseudo action: group1:0_running_0 - * Pseudo action: group1:1_running_0 - * Pseudo action: clone1_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: clone1 [group1]: - * Started: [ node1 node2 ] + * Stopped: [ node1 node2 ] diff --git a/cts/scheduler/summary/template-clone-primitive.summary b/cts/scheduler/summary/template-clone-primitive.summary index 59fdfbeec81..90ee291e65d 100644 --- a/cts/scheduler/summary/template-clone-primitive.summary +++ b/cts/scheduler/summary/template-clone-primitive.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -11,17 +13,13 @@ Transition Summary: * Start rsc1:1 ( node2 ) Executing Cluster Transition: - * Resource action: rsc1:0 monitor on node1 - * Resource action: rsc1:1 monitor on node2 - * Pseudo action: clone1_start_0 - * Resource action: rsc1:0 start on node1 - * Resource action: rsc1:1 start on node2 - * Pseudo action: clone1_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: clone1 [rsc1]: - * Started: [ node1 node2 ] + * Stopped: [ node1 node2 ] diff --git a/cts/scheduler/summary/template-coloc-1.summary b/cts/scheduler/summary/template-coloc-1.summary index 9d421dd0651..bc4a4025d0a 100644 --- a/cts/scheduler/summary/template-coloc-1.summary +++ b/cts/scheduler/summary/template-coloc-1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -15,25 +17,15 @@ Transition Summary: * Start rsc4 ( node1 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc2 monitor on node1 - * Resource action: rsc3 monitor on node2 - * Resource action: rsc3 monitor on node1 - * Resource action: rsc4 monitor on node2 - * Resource action: rsc4 monitor on node1 - * Resource action: rsc1 start on node1 - * Resource action: rsc2 start on node1 - 
  * Resource action: rsc3 start on node1
-  * Resource action: rsc4 start on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * rsc1 (ocf:pacemaker:Dummy): Started node1
-    * rsc2 (ocf:pacemaker:Dummy): Started node1
-    * rsc3 (ocf:pacemaker:Dummy): Started node1
-    * rsc4 (ocf:pacemaker:Dummy): Started node1
+    * rsc1 (ocf:pacemaker:Dummy): Stopped
+    * rsc2 (ocf:pacemaker:Dummy): Stopped
+    * rsc3 (ocf:pacemaker:Dummy): Stopped
+    * rsc4 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/template-coloc-2.summary b/cts/scheduler/summary/template-coloc-2.summary
index 9d421dd0651..bc4a4025d0a 100644
--- a/cts/scheduler/summary/template-coloc-2.summary
+++ b/cts/scheduler/summary/template-coloc-2.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -15,25 +17,15 @@ Transition Summary:
   * Start rsc4 ( node1 )
 
 Executing Cluster Transition:
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
-  * Resource action: rsc3 monitor on node2
-  * Resource action: rsc3 monitor on node1
-  * Resource action: rsc4 monitor on node2
-  * Resource action: rsc4 monitor on node1
-  * Resource action: rsc1 start on node1
-  * Resource action: rsc2 start on node1
-  * Resource action: rsc3 start on node1
-  * Resource action: rsc4 start on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * rsc1 (ocf:pacemaker:Dummy): Started node1
-    * rsc2 (ocf:pacemaker:Dummy): Started node1
-    * rsc3 (ocf:pacemaker:Dummy): Started node1
-    * rsc4 (ocf:pacemaker:Dummy): Started node1
+    * rsc1 (ocf:pacemaker:Dummy): Stopped
+    * rsc2 (ocf:pacemaker:Dummy): Stopped
+    * rsc3 (ocf:pacemaker:Dummy): Stopped
+    * rsc4 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/template-coloc-3.summary b/cts/scheduler/summary/template-coloc-3.summary
index a7ff63e8dec..48734a8074c 100644
--- a/cts/scheduler/summary/template-coloc-3.summary
+++ b/cts/scheduler/summary/template-coloc-3.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -19,33 +22,18 @@ Transition Summary:
   * Start rsc6 ( node2 )
 
 Executing Cluster Transition:
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
-  * Resource action: rsc3 monitor on node2
-  * Resource action: rsc3 monitor on node1
-  * Resource action: rsc4 monitor on node2
-  * Resource action: rsc4 monitor on node1
-  * Resource action: rsc5 monitor on node2
-  * Resource action: rsc5 monitor on node1
-  * Resource action: rsc6 monitor on node2
-  * Resource action: rsc6 monitor on node1
-  * Resource action: rsc1 start on node1
-  * Resource action: rsc2 start on node2
-  * Resource action: rsc3 start on node1
-  * Resource action: rsc4 start on node2
-  * Resource action: rsc5 start on node1
-  * Resource action: rsc6 start on node2
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * rsc1 (ocf:pacemaker:Dummy): Started node1
-    * rsc2 (ocf:pacemaker:Dummy): Started node2
-    * rsc3 (ocf:pacemaker:Dummy): Started node1
-    * rsc4 (ocf:pacemaker:Dummy): Started node2
-    * rsc5 (ocf:pacemaker:Dummy): Started node1
-    * rsc6 (ocf:pacemaker:Dummy): Started node2
+    * rsc1 (ocf:pacemaker:Dummy): Stopped
+    * rsc2 (ocf:pacemaker:Dummy): Stopped
+    * rsc3 (ocf:pacemaker:Dummy): Stopped
+    * rsc4 (ocf:pacemaker:Dummy): Stopped
+    * rsc5 (ocf:pacemaker:Dummy): Stopped
+    * rsc6 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/template-order-1.summary b/cts/scheduler/summary/template-order-1.summary
index 1b3059c375a..c59409920c2 100644
--- a/cts/scheduler/summary/template-order-1.summary
+++ b/cts/scheduler/summary/template-order-1.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -15,25 +17,15 @@ Transition Summary:
   * Start rsc4 ( node2 )
 
 Executing Cluster Transition:
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
-  * Resource action: rsc3 monitor on node2
-  * Resource action: rsc3 monitor on node1
-  * Resource action: rsc4 monitor on node2
-  * Resource action: rsc4 monitor on node1
-  * Resource action: rsc4 start on node2
-  * Resource action: rsc1 start on node1
-  * Resource action: rsc2 start on node2
-  * Resource action: rsc3 start on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * rsc1 (ocf:pacemaker:Dummy): Started node1
-    * rsc2 (ocf:pacemaker:Dummy): Started node2
-    * rsc3 (ocf:pacemaker:Dummy): Started node1
-    * rsc4 (ocf:pacemaker:Dummy): Started node2
+    * rsc1 (ocf:pacemaker:Dummy): Stopped
+    * rsc2 (ocf:pacemaker:Dummy): Stopped
+    * rsc3 (ocf:pacemaker:Dummy): Stopped
+    * rsc4 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/template-order-2.summary b/cts/scheduler/summary/template-order-2.summary
index 9283ce80666..c59409920c2 100644
--- a/cts/scheduler/summary/template-order-2.summary
+++ b/cts/scheduler/summary/template-order-2.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -15,25 +17,15 @@ Transition Summary:
   * Start rsc4 ( node2 )
 
 Executing Cluster Transition:
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
-  * Resource action: rsc3 monitor on node2
-  * Resource action: rsc3 monitor on node1
-  * Resource action: rsc4 monitor on node2
-  * Resource action: rsc4 monitor on node1
-  * Resource action: rsc2 start on node2
-  * Resource action: rsc3 start on node1
-  * Resource action: rsc4 start on node2
-  * Resource action: rsc1 start on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * rsc1 (ocf:pacemaker:Dummy): Started node1
-    * rsc2 (ocf:pacemaker:Dummy): Started node2
-    * rsc3 (ocf:pacemaker:Dummy): Started node1
-    * rsc4 (ocf:pacemaker:Dummy): Started node2
+    * rsc1 (ocf:pacemaker:Dummy): Stopped
+    * rsc2 (ocf:pacemaker:Dummy): Stopped
+    * rsc3 (ocf:pacemaker:Dummy): Stopped
+    * rsc4 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/template-order-3.summary b/cts/scheduler/summary/template-order-3.summary
index 664b1a66546..454dbf7048f 100644
--- a/cts/scheduler/summary/template-order-3.summary
+++ b/cts/scheduler/summary/template-order-3.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -19,33 +21,17 @@ Transition Summary:
   * Start rsc6 ( node2 )
 
 Executing Cluster Transition:
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
-  * Resource action: rsc3 monitor on node2
-  * Resource action: rsc3 monitor on node1
-  * Resource action: rsc4 monitor on node2
-  * Resource action: rsc4 monitor on node1
-  * Resource action: rsc5 monitor on node2
-  * Resource action: rsc5 monitor on node1
-  * Resource action: rsc6 monitor on node2
-  * Resource action: rsc6 monitor on node1
-  * Resource action: rsc4 start on node2
-  * Resource action: rsc5 start on node1
-  * Resource action: rsc6 start on node2
-  * Resource action: rsc1 start on node1
-  * Resource action: rsc2 start on node2
-  * Resource action: rsc3 start on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * rsc1 (ocf:pacemaker:Dummy): Started node1
-    * rsc2 (ocf:pacemaker:Dummy): Started node2
-    * rsc3 (ocf:pacemaker:Dummy): Started node1
-    * rsc4 (ocf:pacemaker:Dummy): Started node2
-    * rsc5 (ocf:pacemaker:Dummy): Started node1
-    * rsc6 (ocf:pacemaker:Dummy): Started node2
+    * rsc1 (ocf:pacemaker:Dummy): Stopped
+    * rsc2 (ocf:pacemaker:Dummy): Stopped
+    * rsc3 (ocf:pacemaker:Dummy): Stopped
+    * rsc4 (ocf:pacemaker:Dummy): Stopped
+    * rsc5 (ocf:pacemaker:Dummy): Stopped
+    * rsc6 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/template-rsc-sets-1.summary b/cts/scheduler/summary/template-rsc-sets-1.summary
index 8e005c4abb8..0b3e7b4dbc9 100644
--- a/cts/scheduler/summary/template-rsc-sets-1.summary
+++ b/cts/scheduler/summary/template-rsc-sets-1.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -17,29 +19,16 @@ Transition Summary:
   * Start rsc5 ( node1 )
 
 Executing Cluster Transition:
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
-  * Resource action: rsc3 monitor on node2
-  * Resource action: rsc3 monitor on node1
-  * Resource action: rsc4 monitor on node2
-  * Resource action: rsc4 monitor on node1
-  * Resource action: rsc5 monitor on node2
-  * Resource action: rsc5 monitor on node1
-  * Resource action: rsc4 start on node1
-  * Resource action: rsc1 start on node1
-  * Resource action: rsc2 start on node1
-  * Resource action: rsc3 start on node1
-  * Resource action: rsc5 start on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * rsc1 (ocf:pacemaker:Dummy): Started node1
-    * rsc2 (ocf:pacemaker:Dummy): Started node1
-    * rsc3 (ocf:pacemaker:Dummy): Started node1
-    * rsc4 (ocf:pacemaker:Dummy): Started node1
-    * rsc5 (ocf:pacemaker:Dummy): Started node1
+    * rsc1 (ocf:pacemaker:Dummy): Stopped
+    * rsc2 (ocf:pacemaker:Dummy): Stopped
+    * rsc3 (ocf:pacemaker:Dummy): Stopped
+    * rsc4 (ocf:pacemaker:Dummy): Stopped
+    * rsc5 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/template-rsc-sets-2.summary b/cts/scheduler/summary/template-rsc-sets-2.summary
index 8e005c4abb8..0b3e7b4dbc9 100644
--- a/cts/scheduler/summary/template-rsc-sets-2.summary
+++ b/cts/scheduler/summary/template-rsc-sets-2.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -17,29 +19,16 @@ Transition Summary:
   * Start rsc5 ( node1 )
 
 Executing Cluster Transition:
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
-  * Resource action: rsc3 monitor on node2
-  * Resource action: rsc3 monitor on node1
-  * Resource action: rsc4 monitor on node2
-  * Resource action: rsc4 monitor on node1
-  * Resource action: rsc5 monitor on node2
-  * Resource action: rsc5 monitor on node1
-  * Resource action: rsc4 start on node1
-  * Resource action: rsc1 start on node1
-  * Resource action: rsc2 start on node1
-  * Resource action: rsc3 start on node1
-  * Resource action: rsc5 start on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * rsc1 (ocf:pacemaker:Dummy): Started node1
-    * rsc2 (ocf:pacemaker:Dummy): Started node1
-    * rsc3 (ocf:pacemaker:Dummy): Started node1
-    * rsc4 (ocf:pacemaker:Dummy): Started node1
-    * rsc5 (ocf:pacemaker:Dummy): Started node1
+    * rsc1 (ocf:pacemaker:Dummy): Stopped
+    * rsc2 (ocf:pacemaker:Dummy): Stopped
+    * rsc3 (ocf:pacemaker:Dummy): Stopped
+    * rsc4 (ocf:pacemaker:Dummy): Stopped
+    * rsc5 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/template-rsc-sets-3.summary b/cts/scheduler/summary/template-rsc-sets-3.summary
index 8e005c4abb8..0b3e7b4dbc9 100644
--- a/cts/scheduler/summary/template-rsc-sets-3.summary
+++ b/cts/scheduler/summary/template-rsc-sets-3.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -17,29 +19,16 @@ Transition Summary:
   * Start rsc5 ( node1 )
 
 Executing Cluster Transition:
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
-  * Resource action: rsc3 monitor on node2
-  * Resource action: rsc3 monitor on node1
-  * Resource action: rsc4 monitor on node2
-  * Resource action: rsc4 monitor on node1
-  * Resource action: rsc5 monitor on node2
-  * Resource action: rsc5 monitor on node1
-  * Resource action: rsc4 start on node1
-  * Resource action: rsc1 start on node1
-  * Resource action: rsc2 start on node1
-  * Resource action: rsc3 start on node1
-  * Resource action: rsc5 start on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * rsc1 (ocf:pacemaker:Dummy): Started node1
-    * rsc2 (ocf:pacemaker:Dummy): Started node1
-    * rsc3 (ocf:pacemaker:Dummy): Started node1
-    * rsc4 (ocf:pacemaker:Dummy): Started node1
-    * rsc5 (ocf:pacemaker:Dummy): Started node1
+    * rsc1 (ocf:pacemaker:Dummy): Stopped
+    * rsc2 (ocf:pacemaker:Dummy): Stopped
+    * rsc3 (ocf:pacemaker:Dummy): Stopped
+    * rsc4 (ocf:pacemaker:Dummy): Stopped
+    * rsc5 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/template-rsc-sets-4.summary b/cts/scheduler/summary/template-rsc-sets-4.summary
index e74b971cd6b..4c83548e1b7 100644
--- a/cts/scheduler/summary/template-rsc-sets-4.summary
+++ b/cts/scheduler/summary/template-rsc-sets-4.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -10,14 +12,10 @@ Current cluster status:
 Transition Summary:
 
 Executing Cluster Transition:
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
-  * Resource action: rsc3 monitor on node2
-  * Resource action: rsc3 monitor on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/template-ticket.summary b/cts/scheduler/summary/template-ticket.summary
index e74b971cd6b..4c83548e1b7 100644
--- a/cts/scheduler/summary/template-ticket.summary
+++ b/cts/scheduler/summary/template-ticket.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -10,14 +12,10 @@ Current cluster status:
 Transition Summary:
 
 Executing Cluster Transition:
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
-  * Resource action: rsc3 monitor on node2
-  * Resource action: rsc3 monitor on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-clone-1.summary b/cts/scheduler/summary/ticket-clone-1.summary
index f682d7392dd..042be49295c 100644
--- a/cts/scheduler/summary/ticket-clone-1.summary
+++ b/cts/scheduler/summary/ticket-clone-1.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -10,10 +12,10 @@ Current cluster status:
 Transition Summary:
 
 Executing Cluster Transition:
-  * Resource action: rsc1:0 monitor on node2
-  * Resource action: rsc1:0 monitor on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-clone-10.summary b/cts/scheduler/summary/ticket-clone-10.summary
index f682d7392dd..042be49295c 100644
--- a/cts/scheduler/summary/ticket-clone-10.summary
+++ b/cts/scheduler/summary/ticket-clone-10.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -10,10 +12,10 @@ Current cluster status:
 Transition Summary:
 
 Executing Cluster Transition:
-  * Resource action: rsc1:0 monitor on node2
-  * Resource action: rsc1:0 monitor on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-clone-11.summary b/cts/scheduler/summary/ticket-clone-11.summary
index abba11fa1b4..2b80e384a76 100644
--- a/cts/scheduler/summary/ticket-clone-11.summary
+++ b/cts/scheduler/summary/ticket-clone-11.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,18 +14,14 @@ Transition Summary:
   * Start rsc1:1 ( node1 )
 
 Executing Cluster Transition:
-  * Pseudo action: clone1_start_0
-  * Resource action: rsc1:0 start on node2
-  * Resource action: rsc1:1 start on node1
-  * Pseudo action: clone1_running_0
-  * Resource action: rsc1:0 monitor=5000 on node2
-  * Resource action: rsc1:1 monitor=5000 on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: clone1 [rsc1]:
-      * Started: [ node1 node2 ]
+      * Stopped: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-clone-12.summary b/cts/scheduler/summary/ticket-clone-12.summary
index d71f36e0af3..1f9fa6bc73e 100644
--- a/cts/scheduler/summary/ticket-clone-12.summary
+++ b/cts/scheduler/summary/ticket-clone-12.summary
@@ -1,21 +1,27 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: clone1 [rsc1]:
-      * Started: [ node1 node2 ]
+      * rsc1 (ocf:pacemaker:Dummy): Started node1 (blocked)
+      * rsc1 (ocf:pacemaker:Dummy): Started node2 (blocked)
 
 Transition Summary:
 
 Executing Cluster Transition:
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: clone1 [rsc1]:
-      * Started: [ node1 node2 ]
+      * rsc1 (ocf:pacemaker:Dummy): Started node1 (blocked)
+      * rsc1 (ocf:pacemaker:Dummy): Started node2 (blocked)
diff --git a/cts/scheduler/summary/ticket-clone-13.summary b/cts/scheduler/summary/ticket-clone-13.summary
index d3be28c9657..042be49295c 100644
--- a/cts/scheduler/summary/ticket-clone-13.summary
+++ b/cts/scheduler/summary/ticket-clone-13.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,6 +14,8 @@ Transition Summary:
 
 Executing Cluster Transition:
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-clone-14.summary b/cts/scheduler/summary/ticket-clone-14.summary
index 11dbd5cacf1..81cb1b9bc4f 100644
--- a/cts/scheduler/summary/ticket-clone-14.summary
+++ b/cts/scheduler/summary/ticket-clone-14.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,16 +14,14 @@ Transition Summary:
   * Stop rsc1:1 ( node2 ) due to node availability
 
 Executing Cluster Transition:
-  * Pseudo action: clone1_stop_0
-  * Resource action: rsc1:1 stop on node1
-  * Resource action: rsc1:0 stop on node2
-  * Pseudo action: clone1_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: clone1 [rsc1]:
-      * Stopped: [ node1 node2 ]
+      * Started: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-clone-15.summary b/cts/scheduler/summary/ticket-clone-15.summary
index 11dbd5cacf1..81cb1b9bc4f 100644
--- a/cts/scheduler/summary/ticket-clone-15.summary
+++ b/cts/scheduler/summary/ticket-clone-15.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,16 +14,14 @@ Transition Summary:
   * Stop rsc1:1 ( node2 ) due to node availability
 
 Executing Cluster Transition:
-  * Pseudo action: clone1_stop_0
-  * Resource action: rsc1:1 stop on node1
-  * Resource action: rsc1:0 stop on node2
-  * Pseudo action: clone1_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: clone1 [rsc1]:
-      * Stopped: [ node1 node2 ]
+      * Started: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-clone-16.summary b/cts/scheduler/summary/ticket-clone-16.summary
index d3be28c9657..042be49295c 100644
--- a/cts/scheduler/summary/ticket-clone-16.summary
+++ b/cts/scheduler/summary/ticket-clone-16.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,6 +14,8 @@ Transition Summary:
 
 Executing Cluster Transition:
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-clone-17.summary b/cts/scheduler/summary/ticket-clone-17.summary
index 11dbd5cacf1..81cb1b9bc4f 100644
--- a/cts/scheduler/summary/ticket-clone-17.summary
+++ b/cts/scheduler/summary/ticket-clone-17.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,16 +14,14 @@ Transition Summary:
   * Stop rsc1:1 ( node2 ) due to node availability
 
 Executing Cluster Transition:
-  * Pseudo action: clone1_stop_0
-  * Resource action: rsc1:1 stop on node1
-  * Resource action: rsc1:0 stop on node2
-  * Pseudo action: clone1_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: clone1 [rsc1]:
-      * Stopped: [ node1 node2 ]
+      * Started: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-clone-18.summary b/cts/scheduler/summary/ticket-clone-18.summary
index 11dbd5cacf1..81cb1b9bc4f 100644
--- a/cts/scheduler/summary/ticket-clone-18.summary
+++ b/cts/scheduler/summary/ticket-clone-18.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,16 +14,14 @@ Transition Summary:
   * Stop rsc1:1 ( node2 ) due to node availability
 
 Executing Cluster Transition:
-  * Pseudo action: clone1_stop_0
-  * Resource action: rsc1:1 stop on node1
-  * Resource action: rsc1:0 stop on node2
-  * Pseudo action: clone1_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: clone1 [rsc1]:
-      * Stopped: [ node1 node2 ]
+      * Started: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-clone-19.summary b/cts/scheduler/summary/ticket-clone-19.summary
index d3be28c9657..042be49295c 100644
--- a/cts/scheduler/summary/ticket-clone-19.summary
+++ b/cts/scheduler/summary/ticket-clone-19.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,6 +14,8 @@ Transition Summary:
 
 Executing Cluster Transition:
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-clone-2.summary b/cts/scheduler/summary/ticket-clone-2.summary
index abba11fa1b4..2b80e384a76 100644
--- a/cts/scheduler/summary/ticket-clone-2.summary
+++ b/cts/scheduler/summary/ticket-clone-2.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,18 +14,14 @@ Transition Summary:
   * Start rsc1:1 ( node1 )
 
 Executing Cluster Transition:
-  * Pseudo action: clone1_start_0
-  * Resource action: rsc1:0 start on node2
-  * Resource action: rsc1:1 start on node1
-  * Pseudo action: clone1_running_0
-  * Resource action: rsc1:0 monitor=5000 on node2
-  * Resource action: rsc1:1 monitor=5000 on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: clone1 [rsc1]:
-      * Started: [ node1 node2 ]
+      * Stopped: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-clone-20.summary b/cts/scheduler/summary/ticket-clone-20.summary
index 11dbd5cacf1..81cb1b9bc4f 100644
--- a/cts/scheduler/summary/ticket-clone-20.summary
+++ b/cts/scheduler/summary/ticket-clone-20.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,16 +14,14 @@ Transition Summary:
   * Stop rsc1:1 ( node2 ) due to node availability
 
 Executing Cluster Transition:
-  * Pseudo action: clone1_stop_0
-  * Resource action: rsc1:1 stop on node1
-  * Resource action: rsc1:0 stop on node2
-  * Pseudo action: clone1_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: clone1 [rsc1]:
-      * Stopped: [ node1 node2 ]
+      * Started: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-clone-21.summary b/cts/scheduler/summary/ticket-clone-21.summary
index 1dfd9b4319a..01a424b257a 100644
--- a/cts/scheduler/summary/ticket-clone-21.summary
+++ b/cts/scheduler/summary/ticket-clone-21.summary
@@ -1,6 +1,9 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
-    * Online: [ node1 node2 ]
+    * Node node1: UNCLEAN (online)
+    * Node node2: UNCLEAN (online)
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
@@ -15,19 +18,15 @@ Transition Summary:
   * Stop rsc1:1 ( node2 ) due to node availability
 
 Executing Cluster Transition:
-  * Pseudo action: rsc_stonith_stop_0
-  * Fencing node1 (reboot)
-  * Fencing node2 (reboot)
-  * Pseudo action: clone1_stop_0
-  * Pseudo action: rsc1:1_stop_0
-  * Pseudo action: rsc1:0_stop_0
-  * Pseudo action: clone1_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
-    * OFFLINE: [ node1 node2 ]
+    * Node node1: UNCLEAN (online)
+    * Node node2: UNCLEAN (online)
 
   * Full List of Resources:
-    * rsc_stonith (stonith:null): Stopped
+    * rsc_stonith (stonith:null): Started node1
     * Clone Set: clone1 [rsc1]:
-      * Stopped: [ node1 node2 ]
+      * Started: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-clone-22.summary b/cts/scheduler/summary/ticket-clone-22.summary
index d3be28c9657..042be49295c 100644
--- a/cts/scheduler/summary/ticket-clone-22.summary
+++ b/cts/scheduler/summary/ticket-clone-22.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,6 +14,8 @@ Transition Summary:
 
 Executing Cluster Transition:
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-clone-23.summary b/cts/scheduler/summary/ticket-clone-23.summary
index 11dbd5cacf1..81cb1b9bc4f 100644
--- a/cts/scheduler/summary/ticket-clone-23.summary
+++ b/cts/scheduler/summary/ticket-clone-23.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,16 +14,14 @@ Transition Summary:
   * Stop rsc1:1 ( node2 ) due to node availability
 
 Executing Cluster Transition:
-  * Pseudo action: clone1_stop_0
-  * Resource action: rsc1:1 stop on node1
-  * Resource action: rsc1:0 stop on node2
-  * Pseudo action: clone1_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: clone1 [rsc1]:
-      * Stopped: [ node1 node2 ]
+      * Started: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-clone-24.summary b/cts/scheduler/summary/ticket-clone-24.summary
index d71f36e0af3..1f9fa6bc73e 100644
--- a/cts/scheduler/summary/ticket-clone-24.summary
+++ b/cts/scheduler/summary/ticket-clone-24.summary
@@ -1,21 +1,27 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: clone1 [rsc1]:
-      * Started: [ node1 node2 ]
+      * rsc1 (ocf:pacemaker:Dummy): Started node1 (blocked)
+      * rsc1 (ocf:pacemaker:Dummy): Started node2 (blocked)
 
 Transition Summary:
 
 Executing Cluster Transition:
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: clone1 [rsc1]:
-      * Started: [ node1 node2 ]
+      * rsc1 (ocf:pacemaker:Dummy): Started node1 (blocked)
+      * rsc1 (ocf:pacemaker:Dummy): Started node2 (blocked)
diff --git a/cts/scheduler/summary/ticket-clone-3.summary b/cts/scheduler/summary/ticket-clone-3.summary
index 11dbd5cacf1..81cb1b9bc4f 100644
--- a/cts/scheduler/summary/ticket-clone-3.summary
+++ b/cts/scheduler/summary/ticket-clone-3.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,16 +14,14 @@ Transition Summary:
   * Stop rsc1:1 ( node2 ) due to node availability
 
 Executing Cluster Transition:
-  * Pseudo action: clone1_stop_0
-  * Resource action: rsc1:1 stop on node1
-  * Resource action: rsc1:0 stop on node2
-  * Pseudo action: clone1_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: clone1 [rsc1]:
-      * Stopped: [ node1 node2 ]
+      * Started: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-clone-4.summary b/cts/scheduler/summary/ticket-clone-4.summary
index f682d7392dd..042be49295c 100644
--- a/cts/scheduler/summary/ticket-clone-4.summary
+++ b/cts/scheduler/summary/ticket-clone-4.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -10,10 +12,10 @@ Current cluster status:
 Transition Summary:
 
 Executing Cluster Transition:
-  * Resource action: rsc1:0 monitor on node2
-  * Resource action: rsc1:0 monitor on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-clone-5.summary b/cts/scheduler/summary/ticket-clone-5.summary
index abba11fa1b4..2b80e384a76 100644
--- a/cts/scheduler/summary/ticket-clone-5.summary
+++ b/cts/scheduler/summary/ticket-clone-5.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,18 +14,14 @@ Transition Summary:
   * Start rsc1:1 ( node1 )
 
 Executing Cluster Transition:
-  * Pseudo action: clone1_start_0
-  * Resource action: rsc1:0 start on node2
-  * Resource action: rsc1:1 start on node1
-  * Pseudo action: clone1_running_0
-  * Resource action: rsc1:0 monitor=5000 on node2
-  * Resource action: rsc1:1 monitor=5000 on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: clone1 [rsc1]:
-      * Started: [ node1 node2 ]
+      * Stopped: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-clone-6.summary b/cts/scheduler/summary/ticket-clone-6.summary
index 11dbd5cacf1..81cb1b9bc4f 100644
--- a/cts/scheduler/summary/ticket-clone-6.summary
+++ b/cts/scheduler/summary/ticket-clone-6.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,16 +14,14 @@ Transition Summary:
   * Stop rsc1:1 ( node2 ) due to node availability
 
 Executing Cluster Transition:
-  * Pseudo action: clone1_stop_0
-  * Resource action: rsc1:1 stop on node1
-  * Resource action: rsc1:0 stop on node2
-  * Pseudo action: clone1_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: clone1 [rsc1]:
-      * Stopped: [ node1 node2 ]
+      * Started: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-clone-7.summary b/cts/scheduler/summary/ticket-clone-7.summary
index f682d7392dd..042be49295c 100644
--- a/cts/scheduler/summary/ticket-clone-7.summary
+++ b/cts/scheduler/summary/ticket-clone-7.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -10,10 +12,10 @@ Current cluster status:
 Transition Summary:
 
 Executing Cluster Transition:
-  * Resource action: rsc1:0 monitor on node2
-  * Resource action: rsc1:0 monitor on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-clone-8.summary b/cts/scheduler/summary/ticket-clone-8.summary
index abba11fa1b4..2b80e384a76 100644
--- a/cts/scheduler/summary/ticket-clone-8.summary
+++ b/cts/scheduler/summary/ticket-clone-8.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,18 +14,14 @@ Transition Summary:
   * Start rsc1:1 ( node1 )
 
 Executing Cluster Transition:
-  * Pseudo action: clone1_start_0
-  * Resource action: rsc1:0 start on node2
-  * Resource action: rsc1:1 start on node1
-  * Pseudo action: clone1_running_0
-  * Resource action: rsc1:0 monitor=5000 on node2
-  * Resource action: rsc1:1 monitor=5000 on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: clone1 [rsc1]:
-      * Started: [ node1 node2 ]
+      * Stopped: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-clone-9.summary b/cts/scheduler/summary/ticket-clone-9.summary
index 1dfd9b4319a..01a424b257a 100644
--- a/cts/scheduler/summary/ticket-clone-9.summary
+++ b/cts/scheduler/summary/ticket-clone-9.summary
@@ -1,6 +1,9 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
-    * Online: [ node1 node2 ]
+    * Node node1: UNCLEAN (online)
+    * Node node2: UNCLEAN (online)
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
@@ -15,19 +18,15 @@ Transition Summary:
   * Stop rsc1:1 ( node2 ) due to node availability
 
 Executing Cluster Transition:
-  * Pseudo action: rsc_stonith_stop_0
-  * Fencing node1 (reboot)
-  * Fencing node2 (reboot)
-  * Pseudo action: clone1_stop_0
-  * Pseudo action: rsc1:1_stop_0
-  * Pseudo action: rsc1:0_stop_0
-  * Pseudo action: clone1_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
-    * OFFLINE: [ node1 node2 ]
+    * Node node1: UNCLEAN (online)
+    * Node node2: UNCLEAN (online)
 
   * Full List of Resources:
-    * rsc_stonith (stonith:null): Stopped
+    * rsc_stonith (stonith:null): Started node1
     * Clone Set: clone1 [rsc1]:
-      * Stopped: [ node1 node2 ]
+      * Started: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-group-1.summary b/cts/scheduler/summary/ticket-group-1.summary
index 4db96ef573b..e2cebd223f7 100644
--- a/cts/scheduler/summary/ticket-group-1.summary
+++ b/cts/scheduler/summary/ticket-group-1.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -11,12 +13,10 @@ Current cluster status:
 Transition Summary:
 
 Executing Cluster Transition:
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-group-10.summary b/cts/scheduler/summary/ticket-group-10.summary
index 4db96ef573b..e2cebd223f7 100644
--- a/cts/scheduler/summary/ticket-group-10.summary
+++ b/cts/scheduler/summary/ticket-group-10.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -11,12 +13,10 @@ Current cluster status:
 Transition Summary:
 
 Executing Cluster Transition:
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-group-11.summary b/cts/scheduler/summary/ticket-group-11.summary
index 23516953fb7..2269b2f0e8b 100644
--- a/cts/scheduler/summary/ticket-group-11.summary
+++ b/cts/scheduler/summary/ticket-group-11.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -13,19 +15,15 @@ Transition Summary:
   * Start rsc2 ( node2 )
 
 Executing Cluster Transition:
-  * Pseudo action: group1_start_0
-  * Resource action: rsc1 start on node2
-  * Resource action: rsc2 start on node2
-  * Pseudo action: group1_running_0
-  * Resource action: rsc1 monitor=5000 on node2
-  * Resource action: rsc2 monitor=5000 on node2
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Resource Group: group1:
-      * rsc1 (ocf:pacemaker:Dummy): Started node2
-      * rsc2 (ocf:pacemaker:Dummy): Started node2
+      * rsc1 (ocf:pacemaker:Dummy): Stopped
+      * rsc2 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/ticket-group-12.summary b/cts/scheduler/summary/ticket-group-12.summary
index 322b79f34f9..7520d174db1 100644
--- a/cts/scheduler/summary/ticket-group-12.summary
+++ b/cts/scheduler/summary/ticket-group-12.summary
@@ -1,23 +1,27 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Resource Group: group1:
-      * rsc1 (ocf:pacemaker:Dummy): Started node2
-      * rsc2 (ocf:pacemaker:Dummy): Started node2
+      * rsc1 (ocf:pacemaker:Dummy): Started node2 (blocked)
+      * rsc2 (ocf:pacemaker:Dummy): Started node2 (blocked)
 
 Transition Summary:
 
 Executing Cluster Transition:
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Resource Group: group1:
-      * rsc1 (ocf:pacemaker:Dummy): Started node2
-      * rsc2 (ocf:pacemaker:Dummy): Started node2
+      * rsc1 (ocf:pacemaker:Dummy): Started node2 (blocked)
+      * rsc2 (ocf:pacemaker:Dummy): Started node2 (blocked)
diff --git a/cts/scheduler/summary/ticket-group-13.summary b/cts/scheduler/summary/ticket-group-13.summary
index 378dda4b5e4..e2cebd223f7 100644
--- a/cts/scheduler/summary/ticket-group-13.summary
+++ b/cts/scheduler/summary/ticket-group-13.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -13,6 +15,8 @@ Transition Summary:
 
 Executing Cluster Transition:
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-group-14.summary b/cts/scheduler/summary/ticket-group-14.summary
index 72f746469a2..e437239cb10 100644
--- a/cts/scheduler/summary/ticket-group-14.summary
+++ b/cts/scheduler/summary/ticket-group-14.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -11,19 +13,19 @@ Current cluster status:
 Transition Summary:
   * Stop rsc1 ( node2 ) due to node availability
   * Stop rsc2 ( node2 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Pseudo action: group1_stop_0
-  * Resource action: rsc2 stop on node2
-  * Resource action: rsc1 stop on node2
-  * Pseudo action: group1_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Resource Group: group1:
-      * rsc1 (ocf:pacemaker:Dummy): Stopped
-      * rsc2 (ocf:pacemaker:Dummy): Stopped
+      * rsc1 (ocf:pacemaker:Dummy): Started node2
+      * rsc2 (ocf:pacemaker:Dummy): Started node2
diff --git a/cts/scheduler/summary/ticket-group-15.summary b/cts/scheduler/summary/ticket-group-15.summary
index 72f746469a2..e437239cb10 100644
--- a/cts/scheduler/summary/ticket-group-15.summary
+++ b/cts/scheduler/summary/ticket-group-15.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -11,19 +13,19 @@ Current cluster status:
 Transition Summary:
   * Stop rsc1 ( node2 ) due to node availability
   * Stop rsc2 ( node2 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Pseudo action: group1_stop_0
-  * Resource action: rsc2 stop on node2
-  * Resource action: rsc1 stop on node2
-  * Pseudo action: group1_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Resource Group: group1:
-      * rsc1 (ocf:pacemaker:Dummy): Stopped
-      * rsc2 (ocf:pacemaker:Dummy): Stopped
+      * rsc1 (ocf:pacemaker:Dummy): Started node2
+      * rsc2 (ocf:pacemaker:Dummy): Started node2
diff --git a/cts/scheduler/summary/ticket-group-16.summary b/cts/scheduler/summary/ticket-group-16.summary
index 378dda4b5e4..e2cebd223f7 100644
--- a/cts/scheduler/summary/ticket-group-16.summary
+++ b/cts/scheduler/summary/ticket-group-16.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -13,6 +15,8 @@ Transition Summary:
 
 Executing Cluster Transition:
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-group-17.summary b/cts/scheduler/summary/ticket-group-17.summary
index 72f746469a2..e437239cb10 100644
--- a/cts/scheduler/summary/ticket-group-17.summary
+++ b/cts/scheduler/summary/ticket-group-17.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -11,19 +13,19 @@ Current cluster status:
 Transition Summary:
   * Stop rsc1 ( node2 ) due to node availability
   * Stop rsc2 ( node2 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Pseudo action: group1_stop_0
-  * Resource action: rsc2 stop on node2
-  * Resource action: rsc1 stop on node2
-  * Pseudo action: group1_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Resource Group: group1:
-      * rsc1 (ocf:pacemaker:Dummy): Stopped
-      * rsc2 (ocf:pacemaker:Dummy): Stopped
+      * rsc1 (ocf:pacemaker:Dummy): Started node2
+      * rsc2 (ocf:pacemaker:Dummy): Started node2
diff --git a/cts/scheduler/summary/ticket-group-18.summary b/cts/scheduler/summary/ticket-group-18.summary
index 72f746469a2..e437239cb10 100644
--- a/cts/scheduler/summary/ticket-group-18.summary
+++ b/cts/scheduler/summary/ticket-group-18.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -11,19 +13,19 @@ Current cluster status: Transition Summary: * Stop rsc1 ( node2 ) due to node availability * Stop rsc2 ( node2 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Pseudo action: group1_stop_0 - * Resource action: rsc2 stop on node2 - * Resource action: rsc1 stop on node2 - * Pseudo action: group1_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Resource Group: group1: - * rsc1 (ocf:pacemaker:Dummy): Stopped - * rsc2 (ocf:pacemaker:Dummy): Stopped + * rsc1 (ocf:pacemaker:Dummy): Started node2 + * rsc2 (ocf:pacemaker:Dummy): Started node2 diff --git a/cts/scheduler/summary/ticket-group-19.summary b/cts/scheduler/summary/ticket-group-19.summary index 378dda4b5e4..e2cebd223f7 100644 --- a/cts/scheduler/summary/ticket-group-19.summary +++ b/cts/scheduler/summary/ticket-group-19.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -13,6 +15,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-group-2.summary b/cts/scheduler/summary/ticket-group-2.summary index 23516953fb7..2269b2f0e8b 100644 --- a/cts/scheduler/summary/ticket-group-2.summary +++ b/cts/scheduler/summary/ticket-group-2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -13,19 +15,15 @@ Transition Summary: * Start rsc2 ( node2 ) Executing Cluster Transition: - * Pseudo action: group1_start_0 - * Resource action: rsc1 start on node2 - * Resource action: rsc2 start on node2 - * Pseudo action: group1_running_0 - * Resource action: rsc1 monitor=5000 on node2 - * Resource action: rsc2 monitor=5000 on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Resource Group: group1: - * rsc1 (ocf:pacemaker:Dummy): Started node2 - * rsc2 (ocf:pacemaker:Dummy): Started node2 + * rsc1 (ocf:pacemaker:Dummy): Stopped + * rsc2 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/ticket-group-20.summary b/cts/scheduler/summary/ticket-group-20.summary index 72f746469a2..e437239cb10 100644 --- a/cts/scheduler/summary/ticket-group-20.summary +++ b/cts/scheduler/summary/ticket-group-20.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -11,19 +13,19 @@ Current cluster status: Transition Summary: * Stop rsc1 ( node2 ) due to node availability * Stop rsc2 ( node2 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Pseudo action: group1_stop_0 - * Resource action: rsc2 stop on node2 - * Resource action: rsc1 stop on node2 - * Pseudo action: group1_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Resource Group: group1: - * rsc1 (ocf:pacemaker:Dummy): Stopped - * rsc2 (ocf:pacemaker:Dummy): Stopped + * rsc1 (ocf:pacemaker:Dummy): Started node2 + * rsc2 (ocf:pacemaker:Dummy): 
Started node2 diff --git a/cts/scheduler/summary/ticket-group-21.summary b/cts/scheduler/summary/ticket-group-21.summary index 19880d909be..313012697a3 100644 --- a/cts/scheduler/summary/ticket-group-21.summary +++ b/cts/scheduler/summary/ticket-group-21.summary @@ -1,6 +1,9 @@ Current cluster status: + * Cluster Summary: + * Node List: - * Online: [ node1 node2 ] + * Node node2: UNCLEAN (online) + * Online: [ node1 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 @@ -12,21 +15,20 @@ Transition Summary: * Fence (reboot) node2 'deadman ticket was lost' * Stop rsc1 ( node2 ) due to node availability * Stop rsc2 ( node2 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Fencing node2 (reboot) - * Pseudo action: group1_stop_0 - * Pseudo action: rsc2_stop_0 - * Pseudo action: rsc1_stop_0 - * Pseudo action: group1_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node node2: UNCLEAN (online) * Online: [ node1 ] - * OFFLINE: [ node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Resource Group: group1: - * rsc1 (ocf:pacemaker:Dummy): Stopped - * rsc2 (ocf:pacemaker:Dummy): Stopped + * rsc1 (ocf:pacemaker:Dummy): Started node2 + * rsc2 (ocf:pacemaker:Dummy): Started node2 diff --git a/cts/scheduler/summary/ticket-group-22.summary b/cts/scheduler/summary/ticket-group-22.summary index 378dda4b5e4..e2cebd223f7 100644 --- a/cts/scheduler/summary/ticket-group-22.summary +++ b/cts/scheduler/summary/ticket-group-22.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -13,6 +15,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-group-23.summary b/cts/scheduler/summary/ticket-group-23.summary index 72f746469a2..e437239cb10 100644 --- a/cts/scheduler/summary/ticket-group-23.summary +++ b/cts/scheduler/summary/ticket-group-23.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -11,19 +13,19 @@ Current cluster status: Transition Summary: * Stop rsc1 ( node2 ) due to node availability * Stop rsc2 ( node2 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Pseudo action: group1_stop_0 - * Resource action: rsc2 stop on node2 - * Resource action: rsc1 stop on node2 - * Pseudo action: group1_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Resource Group: group1: - * rsc1 (ocf:pacemaker:Dummy): Stopped - * rsc2 (ocf:pacemaker:Dummy): Stopped + * rsc1 (ocf:pacemaker:Dummy): Started node2 + * rsc2 (ocf:pacemaker:Dummy): Started node2 diff --git a/cts/scheduler/summary/ticket-group-24.summary b/cts/scheduler/summary/ticket-group-24.summary index 322b79f34f9..7520d174db1 100644 --- a/cts/scheduler/summary/ticket-group-24.summary +++ b/cts/scheduler/summary/ticket-group-24.summary @@ -1,23 +1,27 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Resource Group: group1: - * rsc1 (ocf:pacemaker:Dummy): Started node2 - * rsc2 (ocf:pacemaker:Dummy): Started node2 + * rsc1 (ocf:pacemaker:Dummy): Started node2 (blocked) + * rsc2 
(ocf:pacemaker:Dummy): Started node2 (blocked) Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Resource Group: group1: - * rsc1 (ocf:pacemaker:Dummy): Started node2 - * rsc2 (ocf:pacemaker:Dummy): Started node2 + * rsc1 (ocf:pacemaker:Dummy): Started node2 (blocked) + * rsc2 (ocf:pacemaker:Dummy): Started node2 (blocked) diff --git a/cts/scheduler/summary/ticket-group-3.summary b/cts/scheduler/summary/ticket-group-3.summary index 72f746469a2..e437239cb10 100644 --- a/cts/scheduler/summary/ticket-group-3.summary +++ b/cts/scheduler/summary/ticket-group-3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -11,19 +13,19 @@ Current cluster status: Transition Summary: * Stop rsc1 ( node2 ) due to node availability * Stop rsc2 ( node2 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Pseudo action: group1_stop_0 - * Resource action: rsc2 stop on node2 - * Resource action: rsc1 stop on node2 - * Pseudo action: group1_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Resource Group: group1: - * rsc1 (ocf:pacemaker:Dummy): Stopped - * rsc2 (ocf:pacemaker:Dummy): Stopped + * rsc1 (ocf:pacemaker:Dummy): Started node2 + * rsc2 (ocf:pacemaker:Dummy): Started node2 diff --git a/cts/scheduler/summary/ticket-group-4.summary b/cts/scheduler/summary/ticket-group-4.summary index 4db96ef573b..e2cebd223f7 100644 --- a/cts/scheduler/summary/ticket-group-4.summary +++ b/cts/scheduler/summary/ticket-group-4.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -11,12 +13,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc2 monitor on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-group-5.summary b/cts/scheduler/summary/ticket-group-5.summary index 23516953fb7..2269b2f0e8b 100644 --- a/cts/scheduler/summary/ticket-group-5.summary +++ b/cts/scheduler/summary/ticket-group-5.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -13,19 +15,15 @@ Transition Summary: * Start rsc2 ( node2 ) Executing Cluster Transition: - * Pseudo action: group1_start_0 - * Resource action: rsc1 start on node2 - * Resource action: rsc2 start on node2 - * Pseudo action: group1_running_0 - * Resource action: rsc1 monitor=5000 on node2 - * Resource action: rsc2 monitor=5000 on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Resource Group: group1: - * rsc1 (ocf:pacemaker:Dummy): Started node2 - * rsc2 (ocf:pacemaker:Dummy): Started node2 + * rsc1 (ocf:pacemaker:Dummy): Stopped + * rsc2 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/ticket-group-6.summary b/cts/scheduler/summary/ticket-group-6.summary index 72f746469a2..e437239cb10 100644 --- a/cts/scheduler/summary/ticket-group-6.summary +++ 
b/cts/scheduler/summary/ticket-group-6.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -11,19 +13,19 @@ Current cluster status: Transition Summary: * Stop rsc1 ( node2 ) due to node availability * Stop rsc2 ( node2 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Pseudo action: group1_stop_0 - * Resource action: rsc2 stop on node2 - * Resource action: rsc1 stop on node2 - * Pseudo action: group1_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Resource Group: group1: - * rsc1 (ocf:pacemaker:Dummy): Stopped - * rsc2 (ocf:pacemaker:Dummy): Stopped + * rsc1 (ocf:pacemaker:Dummy): Started node2 + * rsc2 (ocf:pacemaker:Dummy): Started node2 diff --git a/cts/scheduler/summary/ticket-group-7.summary b/cts/scheduler/summary/ticket-group-7.summary index 4db96ef573b..e2cebd223f7 100644 --- a/cts/scheduler/summary/ticket-group-7.summary +++ b/cts/scheduler/summary/ticket-group-7.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -11,12 +13,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc2 monitor on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-group-8.summary b/cts/scheduler/summary/ticket-group-8.summary index 23516953fb7..2269b2f0e8b 100644 --- a/cts/scheduler/summary/ticket-group-8.summary +++ b/cts/scheduler/summary/ticket-group-8.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -13,19 +15,15 @@ Transition Summary: * Start rsc2 ( node2 ) Executing Cluster Transition: - * Pseudo action: group1_start_0 - * Resource action: rsc1 start on node2 - * Resource action: rsc2 start on node2 - * Pseudo action: group1_running_0 - * Resource action: rsc1 monitor=5000 on node2 - * Resource action: rsc2 monitor=5000 on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Resource Group: group1: - * rsc1 (ocf:pacemaker:Dummy): Started node2 - * rsc2 (ocf:pacemaker:Dummy): Started node2 + * rsc1 (ocf:pacemaker:Dummy): Stopped + * rsc2 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/ticket-group-9.summary b/cts/scheduler/summary/ticket-group-9.summary index 19880d909be..313012697a3 100644 --- a/cts/scheduler/summary/ticket-group-9.summary +++ b/cts/scheduler/summary/ticket-group-9.summary @@ -1,6 +1,9 @@ Current cluster status: + * Cluster Summary: + * Node List: - * Online: [ node1 node2 ] + * Node node2: UNCLEAN (online) + * Online: [ node1 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 @@ -12,21 +15,20 @@ Transition Summary: * Fence (reboot) node2 'deadman ticket was lost' * Stop rsc1 ( node2 ) due to node availability * Stop rsc2 ( node2 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Fencing node2 (reboot) - * Pseudo action: group1_stop_0 - * Pseudo action: rsc2_stop_0 - * Pseudo action: rsc1_stop_0 - * Pseudo action: 
group1_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node node2: UNCLEAN (online) * Online: [ node1 ] - * OFFLINE: [ node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Resource Group: group1: - * rsc1 (ocf:pacemaker:Dummy): Stopped - * rsc2 (ocf:pacemaker:Dummy): Stopped + * rsc1 (ocf:pacemaker:Dummy): Started node2 + * rsc2 (ocf:pacemaker:Dummy): Started node2 diff --git a/cts/scheduler/summary/ticket-primitive-1.summary b/cts/scheduler/summary/ticket-primitive-1.summary index 80e49e9321e..27301f40837 100644 --- a/cts/scheduler/summary/ticket-primitive-1.summary +++ b/cts/scheduler/summary/ticket-primitive-1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -9,10 +11,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-primitive-10.summary b/cts/scheduler/summary/ticket-primitive-10.summary index 80e49e9321e..27301f40837 100644 --- a/cts/scheduler/summary/ticket-primitive-10.summary +++ b/cts/scheduler/summary/ticket-primitive-10.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -9,10 +11,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-primitive-11.summary b/cts/scheduler/summary/ticket-primitive-11.summary index cb38b43e470..6013c0ad580 100644 --- a/cts/scheduler/summary/ticket-primitive-11.summary +++ b/cts/scheduler/summary/ticket-primitive-11.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -10,13 +12,13 @@ Transition Summary: * Start rsc1 ( node2 ) Executing Cluster Transition: - * Resource action: rsc1 start on node2 - * Resource action: rsc1 monitor=10000 on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 - * rsc1 (ocf:pacemaker:Dummy): Started node2 + * rsc1 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/ticket-primitive-12.summary b/cts/scheduler/summary/ticket-primitive-12.summary index fc3d40deb83..311864fcfa6 100644 --- a/cts/scheduler/summary/ticket-primitive-12.summary +++ b/cts/scheduler/summary/ticket-primitive-12.summary @@ -1,19 +1,23 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 - * rsc1 (ocf:pacemaker:Dummy): Started node2 + * rsc1 (ocf:pacemaker:Dummy): Started node2 (blocked) Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 - * rsc1 (ocf:pacemaker:Dummy): Started node2 + * rsc1 (ocf:pacemaker:Dummy): Started node2 (blocked) diff --git a/cts/scheduler/summary/ticket-primitive-13.summary b/cts/scheduler/summary/ticket-primitive-13.summary index 3ba6f11d1b8..27301f40837 100644 --- a/cts/scheduler/summary/ticket-primitive-13.summary +++ 
b/cts/scheduler/summary/ticket-primitive-13.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -11,6 +13,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-primitive-14.summary b/cts/scheduler/summary/ticket-primitive-14.summary index f28cec301f4..7dc7f3be648 100644 --- a/cts/scheduler/summary/ticket-primitive-14.summary +++ b/cts/scheduler/summary/ticket-primitive-14.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -10,12 +12,13 @@ Transition Summary: * Stop rsc1 ( node2 ) due to node availability Executing Cluster Transition: - * Resource action: rsc1 stop on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 - * rsc1 (ocf:pacemaker:Dummy): Stopped + * rsc1 (ocf:pacemaker:Dummy): Started node2 diff --git a/cts/scheduler/summary/ticket-primitive-15.summary b/cts/scheduler/summary/ticket-primitive-15.summary index f28cec301f4..7dc7f3be648 100644 --- a/cts/scheduler/summary/ticket-primitive-15.summary +++ b/cts/scheduler/summary/ticket-primitive-15.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -10,12 +12,13 @@ Transition Summary: * Stop rsc1 ( node2 ) due to node availability Executing Cluster Transition: - * Resource action: rsc1 stop on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 - * rsc1 (ocf:pacemaker:Dummy): Stopped + * rsc1 (ocf:pacemaker:Dummy): Started node2 diff --git a/cts/scheduler/summary/ticket-primitive-16.summary b/cts/scheduler/summary/ticket-primitive-16.summary index 3ba6f11d1b8..27301f40837 100644 --- a/cts/scheduler/summary/ticket-primitive-16.summary +++ b/cts/scheduler/summary/ticket-primitive-16.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -11,6 +13,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-primitive-17.summary b/cts/scheduler/summary/ticket-primitive-17.summary index f28cec301f4..7dc7f3be648 100644 --- a/cts/scheduler/summary/ticket-primitive-17.summary +++ b/cts/scheduler/summary/ticket-primitive-17.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -10,12 +12,13 @@ Transition Summary: * Stop rsc1 ( node2 ) due to node availability Executing Cluster Transition: - * Resource action: rsc1 stop on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 - * rsc1 (ocf:pacemaker:Dummy): Stopped + * rsc1 (ocf:pacemaker:Dummy): Started node2 diff --git a/cts/scheduler/summary/ticket-primitive-18.summary b/cts/scheduler/summary/ticket-primitive-18.summary index f28cec301f4..7dc7f3be648 100644 --- a/cts/scheduler/summary/ticket-primitive-18.summary +++ b/cts/scheduler/summary/ticket-primitive-18.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -10,12 +12,13 @@ Transition Summary: * Stop rsc1 ( node2 
) due to node availability Executing Cluster Transition: - * Resource action: rsc1 stop on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 - * rsc1 (ocf:pacemaker:Dummy): Stopped + * rsc1 (ocf:pacemaker:Dummy): Started node2 diff --git a/cts/scheduler/summary/ticket-primitive-19.summary b/cts/scheduler/summary/ticket-primitive-19.summary index 3ba6f11d1b8..27301f40837 100644 --- a/cts/scheduler/summary/ticket-primitive-19.summary +++ b/cts/scheduler/summary/ticket-primitive-19.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -11,6 +13,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-primitive-2.summary b/cts/scheduler/summary/ticket-primitive-2.summary index cb38b43e470..6013c0ad580 100644 --- a/cts/scheduler/summary/ticket-primitive-2.summary +++ b/cts/scheduler/summary/ticket-primitive-2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -10,13 +12,13 @@ Transition Summary: * Start rsc1 ( node2 ) Executing Cluster Transition: - * Resource action: rsc1 start on node2 - * Resource action: rsc1 monitor=10000 on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 - * rsc1 (ocf:pacemaker:Dummy): Started node2 + * rsc1 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/ticket-primitive-20.summary b/cts/scheduler/summary/ticket-primitive-20.summary index f28cec301f4..7dc7f3be648 100644 --- a/cts/scheduler/summary/ticket-primitive-20.summary +++ b/cts/scheduler/summary/ticket-primitive-20.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -10,12 +12,13 @@ Transition Summary: * Stop rsc1 ( node2 ) due to node availability Executing Cluster Transition: - * Resource action: rsc1 stop on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 - * rsc1 (ocf:pacemaker:Dummy): Stopped + * rsc1 (ocf:pacemaker:Dummy): Started node2 diff --git a/cts/scheduler/summary/ticket-primitive-21.summary b/cts/scheduler/summary/ticket-primitive-21.summary index ba8d8cbed9c..66aa087ca8d 100644 --- a/cts/scheduler/summary/ticket-primitive-21.summary +++ b/cts/scheduler/summary/ticket-primitive-21.summary @@ -1,6 +1,9 @@ Current cluster status: + * Cluster Summary: + * Node List: - * Online: [ node1 node2 ] + * Node node2: UNCLEAN (online) + * Online: [ node1 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 @@ -11,14 +14,14 @@ Transition Summary: * Stop rsc1 ( node2 ) due to node availability Executing Cluster Transition: - * Fencing node2 (reboot) - * Pseudo action: rsc1_stop_0 Revised Cluster Status: + * Cluster Summary: + * Node List: + * Node node2: UNCLEAN (online) * Online: [ node1 ] - * OFFLINE: [ node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 - * rsc1 (ocf:pacemaker:Dummy): Stopped + * rsc1 (ocf:pacemaker:Dummy): Started node2 diff --git a/cts/scheduler/summary/ticket-primitive-22.summary b/cts/scheduler/summary/ticket-primitive-22.summary index 3ba6f11d1b8..27301f40837 100644 --- 
+++ b/cts/scheduler/summary/ticket-primitive-22.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -11,6 +13,8 @@ Transition Summary:
 
 Executing Cluster Transition:
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-primitive-23.summary b/cts/scheduler/summary/ticket-primitive-23.summary
index f28cec301f4..7dc7f3be648 100644
--- a/cts/scheduler/summary/ticket-primitive-23.summary
+++ b/cts/scheduler/summary/ticket-primitive-23.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -10,12 +12,13 @@ Transition Summary:
   * Stop rsc1 ( node2 ) due to node availability
 
 Executing Cluster Transition:
-  * Resource action: rsc1 stop on node2
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
-    * rsc1 (ocf:pacemaker:Dummy): Stopped
+    * rsc1 (ocf:pacemaker:Dummy): Started node2
diff --git a/cts/scheduler/summary/ticket-primitive-24.summary b/cts/scheduler/summary/ticket-primitive-24.summary
index fc3d40deb83..311864fcfa6 100644
--- a/cts/scheduler/summary/ticket-primitive-24.summary
+++ b/cts/scheduler/summary/ticket-primitive-24.summary
@@ -1,19 +1,23 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
-    * rsc1 (ocf:pacemaker:Dummy): Started node2
+    * rsc1 (ocf:pacemaker:Dummy): Started node2 (blocked)
 
 Transition Summary:
 
 Executing Cluster Transition:
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
-    * rsc1 (ocf:pacemaker:Dummy): Started node2
+    * rsc1 (ocf:pacemaker:Dummy): Started node2 (blocked)
diff --git a/cts/scheduler/summary/ticket-primitive-3.summary b/cts/scheduler/summary/ticket-primitive-3.summary
index f28cec301f4..7dc7f3be648 100644
--- a/cts/scheduler/summary/ticket-primitive-3.summary
+++ b/cts/scheduler/summary/ticket-primitive-3.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -10,12 +12,13 @@ Transition Summary:
   * Stop rsc1 ( node2 ) due to node availability
 
 Executing Cluster Transition:
-  * Resource action: rsc1 stop on node2
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
-    * rsc1 (ocf:pacemaker:Dummy): Stopped
+    * rsc1 (ocf:pacemaker:Dummy): Started node2
diff --git a/cts/scheduler/summary/ticket-primitive-4.summary b/cts/scheduler/summary/ticket-primitive-4.summary
index 80e49e9321e..27301f40837 100644
--- a/cts/scheduler/summary/ticket-primitive-4.summary
+++ b/cts/scheduler/summary/ticket-primitive-4.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -9,10 +11,10 @@ Current cluster status:
 Transition Summary:
 
 Executing Cluster Transition:
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
diff --git a/cts/scheduler/summary/ticket-primitive-5.summary b/cts/scheduler/summary/ticket-primitive-5.summary
index cb38b43e470..6013c0ad580 100644
--- a/cts/scheduler/summary/ticket-primitive-5.summary
+++ b/cts/scheduler/summary/ticket-primitive-5.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -10,13 +12,13 @@ Transition Summary:
   * Start rsc1 ( node2 )
 
 Executing Cluster Transition:
-  * Resource action: rsc1 start on node2
-  * Resource action: rsc1 monitor=10000 on node2
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
-    * rsc1 (ocf:pacemaker:Dummy): Started node2
+    * rsc1 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/ticket-primitive-6.summary b/cts/scheduler/summary/ticket-primitive-6.summary
index f28cec301f4..7dc7f3be648 100644
--- a/cts/scheduler/summary/ticket-primitive-6.summary
+++ b/cts/scheduler/summary/ticket-primitive-6.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -10,12 +12,13 @@ Transition Summary:
   * Stop rsc1 ( node2 ) due to node availability
 
 Executing Cluster Transition:
-  * Resource action: rsc1 stop on node2
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
-    * rsc1 (ocf:pacemaker:Dummy): Stopped
+    * rsc1 (ocf:pacemaker:Dummy): Started node2
diff --git a/cts/scheduler/summary/ticket-primitive-7.summary b/cts/scheduler/summary/ticket-primitive-7.summary
index 80e49e9321e..27301f40837 100644
--- a/cts/scheduler/summary/ticket-primitive-7.summary
+++ b/cts/scheduler/summary/ticket-primitive-7.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -9,10 +11,10 @@ Current cluster status:
 Transition Summary:
 
 Executing Cluster Transition:
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
diff --git a/cts/scheduler/summary/ticket-primitive-8.summary b/cts/scheduler/summary/ticket-primitive-8.summary
index cb38b43e470..6013c0ad580 100644
--- a/cts/scheduler/summary/ticket-primitive-8.summary
+++ b/cts/scheduler/summary/ticket-primitive-8.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -10,13 +12,13 @@ Transition Summary:
   * Start rsc1 ( node2 )
 
 Executing Cluster Transition:
-  * Resource action: rsc1 start on node2
-  * Resource action: rsc1 monitor=10000 on node2
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
-    * rsc1 (ocf:pacemaker:Dummy): Started node2
+    * rsc1 (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/ticket-primitive-9.summary b/cts/scheduler/summary/ticket-primitive-9.summary
index ba8d8cbed9c..66aa087ca8d 100644
--- a/cts/scheduler/summary/ticket-primitive-9.summary
+++ b/cts/scheduler/summary/ticket-primitive-9.summary
@@ -1,6 +1,9 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
-    * Online: [ node1 node2 ]
+    * Node node2: UNCLEAN (online)
+    * Online: [ node1 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
@@ -11,14 +14,14 @@ Transition Summary:
   * Stop rsc1 ( node2 ) due to node availability
 
 Executing Cluster Transition:
-  * Fencing node2 (reboot)
-  * Pseudo action: rsc1_stop_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
+    * Node node2: UNCLEAN (online)
     * Online: [ node1 ]
-    * OFFLINE: [ node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
-    * rsc1 (ocf:pacemaker:Dummy): Stopped
+    * rsc1 (ocf:pacemaker:Dummy): Started node2
diff --git a/cts/scheduler/summary/ticket-promoted-1.summary b/cts/scheduler/summary/ticket-promoted-1.summary
index 6bc13645dfb..40934c93447 100644
--- a/cts/scheduler/summary/ticket-promoted-1.summary
+++ b/cts/scheduler/summary/ticket-promoted-1.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -10,10 +13,11 @@ Current cluster status:
 Transition Summary:
 
 Executing Cluster Transition:
-  * Resource action: rsc1:0 monitor on node2
-  * Resource action: rsc1:0 monitor on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
diff --git a/cts/scheduler/summary/ticket-promoted-10.summary b/cts/scheduler/summary/ticket-promoted-10.summary
index eab3d91008b..df403161aaf 100644
--- a/cts/scheduler/summary/ticket-promoted-10.summary
+++ b/cts/scheduler/summary/ticket-promoted-10.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,18 +15,15 @@ Transition Summary:
   * Start rsc1:1 ( node1 )
 
 Executing Cluster Transition:
-  * Resource action: rsc1:0 monitor on node2
-  * Resource action: rsc1:1 monitor on node1
-  * Pseudo action: ms1_start_0
-  * Resource action: rsc1:0 start on node2
-  * Resource action: rsc1:1 start on node1
-  * Pseudo action: ms1_running_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: ms1 [rsc1] (promotable):
-      * Unpromoted: [ node1 node2 ]
+      * Stopped: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-11.summary b/cts/scheduler/summary/ticket-promoted-11.summary
index 381603997eb..716a94ae53c 100644
--- a/cts/scheduler/summary/ticket-promoted-11.summary
+++ b/cts/scheduler/summary/ticket-promoted-11.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -11,16 +14,15 @@ Transition Summary:
   * Promote rsc1:0 ( Unpromoted -> Promoted node1 )
 
 Executing Cluster Transition:
-  * Pseudo action: ms1_promote_0
-  * Resource action: rsc1:1 promote on node1
-  * Pseudo action: ms1_promoted_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: ms1 [rsc1] (promotable):
-      * Promoted: [ node1 ]
-      * Unpromoted: [ node2 ]
+      * Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-12.summary b/cts/scheduler/summary/ticket-promoted-12.summary
index b51c277faf7..126702a9ede 100644
--- a/cts/scheduler/summary/ticket-promoted-12.summary
+++ b/cts/scheduler/summary/ticket-promoted-12.summary
@@ -1,11 +1,14 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: ms1 [rsc1] (promotable):
-      * Promoted: [ node1 ]
+      * rsc1 (ocf:pacemaker:Stateful): Promoted node1 (blocked)
       * Unpromoted: [ node2 ]
 
 Transition Summary:
@@ -13,11 +16,14 @@
 Executing Cluster Transition:
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: ms1 [rsc1] (promotable):
-      * Promoted: [ node1 ]
+      * rsc1 (ocf:pacemaker:Stateful): Promoted node1 (blocked)
       * Unpromoted: [ node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-13.summary b/cts/scheduler/summary/ticket-promoted-13.summary
index 6b5d14a64dd..40934c93447 100644
--- a/cts/scheduler/summary/ticket-promoted-13.summary
+++ b/cts/scheduler/summary/ticket-promoted-13.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,6 +15,9 @@ Transition Summary:
 
 Executing Cluster Transition:
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-14.summary b/cts/scheduler/summary/ticket-promoted-14.summary
index ee8912b2e97..3c5d9d1e44b 100644
--- a/cts/scheduler/summary/ticket-promoted-14.summary
+++ b/cts/scheduler/summary/ticket-promoted-14.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -13,19 +16,16 @@ Transition Summary:
   * Stop rsc1:1 ( Unpromoted node2 ) due to node availability
 
 Executing Cluster Transition:
-  * Pseudo action: ms1_demote_0
-  * Resource action: rsc1:1 demote on node1
-  * Pseudo action: ms1_demoted_0
-  * Pseudo action: ms1_stop_0
-  * Resource action: rsc1:1 stop on node1
-  * Resource action: rsc1:0 stop on node2
-  * Pseudo action: ms1_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: ms1 [rsc1] (promotable):
-      * Stopped: [ node1 node2 ]
+      * Promoted: [ node1 ]
+      * Unpromoted: [ node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-15.summary b/cts/scheduler/summary/ticket-promoted-15.summary
index ee8912b2e97..3c5d9d1e44b 100644
--- a/cts/scheduler/summary/ticket-promoted-15.summary
+++ b/cts/scheduler/summary/ticket-promoted-15.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -13,19 +16,16 @@ Transition Summary:
   * Stop rsc1:1 ( Unpromoted node2 ) due to node availability
 
 Executing Cluster Transition:
-  * Pseudo action: ms1_demote_0
-  * Resource action: rsc1:1 demote on node1
-  * Pseudo action: ms1_demoted_0
-  * Pseudo action: ms1_stop_0
-  * Resource action: rsc1:1 stop on node1
-  * Resource action: rsc1:0 stop on node2
-  * Pseudo action: ms1_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: ms1 [rsc1] (promotable):
-      * Stopped: [ node1 node2 ]
+      * Promoted: [ node1 ]
+      * Unpromoted: [ node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-16.summary b/cts/scheduler/summary/ticket-promoted-16.summary
index 851e54ebd50..335c77d6874 100644
--- a/cts/scheduler/summary/ticket-promoted-16.summary
+++ b/cts/scheduler/summary/ticket-promoted-16.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,6 +15,9 @@ Transition Summary:
 
 Executing Cluster Transition:
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-17.summary b/cts/scheduler/summary/ticket-promoted-17.summary
index ee25f92c4e2..46d91ec9f9c 100644
--- a/cts/scheduler/summary/ticket-promoted-17.summary
+++ b/cts/scheduler/summary/ticket-promoted-17.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,15 +15,16 @@ Transition Summary:
   * Demote rsc1:0 ( Promoted -> Unpromoted node1 )
 
 Executing Cluster Transition:
-  * Pseudo action: ms1_demote_0
-  * Resource action: rsc1:1 demote on node1
-  * Pseudo action: ms1_demoted_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: ms1 [rsc1] (promotable):
-      * Unpromoted: [ node1 node2 ]
+      * Promoted: [ node1 ]
+      * Unpromoted: [ node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-18.summary b/cts/scheduler/summary/ticket-promoted-18.summary
index ee25f92c4e2..46d91ec9f9c 100644
--- a/cts/scheduler/summary/ticket-promoted-18.summary
+++ b/cts/scheduler/summary/ticket-promoted-18.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,15 +15,16 @@ Transition Summary:
   * Demote rsc1:0 ( Promoted -> Unpromoted node1 )
 
 Executing Cluster Transition:
-  * Pseudo action: ms1_demote_0
-  * Resource action: rsc1:1 demote on node1
-  * Pseudo action: ms1_demoted_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: ms1 [rsc1] (promotable):
-      * Unpromoted: [ node1 node2 ]
+      * Promoted: [ node1 ]
+      * Unpromoted: [ node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-19.summary b/cts/scheduler/summary/ticket-promoted-19.summary
index 851e54ebd50..335c77d6874 100644
--- a/cts/scheduler/summary/ticket-promoted-19.summary
+++ b/cts/scheduler/summary/ticket-promoted-19.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,6 +15,9 @@ Transition Summary:
 
 Executing Cluster Transition:
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-2.summary b/cts/scheduler/summary/ticket-promoted-2.summary
index dc67f96156b..9d5992a19ed 100644
--- a/cts/scheduler/summary/ticket-promoted-2.summary
+++ b/cts/scheduler/summary/ticket-promoted-2.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,20 +15,15 @@ Transition Summary:
   * Promote rsc1:1 ( Stopped -> Promoted node1 )
 
 Executing Cluster Transition:
-  * Pseudo action: ms1_start_0
-  * Resource action: rsc1:0 start on node2
-  * Resource action: rsc1:1 start on node1
-  * Pseudo action: ms1_running_0
-  * Pseudo action: ms1_promote_0
-  * Resource action: rsc1:1 promote on node1
-  * Pseudo action: ms1_promoted_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: ms1 [rsc1] (promotable):
-      * Promoted: [ node1 ]
-      * Unpromoted: [ node2 ]
+      * Stopped: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-20.summary b/cts/scheduler/summary/ticket-promoted-20.summary
index ee25f92c4e2..46d91ec9f9c 100644
--- a/cts/scheduler/summary/ticket-promoted-20.summary
+++ b/cts/scheduler/summary/ticket-promoted-20.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,15 +15,16 @@ Transition Summary:
   * Demote rsc1:0 ( Promoted -> Unpromoted node1 )
 
 Executing Cluster Transition:
-  * Pseudo action: ms1_demote_0
-  * Resource action: rsc1:1 demote on node1
-  * Pseudo action: ms1_demoted_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: ms1 [rsc1] (promotable):
-      * Unpromoted: [ node1 node2 ]
+      * Promoted: [ node1 ]
+      * Unpromoted: [ node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-21.summary b/cts/scheduler/summary/ticket-promoted-21.summary
index f116a2eea0b..177a9e3050b 100644
--- a/cts/scheduler/summary/ticket-promoted-21.summary
+++ b/cts/scheduler/summary/ticket-promoted-21.summary
@@ -1,6 +1,10 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
-    * Online: [ node1 node2 ]
+    * Node node1: UNCLEAN (online)
+    * Online: [ node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
@@ -12,25 +16,21 @@ Transition Summary:
   * Fence (reboot) node1 'deadman ticket was lost'
   * Move rsc_stonith ( node1 -> node2 )
   * Stop rsc1:0 ( Promoted node1 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Pseudo action: rsc_stonith_stop_0
-  * Pseudo action: ms1_demote_0
-  * Fencing node1 (reboot)
-  * Resource action: rsc_stonith start on node2
-  * Pseudo action: rsc1:1_demote_0
-  * Pseudo action: ms1_demoted_0
-  * Pseudo action: ms1_stop_0
-  * Pseudo action: rsc1:1_stop_0
-  * Pseudo action: ms1_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
+    * Node node1: UNCLEAN (online)
     * Online: [ node2 ]
-    * OFFLINE: [ node1 ]
 
   * Full List of Resources:
-    * rsc_stonith (stonith:null): Started node2
+    * rsc_stonith (stonith:null): Started node1
     * Clone Set: ms1 [rsc1] (promotable):
+      * Promoted: [ node1 ]
       * Unpromoted: [ node2 ]
-      * Stopped: [ node1 ]
diff --git a/cts/scheduler/summary/ticket-promoted-22.summary b/cts/scheduler/summary/ticket-promoted-22.summary
index 851e54ebd50..335c77d6874 100644
--- a/cts/scheduler/summary/ticket-promoted-22.summary
+++ b/cts/scheduler/summary/ticket-promoted-22.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,6 +15,9 @@ Transition Summary:
 
 Executing Cluster Transition:
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-23.summary b/cts/scheduler/summary/ticket-promoted-23.summary
index ee25f92c4e2..46d91ec9f9c 100644
--- a/cts/scheduler/summary/ticket-promoted-23.summary
+++ b/cts/scheduler/summary/ticket-promoted-23.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,15 +15,16 @@ Transition Summary:
   * Demote rsc1:0 ( Promoted -> Unpromoted node1 )
 
 Executing Cluster Transition:
-  * Pseudo action: ms1_demote_0
-  * Resource action: rsc1:1 demote on node1
-  * Pseudo action: ms1_demoted_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: ms1 [rsc1] (promotable):
-      * Unpromoted: [ node1 node2 ]
+      * Promoted: [ node1 ]
+      * Unpromoted: [ node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-24.summary b/cts/scheduler/summary/ticket-promoted-24.summary
index b51c277faf7..126702a9ede 100644
--- a/cts/scheduler/summary/ticket-promoted-24.summary
+++ b/cts/scheduler/summary/ticket-promoted-24.summary
@@ -1,11 +1,14 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: ms1 [rsc1] (promotable):
-      * Promoted: [ node1 ]
+      * rsc1 (ocf:pacemaker:Stateful): Promoted node1 (blocked)
       * Unpromoted: [ node2 ]
 
 Transition Summary:
@@ -13,11 +16,14 @@
 Executing Cluster Transition:
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: ms1 [rsc1] (promotable):
-      * Promoted: [ node1 ]
+      * rsc1 (ocf:pacemaker:Stateful): Promoted node1 (blocked)
       * Unpromoted: [ node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-3.summary b/cts/scheduler/summary/ticket-promoted-3.summary
index ee8912b2e97..3c5d9d1e44b 100644
--- a/cts/scheduler/summary/ticket-promoted-3.summary
+++ b/cts/scheduler/summary/ticket-promoted-3.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -13,19 +16,16 @@ Transition Summary:
   * Stop rsc1:1 ( Unpromoted node2 ) due to node availability
 
 Executing Cluster Transition:
-  * Pseudo action: ms1_demote_0
-  * Resource action: rsc1:1 demote on node1
-  * Pseudo action: ms1_demoted_0
-  * Pseudo action: ms1_stop_0
-  * Resource action: rsc1:1 stop on node1
-  * Resource action: rsc1:0 stop on node2
-  * Pseudo action: ms1_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: ms1 [rsc1] (promotable):
-      * Stopped: [ node1 node2 ]
+      * Promoted: [ node1 ]
+      * Unpromoted: [ node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-4.summary b/cts/scheduler/summary/ticket-promoted-4.summary
index eab3d91008b..df403161aaf 100644
--- a/cts/scheduler/summary/ticket-promoted-4.summary
+++ b/cts/scheduler/summary/ticket-promoted-4.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,18 +15,15 @@ Transition Summary:
   * Start rsc1:1 ( node1 )
 
 Executing Cluster Transition:
-  * Resource action: rsc1:0 monitor on node2
-  * Resource action: rsc1:1 monitor on node1
-  * Pseudo action: ms1_start_0
-  * Resource action: rsc1:0 start on node2
-  * Resource action: rsc1:1 start on node1
-  * Pseudo action: ms1_running_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: ms1 [rsc1] (promotable):
-      * Unpromoted: [ node1 node2 ]
+      * Stopped: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-5.summary b/cts/scheduler/summary/ticket-promoted-5.summary
index 381603997eb..716a94ae53c 100644
--- a/cts/scheduler/summary/ticket-promoted-5.summary
+++ b/cts/scheduler/summary/ticket-promoted-5.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -11,16 +14,15 @@ Transition Summary:
   * Promote rsc1:0 ( Unpromoted -> Promoted node1 )
 
 Executing Cluster Transition:
-  * Pseudo action: ms1_promote_0
-  * Resource action: rsc1:1 promote on node1
-  * Pseudo action: ms1_promoted_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: ms1 [rsc1] (promotable):
-      * Promoted: [ node1 ]
-      * Unpromoted: [ node2 ]
+      * Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-6.summary b/cts/scheduler/summary/ticket-promoted-6.summary
index ee25f92c4e2..46d91ec9f9c 100644
--- a/cts/scheduler/summary/ticket-promoted-6.summary
+++ b/cts/scheduler/summary/ticket-promoted-6.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,15 +15,16 @@ Transition Summary:
   * Demote rsc1:0 ( Promoted -> Unpromoted node1 )
 
 Executing Cluster Transition:
-  * Pseudo action: ms1_demote_0
-  * Resource action: rsc1:1 demote on node1
-  * Pseudo action: ms1_demoted_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: ms1 [rsc1] (promotable):
-      * Unpromoted: [ node1 node2 ]
+      * Promoted: [ node1 ]
+      * Unpromoted: [ node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-7.summary b/cts/scheduler/summary/ticket-promoted-7.summary
index eab3d91008b..df403161aaf 100644
--- a/cts/scheduler/summary/ticket-promoted-7.summary
+++ b/cts/scheduler/summary/ticket-promoted-7.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -12,18 +15,15 @@ Transition Summary:
   * Start rsc1:1 ( node1 )
 
 Executing Cluster Transition:
-  * Resource action: rsc1:0 monitor on node2
-  * Resource action: rsc1:1 monitor on node1
-  * Pseudo action: ms1_start_0
-  * Resource action: rsc1:0 start on node2
-  * Resource action: rsc1:1 start on node1
-  * Pseudo action: ms1_running_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: ms1 [rsc1] (promotable):
-      * Unpromoted: [ node1 node2 ]
+      * Stopped: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-8.summary b/cts/scheduler/summary/ticket-promoted-8.summary
index 381603997eb..716a94ae53c 100644
--- a/cts/scheduler/summary/ticket-promoted-8.summary
+++ b/cts/scheduler/summary/ticket-promoted-8.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -11,16 +14,15 @@ Transition Summary:
   * Promote rsc1:0 ( Unpromoted -> Promoted node1 )
 
 Executing Cluster Transition:
-  * Pseudo action: ms1_promote_0
-  * Resource action: rsc1:1 promote on node1
-  * Pseudo action: ms1_promoted_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
     * Clone Set: ms1 [rsc1] (promotable):
-      * Promoted: [ node1 ]
-      * Unpromoted: [ node2 ]
+      * Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-9.summary b/cts/scheduler/summary/ticket-promoted-9.summary
index f116a2eea0b..177a9e3050b 100644
--- a/cts/scheduler/summary/ticket-promoted-9.summary
+++ b/cts/scheduler/summary/ticket-promoted-9.summary
@@ -1,6 +1,10 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
-    * Online: [ node1 node2 ]
+    * Node node1: UNCLEAN (online)
+    * Online: [ node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
@@ -12,25 +16,21 @@ Transition Summary:
   * Fence (reboot) node1 'deadman ticket was lost'
   * Move rsc_stonith ( node1 -> node2 )
   * Stop rsc1:0 ( Promoted node1 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Pseudo action: rsc_stonith_stop_0
-  * Pseudo action: ms1_demote_0
-  * Fencing node1 (reboot)
-  * Resource action: rsc_stonith start on node2
-  * Pseudo action: rsc1:1_demote_0
-  * Pseudo action: ms1_demoted_0
-  * Pseudo action: ms1_stop_0
-  * Pseudo action: rsc1:1_stop_0
-  * Pseudo action: ms1_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
+    * Node node1: UNCLEAN (online)
     * Online: [ node2 ]
-    * OFFLINE: [ node1 ]
 
   * Full List of Resources:
-    * rsc_stonith (stonith:null): Started node2
+    * rsc_stonith (stonith:null): Started node1
     * Clone Set: ms1 [rsc1] (promotable):
+      * Promoted: [ node1 ]
       * Unpromoted: [ node2 ]
-      * Stopped: [ node1 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-1.summary b/cts/scheduler/summary/ticket-rsc-sets-1.summary
index d119ce5176e..e30d810b670 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-1.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-1.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -18,22 +21,11 @@ Transition Summary:
   * Start rsc5:1 ( node1 )
 
 Executing Cluster Transition:
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
-  * Resource action: rsc3 monitor on node2
-  * Resource action: rsc3 monitor on node1
-  * Resource action: rsc4:0 monitor on node2
-  * Resource action: rsc4:0 monitor on node1
-  * Resource action: rsc5:0 monitor on node2
-  * Resource action: rsc5:1 monitor on node1
-  * Pseudo action: ms5_start_0
-  * Resource action: rsc5:0 start on node2
-  * Resource action: rsc5:1 start on node1
-  * Pseudo action: ms5_running_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -46,4 +38,4 @@ Revised Cluster Status:
     * Clone Set: clone4 [rsc4]:
       * Stopped: [ node1 node2 ]
     * Clone Set: ms5 [rsc5] (promotable):
-      * Unpromoted: [ node1 node2 ]
+      * Stopped: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-10.summary b/cts/scheduler/summary/ticket-rsc-sets-10.summary
index 3bc9d648ac3..0a6ff56ada3 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-10.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-10.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -21,32 +24,26 @@ Transition Summary:
   * Stop rsc4:0 ( node1 ) due to node availability
   * Stop rsc4:1 ( node2 ) due to node availability
   * Demote rsc5:0 ( Promoted -> Unpromoted node1 )
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Resource action: rsc1 stop on node2
-  * Pseudo action: group2_stop_0
-  * Resource action: rsc3 stop on node1
-  * Pseudo action: clone4_stop_0
-  * Pseudo action: ms5_demote_0
-  * Resource action: rsc2 stop on node1
-  * Resource action: rsc4:1 stop on node1
-  * Resource action: rsc4:0 stop on node2
-  * Pseudo action: clone4_stopped_0
-  * Resource action: rsc5:1 demote on node1
-  * Pseudo action: ms5_demoted_0
-  * Pseudo action: group2_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
-    * rsc1 (ocf:pacemaker:Dummy): Stopped
+    * rsc1 (ocf:pacemaker:Dummy): Started node2
     * Resource Group: group2:
-      * rsc2 (ocf:pacemaker:Dummy): Stopped
-      * rsc3 (ocf:pacemaker:Dummy): Stopped
+      * rsc2 (ocf:pacemaker:Dummy): Started node1
+      * rsc3 (ocf:pacemaker:Dummy): Started node1
     * Clone Set: clone4 [rsc4]:
-      * Stopped: [ node1 node2 ]
+      * Started: [ node1 node2 ]
    
 * Clone Set: ms5 [rsc5] (promotable):
-      * Unpromoted: [ node1 node2 ]
+      * Promoted: [ node1 ]
+      * Unpromoted: [ node2 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-11.summary b/cts/scheduler/summary/ticket-rsc-sets-11.summary
index 03153aa264b..1d94dcfb69b 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-11.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-11.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -18,6 +21,9 @@ Transition Summary:
 
 Executing Cluster Transition:
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-12.summary b/cts/scheduler/summary/ticket-rsc-sets-12.summary
index 68e0827f78b..aeaad927f3a 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-12.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-12.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -17,24 +20,24 @@ Transition Summary:
   * Stop rsc1 ( node2 ) due to node availability
   * Stop rsc2 ( node1 ) due to node availability
   * Stop rsc3 ( node1 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Resource action: rsc1 stop on node2
-  * Pseudo action: group2_stop_0
-  * Resource action: rsc3 stop on node1
-  * Resource action: rsc2 stop on node1
-  * Pseudo action: group2_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
-    * rsc1 (ocf:pacemaker:Dummy): Stopped
+    * rsc1 (ocf:pacemaker:Dummy): Started node2
     * Resource Group: group2:
-      * rsc2 (ocf:pacemaker:Dummy): Stopped
-      * rsc3 (ocf:pacemaker:Dummy): Stopped
+      * rsc2 (ocf:pacemaker:Dummy): Started node1
+      * rsc3 (ocf:pacemaker:Dummy): Started node1
     * Clone Set: clone4 [rsc4]:
       * Stopped: [ node1 node2 ]
     * Clone Set: ms5 [rsc5] (promotable):
diff --git a/cts/scheduler/summary/ticket-rsc-sets-13.summary b/cts/scheduler/summary/ticket-rsc-sets-13.summary
index 3bc9d648ac3..0a6ff56ada3 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-13.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-13.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -21,32 +24,26 @@ Transition Summary:
   * Stop rsc4:0 ( node1 ) due to node availability
   * Stop rsc4:1 ( node2 ) due to node availability
   * Demote rsc5:0 ( Promoted -> Unpromoted node1 )
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Resource action: rsc1 stop on node2
-  * Pseudo action: group2_stop_0
-  * Resource action: rsc3 stop on node1
-  * Pseudo action: clone4_stop_0
-  * Pseudo action: ms5_demote_0
-  * Resource action: rsc2 stop on node1
-  * Resource action: rsc4:1 stop on node1
-  * Resource action: rsc4:0 stop on node2
-  * Pseudo action: clone4_stopped_0
-  * Resource action: rsc5:1 demote on node1
-  * Pseudo action: ms5_demoted_0
-  * Pseudo action: group2_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
-    * rsc1 (ocf:pacemaker:Dummy): Stopped
+    * rsc1 (ocf:pacemaker:Dummy): Started node2
    
 * Resource Group: group2:
-      * rsc2 (ocf:pacemaker:Dummy): Stopped
-      * rsc3 (ocf:pacemaker:Dummy): Stopped
+      * rsc2 (ocf:pacemaker:Dummy): Started node1
+      * rsc3 (ocf:pacemaker:Dummy): Started node1
     * Clone Set: clone4 [rsc4]:
-      * Stopped: [ node1 node2 ]
+      * Started: [ node1 node2 ]
     * Clone Set: ms5 [rsc5] (promotable):
-      * Unpromoted: [ node1 node2 ]
+      * Promoted: [ node1 ]
+      * Unpromoted: [ node2 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-14.summary b/cts/scheduler/summary/ticket-rsc-sets-14.summary
index 3bc9d648ac3..0a6ff56ada3 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-14.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-14.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -21,32 +24,26 @@ Transition Summary:
   * Stop rsc4:0 ( node1 ) due to node availability
   * Stop rsc4:1 ( node2 ) due to node availability
   * Demote rsc5:0 ( Promoted -> Unpromoted node1 )
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Resource action: rsc1 stop on node2
-  * Pseudo action: group2_stop_0
-  * Resource action: rsc3 stop on node1
-  * Pseudo action: clone4_stop_0
-  * Pseudo action: ms5_demote_0
-  * Resource action: rsc2 stop on node1
-  * Resource action: rsc4:1 stop on node1
-  * Resource action: rsc4:0 stop on node2
-  * Pseudo action: clone4_stopped_0
-  * Resource action: rsc5:1 demote on node1
-  * Pseudo action: ms5_demoted_0
-  * Pseudo action: group2_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
-    * rsc1 (ocf:pacemaker:Dummy): Stopped
+    * rsc1 (ocf:pacemaker:Dummy): Started node2
     * Resource Group: group2:
-      * rsc2 (ocf:pacemaker:Dummy): Stopped
-      * rsc3 (ocf:pacemaker:Dummy): Stopped
+      * rsc2 (ocf:pacemaker:Dummy): Started node1
+      * rsc3 (ocf:pacemaker:Dummy): Started node1
     * Clone Set: clone4 [rsc4]:
-      * Stopped: [ node1 node2 ]
+      * Started: [ node1 node2 ]
     * Clone Set: ms5 [rsc5] (promotable):
-      * Unpromoted: [ node1 node2 ]
+      * Promoted: [ node1 ]
+      * Unpromoted: [ node2 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-2.summary b/cts/scheduler/summary/ticket-rsc-sets-2.summary
index fccf3cad1ba..1caeb506ecb 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-2.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-2.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -22,36 +25,21 @@ Transition Summary:
   * Promote rsc5:0 ( Unpromoted -> Promoted node1 )
 
 Executing Cluster Transition:
-  * Resource action: rsc1 start on node2
-  * Pseudo action: group2_start_0
-  * Resource action: rsc2 start on node1
-  * Resource action: rsc3 start on node1
-  * Pseudo action: clone4_start_0
-  * Pseudo action: ms5_promote_0
-  * Resource action: rsc1 monitor=10000 on node2
-  * Pseudo action: group2_running_0
-  * Resource action: rsc2 monitor=5000 on node1
-  * Resource action: rsc3 monitor=5000 on node1
-  * Resource action: rsc4:0 start on node2
-  * Resource action: rsc4:1 start on node1
-  * Pseudo action: clone4_running_0
-  * Resource action: rsc5:1 promote on node1
-  * Pseudo action: ms5_promoted_0
-  * Resource action: rsc4:0 monitor=5000 on node2
-  * Resource action: rsc4:1 monitor=5000 on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
-    * rsc1 (ocf:pacemaker:Dummy): Started node2
+    * rsc1 (ocf:pacemaker:Dummy): Stopped
     * Resource Group: group2:
-      * rsc2 (ocf:pacemaker:Dummy): Started node1
-      * rsc3 (ocf:pacemaker:Dummy): Started node1
+      * rsc2 (ocf:pacemaker:Dummy): Stopped
+      * rsc3 (ocf:pacemaker:Dummy): Stopped
     * Clone Set: clone4 [rsc4]:
-      * Started: [ node1 node2 ]
+      * Stopped: [ node1 node2 ]
     * Clone Set: ms5 [rsc5] (promotable):
-      * Promoted: [ node1 ]
-      * Unpromoted: [ node2 ]
+      * Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-3.summary b/cts/scheduler/summary/ticket-rsc-sets-3.summary
index 3bc9d648ac3..0a6ff56ada3 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-3.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-3.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -21,32 +24,26 @@ Transition Summary:
   * Stop rsc4:0 ( node1 ) due to node availability
   * Stop rsc4:1 ( node2 ) due to node availability
   * Demote rsc5:0 ( Promoted -> Unpromoted node1 )
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Resource action: rsc1 stop on node2
-  * Pseudo action: group2_stop_0
-  * Resource action: rsc3 stop on node1
-  * Pseudo action: clone4_stop_0
-  * Pseudo action: ms5_demote_0
-  * Resource action: rsc2 stop on node1
-  * Resource action: rsc4:1 stop on node1
-  * Resource action: rsc4:0 stop on node2
-  * Pseudo action: clone4_stopped_0
-  * Resource action: rsc5:1 demote on node1
-  * Pseudo action: ms5_demoted_0
-  * Pseudo action: group2_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
-    * rsc1 (ocf:pacemaker:Dummy): Stopped
+    * rsc1 (ocf:pacemaker:Dummy): Started node2
     * Resource Group: group2:
-      * rsc2 (ocf:pacemaker:Dummy): Stopped
-      * rsc3 (ocf:pacemaker:Dummy): Stopped
+      * rsc2 (ocf:pacemaker:Dummy): Started node1
+      * rsc3 (ocf:pacemaker:Dummy): Started node1
     * Clone Set: clone4 [rsc4]:
-      * Stopped: [ node1 node2 ]
+      * Started: [ node1 node2 ]
     * Clone Set: ms5 [rsc5] (promotable):
-      * Unpromoted: [ node1 node2 ]
+      * Promoted: [ node1 ]
+      * Unpromoted: [ node2 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-4.summary b/cts/scheduler/summary/ticket-rsc-sets-4.summary
index d119ce5176e..e30d810b670 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-4.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-4.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -18,22 +21,11 @@ Transition Summary:
   * Start rsc5:1 ( node1 )
 
 Executing Cluster Transition:
-  * Resource action: rsc1 monitor on node2
-  * Resource action: rsc1 monitor on node1
-  * Resource action: rsc2 monitor on node2
-  * Resource action: rsc2 monitor on node1
-  * Resource action: rsc3 monitor on node2
-  * Resource action: rsc3 monitor on node1
-  * Resource action: rsc4:0 monitor on node2
-  * Resource action: rsc4:0 monitor on node1
-  * Resource action: rsc5:0 monitor on node2
-  * Resource action: rsc5:1 monitor on node1
-  * Pseudo action: ms5_start_0
-  * Resource action: rsc5:0 start on node2
-  * Resource action: rsc5:1 start on node1
-  * Pseudo action: ms5_running_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -46,4 +38,4 @@ Revised Cluster Status:
     * Clone Set: clone4 [rsc4]:
       * Stopped: [ node1 node2 ]
     * Clone Set: ms5 [rsc5] (promotable):
-      * Unpromoted: [ node1 node2 ]
+      * Stopped: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-5.summary b/cts/scheduler/summary/ticket-rsc-sets-5.summary
index 217243a7b25..67153acb00f 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-5.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-5.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -19,25 +22,20 @@ Transition Summary:
   * Start rsc3 ( node1 )
 
 Executing Cluster Transition:
-  * Resource action: rsc1 start on node2
-  * Pseudo action: group2_start_0
-  * Resource action: rsc2 start on node1
-  * Resource action: rsc3 start on node1
-  * Resource action: rsc1 monitor=10000 on node2
-  * Pseudo action: group2_running_0
-  * Resource action: rsc2 monitor=5000 on node1
-  * Resource action: rsc3 monitor=5000 on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
-    * rsc1 (ocf:pacemaker:Dummy): Started node2
+    * rsc1 (ocf:pacemaker:Dummy): Stopped
     * Resource Group: group2:
-      * rsc2 (ocf:pacemaker:Dummy): Started node1
-      * rsc3 (ocf:pacemaker:Dummy): Started node1
+      * rsc2 (ocf:pacemaker:Dummy): Stopped
+      * rsc3 (ocf:pacemaker:Dummy): Stopped
     * Clone Set: clone4 [rsc4]:
       * Stopped: [ node1 node2 ]
     * Clone Set: ms5 [rsc5] (promotable):
diff --git a/cts/scheduler/summary/ticket-rsc-sets-6.summary b/cts/scheduler/summary/ticket-rsc-sets-6.summary
index 7336f70db30..1ec10415ade 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-6.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-6.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -19,17 +22,11 @@ Transition Summary:
   * Promote rsc5:0 ( Unpromoted -> Promoted node1 )
 
 Executing Cluster Transition:
-  * Pseudo action: clone4_start_0
-  * Pseudo action: ms5_promote_0
-  * Resource action: rsc4:0 start on node2
-  * Resource action: rsc4:1 start on node1
-  * Pseudo action: clone4_running_0
-  * Resource action: rsc5:1 promote on node1
-  * Pseudo action: ms5_promoted_0
-  * Resource action: rsc4:0 monitor=5000 on node2
-  * Resource action: rsc4:1 monitor=5000 on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -40,7 +37,6 @@ Revised Cluster Status:
       * rsc2 (ocf:pacemaker:Dummy): Started node1
       * rsc3 (ocf:pacemaker:Dummy): Started node1
     * Clone Set: clone4 [rsc4]:
-      * Started: [ node1 node2 ]
+      * Stopped: [ node1 node2 ]
     * Clone Set: ms5 [rsc5] (promotable):
-      * Promoted: [ node1 ]
-      * Unpromoted: [ node2 ]
+      * Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-7.summary b/cts/scheduler/summary/ticket-rsc-sets-7.summary
index 3bc9d648ac3..0a6ff56ada3 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-7.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-7.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -21,32 +24,26 @@ Transition Summary:
   * Stop rsc4:0 ( node1 ) due to node availability
   * Stop rsc4:1 ( node2 ) due to node availability
   * Demote rsc5:0 ( Promoted -> Unpromoted node1 )
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Resource action: rsc1 stop on node2
-  * Pseudo action: group2_stop_0
-  * Resource action: rsc3 stop on node1
-  * Pseudo action: clone4_stop_0
-  * Pseudo action: ms5_demote_0
-  * Resource action: rsc2 stop on node1
-  * Resource action: rsc4:1 stop on node1
-  * Resource action: rsc4:0 stop on node2
-  * Pseudo action: clone4_stopped_0
-  * Resource action: rsc5:1 demote on node1
-  * Pseudo action: ms5_demoted_0
-  * Pseudo action: group2_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
-    * rsc1 (ocf:pacemaker:Dummy): Stopped
+    * rsc1 (ocf:pacemaker:Dummy): Started node2
     * Resource Group: group2:
-      * rsc2 (ocf:pacemaker:Dummy): Stopped
-      * rsc3 (ocf:pacemaker:Dummy): Stopped
+      * rsc2 (ocf:pacemaker:Dummy): Started node1
+      * rsc3 (ocf:pacemaker:Dummy): Started node1
     * Clone Set: clone4 [rsc4]:
-      * Stopped: [ node1 node2 ]
+      * Started: [ node1 node2 ]
     * Clone Set: ms5 [rsc5] (promotable):
-      * Unpromoted: [ node1 node2 ]
+      * Promoted: [ node1 ]
+      * Unpromoted: [ node2 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-8.summary b/cts/scheduler/summary/ticket-rsc-sets-8.summary
index 03153aa264b..1d94dcfb69b 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-8.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-8.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -18,6 +21,9 @@ Transition Summary:
 
 Executing Cluster Transition:
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-9.summary b/cts/scheduler/summary/ticket-rsc-sets-9.summary
index 3bc9d648ac3..0a6ff56ada3 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-9.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-9.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
@@ -21,32 +24,26 @@ Transition Summary:
   * Stop rsc4:0 ( node1 ) due to node availability
   * Stop rsc4:1 ( node2 ) due to node availability
   * Demote rsc5:0 ( Promoted -> Unpromoted node1 )
+Transition failed: terminated
+An invalid transition was produced
 
 Executing Cluster Transition:
-  * Resource action: rsc1 stop on node2
-  * Pseudo action: group2_stop_0
-  * Resource action: rsc3 stop on node1
-  * Pseudo action: clone4_stop_0
-  * Pseudo action: ms5_demote_0
-  * Resource action: rsc2 stop on node1
-  * Resource action: rsc4:1 stop on node1
-  * Resource action: rsc4:0 stop on node2
-  * Pseudo action: clone4_stopped_0
-  * Resource action: rsc5:1 demote on node1
-  * Pseudo action: ms5_demoted_0
-  * Pseudo action: group2_stopped_0
 
 Revised Cluster Status:
+  * Cluster Summary:
+    * CIB syntax has errors (for details, run crm_verify -LV)
+
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * rsc_stonith (stonith:null): Started node1
-    * rsc1 (ocf:pacemaker:Dummy): Stopped
+    * rsc1 (ocf:pacemaker:Dummy): Started node2
     * Resource Group: group2:
-      * rsc2 (ocf:pacemaker:Dummy): Stopped
-      * rsc3 (ocf:pacemaker:Dummy): Stopped
+      * rsc2 (ocf:pacemaker:Dummy): Started node1
+      * rsc3 (ocf:pacemaker:Dummy): Started node1
     * Clone Set: clone4 [rsc4]:
-      * Stopped: [ node1 node2 ]
+      * Started: [ node1 node2 ]
     * Clone Set: ms5 [rsc5] (promotable):
-      * Unpromoted: [ node1 node2 ]
+      * Promoted: [ node1 ]
+      * Unpromoted: [ node2 ]
diff --git a/cts/scheduler/summary/timeout-by-node.summary b/cts/scheduler/summary/timeout-by-node.summary
index 78f4fcdc8be..d443a50b5ee 100644
--- a/cts/scheduler/summary/timeout-by-node.summary
+++ b/cts/scheduler/summary/timeout-by-node.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 node4 node5 ]
 
@@ -15,29 +17,14 @@ Transition Summary:
   * Start rsc1:4 ( node1 )
 
 Executing Cluster Transition:
-  * Resource action: rsc1:0 monitor on node2
-  * Resource action: rsc1:1 monitor on node3
-  * Resource action: rsc1:2 monitor on node4
-  * Resource action: rsc1:3 monitor on node5
-  * Resource action: rsc1:4 monitor on node1
-  * Pseudo action: rsc1-clone_start_0
-  * Resource action: rsc1:0 start on node2
-  * Resource action: rsc1:1 start on node3
-  * Resource action: rsc1:2 start on node4
-  * Resource action: rsc1:3 start on node5
-  * Resource action: rsc1:4 start on node1
-  * Pseudo action: rsc1-clone_running_0
-  * Resource action: rsc1:0 monitor=10000 on node2
-  * Resource action: rsc1:1 monitor=10000 on node3
-  * Resource action: rsc1:2 monitor=10000 on node4
-  * Resource action: rsc1:3 monitor=10000 on node5
-  * Resource action: rsc1:4 monitor=10000 on node1
 
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ node1 node2 node3 node4 node5 ]
 
   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Started node1
     * Clone Set: rsc1-clone [rsc1]:
-      * Started: [ node1 node2 node3 node4 node5 ]
+      * Stopped: [ node1 node2 node3 node4 node5 ]
diff --git a/cts/scheduler/summary/unfence-definition.summary b/cts/scheduler/summary/unfence-definition.summary
index 2d94f71a9ca..d18c191613c 100644
--- a/cts/scheduler/summary/unfence-definition.summary
+++ b/cts/scheduler/summary/unfence-definition.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Node virt-4: UNCLEAN (offline)
     * Online: [ virt-1 virt-2 virt-3 ]
@@ -11,55 +13,3 @@ Current cluster status:
     * Clone Set: clvmd-clone [clvmd]:
       * Started: [ virt-1 ]
       * Stopped: [ virt-2 virt-3 virt-4 ]
-
-Transition Summary:
-  * Fence (reboot) virt-4 'node is unclean'
-  * Fence (on) virt-3 'required by fencing monitor'
-  * Fence (on) virt-1 'Device definition changed'
-  * Restart fencing ( virt-1 )
-  * Restart dlm:0 ( virt-1 ) due to required stonith
-  * Start dlm:2 ( virt-3 )
-  * Restart clvmd:0 ( virt-1 ) due to required stonith
-  * Start clvmd:1 ( virt-2 )
-  * Start clvmd:2 ( virt-3 )
-
-Executing Cluster Transition:
-  * Resource action: fencing stop on virt-1
-  * Resource action: clvmd monitor on virt-2
-  * Pseudo action: clvmd-clone_stop_0
-  * Fencing virt-4 (reboot)
-  * Fencing virt-3 (on)
-  * Resource action: fencing monitor on virt-3
-  * Resource action: fencing delete on virt-1
-  * Resource action: dlm monitor on virt-3
-  * Resource action: clvmd monitor on virt-3
-  * Resource action: clvmd stop on virt-1
-  * Pseudo action: clvmd-clone_stopped_0
-  * Pseudo action: dlm-clone_stop_0
-  * Resource action: dlm stop on virt-1
-  * Pseudo action: dlm-clone_stopped_0
-  * Pseudo action: dlm-clone_start_0
-  * Fencing virt-1 (on)
-  * Resource action: fencing start on virt-1
-  * Resource action: dlm start on virt-1
-  * Resource action: dlm start on virt-3
-  * Pseudo action: dlm-clone_running_0
-  * Pseudo action: clvmd-clone_start_0
-  * Resource action: clvmd start on virt-1
-  * Resource action: clvmd start on virt-2
-  * Resource action: clvmd start on virt-3
-  * Pseudo action: clvmd-clone_running_0
-
-Revised Cluster Status:
-  * Node List:
-    * Online: [ virt-1 virt-2 virt-3 ]
-    * OFFLINE: [ virt-4 ]
-
-  * Full List of Resources:
-    * fencing (stonith:fence_scsi): Started virt-1
-    * Clone Set: dlm-clone [dlm]:
-      * Started: [ virt-1 virt-2 virt-3 ]
-      * Stopped: [ virt-4 ]
-    * Clone Set: clvmd-clone [clvmd]:
-      * Started: [ virt-1 virt-2 virt-3 ]
-      * Stopped: [ virt-4 ]
diff --git a/cts/scheduler/summary/unfence-device.summary b/cts/scheduler/summary/unfence-device.summary
index 6ee7a593115..72f0d1f5075 100644
--- a/cts/scheduler/summary/unfence-device.summary
+++ b/cts/scheduler/summary/unfence-device.summary
@@ -1,5 +1,7 @@
 Using the original execution date of: 2017-11-30 10:44:29Z
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ virt-008 virt-009 virt-013 ]
 
@@ -13,19 +15,13 @@ Transition Summary:
   * Start fence_scsi ( virt-008 )
 
 Executing Cluster Transition:
-  * Fencing virt-013 (on)
-  * Fencing virt-009 (on)
-  * Fencing virt-008 (on)
-  * Resource action: fence_scsi monitor on virt-013
-  * Resource action: fence_scsi monitor on virt-009
-  * Resource action: fence_scsi monitor on virt-008
-  * Resource action: fence_scsi start on virt-008
-  * Resource action: fence_scsi monitor=60000 on virt-008
 
 Using the original execution date of: 2017-11-30 10:44:29Z
 Revised Cluster Status:
+  * Cluster Summary:
+
   * Node List:
     * Online: [ virt-008 virt-009 virt-013 ]
 
   * Full List of Resources:
-    * fence_scsi (stonith:fence_scsi): Started virt-008
+    * fence_scsi (stonith:fence_scsi): Stopped
diff --git a/cts/scheduler/summary/unfence-parameters.summary b/cts/scheduler/summary/unfence-parameters.summary
index 93a65e667d3..d18c191613c 100644
--- a/cts/scheduler/summary/unfence-parameters.summary
+++ b/cts/scheduler/summary/unfence-parameters.summary
@@ -1,4 +1,6 @@
 Current cluster status:
+  * Cluster Summary:
+
   * Node List:
     * Node virt-4: UNCLEAN (offline)
     * Online: [ virt-1 virt-2 virt-3 ]
@@ -11,54 +13,3 @@ Current cluster status:
     * Clone Set: clvmd-clone [clvmd]:
       * Started: [ virt-1 ]
       * Stopped: [ virt-2 virt-3 virt-4 ]
-
-Transition Summary:
-  * Fence (reboot) virt-4 'node is unclean'
-  * Fence (on) virt-3 'required by fencing monitor'
-  * Fence (on) virt-1 'Device parameters changed'
-  * Restart fencing ( virt-1 ) due to resource definition change
-  * Restart dlm:0 ( virt-1 ) due to required stonith
-  * Start dlm:2 ( virt-3 )
-  * Restart clvmd:0 ( virt-1 ) due to required stonith
-  * Start clvmd:1 ( virt-2 )
-  * Start clvmd:2 ( virt-3 )
-
-Executing Cluster Transition:
-  * Resource action: fencing stop on virt-1
-  * Resource action: clvmd monitor on virt-2
-  * Pseudo action: clvmd-clone_stop_0
-  * Fencing virt-4 (reboot)
-  * Fencing virt-3 (on)
-  * Resource action: fencing monitor on virt-3
-  * Resource action: dlm monitor on virt-3
-  * Resource action: clvmd monitor on virt-3
-  * Resource action: clvmd stop on virt-1
-  * Pseudo action: clvmd-clone_stopped_0
-  * Pseudo action: dlm-clone_stop_0
-  * Resource action: dlm stop on virt-1
-  * Pseudo action: dlm-clone_stopped_0
-  * Pseudo action: dlm-clone_start_0
-  * Fencing virt-1 (on)
-  * Resource action: fencing start on virt-1
-  * Resource action: dlm start on virt-1
-  * Resource action: dlm start on virt-3
-  * Pseudo action: dlm-clone_running_0
-  * Pseudo action: clvmd-clone_start_0
-  * Resource action: clvmd start on virt-1
-  * Resource action: clvmd start on virt-2
-  * Resource action: clvmd start on virt-3
-  * Pseudo action: clvmd-clone_running_0
-
-Revised Cluster Status:
-  * Node List:
-    * Online: [ virt-1 virt-2 virt-3 ]
-    * OFFLINE: [ virt-4 ]
-
-  * Full List of Resources:
List of Resources: - * fencing (stonith:fence_scsi): Started virt-1 - * Clone Set: dlm-clone [dlm]: - * Started: [ virt-1 virt-2 virt-3 ] - * Stopped: [ virt-4 ] - * Clone Set: clvmd-clone [clvmd]: - * Started: [ virt-1 virt-2 virt-3 ] - * Stopped: [ virt-4 ] diff --git a/cts/scheduler/summary/unfence-startup.summary b/cts/scheduler/summary/unfence-startup.summary index 94617c23474..d18c191613c 100644 --- a/cts/scheduler/summary/unfence-startup.summary +++ b/cts/scheduler/summary/unfence-startup.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Node virt-4: UNCLEAN (offline) * Online: [ virt-1 virt-2 virt-3 ] @@ -11,39 +13,3 @@ Current cluster status: * Clone Set: clvmd-clone [clvmd]: * Started: [ virt-1 ] * Stopped: [ virt-2 virt-3 virt-4 ] - -Transition Summary: - * Fence (reboot) virt-4 'node is unclean' - * Fence (on) virt-3 'required by fencing monitor' - * Start dlm:2 ( virt-3 ) - * Start clvmd:1 ( virt-2 ) - * Start clvmd:2 ( virt-3 ) - -Executing Cluster Transition: - * Resource action: clvmd monitor on virt-2 - * Fencing virt-4 (reboot) - * Fencing virt-3 (on) - * Resource action: fencing monitor on virt-3 - * Resource action: dlm monitor on virt-3 - * Pseudo action: dlm-clone_start_0 - * Resource action: clvmd monitor on virt-3 - * Resource action: dlm start on virt-3 - * Pseudo action: dlm-clone_running_0 - * Pseudo action: clvmd-clone_start_0 - * Resource action: clvmd start on virt-2 - * Resource action: clvmd start on virt-3 - * Pseudo action: clvmd-clone_running_0 - -Revised Cluster Status: - * Node List: - * Online: [ virt-1 virt-2 virt-3 ] - * OFFLINE: [ virt-4 ] - - * Full List of Resources: - * fencing (stonith:fence_scsi): Started virt-1 - * Clone Set: dlm-clone [dlm]: - * Started: [ virt-1 virt-2 virt-3 ] - * Stopped: [ virt-4 ] - * Clone Set: clvmd-clone [clvmd]: - * Started: [ virt-1 virt-2 virt-3 ] - * Stopped: [ virt-4 ] diff --git a/cts/scheduler/summary/unmanaged-block-restart.summary b/cts/scheduler/summary/unmanaged-block-restart.summary index c771449f894..ed9a44eee2c 100644 --- a/cts/scheduler/summary/unmanaged-block-restart.summary +++ b/cts/scheduler/summary/unmanaged-block-restart.summary @@ -1,6 +1,8 @@ 0 of 4 resource instances DISABLED and 1 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ yingying.site ] @@ -14,13 +16,13 @@ Current cluster status: Transition Summary: * Start rsc1 ( yingying.site ) due to unrunnable rsc2 stop (blocked) * Stop rsc2 ( yingying.site ) due to unrunnable rsc3 stop (blocked) - * Stop rsc3 ( yingying.site ) due to required rsc2 stop (blocked) + * Stop rsc3 ( yingying.site ) due to unrunnable rsc4 stop (blocked) Executing Cluster Transition: - * Pseudo action: group1_stop_0 - * Pseudo action: group1_start_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ yingying.site ] diff --git a/cts/scheduler/summary/unmanaged-promoted.summary b/cts/scheduler/summary/unmanaged-promoted.summary index a617e07342c..339f7fb20a7 100644 --- a/cts/scheduler/summary/unmanaged-promoted.summary +++ b/cts/scheduler/summary/unmanaged-promoted.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ pcmk-1 pcmk-2 ] * OFFLINE: [ pcmk-3 pcmk-4 ] @@ -36,10 +38,10 @@ Current cluster status: Transition Summary: Executing Cluster Transition: - * Cluster action: do_shutdown on pcmk-2 - * Cluster action: do_shutdown on pcmk-1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ 
pcmk-1 pcmk-2 ] * OFFLINE: [ pcmk-3 pcmk-4 ] diff --git a/cts/scheduler/summary/unmanaged-stop-1.summary b/cts/scheduler/summary/unmanaged-stop-1.summary index ce91d7a1c22..367ee297d00 100644 --- a/cts/scheduler/summary/unmanaged-stop-1.summary +++ b/cts/scheduler/summary/unmanaged-stop-1.summary @@ -1,6 +1,8 @@ 1 of 2 resource instances DISABLED and 1 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ yingying.site ] @@ -14,6 +16,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ yingying.site ] diff --git a/cts/scheduler/summary/unmanaged-stop-2.summary b/cts/scheduler/summary/unmanaged-stop-2.summary index ce91d7a1c22..367ee297d00 100644 --- a/cts/scheduler/summary/unmanaged-stop-2.summary +++ b/cts/scheduler/summary/unmanaged-stop-2.summary @@ -1,6 +1,8 @@ 1 of 2 resource instances DISABLED and 1 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ yingying.site ] @@ -14,6 +16,8 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ yingying.site ] diff --git a/cts/scheduler/summary/unmanaged-stop-3.summary b/cts/scheduler/summary/unmanaged-stop-3.summary index 373130a55d8..ce65d2f226f 100644 --- a/cts/scheduler/summary/unmanaged-stop-3.summary +++ b/cts/scheduler/summary/unmanaged-stop-3.summary @@ -1,6 +1,8 @@ 2 of 2 resource instances DISABLED and 1 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ yingying.site ] @@ -13,9 +15,11 @@ Transition Summary: * Stop rsc1 ( yingying.site ) due to node availability (blocked) Executing Cluster Transition: - * Pseudo action: group1_stop_0 + * Pseudo action: group1_start_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ yingying.site ] diff --git a/cts/scheduler/summary/unmanaged-stop-4.summary b/cts/scheduler/summary/unmanaged-stop-4.summary index edf940c8bc3..f952f6d2eba 100644 --- a/cts/scheduler/summary/unmanaged-stop-4.summary +++ b/cts/scheduler/summary/unmanaged-stop-4.summary @@ -1,6 +1,8 @@ 3 of 3 resource instances DISABLED and 1 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ yingying.site ] @@ -14,9 +16,11 @@ Transition Summary: * Stop rsc1 ( yingying.site ) due to node availability (blocked) Executing Cluster Transition: - * Pseudo action: group1_stop_0 + * Pseudo action: group1_start_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ yingying.site ] diff --git a/cts/scheduler/summary/unrunnable-1.summary b/cts/scheduler/summary/unrunnable-1.summary index 75fda238563..a9e6266cabb 100644 --- a/cts/scheduler/summary/unrunnable-1.summary +++ b/cts/scheduler/summary/unrunnable-1.summary @@ -1,4 +1,7 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Node c001n02: UNCLEAN (offline) * Online: [ c001n03 ] @@ -29,23 +32,15 @@ Transition Summary: * Start rsc_c001n03 ( c001n03 ) due to no quorum (blocked) * Start rsc_c001n01 ( c001n03 ) due to no quorum (blocked) * Stop child_DoFencing:1 ( c001n02 ) due to node availability (blocked) +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Resource action: DcIPaddr monitor on c001n03 - * Resource action: child_192.168.100.181 monitor 
on c001n03 - * Resource action: child_192.168.100.182 monitor on c001n03 - * Resource action: child_192.168.100.183 monitor on c001n03 - * Resource action: rsc_c001n08 monitor on c001n03 - * Resource action: rsc_c001n02 monitor on c001n03 - * Resource action: rsc_c001n03 monitor on c001n03 - * Resource action: rsc_c001n01 monitor on c001n03 - * Resource action: child_DoFencing:1 monitor on c001n03 - * Resource action: child_DoFencing:2 monitor on c001n03 - * Resource action: child_DoFencing:3 monitor on c001n03 - * Pseudo action: DoFencing_stop_0 - * Pseudo action: DoFencing_stopped_0 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Node c001n02: UNCLEAN (offline) * Online: [ c001n03 ] diff --git a/cts/scheduler/summary/unrunnable-2.summary b/cts/scheduler/summary/unrunnable-2.summary index 26c63510785..2a2e2be4dc5 100644 --- a/cts/scheduler/summary/unrunnable-2.summary +++ b/cts/scheduler/summary/unrunnable-2.summary @@ -1,6 +1,9 @@ 6 of 117 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] @@ -88,10 +91,21 @@ Current cluster status: Transition Summary: * Start openstack-cinder-volume ( overcloud-controller-2 ) due to unrunnable openstack-cinder-scheduler-clone running (blocked) + * Stop openstack-aodh-listener:0 ( overcloud-controller-1 ) due to colocation with openstack-aodh-evaluator-clone + * Stop openstack-aodh-listener:1 ( overcloud-controller-0 ) due to colocation with openstack-aodh-evaluator-clone + * Stop openstack-aodh-listener:2 ( overcloud-controller-2 ) due to colocation with openstack-aodh-evaluator-clone Executing Cluster Transition: + * Pseudo action: openstack-aodh-listener-clone_stop_0 + * Resource action: openstack-aodh-listener stop on overcloud-controller-1 + * Resource action: openstack-aodh-listener stop on overcloud-controller-0 + * Resource action: openstack-aodh-listener stop on overcloud-controller-2 + * Pseudo action: openstack-aodh-listener-clone_stopped_0 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] @@ -129,7 +143,7 @@ Revised Cluster Status: * Clone Set: openstack-ceilometer-api-clone [openstack-ceilometer-api]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: openstack-aodh-listener-clone [openstack-aodh-listener]: - * Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] + * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: openstack-aodh-notifier-clone [openstack-aodh-notifier]: diff --git a/cts/scheduler/summary/use-after-free-merge.summary b/cts/scheduler/summary/use-after-free-merge.summary index af3e2a20168..e59644b6139 100644 --- a/cts/scheduler/summary/use-after-free-merge.summary +++ b/cts/scheduler/summary/use-after-free-merge.summary @@ -1,6 +1,8 @@ 2 of 5 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Online: [ hex-13 hex-14 ] @@ 
-18,28 +20,17 @@ Transition Summary: * Start s0:1 ( hex-14 ) Executing Cluster Transition: - * Resource action: fencing-sbd monitor on hex-14 - * Resource action: fencing-sbd monitor on hex-13 - * Resource action: d0 monitor on hex-14 - * Resource action: d0 monitor on hex-13 - * Resource action: d1 monitor on hex-14 - * Resource action: d1 monitor on hex-13 - * Resource action: s0:0 monitor on hex-13 - * Resource action: s0:1 monitor on hex-14 - * Pseudo action: ms0_start_0 - * Resource action: fencing-sbd start on hex-14 - * Resource action: s0:0 start on hex-13 - * Resource action: s0:1 start on hex-14 - * Pseudo action: ms0_running_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ hex-13 hex-14 ] * Full List of Resources: - * fencing-sbd (stonith:external/sbd): Started hex-14 + * fencing-sbd (stonith:external/sbd): Stopped * Resource Group: g0 (disabled): * d0 (ocf:heartbeat:Dummy): Stopped (disabled) * d1 (ocf:heartbeat:Dummy): Stopped (disabled) * Clone Set: ms0 [s0] (promotable): - * Unpromoted: [ hex-13 hex-14 ] + * Stopped: [ hex-13 hex-14 ] diff --git a/cts/scheduler/summary/utilization-check-allowed-nodes.summary b/cts/scheduler/summary/utilization-check-allowed-nodes.summary index 608a3771fc9..7950e4787b6 100644 --- a/cts/scheduler/summary/utilization-check-allowed-nodes.summary +++ b/cts/scheduler/summary/utilization-check-allowed-nodes.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -10,18 +12,13 @@ Transition Summary: * Start rsc1 ( node2 ) Executing Cluster Transition: - * Resource action: rsc1 monitor on node2 - * Resource action: rsc1 monitor on node1 - * Resource action: rsc2 monitor on node2 - * Resource action: rsc2 monitor on node1 - * Pseudo action: load_stopped_node2 - * Pseudo action: load_stopped_node1 - * Resource action: rsc1 start on node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc1 (ocf:pacemaker:Dummy): Started node2 + * rsc1 (ocf:pacemaker:Dummy): Stopped * rsc2 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/utilization-complex.summary b/cts/scheduler/summary/utilization-complex.summary index 946dd121820..71fee9cb205 100644 --- a/cts/scheduler/summary/utilization-complex.summary +++ b/cts/scheduler/summary/utilization-complex.summary @@ -1,8 +1,10 @@ Using the original execution date of: 2022-01-05 22:04:47Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 rhel8-5 ] - * GuestOnline: [ httpd-bundle-0 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] * Full List of Resources: * dummy3 (ocf:pacemaker:Dummy): Started rhel8-1 @@ -27,122 +29,3 @@ Current cluster status: * Clone Set: clone2-clone [clone2]: * Started: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 rhel8-5 ] * Stopped: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] - -Transition Summary: - * Stop dummy3 ( rhel8-1 ) due to node availability - * Move dummy5 ( rhel8-2 -> rhel8-5 ) - * Move httpd-bundle-ip-192.168.122.131 ( rhel8-2 -> rhel8-5 ) - * Move httpd-bundle-podman-0 ( rhel8-2 -> rhel8-5 ) - * Move httpd-bundle-0 ( rhel8-2 -> rhel8-5 ) - * Restart httpd:0 ( httpd-bundle-0 ) due to required httpd-bundle-podman-0 start - * Start httpd-bundle-1 ( rhel8-1 ) due to unrunnable httpd-bundle-podman-1 start (blocked) - * Start httpd:1 ( httpd-bundle-1 ) due to unrunnable httpd-bundle-podman-1 start (blocked) - * Start httpd-bundle-2 ( rhel8-2 ) due to unrunnable 
httpd-bundle-podman-2 start (blocked) - * Start httpd:2 ( httpd-bundle-2 ) due to unrunnable httpd-bundle-podman-2 start (blocked) - * Move dummy4 ( rhel8-5 -> rhel8-4 ) - * Move dummy1 ( rhel8-1 -> rhel8-3 ) - * Move dummy2 ( rhel8-1 -> rhel8-3 ) - * Move Fencing ( rhel8-3 -> rhel8-1 ) - * Move FencingFail ( rhel8-5 -> rhel8-2 ) - * Move g1m1 ( rhel8-5 -> rhel8-4 ) - * Move g1m2 ( rhel8-5 -> rhel8-4 ) - * Move g1m3 ( rhel8-5 -> rhel8-4 ) - * Stop clone1:3 ( rhel8-5 ) due to node availability - * Stop clone2:3 ( rhel8-5 ) due to node availability - -Executing Cluster Transition: - * Resource action: dummy3 stop on rhel8-1 - * Resource action: dummy5 stop on rhel8-2 - * Resource action: dummy4 stop on rhel8-5 - * Resource action: dummy1 stop on rhel8-1 - * Resource action: dummy2 stop on rhel8-1 - * Resource action: Fencing stop on rhel8-3 - * Resource action: FencingFail stop on rhel8-5 - * Pseudo action: g1_stop_0 - * Resource action: g1m3 stop on rhel8-5 - * Pseudo action: clone1-clone_stop_0 - * Pseudo action: clone2-clone_stop_0 - * Pseudo action: httpd-bundle_stop_0 - * Pseudo action: httpd-bundle_start_0 - * Pseudo action: load_stopped_rhel8-4 - * Pseudo action: load_stopped_rhel8-3 - * Pseudo action: load_stopped_httpd-bundle-2 - * Pseudo action: load_stopped_httpd-bundle-1 - * Pseudo action: load_stopped_httpd-bundle-0 - * Pseudo action: load_stopped_rhel8-1 - * Pseudo action: httpd-bundle-clone_stop_0 - * Resource action: dummy4 start on rhel8-4 - * Resource action: dummy1 start on rhel8-3 - * Resource action: dummy2 start on rhel8-3 - * Resource action: Fencing start on rhel8-1 - * Resource action: FencingFail start on rhel8-2 - * Resource action: g1m2 stop on rhel8-5 - * Resource action: clone1 stop on rhel8-5 - * Pseudo action: clone1-clone_stopped_0 - * Resource action: clone2 stop on rhel8-5 - * Pseudo action: clone2-clone_stopped_0 - * Resource action: httpd stop on httpd-bundle-0 - * Pseudo action: httpd-bundle-clone_stopped_0 - * Pseudo action: httpd-bundle-clone_start_0 - * Resource action: httpd-bundle-0 stop on rhel8-2 - * Resource action: dummy4 monitor=10000 on rhel8-4 - * Resource action: dummy1 monitor=10000 on rhel8-3 - * Resource action: dummy2 monitor=10000 on rhel8-3 - * Resource action: Fencing monitor=120000 on rhel8-1 - * Resource action: g1m1 stop on rhel8-5 - * Pseudo action: load_stopped_rhel8-5 - * Resource action: dummy5 start on rhel8-5 - * Resource action: httpd-bundle-podman-0 stop on rhel8-2 - * Pseudo action: g1_stopped_0 - * Pseudo action: g1_start_0 - * Resource action: g1m1 start on rhel8-4 - * Resource action: g1m2 start on rhel8-4 - * Resource action: g1m3 start on rhel8-4 - * Pseudo action: httpd-bundle_stopped_0 - * Pseudo action: load_stopped_rhel8-2 - * Resource action: dummy5 monitor=10000 on rhel8-5 - * Resource action: httpd-bundle-ip-192.168.122.131 stop on rhel8-2 - * Pseudo action: g1_running_0 - * Resource action: g1m1 monitor=10000 on rhel8-4 - * Resource action: g1m2 monitor=10000 on rhel8-4 - * Resource action: g1m3 monitor=10000 on rhel8-4 - * Resource action: httpd-bundle-ip-192.168.122.131 start on rhel8-5 - * Resource action: httpd-bundle-podman-0 start on rhel8-5 - * Resource action: httpd-bundle-0 start on rhel8-5 - * Resource action: httpd start on httpd-bundle-0 - * Resource action: httpd monitor=15000 on httpd-bundle-0 - * Pseudo action: httpd-bundle-clone_running_0 - * Resource action: httpd-bundle-ip-192.168.122.131 monitor=60000 on rhel8-5 - * Resource action: httpd-bundle-podman-0 monitor=60000 on rhel8-5 - * Resource 
action: httpd-bundle-0 monitor=30000 on rhel8-5 - * Pseudo action: httpd-bundle_running_0 -Using the original execution date of: 2022-01-05 22:04:47Z - -Revised Cluster Status: - * Node List: - * Online: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 rhel8-5 ] - * GuestOnline: [ httpd-bundle-0 ] - - * Full List of Resources: - * dummy3 (ocf:pacemaker:Dummy): Stopped - * dummy5 (ocf:pacemaker:Dummy): Started rhel8-5 - * Container bundle set: httpd-bundle [localhost/pcmktest:http]: - * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started rhel8-5 - * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Stopped - * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped - * dummy4 (ocf:pacemaker:Dummy): Started rhel8-4 - * dummy1 (ocf:pacemaker:Dummy): Started rhel8-3 - * dummy2 (ocf:pacemaker:Dummy): Started rhel8-3 - * Fencing (stonith:fence_xvm): Started rhel8-1 - * FencingPass (stonith:fence_dummy): Started rhel8-4 - * FencingFail (stonith:fence_dummy): Started rhel8-2 - * Resource Group: g1: - * g1m1 (ocf:pacemaker:Dummy): Started rhel8-4 - * g1m2 (ocf:pacemaker:Dummy): Started rhel8-4 - * g1m3 (ocf:pacemaker:Dummy): Started rhel8-4 - * Clone Set: clone1-clone [clone1]: - * Started: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 ] - * Stopped: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 rhel8-5 ] - * Clone Set: clone2-clone [clone2]: - * Started: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 ] - * Stopped: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 rhel8-5 ] diff --git a/cts/scheduler/summary/utilization-order1.summary b/cts/scheduler/summary/utilization-order1.summary index f76ce611919..68d4b73a6d4 100644 --- a/cts/scheduler/summary/utilization-order1.summary +++ b/cts/scheduler/summary/utilization-order1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -11,15 +13,13 @@ Transition Summary: * Stop rsc1 ( node1 ) due to node availability Executing Cluster Transition: - * Resource action: rsc1 stop on node1 - * Pseudo action: load_stopped_node2 - * Pseudo action: load_stopped_node1 - * Resource action: rsc2 start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc2 (ocf:pacemaker:Dummy): Started node1 - * rsc1 (ocf:pacemaker:Dummy): Stopped + * rsc2 (ocf:pacemaker:Dummy): Stopped + * rsc1 (ocf:pacemaker:Dummy): Started node1 diff --git a/cts/scheduler/summary/utilization-order2.summary b/cts/scheduler/summary/utilization-order2.summary index 123b935be85..d53a9621d83 100644 --- a/cts/scheduler/summary/utilization-order2.summary +++ b/cts/scheduler/summary/utilization-order2.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -14,26 +16,20 @@ Transition Summary: * Move rsc3 ( node1 -> node2 ) * Stop rsc2:0 ( node1 ) due to node availability * Stop rsc1 ( node2 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Resource action: rsc3 stop on node1 - * Pseudo action: clone-rsc2_stop_0 - * Resource action: rsc1 stop on node2 - * Pseudo action: load_stopped_node2 - * Resource action: rsc3 start on node2 - * Resource action: rsc2:1 stop on node1 - * Pseudo action: clone-rsc2_stopped_0 - * Pseudo action: load_stopped_node1 - * Resource action: rsc4 start on node1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc4 (ocf:pacemaker:Dummy): Started node1 - * rsc3 
(ocf:pacemaker:Dummy): Started node2 + * rsc4 (ocf:pacemaker:Dummy): Stopped + * rsc3 (ocf:pacemaker:Dummy): Started node1 * Clone Set: clone-rsc2 [rsc2]: - * Started: [ node2 ] - * Stopped: [ node1 ] - * rsc1 (ocf:pacemaker:Dummy): Stopped + * Started: [ node1 node2 ] + * rsc1 (ocf:pacemaker:Dummy): Started node2 diff --git a/cts/scheduler/summary/utilization-order3.summary b/cts/scheduler/summary/utilization-order3.summary index b192e2a670f..40fa2c2daf0 100644 --- a/cts/scheduler/summary/utilization-order3.summary +++ b/cts/scheduler/summary/utilization-order3.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] @@ -11,18 +13,13 @@ Transition Summary: * Migrate rsc1 ( node1 -> node2 ) Executing Cluster Transition: - * Pseudo action: load_stopped_node2 - * Resource action: rsc1 migrate_to on node1 - * Resource action: rsc1 migrate_from on node2 - * Resource action: rsc1 stop on node1 - * Pseudo action: load_stopped_node1 - * Resource action: rsc2 start on node1 - * Pseudo action: rsc1_start_0 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * rsc2 (ocf:pacemaker:Dummy): Started node1 - * rsc1 (ocf:pacemaker:Dummy): Started node2 + * rsc2 (ocf:pacemaker:Dummy): Stopped + * rsc1 (ocf:pacemaker:Dummy): Started node1 diff --git a/cts/scheduler/summary/utilization-order4.summary b/cts/scheduler/summary/utilization-order4.summary index a3f8aa0d066..6d9b881c5b6 100644 --- a/cts/scheduler/summary/utilization-order4.summary +++ b/cts/scheduler/summary/utilization-order4.summary @@ -1,6 +1,8 @@ 2 of 13 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * Node List: * Node deglxen002: standby (with active resources) * Online: [ deglxen001 ] @@ -23,41 +25,25 @@ Transition Summary: * Stop nfs-xen_swapfiles:1 ( deglxen002 ) due to node availability * Stop nfs-xen_images:1 ( deglxen002 ) due to node availability * Stop prim-ping:1 ( deglxen002 ) due to node availability +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Resource action: degllx61-vm stop on deglxen001 - * Pseudo action: load_stopped_deglxen001 - * Resource action: degllx62-vm migrate_to on deglxen002 - * Resource action: degllx62-vm migrate_from on deglxen001 - * Resource action: degllx62-vm stop on deglxen002 - * Pseudo action: clone-nfs_stop_0 - * Pseudo action: load_stopped_deglxen002 - * Pseudo action: degllx62-vm_start_0 - * Pseudo action: grp-nfs:1_stop_0 - * Resource action: nfs-xen_images:1 stop on deglxen002 - * Resource action: degllx62-vm monitor=30000 on deglxen001 - * Resource action: nfs-xen_swapfiles:1 stop on deglxen002 - * Resource action: nfs-xen_config:1 stop on deglxen002 - * Pseudo action: grp-nfs:1_stopped_0 - * Pseudo action: clone-nfs_stopped_0 - * Pseudo action: clone-ping_stop_0 - * Resource action: prim-ping:0 stop on deglxen002 - * Pseudo action: clone-ping_stopped_0 Revised Cluster Status: + * Cluster Summary: + * Node List: - * Node deglxen002: standby + * Node deglxen002: standby (with active resources) * Online: [ deglxen001 ] * Full List of Resources: - * degllx62-vm (ocf:heartbeat:Xen): Started deglxen001 + * degllx62-vm (ocf:heartbeat:Xen): Started deglxen002 * degllx63-vm (ocf:heartbeat:Xen): Stopped (disabled) - * degllx61-vm (ocf:heartbeat:Xen): Stopped + * degllx61-vm (ocf:heartbeat:Xen): Started deglxen001 * degllx64-vm (ocf:heartbeat:Xen): Stopped 
(disabled) * stonith_sbd (stonith:external/sbd): Started deglxen001 * Clone Set: clone-nfs [grp-nfs]: - * Started: [ deglxen001 ] - * Stopped: [ deglxen002 ] + * Started: [ deglxen001 deglxen002 ] * Clone Set: clone-ping [prim-ping]: - * Started: [ deglxen001 ] - * Stopped: [ deglxen002 ] + * Started: [ deglxen001 deglxen002 ] diff --git a/cts/scheduler/summary/utilization-shuffle.summary b/cts/scheduler/summary/utilization-shuffle.summary index c350e94df71..95f20fe7823 100644 --- a/cts/scheduler/summary/utilization-shuffle.summary +++ b/cts/scheduler/summary/utilization-shuffle.summary @@ -1,22 +1,11 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ act1 act2 act3 sby1 sby2 ] * Full List of Resources: - * Resource Group: grpPostgreSQLDB1: - * prmExPostgreSQLDB1 (ocf:pacemaker:Dummy): Stopped - * prmFsPostgreSQLDB1-1 (ocf:pacemaker:Dummy): Stopped - * prmFsPostgreSQLDB1-2 (ocf:pacemaker:Dummy): Stopped - * prmFsPostgreSQLDB1-3 (ocf:pacemaker:Dummy): Stopped - * prmIpPostgreSQLDB1 (ocf:pacemaker:Dummy): Stopped - * prmApPostgreSQLDB1 (ocf:pacemaker:Dummy): Stopped - * Resource Group: grpPostgreSQLDB2: - * prmExPostgreSQLDB2 (ocf:pacemaker:Dummy): Started act2 - * prmFsPostgreSQLDB2-1 (ocf:pacemaker:Dummy): Started act2 - * prmFsPostgreSQLDB2-2 (ocf:pacemaker:Dummy): Started act2 - * prmFsPostgreSQLDB2-3 (ocf:pacemaker:Dummy): Started act2 - * prmIpPostgreSQLDB2 (ocf:pacemaker:Dummy): Started act2 - * prmApPostgreSQLDB2 (ocf:pacemaker:Dummy): Started act2 * Resource Group: grpPostgreSQLDB3: * prmExPostgreSQLDB3 (ocf:pacemaker:Dummy): Started act1 * prmFsPostgreSQLDB3-1 (ocf:pacemaker:Dummy): Started act1 @@ -24,12 +13,26 @@ Current cluster status: * prmFsPostgreSQLDB3-3 (ocf:pacemaker:Dummy): Started act1 * prmIpPostgreSQLDB3 (ocf:pacemaker:Dummy): Started act1 * prmApPostgreSQLDB3 (ocf:pacemaker:Dummy): Started act1 + * Resource Group: grpPostgreSQLDB2: + * prmExPostgreSQLDB2 (ocf:pacemaker:Dummy): Started act2 + * prmFsPostgreSQLDB2-1 (ocf:pacemaker:Dummy): Started act2 + * prmFsPostgreSQLDB2-2 (ocf:pacemaker:Dummy): Started act2 + * prmFsPostgreSQLDB2-3 (ocf:pacemaker:Dummy): Started act2 + * prmIpPostgreSQLDB2 (ocf:pacemaker:Dummy): Started act2 + * prmApPostgreSQLDB2 (ocf:pacemaker:Dummy): Started act2 * Clone Set: clnPingd [prmPingd]: * Started: [ act1 act2 act3 sby1 sby2 ] * Clone Set: clnDiskd1 [prmDiskd1]: * Started: [ act1 act2 act3 sby1 sby2 ] * Clone Set: clnDiskd2 [prmDiskd2]: * Started: [ act1 act2 act3 sby1 sby2 ] + * Resource Group: grpPostgreSQLDB1: + * prmExPostgreSQLDB1 (ocf:pacemaker:Dummy): Stopped + * prmFsPostgreSQLDB1-1 (ocf:pacemaker:Dummy): Stopped + * prmFsPostgreSQLDB1-2 (ocf:pacemaker:Dummy): Stopped + * prmFsPostgreSQLDB1-3 (ocf:pacemaker:Dummy): Stopped + * prmIpPostgreSQLDB1 (ocf:pacemaker:Dummy): Stopped + * prmApPostgreSQLDB1 (ocf:pacemaker:Dummy): Stopped Transition Summary: * Start prmExPostgreSQLDB1 ( act3 ) @@ -40,45 +43,15 @@ Transition Summary: * Start prmApPostgreSQLDB1 ( act3 ) Executing Cluster Transition: - * Pseudo action: grpPostgreSQLDB1_start_0 - * Pseudo action: load_stopped_sby2 - * Pseudo action: load_stopped_sby1 - * Pseudo action: load_stopped_act3 - * Pseudo action: load_stopped_act2 - * Pseudo action: load_stopped_act1 - * Resource action: prmExPostgreSQLDB1 start on act3 - * Resource action: prmFsPostgreSQLDB1-1 start on act3 - * Resource action: prmFsPostgreSQLDB1-2 start on act3 - * Resource action: prmFsPostgreSQLDB1-3 start on act3 - * Resource 
action: prmIpPostgreSQLDB1 start on act3 - * Resource action: prmApPostgreSQLDB1 start on act3 - * Pseudo action: grpPostgreSQLDB1_running_0 - * Resource action: prmExPostgreSQLDB1 monitor=5000 on act3 - * Resource action: prmFsPostgreSQLDB1-1 monitor=5000 on act3 - * Resource action: prmFsPostgreSQLDB1-2 monitor=5000 on act3 - * Resource action: prmFsPostgreSQLDB1-3 monitor=5000 on act3 - * Resource action: prmIpPostgreSQLDB1 monitor=5000 on act3 - * Resource action: prmApPostgreSQLDB1 monitor=5000 on act3 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ act1 act2 act3 sby1 sby2 ] * Full List of Resources: - * Resource Group: grpPostgreSQLDB1: - * prmExPostgreSQLDB1 (ocf:pacemaker:Dummy): Started act3 - * prmFsPostgreSQLDB1-1 (ocf:pacemaker:Dummy): Started act3 - * prmFsPostgreSQLDB1-2 (ocf:pacemaker:Dummy): Started act3 - * prmFsPostgreSQLDB1-3 (ocf:pacemaker:Dummy): Started act3 - * prmIpPostgreSQLDB1 (ocf:pacemaker:Dummy): Started act3 - * prmApPostgreSQLDB1 (ocf:pacemaker:Dummy): Started act3 - * Resource Group: grpPostgreSQLDB2: - * prmExPostgreSQLDB2 (ocf:pacemaker:Dummy): Started act2 - * prmFsPostgreSQLDB2-1 (ocf:pacemaker:Dummy): Started act2 - * prmFsPostgreSQLDB2-2 (ocf:pacemaker:Dummy): Started act2 - * prmFsPostgreSQLDB2-3 (ocf:pacemaker:Dummy): Started act2 - * prmIpPostgreSQLDB2 (ocf:pacemaker:Dummy): Started act2 - * prmApPostgreSQLDB2 (ocf:pacemaker:Dummy): Started act2 * Resource Group: grpPostgreSQLDB3: * prmExPostgreSQLDB3 (ocf:pacemaker:Dummy): Started act1 * prmFsPostgreSQLDB3-1 (ocf:pacemaker:Dummy): Started act1 @@ -86,9 +59,23 @@ Revised Cluster Status: * prmFsPostgreSQLDB3-3 (ocf:pacemaker:Dummy): Started act1 * prmIpPostgreSQLDB3 (ocf:pacemaker:Dummy): Started act1 * prmApPostgreSQLDB3 (ocf:pacemaker:Dummy): Started act1 + * Resource Group: grpPostgreSQLDB2: + * prmExPostgreSQLDB2 (ocf:pacemaker:Dummy): Started act2 + * prmFsPostgreSQLDB2-1 (ocf:pacemaker:Dummy): Started act2 + * prmFsPostgreSQLDB2-2 (ocf:pacemaker:Dummy): Started act2 + * prmFsPostgreSQLDB2-3 (ocf:pacemaker:Dummy): Started act2 + * prmIpPostgreSQLDB2 (ocf:pacemaker:Dummy): Started act2 + * prmApPostgreSQLDB2 (ocf:pacemaker:Dummy): Started act2 * Clone Set: clnPingd [prmPingd]: * Started: [ act1 act2 act3 sby1 sby2 ] * Clone Set: clnDiskd1 [prmDiskd1]: * Started: [ act1 act2 act3 sby1 sby2 ] * Clone Set: clnDiskd2 [prmDiskd2]: * Started: [ act1 act2 act3 sby1 sby2 ] + * Resource Group: grpPostgreSQLDB1: + * prmExPostgreSQLDB1 (ocf:pacemaker:Dummy): Stopped + * prmFsPostgreSQLDB1-1 (ocf:pacemaker:Dummy): Stopped + * prmFsPostgreSQLDB1-2 (ocf:pacemaker:Dummy): Stopped + * prmFsPostgreSQLDB1-3 (ocf:pacemaker:Dummy): Stopped + * prmIpPostgreSQLDB1 (ocf:pacemaker:Dummy): Stopped + * prmApPostgreSQLDB1 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/utilization.summary b/cts/scheduler/summary/utilization.summary index 8a72fde5941..e308cf98606 100644 --- a/cts/scheduler/summary/utilization.summary +++ b/cts/scheduler/summary/utilization.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ host1 host2 ] @@ -10,18 +12,13 @@ Transition Summary: * Start rsc2 ( host2 ) Executing Cluster Transition: - * Resource action: rsc2 monitor on host2 - * Resource action: rsc2 monitor on host1 - * Resource action: rsc1 monitor on host2 - * Resource action: rsc1 monitor on host1 - * Pseudo action: load_stopped_host2 - * Pseudo action: load_stopped_host1 - * Resource action: 
rsc2 start on host2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ host1 host2 ] * Full List of Resources: - * rsc2 (ocf:pacemaker:Dummy): Started host2 + * rsc2 (ocf:pacemaker:Dummy): Stopped * rsc1 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/value-source.summary b/cts/scheduler/summary/value-source.summary index 7f033ca83c2..53b5dbacbaa 100644 --- a/cts/scheduler/summary/value-source.summary +++ b/cts/scheduler/summary/value-source.summary @@ -1,5 +1,7 @@ Using the original execution date of: 2020-11-12 21:28:08Z Current cluster status: + * Cluster Summary: + * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] @@ -26,37 +28,21 @@ Transition Summary: * Start insane-rsc ( rhel7-4 ) Executing Cluster Transition: - * Resource action: Fencing start on rhel7-1 - * Resource action: rsc1 start on rhel7-4 - * Resource action: rsc2 start on rhel7-5 - * Resource action: invert-match start on rhel7-1 - * Resource action: single-rsc start on rhel7-2 - * Resource action: set-rsc1 start on rhel7-3 - * Resource action: set-rsc2 start on rhel7-4 - * Resource action: meta-rsc start on rhel7-5 - * Resource action: insane-rsc start on rhel7-4 - * Resource action: Fencing monitor=120000 on rhel7-1 - * Resource action: rsc1 monitor=10000 on rhel7-4 - * Resource action: rsc2 monitor=10000 on rhel7-5 - * Resource action: invert-match monitor=10000 on rhel7-1 - * Resource action: single-rsc monitor=10000 on rhel7-2 - * Resource action: set-rsc1 monitor=10000 on rhel7-3 - * Resource action: set-rsc2 monitor=10000 on rhel7-4 - * Resource action: meta-rsc monitor=10000 on rhel7-5 - * Resource action: insane-rsc monitor=10000 on rhel7-4 Using the original execution date of: 2020-11-12 21:28:08Z Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] * Full List of Resources: - * Fencing (stonith:fence_xvm): Started rhel7-1 - * rsc1 (ocf:pacemaker:Dummy): Started rhel7-4 - * rsc2 (ocf:pacemaker:Dummy): Started rhel7-5 - * invert-match (ocf:pacemaker:Dummy): Started rhel7-1 - * single-rsc (ocf:pacemaker:Dummy): Started rhel7-2 - * set-rsc1 (ocf:pacemaker:Dummy): Started rhel7-3 - * set-rsc2 (ocf:pacemaker:Dummy): Started rhel7-4 - * meta-rsc (ocf:pacemaker:Dummy): Started rhel7-5 - * insane-rsc (ocf:pacemaker:Dummy): Started rhel7-4 + * Fencing (stonith:fence_xvm): Stopped + * rsc1 (ocf:pacemaker:Dummy): Stopped + * rsc2 (ocf:pacemaker:Dummy): Stopped + * invert-match (ocf:pacemaker:Dummy): Stopped + * single-rsc (ocf:pacemaker:Dummy): Stopped + * set-rsc1 (ocf:pacemaker:Dummy): Stopped + * set-rsc2 (ocf:pacemaker:Dummy): Stopped + * meta-rsc (ocf:pacemaker:Dummy): Stopped + * insane-rsc (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/whitebox-asymmetric.summary b/cts/scheduler/summary/whitebox-asymmetric.summary index 53911391c2a..68bd9b2be9c 100644 --- a/cts/scheduler/summary/whitebox-asymmetric.summary +++ b/cts/scheduler/summary/whitebox-asymmetric.summary @@ -1,6 +1,13 @@ +element primitive: Relax-NG validity error : Type ID doesn't allow value '18node2' +element primitive: Relax-NG validity error : Invalid sequence in interleave +element primitive: Relax-NG validity error : Element primitive failed to validate content +Entity: line 22: element primitive: Relax-NG validity error : Element resources has extra content: primitive 1 of 7 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: + * Cluster Summary: + * CIB syntax has 
errors (for details, run crm_verify -LV) + * Node List: * Online: [ 18builder ] @@ -26,8 +33,15 @@ Executing Cluster Transition: * Resource action: 18node2 monitor=30000 on 18builder * Resource action: nfs_mount start on 18node2 * Resource action: nfs_mount monitor=10000 on 18node2 +element primitive: Relax-NG validity error : Type ID doesn't allow value '18node2' +element primitive: Relax-NG validity error : Invalid sequence in interleave +element primitive: Relax-NG validity error : Element primitive failed to validate content +Entity: line 22: element primitive: Relax-NG validity error : Element resources has extra content: primitive Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ 18builder ] * GuestOnline: [ 18node2 ] diff --git a/cts/scheduler/summary/whitebox-fail1.summary b/cts/scheduler/summary/whitebox-fail1.summary index 974f124093d..0ccda9777d1 100644 --- a/cts/scheduler/summary/whitebox-fail1.summary +++ b/cts/scheduler/summary/whitebox-fail1.summary @@ -1,13 +1,16 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ 18node1 18node2 18node3 ] - * GuestOnline: [ lxc2 ] + * GuestOnline: [ lxc1 lxc2 ] * Full List of Resources: * container1 (ocf:heartbeat:VirtualDomain): FAILED 18node2 * container2 (ocf:heartbeat:VirtualDomain): Started 18node2 * shoot1 (stonith:fence_xvm): Started 18node3 * Clone Set: M-clone [M]: + * M (ocf:pacemaker:Dummy): FAILED lxc1 * Started: [ 18node1 18node2 18node3 lxc2 ] * A (ocf:pacemaker:Dummy): Started 18node1 * B (ocf:pacemaker:Dummy): FAILED lxc1 @@ -22,38 +25,22 @@ Transition Summary: * Restart lxc1 ( 18node2 ) due to required container1 start Executing Cluster Transition: - * Resource action: A monitor on lxc2 - * Resource action: B monitor on lxc2 - * Resource action: D monitor on lxc2 - * Resource action: lxc1 stop on 18node2 - * Resource action: container1 stop on 18node2 - * Pseudo action: stonith-lxc1-reboot on lxc1 - * Resource action: container1 start on 18node2 - * Pseudo action: M-clone_stop_0 - * Pseudo action: B_stop_0 - * Resource action: lxc1 start on 18node2 - * Resource action: lxc1 monitor=30000 on 18node2 - * Pseudo action: M_stop_0 - * Pseudo action: M-clone_stopped_0 - * Pseudo action: M-clone_start_0 - * Resource action: B start on lxc1 - * Resource action: M start on lxc1 - * Pseudo action: M-clone_running_0 - * Resource action: B monitor=10000 on lxc1 - * Resource action: M monitor=10000 on lxc1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ 18node1 18node2 18node3 ] * GuestOnline: [ lxc1 lxc2 ] * Full List of Resources: - * container1 (ocf:heartbeat:VirtualDomain): Started 18node2 + * container1 (ocf:heartbeat:VirtualDomain): FAILED 18node2 * container2 (ocf:heartbeat:VirtualDomain): Started 18node2 * shoot1 (stonith:fence_xvm): Started 18node3 * Clone Set: M-clone [M]: - * Started: [ 18node1 18node2 18node3 lxc1 lxc2 ] + * M (ocf:pacemaker:Dummy): FAILED lxc1 + * Started: [ 18node1 18node2 18node3 lxc2 ] * A (ocf:pacemaker:Dummy): Started 18node1 - * B (ocf:pacemaker:Dummy): Started lxc1 + * B (ocf:pacemaker:Dummy): FAILED lxc1 * C (ocf:pacemaker:Dummy): Started lxc2 * D (ocf:pacemaker:Dummy): Started 18node1 diff --git a/cts/scheduler/summary/whitebox-fail2.summary b/cts/scheduler/summary/whitebox-fail2.summary index 73b44f536d7..47119781b14 100644 --- a/cts/scheduler/summary/whitebox-fail2.summary +++ b/cts/scheduler/summary/whitebox-fail2.summary @@ -1,13 +1,16 @@ Current cluster status: + * 
Cluster Summary: + * Node List: * Online: [ 18node1 18node2 18node3 ] - * GuestOnline: [ lxc2 ] + * GuestOnline: [ lxc1 lxc2 ] * Full List of Resources: * container1 (ocf:heartbeat:VirtualDomain): FAILED 18node2 * container2 (ocf:heartbeat:VirtualDomain): Started 18node2 * shoot1 (stonith:fence_xvm): Started 18node3 * Clone Set: M-clone [M]: + * M (ocf:pacemaker:Dummy): FAILED lxc1 * Started: [ 18node1 18node2 18node3 lxc2 ] * A (ocf:pacemaker:Dummy): Started 18node1 * B (ocf:pacemaker:Dummy): FAILED lxc1 @@ -22,38 +25,22 @@ Transition Summary: * Recover lxc1 ( 18node2 ) Executing Cluster Transition: - * Resource action: A monitor on lxc2 - * Resource action: B monitor on lxc2 - * Resource action: D monitor on lxc2 - * Resource action: lxc1 stop on 18node2 - * Resource action: container1 stop on 18node2 - * Pseudo action: stonith-lxc1-reboot on lxc1 - * Resource action: container1 start on 18node2 - * Pseudo action: M-clone_stop_0 - * Pseudo action: B_stop_0 - * Resource action: lxc1 start on 18node2 - * Resource action: lxc1 monitor=30000 on 18node2 - * Pseudo action: M_stop_0 - * Pseudo action: M-clone_stopped_0 - * Pseudo action: M-clone_start_0 - * Resource action: B start on lxc1 - * Resource action: M start on lxc1 - * Pseudo action: M-clone_running_0 - * Resource action: B monitor=10000 on lxc1 - * Resource action: M monitor=10000 on lxc1 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ 18node1 18node2 18node3 ] * GuestOnline: [ lxc1 lxc2 ] * Full List of Resources: - * container1 (ocf:heartbeat:VirtualDomain): Started 18node2 + * container1 (ocf:heartbeat:VirtualDomain): FAILED 18node2 * container2 (ocf:heartbeat:VirtualDomain): Started 18node2 * shoot1 (stonith:fence_xvm): Started 18node3 * Clone Set: M-clone [M]: - * Started: [ 18node1 18node2 18node3 lxc1 lxc2 ] + * M (ocf:pacemaker:Dummy): FAILED lxc1 + * Started: [ 18node1 18node2 18node3 lxc2 ] * A (ocf:pacemaker:Dummy): Started 18node1 - * B (ocf:pacemaker:Dummy): Started lxc1 + * B (ocf:pacemaker:Dummy): FAILED lxc1 * C (ocf:pacemaker:Dummy): Started lxc2 * D (ocf:pacemaker:Dummy): Started 18node1 diff --git a/cts/scheduler/summary/whitebox-fail3.summary b/cts/scheduler/summary/whitebox-fail3.summary index b7de4a7eaca..4098f68241e 100644 --- a/cts/scheduler/summary/whitebox-fail3.summary +++ b/cts/scheduler/summary/whitebox-fail3.summary @@ -1,4 +1,11 @@ +element primitive: Relax-NG validity error : Type ID doesn't allow value '18builder' +element primitive: Relax-NG validity error : Invalid sequence in interleave +element primitive: Relax-NG validity error : Element primitive failed to validate content +Entity: line 16: element primitive: Relax-NG validity error : Element resources has extra content: primitive Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Online: [ dvossel-laptop2 ] @@ -35,8 +42,15 @@ Executing Cluster Transition: * Resource action: FAKE start on 18builder * Resource action: W monitor=10000 on 18builder * Resource action: X monitor=10000 on 18builder +element primitive: Relax-NG validity error : Type ID doesn't allow value '18builder' +element primitive: Relax-NG validity error : Invalid sequence in interleave +element primitive: Relax-NG validity error : Element primitive failed to validate content +Entity: line 16: element primitive: Relax-NG validity error : Element resources has extra content: primitive Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify 
-LV) + * Node List: * Online: [ dvossel-laptop2 ] * GuestOnline: [ 18builder ] diff --git a/cts/scheduler/summary/whitebox-imply-stop-on-fence.summary b/cts/scheduler/summary/whitebox-imply-stop-on-fence.summary index 78506c5354e..2e3c943d8c3 100644 --- a/cts/scheduler/summary/whitebox-imply-stop-on-fence.summary +++ b/cts/scheduler/summary/whitebox-imply-stop-on-fence.summary @@ -1,8 +1,11 @@ Current cluster status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: * Node kiff-01: UNCLEAN (offline) * Online: [ kiff-02 ] - * GuestOnline: [ lxc-01_kiff-02 lxc-02_kiff-02 ] + * GuestOnline: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] * Full List of Resources: * fence-kiff-01 (stonith:fence_ipmilan): Started kiff-02 @@ -38,67 +41,37 @@ Transition Summary: * Recover vm-fs ( lxc-01_kiff-01 ) * Move lxc-01_kiff-01 ( kiff-01 -> kiff-02 ) * Move lxc-02_kiff-01 ( kiff-01 -> kiff-02 ) +Transition failed: terminated +An invalid transition was produced Executing Cluster Transition: - * Pseudo action: fence-kiff-02_stop_0 - * Resource action: dlm monitor on lxc-02_kiff-02 - * Resource action: dlm monitor on lxc-01_kiff-02 - * Resource action: clvmd monitor on lxc-02_kiff-02 - * Resource action: clvmd monitor on lxc-01_kiff-02 - * Resource action: shared0 monitor on lxc-02_kiff-02 - * Resource action: shared0 monitor on lxc-01_kiff-02 - * Resource action: vm-fs monitor on lxc-02_kiff-02 - * Resource action: vm-fs monitor on lxc-01_kiff-02 - * Pseudo action: lxc-01_kiff-01_stop_0 - * Pseudo action: lxc-02_kiff-01_stop_0 - * Fencing kiff-01 (reboot) - * Pseudo action: R-lxc-01_kiff-01_stop_0 - * Pseudo action: R-lxc-02_kiff-01_stop_0 - * Pseudo action: stonith-lxc-02_kiff-01-reboot on lxc-02_kiff-01 - * Pseudo action: stonith-lxc-01_kiff-01-reboot on lxc-01_kiff-01 - * Resource action: fence-kiff-02 start on kiff-02 - * Pseudo action: shared0-clone_stop_0 - * Resource action: R-lxc-01_kiff-01 start on kiff-02 - * Resource action: R-lxc-02_kiff-01 start on kiff-02 - * Pseudo action: vm-fs_stop_0 - * Resource action: lxc-01_kiff-01 start on kiff-02 - * Resource action: lxc-02_kiff-01 start on kiff-02 - * Resource action: fence-kiff-02 monitor=60000 on kiff-02 - * Pseudo action: shared0_stop_0 - * Pseudo action: shared0-clone_stopped_0 - * Resource action: R-lxc-01_kiff-01 monitor=10000 on kiff-02 - * Resource action: R-lxc-02_kiff-01 monitor=10000 on kiff-02 - * Resource action: vm-fs start on lxc-01_kiff-01 - * Resource action: lxc-01_kiff-01 monitor=30000 on kiff-02 - * Resource action: lxc-02_kiff-01 monitor=30000 on kiff-02 - * Pseudo action: clvmd-clone_stop_0 - * Resource action: vm-fs monitor=20000 on lxc-01_kiff-01 - * Pseudo action: clvmd_stop_0 - * Pseudo action: clvmd-clone_stopped_0 - * Pseudo action: dlm-clone_stop_0 - * Pseudo action: dlm_stop_0 - * Pseudo action: dlm-clone_stopped_0 Revised Cluster Status: + * Cluster Summary: + * CIB syntax has errors (for details, run crm_verify -LV) + * Node List: + * Node kiff-01: UNCLEAN (offline) * Online: [ kiff-02 ] - * OFFLINE: [ kiff-01 ] * GuestOnline: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] * Full List of Resources: * fence-kiff-01 (stonith:fence_ipmilan): Started kiff-02 - * fence-kiff-02 (stonith:fence_ipmilan): Started kiff-02 + * fence-kiff-02 (stonith:fence_ipmilan): Started kiff-01 (UNCLEAN) * Clone Set: dlm-clone [dlm]: + * dlm (ocf:pacemaker:controld): Started kiff-01 (UNCLEAN) * Started: [ kiff-02 ] - * Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 
lxc-02_kiff-01 lxc-02_kiff-02 ] + * Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] * Clone Set: clvmd-clone [clvmd]: + * clvmd (ocf:heartbeat:clvm): Started kiff-01 (UNCLEAN) * Started: [ kiff-02 ] - * Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] + * Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] * Clone Set: shared0-clone [shared0]: + * shared0 (ocf:heartbeat:Filesystem): Started kiff-01 (UNCLEAN) * Started: [ kiff-02 ] - * Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] - * R-lxc-01_kiff-01 (ocf:heartbeat:VirtualDomain): Started kiff-02 - * R-lxc-02_kiff-01 (ocf:heartbeat:VirtualDomain): Started kiff-02 + * Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ] + * R-lxc-01_kiff-01 (ocf:heartbeat:VirtualDomain): FAILED kiff-01 (UNCLEAN) + * R-lxc-02_kiff-01 (ocf:heartbeat:VirtualDomain): Started kiff-01 (UNCLEAN) * R-lxc-01_kiff-02 (ocf:heartbeat:VirtualDomain): Started kiff-02 * R-lxc-02_kiff-02 (ocf:heartbeat:VirtualDomain): Started kiff-02 - * vm-fs (ocf:heartbeat:Filesystem): Started lxc-01_kiff-01 + * vm-fs (ocf:heartbeat:Filesystem): FAILED lxc-01_kiff-01 diff --git a/cts/scheduler/summary/whitebox-migrate1.summary b/cts/scheduler/summary/whitebox-migrate1.summary index f86454827c6..34f12027038 100644 --- a/cts/scheduler/summary/whitebox-migrate1.summary +++ b/cts/scheduler/summary/whitebox-migrate1.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ rhel7-node2 rhel7-node3 ] * GuestOnline: [ rhel7-node1 ] @@ -21,36 +23,21 @@ Transition Summary: * Migrate rhel7-node1 ( rhel7-node2 -> rhel7-node3 ) Executing Cluster Transition: - * Resource action: shooter1 stop on rhel7-node3 - * Resource action: FAKE3 stop on rhel7-node3 - * Resource action: rhel7-node1 monitor on rhel7-node3 - * Resource action: shooter1 start on rhel7-node2 - * Resource action: FAKE3 start on rhel7-node2 - * Resource action: remote-rsc migrate_to on rhel7-node2 - * Resource action: shooter1 monitor=60000 on rhel7-node2 - * Resource action: FAKE3 monitor=10000 on rhel7-node2 - * Resource action: remote-rsc migrate_from on rhel7-node3 - * Resource action: rhel7-node1 migrate_to on rhel7-node2 - * Resource action: rhel7-node1 migrate_from on rhel7-node3 - * Resource action: rhel7-node1 stop on rhel7-node2 - * Resource action: remote-rsc stop on rhel7-node2 - * Pseudo action: remote-rsc_start_0 - * Pseudo action: rhel7-node1_start_0 - * Resource action: remote-rsc monitor=10000 on rhel7-node3 - * Resource action: rhel7-node1 monitor=30000 on rhel7-node3 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ rhel7-node2 rhel7-node3 ] * GuestOnline: [ rhel7-node1 ] * Full List of Resources: - * shooter1 (stonith:fence_xvm): Started rhel7-node2 + * shooter1 (stonith:fence_xvm): Started rhel7-node3 * FAKE1 (ocf:heartbeat:Dummy): Started rhel7-node1 * FAKE2 (ocf:heartbeat:Dummy): Started rhel7-node1 - * FAKE3 (ocf:heartbeat:Dummy): Started rhel7-node2 + * FAKE3 (ocf:heartbeat:Dummy): Started rhel7-node3 * FAKE4 (ocf:heartbeat:Dummy): Started rhel7-node3 * FAKE5 (ocf:heartbeat:Dummy): Started rhel7-node2 * FAKE6 (ocf:heartbeat:Dummy): Started rhel7-node1 * FAKE7 (ocf:heartbeat:Dummy): Started rhel7-node3 - * remote-rsc (ocf:heartbeat:Dummy): Started rhel7-node3 + * remote-rsc (ocf:heartbeat:Dummy): Started rhel7-node2 diff --git a/cts/scheduler/summary/whitebox-move.summary b/cts/scheduler/summary/whitebox-move.summary index 
88846e2c612..b9c03d93526 100644 --- a/cts/scheduler/summary/whitebox-move.summary +++ b/cts/scheduler/summary/whitebox-move.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ 18node1 18node2 18node3 ] * GuestOnline: [ lxc1 lxc2 ] @@ -18,30 +20,16 @@ Transition Summary: * Move lxc1 ( 18node1 -> 18node2 ) Executing Cluster Transition: - * Pseudo action: M-clone_stop_0 - * Resource action: A stop on lxc1 - * Resource action: A monitor on lxc2 - * Resource action: M stop on lxc1 - * Pseudo action: M-clone_stopped_0 - * Pseudo action: M-clone_start_0 - * Resource action: lxc1 stop on 18node1 - * Resource action: container1 stop on 18node1 - * Resource action: container1 start on 18node2 - * Resource action: lxc1 start on 18node2 - * Resource action: M start on lxc1 - * Resource action: M monitor=10000 on lxc1 - * Pseudo action: M-clone_running_0 - * Resource action: A start on lxc1 - * Resource action: A monitor=10000 on lxc1 - * Resource action: lxc1 monitor=30000 on 18node2 Revised Cluster Status: + * Cluster Summary: + * Node List: * Online: [ 18node1 18node2 18node3 ] * GuestOnline: [ lxc1 lxc2 ] * Full List of Resources: - * container1 (ocf:heartbeat:VirtualDomain): Started 18node2 + * container1 (ocf:heartbeat:VirtualDomain): Started 18node1 * container2 (ocf:heartbeat:VirtualDomain): Started 18node2 * shoot1 (stonith:fence_xvm): Started 18node3 * Clone Set: M-clone [M]: diff --git a/cts/scheduler/summary/whitebox-ms-ordering-move.summary b/cts/scheduler/summary/whitebox-ms-ordering-move.summary index 00076986ccf..6cb36983648 100644 --- a/cts/scheduler/summary/whitebox-ms-ordering-move.summary +++ b/cts/scheduler/summary/whitebox-ms-ordering-move.summary @@ -1,4 +1,6 @@ Current cluster status: + * Cluster Summary: + * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] * GuestOnline: [ lxc1 lxc2 ] @@ -36,45 +38,10 @@ Transition Summary: * Move lxc1 ( rhel7-1 -> rhel7-2 ) Executing Cluster Transition: - * Resource action: rsc_rhel7-1 monitor on lxc2 - * Resource action: rsc_rhel7-2 monitor on lxc2 - * Resource action: rsc_rhel7-3 monitor on lxc2 - * Resource action: rsc_rhel7-4 monitor on lxc2 - * Resource action: rsc_rhel7-5 monitor on lxc2 - * Resource action: migrator monitor on lxc2 - * Resource action: ping-1 monitor on lxc2 - * Resource action: stateful-1 monitor on lxc2 - * Resource action: r192.168.122.207 monitor on lxc2 - * Resource action: petulant monitor on lxc2 - * Resource action: r192.168.122.208 monitor on lxc2 - * Resource action: lsb-dummy monitor on lxc2 - * Pseudo action: lxc-ms-master_demote_0 - * Resource action: lxc1 monitor on rhel7-5 - * Resource action: lxc1 monitor on rhel7-4 - * Resource action: lxc1 monitor on rhel7-3 - * Resource action: lxc1 monitor on rhel7-2 - * Resource action: lxc2 monitor on rhel7-5 - * Resource action: lxc2 monitor on rhel7-4 - * Resource action: lxc2 monitor on rhel7-3 - * Resource action: lxc2 monitor on rhel7-2 - * Resource action: lxc-ms demote on lxc1 - * Pseudo action: lxc-ms-master_demoted_0 - * Pseudo action: lxc-ms-master_stop_0 - * Resource action: lxc-ms stop on lxc1 - * Pseudo action: lxc-ms-master_stopped_0 - * Pseudo action: lxc-ms-master_start_0 - * Resource action: lxc1 stop on rhel7-1 - * Resource action: container1 stop on rhel7-1 - * Resource action: container1 start on rhel7-2 - * Resource action: lxc1 start on rhel7-2 - * Resource action: lxc-ms start on lxc1 - * Pseudo action: lxc-ms-master_running_0 - * Resource action: lxc1 monitor=30000 on rhel7-2 - * 
Pseudo action: lxc-ms-master_promote_0
- * Resource action: lxc-ms promote on lxc1
- * Pseudo action: lxc-ms-master_promoted_0

 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
 * GuestOnline: [ lxc1 lxc2 ]
@@ -100,7 +67,7 @@ Revised Cluster Status:
 * petulant (service:DummySD): Started rhel7-3
 * r192.168.122.208 (ocf:heartbeat:IPaddr2): Started rhel7-3
 * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started rhel7-3
- * container1 (ocf:heartbeat:VirtualDomain): Started rhel7-2
+ * container1 (ocf:heartbeat:VirtualDomain): Started rhel7-1
 * container2 (ocf:heartbeat:VirtualDomain): Started rhel7-1
 * Clone Set: lxc-ms-master [lxc-ms] (promotable):
 * Promoted: [ lxc1 ]
diff --git a/cts/scheduler/summary/whitebox-ms-ordering.summary b/cts/scheduler/summary/whitebox-ms-ordering.summary
index 06ac35671b9..f00bc6b4b11 100644
--- a/cts/scheduler/summary/whitebox-ms-ordering.summary
+++ b/cts/scheduler/summary/whitebox-ms-ordering.summary
@@ -1,12 +1,17 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ 18node1 18node2 18node3 ]
+ * GuestOnline: [ lxc1 lxc2 ]

 * Full List of Resources:
 * shooter (stonith:fence_xvm): Started 18node2
 * container1 (ocf:heartbeat:VirtualDomain): FAILED
 * container2 (ocf:heartbeat:VirtualDomain): FAILED
 * Clone Set: lxc-ms-master [lxc-ms] (promotable):
+ * lxc-ms (ocf:pacemaker:Stateful): FAILED Promoted lxc1
+ * lxc-ms (ocf:pacemaker:Stateful): FAILED lxc2
 * Stopped: [ 18node1 18node2 18node3 ]

 Transition Summary:
@@ -20,54 +25,19 @@ Transition Summary:
 * Start lxc2 ( 18node1 )

 Executing Cluster Transition:
- * Resource action: container1 monitor on 18node3
- * Resource action: container1 monitor on 18node2
- * Resource action: container1 monitor on 18node1
- * Resource action: container2 monitor on 18node3
- * Resource action: container2 monitor on 18node2
- * Resource action: container2 monitor on 18node1
- * Resource action: lxc-ms monitor on 18node3
- * Resource action: lxc-ms monitor on 18node2
- * Resource action: lxc-ms monitor on 18node1
- * Pseudo action: lxc-ms-master_demote_0
- * Resource action: lxc1 monitor on 18node3
- * Resource action: lxc1 monitor on 18node2
- * Resource action: lxc1 monitor on 18node1
- * Resource action: lxc2 monitor on 18node3
- * Resource action: lxc2 monitor on 18node2
- * Resource action: lxc2 monitor on 18node1
- * Pseudo action: stonith-lxc2-reboot on lxc2
- * Pseudo action: stonith-lxc1-reboot on lxc1
- * Resource action: container1 start on 18node1
- * Resource action: container2 start on 18node1
- * Pseudo action: lxc-ms_demote_0
- * Pseudo action: lxc-ms-master_demoted_0
- * Pseudo action: lxc-ms-master_stop_0
- * Resource action: lxc1 start on 18node1
- * Resource action: lxc2 start on 18node1
- * Pseudo action: lxc-ms_stop_0
- * Pseudo action: lxc-ms_stop_0
- * Pseudo action: lxc-ms-master_stopped_0
- * Pseudo action: lxc-ms-master_start_0
- * Resource action: lxc1 monitor=30000 on 18node1
- * Resource action: lxc2 monitor=30000 on 18node1
- * Resource action: lxc-ms start on lxc1
- * Resource action: lxc-ms start on lxc2
- * Pseudo action: lxc-ms-master_running_0
- * Resource action: lxc-ms monitor=10000 on lxc2
- * Pseudo action: lxc-ms-master_promote_0
- * Resource action: lxc-ms promote on lxc1
- * Pseudo action: lxc-ms-master_promoted_0

 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ 18node1 18node2 18node3 ]
 * GuestOnline: [ lxc1 lxc2 ]

 * Full List of Resources:
 * shooter (stonith:fence_xvm): Started 18node2
- * container1 (ocf:heartbeat:VirtualDomain): Started 18node1
- * container2 (ocf:heartbeat:VirtualDomain): Started 18node1
+ * container1 (ocf:heartbeat:VirtualDomain): FAILED
+ * container2 (ocf:heartbeat:VirtualDomain): FAILED
 * Clone Set: lxc-ms-master [lxc-ms] (promotable):
- * Promoted: [ lxc1 ]
- * Unpromoted: [ lxc2 ]
+ * lxc-ms (ocf:pacemaker:Stateful): FAILED Promoted lxc1
+ * lxc-ms (ocf:pacemaker:Stateful): FAILED lxc2
+ * Stopped: [ 18node1 18node2 18node3 ]
diff --git a/cts/scheduler/summary/whitebox-nested-group.summary b/cts/scheduler/summary/whitebox-nested-group.summary
index d97c0794006..915a316a962 100644
--- a/cts/scheduler/summary/whitebox-nested-group.summary
+++ b/cts/scheduler/summary/whitebox-nested-group.summary
@@ -1,6 +1,9 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ c7auto1 c7auto2 c7auto3 ]
+ * GuestOnline: [ c7auto4 ]

 * Full List of Resources:
 * shooter (stonith:fence_phd_kvm): Started c7auto2
@@ -30,73 +33,23 @@ Transition Summary:
 * Start c7auto4 ( c7auto1 )

 Executing Cluster Transition:
- * Resource action: fake1 monitor on c7auto3
- * Resource action: fake1 monitor on c7auto2
- * Resource action: fake1 monitor on c7auto1
- * Resource action: fake2 monitor on c7auto3
- * Resource action: fake2 monitor on c7auto2
- * Resource action: fake2 monitor on c7auto1
- * Resource action: fake3 monitor on c7auto3
- * Resource action: fake3 monitor on c7auto2
- * Resource action: fake3 monitor on c7auto1
- * Resource action: fake4 monitor on c7auto3
- * Resource action: fake4 monitor on c7auto2
- * Resource action: fake4 monitor on c7auto1
- * Resource action: fake5 monitor on c7auto3
- * Resource action: fake5 monitor on c7auto2
- * Resource action: fake5 monitor on c7auto1
- * Resource action: fake:0 monitor on c7auto2
- * Resource action: fake:1 monitor on c7auto3
- * Resource action: fake:3 monitor on c7auto1
- * Pseudo action: fake_clone_start_0
- * Pseudo action: fake_group_start_0
- * Resource action: fake_fs monitor on c7auto3
- * Resource action: fake_fs monitor on c7auto2
- * Resource action: fake_fs monitor on c7auto1
- * Resource action: c7auto4 monitor on c7auto3
- * Resource action: c7auto4 monitor on c7auto2
- * Resource action: c7auto4 monitor on c7auto1
- * Resource action: fake1 start on c7auto3
- * Resource action: fake3 start on c7auto2
- * Resource action: fake4 start on c7auto3
- * Resource action: fake:0 start on c7auto2
- * Resource action: fake:1 start on c7auto3
- * Resource action: fake:3 start on c7auto1
- * Resource action: fake_fs start on c7auto1
- * Resource action: container start on c7auto1
- * Resource action: c7auto4 start on c7auto1
- * Resource action: fake1 monitor=10000 on c7auto3
- * Resource action: fake2 start on c7auto4
- * Resource action: fake3 monitor=10000 on c7auto2
- * Resource action: fake4 monitor=10000 on c7auto3
- * Resource action: fake5 start on c7auto4
- * Resource action: fake:0 monitor=10000 on c7auto2
- * Resource action: fake:1 monitor=10000 on c7auto3
- * Resource action: fake:2 start on c7auto4
- * Resource action: fake:3 monitor=10000 on c7auto1
- * Pseudo action: fake_clone_running_0
- * Pseudo action: fake_group_running_0
- * Resource action: fake_fs monitor=10000 on c7auto1
- * Resource action: container monitor=10000 on c7auto1
- * Resource action: c7auto4 monitor=30000 on c7auto1
- * Resource action: fake2 monitor=10000 on c7auto4
- * Resource action: fake5 monitor=10000 on c7auto4
- * Resource action: fake:2 monitor=10000 on c7auto4

 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ c7auto1 c7auto2 c7auto3 ]
 * GuestOnline: [ c7auto4 ]

 * Full List of Resources:
 * shooter (stonith:fence_phd_kvm): Started c7auto2
- * fake1 (ocf:heartbeat:Dummy): Started c7auto3
- * fake2 (ocf:heartbeat:Dummy): Started c7auto4
- * fake3 (ocf:heartbeat:Dummy): Started c7auto2
- * fake4 (ocf:heartbeat:Dummy): Started c7auto3
- * fake5 (ocf:heartbeat:Dummy): Started c7auto4
+ * fake1 (ocf:heartbeat:Dummy): Stopped
+ * fake2 (ocf:heartbeat:Dummy): Stopped
+ * fake3 (ocf:heartbeat:Dummy): Stopped
+ * fake4 (ocf:heartbeat:Dummy): Stopped
+ * fake5 (ocf:heartbeat:Dummy): Stopped
 * Clone Set: fake_clone [fake]:
- * Started: [ c7auto1 c7auto2 c7auto3 c7auto4 ]
+ * Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ]
 * Resource Group: fake_group:
- * fake_fs (ocf:heartbeat:Dummy): Started c7auto1
- * container (ocf:heartbeat:Dummy): Started c7auto1
+ * fake_fs (ocf:heartbeat:Dummy): Stopped
+ * container (ocf:heartbeat:Dummy): Stopped
diff --git a/cts/scheduler/summary/whitebox-orphan-ms.summary b/cts/scheduler/summary/whitebox-orphan-ms.summary
index e7df2d81bf7..7beb280764e 100644
--- a/cts/scheduler/summary/whitebox-orphan-ms.summary
+++ b/cts/scheduler/summary/whitebox-orphan-ms.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+ * Cluster Summary:
+ * CIB syntax has errors (for details, run crm_verify -LV)
+
 * Node List:
 * Online: [ 18node1 18node2 18node3 ]
 * GuestOnline: [ lxc1 lxc2 ]
@@ -37,40 +40,19 @@ Transition Summary:
 * Stop container1 ( 18node1 ) due to node availability

 Executing Cluster Transition:
- * Resource action: FencingFail stop on 18node3
- * Resource action: lxc-ms demote on lxc2
- * Resource action: lxc-ms demote on lxc1
- * Resource action: FencingFail start on 18node1
- * Resource action: lxc-ms stop on lxc2
- * Resource action: lxc-ms stop on lxc1
- * Resource action: lxc-ms delete on 18node3
- * Resource action: lxc-ms delete on 18node2
- * Resource action: lxc-ms delete on 18node1
- * Resource action: lxc2 stop on 18node1
- * Resource action: lxc2 delete on 18node3
- * Resource action: lxc2 delete on 18node2
- * Resource action: lxc2 delete on 18node1
- * Resource action: container2 stop on 18node1
- * Resource action: container2 delete on 18node3
- * Resource action: container2 delete on 18node2
- * Resource action: container2 delete on 18node1
- * Resource action: lxc1 stop on 18node1
- * Resource action: lxc1 delete on 18node3
- * Resource action: lxc1 delete on 18node2
- * Resource action: lxc1 delete on 18node1
- * Resource action: container1 stop on 18node1
- * Resource action: container1 delete on 18node3
- * Resource action: container1 delete on 18node2
- * Resource action: container1 delete on 18node1

 Revised Cluster Status:
+ * Cluster Summary:
+ * CIB syntax has errors (for details, run crm_verify -LV)
+
 * Node List:
 * Online: [ 18node1 18node2 18node3 ]
+ * GuestOnline: [ lxc1 lxc2 ]

 * Full List of Resources:
 * Fencing (stonith:fence_xvm): Started 18node2
 * FencingPass (stonith:fence_dummy): Started 18node3
- * FencingFail (stonith:fence_dummy): Started 18node1
+ * FencingFail (stonith:fence_dummy): Started 18node3
 * rsc_18node1 (ocf:heartbeat:IPaddr2): Started 18node1
 * rsc_18node2 (ocf:heartbeat:IPaddr2): Started 18node2
 * rsc_18node3 (ocf:heartbeat:IPaddr2): Started 18node3
@@ -85,3 +67,8 @@ Revised Cluster Status:
 * r192.168.122.88 (ocf:heartbeat:IPaddr2): Started 18node1
 * r192.168.122.89 (ocf:heartbeat:IPaddr2): Started 18node1
 * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started 18node1
+ * container2 (ocf:heartbeat:VirtualDomain): ORPHANED Started 18node1
+ * lxc1 (ocf:pacemaker:remote): ORPHANED Started 18node1
+ * lxc-ms (ocf:pacemaker:Stateful): ORPHANED Promoted [ lxc1 lxc2 ]
+ * lxc2 (ocf:pacemaker:remote): ORPHANED Started 18node1
+ * container1 (ocf:heartbeat:VirtualDomain): ORPHANED Started 18node1
diff --git a/cts/scheduler/summary/whitebox-orphaned.summary b/cts/scheduler/summary/whitebox-orphaned.summary
index 8d5efb48ee7..3ecb88c98e0 100644
--- a/cts/scheduler/summary/whitebox-orphaned.summary
+++ b/cts/scheduler/summary/whitebox-orphaned.summary
@@ -1,4 +1,7 @@
 Current cluster status:
+ * Cluster Summary:
+ * CIB syntax has errors (for details, run crm_verify -LV)
+
 * Node List:
 * Online: [ 18node1 18node2 18node3 ]
 * GuestOnline: [ lxc1 lxc2 ]
@@ -21,39 +24,28 @@ Transition Summary:
 * Move B ( lxc1 -> lxc2 )
 * Stop container1 ( 18node2 ) due to node availability
 * Stop lxc1 ( 18node2 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced

 Executing Cluster Transition:
- * Pseudo action: M-clone_stop_0
- * Resource action: A monitor on lxc2
- * Resource action: B stop on lxc1
- * Resource action: B monitor on lxc2
- * Resource action: D monitor on lxc2
- * Cluster action: clear_failcount for container1 on 18node2
- * Cluster action: clear_failcount for lxc1 on 18node2
- * Resource action: M stop on lxc1
- * Pseudo action: M-clone_stopped_0
- * Resource action: B start on lxc2
- * Resource action: lxc1 stop on 18node2
- * Resource action: lxc1 delete on 18node3
- * Resource action: lxc1 delete on 18node2
- * Resource action: lxc1 delete on 18node1
- * Resource action: B monitor=10000 on lxc2
- * Resource action: container1 stop on 18node2
- * Resource action: container1 delete on 18node3
- * Resource action: container1 delete on 18node2
- * Resource action: container1 delete on 18node1

 Revised Cluster Status:
+ * Cluster Summary:
+ * CIB syntax has errors (for details, run crm_verify -LV)
+
 * Node List:
 * Online: [ 18node1 18node2 18node3 ]
- * GuestOnline: [ lxc2 ]
+ * GuestOnline: [ lxc1 lxc2 ]

 * Full List of Resources:
 * container2 (ocf:heartbeat:VirtualDomain): Started 18node2
 * shoot1 (stonith:fence_xvm): Started 18node3
 * Clone Set: M-clone [M]:
+ * M (ocf:pacemaker:Dummy): ORPHANED Started lxc1
 * Started: [ 18node1 18node2 18node3 lxc2 ]
 * A (ocf:pacemaker:Dummy): Started 18node1
- * B (ocf:pacemaker:Dummy): Started lxc2
+ * B (ocf:pacemaker:Dummy): Started lxc1
 * C (ocf:pacemaker:Dummy): Started lxc2
 * D (ocf:pacemaker:Dummy): Started 18node1
+ * container1 (ocf:heartbeat:VirtualDomain): ORPHANED Started 18node2
+ * lxc1 (ocf:pacemaker:remote): ORPHANED Started 18node2
diff --git a/cts/scheduler/summary/whitebox-start.summary b/cts/scheduler/summary/whitebox-start.summary
index e17cde17a9e..de0ac59752e 100644
--- a/cts/scheduler/summary/whitebox-start.summary
+++ b/cts/scheduler/summary/whitebox-start.summary
@@ -1,7 +1,9 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ 18node1 18node2 18node3 ]
- * GuestOnline: [ lxc2 ]
+ * GuestOnline: [ lxc1 lxc2 ]

 * Full List of Resources:
 * container1 (ocf:heartbeat:VirtualDomain): Stopped
@@ -23,34 +25,22 @@ Transition Summary:
 * Start lxc1 ( 18node1 )

 Executing Cluster Transition:
- * Resource action: container1 start on 18node1
- * Pseudo action: M-clone_start_0
- * Resource action: A monitor on lxc2
- * Resource action: B stop on lxc2
- * Resource action: D monitor on lxc2
- * Resource action: lxc1 start on 18node1
- * Resource action: M start on lxc1
- * Pseudo action: M-clone_running_0
- * Resource action: A stop on 18node1
- * Resource action: B start on 18node3
- * Resource action: lxc1 monitor=30000 on 18node1
- * Resource action: M monitor=10000 on lxc1
- * Resource action: A start on lxc1
- * Resource action: B monitor=10000 on 18node3
- * Resource action: A monitor=10000 on lxc1

 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ 18node1 18node2 18node3 ]
 * GuestOnline: [ lxc1 lxc2 ]

 * Full List of Resources:
- * container1 (ocf:heartbeat:VirtualDomain): Started 18node1
+ * container1 (ocf:heartbeat:VirtualDomain): Stopped
 * container2 (ocf:heartbeat:VirtualDomain): Started 18node2
 * shoot1 (stonith:fence_xvm): Started 18node3
 * Clone Set: M-clone [M]:
- * Started: [ 18node1 18node2 18node3 lxc1 lxc2 ]
- * A (ocf:pacemaker:Dummy): Started lxc1
- * B (ocf:pacemaker:Dummy): Started 18node3
+ * Started: [ 18node1 18node2 18node3 lxc2 ]
+ * Stopped: [ lxc1 ]
+ * A (ocf:pacemaker:Dummy): Started 18node1
+ * B (ocf:pacemaker:Dummy): Started lxc2
 * C (ocf:pacemaker:Dummy): Started lxc2
 * D (ocf:pacemaker:Dummy): Started 18node1
diff --git a/cts/scheduler/summary/whitebox-stop.summary b/cts/scheduler/summary/whitebox-stop.summary
index a7a5e0fceb6..bc248ebbe19 100644
--- a/cts/scheduler/summary/whitebox-stop.summary
+++ b/cts/scheduler/summary/whitebox-stop.summary
@@ -1,6 +1,8 @@
 1 of 14 resource instances DISABLED and 0 BLOCKED from further action due to failure

 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ 18node1 18node2 18node3 ]
 * GuestOnline: [ lxc1 lxc2 ]
@@ -21,33 +23,25 @@ Transition Summary:
 * Stop M:4 ( lxc1 ) due to node availability
 * Move B ( lxc1 -> lxc2 )
 * Stop lxc1 ( 18node2 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced

 Executing Cluster Transition:
- * Pseudo action: M-clone_stop_0
- * Resource action: A monitor on lxc2
- * Resource action: B stop on lxc1
- * Resource action: B monitor on lxc2
- * Resource action: D monitor on lxc2
- * Resource action: M stop on lxc1
- * Pseudo action: M-clone_stopped_0
- * Resource action: B start on lxc2
- * Resource action: lxc1 stop on 18node2
- * Resource action: container1 stop on 18node2
- * Resource action: B monitor=10000 on lxc2

 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ 18node1 18node2 18node3 ]
- * GuestOnline: [ lxc2 ]
+ * GuestOnline: [ lxc1 lxc2 ]

 * Full List of Resources:
- * container1 (ocf:heartbeat:VirtualDomain): Stopped (disabled)
+ * container1 (ocf:heartbeat:VirtualDomain): Started 18node2 (disabled)
 * container2 (ocf:heartbeat:VirtualDomain): Started 18node2
 * shoot1 (stonith:fence_xvm): Started 18node3
 * Clone Set: M-clone [M]:
- * Started: [ 18node1 18node2 18node3 lxc2 ]
- * Stopped: [ lxc1 ]
+ * Started: [ 18node1 18node2 18node3 lxc1 lxc2 ]
 * A (ocf:pacemaker:Dummy): Started 18node1
- * B (ocf:pacemaker:Dummy): Started lxc2
+ * B (ocf:pacemaker:Dummy): Started lxc1
 * C (ocf:pacemaker:Dummy): Started lxc2
 * D (ocf:pacemaker:Dummy): Started 18node1
diff --git a/cts/scheduler/summary/whitebox-unexpectedly-running.summary b/cts/scheduler/summary/whitebox-unexpectedly-running.summary
index 597349719de..33018fe6079 100644
--- a/cts/scheduler/summary/whitebox-unexpectedly-running.summary
+++ b/cts/scheduler/summary/whitebox-unexpectedly-running.summary
@@ -1,6 +1,9 @@
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ 18builder ]
+ * GuestOnline: [ remote1 remote2 ]

 * Full List of Resources:
 * FAKE (ocf:pacemaker:Dummy): Started 18builder
@@ -13,23 +16,14 @@ Transition Summary:
 * Start remote2 ( 18builder )

 Executing Cluster Transition:
- * Resource action: FAKE monitor=60000 on 18builder
- * Resource action: FAKE-crashed stop on 18builder
- * Resource action: remote1 monitor on 18builder
- * Resource action: remote2 monitor on 18builder
- * Pseudo action: stonith-remote2-reboot on remote2
- * Resource action: FAKE-crashed start on 18builder
- * Resource action: remote1 start on 18builder
- * Resource action: remote2 start on 18builder
- * Resource action: FAKE-crashed monitor=60000 on 18builder
- * Resource action: remote1 monitor=30000 on 18builder
- * Resource action: remote2 monitor=30000 on 18builder

 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * Online: [ 18builder ]
 * GuestOnline: [ remote1 remote2 ]

 * Full List of Resources:
 * FAKE (ocf:pacemaker:Dummy): Started 18builder
- * FAKE-crashed (ocf:pacemaker:Dummy): Started 18builder
+ * FAKE-crashed (ocf:pacemaker:Dummy): FAILED 18builder
diff --git a/cts/scheduler/summary/year-2038.summary b/cts/scheduler/summary/year-2038.summary
index edaed2246d7..f8a9ebc6086 100644
--- a/cts/scheduler/summary/year-2038.summary
+++ b/cts/scheduler/summary/year-2038.summary
@@ -1,5 +1,7 @@
 Using the original execution date of: 2038-02-17 06:13:20Z
 Current cluster status:
+ * Cluster Summary:
+
 * Node List:
 * RemoteNode overcloud-novacompute-1: UNCLEAN (offline)
 * Online: [ controller-0 controller-1 controller-2 ]
@@ -35,7 +37,7 @@ Current cluster status:
 * Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]:
 * compute-unfence-trigger (ocf:pacemaker:Dummy): Started overcloud-novacompute-1 (UNCLEAN)
 * Started: [ overcloud-novacompute-0 ]
- * Stopped: [ controller-0 controller-1 controller-2 ]
+ * Stopped: [ controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
 * nova-evacuate (ocf:openstack:NovaEvacuate): Started controller-0
 * stonith-fence_ipmilan-5254008be2cc (stonith:fence_ipmilan): Started controller-1
 * stonith-fence_ipmilan-525400803f9e (stonith:fence_ipmilan): Started controller-0
@@ -51,22 +53,15 @@ Transition Summary:
 * Start ip-10.0.0.110 ( controller-1 )
 * Recover stonith-fence_compute-fence-nova ( controller-2 )
 * Stop compute-unfence-trigger:1 ( overcloud-novacompute-1 ) due to node availability
+Transition failed: terminated
+An invalid transition was produced

 Executing Cluster Transition:
- * Resource action: overcloud-novacompute-1 stop on controller-1
- * Resource action: stonith-fence_compute-fence-nova stop on controller-2
- * Fencing overcloud-novacompute-1 (reboot)
- * Cluster action: clear_failcount for overcloud-novacompute-1 on controller-1
- * Resource action: ip-10.0.0.110 start on controller-1
- * Resource action: stonith-fence_compute-fence-nova start on controller-2
- * Resource action: stonith-fence_compute-fence-nova monitor=60000 on controller-2
- * Pseudo action: compute-unfence-trigger-clone_stop_0
- * Resource action: ip-10.0.0.110 monitor=10000 on controller-1
- * Pseudo action: compute-unfence-trigger_stop_0
- * Pseudo action: compute-unfence-trigger-clone_stopped_0
 Using the original execution date of: 2038-02-17 06:13:20Z

 Revised Cluster Status:
+ * Cluster Summary:
+
 * Node List:
 * RemoteNode overcloud-novacompute-1: UNCLEAN (offline)
 * Online: [ controller-0 controller-1 controller-2 ]
@@ -75,7 +70,7 @@ Revised Cluster Status:
 * Full List of Resources:
 * overcloud-novacompute-0 (ocf:pacemaker:remote): Started controller-0
- * overcloud-novacompute-1 (ocf:pacemaker:remote): FAILED
+ * overcloud-novacompute-1 (ocf:pacemaker:remote): FAILED controller-1
 * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp13/openstack-rabbitmq:pcmklatest]:
 * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started controller-2
 * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-0
@@ -89,7 +84,7 @@ Revised Cluster Status:
 * redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-0
 * redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-1
 * ip-192.168.24.11 (ocf:heartbeat:IPaddr2): Started controller-2
- * ip-10.0.0.110 (ocf:heartbeat:IPaddr2): Started controller-1
+ * ip-10.0.0.110 (ocf:heartbeat:IPaddr2): Stopped
 * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1
 * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-2
 * ip-172.17.3.11 (ocf:heartbeat:IPaddr2): Started controller-0
@@ -98,10 +93,11 @@ Revised Cluster Status:
 * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started controller-2
 * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-0
 * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-1
- * stonith-fence_compute-fence-nova (stonith:fence_compute): Started controller-2
+ * stonith-fence_compute-fence-nova (stonith:fence_compute): FAILED controller-2
 * Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]:
+ * compute-unfence-trigger (ocf:pacemaker:Dummy): Started overcloud-novacompute-1 (UNCLEAN)
 * Started: [ overcloud-novacompute-0 ]
- * Stopped: [ controller-0 controller-1 controller-2 overcloud-novacompute-1 ]
+ * Stopped: [ controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
 * nova-evacuate (ocf:openstack:NovaEvacuate): Started controller-0
 * stonith-fence_ipmilan-5254008be2cc (stonith:fence_ipmilan): Started controller-1
 * stonith-fence_ipmilan-525400803f9e (stonith:fence_ipmilan): Started controller-0

From e93a0ad958a7413a0463eb17f5fd1fe0f26ed56e Mon Sep 17 00:00:00 2001
From: Grace Chin
Date: Wed, 10 Apr 2024 12:10:30 -0400
Subject: [PATCH 6/7] debug 1

---
 cts/cli/regression.crm_mon.exp | 375 +++++++++++++++--------
 cts/cli/regression.feature_set.exp | 22 +-
 cts/cli/regression.tools.exp | 465 ++++++++++++++++-------------
 cts/cli/regression.validity.exp | 188 ++----------
 lib/pacemaker/pcmk_output.c | 45 ++-
 lib/pengine/pe_output.c | 3 +-
 6 files changed, 574 insertions(+), 524 deletions(-)

diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp
index 7f2f0248223..1fe46faba37 100644
--- a/cts/cli/regression.crm_mon.exp
+++ b/cts/cli/regression.crm_mon.exp
@@ -4,12 +4,13 @@ Cluster Summary:
 * Current DC: cluster02 (version) - partition with quorum
 * Last updated:
 * Last change:
+ * CIB syntax has errors (for details, run crm_verify -LV)
 * 5 nodes configured
 * 32 resource instances configured (4 DISABLED)

 Node List:
 * Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]

 Active Resources:
 * Clone Set: ping-clone [ping]:
@@ -46,7 +47,7 @@ Active Resources:
-
+
@@ -242,9 +243,12 @@ Active Resources:
-
-
+
+
+ This tool can only check complete configurations (i.e. those starting with <cib>).
+ + =#=#=#= End test: XML output - OK (0) =#=#=#= * Passed: crm_mon - XML output @@ -254,6 +258,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -481,33 +486,34 @@ Active Resources: - - + + + This tool can only check complete configurations (i.e. those starting with <cib>). + + =#=#=#= End test: XML output without the node section - OK (0) =#=#=#= * Passed: crm_mon - XML output without the node section =#=#=#= Begin test: Text output with only the node section =#=#=#= -Cluster Summary: - -Node List: - * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] -=#=#=#= End test: Text output with only the node section - OK (0) =#=#=#= -* Passed: crm_mon - Text output with only the node section +(log_assertion_as@utils.c:275) error: text_increment_list: Triggered fatal assertion at output_text.c:250 : tail != NULL +cts/cts-cli: line 152: 1430218 Aborted (core dumped) crm_mon -1 --exclude=all --include=nodes +CIB syntax has errors (for details, run crm_verify -LV) +=#=#=#= End test: Text output with only the node section - Interrupted by signal (134) =#=#=#= +* Failed (rc=134): crm_mon - Text output with only the node section =#=#=#= Begin test: Complete text output =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax is valid + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Active Resources: * Clone Set: ping-clone [ping]: @@ -596,7 +602,6 @@ Operations: Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 - * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete text output - OK (0) =#=#=#= * Passed: crm_mon - Complete text output =#=#=#= Begin test: Complete text output with detail =#=#=#= @@ -605,7 +610,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB syntax is valid + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -614,7 +619,7 @@ Node List: * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online - * GuestNode httpd-bundle-2@: online + * GuestNode httpd-bundle-2@: OFFLINE Active Resources: * Clone Set: ping-clone [ping]: @@ -722,7 +727,6 @@ Operations: Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 (1) - * not-on-cluster1 prevents dummy from running on cluster01 (1) =#=#=#= End test: Complete text output with detail - OK (0) =#=#=#= * Passed: crm_mon - Complete text output with detail =#=#=#= Begin test: Complete brief text output =#=#=#= @@ -731,13 +735,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax is valid + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ 
httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Active Resources: * 1 (ocf:pacemaker:Dummy): Active cluster02 @@ -826,7 +830,6 @@ Operations: Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 - * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete brief text output - OK (0) =#=#=#= * Passed: crm_mon - Complete brief text output =#=#=#= Begin test: Complete text output grouped by node =#=#=#= @@ -835,7 +838,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax is valid + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -864,7 +867,7 @@ Node List: * GuestNode httpd-bundle-1: online: * Resources: * httpd (ocf:heartbeat:apache): Started - * GuestNode httpd-bundle-2: online: + * GuestNode httpd-bundle-2: OFFLINE: * Resources: Node Attributes: @@ -936,7 +939,6 @@ Operations: Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 - * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete text output grouped by node - OK (0) =#=#=#= * Passed: crm_mon - Complete text output grouped by node =#=#=#= Begin test: Complete brief text output grouped by node =#=#=#= @@ -945,7 +947,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax is valid + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -1046,7 +1048,6 @@ Operations: Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 - * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete brief text output grouped by node - OK (0) =#=#=#= * Passed: crm_mon - Complete brief text output grouped by node =#=#=#= Begin test: XML output grouped by node =#=#=#= @@ -1123,7 +1124,7 @@ Negative Location Constraints: - + @@ -1297,9 +1298,12 @@ Negative Location Constraints: - - + + + This tool can only check complete configurations (i.e. those starting with <cib>). + + =#=#=#= End test: XML output grouped by node - OK (0) =#=#=#= * Passed: crm_mon - XML output grouped by node @@ -1309,7 +1313,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax is valid + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -1361,7 +1365,6 @@ Operations: Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 - * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete text output filtered by node - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by node =#=#=#= Begin test: XML output filtered by node =#=#=#= @@ -1485,9 +1488,12 @@ Negative Location Constraints: - - + + + This tool can only check complete configurations (i.e. those starting with <cib>). 
+ + =#=#=#= End test: XML output filtered by node - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by node @@ -1497,7 +1503,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax is valid + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -1555,7 +1561,6 @@ Operations: Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 - * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete text output filtered by tag - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by tag =#=#=#= Begin test: XML output filtered by tag =#=#=#= @@ -1691,9 +1696,12 @@ Negative Location Constraints: - - + + + This tool can only check complete configurations (i.e. those starting with <cib>). + + =#=#=#= End test: XML output filtered by tag - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by tag @@ -1703,13 +1711,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax is valid + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Active Resources: * Fencing (stonith:fence_xvm): Started cluster01 @@ -1744,7 +1752,7 @@ Operations: - + @@ -1768,7 +1776,11 @@ Operations: - + + + This tool can only check complete configurations (i.e. those starting with <cib>). + + =#=#=#= End test: XML output filtered by resource tag - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by resource tag @@ -1778,6 +1790,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -1809,9 +1822,12 @@ Active Resources: - - + + + This tool can only check complete configurations (i.e. those starting with <cib>). 
+ + =#=#=#= End test: XML output filtered by node that doesn't exist - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by node that doesn't exist @@ -1821,12 +1837,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Full List of Resources: * Clone Set: ping-clone [ping]: @@ -1858,6 +1875,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -1891,13 +1909,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax is valid + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Active Resources: * Fencing (stonith:fence_xvm): Started cluster01 @@ -1932,7 +1950,7 @@ Operations: - + @@ -1956,7 +1974,11 @@ Operations: - + + + This tool can only check complete configurations (i.e. those starting with <cib>). + + =#=#=#= End test: XML output filtered by primitive resource - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by primitive resource @@ -1966,13 +1988,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax is valid + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Active Resources: * Resource Group: exim-group: @@ -2010,7 +2032,7 @@ Operations: - + @@ -2041,7 +2063,11 @@ Operations: - + + + This tool can only check complete configurations (i.e. those starting with <cib>). + + =#=#=#= End test: XML output filtered by group resource - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by group resource @@ -2051,13 +2077,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax is valid + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Active Resources: * Resource Group: exim-group: @@ -2092,7 +2118,7 @@ Operations: - + @@ -2117,7 +2143,11 @@ Operations: - + + + This tool can only check complete configurations (i.e. those starting with <cib>). 
+ + =#=#=#= End test: XML output filtered by group resource member - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by group resource member @@ -2127,13 +2157,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax is valid + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Active Resources: * Clone Set: ping-clone [ping]: @@ -2173,7 +2203,7 @@ Operations: - + @@ -2208,7 +2238,11 @@ Operations: - + + + This tool can only check complete configurations (i.e. those starting with <cib>). + + =#=#=#= End test: XML output filtered by clone resource - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by clone resource @@ -2218,13 +2252,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax is valid + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Active Resources: * Clone Set: ping-clone [ping]: @@ -2264,7 +2298,7 @@ Operations: - + @@ -2299,7 +2333,11 @@ Operations: - + + + This tool can only check complete configurations (i.e. those starting with <cib>). + + =#=#=#= End test: XML output filtered by clone resource instance - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by clone resource instance @@ -2309,7 +2347,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB syntax is valid + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -2318,7 +2356,7 @@ Node List: * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online - * GuestNode httpd-bundle-2@: online + * GuestNode httpd-bundle-2@: OFFLINE Active Resources: * Clone Set: ping-clone [ping]: @@ -2358,7 +2396,7 @@ Operations: - + @@ -2390,7 +2428,11 @@ Operations: - + + + This tool can only check complete configurations (i.e. those starting with <cib>). + + =#=#=#= End test: XML output filtered by exact clone resource instance - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by exact clone resource instance @@ -2400,12 +2442,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Active Resources: * No active resources @@ -2427,7 +2470,7 @@ Active Resources: - + @@ -2440,7 +2483,11 @@ Active Resources: - + + + This tool can only check complete configurations (i.e. those starting with <cib>). 
+ + =#=#=#= End test: XML output filtered by resource that doesn't exist - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by resource that doesn't exist @@ -2450,12 +2497,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Full List of Resources: * Clone Set: inactive-clone [inactive-dhcpd] (disabled): @@ -2471,12 +2519,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: @@ -2501,7 +2550,7 @@ Full List of Resources: - + @@ -2590,7 +2639,11 @@ Full List of Resources: - + + + This tool can only check complete configurations (i.e. those starting with <cib>). + + =#=#=#= End test: XML output filtered by inactive bundle resource - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by inactive bundle resource @@ -2600,12 +2653,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: @@ -2629,7 +2683,7 @@ Full List of Resources: - + @@ -2689,7 +2743,11 @@ Full List of Resources: - + + + This tool can only check complete configurations (i.e. those starting with <cib>). + + =#=#=#= End test: XML output filtered by bundled IP address resource - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by bundled IP address resource @@ -2699,12 +2757,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: @@ -2728,7 +2787,7 @@ Full List of Resources: - + @@ -2786,7 +2845,11 @@ Full List of Resources: - + + + This tool can only check complete configurations (i.e. those starting with <cib>). 
+ + =#=#=#= End test: XML output filtered by bundled container - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by bundled container @@ -2796,12 +2859,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: @@ -2825,7 +2889,7 @@ Full List of Resources: - + @@ -2885,7 +2949,11 @@ Full List of Resources: - + + + This tool can only check complete configurations (i.e. those starting with <cib>). + + =#=#=#= End test: XML output filtered by bundle connection - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by bundle connection @@ -2895,12 +2963,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: @@ -2928,7 +2997,7 @@ Full List of Resources: - + @@ -2996,7 +3065,11 @@ Full List of Resources: - + + + This tool can only check complete configurations (i.e. those starting with <cib>). + + =#=#=#= End test: XML output filtered by bundled primitive resource - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by bundled primitive resource @@ -3006,7 +3079,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB syntax is valid + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -3015,7 +3088,7 @@ Node List: * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online - * GuestNode httpd-bundle-2@: online + * GuestNode httpd-bundle-2@: OFFLINE Active Resources: * Clone Set: mysql-clone-group [mysql-group]: @@ -3058,7 +3131,7 @@ Operations: - + @@ -3106,7 +3179,11 @@ Operations: - + + + This tool can only check complete configurations (i.e. those starting with <cib>). + + =#=#=#= End test: XML output, filtered by clone name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - XML output, filtered by clone name in cloned group @@ -3116,7 +3193,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB syntax is valid + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -3125,7 +3202,7 @@ Node List: * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online - * GuestNode httpd-bundle-2@: online + * GuestNode httpd-bundle-2@: OFFLINE Active Resources: * Clone Set: mysql-clone-group [mysql-group]: @@ -3168,7 +3245,7 @@ Operations: - + @@ -3216,7 +3293,11 @@ Operations: - + + + This tool can only check complete configurations (i.e. those starting with <cib>). 
+ + =#=#=#= End test: XML output, filtered by group name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - XML output, filtered by group name in cloned group @@ -3226,7 +3307,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB syntax is valid + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -3235,7 +3316,7 @@ Node List: * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online - * GuestNode httpd-bundle-2@: online + * GuestNode httpd-bundle-2@: OFFLINE Active Resources: * Clone Set: mysql-clone-group [mysql-group]: @@ -3276,7 +3357,7 @@ Operations: - + @@ -3310,7 +3391,11 @@ Operations: - + + + This tool can only check complete configurations (i.e. those starting with <cib>). + + =#=#=#= End test: XML output, filtered by exact group instance name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - XML output, filtered by exact group instance name in cloned group @@ -3320,7 +3405,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB syntax is valid + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -3329,7 +3414,7 @@ Node List: * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online - * GuestNode httpd-bundle-2@: online + * GuestNode httpd-bundle-2@: OFFLINE Active Resources: * Clone Set: mysql-clone-group [mysql-group]: @@ -3372,7 +3457,7 @@ Operations: - + @@ -3420,7 +3505,11 @@ Operations: - + + + This tool can only check complete configurations (i.e. those starting with <cib>). + + =#=#=#= End test: XML output, filtered by primitive name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - XML output, filtered by primitive name in cloned group @@ -3430,7 +3519,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB syntax is valid + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -3439,7 +3528,7 @@ Node List: * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online - * GuestNode httpd-bundle-2@: online + * GuestNode httpd-bundle-2@: OFFLINE Active Resources: * Clone Set: mysql-clone-group [mysql-group]: @@ -3480,7 +3569,7 @@ Operations: - + @@ -3514,7 +3603,11 @@ Operations: - + + + This tool can only check complete configurations (i.e. those starting with <cib>). + + =#=#=#= End test: XML output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - XML output, filtered by exact primitive instance name in cloned group @@ -3526,6 +3619,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 4 nodes configured * 16 resource instances configured (1 DISABLED) @@ -3704,7 +3798,11 @@ unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bun - + + + This tool can only check complete configurations (i.e. those starting with <cib>). 
+ + =#=#=#= End test: XML output of partially active resources - OK (0) =#=#=#= * Passed: crm_mon - XML output of partially active resources @@ -3716,6 +3814,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 4 nodes configured * 16 resource instances configured (1 DISABLED) @@ -3760,7 +3859,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB syntax is valid + * CIB syntax has errors (for details, run crm_verify -LV) * 4 nodes configured * 16 resource instances configured (1 DISABLED) @@ -3852,6 +3951,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 4 nodes configured * 16 resource instances configured (1 DISABLED) @@ -3873,6 +3973,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 4 nodes configured * 16 resource instances configured (1 DISABLED) @@ -3896,6 +3997,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 4 nodes configured * 16 resource instances configured (1 DISABLED) @@ -3916,6 +4018,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 4 nodes configured * 16 resource instances configured (1 DISABLED) @@ -3941,7 +4044,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB syntax is valid + * CIB syntax has errors (for details, run crm_verify -LV) * 4 nodes configured * 16 resource instances configured (1 DISABLED) @@ -4036,6 +4139,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 4 nodes configured * 16 resource instances configured (1 DISABLED) @@ -4124,7 +4228,11 @@ unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bun - + + + This tool can only check complete configurations (i.e. those starting with <cib>). + + =#=#=#= End test: Text output of partially active resources, filtered by node - OK (0) =#=#=#= * Passed: crm_mon - Text output of partially active resources, filtered by node @@ -4134,6 +4242,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 2 nodes configured * 3 resource instances configured @@ -4188,7 +4297,11 @@ Active Resources: - + + + This tool can only check complete configurations (i.e. those starting with <cib>). 
+ + =#=#=#= End test: XML output of active unmanaged resource on offline node - OK (0) =#=#=#= * Passed: crm_mon - XML output of active unmanaged resource on offline node @@ -4198,6 +4311,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 2 nodes configured * 3 resource instances configured @@ -4220,6 +4334,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 2 nodes configured * 3 resource instances configured @@ -4242,6 +4357,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -4297,7 +4413,7 @@ Full List of Resources: - + @@ -4493,9 +4609,12 @@ Full List of Resources: - - + + + This tool can only check complete configurations (i.e. those starting with <cib>). + + =#=#=#= End test: XML output of all resources with maintenance-mode enabled - OK (0) =#=#=#= * Passed: crm_mon - XML output of all resources with maintenance-mode enabled @@ -4505,6 +4624,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -4512,7 +4632,7 @@ Node List: * Node cluster02: maintenance * GuestNode httpd-bundle-1: maintenance * Online: [ cluster01 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-2 ] + * GuestOnline: [ httpd-bundle-0 ] Full List of Resources: * Clone Set: ping-clone [ping]: @@ -4557,7 +4677,7 @@ Full List of Resources: - + @@ -4754,9 +4874,12 @@ Full List of Resources: - - + + + This tool can only check complete configurations (i.e. those starting with <cib>). + + =#=#=#= End test: XML output of all resources with maintenance enabled for a node - OK (0) =#=#=#= * Passed: crm_mon - XML output of all resources with maintenance enabled for a node @@ -4766,6 +4889,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -4818,7 +4942,7 @@ Full List of Resources: - + @@ -5014,9 +5138,12 @@ Full List of Resources: - - + + + This tool can only check complete configurations (i.e. those starting with <cib>). 
+ + =#=#=#= End test: XML output of all resources with maintenance meta attribute true - OK (0) =#=#=#= * Passed: crm_mon - XML output of all resources with maintenance meta attribute true @@ -5026,6 +5153,7 @@ Cluster Summary: * Current DC: cent7-host2 (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 4 nodes configured * 10 resource instances configured @@ -5050,6 +5178,7 @@ Cluster Summary: * Current DC: cent7-host2 (3232262829) (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 4 nodes configured * 10 resource instances configured diff --git a/cts/cli/regression.feature_set.exp b/cts/cli/regression.feature_set.exp index a0428736392..092a9f91eb1 100644 --- a/cts/cli/regression.feature_set.exp +++ b/cts/cli/regression.feature_set.exp @@ -53,6 +53,7 @@ Cluster Summary: * Current DC: cluster01 (1) (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 4 resource instances configured @@ -60,7 +61,7 @@ Node List: * Node cluster01 (1): online, feature set 3.15.1 * Node cluster02 (2): online, feature set 3.15.1 * Node cluster03 (3): OFFLINE - * GuestNode guest01-0@: online + * GuestNode guest01-0@: OFFLINE * RemoteNode remote01 (4): OFFLINE Active Resources: @@ -82,7 +83,7 @@ Active Resources: - + @@ -96,7 +97,11 @@ Active Resources: - + + + This tool can only check complete configurations (i.e. those starting with <cib>). + + =#=#=#= End test: XML output, no mixed status - OK (0) =#=#=#= * Passed: crm_mon - XML output, no mixed status @@ -152,6 +157,7 @@ Cluster Summary: * Current DC: cluster01 (1) (version) - MIXED-VERSION partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 4 resource instances configured @@ -159,7 +165,7 @@ Node List: * Node cluster01 (1): online, feature set 3.15.1 * Node cluster02 (2): online, feature set 3.15.0 * Node cluster03 (3): OFFLINE - * GuestNode guest01-0@: online + * GuestNode guest01-0@: OFFLINE * RemoteNode remote01 (4): OFFLINE Active Resources: @@ -181,7 +187,7 @@ Active Resources: - + @@ -195,7 +201,11 @@ Active Resources: - + + + This tool can only check complete configurations (i.e. those starting with <cib>). 
+ + =#=#=#= End test: XML output, mixed status - OK (0) =#=#=#= * Passed: crm_mon - XML output, mixed status diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp index 10201580d42..fa49e9bf066 100644 --- a/cts/cli/regression.tools.exp +++ b/cts/cli/regression.tools.exp @@ -1457,9 +1457,7 @@ unpack_resources error: Either configure some or disable STONITH with the stoni unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity pcmk__verify error: CIB did not pass schema validation Current cluster status: - * Cluster Summary: - * CIB syntax has errors (for details, run crm_verify -LV) - + * CIB syntax has errors (for details, run crm_verify -LV) * Full List of Resources: * No resources @@ -1471,9 +1469,7 @@ Transition Summary: Executing Cluster Transition: Revised Cluster Status: - * Cluster Summary: - * CIB syntax has errors (for details, run crm_verify -LV) - + * CIB syntax has errors (for details, run crm_verify -LV) * Node List: * Online: [ node1 ] @@ -1726,9 +1722,7 @@ unpack_resources error: Either configure some or disable STONITH with the stoni unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity pcmk__verify error: CIB did not pass schema validation Current cluster status: - * Cluster Summary: - * CIB syntax has errors (for details, run crm_verify -LV) - + * CIB syntax has errors (for details, run crm_verify -LV) * Node List: * Online: [ node1 ] @@ -3903,8 +3897,7 @@ Error performing operation: No such object * Passed: cibadmin - Create a fencing resource =#=#=#= Begin test: Bring resources online =#=#=#= Current cluster status: - * Cluster Summary: - + * CIB syntax has errors (for details, run crm_verify -LV) * Node List: * Online: [ node1 ] @@ -3917,18 +3910,21 @@ Transition Summary: * Start Fence ( node1 ) Executing Cluster Transition: + * Resource action: dummy monitor on node1 + * Resource action: Fence monitor on node1 + * Resource action: dummy start on node1 + * Resource action: Fence start on node1 Revised Cluster Status: - * Cluster Summary: - + * CIB syntax has errors (for details, run crm_verify -LV) * Node List: * Online: [ node1 ] * Full List of Resources: - * dummy (ocf:pacemaker:Dummy): Stopped - * Fence (stonith:fence_true): Stopped + * dummy (ocf:pacemaker:Dummy): Started node1 + * Fence (stonith:fence_true): Started node1 =#=#=#= Current cib after: Bring resources online =#=#=#= - + @@ -3964,14 +3960,25 @@ Revised Cluster Status: + + + + + + + + + + =#=#=#= End test: Bring resources online - OK (0) =#=#=#= * Passed: crm_simulate - Bring resources online =#=#=#= Begin test: Try to move a resource to its existing location =#=#=#= +crm_resource: Error performing operation: Requested item already exists =#=#=#= Current cib after: Try to move a resource to its existing location =#=#=#= - + @@ -4000,28 +4007,37 @@ Revised Cluster Status: - - - + + + + + + + + + + + -=#=#=#= End test: Try to move a resource to its existing location - OK (0) =#=#=#= -* Failed (rc=000): crm_resource - Try to move a resource to its existing location +=#=#=#= End test: Try to move a resource to its existing location - Requested item already exists (108) =#=#=#= +* Passed: crm_resource - Try to move a resource to its existing location =#=#=#= Begin test: Try to move a resource that doesn't exist =#=#=#= crm_resource: Resource 'xyz' not found Error performing operation: No such object =#=#=#= End test: Try to move a resource that doesn't exist - No such object (105) =#=#=#= * Passed: crm_resource - 
Try to move a resource that doesn't exist =#=#=#= Begin test: Move a resource from its existing location =#=#=#= -crm_resource: Resource 'dummy' not moved: active in 0 locations. -To prevent 'dummy' from running on a specific location, specify a node. +WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1. + This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool. + This will be the case even if node1 is the last node in the cluster =#=#=#= Current cib after: Move a resource from its existing location =#=#=#= @@ -4053,7 +4069,7 @@ To prevent 'dummy' from running on a specific location, specify a node. - + @@ -4061,13 +4077,23 @@ To prevent 'dummy' from running on a specific location, specify a node. + + + + + + + + + + -=#=#=#= End test: Move a resource from its existing location - Incorrect usage (64) =#=#=#= -* Failed (rc=064): crm_resource - Move a resource from its existing location +=#=#=#= End test: Move a resource from its existing location - OK (0) =#=#=#= +* Passed: crm_resource - Move a resource from its existing location =#=#=#= Begin test: Clear out constraints generated by --move =#=#=#= -Removing constraint: cli-prefer-dummy +Removing constraint: cli-ban-dummy-on-node1 =#=#=#= Current cib after: Clear out constraints generated by --move =#=#=#= @@ -4105,6 +4131,16 @@ Removing constraint: cli-prefer-dummy + + + + + + + + + + @@ -4149,6 +4185,16 @@ false + + + + + + + + + + @@ -4192,6 +4238,16 @@ false + + + + + + + + + + @@ -4267,6 +4323,16 @@ false + + + + + + + + + + @@ -4324,6 +4390,16 @@ false + + + + + + + + + + @@ -4370,6 +4446,16 @@ false + + + + + + + + + + @@ -4417,6 +4503,16 @@ true + + + + + + + + + + @@ -4463,6 +4559,16 @@ true + + + + + + + + + + @@ -4707,6 +4813,16 @@ ticketB revoked + + + + + + + + + + @@ -4891,6 +5007,16 @@ Error performing operation: No such object + + + + + + + + + + @@ -4901,36 +5027,31 @@ Error performing operation: No such object * Passed: crm_resource - Ban a resource on unknown node =#=#=#= Begin test: Create two more nodes and bring them online =#=#=#= Current cluster status: - * Cluster Summary: - + * CIB syntax has errors (for details, run crm_verify -LV) * Node List: * Online: [ node1 ] * Full List of Resources: - * dummy (ocf:pacemaker:Dummy): Stopped - * Fence (stonith:fence_true): Stopped + * dummy (ocf:pacemaker:Dummy): Started node1 + * Fence (stonith:fence_true): Started node1 Performing Requested Modifications: * Bringing node node2 online * Bringing node node3 online Transition Summary: - * Start dummy ( node1 ) - * Start Fence ( node2 ) + * Move Fence ( node1 -> node2 ) Executing Cluster Transition: * Resource action: dummy monitor on node3 * Resource action: dummy monitor on node2 - * Resource action: dummy monitor on node1 + * Resource action: Fence stop on node1 * Resource action: Fence monitor on node3 * Resource action: Fence monitor on node2 - * Resource action: Fence monitor on node1 - * Resource action: dummy start on node1 * Resource action: Fence start on node2 Revised Cluster Status: - * Cluster Summary: - + * CIB syntax has errors (for details, run crm_verify -LV) * Node List: * Online: [ node1 node2 node3 ] @@ -4938,7 +5059,7 @@ Revised Cluster Status: * dummy (ocf:pacemaker:Dummy): Started node1 * Fence (stonith:fence_true): Started node2 =#=#=#= Current cib after: Create two more nodes and bring them online =#=#=#= - + @@ -4982,7 +5103,7 @@ Revised 
Cluster Status: - + @@ -5069,7 +5190,7 @@ WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score - + @@ -5166,7 +5287,7 @@ Locations: - + @@ -5204,8 +5325,7 @@ Locations: * Passed: crm_resource - Ban dummy from node2 =#=#=#= Begin test: Relocate resources due to ban =#=#=#= Current cluster status: - * Cluster Summary: - + * CIB syntax has errors (for details, run crm_verify -LV) * Node List: * Online: [ node1 node2 node3 ] @@ -5217,18 +5337,19 @@ Transition Summary: * Move dummy ( node1 -> node3 ) Executing Cluster Transition: + * Resource action: dummy stop on node1 + * Resource action: dummy start on node3 Revised Cluster Status: - * Cluster Summary: - + * CIB syntax has errors (for details, run crm_verify -LV) * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: - * dummy (ocf:pacemaker:Dummy): Started node1 + * dummy (ocf:pacemaker:Dummy): Started node3 * Fence (stonith:fence_true): Started node2 =#=#=#= Current cib after: Relocate resources due to ban =#=#=#= - + @@ -5272,10 +5393,10 @@ Revised Cluster Status: - + - + @@ -5299,7 +5420,7 @@ Revised Cluster Status: - + @@ -5313,14 +5434,10 @@ Revised Cluster Status: * Passed: crm_simulate - Relocate resources due to ban =#=#=#= Begin test: Move dummy to node1 =#=#=#= - - - crm_resource: Error performing operation: Requested item already exists - - + =#=#=#= Current cib after: Move dummy to node1 =#=#=#= - + @@ -5352,8 +5469,8 @@ Revised Cluster Status: - + @@ -5364,10 +5481,10 @@ Revised Cluster Status: - + - + @@ -5391,7 +5508,7 @@ Revised Cluster Status: - + @@ -5401,12 +5518,12 @@ Revised Cluster Status: -=#=#=#= End test: Move dummy to node1 - Requested item already exists (108) =#=#=#= -* Failed (rc=108): crm_resource - Move dummy to node1 +=#=#=#= End test: Move dummy to node1 - OK (0) =#=#=#= +* Passed: crm_resource - Move dummy to node1 =#=#=#= Begin test: Clear implicit constraints for dummy on node2 =#=#=#= Removing constraint: cli-ban-dummy-on-node2 =#=#=#= Current cib after: Clear implicit constraints for dummy on node2 =#=#=#= - + @@ -5438,7 +5555,7 @@ Removing constraint: cli-ban-dummy-on-node2 - + @@ -5449,10 +5566,10 @@ Removing constraint: cli-ban-dummy-on-node2 - + - + @@ -5476,7 +5593,7 @@ Removing constraint: cli-ban-dummy-on-node2 - + @@ -5498,7 +5615,7 @@ Removing constraint: cli-ban-dummy-on-node2 Performing update of 'is-managed' on 'test-clone', the parent of 'test-primitive' Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute =#=#=#= - + @@ -5536,7 +5653,7 @@ Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone - + @@ -5546,7 +5663,7 @@ Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone =#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#= Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#= - + @@ -5588,7 +5705,7 @@ Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=te - + @@ -5603,7 +5720,7 @@ Multiple attributes match name=is-managed A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone' Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true =#=#=#= Current 
cib after: Update resource meta attribute with duplicates =#=#=#= - + @@ -5645,7 +5762,7 @@ Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=i - + @@ -5655,7 +5772,7 @@ Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=i =#=#=#= Begin test: Update resource meta attribute with duplicates (force clone) =#=#=#= Set 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed value=true =#=#=#= Current cib after: Update resource meta attribute with duplicates (force clone) =#=#=#= - + @@ -5697,7 +5814,7 @@ Set 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-manage - + @@ -5711,7 +5828,7 @@ Multiple attributes match name=is-managed Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=false =#=#=#= Current cib after: Update child resource meta attribute with duplicates =#=#=#= - + @@ -5753,7 +5870,7 @@ Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=i - + @@ -5768,7 +5885,7 @@ Multiple attributes match name=is-managed A value for 'is-managed' already exists in child 'test-primitive', performing delete on that instead of 'test-clone' Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource meta attribute with duplicates =#=#=#= - + @@ -5808,7 +5925,7 @@ Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed na - + @@ -5819,7 +5936,7 @@ Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed na Performing delete of 'is-managed' on 'test-clone', the parent of 'test-primitive' Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource meta attribute in parent =#=#=#= - + @@ -5857,7 +5974,7 @@ Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-ma - + @@ -5867,7 +5984,7 @@ Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-ma =#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#= Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#= - + @@ -5907,7 +6024,7 @@ Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=te - + @@ -5918,7 +6035,7 @@ Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=te A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone' Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true =#=#=#= Current cib after: Update existing resource meta attribute =#=#=#= - + @@ -5958,7 +6075,7 @@ Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=i - + @@ -5968,7 +6085,7 @@ Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=i =#=#=#= Begin test: Create a resource meta attribute in the parent =#=#=#= Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=true =#=#=#= Current cib after: Create a resource meta attribute in the parent =#=#=#= - + @@ -6010,7 +6127,7 @@ Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone - + @@ -6023,7 +6140,7 @@ Set 'test-clone' option: 
id=test-clone-meta_attributes-is-managed set=test-clone =#=#=#= Begin test: Delete resource parent meta attribute (force) =#=#=#= Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource parent meta attribute (force) =#=#=#= - + @@ -6063,7 +6180,7 @@ Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-ma - + @@ -6072,7 +6189,7 @@ Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-ma * Passed: crm_resource - Delete resource parent meta attribute (force) =#=#=#= Begin test: Restore duplicates =#=#=#= =#=#=#= Current cib after: Restore duplicates =#=#=#= - + @@ -6114,7 +6231,7 @@ Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-ma - + @@ -6128,7 +6245,7 @@ Multiple attributes match name=is-managed Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource child meta attribute =#=#=#= - + @@ -6168,7 +6285,7 @@ Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed na - + @@ -6177,7 +6294,7 @@ Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed na * Passed: crm_resource - Delete resource child meta attribute =#=#=#= Begin test: Create the dummy-group resource group =#=#=#= =#=#=#= Current cib after: Create the dummy-group resource group =#=#=#= - + @@ -6221,7 +6338,7 @@ Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed na - + @@ -6231,7 +6348,7 @@ Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed na =#=#=#= Begin test: Create a resource meta attribute in dummy1 =#=#=#= Set 'dummy1' option: id=dummy1-meta_attributes-is-managed set=dummy1-meta_attributes name=is-managed value=true =#=#=#= Current cib after: Create a resource meta attribute in dummy1 =#=#=#= - + @@ -6279,7 +6396,7 @@ Set 'dummy1' option: id=dummy1-meta_attributes-is-managed set=dummy1-meta_attrib - + @@ -6290,7 +6407,7 @@ Set 'dummy1' option: id=dummy1-meta_attributes-is-managed set=dummy1-meta_attrib Set 'dummy1' option: id=dummy1-meta_attributes-is-managed name=is-managed value=false Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-group-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute in dummy-group =#=#=#= - + @@ -6341,7 +6458,7 @@ Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-gr - + @@ -6350,7 +6467,7 @@ Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-gr * Passed: crm_resource - Create a resource meta attribute in dummy-group =#=#=#= Begin test: Delete the dummy-group resource group =#=#=#= =#=#=#= Current cib after: Delete the dummy-group resource group =#=#=#= - + @@ -6390,7 +6507,7 @@ Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-gr - + @@ -6400,7 +6517,7 @@ Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-gr =#=#=#= Begin test: Specify a lifetime when moving a resource =#=#=#= Migration will take effect until: =#=#=#= Current cib after: Specify a lifetime when moving a resource =#=#=#= - + @@ -6440,7 +6557,6 @@ Migration will take effect until: - @@ -6455,7 +6571,7 @@ Migration will take effect until: * Passed: crm_resource - Specify a lifetime when moving a resource =#=#=#= Begin test: Try to move a resource previously moved with a lifetime =#=#=#= =#=#=#= Current cib after: Try to move 
a resource previously moved with a lifetime =#=#=#= - + @@ -6508,7 +6624,7 @@ WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool. This will be the case even if node1 is the last node in the cluster =#=#=#= Current cib after: Ban dummy from node1 for a short time =#=#=#= - + @@ -6564,7 +6680,7 @@ WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score =#=#=#= Begin test: Remove expired constraints =#=#=#= Removing constraint: cli-ban-dummy-on-node1 =#=#=#= Current cib after: Remove expired constraints =#=#=#= - + @@ -6614,7 +6730,7 @@ Removing constraint: cli-ban-dummy-on-node1 =#=#=#= Begin test: Clear all implicit constraints for dummy =#=#=#= Removing constraint: cli-prefer-dummy =#=#=#= Current cib after: Clear all implicit constraints for dummy =#=#=#= - + @@ -6661,7 +6777,7 @@ Removing constraint: cli-prefer-dummy * Passed: crm_resource - Clear all implicit constraints for dummy =#=#=#= Begin test: Set a node health strategy =#=#=#= =#=#=#= Current cib after: Set a node health strategy =#=#=#= - + @@ -6709,7 +6825,7 @@ Removing constraint: cli-prefer-dummy * Passed: crm_attribute - Set a node health strategy =#=#=#= Begin test: Set a node health attribute =#=#=#= =#=#=#= Current cib after: Set a node health attribute =#=#=#= - + @@ -6770,7 +6886,7 @@ Removing constraint: cli-prefer-dummy * Passed: crm_resource - Show why a resource is not running on an unhealthy node =#=#=#= Begin test: Delete a resource =#=#=#= =#=#=#= Current cib after: Delete a resource =#=#=#= - + @@ -7626,7 +7742,6 @@ export overcloud-rabbit-2=overcloud-rabbit-2 =#=#=#= Begin test: Show allocation scores with crm_simulate =#=#=#= - @@ -7734,6 +7849,8 @@ export overcloud-rabbit-2=overcloud-rabbit-2 + + @@ -7926,36 +8043,6 @@ export overcloud-rabbit-2=overcloud-rabbit-2 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -7963,14 +8050,30 @@ export overcloud-rabbit-2=overcloud-rabbit-2 -/tmp/cts-cli.ta_outfile.qPkR4CJfAb:1: element pacemaker-result: Relax-NG validity error : Expecting element status, got cluster_status -/tmp/cts-cli.ta_outfile.qPkR4CJfAb:1: element pacemaker-result: Relax-NG validity error : Element pacemaker-result failed to validate content -/tmp/cts-cli.ta_outfile.qPkR4CJfAb fails to validate -=#=#=#= End test: Show allocation scores with crm_simulate - Failed to validate (3) =#=#=#= -* Failed (rc=003): crm_simulate - Show allocation scores with crm_simulate +=#=#=#= End test: Show allocation scores with crm_simulate - OK (0) =#=#=#= +* Passed: crm_simulate - Show allocation scores with crm_simulate =#=#=#= Begin test: Show utilization with crm_simulate =#=#=#= 4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure +CIB syntax has errors (for details, run crm_verify -LV) +[ cluster01 cluster02 ] +[ httpd-bundle-0 httpd-bundle-1 ] + +Started: [ cluster01 cluster02 ] +Fencing (stonith:fence_xvm): Started cluster01 +dummy (ocf:pacemaker:Dummy): Started cluster02 +Stopped (disabled): [ cluster01 cluster02 ] +inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) +inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) +httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 +httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 +httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped +Public-IP 
(ocf:heartbeat:IPaddr): Started cluster02 +Email (lsb:exim): Started cluster02 +Started: [ cluster01 cluster02 ] +Promoted: [ cluster02 ] +Unpromoted: [ cluster01 ] + Only 'private' parameters to 60s-interval monitor for dummy on cluster02 changed: 0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4 Original: cluster01 capacity: Original: cluster02 capacity: @@ -8003,36 +8106,6 @@ Remaining: httpd-bundle-0 capacity: Remaining: httpd-bundle-1 capacity: Remaining: httpd-bundle-2 capacity: -[ cluster01 cluster02 ] -[ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] - -Started: [ cluster01 cluster02 ] -Fencing (stonith:fence_xvm): Started cluster01 -dummy (ocf:pacemaker:Dummy): Started cluster02 -Stopped (disabled): [ cluster01 cluster02 ] -inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) -inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) -httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 -httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 -httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped -Public-IP (ocf:heartbeat:IPaddr): Started cluster02 -Email (lsb:exim): Started cluster02 -Started: [ cluster01 cluster02 ] -Promoted: [ cluster02 ] -Unpromoted: [ cluster01 ] - -Only 'private' parameters to 60s-interval monitor for dummy on cluster02 changed: 0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4 -Original: cluster01 capacity: -Original: cluster02 capacity: -Original: httpd-bundle-0 capacity: -Original: httpd-bundle-1 capacity: -Original: httpd-bundle-2 capacity: -Remaining: cluster01 capacity: -Remaining: cluster02 capacity: -Remaining: httpd-bundle-0 capacity: -Remaining: httpd-bundle-1 capacity: -Remaining: httpd-bundle-2 capacity: - Start httpd-bundle-2 ( cluster01 ) due to unrunnable httpd-bundle-docker-2 start (blocked) Start httpd:2 ( httpd-bundle-2 ) due to unrunnable httpd-bundle-docker-2 start (blocked) =#=#=#= End test: Show utilization with crm_simulate - OK (0) =#=#=#= @@ -8041,12 +8114,10 @@ Start httpd:2 ( httpd-bundle-2 ) due to unrunnable httpd-bundle 4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: - * Cluster Summary: -Only 'private' parameters to 60s-interval monitor for dummy on cluster02 changed: 0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4 - + * CIB syntax has errors (for details, run crm_verify -LV) * Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] * Full List of Resources: * Clone Set: ping-clone [ping]: @@ -8096,11 +8167,10 @@ Executing Cluster Transition: * Pseudo action: httpd-bundle_running_0 Revised Cluster Status: - * Cluster Summary: - + * CIB syntax has errors (for details, run crm_verify -LV) * Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] * Full List of Resources: * Clone Set: ping-clone [ping]: @@ -8130,12 +8200,10 @@ Revised Cluster Status: 4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: - * Cluster Summary: -Only 'private' parameters to 60s-interval monitor for dummy on cluster02 changed: 0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4 - + * CIB syntax has errors (for details, run crm_verify -LV) * Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] + * GuestOnline: [ 
httpd-bundle-0 httpd-bundle-1 ] * Full List of Resources: * Clone Set: ping-clone [ping]: @@ -8186,12 +8254,11 @@ Executing Cluster Transition: * Pseudo action: httpd-bundle_running_0 Revised Cluster Status: - * Cluster Summary: - + * CIB syntax has errors (for details, run crm_verify -LV) * Node List: * Online: [ cluster02 ] * OFFLINE: [ cluster01 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] + * GuestOnline: [ httpd-bundle-1 ] * Full List of Resources: * Clone Set: ping-clone [ping]: @@ -8223,12 +8290,10 @@ Revised Cluster Status: 4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: - * Cluster Summary: -Only 'private' parameters to 60s-interval monitor for dummy on cluster02 changed: 0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4 - + * CIB syntax has errors (for details, run crm_verify -LV) * Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] * Full List of Resources: * Clone Set: ping-clone [ping]: @@ -8312,12 +8377,11 @@ Executing Cluster Transition: * Pseudo action: httpd-bundle_running_0 Revised Cluster Status: - * Cluster Summary: - + * CIB syntax has errors (for details, run crm_verify -LV) * Node List: * Online: [ cluster01 ] * OFFLINE: [ cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] + * GuestOnline: [ httpd-bundle-0 ] * Full List of Resources: * Clone Set: ping-clone [ping]: @@ -8541,12 +8605,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: + * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Active Resources: * Clone Set: ping-clone [ping]: diff --git a/cts/cli/regression.validity.exp b/cts/cli/regression.validity.exp index 4b898731c17..c775060c18a 100644 --- a/cts/cli/regression.validity.exp +++ b/cts/cli/regression.validity.exp @@ -218,166 +218,26 @@ pcmk__update_schema info: Transformed the configuration schema to pacemaker-3.1 unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity -update_validation debug: Testing 'pacemaker-1.2' validation (1 of X) -update_validation debug: pacemaker-1.2-style configuration is also valid for pacemaker-1.3 -update_validation debug: Testing 'pacemaker-1.3' validation (2 of X) -update_validation debug: Configuration valid for schema: pacemaker-1.3 -update_validation debug: pacemaker-1.3-style configuration is also valid for pacemaker-2.0 -update_validation debug: Testing 'pacemaker-2.0' validation (3 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.0 -update_validation debug: pacemaker-2.0-style configuration is also valid for pacemaker-2.1 -update_validation debug: Testing 'pacemaker-2.1' validation (4 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.1 -update_validation debug: pacemaker-2.1-style configuration is also valid for pacemaker-2.2 -update_validation debug: Testing 'pacemaker-2.2' validation (5 of X) -update_validation debug: 
Configuration valid for schema: pacemaker-2.2 -update_validation debug: pacemaker-2.2-style configuration is also valid for pacemaker-2.3 -update_validation debug: Testing 'pacemaker-2.3' validation (6 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.3 -update_validation debug: pacemaker-2.3-style configuration is also valid for pacemaker-2.4 -update_validation debug: Testing 'pacemaker-2.4' validation (7 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.4 -update_validation debug: pacemaker-2.4-style configuration is also valid for pacemaker-2.5 -update_validation debug: Testing 'pacemaker-2.5' validation (8 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.5 -update_validation debug: pacemaker-2.5-style configuration is also valid for pacemaker-2.6 -update_validation debug: Testing 'pacemaker-2.6' validation (9 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.6 -update_validation debug: pacemaker-2.6-style configuration is also valid for pacemaker-2.7 -update_validation debug: Testing 'pacemaker-2.7' validation (10 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.7 -update_validation debug: pacemaker-2.7-style configuration is also valid for pacemaker-2.8 -update_validation debug: Testing 'pacemaker-2.8' validation (11 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.8 -update_validation debug: pacemaker-2.8-style configuration is also valid for pacemaker-2.9 -update_validation debug: Testing 'pacemaker-2.9' validation (12 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.9 -update_validation debug: pacemaker-2.9-style configuration is also valid for pacemaker-2.10 -update_validation debug: Testing 'pacemaker-2.10' validation (13 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.10 -update_validation debug: pacemaker-2.10-style configuration is also valid for pacemaker-3.0 -update_validation debug: Testing 'pacemaker-3.0' validation (14 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.0 -update_validation debug: pacemaker-3.0-style configuration is also valid for pacemaker-3.1 -update_validation debug: Testing 'pacemaker-3.1' validation (15 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.1 -update_validation debug: pacemaker-3.1-style configuration is also valid for pacemaker-3.2 -update_validation debug: Testing 'pacemaker-3.2' validation (16 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.2 -update_validation debug: pacemaker-3.2-style configuration is also valid for pacemaker-3.3 -update_validation debug: Testing 'pacemaker-3.3' validation (17 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.3 -update_validation debug: pacemaker-3.3-style configuration is also valid for pacemaker-3.4 -update_validation debug: Testing 'pacemaker-3.4' validation (18 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.4 -update_validation debug: pacemaker-3.4-style configuration is also valid for pacemaker-3.5 -update_validation debug: Testing 'pacemaker-3.5' validation (19 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.5 -update_validation debug: pacemaker-3.5-style configuration is also valid for pacemaker-3.6 -update_validation debug: Testing 'pacemaker-3.6' validation (20 of X) -update_validation debug: Configuration valid for 
schema: pacemaker-3.6 -update_validation debug: pacemaker-3.6-style configuration is also valid for pacemaker-3.7 -update_validation debug: Testing 'pacemaker-3.7' validation (21 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.7 -update_validation debug: pacemaker-3.7-style configuration is also valid for pacemaker-3.8 -update_validation debug: Testing 'pacemaker-3.8' validation (22 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.8 -update_validation debug: pacemaker-3.8-style configuration is also valid for pacemaker-3.9 -update_validation debug: Testing 'pacemaker-3.9' validation (23 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.9 -update_validation trace: Stopping at pacemaker-3.9 -update_validation info: Transformed the configuration from pacemaker-1.2 to pacemaker-3.9 pcmk__verify error: CIB did not pass schema validation unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity -update_validation debug: Testing 'pacemaker-1.2' validation (1 of X) -update_validation debug: pacemaker-1.2-style configuration is also valid for pacemaker-1.3 -update_validation debug: Testing 'pacemaker-1.3' validation (2 of X) -update_validation debug: Configuration valid for schema: pacemaker-1.3 -update_validation debug: pacemaker-1.3-style configuration is also valid for pacemaker-2.0 -update_validation debug: Testing 'pacemaker-2.0' validation (3 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.0 -update_validation debug: pacemaker-2.0-style configuration is also valid for pacemaker-2.1 -update_validation debug: Testing 'pacemaker-2.1' validation (4 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.1 -update_validation debug: pacemaker-2.1-style configuration is also valid for pacemaker-2.2 -update_validation debug: Testing 'pacemaker-2.2' validation (5 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.2 -update_validation debug: pacemaker-2.2-style configuration is also valid for pacemaker-2.3 -update_validation debug: Testing 'pacemaker-2.3' validation (6 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.3 -update_validation debug: pacemaker-2.3-style configuration is also valid for pacemaker-2.4 -update_validation debug: Testing 'pacemaker-2.4' validation (7 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.4 -update_validation debug: pacemaker-2.4-style configuration is also valid for pacemaker-2.5 -update_validation debug: Testing 'pacemaker-2.5' validation (8 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.5 -update_validation debug: pacemaker-2.5-style configuration is also valid for pacemaker-2.6 -update_validation debug: Testing 'pacemaker-2.6' validation (9 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.6 -update_validation debug: pacemaker-2.6-style configuration is also valid for pacemaker-2.7 -update_validation debug: Testing 'pacemaker-2.7' validation (10 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.7 -update_validation debug: pacemaker-2.7-style configuration is also valid for pacemaker-2.8 -update_validation debug: Testing 'pacemaker-2.8' validation (11 of X) 
-update_validation debug: Configuration valid for schema: pacemaker-2.8 -update_validation debug: pacemaker-2.8-style configuration is also valid for pacemaker-2.9 -update_validation debug: Testing 'pacemaker-2.9' validation (12 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.9 -update_validation debug: pacemaker-2.9-style configuration is also valid for pacemaker-2.10 -update_validation debug: Testing 'pacemaker-2.10' validation (13 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.10 -update_validation debug: pacemaker-2.10-style configuration is also valid for pacemaker-3.0 -update_validation debug: Testing 'pacemaker-3.0' validation (14 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.0 -update_validation debug: pacemaker-3.0-style configuration is also valid for pacemaker-3.1 -update_validation debug: Testing 'pacemaker-3.1' validation (15 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.1 -update_validation debug: pacemaker-3.1-style configuration is also valid for pacemaker-3.2 -update_validation debug: Testing 'pacemaker-3.2' validation (16 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.2 -update_validation debug: pacemaker-3.2-style configuration is also valid for pacemaker-3.3 -update_validation debug: Testing 'pacemaker-3.3' validation (17 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.3 -update_validation debug: pacemaker-3.3-style configuration is also valid for pacemaker-3.4 -update_validation debug: Testing 'pacemaker-3.4' validation (18 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.4 -update_validation debug: pacemaker-3.4-style configuration is also valid for pacemaker-3.5 -update_validation debug: Testing 'pacemaker-3.5' validation (19 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.5 -update_validation debug: pacemaker-3.5-style configuration is also valid for pacemaker-3.6 -update_validation debug: Testing 'pacemaker-3.6' validation (20 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.6 -update_validation debug: pacemaker-3.6-style configuration is also valid for pacemaker-3.7 -update_validation debug: Testing 'pacemaker-3.7' validation (21 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.7 -update_validation debug: pacemaker-3.7-style configuration is also valid for pacemaker-3.8 -update_validation debug: Testing 'pacemaker-3.8' validation (22 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.8 -update_validation debug: pacemaker-3.8-style configuration is also valid for pacemaker-3.9 -update_validation debug: Testing 'pacemaker-3.9' validation (23 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.9 -update_validation trace: Stopping at pacemaker-3.9 -update_validation info: Transformed the configuration from pacemaker-1.2 to pacemaker-3.9 pcmk__verify error: CIB did not pass schema validation Current cluster status: - * Cluster Summary: - * CIB syntax has errors (for details, run crm_verify -LV) - + * CIB syntax has errors (for details, run crm_verify -LV) * Full List of Resources: - * No resources + * dummy1 (ocf:pacemaker:Dummy): Stopped + * dummy2 (ocf:pacemaker:Dummy): Stopped Transition Summary: Executing Cluster Transition: Revised Cluster Status: - * Cluster Summary: - * CIB syntax has errors (for details, run crm_verify -LV) - + * CIB 
syntax has errors (for details, run crm_verify -LV) * Full List of Resources: - * No resources + * dummy1 (ocf:pacemaker:Dummy): Stopped + * dummy2 (ocf:pacemaker:Dummy): Stopped =#=#=#= End test: Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1) - OK (0) =#=#=#= * Passed: crm_simulate - Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1) =#=#=#= Begin test: Make resulting CIB valid, although without validate-with attribute =#=#=#= @@ -403,32 +263,26 @@ Schema validation of configuration is disabled (support for validate-with set to unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity -Schema validation of configuration is disabled (enabling is encouraged and prevents common misconfigurations) pcmk__verify error: CIB did not pass schema validation unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity -Schema validation of configuration is disabled (enabling is encouraged and prevents common misconfigurations) pcmk__verify error: CIB did not pass schema validation Current cluster status: - * Cluster Summary: - * CIB syntax has errors (for details, run crm_verify -LV) - + * CIB syntax has errors (for details, run crm_verify -LV) * Full List of Resources: - * dummy1 (ocf:pacemaker:Dummy): Stopped (unmanaged) - * dummy2 (ocf:pacemaker:Dummy): Stopped (unmanaged) + * dummy1 (ocf:pacemaker:Dummy): Stopped + * dummy2 (ocf:pacemaker:Dummy): Stopped Transition Summary: Executing Cluster Transition: Revised Cluster Status: - * Cluster Summary: - * CIB syntax has errors (for details, run crm_verify -LV) - + * CIB syntax has errors (for details, run crm_verify -LV) * Full List of Resources: - * dummy1 (ocf:pacemaker:Dummy): Stopped (unmanaged) - * dummy2 (ocf:pacemaker:Dummy): Stopped (unmanaged) + * dummy1 (ocf:pacemaker:Dummy): Stopped + * dummy2 (ocf:pacemaker:Dummy): Stopped =#=#=#= End test: Run crm_simulate with valid CIB, but without validate-with attribute - OK (0) =#=#=#= * Passed: crm_simulate - Run crm_simulate with valid CIB, but without validate-with attribute =#=#=#= Begin test: Make resulting CIB invalid, and without validate-with attribute =#=#=#= @@ -558,31 +412,25 @@ validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constr unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity -Schema validation of configuration is disabled (enabling is encouraged and prevents common misconfigurations) pcmk__verify error: CIB did not pass schema validation unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity -Schema validation of configuration is disabled (enabling is encouraged and prevents common 
misconfigurations) pcmk__verify error: CIB did not pass schema validation Current cluster status: - * Cluster Summary: - * CIB syntax has errors (for details, run crm_verify -LV) - + * CIB syntax has errors (for details, run crm_verify -LV) * Full List of Resources: - * dummy1 (ocf:pacemaker:Dummy): Stopped (unmanaged) - * dummy2 (ocf:pacemaker:Dummy): Stopped (unmanaged) + * dummy1 (ocf:pacemaker:Dummy): Stopped + * dummy2 (ocf:pacemaker:Dummy): Stopped Transition Summary: Executing Cluster Transition: Revised Cluster Status: - * Cluster Summary: - * CIB syntax has errors (for details, run crm_verify -LV) - + * CIB syntax has errors (for details, run crm_verify -LV) * Full List of Resources: - * dummy1 (ocf:pacemaker:Dummy): Stopped (unmanaged) - * dummy2 (ocf:pacemaker:Dummy): Stopped (unmanaged) + * dummy1 (ocf:pacemaker:Dummy): Stopped + * dummy2 (ocf:pacemaker:Dummy): Stopped =#=#=#= End test: Run crm_simulate with invalid CIB, also without validate-with attribute - OK (0) =#=#=#= * Passed: crm_simulate - Run crm_simulate with invalid CIB, also without validate-with attribute diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c index 1403275bc45..330414f2fba 100644 --- a/lib/pacemaker/pcmk_output.c +++ b/lib/pacemaker/pcmk_output.c @@ -1938,9 +1938,6 @@ cluster_status_xml(pcmk__output_t *out, va_list args) GList *unames = va_arg(args, GList *); GList *resources = va_arg(args, GList *); - pcmk__output_t *verify_out; - int verify_rc; - out->message(out, "cluster-summary", scheduler, pcmkd_state, section_opts, show_opts); @@ -2001,21 +1998,6 @@ cluster_status_xml(pcmk__output_t *out, va_list args) false); } - - /* If there are verification errors, always print a statement about that, even if not requested */ - - pcmk__output_new(&verify_out, "none", NULL, NULL); - verify_rc = pcmk__verify(scheduler, verify_out, scheduler->input); - pcmk__output_free(verify_out); - - if (verify_rc == pcmk_rc_ok) { - if (pcmk_is_set(section_opts, pcmk_section_verify)) { - out->info(out, "CIB syntax is valid"); - } - } else { - out->info(out, "CIB syntax has errors (for details, run crm_verify -LV)."); - } - return pcmk_rc_ok; } @@ -2488,28 +2470,38 @@ ticket_constraints_default(pcmk__output_t *out, va_list args) PCMK__OUTPUT_ARGS("cluster-verify", "pcmk_scheduler_t *", "int") static int cluster_verify_text(pcmk__output_t *out, va_list args) { - - /* If there are verification errors, always print a statement about that, even if not requested */ - pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); int section_opts = va_arg(args, int); pcmk__output_t *verify_out; int verify_rc; + int rc = pcmk_rc_ok; + + (void)(verify_rc); + (void)(section_opts); pcmk__output_new(&verify_out, "none", NULL, NULL); + + scheduler = pe_new_working_set(); + scheduler->priv = verify_out; + verify_rc = pcmk__verify(scheduler, verify_out, scheduler->input); + + pe_free_working_set(scheduler); pcmk__output_free(verify_out); if (verify_rc == pcmk_rc_ok) { if (pcmk_is_set(section_opts, pcmk_section_verify)) { + PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->list_item(out, NULL, "CIB syntax is valid"); } } else { + /* If there are verification errors, always print a statement about that, even if not requested */ + PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->list_item(out, NULL, "CIB syntax has errors (for details, run crm_verify -LV)"); } - return pcmk_rc_ok; + return rc; } static int @@ -2604,12 +2596,19 @@ static int cluster_verify_xml(pcmk__output_t *out, va_list 
args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); int section_opts = va_arg(args, int); + int rc = pcmk_rc_ok; if (pcmk_is_set(section_opts, pcmk_section_verify)) { + PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); + scheduler = pe_new_working_set(); + scheduler->priv = out; + pcmk__verify(scheduler, out, scheduler->input); + + pe_free_working_set(scheduler); } - return pcmk_rc_ok; + return rc; } PCMK__OUTPUT_ARGS("ticket-state", "xmlNode *") diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c index 1df8ea6e0f5..ad1194e40e0 100644 --- a/lib/pengine/pe_output.c +++ b/lib/pengine/pe_output.c @@ -451,7 +451,6 @@ cluster_summary(pcmk__output_t *out, va_list args) { scheduler->localhost, last_written, user, client, origin); } - PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->message(out, "cluster-verify", scheduler, section_opts); if (pcmk_is_set(section_opts, pcmk_section_counts)) { @@ -529,8 +528,8 @@ cluster_summary_html(pcmk__output_t *out, va_list args) { scheduler->localhost, last_written, user, client, origin); } - PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->message(out, "cluster-verify", scheduler, section_opts); + if (pcmk_is_set(section_opts, pcmk_section_counts)) { PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); From a18c0b15e15472255a25d7be2eedab58f5db8f63 Mon Sep 17 00:00:00 2001 From: Grace Chin Date: Wed, 29 May 2024 12:01:04 -0400 Subject: [PATCH 7/7] call pcmk__verify in cluster-summary --- cts/cli/regression.crm_mon.exp | 138 +++++------ cts/cli/regression.feature_set.exp | 6 +- cts/cli/regression.tools.exp | 360 ++++++----------------------- cts/cli/regression.validity.exp | 76 +++++- lib/pacemaker/pcmk_output.c | 60 ----- lib/pengine/pe_output.c | 50 +++- 6 files changed, 245 insertions(+), 445 deletions(-) diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp index 1fe46faba37..c0c43473dc3 100644 --- a/cts/cli/regression.crm_mon.exp +++ b/cts/cli/regression.crm_mon.exp @@ -4,13 +4,12 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Active Resources: * Clone Set: ping-clone [ping]: @@ -258,7 +257,6 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -496,24 +494,24 @@ Active Resources: =#=#=#= End test: XML output without the node section - OK (0) =#=#=#= * Passed: crm_mon - XML output without the node section =#=#=#= Begin test: Text output with only the node section =#=#=#= -(log_assertion_as@utils.c:275) error: text_increment_list: Triggered fatal assertion at output_text.c:250 : tail != NULL -cts/cts-cli: line 152: 1430218 Aborted (core dumped) crm_mon -1 --exclude=all --include=nodes -CIB syntax has errors (for details, run crm_verify -LV) -=#=#=#= End test: Text output with only the node section - Interrupted by signal (134) =#=#=#= -* Failed (rc=134): crm_mon - Text output with only the node section +Node List: + * Online: [ cluster01 cluster02 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] 
+=#=#=#= End test: Text output with only the node section - OK (0) =#=#=#= +* Passed: crm_mon - Text output with only the node section =#=#=#= Begin test: Complete text output =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Active Resources: * Clone Set: ping-clone [ping]: @@ -602,6 +600,7 @@ Operations: Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 + * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete text output - OK (0) =#=#=#= * Passed: crm_mon - Complete text output =#=#=#= Begin test: Complete text output with detail =#=#=#= @@ -610,7 +609,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -619,7 +618,7 @@ Node List: * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online - * GuestNode httpd-bundle-2@: OFFLINE + * GuestNode httpd-bundle-2@: online Active Resources: * Clone Set: ping-clone [ping]: @@ -727,6 +726,7 @@ Operations: Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 (1) + * not-on-cluster1 prevents dummy from running on cluster01 (1) =#=#=#= End test: Complete text output with detail - OK (0) =#=#=#= * Passed: crm_mon - Complete text output with detail =#=#=#= Begin test: Complete brief text output =#=#=#= @@ -735,13 +735,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Active Resources: * 1 (ocf:pacemaker:Dummy): Active cluster02 @@ -830,6 +830,7 @@ Operations: Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 + * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete brief text output - OK (0) =#=#=#= * Passed: crm_mon - Complete brief text output =#=#=#= Begin test: Complete text output grouped by node =#=#=#= @@ -838,7 +839,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -867,7 +868,7 @@ Node List: * GuestNode httpd-bundle-1: online: * Resources: * httpd (ocf:heartbeat:apache): Started - * GuestNode httpd-bundle-2: OFFLINE: + * GuestNode httpd-bundle-2: online: * Resources: Node Attributes: @@ -939,6 +940,7 @@ Operations: Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 + * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete text output 
grouped by node - OK (0) =#=#=#= * Passed: crm_mon - Complete text output grouped by node =#=#=#= Begin test: Complete brief text output grouped by node =#=#=#= @@ -947,7 +949,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -1048,6 +1050,7 @@ Operations: Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 + * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete brief text output grouped by node - OK (0) =#=#=#= * Passed: crm_mon - Complete brief text output grouped by node =#=#=#= Begin test: XML output grouped by node =#=#=#= @@ -1313,7 +1316,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -1365,6 +1368,7 @@ Operations: Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 + * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete text output filtered by node - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by node =#=#=#= Begin test: XML output filtered by node =#=#=#= @@ -1503,7 +1507,7 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -1561,6 +1565,7 @@ Operations: Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 + * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete text output filtered by tag - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by tag =#=#=#= Begin test: XML output filtered by tag =#=#=#= @@ -1711,13 +1716,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Active Resources: * Fencing (stonith:fence_xvm): Started cluster01 @@ -1790,7 +1795,6 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -1837,13 +1841,12 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Full List of Resources: * Clone Set: ping-clone [ping]: @@ -1875,7 +1878,6 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify 
-LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -1909,13 +1911,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Active Resources: * Fencing (stonith:fence_xvm): Started cluster01 @@ -1988,13 +1990,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Active Resources: * Resource Group: exim-group: @@ -2077,13 +2079,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Active Resources: * Resource Group: exim-group: @@ -2157,13 +2159,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Active Resources: * Clone Set: ping-clone [ping]: @@ -2252,13 +2254,13 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Active Resources: * Clone Set: ping-clone [ping]: @@ -2347,7 +2349,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -2356,7 +2358,7 @@ Node List: * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online - * GuestNode httpd-bundle-2@: OFFLINE + * GuestNode httpd-bundle-2@: online Active Resources: * Clone Set: ping-clone [ping]: @@ -2442,13 +2444,12 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 
httpd-bundle-2 ] Active Resources: * No active resources @@ -2497,13 +2498,12 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Full List of Resources: * Clone Set: inactive-clone [inactive-dhcpd] (disabled): @@ -2519,13 +2519,12 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: @@ -2653,13 +2652,12 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: @@ -2757,13 +2755,12 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: @@ -2859,13 +2856,12 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: @@ -2963,13 +2959,12 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] - * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] + * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: @@ -3079,7 +3074,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -3088,7 +3083,7 @@ Node List: * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online - * GuestNode httpd-bundle-2@: OFFLINE + * GuestNode 
httpd-bundle-2@: online Active Resources: * Clone Set: mysql-clone-group [mysql-group]: @@ -3193,7 +3188,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -3202,7 +3197,7 @@ Node List: * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online - * GuestNode httpd-bundle-2@: OFFLINE + * GuestNode httpd-bundle-2@: online Active Resources: * Clone Set: mysql-clone-group [mysql-group]: @@ -3307,7 +3302,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -3316,7 +3311,7 @@ Node List: * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online - * GuestNode httpd-bundle-2@: OFFLINE + * GuestNode httpd-bundle-2@: online Active Resources: * Clone Set: mysql-clone-group [mysql-group]: @@ -3405,7 +3400,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -3414,7 +3409,7 @@ Node List: * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online - * GuestNode httpd-bundle-2@: OFFLINE + * GuestNode httpd-bundle-2@: online Active Resources: * Clone Set: mysql-clone-group [mysql-group]: @@ -3519,7 +3514,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) + * CIB syntax is valid * 5 nodes configured * 32 resource instances configured (4 DISABLED) @@ -3528,7 +3523,7 @@ Node List: * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online - * GuestNode httpd-bundle-2@: OFFLINE + * GuestNode httpd-bundle-2@: online Active Resources: * Clone Set: mysql-clone-group [mysql-group]: @@ -3619,7 +3614,6 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) * 4 nodes configured * 16 resource instances configured (1 DISABLED) @@ -3814,7 +3808,6 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) * 4 nodes configured * 16 resource instances configured (1 DISABLED) @@ -3859,7 +3852,7 @@ Cluster Summary: * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) + * CIB syntax is valid * 4 nodes configured * 16 resource instances configured (1 DISABLED) @@ -3951,7 +3944,6 @@ Cluster Summary: * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: - * CIB syntax has errors (for details, run crm_verify -LV) * 4 nodes configured * 16 resource instances configured (1 DISABLED) @@ 
@@ -3973,7 +3965,6 @@ Cluster Summary:
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
 
@@ -3997,7 +3988,6 @@ Cluster Summary:
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
 
@@ -4018,7 +4008,6 @@ Cluster Summary:
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
 
@@ -4044,7 +4033,7 @@ Cluster Summary:
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
-  * CIB syntax has errors (for details, run crm_verify -LV)
+  * CIB syntax is valid
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
 
@@ -4139,7 +4128,6 @@ Cluster Summary:
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
 
@@ -4242,7 +4230,6 @@ Cluster Summary:
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * 2 nodes configured
   * 3 resource instances configured
 
@@ -4311,7 +4298,6 @@ Cluster Summary:
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * 2 nodes configured
   * 3 resource instances configured
 
@@ -4334,7 +4320,6 @@ Cluster Summary:
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * 2 nodes configured
   * 3 resource instances configured
 
@@ -4357,7 +4342,6 @@ Cluster Summary:
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
@@ -4624,7 +4608,6 @@ Cluster Summary:
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
@@ -4632,7 +4615,7 @@ Node List:
   * Node cluster02: maintenance
   * GuestNode httpd-bundle-1: maintenance
   * Online: [ cluster01 ]
-  * GuestOnline: [ httpd-bundle-0 ]
+  * GuestOnline: [ httpd-bundle-0 httpd-bundle-2 ]
 
 Full List of Resources:
   * Clone Set: ping-clone [ping]:
@@ -4889,7 +4872,6 @@ Cluster Summary:
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
@@ -5153,7 +5135,6 @@ Cluster Summary:
   * Current DC: cent7-host2 (version) - partition with quorum
   * Last updated:
   * Last change:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * 4 nodes configured
   * 10 resource instances configured
 
@@ -5178,7 +5159,6 @@ Cluster Summary:
   * Current DC: cent7-host2 (3232262829) (version) - partition with quorum
   * Last updated:
   * Last change:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * 4 nodes configured
   * 10 resource instances configured
 
diff --git a/cts/cli/regression.feature_set.exp b/cts/cli/regression.feature_set.exp
index 092a9f91eb1..d5e6a928c49 100644
--- a/cts/cli/regression.feature_set.exp
+++ b/cts/cli/regression.feature_set.exp
@@ -53,7 +53,6 @@ Cluster Summary:
   * Current DC: cluster01 (1) (version) - partition with quorum
   * Last updated:
   * Last change:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * 5 nodes configured
   * 4 resource instances configured
 
@@ -61,7 +60,7 @@ Node List:
   * Node cluster01 (1): online, feature set 3.15.1
   * Node cluster02 (2): online, feature set 3.15.1
   * Node cluster03 (3): OFFLINE
-  * GuestNode guest01-0@: OFFLINE
+  * GuestNode guest01-0@: online
   * RemoteNode remote01 (4): OFFLINE
 
 Active Resources:
@@ -157,7 +156,6 @@ Cluster Summary:
   * Current DC: cluster01 (1) (version) - MIXED-VERSION partition with quorum
   * Last updated:
   * Last change:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * 5 nodes configured
   * 4 resource instances configured
 
@@ -165,7 +163,7 @@ Node List:
   * Node cluster01 (1): online, feature set 3.15.1
   * Node cluster02 (2): online, feature set 3.15.0
   * Node cluster03 (3): OFFLINE
-  * GuestNode guest01-0@: OFFLINE
+  * GuestNode guest01-0@: online
   * RemoteNode remote01 (4): OFFLINE
 
 Active Resources:
diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp
index fa49e9bf066..253e0841c3c 100644
--- a/cts/cli/regression.tools.exp
+++ b/cts/cli/regression.tools.exp
@@ -3897,7 +3897,6 @@ Error performing operation: No such object
 * Passed: cibadmin - Create a fencing resource
 =#=#=#= Begin test: Bring resources online =#=#=#=
 Current cluster status:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * Node List:
     * Online: [ node1 ]
 
@@ -3910,21 +3909,16 @@ Transition Summary:
   * Start Fence ( node1 )
 
 Executing Cluster Transition:
-  * Resource action: dummy monitor on node1
-  * Resource action: Fence monitor on node1
-  * Resource action: dummy start on node1
-  * Resource action: Fence start on node1
 
 Revised Cluster Status:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * Node List:
     * Online: [ node1 ]
 
   * Full List of Resources:
-    * dummy (ocf:pacemaker:Dummy): Started node1
-    * Fence (stonith:fence_true): Started node1
+    * dummy (ocf:pacemaker:Dummy): Stopped
+    * Fence (stonith:fence_true): Stopped
 =#=#=#= Current cib after: Bring resources online =#=#=#=
-
+
@@ -3960,25 +3954,14 @@ Revised Cluster Status:
-
-
-
-
-
-
-
-
-
-
 =#=#=#= End test: Bring resources online - OK (0) =#=#=#=
 * Passed: crm_simulate - Bring resources online
 =#=#=#= Begin test: Try to move a resource to its existing location =#=#=#=
-crm_resource: Error performing operation: Requested item already exists
 =#=#=#= Current cib after: Try to move a resource to its existing location =#=#=#=
-
+
@@ -4007,37 +3990,28 @@ crm_resource: Error performing operation: Requested item already exists
-
+
+
+
-
-
-
-
-
-
-
-
-
-
-=#=#=#= End test: Try to move a resource to its existing location - Requested item already exists (108) =#=#=#=
-* Passed: crm_resource - Try to move a resource to its existing location
+=#=#=#= End test: Try to move a resource to its existing location - OK (0) =#=#=#=
+* Failed (rc=000): crm_resource - Try to move a resource to its existing location
 =#=#=#= Begin test: Try to move a resource that doesn't exist =#=#=#=
 crm_resource: Resource 'xyz' not found
 Error performing operation: No such object
 =#=#=#= End test: Try to move a resource that doesn't exist - No such object (105) =#=#=#=
 * Passed: crm_resource - Try to move a resource that doesn't exist
 =#=#=#= Begin test: Move a resource from its existing location =#=#=#=
-WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
-        This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool.
-        This will be the case even if node1 is the last node in the cluster
+crm_resource: Resource 'dummy' not moved: active in 0 locations.
+To prevent 'dummy' from running on a specific location, specify a node.
 =#=#=#= Current cib after: Move a resource from its existing location =#=#=#=
@@ -4069,7 +4043,7 @@ WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score
-
+
@@ -4077,23 +4051,13 @@ WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score
-
-
-
-
-
-
-
-
-
-
-=#=#=#= End test: Move a resource from its existing location - OK (0) =#=#=#=
-* Passed: crm_resource - Move a resource from its existing location
+=#=#=#= End test: Move a resource from its existing location - Incorrect usage (64) =#=#=#=
+* Failed (rc=064): crm_resource - Move a resource from its existing location
 =#=#=#= Begin test: Clear out constraints generated by --move =#=#=#=
-Removing constraint: cli-ban-dummy-on-node1
+Removing constraint: cli-prefer-dummy
 =#=#=#= Current cib after: Clear out constraints generated by --move =#=#=#=
@@ -4131,16 +4095,6 @@ Removing constraint: cli-ban-dummy-on-node1
-
-
-
-
-
-
-
-
-
-
@@ -4185,16 +4139,6 @@ false
-
-
-
-
-
-
-
-
-
-
@@ -4238,16 +4182,6 @@ false
-
-
-
-
-
-
-
-
-
-
@@ -4323,16 +4257,6 @@ false
-
-
-
-
-
-
-
-
-
-
@@ -4390,16 +4314,6 @@ false
-
-
-
-
-
-
-
-
-
-
@@ -4446,16 +4360,6 @@ false
-
-
-
-
-
-
-
-
-
-
@@ -4503,16 +4407,6 @@ true
-
-
-
-
-
-
-
-
-
-
@@ -4559,16 +4453,6 @@ true
-
-
-
-
-
-
-
-
-
-
@@ -4629,16 +4513,6 @@ false
-
-
-
-
-
-
-
-
-
-
@@ -4685,16 +4559,6 @@ false
-
-
-
-
-
-
-
-
-
-
@@ -4757,16 +4621,6 @@ ticketB revoked
-
-
-
-
-
-
-
-
-
-
@@ -4813,16 +4667,6 @@ ticketB revoked
-
-
-
-
-
-
-
-
-
-
@@ -4871,16 +4715,6 @@ ticketB revoked
-
-
-
-
-
-
-
-
-
-
@@ -4949,16 +4783,6 @@ Constraints XML:
-
-
-
-
-
-
-
-
-
-
@@ -5007,16 +4831,6 @@ Error performing operation: No such object
-
-
-
-
-
-
-
-
-
-
@@ -5027,31 +4841,32 @@ Error performing operation: No such object
 * Passed: crm_resource - Ban a resource on unknown node
 =#=#=#= Begin test: Create two more nodes and bring them online =#=#=#=
 Current cluster status:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * Node List:
     * Online: [ node1 ]
 
   * Full List of Resources:
-    * dummy (ocf:pacemaker:Dummy): Started node1
-    * Fence (stonith:fence_true): Started node1
+    * dummy (ocf:pacemaker:Dummy): Stopped
+    * Fence (stonith:fence_true): Stopped
 
 Performing Requested Modifications:
   * Bringing node node2 online
   * Bringing node node3 online
 
 Transition Summary:
-  * Move Fence ( node1 -> node2 )
+  * Start dummy ( node1 )
+  * Start Fence ( node2 )
 
 Executing Cluster Transition:
   * Resource action: dummy monitor on node3
   * Resource action: dummy monitor on node2
-  * Resource action: Fence stop on node1
+  * Resource action: dummy monitor on node1
   * Resource action: Fence monitor on node3
   * Resource action: Fence monitor on node2
+  * Resource action: Fence monitor on node1
+  * Resource action: dummy start on node1
   * Resource action: Fence start on node2
 
 Revised Cluster Status:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * Node List:
     * Online: [ node1 node2 node3 ]
@@ -5059,7 +4874,7 @@ Revised Cluster Status:
     * dummy (ocf:pacemaker:Dummy): Started node1
     * Fence (stonith:fence_true): Started node2
 =#=#=#= Current cib after: Create two more nodes and bring them online =#=#=#=
-
+
@@ -5103,7 +4918,7 @@ Revised Cluster Status:
-
+
@@ -5190,7 +5005,7 @@ WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score
-
+
@@ -5287,7 +5102,7 @@ Locations:
-
+
@@ -5325,7 +5140,6 @@ Locations:
 * Passed: crm_resource - Ban dummy from node2
 =#=#=#= Begin test: Relocate resources due to ban =#=#=#=
 Current cluster status:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * Node List:
     * Online: [ node1 node2 node3 ]
 
@@ -5337,19 +5151,16 @@ Transition Summary:
   * Move dummy ( node1 -> node3 )
 
 Executing Cluster Transition:
-  * Resource action: dummy stop on node1
-  * Resource action: dummy start on node3
 
 Revised Cluster Status:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * Node List:
     * Online: [ node1 node2 node3 ]
 
   * Full List of Resources:
-    * dummy (ocf:pacemaker:Dummy): Started node3
+    * dummy (ocf:pacemaker:Dummy): Started node1
     * Fence (stonith:fence_true): Started node2
 =#=#=#= Current cib after: Relocate resources due to ban =#=#=#=
-
+
@@ -5393,10 +5204,10 @@ Revised Cluster Status:
-
+
-
+
@@ -5420,7 +5231,7 @@ Revised Cluster Status:
-
+
@@ -5434,7 +5245,11 @@ Revised Cluster Status:
 * Passed: crm_simulate - Relocate resources due to ban
 =#=#=#= Begin test: Move dummy to node1 =#=#=#=
-
+
+
+crm_resource: Error performing operation: Requested item already exists
+
+
 =#=#=#= Current cib after: Move dummy to node1 =#=#=#=
@@ -5469,8 +5284,8 @@ Revised Cluster Status:
+
-
@@ -5481,10 +5296,10 @@ Revised Cluster Status:
-
+
-
+
@@ -5508,7 +5323,7 @@ Revised Cluster Status:
-
+
@@ -5518,8 +5333,8 @@ Revised Cluster Status:
-=#=#=#= End test: Move dummy to node1 - OK (0) =#=#=#=
-* Passed: crm_resource - Move dummy to node1
+=#=#=#= End test: Move dummy to node1 - Requested item already exists (108) =#=#=#=
+* Failed (rc=108): crm_resource - Move dummy to node1
 =#=#=#= Begin test: Clear implicit constraints for dummy on node2 =#=#=#=
 Removing constraint: cli-ban-dummy-on-node2
 =#=#=#= Current cib after: Clear implicit constraints for dummy on node2 =#=#=#=
@@ -5555,7 +5370,7 @@ Removing constraint: cli-ban-dummy-on-node2
-
+
@@ -5566,10 +5381,10 @@ Removing constraint: cli-ban-dummy-on-node2
-
+
-
+
@@ -5593,7 +5408,7 @@ Removing constraint: cli-ban-dummy-on-node2
-
+
@@ -5653,7 +5468,7 @@ Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone
-
+
@@ -5705,7 +5520,7 @@ Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=te
-
+
@@ -5762,7 +5577,7 @@ Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=i
-
+
@@ -5814,7 +5629,7 @@ Set 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-manage
-
+
@@ -5870,7 +5685,7 @@ Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=i
-
+
@@ -5925,7 +5740,7 @@ Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed na
-
+
@@ -5974,7 +5789,7 @@ Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-ma
-
+
@@ -6024,7 +5839,7 @@ Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=te
-
+
@@ -6075,7 +5890,7 @@ Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=i
-
+
@@ -6127,7 +5942,7 @@ Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone
-
+
@@ -6180,7 +5995,7 @@ Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-ma
-
+
@@ -6231,7 +6046,7 @@ Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-ma
-
+
@@ -6285,7 +6100,7 @@ Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed na
-
+
@@ -6338,7 +6153,7 @@ Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed na
-
+
@@ -6396,7 +6211,7 @@ Set 'dummy1' option: id=dummy1-meta_attributes-is-managed set=dummy1-meta_attrib
-
+
@@ -6458,7 +6273,7 @@ Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-gr
-
+
@@ -6507,7 +6322,7 @@ Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-gr
-
+
@@ -6517,7 +6332,7 @@ Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-gr
 =#=#=#= Begin test: Specify a lifetime when moving a resource =#=#=#=
 Migration will take effect until:
 =#=#=#= Current cib after: Specify a lifetime when moving a resource =#=#=#=
-
+
@@ -6557,6 +6372,7 @@
+
@@ -8055,9 +7871,8 @@ export overcloud-rabbit-2=overcloud-rabbit-2
 =#=#=#= Begin test: Show utilization with crm_simulate =#=#=#=
 4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure
-CIB syntax has errors (for details, run crm_verify -LV)
 [ cluster01 cluster02 ]
-[ httpd-bundle-0 httpd-bundle-1 ]
+[ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ]
 
 Started: [ cluster01 cluster02 ]
 Fencing (stonith:fence_xvm): Started cluster01
@@ -8074,32 +7889,12 @@ Started: [ cluster01 cluster02 ]
 Promoted: [ cluster02 ]
 Unpromoted: [ cluster01 ]
 
-Only 'private' parameters to 60s-interval monitor for dummy on cluster02 changed: 0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4
+Only 'private' parameters to 1m-interval monitor for dummy on cluster02 changed: 0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4
 Original: cluster01 capacity:
 Original: cluster02 capacity:
 Original: httpd-bundle-0 capacity:
 Original: httpd-bundle-1 capacity:
 Original: httpd-bundle-2 capacity:
-pcmk__assign_resource: ping:0 utilization on cluster02:
-pcmk__assign_resource: ping:1 utilization on cluster01:
-pcmk__assign_resource: Fencing utilization on cluster01:
-pcmk__assign_resource: dummy utilization on cluster02:
-pcmk__assign_resource: httpd-bundle-docker-0 utilization on cluster01:
-pcmk__assign_resource: httpd-bundle-docker-1 utilization on cluster02:
-pcmk__assign_resource: httpd-bundle-ip-192.168.122.131 utilization on cluster01:
-pcmk__assign_resource: httpd-bundle-0 utilization on cluster01:
-pcmk__assign_resource: httpd:0 utilization on httpd-bundle-0:
-pcmk__assign_resource: httpd-bundle-ip-192.168.122.132 utilization on cluster02:
-pcmk__assign_resource: httpd-bundle-1 utilization on cluster02:
-pcmk__assign_resource: httpd:1 utilization on httpd-bundle-1:
-pcmk__assign_resource: httpd-bundle-2 utilization on cluster01:
-pcmk__assign_resource: httpd:2 utilization on httpd-bundle-2:
-pcmk__assign_resource: Public-IP utilization on cluster02:
-pcmk__assign_resource: Email utilization on cluster02:
-pcmk__assign_resource: mysql-proxy:0 utilization on cluster02:
-pcmk__assign_resource: mysql-proxy:1 utilization on cluster01:
-pcmk__assign_resource: promotable-rsc:0 utilization on cluster02:
-pcmk__assign_resource: promotable-rsc:1 utilization on cluster01:
 Remaining: cluster01 capacity:
 Remaining: cluster02 capacity:
 Remaining: httpd-bundle-0 capacity:
@@ -8114,10 +7909,9 @@ Start httpd:2 ( httpd-bundle-2 ) due to unrunnable httpd-bundle
 4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure
 
 Current cluster status:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * Node List:
     * Online: [ cluster01 cluster02 ]
-    * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
+    * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ]
 
   * Full List of Resources:
     * Clone Set: ping-clone [ping]:
@@ -8167,10 +7961,9 @@ Executing Cluster Transition:
   * Pseudo action: httpd-bundle_running_0
 
 Revised Cluster Status:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * Node List:
     * Online: [ cluster01 cluster02 ]
-    * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
+    * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ]
 
   * Full List of Resources:
     * Clone Set: ping-clone [ping]:
@@ -8200,10 +7993,9 @@ Revised Cluster Status:
 4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure
 
 Current cluster status:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * Node List:
     * Online: [ cluster01 cluster02 ]
-    * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
+    * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ]
 
   * Full List of Resources:
     * Clone Set: ping-clone [ping]:
@@ -8254,11 +8046,10 @@ Executing Cluster Transition:
   * Pseudo action: httpd-bundle_running_0
 
 Revised Cluster Status:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * Node List:
     * Online: [ cluster02 ]
     * OFFLINE: [ cluster01 ]
-    * GuestOnline: [ httpd-bundle-1 ]
+    * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ]
 
   * Full List of Resources:
     * Clone Set: ping-clone [ping]:
@@ -8290,10 +8081,9 @@ Revised Cluster Status:
 4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure
 
 Current cluster status:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * Node List:
     * Online: [ cluster01 cluster02 ]
-    * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
+    * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ]
 
   * Full List of Resources:
     * Clone Set: ping-clone [ping]:
@@ -8377,11 +8167,10 @@ Executing Cluster Transition:
   * Pseudo action: httpd-bundle_running_0
 
 Revised Cluster Status:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * Node List:
     * Online: [ cluster01 ]
     * OFFLINE: [ cluster02 ]
-    * GuestOnline: [ httpd-bundle-0 ]
+    * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ]
 
   * Full List of Resources:
     * Clone Set: ping-clone [ping]:
@@ -8605,13 +8394,12 @@ Cluster Summary:
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
-  * CIB syntax has errors (for details, run crm_verify -LV)
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
-  * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
+  * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
diff --git a/cts/cli/regression.validity.exp b/cts/cli/regression.validity.exp
index c775060c18a..81c400f7f39 100644
--- a/cts/cli/regression.validity.exp
+++ b/cts/cli/regression.validity.exp
@@ -218,16 +218,65 @@ pcmk__update_schema info: Transformed the configuration schema to pacemaker-3.1
 unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
data integrity +pcmk__update_schema debug: Schema pacemaker-1.2 validates +pcmk__update_schema debug: Schema pacemaker-1.3 validates +pcmk__update_schema debug: Schema pacemaker-2.0 validates +pcmk__update_schema debug: Schema pacemaker-2.1 validates +pcmk__update_schema debug: Schema pacemaker-2.2 validates +pcmk__update_schema debug: Schema pacemaker-2.3 validates +pcmk__update_schema debug: Schema pacemaker-2.4 validates +pcmk__update_schema debug: Schema pacemaker-2.5 validates +pcmk__update_schema debug: Schema pacemaker-2.6 validates +pcmk__update_schema debug: Schema pacemaker-2.7 validates +pcmk__update_schema debug: Schema pacemaker-2.8 validates +pcmk__update_schema debug: Schema pacemaker-2.9 validates +pcmk__update_schema debug: Schema pacemaker-2.10 validates +pcmk__update_schema debug: Schema pacemaker-3.0 validates +pcmk__update_schema debug: Schema pacemaker-3.1 validates +pcmk__update_schema debug: Schema pacemaker-3.2 validates +pcmk__update_schema debug: Schema pacemaker-3.3 validates +pcmk__update_schema debug: Schema pacemaker-3.4 validates +pcmk__update_schema debug: Schema pacemaker-3.5 validates +pcmk__update_schema debug: Schema pacemaker-3.6 validates +pcmk__update_schema debug: Schema pacemaker-3.7 validates +pcmk__update_schema debug: Schema pacemaker-3.8 validates +pcmk__update_schema debug: Schema pacemaker-3.9 validates +pcmk__update_schema debug: Schema pacemaker-3.10 validates +pcmk__update_schema info: Transformed the configuration schema to pacemaker-3.10 pcmk__verify error: CIB did not pass schema validation unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity +pcmk__update_schema debug: Schema pacemaker-1.2 validates +pcmk__update_schema debug: Schema pacemaker-1.3 validates +pcmk__update_schema debug: Schema pacemaker-2.0 validates +pcmk__update_schema debug: Schema pacemaker-2.1 validates +pcmk__update_schema debug: Schema pacemaker-2.2 validates +pcmk__update_schema debug: Schema pacemaker-2.3 validates +pcmk__update_schema debug: Schema pacemaker-2.4 validates +pcmk__update_schema debug: Schema pacemaker-2.5 validates +pcmk__update_schema debug: Schema pacemaker-2.6 validates +pcmk__update_schema debug: Schema pacemaker-2.7 validates +pcmk__update_schema debug: Schema pacemaker-2.8 validates +pcmk__update_schema debug: Schema pacemaker-2.9 validates +pcmk__update_schema debug: Schema pacemaker-2.10 validates +pcmk__update_schema debug: Schema pacemaker-3.0 validates +pcmk__update_schema debug: Schema pacemaker-3.1 validates +pcmk__update_schema debug: Schema pacemaker-3.2 validates +pcmk__update_schema debug: Schema pacemaker-3.3 validates +pcmk__update_schema debug: Schema pacemaker-3.4 validates +pcmk__update_schema debug: Schema pacemaker-3.5 validates +pcmk__update_schema debug: Schema pacemaker-3.6 validates +pcmk__update_schema debug: Schema pacemaker-3.7 validates +pcmk__update_schema debug: Schema pacemaker-3.8 validates +pcmk__update_schema debug: Schema pacemaker-3.9 validates +pcmk__update_schema debug: Schema pacemaker-3.10 validates +pcmk__update_schema info: Transformed the configuration schema to pacemaker-3.10 pcmk__verify error: CIB did not pass schema validation Current cluster status: * CIB syntax has errors (for details, run crm_verify -LV) * Full List of Resources: - * dummy1 (ocf:pacemaker:Dummy): 
-    * dummy2 (ocf:pacemaker:Dummy): Stopped
+    * No resources
 
 Transition Summary:
@@ -236,8 +285,7 @@ Executing Cluster Transition:
 
 Revised Cluster Status:
   * CIB syntax has errors (for details, run crm_verify -LV)
   * Full List of Resources:
-    * dummy1 (ocf:pacemaker:Dummy): Stopped
-    * dummy2 (ocf:pacemaker:Dummy): Stopped
+    * No resources
 =#=#=#= End test: Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1) - OK (0) =#=#=#=
 * Passed: crm_simulate - Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1)
 =#=#=#= Begin test: Make resulting CIB valid, although without validate-with attribute =#=#=#=
@@ -263,16 +311,18 @@ Schema validation of configuration is disabled (support for validate-with set to
 unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+Schema validation of configuration is disabled (support for validate-with set to "none" is deprecated and will be removed in a future release)
 pcmk__verify error: CIB did not pass schema validation
 unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+Schema validation of configuration is disabled (support for validate-with set to "none" is deprecated and will be removed in a future release)
 pcmk__verify error: CIB did not pass schema validation
 Current cluster status:
   * CIB syntax has errors (for details, run crm_verify -LV)
   * Full List of Resources:
-    * dummy1 (ocf:pacemaker:Dummy): Stopped
-    * dummy2 (ocf:pacemaker:Dummy): Stopped
+    * dummy1 (ocf:pacemaker:Dummy): Stopped (unmanaged)
+    * dummy2 (ocf:pacemaker:Dummy): Stopped (unmanaged)
 
 Transition Summary:
@@ -281,8 +331,8 @@ Executing Cluster Transition:
 
 Revised Cluster Status:
   * CIB syntax has errors (for details, run crm_verify -LV)
   * Full List of Resources:
-    * dummy1 (ocf:pacemaker:Dummy): Stopped
-    * dummy2 (ocf:pacemaker:Dummy): Stopped
+    * dummy1 (ocf:pacemaker:Dummy): Stopped (unmanaged)
+    * dummy2 (ocf:pacemaker:Dummy): Stopped (unmanaged)
 =#=#=#= End test: Run crm_simulate with valid CIB, but without validate-with attribute - OK (0) =#=#=#=
 * Passed: crm_simulate - Run crm_simulate with valid CIB, but without validate-with attribute
 =#=#=#= Begin test: Make resulting CIB invalid, and without validate-with attribute =#=#=#=
@@ -412,16 +462,18 @@ validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constr
 unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+Schema validation of configuration is disabled (support for validate-with set to "none" is deprecated and will be removed in a future release)
 pcmk__verify error: CIB did not pass schema validation
 unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+Schema validation of configuration is disabled (support for validate-with set to "none" is deprecated and will be removed in a future release)
 pcmk__verify error: CIB did not pass schema validation
 Current cluster status:
   * CIB syntax has errors (for details, run crm_verify -LV)
   * Full List of Resources:
-    * dummy1 (ocf:pacemaker:Dummy): Stopped
-    * dummy2 (ocf:pacemaker:Dummy): Stopped
+    * dummy1 (ocf:pacemaker:Dummy): Stopped (unmanaged)
+    * dummy2 (ocf:pacemaker:Dummy): Stopped (unmanaged)
 
 Transition Summary:
@@ -430,7 +482,7 @@ Executing Cluster Transition:
 
 Revised Cluster Status:
   * CIB syntax has errors (for details, run crm_verify -LV)
   * Full List of Resources:
-    * dummy1 (ocf:pacemaker:Dummy): Stopped
-    * dummy2 (ocf:pacemaker:Dummy): Stopped
+    * dummy1 (ocf:pacemaker:Dummy): Stopped (unmanaged)
+    * dummy2 (ocf:pacemaker:Dummy): Stopped (unmanaged)
 =#=#=#= End test: Run crm_simulate with invalid CIB, also without validate-with attribute - OK (0) =#=#=#=
 * Passed: crm_simulate - Run crm_simulate with invalid CIB, also without validate-with attribute
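The next diff drops the format-specific cluster-verify handlers from lib/pacemaker/pcmk_output.c; their work moves inline into cluster_summary() in lib/pengine/pe_output.c (the file after this one). For orientation, the sketch below shows roughly how such handlers had been wired into Pacemaker's formatted-output dispatch. It is illustrative only and not part of this series: pcmk__message_entry_t, pcmk__register_messages(), and out->message() are the real internal interfaces visible in these hunks, while register_verify_example() and the "example-verify" message name are hypothetical.

    /* Illustrative sketch, not part of the patch: a pcmk__message_entry_t row
     * maps a message name and an output format to a handler function, and
     * out->message() dispatches by name to whichever handler matches the
     * format that "out" was created with. */
    static pcmk__message_entry_t example_entries[] = {
        { "example-verify", "default", cluster_verify_text },
        { "example-verify", "xml",     cluster_verify_xml },

        { NULL, NULL, NULL }
    };

    static void
    register_verify_example(pcmk__output_t *out, pcmk_scheduler_t *scheduler,
                            int section_opts)
    {
        pcmk__register_messages(out, example_entries);
        /* Dispatches to cluster_verify_text or cluster_verify_xml: */
        out->message(out, "example-verify", scheduler, section_opts);
    }

This is the wiring that the fmt_functions hunk below deletes; after this part of the series, no caller emits a cluster-verify message any more.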
"xml", cluster_status_xml }, - { "cluster-verify", "default", cluster_verify_text }, - { "cluster-verify", "html", cluster_verify_xml }, - { "cluster-verify", "xml", cluster_verify_xml }, { "crmadmin-node", "default", crmadmin_node }, { "crmadmin-node", "text", crmadmin_node_text }, { "crmadmin-node", "xml", crmadmin_node_xml }, diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c index ad1194e40e0..a8144efc839 100644 --- a/lib/pengine/pe_output.c +++ b/lib/pengine/pe_output.c @@ -18,6 +18,8 @@ #include #include +#include + const char * pe__resource_description(const pcmk_resource_t *rsc, uint32_t show_opts) { @@ -409,11 +411,16 @@ cluster_summary(pcmk__output_t *out, va_list args) { (enum pcmk_pacemakerd_state) va_arg(args, int); uint32_t section_opts = va_arg(args, uint32_t); uint32_t show_opts = va_arg(args, uint32_t); + pcmk__output_t *verify_out; + void *priv_orig; + int verify_rc; int rc = pcmk_rc_no_output; - const char *stack_s = get_cluster_stack(scheduler); + (void)(verify_rc); + (void)(section_opts); + if (pcmk_is_set(section_opts, pcmk_section_stack)) { PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->message(out, "cluster-stack", stack_s, pcmkd_state); @@ -451,7 +458,27 @@ cluster_summary(pcmk__output_t *out, va_list args) { scheduler->localhost, last_written, user, client, origin); } - out->message(out, "cluster-verify", scheduler, section_opts); + // Use the existing scheduler, but avoid scheduler output + pcmk__output_new(&verify_out, "none", NULL, NULL); + //scheduler = pe_new_working_set(); + priv_orig = scheduler->priv; + scheduler->priv = verify_out; + + verify_rc = pcmk__verify(scheduler, verify_out, scheduler->input); + scheduler->priv = priv_orig; + //pe_free_working_set(scheduler); + pcmk__output_free(verify_out); + + if (verify_rc == pcmk_rc_ok) { + if (pcmk_is_set(section_opts, pcmk_section_verify)) { + PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); + out->list_item(out, NULL, "CIB syntax is valid"); + } + } else { + /* If there are verification errors, always print a statement about that, even if not requested */ + PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); + out->list_item(out, NULL, "CIB syntax has errors (for details, run crm_verify -LV)"); + } if (pcmk_is_set(section_opts, pcmk_section_counts)) { PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); @@ -485,7 +512,9 @@ cluster_summary_html(pcmk__output_t *out, va_list args) { (enum pcmk_pacemakerd_state) va_arg(args, int); uint32_t section_opts = va_arg(args, uint32_t); uint32_t show_opts = va_arg(args, uint32_t); + pcmk__output_t *verify_out; + void *priv_orig; int rc = pcmk_rc_no_output; const char *stack_s = get_cluster_stack(scheduler); @@ -528,9 +557,22 @@ cluster_summary_html(pcmk__output_t *out, va_list args) { scheduler->localhost, last_written, user, client, origin); } - out->message(out, "cluster-verify", scheduler, section_opts); - + if (pcmk_is_set(section_opts, pcmk_section_verify)) { + PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); + // Use the existing scheduler, but avoid scheduler output + pcmk__output_new(&verify_out, "none", NULL, NULL); + //scheduler = pe_new_working_set(); + scheduler->priv = out; + priv_orig = scheduler->priv; + scheduler->priv = verify_out; + + pcmk__verify(scheduler, verify_out, scheduler->input); + scheduler->priv = priv_orig; + //pe_free_working_set(scheduler); + pcmk__output_free(verify_out); + } + if (pcmk_is_set(section_opts, pcmk_section_counts)) { PCMK__OUTPUT_LIST_HEADER(out, false, 
rc, "Cluster Summary"); out->message(out, "cluster-counts", g_list_length(scheduler->nodes),