pacemaker 2.1.8-2.1.8
Scalable High-Availability cluster resource manager
Loading...
Searching...
No Matches
pcmk_scheduler.c
Go to the documentation of this file.
1/*
2 * Copyright 2004-2024 the Pacemaker project contributors
3 *
4 * The version control history for this file may have further details.
5 *
6 * This source code is licensed under the GNU General Public License version 2
7 * or later (GPLv2+) WITHOUT ANY WARRANTY.
8 */
9
10#include <crm_internal.h>
11
12#include <crm/crm.h>
13#include <crm/cib.h>
14#include <crm/cib/internal.h>
15#include <crm/common/xml.h>
18
19#include <glib.h>
20
21#include <crm/pengine/status.h>
22#include <pacemaker-internal.h>
24
26
/*!
 * \internal
 * \brief Handle a deferred parameter check for a resource history entry,
 *        clearing the resource's fail count on the node if its configuration
 *        is found to have changed there
 *
 * \param[in,out] rsc     Resource that the history entry is for
 * \param[in,out] node    Node that the history entry occurred on
 * \param[in]     rsc_op  Resource history entry (XML) to check
 * \param[in]     check   Type of parameter check to perform
 */
42static void
43check_params(pcmk_resource_t *rsc, pcmk_node_t *node, const xmlNode *rsc_op,
44 enum pcmk__check_parameters check)
45{
46 const char *reason = NULL;
47 pcmk__op_digest_t *digest_data = NULL;
48
49 switch (check) {
 /* NOTE(review): this extraction dropped a case label here (presumably
  * "case pcmk__check_active:" given the enum values referenced elsewhere
  * in this dump -- confirm against upstream)
  */
51 if (pcmk__check_action_config(rsc, node, rsc_op)
52 && pe_get_failcount(node, rsc, NULL, pcmk__fc_effective,
53 NULL)) {
54 reason = "action definition changed";
55 }
56 break;
57
 /* NOTE(review): dropped case label here (presumably
  * "case pcmk__check_last_failure:" -- confirm against upstream)
  */
59 digest_data = rsc_action_digest_cmp(rsc, rsc_op, node,
60 rsc->cluster);
61 switch (digest_data->rc) {
 /* NOTE(review): dropped case label here (presumably
  * "case pcmk__digest_unknown:" -- confirm against upstream)
  */
63 crm_trace("Resource %s history entry %s on %s has "
64 "no digest to compare",
65 rsc->id, pcmk__xe_id(rsc_op), node->details->id);
66 break;
 /* NOTE(review): dropped case label here (presumably
  * "case pcmk__digest_match:" -- confirm against upstream)
  */
68 break;
69 default:
 // Any digest mismatch means the parameters changed
70 reason = "resource parameters have changed";
71 break;
72 }
73 break;
74 }
 // Schedule a fail count clearing if any check above found a change
75 if (reason != NULL) {
76 pe__clear_failcount(rsc, node, reason, rsc->cluster);
77 }
78}
79
/*!
 * \internal
 * \brief Check whether a fail-count-clearing action already exists for a
 *        resource on a node
 *
 * \param[in] node  Node to check
 * \param[in] rsc   Resource to check
 *
 * \return true if such an action exists, otherwise false
 */
90static bool
91failcount_clear_action_exists(const pcmk_node_t *node,
92 const pcmk_resource_t *rsc)
93{
 /* NOTE(review): this extraction dropped the declaration line here
  * (presumably "GList *list = pe__resource_actions(rsc, node,
  * PCMK_ACTION_CLEAR_FAILCOUNT, TRUE);" given the symbols referenced
  * elsewhere in this dump -- confirm against upstream)
  */
95 TRUE);
96
 // Only the existence of a match matters, so free the list immediately
97 if (list != NULL) {
98 g_list_free(list);
99 return true;
100 }
101 return false;
102}
103
/*!
 * \internal
 * \brief Ban a resource from a node if it has reached its failure threshold
 *        there (GFunc callback usable with g_list_foreach())
 *
 * \param[in,out] data       Resource to check (pcmk_resource_t *)
 * \param[in]     user_data  Node to check the resource on (pcmk_node_t *)
 */
111static void
112check_failure_threshold(gpointer data, gpointer user_data)
113{
114 pcmk_resource_t *rsc = data;
115 const pcmk_node_t *node = user_data;
116
117 // If this is a collective resource, apply recursively to children instead
118 if (rsc->children != NULL) {
119 g_list_foreach(rsc->children, check_failure_threshold, user_data);
120 return;
121 }
122
123 if (!failcount_clear_action_exists(node, rsc)) {
124 /* Don't force the resource away from this node due to a failcount
125 * that's going to be cleared.
126 *
127 * @TODO Failcount clearing can be scheduled in
128 * pcmk__handle_rsc_config_changes() via process_rsc_history(), or in
129 * schedule_resource_actions() via check_params(). This runs well before
130 * then, so it cannot detect those, meaning we might check the migration
131 * threshold when we shouldn't. Worst case, we stop or move the
132 * resource, then move it back in the next transition.
133 */
134 pcmk_resource_t *failed = NULL;
135
136 if (pcmk__threshold_reached(rsc, node, &failed)) {
 /* NOTE(review): this extraction dropped the start of the call here
  * (presumably "resource_location(failed, node,
  * -PCMK_SCORE_INFINITY," -- confirm against upstream)
  */
138 "__fail_limit__", rsc->cluster);
139 }
140 }
141}
142
/*!
 * \internal
 * \brief Ban a resource from a node if exclusive discovery is in effect for
 *        the resource and the node is not an exclusive-discovery node for it
 *        (GFunc callback usable with g_list_foreach())
 *
 * \param[in,out] data       Resource to check (pcmk_resource_t *)
 * \param[in]     user_data  Node to check the resource on (pcmk_node_t *)
 */
156static void
157apply_exclusive_discovery(gpointer data, gpointer user_data)
158{
159 pcmk_resource_t *rsc = data;
160 const pcmk_node_t *node = user_data;
161
 // Exclusive discovery may be set on the resource itself or its top ancestor
162 if (rsc->exclusive_discover
163 || pe__const_top_resource(rsc, false)->exclusive_discover) {
164 pcmk_node_t *match = NULL;
165
166 // If this is a collective resource, apply recursively to children
167 g_list_foreach(rsc->children, apply_exclusive_discovery, user_data);
168
169 match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
170 if ((match != NULL)
 /* NOTE(review): this extraction dropped the rest of the condition
  * here (presumably a check that match->rsc_discover_mode is not
  * pcmk_probe_exclusive -- confirm against upstream)
  */
172 match->weight = -PCMK_SCORE_INFINITY;
173 }
174 }
175}
176
/*!
 * \internal
 * \brief Apply a resource's configured stickiness as a location preference
 *        for its current node (GFunc callback usable with g_list_foreach())
 *
 * \param[in,out] data       Resource to apply stickiness for
 *                           (pcmk_resource_t *)
 * \param[in]     user_data  Ignored
 */
184static void
185apply_stickiness(gpointer data, gpointer user_data)
186{
187 pcmk_resource_t *rsc = data;
188 pcmk_node_t *node = NULL;
189
190 // If this is a collective resource, apply recursively to children instead
191 if (rsc->children != NULL) {
192 g_list_foreach(rsc->children, apply_stickiness, NULL);
193 return;
194 }
195
196 /* A resource is sticky if it is managed, has stickiness configured, and is
197 * active on a single node.
198 */
 /* NOTE(review): this extraction dropped the start of the condition here
  * (presumably a check that pcmk_rsc_managed is set in rsc->flags --
  * confirm against upstream)
  */
200 || (rsc->stickiness < 1) || !pcmk__list_of_1(rsc->running_on)) {
201 return;
202 }
203
 // Safe: the guard above ensured running_on has exactly one entry
204 node = rsc->running_on->data;
205
206 /* In a symmetric cluster, stickiness can always be used. In an
207 * asymmetric cluster, we have to check whether the resource is still
208 * allowed on the node, so we don't keep the resource somewhere it is no
209 * longer explicitly enabled.
210 */
 /* NOTE(review): this extraction dropped the start of the condition here
  * (presumably a check that pcmk_sched_symmetric_cluster is NOT set in
  * the cluster flags -- confirm against upstream)
  */
212 && (g_hash_table_lookup(rsc->allowed_nodes,
213 node->details->id) == NULL)) {
214 pcmk__rsc_debug(rsc,
215 "Ignoring %s stickiness because the cluster is "
216 "asymmetric and %s is not explicitly allowed",
217 rsc->id, pcmk__node_name(node));
218 return;
219 }
220
221 pcmk__rsc_debug(rsc, "Resource %s has %d stickiness on %s",
222 rsc->id, rsc->stickiness, pcmk__node_name(node));
223 resource_location(rsc, node, rsc->stickiness, "stickiness", rsc->cluster);
224}
225
/*!
 * \internal
 * \brief Apply shutdown locks for all resources as appropriate
 *
 * \param[in,out] scheduler  Scheduler data
 */
232static void
233apply_shutdown_locks(pcmk_scheduler_t *scheduler)
234{
 /* NOTE(review): this extraction dropped the guard's condition line here
  * (presumably a check that pcmk_sched_shutdown_lock is NOT set in
  * scheduler->flags -- confirm against upstream)
  */
236 return;
237 }
 // Delegate to each resource's variant-specific shutdown_lock method
238 for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
239 pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
240
241 rsc->cmds->shutdown_lock(rsc);
242 }
243}
244
/*!
 * \internal
 * \brief Count the number of available cluster nodes
 *
 * The result is stored in scheduler->max_valid_nodes and is kept only for
 * API backward compatibility (see the comment below).
 *
 * \param[in,out] scheduler  Scheduler data
 */
251static void
252count_available_nodes(pcmk_scheduler_t *scheduler)
253{
 /* NOTE(review): this extraction dropped the guard's condition line here
  * (presumably a check that pcmk_sched_no_counts is set in
  * scheduler->flags -- confirm against upstream)
  */
255 return;
256 }
257
258 // @COMPAT for API backward compatibility only (cluster does not use value)
259 for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
260 pcmk_node_t *node = (pcmk_node_t *) iter->data;
261
 // A node counts if it is online, not banned, and not a ping node
262 if ((node != NULL) && (node->weight >= 0) && node->details->online
263 && (node->details->type != node_ping)) {
 /* NOTE(review): this extraction dropped the loop body line here
  * (presumably "scheduler->max_valid_nodes++;", which the trace
  * below reports -- confirm against upstream)
  */
265 }
266 }
267 crm_trace("Online node count: %d", scheduler->max_valid_nodes);
268}
269
270/*
271 * \internal
272 * \brief Apply node-specific scheduling criteria
273 *
274 * After the CIB has been unpacked, process node-specific scheduling criteria
275 * including shutdown locks, location constraints, resource stickiness,
276 * migration thresholds, and exclusive resource discovery.
277 */
278static void
279apply_node_criteria(pcmk_scheduler_t *scheduler)
280{
281 crm_trace("Applying node-specific scheduling criteria");
282 apply_shutdown_locks(scheduler);
283 count_available_nodes(scheduler);
 /* NOTE(review): this extraction dropped a call line here (presumably
  * "pcmk__apply_locations(scheduler);" or "pcmk__apply_node_health(...)",
  * both referenced elsewhere in this dump -- confirm against upstream)
  */
285 g_list_foreach(scheduler->resources, apply_stickiness, NULL);
286
 // Apply per-node checks to every (node, resource) pair
287 for (GList *node_iter = scheduler->nodes; node_iter != NULL;
288 node_iter = node_iter->next) {
289 for (GList *rsc_iter = scheduler->resources; rsc_iter != NULL;
290 rsc_iter = rsc_iter->next) {
291 check_failure_threshold(rsc_iter->data, node_iter->data);
292 apply_exclusive_discovery(rsc_iter->data, node_iter->data);
293 }
294 }
295}
296
/*!
 * \internal
 * \brief Assign all resources to nodes, remote connection resources first
 *
 * \param[in,out] scheduler  Scheduler data
 */
303static void
304assign_resources(pcmk_scheduler_t *scheduler)
305{
306 GList *iter = NULL;
307
308 crm_trace("Assigning resources to nodes");
309
 /* NOTE(review): this extraction dropped several lines here (presumably a
  * placement-strategy check with a pcmk__show_node_capacities() call, a
  * pcmk__sort_resources() call, and the opening of a guard for the
  * remote-node loop below -- confirm against upstream)
  */
313 }
315
317 /* Assign remote connection resources first (which will also assign any
318 * colocation dependencies). If the connection is migrating, always
319 * prefer the partial migration target.
320 */
321 for (iter = scheduler->resources; iter != NULL; iter = iter->next) {
322 pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
323
324 if (rsc->is_remote_node) {
325 pcmk__rsc_trace(rsc, "Assigning remote connection resource '%s'",
326 rsc->id);
327 rsc->cmds->assign(rsc, rsc->partial_migration_target, true);
328 }
329 }
330 }
331
332 /* now do the rest of the resources */
333 for (iter = scheduler->resources; iter != NULL; iter = iter->next) {
334 pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
335
336 if (!rsc->is_remote_node) {
337 pcmk__rsc_trace(rsc, "Assigning %s resource '%s'",
338 rsc->xml->name, rsc->id);
339 rsc->cmds->assign(rsc, NULL, true);
340 }
341 }
342
 /* NOTE(review): this extraction dropped a line here (presumably a second
  * pcmk__show_node_capacities() call for the non-default placement
  * strategy -- confirm against upstream)
  */
344}
345
353static void
354clear_failcounts_if_orphaned(gpointer data, gpointer user_data)
355{
356 pcmk_resource_t *rsc = data;
357
358 if (!pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
359 return;
360 }
361 crm_trace("Clear fail counts for orphaned resource %s", rsc->id);
362
363 /* There's no need to recurse into rsc->children because those
364 * should just be unassigned clone instances.
365 */
366
367 for (GList *iter = rsc->cluster->nodes; iter != NULL; iter = iter->next) {
368 pcmk_node_t *node = (pcmk_node_t *) iter->data;
369 pcmk_action_t *clear_op = NULL;
370
371 if (!node->details->online) {
372 continue;
373 }
374 if (pe_get_failcount(node, rsc, NULL, pcmk__fc_effective, NULL) == 0) {
375 continue;
376 }
377
378 clear_op = pe__clear_failcount(rsc, node, "it is orphaned",
379 rsc->cluster);
380
381 /* We can't use order_action_then_stop() here because its
382 * pcmk__ar_guest_allowed breaks things
383 */
384 pcmk__new_ordering(clear_op->rsc, NULL, clear_op, rsc, stop_key(rsc),
385 NULL, pcmk__ar_ordered, rsc->cluster);
386 }
387}
388
/*!
 * \internal
 * \brief Schedule any resource actions needed
 *
 * \param[in,out] scheduler  Scheduler data
 */
395static void
396schedule_resource_actions(pcmk_scheduler_t *scheduler)
397{
398 // Process deferred action checks
399 pe__foreach_param_check(scheduler, check_params);
 /* NOTE(review): this extraction dropped a line here (presumably
  * "pe__free_param_checks(scheduler);", referenced elsewhere in this
  * dump -- confirm against upstream)
  */
401
 /* NOTE(review): dropped lines here (presumably a pcmk_sched_probe_resources
  * flag check around the trace, followed by
  * "pcmk__schedule_probes(scheduler);" -- confirm against upstream)
  */
403 crm_trace("Scheduling probes");
405 }
406
 /* NOTE(review): dropped a condition line here (presumably a
  * pcmk_sched_stop_removed_resources flag check -- confirm upstream)
  */
408 g_list_foreach(scheduler->resources, clear_failcounts_if_orphaned,
409 NULL);
410 }
411
412 crm_trace("Scheduling resource actions");
 // Delegate to each resource's variant-specific create_actions method
413 for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
414 pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
415
416 rsc->cmds->create_actions(rsc);
417 }
418}
419
/*!
 * \internal
 * \brief Check whether a resource or any of its descendants are managed
 *
 * \param[in] rsc  Resource to check
 *
 * \return true if the resource or any descendant is managed, otherwise false
 */
428static bool
429is_managed(const pcmk_resource_t *rsc)
430{
 /* NOTE(review): this extraction dropped the guard's condition line here
  * (presumably a check that pcmk_rsc_managed is set in rsc->flags --
  * confirm against upstream)
  */
432 return true;
433 }
 // Recurse into children so a managed descendant counts
434 for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
435 if (is_managed((pcmk_resource_t *) iter->data)) {
436 return true;
437 }
438 }
439 return false;
440}
441
450static bool
451any_managed_resources(const pcmk_scheduler_t *scheduler)
452{
453 for (const GList *iter = scheduler->resources;
454 iter != NULL; iter = iter->next) {
455 if (is_managed((const pcmk_resource_t *) iter->data)) {
456 return true;
457 }
458 }
459 return false;
460}
461
471static bool
472needs_fencing(const pcmk_node_t *node, bool have_managed)
473{
474 return have_managed && node->details->unclean
475 && pe_can_fence(node->details->data_set, node);
476}
477
486static bool
487needs_shutdown(const pcmk_node_t *node)
488{
489 if (pcmk__is_pacemaker_remote_node(node)) {
490 /* Do not send shutdown actions for Pacemaker Remote nodes.
491 * @TODO We might come up with a good use for this in the future.
492 */
493 return false;
494 }
495 return node->details->online && node->details->shutdown;
496}
497
/*!
 * \internal
 * \brief Track a scheduled fencing action for a non-DC node, ordering it
 *        into a chain when concurrent fencing is disabled
 *
 * \param[in,out] list    List of non-DC fencing actions tracked so far
 * \param[in,out] action  Fencing action to track
 *
 * \return Updated list with \p action prepended
 */
508static GList *
509add_nondc_fencing(GList *list, pcmk_action_t *action,
 /* NOTE(review): this extraction dropped the signature continuation here
  * (presumably "const pcmk_scheduler_t *scheduler)" -- confirm upstream)
  */
511{
 /* NOTE(review): dropped the start of the condition here (presumably a
  * check that pcmk_sched_concurrent_fencing is NOT set in the scheduler
  * flags -- confirm against upstream)
  */
513 && (list != NULL)) {
514 /* Concurrent fencing is disabled, so order each non-DC
515 * fencing in a chain. If there is any DC fencing or
516 * shutdown, it will be ordered after the last action in the
517 * chain later.
518 */
 /* NOTE(review): dropped a line here (presumably an order_actions()
  * call ordering the list head before this action -- confirm upstream)
  */
520 }
521 return g_list_prepend(list, action);
522}
523
530static pcmk_action_t *
531schedule_fencing(pcmk_node_t *node)
532{
533 pcmk_action_t *fencing = pe_fence_op(node, NULL, FALSE, "node is unclean",
534 FALSE, node->details->data_set);
535
536 pcmk__sched_warn("Scheduling node %s for fencing", pcmk__node_name(node));
537 pcmk__order_vs_fence(fencing, node->details->data_set);
538 return fencing;
539}
540
/*!
 * \internal
 * \brief Schedule fencing of unclean nodes and shutdowns of nodes that
 *        requested it, with appropriate ordering relative to the DC
 *
 * \param[in,out] scheduler  Scheduler data
 */
547static void
548schedule_fencing_and_shutdowns(pcmk_scheduler_t *scheduler)
549{
550 pcmk_action_t *dc_down = NULL;
551 bool integrity_lost = false;
552 bool have_managed = any_managed_resources(scheduler);
553 GList *fencing_ops = NULL;
554 GList *shutdown_ops = NULL;
555
556 crm_trace("Scheduling fencing and shutdowns as needed");
557 if (!have_managed) {
558 crm_notice("No fencing will be done until there are resources "
559 "to manage");
560 }
561
562 // Check each node for whether it needs fencing or shutdown
563 for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
564 pcmk_node_t *node = (pcmk_node_t *) iter->data;
565 pcmk_action_t *fencing = NULL;
566
567 /* Guest nodes are "fenced" by recovering their container resource,
568 * so handle them separately.
569 */
570 if (pcmk__is_guest_or_bundle_node(node)) {
571 if (node->details->remote_requires_reset && have_managed
572 && pe_can_fence(scheduler, node)) {
573 pcmk__fence_guest(node);
574 }
575 continue;
576 }
577
578 if (needs_fencing(node, have_managed)) {
579 fencing = schedule_fencing(node);
580
581 // Track DC and non-DC fence actions separately
582 if (node->details->is_dc) {
583 dc_down = fencing;
584 } else {
585 fencing_ops = add_nondc_fencing(fencing_ops, fencing,
586 scheduler);
587 }
588
589 } else if (needs_shutdown(node)) {
 /* NOTE(review): this extraction dropped a line here (presumably
  * "pcmk_action_t *down_op = pcmk__new_shutdown_action(node);",
  * referenced elsewhere in this dump -- confirm against upstream)
  */
591
592 // Track DC and non-DC shutdown actions separately
593 if (node->details->is_dc) {
594 dc_down = down_op;
595 } else {
596 shutdown_ops = g_list_prepend(shutdown_ops, down_op);
597 }
598 }
599
600 if ((fencing == NULL) && node->details->unclean) {
601 integrity_lost = true;
602 pcmk__config_warn("Node %s is unclean but cannot be fenced",
603 pcmk__node_name(node));
604 }
605 }
606
607 if (integrity_lost) {
 /* NOTE(review): dropped a condition line here (presumably a check
  * that fencing is disabled -- confirm against upstream)
  */
609 pcmk__config_warn("Resource functionality and data integrity "
610 "cannot be guaranteed (configure, enable, "
611 "and test fencing to correct this)");
612
 /* NOTE(review): dropped a condition line here (presumably an
  * else-if on cluster quorum -- confirm against upstream)
  */
614 crm_notice("Unclean nodes will not be fenced until quorum is "
615 "attained or " PCMK_OPT_NO_QUORUM_POLICY " is set to "
 /* NOTE(review): dropped a line here (presumably the
  * PCMK_VALUE_IGNORE string continuation -- confirm upstream)
  */
617 }
618 }
619
620 if (dc_down != NULL) {
621 /* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
622 * DC elections. However, we don't want to order non-DC shutdowns before
623 * a DC *fencing*, because even though we don't want a node that's
624 * shutting down to become DC, the DC fencing could be ordered before a
625 * clone stop that's also ordered before the shutdowns, thus leading to
626 * a graph loop.
627 */
628 if (pcmk__str_eq(dc_down->task, PCMK_ACTION_DO_SHUTDOWN,
 /* NOTE(review): dropped the condition continuation here (presumably
  * "pcmk__str_none)) {" -- confirm against upstream)
  */
630 pcmk__order_after_each(dc_down, shutdown_ops);
631 }
632
633 // Order any non-DC fencing before any DC fencing or shutdown
634
 /* NOTE(review): dropped a condition line here (presumably a check
  * that pcmk_sched_concurrent_fencing is set -- confirm upstream)
  */
636 /* With concurrent fencing, order each non-DC fencing action
637 * separately before any DC fencing or shutdown.
638 */
639 pcmk__order_after_each(dc_down, fencing_ops);
640 } else if (fencing_ops != NULL) {
641 /* Without concurrent fencing, the non-DC fencing actions are
642 * already ordered relative to each other, so we just need to order
643 * the DC fencing after the last action in the chain (which is the
644 * first item in the list).
645 */
646 order_actions((pcmk_action_t *) fencing_ops->data, dc_down,
 /* NOTE(review): dropped the argument continuation here (presumably
  * "pcmk__ar_ordered);" -- confirm against upstream)
  */
648 }
649 }
 // Only the list containers are freed; the actions live on in the graph
650 g_list_free(fencing_ops);
651 g_list_free(shutdown_ops);
652}
653
/*!
 * \internal
 * \brief Log the details of all resources (except inactive orphans) via the
 *        scheduler's output object
 *
 * \param[in,out] scheduler  Scheduler data
 */
654static void
655log_resource_details(pcmk_scheduler_t *scheduler)
656{
 /* NOTE(review): this extraction dropped a declaration line here
  * (presumably "pcmk__output_t *out = scheduler->priv;", which the
  * out->message() call below requires -- confirm against upstream)
  */
658 GList *all = NULL;
659
660 /* Due to the `crm_mon --node=` feature, out->message() for all the
661 * resource-related messages expects a list of nodes that we are allowed to
662 * output information for. Here, we create a wildcard to match all nodes.
663 */
664 all = g_list_prepend(all, (gpointer) "*");
665
666 for (GList *item = scheduler->resources; item != NULL; item = item->next) {
667 pcmk_resource_t *rsc = (pcmk_resource_t *) item->data;
668
669 // Log all resources except inactive orphans
 /* NOTE(review): dropped the start of the condition here (presumably a
  * check that pcmk_rsc_removed is NOT set in rsc->flags -- confirm
  * against upstream)
  */
671 || (rsc->role != pcmk_role_stopped)) {
672 out->message(out, pcmk__map_element_name(rsc->xml), 0UL, rsc, all,
673 all);
674 }
675 }
676
677 g_list_free(all);
678}
679
/*!
 * \internal
 * \brief Log all scheduled actions using a temporary log-based output object
 *
 * \param[in,out] scheduler  Scheduler data
 */
680static void
681log_all_actions(pcmk_scheduler_t *scheduler)
682{
683 /* This only ever outputs to the log, so ignore whatever output object was
684 * previously set and just log instead.
685 */
686 pcmk__output_t *prev_out = scheduler->priv;
687 pcmk__output_t *out = NULL;
688
 // Best-effort: if a log output object can't be created, skip logging
689 if (pcmk__log_output_new(&out) != pcmk_rc_ok) {
690 return;
691 }
692
 /* NOTE(review): this extraction dropped lines here (presumably
  * "pe__register_messages(out);" and "pcmk__register_lib_messages(out);",
  * both referenced elsewhere in this dump -- confirm against upstream)
  */
695 pcmk__output_set_log_level(out, LOG_NOTICE);
696 scheduler->priv = out;
697
698 out->begin_list(out, NULL, NULL, "Actions");
 /* NOTE(review): dropped a line here (presumably
  * "pcmk__output_actions(scheduler);" -- confirm against upstream)
  */
700 out->end_list(out);
701 out->finish(out, CRM_EX_OK, true, NULL);
 /* NOTE(review): dropped a line here (presumably
  * "pcmk__output_free(out);" -- confirm against upstream)
  */
703
 // Restore the caller's output object
704 scheduler->priv = prev_out;
705}
706
/*!
 * \internal
 * \brief Log all required but unrunnable actions at trace level
 *
 * \param[in] scheduler  Scheduler data
 */
713static void
714log_unrunnable_actions(const pcmk_scheduler_t *scheduler)
715{
716 const uint64_t flags = pcmk_action_optional
 /* NOTE(review): this extraction dropped the initializer continuation
  * here (presumably "|pcmk_action_runnable |pcmk_action_pseudo;" given
  * the enum values referenced elsewhere in this dump -- confirm upstream)
  */
719
720 crm_trace("Required but unrunnable actions:");
721 for (const GList *iter = scheduler->actions;
722 iter != NULL; iter = iter->next) {
723
724 const pcmk_action_t *action = (const pcmk_action_t *) iter->data;
725
 // Log actions that are neither optional, runnable, nor pseudo
726 if (!pcmk_any_flags_set(action->flags, flags)) {
727 pcmk__log_action("\t", action, true);
728 }
729 }
730}
731
/*!
 * \internal
 * \brief Unpack the CIB into scheduler data (or reuse previously calculated
 *        status), preserving scheduler->localhost across the reset
 *
 * \param[in,out] cib        CIB XML to unpack (must not be NULL)
 * \param[in]     flags      Scheduler flags to set in addition to defaults
 * \param[in,out] scheduler  Scheduler data to populate
 */
740static void
741unpack_cib(xmlNode *cib, unsigned long long flags, pcmk_scheduler_t *scheduler)
742{
743 const char* localhost_save = NULL;
744
 /* NOTE(review): this extraction dropped lines around here (presumably a
  * pcmk_sched_have_status flag check guarding this early return, plus a
  * pcmk__set_scheduler_flags() call -- confirm against upstream)
  */
746 crm_trace("Reusing previously calculated cluster status");
748 return;
749 }
750
 // Save localhost so it survives the scheduler-data reset below
751 if (scheduler->localhost) {
752 localhost_save = scheduler->localhost;
753 }
754
755 CRM_ASSERT(cib != NULL);
756 crm_trace("Calculating cluster status");
757
758 /* This will zero the entire struct without freeing anything first, so
759 * callers should never call pcmk__schedule_actions() with a populated data
760 * set unless pcmk_sched_have_status is set (i.e. cluster_status() was
761 * previously called, whether directly or via pcmk__schedule_actions()).
762 */
 /* NOTE(review): dropped a line here (presumably
  * "set_working_set_defaults(scheduler);", which matches the comment
  * above -- confirm against upstream)
  */
764
765 if (localhost_save) {
766 scheduler->localhost = localhost_save;
767 }
768
 /* NOTE(review): dropped a line here (presumably a
  * pcmk__set_scheduler_flags(scheduler, flags) call -- confirm upstream)
  */
770 scheduler->input = cib;
771 cluster_status(scheduler); // Sets pcmk_sched_have_status
772}
773
/*!
 * \brief Run the scheduler: unpack the CIB, apply node criteria, assign
 *        resources, and schedule all needed actions
 *
 * \param[in,out] cib        CIB XML to use as scheduler input
 * \param[in]     flags      Scheduler flags to set in addition to defaults
 * \param[in,out] scheduler  Scheduler data (last parameter; the signature
 *                           continuation was dropped by this extraction)
 */
782void
783pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
 /* NOTE(review): this extraction dropped the signature continuation here
  * (presumably "pcmk_scheduler_t *scheduler)" -- confirm against upstream)
  */
785{
786 unpack_cib(cib, flags, scheduler);
 /* NOTE(review): dropped several lines here (presumably calls such as
  * pcmk__set_assignment_methods(), pcmk__apply_node_health(), and
  * pcmk__unpack_constraints(), plus the condition guarding this early
  * return -- these symbols all appear elsewhere in this dump; confirm
  * against upstream)
  */
791 return;
792 }
793
 /* NOTE(review): dropped the start of the condition here (presumably a
  * pcmk_sched_location_only flag check -- confirm against upstream)
  */
795 && pcmk__is_daemon) {
796 log_resource_details(scheduler);
797 }
798
799 apply_node_criteria(scheduler);
800
 /* NOTE(review): dropped the condition guarding this early return here
  * (presumably a pcmk_sched_location_only flag check -- confirm upstream)
  */
802 return;
803 }
804
 /* NOTE(review): dropped lines here (presumably
  * pcmk__create_internal_constraints() and
  * pcmk__handle_rsc_config_changes() calls -- confirm against upstream)
  */
807 assign_resources(scheduler);
808 schedule_resource_actions(scheduler);
809
810 /* Remote ordering constraints need to happen prior to calculating fencing
811 * because it is one more place we can mark nodes as needing fencing.
812 */
 /* NOTE(review): dropped a line here (presumably
  * "pcmk__order_remote_connection_actions(scheduler);" -- confirm
  * against upstream)
  */
814
815 schedule_fencing_and_shutdowns(scheduler);
 /* NOTE(review): dropped a line here (presumably
  * "pcmk__apply_orderings(scheduler);" -- confirm against upstream)
  */
817 log_all_actions(scheduler);
 /* NOTE(review): dropped a line here (presumably
  * "pcmk__create_graph(scheduler);" -- confirm against upstream)
  */
819
 // Unrunnable-action logging is expensive, so only do it at trace level
820 if (get_crm_log_level() == LOG_TRACE) {
821 log_unrunnable_actions(scheduler);
822 }
823}
824
/*!
 * \brief Create and populate scheduler data from a CIB
 *
 * Per the cross-reference in this dump, the (dropped) signature is:
 * int pcmk__init_scheduler(pcmk__output_t *out, xmlNodePtr input,
 *                          const crm_time_t *date,
 *                          pcmk_scheduler_t **scheduler)
 *
 * On success, *scheduler receives a newly allocated, fully unpacked
 * scheduler object that the caller must free with pe_free_working_set().
 *
 * \return pcmk_rc_ok on success, ENOMEM or a CIB query error code otherwise
 */
846int
 /* NOTE(review): this extraction dropped the signature lines here
  * (see the reconstructed prototype above -- confirm against upstream)
  */
849{
850 // Allows for cleaner syntax than dereferencing the scheduler argument
851 pcmk_scheduler_t *new_scheduler = NULL;
852
853 new_scheduler = pe_new_working_set();
854 if (new_scheduler == NULL) {
855 return ENOMEM;
856 }
857
858 pcmk__set_scheduler_flags(new_scheduler,
 /* NOTE(review): dropped the flags argument here (presumably
  * "pcmk_sched_no_counts|pcmk_sched_no_compat);" given the enum values
  * referenced elsewhere in this dump -- confirm against upstream)
  */
860
861 // Populate the scheduler data
862
863 // Make our own copy of the given input or fetch the CIB and use that
864 if (input != NULL) {
865 new_scheduler->input = pcmk__xml_copy(NULL, input);
866 if (new_scheduler->input == NULL) {
867 out->err(out, "Failed to copy input XML");
868 pe_free_working_set(new_scheduler);
869 return ENOMEM;
870 }
871
872 } else {
873 int rc = cib__signon_query(out, NULL, &(new_scheduler->input));
874
875 if (rc != pcmk_rc_ok) {
876 pe_free_working_set(new_scheduler);
877 return rc;
878 }
879 }
880
881 // Make our own copy of the given crm_time_t object; otherwise
882 // cluster_status() populates with the current time
883 if (date != NULL) {
884 // pcmk_copy_time() guarantees non-NULL
885 new_scheduler->now = pcmk_copy_time(date);
886 }
887
888 // Unpack everything
889 cluster_status(new_scheduler);
890 *scheduler = new_scheduler;
891
892 return pcmk_rc_ok;
893}
@ pcmk__ar_ordered
Actions are ordered (optionally, if no other flags are set)
#define PCMK_ACTION_CLEAR_FAILCOUNT
Definition actions.h:46
@ pcmk_action_runnable
Definition actions.h:207
@ pcmk_action_pseudo
Definition actions.h:204
@ pcmk_action_optional
Definition actions.h:210
#define PCMK_ACTION_DO_SHUTDOWN
Definition actions.h:51
int cib__signon_query(pcmk__output_t *out, cib_t **cib, xmlNode **cib_object)
Definition cib_utils.c:965
Cluster Configuration.
bool pcmk__is_daemon
Definition logging.c:47
uint64_t flags
Definition remote.c:3
#define pcmk_is_set(g, f)
Convenience alias for pcmk_all_flags_set(), to check single flag.
Definition util.h:98
char data[0]
Definition cpg.c:10
A dumping ground.
@ pcmk__digest_match
@ pcmk__digest_unknown
@ pcmk__fc_effective
crm_time_t * pcmk_copy_time(const crm_time_t *source)
Definition iso8601.c:1424
struct crm_time_s crm_time_t
Definition iso8601.h:32
G_GNUC_INTERNAL void pcmk__order_vs_fence(pcmk_action_t *stonith_op, pcmk_scheduler_t *scheduler)
G_GNUC_INTERNAL void pcmk__new_ordering(pcmk_resource_t *first_rsc, char *first_task, pcmk_action_t *first_action, pcmk_resource_t *then_rsc, char *then_task, pcmk_action_t *then_action, uint32_t flags, pcmk_scheduler_t *sched)
G_GNUC_INTERNAL void pcmk__log_action(const char *pre_text, const pcmk_action_t *action, bool details)
G_GNUC_INTERNAL pcmk_action_t * pcmk__new_shutdown_action(pcmk_node_t *node)
G_GNUC_INTERNAL void pcmk__show_node_capacities(const char *desc, pcmk_scheduler_t *scheduler)
G_GNUC_INTERNAL void pcmk__schedule_probes(pcmk_scheduler_t *scheduler)
G_GNUC_INTERNAL void pcmk__set_assignment_methods(pcmk_scheduler_t *scheduler)
G_GNUC_INTERNAL void pcmk__handle_rsc_config_changes(pcmk_scheduler_t *scheduler)
G_GNUC_INTERNAL void pcmk__fence_guest(pcmk_node_t *node)
G_GNUC_INTERNAL void pcmk__order_remote_connection_actions(pcmk_scheduler_t *scheduler)
G_GNUC_INTERNAL bool pcmk__threshold_reached(pcmk_resource_t *rsc, const pcmk_node_t *node, pcmk_resource_t **failed)
G_GNUC_INTERNAL void pcmk__apply_node_health(pcmk_scheduler_t *scheduler)
G_GNUC_INTERNAL void pcmk__sort_resources(pcmk_scheduler_t *scheduler)
G_GNUC_INTERNAL void pcmk__apply_orderings(pcmk_scheduler_t *sched)
G_GNUC_INTERNAL bool pcmk__check_action_config(pcmk_resource_t *rsc, pcmk_node_t *node, const xmlNode *xml_op)
G_GNUC_INTERNAL void pcmk__order_after_each(pcmk_action_t *after, GList *list)
G_GNUC_INTERNAL void pcmk__create_graph(pcmk_scheduler_t *scheduler)
G_GNUC_INTERNAL void pcmk__apply_locations(pcmk_scheduler_t *scheduler)
G_GNUC_INTERNAL void pcmk__create_internal_constraints(pcmk_scheduler_t *scheduler)
G_GNUC_INTERNAL void pcmk__output_actions(pcmk_scheduler_t *scheduler)
#define CRM_TRACE_INIT_DATA(name)
Definition logging.h:143
unsigned int get_crm_log_level(void)
Definition logging.c:1074
#define crm_notice(fmt, args...)
Definition logging.h:395
#define crm_trace(fmt, args...)
Definition logging.h:402
#define LOG_TRACE
Definition logging.h:38
#define pcmk__config_warn(fmt...)
pcmk_scheduler_t * scheduler
xmlNode * input
@ node_ping
Definition nodes.h:42
@ pcmk_probe_exclusive
Definition nodes.h:57
#define PCMK_OPT_NO_QUORUM_POLICY
Definition options.h:46
#define PCMK_VALUE_IGNORE
Definition options.h:161
#define PCMK_VALUE_DEFAULT
Definition options.h:142
void pcmk__output_set_log_level(pcmk__output_t *out, uint8_t log_level)
Definition output_log.c:390
void pcmk__output_free(pcmk__output_t *out)
Definition output.c:30
int pcmk__log_output_new(pcmk__output_t **out)
Definition output.c:291
const char * action
Definition pcmk_fence.c:30
int pcmk__init_scheduler(pcmk__output_t *out, xmlNodePtr input, const crm_time_t *date, pcmk_scheduler_t **scheduler)
void pcmk__schedule_actions(xmlNode *cib, unsigned long long flags, pcmk_scheduler_t *scheduler)
void pcmk__register_lib_messages(pcmk__output_t *out)
void pcmk__unpack_constraints(pcmk_scheduler_t *scheduler)
bool pe_can_fence(const pcmk_scheduler_t *scheduler, const pcmk_node_t *node)
Definition utils.c:36
pcmk_action_t * pe__clear_failcount(pcmk_resource_t *rsc, const pcmk_node_t *node, const char *reason, pcmk_scheduler_t *scheduler)
Schedule a controller operation to clear a fail count.
Definition failcounts.c:458
GList * pe__resource_actions(const pcmk_resource_t *rsc, const pcmk_node_t *node, const char *task, bool require_node)
Find all actions of given type for a resource.
void pe__free_param_checks(pcmk_scheduler_t *scheduler)
Definition remote.c:232
const pcmk_resource_t * pe__const_top_resource(const pcmk_resource_t *rsc, bool include_bundle)
Definition complex.c:1032
pcmk__op_digest_t * rsc_action_digest_cmp(pcmk_resource_t *rsc, const xmlNode *xml_op, pcmk_node_t *node, pcmk_scheduler_t *scheduler)
Definition pe_digest.c:394
gboolean order_actions(pcmk_action_t *lh_action, pcmk_action_t *rh_action, uint32_t flags)
Definition utils.c:457
void resource_location(pcmk_resource_t *rsc, const pcmk_node_t *node, int score, const char *tag, pcmk_scheduler_t *scheduler)
Definition utils.c:359
#define stop_key(rsc)
Definition internal.h:213
int pe_get_failcount(const pcmk_node_t *node, pcmk_resource_t *rsc, time_t *last_failure, uint32_t flags, const xmlNode *xml_op)
Definition failcounts.c:361
void pe__foreach_param_check(pcmk_scheduler_t *scheduler, void(*cb)(pcmk_resource_t *, pcmk_node_t *, const xmlNode *, enum pcmk__check_parameters))
Definition remote.c:216
void pe__register_messages(pcmk__output_t *out)
Definition pe_output.c:3440
pcmk_action_t * pe_fence_op(pcmk_node_t *node, const char *op, bool optional, const char *reason, bool priority_delay, pcmk_scheduler_t *scheduler)
@ pcmk_rsc_removed
Definition resources.h:85
@ pcmk_rsc_managed
Definition resources.h:88
#define CRM_ASSERT(expr)
Definition results.h:42
@ CRM_EX_OK
Success.
Definition results.h:255
@ pcmk_rc_ok
Definition results.h:162
@ pcmk_role_stopped
Stopped.
Definition roles.h:36
@ pcmk_sched_stop_removed_resources
Definition scheduler.h:108
@ pcmk_sched_symmetric_cluster
Definition scheduler.h:83
@ pcmk_sched_fencing_enabled
Definition scheduler.h:89
@ pcmk_sched_probe_resources
Definition scheduler.h:142
@ pcmk_sched_have_remote_nodes
Definition scheduler.h:148
@ pcmk_sched_no_compat
Definition scheduler.h:170
@ pcmk_sched_shutdown_lock
Definition scheduler.h:136
@ pcmk_sched_no_counts
Definition scheduler.h:164
@ pcmk_sched_location_only
Definition scheduler.h:158
@ pcmk_sched_quorate
Definition scheduler.h:80
@ pcmk_sched_concurrent_fencing
Definition scheduler.h:102
@ pcmk_sched_validate_only
Definition scheduler.h:183
@ pcmk_sched_have_status
Definition scheduler.h:145
#define pcmk__rsc_trace(rsc, fmt, args...)
pcmk__check_parameters
@ pcmk__check_last_failure
@ pcmk__check_active
#define pcmk__sched_warn(fmt...)
#define pcmk__rsc_debug(rsc, fmt, args...)
#define pcmk__set_scheduler_flags(scheduler, flags_to_set)
#define PCMK_SCORE_INFINITY
Integer score to use to represent "infinity".
Definition scores.h:24
Cluster status and scheduling.
void pe_free_working_set(pcmk_scheduler_t *scheduler)
Free scheduler data.
Definition status.c:50
pcmk_scheduler_t * pe_new_working_set(void)
Create a new object to hold scheduler data.
Definition status.c:34
gboolean cluster_status(pcmk_scheduler_t *scheduler)
Definition status.c:96
void set_working_set_defaults(pcmk_scheduler_t *scheduler)
Definition status.c:407
@ pcmk__str_none
@ pcmk__str_casei
enum pcmk__digest_result rc
This structure contains everything that makes up a single output formatter.
void(* end_list)(pcmk__output_t *out)
int(* message)(pcmk__output_t *out, const char *message_id,...)
void(* finish)(pcmk__output_t *out, crm_exit_t exit_status, bool print, void **copy_dest)
void(* begin_list)(pcmk__output_t *out, const char *singular_noun, const char *plural_noun, const char *format,...) G_GNUC_PRINTF(4
int(*) int(*) void(* err)(pcmk__output_t *out, const char *format,...) G_GNUC_PRINTF(2
char * task
Definition actions.h:343
pcmk_resource_t * rsc
Definition actions.h:340
int weight
Definition nodes.h:162
int rsc_discover_mode
Definition nodes.h:170
struct pe_node_shared_s * details
Definition nodes.h:167
gboolean shutdown
Definition nodes.h:97
const char * id
Definition nodes.h:72
gboolean online
Definition nodes.h:80
pcmk_scheduler_t * data_set
Definition nodes.h:153
gboolean is_dc
Definition nodes.h:100
gboolean unclean
Definition nodes.h:91
gboolean remote_requires_reset
Definition nodes.h:112
enum node_type type
Definition nodes.h:74
pcmk_assignment_methods_t * cmds
Definition resources.h:413
GList * running_on
Definition resources.h:456
pcmk_node_t * partial_migration_target
Definition resources.h:450
GList * children
Definition resources.h:471
pcmk_scheduler_t * cluster
Definition resources.h:408
gboolean exclusive_discover
Definition resources.h:432
gboolean is_remote_node
Definition resources.h:431
xmlNode * xml
Definition resources.h:400
GHashTable * allowed_nodes
Definition resources.h:462
unsigned long long flags
Definition resources.h:428
enum rsc_role_e role
Definition resources.h:464
const char * placement_strategy
Definition scheduler.h:206
xmlNode * input
Definition scheduler.h:196
GList * resources
Definition scheduler.h:231
unsigned long long flags
Definition scheduler.h:211
crm_time_t * now
Definition scheduler.h:198
const char * localhost
Definition scheduler.h:251
void(* create_actions)(pcmk_resource_t *rsc)
void(* shutdown_lock)(pcmk_resource_t *rsc)
pcmk_node_t *(* assign)(pcmk_resource_t *rsc, const pcmk_node_t *prefer, bool stop_if_fail)
Wrappers for and extensions to libxml2.
xmlNode * pcmk__xml_copy(xmlNode *parent, xmlNode *src)
Definition xml.c:883