diff --git a/pimd/pim6_cmd.c b/pimd/pim6_cmd.c
index ec912700d193..fb4b26846b5f 100644
--- a/pimd/pim6_cmd.c
+++ b/pimd/pim6_cmd.c
@@ -503,6 +503,206 @@ DEFPY (no_ipv6_pim_ucast_bsm,
return pim_process_no_unicast_bsm_cmd(vty);
}
+DEFPY (ipv6_pim_candidate_bsr,
+ ipv6_pim_candidate_bsr_cmd,
+ "[no] ipv6 pim candidate-bsr [{priority (0-255)|source <address X:X::X:X|interface IFNAME|loopback$loopback|any$any>}]",
+ NO_STR
+ IPV6_STR
+ "pim multicast routing\n"
+ "Make this router a Candidate BSR\n"
+ "BSR Priority (higher wins)\n"
+ "BSR Priority (higher wins)\n"
+ "Specify IP address for BSR operation\n"
+ "Local address to use\n"
+ "Local address to use\n"
+ "Interface to pick address from\n"
+ "Interface to pick address from\n"
+ "Pick highest loopback address (default)\n"
+ "Pick highest address from any interface\n")
+{
+ char cand_bsr_xpath[XPATH_MAXLEN];
+ const struct lyd_node *vrf_dnode;
+ const char *vrfname;
+
+ if (vty->xpath_index) {
+ vrf_dnode = yang_dnode_get(vty->candidate_config->dnode,
+ VTY_CURR_XPATH);
+
+ if (!vrf_dnode) {
+ vty_out(vty,
+ "%% Failed to get vrf dnode in candidate db\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ vrfname = yang_dnode_get_string(vrf_dnode, "./name");
+ } else
+ vrfname = VRF_DEFAULT_NAME;
+
+ snprintf(cand_bsr_xpath, sizeof(cand_bsr_xpath), FRR_PIM_CAND_BSR_XPATH,
+ "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv6");
+
+ if (no)
+ nb_cli_enqueue_change(vty, cand_bsr_xpath, NB_OP_DESTROY, NULL);
+ else {
+ char xpath2[XPATH_MAXLEN + 16];
+
+ nb_cli_enqueue_change(vty, cand_bsr_xpath, NB_OP_CREATE, NULL);
+
+ if (any) {
+ snprintf(xpath2, sizeof(xpath2), "%s/if-any",
+ cand_bsr_xpath);
+ nb_cli_enqueue_change(vty, xpath2, NB_OP_CREATE, NULL);
+ } else if (ifname) {
+ snprintf(xpath2, sizeof(xpath2), "%s/interface",
+ cand_bsr_xpath);
+ nb_cli_enqueue_change(vty, xpath2, NB_OP_CREATE, ifname);
+ } else if (address_str) {
+ snprintf(xpath2, sizeof(xpath2), "%s/address",
+ cand_bsr_xpath);
+ nb_cli_enqueue_change(vty, xpath2, NB_OP_CREATE,
+ address_str);
+ } else {
+ snprintf(xpath2, sizeof(xpath2), "%s/if-loopback",
+ cand_bsr_xpath);
+ nb_cli_enqueue_change(vty, xpath2, NB_OP_CREATE, NULL);
+ }
+
+ if (priority_str) {
+ snprintf(xpath2, sizeof(xpath2), "%s/bsr-priority",
+ cand_bsr_xpath);
+ nb_cli_enqueue_change(vty, xpath2, NB_OP_MODIFY,
+ priority_str);
+ }
+ }
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFPY (ipv6_pim_candidate_rp,
+ ipv6_pim_candidate_rp_cmd,
+ "[no] ipv6 pim candidate-rp [{priority (0-255)|interval (1-4294967295)|source <address X:X::X:X|interface IFNAME|loopback$loopback|any$any>}]",
+ NO_STR
+ IPV6_STR
+ "pim multicast routing\n"
+ "Make this router a Candidate RP\n"
+ "RP Priority (lower wins)\n"
+ "RP Priority (lower wins)\n"
+ "Advertisement interval (seconds)\n"
+ "Advertisement interval (seconds)\n"
+ "Specify IP address for RP operation\n"
+ "Local address to use\n"
+ "Local address to use\n"
+ "Interface to pick address from\n"
+ "Interface to pick address from\n"
+ "Pick highest loopback address (default)\n"
+ "Pick highest address from any interface\n")
+{
+ char cand_rp_xpath[XPATH_MAXLEN];
+ const struct lyd_node *vrf_dnode;
+ const char *vrfname;
+
+ if (vty->xpath_index) {
+ vrf_dnode = yang_dnode_get(vty->candidate_config->dnode,
+ VTY_CURR_XPATH);
+
+ if (!vrf_dnode) {
+ vty_out(vty,
+ "%% Failed to get vrf dnode in candidate db\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ vrfname = yang_dnode_get_string(vrf_dnode, "./name");
+ } else
+ vrfname = VRF_DEFAULT_NAME;
+
+ snprintf(cand_rp_xpath, sizeof(cand_rp_xpath), FRR_PIM_CAND_RP_XPATH,
+ "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv6");
+
+ if (no)
+ nb_cli_enqueue_change(vty, cand_rp_xpath, NB_OP_DESTROY, NULL);
+ else {
+ char xpath2[XPATH_MAXLEN + 24];
+
+ nb_cli_enqueue_change(vty, cand_rp_xpath, NB_OP_CREATE, NULL);
+
+ if (any) {
+ snprintf(xpath2, sizeof(xpath2), "%s/if-any",
+ cand_rp_xpath);
+ nb_cli_enqueue_change(vty, xpath2, NB_OP_CREATE, NULL);
+ } else if (ifname) {
+ snprintf(xpath2, sizeof(xpath2), "%s/interface",
+ cand_rp_xpath);
+ nb_cli_enqueue_change(vty, xpath2, NB_OP_CREATE, ifname);
+ } else if (address_str) {
+ snprintf(xpath2, sizeof(xpath2), "%s/address",
+ cand_rp_xpath);
+ nb_cli_enqueue_change(vty, xpath2, NB_OP_CREATE,
+ address_str);
+ } else {
+ snprintf(xpath2, sizeof(xpath2), "%s/if-loopback",
+ cand_rp_xpath);
+ nb_cli_enqueue_change(vty, xpath2, NB_OP_CREATE, NULL);
+ }
+
+ if (priority_str) {
+ snprintf(xpath2, sizeof(xpath2), "%s/rp-priority",
+ cand_rp_xpath);
+ nb_cli_enqueue_change(vty, xpath2, NB_OP_MODIFY,
+ priority_str);
+ }
+ if (interval_str) {
+ snprintf(xpath2, sizeof(xpath2),
+ "%s/advertisement-interval", cand_rp_xpath);
+ nb_cli_enqueue_change(vty, xpath2, NB_OP_MODIFY,
+ interval_str);
+ }
+ }
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFPY (ipv6_pim_candidate_rp_group,
+ ipv6_pim_candidate_rp_group_cmd,
+ "[no] ipv6 pim candidate-rp group X:X::X:X/M",
+ NO_STR
+ IPV6_STR
+ "pim multicast routing\n"
+ "Make this router a Candidate RP\n"
+ "Configure groups to become candidate RP for\n"
+ "Multicast group prefix\n")
+{
+ char cand_rp_xpath[XPATH_MAXLEN];
+ const struct lyd_node *vrf_dnode;
+ const char *vrfname;
+
+ if (vty->xpath_index) {
+ vrf_dnode = yang_dnode_get(vty->candidate_config->dnode,
+ VTY_CURR_XPATH);
+
+ if (!vrf_dnode) {
+ vty_out(vty,
+ "%% Failed to get vrf dnode in candidate db\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ vrfname = yang_dnode_get_string(vrf_dnode, "./name");
+ } else
+ vrfname = VRF_DEFAULT_NAME;
+
+ snprintf(cand_rp_xpath, sizeof(cand_rp_xpath),
+ FRR_PIM_CAND_RP_XPATH "/group-list", "frr-pim:pimd", "pim",
+ vrfname, "frr-routing:ipv6");
+
+ if (no)
+ nb_cli_enqueue_change(vty, cand_rp_xpath, NB_OP_DESTROY,
+ group_str);
+ else
+ nb_cli_enqueue_change(vty, cand_rp_xpath, NB_OP_CREATE,
+ group_str);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
DEFPY (ipv6_ssmpingd,
ipv6_ssmpingd_cmd,
"ipv6 ssmpingd [X:X::X:X]$source",
@@ -874,6 +1074,106 @@ DEFPY (show_ipv6_pim_secondary,
return pim_show_secondary_helper(vrf, vty);
}
+DEFPY (show_ipv6_pim_cand_rp,
+ show_ipv6_pim_cand_rp_cmd,
+ "show ipv6 pim candidate-rp [vrf VRF_NAME] [json$uj]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ "PIM Candidate RP state\n"
+ VRF_CMD_HELP_STR
+ JSON_STR)
+{
+ struct vrf *vrf = pim_cmd_lookup(vty, vrf_name);
+ struct pim_instance *pim;
+ struct bsm_scope *scope;
+ json_object *json = NULL;
+
+ if (!vrf || !vrf->info)
+ return CMD_WARNING;
+
+ pim = (struct pim_instance *)vrf->info;
+ scope = &pim->global_scope;
+
+ if (!scope->cand_rp_addrsel.run) {
+ if (uj)
+ vty_out(vty, "{}\n");
+ else
+ vty_out(vty,
+ "This router is not currently operating as Candidate RP\n");
+ return CMD_SUCCESS;
+ }
+
+ if (uj) {
+ json = json_object_new_object();
+ json_object_string_addf(json, "address", "%pPA",
+ &scope->cand_rp_addrsel.run_addr);
+ json_object_int_add(json, "priority", scope->cand_rp_prio);
+ json_object_int_add(json, "nextAdvertisementMsec",
+ event_timer_remain_msec(
+ scope->cand_rp_adv_timer));
+
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(json,
+ JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ return CMD_SUCCESS;
+ }
+
+ vty_out(vty, "Candidate-RP\nAddress: %pPA\nPriority: %u\n\n",
+ &scope->cand_rp_addrsel.run_addr, scope->cand_rp_prio);
+ vty_out(vty, "Next adv.: %lu msec\n",
+ event_timer_remain_msec(scope->cand_rp_adv_timer));
+
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (show_ipv6_pim_bsr_rpdb,
+ show_ipv6_pim_bsr_rpdb_cmd,
+ "show ipv6 pim bsr candidate-rps [vrf VRF_NAME] [json$uj]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ "boot-strap router information\n"
+ "Candidate RPs\n"
+ VRF_CMD_HELP_STR
+ JSON_STR)
+{
+ struct vrf *vrf = pim_cmd_lookup(vty, vrf_name);
+
+ if (!vrf || !vrf->info)
+ return CMD_WARNING;
+
+ struct pim_instance *pim = vrf->info;
+ struct bsm_scope *scope = &pim->global_scope;
+
+ return pim_crp_db_show(vty, scope);
+}
+
+DEFPY (show_ipv6_pim_bsr_groups,
+ show_ipv6_pim_bsr_groups_cmd,
+ "show ipv6 pim bsr groups [vrf VRF_NAME] [json$uj]",
+ SHOW_STR
+ IPV6_STR
+ PIM_STR
+ "boot-strap router information\n"
+ "Candidate RP groups\n"
+ VRF_CMD_HELP_STR
+ JSON_STR)
+{
+ struct vrf *vrf = pim_cmd_lookup(vty, vrf_name);
+
+ if (!vrf || !vrf->info)
+ return CMD_WARNING;
+
+ struct pim_instance *pim = vrf->info;
+ struct bsm_scope *scope = &pim->global_scope;
+
+ return pim_crp_groups_show(vty, scope);
+}
+
+
DEFPY (show_ipv6_pim_statistics,
show_ipv6_pim_statistics_cmd,
"show ipv6 pim [vrf NAME] statistics [interface WORD$word] [json$json]",
@@ -1813,11 +2113,21 @@ void pim_cmd_init(void)
install_element(INTERFACE_NODE,
&interface_no_ipv6_mld_last_member_query_interval_cmd);
+ install_element(CONFIG_NODE, &ipv6_pim_candidate_bsr_cmd);
+ install_element(VRF_NODE, &ipv6_pim_candidate_bsr_cmd);
+ install_element(CONFIG_NODE, &ipv6_pim_candidate_rp_cmd);
+ install_element(VRF_NODE, &ipv6_pim_candidate_rp_cmd);
+ install_element(CONFIG_NODE, &ipv6_pim_candidate_rp_group_cmd);
+ install_element(VRF_NODE, &ipv6_pim_candidate_rp_group_cmd);
+
install_element(VIEW_NODE, &show_ipv6_pim_rp_cmd);
install_element(VIEW_NODE, &show_ipv6_pim_rp_vrf_all_cmd);
install_element(VIEW_NODE, &show_ipv6_pim_rpf_cmd);
install_element(VIEW_NODE, &show_ipv6_pim_rpf_vrf_all_cmd);
install_element(VIEW_NODE, &show_ipv6_pim_secondary_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_cand_rp_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_bsr_rpdb_cmd);
+ install_element(VIEW_NODE, &show_ipv6_pim_bsr_groups_cmd);
install_element(VIEW_NODE, &show_ipv6_pim_statistics_cmd);
install_element(VIEW_NODE, &show_ipv6_pim_upstream_cmd);
install_element(VIEW_NODE, &show_ipv6_pim_upstream_vrf_all_cmd);
diff --git a/pimd/pim6_main.c b/pimd/pim6_main.c
index 5ce6985c4520..07b70ae2b3df 100644
--- a/pimd/pim6_main.c
+++ b/pimd/pim6_main.c
@@ -94,6 +94,7 @@ struct frr_signal_t pim6d_signals[] = {
},
};
+/* clang-format off */
static const struct frr_yang_module_info *const pim6d_yang_modules[] = {
&frr_filter_info,
&frr_interface_info,
@@ -102,10 +103,10 @@ static const struct frr_yang_module_info *const pim6d_yang_modules[] = {
&frr_routing_info,
&frr_pim_info,
&frr_pim_rp_info,
+ &frr_pim_candidate_info,
&frr_gmp_info,
};
-/* clang-format off */
FRR_DAEMON_INFO(pim6d, PIM6,
.vty_port = PIM6D_VTY_PORT,
.proghelp = "Protocol Independent Multicast (RFC7761) for IPv6",
diff --git a/pimd/pim_addr.h b/pimd/pim_addr.h
index ecba739a5a4e..7b0c3f0350e2 100644
--- a/pimd/pim_addr.h
+++ b/pimd/pim_addr.h
@@ -14,11 +14,13 @@
#if PIM_IPV == 4
typedef struct in_addr pim_addr;
+typedef struct prefix_ipv4 prefix_pim;
#define PIM_ADDRSTRLEN INET_ADDRSTRLEN
#define PIM_AF AF_INET
#define PIM_AFI AFI_IP
#define PIM_PROTO_REG IPPROTO_RAW
+#define PIM_IANA_AFI IANA_AFI_IPV4
#define PIM_IPADDR IPADDR_V4
#define ipaddr_pim ipaddr_v4
#define PIM_MAX_BITLEN IPV4_MAX_BITLEN
@@ -44,11 +46,13 @@ union pimprefixconstptr {
#else
typedef struct in6_addr pim_addr;
+typedef struct prefix_ipv6 prefix_pim;
#define PIM_ADDRSTRLEN INET6_ADDRSTRLEN
#define PIM_AF AF_INET6
#define PIM_AFI AFI_IP6
#define PIM_PROTO_REG IPPROTO_PIM
+#define PIM_IANA_AFI IANA_AFI_IPV6
#define PIM_IPADDR IPADDR_V6
#define ipaddr_pim ipaddr_v6
#define PIM_MAX_BITLEN IPV6_MAX_BITLEN
diff --git a/pimd/pim_bsm.c b/pimd/pim_bsm.c
index df9161943d37..d32e81122fa1 100644
--- a/pimd/pim_bsm.c
+++ b/pimd/pim_bsm.c
@@ -10,6 +10,14 @@
#include "config.h"
#endif
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+#include <errno.h>
+#include <math.h>
+
#include "if.h"
#include "pimd.h"
#include "pim_iface.h"
@@ -23,18 +31,32 @@
#include "pim_time.h"
#include "pim_zebra.h"
#include "pim_util.h"
+#include "pim_sock.h"
/* Functions forward declaration */
static void pim_bs_timer_start(struct bsm_scope *scope, int bs_timeout);
static void pim_g2rp_timer_start(struct bsm_rpinfo *bsrp, int hold_time);
static inline void pim_g2rp_timer_restart(struct bsm_rpinfo *bsrp,
int hold_time);
+static void pim_bsm_accept_any(struct bsm_scope *scope);
+static void pim_cand_bsr_trigger(struct bsm_scope *scope, bool verbose);
+static void pim_cand_bsr_pending(struct bsm_scope *scope);
/* Memory Types */
DEFINE_MTYPE_STATIC(PIMD, PIM_BSGRP_NODE, "PIM BSR advertised grp info");
DEFINE_MTYPE_STATIC(PIMD, PIM_BSRP_INFO, "PIM BSR advertised RP info");
-DEFINE_MTYPE_STATIC(PIMD, PIM_BSM_FRAG, "PIM BSM fragment");
+DEFINE_MTYPE(PIMD, PIM_BSM_FRAG, "PIM BSM fragment");
DEFINE_MTYPE_STATIC(PIMD, PIM_BSM_PKT_VAR_MEM, "PIM BSM Packet");
+DEFINE_MTYPE_STATIC(PIMD, PIM_CAND_RP_GRP, "PIM Candidate RP group");
+
+static int cand_rp_group_cmp(const struct cand_rp_group *a,
+ const struct cand_rp_group *b)
+{
+ return prefix_cmp(&a->p, &b->p);
+}
+
+DECLARE_RBTREE_UNIQ(cand_rp_groups, struct cand_rp_group, item,
+ cand_rp_group_cmp);
/* All bsm packets forwarded shall be fit within ip mtu less iphdr(max) */
#define MAX_IP_HDR_LEN 24
@@ -90,7 +112,7 @@ static void pim_bsm_frag_free(struct bsm_frag *bsfrag)
XFREE(MTYPE_PIM_BSM_FRAG, bsfrag);
}
-static void pim_bsm_frags_free(struct bsm_scope *scope)
+void pim_bsm_frags_free(struct bsm_scope *scope)
{
struct bsm_frag *bsfrag;
@@ -140,12 +162,12 @@ static struct bsgrp_node *pim_bsm_new_bsgrp_node(struct route_table *rt,
return bsgrp;
}
+/* BS timer for NO_INFO, ACCEPT_ANY & ACCEPT_PREFERRED.
+ * Candidate BSR handling is separate further below
+ */
static void pim_on_bs_timer(struct event *t)
{
- struct route_node *rn;
struct bsm_scope *scope;
- struct bsgrp_node *bsgrp_node;
- struct bsm_rpinfo *bsrp;
scope = EVENT_ARG(t);
EVENT_OFF(scope->bs_timer);
@@ -154,7 +176,20 @@ static void pim_on_bs_timer(struct event *t)
zlog_debug("%s: Bootstrap Timer expired for scope: %d",
__func__, scope->sz_id);
+ assertf(scope->state <= ACCEPT_PREFERRED, "state=%d", scope->state);
pim_nht_bsr_del(scope->pim, scope->current_bsr);
+
+ pim_bsm_accept_any(scope);
+}
+
+static void pim_bsm_accept_any(struct bsm_scope *scope)
+{
+ struct route_node *rn;
+ struct bsgrp_node *bsgrp_node;
+ struct bsm_rpinfo *bsrp;
+
+ EVENT_OFF(scope->t_ebsr_regen_bsm);
+
/* Reset scope zone data */
scope->state = ACCEPT_ANY;
scope->current_bsr = PIMADDR_ANY;
@@ -181,6 +216,11 @@ static void pim_on_bs_timer(struct event *t)
pim_bsm_rpinfos_free(bsgrp_node->partial_bsrp_list);
bsgrp_node->pend_rp_cnt = 0;
}
+
+ /* we're leaving ACCEPT_PREFERRED, which doubles as C-BSR if we're
+ * configured to be a Candidate BSR. See if we're P-BSR now.
+ */
+ pim_cand_bsr_trigger(scope, false);
}
static void pim_bs_timer_stop(struct bsm_scope *scope)
@@ -212,36 +252,129 @@ static inline void pim_bs_timer_restart(struct bsm_scope *scope, int bs_timeout)
pim_bs_timer_start(scope, bs_timeout);
}
+static void bsm_unicast_sock_read(struct event *t)
+{
+ struct bsm_scope *scope = EVENT_ARG(t);
+
+ event_add_read(router->master, bsm_unicast_sock_read, scope,
+ scope->unicast_sock, &scope->unicast_read);
+
+ struct sockaddr_storage from;
+ struct sockaddr_storage to;
+ socklen_t fromlen = sizeof(from);
+ socklen_t tolen = sizeof(to);
+ ifindex_t ifindex;
+ struct interface *ifp;
+ uint8_t buf[PIM_PIM_BUFSIZE_READ];
+ int len, i;
+
+ for (i = 0; i < router->packet_process; i++) {
+ pim_sgaddr sg;
+
+ len = pim_socket_recvfromto(scope->unicast_sock, buf,
+ sizeof(buf), &from, &fromlen, &to,
+ &tolen, &ifindex);
+ if (len < 0) {
+ if (errno == EINTR)
+ continue;
+ if (errno == EWOULDBLOCK || errno == EAGAIN)
+ break;
+
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug("Received errno: %d %s", errno,
+ safe_strerror(errno));
+ break;
+ }
+
+#if PIM_IPV == 4
+ sg.src = ((struct sockaddr_in *)&from)->sin_addr;
+ sg.grp = ((struct sockaddr_in *)&to)->sin_addr;
+#else
+ sg.src = ((struct sockaddr_in6 *)&from)->sin6_addr;
+ sg.grp = ((struct sockaddr_in6 *)&to)->sin6_addr;
+#endif
+
+ /*
+ * What? So with vrf's the incoming packet is received
+ * on the vrf interface but recvfromto above returns
+ * the right ifindex, so just use it. We know
+ * it's the right interface because we bind to it
+ */
+ ifp = if_lookup_by_index(ifindex, scope->pim->vrf->vrf_id);
+ if (!ifp) {
+ zlog_warn("Received incoming PIM packet on unknown ifindex %d",
+ ifindex);
+ break;
+ }
+
+ int fail = pim_pim_packet(ifp, buf, len, sg, false);
+
+ if (fail) {
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug("%s: pim_pim_packet() return=%d",
+ __func__, fail);
+ break;
+ }
+ }
+}
+
void pim_bsm_proc_init(struct pim_instance *pim)
{
- memset(&pim->global_scope, 0, sizeof(struct bsm_scope));
-
- pim->global_scope.sz_id = PIM_GBL_SZ_ID;
- pim->global_scope.bsrp_table = route_table_init();
- pim->global_scope.accept_nofwd_bsm = true;
- pim->global_scope.state = NO_INFO;
- pim->global_scope.pim = pim;
- bsm_frags_init(pim->global_scope.bsm_frags);
- pim_bs_timer_start(&pim->global_scope, PIM_BS_TIME);
+ struct bsm_scope *scope = &pim->global_scope;
+
+ memset(scope, 0, sizeof(*scope));
+
+ scope->sz_id = PIM_GBL_SZ_ID;
+ scope->bsrp_table = route_table_init();
+ scope->accept_nofwd_bsm = true;
+ scope->state = NO_INFO;
+ scope->pim = pim;
+ bsm_frags_init(scope->bsm_frags);
+ pim_bs_timer_start(scope, PIM_BS_TIME);
+
+ scope->cand_rp_interval = PIM_CRP_ADV_INTERVAL;
+ cand_rp_groups_init(scope->cand_rp_groups);
+
+ scope->unicast_sock = pim_socket_raw(IPPROTO_PIM);
+ set_nonblocking(scope->unicast_sock);
+ sockopt_reuseaddr(scope->unicast_sock);
+ setsockopt_ipv6_pktinfo(scope->unicast_sock, 1);
+ pim_socket_ip_hdr(scope->unicast_sock);
+
+ frr_with_privs (&pimd_privs) {
+ vrf_bind(pim->vrf->vrf_id, scope->unicast_sock, NULL);
+ }
+
+ event_add_read(router->master, bsm_unicast_sock_read, scope,
+ scope->unicast_sock, &scope->unicast_read);
}
void pim_bsm_proc_free(struct pim_instance *pim)
{
+ struct bsm_scope *scope = &pim->global_scope;
struct route_node *rn;
struct bsgrp_node *bsgrp;
+ struct cand_rp_group *crpgrp;
- pim_bs_timer_stop(&pim->global_scope);
- pim_bsm_frags_free(&pim->global_scope);
+ EVENT_OFF(scope->unicast_read);
+ close(scope->unicast_sock);
- for (rn = route_top(pim->global_scope.bsrp_table); rn;
- rn = route_next(rn)) {
+ pim_bs_timer_stop(scope);
+ pim_bsm_frags_free(scope);
+
+ for (rn = route_top(scope->bsrp_table); rn; rn = route_next(rn)) {
bsgrp = rn->info;
if (!bsgrp)
continue;
pim_free_bsgrp_data(bsgrp);
}
- route_table_finish(pim->global_scope.bsrp_table);
+ while ((crpgrp = cand_rp_groups_pop(scope->cand_rp_groups)))
+ XFREE(MTYPE_PIM_CAND_RP_GRP, crpgrp);
+
+ cand_rp_groups_fini(scope->cand_rp_groups);
+
+ route_table_finish(scope->bsrp_table);
}
static bool is_hold_time_elapsed(void *data)
@@ -512,9 +645,6 @@ static void pim_instate_pend_list(struct bsgrp_node *bsgrp_node)
static bool is_preferred_bsr(struct pim_instance *pim, pim_addr bsr,
uint32_t bsr_prio)
{
- if (!pim_addr_cmp(bsr, pim->global_scope.current_bsr))
- return true;
-
if (bsr_prio > pim->global_scope.current_bsr_prio)
return true;
@@ -523,6 +653,11 @@ static bool is_preferred_bsr(struct pim_instance *pim, pim_addr bsr,
return true;
else
return false;
+ } else if (!pim_addr_cmp(bsr, pim->global_scope.current_bsr)) {
+ /* BSR config changed, lower prio now. local BSR check
+ * is handled separately in pim_bsm_update()
+ */
+ return true;
} else
return false;
}
@@ -530,17 +665,52 @@ static bool is_preferred_bsr(struct pim_instance *pim, pim_addr bsr,
static void pim_bsm_update(struct pim_instance *pim, pim_addr bsr,
uint32_t bsr_prio)
{
- if (pim_addr_cmp(bsr, pim->global_scope.current_bsr)) {
- pim_nht_bsr_del(pim, pim->global_scope.current_bsr);
- pim_nht_bsr_add(pim, bsr);
-
- pim->global_scope.current_bsr = bsr;
- pim->global_scope.current_bsr_first_ts =
- pim_time_monotonic_sec();
- pim->global_scope.state = ACCEPT_PREFERRED;
- }
pim->global_scope.current_bsr_prio = bsr_prio;
pim->global_scope.current_bsr_last_ts = pim_time_monotonic_sec();
+
+ if (pim->global_scope.bsr_addrsel.run &&
+ pim->global_scope.cand_bsr_prio > bsr_prio &&
+ pim->global_scope.state < BSR_PENDING) {
+ /* current BSR is now less preferred than ourselves */
+ pim_cand_bsr_pending(&pim->global_scope);
+ return;
+ }
+
+ if (!pim_addr_cmp(bsr, pim->global_scope.current_bsr))
+ return;
+
+ switch (pim->global_scope.state) {
+ case BSR_PENDING:
+ if (PIM_DEBUG_BSM)
+ zlog_debug("Candidate BSR dropping out of BSR election, better BSR (%u, %pPA)",
+ bsr_prio, &bsr);
+ break;
+
+ case BSR_ELECTED:
+ if (PIM_DEBUG_BSM)
+ zlog_debug("Lost BSR status, better BSR (%u, %pPA)",
+ bsr_prio, &bsr);
+ break;
+
+ case NO_INFO:
+ case ACCEPT_ANY:
+ case ACCEPT_PREFERRED:
+ break;
+ }
+
+ EVENT_OFF(pim->global_scope.t_ebsr_regen_bsm);
+
+ if (pim->global_scope.state == BSR_ELECTED)
+ pim_crp_db_clear(&pim->global_scope);
+ else
+ pim_nht_bsr_del(pim, pim->global_scope.current_bsr);
+ pim_nht_bsr_add(pim, bsr);
+
+ pim->global_scope.current_bsr = bsr;
+ pim->global_scope.current_bsr_first_ts = pim_time_monotonic_sec();
+ pim->global_scope.state = ACCEPT_PREFERRED;
+
+ pim_cand_rp_trigger(&pim->global_scope);
}
void pim_bsm_clear(struct pim_instance *pim)
@@ -555,7 +725,12 @@ void pim_bsm_clear(struct pim_instance *pim)
struct rp_info *rp_info;
bool upstream_updated = false;
- pim_nht_bsr_del(pim, pim->global_scope.current_bsr);
+ EVENT_OFF(pim->global_scope.t_ebsr_regen_bsm);
+
+ if (pim->global_scope.state == BSR_ELECTED)
+ pim_crp_db_clear(&pim->global_scope);
+ else
+ pim_nht_bsr_del(pim, pim->global_scope.current_bsr);
/* Reset scope zone data */
pim->global_scope.accept_nofwd_bsm = false;
@@ -1338,35 +1513,6 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,
}
}
- /* Drop if bsr is not preferred bsr */
- if (!is_preferred_bsr(pim, bsr_addr, bshdr->bsr_prio)) {
- if (PIM_DEBUG_BSM)
- zlog_debug("%s : Received a non-preferred BSM",
- __func__);
- pim->bsm_dropped++;
- return -1;
- }
-
- if (no_fwd) {
- /* only accept no-forward BSM if quick refresh on startup */
- if ((pim->global_scope.accept_nofwd_bsm)
- || (frag_tag == pim->global_scope.bsm_frag_tag)) {
- pim->global_scope.accept_nofwd_bsm = false;
- } else {
- if (PIM_DEBUG_BSM)
- zlog_debug(
- "%s : nofwd_bsm received on %pPAs when accpt_nofwd_bsm false",
- __func__, &bsr_addr);
- pim->bsm_dropped++;
- pim_ifp->pim_ifstat_ucast_bsm_cfg_miss++;
- return -1;
- }
- }
-
- /* BSM packet is seen, so resetting accept_nofwd_bsm to false */
- if (pim->global_scope.accept_nofwd_bsm)
- pim->global_scope.accept_nofwd_bsm = false;
-
if (!pim_addr_cmp(sg->grp, qpim_all_pim_routers_addr)) {
/* Multicast BSMs are only accepted if source interface & IP
* match RPF towards the BSR's IP address, or they have
@@ -1403,6 +1549,59 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,
return -1;
}
+ /* when the BSR restarts, it can get its own BSR advertisement thrown
+ * back at it, and without this we'll go into ACCEPT_PREFERRED with
+ * ourselves as the BSR when we should be in BSR_ELECTED.
+ */
+ CPP_NOTICE("invalidate BSR when address added locally");
+ if (if_address_is_local(&bshdr->bsr_addr.addr, PIM_AF,
+ pim->vrf->vrf_id)) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s : Dropping BSM from ourselves", __func__);
+ pim->bsm_dropped++;
+ return -1;
+ }
+
+ /* Drop if bsr is not preferred bsr */
+ if (is_preferred_bsr(pim, bsr_addr, bshdr->bsr_prio)) {
+ /* continue below */
+ } else if (pim->global_scope.state == BSR_PENDING && !no_fwd) {
+ /* in P-BSR state, non-preferred BSMs are forwarded, but
+ * content is ignored.
+ */
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s : Forwarding non-preferred BSM during Pending-BSR state",
+ __func__);
+
+ pim_bsm_fwd_whole_sz(pim_ifp->pim, buf, buf_size, sz);
+ return -1;
+ } else {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s : Received a non-preferred BSM",
+ __func__);
+ pim->bsm_dropped++;
+ return -1;
+ }
+
+ if (no_fwd) {
+ /* only accept no-forward BSM if quick refresh on startup */
+ if ((pim->global_scope.accept_nofwd_bsm) ||
+ (frag_tag == pim->global_scope.bsm_frag_tag)) {
+ pim->global_scope.accept_nofwd_bsm = false;
+ } else {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s : nofwd_bsm received on %pPAs when accpt_nofwd_bsm false",
+ __func__, &bsr_addr);
+ pim->bsm_dropped++;
+ pim_ifp->pim_ifstat_ucast_bsm_cfg_miss++;
+ return -1;
+ }
+ }
+
+ /* BSM packet is seen, so resetting accept_nofwd_bsm to false */
+ if (pim->global_scope.accept_nofwd_bsm)
+ pim->global_scope.accept_nofwd_bsm = false;
+
if (empty_bsm) {
if (PIM_DEBUG_BSM)
zlog_debug("%s : Empty Pref BSM received", __func__);
@@ -1413,9 +1612,8 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,
(buf + PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN),
(buf_size - PIM_BSM_HDR_LEN - PIM_MSG_HEADER_LEN),
frag_tag)) {
- if (PIM_DEBUG_BSM) {
- zlog_debug("%s, Parsing BSM failed.", __func__);
- }
+ zlog_warn("BSM from %pPA failed to parse",
+ (pim_addr *)&bshdr->bsr_addr.addr);
pim->bsm_dropped++;
return -1;
}
@@ -1451,3 +1649,581 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,
return 0;
}
+
+static void pim_elec_bsr_timer(struct event *t)
+{
+ struct bsm_scope *scope = EVENT_ARG(t);
+ struct bsm_frag *frag;
+ struct bsm_hdr *hdr;
+
+ assert(scope->state == BSR_ELECTED);
+
+ scope->bsm_frag_tag++;
+ frag = bsm_frags_first(scope->bsm_frags);
+ assert(frag);
+
+ hdr = (struct bsm_hdr *)(frag->data + PIM_MSG_HEADER_LEN);
+ hdr->frag_tag = htons(scope->bsm_frag_tag);
+
+ unsigned timer = PIM_BS_TIME;
+
+ if (scope->changed_bsm_trigger) {
+ zlog_debug("Sending triggered BSM");
+ scope->changed_bsm_trigger--;
+ timer = 5;
+ } else {
+ zlog_debug("Sending scheduled BSM");
+ pim_bsm_sent(scope);
+ }
+
+ pim_bsm_fwd_whole_sz(scope->pim, frag->data, frag->size, scope->sz_id);
+ scope->current_bsr_last_ts = pim_time_monotonic_sec();
+
+ event_add_timer(router->master, pim_elec_bsr_timer, scope, timer,
+ &scope->bs_timer);
+}
+
+void pim_bsm_changed(struct bsm_scope *scope)
+{
+ struct event t;
+
+ EVENT_OFF(scope->bs_timer);
+ scope->changed_bsm_trigger = 2;
+
+ t.arg = scope;
+ pim_elec_bsr_timer(&t);
+}
+
+static void pim_cand_bsr_pending_expire(struct event *t)
+{
+ struct bsm_scope *scope = EVENT_ARG(t);
+
+ assertf(scope->state == BSR_PENDING, "state=%d", scope->state);
+ assertf(pim_addr_is_any(scope->current_bsr), "current_bsr=%pPA",
+ &scope->current_bsr);
+
+ zlog_debug("Elected BSR, wait expired without preferable BSMs");
+
+ scope->state = BSR_ELECTED;
+ scope->current_bsr_prio = scope->cand_bsr_prio;
+ scope->current_bsr = scope->bsr_addrsel.run_addr;
+
+ //pim_bsm_frags_free(scope);
+ scope->bsm_frag_tag = frr_weak_random();
+ scope->current_bsr_first_ts = pim_time_monotonic_sec();
+
+ pim_cand_rp_trigger(scope);
+ pim_bsm_generate(scope);
+}
+
+#if PIM_IPV == 6
+static float bsr_addr_delay(pim_addr best, pim_addr local)
+{
+ unsigned pos;
+ uint32_t best_4b, local_4b;
+ float delay_log;
+
+ for (pos = 0; pos < 12; pos++) {
+ if (best.s6_addr[pos] != local.s6_addr[pos])
+ break;
+ }
+
+ memcpy(&best_4b, &best.s6_addr[pos], 4);
+ memcpy(&local_4b, &local.s6_addr[pos], 4);
+
+ delay_log = log2(1 + ntohl(best_4b) - ntohl(local_4b));
+ delay_log += (12 - pos) * 8;
+ return delay_log / 64.;
+}
+#endif
+
+static void pim_cand_bsr_pending(struct bsm_scope *scope)
+{
+ unsigned bs_rand_override;
+ uint8_t best_prio;
+ pim_addr best_addr;
+ float prio_delay, addr_delay;
+
+ EVENT_OFF(scope->bs_timer);
+ EVENT_OFF(scope->t_ebsr_regen_bsm);
+ scope->state = BSR_PENDING;
+
+ best_prio = MAX(scope->cand_bsr_prio, scope->current_bsr_prio);
+ best_addr = pim_addr_cmp(scope->bsr_addrsel.run_addr,
+ scope->current_bsr) > 0
+ ? scope->bsr_addrsel.run_addr
+ : scope->current_bsr;
+
+ /* RFC5059 sec.5 */
+#if PIM_IPV == 4
+ if (scope->cand_bsr_prio == best_prio) {
+ prio_delay = 0.; /* log2(1) = 0 */
+ addr_delay = log2(1 + ntohl(best_addr.s_addr) -
+ ntohl(scope->bsr_addrsel.run_addr.s_addr)) /
+ 16.;
+ } else {
+ prio_delay = 2. * log2(1 + best_prio - scope->cand_bsr_prio);
+ addr_delay = 2 - (ntohl(scope->bsr_addrsel.run_addr.s_addr) /
+ (float)(1 << 31));
+ }
+#else
+ if (scope->cand_bsr_prio == best_prio) {
+ prio_delay = 0.; /* log2(1) = 0 */
+ addr_delay = bsr_addr_delay(best_addr,
+ scope->bsr_addrsel.run_addr);
+ } else {
+ prio_delay = 2. * log2(1 + best_prio - scope->cand_bsr_prio);
+ addr_delay = 2 -
+ (ntohl(scope->bsr_addrsel.run_addr.s6_addr32[0]) /
+ (float)(1 << 31));
+ }
+#endif
+
+ bs_rand_override = 5000 + (int)((prio_delay + addr_delay) * 1000.);
+
+ zlog_debug("Pending-BSR (%u, %pPA), waiting %ums", scope->cand_bsr_prio,
+ &scope->bsr_addrsel.run_addr, bs_rand_override);
+ event_add_timer_msec(router->master, pim_cand_bsr_pending_expire, scope,
+ bs_rand_override, &scope->bs_timer);
+}
+
+static inline pim_addr if_highest_addr(pim_addr cur, struct interface *ifp)
+{
+ struct connected *connected;
+
+ frr_each (if_connected, ifp->connected, connected) {
+ pim_addr conn_addr;
+
+ if (connected->address->family != PIM_AF)
+ continue;
+
+ conn_addr = pim_addr_from_prefix(connected->address);
+ /* highest address */
+ if (pim_addr_cmp(conn_addr, cur) > 0)
+ cur = conn_addr;
+ }
+ return cur;
+}
+
+static void cand_addrsel_clear(struct cand_addrsel *asel)
+{
+ asel->run = false;
+ asel->run_addr = PIMADDR_ANY;
+}
+
+/* returns whether address or active changed */
+static bool cand_addrsel_update(struct cand_addrsel *asel, struct vrf *vrf)
+{
+ bool is_any = false, prev_run = asel->run;
+ struct interface *ifp = NULL;
+ pim_addr new_addr = PIMADDR_ANY;
+
+ if (!asel->cfg_enable)
+ goto out_disable;
+
+ switch (asel->cfg_mode) {
+ case CAND_ADDR_EXPLICIT:
+ new_addr = asel->cfg_addr;
+ ifp = if_lookup_address_local(&asel->cfg_addr, PIM_AF,
+ vrf->vrf_id);
+ break;
+
+ case CAND_ADDR_IFACE:
+ ifp = if_lookup_by_name_vrf(asel->cfg_ifname, vrf);
+
+ if (ifp)
+ new_addr = if_highest_addr(PIMADDR_ANY, ifp);
+ break;
+
+ case CAND_ADDR_ANY:
+ is_any = true;
+ /* fallthru */
+ case CAND_ADDR_LO:
+ FOR_ALL_INTERFACES (vrf, ifp) {
+ if (!if_is_up(ifp))
+ continue;
+ if (is_any || if_is_loopback(ifp) || if_is_vrf(ifp))
+ new_addr = if_highest_addr(new_addr, ifp);
+ }
+ break;
+ }
+
+ if (ifp && !if_is_up(ifp))
+ goto out_disable;
+
+ if (pim_addr_is_any(new_addr))
+ goto out_disable;
+
+ /* nothing changed re. address (don't care about interface changes) */
+ if (asel->run && !pim_addr_cmp(asel->run_addr, new_addr))
+ return !prev_run;
+
+ asel->run = true;
+ asel->run_addr = new_addr;
+ return true;
+
+out_disable:
+ asel->run = false;
+ asel->run_addr = PIMADDR_ANY;
+
+ return prev_run;
+}
+
+static void pim_cand_bsr_stop(struct bsm_scope *scope, bool verbose)
+{
+ cand_addrsel_clear(&scope->bsr_addrsel);
+
+ switch (scope->state) {
+ case NO_INFO:
+ case ACCEPT_ANY:
+ case ACCEPT_PREFERRED:
+ return;
+ case BSR_PENDING:
+ case BSR_ELECTED:
+ break;
+ }
+
+ zlog_debug("Candidate BSR ceasing operation");
+
+ EVENT_OFF(scope->t_ebsr_regen_bsm);
+ EVENT_OFF(scope->bs_timer);
+ pim_crp_db_clear(scope);
+ pim_bsm_accept_any(scope);
+}
+
+static void pim_cand_bsr_trigger(struct bsm_scope *scope, bool verbose)
+{
+ /* this is called on all state changes even if we aren't configured
+ * to be C-BSR at all.
+ */
+ if (!scope->bsr_addrsel.run)
+ return;
+
+ if (scope->current_bsr_prio > scope->cand_bsr_prio) {
+ assert(scope->state == ACCEPT_PREFERRED);
+ if (!verbose)
+ return;
+
+ zlog_debug("Candidate BSR: known better BSR %pPA (higher priority %u > %u)",
+ &scope->current_bsr, scope->current_bsr_prio,
+ scope->cand_bsr_prio);
+ return;
+ } else if (scope->current_bsr_prio == scope->cand_bsr_prio &&
+ pim_addr_cmp(scope->current_bsr,
+ scope->bsr_addrsel.run_addr) > 0) {
+ assert(scope->state == ACCEPT_PREFERRED);
+ if (!verbose)
+ return;
+
+ zlog_debug("Candidate BSR: known better BSR %pPA (higher address > %pPA)",
+ &scope->current_bsr, &scope->bsr_addrsel.run_addr);
+ return;
+ }
+
+ if (!pim_addr_cmp(scope->current_bsr, scope->bsr_addrsel.run_addr))
+ return;
+
+ pim_cand_bsr_pending(scope);
+}
+
+void pim_cand_bsr_apply(struct bsm_scope *scope)
+{
+ if (!cand_addrsel_update(&scope->bsr_addrsel, scope->pim->vrf))
+ return;
+
+ if (!scope->bsr_addrsel.run) {
+ pim_cand_bsr_stop(scope, true);
+ return;
+ }
+
+ zlog_debug("Candidate BSR: %pPA, priority %u",
+ &scope->bsr_addrsel.run_addr, scope->cand_bsr_prio);
+
+ pim_cand_bsr_trigger(scope, true);
+}
+
+/* If our previously advertised Candidate-RP address is stale (address
+ * changed, or C-RP shut down), unicast a de-registration for it to the
+ * current BSR: prio 255, holdtime 0, no groups. Only possible while a
+ * BSR is actually known; clears cand_rp_prev_addr afterwards.
+ */
+static void pim_cand_rp_adv_stop_maybe(struct bsm_scope *scope)
+{
+ /* actual check whether stop should be sent - covers address
+ * changes as well as run_addr = 0.0.0.0 (C-RP shutdown)
+ */
+ if (pim_addr_is_any(scope->cand_rp_prev_addr) ||
+ !pim_addr_cmp(scope->cand_rp_prev_addr,
+ scope->cand_rp_addrsel.run_addr))
+ return;
+
+ switch (scope->state) {
+ case ACCEPT_PREFERRED:
+ case BSR_ELECTED:
+ break;
+
+ case NO_INFO:
+ case ACCEPT_ANY:
+ case BSR_PENDING:
+ default:
+ return;
+ }
+
+ zlog_debug("Candidate-RP (-, %pPA) deregistering self to %pPA",
+ &scope->cand_rp_prev_addr, &scope->current_bsr);
+
+ struct cand_rp_msg *msg;
+ /* NOTE(review): buf reserves one group entry although prefix_cnt = 0;
+ * those trailing bytes are transmitted uninitialized - confirm this
+ * is intentional.
+ */
+ uint8_t buf[PIM_MSG_HEADER_LEN + sizeof(*msg) + sizeof(pim_encoded_group)];
+
+ msg = (struct cand_rp_msg *)(&buf[PIM_MSG_HEADER_LEN]);
+ msg->prefix_cnt = 0;
+ msg->rp_prio = 255;
+ msg->rp_holdtime = 0;
+ msg->rp_addr.family = PIM_IANA_AFI;
+ msg->rp_addr.reserved = 0;
+ msg->rp_addr.addr = scope->cand_rp_prev_addr;
+
+ pim_msg_build_header(PIMADDR_ANY, scope->current_bsr, buf, sizeof(buf),
+ PIM_MSG_TYPE_CANDIDATE, false);
+
+ if (pim_msg_send(scope->unicast_sock, PIMADDR_ANY, scope->current_bsr,
+ buf, sizeof(buf), NULL)) {
+ zlog_warn("failed to send Cand-RP message: %m");
+ }
+
+ scope->cand_rp_prev_addr = PIMADDR_ANY;
+}
+
+/* Timer callback: build and unicast a Candidate-RP-Advertisement to the
+ * elected BSR (RFC 5059 sec. 4.2), then reschedule.
+ *
+ * Nothing is sent unless a BSR is known (ACCEPT_PREFERRED/BSR_ELECTED);
+ * in other states the next state change re-triggers advertisement.
+ * While cand_rp_adv_trigger is nonzero, advertisements go out at the
+ * faster triggered cadence (~2s) instead of cand_rp_interval.
+ */
+static void pim_cand_rp_adv(struct event *t)
+{
+ struct bsm_scope *scope = EVENT_ARG(t);
+ int next_msec;
+
+ /* de-register the old address first if it changed */
+ pim_cand_rp_adv_stop_maybe(scope);
+
+ if (!scope->cand_rp_addrsel.run) {
+ scope->cand_rp_adv_trigger = 0;
+ return;
+ }
+
+ switch (scope->state) {
+ case ACCEPT_PREFERRED:
+ case BSR_ELECTED:
+ break;
+
+ case ACCEPT_ANY:
+ case BSR_PENDING:
+ case NO_INFO:
+ default:
+ /* state change will retrigger */
+ scope->cand_rp_adv_trigger = 0;
+
+ zlog_warn("Candidate-RP advertisement not sent in state %d",
+ scope->state);
+ return;
+ }
+
+ zlog_debug("Candidate-RP (%u, %pPA) advertising %zu groups to %pPA",
+ scope->cand_rp_prio, &scope->cand_rp_addrsel.run_addr,
+ cand_rp_groups_count(scope->cand_rp_groups),
+ &scope->current_bsr);
+
+ struct cand_rp_group *grp;
+ struct cand_rp_msg *msg;
+ uint8_t buf[PIM_MSG_HEADER_LEN + sizeof(*msg) +
+ sizeof(pim_encoded_group) *
+ cand_rp_groups_count(scope->cand_rp_groups)];
+ size_t i = 0;
+
+
+ msg = (struct cand_rp_msg *)(&buf[PIM_MSG_HEADER_LEN]);
+ msg->prefix_cnt = cand_rp_groups_count(scope->cand_rp_groups);
+ msg->rp_prio = scope->cand_rp_prio;
+ /* holdtime = 2.5 * advertisement interval, with PIM_CRP_HOLDTIME as
+ * floor. (Was a hardcoded 151, inconsistent with PIM_CRP_HOLDTIME
+ * for the default 60s interval.)
+ */
+ msg->rp_holdtime =
+ htons(MAX(PIM_CRP_HOLDTIME, (scope->cand_rp_interval * 5 + 1) / 2));
+ msg->rp_addr.family = PIM_IANA_AFI;
+ msg->rp_addr.reserved = 0;
+ msg->rp_addr.addr = scope->cand_rp_addrsel.run_addr;
+
+ frr_each (cand_rp_groups, scope->cand_rp_groups, grp) {
+ memset(&msg->groups[i], 0, sizeof(msg->groups[i]));
+
+ msg->groups[i].family = PIM_IANA_AFI;
+ msg->groups[i].mask = grp->p.prefixlen;
+ msg->groups[i].addr = grp->p.prefix;
+ i++;
+ }
+
+ /* remember what we advertised so we can de-register it later */
+ scope->cand_rp_prev_addr = scope->cand_rp_addrsel.run_addr;
+
+ pim_msg_build_header(scope->cand_rp_addrsel.run_addr, scope->current_bsr,
+ buf, sizeof(buf), PIM_MSG_TYPE_CANDIDATE, false);
+
+ if (pim_msg_send(scope->unicast_sock, scope->cand_rp_addrsel.run_addr,
+ scope->current_bsr, buf, sizeof(buf), NULL)) {
+ zlog_warn("failed to send Cand-RP message: %m");
+ }
+
+ /* -1s...+1s jitter */
+ next_msec = (frr_weak_random() & 2047) - 1024;
+
+ if (scope->cand_rp_adv_trigger) {
+ scope->cand_rp_adv_trigger--;
+ next_msec += 2000;
+ } else
+ next_msec += scope->cand_rp_interval * 1000;
+
+ event_add_timer_msec(router->master, pim_cand_rp_adv, scope, next_msec,
+ &scope->cand_rp_adv_timer);
+}
+
+/* Start or refresh the triggered Candidate-RP advertisement burst.
+ *
+ * If a burst is already in flight, just top the counter back up without
+ * rescheduling (avoids an advertisement storm on rapid config changes).
+ * Otherwise send the first advertisement immediately.
+ */
+void pim_cand_rp_trigger(struct bsm_scope *scope)
+{
+ if (scope->cand_rp_adv_trigger && scope->cand_rp_addrsel.run) {
+ scope->cand_rp_adv_trigger = PIM_CRP_ADV_TRIGCOUNT;
+
+ /* already scheduled to send triggered advertisements, don't
+ * reschedule so burst changes don't result in an advertisement
+ * burst
+ */
+ return;
+ }
+
+ EVENT_OFF(scope->cand_rp_adv_timer);
+
+ if (!scope->cand_rp_addrsel.run)
+ return;
+
+ scope->cand_rp_adv_trigger = PIM_CRP_ADV_TRIGCOUNT;
+
+ /* synthesized on-stack event to call the timer handler directly;
+ * pim_cand_rp_adv only reads t->arg
+ */
+ struct event t;
+
+ t.arg = scope;
+ pim_cand_rp_adv(&t);
+}
+
+/* Apply Candidate-RP configuration / address-selection changes.
+ *
+ * Stops C-RP operation (incl. de-registration to the BSR) when no usable
+ * address remains; otherwise (re)starts triggered advertisements.
+ */
+void pim_cand_rp_apply(struct bsm_scope *scope)
+{
+ if (!cand_addrsel_update(&scope->cand_rp_addrsel, scope->pim->vrf))
+ return;
+
+ if (!scope->cand_rp_addrsel.run) {
+ zlog_debug("Candidate RP ceasing operation");
+
+ cand_addrsel_clear(&scope->cand_rp_addrsel);
+ EVENT_OFF(scope->cand_rp_adv_timer);
+ pim_cand_rp_adv_stop_maybe(scope);
+ scope->cand_rp_adv_trigger = 0;
+ return;
+ }
+
+ zlog_debug("Candidate RP: %pPA, priority %u",
+ &scope->cand_rp_addrsel.run_addr, scope->cand_rp_prio);
+
+ pim_cand_rp_trigger(scope);
+}
+
+/* Add a group range to our Candidate-RP advertisement set.
+ * No-op if the range is already present; otherwise advertise immediately
+ * via a triggered burst.
+ */
+void pim_cand_rp_grp_add(struct bsm_scope *scope, const prefix_pim *p)
+{
+ struct cand_rp_group *grp, ref;
+
+ ref.p = *p;
+ grp = cand_rp_groups_find(scope->cand_rp_groups, &ref);
+ if (grp)
+ return;
+
+ grp = XCALLOC(MTYPE_PIM_CAND_RP_GRP, sizeof(*grp));
+ grp->p = *p;
+ cand_rp_groups_add(scope->cand_rp_groups, grp);
+
+ pim_cand_rp_trigger(scope);
+}
+
+/* Remove a group range from our Candidate-RP advertisement set.
+ * No-op if the range is not present; otherwise re-advertise immediately
+ * (the BSR ages the range out via holdtime).
+ */
+void pim_cand_rp_grp_del(struct bsm_scope *scope, const prefix_pim *p)
+{
+ struct cand_rp_group *grp, ref;
+
+ ref.p = *p;
+ grp = cand_rp_groups_find(scope->cand_rp_groups, &ref);
+ if (!grp)
+ return;
+
+ cand_rp_groups_del(scope->cand_rp_groups, grp);
+ XFREE(MTYPE_PIM_CAND_RP_GRP, grp);
+
+ pim_cand_rp_trigger(scope);
+}
+
+/* debounce timer for re-running candidate address selection */
+static struct event *t_cand_addrs_reapply;
+
+/* Timer callback: re-apply C-BSR and C-RP address selection for every
+ * PIM VRF instance.
+ */
+static void pim_cand_addrs_reapply(struct event *t)
+{
+ struct vrf *vrf;
+
+ RB_FOREACH (vrf, vrf_id_head, &vrfs_by_id) {
+ struct pim_instance *pi = vrf->info;
+
+ if (!pi)
+ continue;
+
+ /* these call cand_addrsel_update() and apply changes */
+ pim_cand_bsr_apply(&pi->global_scope);
+ pim_cand_rp_apply(&pi->global_scope);
+ }
+}
+
+/* Called on zebra interface/address events; coalesces bursts of changes
+ * into a single reapply via a 1ms timer.
+ */
+void pim_cand_addrs_changed(void)
+{
+ EVENT_OFF(t_cand_addrs_reapply);
+ event_add_timer_msec(router->master, pim_cand_addrs_reapply, NULL, 1,
+ &t_cand_addrs_reapply);
+}
+
+/* Append the "source ..." suffix of a candidate-bsr/candidate-rp config
+ * line. CAND_ADDR_LO (highest loopback) is the default and emits nothing.
+ */
+static void cand_addrsel_config_write(struct vty *vty,
+ struct cand_addrsel *addrsel)
+{
+ switch (addrsel->cfg_mode) {
+ case CAND_ADDR_LO:
+ break;
+ case CAND_ADDR_ANY:
+ vty_out(vty, " source any");
+ break;
+ case CAND_ADDR_IFACE:
+ vty_out(vty, " source interface %s", addrsel->cfg_ifname);
+ break;
+ case CAND_ADDR_EXPLICIT:
+ vty_out(vty, " source address %pPA", &addrsel->cfg_addr);
+ break;
+ }
+}
+
+/* Emit "ip pim candidate-rp ..." / "ip pim candidate-bsr ..." running
+ * config lines for one PIM instance; returns the number of lines written.
+ *
+ * NOTE(review): 192 and 64 appear to be the C-RP / C-BSR default
+ * priorities (suppressed from output when matching) - confirm against
+ * the frr-pim YANG model defaults.
+ */
+int pim_cand_config_write(struct pim_instance *pim, struct vty *vty,
+ const char *indent)
+{
+ struct bsm_scope *scope = &pim->global_scope;
+ int ret = 0;
+
+ if (scope->cand_rp_addrsel.cfg_enable) {
+ vty_out(vty, "%sip pim candidate-rp", indent);
+ if (scope->cand_rp_prio != 192)
+ vty_out(vty, " priority %u", scope->cand_rp_prio);
+ if (scope->cand_rp_interval != PIM_CRP_ADV_INTERVAL)
+ vty_out(vty, " interval %u", scope->cand_rp_interval);
+ cand_addrsel_config_write(vty, &scope->cand_rp_addrsel);
+ vty_out(vty, "\n");
+ ret++;
+
+ struct cand_rp_group *group;
+
+ frr_each (cand_rp_groups, scope->cand_rp_groups, group) {
+ vty_out(vty, "%sip pim candidate-rp group %pFX\n",
+ indent, &group->p);
+ ret++;
+ }
+ }
+
+ if (scope->bsr_addrsel.cfg_enable) {
+ vty_out(vty, "%sip pim candidate-bsr", indent);
+ if (scope->cand_bsr_prio != 64)
+ vty_out(vty, " priority %u", scope->cand_bsr_prio);
+ cand_addrsel_config_write(vty, &scope->bsr_addrsel);
+ vty_out(vty, "\n");
+ ret++;
+ }
+ return ret;
+}
diff --git a/pimd/pim_bsm.h b/pimd/pim_bsm.h
index fb09e3b1cc36..b9e3539297c9 100644
--- a/pimd/pim_bsm.h
+++ b/pimd/pim_bsm.h
@@ -21,6 +21,13 @@
#define PIM_BS_TIME 60 /* RFC 5059 - Sec 5 */
#define PIM_BSR_DEFAULT_TIMEOUT 130 /* RFC 5059 - Sec 5 */
+/* number of times to include rp-count = 0 ranges */
+#define PIM_BSR_DEAD_COUNT 3
+
+#define PIM_CRP_ADV_TRIGCOUNT 3
+#define PIM_CRP_ADV_INTERVAL 60
+#define PIM_CRP_HOLDTIME 150
+
/* These structures are only encoded IPv4 specific */
#define PIM_BSM_HDR_LEN sizeof(struct bsm_hdr)
#define PIM_BSM_GRP_LEN sizeof(struct bsmmsg_grpinfo)
@@ -33,19 +40,61 @@
* ==============
*/
-/* Non candidate BSR states */
-enum ncbsr_state {
+/* BSR states
+ *
+ * Candidate BSR starts at BSR_PENDING, moves to AP or E depending on
+ * loss/win. Will never go into AA (because in that case it'd become BSR
+ * itself.)
+ *
+ * Non-Candidate BSR starts at NO_INFO, moves to AP & AA depending on
+ * a BSR being available or not.
+ */
+enum bsr_state {
NO_INFO = 0,
ACCEPT_ANY,
- ACCEPT_PREFERRED
+ ACCEPT_PREFERRED, /* = same as C-BSR if candidate */
+ BSR_PENDING,
+ BSR_ELECTED,
+};
+
+enum cand_addr {
+ CAND_ADDR_LO = 0,
+ CAND_ADDR_ANY,
+ CAND_ADDR_IFACE,
+ CAND_ADDR_EXPLICIT,
};
+/* used separately for Cand-RP and Cand-BSR */
+struct cand_addrsel {
+ bool cfg_enable;
+ enum cand_addr cfg_mode : 8;
+
+ /* only valid for mode==CAND_ADDR_IFACE */
+ char cfg_ifname[IFNAMSIZ];
+ /* only valid for mode==CAND_ADDR_EXPLICIT */
+ pim_addr cfg_addr;
+
+ /* running state updated based on above on zebra events */
+ pim_addr run_addr;
+ bool run;
+};
+
+
PREDECL_DLIST(bsm_frags);
+PREDECL_RBTREE_UNIQ(cand_rp_groups);
+
+/* n*m "table" accessed both by-RP and by-group */
+PREDECL_RBTREE_UNIQ(bsr_crp_rps);
+PREDECL_RBTREE_UNIQ(bsr_crp_groups);
+
+PREDECL_RBTREE_UNIQ(bsr_crp_rp_groups);
+PREDECL_RBTREE_UNIQ(bsr_crp_group_rps);
/* BSM scope - bsm processing is per scope */
struct bsm_scope {
int sz_id; /* scope zone id */
- enum ncbsr_state state; /* non candidate BSR state */
+ enum bsr_state state; /* BSR state */
+
bool accept_nofwd_bsm; /* no fwd bsm accepted for scope */
pim_addr current_bsr; /* current elected BSR for the sz */
uint32_t current_bsr_prio; /* current BSR priority */
@@ -60,6 +109,93 @@ struct bsm_scope {
struct route_table *bsrp_table; /* group2rp mapping rcvd from BSR */
struct event *bs_timer; /* Boot strap timer */
+
+ /* Candidate BSR config */
+ struct cand_addrsel bsr_addrsel;
+ uint8_t cand_bsr_prio;
+
+ /* Candidate BSR state */
+ uint8_t current_cand_bsr_prio;
+ /* if nothing changed from Cand-RP data we received, less work... */
+ bool elec_rp_data_changed;
+
+ /* data that the E-BSR keeps - not to be confused with Candidate-RP
+ * stuff below. These two here are the info about all the Cand-RPs
+ * that we as a BSR received information for in Cand-RP-adv packets.
+ */
+ struct bsr_crp_rps_head ebsr_rps[1];
+ struct bsr_crp_groups_head ebsr_groups[1];
+
+ /* set if we have any group ranges where we're currently advertising
+ * rp-count = 0 (includes both ranges without any RPs as well as
+ * ranges with only NHT-unreachable RPs)
+ */
+ bool ebsr_have_dead_pending;
+ unsigned changed_bsm_trigger;
+
+ struct event *t_ebsr_regen_bsm;
+
+ /* Candidate RP config */
+ struct cand_addrsel cand_rp_addrsel;
+ uint8_t cand_rp_prio;
+ unsigned int cand_rp_interval; /* default: PIM_CRP_ADV_INTERVAL=60 */
+ /* holdtime is not configurable, always 2.5 * interval. */
+ struct cand_rp_groups_head cand_rp_groups[1];
+
+ /* Candidate RP state */
+ int unicast_sock;
+ struct event *unicast_read;
+ struct event *cand_rp_adv_timer;
+ unsigned int cand_rp_adv_trigger; /* # trigg. C-RP-Adv left to send */
+
+ /* for sending holdtime=0 zap */
+ pim_addr cand_rp_prev_addr;
+};
+
+struct cand_rp_group {
+ struct cand_rp_groups_item item;
+
+ prefix_pim p;
+};
+
+struct bsr_crp_group {
+ struct bsr_crp_groups_item item;
+
+ prefix_pim range;
+ struct bsr_crp_group_rps_head rps[1];
+
+ size_t n_selected;
+ bool deleted_selected : 1;
+
+ /* number of times we've advertised this range with rp-count = 0 */
+ unsigned dead_count;
+};
+
+struct bsr_crp_rp {
+ struct bsr_crp_rps_item item;
+
+ pim_addr addr;
+ struct bsr_crp_rp_groups_head groups[1];
+
+ struct bsm_scope *scope;
+ struct event *t_hold;
+ time_t seen_first;
+ time_t seen_last;
+
+ uint16_t holdtime;
+ uint8_t prio;
+ bool nht_ok;
+};
+
+/* "n * m" RP<->Group tie-in */
+struct bsr_crp_item {
+ struct bsr_crp_rp_groups_item r_g_item;
+ struct bsr_crp_group_rps_item g_r_item;
+
+ struct bsr_crp_group *group;
+ struct bsr_crp_rp *rp;
+
+ bool selected : 1;
};
/* BSM packet (= fragment) - this is stored as list in bsm_frags inside scope
@@ -200,6 +336,14 @@ struct bsmmsg_rpinfo {
uint8_t reserved;
} __attribute__((packed));
+struct cand_rp_msg {
+ uint8_t prefix_cnt;
+ uint8_t rp_prio;
+ uint16_t rp_holdtime;
+ pim_encoded_unicast rp_addr;
+ pim_encoded_group groups[0];
+} __attribute__((packed));
+
/* API */
void pim_bsm_proc_init(struct pim_instance *pim);
void pim_bsm_proc_free(struct pim_instance *pim);
@@ -210,4 +354,33 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,
bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp);
struct bsgrp_node *pim_bsm_get_bsgrp_node(struct bsm_scope *scope,
struct prefix *grp);
+
+void pim_bsm_generate(struct bsm_scope *scope);
+void pim_bsm_changed(struct bsm_scope *scope);
+void pim_bsm_sent(struct bsm_scope *scope);
+void pim_bsm_frags_free(struct bsm_scope *scope);
+
+void pim_cand_bsr_apply(struct bsm_scope *scope);
+void pim_cand_rp_apply(struct bsm_scope *scope);
+void pim_cand_rp_trigger(struct bsm_scope *scope);
+void pim_cand_rp_grp_add(struct bsm_scope *scope, const prefix_pim *p);
+void pim_cand_rp_grp_del(struct bsm_scope *scope, const prefix_pim *p);
+
+void pim_cand_addrs_changed(void);
+
+int pim_crp_process(struct interface *ifp, pim_sgaddr *src_dst, uint8_t *buf,
+ uint32_t buf_size);
+
+struct pim_nexthop_cache;
+void pim_crp_nht_update(struct pim_instance *pim, struct pim_nexthop_cache *pnc);
+
+void pim_crp_db_clear(struct bsm_scope *scope);
+int pim_crp_db_show(struct vty *vty, struct bsm_scope *scope);
+int pim_crp_groups_show(struct vty *vty, struct bsm_scope *scope);
+
+int pim_cand_config_write(struct pim_instance *pim, struct vty *vty,
+ const char *indent);
+
+DECLARE_MTYPE(PIM_BSM_FRAG);
+
#endif
diff --git a/pimd/pim_bsr_rpdb.c b/pimd/pim_bsr_rpdb.c
new file mode 100644
index 000000000000..5d98ba626deb
--- /dev/null
+++ b/pimd/pim_bsr_rpdb.c
@@ -0,0 +1,641 @@
+/*
+ * PIM RP database for BSR operation
+ *
+ * Copyright (C) 2021 David Lamparter for NetDEF, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <zebra.h>
+/* NOTE(review): the remaining five angle-bracket #include targets on these
+ * lines were lost to markup stripping ("<...>" consumed by extraction);
+ * restore them from the original patch before applying.
+ */
+
+#include "if.h"
+#include "pimd.h"
+#include "pim_iface.h"
+#include "pim_instance.h"
+#include "pim_rpf.h"
+#include "pim_hello.h"
+#include "pim_pim.h"
+#include "pim_nht.h"
+#include "pim_bsm.h"
+#include "pim_time.h"
+
+/* safety limits to prevent DoS/memory exhaustion attacks against the BSR
+ *
+ * The BSR is more susceptible than other PIM protocol operation because
+ * Candidate-RP messages are unicast to the BSR without any 2-way interaction
+ * and can thus be spoofed blindly(!) from anywhere in the internet.
+ *
+ * Everything else is on-link, multicast, or requires an adjacency - much
+ * harder to mess with.
+ */
+
+/* total number of RPs we keep information for */
+static size_t bsr_max_rps = 1024;
+
+DEFINE_MTYPE_STATIC(PIMD, PIM_BSR_CRP, "PIM BSR C-RP");
+DEFINE_MTYPE_STATIC(PIMD, PIM_BSR_GROUP, "PIM BSR range");
+DEFINE_MTYPE_STATIC(PIMD, PIM_BSR_ITEM, "PIM BSR C-RP range item");
+
+/* by-RP tree: keyed on RP address */
+static int rp_cmp(const struct bsr_crp_rp *a, const struct bsr_crp_rp *b)
+{
+ return pim_addr_cmp(a->addr, b->addr);
+}
+
+DECLARE_RBTREE_UNIQ(bsr_crp_rps, struct bsr_crp_rp, item, rp_cmp);
+
+/* by-group tree: keyed on group range prefix */
+static int group_cmp(const struct bsr_crp_group *a,
+ const struct bsr_crp_group *b)
+{
+ return prefix_cmp(&a->range, &b->range);
+}
+
+DECLARE_RBTREE_UNIQ(bsr_crp_groups, struct bsr_crp_group, item, group_cmp);
+
+/* per-RP list of group memberships: ordered by group range */
+static int r_g_cmp(const struct bsr_crp_item *a, const struct bsr_crp_item *b)
+{
+ return prefix_cmp(&a->group->range, &b->group->range);
+}
+
+DECLARE_RBTREE_UNIQ(bsr_crp_rp_groups, struct bsr_crp_item, r_g_item, r_g_cmp);
+
+/* per-group list of RPs: best candidates sort first - this ordering IS
+ * the BSR's RP selection policy (see bsr_crp_reselect)
+ */
+static int g_r_cmp(const struct bsr_crp_item *a, const struct bsr_crp_item *b)
+{
+ const struct bsr_crp_rp *rp_a = a->rp, *rp_b = b->rp;
+
+ /* NHT-failed RPs last */
+ if (rp_a->nht_ok > rp_b->nht_ok)
+ return -1;
+ if (rp_a->nht_ok < rp_b->nht_ok)
+ return 1;
+
+ /* This function determines BSR policy in what subset of the received
+ * RP candidates to advertise. The BSR is free to make its choices
+ * any way it deems useful
+ */
+
+ /* lower numeric values are better */
+ if (rp_a->prio < rp_b->prio)
+ return -1;
+ if (rp_a->prio > rp_b->prio)
+ return 1;
+
+ /* prefer older RP for less churn */
+ if (rp_a->seen_first < rp_b->seen_first)
+ return -1;
+ if (rp_a->seen_first > rp_b->seen_first)
+ return 1;
+
+ return pim_addr_cmp(rp_a->addr, rp_b->addr);
+}
+
+DECLARE_RBTREE_UNIQ(bsr_crp_group_rps, struct bsr_crp_item, g_r_item, g_r_cmp);
+
+/* Build a fresh Bootstrap Message from the elected-BSR RP database.
+ *
+ * Only valid in BSR_ELECTED state. Group ranges with no selected RPs are
+ * still advertised with rp-count = 0 for up to PIM_BSR_DEAD_COUNT rounds
+ * so downstream routers flush them. The result replaces scope->bsm_frags
+ * as a single fragment (fragmentation not implemented yet, see
+ * CPP_NOTICE below).
+ */
+void pim_bsm_generate(struct bsm_scope *scope)
+{
+ struct bsm_frag *frag;
+ struct bsm_hdr *hdr;
+ bool have_dead = false;
+
+ assertf(scope->state == BSR_ELECTED, "state=%d", scope->state);
+
+ pim_bsm_frags_free(scope);
+
+ struct bsr_crp_group *group;
+ struct bsr_crp_item *item;
+ struct bsr_crp_rp *rp;
+ size_t n_groups = 0, n_rps = 0;
+
+ /* pass 1: size the message & track "dead" (rp-count = 0) ranges */
+ frr_each (bsr_crp_groups, scope->ebsr_groups, group) {
+ if (group->n_selected == 0) {
+ if (group->dead_count >= PIM_BSR_DEAD_COUNT)
+ continue;
+
+ have_dead = true;
+ } else
+ group->dead_count = 0;
+
+ n_groups++;
+ n_rps += group->n_selected;
+ }
+
+ zlog_debug("Generating BSM (%zu ranges, %zu RPs)", n_groups, n_rps);
+
+ size_t datalen = PIM_MSG_HEADER_LEN + sizeof(*hdr) +
+ n_groups * sizeof(struct bsmmsg_grpinfo) +
+ n_rps * sizeof(struct bsmmsg_rpinfo);
+
+ frag = XCALLOC(MTYPE_PIM_BSM_FRAG, sizeof(*frag) + datalen);
+
+ uint8_t *pos = frag->data + PIM_MSG_HEADER_LEN;
+ uint8_t *end = frag->data + datalen;
+
+ hdr = (struct bsm_hdr *)pos;
+ pos += sizeof(*hdr);
+ assert(pos <= end);
+
+ CPP_NOTICE("FIXME make BSR hashmasklen configurable");
+#if PIM_IPV == 6
+ hdr->hm_len = 126;
+#else
+ hdr->hm_len = 30;
+#endif
+ hdr->bsr_prio = scope->current_bsr_prio;
+ hdr->bsr_addr.family = PIM_IANA_AFI;
+ hdr->bsr_addr.reserved = 0;
+ hdr->bsr_addr.addr = scope->bsr_addrsel.run_addr;
+
+ /* pass 2: emit group ranges with their selected RPs */
+ frr_each (bsr_crp_groups, scope->ebsr_groups, group) {
+ if (group->n_selected == 0 &&
+ group->dead_count >= PIM_BSR_DEAD_COUNT)
+ continue;
+
+ struct bsmmsg_grpinfo *gi = (struct bsmmsg_grpinfo *)pos;
+
+ pos += sizeof(*gi);
+ assert(pos <= end);
+
+ gi->group.family = PIM_MSG_ADDRESS_FAMILY;
+ gi->group.mask = group->range.prefixlen;
+ gi->group.addr = group->range.prefix;
+
+ size_t n_added = 0;
+
+ /* selected items sort first, so stop at first unselected */
+ frr_each (bsr_crp_group_rps, group->rps, item) {
+ if (!item->selected)
+ break;
+
+ struct bsmmsg_rpinfo *ri = (struct bsmmsg_rpinfo *)pos;
+
+ pos += sizeof(*ri);
+ assert(pos <= end);
+
+ rp = item->rp;
+ ri->rpaddr.family = PIM_MSG_ADDRESS_FAMILY;
+ ri->rpaddr.addr = rp->addr;
+ ri->rp_holdtime = htons(rp->holdtime);
+ ri->rp_pri = rp->prio;
+
+ n_added++;
+ }
+
+ gi->rp_count = group->n_selected;
+ gi->frag_rp_count = n_added;
+ assert(n_added == group->n_selected);
+ CPP_NOTICE("implement fragmentation");
+ }
+
+ assertf(pos == end, "end-pos=%td", end - pos);
+ frag->size = datalen;
+
+ bsm_frags_add_head(scope->bsm_frags, frag);
+
+ scope->ebsr_have_dead_pending = have_dead;
+
+ pim_bsm_changed(scope);
+}
+
+/* Timer callback wrapper for pim_bsm_generate(). */
+static void pim_bsm_generate_timer(struct event *t)
+{
+ struct bsm_scope *scope = EVENT_ARG(t);
+
+ pim_bsm_generate(scope);
+}
+
+/* Schedule (debounced) regeneration of our BSM after RP-database changes;
+ * only meaningful while we are the elected BSR.
+ */
+static void pim_bsm_generate_sched(struct bsm_scope *scope)
+{
+ assertf(scope->state == BSR_ELECTED, "state=%d", scope->state);
+
+ if (scope->t_ebsr_regen_bsm)
+ return;
+
+ CPP_NOTICE("timer");
+ event_add_timer(router->master, pim_bsm_generate_timer, scope, 1,
+ &scope->t_ebsr_regen_bsm);
+}
+
+/* Bookkeeping after a BSM was transmitted: age "dead" (rp-count = 0)
+ * group ranges. Each such range is advertised PIM_BSR_DEAD_COUNT times,
+ * then dropped (freed entirely if it has no RPs left at all) and the BSM
+ * regenerated.
+ */
+void pim_bsm_sent(struct bsm_scope *scope)
+{
+ struct bsr_crp_group *group;
+ bool have_dead = false, changed = false;
+
+ if (!scope->ebsr_have_dead_pending)
+ return;
+
+ frr_each_safe (bsr_crp_groups, scope->ebsr_groups, group) {
+ if (group->n_selected != 0)
+ continue;
+
+ if (group->dead_count < PIM_BSR_DEAD_COUNT) {
+ group->dead_count++;
+ have_dead = true;
+ continue;
+ }
+
+ changed = true;
+
+ if (bsr_crp_group_rps_count(group->rps))
+ /* have RPs, but none selected */
+ continue;
+
+ /* no reason to keep this range anymore */
+ bsr_crp_groups_del(scope->ebsr_groups, group);
+ bsr_crp_group_rps_fini(group->rps);
+ XFREE(MTYPE_PIM_BSR_GROUP, group);
+ continue;
+ }
+
+ scope->ebsr_have_dead_pending = have_dead;
+ if (changed)
+ pim_bsm_generate_sched(scope);
+}
+
+/* Recompute which of a group range's RPs get advertised.
+ *
+ * Policy (hardcoded for now): the best two NHT-reachable RPs, relying on
+ * the g_r_cmp sort order of group->rps. Schedules a BSM regeneration
+ * when the selection changed (including a prior deletion of a selected
+ * RP, tracked via deleted_selected).
+ */
+static void bsr_crp_reselect(struct bsm_scope *scope,
+ struct bsr_crp_group *group)
+{
+ bool changed = false;
+ struct bsr_crp_item *item;
+ size_t n_selected = 0;
+
+ frr_each (bsr_crp_group_rps, group->rps, item) {
+ bool select = false;
+
+ /* hardcode best 2 RPs for now */
+ if (item->rp->nht_ok && n_selected < 2) {
+ select = true;
+ n_selected++;
+ }
+
+ if (item->selected != select) {
+ changed = true;
+ item->selected = select;
+ }
+ }
+
+ changed |= group->deleted_selected;
+ group->deleted_selected = false;
+ group->n_selected = n_selected;
+
+ if (changed)
+ pim_bsm_generate_sched(scope);
+
+ scope->elec_rp_data_changed |= changed;
+
+ CPP_NOTICE("cleanup unused groups after holdover");
+}
+
+/* changing rp->nht_ok or rp->prio affects the sort order in group->rp
+ * lists, so need a delete & re-add if either changes
+ */
+static void pim_crp_nht_prio_change(struct bsr_crp_rp *rp, bool nht_ok,
+ uint8_t prio)
+{
+ struct bsr_crp_item *item;
+
+ /* unlink from every group's sorted RP list first ... */
+ frr_each (bsr_crp_rp_groups, rp->groups, item)
+ bsr_crp_group_rps_del(item->group->rps, item);
+
+ rp->prio = prio;
+ rp->nht_ok = nht_ok;
+
+ /* ... then re-add under the new key and refresh each selection */
+ frr_each (bsr_crp_rp_groups, rp->groups, item) {
+ bsr_crp_group_rps_add(item->group->rps, item);
+ bsr_crp_reselect(rp->scope, item->group);
+ }
+}
+
+/* Find or create the E-BSR database entry for a group range. */
+static struct bsr_crp_group *group_get(struct bsm_scope *scope,
+ prefix_pim *range)
+{
+ struct bsr_crp_group *group, ref;
+
+ ref.range = *range;
+ group = bsr_crp_groups_find(scope->ebsr_groups, &ref);
+ if (!group) {
+ group = XCALLOC(MTYPE_PIM_BSR_GROUP, sizeof(*group));
+ group->range = *range;
+ bsr_crp_group_rps_init(group->rps);
+ bsr_crp_groups_add(scope->ebsr_groups, group);
+ }
+ return group;
+}
+
+/* Merge a Candidate-RP-Advertisement into the E-BSR database for one RP.
+ * Called with msg == NULL / ngroups == 0 to withdraw everything (expiry).
+ *
+ * Still-advertised ranges are moved back from the old set, new ranges get
+ * fresh item links, and leftovers (no longer advertised) are torn down;
+ * every touched range is re-selected, which may schedule a BSM regen.
+ * A priority change requires a full delete/re-add re-sort and is applied
+ * last.
+ */
+static void pim_crp_update(struct bsr_crp_rp *rp, struct cand_rp_msg *msg,
+ size_t ngroups)
+{
+ struct bsr_crp_rp_groups_head oldgroups[1];
+ struct bsr_crp_item *item, itemref;
+ struct bsr_crp_group *group, groupref;
+
+ /* move all current memberships aside; whatever the message still
+ * lists is moved back, the leftovers are freed at the end
+ */
+ bsr_crp_rp_groups_init(oldgroups);
+ bsr_crp_rp_groups_swap_all(rp->groups, oldgroups);
+
+ itemref.rp = rp;
+ itemref.group = &groupref;
+
+ assert(msg || ngroups == 0);
+
+ for (size_t i = 0; i < ngroups; i++) {
+ if (msg->groups[i].family != PIM_MSG_ADDRESS_FAMILY)
+ continue;
+ if (msg->groups[i].bidir)
+ continue;
+
+ prefix_pim pfx;
+
+ pfx.family = PIM_AF;
+ pfx.prefixlen = msg->groups[i].mask;
+ pfx.prefix = msg->groups[i].addr;
+
+#if PIM_IPV == 4
+ /* reject non-multicast / overly broad ranges */
+ if (pfx.prefixlen < 4)
+ continue;
+ if (!IPV4_CLASS_DE(ntohl(pfx.prefix.s_addr)))
+ continue;
+#endif
+
+ apply_mask(&pfx);
+
+ groupref.range = pfx;
+ item = bsr_crp_rp_groups_find(oldgroups, &itemref);
+
+ if (item) {
+ /* unchanged membership - keep it */
+ bsr_crp_rp_groups_del(oldgroups, item);
+ bsr_crp_rp_groups_add(rp->groups, item);
+ continue;
+ }
+
+ group = group_get(rp->scope, &pfx);
+
+ item = XCALLOC(MTYPE_PIM_BSR_ITEM, sizeof(*item));
+ item->rp = rp;
+ item->group = group;
+
+ bsr_crp_group_rps_add(group->rps, item);
+ bsr_crp_rp_groups_add(rp->groups, item);
+
+ bsr_crp_reselect(rp->scope, group);
+ }
+
+ /* ranges this RP no longer advertises */
+ while ((item = bsr_crp_rp_groups_pop(oldgroups))) {
+ group = item->group;
+ if (item->selected)
+ group->deleted_selected = true;
+
+ bsr_crp_group_rps_del(group->rps, item);
+ XFREE(MTYPE_PIM_BSR_ITEM, item);
+
+ bsr_crp_reselect(rp->scope, group);
+ }
+ bsr_crp_rp_groups_fini(oldgroups);
+
+ if (msg && msg->rp_prio != rp->prio)
+ pim_crp_nht_prio_change(rp, rp->nht_ok, msg->rp_prio);
+}
+
+/* NHT result callback for a tracked Candidate-RP address: update
+ * reachability and re-sort/re-select if it changed.
+ *
+ * NOTE(review): assertf() demands that every NHT update corresponds to a
+ * tracked C-RP - this relies on exact pim_nht_candrp_add()/_del()
+ * pairing; confirm no stray callbacks can arrive after deletion.
+ */
+void pim_crp_nht_update(struct pim_instance *pim, struct pim_nexthop_cache *pnc)
+{
+ struct bsm_scope *scope = &pim->global_scope;
+ struct bsr_crp_rp *rp, ref;
+ bool ok;
+
+ ref.addr = pnc->rpf.rpf_addr;
+ rp = bsr_crp_rps_find(scope->ebsr_rps, &ref);
+ assertf(rp, "addr=%pPA", &ref.addr);
+
+ ok = CHECK_FLAG(pnc->flags, PIM_NEXTHOP_VALID);
+ if (ok == rp->nht_ok)
+ return;
+
+ zlog_debug("Candidate-RP %pPA NHT %s", &rp->addr, ok ? "UP" : "DOWN");
+ pim_crp_nht_prio_change(rp, ok, rp->prio);
+}
+
+/* Free one Candidate-RP entry: cancel its holdtime timer, stop NHT
+ * tracking and release the (empty) group list. The caller must already
+ * have unlinked it from scope->ebsr_rps.
+ */
+static void pim_crp_free(struct pim_instance *pim, struct bsr_crp_rp *rp)
+{
+ EVENT_OFF(rp->t_hold);
+ pim_nht_candrp_del(pim, rp->addr);
+ bsr_crp_rp_groups_fini(rp->groups);
+
+ XFREE(MTYPE_PIM_BSR_CRP, rp);
+}
+
+/* Holdtime expiry for a Candidate-RP: withdraw all of its group ranges,
+ * then remove and free the entry.
+ */
+static void pim_crp_expire(struct event *t)
+{
+ struct bsr_crp_rp *rp = EVENT_ARG(t);
+ struct pim_instance *pim = rp->scope->pim;
+
+ if (PIM_DEBUG_BSM)
+ zlog_debug("Candidate-RP %pPA holdtime expired", &rp->addr);
+
+ /* msg == NULL / ngroups == 0 withdraws all ranges */
+ pim_crp_update(rp, NULL, 0);
+
+ bsr_crp_rps_del(rp->scope->ebsr_rps, rp);
+ pim_crp_free(pim, rp);
+}
+
+/* Handle a received PIM Candidate-RP-Advertisement (RFC 5059 sec. 4.2).
+ *
+ * Only processed while this router is a candidate or elected BSR
+ * (state >= BSR_PENDING). Creates or refreshes the C-RP entry in the
+ * E-BSR database - bounded by bsr_max_rps as DoS protection, since these
+ * messages are unicast and can be spoofed blindly - restarts its holdtime
+ * timer and merges the advertised group ranges.
+ *
+ * Returns 0 on success, -1 on any validation failure.
+ */
+int pim_crp_process(struct interface *ifp, pim_sgaddr *src_dst, uint8_t *buf,
+ uint32_t buf_size)
+{
+ struct pim_interface *pim_ifp = NULL;
+ struct pim_instance *pim;
+ struct bsm_scope *scope;
+
+ pim_ifp = ifp->info;
+ if (!pim_ifp) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: multicast not enabled on interface %s",
+ __func__, ifp->name);
+ return -1;
+ }
+
+ //pim_ifp->pim_ifstat_bsm_rx++;
+ pim = pim_ifp->pim;
+ //pim->bsm_rcvd++;
+
+ if (!pim_ifp->bsm_enable) {
+ zlog_warn("%s: BSM not enabled on interface %s", __func__,
+ ifp->name);
+ //pim_ifp->pim_ifstat_bsm_cfg_miss++;
+ //pim->bsm_dropped++;
+ return -1;
+ }
+
+ if (buf_size < (PIM_MSG_HEADER_LEN + sizeof(struct cand_rp_msg))) {
+ if (PIM_DEBUG_BSM)
+ /* %u: buf_size is uint32_t (was %d) */
+ zlog_debug("%s: received buffer length of %u which is too small to properly decode",
+ __func__, buf_size);
+ return -1;
+ }
+
+ scope = &pim->global_scope;
+
+ if (scope->state < BSR_PENDING) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("received Candidate-RP message from %pPA while not BSR",
+ &src_dst->src);
+ return -1;
+ }
+
+ size_t remain = buf_size;
+ struct cand_rp_msg *crp_hdr;
+
+ buf += PIM_MSG_HEADER_LEN;
+ remain -= PIM_MSG_HEADER_LEN;
+
+ crp_hdr = (struct cand_rp_msg *)buf;
+ buf += sizeof(*crp_hdr);
+ remain -= sizeof(*crp_hdr);
+
+ size_t ngroups = crp_hdr->prefix_cnt;
+
+ /* use the actual (per-AF) encoded group size here; checking against
+ * the IPv4 struct under-validated the length on IPv6 builds, allowing
+ * an out-of-bounds read while parsing the group list below
+ */
+ if (remain < ngroups * sizeof(crp_hdr->groups[0])) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("truncated Candidate-RP advertisement for RP %pPA from %pPA (too short for %zu groups)",
+ (pim_addr *)&crp_hdr->rp_addr.addr,
+ &src_dst->src, ngroups);
+ return -1;
+ }
+
+ if (PIM_DEBUG_BSM)
+ zlog_debug("Candidate-RP: %pPA, prio=%u (from %pPA, %zu groups)",
+ (pim_addr *)&crp_hdr->rp_addr.addr, crp_hdr->rp_prio,
+ &src_dst->src, ngroups);
+
+
+ struct bsr_crp_rp *rp, ref;
+
+ ref.addr = crp_hdr->rp_addr.addr;
+ rp = bsr_crp_rps_find(scope->ebsr_rps, &ref);
+
+ if (!rp) {
+ if (bsr_crp_rps_count(scope->ebsr_rps) >= bsr_max_rps) {
+ zlog_err("BSR: number of tracked Candidate RPs (%zu) exceeds DoS-protection limit (%zu), dropping advertisement for RP %pPA (packet source %pPA)",
+ bsr_crp_rps_count(scope->ebsr_rps),
+ bsr_max_rps, (pim_addr *)&crp_hdr->rp_addr.addr,
+ &src_dst->src);
+ return -1;
+ }
+
+ if (PIM_DEBUG_BSM)
+ zlog_debug("new Candidate-RP: %pPA (from %pPA)",
+ (pim_addr *)&crp_hdr->rp_addr.addr,
+ &src_dst->src);
+
+ rp = XCALLOC(MTYPE_PIM_BSR_CRP, sizeof(*rp));
+ rp->scope = scope;
+ rp->addr = crp_hdr->rp_addr.addr;
+ rp->prio = 255;
+ bsr_crp_rp_groups_init(rp->groups);
+ rp->seen_first = monotime(NULL);
+
+ bsr_crp_rps_add(scope->ebsr_rps, rp);
+ rp->nht_ok = pim_nht_candrp_add(pim, rp->addr);
+ }
+
+ rp->seen_last = monotime(NULL);
+ rp->holdtime = ntohs(crp_hdr->rp_holdtime);
+
+ EVENT_OFF(rp->t_hold);
+ event_add_timer(router->master, pim_crp_expire, rp,
+ ntohs(crp_hdr->rp_holdtime), &rp->t_hold);
+
+ pim_crp_update(rp, crp_hdr, ngroups);
+ return 0;
+}
+
+/* Flush the entire E-BSR Candidate-RP database (losing the election or
+ * C-BSR shutdown): free every RP with its group memberships, then every
+ * group range - which must be empty of RPs by that point.
+ */
+void pim_crp_db_clear(struct bsm_scope *scope)
+{
+ struct bsr_crp_rp *rp;
+ struct bsr_crp_group *group;
+ struct bsr_crp_item *item;
+
+ while ((rp = bsr_crp_rps_pop(scope->ebsr_rps))) {
+ while ((item = bsr_crp_rp_groups_pop(rp->groups))) {
+ group = item->group;
+
+ if (item->selected)
+ group->deleted_selected = true;
+
+ bsr_crp_group_rps_del(group->rps, item);
+ XFREE(MTYPE_PIM_BSR_ITEM, item);
+ }
+ pim_crp_free(scope->pim, rp);
+ }
+
+ while ((group = bsr_crp_groups_pop(scope->ebsr_groups))) {
+ assertf(!bsr_crp_group_rps_count(group->rps),
+ "range=%pFX rp_count=%zu", &group->range,
+ bsr_crp_group_rps_count(group->rps));
+
+ bsr_crp_group_rps_fini(group->rps);
+ XFREE(MTYPE_PIM_BSR_GROUP, group);
+ }
+}
+
+/* "show ip pim bsr candidate-rps" backend: dump the E-BSR RP database,
+ * one RP per row plus its group ranges ('>' marks selected ranges).
+ */
+int pim_crp_db_show(struct vty *vty, struct bsm_scope *scope)
+{
+ struct bsr_crp_rp *rp;
+ struct bsr_crp_item *item;
+
+ vty_out(vty, "RP/Group NHT Prio Uptime Hold\n");
+
+ frr_each (bsr_crp_rps, scope->ebsr_rps, rp) {
+ vty_out(vty, "%-15pPA %4s %4u %8ld %4lu\n", &rp->addr,
+ rp->nht_ok ? "UP" : "DOWN", rp->prio,
+ (long)(monotime(NULL) - rp->seen_first),
+ event_timer_remain_second(rp->t_hold));
+
+ frr_each (bsr_crp_rp_groups, rp->groups, item)
+ vty_out(vty, "%c %-18pFX\n", item->selected ? '>' : ' ',
+ &item->group->range);
+ }
+
+ return CMD_SUCCESS;
+}
+
+/* "show ip pim bsr groups" backend: dump the E-BSR group-range view.
+ * Legend: '^' range has selected RPs, '!' none (with dead-count);
+ * '>' marks the RPs currently selected for a range.
+ */
+int pim_crp_groups_show(struct vty *vty, struct bsm_scope *scope)
+{
+ struct bsr_crp_group *group;
+ struct bsr_crp_item *item;
+
+ if (scope->ebsr_have_dead_pending)
+ vty_out(vty, "have_dead_pending\n");
+
+ frr_each (bsr_crp_groups, scope->ebsr_groups, group) {
+ vty_out(vty, "%c %pFX", group->n_selected ? '^' : '!',
+ &group->range);
+ if (group->n_selected == 0) {
+ vty_out(vty, " (dead %u)", group->dead_count);
+ }
+ vty_out(vty, "\n");
+
+ frr_each (bsr_crp_group_rps, group->rps, item)
+ vty_out(vty, "%c %pPA\n", item->selected ? '>' : ' ',
+ &item->rp->addr);
+ }
+
+ return CMD_SUCCESS;
+}
diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c
index a2d756a96a20..3f2e41dbca64 100644
--- a/pimd/pim_cmd.c
+++ b/pimd/pim_cmd.c
@@ -2727,6 +2727,114 @@ DEFPY (show_ip_pim_bsrp,
return pim_show_group_rp_mappings_info_helper(vrf, vty, !!json);
}
+/* "show ip pim candidate-rp": display this router's own C-RP state
+ * (advertised address, priority, time until next advertisement).
+ */
+DEFUN (show_ip_pim_cand_rp,
+ show_ip_pim_cand_rp_cmd,
+ "show ip pim candidate-rp [vrf NAME] [json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ "PIM Candidate RP state\n"
+ VRF_CMD_HELP_STR
+ JSON_STR)
+{
+ bool uj = use_json(argc, argv);
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);
+ struct pim_instance *pim;
+ struct bsm_scope *scope;
+ json_object *json = NULL;
+
+ if (!vrf || !vrf->info)
+ return CMD_WARNING;
+
+ pim = (struct pim_instance *)vrf->info;
+ scope = &pim->global_scope;
+
+ if (!scope->cand_rp_addrsel.run) {
+ if (uj)
+ vty_out(vty, "{}\n");
+ else
+ vty_out(vty,
+ "This router is not currently operating as Candidate RP\n");
+ return CMD_SUCCESS;
+ }
+
+ if (uj) {
+ char buf[INET_ADDRSTRLEN];
+
+ json = json_object_new_object();
+ inet_ntop(AF_INET, &scope->cand_rp_addrsel.run_addr, buf,
+ sizeof(buf));
+ json_object_string_add(json, "address", buf);
+ json_object_int_add(json, "priority", scope->cand_rp_prio);
+ json_object_int_add(json, "nextAdvertisementMsec",
+ pim_time_timer_remain_msec(
+ scope->cand_rp_adv_timer));
+
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(json,
+ JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ return CMD_SUCCESS;
+ }
+
+ vty_out(vty, "Candidate-RP\nAddress: %pI4\nPriority: %u\n\n",
+ &scope->cand_rp_addrsel.run_addr, scope->cand_rp_prio);
+ vty_out(vty, "Next adv.: %lu msec\n",
+ pim_time_timer_remain_msec(scope->cand_rp_adv_timer));
+
+
+ return CMD_SUCCESS;
+}
+
+/* "show ip pim bsr candidate-rps": dump the received Candidate-RP
+ * database kept while acting as (candidate/elected) BSR.
+ *
+ * NOTE(review): [json] is accepted by the token string but ignored
+ * (use_json call commented out) - implement JSON output or drop the
+ * token.
+ */
+DEFUN (show_ip_pim_bsr_rpdb,
+ show_ip_pim_bsr_rpdb_cmd,
+ "show ip pim bsr candidate-rps [vrf NAME] [json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ "boot-strap router information\n"
+ "Candidate RPs\n"
+ VRF_CMD_HELP_STR
+ JSON_STR)
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);
+ //bool uj = use_json(argc, argv);
+
+ if (!vrf || !vrf->info)
+ return CMD_WARNING;
+
+ struct pim_instance *pim = vrf->info;
+ struct bsm_scope *scope = &pim->global_scope;
+
+ return pim_crp_db_show(vty, scope);
+}
+
+/* "show ip pim bsr groups": dump the BSR's group-range view of the
+ * Candidate-RP database.
+ *
+ * NOTE(review): [json] is accepted by the token string but ignored
+ * (use_json call commented out) - implement JSON output or drop the
+ * token.
+ */
+DEFUN (show_ip_pim_bsr_groups,
+ show_ip_pim_bsr_groups_cmd,
+ "show ip pim bsr groups [vrf NAME] [json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ "boot-strap router information\n"
+ "Candidate RP groups\n"
+ VRF_CMD_HELP_STR
+ JSON_STR)
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);
+ //bool uj = use_json(argc, argv);
+
+ if (!vrf || !vrf->info)
+ return CMD_WARNING;
+
+ struct pim_instance *pim = vrf->info;
+ struct bsm_scope *scope = &pim->global_scope;
+
+ return pim_crp_groups_show(vty, scope);
+}
+
DEFPY (show_ip_pim_statistics,
show_ip_pim_statistics_cmd,
"show ip pim [vrf NAME] statistics [interface WORD$word] [json$json]",
@@ -3326,6 +3434,206 @@ DEFPY (no_ip_pim_rp_prefix_list,
return pim_process_no_rp_plist_cmd(vty, rp_str, plist);
}
+/*
+ * "ip pim candidate-bsr": make this router a Candidate BSR (RFC 5059),
+ * configured through the frr-pim-candidate northbound model.
+ *
+ * The grammar string was truncated ("source }]"); restored the source
+ * address-selection alternation that the body's "any", "ifname" and
+ * "address_str" DEFPY variables require.
+ */
+DEFPY (ip_pim_candidate_bsr,
+ ip_pim_candidate_bsr_cmd,
+ "[no] ip pim candidate-bsr [{priority (0-255)|source <address A.B.C.D$address|interface IFNAME$ifname|loopback$loopback|any$any>}]",
+ NO_STR
+ IP_STR
+ "pim multicast routing\n"
+ "Make this router a Candidate BSR\n"
+ "BSR Priority (higher wins)\n"
+ "BSR Priority (higher wins)\n"
+ "Specify IP address for BSR operation\n"
+ "Local address to use\n"
+ "Local address to use\n"
+ "Interface to pick address from\n"
+ "Interface to pick address from\n"
+ "Pick highest loopback address (default)\n"
+ "Pick highest address from any interface\n")
+{
+ char cand_bsr_xpath[XPATH_MAXLEN];
+ const struct lyd_node *vrf_dnode;
+ const char *vrfname;
+
+ /* inside a "vrf" config node apply to that VRF, else the default */
+ if (vty->xpath_index) {
+ vrf_dnode = yang_dnode_get(vty->candidate_config->dnode,
+ VTY_CURR_XPATH);
+
+ if (!vrf_dnode) {
+ vty_out(vty,
+ "%% Failed to get vrf dnode in candidate db\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ vrfname = yang_dnode_get_string(vrf_dnode, "./name");
+ } else
+ vrfname = VRF_DEFAULT_NAME;
+
+ snprintf(cand_bsr_xpath, sizeof(cand_bsr_xpath), FRR_PIM_CAND_BSR_XPATH,
+ "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4");
+
+ if (no)
+ nb_cli_enqueue_change(vty, cand_bsr_xpath, NB_OP_DESTROY, NULL);
+ else {
+ char xpath2[XPATH_MAXLEN + 16];
+
+ nb_cli_enqueue_change(vty, cand_bsr_xpath, NB_OP_CREATE, NULL);
+
+ /* exactly one member of the YANG address-selection choice;
+  * if-loopback is the default when "source" is omitted */
+ if (any) {
+ snprintf(xpath2, sizeof(xpath2), "%s/if-any",
+ cand_bsr_xpath);
+ nb_cli_enqueue_change(vty, xpath2, NB_OP_CREATE, NULL);
+ } else if (ifname) {
+ snprintf(xpath2, sizeof(xpath2), "%s/interface",
+ cand_bsr_xpath);
+ nb_cli_enqueue_change(vty, xpath2, NB_OP_CREATE, ifname);
+ } else if (address_str) {
+ snprintf(xpath2, sizeof(xpath2), "%s/address",
+ cand_bsr_xpath);
+ nb_cli_enqueue_change(vty, xpath2, NB_OP_CREATE,
+ address_str);
+ } else {
+ snprintf(xpath2, sizeof(xpath2), "%s/if-loopback",
+ cand_bsr_xpath);
+ nb_cli_enqueue_change(vty, xpath2, NB_OP_CREATE, NULL);
+ }
+
+ if (priority_str) {
+ snprintf(xpath2, sizeof(xpath2), "%s/bsr-priority",
+ cand_bsr_xpath);
+ nb_cli_enqueue_change(vty, xpath2, NB_OP_MODIFY,
+ priority_str);
+ }
+ }
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/*
+ * "ip pim candidate-rp": make this router a Candidate RP (RFC 5059),
+ * configured through the frr-pim-candidate northbound model.
+ *
+ * The grammar string was truncated ("source }]"); restored the source
+ * address-selection alternation that the body's "any", "ifname" and
+ * "address_str" DEFPY variables require.
+ */
+DEFPY (ip_pim_candidate_rp,
+ ip_pim_candidate_rp_cmd,
+ "[no] ip pim candidate-rp [{priority (0-255)|interval (1-4294967295)|source <address A.B.C.D$address|interface IFNAME$ifname|loopback$loopback|any$any>}]",
+ NO_STR
+ IP_STR
+ "pim multicast routing\n"
+ "Make this router a Candidate RP\n"
+ "RP Priority (lower wins)\n"
+ "RP Priority (lower wins)\n"
+ "Advertisement interval (seconds)\n"
+ "Advertisement interval (seconds)\n"
+ "Specify IP address for RP operation\n"
+ "Local address to use\n"
+ "Local address to use\n"
+ "Interface to pick address from\n"
+ "Interface to pick address from\n"
+ "Pick highest loopback address (default)\n"
+ "Pick highest address from any interface\n")
+{
+ char cand_rp_xpath[XPATH_MAXLEN];
+ const struct lyd_node *vrf_dnode;
+ const char *vrfname;
+
+ /* inside a "vrf" config node apply to that VRF, else the default */
+ if (vty->xpath_index) {
+ vrf_dnode = yang_dnode_get(vty->candidate_config->dnode,
+ VTY_CURR_XPATH);
+
+ if (!vrf_dnode) {
+ vty_out(vty,
+ "%% Failed to get vrf dnode in candidate db\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ vrfname = yang_dnode_get_string(vrf_dnode, "./name");
+ } else
+ vrfname = VRF_DEFAULT_NAME;
+
+ snprintf(cand_rp_xpath, sizeof(cand_rp_xpath), FRR_PIM_CAND_RP_XPATH,
+ "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4");
+
+ if (no)
+ nb_cli_enqueue_change(vty, cand_rp_xpath, NB_OP_DESTROY, NULL);
+ else {
+ char xpath2[XPATH_MAXLEN + 24];
+
+ nb_cli_enqueue_change(vty, cand_rp_xpath, NB_OP_CREATE, NULL);
+
+ /* exactly one member of the YANG address-selection choice;
+  * if-loopback is the default when "source" is omitted */
+ if (any) {
+ snprintf(xpath2, sizeof(xpath2), "%s/if-any",
+ cand_rp_xpath);
+ nb_cli_enqueue_change(vty, xpath2, NB_OP_CREATE, NULL);
+ } else if (ifname) {
+ snprintf(xpath2, sizeof(xpath2), "%s/interface",
+ cand_rp_xpath);
+ nb_cli_enqueue_change(vty, xpath2, NB_OP_CREATE, ifname);
+ } else if (address_str) {
+ snprintf(xpath2, sizeof(xpath2), "%s/address",
+ cand_rp_xpath);
+ nb_cli_enqueue_change(vty, xpath2, NB_OP_CREATE,
+ address_str);
+ } else {
+ snprintf(xpath2, sizeof(xpath2), "%s/if-loopback",
+ cand_rp_xpath);
+ nb_cli_enqueue_change(vty, xpath2, NB_OP_CREATE, NULL);
+ }
+
+ if (priority_str) {
+ snprintf(xpath2, sizeof(xpath2), "%s/rp-priority",
+ cand_rp_xpath);
+ nb_cli_enqueue_change(vty, xpath2, NB_OP_MODIFY,
+ priority_str);
+ }
+ if (interval_str) {
+ snprintf(xpath2, sizeof(xpath2),
+ "%s/advertisement-interval", cand_rp_xpath);
+ nb_cli_enqueue_change(vty, xpath2, NB_OP_MODIFY,
+ interval_str);
+ }
+ }
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/*
+ * "ip pim candidate-rp group A.B.C.D/M": add/remove one group prefix
+ * to advertise in this router's Candidate-RP messages (group-list
+ * leaf-list in the frr-pim-candidate model).
+ */
+DEFPY (ip_pim_candidate_rp_group,
+ ip_pim_candidate_rp_group_cmd,
+ "[no] ip pim candidate-rp group A.B.C.D/M",
+ NO_STR
+ IP_STR
+ "pim multicast routing\n"
+ "Make this router a Candidate RP\n"
+ "Configure groups to become candidate RP for\n"
+ "Multicast group prefix\n")
+{
+ char cand_rp_xpath[XPATH_MAXLEN];
+ const struct lyd_node *vrf_dnode;
+ const char *vrfname;
+
+ /* inside a "vrf" config node apply to that VRF, else the default */
+ if (vty->xpath_index) {
+ vrf_dnode = yang_dnode_get(vty->candidate_config->dnode,
+ VTY_CURR_XPATH);
+
+ if (!vrf_dnode) {
+ vty_out(vty,
+ "%% Failed to get vrf dnode in candidate db\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ vrfname = yang_dnode_get_string(vrf_dnode, "./name");
+ } else
+ vrfname = VRF_DEFAULT_NAME;
+
+ /* group_str is the leaf-list entry value itself */
+ snprintf(cand_rp_xpath, sizeof(cand_rp_xpath),
+ FRR_PIM_CAND_RP_XPATH "/group-list", "frr-pim:pimd", "pim",
+ vrfname, "frr-routing:ipv4");
+
+ if (no)
+ nb_cli_enqueue_change(vty, cand_rp_xpath, NB_OP_DESTROY,
+ group_str);
+ else
+ nb_cli_enqueue_change(vty, cand_rp_xpath, NB_OP_CREATE,
+ group_str);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
DEFUN (ip_pim_ssm_prefix_list,
ip_pim_ssm_prefix_list_cmd,
"ip pim ssm prefix-list PREFIXLIST4_NAME",
@@ -6490,6 +6798,14 @@ void pim_cmd_init(void)
install_element(CONFIG_NODE, &no_ip_igmp_group_watermark_cmd);
install_element(VRF_NODE, &no_ip_igmp_group_watermark_cmd);
+ install_element(CONFIG_NODE, &ip_pim_candidate_bsr_cmd);
+ install_element(VRF_NODE, &ip_pim_candidate_bsr_cmd);
+
+ install_element(CONFIG_NODE, &ip_pim_candidate_rp_cmd);
+ install_element(VRF_NODE, &ip_pim_candidate_rp_cmd);
+ install_element(CONFIG_NODE, &ip_pim_candidate_rp_group_cmd);
+ install_element(VRF_NODE, &ip_pim_candidate_rp_group_cmd);
+
install_element(INTERFACE_NODE, &interface_ip_igmp_cmd);
install_element(INTERFACE_NODE, &interface_no_ip_igmp_cmd);
install_element(INTERFACE_NODE, &interface_ip_igmp_join_cmd);
@@ -6589,6 +6905,9 @@ void pim_cmd_init(void)
install_element(VIEW_NODE, &show_ip_pim_nexthop_lookup_cmd);
install_element(VIEW_NODE, &show_ip_pim_bsrp_cmd);
install_element(VIEW_NODE, &show_ip_pim_bsm_db_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_cand_rp_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_bsr_rpdb_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_bsr_groups_cmd);
install_element(VIEW_NODE, &show_ip_pim_statistics_cmd);
install_element(ENABLE_NODE, &clear_ip_mroute_count_cmd);
diff --git a/pimd/pim_cmd_common.c b/pimd/pim_cmd_common.c
index 5e50a09355cf..cdd4fbf8c94c 100644
--- a/pimd/pim_cmd_common.c
+++ b/pimd/pim_cmd_common.c
@@ -5293,6 +5293,12 @@ void pim_show_bsr(struct pim_instance *pim, struct vty *vty, bool uj)
case ACCEPT_PREFERRED:
strlcpy(bsr_state, "ACCEPT_PREFERRED", sizeof(bsr_state));
break;
+ case BSR_PENDING:
+ strlcpy(bsr_state, "BSR_PENDING", sizeof(bsr_state));
+ break;
+ case BSR_ELECTED:
+ strlcpy(bsr_state, "BSR_ELECTED", sizeof(bsr_state));
+ break;
default:
strlcpy(bsr_state, "", sizeof(bsr_state));
}
diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c
index dcb6116012e5..79adc7ca8b72 100644
--- a/pimd/pim_iface.c
+++ b/pimd/pim_iface.c
@@ -1683,6 +1683,8 @@ static int pim_ifp_up(struct interface *ifp)
}
}
}
+
+ pim_cand_addrs_changed();
return 0;
}
@@ -1719,6 +1721,7 @@ static int pim_ifp_down(struct interface *ifp)
pim_ifstat_reset(ifp);
}
+ pim_cand_addrs_changed();
return 0;
}
diff --git a/pimd/pim_main.c b/pimd/pim_main.c
index 400db396c289..f88aca719eda 100644
--- a/pimd/pim_main.c
+++ b/pimd/pim_main.c
@@ -59,6 +59,7 @@ struct zebra_privs_t pimd_privs = {
.cap_num_p = array_size(_caps_p),
.cap_num_i = 0};
+/* clang-format off */
static const struct frr_yang_module_info *const pimd_yang_modules[] = {
&frr_filter_info,
&frr_interface_info,
@@ -67,10 +68,10 @@ static const struct frr_yang_module_info *const pimd_yang_modules[] = {
&frr_routing_info,
&frr_pim_info,
&frr_pim_rp_info,
+ &frr_pim_candidate_info,
&frr_gmp_info,
};
-/* clang-format off */
FRR_DAEMON_INFO(pimd, PIM,
.vty_port = PIMD_VTY_PORT,
.proghelp = "Implementation of the PIM routing protocol.",
diff --git a/pimd/pim_msg.h b/pimd/pim_msg.h
index 56923b7ec18f..1f916af88196 100644
--- a/pimd/pim_msg.h
+++ b/pimd/pim_msg.h
@@ -148,6 +148,7 @@ struct pim_encoded_source_ipv6 {
typedef struct pim_encoded_ipv4_unicast pim_encoded_unicast;
typedef struct pim_encoded_group_ipv4 pim_encoded_group;
typedef struct pim_encoded_source_ipv4 pim_encoded_source;
+#define PIM_MSG_ADDRESS_FAMILY PIM_MSG_ADDRESS_FAMILY_IPV4
typedef struct ip ipv_hdr;
#define IPV_SRC(ip_hdr) ((ip_hdr))->ip_src
#define IPV_DST(ip_hdr) ((ip_hdr))->ip_dst
@@ -156,6 +157,7 @@ typedef struct ip ipv_hdr;
typedef struct pim_encoded_ipv6_unicast pim_encoded_unicast;
typedef struct pim_encoded_group_ipv6 pim_encoded_group;
typedef struct pim_encoded_source_ipv6 pim_encoded_source;
+#define PIM_MSG_ADDRESS_FAMILY PIM_MSG_ADDRESS_FAMILY_IPV6
typedef struct ip6_hdr ipv_hdr;
#define IPV_SRC(ip_hdr) ((ip_hdr))->ip6_src
#define IPV_DST(ip_hdr) ((ip_hdr))->ip6_dst
diff --git a/pimd/pim_nb.c b/pimd/pim_nb.c
index 339935f81a57..e08d88f882a9 100644
--- a/pimd/pim_nb.c
+++ b/pimd/pim_nb.c
@@ -358,6 +358,112 @@ const struct frr_yang_module_info frr_pim_rp_info = {
}
};
+/* Northbound callback table for the frr-pim-candidate YANG module:
+ * Candidate-BSR and Candidate-RP config augmenting
+ * .../frr-pim:pim/address-family.  All address-selection cases
+ * (address / interface / if-loopback / if-any) funnel into the shared
+ * *_addrsel_* callbacks.
+ */
+const struct frr_yang_module_info frr_pim_candidate_info = {
+ .name = "frr-pim-candidate",
+ .nodes = {
+ /* Candidate-BSR */
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-bsr",
+ .cbs = {
+ .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_create,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-bsr/bsr-priority",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_priority_modify,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-bsr/address",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_modify,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-bsr/interface",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_modify,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-bsr/if-loopback",
+ .cbs = {
+ .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_create,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-bsr/if-any",
+ .cbs = {
+ .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_create,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_destroy,
+ }
+ },
+
+ /* Candidate-RP */
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-rp",
+ .cbs = {
+ .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_create,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-rp/rp-priority",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_priority_modify,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-rp/advertisement-interval",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_adv_interval_modify,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-rp/group-list",
+ .cbs = {
+ .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_group_list_create,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_group_list_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-rp/address",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_modify,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-rp/interface",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_modify,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-rp/if-loopback",
+ .cbs = {
+ .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_create,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-rp/if-any",
+ .cbs = {
+ .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_create,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_destroy,
+ }
+ },
+ {
+ .xpath = NULL,
+ },
+ }
+};
+
/* clang-format off */
const struct frr_yang_module_info frr_gmp_info = {
.name = "frr-gmp",
diff --git a/pimd/pim_nb.h b/pimd/pim_nb.h
index 0321d076f0da..e98de83b8550 100644
--- a/pimd/pim_nb.h
+++ b/pimd/pim_nb.h
@@ -9,6 +9,7 @@
extern const struct frr_yang_module_info frr_pim_info;
extern const struct frr_yang_module_info frr_pim_rp_info;
+extern const struct frr_yang_module_info frr_pim_candidate_info;
extern const struct frr_yang_module_info frr_gmp_info;
/* frr-pim prototypes*/
@@ -152,6 +153,40 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_static_rp_rp_list_prefix_list_destroy(
struct nb_cb_destroy_args *args);
+/* frr-pim-candidate: Candidate-BSR northbound callback prototypes */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_create(
+ struct nb_cb_create_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_destroy(
+ struct nb_cb_destroy_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_priority_modify(
+ struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_create(
+ struct nb_cb_create_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_modify(
+ struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_destroy(
+ struct nb_cb_destroy_args *args);
+
+/* frr-pim-candidate: Candidate-RP northbound callback prototypes */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_create(
+ struct nb_cb_create_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_destroy(
+ struct nb_cb_destroy_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_priority_modify(
+ struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_adv_interval_modify(
+ struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_group_list_create(
+ struct nb_cb_create_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_group_list_destroy(
+ struct nb_cb_destroy_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_create(
+ struct nb_cb_create_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_modify(
+ struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_destroy(
+ struct nb_cb_destroy_args *args);
+
/* frr-gmp prototypes*/
int lib_interface_gmp_address_family_create(
struct nb_cb_create_args *args);
@@ -211,6 +246,16 @@ int routing_control_plane_protocols_name_validate(
"control-plane-protocol[type='%s'][name='%s'][vrf='%s']/" \
"frr-pim:pim/address-family[address-family='%s']/" \
"frr-pim-rp:rp/static-rp/rp-list[rp-address='%s']"
+#define FRR_PIM_CAND_BSR_XPATH \
+ "/frr-routing:routing/control-plane-protocols/" \
+ "control-plane-protocol[type='%s'][name='%s'][vrf='%s']/" \
+ "frr-pim:pim/address-family[address-family='%s']/" \
+ "frr-pim-candidate:candidate-bsr"
+#define FRR_PIM_CAND_RP_XPATH \
+ "/frr-routing:routing/control-plane-protocols/" \
+ "control-plane-protocol[type='%s'][name='%s'][vrf='%s']/" \
+ "frr-pim:pim/address-family[address-family='%s']/" \
+ "frr-pim-candidate:candidate-rp"
#define FRR_GMP_INTERFACE_XPATH \
"./frr-gmp:gmp/address-family[address-family='%s']"
#define FRR_GMP_ENABLE_XPATH \
diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c
index 4f1a4a18524e..1e6eafc9a576 100644
--- a/pimd/pim_nb_config.c
+++ b/pimd/pim_nb_config.c
@@ -2495,6 +2495,391 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
return NB_OK;
}
+/* Decode the address-selection choice under a candidate-bsr or
+ * candidate-rp container into 'addrsel'.  Exactly one of
+ * if-any / address / interface / if-loopback is expected under 'node'.
+ * NOTE(review): if none is present, cfg_mode is left unchanged while
+ * cfg_ifname/cfg_addr are wiped - confirm the YANG choice (with its
+ * if-loopback default) guarantees one case always exists.
+ */
+static void yang_addrsel(struct cand_addrsel *addrsel,
+ const struct lyd_node *node)
+{
+ memset(addrsel->cfg_ifname, 0, sizeof(addrsel->cfg_ifname));
+ addrsel->cfg_addr = PIMADDR_ANY;
+
+ if (yang_dnode_exists(node, "if-any")) {
+ addrsel->cfg_mode = CAND_ADDR_ANY;
+ } else if (yang_dnode_exists(node, "address")) {
+ addrsel->cfg_mode = CAND_ADDR_EXPLICIT;
+ yang_dnode_get_pimaddr(&addrsel->cfg_addr, node, "address");
+ } else if (yang_dnode_exists(node, "interface")) {
+ addrsel->cfg_mode = CAND_ADDR_IFACE;
+ strlcpy(addrsel->cfg_ifname,
+ yang_dnode_get_string(node, "interface"),
+ sizeof(addrsel->cfg_ifname));
+ } else if (yang_dnode_exists(node, "if-loopback")) {
+ addrsel->cfg_mode = CAND_ADDR_LO;
+ }
+}
+
+/* Re-read BSR address selection from the candidate-bsr container and
+ * apply it to the running Candidate-BSR state. */
+static int candidate_bsr_addrsel(struct bsm_scope *scope,
+ const struct lyd_node *cand_bsr_node)
+{
+ yang_addrsel(&scope->bsr_addrsel, cand_bsr_node);
+ pim_cand_bsr_apply(scope);
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-bsr
+ * Enable Candidate-BSR operation for the VRF's global BSM scope.
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_create(
+ struct nb_cb_create_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+ struct bsm_scope *scope;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ scope = &pim->global_scope;
+
+ scope->bsr_addrsel.cfg_enable = true;
+ /* presumably bsr-priority has a YANG default, so this read
+  * is always valid - confirm against the model */
+ scope->cand_bsr_prio = yang_dnode_get_uint8(args->dnode,
+ "bsr-priority");
+
+ candidate_bsr_addrsel(scope, args->dnode);
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-bsr
+ * Disable Candidate-BSR operation; pim_cand_bsr_apply() tears down the
+ * running state once cfg_enable is cleared.
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+ struct bsm_scope *scope;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ scope = &pim->global_scope;
+
+ scope->bsr_addrsel.cfg_enable = false;
+
+ pim_cand_bsr_apply(scope);
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: .../frr-pim-candidate:candidate-bsr/bsr-priority
+ * Update the advertised BSR priority and re-apply the candidate state.
+ *
+ * Fixed: the previous code passed the bsr-priority *leaf* dnode to
+ * candidate_bsr_addrsel() (flagged by a CPP_NOTICE FIXME), which made
+ * yang_addrsel() look for the address/interface/... children under the
+ * wrong node and wipe the configured address selection.  Resolve the
+ * parent candidate-bsr container instead, as the addrsel callbacks do.
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_priority_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+ struct bsm_scope *scope;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ scope = &pim->global_scope;
+
+ scope->cand_bsr_prio = yang_dnode_get_uint8(args->dnode, NULL);
+
+ /* re-apply so the new priority is advertised */
+ candidate_bsr_addrsel(scope,
+ yang_dnode_get_parent(args->dnode,
+ "candidate-bsr"));
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: .../frr-pim-candidate:candidate-bsr/{if-loopback,if-any}
+ * One member of the address-selection choice was created; re-evaluate
+ * selection from the parent candidate-bsr container.
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_create(
+ struct nb_cb_create_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+ struct bsm_scope *scope;
+ const struct lyd_node *cand_bsr_node;
+
+ cand_bsr_node = yang_dnode_get_parent(args->dnode, "candidate-bsr");
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ scope = &pim->global_scope;
+
+ return candidate_bsr_addrsel(scope, cand_bsr_node);
+ }
+ return NB_OK;
+}
+
+/*
+ * XPath: .../frr-pim-candidate:candidate-bsr/{address,interface}
+ * An address-selection leaf changed; re-evaluate selection from the
+ * parent candidate-bsr container.
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+ struct bsm_scope *scope;
+ const struct lyd_node *cand_bsr_node;
+
+ cand_bsr_node = yang_dnode_get_parent(args->dnode, "candidate-bsr");
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ scope = &pim->global_scope;
+
+ return candidate_bsr_addrsel(scope, cand_bsr_node);
+ }
+ return NB_OK;
+}
+
+/*
+ * XPath: .../frr-pim-candidate:candidate-bsr/<addr-selection case>
+ * Destroy of one choice member is always followed by a CREATE/MODIFY
+ * of the replacing case (or of the whole container), so no work here.
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ /* nothing to do here, we'll get a CREATE for something else */
+ return NB_OK;
+}
+
+/* Re-read Candidate-RP address selection from the candidate-rp
+ * container and apply it to the running state. */
+static int candidate_rp_addrsel(struct bsm_scope *scope,
+ const struct lyd_node *cand_rp_node)
+{
+ yang_addrsel(&scope->cand_rp_addrsel, cand_rp_node);
+ pim_cand_rp_apply(scope);
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-rp
+ * Enable Candidate-RP operation for the VRF's global BSM scope.
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_create(
+ struct nb_cb_create_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+ struct bsm_scope *scope;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ scope = &pim->global_scope;
+
+ scope->cand_rp_addrsel.cfg_enable = true;
+ /* presumably rp-priority and advertisement-interval carry
+  * YANG defaults, so these reads are always valid - confirm */
+ scope->cand_rp_prio = yang_dnode_get_uint8(args->dnode,
+ "rp-priority");
+ scope->cand_rp_interval =
+ yang_dnode_get_uint32(args->dnode,
+ "advertisement-interval");
+
+ candidate_rp_addrsel(scope, args->dnode);
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-rp
+ * Disable Candidate-RP operation; pim_cand_rp_apply() tears down the
+ * running state once cfg_enable is cleared.
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+ struct bsm_scope *scope;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ scope = &pim->global_scope;
+
+ scope->cand_rp_addrsel.cfg_enable = false;
+
+ pim_cand_rp_apply(scope);
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: .../frr-pim-candidate:candidate-rp/rp-priority
+ * Update the advertised RP priority and trigger a new advertisement.
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_priority_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+ struct bsm_scope *scope;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ scope = &pim->global_scope;
+
+ scope->cand_rp_prio = yang_dnode_get_uint8(args->dnode, NULL);
+
+ pim_cand_rp_trigger(scope);
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: .../frr-pim-candidate:candidate-rp/advertisement-interval
+ * Update the C-RP-Adv interval (seconds) and trigger a new
+ * advertisement so the timer is rescheduled.
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_adv_interval_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+ struct bsm_scope *scope;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ scope = &pim->global_scope;
+
+ scope->cand_rp_interval = yang_dnode_get_uint32(args->dnode,
+ NULL);
+
+ pim_cand_rp_trigger(scope);
+ break;
+ }
+
+ return NB_OK;
+}
+
+/* this file is compiled once per address family; pick the matching
+ * prefix accessor */
+#if PIM_IPV == 4
+#define yang_dnode_get_pim_p yang_dnode_get_ipv4p
+#else
+#define yang_dnode_get_pim_p yang_dnode_get_ipv6p
+#endif
+
+/*
+ * XPath: .../frr-pim-candidate:candidate-rp/group-list
+ * Add one group prefix to the set advertised in C-RP-Adv messages.
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_group_list_create(
+ struct nb_cb_create_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+ struct bsm_scope *scope;
+ prefix_pim p;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ scope = &pim->global_scope;
+
+ yang_dnode_get_pim_p(&p, args->dnode, ".");
+ pim_cand_rp_grp_add(scope, &p);
+ break;
+ }
+ return NB_OK;
+}
+
+/*
+ * XPath: .../frr-pim-candidate:candidate-rp/group-list
+ * Remove one group prefix from the advertised set.
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_group_list_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+ struct bsm_scope *scope;
+ prefix_pim p;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ scope = &pim->global_scope;
+
+ yang_dnode_get_pim_p(&p, args->dnode, ".");
+ pim_cand_rp_grp_del(scope, &p);
+ break;
+ }
+ return NB_OK;
+}
+
+/* Shared body for the candidate-rp address-selection callbacks: step
+ * up from the changed choice-member dnode to its candidate-rp parent
+ * and re-apply selection on APPLY.
+ * NOTE(review): this uses bare lyd_parent() while the BSR counterpart
+ * uses yang_dnode_get_parent(..., "candidate-bsr") - presumably
+ * equivalent for direct children; confirm and unify.
+ */
+static int candidate_rp_addrsel_common(enum nb_event event,
+ const struct lyd_node *dnode)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+ struct bsm_scope *scope;
+
+ dnode = lyd_parent(dnode);
+
+ switch (event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(dnode, NULL, true);
+ pim = vrf->info;
+ scope = &pim->global_scope;
+
+ candidate_rp_addrsel(scope, dnode);
+ break;
+ }
+ return NB_OK;
+}
+
+/* XPath: .../candidate-rp/{if-loopback,if-any} (choice member created) */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_create(
+ struct nb_cb_create_args *args)
+{
+ return candidate_rp_addrsel_common(args->event, args->dnode);
+}
+
+/* XPath: .../candidate-rp/{address,interface} (leaf value changed) */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_modify(
+ struct nb_cb_modify_args *args)
+{
+ return candidate_rp_addrsel_common(args->event, args->dnode);
+}
+
+/* XPath: .../candidate-rp/<addr-selection case> being removed */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ /* nothing to do here - we'll get a create or modify event too */
+ return NB_OK;
+}
+
/*
* XPath: /frr-interface:lib/interface/frr-gmp:gmp/address-family
*/
diff --git a/pimd/pim_nht.c b/pimd/pim_nht.c
index 32cdf4bf8289..57dcff3b4710 100644
--- a/pimd/pim_nht.c
+++ b/pimd/pim_nht.c
@@ -161,18 +161,27 @@ void pim_nht_bsr_add(struct pim_instance *pim, pim_addr addr)
pnc->bsr_count++;
}
+/* Register nexthop-tracking interest in a Candidate-RP address (one
+ * refcount per BSM use).  Returns true if a valid nexthop is already
+ * known for the address.  pim_nht_get() creates the cache entry if
+ * needed, mirroring pim_nht_bsr_add() above.
+ */
+bool pim_nht_candrp_add(struct pim_instance *pim, pim_addr addr)
+{
+ struct pim_nexthop_cache *pnc;
+
+ pnc = pim_nht_get(pim, addr);
+
+ pnc->candrp_count++;
+ return CHECK_FLAG(pnc->flags, PIM_NEXTHOP_VALID);
+}
+
static void pim_nht_drop_maybe(struct pim_instance *pim,
struct pim_nexthop_cache *pnc)
{
if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s: NHT %pPA(%s) rp_list count:%d upstream count:%ld BSR count:%u",
- __func__, &pnc->rpf.rpf_addr, pim->vrf->name,
- pnc->rp_list->count, pnc->upstream_hash->count,
- pnc->bsr_count);
+ zlog_debug("%s: NHT %pPA(%s) rp_list count:%d upstream count:%ld BSR count:%u Cand-RP count:%u",
+ __func__, &pnc->rpf.rpf_addr, pim->vrf->name,
+ pnc->rp_list->count, pnc->upstream_hash->count,
+ pnc->bsr_count, pnc->candrp_count);
- if (pnc->rp_list->count == 0 && pnc->upstream_hash->count == 0
- && pnc->bsr_count == 0) {
+ if (pnc->rp_list->count == 0 && pnc->upstream_hash->count == 0 &&
+ pnc->bsr_count == 0 && pnc->candrp_count == 0) {
struct zclient *zclient = pim_zebra_zclient_get();
pim_sendmsg_zebra_rnh(pim, zclient, pnc,
@@ -258,6 +267,27 @@ void pim_nht_bsr_del(struct pim_instance *pim, pim_addr addr)
pim_nht_drop_maybe(pim, pnc);
}
+/* Drop one Candidate-RP refcount from the nexthop cache entry for
+ * 'addr'; the entry itself is released by pim_nht_drop_maybe() once
+ * all users (RPs, upstreams, BSR, C-RP) are gone.
+ */
+void pim_nht_candrp_del(struct pim_instance *pim, pim_addr addr)
+{
+ struct pim_nexthop_cache *pnc = NULL;
+ struct pim_nexthop_cache lookup;
+
+ lookup.rpf.rpf_addr = addr;
+
+ pnc = hash_lookup(pim->rpf_hash, &lookup);
+
+ if (!pnc) {
+ zlog_warn("attempting to delete nonexistent NHT C-RP entry %pPA",
+ &addr);
+ return;
+ }
+
+ /* refcount underflow would indicate an add/del imbalance */
+ assertf(pnc->candrp_count > 0, "addr=%pPA", &addr);
+ pnc->candrp_count--;
+
+ pim_nht_drop_maybe(pim, pnc);
+}
+
bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr,
struct interface *src_ifp, pim_addr src_ip)
{
@@ -900,6 +930,9 @@ void pim_nexthop_update(struct vrf *vrf, struct prefix *match,
pim_update_rp_nh(pim, pnc);
if (pnc->upstream_hash->count)
pim_update_upstream_nh(pim, pnc);
+
+ if (pnc->candrp_count)
+ pim_crp_nht_update(pim, pnc);
}
int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
diff --git a/pimd/pim_nht.h b/pimd/pim_nht.h
index a1feb76e3b73..e74b375dc6d2 100644
--- a/pimd/pim_nht.h
+++ b/pimd/pim_nht.h
@@ -38,6 +38,7 @@ struct pim_nexthop_cache {
* same BSR
*/
uint32_t bsr_count;
+ uint32_t candrp_count;
};
struct pnc_hash_walk_data {
@@ -71,4 +72,10 @@ void pim_nht_bsr_del(struct pim_instance *pim, pim_addr bsr_addr);
bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr,
struct interface *src_ifp, pim_addr src_ip);
void pim_upstream_nh_if_update(struct pim_instance *pim, struct interface *ifp);
+
+/* wrappers for usage with Candidate RPs in BSMs */
+bool pim_nht_candrp_add(struct pim_instance *pim, pim_addr addr);
+void pim_nht_candrp_del(struct pim_instance *pim, pim_addr addr);
+void pim_crp_nht_update(struct pim_instance *pim, struct pim_nexthop_cache *pnc);
+
#endif
diff --git a/pimd/pim_pim.c b/pimd/pim_pim.c
index 1bc265b138f6..770cc92e91c1 100644
--- a/pimd/pim_pim.c
+++ b/pimd/pim_pim.c
@@ -139,7 +139,7 @@ static bool pim_pkt_dst_addr_ok(enum pim_msg_type type, pim_addr addr)
}
int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len,
- pim_sgaddr sg)
+ pim_sgaddr sg, bool is_mcast)
{
struct iovec iov[2], *iovp = iov;
#if PIM_IPV == 4
@@ -274,6 +274,16 @@ int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len,
return -1;
}
+ if (!is_mcast) {
+ if (header->type == PIM_MSG_TYPE_CANDIDATE)
+ return pim_crp_process(ifp, &sg, pim_msg, pim_msg_len);
+
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug(
+ "ignoring link traffic on BSR unicast socket");
+ return -1;
+ }
+
switch (header->type) {
case PIM_MSG_TYPE_HELLO:
return pim_hello_recv(ifp, sg.src, pim_msg + PIM_MSG_HEADER_LEN,
@@ -322,6 +332,13 @@ int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len,
return pim_bsm_process(ifp, &sg, pim_msg, pim_msg_len, no_fwd);
break;
+ case PIM_MSG_TYPE_CANDIDATE:
+ /* return pim_crp_process(ifp, &sg, pim_msg, pim_msg_len); */
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug(
+ "ignoring Candidate-RP packet on multicast socket");
+ return 0;
+
default:
if (PIM_DEBUG_PIM_PACKETS) {
zlog_debug(
@@ -395,7 +412,7 @@ static void pim_sock_read(struct event *t)
sg.grp = ((struct sockaddr_in6 *)&to)->sin6_addr;
#endif
- int fail = pim_pim_packet(ifp, buf, len, sg);
+ int fail = pim_pim_packet(ifp, buf, len, sg, true);
if (fail) {
if (PIM_DEBUG_PIM_PACKETS)
zlog_debug("%s: pim_pim_packet() return=%d",
@@ -636,17 +653,15 @@ static int pim_msg_send_frame(pim_addr src, pim_addr dst, ifindex_t ifindex,
int pim_msg_send(int fd, pim_addr src, pim_addr dst, uint8_t *pim_msg,
int pim_msg_size, struct interface *ifp)
{
- struct pim_interface *pim_ifp;
-
+ if (ifp) {
+ struct pim_interface *pim_ifp = ifp->info;
- pim_ifp = ifp->info;
-
- if (pim_ifp->pim_passive_enable) {
- if (PIM_DEBUG_PIM_PACKETS)
- zlog_debug(
- "skip sending PIM message on passive interface %s",
- ifp->name);
- return 0;
+ if (pim_ifp->pim_passive_enable) {
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug("skip sending PIM message on passive interface %s",
+ ifp->name);
+ return 0;
+ }
}
#if PIM_IPV == 4
@@ -710,7 +725,7 @@ int pim_msg_send(int fd, pim_addr src, pim_addr dst, uint8_t *pim_msg,
if (PIM_DEBUG_PIM_PACKETS)
zlog_debug("%s: to %pPA on %s: msg_size=%d checksum=%x",
- __func__, &dst, ifp->name, pim_msg_size,
+ __func__, &dst, ifp ? ifp->name : "*", pim_msg_size,
header->checksum);
if (PIM_DEBUG_PIM_PACKETDUMP_SEND) {
@@ -718,7 +733,7 @@ int pim_msg_send(int fd, pim_addr src, pim_addr dst, uint8_t *pim_msg,
}
pim_msg_send_frame(fd, (char *)buffer, sendlen, (struct sockaddr *)&to,
- tolen, ifp->name);
+ tolen, ifp ? ifp->name : "*");
return 0;
#else
@@ -727,7 +742,7 @@ int pim_msg_send(int fd, pim_addr src, pim_addr dst, uint8_t *pim_msg,
iovector[0].iov_base = pim_msg;
iovector[0].iov_len = pim_msg_size;
- pim_msg_send_frame(src, dst, ifp->ifindex, &iovector[0], fd);
+ pim_msg_send_frame(src, dst, ifp ? ifp->ifindex : 0, &iovector[0], fd);
return 0;
#endif
diff --git a/pimd/pim_pim.h b/pimd/pim_pim.h
index 35e693013ac1..13ccbb81b317 100644
--- a/pimd/pim_pim.h
+++ b/pimd/pim_pim.h
@@ -42,7 +42,7 @@ void pim_hello_restart_now(struct interface *ifp);
void pim_hello_restart_triggered(struct interface *ifp);
int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len,
- pim_sgaddr sg);
+ pim_sgaddr sg, bool is_mcast);
int pim_msg_send(int fd, pim_addr src, pim_addr dst, uint8_t *pim_msg,
int pim_msg_size, struct interface *ifp);
diff --git a/pimd/pim_rp.c b/pimd/pim_rp.c
index d8d25712a30f..b0fb8a509af2 100644
--- a/pimd/pim_rp.c
+++ b/pimd/pim_rp.c
@@ -543,6 +543,9 @@ int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
pim_zebra_update_all_interfaces(pim);
pim_rp_check_interfaces(pim, rp_all);
+ if (rp_all->i_am_rp && PIM_DEBUG_PIM_NHT_RP)
+ zlog_debug("new RP %pPA for %pFX is ourselves",
+ &rp_all->rp.rpf_addr, &rp_all->group);
pim_rp_refresh_group_to_rp_mapping(pim);
pim_find_or_track_nexthop(pim, nht_p, NULL, rp_all,
NULL);
@@ -634,6 +637,9 @@ int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
pim_zebra_update_all_interfaces(pim);
pim_rp_check_interfaces(pim, rp_info);
+ if (rp_info->i_am_rp && PIM_DEBUG_PIM_NHT_RP)
+ zlog_debug("new RP %pPA for %pFX is ourselves",
+ &rp_info->rp.rpf_addr, &rp_info->group);
pim_rp_refresh_group_to_rp_mapping(pim);
/* Register addr with Zebra NHT */
diff --git a/pimd/pim_vty.c b/pimd/pim_vty.c
index 0f6547ee2e9a..f04f34032fea 100644
--- a/pimd/pim_vty.c
+++ b/pimd/pim_vty.c
@@ -188,6 +188,7 @@ int pim_global_config_write_worker(struct pim_instance *pim, struct vty *vty)
}
writes += pim_rp_config_write(pim, vty, spaces);
+ writes += pim_cand_config_write(pim, vty, spaces);
if (pim->vrf->vrf_id == VRF_DEFAULT) {
if (router->register_suppress_time
diff --git a/pimd/pim_zebra.c b/pimd/pim_zebra.c
index 04cd087e6a07..3374a5b3ed65 100644
--- a/pimd/pim_zebra.c
+++ b/pimd/pim_zebra.c
@@ -154,6 +154,8 @@ static int pim_zebra_if_address_add(ZAPI_CALLBACK_ARGS)
pim_if_addr_add_all(ifp);
}
}
+
+ pim_cand_addrs_changed();
return 0;
}
@@ -202,6 +204,8 @@ static int pim_zebra_if_address_del(ZAPI_CALLBACK_ARGS)
}
connected_free(&c);
+
+ pim_cand_addrs_changed();
return 0;
}
diff --git a/pimd/subdir.am b/pimd/subdir.am
index 1e787a3525c6..48f1e3b7243d 100644
--- a/pimd/subdir.am
+++ b/pimd/subdir.am
@@ -17,6 +17,7 @@ pim_common = \
pimd/pim_assert.c \
pimd/pim_bfd.c \
pimd/pim_bsm.c \
+ pimd/pim_bsr_rpdb.c \
pimd/pim_cmd_common.c \
pimd/pim_errors.c \
pimd/pim_hello.c \
@@ -76,6 +77,7 @@ pimd_pimd_SOURCES = \
nodist_pimd_pimd_SOURCES = \
yang/frr-pim.yang.c \
yang/frr-pim-rp.yang.c \
+ yang/frr-pim-candidate.yang.c \
yang/frr-gmp.yang.c \
# end
@@ -89,6 +91,7 @@ pimd_pim6d_SOURCES = \
nodist_pimd_pim6d_SOURCES = \
yang/frr-pim.yang.c \
yang/frr-pim-rp.yang.c \
+ yang/frr-pim-candidate.yang.c \
yang/frr-gmp.yang.c \
# end
@@ -160,12 +163,12 @@ clippy_scan += \
# end
pimd_pimd_CFLAGS = $(AM_CFLAGS) -DPIM_IPV=4
-pimd_pimd_LDADD = lib/libfrr.la $(LIBCAP)
+pimd_pimd_LDADD = lib/libfrr.la $(LIBCAP) -lm
if PIM6D
sbin_PROGRAMS += pimd/pim6d
pimd_pim6d_CFLAGS = $(AM_CFLAGS) -DPIM_IPV=6
-pimd_pim6d_LDADD = lib/libfrr.la $(LIBCAP)
+pimd_pim6d_LDADD = lib/libfrr.la $(LIBCAP) -lm
endif
pimd_test_igmpv3_join_CFLAGS = $(AM_CFLAGS) -DPIM_IPV=4
diff --git a/yang/frr-pim-candidate.yang b/yang/frr-pim-candidate.yang
new file mode 100644
index 000000000000..09d0a0635368
--- /dev/null
+++ b/yang/frr-pim-candidate.yang
@@ -0,0 +1,174 @@
+module frr-pim-candidate {
+ yang-version "1.1";
+ namespace "http://frrouting.org/yang/pim-candidate";
+
+ prefix frr-pim-candidate;
+
+ import frr-interface {
+ prefix frr-interface;
+ }
+
+ import ietf-inet-types {
+ prefix "inet";
+ }
+
+ import frr-routing {
+ prefix "frr-rt";
+ }
+
+ import frr-pim {
+ prefix "frr-pim";
+ }
+
+ import frr-route-types {
+ prefix frr-route-types;
+ }
+
+ organization
+ "FRRouting";
+
+ contact
+ "FRR Users List: <mailto:frog@lists.frrouting.org>
+ FRR Development List: <mailto:dev@lists.frrouting.org>";
+
+ description
+ "The module defines a collection of YANG definitions common for
+ all PIM (Protocol Independent Multicast) Candidate RP & BSR
+ (Rendezvous Point & Bootstrap Router) operation.
+
+ Copyright 2020 FRRouting
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.";
+
+ revision 2021-05-04 {
+ description
+ "Initial revision.";
+ reference
+ "RFC 5059: Bootstrap Router (BSR) Mechanism for Protocol Independent Multicast (PIM)";
+ }
+
+ /*
+ * Groupings
+ */
+ grouping candidate-bsr-container {
+ description
+ "Grouping of Candidate BSR settings.";
+
+ container candidate-bsr {
+ presence
+ "Enable router to be a Candidate BSR.";
+
+ description
+ "Candidate BSR settings";
+
+ leaf bsr-priority {
+ type uint8;
+ default "64";
+ description
+ "BSR priority for this router, higher values win.";
+ }
+
+ choice source-address-or-interface {
+ description "IP address to use for BSR operation";
+ default if-loopback;
+ leaf address {
+ type inet:ip-address;
+ }
+ leaf interface {
+ type frr-interface:interface-ref;
+ }
+ leaf if-loopback {
+ type empty;
+ }
+ leaf if-any {
+ type empty;
+ }
+ }
+ } // candidate-bsr
+ } // candidate-bsr-container
+
+ grouping candidate-rp-container {
+ description
+ "Grouping of Candidate RP settings.";
+
+ container candidate-rp {
+ presence
+ "Enable router to be a Candidate RP.";
+
+ description
+ "Candidate RP settings";
+
+ leaf rp-priority {
+ type uint8;
+ default "192";
+ description
+ "RP priority for this router, lower values win.";
+ }
+
+ leaf advertisement-interval {
+ type uint32 {
+ range 1..4294967295;
+ }
+ default "60";
+ description
+ "RP advertisement interval (seconds). Holdtime is 2.5 times this.";
+ }
+
+ leaf-list group-list {
+ type frr-route-types:ip-multicast-group-prefix;
+ description
+ "List of multicast group address.";
+ }
+
+ choice source-address-or-interface {
+ description "IP address to use for RP operation";
+ default if-loopback;
+ leaf address {
+ type inet:ip-address;
+ }
+ leaf interface {
+ type frr-interface:interface-ref;
+ }
+ leaf if-loopback {
+ type empty;
+ }
+ leaf if-any {
+ type empty;
+ }
+ }
+ }
+ }
+
+ /*
+ * Configuration data nodes
+ */
+ augment "/frr-rt:routing/frr-rt:control-plane-protocols/"
+ + "frr-rt:control-plane-protocol/frr-pim:pim/"
+ + "frr-pim:address-family" {
+ description "PIM Candidate BSR & Candidate RP augmentation.";
+
+ uses candidate-bsr-container;
+ uses candidate-rp-container;
+ }
+}
diff --git a/yang/subdir.am b/yang/subdir.am
index 71aa04087858..786bd0bca654 100644
--- a/yang/subdir.am
+++ b/yang/subdir.am
@@ -80,6 +80,7 @@ if PIMD
dist_yangmodels_DATA += yang/frr-gmp.yang
dist_yangmodels_DATA += yang/frr-pim.yang
dist_yangmodels_DATA += yang/frr-pim-rp.yang
+dist_yangmodels_DATA += yang/frr-pim-candidate.yang
endif
if BGPD