* depends on context (e.g. some daemons might augment "frr-interface"
* while others don't).
*/
- if (!valid && callback_implemented && operation != NB_CB_GET_NEXT
- && operation != NB_CB_GET_KEYS && operation != NB_CB_LOOKUP_ENTRY)
+ if (!valid && callback_implemented && operation != NB_CB_GET_NEXT &&
+ operation != NB_CB_GET_KEYS && operation != NB_CB_LIST_ENTRY_DONE &&
+ operation != NB_CB_LOOKUP_ENTRY)
flog_warn(EC_LIB_NB_CB_UNNEEDED,
"unneeded '%s' callback for '%s'",
nb_cb_operation_name(operation), nb_node->xpath);
state_optional);
error += nb_node_validate_cb(nb_node, NB_CB_GET_KEYS, !!nb_node->cbs.get_keys,
state_optional);
+ error += nb_node_validate_cb(nb_node, NB_CB_LIST_ENTRY_DONE, !!nb_node->cbs.list_entry_done,
+ true);
error += nb_node_validate_cb(nb_node, NB_CB_LOOKUP_ENTRY, !!nb_node->cbs.lookup_entry,
state_optional);
error += nb_node_validate_cb(nb_node, NB_CB_RPC, !!nb_node->cbs.rpc,
return nb_node->cbs.get_keys(&args);
}
+/*
+ * Invoke the daemon's list_entry_done() callback so it can release any
+ * resources (e.g., a lock) held by the opaque 'list_entry' value.  The
+ * callback is optional: this is a no-op when it isn't implemented or
+ * when the node ignores config callbacks (F_NB_NODE_IGNORE_CFG_CBS).
+ */
+void nb_callback_list_entry_done(const struct nb_node *nb_node, const void *parent_list_entry,
+ const void *list_entry)
+{
+ if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CFG_CBS) || !nb_node->cbs.list_entry_done)
+ return;
+
+ DEBUGD(&nb_dbg_cbs_state,
+ "northbound callback (list_entry_done): node [%s] parent_list_entry [%p] list_entry [%p]",
+ nb_node->xpath, parent_list_entry, list_entry);
+
+ nb_node->cbs.list_entry_done(parent_list_entry, list_entry);
+}
+
const void *nb_callback_lookup_entry(const struct nb_node *nb_node,
const void *parent_list_entry,
const struct yang_list_keys *keys)
case NB_CB_GET_ELEM:
case NB_CB_GET_NEXT:
case NB_CB_GET_KEYS:
+ case NB_CB_LIST_ENTRY_DONE:
case NB_CB_LOOKUP_ENTRY:
case NB_CB_RPC:
case NB_CB_NOTIFY:
}
return true;
case NB_CB_GET_KEYS:
+ case NB_CB_LIST_ENTRY_DONE:
case NB_CB_LOOKUP_ENTRY:
switch (snode->nodetype) {
case LYS_LIST:
return "get_next";
case NB_CB_GET_KEYS:
return "get_keys";
+ case NB_CB_LIST_ENTRY_DONE:
+ return "list_entry_done";
case NB_CB_LOOKUP_ENTRY:
return "lookup_entry";
case NB_CB_RPC:
NB_CB_GET_ELEM,
NB_CB_GET_NEXT,
NB_CB_GET_KEYS,
+ NB_CB_LIST_ENTRY_DONE,
NB_CB_LOOKUP_ENTRY,
NB_CB_RPC,
NB_CB_NOTIFY,
*/
int (*get_keys)(struct nb_cb_get_keys_args *args);
+ /*
+ * Operational data callback for YANG lists.
+ *
+ * This callback function is called to cleanup any resources that may be
+ * held by a backend opaque `list_entry` value (e.g., a lock). It is
+ * called when the northbound code is done using a `list_entry` value it
+ * obtained using the lookup_entry() callback. It is also called on the
+ * `list_entry` returned from the get_next() or lookup_next() callbacks
+ * if the iteration aborts before walking to the end of the list. The
+ * intention is to allow any resources (e.g., a lock) to now be
+ * released.
+ *
+ * args
+ * parent_list_entry - pointer to the parent list entry
+ * list_entry - value returned previously from `lookup_entry()`
+ */
+ void (*list_entry_done)(const void *parent_list_entry, const void *list_entry);
+
/*
* Operational data callback for YANG lists.
*
extern const void *nb_callback_lookup_entry(const struct nb_node *nb_node,
const void *parent_list_entry,
const struct yang_list_keys *keys);
+extern void nb_callback_list_entry_done(const struct nb_node *nb_node,
+ const void *parent_list_entry, const void *list_entry);
extern const void *nb_callback_lookup_node_entry(struct lyd_node *node,
const void *parent_list_entry);
extern const void *nb_callback_lookup_next(const struct nb_node *nb_node,
static const void *nb_op_list_lookup_entry(struct nb_op_yield_state *ys, struct nb_node *nb_node,
const struct nb_op_node_info *pni, struct lyd_node *node,
const struct yang_list_keys *keys);
+static void nb_op_list_list_entry_done(struct nb_op_yield_state *ys, struct nb_node *nb_node,
+ const struct nb_op_node_info *pni, const void *list_entry);
+static void ys_pop_inner(struct nb_op_yield_state *ys);
/* -------------------- */
/* Function Definitions */
darr_free(ys->non_specific_predicate);
darr_free(ys->query_tokstr);
darr_free(ys->schema_path);
+ /* need to clean up resources, so pop these individually */
+ while (darr_len(ys->node_infos))
+ ys_pop_inner(ys);
darr_free(ys->node_infos);
darr_free(ys->xpath_orig);
darr_free(ys->xpath);
static void ys_pop_inner(struct nb_op_yield_state *ys)
{
- uint len = darr_len(ys->node_infos);
+ struct nb_op_node_info *ni, *pni;
+ struct nb_node *nb_node;
+ int i = darr_lasti(ys->node_infos);
- assert(len);
- darr_setlen(ys->node_infos, len - 1);
+ pni = i > 0 ? &ys->node_infos[i - 1] : NULL;
+ ni = &ys->node_infos[i];
+
+ /*
+ * list_entry values propagate down the node_info stack, so only
+ * release the first (deepest) occurrence of a given entry.
+ */
+ if (ni->list_entry && (!pni || pni->list_entry != ni->list_entry)) {
+ nb_node = ni->schema ? ni->schema->priv : NULL;
+ if (nb_node)
+ nb_op_list_list_entry_done(ys, nb_node, pni, ni->list_entry);
+ }
+ darr_setlen(ys->node_infos, i);
ys_trim_xpath(ys);
}
return 0;
}
+/*
+ * Release 'list_entry' back to the daemon via the node's
+ * list_entry_done() callback.  No-op for nodes with
+ * F_NB_NODE_HAS_GET_TREE set (presumably get_tree-based nodes do not
+ * use per-entry callbacks -- TODO confirm).  'pni' (the parent node
+ * info) may be NULL for top-level lists.
+ */
+static void nb_op_list_list_entry_done(struct nb_op_yield_state *ys, struct nb_node *nb_node,
+ const struct nb_op_node_info *pni, const void *list_entry)
+{
+ if (CHECK_FLAG(nb_node->flags, F_NB_NODE_HAS_GET_TREE))
+ return;
+
+ nb_callback_list_entry_done(nb_node, pni ? pni->list_entry : NULL, list_entry);
+}
/**
* nb_op_add_leaf() - Add leaf data to the get tree results
*
* If the schema path (original query) is longer than our current node
* info stack (current xpath location), we are building back up to the
- * base of the user query, return the next schema node from the query
- * string (schema_path).
+ * base of the walk at the end of the user query path, return the next
+ * schema node from the query string (schema_path).
*/
if (last != NULL)
assert(last->schema == parent);
*/
assert(!list_start);
is_specific_node = true;
+
+ /*
+ * Release the entry back to the daemon
+ */
+ assert(ni->list_entry == list_entry);
+ nb_op_list_list_entry_done(ys, nn, pni, list_entry);
+ ni->list_entry = NULL;
+
+ /*
+ * Continue on as we may reap the resulting node
+ * if empty.
+ */
list_entry = NULL;
}
list_entry = nb_op_list_get_next(ys, nn, pni, list_entry);
}
+ /*
+ * The walk API is that get/lookup_next returns NULL
+ * when done; those callbacks are also responsible
+ * for releasing any state associated with previous
+ * list_entry values (e.g., any locks) during the
+ * iteration. Therefore we need to zero out the last
+ * top level list_entry so we don't mistakenly call
+ * the list_entry_done() callback on it.
+ */
+ if (!is_specific_node && !list_start && !list_entry)
+ ni->list_entry = NULL;
+
/*
* (FN:A) Reap empty list element? Check to see if we
* should reap an empty list element. We do this if the
* have no non-key children, check for this condition
* and do not reap if true.
*/
- if (!list_start && ni->inner &&
- !lyd_child_no_keys(ni->inner) &&
+ if (!list_start && ni->inner && !lyd_child_no_keys(ni->inner) &&
/* not the top element with a key match */
- !((darr_ilen(ys->node_infos) ==
- darr_ilen(ys->schema_path) - 1) &&
+ !((darr_ilen(ys->node_infos) == darr_ilen(ys->schema_path) - 1) &&
lysc_is_key((*darr_last(ys->schema_path)))) &&
- /* is this at or below the base? */
- darr_ilen(ys->node_infos) <= ys->query_base_level)
+ /* is this list entry below the query base? */
+ darr_ilen(ys->node_infos) - 1 < ys->query_base_level)
ys_free_inner(ys, ni);
-
if (!list_entry) {
/*
* List Iteration Done
ni->xpath_len = len;
}
+ /* Save the new list_entry early so it can be cleaned up on error */
+ ni->list_entry = list_entry;
+ ni->schema = sib;
+
/* Need to get keys. */
if (!CHECK_FLAG(nn->flags, F_NB_NODE_KEYLESS_LIST)) {
ret = nb_op_list_get_keys(ys, nn, list_entry, &ni->keys);
if (ret) {
- darr_pop(ys->node_infos);
ret = NB_ERR_RESOURCE;
goto done;
}
.inner,
sib, &ni->keys, &node);
if (err) {
- darr_pop(ys->node_infos);
ret = NB_ERR_RESOURCE;
goto done;
}
* Save the new list entry with the list node info
*/
ni->inner = node;
- ni->schema = node->schema;
- ni->list_entry = list_entry;
+ assert(ni->schema == node->schema);
ni->niters += 1;
ni->nents += 1;