Replace uses of the deprecated EVENT_OFF() macro with direct calls to event_cancel().
Signed-off-by: Mark Stapp <mjs@cisco.com>
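
For reference, a minimal sketch of the conversion pattern. This is not verbatim FRR code: the header name, the macro body, and the assumption that event_cancel() tolerates a NULL *eventp are illustrative only.

    /* Minimal sketch, assuming FRR's event API: event_cancel(struct event **)
     * cancels a pending event, if any, and sets the caller's pointer back to
     * NULL.  Header name and macro shape below are assumptions. */
    #include "frrevent.h"

    struct demo {
        struct event *t_timeout;   /* pending timer, or NULL */
    };

    /* Assumed shape of the deprecated wrapper being removed: */
    #define EVENT_OFF(ev)                      \
        do {                                   \
            if (ev)                            \
                event_cancel(&(ev));           \
        } while (0)

    static void demo_stop(struct demo *d)
    {
        /* Before: the macro hides the address-of and a null check. */
        EVENT_OFF(d->t_timeout);

        /* After: call event_cancel() directly; the extra null check is
         * redundant if the function already handles a NULL *eventp. */
        event_cancel(&d->t_timeout);
    }
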
static void _bfd_sess_remove(struct bfd_session_params *bsp)
{
/* Cancel any pending installation request. */
- EVENT_OFF(bsp->installev);
+ event_cancel(&bsp->installev);
/* Not installed, nothing to do. */
if (!bsp->installed)
return;
/* Cancel any pending installation request. */
- EVENT_OFF(bsp->installev);
+ event_cancel(&bsp->installev);
/* Ask for installation. */
bsp->lastev = BSE_INSTALL;
* update state
*/
if (ldp_sync_info && ldp_sync_info->enabled == LDP_IGP_SYNC_ENABLED) {
- EVENT_OFF(ldp_sync_info->t_holddown);
+ event_cancel(&ldp_sync_info->t_holddown);
if (ldp_sync_info->state == LDP_IGP_SYNC_STATE_REQUIRED_UP)
ldp_sync_info->state =
conn->fd = -1;
}
- EVENT_OFF(conn->read_ev);
- EVENT_OFF(conn->write_ev);
- EVENT_OFF(conn->proc_msg_ev);
+ event_cancel(&conn->read_ev);
+ event_cancel(&conn->write_ev);
+ event_cancel(&conn->proc_msg_ev);
mgmt_msg_destroy(ms);
}
{
assert(client->conn.is_client);
- EVENT_OFF(client->conn_retry_tmr);
+ event_cancel(&client->conn_retry_tmr);
free(client->sopath);
msg_conn_cleanup(&client->conn);
DEBUGD(server->debug, "Closing %s server", server->idtag);
if (server->listen_ev)
- EVENT_OFF(server->listen_ev);
+ event_cancel(&server->listen_ev);
msg_server_list_del(&msg_servers, server);
__dbg("terminating: timer: %p timer arg: %p walk %p", nb_notif_timer, args, nb_notif_walk);
- EVENT_OFF(nb_notif_timer);
+ event_cancel(&nb_notif_timer);
if (nb_notif_walk) {
/* Grab walk args from walk if active. */
if (ys) {
if (ys->user_tree && ys->user_tree_unlock)
ys->user_tree_unlock(ys->user_tree, ys->user_tree_lock);
- EVENT_OFF(ys->walk_ev);
+ event_cancel(&ys->walk_ev);
nb_op_walks_del(&nb_op_walks, ys);
/* if we have a branch then free up its libyang tree */
if (!nofree_tree && ys_root_node(ys))
if (!module->sr_subscription)
continue;
sr_unsubscribe(module->sr_subscription);
- EVENT_OFF(module->sr_thread);
+ event_cancel(&module->sr_thread);
}
if (session)
void pullwr_del(struct pullwr *pullwr)
{
- EVENT_OFF(pullwr->writer);
+ event_cancel(&pullwr->writer);
XFREE(MTYPE_PULLWR_BUF, pullwr->buffer);
XFREE(MTYPE_PULLWR_HEAD, pullwr);
event_add_read(r->master, resolver_cb_socket_readable, resfd, resfd->fd,
&resfd->t_read);
/* ^ ordering important:
- * ares_process_fd may transitively call EVENT_OFF(resfd->t_read)
+ * ares_process_fd may transitively call event_cancel(&resfd->t_read)
* combined with resolver_fd_drop_maybe, so resfd may be free'd after!
*/
ares_process_fd(r->channel, resfd->fd, ARES_SOCKET_BAD);
event_add_write(r->master, resolver_cb_socket_writable, resfd,
resfd->fd, &resfd->t_write);
/* ^ ordering important:
- * ares_process_fd may transitively call EVENT_OFF(resfd->t_write)
+ * ares_process_fd may transitively call event_cancel(&resfd->t_write)
* combined with resolver_fd_drop_maybe, so resfd may be free'd after!
*/
ares_process_fd(r->channel, ARES_SOCKET_BAD, resfd->fd);
{
struct timeval *tv, tvbuf;
- EVENT_OFF(r->timeout);
+ event_cancel(&r->timeout);
tv = ares_timeout(r->channel, NULL, &tvbuf);
if (tv) {
unsigned int timeoutms = tv->tv_sec * 1000 + tv->tv_usec / 1000;
assert(resfd->state == r);
if (!readable)
- EVENT_OFF(resfd->t_read);
+ event_cancel(&resfd->t_read);
else if (!resfd->t_read)
event_add_read(r->master, resolver_cb_socket_readable, resfd,
fd, &resfd->t_read);
if (!writable)
- EVENT_OFF(resfd->t_write);
+ event_cancel(&resfd->t_write);
else if (!resfd->t_write)
event_add_write(r->master, resolver_cb_socket_writable, resfd,
fd, &resfd->t_write);
{
struct spf_backoff *backoff = EVENT_ARG(thread);
- EVENT_OFF(backoff->t_timetolearn);
+ event_cancel(&backoff->t_timetolearn);
timerclear(&backoff->first_event_time);
backoff->state = SPF_BACKOFF_QUIET;
backoff_debug("SPF Back-off(%s) HOLDDOWN elapsed, move to state %s",
/* Temporarily disable read thread. */
if (vty->lines == 0)
- EVENT_OFF(vty->t_read);
+ event_cancel(&vty->t_read);
/* Function execution continue. */
erase = ((vty->status == VTY_MORE || vty->status == VTY_MORELINE));
if (!stdio_vty)
return;
- EVENT_OFF(stdio_vty->t_write);
- EVENT_OFF(stdio_vty->t_read);
- EVENT_OFF(stdio_vty->t_timeout);
+ event_cancel(&stdio_vty->t_write);
+ event_cancel(&stdio_vty->t_read);
+ event_cancel(&stdio_vty->t_timeout);
if (stdio_termios)
tcsetattr(0, TCSANOW, &stdio_orig_termios);
struct vty_serv *vtyserv;
while ((vtyserv = vtyservs_pop(vty_servs))) {
- EVENT_OFF(vtyserv->t_accept);
+ event_cancel(&vtyserv->t_accept);
close(vtyserv->sock);
XFREE(MTYPE_VTY_SERV, vtyserv);
}
}
/* Cancel threads. */
- EVENT_OFF(vty->t_read);
- EVENT_OFF(vty->t_write);
- EVENT_OFF(vty->t_timeout);
+ event_cancel(&vty->t_read);
+ event_cancel(&vty->t_write);
+ event_cancel(&vty->t_timeout);
if (vty->pass_fd != -1) {
close(vty->pass_fd);
/* Time out treatment. */
if (vty->v_timeout) {
- EVENT_OFF(vty->t_timeout);
+ event_cancel(&vty->t_timeout);
event_add_timer(vty_master, vty_timeout, vty,
vty->v_timeout, &vty->t_timeout);
}
&vty->t_write);
break;
case VTY_TIMEOUT_RESET:
- EVENT_OFF(vty->t_timeout);
+ event_cancel(&vty->t_timeout);
if (vty->v_timeout)
event_add_timer(vty_master, vty_timeout, vty,
vty->v_timeout, &vty->t_timeout);
list_delete(&wheel->wheel_slot_lists[i]);
}
- EVENT_OFF(wheel->timer);
+ event_cancel(&wheel->timer);
XFREE(MTYPE_TIMER_WHEEL_LIST, wheel->wheel_slot_lists);
XFREE(MTYPE_TIMER_WHEEL, wheel);
}
{
struct work_queue *wq = *wqp;
- EVENT_OFF(wq->thread);
+ event_cancel(&wq->thread);
while (!work_queue_empty(wq)) {
struct work_queue_item *item = work_queue_last_item(wq);
*/
void work_queue_plug(struct work_queue *wq)
{
- EVENT_OFF(wq->thread);
+ event_cancel(&wq->thread);
UNSET_FLAG(wq->flags, WQ_UNPLUGGED);
}
zlog_debug("zclient %p stopped", zclient);
/* Stop threads. */
- EVENT_OFF(zclient->t_read);
- EVENT_OFF(zclient->t_connect);
- EVENT_OFF(zclient->t_write);
+ event_cancel(&zclient->t_read);
+ event_cancel(&zclient->t_connect);
+ event_cancel(&zclient->t_write);
/* Reset streams. */
stream_reset(zclient->ibuf);
__func__, zclient->sock);
return zclient_failed(zclient);
case BUFFER_EMPTY:
- EVENT_OFF(zclient->t_write);
+ event_cancel(&zclient->t_write);
return ZCLIENT_SEND_SUCCESS;
case BUFFER_PENDING:
event_add_write(zclient->master, zclient_flush_data, zclient,
if (!--a->refcount) {
mgmt_be_adapters_del(&mgmt_be_adapters, a);
- EVENT_OFF(a->conn_init_ev);
+ event_cancel(&a->conn_init_ev);
msg_server_conn_delete(a->conn);
XFREE(MTYPE_MGMTD_BE_ADPATER, a);
}
static void quit(int exit_code)
{
- EVENT_OFF(event_timeout);
+ event_cancel(&event_timeout);
darr_free(__client_cbs.notif_xpaths);
darr_free(__client_cbs.rpc_xpaths);
if (success) {
/* Stop the commit-timeout timer */
/* XXX why only on success? */
- EVENT_OFF(txn->comm_cfg_timeout);
+ event_cancel(&txn->comm_cfg_timeout);
create_cmt_info_rec =
(result != MGMTD_NO_CFG_CHANGES &&
int ret = NB_OK;
/* cancel timer and send reply onward */
- EVENT_OFF(txn->get_tree_timeout);
+ event_cancel(&txn->get_tree_timeout);
if (!get_tree->simple_xpath && get_tree->client_results) {
/*
uint64_t req_id = txn_req->req_id;
/* cancel timer and send reply onward */
- EVENT_OFF(txn->rpc_timeout);
+ event_cancel(&txn->rpc_timeout);
if (rpc->errstr)
mgmt_fe_adapter_txn_error(txn->txn_id, req_id, false, -EINVAL,
* cleanup. Please see mgmt_fe_send_commit_cfg_reply() for
* more details.
*/
- EVENT_OFF(txn->comm_cfg_timeout);
+ event_cancel(&txn->comm_cfg_timeout);
mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS, NULL);
break;
case MGMTD_COMMIT_PHASE_MAX:
if ((*txn)->type == MGMTD_TXN_TYPE_CONFIG)
if (mgmt_txn_mm->cfg_txn == *txn)
mgmt_txn_mm->cfg_txn = NULL;
- EVENT_OFF((*txn)->proc_get_cfg);
- EVENT_OFF((*txn)->proc_get_data);
- EVENT_OFF((*txn)->proc_comm_cfg);
- EVENT_OFF((*txn)->comm_cfg_timeout);
- EVENT_OFF((*txn)->get_tree_timeout);
+ event_cancel(&(*txn)->proc_get_cfg);
+ event_cancel(&(*txn)->proc_get_data);
+ event_cancel(&(*txn)->proc_comm_cfg);
+ event_cancel(&(*txn)->comm_cfg_timeout);
+ event_cancel(&(*txn)->get_tree_timeout);
if (!in_hash_free)
hash_release(mgmt_txn_mm->txn_hash, *txn);
nhrp_peer_notify_del(c->cur.peer, &c->peer_notifier);
nhrp_peer_unref(c->cur.peer);
nhrp_peer_unref(c->new.peer);
- EVENT_OFF(c->t_timeout);
- EVENT_OFF(c->t_auth);
+ event_cancel(&c->t_timeout);
+ event_cancel(&c->t_auth);
XFREE(MTYPE_NHRP_CACHE, c);
}
static void nhrp_cache_reset_new(struct nhrp_cache *c)
{
- EVENT_OFF(c->t_auth);
+ event_cancel(&c->t_auth);
if (notifier_list_anywhere(&c->newpeer_notifier))
nhrp_peer_notify_del(c->new.peer, &c->newpeer_notifier);
nhrp_peer_unref(c->new.peer);
static void nhrp_cache_update_timers(struct nhrp_cache *c)
{
- EVENT_OFF(c->t_timeout);
+ event_cancel(&c->t_timeout);
switch (c->cur.type) {
case NHRP_CACHE_INVALID:
static void evmgr_connection_error(struct event_manager *evmgr)
{
- EVENT_OFF(evmgr->t_read);
- EVENT_OFF(evmgr->t_write);
+ event_cancel(&evmgr->t_read);
+ event_cancel(&evmgr->t_write);
zbuf_reset(&evmgr->ibuf);
zbufq_reset(&evmgr->obuf);
void netlink_mcast_set_nflog_group(int nlgroup)
{
if (netlink_mcast_log_fd >= 0) {
- EVENT_OFF(netlink_mcast_log_thread);
+ event_cancel(&netlink_mcast_log_thread);
close(netlink_mcast_log_fd);
netlink_mcast_log_fd = -1;
debugf(NHRP_DEBUG_COMMON, "De-register nflog group");
/* Success - schedule next registration, and route NHS */
r->timeout = 2;
holdtime = nifp->afi[nhs->afi].holdtime;
- EVENT_OFF(r->t_register);
+ event_cancel(&r->t_register);
/* RFC 2332 5.2.3 - Registration is recommended to be renewed
* every one third of holdtime */
case NOTIFY_PEER_MTU_CHANGED:
debugf(NHRP_DEBUG_COMMON, "NHS: Flush timer for %pSU",
&r->peer->vc->remote.nbma);
- EVENT_OFF(r->t_register);
+ event_cancel(&r->t_register);
event_add_timer_msec(master, nhrp_reg_send_req, r, 10,
&r->t_register);
break;
nhrp_peer_notify_del(r->peer, &r->peer_notifier);
nhrp_peer_unref(r->peer);
nhrp_reglist_del(&r->nhs->reglist_head, r);
- EVENT_OFF(r->t_register);
+ event_cancel(&r->t_register);
XFREE(MTYPE_NHRP_REGISTRATION, r);
}
frr_each_safe (nhrp_reglist, &nhs->reglist_head, r)
nhrp_reg_delete(r);
- EVENT_OFF(nhs->t_resolve);
+ event_cancel(&nhs->t_resolve);
nhrp_nhslist_del(&nifp->afi[afi].nhslist_head, nhs);
free((void *)nhs->nbma_fqdn);
XFREE(MTYPE_NHRP_NHS, nhs);
debugf(NHRP_DEBUG_COMMON, "Deleting peer ref:%d remote:%pSU local:%pSU",
p->ref, &p->vc->remote.nbma, &p->vc->local.nbma);
- EVENT_OFF(p->t_fallback);
- EVENT_OFF(p->t_timer);
+ event_cancel(&p->t_fallback);
+ event_cancel(&p->t_timer);
if (nifp->peer_hash)
hash_release(nifp->peer_hash, p);
nhrp_interface_notify_del(p->ifp, &p->ifp_notifier);
online = nifp->enabled && (!nifp->ipsec_profile || vc->ipsec);
if (p->online != online) {
- EVENT_OFF(p->t_fallback);
+ event_cancel(&p->t_fallback);
if (online && notifier_active(&p->notifier_list)) {
/* If we requested the IPsec connection, delay
* the up notification a bit to allow things
struct interface *ifp = p->ifp;
struct nhrp_interface *nifp = ifp->info;
- EVENT_OFF(p->t_timer);
+ event_cancel(&p->t_timer);
if (p->online) {
debugf(NHRP_DEBUG_COMMON,
s->route_installed = 0;
}
- EVENT_OFF(s->t_shortcut_purge);
+ event_cancel(&s->t_shortcut_purge);
if (holding_time) {
s->expiring = 0;
s->holding_time = holding_time;
struct route_node *rn;
afi_t afi = family2afi(PREFIX_FAMILY(s->p));
- EVENT_OFF(s->t_shortcut_purge);
- EVENT_OFF(s->t_retry_resolution);
+ event_cancel(&s->t_shortcut_purge);
+ event_cancel(&s->t_retry_resolution);
nhrp_reqid_free(&nhrp_packet_reqid, &s->reqid);
debugf(NHRP_DEBUG_ROUTE, "Shortcut %pFX purged", s->p);
{
struct nhrp_shortcut *s = EVENT_ARG(t);
s->t_shortcut_purge = NULL;
- EVENT_OFF(s->t_retry_resolution);
+ event_cancel(&s->t_retry_resolution);
nhrp_shortcut_delete(s, NULL);
}
int holding_time = pp->if_ad->holdtime;
nhrp_reqid_free(&nhrp_packet_reqid, &s->reqid);
- EVENT_OFF(s->t_shortcut_purge);
- EVENT_OFF(s->t_retry_resolution);
+ event_cancel(&s->t_shortcut_purge);
+ event_cancel(&s->t_retry_resolution);
event_add_timer(master, nhrp_shortcut_do_purge, s, 1,
&s->t_shortcut_purge);
s = nhrp_shortcut_get(&p);
if (s && s->type != NHRP_CACHE_INCOMPLETE) {
s->addr = *addr;
- EVENT_OFF(s->t_shortcut_purge);
- EVENT_OFF(s->t_retry_resolution);
+ event_cancel(&s->t_shortcut_purge);
+ event_cancel(&s->t_retry_resolution);
event_add_timer(master, nhrp_shortcut_do_purge, s,
NHRPD_DEFAULT_PURGE_TIME, &s->t_shortcut_purge);
{
struct nhrp_shortcut *s = EVENT_ARG(t);
- EVENT_OFF(s->t_retry_resolution);
+ event_cancel(&s->t_retry_resolution);
debugf(NHRP_DEBUG_COMMON, "Shortcut: Retrying Resolution Request");
nhrp_shortcut_send_resolution_req(s, true);
}
void nhrp_shortcut_purge(struct nhrp_shortcut *s, int force)
{
- EVENT_OFF(s->t_shortcut_purge);
- EVENT_OFF(s->t_retry_resolution);
+ event_cancel(&s->t_shortcut_purge);
+ event_cancel(&s->t_retry_resolution);
nhrp_reqid_free(&nhrp_packet_reqid, &s->reqid);
if (force) {
{
nhrp_vc_reset();
- EVENT_OFF(vici->t_read);
- EVENT_OFF(vici->t_write);
+ event_cancel(&vici->t_read);
+ event_cancel(&vici->t_write);
zbuf_reset(&vici->ibuf);
zbufq_reset(&vici->obuf);