Module: sip-router
Branch: tmp/tm_async_reply_support
Commit: a426434e80aca76bf32f54c2659e5a5be73bb78e
URL: http://git.sip-router.org/cgi-bin/gitweb.cgi/sip-router/?a=commit;h=a426434e...
Author: Jason Penton <jason.penton@smilecoms.com>
Committer: Jason Penton <jason.penton@smilecoms.com>
Date: Thu Jul 11 17:48:18 2013 +0200
modules/tm: added dedicated lock for async continue, instead of using reply lock
---
 modules/tm/h_table.c   |  1 +
 modules/tm/h_table.h   |  3 +++
 modules/tm/lock.c      | 35 ++++++++++++++++++++++++++++++++++-
 modules/tm/lock.h      |  1 +
 modules/tm/t_reply.c   | 42 ------------------------------------------
 modules/tm/t_reply.h   |  2 ++
 modules/tm/t_suspend.c |  4 ++--
 7 files changed, 43 insertions(+), 45 deletions(-)
diff --git a/modules/tm/h_table.c b/modules/tm/h_table.c
index 92e8ba8..0f24d96 100644
--- a/modules/tm/h_table.c
+++ b/modules/tm/h_table.c
@@ -380,6 +380,7 @@ struct cell* build_cell( struct sip_msg* p_msg )
 	init_synonym_id(new_cell);
 	init_cell_lock( new_cell );
+	init_async_lock( new_cell );
 	t_stats_created();
 	return new_cell;
diff --git a/modules/tm/h_table.h b/modules/tm/h_table.h
index 966f374..e4f2fa1 100644
--- a/modules/tm/h_table.h
+++ b/modules/tm/h_table.h
@@ -444,6 +444,9 @@ typedef struct cell
 	/* protection against concurrent reply processing */
 	ser_lock_t   reply_mutex;
+
+	/* protect against concurrent async continues */
+	ser_lock_t   async_mutex;
 	ticks_t fr_timeout;     /* final response interval for retr_bufs */
 	ticks_t fr_inv_timeout; /* final inv. response interval for retr_bufs */
diff --git a/modules/tm/lock.c b/modules/tm/lock.c
index 461f58e..224e64b 100644
--- a/modules/tm/lock.c
+++ b/modules/tm/lock.c
@@ -71,6 +71,7 @@ static int sem_nr;
 gen_lock_set_t* entry_semaphore=0;
 gen_lock_set_t* reply_semaphore=0;
+gen_lock_set_t* async_semaphore=0;
 #endif
@@ -100,6 +101,10 @@ again:
 		lock_set_destroy(reply_semaphore);
 		lock_set_dealloc(reply_semaphore);
 	}
+	if (async_semaphore!=0){
+		lock_set_destroy(async_semaphore);
+		lock_set_dealloc(async_semaphore);
+	}
 	if (i==0){
 		LOG(L_CRIT, "lock_initialize: could not allocate semaphore"
@@ -154,6 +159,20 @@ again:
 		i--;
 		goto again;
 	}
+
+	i++;
+	if (((async_semaphore=lock_set_alloc(i))==0)||
+			(lock_set_init(async_semaphore)==0)){
+		if (async_semaphore){
+			lock_set_dealloc(async_semaphore);
+			async_semaphore=0;
+		}
+		DBG("DEBUG:lock_initialize: async semaphore initialization"
+			" failure: %s\n", strerror(errno));
+		probe_run=1;
+		i--;
+		goto again;
+	}
 	/* return success */
 	LOG(L_INFO, "INFO: semaphore arrays of size %d allocated\n", sem_nr );
@@ -193,7 +212,11 @@ void lock_cleanup()
 		lock_set_destroy(reply_semaphore);
 		lock_set_dealloc(reply_semaphore);
 	};
-	entry_semaphore = reply_semaphore = 0;
+	if (async_semaphore !=0) {
+		lock_set_destroy(async_semaphore);
+		lock_set_dealloc(async_semaphore);
+	};
+	entry_semaphore = reply_semaphore = async_semaphore = 0;
 }
 #endif /*GEN_LOCK_T_PREFERED*/
@@ -201,6 +224,16 @@ void lock_cleanup()
+int init_async_lock( struct cell *cell )
+{
+#ifdef GEN_LOCK_T_PREFERED
+	lock_init(&cell->async_mutex);
+#else
+	cell->async_mutex.semaphore_set=async_semaphore;
+	cell->async_mutex.semaphore_index = cell->hash_index % sem_nr;
+#endif /* GEN_LOCK_T_PREFERED */
+	return 0;
+}
 int init_cell_lock( struct cell *cell )
 {
diff --git a/modules/tm/lock.h b/modules/tm/lock.h
index ee06cab..54239de 100644
--- a/modules/tm/lock.h
+++ b/modules/tm/lock.h
@@ -75,6 +75,7 @@ void lock_cleanup(void);
 int init_cell_lock( struct cell *cell );
+int init_async_lock( struct cell *cell );
 int init_entry_lock( struct s_table* ht, struct entry *entry );
diff --git a/modules/tm/t_reply.c b/modules/tm/t_reply.c
index 4ab4532..c009a18 100644
--- a/modules/tm/t_reply.c
+++ b/modules/tm/t_reply.c
@@ -952,29 +952,12 @@ void faked_env_resp( struct cell *t, struct sip_msg *msg)
 }
 void faked_env_async( struct cell *t, struct sip_msg *msg) {
-	static int backup_route_type;
-	static struct cell *backup_t;
-	static int backup_branch;
-	static unsigned int backup_msgid;
-	static avp_list_t* backup_user_from, *backup_user_to;
-	static avp_list_t* backup_domain_from, *backup_domain_to;
-	static avp_list_t* backup_uri_from, *backup_uri_to;
-#ifdef WITH_XAVP
-	static sr_xavp_t **backup_xavps;
-#endif
-	static struct socket_info* backup_si;
-
-	static struct lump *backup_add_rm;
-	static struct lump *backup_body_lumps;
-	static struct lump_rpl *backup_reply_lump;
-
 	if (msg) {
 		/* remember we are back in request processing, but process
 		 * a shmem-ed replica of the request; advertise it in route type;
 		 * for example t_reply needs to know that */
-		backup_route_type=get_route_type();
 		set_route_type(t->async_backup.backup_route);
 		if (t->async_backup.ruri_new) {
 			ruri_mark_new();
@@ -987,37 +970,12 @@ void faked_env_async( struct cell *t, struct sip_msg *msg) {
 			ruri_mark_consumed(); /* in failure route we assume ruri should not be used again for forking */
 		}
-		/* also, tm actions look in beginning whether transaction is
-		 * set -- whether we are called from a reply-processing
-		 * or a timer process, we need to set current transaction;
-		 * otherwise the actions would attempt to look the transaction
-		 * up (unnecessary overhead, refcounting)
-		 */
-		/* backup */
-		backup_t=get_t();
-		backup_branch=get_t_branch();
-		backup_msgid=global_msg_id;
 		/* fake transaction and message id */
 		global_msg_id=msg->id;
 		set_t(t, t->async_backup.backup_branch);
-		/* make available the avp list from transaction */
-		backup_uri_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_URI, &t->uri_avps_from );
-		backup_uri_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_URI, &t->uri_avps_to );
-		backup_user_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_USER, &t->user_avps_from );
-		backup_user_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_USER, &t->user_avps_to );
-		backup_domain_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_DOMAIN, &t->domain_avps_from );
-		backup_domain_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_DOMAIN, &t->domain_avps_to );
-#ifdef WITH_XAVP
-		backup_xavps = xavp_set_list(&t->xavps_list);
-#endif
 		/* set default send address to the saved value */
-		backup_si=bind_address;
 		bind_address=t->uac[0].request.dst.send_sock;
-		/* backup lump lists */
-		backup_add_rm = t->uas.request->add_rm;
-		backup_body_lumps = t->uas.request->body_lumps;
-		backup_reply_lump = t->uas.request->reply_lump;
 	} else {
 		/* on async restore - we don't need to do anything because we never really had an environment to restore prior to the suspend anyway ;) */
diff --git a/modules/tm/t_reply.h b/modules/tm/t_reply.h
index 6ac7928..d2f1370 100644
--- a/modules/tm/t_reply.h
+++ b/modules/tm/t_reply.h
@@ -148,6 +148,8 @@ int t_get_reply_totag(struct sip_msg *msg, str *totag);
 #define LOCK_REPLIES(_t) lock(&(_t)->reply_mutex )
 #define UNLOCK_REPLIES(_t) unlock(&(_t)->reply_mutex )
+#define LOCK_ASYNC_CONTINUE(_t) lock(&(_t)->async_mutex )
+#define UNLOCK_ASYNC_CONTINUE(_t) unlock(&(_t)->async_mutex )
 /* This function is called whenever a reply for our module is received;
  * we need to register this function on module initialization;
diff --git a/modules/tm/t_suspend.c b/modules/tm/t_suspend.c
index adc8faa..c7e331d 100644
--- a/modules/tm/t_suspend.c
+++ b/modules/tm/t_suspend.c
@@ -203,7 +203,7 @@ int t_continue(unsigned int hash_index, unsigned int label,
 	/* The transaction has to be locked to protect it
 	 * form calling t_continue() multiple times simultaneously */
-	LOCK_REPLIES(t);
+	LOCK_ASYNC_CONTINUE(t);
 	/* Try to find the blind UAC, and cancel its fr timer.
 	 * We assume that the last blind uac called t_continue(). */
@@ -303,7 +303,7 @@ int t_continue(unsigned int hash_index, unsigned int label,
 		}
 	}
-	UNLOCK_ASYNC_CONTINUE(t);
+	UNLOCK_ASYNC_CONTINUE(t);
 	/* unref the transaction */
 	t_unref(t->uas.request);
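For readers following the change, the snippet below is a minimal, self-contained sketch of the idea this commit introduces: each transaction carries a dedicated mutex for async continues next to its reply mutex, so t_continue() no longer contends on the reply lock. It uses plain pthreads rather than tm's ser_lock_t, and the names fake_transaction, init_locks and continue_async are hypothetical, invented only for illustration; they do not exist in the module.

/* Illustrative sketch only, not tm code: a per-transaction dedicated
 * "async continue" mutex alongside the reply mutex, mirroring the
 * async_mutex / LOCK_ASYNC_CONTINUE pattern added above. */
#include <pthread.h>
#include <stdio.h>

struct fake_transaction {
	pthread_mutex_t reply_mutex;   /* guards reply processing */
	pthread_mutex_t async_mutex;   /* guards concurrent async continues */
	int state;
};

/* analogous to init_cell_lock() plus the new init_async_lock():
 * each lock gets its own initialization */
static void init_locks(struct fake_transaction *t)
{
	pthread_mutex_init(&t->reply_mutex, NULL);
	pthread_mutex_init(&t->async_mutex, NULL);
}

/* analogous to t_continue(): serialize concurrent continues on the
 * dedicated async mutex, leaving the reply mutex free for replies */
static void continue_async(struct fake_transaction *t)
{
	pthread_mutex_lock(&t->async_mutex);    /* LOCK_ASYNC_CONTINUE(t)   */
	t->state++;                             /* resume the suspended work */
	pthread_mutex_unlock(&t->async_mutex);  /* UNLOCK_ASYNC_CONTINUE(t) */
}

int main(void)
{
	struct fake_transaction t = { .state = 0 };
	init_locks(&t);
	continue_async(&t);
	printf("state after continue: %d\n", t.state);
	return 0;
}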