/***************************************************************************
 * engine_select.c -- select() based IO engine.
 *
 ***********************IMPORTANT NSOCK LICENSE TERMS***********************
 *
 * The nsock parallel socket event library is (C) 1999-2011 Insecure.Com
 * LLC This library is free software; you may redistribute and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; Version 2.  This guarantees
 * your right to use, modify, and redistribute this software under certain
 * conditions.  If this license is unacceptable to you, Insecure.Com LLC
 * may be willing to sell alternative licenses (contact
 * sales@insecure.com ).
 *
 * As a special exception to the GPL terms, Insecure.Com LLC grants
 * permission to link the code of this program with any version of the
 * OpenSSL library which is distributed under a license identical to that
 * listed in the included docs/licenses/OpenSSL.txt file, and distribute
 * linked combinations including the two.  You must obey the GNU GPL in all
 * respects for all of the code used other than OpenSSL.  If you modify
 * this file, you may extend this exception to your version of the file,
 * but you are not obligated to do so.
 *
 * If you received these files with a written license agreement stating
 * terms other than the (GPL) terms above, then that alternative license
 * agreement takes precedence over this comment.
 *
 * Source is provided to this software because we believe users have a
 * right to know exactly what a program is going to do before they run it.
 * This also allows you to audit the software for security holes (none
 * have been found so far).
 *
 * Source code also allows you to port Nmap to new platforms, fix bugs,
 * and add new features.  You are highly encouraged to send your changes
 * to nmap-dev@insecure.org for possible incorporation into the main
 * distribution.  By sending these changes to Fyodor or one of the
 * Insecure.Org development mailing lists, it is assumed that you are
 * offering the Nmap Project (Insecure.Com LLC) the unlimited,
 * non-exclusive right to reuse, modify, and relicense the code.  Nmap
 * will always be available Open Source, but this is important because the
 * inability to relicense code has caused devastating problems for other
 * Free Software projects (such as KDE and NASM).  We also occasionally
 * relicense the code to third parties as discussed above.  If you wish to
 * specify special license conditions of your contributions, just say so
 * when you send them.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License v2.0 for more details
 * (http://www.gnu.org/licenses/gpl-2.0.html).
 *
 ***************************************************************************/

/* $Id$ */

#include <errno.h>

#include "nsock_internal.h"

#if HAVE_PCAP
#include "nsock_pcap.h"
#endif

#ifdef WIN32
#define CHECKED_FD_SET FD_SET
#else
#define CHECKED_FD_SET(fd, set) \
  do { \
    if ((fd) < FD_SETSIZE) { \
      FD_SET((fd), (set)); \
    } else { \
      fatal("%s:%ld: Attempt to FD_SET fd %d, which is not less than" \
            " FD_SETSIZE (%d). Try using a lower parallelism.", \
            __FILE__, (long)__LINE__, (fd), FD_SETSIZE); \
    } \
  } while (0)
#endif

#ifdef WIN32
#define CHECKED_FD_CLR FD_CLR
#else
#define CHECKED_FD_CLR(fd, set) \
  do { \
    if ((fd) < FD_SETSIZE) { \
      FD_CLR((fd), (set)); \
    } else { \
      fatal("%s:%ld: Attempt to FD_CLR fd %d, which is not less than" \
            " FD_SETSIZE (%d). Try using a lower parallelism.", \
            __FILE__, (long)__LINE__, (fd), FD_SETSIZE); \
    } \
  } while (0)
#endif
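
/*
 * Editor's note (illustrative, not part of the original build): the checked
 * wrappers above exist because on POSIX systems fd_set is a fixed-size bitmap
 * and FD_SET()/FD_CLR() on a descriptor >= FD_SETSIZE is undefined behavior,
 * whereas on Windows fd_set stores an array of socket handles, so only the
 * count of sockets is limited and the plain macros are safe.  A typical call
 * site, assuming a struct select_info *sinfo as defined below, would look
 * like:
 *
 *   int sd = nsi_getsd(iod);
 *   CHECKED_FD_SET(sd, &sinfo->fds_master_r);  // aborts via fatal() if
 *                                              // sd >= FD_SETSIZE
 */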

/* --- ENGINE INTERFACE PROTOTYPES --- */
static int select_init(mspool *nsp);
static void select_destroy(mspool *nsp);
static int select_iod_register(mspool *nsp, msiod *iod, enum engine_event ev);
static int select_iod_unregister(mspool *nsp, msiod *iod);
static int select_iod_modify(mspool *nsp, msiod *iod, enum engine_event ev_set,
                             enum engine_event ev_clr);
static int select_loop(mspool *nsp, int msec_timeout);


/* ---- ENGINE DEFINITION ---- */
struct io_engine engine_select = {
  "select",
  select_init,
  select_destroy,
  select_iod_register,
  select_iod_unregister,
  select_iod_modify,
  select_loop
};
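
/*
 * Editor's note (hedged sketch, not present in the original file): the vtable
 * above is the only coupling between nsock's core and this backend, so an
 * alternative engine only has to provide the same seven entries.  A
 * hypothetical epoll-based backend, for instance, would plug in like this:
 *
 *   struct io_engine engine_epoll = {
 *     "epoll",
 *     epoll_init,
 *     epoll_destroy,
 *     epoll_iod_register,
 *     epoll_iod_unregister,
 *     epoll_iod_modify,
 *     epoll_loop
 *   };
 */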

/* --- INTERNAL PROTOTYPES --- */
static void iterate_through_event_lists(mspool *nsp);

/* defined in nsock_core.c */
int pcap_read_on_nonselect(mspool *nsp);
void process_event(mspool *nsp, gh_list *evlist, msevent *nse, enum engine_event ev);

extern struct timeval nsock_tod;


/*
 * Engine specific data structure
 */
struct select_info {
  /* Descriptors which have pending READ events */
  fd_set fds_master_r;

  /* Descriptors which we are trying to WRITE to */
  fd_set fds_master_w;

  /* Looking for exceptional events -- used with connect */
  fd_set fds_master_x;

  /* For keeping track of the select results */
  fd_set fds_results_r, fds_results_w, fds_results_x;

  /* The highest sd we have set in any of our fd_sets (max_sd + 1 is used in
   * select() calls).  Note that it can be -1 when there are no valid
   * sockets. */
  int max_sd;
};


int select_init(mspool *nsp) {
  struct select_info *sinfo;

  sinfo = (struct select_info *)safe_malloc(sizeof(struct select_info));

  FD_ZERO(&sinfo->fds_master_r);
  FD_ZERO(&sinfo->fds_master_w);
  FD_ZERO(&sinfo->fds_master_x);

  sinfo->max_sd = -1;

  nsp->engine_data = (void *)sinfo;

  return 1;
}

void select_destroy(mspool *nsp) {
  assert(nsp->engine_data != NULL);
  free(nsp->engine_data);
}

int select_iod_register(mspool *nsp, msiod *iod, enum engine_event ev) {
  assert(!IOD_PROPGET(iod, IOD_REGISTERED));

  nsock_trace(nsp, "Registering IOD #%lu", iod->id);

  iod->watched_events = ev;
  select_iod_modify(nsp, iod, ev, EV_NONE);
  IOD_PROPSET(iod, IOD_REGISTERED);
  return 1;
}

int select_iod_unregister(mspool *nsp, msiod *iod) {
  struct select_info *sinfo = (struct select_info *)nsp->engine_data;

  iod->watched_events = EV_NONE;

  /* Some IODs can already be unregistered here if they're associated with an
   * event that was immediately completed. */
  if (IOD_PROPGET(iod, IOD_REGISTERED)) {
#if HAVE_PCAP
    if (iod->pcap) {
      int sd = ((mspcap *)iod->pcap)->pcap_desc;
      if (sd >= 0) {
        CHECKED_FD_CLR(sd, &sinfo->fds_master_r);
        CHECKED_FD_CLR(sd, &sinfo->fds_results_r);
      }
    } else
#endif
    {
      CHECKED_FD_CLR(iod->sd, &sinfo->fds_master_r);
      CHECKED_FD_CLR(iod->sd, &sinfo->fds_master_w);
      CHECKED_FD_CLR(iod->sd, &sinfo->fds_results_r);
      CHECKED_FD_CLR(iod->sd, &sinfo->fds_results_w);
    }

    if (sinfo->max_sd == iod->sd)
      sinfo->max_sd--;

    IOD_PROPCLR(iod, IOD_REGISTERED);
  }
  return 1;
}

int select_iod_modify(mspool *nsp, msiod *iod, enum engine_event ev_set,
                      enum engine_event ev_clr) {
  int sd;
  struct select_info *sinfo = (struct select_info *)nsp->engine_data;

  assert((ev_set & ev_clr) == 0);

  iod->watched_events |= ev_set;
  iod->watched_events &= ~ev_clr;

  sd = nsi_getsd(iod);

  /* -- set events -- */
  if (ev_set & EV_READ)
    CHECKED_FD_SET(sd, &sinfo->fds_master_r);

  if (ev_set & EV_WRITE)
    CHECKED_FD_SET(sd, &sinfo->fds_master_w);

  if (ev_set & EV_EXCEPT)
    CHECKED_FD_SET(sd, &sinfo->fds_master_x);

  /* -- clear events -- */
  if (ev_clr & EV_READ)
    CHECKED_FD_CLR(sd, &sinfo->fds_master_r);

  if (ev_clr & EV_WRITE)
    CHECKED_FD_CLR(sd, &sinfo->fds_master_w);

  if (ev_clr & EV_EXCEPT)
    CHECKED_FD_CLR(sd, &sinfo->fds_master_x);

  /* -- update max_sd -- */
  if (ev_set != EV_NONE)
    sinfo->max_sd = MAX(sinfo->max_sd, sd);
  else if (iod->events_pending == 1 && (sinfo->max_sd == sd))
    sinfo->max_sd--;

  return 1;
}
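
/*
 * Editor's note -- worked example of the max_sd bookkeeping above
 * (illustration only, not original code): with descriptors 4, 7 and 9
 * watched, max_sd is 9 and select() is called with nfds = max_sd + 1 = 10.
 * When the last event on fd 9 completes, max_sd is only decremented to 8,
 * even though the next watched descriptor is 7.  The value is therefore a
 * cheap upper bound rather than an exact maximum: select() may scan a few
 * unused slots, which is harmless, and the MAX() in select_iod_modify()
 * pulls max_sd back up as soon as a higher descriptor is watched again.
 */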

int select_loop(mspool *nsp, int msec_timeout) {
  int results_left = 0;
  int event_msecs; /* msecs before an event goes off */
  int combined_msecs;
  int sock_err = 0;
  struct timeval select_tv;
  struct timeval *select_tv_p;
  struct select_info *sinfo = (struct select_info *)nsp->engine_data;

  assert(msec_timeout >= -1);

  if (nsp->events_pending == 0)
    return 0; /* No need to wait on 0 events ... */

  do {
    if (nsp->tracelevel > 6)
      nsock_trace(nsp, "wait_for_events");

    if (nsp->next_ev.tv_sec == 0)
      event_msecs = -1; /* None of the events specified a timeout */
    else
      event_msecs = MAX(0, TIMEVAL_MSEC_SUBTRACT(nsp->next_ev, nsock_tod));

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* Force a low timeout when capturing packets on systems where
     * the pcap descriptor is not select()able. */
    if (has_pending_pcap_read(nsp))
      if (event_msecs > PCAP_POLL_INTERVAL)
        event_msecs = PCAP_POLL_INTERVAL;
#endif
#endif

    /* We cast to unsigned because we want -1 to be very high (since it means
     * no timeout) */
    combined_msecs = MIN((unsigned)event_msecs, (unsigned)msec_timeout);

    /* Set up the timeval pointer we will give to select() */
    memset(&select_tv, 0, sizeof(select_tv));
    if (combined_msecs > 0) {
      select_tv.tv_sec = combined_msecs / 1000;
      select_tv.tv_usec = (combined_msecs % 1000) * 1000;
      select_tv_p = &select_tv;
    } else if (combined_msecs == 0) {
      /* We want tv_sec and tv_usec to be zero, but they already are from the
       * memset above */
      select_tv_p = &select_tv;
    } else {
      assert(combined_msecs == -1);
      select_tv_p = NULL;
    }

#if HAVE_PCAP
    /* Do a non-blocking read on pcap devices that don't support select().
     * If anything was read, don't do usleep() or select(); just leave this
     * loop. */
    if (pcap_read_on_nonselect(nsp)) {
      /* okay, something was read. */
    } else
#endif
    {
      /* Set up the descriptors for select */
      sinfo->fds_results_r = sinfo->fds_master_r;
      sinfo->fds_results_w = sinfo->fds_master_w;
      sinfo->fds_results_x = sinfo->fds_master_x;

      results_left = fselect(sinfo->max_sd + 1, &sinfo->fds_results_r,
                             &sinfo->fds_results_w, &sinfo->fds_results_x,
                             select_tv_p);
      if (results_left == -1)
        sock_err = socket_errno();
    }

    gettimeofday(&nsock_tod, NULL); /* Due to usleep or select delay */
  } while (results_left == -1 && sock_err == EINTR); /* repeat only if a signal occurred */

  if (results_left == -1 && sock_err != EINTR) {
    nsock_trace(nsp, "nsock_loop error %d: %s", sock_err, socket_strerror(sock_err));
    nsp->errnum = sock_err;
    return -1;
  }

  iterate_through_event_lists(nsp);

  return 1;
}
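
/*
 * Editor's note -- worked example of the (unsigned) cast in select_loop()
 * above (illustration only): -1 means "no timeout" for both event_msecs and
 * msec_timeout, and casting it to unsigned turns it into UINT_MAX, so a real
 * timeout always wins the MIN():
 *
 *   event_msecs = -1, msec_timeout = 500  ->  MIN(UINT_MAX, 500) == 500
 *   event_msecs = 20, msec_timeout = -1   ->  MIN(20, UINT_MAX) == 20
 *   event_msecs = -1, msec_timeout = -1   ->  MIN(UINT_MAX, UINT_MAX),
 *       which converts back to -1 when stored in the int combined_msecs
 *       (the branch the assert expects), so select() gets a NULL timeout
 *       and blocks indefinitely.
 */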

/* ---- INTERNAL FUNCTIONS ---- */

/* Iterate through all the event lists (such as connect_events, read_events,
 * timer_events, etc) and take action for those that have completed (due to
 * timeout, i/o, etc) */
void iterate_through_event_lists(mspool *nsp) {
  gh_list_elem *iod, *iod_next, *iod_last, *timer_last;
  gh_list_elem *current, *next, *last;
  struct select_info *sinfo = (struct select_info *)nsp->engine_data;
  msevent *nse;

  /* Clear it -- we will find the next event as we go through the list */
  nsp->next_ev.tv_sec = 0;

  /* We keep the events separate because we want to handle them in the
   * order: connect => read => write => timer, for several reasons:
   *
   *   1) Makes sure we have gone through all the net i/o events before
   *      a timer expires (it would be a shame to time out after the data
   *      was available but before we delivered the events).
   *
   *   2) The connect() results often lead to a read or write that can be
   *      processed in the same cycle.  In the same way, read() often
   *      leads to write().
   */
  iod_last = GH_LIST_LAST_ELEM(&nsp->active_iods);
  timer_last = GH_LIST_LAST_ELEM(&nsp->timer_events);

  /* foreach active IOD */
  for (iod = GH_LIST_FIRST_ELEM(&nsp->active_iods);
       iod != NULL && GH_LIST_ELEM_PREV(iod) != iod_last;
       iod = iod_next) {
    int i = 0;
    msiod *nsi;
    gh_list *ev_lists[5];

    nsi = (msiod *)GH_LIST_ELEM_DATA(iod);
    assert(nsi);

    IOD_PROPSET(nsi, IOD_LOCK);

    ev_lists[i++] = &nsi->connect_events;
    ev_lists[i++] = &nsi->read_events;
    ev_lists[i++] = &nsi->write_events;
#if HAVE_PCAP
    ev_lists[i++] = &nsi->pcap_read_events;
#endif
    ev_lists[i] = NULL;

    for (i = 0; ev_lists[i] != NULL; i++) {
      last = GH_LIST_LAST_ELEM(ev_lists[i]);

      for (current = GH_LIST_FIRST_ELEM(ev_lists[i]);
           current != NULL && GH_LIST_ELEM_PREV(current) != last;
           current = next) {
        enum engine_event evmask = EV_NONE;
        int sd;

        nse = (msevent *)GH_LIST_ELEM_DATA(current);

#if HAVE_PCAP
        if (nse->iod->pcap)
          sd = ((mspcap *)nse->iod->pcap)->pcap_desc;
        else
#endif
          sd = nse->iod->sd;

        if (FD_ISSET(sd, &sinfo->fds_results_r))
          evmask |= EV_READ;
        if (FD_ISSET(sd, &sinfo->fds_results_w))
          evmask |= EV_WRITE;
        if (FD_ISSET(sd, &sinfo->fds_results_x))
          evmask |= EV_EXCEPT;

        process_event(nsp, ev_lists[i], nse, evmask);

        next = GH_LIST_ELEM_NEXT(current);

        if (nse->event_done)
          gh_list_remove_elem(ev_lists[i], current);
      }
    }

    iod_next = GH_LIST_ELEM_NEXT(iod);

    IOD_PROPCLR(nsi, IOD_LOCK);
    if (IOD_PROPGET(nsi, IOD_TO_DELETE))
      nsi_delete(nsi, nsi->pending_resp);
  }

  for (current = GH_LIST_FIRST_ELEM(&nsp->timer_events);
       current != NULL && GH_LIST_ELEM_PREV(current) != timer_last;
       current = next) {
    nse = (msevent *)GH_LIST_ELEM_DATA(current);

    process_event(nsp, &nsp->timer_events, nse, EV_NONE);

    next = GH_LIST_ELEM_NEXT(current);
    if (nse->event_done)
      gh_list_remove_elem(&nsp->timer_events, current);
  }
}
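
/*
 * Editor's note -- minimal usage sketch for this engine (not part of the
 * original file; the nsock entry points named below are assumptions based on
 * the public nsock.h of this era).  A caller never touches engine_select
 * directly: it creates a pool, schedules events, and runs the event loop,
 * which ends up in select_loop() above.
 *
 *   nsock_pool nsp = nsp_new(NULL);   // pool creation picks an IO engine
 *   // ... create iods and schedule events (e.g. nsock_connect_tcp()) ...
 *   nsock_loop(nsp, -1);              // block until all events complete
 *   nsp_delete(nsp);
 */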