The member variables of the ev_loop struct in libev

Source: Internet
Author: User

The member variables of the ev_loop struct in libev

1. ev_loop is the struct used by libev to describe the event loop. The definition in libev is relatively difficult. Here we extract it and make comments to facilitate learning. Libev is defined as follows:

/*
 * The event-loop structure. Most members are pulled in from ev_vars.h via
 * the VAR X-macro, so the same list can also generate the accessor macros
 * in ev_wrap.h. NOTE(review): this is an excerpt from libev's ev.c; it
 * depends on ev_vars.h / ev_wrap.h and libev's internal types to compile.
 */
struct ev_loop
{
  ev_tstamp ev_rt_now;                 /* cached real ("wall clock") time of the current iteration */
#define ev_rt_now ((loop)->ev_rt_now)  /* let code say ev_rt_now while really accessing the member */
#define VAR(name,decl) decl;           /* expand each VAR(...) in ev_vars.h into a member declaration */
#include "ev_vars.h"
#undef VAR
};
#include "ev_wrap.h"                   /* defines name -> ((loop)->name) accessor macros for all members */
/* the single statically-allocated default loop */
static struct ev_loop default_loop_struct;
/* points at default_loop_struct once ev_default_loop() has initialised it */
EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0;

ev_tstamp is a double type used for timestamp/time-event values. Here, #define is used to define ev_rt_now as ((loop)->ev_rt_now), so you can write ev_rt_now directly in the program while the macro hides the structure-member access behind it. This is one of the author's programming techniques.

/*
 * VAR emits the raw declaration it is given; VARx builds that declaration
 * ("type name") from a (type, name) pair and forwards it to VAR. Redefining
 * VAR before including ev_vars.h lets the same member list generate either
 * struct members or accessor macros.
 */
#define VAR(name,decl) decl;
#define VARx(type,name) VAR(name, type name)

These two lines work together: VARx(type, name) expands to the member declaration "type name;". For example, VARx(int, backend) declares the variable "int backend;" inside the struct. The specific variables are defined in the ev_vars.h header file.

The ev_wrap.h file serves the same purpose as "#define ev_rt_now ((loop)->ev_rt_now": it defines an accessor macro of that form for every member. The code for the two files is as follows:

/*
 * ev_vars.h — the master list of ev_loop members, written as VAR/VARx
 * X-macro invocations so the same list can generate struct members (ev.c)
 * and accessor macros (ev_wrap.h). Members are grouped per backend and
 * guarded by the corresponding EV_USE_*/EV_*_ENABLE feature macros;
 * EV_GENWRAP forces everything on when generating the wrapper header.
 * NOTE(review): excerpt from libev; types such as W, ANFD, ANPENDING,
 * ANHE, ANFS and macros such as EV_P_, NUMPRI come from elsewhere in libev.
 */
#define VARx(type,name) VAR(name, type name)

/* time keeping */
VARx(ev_tstamp, now_floor) /* last time we refreshed rt_time */
VARx(ev_tstamp, mn_now)    /* monotonic clock "now" */
VARx(ev_tstamp, rtmn_diff) /* difference realtime - monotonic time */

/* for reverse feeding of events */
VARx(W *, rfeeds)
VARx(int, rfeedmax)
VARx(int, rfeedcnt)

/* pending watchers, one array per priority level */
VAR (pendings, ANPENDING *pendings [NUMPRI])
VAR (pendingmax, int pendingmax [NUMPRI])
VAR (pendingcnt, int pendingcnt [NUMPRI])
VARx(int, pendingpri) /* highest priority currently pending */
VARx(ev_prepare, pending_w) /* dummy pending watcher */

/* blocking-time tuning and backend state */
VARx(ev_tstamp, io_blocktime)
VARx(ev_tstamp, timeout_blocktime)
VARx(int, backend)
VARx(int, activecnt) /* total number of active events ("refcount") */
VARx(EV_ATOMIC_T, loop_done)  /* signal by ev_break */
VARx(int, backend_fd)
VARx(ev_tstamp, backend_mintime) /* assumed typical timer resolution */
/* function pointers dispatching to the selected backend (select/poll/epoll/...) */
VAR (backend_modify, void (*backend_modify)(EV_P_ int fd, int oev, int nev))
VAR (backend_poll  , void (*backend_poll)(EV_P_ ev_tstamp timeout))

/* per-fd watcher lists and the self-pipe used to wake the loop */
VARx(ANFD *, anfds)
VARx(int, anfdmax)
VAR (evpipe, int evpipe [2])
VARx(ev_io, pipe_w)
VARx(EV_ATOMIC_T, pipe_write_wanted)
VARx(EV_ATOMIC_T, pipe_write_skipped)

#if !defined(_WIN32) || EV_GENWRAP
VARx(pid_t, curpid)
#endif

VARx(char, postfork)  /* true if we need to recreate kernel state after fork */

/* select() backend state */
#if EV_USE_SELECT || EV_GENWRAP
VARx(void *, vec_ri)
VARx(void *, vec_ro)
VARx(void *, vec_wi)
VARx(void *, vec_wo)
#if defined(_WIN32) || EV_GENWRAP
VARx(void *, vec_eo)
#endif
VARx(int, vec_max)
#endif

/* poll() backend state */
#if EV_USE_POLL || EV_GENWRAP
VARx(struct pollfd *, polls)
VARx(int, pollmax)
VARx(int, pollcnt)
VARx(int *, pollidxs) /* maps fds into structure indices */
VARx(int, pollidxmax)
#endif

/* epoll backend state */
#if EV_USE_EPOLL || EV_GENWRAP
VARx(struct epoll_event *, epoll_events)
VARx(int, epoll_eventmax)
VARx(int *, epoll_eperms)
VARx(int, epoll_epermcnt)
VARx(int, epoll_epermmax)
#endif

/* kqueue backend state */
#if EV_USE_KQUEUE || EV_GENWRAP
VARx(pid_t, kqueue_fd_pid)
VARx(struct kevent *, kqueue_changes)
VARx(int, kqueue_changemax)
VARx(int, kqueue_changecnt)
VARx(struct kevent *, kqueue_events)
VARx(int, kqueue_eventmax)
#endif

/* Solaris event-port backend state */
#if EV_USE_PORT || EV_GENWRAP
VARx(struct port_event *, port_events)
VARx(int, port_eventmax)
#endif

/* Windows IOCP backend state */
#if EV_USE_IOCP || EV_GENWRAP
VARx(HANDLE, iocp)
#endif

/* queued fd changes and the timer heap */
VARx(int *, fdchanges)
VARx(int, fdchangemax)
VARx(int, fdchangecnt)
VARx(ANHE *, timers)
VARx(int, timermax)
VARx(int, timercnt)

#if EV_PERIODIC_ENABLE || EV_GENWRAP
VARx(ANHE *, periodics)
VARx(int, periodicmax)
VARx(int, periodiccnt)
#endif

/* idle watchers, one array per priority level */
#if EV_IDLE_ENABLE || EV_GENWRAP
VAR (idles, ev_idle **idles [NUMPRI])
VAR (idlemax, int idlemax [NUMPRI])
VAR (idlecnt, int idlecnt [NUMPRI])
#endif
VARx(int, idleall) /* total number */

/* prepare/check watchers run around each poll */
VARx(struct ev_prepare **, prepares)
VARx(int, preparemax)
VARx(int, preparecnt)
VARx(struct ev_check **, checks)
VARx(int, checkmax)
VARx(int, checkcnt)

#if EV_FORK_ENABLE || EV_GENWRAP
VARx(struct ev_fork **, forks)
VARx(int, forkmax)
VARx(int, forkcnt)
#endif

#if EV_CLEANUP_ENABLE || EV_GENWRAP
VARx(struct ev_cleanup **, cleanups)
VARx(int, cleanupmax)
VARx(int, cleanupcnt)
#endif

#if EV_ASYNC_ENABLE || EV_GENWRAP
VARx(EV_ATOMIC_T, async_pending)
VARx(struct ev_async **, asyncs)
VARx(int, asyncmax)
VARx(int, asynccnt)
#endif

/* inotify file-watching state */
#if EV_USE_INOTIFY || EV_GENWRAP
VARx(int, fs_fd)
VARx(ev_io, fs_w)
VARx(char, fs_2625) /* whether we are running in linux 2.6.25 or newer */
VAR (fs_hash, ANFS fs_hash [EV_INOTIFY_HASHSIZE])
#endif

/* signal handling */
VARx(EV_ATOMIC_T, sig_pending)
#if EV_USE_SIGNALFD || EV_GENWRAP
VARx(int, sigfd)
VARx(ev_io, sigfd_w)
VARx(sigset_t, sigfd_set)
#endif

VARx(unsigned int, origflags) /* original loop flags */

/* optional API conveniences */
#if EV_FEATURE_API || EV_GENWRAP
VARx(unsigned int, loop_count) /* total number of loop iterations/blocks */
VARx(unsigned int, loop_depth) /* #ev_run enters - #ev_run leaves */
VARx(void *, userdata)
/* C++ doesn't support the ev_loop_callback typedef here. stinks. */
VAR (release_cb, void (*release_cb)(EV_P) EV_THROW)
VAR (acquire_cb, void (*acquire_cb)(EV_P) EV_THROW)
VAR (invoke_cb , ev_loop_callback invoke_cb)
#endif

#undef VARx

 

/*
 * ev_wrap.h — auto-generated accessor macros, one per ev_loop member.
 * Each macro rewrites a bare member name into ((loop)->name), so loop
 * internals can be written as if the members were plain variables while
 * actually going through the `loop` pointer in scope.
 */
#define acquire_cb ((loop)->acquire_cb)
#define activecnt ((loop)->activecnt)
#define anfdmax ((loop)->anfdmax)
#define anfds ((loop)->anfds)
#define async_pending ((loop)->async_pending)
#define asynccnt ((loop)->asynccnt)
#define asyncmax ((loop)->asyncmax)
#define asyncs ((loop)->asyncs)
#define backend ((loop)->backend)
#define backend_fd ((loop)->backend_fd)
#define backend_mintime ((loop)->backend_mintime)
#define backend_modify ((loop)->backend_modify)
#define backend_poll ((loop)->backend_poll)
#define checkcnt ((loop)->checkcnt)
#define checkmax ((loop)->checkmax)
#define checks ((loop)->checks)
#define cleanupcnt ((loop)->cleanupcnt)
#define cleanupmax ((loop)->cleanupmax)
#define cleanups ((loop)->cleanups)
#define curpid ((loop)->curpid)
#define epoll_epermcnt ((loop)->epoll_epermcnt)
#define epoll_epermmax ((loop)->epoll_epermmax)
#define epoll_eperms ((loop)->epoll_eperms)
#define epoll_eventmax ((loop)->epoll_eventmax)
#define epoll_events ((loop)->epoll_events)
#define evpipe ((loop)->evpipe)
#define fdchangecnt ((loop)->fdchangecnt)
#define fdchangemax ((loop)->fdchangemax)
#define fdchanges ((loop)->fdchanges)
#define forkcnt ((loop)->forkcnt)
#define forkmax ((loop)->forkmax)
#define forks ((loop)->forks)
#define fs_2625 ((loop)->fs_2625)
#define fs_fd ((loop)->fs_fd)
#define fs_hash ((loop)->fs_hash)
#define fs_w ((loop)->fs_w)
#define idleall ((loop)->idleall)
#define idlecnt ((loop)->idlecnt)
#define idlemax ((loop)->idlemax)
#define idles ((loop)->idles)
#define invoke_cb ((loop)->invoke_cb)
#define io_blocktime ((loop)->io_blocktime)
#define iocp ((loop)->iocp)
#define kqueue_changecnt ((loop)->kqueue_changecnt)
#define kqueue_changemax ((loop)->kqueue_changemax)
#define kqueue_changes ((loop)->kqueue_changes)
#define kqueue_eventmax ((loop)->kqueue_eventmax)
#define kqueue_events ((loop)->kqueue_events)
#define kqueue_fd_pid ((loop)->kqueue_fd_pid)
#define loop_count ((loop)->loop_count)
#define loop_depth ((loop)->loop_depth)
#define loop_done ((loop)->loop_done)
#define mn_now ((loop)->mn_now)
#define now_floor ((loop)->now_floor)
#define origflags ((loop)->origflags)
#define pending_w ((loop)->pending_w)
#define pendingcnt ((loop)->pendingcnt)
#define pendingmax ((loop)->pendingmax)
#define pendingpri ((loop)->pendingpri)
#define pendings ((loop)->pendings)
#define periodiccnt ((loop)->periodiccnt)
#define periodicmax ((loop)->periodicmax)
#define periodics ((loop)->periodics)
#define pipe_w ((loop)->pipe_w)
#define pipe_write_skipped ((loop)->pipe_write_skipped)
#define pipe_write_wanted ((loop)->pipe_write_wanted)
#define pollcnt ((loop)->pollcnt)
#define pollidxmax ((loop)->pollidxmax)
#define pollidxs ((loop)->pollidxs)
#define pollmax ((loop)->pollmax)
#define polls ((loop)->polls)
#define port_eventmax ((loop)->port_eventmax)
#define port_events ((loop)->port_events)
#define postfork ((loop)->postfork)
#define preparecnt ((loop)->preparecnt)
#define preparemax ((loop)->preparemax)
#define prepares ((loop)->prepares)
#define release_cb ((loop)->release_cb)
#define rfeedcnt ((loop)->rfeedcnt)
#define rfeedmax ((loop)->rfeedmax)
#define rfeeds ((loop)->rfeeds)
#define rtmn_diff ((loop)->rtmn_diff)
#define sig_pending ((loop)->sig_pending)
#define sigfd ((loop)->sigfd)
#define sigfd_set ((loop)->sigfd_set)
#define sigfd_w ((loop)->sigfd_w)
#define timeout_blocktime ((loop)->timeout_blocktime)
#define timercnt ((loop)->timercnt)
#define timermax ((loop)->timermax)
#define timers ((loop)->timers)
#define userdata ((loop)->userdata)
#define vec_eo ((loop)->vec_eo)
#define vec_max ((loop)->vec_max)
#define vec_ri ((loop)->vec_ri)
#define vec_ro ((loop)->vec_ro)
#define vec_wi ((loop)->vec_wi)
#define vec_wo ((loop)->vec_wo)


Analysis and comparison of several classic network server architecture models

Compared with the traditional network programming method, event-driven greatly reduces resource usage, increases service reception capability, and improves network transmission efficiency. As for the server model mentioned in this article, you can refer to a lot of implementation code on the search network. Therefore, this article will not stick to the display and analysis of source code, but focus on the introduction and comparison of models. The server model using the libev event-driven Library provides the implementation code. This article involves the thread/time legend, which only indicates that the thread does have a blocking latency on each IO, but does not guarantee the correctness of the latency ratio and IO execution sequence. In addition, the interfaces mentioned in this article are only Unix/Linux interfaces that I am familiar with. Windows interfaces are not recommended. You can check the corresponding Windows interfaces on your own. Almost all of the network programming interfaces that programmers encounter for the first time start with interfaces such as listen (), send (), and recv. By using these interfaces, you can easily build server/client models. We assume that we want to create a simple server program to provide a single client with a content service similar to "one question and one answer. Figure 1. Simple Server/client model with one answer, we noticed that most socket interfaces are blocking. The so-called blocking interface means that a system call (generally an I/O interface) does not return the call result and keeps the current thread congested. It is returned only when the system call gets the result or times out and an error occurs. In fact, almost all I/O interfaces (including socket interfaces) are blocked unless otherwise specified. This brings a big problem to network programming. For example, when sending () is called, the thread will be blocked. 
During this period, the thread cannot perform any operations or respond to any network requests. This poses a challenge to network programming with multiple clients and multiple business logic. At this time, many programmers may choose multiple threads to solve this problem. The simplest solution for multi-threaded server programs to deal with multi-client network applications is to use multiple threads (or multi-process) on the server side ). Multi-thread (or multi-process) is designed to give each connection an independent thread (or process), so that the blocking of any connection will not affect other connections. The specific use of multi-process or multi-thread does not have a specific mode. Traditionally, the process overhead is much greater than the thread. Therefore, if you need to provide services for a large number of clients at the same time, you are not recommended to use multiple processes; if a single service execution body consumes a large amount of CPU resources, such as large-scale or long-term data operations or file access, the process is safer. Generally, use pthread_create () to create a new thread and fork () to create a new process. We assume that we have higher requirements for the above server/client model, that is, to allow the server to provide Q & A services for multiple clients at the same time. So we have the following model. Figure 2. in the preceding thread/time legend, the main thread continuously waits for client connection requests. If a connection exists, a new thread is created, and provides the same Q & A service for the queue in the new thread. Many beginners may not understand why a socket can be accept multiple times. In fact, the socket designer may leave a foreshadowing for the case of multiple clients, so that accept () can return a new socket. 
The following is the prototype of the accept interface: int accept (int s, struct sockaddr * addr, socklen_t * addrlen); input parameter s is from socket (), bind () and listen () the socket handle value that follows in. After bind () and listen () are executed, the operating system has started to listen to all connection requests at the specified port. If there is a request, the connection request is added to the Request queue. The accept () interface is called to extract the first connection information from the request queue of socket s and create a new socket return handle similar to that of socket s. New so... remaining full text>

Analysis and comparison of several classic network server architecture models

Compared with the traditional network programming method, event-driven greatly reduces resource usage, increases service reception capability, and improves network transmission efficiency. As for the server model mentioned in this article, you can refer to a lot of implementation code on the search network. Therefore, this article will not stick to the display and analysis of source code, but focus on the introduction and comparison of models. The server model using the libev event-driven Library provides the implementation code. This article involves the thread/time legend, which only indicates that the thread does have a blocking latency on each IO, but does not guarantee the correctness of the latency ratio and IO execution sequence. In addition, the interfaces mentioned in this article are only Unix/Linux interfaces that I am familiar with. Windows interfaces are not recommended. You can check the corresponding Windows interfaces on your own. Almost all of the network programming interfaces that programmers encounter for the first time start with interfaces such as listen (), send (), and recv. By using these interfaces, you can easily build server/client models. We assume that we want to create a simple server program to provide a single client with a content service similar to "one question and one answer. Figure 1. Simple Server/client model with one answer, we noticed that most socket interfaces are blocking. The so-called blocking interface means that a system call (generally an I/O interface) does not return the call result and keeps the current thread congested. It is returned only when the system call gets the result or times out and an error occurs. In fact, almost all I/O interfaces (including socket interfaces) are blocked unless otherwise specified. This brings a big problem to network programming. For example, when sending () is called, the thread will be blocked. 
During this period, the thread cannot perform any operations or respond to any network requests. This poses a challenge to network programming with multiple clients and multiple business logic. At this time, many programmers may choose multiple threads to solve this problem. The simplest solution for multi-threaded server programs to deal with multi-client network applications is to use multiple threads (or multi-process) on the server side ). Multi-thread (or multi-process) is designed to give each connection an independent thread (or process), so that the blocking of any connection will not affect other connections. The specific use of multi-process or multi-thread does not have a specific mode. Traditionally, the process overhead is much greater than the thread. Therefore, if you need to provide services for a large number of clients at the same time, multi-process is not recommended; if a single service execution body consumes a large amount of CPU resources, such as large-scale or long-term data operations or file access, the process is safer. Generally, use pthread_create () to create a new thread and fork () to create a new process. We assume that we have higher requirements for the above server/client model, that is, to allow the server to provide Q & A services for multiple clients at the same time. So we have the following model. Figure 2. in the preceding thread/time legend, the main thread continuously waits for client connection requests. If a connection exists, a new thread is created, and provides the same Q & A service for the queue in the new thread. Many beginners may not understand why a socket can be accept multiple times. In fact, the socket designer may leave a foreshadowing for the case of multiple clients, so that accept () can return a new socket. 
The following is the prototype of the accept interface: int accept (int s, struct sockaddr * addr, socklen_t * addrlen); input parameter s is from socket (), bind () and listen () the socket handle value that follows in. After bind () and listen () are executed, the operating system has started to listen to all connection requests at the specified port. If there is a request, the connection request is added to the Request queue. The accept () interface is called to extract the first connection information from the request queue of socket s and create a new socket return handle similar to that of socket s. The new socket handle is later... the remaining full text>

Contact Us

The content source of this page is from Internet, which doesn't represent Alibaba Cloud's opinion; products and services mentioned on that page don't have any relationship with Alibaba Cloud. If the content of the page makes you feel confusing, please write us an email, we will handle the problem within 5 days after receiving your email.

If you find any instances of plagiarism from the community, please send an email to: info-contact@alibabacloud.com and provide relevant evidence. A staff member will contact you within 5 working days.

A Free Trial That Lets You Build Big!

Start building with 50+ products and up to 12 months usage for Elastic Compute Service

  • Sales Support

    1 on 1 presale consultation

  • After-Sales Support

    24/7 Technical Support 6 Free Tickets per Quarter Faster Response

  • Alibaba Cloud offers highly flexible support services tailored to meet your exact needs.