tailq介绍 TAILQ是linux内核对双向队列操作的一种抽象,能实现操作队列需要的各种操作:插入元素,删除元素,遍历队列等,其封装是对应的宏定义,下面详细说明tailq的操作,从定义,初始化...tailq的宏定义API (1)定义:TAILQ_ENTRY(type) 初始化一个type类型的entry #define TAILQ_ENTRY(type) \ struct {...(head) 初始化头部,其中head是上面的TAILQ_HEAD #define TAILQ_INIT(head) do { \ TAILQ_FIRST...(5)删除:TAILQ_REMOVE(head, elm, field) head是TAILQ_HEAD的头部,elm是对应需要处理的节点,field就是对应上面的TAILQ_ENTRY #define...(6)遍历:TAILQ_FOREACH(var, head, field) var是临时变量,head对应TAILQ_HEAD的定义,field对应TAILQ_ENTRY #define TAILQ_FOREACH
. */ // 放入队列的操作都存在这个tailq队列里 struct rd_kafka_op_tailq rkq_q; /* TAILQ_HEAD(, rd_kafka_op_s...(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq); rd_kafka_q_t *fwdq; int cnt =...的队列挂到tmpq这个临时tailq上面, 减少lock的时间 TAILQ_MOVE(&tmpq, &rkq->rkq_q, rko_link); /* Zero out queue...(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq); int32_t cnt = 0; int64_t size...(rko = TAILQ_FIRST(&rkq->rkq_q)) && timeout_ms !
(name, type) _TAILQ_HEAD(name, struct type,): #define _TAILQ_HEAD(name, type, qual)...(type) _TAILQ_ENTRY(struct type,) 实际是用最少侵入式的方式实现了一个类似于C++的模板的机制, 定义中的type就是队列里元素的类型, 可以是任意struct类型, 这个..._TAILQ_ENTRY(type, qual)放在这个struct类型的定义里,是其中的一个成员, 然后各个元素通过这个TAILQ_ENTRY成员彼此串联起来, 松耦合在一起 #define _TAILQ_ENTRY...tqh_last)) 要理解这个其实关键一点是上面定义的TAILQ_HEAD和TAILQ_ENTRY在结构和内存布局上是一样一样的....(head)->tqh_last)是最后一个元素的next指针的地址, 因为TAILQ_ENTRY(type)这个定义是没有类型名的,我们不能直接cast成 TAILQ_ENTRY(type)类型, 所以就只能
(_nty_coroutine) ready_next; //等待TAILQ_ENTRY(_nty_coroutine) defer_next;TAILQ_ENTRY(_nty_coroutine) cond_next...;TAILQ_ENTRY(_nty_coroutine) io_next;TAILQ_ENTRY(_nty_coroutine) compute_next;struct {void *buf;size_t...sched->sleeping); //红黑树初始化RB_INIT(&sched->waiting);sched->birth = nty_coroutine_usec_now(); //获取当前时间TAILQ_INIT...(&sched->ready); //队列初始化TAILQ_INIT(&sched->defer);LIST_INIT(&sched->busy); //链表初始化这里调度器就是对各种状态协程调度,...TAILQ_EMPTY(&sched->ready)) {nty_coroutine *co = TAILQ_FIRST(&sched->ready);TAILQ_REMOVE(&co->sched->
主要是通过TimerManager来管理多个timer, 达到处理定时任务的效果 TimerManager定义: typedef struct rd_kafka_timers_s { TAILQ_HEAD...rkts_lock; cnd_t rkts_cond; int rkts_enabled; } rd_kafka_timers_t; 使用TAILQ...rtmr_callback) (rd_kafka_timers_t *rkts, void *arg); void *rtmr_arg; } rd_kafka_timer_t; rtmr_link : TAILQ...(first = TAILQ_FIRST(&rkts->rkts_timers)) || first->rtmr_next > rtmr->rtmr_next) { TAILQ_INSERT_HEAD...int)(sleeptime / 1000)); } } now = rd_clock(); while ((rtmr = TAILQ_FIRST
(rd_segment_s) seg_link; /*<< rbuf_segments Link */ tailq元素 char *seg_p; /**...内包一个rd_segment_t的list 定义: typedef struct rd_buf_s { struct rd_segment_head rbuf_segments; /**< TAILQ...list of segments */ segment tailq的头指针 size_t rbuf_segment_cnt; /**seg_absof) seg = TAILQ_FIRST(&rbuf->rbuf_segments); // 遍历查找...= seg ; next = TAILQ_PREV(next, rd_segment_head, seg_link)) rd_buf_destroy_segment(rbuf
(, rd_kafka_metadata_cache_entry) rkmc_expiry; // 使用tailq来存储所有被cached的entry, 过期时间早的会被排在 tailq的前面...rk_metadata_cache.rkmc_avl, rd_kafka_metadata_cache_entry_cmp, 0); // 初始化tailq...rkmce->rkmce_mtopic.partitions), rd_kafka_metadata_partition_id_cmp); // 插到缓存tailq...(&rk->rk_metadata_cache.rkmc_expiry); // 清除每一个entry while ((rkmce = TAILQ_FIRST...rd_ts_t now = rd_clock(); struct rd_kafka_metadata_cache_entry *rkmce; //过期时间早的会被排在 tailq
struct rd_kafka_msg_s { rd_kafka_message_t rkm_rkmessage; /* MUST be first field */ // 使其成为tailq...的元素 TAILQ_ENTRY(rd_kafka_msg_s) rkm_link; int rkm_flags; // 时间戳, 分两类: 客户端生成时的时间和...rd_kafka_msgq_t 所在文件: src/rdkafka_msg.h 其实就是简单封装的rd_kafka_msg_t队列 定义: typedef struct rd_kafka_msgq_s { TAILQ_HEAD...rd_kafka_msg_t *rd_kafka_msgq_pop (rd_kafka_msgq_t *rkmq) { rd_kafka_msg_t *rkm; if (((rkm = TAILQ_FIRST...RD_UNUSED void rd_kafka_msgq_enq (rd_kafka_msgq_t *rkmq, rd_kafka_msg_t *rkm) { TAILQ_INSERT_TAIL
在librdkafka内部使用rd_kafka_itopic, 它也有自己的引用计数, 有点罗嗦啊~ 定义: struct rd_kafka_itopic_s { // 定义成tailq...的元素 TAILQ_ENTRY(rd_kafka_itopic_s) rkt_link; // 引用计数 rd_refcnt_t rkt_refcnt;...rkt_ua = rd_kafka_toppar_new(rkt, RD_KAFKA_PARTITION_UA); // 加入到对应的rd_kafka_t中的topic列表 TAILQ_INSERT_TAIL...rd_list_t query_topics; rd_list_init(&query_topics, 0, rd_free); rd_kafka_rdlock(rk); TAILQ_FOREACH...rktp_msgq); cnt = rd_atomic32_get(&uas.rkmq_msg_cnt); rd_kafka_toppar_unlock(rktp_ua); TAILQ_FOREACH_SAFE
rdkafka_buf.h 这个结构涉及的操作很多, 我会在后面随着代码的深入了解来作补充和更正 定义: struct rd_kafka_buf_s { /* rd_kafka_buf_t */ TAILQ_ENTRY...(rd_kafka_buf_s) rkbuf_link; // 这个rd_kafka_buf_s定义为tailq的元素 int32_t rkbuf_corrid; // 对应于kafka协议中...rd_kafka_bufq_t 所在文件: src/rdkafka_buf.h 将上面的rd_kafka_buf_s封装成队列 定义: typedef struct rd_kafka_bufq_s { TAILQ_HEAD
int rte_bus_scan(void) { int ret; struct rte_bus *bus = NULL; /*遍历bus链表,执行相应bus的scan函数*/ TAILQ_FOREACH...0000:00:04.0/uio/uio2 [root@domain uio1]# ls /dev/uio1 /*mknod*/ /dev/uio1 3.2 网卡资源链表管理 全局变量rte_uio_tailq...(gdb) p rte_uio_tailq.head[0] /*全局资源尾队列头*/ $7 = { tailq_head = { tqh_first = 0x7f577fd9e200,...查询到PCI:0000:00:07.0对应mapped_pci_resource资源,gdb输出map资源信息*/ (gdb) p (struct mapped_pci_resource)rte_uio_tailq.head...->tailq_head.tqh_first.next.tqe_next.next.tqe_next.next.tqe_next $18 = { next = { tqe_next = 0x0
首先给出event结构体的声明,它位于event.h文件中: 1struct event { 2 3 TAILQ_ENTRY (event) ev_next; 4 5 TAILQ_ENTRY...(event) ev_active_next; 6 7 TAILQ_ENTRY (event) ev_signal_next; 8 9 unsigned int min_heap_idx
. */ int final; //父节点 struct ConfNode_ *parent; //头节点,next节点 TAILQ_HEAD(, ConfNode_) head; TAILQ_ENTRY..."eth0") */ TAILQ_ENTRY(LiveDeviceName_) next;} LiveDeviceName;pd = SCCalloc(1, sizeof(LiveDeviceName)...);TAILQ_INSERT_TAIL(&pre_live_devices, pd, next);g_engine_mode默认为IDS,IPS是可以drop包(--simulate-ips)EngineModeSetIPS
, leader, 生产, 消费, 各种定时timer都在里面 定义, 这个结构体巨庞大 struct rd_kafka_toppar_s { /* rd_kafka_toppar_t */ TAILQ_ENTRY...(rd_kafka_toppar_s) rktp_rklink; /* rd_kafka_t link */ TAILQ_ENTRY(rd_kafka_toppar_s) rktp_rkblink...rd_kafka_broker_t link*/ CIRCLEQ_ENTRY(rd_kafka_toppar_s) rktp_fetchlink; /* rkb_fetch_toppars */ TAILQ_ENTRY...(rd_kafka_toppar_s) rktp_rktlink; /* rd_kafka_itopic_t link*/ TAILQ_ENTRY(rd_kafka_toppar_s)...(rk); /* Find or create topic */ // 所有的 rd_kafka_itopic_t对象都存在rd_kafka_t的rkt_topic的tailq
我们会在后期分析具体的流程时再作深入讨论 ---- struct rd_kafka_op_s 所在文件: src/rdkafka_op.h(c) 定义: struct rd_kafka_op_s { // 加上tailq...的元素域 TAILQ_ENTRY(rd_kafka_op_s) rko_link; // op的类型 rd_kafka_op_type_t rko_type;
(knote) kn_selnext; /* for struct selinfo */ struct knlist *kn_knlist; /* f_attach populated */ TAILQ_ENTRY...fd索引的数组(同open file table)用于关闭fd时删除对应的knode struct kqueue { struct mtx kq_lock; int kq_refcnt; TAILQ_ENTRY...(kqueue) kq_list; TAILQ_HEAD(, knote) kq_head; /* list of pending event */ int kq_count; /* number
, 回调函数和参数,事件优先级是当前event_base的中间级别(current_base->nactivequeues/2). event对象的定义见下: struct event { TAILQ_ENTRY...(event) ev_next; TAILQ_ENTRY (event) ev_active_next; TAILQ_ENTRY (event) ev_signal_next;
标记 13 switch (queue) { 14 case EVLIST_INSERTED: // I/O或Signal事件,加入已注册事件链表 15 TAILQ_INSERT_TAIL...case EVLIST_ACTIVE: // 就绪事件,加入激活链表 18 base->event_count_active++; 19 TAILQ_INSERT_TAIL
首先,event_config_new生成了一个event_config,struct event_config定义如下: struct event_config { TAILQ_HEAD(event_configq
create_ib_device ibv_query_device nvmf_rdma_is_rxe_device TAILQ_INSERT_TAIL...SPDK_NVME_OPC_WRITE -> WRITE sqid:28 cid:33 nsid:1 lba:0 len:2 SGL DATA BLOCK OFFSET 0x0 len:0x400 TAILQ_INSERT_TAIL...bdev_io_submit(bdev_io) spdk_bdev_io_get_thread(bdev_io) TAILQ_INSERT_TAIL...bdev_io_submit(bdev_io) bdev_io_do_submit(bdev_ch, bdev_io) spdk_likely(TAILQ_EMPTY...accel_sequence_get_task task->op_code = ACCEL_OPC_COPY TAILQ_INSERT_TAIL
领取专属 10元无门槛券
手把手带您无忧上云