/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  internal queue handling
 *
 *  Authors: Waleri Fomin <fomin@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials
 * provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
43 #ifndef __IPZ_PT_FN_H__
44 #define __IPZ_PT_FN_H__
46 #define EHCA_PAGESHIFT 12
47 #define EHCA_PAGESIZE 4096UL
48 #define EHCA_PAGEMASK (~(EHCA_PAGESIZE-1))
49 #define EHCA_PT_ENTRIES 512UL
51 #include "ehca_tools.h"
55 struct ipz_small_queue_page;
57 /* struct generic ehca page */
59 u8 entries[EHCA_PAGESIZE];
62 #define IPZ_SPAGE_PER_KPAGE (PAGE_SIZE / 512)
64 struct ipz_small_queue_page {
66 unsigned long bitmap[IPZ_SPAGE_PER_KPAGE / BITS_PER_LONG];
70 struct list_head list;
73 /* struct generic queue in linux kernel virtual memory (kv) */
75 u64 current_q_offset; /* current queue entry */
77 struct ipz_page **queue_pages; /* array of pages belonging to queue */
78 u32 qe_size; /* queue entry size */
80 u32 queue_length; /* queue length allocated in bytes */
82 u32 toggle_state; /* toggle flag - per page */
83 u32 offset; /* save offset within page for small_qp */
84 struct ipz_small_queue_page *small_page;
88 * return current Queue Entry for a certain q_offset
89 * returns address (kv) of Queue Entry
91 static inline void *ipz_qeit_calc(struct ipz_queue *queue, u64 q_offset)
93 struct ipz_page *current_page;
94 if (q_offset >= queue->queue_length)
96 current_page = (queue->queue_pages)[q_offset >> EHCA_PAGESHIFT];
97 return ¤t_page->entries[q_offset & (EHCA_PAGESIZE - 1)];
101 * return current Queue Entry
102 * returns address (kv) of Queue Entry
104 static inline void *ipz_qeit_get(struct ipz_queue *queue)
106 return ipz_qeit_calc(queue, queue->current_q_offset);
/*
 * return current Queue Page , increment Queue Page iterator from
 * page to page in struct ipz_queue, last increment will return 0! and
 * NOT wrap
 * returns address (kv) of Queue Page
 * warning don't use in parallel with ipz_QE_get_inc()
 */
void *ipz_qpageit_get_inc(struct ipz_queue *queue);
119 * return current Queue Entry, increment Queue Entry iterator by one
120 * step in struct ipz_queue, will wrap in ringbuffer
121 * returns address (kv) of Queue Entry BEFORE increment
122 * warning don't use in parallel with ipz_qpageit_get_inc()
124 static inline void *ipz_qeit_get_inc(struct ipz_queue *queue)
126 void *ret = ipz_qeit_get(queue);
127 queue->current_q_offset += queue->qe_size;
128 if (queue->current_q_offset >= queue->queue_length) {
129 queue->current_q_offset = 0;
130 /* toggle the valid flag */
131 queue->toggle_state = (~queue->toggle_state) & 1;
138 * return a bool indicating whether current Queue Entry is valid
140 static inline int ipz_qeit_is_valid(struct ipz_queue *queue)
142 struct ehca_cqe *cqe = ipz_qeit_get(queue);
143 return ((cqe->cqe_flags >> 7) == (queue->toggle_state & 1));
147 * return current Queue Entry, increment Queue Entry iterator by one
148 * step in struct ipz_queue, will wrap in ringbuffer
149 * returns address (kv) of Queue Entry BEFORE increment
150 * returns 0 and does not increment, if wrong valid state
151 * warning don't use in parallel with ipz_qpageit_get_inc()
153 static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue)
155 return ipz_qeit_is_valid(queue) ? ipz_qeit_get_inc(queue) : NULL;
159 * returns and resets Queue Entry iterator
160 * returns address (kv) of first Queue Entry
162 static inline void *ipz_qeit_reset(struct ipz_queue *queue)
164 queue->current_q_offset = 0;
165 return ipz_qeit_get(queue);
169 * return the q_offset corresponding to an absolute address
171 int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset);
174 * return the next queue offset. don't modify the queue.
176 static inline u64 ipz_queue_advance_offset(struct ipz_queue *queue, u64 offset)
178 offset += queue->qe_size;
179 if (offset >= queue->queue_length) offset = 0;
183 /* struct generic page table */
185 u64 entries[EHCA_PT_ENTRIES];
188 /* struct page table for a queue, only to be used in pf */
190 /* queue page tables (kv), use u64 because we know the element length */
193 u32 n_ptes; /* number of page table entries */
194 u64 *current_pte_addr;
198 * constructor for a ipz_queue_t, placement new for ipz_queue_t,
199 * new for all dependent datastructors
200 * all QP Tables are the same
204 * returns true if ok, false if out of memory
206 int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
207 const u32 nr_of_pages, const u32 pagesize,
208 const u32 qe_size, const u32 nr_of_sg,
/*
 * destructor for a ipz_queue_t
 *  -# free queue
 *  see ipz_queue_ctor()
 *  returns true if ok, false if queue was NULL-ptr of free failed
 */
int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue);
220 * constructor for a ipz_qpt_t,
221 * placement new for struct ipz_queue, new for all dependent datastructors
222 * all QP Tables are the same,
224 * -# allocate+pin queue
226 * -# allocate+pin PTs
227 * -# link PTs to a ring, according to HCA Arch, set bit62 id needed
228 * -# the ring must have room for exactly nr_of_PTEs
231 void ipz_qpt_ctor(struct ipz_qpt *qpt,
235 const u8 lowbyte, const u8 toggle,
236 u32 * act_nr_of_QEs, u32 * act_nr_of_pages);
/*
 * return current Queue Entry, increment Queue Entry iterator by one
 * step in struct ipz_queue, will wrap in ringbuffer
 * returns address (kv) of Queue Entry BEFORE increment
 * warning don't use in parallel with ipz_qpageit_get_inc()
 * warning unpredictable results may occur if steps>act_nr_of_queue_entries
 * fix EQ page problems
 */
void *ipz_qeit_eq_get_inc(struct ipz_queue *queue);
249 * return current Event Queue Entry, increment Queue Entry iterator
250 * by one step in struct ipz_queue if valid, will wrap in ringbuffer
251 * returns address (kv) of Queue Entry BEFORE increment
252 * returns 0 and does not increment, if wrong valid state
253 * warning don't use in parallel with ipz_queue_QPageit_get_inc()
254 * warning unpredictable results may occur if steps>act_nr_of_queue_entries
256 static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue)
258 void *ret = ipz_qeit_get(queue);
260 if ((qe >> 7) != (queue->toggle_state & 1))
262 ipz_qeit_eq_get_inc(queue); /* this is a good one */
266 static inline void *ipz_eqit_eq_peek_valid(struct ipz_queue *queue)
268 void *ret = ipz_qeit_get(queue);
270 if ((qe >> 7) != (queue->toggle_state & 1))
275 /* returns address (GX) of first queue entry */
276 static inline u64 ipz_qpt_get_firstpage(struct ipz_qpt *qpt)
278 return be64_to_cpu(qpt->qpts[0]);
281 /* returns address (kv) of first page of queue page table */
282 static inline void *ipz_qpt_get_qpt(struct ipz_qpt *qpt)
287 #endif /* __IPZ_PT_FN_H__ */