New xdr_rdma.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2006, The Ohio State University.  All rights reserved.
 *
 * Portions of this source code were developed by the team members of
 * The Ohio State University's Network-Based Computing Laboratory (NBCL),
 * headed by Professor Dhabaleswar K. (DK) Panda.
 *
 * Acknowledgements for contributions from developers:
 *	Ranjit Noronha: noronha@cse.ohio-state.edu
 *	Lei Chai:	chail@cse.ohio-state.edu
 *	Weikuan Yu:	yuw@cse.ohio-state.edu
 */

#pragma ident	"@(#)xdr_rdma.c	1.4	05/06/08 SMI"

/*
 * xdr_rdma.c, XDR implementation using RDMA to move large chunks
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>

#include <rpc/types.h>
#include <rpc/xdr.h>
#include <rpc/rpc_sztypes.h>
#include <rpc/rpc_rdma.h>

static struct xdr_ops *xdrrdma_ops(void);

/*
 * A chunk list entry identifies a chunk of opaque data that is to be
 * moved separately from the rest of the RPC message.
 * xp_min_chunk == 0 is a special case for ENCODING; it means that the
 * incoming stream of data is not to be chunked.
 */

struct private {
	caddr_t		xp_offp;	/* current position in xdr buffer */
	int		xp_min_chunk;	/* smallest size that gets chunked */
	uint_t		xp_flags;	/* controls setting for rdma xdr */
	int		xp_buf_size;	/* size of xdr buffer */
	struct clist	*xp_cl;		/* head of chunk list */
	struct clist	**xp_cl_next;	/* location to place/find next chunk */
	CONN		*xp_conn;	/* connection for chunk data xfer */
};

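/*
 * Illustrative sketch (not part of the implementation): xp_cl_next always
 * points at the location where the next chunk entry should be linked in,
 * so appending a hypothetical new entry `cle' is O(1) and never re-walks
 * the list:
 *
 *	*(xdrp->xp_cl_next) = cle;		// link in at the tail
 *	xdrp->xp_cl_next = &(cle->c_next);	// remember the new tail hook
 */
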
/*
 * The procedure xdrrdma_create initializes a stream descriptor for a
 * memory buffer.
 */
void
xdrrdma_create(XDR *xdrs, caddr_t addr, uint_t size,
	int min_chunk, struct clist *cl, enum xdr_op op, CONN *conn)
{
	struct private *xdrp;
	struct clist *cle;

	xdrs->x_op = op;
	xdrs->x_ops = xdrrdma_ops();
	xdrs->x_base = addr;
	xdrs->x_handy = size;
	xdrs->x_public = NULL;

	xdrp = (struct private *)kmem_zalloc(sizeof (struct private),
	    KM_SLEEP);
	xdrs->x_private = (caddr_t)xdrp;
	xdrp->xp_offp = addr;
	xdrp->xp_min_chunk = min_chunk;
	xdrp->xp_flags = 0;
	xdrp->xp_buf_size = size;
	xdrp->xp_cl = cl;
	if (op == XDR_ENCODE && cl != NULL) {
		/* Find last element in chunk list and set xp_cl_next */
		for (cle = cl; cle->c_next != NULL; cle = cle->c_next)
			;
		xdrp->xp_cl_next = &(cle->c_next);
	} else {
		xdrp->xp_cl_next = &(xdrp->xp_cl);
	}
	xdrp->xp_conn = conn;
	if (xdrp->xp_min_chunk == 0)
		xdrp->xp_flags |= RDMA_NOCHUNK;
}

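/*
 * Usage sketch, for illustration only: `buf', `bufsz', `conn', `xdr_args'
 * and `argsp' are hypothetical placeholders, and RDMA_MINCHUNK is assumed
 * to be the chunking threshold exported by <rpc/rpc_rdma.h>.
 *
 *	XDR xdrs;
 *	caddr_t buf = kmem_zalloc(bufsz, KM_SLEEP);
 *
 *	xdrrdma_create(&xdrs, buf, bufsz, RDMA_MINCHUNK, NULL,
 *	    XDR_ENCODE, conn);
 *	if (!xdr_args(&xdrs, argsp)) {
 *		... handle the encode failure ...
 *	}
 *	XDR_DESTROY(&xdrs);
 */
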
/* ARGSUSED */
void
xdrrdma_destroy(XDR *xdrs)
{
	(void) kmem_free(xdrs->x_private, sizeof (struct private));
}

struct clist *
xdrrdma_clist(XDR *xdrs)
{
	return (((struct private *)(xdrs->x_private))->xp_cl);
}

static bool_t
xdrrdma_getint32(XDR *xdrs, int32_t *int32p)
{
	struct private *xdrp = (struct private *)(xdrs->x_private);

	if ((xdrs->x_handy -= (int)sizeof (int32_t)) < 0)
		return (FALSE);

	/* LINTED pointer alignment */
	*int32p = (int32_t)ntohl((uint32_t)(*((int32_t *)(xdrp->xp_offp))));
	xdrp->xp_offp += sizeof (int32_t);

	return (TRUE);
}

static bool_t
xdrrdma_putint32(XDR *xdrs, int32_t *int32p)
{
	struct private *xdrp = (struct private *)(xdrs->x_private);

	if ((xdrs->x_handy -= (int)sizeof (int32_t)) < 0)
		return (FALSE);

	/* LINTED pointer alignment */
	*(int32_t *)xdrp->xp_offp = (int32_t)htonl((uint32_t)(*int32p));
	xdrp->xp_offp += sizeof (int32_t);

	return (TRUE);
}

/*
 * DECODE some bytes from an XDR stream
 */
static bool_t
xdrrdma_getbytes(XDR *xdrs, caddr_t addr, int len)
{
	struct private *xdrp = (struct private *)(xdrs->x_private);
	struct clist *cle = *(xdrp->xp_cl_next);
	struct clist *cls = *(xdrp->xp_cl_next);
	struct clist cl;
	bool_t retval = TRUE;
	uint32_t total_len = len;
	uint32_t sum_len = 0;
	uint32_t total_segments = 0;
	uint32_t actual_segments = 0;
	uint32_t status;
	uint32_t alen = 0;

	while (cle) {
		total_segments++;
		cle = cle->c_next;
	}
	cle = *(xdrp->xp_cl_next);

	/*
	 * If there was a chunk at the current offset,
	 * first record the destination address and length
	 * in the chunk list that came with the message, then
	 * RDMA READ the chunk data.
	 */
	if (cle != NULL &&
	    cle->c_xdroff == (xdrp->xp_offp - xdrs->x_base)) {
		for (actual_segments = 0;
		    actual_segments < total_segments; actual_segments++) {
			if (total_len <= 0)
				goto mem_sync;
			cle->c_daddr = (uint64)(uintptr_t)addr + sum_len;
			alen = 0;
			if (cle->c_len > total_len) {
				/*
				 * The chunk carries more bytes than we
				 * need; remember the original length so
				 * the leftover piece can be consumed by
				 * a later call.
				 */
				alen = cle->c_len;
				cle->c_len = total_len;
			}
			if (!alen)
				xdrp->xp_cl_next = &cle->c_next;

			sum_len += cle->c_len;
			total_len -= cle->c_len;

			if ((total_segments - actual_segments - 1) == 0 &&
			    total_len > 0) {
				cmn_err(CE_WARN,
				    "Provided read chunks are too short");
				retval = FALSE;
			}

			if ((total_segments - actual_segments - 1) > 0 &&
			    total_len == 0) {
#ifdef DEBUG
				cmn_err(CE_NOTE,
				    "Provided read chunks are too long "
				    "[total=%d, actual=%d]",
				    total_segments, actual_segments);
#endif
			}

			/*
			 * RDMA READ the chunk data from the remote end.
			 * First prep the destination buffer by registering
			 * it, then RDMA READ the chunk data.  Since we are
			 * doing streaming memory, sync the destination
			 * buffer to CPU and deregister the buffer.
			 */
			if (xdrp->xp_conn == NULL)
				return (FALSE);

			cl = *cle;
			cl.c_next = NULL;
			if (clist_register(xdrp->xp_conn, &cl, 0) !=
			    RDMA_SUCCESS)
				return (FALSE);
			cle->c_dmemhandle = cl.c_dmemhandle;
			cle->c_dsynchandle = cl.c_dsynchandle;

			/*
			 * Now read the chunk in.  Only the last read is
			 * issued with WAIT; earlier reads are pipelined
			 * with NOWAIT and complete in order ahead of it.
			 */
			if ((total_segments - actual_segments - 1) == 0 ||
			    total_len == 0) {
				status = RDMA_READ(xdrp->xp_conn, &cl, WAIT);
			} else {
				status = RDMA_READ(xdrp->xp_conn, &cl,
				    NOWAIT);
			}
			if (status != RDMA_SUCCESS) {
#ifdef DEBUG
				cmn_err(CE_WARN,
				    "xdrrdma_getbytes: RDMA_READ failed");
#endif
				retval = FALSE;
				goto out;
			}
			cle = cle->c_next;
		}
mem_sync:
		/*
		 * Sync the memory for the CPU.  The destination segments
		 * are contiguous in `addr', so one sync of sum_len bytes
		 * covers them all.
		 */
		cle = cls;
		cl = *cle;
		cl.c_next = NULL;
		cl.c_len = sum_len;
		if (clist_syncmem(xdrp->xp_conn, &cl, 0) != RDMA_SUCCESS) {
			retval = FALSE;
			goto out;
		}
out:
		/*
		 * Deregister the chunks
		 */
		cle = cls;
		cl = *cle;
		cl.c_next = NULL;
		cl.c_len = sum_len;
		(void) clist_deregister(xdrp->xp_conn, &cl, 0);
		if (alen) {
			/*
			 * Partially consumed chunk: advance the source
			 * address past the bytes just read and shrink
			 * the remaining length accordingly.
			 */
			cle->c_saddr = (uint64)(uintptr_t)cle->c_saddr +
			    cle->c_len;
			cle->c_len = alen - cle->c_len;
		}
		return (retval);
	}

	if ((xdrs->x_handy -= len) < 0)
		return (FALSE);

	bcopy(xdrp->xp_offp, addr, len);
	xdrp->xp_offp += len;

	return (TRUE);
}

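/*
 * Worked example (hypothetical numbers): decoding a 16 KB opaque whose
 * read chunk list carries two 8 KB segments at the matching xdr offset:
 *
 *	seg 0: c_len = 8192, c_daddr = addr        -> RDMA_READ(..., NOWAIT)
 *	seg 1: c_len = 8192, c_daddr = addr + 8192 -> RDMA_READ(..., WAIT)
 *
 * sum_len ends up 16384, and because both destination segments are carved
 * out of the one contiguous buffer `addr', a single clist_syncmem() and
 * clist_deregister() over sum_len bytes covers them both.
 */
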
/*
 * ENCODE some bytes into an XDR stream
 * xp_min_chunk = 0 means the stream of bytes contains no chunks to
 * separate out, and if the bytes do not fit in the supplied buffer,
 * grow the buffer and free the old buffer.
 */
static bool_t
xdrrdma_putbytes(XDR *xdrs, caddr_t addr, int len)
{
	struct private *xdrp = (struct private *)(xdrs->x_private);
	struct clist *clzero = xdrp->xp_cl;

	/*
	 * If this chunk meets the minimum chunk size
	 * then don't encode it.  Just record its address
	 * and length in a chunk list entry so that it
	 * can be moved separately via RDMA.
	 */
	if (!(xdrp->xp_flags & RDMA_NOCHUNK) && xdrp->xp_min_chunk != 0 &&
	    len >= xdrp->xp_min_chunk) {
		struct clist *cle;
		int offset = xdrp->xp_offp - xdrs->x_base;

		cle = (struct clist *)kmem_zalloc(sizeof (struct clist),
		    KM_SLEEP);
		cle->c_xdroff = offset;
		cle->c_len = len;
		cle->c_saddr = (uint64)(uintptr_t)addr;
		cle->c_next = NULL;

		*(xdrp->xp_cl_next) = cle;
		xdrp->xp_cl_next = &(cle->c_next);

		return (TRUE);
	}

	if ((xdrs->x_handy -= len) < 0) {
		if (xdrp->xp_min_chunk == 0) {
			int newbuflen, encodelen;
			caddr_t newbuf;

			/*
			 * Grow the buffer: copy what has been encoded
			 * so far into a bigger buffer and free the old
			 * one.
			 */
			xdrs->x_handy += len;
			encodelen = xdrp->xp_offp - xdrs->x_base;
			newbuflen = xdrp->xp_buf_size + len;
			newbuf = kmem_zalloc(newbuflen, KM_SLEEP);
			bcopy(xdrs->x_base, newbuf, encodelen);
			(void) kmem_free(xdrs->x_base, xdrp->xp_buf_size);
			xdrs->x_base = newbuf;
			xdrp->xp_offp = newbuf + encodelen;
			xdrp->xp_buf_size = newbuflen;
			if (clzero != NULL && clzero->c_xdroff == 0) {
				clzero->c_len = newbuflen;
				clzero->c_saddr = (uint64)(uintptr_t)newbuf;
			}
		} else {
			return (FALSE);
		}
	}

	bcopy(addr, xdrp->xp_offp, len);
	xdrp->xp_offp += len;

	return (TRUE);
}

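/*
 * Illustrative example (hypothetical numbers): with xp_min_chunk = 1024,
 * a 4 KB opaque passed to xdrrdma_putbytes() is not copied into the xdr
 * buffer at all; a chunk list entry {c_xdroff = current offset,
 * c_len = 4096, c_saddr = addr} is appended instead, and the peer later
 * moves those bytes with an RDMA operation.  A 64-byte opaque, by
 * contrast, falls below the threshold and is simply bcopy()d inline.
 */
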
uint_t
xdrrdma_getpos(XDR *xdrs)
{
	struct private *xdrp = (struct private *)(xdrs->x_private);

	return ((uint_t)((uintptr_t)xdrp->xp_offp - (uintptr_t)xdrs->x_base));
}

bool_t
xdrrdma_setpos(XDR *xdrs, uint_t pos)
{
	struct private *xdrp = (struct private *)(xdrs->x_private);
	caddr_t newaddr = xdrs->x_base + pos;
	caddr_t lastaddr = xdrp->xp_offp + xdrs->x_handy;
	ptrdiff_t diff;

	if (newaddr > lastaddr)
		return (FALSE);

	xdrp->xp_offp = newaddr;
	diff = lastaddr - newaddr;
	xdrs->x_handy = (int)diff;

	return (TRUE);
}

/* ARGSUSED */
static rpc_inline_t *
xdrrdma_inline(XDR *xdrs, int len)
{
	rpc_inline_t *buf = NULL;
	struct private *xdrp = (struct private *)(xdrs->x_private);
	struct clist *cle = *(xdrp->xp_cl_next);

	if (xdrs->x_op == XDR_DECODE) {
		/*
		 * Since chunks aren't in-line, check to see whether
		 * there is a chunk in the inline range.
		 */
		if (cle != NULL &&
		    cle->c_xdroff <= (xdrp->xp_offp - xdrs->x_base + len))
			return (NULL);
	}

	if ((xdrs->x_handy < len) || (xdrp->xp_min_chunk != 0 &&
	    len >= xdrp->xp_min_chunk)) {
		return (NULL);
	} else {
		xdrs->x_handy -= len;
		/* LINTED pointer alignment */
		buf = (rpc_inline_t *)xdrp->xp_offp;
		xdrp->xp_offp += len;
		return (buf);
	}
}

static bool_t
xdrrdma_control(XDR *xdrs, int request, void *info)
{
	int32_t *int32p;
	int len;
	uint_t in_flags;
	struct private *xdrp = (struct private *)(xdrs->x_private);

	switch (request) {
	case XDR_PEEK:
		/*
		 * Return the next 4 byte unit in the XDR stream.
		 */
		if (xdrs->x_handy < sizeof (int32_t))
			return (FALSE);

		int32p = (int32_t *)info;
		*int32p = (int32_t)ntohl((uint32_t)
		    (*((int32_t *)(xdrp->xp_offp))));

		return (TRUE);

	case XDR_SKIPBYTES:
		/*
		 * Skip the next N bytes in the XDR stream.
		 */
		int32p = (int32_t *)info;
		len = RNDUP((int)(*int32p));
		if ((xdrs->x_handy -= len) < 0)
			return (FALSE);
		xdrp->xp_offp += len;

		return (TRUE);

	case XDR_RDMASET:
		/*
		 * Set the flags provided in the *info in xp_flags for
		 * rdma xdr stream control.
		 */
		int32p = (int32_t *)info;
		in_flags = (uint_t)(*int32p);

		xdrp->xp_flags |= in_flags;
		return (TRUE);

	case XDR_RDMAGET:
		/*
		 * Get the flags provided in xp_flags return through *info
		 */
		int32p = (int32_t *)info;

		*int32p = (int32_t)xdrp->xp_flags;
		return (TRUE);

	default:
		return (FALSE);
	}
}

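/*
 * Control sketch, for illustration only (`xdrs' is an RDMA XDR stream as
 * created above; `nbytes' is a hypothetical placeholder): peek at the
 * next word without consuming it, then skip over a counted field.
 *
 *	int32_t word, nbytes = sizeof (int32_t);
 *
 *	if (XDR_CONTROL(xdrs, XDR_PEEK, &word)) {
 *		// `word' holds the next 4-byte unit in host byte order
 *		(void) XDR_CONTROL(xdrs, XDR_SKIPBYTES, &nbytes);
 *	}
 */
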
static struct xdr_ops *
xdrrdma_ops(void)
{
	static struct xdr_ops ops;

	/* x_getint32 doubles as the "already initialized" sentinel */
	if (ops.x_getint32 == NULL) {
		ops.x_getbytes = xdrrdma_getbytes;
		ops.x_putbytes = xdrrdma_putbytes;
		ops.x_getpostn = xdrrdma_getpos;
		ops.x_setpostn = xdrrdma_setpos;
		ops.x_inline = xdrrdma_inline;
		ops.x_destroy = xdrrdma_destroy;
		ops.x_control = xdrrdma_control;
		ops.x_getint32 = xdrrdma_getint32;
		ops.x_putint32 = xdrrdma_putint32;
	}
	return (&ops);
}

/*
 * Not all fields in struct clist are interesting to the
 * RPC over RDMA protocol.  Only XDR the interesting fields.
 */
bool_t
xdr_clist(XDR *xdrs, clist *objp)
{
	if (!xdr_uint32(xdrs, &objp->c_xdroff))
		return (FALSE);
	if (!xdr_uint32(xdrs, &objp->c_smemhandle.mrc_rmr))
		return (FALSE);
	if (!xdr_uint32(xdrs, &objp->c_len))
		return (FALSE);
	if (!xdr_uint64(xdrs, &objp->c_saddr))
		return (FALSE);
	if (!xdr_pointer(xdrs, (char **)&objp->c_next, sizeof (clist),
	    (xdrproc_t)xdr_clist))
		return (FALSE);
	return (TRUE);
}

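/*
 * Wire-format note (illustrative): xdr_pointer() emits a boolean ahead
 * of each pointed-to element, so a two-entry list passed through
 * xdr_do_clist() below serializes roughly as
 *
 *	TRUE, xdroff0, rmr0, len0, saddr0,
 *	TRUE, xdroff1, rmr1, len1, saddr1, FALSE
 *
 * where the final FALSE marks the NULL c_next that terminates the list.
 */
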
bool_t
xdr_do_clist(XDR *xdrs, clist **clp)
{
	return (xdr_pointer(xdrs, (char **)clp,
	    sizeof (clist), (xdrproc_t)xdr_clist));
}

uint_t
xdr_getbufsize(XDR *xdrs)
{
	struct private *xdrp = (struct private *)(xdrs->x_private);

	return ((uint_t)xdrp->xp_buf_size);
}

bool_t
xdr_encode_wlist(XDR *xdrs, clist *w, uint_t num_segment)
{
	bool_t vfalse = FALSE, vtrue = TRUE;
	uint_t i;

	/* does a wlist exist? */
	if (w == NULL) {
		return (xdr_bool(xdrs, &vfalse));
	}

	/*
	 * Encode N consecutive segments: 1, N, HLOO, ..., HLOO, 0,
	 * where each HLOO is a (handle, length, offset) triple.
	 */
	if (!xdr_bool(xdrs, &vtrue))
		return (FALSE);

	if (!xdr_uint32(xdrs, &num_segment))
		return (FALSE);

	for (i = 0; i < num_segment; i++) {
		/* guard against a list shorter than num_segment */
		if (w == NULL)
			return (FALSE);

		if (!xdr_uint32(xdrs, &w->c_dmemhandle.mrc_rmr))
			return (FALSE);

		if (!xdr_uint32(xdrs, &w->c_len))
			return (FALSE);

		if (!xdr_uint64(xdrs, &w->c_daddr))
			return (FALSE);

		w = w->c_next;
	}

	if (!xdr_bool(xdrs, &vfalse))
		return (FALSE);

	return (TRUE);
}

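/*
 * Example of the resulting stream (hypothetical values) for a wlist with
 * two segments:
 *
 *	1		(wlist present)
 *	2		(segment count)
 *	rmr0, len0, daddr0
 *	rmr1, len1, daddr1
 *	0		(no further wlist)
 *
 * xdr_decode_wlist() below consumes exactly this layout.
 */
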
bool_t
xdr_decode_wlist(XDR *xdrs, struct clist **w, bool_t *wlist_exists)
{
	struct clist *tmp;
	bool_t more = FALSE;
	uint32_t seg_array_len;
	uint32_t i;

	if (!xdr_bool(xdrs, &more))
		return (FALSE);

	/* is there a wlist? */
	if (more == FALSE) {
		*wlist_exists = FALSE;
		return (TRUE);
	}
	*wlist_exists = TRUE;

	if (!xdr_uint32(xdrs, &seg_array_len))
		return (FALSE);

	tmp = *w = (struct clist *)kmem_zalloc(sizeof (struct clist),
	    KM_SLEEP);
	for (i = 0; i < seg_array_len; i++) {
		if (!xdr_uint32(xdrs, &tmp->c_dmemhandle.mrc_rmr))
			return (FALSE);
		if (!xdr_uint32(xdrs, &tmp->c_len))
			return (FALSE);
		if (!xdr_uint64(xdrs, &tmp->c_daddr))
			return (FALSE);
		if (i < seg_array_len - 1) {
			/*
			 * Allocate with kmem_zalloc() as for the head
			 * entry, so the fields not decoded here start
			 * out zeroed.
			 */
			tmp->c_next = (struct clist *)
			    kmem_zalloc(sizeof (struct clist), KM_SLEEP);
			tmp = tmp->c_next;
		} else {
			tmp->c_next = NULL;
		}
	}

	more = FALSE;
	if (!xdr_bool(xdrs, &more))
		return (FALSE);

	return (TRUE);
}

bool_t
xdr_decode_wlist_new(XDR *xdrs, struct clist **wclp, bool_t *wwl,
	uint32_t *total_length, CONN *conn)
{
	struct clist *first, *prev, *ncl;
	char *memp;
#ifdef SERVER_REG_CACHE
	rib_lrc_entry_t *long_reply_buf = NULL;
#endif
	uint32_t num_wclist;
	uint32_t wcl_length = 0;
	uint32_t i;
	bool_t more = FALSE;

	*wclp = NULL;
	*wwl = FALSE;
	*total_length = 0;

	if (!xdr_bool(xdrs, &more)) {
		return (FALSE);
	}

	if (more == FALSE) {
		return (TRUE);
	}

	*wwl = TRUE;
	if (!xdr_uint32(xdrs, &num_wclist)) {
		cmn_err(CE_NOTE, "Error interpreting list length");
		return (FALSE);
	}

	/* one allocation backs the whole array of segment entries */
	first = prev = ncl = (struct clist *)
	    kmem_zalloc(num_wclist * sizeof (struct clist), KM_SLEEP);

	for (i = 0; i < num_wclist; i++) {
		if (!xdr_uint32(xdrs, &ncl->c_dmemhandle.mrc_rmr))
			return (FALSE);
		if (!xdr_uint32(xdrs, &ncl->c_len))
			return (FALSE);
		if (!xdr_uint64(xdrs, &ncl->c_daddr))
			return (FALSE);

		if (ncl->c_len > MAX_SVC_XFER_SIZE) {
			cmn_err(CE_NOTE, "write chunk length too big");
			ncl->c_len = MAX_SVC_XFER_SIZE;
		}
		if (i > 0) {
			prev->c_next = ncl;
		}
		wcl_length += ncl->c_len;
		prev = ncl;
		ncl++;
	}

	more = FALSE;
	if (!xdr_bool(xdrs, &more))
		return (FALSE);

#ifdef SERVER_REG_CACHE
	long_reply_buf = RDMA_GET_SERVER_CACHE_BUF(conn,
	    wcl_length * sizeof (char));
	memp = long_reply_buf->lrc_buf;
#else
	memp = (char *)kmem_alloc(wcl_length * sizeof (char), KM_SLEEP);
#endif
	if (!memp) {
		cmn_err(CE_NOTE, "Not able to allocate memory for chunks");
		kmem_free((void *)first, num_wclist * sizeof (struct clist));
		return (FALSE);
	}

	/* carve the contiguous buffer into per-segment source addresses */
	ncl = first;
	for (i = 0; i < num_wclist; i++) {
#ifdef SERVER_REG_CACHE
		ncl->long_reply_buf = (uint64)long_reply_buf;
#endif
		ncl->c_saddr = (uint64)(uintptr_t)memp;
		memp += ncl->c_len;
		ncl++;
	}

	*wclp = first;
	*total_length = wcl_length;
	return (TRUE);
}

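/*
 * Memory layout sketch (hypothetical lengths): for a decoded wlist of
 * three segments of 4 KB, 4 KB and 2 KB, one 10 KB backing buffer is
 * allocated and carved up so that
 *
 *	seg0.c_saddr = memp
 *	seg1.c_saddr = memp + 4096
 *	seg2.c_saddr = memp + 8192
 *
 * which lets the reply data be generated into one contiguous region and
 * still be pushed segment by segment to the client's advertised sinks.
 */
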
/*
 * XDR decode the long reply write chunk.
 */
bool_t
xdr_decode_reply_wchunk(XDR *xdrs, struct clist **clist, CONN *conn)
{
	bool_t have_rchunk = FALSE;
	struct clist *first = NULL, *prev = NULL, *ncl = NULL;
	char *memp;
	uint32_t num_wclist;
	uint32_t wcl_length = 0;
	uint32_t i;
	rdma_buf_t long_rpc = {0};

	if (!xdr_bool(xdrs, &have_rchunk))
		return (FALSE);

	if (have_rchunk == FALSE)
		return (TRUE);

	if (!xdr_uint32(xdrs, &num_wclist)) {
		cmn_err(CE_NOTE, "Error interpreting list length");
		return (FALSE);
	}
	if (num_wclist == 0) {
		return (FALSE);
	}

	first = prev = ncl = (struct clist *)
	    kmem_zalloc(num_wclist * sizeof (struct clist), KM_SLEEP);

	for (i = 0; i < num_wclist; i++) {
		if (!xdr_uint32(xdrs, &ncl->c_dmemhandle.mrc_rmr))
			return (FALSE);
		if (!xdr_uint32(xdrs, &ncl->c_len))
			return (FALSE);
		if (!xdr_uint64(xdrs, &ncl->c_daddr))
			return (FALSE);

		if (ncl->c_len > MAX_SVC_XFER_SIZE) {
			cmn_err(CE_NOTE, "reply chunk length too big");
			ncl->c_len = MAX_SVC_XFER_SIZE;
		}
		if (!(ncl->c_dmemhandle.mrc_rmr && (ncl->c_len > 0) &&
		    ncl->c_daddr))
			cmn_err(CE_WARN,
			    "Client sent invalid segment address");
		if (i > 0) {
			prev->c_next = ncl;
		}
		wcl_length += ncl->c_len;
		prev = ncl;
		ncl++;
	}

	long_rpc.type = CHUNK_BUFFER;
#ifdef SERVER_REG_CACHE
	long_rpc.long_reply_buf = RDMA_GET_SERVER_CACHE_BUF(conn,
	    wcl_length);
	memp = long_rpc.addr = long_rpc.long_reply_buf->lrc_buf;
#else
	memp = long_rpc.addr = kmem_zalloc(wcl_length, KM_SLEEP);
#endif
	ncl = first;
	for (i = 0; i < num_wclist; i++) {
#ifdef SERVER_REG_CACHE
		ncl->long_reply_buf = (uint64)long_rpc.long_reply_buf;
#endif
		ncl->c_saddr = (uint64)(uintptr_t)memp;
		memp += ncl->c_len;
		ncl++;
	}

	*clist = first;
	return (TRUE);
}

bool_t
xdr_encode_reply_wchunk(XDR *xdrs, struct clist *lrc_entry,
	uint32_t seg_array_len)
{
	uint32_t i;
	bool_t long_reply_exists = TRUE;
	uint32_t length;
	uint64 offset;

	if (seg_array_len > 0) {
		if (!xdr_bool(xdrs, &long_reply_exists))
			return (FALSE);
		if (!xdr_uint32(xdrs, &seg_array_len))
			return (FALSE);

		for (i = 0; i < seg_array_len; i++) {
			if (!lrc_entry)
				return (FALSE);
			length = lrc_entry->c_len;
			offset = (uint64)lrc_entry->c_daddr;

			if (!xdr_uint32(xdrs,
			    &lrc_entry->c_dmemhandle.mrc_rmr))
				return (FALSE);
			if (!xdr_uint32(xdrs, &length))
				return (FALSE);
			if (!xdr_uint64(xdrs, &offset))
				return (FALSE);
			lrc_entry = lrc_entry->c_next;
		}
	} else {
		long_reply_exists = FALSE;
		if (!xdr_bool(xdrs, &long_reply_exists))
			return (FALSE);
	}
	return (TRUE);
}