7 * with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22 /*
23 * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #pragma ident "@(#)xdr_rdma.c 1.4 05/06/08 SMI"
28
29 /*
30 * xdr_rdma.c, XDR implementation using RDMA to move large chunks
31 */
32
33 #include <sys/param.h>
34 #include <sys/types.h>
35 #include <sys/systm.h>
36 #include <sys/kmem.h>
37
38 #include <rpc/types.h>
39 #include <rpc/xdr.h>
40 #include <sys/cmn_err.h>
41 #include <rpc/rpc_sztypes.h>
42 #include <rpc/rpc_rdma.h>
43
44 static struct xdr_ops *xdrrdma_ops(void);
45
46 /*
47 * A chunk list entry identifies a chunk
48 * of opaque data to be moved separately
49 * from the rest of the RPC message.
50 * xp_min_chunk = 0, is a special case for ENCODING, which means
51 * do not chunk the incoming stream of data.
52 */
53
/*
 * Per-stream private state for an RDMA-aware XDR stream, hung off
 * xdrs->x_private by the stream constructor.
 */
struct private {
	caddr_t xp_offp;	/* current encode/decode position in the buffer */
	int xp_min_chunk;	/* min length before data is chunked; 0 = do not chunk */
	uint_t xp_flags; /* Controls setting for rdma xdr */
	int xp_buf_size; /* size of xdr buffer */
	struct clist *xp_cl; /* head of chunk list */
	struct clist **xp_cl_next; /* location to place/find next chunk */
	CONN *xp_conn; /* connection for chunk data xfer */
};
63
64
65 /*
130 struct private *xdrp = (struct private *)(xdrs->x_private);
131
132 if ((xdrs->x_handy -= (int)sizeof (int32_t)) < 0)
133 return (FALSE);
134
135 /* LINTED pointer alignment */
136 *(int32_t *)xdrp->xp_offp = (int32_t)htonl((uint32_t)(*int32p));
137 xdrp->xp_offp += sizeof (int32_t);
138
139 return (TRUE);
140 }
141
142 /*
143 * DECODE some bytes from an XDR stream
144 */
145 static bool_t
146 xdrrdma_getbytes(XDR *xdrs, caddr_t addr, int len)
147 {
148 struct private *xdrp = (struct private *)(xdrs->x_private);
149 struct clist *cle = *(xdrp->xp_cl_next);
150 struct clist cl;
151 bool_t retval = TRUE;
152
153 /*
154 * If there was a chunk at the current offset
155 * first record the destination address and length
156 * in the chunk list that came with the message, then
157 * RDMA READ the chunk data.
158 */
159 if (cle != NULL &&
160 cle->c_xdroff == (xdrp->xp_offp - xdrs->x_base)) {
161 cle->c_daddr = (uint64)(uintptr_t)addr;
162 cle->c_len = len;
163 xdrp->xp_cl_next = &cle->c_next;
164
165 /*
166 * RDMA READ the chunk data from the remote end.
167 * First prep the destination buffer by registering
168 * it, then RDMA READ the chunk data. Since we are
169 * doing streaming memory, sync the destination buffer
170 * to CPU and deregister the buffer.
171 */
172 if (xdrp->xp_conn == NULL) {
173 return (FALSE);
174 }
175
176 cl = *cle;
177 cl.c_next = NULL;
178 if (clist_register(xdrp->xp_conn, &cl, 0) != RDMA_SUCCESS) {
179 return (FALSE);
180 }
181
182 /*
183 * Now read the chunk in
184 */
185 if (RDMA_READ(xdrp->xp_conn, &cl, WAIT) != RDMA_SUCCESS) {
186 #ifdef DEBUG
187 cmn_err(CE_WARN,
188 "xdrrdma_getbytes: RDMA_READ failed\n");
189 #endif
190 retval = FALSE;
191 goto out;
192 }
193 /*
194 * sync the memory for cpu
195 */
196 if (clist_syncmem(xdrp->xp_conn, &cl, 0) != RDMA_SUCCESS) {
197 retval = FALSE;
198 goto out;
199 }
200
201 out:
202 /*
203 * Deregister the chunks
204 */
205 (void) clist_deregister(xdrp->xp_conn, &cl, 0);
206 return (retval);
207 }
208
209 if ((xdrs->x_handy -= len) < 0)
210 return (FALSE);
211
212 bcopy(xdrp->xp_offp, addr, len);
213 xdrp->xp_offp += len;
214
215 return (TRUE);
216 }
217
218 /*
219 * ENCODE some bytes into an XDR stream
220 * xp_min_chunk = 0, means the stream of bytes contain no chunks
 * to separate out, and if the bytes do not fit in the supplied
222 * buffer, grow the buffer and free the old buffer.
223 */
224 static bool_t
225 xdrrdma_putbytes(XDR *xdrs, caddr_t addr, int len)
407 ops.x_setpostn = xdrrdma_setpos;
408 ops.x_inline = xdrrdma_inline;
409 ops.x_destroy = xdrrdma_destroy;
410 ops.x_control = xdrrdma_control;
411 ops.x_getint32 = xdrrdma_getint32;
412 ops.x_putint32 = xdrrdma_putint32;
413 }
414 return (&ops);
415 }
416
417 /*
418 * Not all fields in struct clist are interesting to the
419 * RPC over RDMA protocol. Only XDR the interesting fields.
420 */
421 bool_t
422 xdr_clist(XDR *xdrs, clist *objp)
423 {
424
425 if (!xdr_uint32(xdrs, &objp->c_xdroff))
426 return (FALSE);
427 if (!xdr_uint32(xdrs, &objp->c_len))
428 return (FALSE);
429 if (!xdr_uint32(xdrs, &objp->c_smemhandle.mrc_rmr))
430 return (FALSE);
431 if (!xdr_uint64(xdrs, &objp->c_saddr))
432 return (FALSE);
433 if (!xdr_pointer(xdrs, (char **)&objp->c_next, sizeof (clist),
434 (xdrproc_t)xdr_clist))
435 return (FALSE);
436 return (TRUE);
437 }
438
439 bool_t
440 xdr_do_clist(XDR *xdrs, clist **clp)
441 {
442 return (xdr_pointer(xdrs, (char **)clp,
443 sizeof (clist), (xdrproc_t)xdr_clist));
444 }
445
446 uint_t
447 xdr_getbufsize(XDR *xdrs)
448 {
449 struct private *xdrp = (struct private *)(xdrs->x_private);
450
451 return ((uint_t)xdrp->xp_buf_size);
452 }
|
7 * with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22 /*
23 * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /* Copyright (c) 2006, The Ohio State University. All rights reserved.
28 *
29 * Portions of this source code is developed by the team members of
30 * The Ohio State University's Network-Based Computing Laboratory (NBCL),
31 * headed by Professor Dhabaleswar K. (DK) Panda.
32 *
 * Acknowledgements to contributions from developers:
34 * Ranjit Noronha: noronha@cse.ohio-state.edu
35 * Lei Chai : chail@cse.ohio-state.edu
36 * Weikuan Yu : yuw@cse.ohio-state.edu
37 *
38 */
39
40 #pragma ident "@(#)xdr_rdma.c 1.4 05/06/08 SMI"
41
42 /*
43 * xdr_rdma.c, XDR implementation using RDMA to move large chunks
44 */
45
46 #include <sys/param.h>
47 #include <sys/types.h>
48 #include <sys/systm.h>
49 #include <sys/kmem.h>
50
51 #include <rpc/types.h>
52 #include <rpc/xdr.h>
53 #include <sys/cmn_err.h>
54 #include <rpc/rpc_sztypes.h>
55 #include <rpc/rpc_rdma.h>
56
57 static struct xdr_ops *xdrrdma_ops(void);
58
59 /*int rdma_xdr_long_reply_debug = 0x0;*/
60
61 /*
62 * A chunk list entry identifies a chunk
63 * of opaque data to be moved separately
64 * from the rest of the RPC message.
65 * xp_min_chunk = 0, is a special case for ENCODING, which means
66 * do not chunk the incoming stream of data.
67 */
68
/*
 * Per-stream private state for an RDMA-aware XDR stream, hung off
 * xdrs->x_private by the stream constructor.
 */
struct private {
	caddr_t xp_offp;	/* current encode/decode position in the buffer */
	int xp_min_chunk;	/* min length before data is chunked; 0 = do not chunk */
	uint_t xp_flags; /* Controls setting for rdma xdr */
	int xp_buf_size; /* size of xdr buffer */
	struct clist *xp_cl; /* head of chunk list */
	struct clist **xp_cl_next; /* location to place/find next chunk */
	CONN *xp_conn; /* connection for chunk data xfer */
};
78
79
80 /*
145 struct private *xdrp = (struct private *)(xdrs->x_private);
146
147 if ((xdrs->x_handy -= (int)sizeof (int32_t)) < 0)
148 return (FALSE);
149
150 /* LINTED pointer alignment */
151 *(int32_t *)xdrp->xp_offp = (int32_t)htonl((uint32_t)(*int32p));
152 xdrp->xp_offp += sizeof (int32_t);
153
154 return (TRUE);
155 }
156
157 /*
158 * DECODE some bytes from an XDR stream
159 */
160 static bool_t
161 xdrrdma_getbytes(XDR *xdrs, caddr_t addr, int len)
162 {
163 struct private *xdrp = (struct private *)(xdrs->x_private);
164 struct clist *cle = *(xdrp->xp_cl_next);
165 struct clist *cls = *(xdrp->xp_cl_next);
166 struct clist cl;
167 bool_t retval = TRUE;
168 uint32_t total_len=len;
169 uint32_t sum_len=0;
170 uint32_t total_segments=0;
171 uint32_t actual_segments=0;
172 uint32_t status;
173 uint32_t i;
174 uint32_t alen;
175 while(cle) {
176 total_segments++;
177 cle=cle->c_next;
178 }
179
180 cle = *(xdrp->xp_cl_next);
181 /*
182 * If there was a chunk at the current offset
183 * first record the destination address and length
184 * in the chunk list that came with the message, then
185 * RDMA READ the chunk data.
186 */
187 if (cle != NULL &&
188 cle->c_xdroff == (xdrp->xp_offp - xdrs->x_base)) {
189 for(actual_segments=0; actual_segments < total_segments; actual_segments++) {
190 if(total_len <= 0)
191 goto mem_sync;
192 cle->c_daddr = (uint64)(uintptr_t)addr + sum_len;
193 /*cle->c_len = len;*/
194 alen = 0;
195 if(cle->c_len > total_len) {
196 alen = cle->c_len;
197 cle->c_len = total_len;
198 }
199 if(!alen)
200 xdrp->xp_cl_next = &cle->c_next;
201
202
203 sum_len += cle->c_len;
204 total_len -= cle->c_len;
205
206 if((total_segments - actual_segments - 1) == 0 && total_len > 0 ){
207 cmn_err(CE_WARN,"Provided read chunks are too short\n");
208 retval = FALSE;
209 }
210
211 if((total_segments - actual_segments - 1) > 0 && total_len == 0 ){
212 #ifdef DEBUG
213 cmn_err(CE_NOTE,"Provided read chunks are too long [total=%d, actual=%d]\n",total_segments,actual_segments);
214 #endif
215 }
216 /*
217 * RDMA READ the chunk data from the remote end.
218 * First prep the destination buffer by registering
219 * it, then RDMA READ the chunk data. Since we are
220 * doing streaming memory, sync the destination buffer
221 * to CPU and deregister the buffer.
222 */
223 if (xdrp->xp_conn == NULL) {
224 return (FALSE);
225 }
226
227 cl = *cle;
228 cl.c_next = NULL;
229 if (clist_register(xdrp->xp_conn, &cl, 0) != RDMA_SUCCESS) {
230 return (FALSE);
231 }
232 cle->c_dmemhandle = cl.c_dmemhandle;
233 cle->c_dsynchandle = cl.c_dsynchandle;
234
235 /*
236 * Now read the chunk in
237 */
238 if((total_segments - actual_segments - 1) == 0 || total_len == 0){
239 status = RDMA_READ(xdrp->xp_conn, &cl, WAIT);
240 } else {
241 status = RDMA_READ(xdrp->xp_conn, &cl, NOWAIT);
242 }
243 if (status != RDMA_SUCCESS) {
244 #ifdef DEBUG
245 cmn_err(CE_WARN,
246 "xdrrdma_getbytes: RDMA_READ failed\n");
247 #endif
248 retval = FALSE;
249 goto out;
250 }
251 cle = cle->c_next;
252 }
253 mem_sync:
254 /*
255 * sync the memory for cpu
256 */
257 cle = cls;
258 cl = *cle;
259 cl.c_next = NULL;
260 cl.c_len = sum_len;
261 if (clist_syncmem(xdrp->xp_conn, &cl, 0) != RDMA_SUCCESS) {
262 retval = FALSE;
263 goto out;
264 }
265 out:
266 /*
267 * Deregister the chunks
268 */
269 cle = cls;
270 cl = *cle;
271 cl.c_next = NULL;
272 cl.c_len = sum_len;
273 (void) clist_deregister(xdrp->xp_conn, &cl, 0);
274 if(alen){
275 cle->c_saddr = (uint64)(uintptr_t)cle->c_saddr + cle->c_len;
276 cle->c_len = alen - cle->c_len;
277 }
278 return (retval);
279 }
280
281 if ((xdrs->x_handy -= len) < 0)
282 return (FALSE);
283
284 bcopy(xdrp->xp_offp, addr, len);
285 xdrp->xp_offp += len;
286
287 return (TRUE);
288 }
289
290 /*
291 * ENCODE some bytes into an XDR stream
292 * xp_min_chunk = 0, means the stream of bytes contain no chunks
 * to separate out, and if the bytes do not fit in the supplied
 
294 * buffer, grow the buffer and free the old buffer.
295 */
296 static bool_t
297 xdrrdma_putbytes(XDR *xdrs, caddr_t addr, int len)
479 ops.x_setpostn = xdrrdma_setpos;
480 ops.x_inline = xdrrdma_inline;
481 ops.x_destroy = xdrrdma_destroy;
482 ops.x_control = xdrrdma_control;
483 ops.x_getint32 = xdrrdma_getint32;
484 ops.x_putint32 = xdrrdma_putint32;
485 }
486 return (&ops);
487 }
488
489 /*
490 * Not all fields in struct clist are interesting to the
491 * RPC over RDMA protocol. Only XDR the interesting fields.
492 */
493 bool_t
494 xdr_clist(XDR *xdrs, clist *objp)
495 {
496
497 if (!xdr_uint32(xdrs, &objp->c_xdroff))
498 return (FALSE);
499 if (!xdr_uint32(xdrs, &objp->c_smemhandle.mrc_rmr))
500 return (FALSE);
501 if (!xdr_uint32(xdrs, &objp->c_len))
502 return (FALSE);
503 if (!xdr_uint64(xdrs, &objp->c_saddr))
504 return (FALSE);
505 if (!xdr_pointer(xdrs, (char **)&objp->c_next, sizeof (clist),
506 (xdrproc_t)xdr_clist))
507 return (FALSE);
508 return (TRUE);
509 }
510
511 bool_t
512 xdr_do_clist(XDR *xdrs, clist **clp)
513 {
514 return (xdr_pointer(xdrs, (char **)clp,
515 sizeof (clist), (xdrproc_t)xdr_clist));
516 }
517
518 uint_t
519 xdr_getbufsize(XDR *xdrs)
520 {
521 struct private *xdrp = (struct private *)(xdrs->x_private);
522
523 return ((uint_t)xdrp->xp_buf_size);
524 }
525
526 bool_t
527 xdr_encode_wlist(XDR *xdrs, clist *w, uint_t num_segment)
528 {
529 bool_t vfalse = FALSE, vtrue = TRUE;
530 int i;
531
532 /* does a wlist exist? */
533 if (w == NULL) {
534 return (xdr_bool(xdrs, &vfalse));
535 }
536
537 /* Encode N consecutive segments, 1, N, HLOO, ..., HLOO, 0 */
538 if (! xdr_bool(xdrs, &vtrue))
539 return (FALSE);
540
541 if (! xdr_uint32(xdrs, &num_segment))
542 return (FALSE);
543 for(i=0; i<num_segment; i++){
544 if (! xdr_uint32(xdrs, &w->c_dmemhandle.mrc_rmr))
545 return (FALSE);
546
547 if (! xdr_uint32(xdrs, &w->c_len))
548 return (FALSE);
549
550 if (! xdr_uint64(xdrs, &w->c_daddr))
551 return (FALSE);
552
553 w = w->c_next;
554 }
555 if (!xdr_bool(xdrs, &vfalse))
556 return (FALSE);
557
558 return (TRUE);
559 }
560
/*
 * Decode a write chunk list from the XDR stream.
 *
 * Wire form: a boolean discriminator, then (if TRUE) a segment count,
 * that many (handle, length, offset) triples, and a trailing boolean.
 * On success *w points at a freshly allocated list and *wlist_exists
 * says whether a list was present at all.
 */
bool_t
xdr_decode_wlist(XDR *xdrs, struct clist **w, bool_t *wlist_exists)
{
	struct clist *tmp;
	bool_t more = FALSE;
	uint32_t seg_array_len;
	uint32_t i;

	if (! xdr_bool(xdrs, &more))
		return (FALSE);

	/* is there a wlist? */
	if (more == FALSE) {
		*wlist_exists = FALSE;
		return (TRUE);
	}

	*wlist_exists = TRUE;

	if (! xdr_uint32(xdrs, &seg_array_len))
		return (FALSE);

	/* Head node is zeroed; *w is handed to the caller to free. */
	tmp = *w = (struct clist *)kmem_zalloc(sizeof (struct clist),
	    KM_SLEEP);
	/* *w = empty_cl; */
	for (i = 0; i < seg_array_len; i++) {
		/*
		 * NOTE(review): the failure returns below leak every
		 * node allocated so far (and leave *w pointing at the
		 * partial list) -- confirm callers free on FALSE.
		 */
		if (! xdr_uint32(xdrs, &tmp->c_dmemhandle.mrc_rmr))
			return (FALSE);
		if (! xdr_uint32(xdrs, &tmp->c_len))
			return (FALSE);
		if (! xdr_uint64(xdrs, &tmp->c_daddr))
			return (FALSE);
		if (i < seg_array_len - 1) {
			/*
			 * NOTE(review): interior nodes come from
			 * mem_alloc and are NOT zeroed, unlike the
			 * kmem_zalloc'd head -- fields other than the
			 * three decoded above hold garbage; verify no
			 * caller reads them.
			 */
			tmp->c_next = (struct clist *)
			    mem_alloc(sizeof(struct clist));
			tmp = tmp->c_next;
		} else {
			tmp->c_next = NULL;
		}
	}

	/* Consume the terminating FALSE discriminator. */
	more = FALSE;
	if (!xdr_bool(xdrs, &more))
		return (FALSE);

	return (TRUE);
}
608
/*
 * Decode a write chunk list and back it with server-side memory.
 *
 * Decodes the same wire form as xdr_decode_wlist, but allocates the
 * segment descriptors as one contiguous array and points each
 * segment's source address (c_saddr) into a single buffer sized to
 * the sum of the segment lengths.  *wwl reports whether a list was
 * present; *total_length returns the summed length.
 */
bool_t
xdr_decode_wlist_new(XDR *xdrs, struct clist **wclp, bool_t *wwl,
	uint32_t *total_length,CONN *conn)
{
	struct clist *first, *prev, *ncl;
	char *memp;
#ifdef SERVER_REG_CACHE
	/*struct private *xdrp ; = (struct private *)(xdrs->x_private)*/
	rib_lrc_entry_t *long_reply_buf = NULL;
#endif
	uint32_t num_wclist;
	uint32_t wcl_length = 0;
	uint32_t i;
	bool_t more = FALSE;

	*wclp = NULL;
	*wwl = FALSE;
	*total_length=0;

	if (! xdr_bool(xdrs, &more)) {
		return (FALSE);
	}

	/* No list present: nothing more to do. */
	if (more == FALSE) {
		return (TRUE);
	}

	*wwl = TRUE;
	if (! xdr_uint32(xdrs, &num_wclist)) {
		cmn_err(CE_NOTE, "Error interpretting list length");
		return (FALSE);
	}

	/*
	 * One zeroed array holds all the segment descriptors.
	 * NOTE(review): num_wclist == 0 would pass 0 to kmem_zalloc;
	 * confirm peers never send an empty TRUE-discriminated list.
	 */
	first = prev = ncl = (struct clist *)
	    kmem_zalloc(num_wclist*sizeof(struct clist), KM_SLEEP);

	/* NOTE(review): kmem_zalloc(KM_SLEEP) never returns NULL, so
	 * this check is dead code. */
	if (!first) {
		cmn_err(CE_NOTE, "Not able to allocate memory");
		return (FALSE);
	}

	more = TRUE;
	for (i = 0; i < num_wclist; i++) {
		/*
		 * NOTE(review): the failure returns below leak the
		 * 'first' array -- confirm callers cannot recover it.
		 */
		if (! xdr_uint32(xdrs, &ncl->c_dmemhandle.mrc_rmr))
			return (FALSE);
		if (! xdr_uint32(xdrs, &ncl->c_len))
			return (FALSE);
		if (! xdr_uint64(xdrs, &ncl->c_daddr))
			return (FALSE);

		/* Clamp oversized segments to what the server can move. */
		if (ncl->c_len > MAX_SVC_XFER_SIZE) {
			cmn_err(CE_NOTE, "write chunk length too big");
			ncl->c_len = MAX_SVC_XFER_SIZE;
		}
		if (i > 0) {
			prev->c_next = ncl;
		}
		wcl_length += ncl->c_len;
		prev = ncl;
		ncl ++ ;
	}

	/* Consume the terminating FALSE discriminator. */
	more = FALSE;
	if (!xdr_bool(xdrs, &more))
		return (FALSE);

#ifdef SERVER_REG_CACHE
	/* NOTE(review): long_reply_buf is dereferenced without a NULL
	 * check -- confirm RDMA_GET_SERVER_CACHE_BUF cannot fail. */
	long_reply_buf = RDMA_GET_SERVER_CACHE_BUF(conn,wcl_length*sizeof(char));
	first->long_reply_buf = (uint64)long_reply_buf;
	memp = long_reply_buf->lrc_buf;
#else
	memp = (char *) kmem_alloc(wcl_length*sizeof(char), KM_SLEEP);
#endif
	if (!memp) {
		cmn_err(CE_NOTE, "Not able to allocate memory for chunks");
		kmem_free((void*) first, num_wclist*sizeof(struct clist));
		return (FALSE);
	}
	/* Parcel the single buffer out across the segments. */
	ncl = first;
	for (i = 0; i < num_wclist; i++) {
#ifdef SERVER_REG_CACHE
		ncl->long_reply_buf = (uint64)long_reply_buf;
#endif
		ncl->c_saddr = (uint64_t) memp;
		memp += ncl->c_len;
		ncl++;
	}

	*wclp = first;
	*total_length = wcl_length;
	return (TRUE);
}
701
702 /*
703 * XDR decode the long reply write chunk.
704 */
705 bool_t
706 xdr_decode_reply_wchunk(XDR *xdrs, struct clist **clist,CONN *conn)
707 {
708 uint32_t mem_handle = 0;
709 uint32_t length = 0;
710 uint64 offset = 0;
711 bool_t have_rchunk = FALSE;
712 uint32_t seg_array_len = 0;
713 struct clist *first = NULL, *prev = NULL, *ncl = NULL;
714 char *memp;
715 uint32_t num_wclist;
716 uint32_t wcl_length = 0;
717 uint32_t i;
718 rdma_buf_t long_rpc = {0};
719
720 if (!xdr_bool(xdrs, &have_rchunk))
721 return (FALSE);
722
723 if (have_rchunk == FALSE)
724 return (TRUE);
725
726 if (! xdr_uint32(xdrs, &num_wclist)) {
727 cmn_err(CE_NOTE, "Error interpretting list length");
728 return (FALSE);
729 }
730 if (num_wclist == 0) {
731 return (FALSE);
732 }
733
734 first = prev = ncl = (struct clist *)
735 kmem_zalloc(num_wclist*sizeof(struct clist), KM_SLEEP);
736 if (!first) {
737 cmn_err(CE_NOTE, "Not able to allocate memory");
738 return (FALSE);
739 }
740
741 for (i = 0; i < num_wclist; i++) {
742 if (! xdr_uint32(xdrs, &ncl->c_dmemhandle.mrc_rmr))
743 return (FALSE);
744 if (! xdr_uint32(xdrs, &ncl->c_len))
745 return (FALSE);
746 if (! xdr_uint64(xdrs, &ncl->c_daddr))
747 return (FALSE);
748
749 if (ncl->c_len > MAX_SVC_XFER_SIZE) {
750 cmn_err(CE_NOTE, "reply chunk length too big");
751 ncl->c_len = MAX_SVC_XFER_SIZE;
752 }
753 if(!(ncl->c_dmemhandle.mrc_rmr && (ncl->c_len > 0) && ncl->c_daddr))
754 cmn_err(CE_WARN,"Client sent invalid segment address\n");
755 if (i > 0) {
756 prev->c_next = ncl;
757 }
758 wcl_length += ncl->c_len;
759 prev = ncl;
760 ncl ++ ;
761 }
762 if(num_wclist){
763 long_rpc.type = CHUNK_BUFFER;
764 #ifdef SERVER_REG_CACHE
765 long_rpc.long_reply_buf = RDMA_GET_SERVER_CACHE_BUF(conn,wcl_length);
766 memp = long_rpc.addr = long_rpc.long_reply_buf->lrc_buf;
767 #else
768 memp = long_rpc.addr = kmem_zalloc(wcl_length, KM_SLEEP);
769 #endif
770 ncl = first;
771
772 for (i = 0; i < num_wclist; i++) {
773 #ifdef SERVER_REG_CACHE
774 ncl->long_reply_buf = (uint64)long_rpc.long_reply_buf;
775 #endif
776 ncl->c_saddr = (uint64_t) memp;
777 memp += ncl->c_len;
778 ncl++;
779 }
780 }
781 *clist=first;
782 return (TRUE);
783 }
784
785 bool_t
786 xdr_encode_reply_wchunk(XDR *xdrs, struct clist *lrc_entry,uint32_t seg_array_len)
787 {
788 int i;
789 bool_t long_reply_exists = TRUE;
790 uint32_t length ;
791 uint64 offset ;
792 if(seg_array_len>0){
793 if (!xdr_bool(xdrs, &long_reply_exists))
794 return (FALSE);
795 if (!xdr_uint32(xdrs, &seg_array_len))
796 return (FALSE);
797
798 for(i=0;i<seg_array_len;i++){
799 if(!lrc_entry)
800 return FALSE;
801 length = lrc_entry->c_len;
802 offset = (uint64)lrc_entry->c_daddr;
803
804 if (!xdr_uint32(xdrs, &lrc_entry->c_dmemhandle.mrc_rmr))
805 return (FALSE);
806 if (!xdr_uint32(xdrs, &length))
807 return (FALSE);
808 if (!xdr_uint64(xdrs, &offset))
809 return (FALSE);
810 lrc_entry = lrc_entry->c_next;
811 }
812 } else {
813 long_reply_exists = FALSE;
814 if(!xdr_bool(xdrs, &long_reply_exists))
815 return (FALSE);
816 }
817 return (TRUE);
818 }
|