772 */
/*
 * Register the user app's preallocated gref-mapping page range with seg_mf.
 * For every page in the range a locked placeholder mapping is loaded and the
 * page's PTE MA is handed to seg_mf so grant references can later be mapped
 * into user space.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
773 static int
774 xpvtap_segmf_register(xpvtap_state_t *state)
775 {
776 struct seg *seg;
777 uint64_t pte_ma;
778 struct as *as;
779 caddr_t uaddr;
780 uint_t pgcnt;
781 int i;
782
783
784 as = state->bt_map.um_as;
785 pgcnt = btopr(state->bt_map.um_guest_size);
786 uaddr = state->bt_map.um_guest_pages;
787
/* no gref pages at all means there is nothing to register */
788 if (pgcnt == 0) {
789 return (DDI_FAILURE);
790 }
791
792 AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
793
/*
 * The entire gref page range must fall within a single segment of the
 * user address space; otherwise seg_mf cannot manage it.
 */
794 seg = as_findseg(as, state->bt_map.um_guest_pages, 0);
795 if ((seg == NULL) || ((uaddr + state->bt_map.um_guest_size) >
796 (seg->s_base + seg->s_size))) {
797 AS_LOCK_EXIT(as, &as->a_lock);
798 return (DDI_FAILURE);
799 }
800
801 /*
802 * lock down the htables so the HAT can't steal them. Register the
803 * PTE MA's for each gref page with seg_mf so we can do user space
804 * gref mappings.
805 */
806 for (i = 0; i < pgcnt; i++) {
807 hat_prepare_mapping(as->a_hat, uaddr, &pte_ma);
/*
 * NOTE(review): pfn 0 with HAT_LOAD_NOCONSIST|HAT_LOAD_LOCK looks
 * like a locked placeholder mapping; the real machine frame is
 * supplied later through the pte_ma registered with seg_mf below
 * -- confirm against hat_prepare_mapping()/segmf_add_gref_pte().
 */
808 hat_devload(as->a_hat, uaddr, PAGESIZE, (pfn_t)0,
809 PROT_READ | PROT_WRITE | PROT_USER | HAT_UNORDERED_OK,
810 HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);
811 hat_release_mapping(as->a_hat, uaddr);
812 segmf_add_gref_pte(seg, uaddr, pte_ma);
813 uaddr += PAGESIZE;
814 }
815
/* teardown (xpvtap_segmf_unregister) keys off this flag */
816 state->bt_map.um_registered = B_TRUE;
817
818 AS_LOCK_EXIT(as, &as->a_lock);
819
820 return (DDI_SUCCESS);
821 }
822
823
824 /*
825 * xpvtap_segmf_unregister()
826 * as_callback routine
827 */
828 /*ARGSUSED*/
/* registered via as_add_callback(); called back by the as layer */
829 static void
830 xpvtap_segmf_unregister(struct as *as, void *arg, uint_t event)
831 {
832 xpvtap_state_t *state;
833 caddr_t uaddr;
834 uint_t pgcnt;
835 int i;
836
837
/* arg is the driver soft state passed when the callback was added */
838 state = (xpvtap_state_t *)arg;
839 if (!state->bt_map.um_registered) {
/* nothing was ever registered; just tear down the callback */
840 /* remove the callback (which is this routine) */
841 (void) as_delete_callback(as, arg);
842 return;
843 }
844
845 pgcnt = btopr(state->bt_map.um_guest_size);
846 uaddr = state->bt_map.um_guest_pages;
847
848 /* unmap any outstanding req's grefs */
849 xpvtap_rs_flush(state->bt_map.um_rs, xpvtap_user_request_unmap, state);
850
851 /* Unlock the gref pages */
852 for (i = 0; i < pgcnt; i++) {
/*
 * NOTE(review): the as writer lock is acquired and dropped once
 * per page, presumably to bound writer-lock hold time -- confirm.
 */
853 AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
854 hat_prepare_mapping(as->a_hat, uaddr, NULL);
855 hat_unload(as->a_hat, uaddr, PAGESIZE, HAT_UNLOAD_UNLOCK);
856 hat_release_mapping(as->a_hat, uaddr);
857 AS_LOCK_EXIT(as, &as->a_lock);
858 uaddr += PAGESIZE;
859 }
860
861 /* remove the callback (which is this routine) */
862 (void) as_delete_callback(as, arg);
863
/* allow a later xpvtap_segmf_register() to run again */
864 state->bt_map.um_registered = B_FALSE;
865 }
866
867
868 /*
869 * xpvtap_user_init()
870 */
871 static int
872 xpvtap_user_init(xpvtap_state_t *state)
873 {
874 xpvtap_user_map_t *map;
875 int e;
876
877
1211 return (DDI_FAILURE);
1212 }
1213 }
1214
1215 /* alloc an ID for the user ring */
1216 e = xpvtap_rs_alloc(state->bt_map.um_rs, uid);
1217 if (e != DDI_SUCCESS) {
1218 return (DDI_FAILURE);
1219 }
1220
1221 /* if we don't have any segments to map, we're done */
1222 if ((req->operation == BLKIF_OP_WRITE_BARRIER) ||
1223 (req->operation == BLKIF_OP_FLUSH_DISKCACHE) ||
1224 (req->nr_segments == 0)) {
1225 return (DDI_SUCCESS);
1226 }
1227
1228 /* get the apps gref address */
1229 uaddr = XPVTAP_GREF_REQADDR(state->bt_map.um_guest_pages, *uid);
1230
1231 AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
1232 seg = as_findseg(as, state->bt_map.um_guest_pages, 0);
1233 if ((seg == NULL) || ((uaddr + mmu_ptob(req->nr_segments)) >
1234 (seg->s_base + seg->s_size))) {
1235 AS_LOCK_EXIT(as, &as->a_lock);
1236 return (DDI_FAILURE);
1237 }
1238
1239 /* if we are reading from disk, we are writing into memory */
1240 flags = 0;
1241 if (req->operation == BLKIF_OP_READ) {
1242 flags |= SEGMF_GREF_WR;
1243 }
1244
1245 /* Load the grefs into seg_mf */
1246 for (i = 0; i < req->nr_segments; i++) {
1247 gref[i] = req->seg[i].gref;
1248 }
1249 (void) segmf_add_grefs(seg, uaddr, flags, gref, req->nr_segments,
1250 domid);
1251
1252 AS_LOCK_EXIT(as, &as->a_lock);
1253
1254 return (DDI_SUCCESS);
1255 }
1256
1257
1258 /*
1259 * xpvtap_user_request_push()
1260 */
1261 static int
1262 xpvtap_user_request_push(xpvtap_state_t *state, blkif_request_t *req,
1263 uint_t uid)
1264 {
1265 blkif_request_t *outstanding_req;
1266 blkif_front_ring_t *uring;
1267 blkif_request_t *target;
1268 xpvtap_user_map_t *map;
1269
1270
1271 uring = &state->bt_user_ring.ur_ring;
1272 map = &state->bt_map;
1297 blkif_request_t *req;
1298 struct seg *seg;
1299 struct as *as;
1300 caddr_t uaddr;
1301 int e;
1302
1303
1304 as = state->bt_map.um_as;
1305 if (as == NULL) {
1306 return;
1307 }
1308
1309 /* get a copy of the original request */
1310 req = &state->bt_map.um_outstanding_reqs[uid];
1311
1312 /* unmap the grefs for this request */
1313 if ((req->operation != BLKIF_OP_WRITE_BARRIER) &&
1314 (req->operation != BLKIF_OP_FLUSH_DISKCACHE) &&
1315 (req->nr_segments != 0)) {
1316 uaddr = XPVTAP_GREF_REQADDR(state->bt_map.um_guest_pages, uid);
1317 AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
1318 seg = as_findseg(as, state->bt_map.um_guest_pages, 0);
1319 if ((seg == NULL) || ((uaddr + mmu_ptob(req->nr_segments)) >
1320 (seg->s_base + seg->s_size))) {
1321 AS_LOCK_EXIT(as, &as->a_lock);
1322 xpvtap_rs_free(state->bt_map.um_rs, uid);
1323 return;
1324 }
1325
1326 e = segmf_release_grefs(seg, uaddr, req->nr_segments);
1327 if (e != 0) {
1328 cmn_err(CE_WARN, "unable to release grefs");
1329 }
1330
1331 AS_LOCK_EXIT(as, &as->a_lock);
1332 }
1333
1334 /* free up the user ring id */
1335 xpvtap_rs_free(state->bt_map.um_rs, uid);
1336 }
1337
1338
1339 static int
1340 xpvtap_user_response_get(xpvtap_state_t *state, blkif_response_t *resp,
1341 uint_t *uid)
1342 {
1343 blkif_front_ring_t *uring;
1344 blkif_response_t *target;
1345
1346
1347 uring = &state->bt_user_ring.ur_ring;
1348
1349 if (!RING_HAS_UNCONSUMED_RESPONSES(uring)) {
1350 return (B_FALSE);
1351 }
|
772 */
/*
 * Register the user app's preallocated gref-mapping page range with seg_mf.
 * For every page in the range a locked placeholder mapping is loaded and the
 * page's PTE MA is handed to seg_mf so grant references can later be mapped
 * into user space.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
773 static int
774 xpvtap_segmf_register(xpvtap_state_t *state)
775 {
776 struct seg *seg;
777 uint64_t pte_ma;
778 struct as *as;
779 caddr_t uaddr;
780 uint_t pgcnt;
781 int i;
782
783
784 as = state->bt_map.um_as;
785 pgcnt = btopr(state->bt_map.um_guest_size);
786 uaddr = state->bt_map.um_guest_pages;
787
/* no gref pages at all means there is nothing to register */
788 if (pgcnt == 0) {
789 return (DDI_FAILURE);
790 }
791
792 AS_LOCK_ENTER(as, RW_READER);
793
/*
 * The entire gref page range must fall within a single segment of the
 * user address space; otherwise seg_mf cannot manage it.
 */
794 seg = as_findseg(as, state->bt_map.um_guest_pages, 0);
795 if ((seg == NULL) || ((uaddr + state->bt_map.um_guest_size) >
796 (seg->s_base + seg->s_size))) {
797 AS_LOCK_EXIT(as);
798 return (DDI_FAILURE);
799 }
800
801 /*
802 * lock down the htables so the HAT can't steal them. Register the
803 * PTE MA's for each gref page with seg_mf so we can do user space
804 * gref mappings.
805 */
806 for (i = 0; i < pgcnt; i++) {
807 hat_prepare_mapping(as->a_hat, uaddr, &pte_ma);
/*
 * NOTE(review): pfn 0 with HAT_LOAD_NOCONSIST|HAT_LOAD_LOCK looks
 * like a locked placeholder mapping; the real machine frame is
 * supplied later through the pte_ma registered with seg_mf below
 * -- confirm against hat_prepare_mapping()/segmf_add_gref_pte().
 */
808 hat_devload(as->a_hat, uaddr, PAGESIZE, (pfn_t)0,
809 PROT_READ | PROT_WRITE | PROT_USER | HAT_UNORDERED_OK,
810 HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);
811 hat_release_mapping(as->a_hat, uaddr);
812 segmf_add_gref_pte(seg, uaddr, pte_ma);
813 uaddr += PAGESIZE;
814 }
815
/* teardown (xpvtap_segmf_unregister) keys off this flag */
816 state->bt_map.um_registered = B_TRUE;
817
818 AS_LOCK_EXIT(as);
819
820 return (DDI_SUCCESS);
821 }
822
823
824 /*
825 * xpvtap_segmf_unregister()
826 * as_callback routine
827 */
828 /*ARGSUSED*/
/* registered via as_add_callback(); called back by the as layer */
829 static void
830 xpvtap_segmf_unregister(struct as *as, void *arg, uint_t event)
831 {
832 xpvtap_state_t *state;
833 caddr_t uaddr;
834 uint_t pgcnt;
835 int i;
836
837
/* arg is the driver soft state passed when the callback was added */
838 state = (xpvtap_state_t *)arg;
839 if (!state->bt_map.um_registered) {
/* nothing was ever registered; just tear down the callback */
840 /* remove the callback (which is this routine) */
841 (void) as_delete_callback(as, arg);
842 return;
843 }
844
845 pgcnt = btopr(state->bt_map.um_guest_size);
846 uaddr = state->bt_map.um_guest_pages;
847
848 /* unmap any outstanding req's grefs */
849 xpvtap_rs_flush(state->bt_map.um_rs, xpvtap_user_request_unmap, state);
850
851 /* Unlock the gref pages */
852 for (i = 0; i < pgcnt; i++) {
/*
 * NOTE(review): the as writer lock is acquired and dropped once
 * per page, presumably to bound writer-lock hold time -- confirm.
 */
853 AS_LOCK_ENTER(as, RW_WRITER);
854 hat_prepare_mapping(as->a_hat, uaddr, NULL);
855 hat_unload(as->a_hat, uaddr, PAGESIZE, HAT_UNLOAD_UNLOCK);
856 hat_release_mapping(as->a_hat, uaddr);
857 AS_LOCK_EXIT(as);
858 uaddr += PAGESIZE;
859 }
860
861 /* remove the callback (which is this routine) */
862 (void) as_delete_callback(as, arg);
863
/* allow a later xpvtap_segmf_register() to run again */
864 state->bt_map.um_registered = B_FALSE;
865 }
866
867
868 /*
869 * xpvtap_user_init()
870 */
871 static int
872 xpvtap_user_init(xpvtap_state_t *state)
873 {
874 xpvtap_user_map_t *map;
875 int e;
876
877
1211 return (DDI_FAILURE);
1212 }
1213 }
1214
1215 /* alloc an ID for the user ring */
1216 e = xpvtap_rs_alloc(state->bt_map.um_rs, uid);
1217 if (e != DDI_SUCCESS) {
1218 return (DDI_FAILURE);
1219 }
1220
1221 /* if we don't have any segments to map, we're done */
1222 if ((req->operation == BLKIF_OP_WRITE_BARRIER) ||
1223 (req->operation == BLKIF_OP_FLUSH_DISKCACHE) ||
1224 (req->nr_segments == 0)) {
1225 return (DDI_SUCCESS);
1226 }
1227
1228 /* get the apps gref address */
1229 uaddr = XPVTAP_GREF_REQADDR(state->bt_map.um_guest_pages, *uid);
1230
1231 AS_LOCK_ENTER(as, RW_READER);
1232 seg = as_findseg(as, state->bt_map.um_guest_pages, 0);
1233 if ((seg == NULL) || ((uaddr + mmu_ptob(req->nr_segments)) >
1234 (seg->s_base + seg->s_size))) {
1235 AS_LOCK_EXIT(as);
1236 return (DDI_FAILURE);
1237 }
1238
1239 /* if we are reading from disk, we are writing into memory */
1240 flags = 0;
1241 if (req->operation == BLKIF_OP_READ) {
1242 flags |= SEGMF_GREF_WR;
1243 }
1244
1245 /* Load the grefs into seg_mf */
1246 for (i = 0; i < req->nr_segments; i++) {
1247 gref[i] = req->seg[i].gref;
1248 }
1249 (void) segmf_add_grefs(seg, uaddr, flags, gref, req->nr_segments,
1250 domid);
1251
1252 AS_LOCK_EXIT(as);
1253
1254 return (DDI_SUCCESS);
1255 }
1256
1257
1258 /*
1259 * xpvtap_user_request_push()
1260 */
1261 static int
1262 xpvtap_user_request_push(xpvtap_state_t *state, blkif_request_t *req,
1263 uint_t uid)
1264 {
1265 blkif_request_t *outstanding_req;
1266 blkif_front_ring_t *uring;
1267 blkif_request_t *target;
1268 xpvtap_user_map_t *map;
1269
1270
1271 uring = &state->bt_user_ring.ur_ring;
1272 map = &state->bt_map;
1297 blkif_request_t *req;
1298 struct seg *seg;
1299 struct as *as;
1300 caddr_t uaddr;
1301 int e;
1302
1303
1304 as = state->bt_map.um_as;
1305 if (as == NULL) {
1306 return;
1307 }
1308
1309 /* get a copy of the original request */
1310 req = &state->bt_map.um_outstanding_reqs[uid];
1311
1312 /* unmap the grefs for this request */
1313 if ((req->operation != BLKIF_OP_WRITE_BARRIER) &&
1314 (req->operation != BLKIF_OP_FLUSH_DISKCACHE) &&
1315 (req->nr_segments != 0)) {
1316 uaddr = XPVTAP_GREF_REQADDR(state->bt_map.um_guest_pages, uid);
1317 AS_LOCK_ENTER(as, RW_READER);
1318 seg = as_findseg(as, state->bt_map.um_guest_pages, 0);
1319 if ((seg == NULL) || ((uaddr + mmu_ptob(req->nr_segments)) >
1320 (seg->s_base + seg->s_size))) {
1321 AS_LOCK_EXIT(as);
1322 xpvtap_rs_free(state->bt_map.um_rs, uid);
1323 return;
1324 }
1325
1326 e = segmf_release_grefs(seg, uaddr, req->nr_segments);
1327 if (e != 0) {
1328 cmn_err(CE_WARN, "unable to release grefs");
1329 }
1330
1331 AS_LOCK_EXIT(as);
1332 }
1333
1334 /* free up the user ring id */
1335 xpvtap_rs_free(state->bt_map.um_rs, uid);
1336 }
1337
1338
1339 static int
1340 xpvtap_user_response_get(xpvtap_state_t *state, blkif_response_t *resp,
1341 uint_t *uid)
1342 {
1343 blkif_front_ring_t *uring;
1344 blkif_response_t *target;
1345
1346
1347 uring = &state->bt_user_ring.ur_ring;
1348
1349 if (!RING_HAS_UNCONSUMED_RESPONSES(uring)) {
1350 return (B_FALSE);
1351 }
|