DPDK 18.11.10
rte_vhost.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#ifndef _RTE_VHOST_H_
#define _RTE_VHOST_H_

#include <stdint.h>
#include <sys/eventfd.h>

#include <rte_memory.h>
#include <rte_mempool.h>

#ifdef __cplusplus
extern "C" {
#endif

/* These are not C++-aware. */
#include <linux/vhost.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_net.h>

#define RTE_VHOST_USER_CLIENT (1ULL << 0)
#define RTE_VHOST_USER_NO_RECONNECT (1ULL << 1)
#define RTE_VHOST_USER_DEQUEUE_ZERO_COPY (1ULL << 2)
#define RTE_VHOST_USER_IOMMU_SUPPORT (1ULL << 3)
#define RTE_VHOST_USER_POSTCOPY_SUPPORT (1ULL << 4)

/* Features. */
#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
 #define VIRTIO_NET_F_GUEST_ANNOUNCE 21
#endif

#ifndef VIRTIO_NET_F_MQ
 #define VIRTIO_NET_F_MQ 22
#endif

#ifndef VIRTIO_NET_F_MTU
 #define VIRTIO_NET_F_MTU 3
#endif

#ifndef VIRTIO_F_ANY_LAYOUT
 #define VIRTIO_F_ANY_LAYOUT 27
#endif

#ifndef VHOST_USER_PROTOCOL_F_MQ
#define VHOST_USER_PROTOCOL_F_MQ 0
#endif

#ifndef VHOST_USER_PROTOCOL_F_LOG_SHMFD
#define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
#endif

#ifndef VHOST_USER_PROTOCOL_F_RARP
#define VHOST_USER_PROTOCOL_F_RARP 2
#endif

#ifndef VHOST_USER_PROTOCOL_F_REPLY_ACK
#define VHOST_USER_PROTOCOL_F_REPLY_ACK 3
#endif

#ifndef VHOST_USER_PROTOCOL_F_NET_MTU
#define VHOST_USER_PROTOCOL_F_NET_MTU 4
#endif

#ifndef VHOST_USER_PROTOCOL_F_SLAVE_REQ
#define VHOST_USER_PROTOCOL_F_SLAVE_REQ 5
#endif

#ifndef VHOST_USER_PROTOCOL_F_CRYPTO_SESSION
#define VHOST_USER_PROTOCOL_F_CRYPTO_SESSION 7
#endif

#ifndef VHOST_USER_PROTOCOL_F_PAGEFAULT
#define VHOST_USER_PROTOCOL_F_PAGEFAULT 8
#endif

#ifndef VHOST_USER_PROTOCOL_F_CONFIG
#define VHOST_USER_PROTOCOL_F_CONFIG 9
#endif

#ifndef VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD
#define VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD 10
#endif

#ifndef VHOST_USER_PROTOCOL_F_HOST_NOTIFIER
#define VHOST_USER_PROTOCOL_F_HOST_NOTIFIER 11
#endif

#ifndef VHOST_USER_F_PROTOCOL_FEATURES
#define VHOST_USER_F_PROTOCOL_FEATURES 30
#endif

struct rte_vhost_mem_region {
	uint64_t guest_phys_addr;
	uint64_t guest_user_addr;
	uint64_t host_user_addr;
	uint64_t size;
	void *mmap_addr;
	uint64_t mmap_size;
	int fd;
};

struct rte_vhost_memory {
	uint32_t nregions;
	struct rte_vhost_mem_region regions[];
};

struct rte_vhost_vring {
	struct vring_desc *desc;
	struct vring_avail *avail;
	struct vring_used *used;
	uint64_t log_guest_addr;

	int callfd;

	int kickfd;
	uint16_t size;
};
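
/*
 * Usage sketch (illustrative addition, not part of the upstream header):
 * querying a vring and suppressing guest-to-host notifications while the
 * application polls the ring.  It assumes a valid device id `vid` (for
 * example inside the new_device() callback) and that vring 0 exists; the
 * functions used are declared further below in this header, and error
 * handling is kept minimal.
 *
 *	struct rte_vhost_vring vring;
 *
 *	if (rte_vhost_get_vhost_vring(vid, 0, &vring) == 0) {
 *		// vring.size is the ring size; kickfd/callfd are the eventfds.
 *		// Ask the guest not to kick us for new buffers; we poll instead.
 *		rte_vhost_enable_guest_notification(vid, 0, 0);
 *	}
 *
 *	// Later, after placing used descriptors, interrupt the guest:
 *	rte_vhost_vring_call(vid, 0);
 */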

struct vhost_device_ops {
	int (*new_device)(int vid);
	void (*destroy_device)(int vid);

	int (*vring_state_changed)(int vid, uint16_t queue_id, int enable);

	int (*features_changed)(int vid, uint64_t features);

	int (*new_connection)(int vid);
	void (*destroy_connection)(int vid);

	void (*guest_notified)(int vid);

	void *reserved[1];
};
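
/*
 * Usage sketch (illustrative addition, not part of the upstream header):
 * a minimal vhost_device_ops table.  Only the callbacks the application
 * cares about are set here; optional callbacks can be left NULL.  The
 * callback bodies and names are placeholders chosen for the example.
 *
 *	static int new_device_cb(int vid) { return 0; }
 *	static void destroy_device_cb(int vid) { }
 *	static int vring_state_cb(int vid, uint16_t qid, int enable) { return 0; }
 *
 *	static const struct vhost_device_ops vhost_ops = {
 *		.new_device = new_device_cb,
 *		.destroy_device = destroy_device_cb,
 *		.vring_state_changed = vring_state_cb,
 *	};
 *
 * The table is passed to rte_vhost_driver_callback_register(), declared
 * further below, before rte_vhost_driver_start() is called on the socket.
 */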

__rte_deprecated
static __rte_always_inline uint64_t
rte_vhost_gpa_to_vva(struct rte_vhost_memory *mem, uint64_t gpa)
{
	struct rte_vhost_mem_region *reg;
	uint32_t i;

	for (i = 0; i < mem->nregions; i++) {
		reg = &mem->regions[i];
		if (gpa >= reg->guest_phys_addr &&
		    gpa <  reg->guest_phys_addr + reg->size) {
			return gpa - reg->guest_phys_addr +
			       reg->host_user_addr;
		}
	}

	return 0;
}

static __rte_always_inline uint64_t
rte_vhost_va_from_guest_pa(struct rte_vhost_memory *mem,
			   uint64_t gpa, uint64_t *len)
{
	struct rte_vhost_mem_region *r;
	uint32_t i;

	for (i = 0; i < mem->nregions; i++) {
		r = &mem->regions[i];
		if (gpa >= r->guest_phys_addr &&
		    gpa <  r->guest_phys_addr + r->size) {

			if (unlikely(*len > r->guest_phys_addr + r->size - gpa))
				*len = r->guest_phys_addr + r->size - gpa;

			return gpa - r->guest_phys_addr +
			       r->host_user_addr;
		}
	}
	*len = 0;

	return 0;
}
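
/*
 * Usage sketch (illustrative addition, not part of the upstream header):
 * translating a guest physical address taken from a descriptor into a host
 * virtual address.  It assumes a valid device id `vid`, needs <stdlib.h>
 * for free(), and fetches the memory table on every call for brevity; real
 * applications typically cache the table once in new_device().
 *
 *	static void *
 *	gpa_to_vva(int vid, uint64_t gpa, uint64_t desc_len)
 *	{
 *		struct rte_vhost_memory *mem = NULL;
 *		uint64_t len = desc_len;
 *		uint64_t vva;
 *
 *		if (rte_vhost_get_mem_table(vid, &mem) != 0)
 *			return NULL;
 *
 *		vva = rte_vhost_va_from_guest_pa(mem, gpa, &len);
 *		free(mem);   // the caller owns and releases the table
 *
 *		if (vva == 0 || len < desc_len)
 *			return NULL; // not mapped, or mapping shorter than needed
 *		return (void *)(uintptr_t)vva;
 *	}
 */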

#define RTE_VHOST_NEED_LOG(features) ((features) & (1ULL << VHOST_F_LOG_ALL))

void rte_vhost_log_write(int vid, uint64_t addr, uint64_t len);

void rte_vhost_log_used_vring(int vid, uint16_t vring_idx,
			      uint64_t offset, uint64_t len);

int rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable);

int rte_vhost_driver_register(const char *path, uint64_t flags);

/* Unregister vhost driver. This is only meaningful to vhost user. */
int rte_vhost_driver_unregister(const char *path);

int __rte_experimental
rte_vhost_driver_attach_vdpa_device(const char *path, int did);

int __rte_experimental
rte_vhost_driver_detach_vdpa_device(const char *path);

int __rte_experimental
rte_vhost_driver_get_vdpa_device_id(const char *path);

int rte_vhost_driver_set_features(const char *path, uint64_t features);

int rte_vhost_driver_enable_features(const char *path, uint64_t features);

int rte_vhost_driver_disable_features(const char *path, uint64_t features);

int rte_vhost_driver_get_features(const char *path, uint64_t *features);

int __rte_experimental
rte_vhost_driver_get_protocol_features(const char *path,
		uint64_t *protocol_features);

int __rte_experimental
rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num);

int rte_vhost_get_negotiated_features(int vid, uint64_t *features);

/* Register callbacks. */
int rte_vhost_driver_callback_register(const char *path,
	struct vhost_device_ops const * const ops);

int rte_vhost_driver_start(const char *path);

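/*
 * Usage sketch (illustrative addition, not part of the upstream header):
 * the usual bring-up order for a vhost-user socket.  The socket path, the
 * RTE_VHOST_USER_CLIENT flag, the disabled feature bit and the ops table
 * are example choices, not requirements.
 *
 *	static int
 *	start_vhost_port(const char *path, const struct vhost_device_ops *ops)
 *	{
 *		if (rte_vhost_driver_register(path, RTE_VHOST_USER_CLIENT) != 0)
 *			return -1;
 *
 *		// Optionally trim the advertised feature set before any guest
 *		// connects, e.g. expose only a single queue pair.
 *		if (rte_vhost_driver_disable_features(path,
 *				1ULL << VIRTIO_NET_F_MQ) != 0)
 *			return -1;
 *
 *		if (rte_vhost_driver_callback_register(path, ops) != 0)
 *			return -1;
 *
 *		// Starts the vhost-user session handling for this socket.
 *		return rte_vhost_driver_start(path);
 *	}
 */
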
int rte_vhost_get_mtu(int vid, uint16_t *mtu);

int rte_vhost_get_numa_node(int vid);

__rte_deprecated
uint32_t rte_vhost_get_queue_num(int vid);

uint16_t rte_vhost_get_vring_num(int vid);

int rte_vhost_get_ifname(int vid, char *buf, size_t len);

uint16_t rte_vhost_avail_entries(int vid, uint16_t queue_id);

struct rte_mbuf;
struct rte_mempool;
uint16_t rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
	struct rte_mbuf **pkts, uint16_t count);

uint16_t rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count);

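/*
 * Usage sketch (illustrative addition, not part of the upstream header):
 * a minimal polling step that drains packets the guest transmitted and
 * echoes them back.  Queue 1 is used as the guest TX ring and queue 0 as
 * the guest RX ring, which matches the common virtio layout for a single
 * queue pair; `pool` is an mbuf pool created by the application, and
 * <rte_mbuf.h> is needed for rte_pktmbuf_free().
 *
 *	#define BURST 32
 *
 *	static void
 *	echo_burst(int vid, struct rte_mempool *pool)
 *	{
 *		struct rte_mbuf *pkts[BURST];
 *		uint16_t nb_rx, i;
 *
 *		// Packets sent by the guest (its TX ring, queue 1 here).
 *		nb_rx = rte_vhost_dequeue_burst(vid, 1, pool, pkts, BURST);
 *		if (nb_rx == 0)
 *			return;
 *
 *		// Hand them straight back on the guest RX ring (queue 0).
 *		rte_vhost_enqueue_burst(vid, 0, pkts, nb_rx);
 *
 *		// Enqueue copies the data; the caller still owns the mbufs.
 *		for (i = 0; i < nb_rx; i++)
 *			rte_pktmbuf_free(pkts[i]);
 *	}
 */
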
int rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem);

int rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
			      struct rte_vhost_vring *vring);

int rte_vhost_vring_call(int vid, uint16_t vring_idx);

uint32_t rte_vhost_rx_queue_count(int vid, uint16_t qid);

int __rte_experimental
rte_vhost_get_log_base(int vid, uint64_t *log_base, uint64_t *log_size);

int __rte_experimental
rte_vhost_get_vring_base(int vid, uint16_t queue_id,
		uint16_t *last_avail_idx, uint16_t *last_used_idx);

int __rte_experimental
rte_vhost_set_vring_base(int vid, uint16_t queue_id,
		uint16_t last_avail_idx, uint16_t last_used_idx);

int __rte_experimental
rte_vhost_get_vdpa_device_id(int vid);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_VHOST_H_ */