qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

blkif.h (30878B)


      1 /******************************************************************************
      2  * blkif.h
      3  *
      4  * Unified block-device I/O interface for Xen guest OSes.
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a copy
      7  * of this software and associated documentation files (the "Software"), to
      8  * deal in the Software without restriction, including without limitation the
      9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
     10  * sell copies of the Software, and to permit persons to whom the Software is
     11  * furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice shall be included in
     14  * all copies or substantial portions of the Software.
     15  *
     16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
     19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
     22  * DEALINGS IN THE SOFTWARE.
     23  *
     24  * Copyright (c) 2003-2004, Keir Fraser
     25  * Copyright (c) 2012, Spectra Logic Corporation
     26  */
     27 
     28 #ifndef __XEN_PUBLIC_IO_BLKIF_H__
     29 #define __XEN_PUBLIC_IO_BLKIF_H__
     30 
     31 #include "ring.h"
     32 #include "../grant_table.h"
     33 
     34 /*
     35  * Front->back notifications: When enqueuing a new request, sending a
     36  * notification can be made conditional on req_event (i.e., the generic
     37  * hold-off mechanism provided by the ring macros). Backends must set
     38  * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
     39  *
     40  * Back->front notifications: When enqueuing a new response, sending a
     41  * notification can be made conditional on rsp_event (i.e., the generic
     42  * hold-off mechanism provided by the ring macros). Frontends must set
     43  * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
     44  */
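
/*
 * Illustrative sketch (not part of this interface): a backend consume loop
 * built on the generic hold-off macros from ring.h.  The blkif_back_ring_t
 * type is generated by DEFINE_RING_TYPES() at the end of this header, and
 * process_request() and notify_frontend() are hypothetical helpers standing
 * in for real request handling and event-channel notification.
 *
 *     blkif_back_ring_t ring;
 *     RING_IDX rc;
 *     int more_to_do, notify;
 *
 *     do {
 *         rc = ring.req_cons;
 *         while (RING_HAS_UNCONSUMED_REQUESTS(&ring)) {
 *             blkif_request_t req = *RING_GET_REQUEST(&ring, rc++);
 *             ring.req_cons = rc;
 *             process_request(&req);
 *         }
 *         RING_FINAL_CHECK_FOR_REQUESTS(&ring, more_to_do);
 *     } while (more_to_do);
 *
 *     RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&ring, notify);
 *     if (notify)
 *         notify_frontend();
 *
 * RING_FINAL_CHECK_FOR_REQUESTS() updates req_event before the final recheck,
 * so the frontend only sends an event when the backend actually needs one.
 */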
     45 
     46 #ifndef blkif_vdev_t
     47 #define blkif_vdev_t   uint16_t
     48 #endif
     49 #define blkif_sector_t uint64_t
     50 
     51 /*
     52  * Feature and Parameter Negotiation
     53  * =================================
     54  * The two halves of a Xen block driver use nodes within the XenStore to
     55  * communicate capabilities and to negotiate operating parameters.  This
     56  * section enumerates these nodes, which reside in the respective frontend
     57  * and backend portions of the XenStore, following the XenBus convention.
     58  *
     59  * All data in the XenStore is stored as strings.  Nodes specifying numeric
     60  * values are encoded in decimal.  Integer value ranges listed below are
     61  * expressed as fixed-size integer types capable of storing the conversion
     62  * of a properly formatted node string, without loss of information.
     63  *
     64  * Any specified default value is in effect if the corresponding XenBus node
     65  * is not present in the XenStore.
     66  *
     67  * XenStore nodes in sections marked "PRIVATE" are solely for use by the
     68  * driver side whose XenBus tree contains them.
     69  *
     70  * XenStore nodes marked "DEPRECATED" in their notes section should only be
     71  * used to provide interoperability with legacy implementations.
     72  *
     73  * See the XenBus state transition diagram below for details on when XenBus
     74  * nodes must be published and when they can be queried.
     75  *
     76  *****************************************************************************
     77  *                            Backend XenBus Nodes
     78  *****************************************************************************
     79  *
     80  *------------------ Backend Device Identification (PRIVATE) ------------------
     81  *
     82  * mode
     83  *      Values:         "r" (read only), "w" (writable)
     84  *
     85  *      The read or write access permissions to the backing store to be
     86  *      granted to the frontend.
     87  *
     88  * params
     89  *      Values:         string
     90  *
     91  *      A free-form string providing sufficient information for the
     92  *      hotplug script to attach the device and provide a suitable
     93  *      handler (i.e., a block device) for blkback to use.
     94  *
     95  * physical-device
     96  *      Values:         "MAJOR:MINOR"
     97  *      Notes:          11
     98  *
     99  *      MAJOR and MINOR are the major number and minor number of the
    100  *      backing device respectively.
    101  *
    102  * physical-device-path
    103  *      Values:         path string
    104  *
    105  *      A string that contains the absolute path to the disk image. On
    106  *      NetBSD and Linux this is always a block device, while on FreeBSD
    107  *      it can be either a block device or a regular file.
    108  *
    109  * type
    110  *      Values:         "file", "phy", "tap"
    111  *
    112  *      The type of the backing device/object.
    113  *
    114  *
    115  * direct-io-safe
    116  *      Values:         0/1 (boolean)
    117  *      Default Value:  0
    118  *
    119  *      The underlying storage is not affected by the direct IO memory
    120  *      lifetime bug.  See:
    121  *        http://lists.xen.org/archives/html/xen-devel/2012-12/msg01154.html
    122  *
    123  *      Therefore this option gives the backend permission to use
    124  *      O_DIRECT, notwithstanding that bug.
    125  *
    126  *      That is, if this option is enabled, use of O_DIRECT is safe,
    127  *      in circumstances where we would normally have avoided it as a
    128  *      workaround for that bug.  This option is not relevant for all
    129  *      backends, and not necessarily supported even by those for
    130  *      which it is relevant.  A backend which knows that it is not
    131  *      affected by the bug can ignore this option.
    132  *
    133  *      This option doesn't require a backend to use O_DIRECT, so it
    134  *      should not be used to try to control the caching behaviour.
    135  *
    136  *--------------------------------- Features ---------------------------------
    137  *
    138  * feature-barrier
    139  *      Values:         0/1 (boolean)
    140  *      Default Value:  0
    141  *
    142  *      A value of "1" indicates that the backend can process requests
    143  *      containing the BLKIF_OP_WRITE_BARRIER request opcode.  Requests
    144  *      of this type may still be returned at any time with the
    145  *      BLKIF_RSP_EOPNOTSUPP result code.
    146  *
    147  * feature-flush-cache
    148  *      Values:         0/1 (boolean)
    149  *      Default Value:  0
    150  *
    151  *      A value of "1" indicates that the backend can process requests
    152  *      containing the BLKIF_OP_FLUSH_DISKCACHE request opcode.  Requests
    153  *      of this type may still be returned at any time with the
    154  *      BLKIF_RSP_EOPNOTSUPP result code.
    155  *
    156  * feature-discard
    157  *      Values:         0/1 (boolean)
    158  *      Default Value:  0
    159  *
    160  *      A value of "1" indicates that the backend can process requests
    161  *      containing the BLKIF_OP_DISCARD request opcode.  Requests
    162  *      of this type may still be returned at any time with the
    163  *      BLKIF_RSP_EOPNOTSUPP result code.
    164  *
    165  * feature-persistent
    166  *      Values:         0/1 (boolean)
    167  *      Default Value:  0
    168  *      Notes:          7
    169  *
    170  *      A value of "1" indicates that the backend can keep the grants used
    171  *      by the frontend driver mapped, so the same set of grants should be
    172  *      used in all transactions. The maximum number of grants the backend
    173  *      can map persistently depends on the implementation, but ideally it
    174  *      should be RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST. Using this
    175  *      feature the backend doesn't need to unmap each grant, preventing
    176  *      costly TLB flushes. The backend driver should only map grants
    177  *      persistently if the frontend supports it. If a backend driver chooses
    178  *      to use the persistent protocol when the frontend doesn't support it,
    179  *      it will probably hit the maximum number of persistently mapped grants
    180  *      (due to the fact that the frontend won't be reusing the same grants),
    181  *      and fall back to non-persistent mode. Backend implementations may
    182  *      shrink or expand the number of persistently mapped grants without
    183  *      notifying the frontend depending on memory constraints (this might
    184  *      cause a performance degradation).
    185  *
    186  *      If a backend driver wants to limit the maximum number of persistently
    187  *      mapped grants to a value less than RING_SIZE *
    188  *      BLKIF_MAX_SEGMENTS_PER_REQUEST, an LRU strategy should be used to
    189  *      discard the grants that are less commonly used. Using an LRU in the
    190  *      backend driver paired with a LIFO queue in the frontend gives
    191  *      better performance in this scenario.
    192  *
    193  *----------------------- Request Transport Parameters ------------------------
    194  *
    195  * max-ring-page-order
    196  *      Values:         <uint32_t>
    197  *      Default Value:  0
    198  *      Notes:          1, 3
    199  *
    200  *      The maximum supported size of the request ring buffer in units of
    201  *      lb(machine pages). (e.g. 0 == 1 page, 1 == 2 pages, 2 == 4 pages,
    202  *      etc.).
    203  *
    204  * max-ring-pages
    205  *      Values:         <uint32_t>
    206  *      Default Value:  1
    207  *      Notes:          DEPRECATED, 2, 3
    208  *
    209  *      The maximum supported size of the request ring buffer in units of
    210  *      machine pages.  The value must be a power of 2.
    211  *
    212  *------------------------- Backend Device Properties -------------------------
    213  *
    214  * discard-enable
    215  *      Values:         0/1 (boolean)
    216  *      Default Value:  1
    217  *
    218  *      This optional property, set by the toolstack, instructs the backend
    219  *      to offer (or not to offer) discard to the frontend. If the property
    220  *      is missing the backend should offer discard if the backing storage
    221  *      actually supports it.
    222  *
    223  * discard-alignment
    224  *      Values:         <uint32_t>
    225  *      Default Value:  0
    226  *      Notes:          4, 5
    227  *
    228  *      The offset, in bytes from the beginning of the virtual block device,
    229  *      to the first addressable discard extent on the underlying device.
    230  *
    231  * discard-granularity
    232  *      Values:         <uint32_t>
    233  *      Default Value:  <"sector-size">
    234  *      Notes:          4
    235  *
    236  *      The size, in bytes, of the individually addressable discard extents
    237  *      of the underlying device.
    238  *
    239  * discard-secure
    240  *      Values:         0/1 (boolean)
    241  *      Default Value:  0
    242  *      Notes:          10
    243  *
    244  *      A value of "1" indicates that the backend can process BLKIF_OP_DISCARD
    245  *      requests with the BLKIF_DISCARD_SECURE flag set.
    246  *
    247  * info
    248  *      Values:         <uint32_t> (bitmap)
    249  *
    250  *      A collection of bit flags describing attributes of the backing
    251  *      device.  The VDISK_* macros define the meaning of each bit
    252  *      location.
    253  *
    254  * sector-size
    255  *      Values:         <uint32_t>
    256  *
    257  *      The logical block size, in bytes, of the underlying storage. This
    258  *      must be a power of two with a minimum value of 512.
    259  *
    260  *      NOTE: Because of implementation bugs in some frontends this must be
    261  *            set to 512, unless the frontend advertises a non-zero value
    262  *            in its "feature-large-sector-size" xenbus node. (See below).
    263  *
    264  * physical-sector-size
    265  *      Values:         <uint32_t>
    266  *      Default Value:  <"sector-size">
    267  *
    268  *      The physical block size, in bytes, of the backend storage. This
    269  *      must be an integer multiple of "sector-size".
    270  *
    271  * sectors
    272  *      Values:         <uint64_t>
    273  *
    274  *      The size of the backend device, expressed in units of "sector-size".
    275  *      The product of "sector-size" and "sectors" must also be an integer
    276  *      multiple of "physical-sector-size", if that node is present.
    277  *
    278  *****************************************************************************
    279  *                            Frontend XenBus Nodes
    280  *****************************************************************************
    281  *
    282  *----------------------- Request Transport Parameters -----------------------
    283  *
    284  * event-channel
    285  *      Values:         <uint32_t>
    286  *
    287  *      The identifier of the Xen event channel used to signal activity
    288  *      in the ring buffer.
    289  *
    290  * ring-ref
    291  *      Values:         <uint32_t>
    292  *      Notes:          6
    293  *
    294  *      The Xen grant reference granting permission for the backend to map
    295  *      the sole page of a single-page ring buffer.
    296  *
    297  * ring-ref%u
    298  *      Values:         <uint32_t>
    299  *      Notes:          6
    300  *
    301  *      For a frontend providing a multi-page ring, a "number of ring pages"
    302  *      sized list of nodes, each containing a Xen grant reference granting
    303  *      permission for the backend to map the page of the ring located
    304  *      at page index "%u".  Page indexes are zero based.
    305  *
    306  * protocol
    307  *      Values:         string (XEN_IO_PROTO_ABI_*)
    308  *      Default Value:  XEN_IO_PROTO_ABI_NATIVE
    309  *
    310  *      The machine ABI rules governing the format of all ring request and
    311  *      response structures.
    312  *
    313  * ring-page-order
    314  *      Values:         <uint32_t>
    315  *      Default Value:  0
    316  *      Maximum Value:  MAX(ffs(max-ring-pages) - 1, max-ring-page-order)
    317  *      Notes:          1, 3
    318  *
    319  *      The size of the frontend allocated request ring buffer in units
    320  *      of lb(machine pages). (e.g. 0 == 1 page, 1 == 2 pages, 2 == 4 pages,
    321  *      etc.).
    322  *
    323  * num-ring-pages
    324  *      Values:         <uint32_t>
    325  *      Default Value:  1
    326  *      Maximum Value:  MAX(max-ring-pages,(0x1 << max-ring-page-order))
    327  *      Notes:          DEPRECATED, 2, 3
    328  *
    329  *      The size of the frontend allocated request ring buffer in units of
    330  *      machine pages.  The value must be a power of 2.
    331  *
    332  *--------------------------------- Features ---------------------------------
    333  *
    334  * feature-persistent
    335  *      Values:         0/1 (boolean)
    336  *      Default Value:  0
    337  *      Notes:          7, 8, 9
    338  *
    339  *      A value of "1" indicates that the frontend will reuse the same grants
    340  *      for all transactions, allowing the backend to map them with write
    341  *      access (even when it should be read-only). If the frontend hits the
    342  *      maximum number of allowed persistently mapped grants, it can fall back
    343  *      to non-persistent mode. This will cause a performance degradation,
    344  *      since the backend driver will still try to map those grants
    345  *      persistently. Since the persistent grants protocol is compatible with
    346  *      the previous protocol, a frontend driver can choose to work in
    347  *      persistent mode even when the backend doesn't support it.
    348  *
    349  *      It is recommended that the frontend driver stores the persistently
    350  *      mapped grants in a LIFO queue, so a subset of all persistently mapped
    351  *      grants gets used commonly. This is done in case the backend driver
    352  *      decides to limit the maximum number of persistently mapped grants
    353  *      to a value less than RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST.
    354  *
    355  * feature-large-sector-size
    356  *      Values:         0/1 (boolean)
    357  *      Default Value:  0
    358  *
    359  *      A value of "1" indicates that the frontend will correctly supply and
    360  *      interpret all sector-based quantities in terms of the "sector-size"
    361  *      value supplied in the backend info, whatever that may be set to.
    362  *      If this node is not present or its value is "0" then it is assumed
    363  *      that the frontend requires a logical block size of 512, as that
    364  *      value is hardcoded in some frontend implementations.
    365  *
    366  *------------------------- Virtual Device Properties -------------------------
    367  *
    368  * device-type
    369  *      Values:         "disk", "cdrom", "floppy", etc.
    370  *
    371  * virtual-device
    372  *      Values:         <uint32_t>
    373  *
    374  *      A value indicating the physical device to virtualize within the
    375  *      frontend's domain.  (e.g. "The first ATA disk", "The third SCSI
    376  *      disk", etc.)
    377  *
    378  *      See docs/misc/vbd-interface.txt for details on the format of this
    379  *      value.
    380  *
    381  * Notes
    382  * -----
    383  * (1) Multi-page ring buffer scheme first developed in the Citrix XenServer
    384  *     PV drivers.
    385  * (2) Multi-page ring buffer scheme first used in some RedHat distributions
    386  *     including a distribution deployed on certain nodes of the Amazon
    387  *     EC2 cluster.
    388  * (3) Support for multi-page ring buffers was implemented independently,
    389  *     in slightly different forms, by both Citrix and RedHat/Amazon.
    390  *     For full interoperability, block front and backends should publish
    391  *     identical ring parameters, adjusted for unit differences, to the
    392  *     XenStore nodes used in both schemes.
    393  * (4) Devices that support discard functionality may internally allocate space
    394  *     (discardable extents) in units that are larger than the exported logical
    395  *     block size. If the backing device has such discardable extents the
    396  *     backend should provide both discard-granularity and discard-alignment.
    397  *     Providing just one of the two may be considered an error by the frontend.
    398  *     Backends supporting discard should include discard-granularity and
    400  *     discard-alignment even if they support discarding individual sectors.
    400  *     Frontends should assume discard-alignment == 0 and discard-granularity
    401  *     == sector size if these keys are missing.
    402  * (5) The discard-alignment parameter allows a physical device to be
    403  *     partitioned into virtual devices that do not necessarily begin or
    404  *     end on a discardable extent boundary.
    405  * (6) When there is only a single page allocated to the request ring,
    406  *     'ring-ref' is used to communicate the grant reference for this
    407  *     page to the backend.  When using a multi-page ring, the 'ring-ref'
    408  *     node is not created.  Instead 'ring-ref0' - 'ring-refN' are used.
    409  * (7) When using persistent grants, data has to be copied from/to the page
    410  *     where the grant is currently mapped. The overhead of doing this copy,
    411  *     however, does not outweigh the speed improvement of not having to
    412  *     unmap the grants.
    413  * (8) The frontend driver has to allow the backend driver to map all grants
    414  *     with write access, even when they should be mapped read-only, since
    415  *     further requests may reuse these grants and require write permissions.
    416  * (9) The Linux implementation doesn't have a limit on the maximum number
    417  *     of grants that can be persistently mapped in the frontend driver, but
    418  *     due to the frontend driver implementation it should never be bigger
    419  *     than RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST.
    420  *(10) The discard-secure property may be present and will be set to 1 if the
    421  *     backing device supports secure discard.
    422  *(11) Only used by Linux and NetBSD.
    423  */
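
/*
 * Illustrative sketch (not part of this interface): how a frontend might read
 * the backend nodes described above, assuming the libxenstore API (xs_open(),
 * xs_read()).  The backend directory "be_dir" is a placeholder; in practice it
 * is read from the frontend's own "backend" node.  Missing nodes fall back to
 * the defaults listed above.
 *
 *     #include <stdio.h>
 *     #include <stdlib.h>
 *     #include <xenstore.h>
 *
 *     static unsigned long read_backend_node(struct xs_handle *xsh,
 *                                            const char *be_dir,
 *                                            const char *node,
 *                                            unsigned long default_val)
 *     {
 *         char path[256];
 *         unsigned int len;
 *         char *val;
 *         unsigned long ret = default_val;
 *
 *         snprintf(path, sizeof(path), "%s/%s", be_dir, node);
 *         val = xs_read(xsh, XBT_NULL, path, &len);
 *         if (val) {
 *             ret = strtoul(val, NULL, 10);
 *             free(val);
 *         }
 *         return ret;
 *     }
 *
 * For example, feature negotiation might then look like:
 *
 *     persistent = read_backend_node(xsh, be_dir, "feature-persistent", 0);
 *     max_order  = read_backend_node(xsh, be_dir, "max-ring-page-order", 0);
 */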
    424 
    425 /*
    426  * Multiple hardware queues/rings:
    427  * If supported, the backend will write the key "multi-queue-max-queues" to
    428  * the directory for that vbd, and set its value to the maximum supported
    429  * number of queues.
    430  * Frontends that are aware of this feature and wish to use it can write the
    431  * key "multi-queue-num-queues" with the number they wish to use, which must be
    432  * greater than zero, and no more than the value reported by the backend in
    433  * "multi-queue-max-queues".
    434  *
    435  * For frontends requesting just one queue, the usual event-channel and
    436  * ring-ref keys are written as before, simplifying the backend processing
    437  * to avoid distinguishing between a frontend that doesn't understand the
    438  * multi-queue feature, and one that does, but requested only one queue.
    439  *
    440  * Frontends requesting two or more queues must not write the toplevel
    441  * event-channel and ring-ref keys, instead writing those keys under sub-keys
    442  * named "queue-N", where N is the integer ID of the queue/ring to which
    443  * those keys belong. Queues are indexed from zero.
    444  * For example, a frontend with two queues must write the following set of
    445  * queue-related keys:
    446  *
    447  * /local/domain/1/device/vbd/0/multi-queue-num-queues = "2"
    448  * /local/domain/1/device/vbd/0/queue-0 = ""
    449  * /local/domain/1/device/vbd/0/queue-0/ring-ref = "<ring-ref#0>"
    450  * /local/domain/1/device/vbd/0/queue-0/event-channel = "<evtchn#0>"
    451  * /local/domain/1/device/vbd/0/queue-1 = ""
    452  * /local/domain/1/device/vbd/0/queue-1/ring-ref = "<ring-ref#1>"
    453  * /local/domain/1/device/vbd/0/queue-1/event-channel = "<evtchn#1>"
    454  *
    455  * It is also possible to use multiple queues/rings together with the
    456  * multi-page ring buffer feature.
    457  * For example, a frontend that requests two queues/rings, each with a ring
    458  * buffer two pages in size, must write the following set of related keys:
    459  *
    460  * /local/domain/1/device/vbd/0/multi-queue-num-queues = "2"
    461  * /local/domain/1/device/vbd/0/ring-page-order = "1"
    462  * /local/domain/1/device/vbd/0/queue-0 = ""
    463  * /local/domain/1/device/vbd/0/queue-0/ring-ref0 = "<ring-ref#0>"
    464  * /local/domain/1/device/vbd/0/queue-0/ring-ref1 = "<ring-ref#1>"
    465  * /local/domain/1/device/vbd/0/queue-0/event-channel = "<evtchn#0>"
    466  * /local/domain/1/device/vbd/0/queue-1 = ""
    467  * /local/domain/1/device/vbd/0/queue-1/ring-ref0 = "<ring-ref#2>"
    468  * /local/domain/1/device/vbd/0/queue-1/ring-ref1 = "<ring-ref#3>"
    469  * /local/domain/1/device/vbd/0/queue-1/event-channel = "<evtchn#1>"
    470  *
    471  */
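
/*
 * Illustrative sketch (not part of this interface): writing the per-queue keys
 * shown above from a frontend, assuming the libxenstore API.  The device
 * directory "dir" and the ring_ref/evtchn values are placeholders, and the
 * writes would normally be grouped in a single xs transaction together with
 * "multi-queue-num-queues".
 *
 *     #include <stdio.h>
 *     #include <string.h>
 *     #include <stdint.h>
 *     #include <stdbool.h>
 *     #include <xenstore.h>
 *
 *     static bool write_queue_keys(struct xs_handle *xsh, xs_transaction_t t,
 *                                  const char *dir, unsigned int queue,
 *                                  uint32_t ring_ref, uint32_t evtchn)
 *     {
 *         char path[256], val[32];
 *
 *         snprintf(path, sizeof(path), "%s/queue-%u/ring-ref", dir, queue);
 *         snprintf(val, sizeof(val), "%u", ring_ref);
 *         if (!xs_write(xsh, t, path, val, strlen(val)))
 *             return false;
 *
 *         snprintf(path, sizeof(path), "%s/queue-%u/event-channel", dir, queue);
 *         snprintf(val, sizeof(val), "%u", evtchn);
 *         return xs_write(xsh, t, path, val, strlen(val));
 *     }
 */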
    472 
    473 /*
    474  * STATE DIAGRAMS
    475  *
    476  *****************************************************************************
    477  *                                   Startup                                 *
    478  *****************************************************************************
    479  *
    480  * Tool stack creates front and back nodes with state XenbusStateInitialising.
    481  *
    482  * Front                                Back
    483  * =================================    =====================================
    484  * XenbusStateInitialising              XenbusStateInitialising
    485  *  o Query virtual device               o Query backend device identification
    486  *    properties.                          data.
    487  *  o Setup OS device instance.          o Open and validate backend device.
    488  *                                       o Publish backend features and
    489  *                                         transport parameters.
    490  *                                                      |
    491  *                                                      |
    492  *                                                      V
    493  *                                      XenbusStateInitWait
    494  *
    495  * o Query backend features and
    496  *   transport parameters.
    497  * o Allocate and initialize the
    498  *   request ring.
    499  * o Publish transport parameters
    500  *   that will be in effect during
    501  *   this connection.
    502  *              |
    503  *              |
    504  *              V
    505  * XenbusStateInitialised
    506  *
    507  *                                       o Query frontend transport parameters.
    508  *                                       o Connect to the request ring and
    509  *                                         event channel.
    510  *                                       o Publish backend device properties.
    511  *                                                      |
    512  *                                                      |
    513  *                                                      V
    514  *                                      XenbusStateConnected
    515  *
    516  *  o Query backend device properties.
    517  *  o Finalize OS virtual device
    518  *    instance.
    519  *              |
    520  *              |
    521  *              V
    522  * XenbusStateConnected
    523  *
    524  * Note: Drivers that do not support any optional features, or the negotiation
    525  *       of transport parameters, can skip certain states in the state machine:
    526  *
    527  *       o A frontend may transition to XenbusStateInitialised without
    528  *         waiting for the backend to enter XenbusStateInitWait.  In this
    529  *         case, default transport parameters are in effect and any
    530  *         transport parameters published by the frontend must contain
    531  *         their default values.
    532  *
    533  *       o A backend may transition to XenbusStateInitialised, bypassing
    534  *         XenbusStateInitWait, without waiting for the frontend to first
    535  *         enter the XenbusStateInitialised state.  In this case, default
    536  *         transport parameters are in effect and any transport parameters
    537  *         published by the backend must contain their default values.
    538  *
    539  *       Drivers that support optional features and/or transport parameter
    540  *       negotiation must tolerate these additional state transition paths.
    541  *       In general this means performing the work of any skipped state
    542  *       transition, if it has not already been performed, in addition to the
    543  *       work associated with entry into the current state.
    544  */
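
/*
 * Illustrative sketch (not part of this interface): the shape of a backend
 * handler reacting to frontend state changes per the diagram above.  The
 * XenbusState values come from the xenbus interface; connect_ring(),
 * publish_backend_properties(), close_device() and switch_state() are
 * hypothetical helpers for mapping the ring, writing the backend device
 * properties, tearing down, and publishing the backend's own state.
 *
 *     static void frontend_changed(enum xenbus_state frontend_state)
 *     {
 *         switch (frontend_state) {
 *         case XenbusStateInitialising:
 *             break;
 *         case XenbusStateInitialised:
 *             connect_ring();
 *             publish_backend_properties();
 *             switch_state(XenbusStateConnected);
 *             break;
 *         case XenbusStateConnected:
 *             break;
 *         case XenbusStateClosing:
 *         case XenbusStateClosed:
 *             close_device();
 *             switch_state(XenbusStateClosed);
 *             break;
 *         default:
 *             break;
 *         }
 *     }
 */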
    545 
    546 /*
    547  * REQUEST CODES.
    548  */
    549 #define BLKIF_OP_READ              0
    550 #define BLKIF_OP_WRITE             1
    551 /*
    552  * All writes issued prior to a request with the BLKIF_OP_WRITE_BARRIER
    553  * operation code ("barrier request") must be completed prior to the
    554  * execution of the barrier request.  All writes issued after the barrier
    555  * request must not execute until after the completion of the barrier request.
    556  *
    557  * Optional.  See "feature-barrier" XenBus node documentation above.
    558  */
    559 #define BLKIF_OP_WRITE_BARRIER     2
    560 /*
    561  * Commit any uncommitted contents of the backing device's volatile cache
    562  * to stable storage.
    563  *
    564  * Optional.  See "feature-flush-cache" XenBus node documentation above.
    565  */
    566 #define BLKIF_OP_FLUSH_DISKCACHE   3
    567 /*
    568  * Used in SLES sources for a device-specific command packet
    569  * contained within the request. Reserved for that purpose.
    570  */
    571 #define BLKIF_OP_RESERVED_1        4
    572 /*
    573  * Indicate to the backend device that a region of storage is no longer in
    574  * use, and may be discarded at any time without impact to the client.  If
    575  * the BLKIF_DISCARD_SECURE flag is set on the request, all copies of the
    576  * discarded region on the device must be rendered unrecoverable before the
    577  * command returns.
    578  *
    579  * This operation is analogous to performing a trim (ATA) or unmap (SCSI)
    580  * command on a native device.
    581  *
    582  * More information about trim/unmap operations can be found at:
    583  * http://t13.org/Documents/UploadedDocuments/docs2008/
    584  *     e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc
    585  * http://www.seagate.com/staticfiles/support/disc/manuals/
    586  *     Interface%20manuals/100293068c.pdf
    587  *
    588  * Optional.  See "feature-discard", "discard-alignment",
    589  * "discard-granularity", and "discard-secure" in the XenBus node
    590  * documentation above.
    591  */
    592 #define BLKIF_OP_DISCARD           5
    593 
    594 /*
    595  * Recognized if "feature-max-indirect-segments" is present in the backend
    596  * xenbus info. The "feature-max-indirect-segments" node contains the maximum
    597  * number of segments allowed by the backend per request. If the node is
    598  * present, the frontend might use blkif_request_indirect structs in order to
    599  * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The
    600  * maximum number of indirect segments is fixed by the backend, but the
    601  * frontend can issue requests with any number of indirect segments as long as
    602  * it's less than the number provided by the backend. The indirect_grefs field
    603  * in blkif_request_indirect should be filled by the frontend with the
    604  * grant references of the pages that are holding the indirect segments.
    605  * These pages are filled with an array of blkif_request_segment that hold the
    606  * information about the segments. The number of indirect pages to use is
    607  * determined by the number of segments an indirect request contains. Every
    608  * indirect page can contain a maximum of
    609  * (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to
    610  * calculate the number of indirect pages to use we have to do
    611  * ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))).
    612  *
    613  * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
    614  * create the "feature-max-indirect-segments" node!
    615  */
    616 #define BLKIF_OP_INDIRECT          6
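
/*
 * Illustrative sketch (not part of this interface): computing the number of
 * indirect pages for a given segment count, per the ceil() formula above.
 * PAGE_SIZE is assumed to be 4096 here; SEGS_PER_INDIRECT_PAGE and
 * indirect_pages() are local names used only for this example.
 *
 *     #define SEGS_PER_INDIRECT_PAGE \
 *         (PAGE_SIZE / sizeof(struct blkif_request_segment))
 *
 *     static inline unsigned int indirect_pages(unsigned int nr_segments)
 *     {
 *         return (nr_segments + SEGS_PER_INDIRECT_PAGE - 1) /
 *                SEGS_PER_INDIRECT_PAGE;
 *     }
 *
 * With 4096-byte pages and the 8-byte blkif_request_segment defined below this
 * gives 512 segments per indirect page, so e.g. 1000 segments require two
 * indirect pages.
 */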
    617 
    618 /*
    619  * Maximum scatter/gather segments per request.
    620  * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE.
    621  * NB. This could be 12 if the ring indexes weren't stored in the same page.
    622  */
    623 #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
    624 
    625 /*
    626  * Maximum number of indirect pages to use per request.
    627  */
    628 #define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
    629 
    630 /*
    631  * NB. 'first_sect' and 'last_sect' in blkif_request_segment, as well as
    632  * 'sector_number' in blkif_request, blkif_request_discard and
    633  * blkif_request_indirect are sector-based quantities. See the description
    634  * of the "feature-large-sector-size" frontend xenbus node above for
    635  * more information.
    636  */
    637 struct blkif_request_segment {
    638     grant_ref_t gref;        /* reference to I/O buffer frame        */
    639     /* @first_sect: first sector in frame to transfer (inclusive).   */
    640     /* @last_sect: last sector in frame to transfer (inclusive).     */
    641     uint8_t     first_sect, last_sect;
    642 };
    643 
    644 /*
    645  * Starting ring element for any I/O request.
    646  */
    647 struct blkif_request {
    648     uint8_t        operation;    /* BLKIF_OP_???                         */
    649     uint8_t        nr_segments;  /* number of segments                   */
    650     blkif_vdev_t   handle;       /* only for read/write requests         */
    651     uint64_t       id;           /* private guest value, echoed in resp  */
    652     blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
    653     struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    654 };
    655 typedef struct blkif_request blkif_request_t;
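
/*
 * Illustrative sketch (not part of this interface): filling in a
 * single-segment read request.  "vdev", "start_sector" and "gref" are
 * placeholders for the virtual device handle, the starting sector and a grant
 * reference covering the data page; all sector quantities are in units of the
 * negotiated "sector-size".  The id value is opaque to the backend and is
 * echoed back in the matching response; first_sect 0 and last_sect 7 cover one
 * 4 KiB page of 512-byte sectors.
 *
 *     blkif_request_t req;
 *
 *     req.operation     = BLKIF_OP_READ;
 *     req.nr_segments   = 1;
 *     req.handle        = vdev;
 *     req.id            = 0x1234;
 *     req.sector_number = start_sector;
 *     req.seg[0].gref       = gref;
 *     req.seg[0].first_sect = 0;
 *     req.seg[0].last_sect  = 7;
 */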
    656 
    657 /*
    658  * Cast to this structure when blkif_request.operation == BLKIF_OP_DISCARD
    659  * sizeof(struct blkif_request_discard) <= sizeof(struct blkif_request)
    660  */
    661 struct blkif_request_discard {
    662     uint8_t        operation;    /* BLKIF_OP_DISCARD                     */
    663     uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
    664 #define BLKIF_DISCARD_SECURE (1<<0)  /* ignored if discard-secure=0      */
    665     blkif_vdev_t   handle;       /* same as for read/write requests      */
    666     uint64_t       id;           /* private guest value, echoed in resp  */
    667     blkif_sector_t sector_number;/* start sector idx on disk             */
    668     uint64_t       nr_sectors;   /* number of contiguous sectors to discard*/
    669 };
    670 typedef struct blkif_request_discard blkif_request_discard_t;
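
/*
 * Illustrative sketch (not part of this interface): issuing a discard.  The
 * request occupies a normal ring slot and is simply laid out as
 * blkif_request_discard; "ring", "vdev", "start" and "nr_secs" are
 * placeholders, and BLKIF_DISCARD_SECURE may only be set when the backend
 * advertises "discard-secure" (see above).
 *
 *     blkif_request_discard_t *dreq = (blkif_request_discard_t *)
 *         RING_GET_REQUEST(&ring, ring.req_prod_pvt);
 *
 *     dreq->operation     = BLKIF_OP_DISCARD;
 *     dreq->flag          = 0;
 *     dreq->handle        = vdev;
 *     dreq->id            = 0x5678;
 *     dreq->sector_number = start;
 *     dreq->nr_sectors    = nr_secs;
 *     ring.req_prod_pvt++;
 */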
    671 
    672 struct blkif_request_indirect {
    673     uint8_t        operation;    /* BLKIF_OP_INDIRECT                    */
    674     uint8_t        indirect_op;  /* BLKIF_OP_{READ/WRITE}                */
    675     uint16_t       nr_segments;  /* number of segments                   */
    676     uint64_t       id;           /* private guest value, echoed in resp  */
    677     blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
    678     blkif_vdev_t   handle;       /* same as for read/write requests      */
    679     grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
    680 #ifdef __i386__
    681     uint64_t       pad;          /* Make it 64 byte aligned on i386      */
    682 #endif
    683 };
    684 typedef struct blkif_request_indirect blkif_request_indirect_t;
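
/*
 * Illustrative sketch (not part of this interface): building an indirect
 * write.  Each granted indirect page holds an array of struct
 * blkif_request_segment that the frontend fills in before submitting;
 * "nr_segments", "vdev", "start_sector" and "indirect_gref[]" are
 * placeholders, and indirect_pages() is the helper sketched after
 * BLKIF_OP_INDIRECT above.
 *
 *     blkif_request_indirect_t ireq;
 *     unsigned int i, pages = indirect_pages(nr_segments);
 *
 *     ireq.operation     = BLKIF_OP_INDIRECT;
 *     ireq.indirect_op   = BLKIF_OP_WRITE;
 *     ireq.nr_segments   = nr_segments;
 *     ireq.id            = 0x9abc;
 *     ireq.sector_number = start_sector;
 *     ireq.handle        = vdev;
 *     for (i = 0; i < pages; i++)
 *         ireq.indirect_grefs[i] = indirect_gref[i];
 */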
    685 
    686 struct blkif_response {
    687     uint64_t        id;              /* copied from request */
    688     uint8_t         operation;       /* copied from request */
    689     int16_t         status;          /* BLKIF_RSP_???       */
    690 };
    691 typedef struct blkif_response blkif_response_t;
    692 
    693 /*
    694  * STATUS RETURN CODES.
    695  */
    697  /* Operation not supported (returned for optional requests, e.g. barrier,
    697     flush or discard when the backend lacks the feature). */
    697 #define BLKIF_RSP_EOPNOTSUPP  -2
    698  /* Operation failed for some unspecified reason (-EIO). */
    699 #define BLKIF_RSP_ERROR       -1
    700  /* Operation completed successfully. */
    701 #define BLKIF_RSP_OKAY         0
    702 
    703 /*
    704  * Generate blkif ring structures and types.
    705  */
    706 DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);
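
/*
 * Illustrative sketch (not part of this interface): a frontend attaching to a
 * single-page ring and submitting one request using the types generated above.
 * alloc_shared_page(), notify_backend() and complete_io() are hypothetical
 * helpers for allocating/granting the shared page, kicking the event channel
 * and finishing the I/O; a 4096-byte page is assumed.
 *
 *     blkif_sring_t *sring = alloc_shared_page();
 *     blkif_front_ring_t ring;
 *     blkif_request_t *req;
 *     blkif_response_t rsp;
 *     int notify;
 *
 *     SHARED_RING_INIT(sring);
 *     FRONT_RING_INIT(&ring, sring, 4096);
 *
 *     req = RING_GET_REQUEST(&ring, ring.req_prod_pvt);
 *     ... fill in *req as in the examples above ...
 *     ring.req_prod_pvt++;
 *     RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&ring, notify);
 *     if (notify)
 *         notify_backend();
 *
 * and, when the event channel later fires:
 *
 *     rsp = *RING_GET_RESPONSE(&ring, ring.rsp_cons);
 *     ring.rsp_cons++;
 *     complete_io(rsp.id, rsp.status == BLKIF_RSP_OKAY ? 0 : -1);
 */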
    707 
    708 #define VDISK_CDROM        0x1
    709 #define VDISK_REMOVABLE    0x2
    710 #define VDISK_READONLY     0x4
    711 
    712 #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */