qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

aspeed_hace.c (16544B)


/*
 * ASPEED Hash and Crypto Engine
 *
 * Copyright (C) 2021 IBM Corp.
 *
 * Joel Stanley <joel@jms.id.au>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "hw/misc/aspeed_hace.h"
#include "qapi/error.h"
#include "migration/vmstate.h"
#include "crypto/hash.h"
#include "hw/qdev-properties.h"
#include "hw/irq.h"

#define R_CRYPT_CMD     (0x10 / 4)

#define R_STATUS        (0x1c / 4)
#define HASH_IRQ        BIT(9)
#define CRYPT_IRQ       BIT(12)
#define TAG_IRQ         BIT(15)

#define R_HASH_SRC      (0x20 / 4)
#define R_HASH_DEST     (0x24 / 4)
#define R_HASH_KEY_BUFF (0x28 / 4)
#define R_HASH_SRC_LEN  (0x2c / 4)

#define R_HASH_CMD      (0x30 / 4)
/* Hash algorithm selection */
#define  HASH_ALGO_MASK                 (BIT(4) | BIT(5) | BIT(6))
#define  HASH_ALGO_MD5                  0
#define  HASH_ALGO_SHA1                 BIT(5)
#define  HASH_ALGO_SHA224               BIT(6)
#define  HASH_ALGO_SHA256               (BIT(4) | BIT(6))
#define  HASH_ALGO_SHA512_SERIES        (BIT(5) | BIT(6))
/* SHA512 algorithm selection */
#define  SHA512_HASH_ALGO_MASK          (BIT(10) | BIT(11) | BIT(12))
#define  HASH_ALGO_SHA512_SHA512        0
#define  HASH_ALGO_SHA512_SHA384        BIT(10)
#define  HASH_ALGO_SHA512_SHA256        BIT(11)
#define  HASH_ALGO_SHA512_SHA224        (BIT(10) | BIT(11))
/* HMAC modes */
#define  HASH_HMAC_MASK                 (BIT(7) | BIT(8))
#define  HASH_DIGEST                    0
#define  HASH_DIGEST_HMAC               BIT(7)
#define  HASH_DIGEST_ACCUM              BIT(8)
#define  HASH_HMAC_KEY                  (BIT(7) | BIT(8))
/* Cascaded operation modes */
#define  HASH_ONLY                      0
#define  HASH_ONLY2                     BIT(0)
#define  HASH_CRYPT_THEN_HASH           BIT(1)
#define  HASH_HASH_THEN_CRYPT           (BIT(0) | BIT(1))
/* Other cmd bits */
#define  HASH_IRQ_EN                    BIT(9)
#define  HASH_SG_EN                     BIT(18)
/* Scatter-gather data list */
#define SG_LIST_LEN_SIZE                4
#define SG_LIST_LEN_MASK                0x0FFFFFFF
#define SG_LIST_LEN_LAST                BIT(31)
#define SG_LIST_ADDR_SIZE               4
#define SG_LIST_ADDR_MASK               0x7FFFFFFF
#define SG_LIST_ENTRY_SIZE              (SG_LIST_LEN_SIZE + SG_LIST_ADDR_SIZE)
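
/*
 * Each scatter-gather entry in DRAM is two little-endian 32-bit words:
 * a length word (bit 31 marks the last entry) followed by a source
 * address word. For example, a single-entry list describing a 64-byte
 * buffer at DRAM offset 0x1000 would be stored as the two words
 * 0x80000040 (SG_LIST_LEN_LAST | 64) and 0x00001000.
 */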

static const struct {
    uint32_t mask;
    QCryptoHashAlgorithm algo;
} hash_algo_map[] = {
    { HASH_ALGO_MD5, QCRYPTO_HASH_ALG_MD5 },
    { HASH_ALGO_SHA1, QCRYPTO_HASH_ALG_SHA1 },
    { HASH_ALGO_SHA224, QCRYPTO_HASH_ALG_SHA224 },
    { HASH_ALGO_SHA256, QCRYPTO_HASH_ALG_SHA256 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA512,
      QCRYPTO_HASH_ALG_SHA512 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA384,
      QCRYPTO_HASH_ALG_SHA384 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA256,
      QCRYPTO_HASH_ALG_SHA256 },
};

static int hash_algo_lookup(uint32_t reg)
{
    int i;

    reg &= HASH_ALGO_MASK | SHA512_HASH_ALGO_MASK;

    for (i = 0; i < ARRAY_SIZE(hash_algo_map); i++) {
        if (reg == hash_algo_map[i].mask) {
            return hash_algo_map[i].algo;
        }
    }

    return -1;
}
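
/*
 * Example: a guest command word with BIT(4) | BIT(6) set and the
 * SHA512-series bits clear matches the HASH_ALGO_SHA256 entry above,
 * so hash_algo_lookup() returns QCRYPTO_HASH_ALG_SHA256. Any bit
 * pattern not present in hash_algo_map yields -1 and is rejected by
 * the caller.
 */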

/**
 * Check whether the request contains a padding message.
 *
 * @param s             aspeed hace state object
 * @param iov           iov of the current request
 * @param req_len       length of the current request
 * @param total_msg_len length of all acc_mode requests (excluding the
 *                      padding message)
 * @param pad_offset    start offset of the padding message
 */
static bool has_padding(AspeedHACEState *s, struct iovec *iov,
                        hwaddr req_len, uint32_t *total_msg_len,
                        uint32_t *pad_offset)
{
    *total_msg_len = (uint32_t)(ldq_be_p(iov->iov_base + req_len - 8) / 8);
    /*
     * SG_LIST_LEN_LAST asserted in the request length doesn't mean it is
     * the last request. The last request should contain the padding
     * message. We check whether the message contains padding by:
     *   1. Getting the total message length. If the current message
     *      contains padding, the last 8 bytes hold the total message
     *      length in bits (big-endian).
     *   2. Checking whether the total message length is valid. If it is,
     *      it should be less than or equal to total_req_len.
     *   3. Subtracting padding_size from the current request length to
     *      get the padding offset. The padding message's first byte
     *      should be 0x80.
     */
    if (*total_msg_len <= s->total_req_len) {
        uint32_t padding_size = s->total_req_len - *total_msg_len;
        uint8_t *padding = iov->iov_base;
        *pad_offset = req_len - padding_size;
        if (padding[*pad_offset] == 0x80) {
            return true;
        }
    }

    return false;
}
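
/*
 * Padding example: hashing the 3-byte message "abc" with SHA-256
 * yields one 64-byte padded block: the 3 message bytes, a 0x80 byte,
 * 52 zero bytes, and the 64-bit big-endian bit count 24. has_padding()
 * recovers total_msg_len = 24 / 8 = 3 from the last 8 bytes and finds
 * 0x80 at pad_offset = 3.
 */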

static int reconstruct_iov(AspeedHACEState *s, struct iovec *iov, int id,
                           uint32_t *pad_offset)
{
    int i, iov_count;
    if (*pad_offset != 0) {
        s->iov_cache[s->iov_count].iov_base = iov[id].iov_base;
        s->iov_cache[s->iov_count].iov_len = *pad_offset;
        ++s->iov_count;
    }
    for (i = 0; i < s->iov_count; i++) {
        iov[i].iov_base = s->iov_cache[i].iov_base;
        iov[i].iov_len = s->iov_cache[i].iov_len;
    }
    iov_count = s->iov_count;
    s->iov_count = 0;
    s->total_req_len = 0;
    return iov_count;
}

/**
 * Generate iov for accumulative mode.
 *
 * @param s             aspeed hace state object
 * @param iov           iov of the current request
 * @param id            index of the current iov
 * @param req_len       length of the current request
 *
 * @return count of iov
 */
static int gen_acc_mode_iov(AspeedHACEState *s, struct iovec *iov, int id,
                            hwaddr *req_len)
{
    uint32_t pad_offset;
    uint32_t total_msg_len;
    s->total_req_len += *req_len;

    if (has_padding(s, &iov[id], *req_len, &total_msg_len, &pad_offset)) {
        if (s->iov_count) {
            return reconstruct_iov(s, iov, id, &pad_offset);
        }

        *req_len -= s->total_req_len - total_msg_len;
        s->total_req_len = 0;
        iov[id].iov_len = *req_len;
    } else {
        /* Cache the current entry, not iov[0], for later reconstruction */
        s->iov_cache[s->iov_count].iov_base = iov[id].iov_base;
        s->iov_cache[s->iov_count].iov_len = *req_len;
        ++s->iov_count;
    }

    return id + 1;
}
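
/*
 * Accumulative-mode example: a guest might submit two 64-byte
 * scatter-gather requests followed by a final request carrying the
 * padding. The first two land in the else branch above and are cached;
 * the final one is detected by has_padding(), and reconstruct_iov()
 * then replays the cached buffers, plus the final request's bytes
 * before the padding, as a single iov array for qcrypto_hash_bytesv().
 */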

static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
                              bool acc_mode)
{
    struct iovec iov[ASPEED_HACE_MAX_SG];
    g_autofree uint8_t *digest_buf = NULL;
    size_t digest_len = 0;
    int niov = 0;
    int i;

    if (sg_mode) {
        uint32_t len = 0;

        for (i = 0; !(len & SG_LIST_LEN_LAST); i++) {
            uint32_t addr, src;
            hwaddr plen;

            if (i == ASPEED_HACE_MAX_SG) {
                qemu_log_mask(LOG_GUEST_ERROR,
                        "aspeed_hace: guest failed to set end of sg list marker\n");
                break;
            }

            src = s->regs[R_HASH_SRC] + (i * SG_LIST_ENTRY_SIZE);

            len = address_space_ldl_le(&s->dram_as, src,
                                       MEMTXATTRS_UNSPECIFIED, NULL);

            addr = address_space_ldl_le(&s->dram_as, src + SG_LIST_LEN_SIZE,
                                        MEMTXATTRS_UNSPECIFIED, NULL);
            addr &= SG_LIST_ADDR_MASK;

            plen = len & SG_LIST_LEN_MASK;
            iov[i].iov_base = address_space_map(&s->dram_as, addr, &plen, false,
                                                MEMTXATTRS_UNSPECIFIED);

            if (acc_mode) {
                niov = gen_acc_mode_iov(s, iov, i, &plen);
            } else {
                iov[i].iov_len = plen;
            }
        }
    } else {
        hwaddr len = s->regs[R_HASH_SRC_LEN];

        iov[0].iov_len = len;
        iov[0].iov_base = address_space_map(&s->dram_as, s->regs[R_HASH_SRC],
                                            &len, false,
                                            MEMTXATTRS_UNSPECIFIED);
        i = 1;

        if (s->iov_count) {
            /*
             * In the Aspeed SDK kernel driver, sg_mode is disabled in
             * hash_final(). Thus, if we receive a request with sg_mode
             * disabled, we must check whether the cache is empty. If it
             * is not, we combine the cached iovs with the current iov.
             */
            uint32_t total_msg_len;
            uint32_t pad_offset;
            s->total_req_len += len;
            if (has_padding(s, iov, len, &total_msg_len, &pad_offset)) {
                niov = reconstruct_iov(s, iov, 0, &pad_offset);
            }
        }
    }

    if (niov) {
        i = niov;
    }

    if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf, &digest_len, NULL) < 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto failed\n", __func__);
        return;
    }

    if (address_space_write(&s->dram_as, s->regs[R_HASH_DEST],
                            MEMTXATTRS_UNSPECIFIED,
                            digest_buf, digest_len)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "aspeed_hace: address space write failed\n");
    }

    for (; i > 0; i--) {
        address_space_unmap(&s->dram_as, iov[i - 1].iov_base,
                            iov[i - 1].iov_len, false,
                            iov[i - 1].iov_len);
    }

    /*
     * Set status bits to indicate completion. Testing shows hardware sets
     * these irrespective of HASH_IRQ_EN.
     */
    s->regs[R_STATUS] |= HASH_IRQ;
}

static uint64_t aspeed_hace_read(void *opaque, hwaddr addr, unsigned int size)
{
    AspeedHACEState *s = ASPEED_HACE(opaque);

    addr >>= 2;

    if (addr >= ASPEED_HACE_NR_REGS) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n",
                      __func__, addr << 2);
        return 0;
    }

    return s->regs[addr];
}

static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data,
                              unsigned int size)
{
    AspeedHACEState *s = ASPEED_HACE(opaque);
    AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);

    addr >>= 2;

    if (addr >= ASPEED_HACE_NR_REGS) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n",
                      __func__, addr << 2);
        return;
    }

    switch (addr) {
    case R_STATUS:
        if (data & HASH_IRQ) {
            data &= ~HASH_IRQ;

            if (s->regs[addr] & HASH_IRQ) {
                qemu_irq_lower(s->irq);
            }
        }
        break;
    case R_HASH_SRC:
        data &= ahc->src_mask;
        break;
    case R_HASH_DEST:
        data &= ahc->dest_mask;
        break;
    case R_HASH_KEY_BUFF:
        data &= ahc->key_mask;
        break;
    case R_HASH_SRC_LEN:
        data &= 0x0FFFFFFF;
        break;
    case R_HASH_CMD: {
        int algo;
        data &= ahc->hash_mask;

        if ((data & HASH_DIGEST_HMAC)) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: HMAC mode not implemented\n",
                          __func__);
        }
        if (data & BIT(1)) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: Cascaded mode not implemented\n",
                          __func__);
        }
        algo = hash_algo_lookup(data);
        if (algo < 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Invalid hash algorithm selection 0x%"PRIx64"\n",
                          __func__, data & ahc->hash_mask);
            break;
        }
        do_hash_operation(s, algo, data & HASH_SG_EN,
                          ((data & HASH_HMAC_MASK) == HASH_DIGEST_ACCUM));

        if (data & HASH_IRQ_EN) {
            qemu_irq_raise(s->irq);
        }
        break;
    }
    case R_CRYPT_CMD:
        qemu_log_mask(LOG_UNIMP, "%s: Crypt commands not implemented\n",
                      __func__);
        break;
    default:
        break;
    }

    s->regs[addr] = data;
}

static const MemoryRegionOps aspeed_hace_ops = {
    .read = aspeed_hace_read,
    .write = aspeed_hace_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};
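
/*
 * A minimal guest-side hashing sequence against this model might look
 * like the sketch below ("writel" is a hypothetical 32-bit MMIO store
 * helper, not part of this file):
 *
 *   writel(base + 0x20, src);   // R_HASH_SRC: message buffer in DRAM
 *   writel(base + 0x24, dst);   // R_HASH_DEST: digest buffer in DRAM
 *   writel(base + 0x2c, len);   // R_HASH_SRC_LEN: message length in bytes
 *   writel(base + 0x30, HASH_ALGO_SHA256 | HASH_IRQ_EN); // R_HASH_CMD
 *
 * The command write runs do_hash_operation() synchronously and raises
 * the IRQ; the guest acknowledges by writing HASH_IRQ back to R_STATUS.
 */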

static void aspeed_hace_reset(DeviceState *dev)
{
    AspeedHACEState *s = ASPEED_HACE(dev);

    memset(s->regs, 0, sizeof(s->regs));
    s->iov_count = 0;
    s->total_req_len = 0;
}

static void aspeed_hace_realize(DeviceState *dev, Error **errp)
{
    AspeedHACEState *s = ASPEED_HACE(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    sysbus_init_irq(sbd, &s->irq);

    memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_hace_ops, s,
            TYPE_ASPEED_HACE, 0x1000);

    if (!s->dram_mr) {
        error_setg(errp, TYPE_ASPEED_HACE ": 'dram' link not set");
        return;
    }

    address_space_init(&s->dram_as, s->dram_mr, "dram");

    sysbus_init_mmio(sbd, &s->iomem);
}

static Property aspeed_hace_properties[] = {
    DEFINE_PROP_LINK("dram", AspeedHACEState, dram_mr,
                     TYPE_MEMORY_REGION, MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vmstate_aspeed_hace = {
    .name = TYPE_ASPEED_HACE,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, AspeedHACEState, ASPEED_HACE_NR_REGS),
        VMSTATE_UINT32(total_req_len, AspeedHACEState),
        VMSTATE_UINT32(iov_count, AspeedHACEState),
        VMSTATE_END_OF_LIST(),
    }
};

static void aspeed_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = aspeed_hace_realize;
    dc->reset = aspeed_hace_reset;
    device_class_set_props(dc, aspeed_hace_properties);
    dc->vmsd = &vmstate_aspeed_hace;
}

static const TypeInfo aspeed_hace_info = {
    .name = TYPE_ASPEED_HACE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(AspeedHACEState),
    .class_init = aspeed_hace_class_init,
    .class_size = sizeof(AspeedHACEClass)
};

static void aspeed_ast2400_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2400 Hash and Crypto Engine";

    ahc->src_mask = 0x0FFFFFFF;
    ahc->dest_mask = 0x0FFFFFF8;
    ahc->key_mask = 0x0FFFFFC0;
    ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
}

static const TypeInfo aspeed_ast2400_hace_info = {
    .name = TYPE_ASPEED_AST2400_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2400_hace_class_init,
};

static void aspeed_ast2500_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2500 Hash and Crypto Engine";

    ahc->src_mask = 0x3FFFFFFF;
    ahc->dest_mask = 0x3FFFFFF8;
    ahc->key_mask = 0x3FFFFFC0;
    ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
}

static const TypeInfo aspeed_ast2500_hace_info = {
    .name = TYPE_ASPEED_AST2500_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2500_hace_class_init,
};

static void aspeed_ast2600_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2600 Hash and Crypto Engine";

    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;
}

static const TypeInfo aspeed_ast2600_hace_info = {
    .name = TYPE_ASPEED_AST2600_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2600_hace_class_init,
};

static void aspeed_ast1030_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST1030 Hash and Crypto Engine";

    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;
}

static const TypeInfo aspeed_ast1030_hace_info = {
    .name = TYPE_ASPEED_AST1030_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast1030_hace_class_init,
};

static void aspeed_hace_register_types(void)
{
    type_register_static(&aspeed_ast2400_hace_info);
    type_register_static(&aspeed_ast2500_hace_info);
    type_register_static(&aspeed_ast2600_hace_info);
    type_register_static(&aspeed_ast1030_hace_info);
    type_register_static(&aspeed_hace_info);
}

type_init(aspeed_hace_register_types);