qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

plot.py (19027B)


#
# Migration test graph plotting
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.
#

import sys

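# Plot renders the guest performance reports collected by the migration
# test harness as an interactive plotly chart plus HTML tables summarising
# each test's configuration.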
class Plot(object):

    # Generated using
    # http://tools.medialab.sciences-po.fr/iwanthue/
    COLORS = ["#CD54D0",
              "#79D94C",
              "#7470CD",
              "#D2D251",
              "#863D79",
              "#76DDA6",
              "#D4467B",
              "#61923D",
              "#CB9CCA",
              "#D98F36",
              "#8CC8DA",
              "#CE4831",
              "#5E7693",
              "#9B803F",
              "#412F4C",
              "#CECBA6",
              "#6D3229",
              "#598B73",
              "#C8827C",
              "#394427"]

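    # The boolean flags select which data series are plotted; "reports"
    # is the list of parsed guestperf report objects to compare.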
    def __init__(self,
                 reports,
                 migration_iters,
                 total_guest_cpu,
                 split_guest_cpu,
                 qemu_cpu,
                 vcpu_cpu):

        self._reports = reports
        self._migration_iters = migration_iters
        self._total_guest_cpu = total_guest_cpu
        self._split_guest_cpu = split_guest_cpu
        self._qemu_cpu = qemu_cpu
        self._vcpu_cpu = vcpu_cpu
        self._color_idx = 0

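    # Cycle through the colour palette, wrapping around when it is exhausted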
    def _next_color(self):
        color = self.COLORS[self._color_idx]
        self._color_idx += 1
        if self._color_idx >= len(self.COLORS):
            self._color_idx = 0
        return color

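    # Build the hover-text label describing the migration state at a
    # given point in time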
    def _get_progress_label(self, progress):
        if progress:
            return "\n\n" + "\n".join(
                ["Status: %s" % progress._status,
                 "Iteration: %d" % progress._ram._iterations,
                 "Throttle: %02d%%" % progress._throttle_pcent,
                 "Dirty rate: %dMB/s" % (progress._ram._dirty_rate_pps * 4 / 1024.0)])
        else:
            return "\n\n" + "\n".join(
                ["Status: %s" % "none",
                 "Iteration: %d" % 0])

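    # The earliest timestamp across the QEMU and guest samples becomes
    # the origin of the x axis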
    def _find_start_time(self, report):
        startqemu = report._qemu_timings._records[0]._timestamp
        startguest = report._guest_timings._records[0]._timestamp
        if startqemu < startguest:
            return startqemu
        else:
            return startguest

    def _get_guest_max_value(self, report):
        maxvalue = 0
        for record in report._guest_timings._records:
            if record._value > maxvalue:
                maxvalue = record._value
        return maxvalue

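    # Peak host CPU utilization of the QEMU process, computed from the
    # deltas between successive cumulative CPU time samples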
    def _get_qemu_max_value(self, report):
        maxvalue = 0
        oldvalue = None
        oldtime = None
        for record in report._qemu_timings._records:
            if oldvalue is not None:
                cpudelta = (record._value - oldvalue) / 1000.0
                timedelta = record._timestamp - oldtime
                if timedelta == 0:
                    continue
                util = cpudelta / timedelta * 100.0
            else:
                util = 0
            oldvalue = record._value
            oldtime = record._timestamp

            if util > maxvalue:
                maxvalue = util
        return maxvalue

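    # A single line tracing the guest's memory update speed over time,
    # with the migration progress attached as hover text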
    def _get_total_guest_cpu_graph(self, report, starttime):
        xaxis = []
        yaxis = []
        labels = []
        progress_idx = -1
        for record in report._guest_timings._records:
            while ((progress_idx + 1) < len(report._progress_history) and
                   report._progress_history[progress_idx + 1]._now < record._timestamp):
                progress_idx = progress_idx + 1

            if progress_idx >= 0:
                progress = report._progress_history[progress_idx]
            else:
                progress = None

            xaxis.append(record._timestamp - starttime)
            yaxis.append(record._value)
            labels.append(self._get_progress_label(progress))

        from plotly import graph_objs as go
        return go.Scatter(x=xaxis,
                          y=yaxis,
                          name="Guest PIDs: %s" % report._scenario._name,
                          mode='lines',
                          line={
                              "dash": "solid",
                              "color": self._next_color(),
                              "shape": "linear",
                              "width": 1
                          },
                          text=labels)

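    # As above, but with one line per guest worker thread (keyed by TID)
    # instead of a single aggregate line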
    def _get_split_guest_cpu_graphs(self, report, starttime):
        threads = {}
        for record in report._guest_timings._records:
            if record._tid in threads:
                continue
            threads[record._tid] = {
                "xaxis": [],
                "yaxis": [],
                "labels": [],
            }

        progress_idx = -1
        for record in report._guest_timings._records:
            while ((progress_idx + 1) < len(report._progress_history) and
                   report._progress_history[progress_idx + 1]._now < record._timestamp):
                progress_idx = progress_idx + 1

            if progress_idx >= 0:
                progress = report._progress_history[progress_idx]
            else:
                progress = None

            threads[record._tid]["xaxis"].append(record._timestamp - starttime)
            threads[record._tid]["yaxis"].append(record._value)
            threads[record._tid]["labels"].append(self._get_progress_label(progress))


        graphs = []
        from plotly import graph_objs as go
        for tid in threads.keys():
            graphs.append(
                go.Scatter(x=threads[tid]["xaxis"],
                           y=threads[tid]["yaxis"],
                           name="PID %s: %s" % (tid, report._scenario._name),
                           mode="lines",
                           line={
                               "dash": "solid",
                               "color": self._next_color(),
                               "shape": "linear",
                               "width": 1
                           },
                           text=threads[tid]["labels"]))
        return graphs

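    # Star-shaped markers along the x axis marking each recorded
    # migration progress sample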
    def _get_migration_iters_graph(self, report, starttime):
        xaxis = []
        yaxis = []
        labels = []
        for progress in report._progress_history:
            xaxis.append(progress._now - starttime)
            yaxis.append(0)
            labels.append(self._get_progress_label(progress))

        from plotly import graph_objs as go
        return go.Scatter(x=xaxis,
                          y=yaxis,
                          text=labels,
                          name="Migration iterations",
                          mode="markers",
                          marker={
                              "color": self._next_color(),
                              "symbol": "star",
                              "size": 5
                          })

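    # Host CPU utilization of the main QEMU process, computed from the
    # cumulative CPU time samples and plotted on the secondary y axis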
    def _get_qemu_cpu_graph(self, report, starttime):
        xaxis = []
        yaxis = []
        labels = []
        progress_idx = -1

        first = report._qemu_timings._records[0]
        abstimestamps = [first._timestamp]
        absvalues = [first._value]

        for record in report._qemu_timings._records[1:]:
            while ((progress_idx + 1) < len(report._progress_history) and
                   report._progress_history[progress_idx + 1]._now < record._timestamp):
                progress_idx = progress_idx + 1

            if progress_idx >= 0:
                progress = report._progress_history[progress_idx]
            else:
                progress = None

            oldvalue = absvalues[-1]
            oldtime = abstimestamps[-1]

            cpudelta = (record._value - oldvalue) / 1000.0
            timedelta = record._timestamp - oldtime
            if timedelta == 0:
                continue
            util = cpudelta / timedelta * 100.0

            abstimestamps.append(record._timestamp)
            absvalues.append(record._value)

            xaxis.append(record._timestamp - starttime)
            yaxis.append(util)
            labels.append(self._get_progress_label(progress))

        from plotly import graph_objs as go
        return go.Scatter(x=xaxis,
                          y=yaxis,
                          yaxis="y2",
                          name="QEMU: %s" % report._scenario._name,
                          mode='lines',
                          line={
                              "dash": "solid",
                              "color": self._next_color(),
                              "shape": "linear",
                              "width": 1
                          },
                          text=labels)

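    # Per-VCPU thread host CPU utilization, capped at 100%, also plotted
    # on the secondary y axis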
    def _get_vcpu_cpu_graphs(self, report, starttime):
        threads = {}
        for record in report._vcpu_timings._records:
            if record._tid in threads:
                continue
            threads[record._tid] = {
                "xaxis": [],
                "yaxis": [],
                "labels": [],
                "absvalue": [record._value],
                "abstime": [record._timestamp],
            }

        progress_idx = -1
        for record in report._vcpu_timings._records:
            while ((progress_idx + 1) < len(report._progress_history) and
                   report._progress_history[progress_idx + 1]._now < record._timestamp):
                progress_idx = progress_idx + 1

            if progress_idx >= 0:
                progress = report._progress_history[progress_idx]
            else:
                progress = None

            oldvalue = threads[record._tid]["absvalue"][-1]
            oldtime = threads[record._tid]["abstime"][-1]

            cpudelta = (record._value - oldvalue) / 1000.0
            timedelta = record._timestamp - oldtime
            if timedelta == 0:
                continue
            util = cpudelta / timedelta * 100.0
            if util > 100:
                util = 100

            threads[record._tid]["absvalue"].append(record._value)
            threads[record._tid]["abstime"].append(record._timestamp)

            threads[record._tid]["xaxis"].append(record._timestamp - starttime)
            threads[record._tid]["yaxis"].append(util)
            threads[record._tid]["labels"].append(self._get_progress_label(progress))


        graphs = []
        from plotly import graph_objs as go
        for tid in threads.keys():
            graphs.append(
                go.Scatter(x=threads[tid]["xaxis"],
                           y=threads[tid]["yaxis"],
                           yaxis="y2",
                           name="VCPU %s: %s" % (tid, report._scenario._name),
                           mode="lines",
                           line={
                               "dash": "solid",
                               "color": self._next_color(),
                               "shape": "linear",
                               "width": 1
                           },
                           text=threads[tid]["labels"]))
        return graphs

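    # Collect the graphs selected by the constructor flags for a single
    # report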
    def _generate_chart_report(self, report):
        graphs = []
        starttime = self._find_start_time(report)
        if self._total_guest_cpu:
            graphs.append(self._get_total_guest_cpu_graph(report, starttime))
        if self._split_guest_cpu:
            graphs.extend(self._get_split_guest_cpu_graphs(report, starttime))
        if self._qemu_cpu:
            graphs.append(self._get_qemu_cpu_graph(report, starttime))
        if self._vcpu_cpu:
            graphs.extend(self._get_vcpu_cpu_graphs(report, starttime))
        if self._migration_iters:
            graphs.append(self._get_migration_iters_graph(report, starttime))
        return graphs

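    # Annotate the chart with the first occurrence of each migration
    # status change, skipping the initial "setup" phase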
    def _generate_annotation(self, starttime, progress):
        return {
            "text": progress._status,
            "x": progress._now - starttime,
            "y": 10,
        }

    def _generate_annotations(self, report):
        starttime = self._find_start_time(report)
        annotations = {}
        for progress in report._progress_history:
            if progress._status == "setup":
                continue
            if progress._status not in annotations:
                annotations[progress._status] = self._generate_annotation(starttime, progress)

        return annotations.values()

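    # Assemble all graphs into a single figure with dual y axes and
    # return it as an embeddable HTML <div>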
    def _generate_chart(self):
        from plotly.offline import plot
        from plotly import graph_objs as go

        graphs = []
        yaxismax = 0
        yaxismax2 = 0
        for report in self._reports:
            graphs.extend(self._generate_chart_report(report))

            maxvalue = self._get_guest_max_value(report)
            if maxvalue > yaxismax:
                yaxismax = maxvalue

            maxvalue = self._get_qemu_max_value(report)
            if maxvalue > yaxismax2:
                yaxismax2 = maxvalue

        yaxismax += 100
        if not self._qemu_cpu:
            yaxismax2 = 110
        yaxismax2 += 10

        annotations = []
        if self._migration_iters:
            for report in self._reports:
                annotations.extend(self._generate_annotations(report))

        layout = go.Layout(title="Migration comparison",
                           xaxis={
                               "title": "Wallclock time (secs)",
                               "showgrid": False,
                           },
                           yaxis={
                               "title": "Memory update speed (ms/GB)",
                               "showgrid": False,
                               "range": [0, yaxismax],
                           },
                           yaxis2={
                               "title": "Host utilization (%)",
                               "overlaying": "y",
                               "side": "right",
                               "range": [0, yaxismax2],
                               "showgrid": False,
                           },
                           annotations=annotations)

        figure = go.Figure(data=graphs, layout=layout)

        return plot(figure,
                    show_link=False,
                    include_plotlyjs=False,
                    output_type="div")


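    # Emit the per-report HTML tables describing the test, hardware and
    # scenario configuration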
    def _generate_report(self):
        pieces = []
        for report in self._reports:
            pieces.append("""
<h3>Report %s</h3>
<table>
""" % report._scenario._name)

            pieces.append("""
  <tr class="subhead">
    <th colspan="2">Test config</th>
  </tr>
  <tr>
    <th>Emulator:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Kernel:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Ramdisk:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Transport:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Host:</th>
    <td>%s</td>
  </tr>
""" % (report._binary, report._kernel,
       report._initrd, report._transport, report._dst_host))

            hardware = report._hardware
            pieces.append("""
  <tr class="subhead">
    <th colspan="2">Hardware config</th>
  </tr>
  <tr>
    <th>CPUs:</th>
    <td>%d</td>
  </tr>
  <tr>
    <th>RAM:</th>
    <td>%d GB</td>
  </tr>
  <tr>
    <th>Source CPU bind:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Source RAM bind:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Dest CPU bind:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Dest RAM bind:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Preallocate RAM:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Locked RAM:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Huge pages:</th>
    <td>%s</td>
  </tr>
""" % (hardware._cpus, hardware._mem,
       ",".join(hardware._src_cpu_bind),
       ",".join(hardware._src_mem_bind),
       ",".join(hardware._dst_cpu_bind),
       ",".join(hardware._dst_mem_bind),
       "yes" if hardware._prealloc_pages else "no",
       "yes" if hardware._locked_pages else "no",
       "yes" if hardware._huge_pages else "no"))

            scenario = report._scenario
            pieces.append("""
  <tr class="subhead">
    <th colspan="2">Scenario config</th>
  </tr>
  <tr>
    <th>Max downtime:</th>
    <td>%d milli-sec</td>
  </tr>
  <tr>
    <th>Max bandwidth:</th>
    <td>%d MB/sec</td>
  </tr>
  <tr>
    <th>Max iters:</th>
    <td>%d</td>
  </tr>
  <tr>
    <th>Max time:</th>
    <td>%d secs</td>
  </tr>
  <tr>
    <th>Pause:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Pause iters:</th>
    <td>%d</td>
  </tr>
  <tr>
    <th>Post-copy:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Post-copy iters:</th>
    <td>%d</td>
  </tr>
  <tr>
    <th>Auto-converge:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Auto-converge iters:</th>
    <td>%d</td>
  </tr>
  <tr>
    <th>MT compression:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>MT compression threads:</th>
    <td>%d</td>
  </tr>
  <tr>
    <th>XBZRLE compression:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>XBZRLE compression cache:</th>
    <td>%d%% of RAM</td>
  </tr>
""" % (scenario._downtime, scenario._bandwidth,
       scenario._max_iters, scenario._max_time,
       "yes" if scenario._pause else "no", scenario._pause_iters,
       "yes" if scenario._post_copy else "no", scenario._post_copy_iters,
       "yes" if scenario._auto_converge else "no", scenario._auto_converge_step,
       "yes" if scenario._compression_mt else "no", scenario._compression_mt_threads,
       "yes" if scenario._compression_xbzrle else "no", scenario._compression_xbzrle_cache))

            pieces.append("""
</table>
""")

        return "\n".join(pieces)

    def _generate_style(self):
        return """
#report table tr th {
    text-align: right;
}
#report table tr td {
    text-align: left;
}
#report table tr.subhead th {
    background: rgb(192, 192, 192);
    text-align: center;
}

"""

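    # Write the complete HTML document to the given file handle; the page
    # expects plotly.min.js to be available alongside it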
    def generate_html(self, fh):
        print("""<html>
  <head>
    <script type="text/javascript" src="plotly.min.js">
    </script>
    <style type="text/css">
%s
    </style>
    <title>Migration report</title>
  </head>
  <body>
    <h1>Migration report</h1>
    <h2>Chart summary</h2>
    <div id="chart">
""" % self._generate_style(), file=fh)
        print(self._generate_chart(), file=fh)
        print("""
    </div>
    <h2>Report details</h2>
    <div id="report">
""", file=fh)
        print(self._generate_report(), file=fh)
        print("""
    </div>
  </body>
</html>
""", file=fh)

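    # Write the report to the named file, or to stdout when no filename
    # is given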
    def generate(self, filename):
        if filename is None:
            self.generate_html(sys.stdout)
        else:
            with open(filename, "w") as fh:
                self.generate_html(fh)
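
# A minimal usage sketch (not part of the upstream file): in the QEMU tree
# this class is normally driven by the guestperf plotting front-end, but
# given a list of already-parsed report objects it could be invoked along
# these lines.  The "reports" variable and the output filename here are
# assumptions for illustration only.
#
#     plot = Plot(reports,
#                 migration_iters=True,
#                 total_guest_cpu=True,
#                 split_guest_cpu=False,
#                 qemu_cpu=True,
#                 vcpu_cpu=False)
#     plot.generate("migration-report.html")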