trex_stl_stats.py revision 00bfc58e
1#!/router/bin/python
2
3from .utils import text_tables
4from .utils.text_opts import format_text, format_threshold, format_num
5from .trex_stl_types import StatNotAvailable, is_integer
6
7from collections import namedtuple, OrderedDict, deque
8import sys
9import copy
10import datetime
11import time
12import re
13import math
14import threading
15import pprint
16
17GLOBAL_STATS = 'g'
18PORT_STATS = 'p'
19PORT_GRAPH = 'pg'
20PORT_STATUS = 'ps'
21STREAMS_STATS = 's'
22LATENCY_STATS = 'ls'
23LATENCY_HISTOGRAM = 'lh'
24CPU_STATS = 'c'
25MBUF_STATS = 'm'
26
27ALL_STATS_OPTS = [GLOBAL_STATS, PORT_STATS, PORT_STATUS, STREAMS_STATS, LATENCY_STATS, PORT_GRAPH, LATENCY_HISTOGRAM, CPU_STATS, MBUF_STATS]
28COMPACT = [GLOBAL_STATS, PORT_STATS]
29GRAPH_PORT_COMPACT = [GLOBAL_STATS, PORT_GRAPH]
30SS_COMPAT = [GLOBAL_STATS, STREAMS_STATS] # stream stats
31LS_COMPAT = [GLOBAL_STATS, LATENCY_STATS] # latency stats
32LH_COMPAT = [GLOBAL_STATS, LATENCY_HISTOGRAM] # latency histogram
33UT_COMPAT = [GLOBAL_STATS, CPU_STATS, MBUF_STATS] # utilization
34
35ExportableStats = namedtuple('ExportableStats', ['raw_data', 'text_table'])
36
def round_float (f):
    """Round a float to two decimal places; non-float values pass through unchanged."""
    if type(f) is not float:
        return f
    return float("%.2f" % f)
39
def try_int(i):
    """Best-effort conversion to int.

    Returns int(i) when the value is convertible, otherwise returns the
    value unchanged.  Catches only conversion errors - the previous bare
    ``except:`` also swallowed KeyboardInterrupt/SystemExit.
    """
    try:
        return int(i)
    except (TypeError, ValueError):
        return i
45
# deep merge of dicts: dst = src + dst (existing keys in dst take priority)
def deep_merge_dicts (dst, src):
    """Recursively merge ``src`` into ``dst`` in place; ``dst`` wins on conflicts.

    Keys missing from ``dst`` are deep-copied from ``src``.  Nested dicts
    are merged recursively.  Recursion now requires BOTH sides to be dicts -
    the original recursed whenever ``src[k]`` was a dict and crashed when
    ``dst[k]`` held a scalar.
    """
    for k, v in src.items():
        # if not exists - deep copy it
        if k not in dst:
            dst[k] = copy.deepcopy(v)
        elif isinstance(v, dict) and isinstance(dst[k], dict):
            deep_merge_dicts(dst[k], v)
55
56# BPS L1 from pps and BPS L2
def calc_bps_L1 (bps, pps):
    """Derive layer-1 bps from layer-2 bps and pps.

    Adds the 20-byte-per-packet Ethernet overhead (preamble + IPG) on top
    of the L2 rate.  Returns 0 when either rate is zero.
    """
    if bps == 0 or pps == 0:
        return 0

    # average packet size in bytes, derived from the two rates
    avg_pkt_bytes = bps / (pps * 8.0)
    return bps * (1 + (20 / avg_pkt_bytes))
63#
64
def is_intable (value):
    """Return True if ``value`` can be converted to an int, False otherwise.

    Catches TypeError in addition to ValueError so non-string/non-number
    inputs (e.g. None, lists) report False instead of raising - the
    original only handled ValueError.
    """
    try:
        int(value)
        return True
    except (ValueError, TypeError):
        return False
71
72# use to calculate diffs relative to the previous values
73# for example, BW
def calculate_diff (samples):
    """Weighted percent-change trend over consecutive samples.

    Each consecutive pair contributes its percent change, capped at +100%,
    with linearly increasing weight so recent changes dominate.

    Returns 0.0 for fewer than two samples - previously such input raised
    ZeroDivisionError because sum(range(0, len(samples))) is 0.
    """
    if len(samples) < 2:
        return 0.0

    total = 0.0

    weight_step = 1.0 / sum(range(0, len(samples)))
    weight = weight_step

    for i in range(0, len(samples) - 1):
        # clamp non-positive samples to 1 to keep the ratio well-defined
        current = samples[i] if samples[i] > 0 else 1
        nxt = samples[i + 1] if samples[i + 1] > 0 else 1  # renamed from 'next' - avoid shadowing the builtin

        s = 100 * ((float(nxt) / current) - 1.0)

        # block change by 100%
        total  += (min(s, 100) * weight)
        weight += weight_step

    return total
91
92
93# calculate by absolute values and not relatives (useful for CPU usage in % and etc.)
def calculate_diff_raw (samples):
    """Weighted sum of absolute deltas between consecutive samples.

    Unlike calculate_diff() this operates on raw differences rather than
    percent change - useful for values already expressed in absolute units
    (e.g. CPU usage in %).  Later deltas get linearly increasing weight.

    Returns 0.0 for fewer than two samples - previously such input raised
    ZeroDivisionError because sum(range(0, len(samples))) is 0.
    """
    if len(samples) < 2:
        return 0.0

    total = 0.0

    weight_step = 1.0 / sum(range(0, len(samples)))
    weight = weight_step

    for i in range(0, len(samples) - 1):
        current = samples[i]
        nxt = samples[i + 1]  # renamed from 'next' - avoid shadowing the builtin

        total  += ( (nxt - current) * weight )
        weight += weight_step

    return total
108
# memoizes label -> byte-count translations (labels repeat on every refresh)
get_number_of_bytes_cache = {}

# get number of bytes: '64b'->64, '9kb'->9000 etc.
def get_number_of_bytes(val):
    """Translate a size label like '64b' or '9kb' to a byte count, with caching."""
    cached = get_number_of_bytes_cache.get(val)
    if cached is None:
        # drop the trailing 'b', expand the 'k' multiplier, then parse
        cached = int(val[:-1].replace('k', '000'))
        get_number_of_bytes_cache[val] = cached
    return cached
115
116# a simple object to keep a watch over a field
class WatchedField(object):
    """Watches a single numeric field and warns once when it crosses a high
    threshold; the warning re-arms after the value drops below the low
    threshold (simple hysteresis to avoid log spam)."""

    def __init__(self, name, suffix, high_th, low_th, events_handler):
        self.name = name
        self.suffix = suffix
        self.high_th = high_th
        self.low_th = low_th
        self.events_handler = events_handler

        # 'hot' latches True once the high threshold is exceeded
        self.hot = False
        self.current = None

    def update(self, value):
        """Record a new sample; None samples are ignored entirely."""
        if value is None:
            return

        # rising edge: crossed the high threshold while not already hot
        if not self.hot and value > self.high_th:
            self.events_handler.log_warning("{0} is high: {1}{2}".format(self.name, value, self.suffix))
            self.hot = True

        # falling edge: dropped below the low threshold - re-arm the warning
        if self.hot and value < self.low_th:
            self.hot = False

        self.current = value
141
142
143
class CTRexInfoGenerator(object):
    """
    This object is responsible of generating stats and information from objects maintained at
    STLClient and the ports.
    """

    def __init__(self, global_stats_ref, ports_dict_ref, rx_stats_ref, latency_stats_ref, util_stats_ref, xstats_ref, async_monitor):
        # keep references only - the stats objects themselves are owned by the caller
        self._global_stats = global_stats_ref
        self._ports_dict = ports_dict_ref
        self._rx_stats_ref = rx_stats_ref
        self._latency_stats_ref = latency_stats_ref
        self._util_stats_ref = util_stats_ref
        self._xstats_ref = xstats_ref
        self._async_monitor = async_monitor

    def generate_single_statistic(self, port_id_list, statistic_type):
        """Dispatch to the generator matching ``statistic_type`` (one of the
        module-level *_STATS constants); unknown types return an empty dict."""
        if statistic_type == GLOBAL_STATS:
            return self._generate_global_stats()

        elif statistic_type == PORT_STATS:
            return self._generate_port_stats(port_id_list)

        elif statistic_type == PORT_GRAPH:
            return self._generate_port_graph(port_id_list)

        elif statistic_type == PORT_STATUS:
            return self._generate_port_status(port_id_list)

        elif statistic_type == STREAMS_STATS:
            return self._generate_streams_stats()

        elif statistic_type == LATENCY_STATS:
            return self._generate_latency_stats()

        elif statistic_type == LATENCY_HISTOGRAM:
            return self._generate_latency_histogram()

        elif statistic_type == CPU_STATS:
            return self._generate_cpu_util_stats()

        elif statistic_type == MBUF_STATS:
            return self._generate_mbuf_util_stats()

        else:
            # ignore by returning empty object
            return {}

    def generate_streams_info(self, port_id_list, stream_id_list):
        """Collect per-port loaded-streams tables, keyed by a 'Port N:' header.

        Ports with no loaded streams are omitted from the result.
        """
        relevant_ports = self.__get_relevant_ports(port_id_list)
        return_data = OrderedDict()

        for port_obj in relevant_ports:
            streams_data = self._generate_single_port_streams_info(port_obj, stream_id_list)
            if not streams_data:
                continue
            hdr_key = "Port {port}:".format(port= port_obj.port_id)

            # TODO: test for other ports with same stream structure, and join them
            return_data[hdr_key] = streams_data

        return return_data

    def _generate_global_stats(self):
        """Build the 'global statistics' key/value panel (connection, CPU,
        aggregate TX/RX rates, drops, queue-full counter) as a two-column table."""
        global_stats = self._global_stats

        stats_data = OrderedDict([("connection", "{host}, Port {port}".format(host=global_stats.connection_info.get("server"),
                                                                     port=global_stats.connection_info.get("sync_port"))),
                             ("version", "{ver}, UUID: {uuid}".format(ver=global_stats.server_version.get("version", "N/A"),
                                                                      uuid="N/A")),

                             ("cpu_util.", "{0}% @ {2} cores ({3} per port) {1}".format( format_threshold(round_float(global_stats.get("m_cpu_util")), [85, 100], [0, 85]),
                                                                                         global_stats.get_trend_gui("m_cpu_util", use_raw = True),
                                                                                         global_stats.system_info.get('dp_core_count'),
                                                                                         global_stats.system_info.get('dp_core_count_per_port'),
                                                                                         )),

                             ("rx_cpu_util.", "{0}% {1}".format( format_threshold(round_float(global_stats.get("m_rx_cpu_util")), [85, 100], [0, 85]),
                                                                global_stats.get_trend_gui("m_rx_cpu_util", use_raw = True))),

                             ("async_util.", "{0}% / {1}".format( format_threshold(round_float(self._async_monitor.get_cpu_util()), [85, 100], [0, 85]),
                                                                 format_num(self._async_monitor.get_bps() / 8.0, suffix = "B/sec"))),


                             (" ", ""),

                             ("total_tx_L2", "{0} {1}".format( global_stats.get("m_tx_bps", format=True, suffix="b/sec"),
                                                                global_stats.get_trend_gui("m_tx_bps"))),

                            ("total_tx_L1", "{0} {1}".format( global_stats.get("m_tx_bps_L1", format=True, suffix="b/sec"),
                                                                global_stats.get_trend_gui("m_tx_bps_L1"))),

                             ("total_rx", "{0} {1}".format( global_stats.get("m_rx_bps", format=True, suffix="b/sec"),
                                                              global_stats.get_trend_gui("m_rx_bps"))),

                             ("total_pps", "{0} {1}".format( global_stats.get("m_tx_pps", format=True, suffix="pkt/sec"),
                                                              global_stats.get_trend_gui("m_tx_pps"))),

                             #("  ", ""),

                             ("drop_rate", "{0}".format( format_num(global_stats.get("m_rx_drop_bps"),
                                                                    suffix = 'b/sec',
                                                                    opts = 'green' if (global_stats.get("m_rx_drop_bps")== 0) else 'red'),
                                                            )),

                             ("queue_full", "{0}".format( format_num(global_stats.get_rel("m_total_queue_full"),
                                                                     suffix = 'pkts',
                                                                     compact = False,
                                                                     opts = 'green' if (global_stats.get_rel("m_total_queue_full")== 0) else 'red'))),

                             ]
                            )

        # build table representation
        stats_table = text_tables.TRexTextInfo()
        stats_table.set_cols_align(["l", "l"])

        stats_table.add_rows([[k.replace("_", " ").title(), v]
                              for k, v in stats_data.items()],
                             header=False)

        return {"global_statistics": ExportableStats(stats_data, stats_table)}

    def _generate_streams_stats (self):
        """Build the per-PG-ID flow-stats table (rates plus relative and
        absolute packet/byte counters); the TUI shows at most 4 PG IDs."""
        flow_stats = self._rx_stats_ref
        # for TUI - maximum 4
        pg_ids = list(filter(is_intable, flow_stats.latest_stats.keys()))[:4]
        stream_count = len(pg_ids)

        # keys prefixed with '-' runs are visual separator rows in the table
        sstats_data = OrderedDict([ ('Tx pps',  []),
                                        ('Tx bps L2',      []),
                                        ('Tx bps L1',      []),
                                        ('---', [''] * stream_count),
                                        ('Rx pps',      []),
                                        ('Rx bps',      []),
                                        ('----', [''] * stream_count),
                                        ('opackets',    []),
                                        ('ipackets',    []),
                                        ('obytes',      []),
                                        ('ibytes',      []),
                                        ('-----', [''] * stream_count),
                                        ('tx_pkts',     []),
                                        ('rx_pkts',     []),
                                        ('tx_bytes',    []),
                                        ('rx_bytes',    [])
                                      ])



        # maximum 4
        for pg_id in pg_ids:

            sstats_data['Tx pps'].append(flow_stats.get([pg_id, 'tx_pps_lpf', 'total'], format = True, suffix = "pps"))
            sstats_data['Tx bps L2'].append(flow_stats.get([pg_id, 'tx_bps_lpf', 'total'], format = True, suffix = "bps"))

            sstats_data['Tx bps L1'].append(flow_stats.get([pg_id, 'tx_bps_L1_lpf', 'total'], format = True, suffix = "bps"))

            sstats_data['Rx pps'].append(flow_stats.get([pg_id, 'rx_pps_lpf', 'total'], format = True, suffix = "pps"))
            sstats_data['Rx bps'].append(flow_stats.get([pg_id, 'rx_bps_lpf', 'total'], format = True, suffix = "bps"))

            # o*/i* rows are raw relative counters; tx_/rx_ rows are the same
            # counters formatted with unit suffixes for display
            sstats_data['opackets'].append(flow_stats.get_rel([pg_id, 'tx_pkts', 'total']))
            sstats_data['ipackets'].append(flow_stats.get_rel([pg_id, 'rx_pkts', 'total']))
            sstats_data['obytes'].append(flow_stats.get_rel([pg_id, 'tx_bytes', 'total']))
            sstats_data['ibytes'].append(flow_stats.get_rel([pg_id, 'rx_bytes', 'total']))
            sstats_data['tx_bytes'].append(flow_stats.get_rel([pg_id, 'tx_bytes', 'total'], format = True, suffix = "B"))
            sstats_data['rx_bytes'].append(flow_stats.get_rel([pg_id, 'rx_bytes', 'total'], format = True, suffix = "B"))
            sstats_data['tx_pkts'].append(flow_stats.get_rel([pg_id, 'tx_pkts', 'total'], format = True, suffix = "pkts"))
            sstats_data['rx_pkts'].append(flow_stats.get_rel([pg_id, 'rx_pkts', 'total'], format = True, suffix = "pkts"))


        stats_table = text_tables.TRexTextTable()
        stats_table.set_cols_align(["l"] + ["r"] * stream_count)
        stats_table.set_cols_width([10] + [17]   * stream_count)
        stats_table.set_cols_dtype(['t'] + ['t'] * stream_count)

        stats_table.add_rows([[k] + v
                              for k, v in sstats_data.items()],
                              header=False)

        header = ["PG ID"] + [key for key in pg_ids]
        stats_table.header(header)

        return {"streams_statistics": ExportableStats(sstats_data, stats_table)}

    def _generate_latency_stats(self):
        """Build the per-PG-ID latency table: current/avg/max latency, a
        sliding window of recent maxima, jitter and sequence-error counts.
        The TUI shows at most 5 PG IDs."""
        lat_stats = self._latency_stats_ref
        latency_window_size = 10

        # for TUI - maximum 5
        pg_ids = list(filter(is_intable, lat_stats.latest_stats.keys()))[:5]
        stream_count = len(pg_ids)
        lstats_data = OrderedDict([('TX pkts',       []),
                                   ('RX pkts',       []),
                                   ('Max latency',   []),
                                   ('Avg latency',   []),
                                   ('-- Window --', [''] * stream_count),
                                   ('Last (max)',     []),
                                  ] + [('Last-%s' % i, []) for i in range(1, latency_window_size)] + [
                                   ('---', [''] * stream_count),
                                   ('Jitter',        []),
                                   ('----', [''] * stream_count),
                                   ('Errors',        []),
                                  ])

        # snapshot the history under the lock so the window rows are consistent
        with lat_stats.lock:
            history = [x for x in lat_stats.history]
        flow_stats = self._rx_stats_ref.get_stats()
        for pg_id in pg_ids:
            lstats_data['TX pkts'].append(flow_stats[pg_id]['tx_pkts']['total'] if pg_id in flow_stats else '')
            lstats_data['RX pkts'].append(flow_stats[pg_id]['rx_pkts']['total'] if pg_id in flow_stats else '')
            lstats_data['Avg latency'].append(try_int(lat_stats.get([pg_id, 'latency', 'average'])))
            lstats_data['Max latency'].append(try_int(lat_stats.get([pg_id, 'latency', 'total_max'])))
            lstats_data['Last (max)'].append(try_int(lat_stats.get([pg_id, 'latency', 'last_max'])))
            for i in range(1, latency_window_size):
                # walk backwards through the history snapshot; blank when not enough samples
                val = history[-i - 1].get(pg_id, {}).get('latency', {}).get('last_max', '') if len(history) > i else ''
                lstats_data['Last-%s' % i].append(try_int(val))
            lstats_data['Jitter'].append(try_int(lat_stats.get([pg_id, 'latency', 'jitter'])))
            # errors = seq_too_low + seq_too_high (each added only when numeric)
            errors = 0
            seq_too_low = lat_stats.get([pg_id, 'err_cntrs', 'seq_too_low'])
            if is_integer(seq_too_low):
                errors += seq_too_low
            seq_too_high = lat_stats.get([pg_id, 'err_cntrs', 'seq_too_high'])
            if is_integer(seq_too_high):
                errors += seq_too_high
            lstats_data['Errors'].append(format_num(errors,
                                            opts = 'green' if errors == 0 else 'red'))


        stats_table = text_tables.TRexTextTable()
        stats_table.set_cols_align(["l"] + ["r"] * stream_count)
        stats_table.set_cols_width([12] + [14]   * stream_count)
        stats_table.set_cols_dtype(['t'] + ['t'] * stream_count)
        stats_table.add_rows([[k] + v
                              for k, v in lstats_data.items()],
                              header=False)

        header = ["PG ID"] + [key for key in pg_ids]
        stats_table.header(header)

        return {"latency_statistics": ExportableStats(lstats_data, stats_table)}

    def _generate_latency_histogram(self):
        """Build a fixed-height latency histogram table per PG ID, padded with
        blank rows, followed by per-PG error-counter rows.  Shows at most
        5 PG IDs and the top 17 histogram buckets."""
        lat_stats = self._latency_stats_ref.latest_stats
        max_histogram_size = 17

        # for TUI - maximum 5
        pg_ids = list(filter(is_intable, lat_stats.keys()))[:5]

        # union of bucket keys across all shown PG IDs (values are irrelevant here)
        merged_histogram = {}
        for pg_id in pg_ids:
            merged_histogram.update(lat_stats[pg_id]['latency']['histogram'])
        histogram_size = min(max_histogram_size, len(merged_histogram))

        stream_count = len(pg_ids)
        stats_table = text_tables.TRexTextTable()
        stats_table.set_cols_align(["l"] + ["r"] * stream_count)
        stats_table.set_cols_width([12] + [14]   * stream_count)
        stats_table.set_cols_dtype(['t'] + ['t'] * stream_count)

        # pad with empty rows so the table height is constant across refreshes
        for i in range(max_histogram_size - histogram_size):
            if i == 0 and not merged_histogram:
                stats_table.add_row(['  No Data'] + [' '] * stream_count)
            else:
                stats_table.add_row([' '] * (stream_count + 1))
        # largest buckets first
        for key in list(reversed(sorted(merged_histogram.keys())))[:histogram_size]:
            hist_vals = []
            for pg_id in pg_ids:
                hist_vals.append(lat_stats[pg_id]['latency']['histogram'].get(key, ' '))
            stats_table.add_row([key] + hist_vals)

        stats_table.add_row(['- Counters -'] + [' '] * stream_count)
        err_cntrs_dict = OrderedDict()
        for pg_id in pg_ids:
            for err_cntr in sorted(lat_stats[pg_id]['err_cntrs'].keys()):
                if err_cntr not in err_cntrs_dict:
                    err_cntrs_dict[err_cntr] = [lat_stats[pg_id]['err_cntrs'][err_cntr]]
                else:
                    err_cntrs_dict[err_cntr].append(lat_stats[pg_id]['err_cntrs'][err_cntr])
        for err_cntr, val_list in err_cntrs_dict.items():
            stats_table.add_row([err_cntr] + val_list)
        header = ["PG ID"] + [key for key in pg_ids]
        stats_table.header(header)
        return {"latency_histogram": ExportableStats(None, stats_table)}

    def _generate_cpu_util_stats(self):
        """Build the per-thread CPU utilization table: average over the last
        few samples plus recent history columns.  Shows at most 14 threads
        and 15 history samples."""
        util_stats = self._util_stats_ref.get_stats(use_1sec_cache = True)

        stats_table = text_tables.TRexTextTable()
        if util_stats:
            if 'cpu' not in util_stats:
                # NOTE(review): 'Excepting' looks like a typo for 'Expecting' in this message
                raise Exception("Excepting 'cpu' section in stats %s" % util_stats)
            cpu_stats = util_stats['cpu']
            hist_len = len(cpu_stats[0]["history"])
            avg_len = min(5, hist_len)
            show_len = min(15, hist_len)
            stats_table.header(['Thread', 'Avg', 'Latest'] + list(range(-1, 0 - show_len, -1)))
            stats_table.set_cols_align(['l'] + ['r'] * (show_len + 1))
            stats_table.set_cols_width([10, 3, 6] + [3] * (show_len - 1))
            stats_table.set_cols_dtype(['t'] * (show_len + 2))

            for i in range(min(14, len(cpu_stats))):
                history = cpu_stats[i]["history"]
                ports = cpu_stats[i]["ports"]
                avg = int(round(sum(history[:avg_len]) / avg_len))

                # decode active ports for core
                if ports == [-1, -1]:
                    interfaces = "(IDLE)"
                elif not -1 in ports:
                    interfaces = "({:},{:})".format(ports[0], ports[1])
                else:
                    interfaces = "({:})".format(ports[0] if ports[0] != -1 else ports[1])

                thread = "{:2} {:^7}".format(i, interfaces)
                stats_table.add_row([thread, avg] + history[:show_len])
        else:
            stats_table.add_row(['No Data.'])
        return {'cpu_util(%)': ExportableStats(None, stats_table)}

    def _generate_mbuf_util_stats(self):
        """Build the mbuf utilization table: totals per mbuf size type plus
        per-socket used counts, RAM estimates and usage percentages."""
        util_stats = self._util_stats_ref.get_stats(use_1sec_cache = True)
        stats_table = text_tables.TRexTextTable()
        if util_stats:
            if 'mbuf_stats' not in util_stats:
                # NOTE(review): 'Excepting' looks like a typo for 'Expecting' in this message
                raise Exception("Excepting 'mbuf_stats' section in stats %s" % util_stats)
            mbuf_stats = util_stats['mbuf_stats']
            # grab an arbitrary socket's stats to discover the mbuf types
            # (assumes every socket has the same type set - TODO confirm)
            for mbufs_per_socket in mbuf_stats.values():
                first_socket_mbufs = mbufs_per_socket
                break
            # compute and cache the sorted type list once per stats object
            if not self._util_stats_ref.mbuf_types_list:
                mbuf_keys = list(first_socket_mbufs.keys())
                mbuf_keys.sort(key = get_number_of_bytes)
                self._util_stats_ref.mbuf_types_list = mbuf_keys
            types_len = len(self._util_stats_ref.mbuf_types_list)
            stats_table.set_cols_align(['l'] + ['r'] * (types_len + 1))
            stats_table.set_cols_width([10] + [7] * (types_len + 1))
            stats_table.set_cols_dtype(['t'] * (types_len + 2))
            stats_table.header([''] + self._util_stats_ref.mbuf_types_list + ['RAM(MB)'])
            total_list = []
            sum_totals = 0
            for mbuf_type in self._util_stats_ref.mbuf_types_list:
                # the +64 accounts for per-mbuf overhead on top of the payload size
                sum_totals += first_socket_mbufs[mbuf_type][1] * get_number_of_bytes(mbuf_type) + 64
                total_list.append(first_socket_mbufs[mbuf_type][1])
            sum_totals *= len(list(mbuf_stats.values()))
            total_list.append(int(sum_totals/1e6))
            stats_table.add_row(['Total:'] + total_list)
            stats_table.add_row(['Used:'] + [''] * (types_len + 1))
            for socket_name in sorted(list(mbuf_stats.keys())):
                mbufs = mbuf_stats[socket_name]
                socket_show_name = socket_name.replace('cpu-', '').replace('-', ' ').capitalize() + ':'
                sum_used = 0
                used_list = []
                percentage_list = []
                for mbuf_type in self._util_stats_ref.mbuf_types_list:
                    # mbufs[type] appears to be [free, total] - used = total - free (TODO confirm)
                    used = mbufs[mbuf_type][1] - mbufs[mbuf_type][0]
                    sum_used += used * get_number_of_bytes(mbuf_type) + 64
                    used_list.append(used)
                    percentage_list.append('%s%%' % int(100 * used / mbufs[mbuf_type][1]))
                used_list.append(int(sum_used/1e6))
                stats_table.add_row([socket_show_name] + used_list)
                stats_table.add_row(['Percent:'] + percentage_list + [''])
        else:
            stats_table.add_row(['No Data.'])
        return {'mbuf_util': ExportableStats(None, stats_table)}

    @staticmethod
    def _get_rational_block_char(value, range_start, interval):
        """Map a value within [range_start, range_start + interval) to a
        1/8-step Unicode block character (or 'X'/' ' on non-UTF-8 stdout)."""
        # in Konsole, utf-8 is sometimes printed with artifacts, return ascii for now
        #return 'X' if value >= range_start + float(interval) / 2 else ' '

        if sys.__stdout__.encoding != 'UTF-8':
            return 'X' if value >= range_start + float(interval) / 2 else ' '

        value -= range_start
        ratio = float(value) / interval
        if ratio <= 0.0625:
            return u' '         # empty block
        if ratio <= 0.1875:
            return u'\u2581'    # 1/8
        if ratio <= 0.3125:
            return u'\u2582'    # 2/8
        if ratio <= 0.4375:
            return u'\u2583'    # 3/8
        if ratio <= 0.5625:
            return u'\u2584'    # 4/8
        if ratio <= 0.6875:
            return u'\u2585'    # 5/8
        if ratio <= 0.8125:
            return u'\u2586'    # 6/8
        if ratio <= 0.9375:
            return u'\u2587'    # 7/8
        return u'\u2588'        # full block

    def _generate_port_graph(self, port_id_list):
        """Render TX/RX utilization history of the first relevant port as a
        text bar graph, one row per 5% utilization band (95 down to 0)."""
        relevant_port = self.__get_relevant_ports(port_id_list)[0]
        hist_len = len(relevant_port.port_stats.history)
        hist_maxlen = relevant_port.port_stats.history.maxlen
        # left-pad with zeros so the graph is always maxlen columns wide
        util_tx_hist = [0] * (hist_maxlen - hist_len) + [round(relevant_port.port_stats.history[i]['tx_percentage']) for i in range(hist_len)]
        util_rx_hist = [0] * (hist_maxlen - hist_len) + [round(relevant_port.port_stats.history[i]['rx_percentage']) for i in range(hist_len)]


        stats_table = text_tables.TRexTextTable()
        stats_table.header([' Util(%)', 'TX', 'RX'])
        stats_table.set_cols_align(['c', 'c', 'c'])
        stats_table.set_cols_width([8, hist_maxlen, hist_maxlen])
        stats_table.set_cols_dtype(['t', 't', 't'])

        for y in range(95, -1, -5):
            stats_table.add_row([y, ''.join([self._get_rational_block_char(util_tx, y, 5) for util_tx in util_tx_hist]),
                                    ''.join([self._get_rational_block_char(util_rx, y, 5) for util_rx in util_rx_hist])])

        return {"port_graph": ExportableStats({}, stats_table)}

    def _generate_port_stats(self, port_id_list):
        """Build the per-port statistics table; adds a 'total' column when
        more than one port is shown."""
        relevant_ports = self.__get_relevant_ports(port_id_list)

        return_stats_data = {}
        # keys prefixed with '-' runs are visual separator rows in the table
        per_field_stats = OrderedDict([("owner", []),
                                       ("state", []),
                                       ("speed", []),
                                       ("CPU util.", []),
                                       ("--", []),
                                       ("Tx bps L2", []),
                                       ("Tx bps L1", []),
                                       ("Tx pps", []),
                                       ("Line Util.", []),

                                       ("---", []),
                                       ("Rx bps", []),
                                       ("Rx pps", []),

                                       ("----", []),
                                       ("opackets", []),
                                       ("ipackets", []),
                                       ("obytes", []),
                                       ("ibytes", []),
                                       ("tx-bytes", []),
                                       ("rx-bytes", []),
                                       ("tx-pkts", []),
                                       ("rx-pkts", []),

                                       ("-----", []),
                                       ("oerrors", []),
                                       ("ierrors", []),

                                      ]
                                      )

        # accumulates the sum of all shown ports (used for the 'total' column)
        total_stats = CPortStats(None)

        for port_obj in relevant_ports:
            # fetch port data
            port_stats = port_obj.generate_port_stats()

            total_stats += port_obj.port_stats

            # populate to data structures
            return_stats_data[port_obj.port_id] = port_stats
            self.__update_per_field_dict(port_stats, per_field_stats)

        total_cols = len(relevant_ports)
        header = ["port"] + [port.port_id for port in relevant_ports]

        if (total_cols > 1):
            self.__update_per_field_dict(total_stats.generate_stats(), per_field_stats)
            header += ['total']
            total_cols += 1

        stats_table = text_tables.TRexTextTable()
        stats_table.set_cols_align(["l"] + ["r"] * total_cols)
        stats_table.set_cols_width([10] + [17]   * total_cols)
        stats_table.set_cols_dtype(['t'] + ['t'] * total_cols)

        stats_table.add_rows([[k] + v
                              for k, v in per_field_stats.items()],
                              header=False)

        stats_table.header(header)

        return {"port_statistics": ExportableStats(return_stats_data, stats_table)}

    def _generate_port_status(self, port_id_list):
        """Build the per-port status table (driver, link, MACs, PCI/NUMA)."""
        relevant_ports = self.__get_relevant_ports(port_id_list)

        return_stats_data = {}
        per_field_status = OrderedDict([("driver", []),
                                        ("maximum", []),
                                        ("status", []),
                                        ("promiscuous", []),
                                        ("--", []),
                                        ("HW src mac", []),
                                        ("SW src mac", []),
                                        ("SW dst mac", []),
                                        ("---", []),
                                        ("PCI Address", []),
                                        ("NUMA Node", []),
                                        ]
                                       )

        for port_obj in relevant_ports:
            # fetch port data
            # port_stats = self._async_stats.get_port_stats(port_obj.port_id)
            port_status = port_obj.generate_port_status()

            # populate to data structures
            return_stats_data[port_obj.port_id] = port_status

            self.__update_per_field_dict(port_status, per_field_status)

        stats_table = text_tables.TRexTextTable()
        stats_table.set_cols_align(["l"] + ["c"]*len(relevant_ports))
        stats_table.set_cols_width([15] + [20] * len(relevant_ports))

        stats_table.add_rows([[k] + v
                              for k, v in per_field_status.items()],
                             header=False)
        stats_table.header(["port"] + [port.port_id
                                       for port in relevant_ports])

        return {"port_status": ExportableStats(return_stats_data, stats_table)}

    def _generate_single_port_streams_info(self, port_obj, stream_id_list):
        """Build one port's loaded-streams table; returns None when the port
        has no streams loaded."""

        return_streams_data = port_obj.generate_loaded_streams_sum()

        if not return_streams_data.get("streams"):
            # we got no streams available
            return None

        # FORMAT VALUES ON DEMAND

        # because we mutate this - deep copy before
        return_streams_data = copy.deepcopy(return_streams_data)

        p_type_field_len = 0

        # trim long packet-type strings and size the column to the widest one
        for stream_id, stream_id_sum in return_streams_data['streams'].items():
            stream_id_sum['packet_type'] = self._trim_packet_headers(stream_id_sum['packet_type'], 30)
            p_type_field_len = max(p_type_field_len, len(stream_id_sum['packet_type']))

        info_table = text_tables.TRexTextTable()
        info_table.set_cols_align(["c"] + ["l"] + ["r"] + ["c"] + ["r"] + ["c"])
        info_table.set_cols_width([10]   + [p_type_field_len]  + [8]   + [16]  + [15]  + [12])
        info_table.set_cols_dtype(["t"] + ["t"] + ["t"] + ["t"] + ["t"] + ["t"])

        info_table.add_rows([v.values()
                             for k, v in return_streams_data['streams'].items()],
                             header=False)
        info_table.header(["ID", "packet type", "length", "mode", "rate", "next stream"])

        return ExportableStats(return_streams_data, info_table)


    def __get_relevant_ports(self, port_id_list):
        """Return the port objects whose IDs appear in ``port_id_list``,
        capped at the first four (display limitation by design)."""
        # fetch owned ports
        ports = [port_obj
                 for _, port_obj in self._ports_dict.items()
                 if port_obj.port_id in port_id_list]

        # display only the first FOUR options, by design
        if len(ports) > 4:
            #self.logger is not defined
            #self.logger.log(format_text("[WARNING]: ", 'magenta', 'bold'), format_text("displaying up to 4 ports", 'magenta'))
            ports = ports[:4]
        return ports

    def __update_per_field_dict(self, dict_src_data, dict_dest_ref):
        """Append each value from ``dict_src_data`` to the matching list in
        ``dict_dest_ref``; keys absent from the destination are ignored."""
        for key, val in dict_src_data.items():
            if key in dict_dest_ref:
                dict_dest_ref[key].append(val)

    @staticmethod
    def _trim_packet_headers(headers_str, trim_limit):
        """Truncate ``headers_str`` to ``trim_limit`` chars, ending in '...'."""
        if len(headers_str) < trim_limit:
            # do nothing
            return headers_str
        else:
            return (headers_str[:trim_limit-3] + "...")
721
722
723
class CTRexStats(object):
    """ This is an abstract class to represent a stats object """

    def __init__(self):
        self.reference_stats = {}          # baseline snapshot for relative counters
        self.latest_stats = {}             # most recent processed snapshot
        self.last_update_ts = time.time()
        self.history = deque(maxlen = 47)  # bounded history used for trends / graphs
        self.lock = threading.Lock()       # protects 'history' (deque iteration is not atomic)
        self.has_baseline = False

    ######## abstract methods ##########

    # get stats for user / API
    def get_stats (self):
        raise NotImplementedError()

    # generate format stats (for TUI)
    def generate_stats(self):
        raise NotImplementedError()

    # called when a snapshot arrives - add more fields
    # (signature matches the dispatch in 'update' below and every subclass:
    #  the baseline flag is consumed by 'update', not passed down)
    def _update (self, snapshot):
        raise NotImplementedError()


    ######## END abstract methods ##########

    def update(self, snapshot, baseline):
        """Feed a new snapshot; 'baseline' marks the first valid snapshot."""

        # no update is valid before baseline
        if not self.has_baseline and not baseline:
            return

        # call the underlying method
        rc = self._update(snapshot)
        if not rc:
            return

        # sync baseline exactly once
        if not self.has_baseline and baseline:
            self.reference_stats = copy.deepcopy(self.latest_stats)
            self.has_baseline = True

        # save history
        with self.lock:
            self.history.append(self.latest_stats)


    def clear_stats(self):
        """Zero all relative counters by re-basing on the current snapshot."""
        self.reference_stats = copy.deepcopy(self.latest_stats)
        self.history.clear()


    def invalidate (self):
        """Drop the latest snapshot (e.g. on disconnect)."""
        self.latest_stats = {}


    def _get (self, src, field, default = None):
        """Fetch 'field' from 'src'; a list field means a nested lookup."""
        if isinstance(field, list):
            # deep
            value = src
            for level in field:
                if level not in value:
                    return default
                value = value[level]
        else:
            # flat
            if field not in src:
                return default
            value = src[field]

        return value

    def get(self, field, format=False, suffix="", opts = None):
        """Absolute value of 'field' from the latest snapshot ('N/A' if missing)."""
        value = self._get(self.latest_stats, field)
        if value is None:
            return 'N/A'

        return value if not format else format_num(value, suffix = suffix, opts = opts)


    def get_rel(self, field, format=False, suffix=""):
        """Value of 'field' relative to the reference (baseline) snapshot."""
        ref_value = self._get(self.reference_stats, field)
        latest_value = self._get(self.latest_stats, field)

        # latest value is an aggregation - must contain the value
        if latest_value is None:
            return 'N/A'

        if ref_value is None:
            ref_value = 0

        value = latest_value - ref_value

        return value if not format else format_num(value, suffix)


    # get trend for a field ('percision' name kept for backward compatibility)
    def get_trend (self, field, use_raw = False, percision = 10.0):
        if field not in self.latest_stats:
            return 0

        # not enough history - no trend
        if len(self.history) < 5:
            return 0

        # absolute value is too low - considered noise
        if self.latest_stats[field] < percision:
            return 0

        # must lock, deque is not thread-safe for iteration
        with self.lock:
            field_samples = [sample[field] for sample in list(self.history)[-5:]]

        if use_raw:
            return calculate_diff_raw(field_samples)
        else:
            return calculate_diff(field_samples)


    def get_trend_gui (self, field, show_value = False, use_raw = False, up_color = 'red', down_color = 'green'):
        """Format the trend of 'field' as a colored arrow string for the TUI."""
        v = self.get_trend(field, use_raw)

        value = abs(v)

        # use arrows if utf-8 is supported
        if sys.__stdout__.encoding == 'UTF-8':
            arrow = u'\u25b2' if v > 0 else u'\u25bc'
        else:
            arrow = ''

        if sys.version_info < (3,0):
            arrow = arrow.encode('utf-8')

        color = up_color if v > 0 else down_color

        # change of less than 1% is not meaningful
        if value < 1:
            return ""

        elif value > 5:

            if show_value:
                return format_text("{0}{0}{0} {1:.2f}%".format(arrow,v), color)
            else:
                return format_text("{0}{0}{0}".format(arrow), color)

        elif value > 2:

            if show_value:
                return format_text("{0}{0} {1:.2f}%".format(arrow,v), color)
            else:
                return format_text("{0}{0}".format(arrow), color)

        else:
            if show_value:
                return format_text("{0} {1:.2f}%".format(arrow,v), color)
            else:
                return format_text("{0}".format(arrow), color)
884
885
886
class CGlobalStats(CTRexStats):
    """Global (server-wide) statistics: CPU, B/W and aggregate TX/RX rates."""

    def __init__(self, connection_info, server_version, ports_dict_ref, events_handler):
        super(CGlobalStats, self).__init__()

        self.connection_info = connection_info
        self.server_version  = server_version
        self._ports_dict     = ports_dict_ref
        self.events_handler  = events_handler

        # fire events when utilization crosses the high (85) / low (60) watermarks
        self.watched_cpu_util    = WatchedField('CPU util.', '%', 85, 60, events_handler)
        self.watched_rx_cpu_util = WatchedField('RX core util.', '%', 85, 60, events_handler)

    def get_stats (self):
        """Return a plain dict of the global counters for the API."""

        # absolute values, taken straight from the latest snapshot
        absolute_fields = [('cpu_util',    "m_cpu_util"),
                           ('rx_cpu_util', "m_rx_cpu_util"),
                           ('bw_per_core', "m_bw_per_core"),
                           ('tx_bps',      "m_tx_bps"),
                           ('tx_pps',      "m_tx_pps"),
                           ('rx_bps',      "m_rx_bps"),
                           ('rx_pps',      "m_rx_pps"),
                           ('rx_drop_bps', "m_rx_drop_bps")]

        stats = dict((name, self.get(key)) for name, key in absolute_fields)

        # relative (since baseline) counters
        stats['queue_full'] = self.get_rel("m_total_queue_full")

        return stats



    def _update(self, snapshot):
        """Augment the raw snapshot with L1 bps, store it, refresh watchers."""

        # derive the L1 TX rate from the L2 rate and the packet rate
        snapshot['m_tx_bps_L1'] = calc_bps_L1(snapshot.get("m_tx_bps"),
                                              snapshot.get("m_tx_pps"))

        # simple...
        self.latest_stats = snapshot

        self.watched_cpu_util.update(snapshot.get('m_cpu_util'))
        self.watched_rx_cpu_util.update(snapshot.get('m_rx_cpu_util'))

        return True
937
938
class CPortStats(CTRexStats):
    """Per-port statistics: raw counters, rates and TUI-formatted output.

    '_port_obj' may be None; in that case this object represents an
    aggregated column (see __add__) and port-specific fields are blank.
    """

    def __init__(self, port_obj):
        super(CPortStats, self).__init__()
        self._port_obj = port_obj

    @staticmethod
    def __merge_dicts (target, src):
        # merge src into target in place, summing values for shared keys
        for k, v in src.items():
            if k in target:
                target[k] += v
            else:
                target[k] = v


    def __add__ (self, x):
        """In-place aggregation of another CPortStats into this one.

        Used to build a 'total' column by summing several ports.
        Returns self (this mutates the left operand rather than creating
        a new object).
        """
        if not isinstance(x, CPortStats):
            raise TypeError("cannot add non stats object to stats")

        # main stats
        if not self.latest_stats:
            self.latest_stats = {}

        self.__merge_dicts(self.latest_stats, x.latest_stats)

        # reference stats
        if x.reference_stats:
            if not self.reference_stats:
                self.reference_stats = x.reference_stats.copy()
            else:
                self.__merge_dicts(self.reference_stats, x.reference_stats)

        # history - must be traversed under both locks (deque is not thread-safe)
        with self.lock, x.lock:
            if not self.history:
                self.history = copy.deepcopy(x.history)
            else:
                for h1, h2 in zip(self.history, x.history):
                    self.__merge_dicts(h1, h2)

        return self

    # for port we need to do something smarter
    def get_stats (self):
        """Return a plain dict of the port counters and rates for the API."""
        stats = {}

        # relative counters (since baseline / last clear)
        stats['opackets'] = self.get_rel("opackets")
        stats['ipackets'] = self.get_rel("ipackets")
        stats['obytes']   = self.get_rel("obytes")
        stats['ibytes']   = self.get_rel("ibytes")
        stats['oerrors']  = self.get_rel("oerrors")
        stats['ierrors']  = self.get_rel("ierrors")

        # absolute rates from the latest snapshot
        stats['tx_bps']     = self.get("m_total_tx_bps")
        stats['tx_pps']     = self.get("m_total_tx_pps")
        stats['tx_bps_L1']  = self.get("m_total_tx_bps_L1")
        stats['tx_util']    = self.get("m_tx_util")

        stats['rx_bps']     = self.get("m_total_rx_bps")
        stats['rx_pps']     = self.get("m_total_rx_pps")
        stats['rx_bps_L1']  = self.get("m_total_rx_bps_L1")
        stats['rx_util']    = self.get("m_rx_util")

        return stats



    def _update(self, snapshot):
        """Enrich the raw snapshot with L1 rates and utilization percentages."""
        speed = self._port_obj.get_speed_bps()

        # L1 bps
        tx_bps  = snapshot.get("m_total_tx_bps")
        tx_pps  = snapshot.get("m_total_tx_pps")
        rx_bps  = snapshot.get("m_total_rx_bps")
        rx_pps  = snapshot.get("m_total_rx_pps")
        ts_diff = 0.5 # TODO: change this to real ts diff from server

        bps_tx_L1 = calc_bps_L1(tx_bps, tx_pps)
        bps_rx_L1 = calc_bps_L1(rx_bps, rx_pps)

        snapshot['m_total_tx_bps_L1'] = bps_tx_L1
        if speed:
            snapshot['m_tx_util'] = (bps_tx_L1 / speed) * 100.0
        else:
            # speed unknown / zero - avoid division by zero
            snapshot['m_tx_util'] = 0

        snapshot['m_total_rx_bps_L1'] = bps_rx_L1
        if speed:
            snapshot['m_rx_util'] = (bps_rx_L1 / speed) * 100.0
        else:
            snapshot['m_rx_util'] = 0

        # TX line util not smoothed
        diff_tx_pkts = snapshot.get('opackets', 0) - self.latest_stats.get('opackets', 0)
        diff_tx_bytes = snapshot.get('obytes', 0) - self.latest_stats.get('obytes', 0)
        tx_bps_L1 = calc_bps_L1(8.0 * diff_tx_bytes / ts_diff, float(diff_tx_pkts) / ts_diff)
        if speed:
            snapshot['tx_percentage'] = 100.0 * tx_bps_L1 / speed
        else:
            snapshot['tx_percentage'] = 0

        # RX line util not smoothed
        diff_rx_pkts = snapshot.get('ipackets', 0) - self.latest_stats.get('ipackets', 0)
        diff_rx_bytes = snapshot.get('ibytes', 0) - self.latest_stats.get('ibytes', 0)
        rx_bps_L1 = calc_bps_L1(8.0 * diff_rx_bytes / ts_diff, float(diff_rx_pkts) / ts_diff)
        if speed:
            snapshot['rx_percentage'] = 100.0 * rx_bps_L1 / speed
        else:
            snapshot['rx_percentage'] = 0

        # simple...
        self.latest_stats = snapshot

        return True


    def generate_stats(self):
        """Return a dict of TUI-formatted (colored / arrowed) port fields."""

        state = self._port_obj.get_port_state_name() if self._port_obj else ""
        if state == "ACTIVE":
            state = format_text(state, 'green', 'bold')
        elif state == "PAUSE":
            state = format_text(state, 'magenta', 'bold')
        else:
            state = format_text(state, 'bold')

        # default rate format modifiers
        rate_format = {'bpsl1': None, 'bps': None, 'pps': None, 'percentage': 'bold'}

        # mark owned ports by color
        if self._port_obj:
            owner = self._port_obj.get_owner()
            # highlight the rate type the user last set (e.g. pps / bps)
            rate_format[self._port_obj.last_factor_type] = ('blue', 'bold')
            if self._port_obj.is_acquired():
                owner = format_text(owner, 'green')

        else:
            owner = ''


        # keys with dashes ("--", "---", ...) are table spacer rows
        return {"owner": owner,
                "state": "{0}".format(state),
                "speed": self._port_obj.get_formatted_speed() if self._port_obj else '',
                "CPU util.": "{0} {1}%".format(self.get_trend_gui("m_cpu_util", use_raw = True),
                                               format_threshold(round_float(self.get("m_cpu_util")), [85, 100], [0, 85])) if self._port_obj else '' ,
                "--": " ",
                "---": " ",
                "----": " ",
                "-----": " ",

                "Tx bps L1": "{0} {1}".format(self.get_trend_gui("m_total_tx_bps_L1", show_value = False),
                                               self.get("m_total_tx_bps_L1", format = True, suffix = "bps", opts = rate_format['bpsl1'])),

                "Tx bps L2": "{0} {1}".format(self.get_trend_gui("m_total_tx_bps", show_value = False),
                                                self.get("m_total_tx_bps", format = True, suffix = "bps", opts = rate_format['bps'])),

                "Line Util.": "{0} {1}".format(self.get_trend_gui("m_tx_util", show_value = False) if self._port_obj else "",
                                               self.get("m_tx_util", format = True, suffix = "%", opts = rate_format['percentage']) if self._port_obj else ""),

                "Rx bps": "{0} {1}".format(self.get_trend_gui("m_total_rx_bps", show_value = False),
                                            self.get("m_total_rx_bps", format = True, suffix = "bps")),

                "Tx pps": "{0} {1}".format(self.get_trend_gui("m_total_tx_pps", show_value = False),
                                            self.get("m_total_tx_pps", format = True, suffix = "pps", opts = rate_format['pps'])),

                "Rx pps": "{0} {1}".format(self.get_trend_gui("m_total_rx_pps", show_value = False),
                                            self.get("m_total_rx_pps", format = True, suffix = "pps")),

                 "opackets" : self.get_rel("opackets"),
                 "ipackets" : self.get_rel("ipackets"),
                 "obytes"   : self.get_rel("obytes"),
                 "ibytes"   : self.get_rel("ibytes"),

                 "tx-bytes": self.get_rel("obytes", format = True, suffix = "B"),
                 "rx-bytes": self.get_rel("ibytes", format = True, suffix = "B"),
                 "tx-pkts": self.get_rel("opackets", format = True, suffix = "pkts"),
                 "rx-pkts": self.get_rel("ipackets", format = True, suffix = "pkts"),

                 # error counters are green when zero, red otherwise
                 "oerrors"  : format_num(self.get_rel("oerrors"),
                                         compact = False,
                                         opts = 'green' if (self.get_rel("oerrors")== 0) else 'red'),

                 "ierrors"  : format_num(self.get_rel("ierrors"),
                                         compact = False,
                                         opts = 'green' if (self.get_rel("ierrors")== 0) else 'red'),

                }
1126
1127
class CLatencyStats(CTRexStats):
    """Latency statistics keyed by packet-group ID."""

    def __init__(self, ports):
        super(CLatencyStats, self).__init__()


    # for API
    def get_stats (self):
        """Return a deep copy of the latest processed latency stats."""
        return copy.deepcopy(self.latest_stats)


    def _update(self, snapshot):
        """Normalize a raw latency snapshot into the canonical layout."""
        snapshot = {} if snapshot is None else snapshot
        output = {}

        # global error counters; missing ones default to zero
        glob_src = snapshot.get('global', {})
        output['global'] = dict((field, glob_src.get(field, 0))
                                for field in ('bad_hdr', 'old_flow'))

        # only keys that parse as integers are active packet-group IDs
        for pg_id in filter(is_intable, snapshot.keys()):
            current_pg = snapshot.get(pg_id)
            int_pg_id = int(pg_id)

            pg_out = {'err_cntrs': current_pg['err_cntrs'], 'latency': {}}
            output[int_pg_id] = pg_out

            if 'latency' in current_pg:
                lat_src = current_pg['latency']
                lat_out = pg_out['latency']

                # copy known fields; mark missing ones as not available
                for field in ('jitter', 'average', 'total_max', 'last_max'):
                    lat_out[field] = lat_src[field] if field in lat_src else StatNotAvailable(field)

                if 'histogram' in lat_src:
                    # histogram buckets arrive as strings - convert keys to ints
                    lat_out['histogram'] = dict((int(bucket), count)
                                                for bucket, count in lat_src['histogram'].items())
                    min_val = min(lat_out['histogram'].keys())
                    # a lowest bucket of 0 is reported as 2
                    lat_out['total_min'] = 2 if min_val == 0 else min_val
                else:
                    lat_out['total_min'] = StatNotAvailable('total_min')
                    lat_out['histogram'] = {}

        self.latest_stats = output
        return True
1180
1181
1182# RX stats objects - COMPLEX :-(
class CRxStats(CTRexStats):
    """Per packet-group (flow stats) RX statistics.

    Snapshots arrive keyed by pg_id (integer-like string keys) plus 'ts'
    and 'global' entries; this class merges each snapshot over the
    previous one, derives bandwidth values, and exposes the result via
    get_stats().
    """

    def __init__(self, ports):
        super(CRxStats, self).__init__()
        self.ports = ports


    # calculates a diff between previous snapshot
    # and current one
    def calculate_diff_sec (self, current, prev):
        """Return elapsed seconds between two snapshots (0.0 when no prev)."""
        if not 'ts' in current:
            raise ValueError("INTERNAL ERROR: RX stats snapshot MUST contain 'ts' field")

        if prev:
            prev_ts   = prev['ts']
            now_ts    = current['ts']
            # 'value' is in ticks, 'freq' converts ticks to seconds
            diff_sec  = (now_ts['value'] - prev_ts['value']) / float(now_ts['freq'])
        else:
            diff_sec = 0.0

        return diff_sec


    # this is the heart of the complex
    def process_single_pg (self, current_pg, prev_pg):
        """Merge one packet-group's current counters over the previous ones.

        Per-port values from 'current_pg' override those carried forward
        from 'prev_pg'; a 'total' entry summing all integer-keyed ports is
        recomputed for every counter field. Returns the merged dict.
        """

        # start with the previous PG
        output = copy.deepcopy(prev_pg)

        for field in ['tx_pkts', 'tx_bytes', 'rx_pkts', 'rx_bytes']:
            # is in the first time ? (nothing in prev)
            if field not in output:
                output[field] = {}

            # does the current snapshot has this field ?
            if field in current_pg:
                for port, pv in current_pg[field].items():
                    # skip non-port keys (e.g. a previous 'total')
                    if not is_intable(port):
                        continue

                    output[field][port] = pv

            # sum up (total stays None when no port reported this field)
            total = None
            for port, pv in output[field].items():
                if not is_intable(port):
                    continue
                if total is None:
                    total = 0
                total += pv

            output[field]['total'] = total


        return output


    def process_snapshot (self, current, prev):
        """Build the full processed snapshot: 'current' merged over 'prev'."""

        # final output
        output = {}

        # copy timestamp field
        output['ts'] = current['ts']

        # global (not per pg_id) error counters
        output['global'] = {}
        for field in ['rx_err', 'tx_err']:
            output['global'][field] = {}
            if 'global' in current and field in current['global']:
                for port in current['global'][field]:
                    # port keys arrive as strings - normalize to ints
                    output['global'][field][int(port)] = current['global'][field][port]

        # we care only about the current active keys
        pg_ids = list(filter(is_intable, current.keys()))

        for pg_id in pg_ids:

            current_pg = current.get(pg_id, {})

            # first time - we do not care
            if current_pg.get('first_time'):
                # new value - ignore history
                output[pg_id] = self.process_single_pg(current_pg, {})
                self.reference_stats[pg_id] = {}

                # 'dry' B/W (initializes rate fields without computing rates)
                self.calculate_bw_for_pg(output[pg_id])

            else:
                # aggregate the two values
                prev_pg = prev.get(pg_id, {})
                output[pg_id] = self.process_single_pg(current_pg, prev_pg)

                # calculate B/W
                diff_sec = self.calculate_diff_sec(current, prev)
                self.calculate_bw_for_pg(output[pg_id], prev_pg, diff_sec)


        # clean up old reference values - they are dead
        ref_pg_ids = list(filter(is_intable, self.reference_stats.keys()))

        deleted_pg_ids = set(ref_pg_ids).difference(pg_ids)
        for d_pg_id in deleted_pg_ids:
            del self.reference_stats[d_pg_id]

        return output



    def calculate_bw_for_pg (self, pg_current, pg_prev = None, diff_sec = 0.0):
        """Derive pps/bps/L1 rates (plus low-pass-filtered variants) per port.

        Mutates 'pg_current' in place. Without previous values (or a
        non-positive time diff) all rate fields are initialized empty.
        """
        # no previous values
        if (not pg_prev) or not (diff_sec > 0):
            pg_current['tx_pps']        = {}
            pg_current['tx_bps']        = {}
            pg_current['tx_bps_L1']     = {}
            pg_current['tx_line_util']  = {}
            pg_current['rx_pps']        = {}
            pg_current['rx_bps']        = {}
            pg_current['rx_bps_L1']     = {}
            pg_current['rx_line_util']  = {}

            pg_current['tx_pps_lpf']    = {}
            pg_current['tx_bps_lpf']    = {}
            pg_current['tx_bps_L1_lpf'] = {}
            pg_current['rx_pps_lpf']    = {}
            pg_current['rx_bps_lpf']    = {}
            pg_current['rx_bps_L1_lpf'] = {}
            return

        # TX
        for port in pg_current['tx_pkts'].keys():

            prev_tx_pps   = pg_prev['tx_pps'].get(port)
            now_tx_pkts   = pg_current['tx_pkts'].get(port)
            prev_tx_pkts  = pg_prev['tx_pkts'].get(port)
            pg_current['tx_pps'][port], pg_current['tx_pps_lpf'][port] = self.calc_pps(prev_tx_pps, now_tx_pkts, prev_tx_pkts, diff_sec)

            prev_tx_bps   = pg_prev['tx_bps'].get(port)
            now_tx_bytes  = pg_current['tx_bytes'].get(port)
            prev_tx_bytes = pg_prev['tx_bytes'].get(port)

            pg_current['tx_bps'][port], pg_current['tx_bps_lpf'][port] = self.calc_bps(prev_tx_bps, now_tx_bytes, prev_tx_bytes, diff_sec)

            # L1 rates are derived only when both bps and pps are valid
            if pg_current['tx_bps'].get(port) != None and pg_current['tx_pps'].get(port) != None:
                pg_current['tx_bps_L1'][port] = calc_bps_L1(pg_current['tx_bps'][port], pg_current['tx_pps'][port])
                pg_current['tx_bps_L1_lpf'][port] = calc_bps_L1(pg_current['tx_bps_lpf'][port], pg_current['tx_pps_lpf'][port])
            else:
                pg_current['tx_bps_L1'][port] = None
                pg_current['tx_bps_L1_lpf'][port] = None


        # RX
        for port in pg_current['rx_pkts'].keys():

            prev_rx_pps   = pg_prev['rx_pps'].get(port)
            now_rx_pkts   = pg_current['rx_pkts'].get(port)
            prev_rx_pkts  = pg_prev['rx_pkts'].get(port)
            pg_current['rx_pps'][port], pg_current['rx_pps_lpf'][port] = self.calc_pps(prev_rx_pps, now_rx_pkts, prev_rx_pkts, diff_sec)

            prev_rx_bps   = pg_prev['rx_bps'].get(port)
            now_rx_bytes  = pg_current['rx_bytes'].get(port)
            prev_rx_bytes = pg_prev['rx_bytes'].get(port)
            pg_current['rx_bps'][port], pg_current['rx_bps_lpf'][port] = self.calc_bps(prev_rx_bps, now_rx_bytes, prev_rx_bytes, diff_sec)
            if pg_current['rx_bps'].get(port) != None and pg_current['rx_pps'].get(port) != None:
                pg_current['rx_bps_L1'][port] = calc_bps_L1(pg_current['rx_bps'][port], pg_current['rx_pps'][port])
                pg_current['rx_bps_L1_lpf'][port] = calc_bps_L1(pg_current['rx_bps_lpf'][port], pg_current['rx_pps_lpf'][port])
            else:
                pg_current['rx_bps_L1'][port] = None
                pg_current['rx_bps_L1_lpf'][port] = None


    def calc_pps (self, prev_bw, now, prev, diff_sec):
        """Packets-per-second variant of calc_bw."""
        return self.calc_bw(prev_bw, now, prev, diff_sec, False)


    def calc_bps (self, prev_bw, now, prev, diff_sec):
        """Bits-per-second variant of calc_bw (bytes are converted to bits)."""
        return self.calc_bw(prev_bw, now, prev, diff_sec, True)

    # returns tuple - first value is real, second is low pass filtered
    def calc_bw (self, prev_bw, now, prev, diff_sec, is_bps):
        # B/W is not valid when the values are None
        if (now is None) or (prev is None):
            return (None, None)

        # calculate the B/W for current snapshot
        current_bw = (now - prev) / diff_sec
        if is_bps:
            current_bw *= 8

        # previous B/W is None ? ignore it
        if prev_bw is None:
            prev_bw = 0

        # LPF: equal-weight average of previous and current B/W
        return (current_bw, 0.5 * prev_bw + 0.5 * current_bw)




    def _update (self, snapshot):
        """Process the raw snapshot against the previous one and store it."""
        #print(snapshot)
        # generate a new snapshot
        new_snapshot = self.process_snapshot(snapshot, self.latest_stats)

        #print new_snapshot
        # advance
        self.latest_stats = new_snapshot


        return True



    # for API
    def get_stats (self):
        """Return per-pg_id counters and rates, relative where applicable."""
        stats = {}

        for pg_id, value in self.latest_stats.items():
            # skip non ints
            if not is_intable(pg_id):
                # 'global' stats are in the same level of the pg_ids. We do want them to go to the user
                if pg_id == 'global':
                    stats[pg_id] = value
                continue
            # bare counters (relative to the reference snapshot)
            stats[int(pg_id)] = {}
            for field in ['tx_pkts', 'tx_bytes', 'rx_pkts', 'rx_bytes']:
                val = self.get_rel([pg_id, field, 'total'])
                stats[int(pg_id)][field] = {'total': val if val != 'N/A' else StatNotAvailable(field)}
                for port in value[field].keys():
                    if is_intable(port):
                        val = self.get_rel([pg_id, field, port])
                        stats[int(pg_id)][field][int(port)] = val if val != 'N/A' else StatNotAvailable(field)

            # BW values (absolute, from the latest snapshot)
            for field in ['tx_pps', 'tx_bps', 'tx_bps_L1', 'rx_pps', 'rx_bps', 'rx_bps_L1']:
                val = self.get([pg_id, field, 'total'])
                stats[int(pg_id)][field] = {'total': val if val != 'N/A' else StatNotAvailable(field)}
                for port in value[field].keys():
                    if is_intable(port):
                        val = self.get([pg_id, field, port])
                        stats[int(pg_id)][field][int(port)] = val if val != 'N/A' else StatNotAvailable(field)

        return stats
1425        return stats
1426
class CUtilStats(CTRexStats):
    """CPU / mbuf utilization stats, fetched on demand from the server."""

    def __init__(self, client):
        super(CUtilStats, self).__init__()
        self.client = client
        self.history = deque(maxlen = 1)  # only the newest sample is kept
        self.mbuf_types_list = None
        self.last_update_ts = -999        # forces a fetch on the first call

    def get_stats(self, use_1sec_cache = False):
        """Return utilization data, optionally served from a 1-second cache."""
        time_now = time.time()
        cache_expired = self.last_update_ts + 1 < time_now
        if cache_expired or not self.history or not use_1sec_cache:
            if not self.client.is_connected():
                # disconnected - record an empty sample
                self.history.append({})
            else:
                rc = self.client._transmit('get_utilization')
                if not rc:
                    raise Exception(rc)
                self.last_update_ts = time_now
                self.history.append(rc.data())

        return self.history[-1]
1449
class CXStats(CTRexStats):
    """Extended (xstats) per-port counters, polled on demand from the server."""

    def __init__(self, client):
        super(CXStats, self).__init__()
        self.client = client
        self.history = deque(maxlen = 1)  # only the newest sample is kept
        self.names = {}                   # xstats counter names ("keys")
        self.last_update_ts = -999        # forces a fetch on the first call

    def get_stats(self, port_id, use_1sec_cache = False):
        """Return xstats for 'port_id' relative to the reference snapshot.

        May serve from a 1-second cache when 'use_1sec_cache' is set.
        Raises Exception on an RPC failure or when the names and values
        lists returned by the server disagree in length.
        """
        time_now = time.time()
        if self.last_update_ts + 1 < time_now or not self.history or not use_1sec_cache:
            if self.client.is_connected():
                rc = self.client._transmit('get_port_xstats_values', params = {'port_id': port_id})
                if not rc:
                    raise Exception(rc)
                self.last_update_ts = time_now
                values = rc.data().get('xstats_values', [])
                if len(values) != len(self.names): # need to update names ("keys")
                    rc = self.client._transmit('get_port_xstats_names', params = {'port_id': port_id})
                    if not rc:
                        raise Exception(rc)
                    self.names = rc.data().get('xstats_names', [])
                if len(values) != len(self.names):
                    raise Exception('Length of get_xstats_names: %s and get_port_xstats_values: %s' % (len(self.names), len(values)))
                self.history.append(dict(zip(self.names, values)))
            else:
                self.history.append({})

        # report each counter relative to the reference (baseline) value
        stats = {}
        for key, val in self.history[-1].items():
            stats[key] = val - self.reference_stats.get(key, 0)
        return stats
1483
if __name__ == "__main__":
    # this module is a library; nothing to do when executed directly
    pass
1486
1487