trex_stl_stats.py revision 6e1919c3
1#!/router/bin/python
2
3from .utils import text_tables
4from .utils.text_opts import format_text, format_threshold, format_num
5from .trex_stl_types import StatNotAvailable, is_integer
6from .trex_stl_exceptions import STLError
7
8from collections import namedtuple, OrderedDict, deque
9import sys
10import copy
11import datetime
12import time
13import re
14import math
15import threading
16import pprint
17
# single-character keys selecting which statistic section to generate/show
GLOBAL_STATS = 'g'
PORT_STATS = 'p'
PORT_GRAPH = 'pg'
PORT_STATUS = 'ps'
STREAMS_STATS = 's'
LATENCY_STATS = 'ls'
LATENCY_HISTOGRAM = 'lh'
CPU_STATS = 'c'
MBUF_STATS = 'm'
EXTENDED_STATS = 'x'
EXTENDED_INC_ZERO_STATS = 'xz'

ALL_STATS_OPTS = [GLOBAL_STATS, PORT_STATS, PORT_STATUS, STREAMS_STATS, LATENCY_STATS, PORT_GRAPH, LATENCY_HISTOGRAM, CPU_STATS, MBUF_STATS, EXTENDED_STATS, EXTENDED_INC_ZERO_STATS]
# preset section groups used by the different TUI views
COMPACT = [GLOBAL_STATS, PORT_STATS]
GRAPH_PORT_COMPACT = [GLOBAL_STATS, PORT_GRAPH]
SS_COMPAT = [GLOBAL_STATS, STREAMS_STATS] # stream stats
LS_COMPAT = [GLOBAL_STATS, LATENCY_STATS] # latency stats
LH_COMPAT = [GLOBAL_STATS, LATENCY_HISTOGRAM] # latency histogram
UT_COMPAT = [GLOBAL_STATS, CPU_STATS, MBUF_STATS] # utilization

# pairs the raw stat data with its rendered text table
ExportableStats = namedtuple('ExportableStats', ['raw_data', 'text_table'])
39
def round_float (f):
    """Return *f* rounded to 2 decimal digits if it is a float; any other type is returned unchanged."""
    # isinstance instead of `type(f) is float` (idiomatic; also accepts float subclasses)
    return float("%.2f" % f) if isinstance(f, float) else f
42
def try_int(i):
    """Convert *i* to int when possible; otherwise return it unchanged."""
    try:
        return int(i)
    except (TypeError, ValueError):
        # narrowed from a bare except: only conversion failures fall through
        return i
48
# deep merge of dicts: dst = src + dst (existing dst values win)
def deep_merge_dicts (dst, src):
    """Recursively merge *src* into *dst* in place.

    Keys missing from *dst* are deep-copied from *src*.  Keys present in
    both are merged recursively when the *src* value is a dict; otherwise
    the existing *dst* value is kept.
    """
    for k, v in src.items():
        if k not in dst:
            # not present in dst - deep copy so later mutation of src can't leak in
            dst[k] = copy.deepcopy(v)
        elif isinstance(v, dict):
            deep_merge_dicts(dst[k], v)
58
# BPS L1 from pps and BPS L2
def calc_bps_L1 (bps, pps):
    """Derive the layer-1 bit rate from the L2 bit rate and the packet rate."""
    if bps == 0 or pps == 0:
        return 0

    # average L2 packet size in bytes
    avg_pkt_size = bps / (pps * 8.0)
    # add the per-packet wire overhead (20 bytes) proportionally
    return bps * (1 + (20 / avg_pkt_size))
67
def is_intable (value):
    """Return True if *value* can be converted to int, False otherwise."""
    try:
        int(value)
        return True
    except (TypeError, ValueError):
        # original caught only ValueError, so int(None) crashed with TypeError
        return False
74
# use to calculate diffs relative to the previous values
# for example, BW
def calculate_diff (samples):
    """Return a weighted trend (%) over consecutive relative diffs in *samples*.

    Later sample pairs get a linearly increasing weight; each single-step
    change is capped at +100%.  Returns 0.0 for fewer than two samples
    (the original divided by zero in that case).
    """
    total = 0.0

    if len(samples) < 2:
        return total

    weight_step = 1.0 / sum(range(0, len(samples)))
    weight = weight_step

    for i in range(0, len(samples) - 1):
        # avoid division by zero / nonsense ratios for non-positive samples
        current = samples[i] if samples[i] > 0 else 1
        nxt = samples[i + 1] if samples[i + 1] > 0 else 1   # renamed: don't shadow builtin next()

        s = 100 * ((float(nxt) / current) - 1.0)

        # block change by 100%
        total  += (min(s, 100) * weight)
        weight += weight_step

    return total
94
95
# calculate by absolute values and not relatives (useful for CPU usage in % and etc.)
def calculate_diff_raw (samples):
    """Return a weighted trend of absolute consecutive diffs in *samples*.

    Later sample pairs get a linearly increasing weight.  Returns 0.0 for
    fewer than two samples (the original divided by zero in that case).
    """
    total = 0.0

    if len(samples) < 2:
        return total

    weight_step = 1.0 / sum(range(0, len(samples)))
    weight = weight_step

    for i in range(0, len(samples) - 1):
        current = samples[i]
        nxt = samples[i + 1]    # renamed: don't shadow builtin next()

        total  += ( (nxt - current) * weight )
        weight += weight_step

    return total
111
get_number_of_bytes_cache = {}
# get number of bytes: '64b'->64, '9kb'->9000 etc.
def get_number_of_bytes(val):
    """Translate a size label such as '64b' or '9kb' into a byte count (memoized)."""
    cached = get_number_of_bytes_cache.get(val)
    if cached is None:
        # strip the trailing 'b' and expand the 'k' multiplier to '000'
        cached = int(val[:-1].replace('k', '000'))
        get_number_of_bytes_cache[val] = cached
    return cached
118
# a simple object to keep a watch over a field
class WatchedField(object):
    """Tracks a numeric field and warns once when it crosses a threshold.

    The watcher turns 'hot' when a sample exceeds *high_th* and emits a
    single warning through *events_handler*; it re-arms after a sample
    drops below *low_th*.
    """

    def __init__ (self, name, suffix, high_th, low_th, events_handler):
        self.name           = name
        self.suffix         = suffix
        self.high_th        = high_th
        self.low_th         = low_th
        self.events_handler = events_handler
        self.hot            = False    # currently above the high threshold?
        self.current        = None     # last non-None sample observed

    def update (self, value):
        """Feed a new sample; None samples are ignored."""
        if value is None:
            return

        if not self.hot and value > self.high_th:
            # first crossing - warn exactly once until we cool down
            self.events_handler.log_warning("{0} is high: {1}{2}".format(self.name, value, self.suffix))
            self.hot = True

        if self.hot and value < self.low_th:
            self.hot = False

        self.current = value
144
145
146
147class CTRexInfoGenerator(object):
    """
    This object is responsible for generating stats and information from objects
    maintained at STLClient and the ports.
    """
152
153    def __init__(self, global_stats_ref, ports_dict_ref, rx_stats_ref, latency_stats_ref, util_stats_ref, xstats_ref, async_monitor):
154        self._global_stats = global_stats_ref
155        self._ports_dict = ports_dict_ref
156        self._rx_stats_ref = rx_stats_ref
157        self._latency_stats_ref = latency_stats_ref
158        self._util_stats_ref = util_stats_ref
159        self._xstats_ref = xstats_ref
160        self._async_monitor = async_monitor
161
162    def generate_single_statistic(self, port_id_list, statistic_type):
163        if statistic_type == GLOBAL_STATS:
164            return self._generate_global_stats()
165
166        elif statistic_type == PORT_STATS:
167            return self._generate_port_stats(port_id_list)
168
169        elif statistic_type == PORT_GRAPH:
170            return self._generate_port_graph(port_id_list)
171
172        elif statistic_type == PORT_STATUS:
173            return self._generate_port_status(port_id_list)
174
175        elif statistic_type == STREAMS_STATS:
176            return self._generate_streams_stats()
177
178        elif statistic_type == LATENCY_STATS:
179            return self._generate_latency_stats()
180
181        elif statistic_type == LATENCY_HISTOGRAM:
182            return self._generate_latency_histogram()
183
184        elif statistic_type == CPU_STATS:
185            return self._generate_cpu_util_stats()
186
187        elif statistic_type == MBUF_STATS:
188            return self._generate_mbuf_util_stats()
189
190        elif statistic_type == EXTENDED_STATS:
191            return self._generate_xstats(port_id_list, include_zero_lines = False)
192
193        elif statistic_type == EXTENDED_INC_ZERO_STATS:
194            return self._generate_xstats(port_id_list, include_zero_lines = True)
195
196        else:
197            # ignore by returning empty object
198            return {}
199
200    def generate_streams_info(self, port_id_list, stream_id_list):
201        relevant_ports = self.__get_relevant_ports(port_id_list)
202        return_data = OrderedDict()
203
204        for port_obj in relevant_ports:
205            streams_data = self._generate_single_port_streams_info(port_obj, stream_id_list)
206            if not streams_data:
207                continue
208            hdr_key = "Port {port}:".format(port= port_obj.port_id)
209
210            # TODO: test for other ports with same stream structure, and join them
211            return_data[hdr_key] = streams_data
212
213        return return_data
214
    def _generate_global_stats(self):
        """Build the two-column 'global statistics' panel.

        Left column: connection/version/utilization info; right column:
        traffic rates and drop counters.  Returns a dict mapping the section
        name to an ExportableStats holding the rendered table (raw data is
        not exported for this section).
        """
        global_stats = self._global_stats

        # left column: connection, server version, CPU / async utilization
        stats_data_left = OrderedDict([("connection", "{host}, Port {port}".format(host=global_stats.connection_info.get("server"),
                                                                     port=global_stats.connection_info.get("sync_port"))),
                             ("version", "{ver}".format(ver=global_stats.server_version.get("version", "N/A"))),

                             ("cpu_util.", "{0}% @ {2} cores ({3} per port) {1}".format( format_threshold(round_float(global_stats.get("m_cpu_util")), [85, 100], [0, 85]),
                                                                                         global_stats.get_trend_gui("m_cpu_util", use_raw = True),
                                                                                         global_stats.system_info.get('dp_core_count'),
                                                                                         global_stats.system_info.get('dp_core_count_per_port'),
                                                                                         )),

                             ("rx_cpu_util.", "{0}% {1}".format( format_threshold(round_float(global_stats.get("m_rx_cpu_util")), [85, 100], [0, 85]),
                                                                global_stats.get_trend_gui("m_rx_cpu_util", use_raw = True))),

                             ("async_util.", "{0}% / {1}".format( format_threshold(round_float(self._async_monitor.get_cpu_util()), [85, 100], [0, 85]),
                                                                 format_num(self._async_monitor.get_bps() / 8.0, suffix = "B/sec"))),
                            ])

        # right column: TX/RX rates with trend arrows, drop rate, queue-full count
        stats_data_right = OrderedDict([
                             ("total_tx_L2", "{0} {1}".format( global_stats.get("m_tx_bps", format=True, suffix="b/sec"),
                                                                global_stats.get_trend_gui("m_tx_bps"))),

                             ("total_tx_L1", "{0} {1}".format( global_stats.get("m_tx_bps_L1", format=True, suffix="b/sec"),
                                                                global_stats.get_trend_gui("m_tx_bps_L1"))),

                             ("total_rx", "{0} {1}".format( global_stats.get("m_rx_bps", format=True, suffix="b/sec"),
                                                              global_stats.get_trend_gui("m_rx_bps"))),

                             ("total_pps", "{0} {1}".format( global_stats.get("m_tx_pps", format=True, suffix="pkt/sec"),
                                                              global_stats.get_trend_gui("m_tx_pps"))),

                             # drop rate shows green only when exactly zero
                             ("drop_rate", "{0}".format( format_num(global_stats.get("m_rx_drop_bps"),
                                                                    suffix = 'b/sec',
                                                                    opts = 'green' if (global_stats.get("m_rx_drop_bps")== 0) else 'red'),
                                                            )),

                             ("queue_full", "{0}".format( format_num(global_stats.get_rel("m_total_queue_full"),
                                                                     suffix = 'pkts',
                                                                     compact = False,
                                                                     opts = 'green' if (global_stats.get_rel("m_total_queue_full")== 0) else 'red'))),
                             ])

        # build table representation: zip both columns row-by-row,
        # padding the shorter column with empty cells
        stats_table = text_tables.TRexTextInfo()
        stats_table.set_cols_align(["l", "l"])
        stats_table.set_deco(0)
        stats_table.set_cols_width([50, 45])
        max_lines = max(len(stats_data_left), len(stats_data_right))
        for line_num in range(max_lines):
            row = []
            if line_num < len(stats_data_left):
                key = list(stats_data_left.keys())[line_num]
                row.append('{:<12} : {}'.format(key, stats_data_left[key]))
            else:
                row.append('')
            if line_num < len(stats_data_right):
                key = list(stats_data_right.keys())[line_num]
                row.append('{:<12} : {}'.format(key, stats_data_right[key]))
            else:
                row.append('')
            stats_table.add_row(row)

        return {"global_statistics": ExportableStats(None, stats_table)}
280
    def _generate_streams_stats (self):
        """Build the per-PG-ID flow (stream) statistics table.

        Shows at most 4 packet-group IDs (TUI width limit), with
        low-pass-filtered rates, raw counters and formatted totals.
        """
        flow_stats = self._rx_stats_ref
        # for TUI - maximum 4
        pg_ids = list(filter(is_intable, flow_stats.latest_stats.keys()))[:4]
        stream_count = len(pg_ids)

        # row label -> per-PG values; the dash-only keys render as separator rows
        sstats_data = OrderedDict([ ('Tx pps',  []),
                                        ('Tx bps L2',      []),
                                        ('Tx bps L1',      []),
                                        ('---', [''] * stream_count),
                                        ('Rx pps',      []),
                                        ('Rx bps',      []),
                                        ('----', [''] * stream_count),
                                        ('opackets',    []),
                                        ('ipackets',    []),
                                        ('obytes',      []),
                                        ('ibytes',      []),
                                        ('-----', [''] * stream_count),
                                        ('tx_pkts',     []),
                                        ('rx_pkts',     []),
                                        ('tx_bytes',    []),
                                        ('rx_bytes',    [])
                                      ])



        # maximum 4
        for pg_id in pg_ids:

            # '*_lpf' counters are low-pass-filtered rates
            sstats_data['Tx pps'].append(flow_stats.get([pg_id, 'tx_pps_lpf', 'total'], format = True, suffix = "pps"))
            sstats_data['Tx bps L2'].append(flow_stats.get([pg_id, 'tx_bps_lpf', 'total'], format = True, suffix = "bps"))

            sstats_data['Tx bps L1'].append(flow_stats.get([pg_id, 'tx_bps_L1_lpf', 'total'], format = True, suffix = "bps"))

            sstats_data['Rx pps'].append(flow_stats.get([pg_id, 'rx_pps_lpf', 'total'], format = True, suffix = "pps"))
            sstats_data['Rx bps'].append(flow_stats.get([pg_id, 'rx_bps_lpf', 'total'], format = True, suffix = "bps"))

            # raw counters via get_rel(), plus human-formatted variants below
            sstats_data['opackets'].append(flow_stats.get_rel([pg_id, 'tx_pkts', 'total']))
            sstats_data['ipackets'].append(flow_stats.get_rel([pg_id, 'rx_pkts', 'total']))
            sstats_data['obytes'].append(flow_stats.get_rel([pg_id, 'tx_bytes', 'total']))
            sstats_data['ibytes'].append(flow_stats.get_rel([pg_id, 'rx_bytes', 'total']))
            sstats_data['tx_bytes'].append(flow_stats.get_rel([pg_id, 'tx_bytes', 'total'], format = True, suffix = "B"))
            sstats_data['rx_bytes'].append(flow_stats.get_rel([pg_id, 'rx_bytes', 'total'], format = True, suffix = "B"))
            sstats_data['tx_pkts'].append(flow_stats.get_rel([pg_id, 'tx_pkts', 'total'], format = True, suffix = "pkts"))
            sstats_data['rx_pkts'].append(flow_stats.get_rel([pg_id, 'rx_pkts', 'total'], format = True, suffix = "pkts"))


        stats_table = text_tables.TRexTextTable()
        stats_table.set_cols_align(["l"] + ["r"] * stream_count)
        stats_table.set_cols_width([10] + [17]   * stream_count)
        stats_table.set_cols_dtype(['t'] + ['t'] * stream_count)

        stats_table.add_rows([[k] + v
                              for k, v in sstats_data.items()],
                              header=False)

        header = ["PG ID"] + [key for key in pg_ids]
        stats_table.header(header)

        return {"streams_statistics": ExportableStats(sstats_data, stats_table)}
341
    def _generate_latency_stats(self):
        """Build the per-PG-ID latency statistics table (max 5 PG IDs),
        including a sliding window of the recent 'last_max' samples."""
        lat_stats = self._latency_stats_ref
        latency_window_size = 14    # number of 'Last-N' history rows displayed

        # for TUI - maximum 5
        pg_ids = list(filter(is_intable, lat_stats.latest_stats.keys()))[:5]
        stream_count = len(pg_ids)
        # row label -> per-PG values; the dash-only keys render as separator rows
        lstats_data = OrderedDict([('TX pkts',       []),
                                   ('RX pkts',       []),
                                   ('Max latency',   []),
                                   ('Avg latency',   []),
                                   ('-- Window --', [''] * stream_count),
                                   ('Last (max)',     []),
                                  ] + [('Last-%s' % i, []) for i in range(1, latency_window_size)] + [
                                   ('---', [''] * stream_count),
                                   ('Jitter',        []),
                                   ('----', [''] * stream_count),
                                   ('Errors',        []),
                                  ])

        # snapshot the shared history under the lock before iterating it
        with lat_stats.lock:
            history = [x for x in lat_stats.history]
        flow_stats = self._rx_stats_ref.get_stats()
        for pg_id in pg_ids:
            lstats_data['TX pkts'].append(flow_stats[pg_id]['tx_pkts']['total'] if pg_id in flow_stats else '')
            lstats_data['RX pkts'].append(flow_stats[pg_id]['rx_pkts']['total'] if pg_id in flow_stats else '')
            lstats_data['Avg latency'].append(try_int(lat_stats.get([pg_id, 'latency', 'average'])))
            lstats_data['Max latency'].append(try_int(lat_stats.get([pg_id, 'latency', 'total_max'])))
            lstats_data['Last (max)'].append(try_int(lat_stats.get([pg_id, 'latency', 'last_max'])))
            # fill the 'Last-N' window by walking the history backwards
            for i in range(1, latency_window_size):
                val = history[-i - 1].get(pg_id, {}).get('latency', {}).get('last_max', '') if len(history) > i else ''
                lstats_data['Last-%s' % i].append(try_int(val))
            lstats_data['Jitter'].append(try_int(lat_stats.get([pg_id, 'latency', 'jitter'])))
            # sum the sequence-error counters when they are numeric
            errors = 0
            seq_too_low = lat_stats.get([pg_id, 'err_cntrs', 'seq_too_low'])
            if is_integer(seq_too_low):
                errors += seq_too_low
            seq_too_high = lat_stats.get([pg_id, 'err_cntrs', 'seq_too_high'])
            if is_integer(seq_too_high):
                errors += seq_too_high
            lstats_data['Errors'].append(format_num(errors,
                                            opts = 'green' if errors == 0 else 'red'))


        stats_table = text_tables.TRexTextTable()
        stats_table.set_cols_align(["l"] + ["r"] * stream_count)
        stats_table.set_cols_width([12] + [14]   * stream_count)
        stats_table.set_cols_dtype(['t'] + ['t'] * stream_count)
        stats_table.add_rows([[k] + v
                              for k, v in lstats_data.items()],
                              header=False)

        header = ["PG ID"] + [key for key in pg_ids]
        stats_table.header(header)

        return {"latency_statistics": ExportableStats(lstats_data, stats_table)}
398
    def _generate_latency_histogram(self):
        """Build the latency histogram table (max 5 PG IDs, 17 buckets),
        followed by the per-PG error counter rows."""
        lat_stats = self._latency_stats_ref.latest_stats
        max_histogram_size = 17

        # for TUI - maximum 5
        pg_ids = list(filter(is_intable, lat_stats.keys()))[:5]

        # union of histogram bucket keys across all shown PG IDs
        merged_histogram = {}
        for pg_id in pg_ids:
            merged_histogram.update(lat_stats[pg_id]['latency']['histogram'])
        histogram_size = min(max_histogram_size, len(merged_histogram))

        stream_count = len(pg_ids)
        stats_table = text_tables.TRexTextTable()
        stats_table.set_cols_align(["l"] + ["r"] * stream_count)
        stats_table.set_cols_width([12] + [14]   * stream_count)
        stats_table.set_cols_dtype(['t'] + ['t'] * stream_count)

        # pad with blank rows so the table keeps a constant height in the TUI
        for i in range(max_histogram_size - histogram_size):
            if i == 0 and not merged_histogram:
                stats_table.add_row(['  No Data'] + [' '] * stream_count)
            else:
                stats_table.add_row([' '] * (stream_count + 1))
        # emit buckets largest-first; missing buckets show as a blank cell
        for key in list(reversed(sorted(merged_histogram.keys())))[:histogram_size]:
            hist_vals = []
            for pg_id in pg_ids:
                hist_vals.append(lat_stats[pg_id]['latency']['histogram'].get(key, ' '))
            stats_table.add_row([key] + hist_vals)

        stats_table.add_row(['- Counters -'] + [' '] * stream_count)
        # collect error counters, keyed by counter name in first-seen order
        err_cntrs_dict = OrderedDict()
        for pg_id in pg_ids:
            for err_cntr in sorted(lat_stats[pg_id]['err_cntrs'].keys()):
                if err_cntr not in err_cntrs_dict:
                    err_cntrs_dict[err_cntr] = [lat_stats[pg_id]['err_cntrs'][err_cntr]]
                else:
                    err_cntrs_dict[err_cntr].append(lat_stats[pg_id]['err_cntrs'][err_cntr])
        for err_cntr, val_list in err_cntrs_dict.items():
            stats_table.add_row([err_cntr] + val_list)
        header = ["PG ID"] + [key for key in pg_ids]
        stats_table.header(header)
        return {"latency_histogram": ExportableStats(None, stats_table)}
441
442    def _generate_cpu_util_stats(self):
443        util_stats = self._util_stats_ref.get_stats(use_1sec_cache = True)
444
445        stats_table = text_tables.TRexTextTable()
446        if util_stats:
447            if 'cpu' not in util_stats:
448                raise Exception("Excepting 'cpu' section in stats %s" % util_stats)
449            cpu_stats = util_stats['cpu']
450            hist_len = len(cpu_stats[0]["history"])
451            avg_len = min(5, hist_len)
452            show_len = min(15, hist_len)
453            stats_table.header(['Thread', 'Avg', 'Latest'] + list(range(-1, 0 - show_len, -1)))
454            stats_table.set_cols_align(['l'] + ['r'] * (show_len + 1))
455            stats_table.set_cols_width([10, 3, 6] + [3] * (show_len - 1))
456            stats_table.set_cols_dtype(['t'] * (show_len + 2))
457
458            for i in range(min(18, len(cpu_stats))):
459                history = cpu_stats[i]["history"]
460                ports = cpu_stats[i]["ports"]
461                avg = int(round(sum(history[:avg_len]) / avg_len))
462
463                # decode active ports for core
464                if ports == [-1, -1]:
465                    interfaces = "(IDLE)"
466                elif not -1 in ports:
467                    interfaces = "({:},{:})".format(ports[0], ports[1])
468                else:
469                    interfaces = "({:})".format(ports[0] if ports[0] != -1 else ports[1])
470
471                thread = "{:2} {:^7}".format(i, interfaces)
472                stats_table.add_row([thread, avg] + history[:show_len])
473        else:
474            stats_table.add_row(['No Data.'])
475        return {'cpu_util(%)': ExportableStats(None, stats_table)}
476
477    def _generate_mbuf_util_stats(self):
478        util_stats = self._util_stats_ref.get_stats(use_1sec_cache = True)
479        stats_table = text_tables.TRexTextTable()
480        if util_stats:
481            if 'mbuf_stats' not in util_stats:
482                raise Exception("Excepting 'mbuf_stats' section in stats %s" % util_stats)
483            mbuf_stats = util_stats['mbuf_stats']
484            for mbufs_per_socket in mbuf_stats.values():
485                first_socket_mbufs = mbufs_per_socket
486                break
487            if not self._util_stats_ref.mbuf_types_list:
488                mbuf_keys = list(first_socket_mbufs.keys())
489                mbuf_keys.sort(key = get_number_of_bytes)
490                self._util_stats_ref.mbuf_types_list = mbuf_keys
491            types_len = len(self._util_stats_ref.mbuf_types_list)
492            stats_table.set_cols_align(['l'] + ['r'] * (types_len + 1))
493            stats_table.set_cols_width([10] + [7] * (types_len + 1))
494            stats_table.set_cols_dtype(['t'] * (types_len + 2))
495            stats_table.header([''] + self._util_stats_ref.mbuf_types_list + ['RAM(MB)'])
496            total_list = []
497            sum_totals = 0
498            for mbuf_type in self._util_stats_ref.mbuf_types_list:
499                sum_totals += first_socket_mbufs[mbuf_type][1] * get_number_of_bytes(mbuf_type) + 64
500                total_list.append(first_socket_mbufs[mbuf_type][1])
501            sum_totals *= len(list(mbuf_stats.values()))
502            total_list.append(int(sum_totals/1e6))
503            stats_table.add_row(['Total:'] + total_list)
504            stats_table.add_row(['Used:'] + [''] * (types_len + 1))
505            for socket_name in sorted(list(mbuf_stats.keys())):
506                mbufs = mbuf_stats[socket_name]
507                socket_show_name = socket_name.replace('cpu-', '').replace('-', ' ').capitalize() + ':'
508                sum_used = 0
509                used_list = []
510                percentage_list = []
511                for mbuf_type in self._util_stats_ref.mbuf_types_list:
512                    used = mbufs[mbuf_type][1] - mbufs[mbuf_type][0]
513                    sum_used += used * get_number_of_bytes(mbuf_type) + 64
514                    used_list.append(used)
515                    percentage_list.append('%s%%' % int(100 * used / mbufs[mbuf_type][1]))
516                used_list.append(int(sum_used/1e6))
517                stats_table.add_row([socket_show_name] + used_list)
518                stats_table.add_row(['Percent:'] + percentage_list + [''])
519        else:
520            stats_table.add_row(['No Data.'])
521        return {'mbuf_util': ExportableStats(None, stats_table)}
522
523    def _generate_xstats(self, port_id_list, include_zero_lines = False):
524        relevant_ports = [port.port_id for port in self.__get_relevant_ports(port_id_list)]
525        # get the data on relevant ports
526        xstats_data = OrderedDict()
527        for port_id in relevant_ports:
528            for key, val in self._xstats_ref.get_stats(port_id).items():
529                if key not in xstats_data:
530                    xstats_data[key] = []
531                xstats_data[key].append(val)
532
533        # put into table
534        stats_table = text_tables.TRexTextTable()
535        stats_table.header(['Name:'] + ['Port %s:' % port_id for port_id in relevant_ports])
536        stats_table.set_cols_align(['l'] + ['r'] * len(relevant_ports))
537        stats_table.set_cols_width([30] + [15] * len(relevant_ports))
538        stats_table.set_cols_dtype(['t'] * (len(relevant_ports) + 1))
539        for key, arr in xstats_data.items():
540            if include_zero_lines or list(filter(None, arr)):
541                key = key[:28]
542                stats_table.add_row([key] + arr)
543        return {'xstats:': ExportableStats(None, stats_table)}
544
545    @staticmethod
546    def _get_rational_block_char(value, range_start, interval):
547        # in Konsole, utf-8 is sometimes printed with artifacts, return ascii for now
548        #return 'X' if value >= range_start + float(interval) / 2 else ' '
549
550        if sys.__stdout__.encoding != 'UTF-8':
551            return 'X' if value >= range_start + float(interval) / 2 else ' '
552
553        value -= range_start
554        ratio = float(value) / interval
555        if ratio <= 0.0625:
556            return u' '         # empty block
557        if ratio <= 0.1875:
558            return u'\u2581'    # 1/8
559        if ratio <= 0.3125:
560            return u'\u2582'    # 2/8
561        if ratio <= 0.4375:
562            return u'\u2583'    # 3/8
563        if ratio <= 0.5625:
564            return u'\u2584'    # 4/8
565        if ratio <= 0.6875:
566            return u'\u2585'    # 5/8
567        if ratio <= 0.8125:
568            return u'\u2586'    # 6/8
569        if ratio <= 0.9375:
570            return u'\u2587'    # 7/8
571        return u'\u2588'        # full block
572
573    def _generate_port_graph(self, port_id_list):
574        relevant_port = self.__get_relevant_ports(port_id_list)[0]
575        hist_len = len(relevant_port.port_stats.history)
576        hist_maxlen = relevant_port.port_stats.history.maxlen
577        util_tx_hist = [0] * (hist_maxlen - hist_len) + [round(relevant_port.port_stats.history[i]['tx_percentage']) for i in range(hist_len)]
578        util_rx_hist = [0] * (hist_maxlen - hist_len) + [round(relevant_port.port_stats.history[i]['rx_percentage']) for i in range(hist_len)]
579
580
581        stats_table = text_tables.TRexTextTable()
582        stats_table.header([' Util(%)', 'TX', 'RX'])
583        stats_table.set_cols_align(['c', 'c', 'c'])
584        stats_table.set_cols_width([8, hist_maxlen, hist_maxlen])
585        stats_table.set_cols_dtype(['t', 't', 't'])
586
587        for y in range(95, -1, -5):
588            stats_table.add_row([y, ''.join([self._get_rational_block_char(util_tx, y, 5) for util_tx in util_tx_hist]),
589                                    ''.join([self._get_rational_block_char(util_rx, y, 5) for util_rx in util_rx_hist])])
590
591        return {"port_graph": ExportableStats({}, stats_table)}
592
    def _generate_port_stats(self, port_id_list):
        """Build the per-port statistics table; an aggregated 'total' column
        is appended when more than one port is shown."""
        relevant_ports = self.__get_relevant_ports(port_id_list)

        return_stats_data = {}
        # row label -> per-port values; the dash-only keys render as separator rows
        per_field_stats = OrderedDict([("owner", []),
                                       ('link', []),
                                       ("state", []),
                                       ("speed", []),
                                       ("CPU util.", []),
                                       ("--", []),
                                       ("Tx bps L2", []),
                                       ("Tx bps L1", []),
                                       ("Tx pps", []),
                                       ("Line Util.", []),

                                       ("---", []),
                                       ("Rx bps", []),
                                       ("Rx pps", []),

                                       ("----", []),
                                       ("opackets", []),
                                       ("ipackets", []),
                                       ("obytes", []),
                                       ("ibytes", []),
                                       ("tx-bytes", []),
                                       ("rx-bytes", []),
                                       ("tx-pkts", []),
                                       ("rx-pkts", []),

                                       ("-----", []),
                                       ("oerrors", []),
                                       ("ierrors", []),

                                      ])

        # accumulator for the aggregated 'total' column
        total_stats = CPortStats(None)

        for port_obj in relevant_ports:
            # fetch port data
            port_stats = port_obj.generate_port_stats()

            total_stats += port_obj.port_stats

            # populate to data structures
            return_stats_data[port_obj.port_id] = port_stats
            self.__update_per_field_dict(port_stats, per_field_stats)

        total_cols = len(relevant_ports)
        header = ["port"] + [port.port_id for port in relevant_ports]

        # only show the 'total' column when there is more than one port
        if (total_cols > 1):
            self.__update_per_field_dict(total_stats.generate_stats(), per_field_stats)
            header += ['total']
            total_cols += 1

        stats_table = text_tables.TRexTextTable()
        stats_table.set_cols_align(["l"] + ["r"] * total_cols)
        stats_table.set_cols_width([10] + [17]   * total_cols)
        stats_table.set_cols_dtype(['t'] + ['t'] * total_cols)

        stats_table.add_rows([[k] + v
                              for k, v in per_field_stats.items()],
                              header=False)

        stats_table.header(header)

        return {"port_statistics": ExportableStats(return_stats_data, stats_table)}
660
661    def _generate_port_status(self, port_id_list):
662        relevant_ports = self.__get_relevant_ports(port_id_list)
663
664        return_stats_data = {}
665        per_field_status = OrderedDict([("driver", []),
666                                        ("description", []),
667                                        ("link status", []),
668                                        ("link speed", []),
669                                        ("port status", []),
670                                        ("promiscuous", []),
671                                        ("flow ctrl", []),
672                                        ("--", []),
673                                        ("src IPv4", []),
674                                        ("src MAC", []),
675                                        ("---", []),
676                                        ("Destination", []),
677                                        ("ARP Resolution", []),
678                                        ("----", []),
679                                        ("PCI Address", []),
680                                        ("NUMA Node", []),
681                                        ("-----", []),
682                                        ("RX Filter Mode", []),
683                                        ("RX Queueing", []),
684                                        ("RX sniffer", []),
685                                        ]
686                                       )
687
688        for port_obj in relevant_ports:
689            # fetch port data
690            # port_stats = self._async_stats.get_port_stats(port_obj.port_id)
691            port_status = port_obj.generate_port_status()
692
693            # populate to data structures
694            return_stats_data[port_obj.port_id] = port_status
695
696            self.__update_per_field_dict(port_status, per_field_status)
697
698        stats_table = text_tables.TRexTextTable()
699        stats_table.set_cols_align(["l"] + ["c"]*len(relevant_ports))
700        stats_table.set_cols_width([15] + [20] * len(relevant_ports))
701
702        stats_table.add_rows([[k] + v
703                              for k, v in per_field_status.items()],
704                             header=False)
705        stats_table.header(["port"] + [port.port_id
706                                       for port in relevant_ports])
707
708        return {"port_status": ExportableStats(return_stats_data, stats_table)}
709
710    def _generate_single_port_streams_info(self, port_obj, stream_id_list):
711
712        return_streams_data = port_obj.generate_loaded_streams_sum()
713
714        if not return_streams_data.get("streams"):
715            # we got no streams available
716            return None
717
718        # FORMAT VALUES ON DEMAND
719
720        # because we mutate this - deep copy before
721        return_streams_data = copy.deepcopy(return_streams_data)
722
723        p_type_field_len = 0
724
725        for stream_id, stream_id_sum in return_streams_data['streams'].items():
726            stream_id_sum['packet_type'] = self._trim_packet_headers(stream_id_sum['packet_type'], 30)
727            p_type_field_len = max(p_type_field_len, len(stream_id_sum['packet_type']))
728
729        info_table = text_tables.TRexTextTable()
730        info_table.set_cols_align(["c"] + ["l"] + ["r"] + ["c"] + ["r"] + ["c"])
731        info_table.set_cols_width([10]   + [p_type_field_len]  + [8]   + [16]  + [15]  + [12])
732        info_table.set_cols_dtype(["t"] + ["t"] + ["t"] + ["t"] + ["t"] + ["t"])
733
734        info_table.add_rows([v.values()
735                             for k, v in return_streams_data['streams'].items()],
736                             header=False)
737        info_table.header(["ID", "packet type", "length", "mode", "rate", "next stream"])
738
739        return ExportableStats(return_streams_data, info_table)
740
741
742    def __get_relevant_ports(self, port_id_list):
743        # fetch owned ports
744        ports = [port_obj
745                 for _, port_obj in self._ports_dict.items()
746                 if port_obj.port_id in port_id_list]
747
748        # display only the first FOUR options, by design
749        if len(ports) > 4:
750            #self.logger is not defined
751            #self.logger.log(format_text("[WARNING]: ", 'magenta', 'bold'), format_text("displaying up to 4 ports", 'magenta'))
752            ports = ports[:4]
753        return ports
754
755    def __update_per_field_dict(self, dict_src_data, dict_dest_ref):
756        for key, val in dict_src_data.items():
757            if key in dict_dest_ref:
758                dict_dest_ref[key].append(val)
759
760    @staticmethod
761    def _trim_packet_headers(headers_str, trim_limit):
762        if len(headers_str) < trim_limit:
763            # do nothing
764            return headers_str
765        else:
766            return (headers_str[:trim_limit-3] + "...")
767
768
769
class CTRexStats(object):
    """Abstract base for a statistics object.

    Holds an absolute 'latest' snapshot, a 'reference' snapshot used as the
    zero point for relative counters, and a bounded history of snapshots
    used for trend calculation.
    """

    def __init__(self):
        self.reference_stats = {}
        self.latest_stats = {}
        self.last_update_ts = time.time()
        # bounded snapshot history for get_trend()
        self.history = deque(maxlen = 47)
        # protects 'history' - deque iteration is not thread-safe
        self.lock = threading.Lock()
        self.has_baseline = False

    ######## abstract methods ##########

    # get stats for user / API
    def get_stats (self):
        raise NotImplementedError()

    # generate format stats (for TUI)
    def generate_stats(self):
        raise NotImplementedError()

    # called when a snapshot arrives - add more fields
    # FIX: signature now matches the call site in update() and every
    # concrete override in this file, which take only the snapshot
    def _update (self, snapshot):
        raise NotImplementedError()


    ######## END abstract methods ##########

    def update(self, snapshot, baseline):
        """Feed a new snapshot; 'baseline' marks it as the reference point."""

        # no update is valid before baseline
        if not self.has_baseline and not baseline:
            return

        # call the underlying method
        rc = self._update(snapshot)
        if not rc:
            return

        # sync one time - first baseline snapshot becomes the reference
        if not self.has_baseline and baseline:
            self.reference_stats = copy.deepcopy(self.latest_stats)
            self.has_baseline = True

        # save history
        with self.lock:
            self.history.append(self.latest_stats)


    def clear_stats(self):
        """Re-base the reference on the latest snapshot (zeroes get_rel values)."""
        self.reference_stats = copy.deepcopy(self.latest_stats)
        self.history.clear()


    def invalidate (self):
        """Drop the latest snapshot - get() returns 'N/A' until the next update."""
        self.latest_stats = {}


    def _get (self, src, field, default = None):
        """Fetch 'field' from dict 'src'; a list field is a path into nested dicts."""
        if isinstance(field, list):
            # deep
            value = src
            for level in field:
                if level not in value:
                    return default
                value = value[level]
        else:
            # flat
            if field not in src:
                return default
            value = src[field]

        return value

    def get(self, field, format=False, suffix="", opts = None):
        """Absolute value of 'field' from the latest snapshot ('N/A' if missing)."""
        value = self._get(self.latest_stats, field)
        if value is None:
            return 'N/A'

        return value if not format else format_num(value, suffix = suffix, opts = opts)


    def get_rel(self, field, format=False, suffix=""):
        """Value of 'field' relative to the reference snapshot ('N/A' if missing)."""
        ref_value = self._get(self.reference_stats, field)
        latest_value = self._get(self.latest_stats, field)

        # latest value is an aggregation - must contain the value
        if latest_value is None:
            return 'N/A'

        if ref_value is None:
            ref_value = 0

        value = latest_value - ref_value

        return value if not format else format_num(value, suffix)


    # get trend for a field
    # NOTE: 'percision' (sic) kept for backward compatibility with keyword callers
    def get_trend (self, field, use_raw = False, percision = 10.0):
        if field not in self.latest_stats:
            return 0

        # not enough history - no trend
        if len(self.history) < 5:
            return 0

        # absolute value is too low - considered noise
        if self.latest_stats[field] < percision:
            return 0

        # must lock, deque is not thread-safe for iteration
        with self.lock:
            field_samples = [sample[field] for sample in list(self.history)[-5:]]

        if use_raw:
            return calculate_diff_raw(field_samples)
        else:
            return calculate_diff(field_samples)


    def get_trend_gui (self, field, show_value = False, use_raw = False, up_color = 'red', down_color = 'green'):
        """Render the trend of 'field' as colored arrows (optionally with the value)."""
        v = self.get_trend(field, use_raw)

        value = abs(v)

        # use arrows if utf-8 is supported
        if sys.__stdout__.encoding == 'UTF-8':
            arrow = u'\u25b2' if v > 0 else u'\u25bc'
        else:
            arrow = ''

        if sys.version_info < (3,0):
            arrow = arrow.encode('utf-8')

        color = up_color if v > 0 else down_color

        # change in 1% is not meaningful
        if value < 1:
            return ""

        elif value > 5:

            if show_value:
                return format_text("{0}{0}{0} {1:.2f}%".format(arrow,v), color)
            else:
                return format_text("{0}{0}{0}".format(arrow), color)

        elif value > 2:

            if show_value:
                return format_text("{0}{0} {1:.2f}%".format(arrow,v), color)
            else:
                return format_text("{0}{0}".format(arrow), color)

        else:
            if show_value:
                return format_text("{0} {1:.2f}%".format(arrow,v), color)
            else:
                return format_text("{0}".format(arrow), color)
930
931
932
class CGlobalStats(CTRexStats):
    """Server-wide statistics: CPU utilization, aggregate TX/RX rates,
    drops and queue-full counter."""

    def __init__(self, connection_info, server_version, ports_dict_ref, events_handler):
        super(CGlobalStats, self).__init__()

        self.connection_info = connection_info
        self.server_version  = server_version
        self._ports_dict     = ports_dict_ref
        self.events_handler  = events_handler

        # alert when utilization crosses 85%, clear below 60%
        self.watched_cpu_util    = WatchedField('CPU util.', '%', 85, 60, events_handler)
        self.watched_rx_cpu_util = WatchedField('RX core util.', '%', 85, 60, events_handler)

    def get_stats (self):
        """Return a plain dict of the global counters for the API user."""
        stats = {}

        # absolute gauges, straight from the latest snapshot
        for key, field in (('cpu_util',    'm_cpu_util'),
                           ('rx_cpu_util', 'm_rx_cpu_util'),
                           ('bw_per_core', 'm_bw_per_core'),
                           ('tx_bps',      'm_tx_bps'),
                           ('tx_pps',      'm_tx_pps'),
                           ('rx_bps',      'm_rx_bps'),
                           ('rx_pps',      'm_rx_pps'),
                           ('rx_drop_bps', 'm_rx_drop_bps')):
            stats[key] = self.get(field)

        # relative to the baseline
        stats['queue_full'] = self.get_rel("m_total_queue_full")

        return stats



    def _update(self, snapshot):
        # derive L1 TX bps from the L2 bps and pps gauges
        snapshot['m_tx_bps_L1'] = calc_bps_L1(snapshot.get("m_tx_bps"),
                                              snapshot.get("m_tx_pps"))

        # adopt the snapshot as-is
        self.latest_stats = snapshot

        # feed the threshold watchers (may fire events)
        self.watched_cpu_util.update(snapshot.get('m_cpu_util'))
        self.watched_rx_cpu_util.update(snapshot.get('m_rx_cpu_util'))

        return True
983
984
class CPortStats(CTRexStats):
    """Per-port statistics.

    port_obj may be None: a None-port instance serves as an accumulator
    (the "total" column) that sums several ports via the '+=' operator
    (see __add__ below).
    """

    def __init__(self, port_obj):
        super(CPortStats, self).__init__()
        # owning port object, or None for an aggregate ("total") instance
        self._port_obj = port_obj

    @staticmethod
    def __merge_dicts (target, src):
        # add src values into target key by key; missing keys are copied
        for k, v in src.items():
            if k in target:
                target[k] += v
            else:
                target[k] = v


    def __add__ (self, x):
        """Accumulate another port's stats into self and return self.

        NOTE: despite the operator name this mutates the left operand in
        place - it is used as 'total_stats += port.port_stats' to build
        the aggregate column.
        """
        if not isinstance(x, CPortStats):
            raise TypeError("cannot add non stats object to stats")

        # main stats
        if not self.latest_stats:
            self.latest_stats = {}

        self.__merge_dicts(self.latest_stats, x.latest_stats)

        # reference stats
        if x.reference_stats:
            if not self.reference_stats:
                self.reference_stats = x.reference_stats.copy()
            else:
                self.__merge_dicts(self.reference_stats, x.reference_stats)

        # history - should be traverse with a lock
        # both deques are locked; entries are merged pairwise in order
        with self.lock, x.lock:
            if not self.history:
                self.history = copy.deepcopy(x.history)
            else:
                for h1, h2 in zip(self.history, x.history):
                    self.__merge_dicts(h1, h2)

        return self

    # for port we need to do something smarter
    def get_stats (self):
        """Return a plain dict: counters relative to baseline, rates absolute."""
        stats = {}

        # packet / byte / error counters - relative to the last clear
        stats['opackets'] = self.get_rel("opackets")
        stats['ipackets'] = self.get_rel("ipackets")
        stats['obytes']   = self.get_rel("obytes")
        stats['ibytes']   = self.get_rel("ibytes")
        stats['oerrors']  = self.get_rel("oerrors")
        stats['ierrors']  = self.get_rel("ierrors")

        # rate gauges - absolute values from the latest snapshot
        stats['tx_bps']     = self.get("m_total_tx_bps")
        stats['tx_pps']     = self.get("m_total_tx_pps")
        stats['tx_bps_L1']  = self.get("m_total_tx_bps_L1")
        stats['tx_util']    = self.get("m_tx_util")

        stats['rx_bps']     = self.get("m_total_rx_bps")
        stats['rx_pps']     = self.get("m_total_rx_pps")
        stats['rx_bps_L1']  = self.get("m_total_rx_bps_L1")
        stats['rx_util']    = self.get("m_rx_util")

        return stats



    def _update(self, snapshot):
        """Augment the raw snapshot with derived L1 rates and utilization,
        then adopt it as the latest stats."""
        speed = self._port_obj.get_speed_bps()

        # L1 bps
        tx_bps  = snapshot.get("m_total_tx_bps")
        tx_pps  = snapshot.get("m_total_tx_pps")
        rx_bps  = snapshot.get("m_total_rx_bps")
        rx_pps  = snapshot.get("m_total_rx_pps")
        ts_diff = 0.5 # TODO: change this to real ts diff from server

        bps_tx_L1 = calc_bps_L1(tx_bps, tx_pps)
        bps_rx_L1 = calc_bps_L1(rx_bps, rx_pps)

        # smoothed utilization, as percentage of line rate (0 when speed unknown)
        snapshot['m_total_tx_bps_L1'] = bps_tx_L1
        if speed:
            snapshot['m_tx_util'] = (bps_tx_L1 / speed) * 100.0
        else:
            snapshot['m_tx_util'] = 0

        snapshot['m_total_rx_bps_L1'] = bps_rx_L1
        if speed:
            snapshot['m_rx_util'] = (bps_rx_L1 / speed) * 100.0
        else:
            snapshot['m_rx_util'] = 0

        # TX line util not smoothed
        # computed from the raw counter delta vs the previous snapshot
        diff_tx_pkts = snapshot.get('opackets', 0) - self.latest_stats.get('opackets', 0)
        diff_tx_bytes = snapshot.get('obytes', 0) - self.latest_stats.get('obytes', 0)
        tx_bps_L1 = calc_bps_L1(8.0 * diff_tx_bytes / ts_diff, float(diff_tx_pkts) / ts_diff)
        if speed:
            snapshot['tx_percentage'] = 100.0 * tx_bps_L1 / speed
        else:
            snapshot['tx_percentage'] = 0

        # RX line util not smoothed
        diff_rx_pkts = snapshot.get('ipackets', 0) - self.latest_stats.get('ipackets', 0)
        diff_rx_bytes = snapshot.get('ibytes', 0) - self.latest_stats.get('ibytes', 0)
        rx_bps_L1 = calc_bps_L1(8.0 * diff_rx_bytes / ts_diff, float(diff_rx_pkts) / ts_diff)
        if speed:
            snapshot['rx_percentage'] = 100.0 * rx_bps_L1 / speed
        else:
            snapshot['rx_percentage'] = 0

        # simple...
        self.latest_stats = snapshot

        return True


    def generate_stats(self):
        """Build the formatted (TUI) stats dict for this port.

        All values are display strings; '--'-style keys are separators.
        On an aggregate (None-port) instance the port-specific fields are
        left empty.
        """

        port_state = self._port_obj.get_port_state_name() if self._port_obj else ""
        if port_state == "TRANSMITTING":
            port_state = format_text(port_state, 'green', 'bold')
        elif port_state == "PAUSE":
            port_state = format_text(port_state, 'magenta', 'bold')
        else:
            port_state = format_text(port_state, 'bold')

        if self._port_obj:
            if 'link' in self._port_obj.attr:
                # explicit False means link down; missing/True renders as UP
                if self._port_obj.attr.get('link', {}).get('up') == False:
                    link_state = format_text('DOWN', 'red', 'bold')
                else:
                    link_state = 'UP'
            else:
                link_state = 'N/A'
        else:
            link_state = ''

        # default rate format modifiers
        rate_format = {'bpsl1': None, 'bps': None, 'pps': None, 'percentage': 'bold'}

        # mark owned ports by color
        if self._port_obj:
            owner = self._port_obj.get_owner()
            # highlight the rate type the user last controlled
            rate_format[self._port_obj.last_factor_type] = ('blue', 'bold')
            if self._port_obj.is_acquired():
                owner = format_text(owner, 'green')

        else:
            owner = ''


        return {"owner": owner,
                "state": "{0}".format(port_state),
                'link': link_state,
                "speed": self._port_obj.get_formatted_speed() if self._port_obj else '',
                "CPU util.": "{0} {1}%".format(self.get_trend_gui("m_cpu_util", use_raw = True),
                                               format_threshold(round_float(self.get("m_cpu_util")), [85, 100], [0, 85])) if self._port_obj else '' ,
                "--": " ",
                "---": " ",
                "----": " ",
                "-----": " ",

                "Tx bps L1": "{0} {1}".format(self.get_trend_gui("m_total_tx_bps_L1", show_value = False),
                                               self.get("m_total_tx_bps_L1", format = True, suffix = "bps", opts = rate_format['bpsl1'])),

                "Tx bps L2": "{0} {1}".format(self.get_trend_gui("m_total_tx_bps", show_value = False),
                                                self.get("m_total_tx_bps", format = True, suffix = "bps", opts = rate_format['bps'])),

                "Line Util.": "{0} {1}".format(self.get_trend_gui("m_tx_util", show_value = False) if self._port_obj else "",
                                               self.get("m_tx_util", format = True, suffix = "%", opts = rate_format['percentage']) if self._port_obj else ""),

                "Rx bps": "{0} {1}".format(self.get_trend_gui("m_total_rx_bps", show_value = False),
                                            self.get("m_total_rx_bps", format = True, suffix = "bps")),

                "Tx pps": "{0} {1}".format(self.get_trend_gui("m_total_tx_pps", show_value = False),
                                            self.get("m_total_tx_pps", format = True, suffix = "pps", opts = rate_format['pps'])),

                "Rx pps": "{0} {1}".format(self.get_trend_gui("m_total_rx_pps", show_value = False),
                                            self.get("m_total_rx_pps", format = True, suffix = "pps")),

                 "opackets" : self.get_rel("opackets"),
                 "ipackets" : self.get_rel("ipackets"),
                 "obytes"   : self.get_rel("obytes"),
                 "ibytes"   : self.get_rel("ibytes"),

                 "tx-bytes": self.get_rel("obytes", format = True, suffix = "B"),
                 "rx-bytes": self.get_rel("ibytes", format = True, suffix = "B"),
                 "tx-pkts": self.get_rel("opackets", format = True, suffix = "pkts"),
                 "rx-pkts": self.get_rel("ipackets", format = True, suffix = "pkts"),

                 # errors render green when zero, red otherwise
                 "oerrors"  : format_num(self.get_rel("oerrors"),
                                         compact = False,
                                         opts = 'green' if (self.get_rel("oerrors")== 0) else 'red'),

                 "ierrors"  : format_num(self.get_rel("ierrors"),
                                         compact = False,
                                         opts = 'green' if (self.get_rel("ierrors")== 0) else 'red'),

                }
1184
1185
class CLatencyStats(CTRexStats):
    """Latency statistics per packet-group id (plus a 'global' section)."""

    def __init__(self, ports):
        super(CLatencyStats, self).__init__()


    # for API
    def get_stats (self):
        return copy.deepcopy(self.latest_stats)


    def _update(self, snapshot):
        """Normalize a raw latency snapshot into {'global': ..., <pg_id>: ...}."""
        if snapshot is None:
            snapshot = {}

        result = {'global': {}}

        # global error counters, defaulting to 0 when absent
        glob = snapshot.get('global', {})
        for field in ['bad_hdr', 'old_flow']:
            result['global'][field] = glob[field] if field in glob else 0

        # only the currently-active packet group ids (integer-like keys)
        for pg_id in [k for k in snapshot.keys() if is_intable(k)]:
            current_pg = snapshot[pg_id]
            int_pg_id = int(pg_id)

            entry = {}
            result[int_pg_id] = entry
            entry['err_cntrs'] = current_pg['err_cntrs']
            entry['latency'] = {}

            if 'latency' in current_pg:
                lat = current_pg['latency']

                # missing fields become StatNotAvailable placeholders
                for field in ['jitter', 'average', 'total_max', 'last_max']:
                    entry['latency'][field] = lat[field] if field in lat else StatNotAvailable(field)

                if 'histogram' in lat:
                    hist = {int(bucket): lat['histogram'][bucket]
                            for bucket in lat['histogram']}
                    entry['latency']['histogram'] = hist
                    # bucket 0 is reported as 2 (lowest meaningful value)
                    min_val = min(hist.keys())
                    entry['latency']['total_min'] = 2 if min_val == 0 else min_val
                else:
                    entry['latency']['total_min'] = StatNotAvailable('total_min')
                    entry['latency']['histogram'] = {}

        self.latest_stats = result
        return True
1238
1239
1240# RX stats objects - COMPLEX :-(
class CRxStats(CTRexStats):
    """Per-stream (flow-stats / pg_id) RX statistics.

    Merges each incoming snapshot with the previous one and derives
    per-port and total rates, including low-pass-filtered (lpf) variants.
    """

    def __init__(self, ports):
        super(CRxStats, self).__init__()
        self.ports = ports


    # calculates a diff between previous snapshot
    # and current one
    def calculate_diff_sec (self, current, prev):
        """Seconds elapsed between two snapshots, from their 'ts' fields
        (value / freq ticks); 0.0 when there is no previous snapshot."""
        if not 'ts' in current:
            raise ValueError("INTERNAL ERROR: RX stats snapshot MUST contain 'ts' field")

        if prev:
            prev_ts   = prev['ts']
            now_ts    = current['ts']
            diff_sec  = (now_ts['value'] - prev_ts['value']) / float(now_ts['freq'])
        else:
            diff_sec = 0.0

        return diff_sec


    # this is the heart of the complex
    def process_single_pg (self, current_pg, prev_pg):
        """Merge one packet group's current counters over the previous ones
        and recompute each counter's per-field 'total'."""

        # start with the previous PG
        output = copy.deepcopy(prev_pg)

        for field in ['tx_pkts', 'tx_bytes', 'rx_pkts', 'rx_bytes']:
            # is in the first time ? (nothing in prev)
            if field not in output:
                output[field] = {}

            # does the current snapshot has this field ?
            if field in current_pg:
                # overwrite with the newer per-port values (integer keys only)
                for port, pv in current_pg[field].items():
                    if not is_intable(port):
                        continue

                    output[field][port] = pv

            # sum up
            # total stays None when no per-port value exists at all
            total = None
            for port, pv in output[field].items():
                if not is_intable(port):
                    continue
                if total is None:
                    total = 0
                total += pv

            output[field]['total'] = total


        return output


    def process_snapshot (self, current, prev):
        """Build the new aggregated snapshot from the raw 'current' and the
        previously aggregated 'prev'; also prunes dead reference entries."""

        # final output
        output = {}

        # copy timestamp field
        output['ts'] = current['ts']

        # global (not per pg_id) error counters
        output['global'] = {}
        for field in ['rx_err', 'tx_err']:
            output['global'][field] = {}
            if 'global' in current and field in current['global']:
                for port in current['global'][field]:
                    output['global'][field][int(port)] = current['global'][field][port]

        # we care only about the current active keys
        pg_ids = list(filter(is_intable, current.keys()))

        for pg_id in pg_ids:

            current_pg = current.get(pg_id, {})

            # first time - we do not care
            if current_pg.get('first_time'):
                # new value - ignore history
                output[pg_id] = self.process_single_pg(current_pg, {})
                self.reference_stats[pg_id] = {}

                # 'dry' B/W
                self.calculate_bw_for_pg(output[pg_id])

            else:
                # aggregate the two values
                prev_pg = prev.get(pg_id, {})
                output[pg_id] = self.process_single_pg(current_pg, prev_pg)

                # calculate B/W
                diff_sec = self.calculate_diff_sec(current, prev)
                self.calculate_bw_for_pg(output[pg_id], prev_pg, diff_sec)


        # cleanp old reference values - they are dead
        ref_pg_ids = list(filter(is_intable, self.reference_stats.keys()))

        deleted_pg_ids = set(ref_pg_ids).difference(pg_ids)
        for d_pg_id in deleted_pg_ids:
            del self.reference_stats[d_pg_id]

        return output



    def calculate_bw_for_pg (self, pg_current, pg_prev = None, diff_sec = 0.0):
        """Fill pg_current with per-port pps/bps/bps_L1 rates (raw and lpf).

        With no usable previous snapshot all rate dicts are reset empty.
        """
        # no previous values
        if (not pg_prev) or not (diff_sec > 0):
            pg_current['tx_pps']        = {}
            pg_current['tx_bps']        = {}
            pg_current['tx_bps_L1']     = {}
            pg_current['tx_line_util']  = {}
            pg_current['rx_pps']        = {}
            pg_current['rx_bps']        = {}
            pg_current['rx_bps_L1']     = {}
            pg_current['rx_line_util']  = {}

            pg_current['tx_pps_lpf']    = {}
            pg_current['tx_bps_lpf']    = {}
            pg_current['tx_bps_L1_lpf'] = {}
            pg_current['rx_pps_lpf']    = {}
            pg_current['rx_bps_lpf']    = {}
            pg_current['rx_bps_L1_lpf'] = {}
            return

        # TX
        for port in pg_current['tx_pkts'].keys():

            prev_tx_pps   = pg_prev['tx_pps'].get(port)
            now_tx_pkts   = pg_current['tx_pkts'].get(port)
            prev_tx_pkts  = pg_prev['tx_pkts'].get(port)
            pg_current['tx_pps'][port], pg_current['tx_pps_lpf'][port] = self.calc_pps(prev_tx_pps, now_tx_pkts, prev_tx_pkts, diff_sec)

            prev_tx_bps   = pg_prev['tx_bps'].get(port)
            now_tx_bytes  = pg_current['tx_bytes'].get(port)
            prev_tx_bytes = pg_prev['tx_bytes'].get(port)

            pg_current['tx_bps'][port], pg_current['tx_bps_lpf'][port] = self.calc_bps(prev_tx_bps, now_tx_bytes, prev_tx_bytes, diff_sec)

            # L1 rates are derivable only when both bps and pps are known
            if pg_current['tx_bps'].get(port) != None and pg_current['tx_pps'].get(port) != None:
                pg_current['tx_bps_L1'][port] = calc_bps_L1(pg_current['tx_bps'][port], pg_current['tx_pps'][port])
                pg_current['tx_bps_L1_lpf'][port] = calc_bps_L1(pg_current['tx_bps_lpf'][port], pg_current['tx_pps_lpf'][port])
            else:
                pg_current['tx_bps_L1'][port] = None
                pg_current['tx_bps_L1_lpf'][port] = None


        # RX
        for port in pg_current['rx_pkts'].keys():

            prev_rx_pps   = pg_prev['rx_pps'].get(port)
            now_rx_pkts   = pg_current['rx_pkts'].get(port)
            prev_rx_pkts  = pg_prev['rx_pkts'].get(port)
            pg_current['rx_pps'][port], pg_current['rx_pps_lpf'][port] = self.calc_pps(prev_rx_pps, now_rx_pkts, prev_rx_pkts, diff_sec)

            prev_rx_bps   = pg_prev['rx_bps'].get(port)
            now_rx_bytes  = pg_current['rx_bytes'].get(port)
            prev_rx_bytes = pg_prev['rx_bytes'].get(port)
            pg_current['rx_bps'][port], pg_current['rx_bps_lpf'][port] = self.calc_bps(prev_rx_bps, now_rx_bytes, prev_rx_bytes, diff_sec)
            if pg_current['rx_bps'].get(port) != None and pg_current['rx_pps'].get(port) != None:
                pg_current['rx_bps_L1'][port] = calc_bps_L1(pg_current['rx_bps'][port], pg_current['rx_pps'][port])
                pg_current['rx_bps_L1_lpf'][port] = calc_bps_L1(pg_current['rx_bps_lpf'][port], pg_current['rx_pps_lpf'][port])
            else:
                pg_current['rx_bps_L1'][port] = None
                pg_current['rx_bps_L1_lpf'][port] = None


    def calc_pps (self, prev_bw, now, prev, diff_sec):
        # packets per second - counter delta, no x8
        return self.calc_bw(prev_bw, now, prev, diff_sec, False)


    def calc_bps (self, prev_bw, now, prev, diff_sec):
        # bits per second - byte delta x8
        return self.calc_bw(prev_bw, now, prev, diff_sec, True)

    # returns tuple - first value is real, second is low pass filtered
    def calc_bw (self, prev_bw, now, prev, diff_sec, is_bps):
        # B/W is not valid when the values are None
        if (now is None) or (prev is None):
            return (None, None)

        # calculate the B/W for current snapshot
        current_bw = (now - prev) / diff_sec
        if is_bps:
            current_bw *= 8

        # previous B/W is None ? ignore it
        if prev_bw is None:
            prev_bw = 0

        # lpf: simple 50/50 blend of previous and current B/W
        return (current_bw, 0.5 * prev_bw + 0.5 * current_bw)




    def _update (self, snapshot):
        #print(snapshot)
        # generate a new snapshot
        new_snapshot = self.process_snapshot(snapshot, self.latest_stats)

        #print new_snapshot
        # advance
        self.latest_stats = new_snapshot


        return True



    # for API
    def get_stats (self):
        """Return per-pg_id counters (relative) and rates (absolute) with
        integer keys; missing values become StatNotAvailable."""
        stats = {}

        for pg_id, value in self.latest_stats.items():
            # skip non ints
            if not is_intable(pg_id):
                # 'global' stats are in the same level of the pg_ids. We do want them to go to the user
                if pg_id == 'global':
                    stats[pg_id] = value
                continue
            # bare counters
            stats[int(pg_id)] = {}
            for field in ['tx_pkts', 'tx_bytes', 'rx_pkts', 'rx_bytes']:
                val = self.get_rel([pg_id, field, 'total'])
                stats[int(pg_id)][field] = {'total': val if val != 'N/A' else StatNotAvailable(field)}
                for port in value[field].keys():
                    if is_intable(port):
                        val = self.get_rel([pg_id, field, port])
                        stats[int(pg_id)][field][int(port)] = val if val != 'N/A' else StatNotAvailable(field)

            # BW values
            for field in ['tx_pps', 'tx_bps', 'tx_bps_L1', 'rx_pps', 'rx_bps', 'rx_bps_L1']:
                val = self.get([pg_id, field, 'total'])
                stats[int(pg_id)][field] = {'total': val if val != 'N/A' else StatNotAvailable(field)}
                for port in value[field].keys():
                    if is_intable(port):
                        val = self.get([pg_id, field, port])
                        stats[int(pg_id)][field][int(port)] = val if val != 'N/A' else StatNotAvailable(field)

        return stats
1484
class CUtilStats(CTRexStats):
    """Utilization (CPU / mbuf) statistics polled from the server.

    Keeps only the most recent server reply and can serve it from a
    one-second cache to avoid hammering the RPC channel.
    """

    def __init__(self, client):
        super(CUtilStats, self).__init__()
        self.client = client
        self.history = deque(maxlen = 1)    # only the latest reply is retained
        self.mbuf_types_list = None
        self.last_update_ts = -999          # guarantees a fetch on the first call

    def get_stats(self, use_1sec_cache = False):
        """Return the latest utilization data, refetching unless the cached
        reply is requested and still fresh (younger than one second)."""
        now = time.time()
        cached = use_1sec_cache and self.history and (now <= self.last_update_ts + 1)
        if not cached:
            if self.client.is_connected():
                rc = self.client._transmit('get_utilization')
                if not rc:
                    raise STLError(rc)
                self.last_update_ts = now
                self.history.append(rc.data())
            else:
                # disconnected - record an empty reply so callers get a dict
                self.history.append({})

        return self.history[-1]
1507
class CXStats(CTRexStats):
    """Extended per-port statistics (DPDK xstats) fetched from the server.

    Counter names and values arrive from two separate RPCs and are matched
    up by index; results can be served from a one-second cache.
    """

    def __init__(self, client):
        super(CXStats, self).__init__()
        self.client = client
        self.names = []             # xstats counter names, index-aligned with the values
        self.last_update_ts = -999  # guarantees a fetch on the first call

    def clear_stats(self, port_id = None):
        """Snapshot current absolute values as the reference point.

        port_id: None for all ports, a single port id, or a list of ids.
        """
        if port_id is None:
            ports = self.client.get_all_ports()
        elif isinstance(port_id, list):
            ports = port_id
        else:
            ports = [port_id]

        for port_id in ports:
            self.reference_stats[port_id] = self.get_stats(port_id, relative = False)

    def get_stats(self, port_id, use_1sec_cache = False, relative = True):
        """Return an OrderedDict of xstats for *port_id*.

        relative=True subtracts the reference snapshot (see clear_stats);
        use_1sec_cache=True reuses data fetched within the last second.
        Raises STLError on RPC failure or a names/values length mismatch.
        NOTE(review): assumes latest_stats[port_id] exists when the client
        is disconnected - confirm callers fetch at least once while connected.
        """
        time_now = time.time()
        if self.last_update_ts + 1 < time_now or not self.latest_stats or not use_1sec_cache:
            if self.client.is_connected():
                rc = self.client._transmit('get_port_xstats_values', params = {'port_id': port_id})
                if not rc:
                    raise STLError(rc)
                self.last_update_ts = time_now
                values = rc.data().get('xstats_values', [])
                if len(values) != len(self.names): # need to update names ("keys")
                    rc = self.client._transmit('get_port_xstats_names', params = {'port_id': port_id})
                    if not rc:
                        raise STLError(rc)
                    self.names = rc.data().get('xstats_names', [])
                if len(values) != len(self.names):
                    # even after refreshing the names they do not line up with the values
                    raise STLError('Length of get_port_xstats_names: %s does not match length of get_port_xstats_values: %s' % (len(self.names), len(values)))
                self.latest_stats[port_id] = OrderedDict(zip(self.names, values))

        stats = OrderedDict()
        for key, val in self.latest_stats[port_id].items():
            if relative:
                stats[key] = self.get_rel([port_id, key])
            else:
                stats[key] = self.get([port_id, key])
        return stats
1552
if __name__ == "__main__":
    # library module - no standalone entry point
    pass
1555
1556