From b2417d82c464029a4de6ffb32f58964462830445 Mon Sep 17 00:00:00 2001 From: AbhishekCandela Date: Thu, 8 Jan 2026 15:09:17 +0530 Subject: [PATCH 1/4] Implement robot-driven multicast test automation - Add perform_robo and perform_robo_multicast execution flow - Store detailed per-station and upstream results in JSON - Append per-coordinate and per-station CSV outputs - Add final aggregated robot multicast CSV report Signed-off-by: AbhishekCandela --- py-scripts/test_l3.py | 215 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 215 insertions(+) diff --git a/py-scripts/test_l3.py b/py-scripts/test_l3.py index 0e720ea39..5badf8abc 100755 --- a/py-scripts/test_l3.py +++ b/py-scripts/test_l3.py @@ -2088,6 +2088,220 @@ def build(self, rebuild=False): "PASS: Stations & CX build finished: created/updated: %s stations and %s connections." % (self.station_count, self.cx_count)) + def perform_robo_multicast(self, coordinate, rotation): + """Run multicast test at specific coordinate and rotation, storing results with position data.""" + + # Store current position information before starting test + position_key = f"coord_{coordinate}_rot_{rotation if rotation is not None else ''}" + + logger.info(f"Starting multicast test at coordinate: {coordinate}, rotation: {rotation}") + self.start(False, coordinate, rotation) + + logger.info("Test complete, stopping traffic") + self.stop() + + self.webgui_finalize(coordinate, rotation) + + # Collect and store test results for this position + self._collect_position_results(position_key, coordinate, rotation) + + def _collect_position_results(self, position_key, coordinate, rotation): + if position_key not in self.multicast_robot_results: + self.multicast_robot_results[position_key] = { + "coordinate": coordinate, + "rotation": rotation, + "upstream": {}, + "stations": [], + "summary": {}, + } + + endp_data = self.json_get( + "endp/all?fields=name,tx+rate,rx+rate,rx+bytes,a/b,tos,eid,type,rx+drop+%25" + ) + endpoints = {} + + if endp_data and "endpoint" in endp_data: + for endp_item in endp_data["endpoint"]: + for name, info in endp_item.items(): + endpoints[name] = info + + eth_tx_total = 0 + sta_rx_total = 0 + stations_data = [] + + for name, info in endpoints.items(): + if "MLT-mrx-" in name: + rx_rate = info.get("rx rate", 0) + rx_bytes = info.get("rx bytes", 0) + drop_percent = info.get("rx drop %", 0.0) + sta_rx_total += rx_rate if isinstance(rx_rate, int) else 0 + stations_data.append({ + "station": name, + "rx_rate_bps": rx_rate, + "rx_bytes": rx_bytes, + "drop_percent": drop_percent, + "coordinate": coordinate, + "rotation": rotation, + }) + elif "MLT-mtx-" in name: + tx_rate = info.get("tx rate", 0) + tx_bytes = info.get("tx bytes", 0) + drop_percent = info.get("tx drop %", 0.0) + eth_tx_total += tx_rate if isinstance(tx_rate, int) else 0 + upstream_data = { + "endpoint": name, + "tx_rate_bps": tx_rate, + "tx_bytes": tx_bytes, + "drop_percent": drop_percent, + "coordinate": coordinate, + "rotation": rotation, + } + self.multicast_robot_results[position_key]["upstream"] = upstream_data + + total_tx_rate = eth_tx_total + total_rx_rate = sta_rx_total + avg_drop = ( + sum([s["drop_percent"] for s in stations_data]) / len(stations_data) + if stations_data else 0.0 + ) + + summary = { + "coordinate": coordinate, + "rotation": rotation, + "total_tx_rate_bps": total_tx_rate, + "total_rx_rate_bps": total_rx_rate, + "throughput_mbps": round(total_rx_rate / 1e6, 2), + "average_drop_percent": round(avg_drop, 2), + "endpoint_count": len(stations_data), + 
"timestamp": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), + } + + self.multicast_robot_results[position_key]["stations"] = stations_data + self.multicast_robot_results[position_key]["summary"] = summary + + json_filename = os.path.join(self.result_dir, "test_l3_robot_multicast_detailed.json") + with open(json_filename, "w") as jsonfile: + json.dump(self.multicast_robot_results, jsonfile, indent=2, default=str) + logger.info(f"[RobotTest] Updated detailed JSON: {json_filename}") + + coord_csv = os.path.join( + self.result_dir, f"overall_multicast_throughput_{coordinate}.csv" + ) + + write_header = not os.path.exists(coord_csv) + with open(coord_csv, "a") as f: + if write_header: + f.write( + "coordinate,rotation,total_tx_rate_bps,total_rx_rate_bps,throughput_mbps,average_drop_percent,endpoint_count,timestamp\n" + ) + f.write( + f"{coordinate},{rotation},{total_tx_rate},{total_rx_rate},{total_rx_rate/1e6:.2f},{avg_drop:.2f},{len(stations_data)},{summary['timestamp']}\n" + ) + + self._write_robot_station_csv(stations_data, summary["timestamp"]) + + logger.info( + f"[RobotTest] Coord={coordinate}, Rot={rotation}, Stations={len(stations_data)}, Avg Throughput={total_rx_rate/1e6:.2f} Mbps" + ) + + def _write_robot_station_csv(self, stations_data, timestamp): + """ + Write per-station multicast RX results (Wi-Fi side) into a separate CSV file. + """ + if not stations_data: + return + + station_csv = os.path.join(self.result_dir, "robot_station_data.csv") + write_header = not os.path.exists(station_csv) + # Append station data to CSV + with open(station_csv, "a") as f: + if write_header: + f.write("coordinate,rotation,station,rx_rate_bps,rx_bytes,drop_percent,timestamp\n") + for st in stations_data: + f.write( + f"{st['coordinate']},{st['rotation']},{st['station']}," + f"{st['rx_rate_bps']},{st['rx_bytes']},{st['drop_percent']},{timestamp}\n" + ) + + def perform_robo(self): + """Main robot test execution with coordinate and rotation iteration.""" + + self.robot_rotation_enabled = (hasattr(self, 'rotation_list') and self.rotation_list) + + # Iterate through all coordinates + for coord_index, coordinate in enumerate(self.coordinate_list): + logger.info(f"Moving to coordinate {coord_index}: {coordinate}") + + pause_coord,test_stopped_by_user=self.robot_obj.wait_for_battery(self.stop) + if pause_coord: + print("Test stopped by user, exiting...") + exit(0) + if self.test_stopped_user: + break + + # Move robot to coordinate + robo_moved = self.robot_obj.move_to_coordinate(coordinate) + + if robo_moved: + logger.info(f"Successfully moved to coordinate {coordinate}") + if not self.robot_rotation_enabled: + # No rotation mode - run test once at this coordinate + self.perform_robo_multicast(coordinate=coordinate, rotation=None) + else: + # Rotation mode - run test at each rotation angle + for angle_index, rotation_angle in enumerate(self.rotation_list): + pause_coord,test_stopped_by_user=self.robot_obj.wait_for_battery(self.stop) + if pause_coord: + print("Test stopped by user, exiting...") + exit(0) + logger.info(f"Rotating to angle {angle_index}: {rotation_angle} degrees") + + robo_rotated = self.robot_obj.rotate_angle(rotation_angle) + + if robo_rotated: + logger.info(f"Successfully rotated to {rotation_angle} degrees") + self.perform_robo_multicast(coordinate=coordinate, rotation=rotation_angle) + else: + logger.error(f"Failed to rotate to angle {rotation_angle} at coordinate {coordinate}") + else: + logger.error(f"Failed to move to coordinate {coordinate}") + + # Generate final report after 
all tests + self._generate_robot_test_report() + + def _generate_robot_test_report(self): + """Generate comprehensive report of all robot test results.""" + + if not self.multicast_robot_results: + logger.warning("No robot test results to report") + return + + # Create CSV report + csv_filename = f"{self.outfile[:-4]}_robot_multicast_results.csv" + with open(csv_filename, 'w', newline='') as csvfile: + fieldnames = [ + 'position_key', 'coordinate', 'rotation', + 'total_tx_rate_bps', 'total_rx_rate_bps', 'throughput_mbps', + 'average_drop_percent', 'endpoint_count' + ] + writer = csv.DictWriter(csvfile, fieldnames=fieldnames) + writer.writeheader() + # write each position's summary + for position_key, result_data in self.multicast_robot_results.items(): + test_data = result_data.get('summary', {}) + writer.writerow({ + 'position_key': position_key, + 'coordinate': test_data.get('coordinate', ''), + 'rotation': test_data.get('rotation', ''), + 'total_tx_rate_bps': test_data.get('total_tx_rate_bps', 0), + 'total_rx_rate_bps': test_data.get('total_rx_rate_bps', 0), + 'throughput_mbps': test_data.get('throughput_mbps', 0), + 'average_drop_percent': test_data.get('average_drop_percent', 0), + 'endpoint_count': test_data.get('endpoint_count', 0), + }) + + logger.info(f"Robot test report generated: {csv_filename}") + def l3_endp_port_data(self, tos): """ Args: @@ -8101,6 +8315,7 @@ def parse_args(): test_l3_parser.add_argument("--real", action="store_true", help='For testing on real devies') test_l3_parser.add_argument('--get_live_view', help="If true will heatmap will be generated from testhouse automation WebGui ", action='store_true') test_l3_parser.add_argument('--total_floors', help="Total floors from testhouse automation WebGui ", default="0") + parser.add_argument('--help_summary', default=None, action="store_true", From c88e58494a8c78fc8645380d52a2c1d51d64054c Mon Sep 17 00:00:00 2001 From: AbhishekCandela Date: Thu, 8 Jan 2026 15:17:29 +0530 Subject: [PATCH 2/4] Integrate robot-driven multicast testing into test_l3 workflow - Add RobotClass integration and robot execution parameters - Introduce CLI flags for robot test, IP, coordinates, and rotations - Extend L3 start flow to accept coordinate and rotation context - Track and report average RSSI during multicast runs - Generate per-coordinate and per-rotation throughput CSV outputs Verified: python3 test_l3.py --lfmgr 192.168.207.78 --test_duration 1m --polling_interval 1s --upstream_port eth1 --endp_type mc_udp --rates_are_totals --side_b_min_bps=10000000 --test_tag test_l3 --use_existing_station_list --existing_station_list 1.12.wlan0 --cleanup_cx --tos BE --test_name Sample_test --dowebgui True --local_lf_report_dir /home/lanforge/Pictures/local/interop-webGUI/results/Sample_test --robot_test --coordinate 21,29 --rotation "10" --robot_ip 192.168.200.179 Signed-off-by: AbhishekCandela --- py-scripts/test_l3.py | 137 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 119 insertions(+), 18 deletions(-) diff --git a/py-scripts/test_l3.py b/py-scripts/test_l3.py index 5badf8abc..0420680d2 100755 --- a/py-scripts/test_l3.py +++ b/py-scripts/test_l3.py @@ -705,6 +705,8 @@ lf_attenuator = importlib.import_module("py-scripts.lf_atten_mod_test") lf_modify_radio = importlib.import_module("py-scripts.lf_modify_radio") lf_cleanup = importlib.import_module("py-scripts.lf_cleanup") +lf_base_robo = importlib.import_module("py-scripts.lf_base_robo") +from lf_base_robo import RobotClass Realm = realm.Realm logger = logging.getLogger(__name__) @@ 
-837,7 +839,11 @@ def __init__(self, real=False, expected_passfail_value=None, device_csv_name=None, - group_name=None): + group_name=None, + robot_test=False, + robot_ip=None, + coordinate=None, + rotation=None): self.eth_endps = [] self.cx_names = [] @@ -1379,6 +1385,35 @@ def __init__(self, self.csv_results_writer = csv.writer( self.csv_results_file, delimiter=",") + # Add robot test parameters + if rotation: + self.rotation_list = rotation.split(',') + else: + self.rotation_list = None + self.robo_test = robot_test + + if self.robo_test: + self.coordinate_list = coordinate.split(',') + self.robo_ip = robot_ip + self.robot_obj = RobotClass(robo_ip=self.robo_ip, angle_list=self.rotation_list) + + # self.robot_obj = RobotClass() # Fake Server Testing + base_dir = os.path.dirname(os.path.dirname(self.result_dir)) + nav_data = os.path.join(base_dir, 'nav_data.json') + with open(nav_data, "w") as file: + json.dump({}, file) + + # self.robot_obj.robo_ip = f"{self.robo_ip}" # Fake Server Testing + self.robot_obj.nav_data_path=nav_data + self.robot_obj.result_directory=os.path.dirname(nav_data) + self.robot_obj.runtime_dir=self.result_dir + + self.robot_obj.testname=self.test_name + + self.robot_test_data = {} + self.multicast_robot_results = {} + self.test_stopped_user = False + # if it is a dataplane test the side_a is not None and an ethernet port # if side_a is None then side_a is radios if not self.dataplane: @@ -2432,7 +2467,7 @@ def l3_endp_port_data(self, tos): return client_dict_A - def start(self, print_pass=False) -> int: + def start(self, print_pass=False, coordinate=None, rotation=None) -> int: """Run configured Layer-3 variable time test. Args: @@ -2602,13 +2637,26 @@ def start(self, print_pass=False) -> int: # Create a DataFrame with columns for download rate, upload rate, and RSSI columns = ['download_rate_A', 'upload_rate_A', 'RSSI'] individual_device_data[r_id] = pd.DataFrame(columns=columns) + + # Calculate average RSSI + rssi_values = [] + for i in range(len(l3_port_data['resource_alias_A'])): - row_data = [l3_port_data['dl_A'][i], l3_port_data['ul_A'][i], l3_port_data['port_signal_A'][i]] + port_signal = l3_port_data['port_signal_A'][i] + + row_data = [l3_port_data['dl_A'][i], l3_port_data['ul_A'][i], port_signal] r_id = l3_port_data['resource_alias_A'][i].split('_')[0] # Append new row to the device-specific DataFrame individual_device_data[r_id].loc[len(individual_device_data[r_id])] = row_data # for each resource individual csv will be created here individual_device_data[r_id].to_csv(f'{self.result_dir}/individual_device_data_{r_id}.csv', index=False) + # Collect RSSI for average calculation + try: + rssi_val = float(port_signal) + rssi_values.append(rssi_val) + except (ValueError, TypeError): + continue + time_difference = abs(end_time - datetime.datetime.now()) total_hours = time_difference.total_seconds() / 3600 remaining_minutes = (total_hours % 1) * 60 @@ -2619,21 +2667,60 @@ def start(self, print_pass=False) -> int: for k, v in endp_rx_map.items(): if 'MLT-' in k: total += v + + if rssi_values: + avg_rssi = sum(rssi_values) / len(rssi_values) + else: + avg_rssi = 0 + self.overall.append( - {self.tos[0]: total, "timestamp": self.get_time_stamp_local(), - "status": "Running", - "start_time": start_time.strftime('%Y-%m-%d-%H-%M-%S'), - "end_time": end_time.strftime('%Y-%m-%d-%H-%M-%S'), "remaining_time": remaining_time}) + { + self.tos[0]: total, + "timestamp": self.get_time_stamp_local(), + "status": "Running", + "start_time": 
start_time.strftime('%Y-%m-%d-%H-%M-%S'), + "end_time": end_time.strftime('%Y-%m-%d-%H-%M-%S'), + "remaining_time": remaining_time, + "RSSI": avg_rssi + }) + df1 = pd.DataFrame(self.overall) - df1.to_csv('{}/overall_multicast_throughput.csv'.format(self.result_dir), index=False) - with open(self.result_dir + "/../../Running_instances/{}_{}_running.json".format(self.ip, - self.test_name), - 'r') as file: - data = json.load(file) - if data["status"] != "Running": - logging.warning('Test is stopped by the user') - self.overall[len(self.overall) - 1]["end_time"] = self.get_time_stamp_local() - break + if coordinate is not None: + df1['coordinate'] = coordinate + if rotation is not None: + df1['rotation'] = rotation + else: + rotation = None + df1.to_csv('{}/overall_multicast_throughput_coord_{}_rot_{}.csv'.format( + self.result_dir, coordinate, rotation), index=False) + else: + df1.to_csv('{}/overall_multicast_throughput.csv'.format(self.result_dir), index=False) + running_file = f"{self.result_dir}/../../Running_instances/{self.ip}_{self.test_name}_running.json" + try: + with open(running_file, "r") as file: + data = json.load(file) + # If file exists but test is stopped + if data.get("status") != "Running": + logging.warning("Test is stopped by the user") + self.test_stopped_user = True + self.overall[-1]["end_time"] = self.get_time_stamp_local() + break + + except FileNotFoundError: + logging.warning(f"Running instance file not found: {running_file}") + self.overall[-1]["end_time"] = self.get_time_stamp_local() + break + + except json.JSONDecodeError: + logging.warning(f"Running instance file corrupted or empty: {running_file}") + self.overall[-1]["end_time"] = self.get_time_stamp_local() + break + + except Exception as e: + logging.error(f"Unexpected error reading running.json: {e}") + self.overall[-1]["end_time"] = self.get_time_stamp_local() + break + if not self.dowebgui: logger.debug(log_msg) @@ -8315,7 +8402,11 @@ def parse_args(): test_l3_parser.add_argument("--real", action="store_true", help='For testing on real devies') test_l3_parser.add_argument('--get_live_view', help="If true will heatmap will be generated from testhouse automation WebGui ", action='store_true') test_l3_parser.add_argument('--total_floors', help="Total floors from testhouse automation WebGui ", default="0") - + test_l3_parser.add_argument('--robot_test',help='to trigger robot test', action='store_true') + test_l3_parser.add_argument('--robot_ip', type=str,default='localhost', help='hostname for where Robot server is running') + test_l3_parser.add_argument('--coordinate', type=str, default=None, help="The coordinate contains list of coordinates to be") + test_l3_parser.add_argument('--rotation', type=str, default=None, help="The rotation contains list of rotations to be") + parser.add_argument('--help_summary', default=None, action="store_true", @@ -9097,6 +9188,12 @@ def main(): # for uniformity from webGUI result_dir as variable is used insead of local_lf_report_dir result_dir=args.local_lf_report_dir, + # for Robot execution + robot_test=args.robot_test, + robot_ip=args.robot_ip, + coordinate=args.coordinate, + rotation=args.rotation, + # wifi extra configuration key_mgmt_list=key_mgmt_list, pairwise_list=pairwise_list, @@ -9155,7 +9252,11 @@ def main(): # Run test logger.info("Starting test") - ip_var_test.start(False) + if (args.robot_test and any(etype in args.endp_type for etype in ["mc_udp", "mc_udp6"])): + logger.info("Multicast robot test detected") + ip_var_test.perform_robo() + else: + 
ip_var_test.start(False) if args.wait > 0: logger.info(f"Pausing {args.wait} seconds for manual inspection before test conclusion and " From 57e8c5a0e5563738113dc6947f8acfe92256b936 Mon Sep 17 00:00:00 2001 From: AbhishekCandela Date: Thu, 8 Jan 2026 15:20:37 +0530 Subject: [PATCH 3/4] Enhance webgui_finalize to support robot coordinate/rotation outputs - Extend webgui_finalize to accept coordinate and rotation context - Generate per-position multicast throughput CSV filenames Verified: python3 test_l3.py --lfmgr 192.168.207.78 --test_duration 1m --polling_interval 1s --upstream_port eth1 --endp_type mc_udp --rates_are_totals --side_b_min_bps=10000000 --test_tag test_l3 --use_existing_station_list --existing_station_list 1.12.wlan0 --cleanup_cx --tos BE --test_name Sample_test --dowebgui True --local_lf_report_dir /home/lanforge/Pictures/local/interop-webGUI/results/Sample_test --robot_test --coordinate 21,29 --rotation "10" --robot_ip 192.168.200.179 Signed-off-by: AbhishekCandela --- py-scripts/test_l3.py | 61 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 54 insertions(+), 7 deletions(-) diff --git a/py-scripts/test_l3.py b/py-scripts/test_l3.py index 0420680d2..cb780e18b 100755 --- a/py-scripts/test_l3.py +++ b/py-scripts/test_l3.py @@ -6912,16 +6912,63 @@ def copy_reports_to_home_dir(self): os.makedirs(test_name_dir) shutil.copytree(curr_path, test_name_dir, dirs_exist_ok=True) - def webgui_finalize(self): + def webgui_finalize(self, coord=None, rot=None): """Test report finalization run when in WebGUI mode.""" - last_entry = self.overall[len(self.overall) - 1] - last_entry["status"] = "Stopped" - last_entry["timestamp"] = self.get_time_stamp_local() - last_entry["end_time"] = self.get_time_stamp_local() - self.overall.append(last_entry) + print(f"DEBUG: result_dir = {self.result_dir}") + print(f"DEBUG: coord = {coord}, rot = {rot}") + + if not self.overall: + logger.warning("webgui_finalize() called but self.overall is empty. Creating default entry.") + last_entry = { + "status": "Stopped", + "timestamp": self.get_time_stamp_local(), + "end_time": self.get_time_stamp_local() + } + self.overall.append(last_entry) + else: + # Get the last entry and preserve RSSI data + last_entry = self.overall[-1].copy() + last_entry["status"] = "Stopped" + last_entry["timestamp"] = self.get_time_stamp_local() + last_entry["end_time"] = self.get_time_stamp_local() + + rssi_keys = [k for k in last_entry.keys() if k.startswith('rssi_')] + + self.overall.append(last_entry) df1 = pd.DataFrame(self.overall) - df1.to_csv('{}/overall_multicast_throughput.csv'.format(self.result_dir), index=False) + if not hasattr(self, 'result_dir') or not self.result_dir: + # Create a default results directory + script_dir = os.path.dirname(os.path.abspath(__file__)) + self.result_dir = os.path.join(script_dir, "results", getattr(self, 'test_name', 'default_test')) + + os.makedirs(self.result_dir, exist_ok=True) + + # Handle rotation parameter consistently with perform_robo_multicast() + if coord is not None: + filename = f"overall_multicast_throughput_coord_{coord}_rot_{rot}.csv" + else: + filename = 'overall_multicast_throughput.csv' + + filepath = os.path.join(self.result_dir, filename) + print(f"DEBUG: Saving to {filepath}") + + try: + df1.to_csv(filepath, index=False) + print(f"INFO: Successfully saved results to {filepath}") + except PermissionError as e: + # Try alternative location if permission denied + print(f"ERROR: Permission denied for {filepath}. 
Trying alternative...") + alt_dir = os.path.join(os.path.expanduser("~"), "test_results") + os.makedirs(alt_dir, exist_ok=True) + alt_path = os.path.join(alt_dir, filename) + df1.to_csv(alt_path, index=False) + print(f"INFO: Saved to alternative location: {alt_path}") + except Exception as e: + print(f"ERROR: Failed to save CSV: {e}") + # Save to current directory as last resort + df1.to_csv(filename, index=False) + print(f"INFO: Saved to current directory: {filename}") def get_pass_fail_list(self, tos, up, down): res_list = [] From 317b926a8edc693cdf5d9501a07c279b72fae19f Mon Sep 17 00:00:00 2001 From: AbhishekCandela Date: Thu, 8 Jan 2026 16:41:16 +0530 Subject: [PATCH 4/4] Extend test_l3 report generation with robot multicast support - Add robot report layout for multicast test results - Generate per-coordinate and rotation throughput graphs - Maintain backward compatibility for non-robot executions - robot multicast execution with and without rotations Verified: python3 test_l3.py --lfmgr 192.168.207.78 --test_duration 1m --polling_interval 1s --upstream_port eth1 --endp_type mc_udp --rates_are_totals --side_b_min_bps=10000000 --test_tag test_l3 --use_existing_station_list --existing_station_list 1.12.wlan0 --cleanup_cx --tos BE --test_name Sample_test --dowebgui True --local_lf_report_dir /home/lanforge/Pictures/local/interop-webGUI/results/Sample_test --robot_test --coordinate 21,29 --rotation "10" --robot_ip 192.168.200.179 Signed-off-by: AbhishekCandela --- py-scripts/test_l3.py | 823 ++++++++++++++++++++++++++++-------------- 1 file changed, 548 insertions(+), 275 deletions(-) diff --git a/py-scripts/test_l3.py b/py-scripts/test_l3.py index cb780e18b..ddc5ac47d 100755 --- a/py-scripts/test_l3.py +++ b/py-scripts/test_l3.py @@ -349,6 +349,48 @@ --iot_testname "Multicast_IoT_Test" --iot_device_list "switch.smart_plug_1_socket_1" + # Example : Command Line Interface to run Multicast robo test with Rotations + ./test_l3.py + --lfmgr 192.168.207.78 + --test_duration 1m + --polling_interval 1s + --upstream_port eth1 + --endp_type mc_udp + --rates_are_totals + --side_b_min_bps=10000000 + --test_tag test_l3 + --use_existing_station_list + --existing_station_list 1.12.wlan0 + --cleanup_cx + --tos BE + --test_name Sample_test + --dowebgui True + --local_lf_report_dir /home/lanforge/local/interop-webGUI/results/Sample_test + --robot_test + --coordinate 21,29 + --rotation "10" + --robot_ip 192.168.200.179 + + # Example : Command Line Interface to run Multicast robo test without Rotations + ./test_l3.py + --lfmgr 192.168.207.78 + --test_duration 1m + --polling_interval 1s + --upstream_port eth1 + --endp_type mc_udp + --rates_are_totals + --side_b_min_bps=10000000 + --test_tag test_l3 + --use_existing_station_list + --existing_station_list 1.12.wlan0 + --cleanup_cx + --tos BE + --test_name Sample_test + --dowebgui True + --local_lf_report_dir /home/lanforge/local/interop-webGUI/results/Sample_test + --robot_test + --coordinate 21,29 + --robot_ip 192.168.200.179 SCRIPT_CLASSIFICATION: Creation & Runs Traffic @@ -1404,11 +1446,10 @@ def __init__(self, json.dump({}, file) # self.robot_obj.robo_ip = f"{self.robo_ip}" # Fake Server Testing - self.robot_obj.nav_data_path=nav_data - self.robot_obj.result_directory=os.path.dirname(nav_data) - self.robot_obj.runtime_dir=self.result_dir - - self.robot_obj.testname=self.test_name + self.robot_obj.nav_data_path = nav_data + self.robot_obj.result_directory = os.path.dirname(nav_data) + self.robot_obj.runtime_dir = self.result_dir + self.robot_obj.testname = 
self.test_name self.robot_test_data = {} self.multicast_robot_results = {} @@ -2267,7 +2308,7 @@ def perform_robo(self): for coord_index, coordinate in enumerate(self.coordinate_list): logger.info(f"Moving to coordinate {coord_index}: {coordinate}") - pause_coord,test_stopped_by_user=self.robot_obj.wait_for_battery(self.stop) + pause_coord, test_stopped_by_user = self.robot_obj.wait_for_battery(self.stop) if pause_coord: print("Test stopped by user, exiting...") exit(0) @@ -2285,7 +2326,7 @@ def perform_robo(self): else: # Rotation mode - run test at each rotation angle for angle_index, rotation_angle in enumerate(self.rotation_list): - pause_coord,test_stopped_by_user=self.robot_obj.wait_for_battery(self.stop) + pause_coord, test_stopped_by_user = self.robot_obj.wait_for_battery(self.stop) if pause_coord: print("Test stopped by user, exiting...") exit(0) @@ -2334,7 +2375,7 @@ def _generate_robot_test_report(self): 'average_drop_percent': test_data.get('average_drop_percent', 0), 'endpoint_count': test_data.get('endpoint_count', 0), }) - + logger.info(f"Robot test report generated: {csv_filename}") def l3_endp_port_data(self, tos): @@ -2675,11 +2716,11 @@ def start(self, print_pass=False, coordinate=None, rotation=None) -> int: self.overall.append( { - self.tos[0]: total, + self.tos[0]: total, "timestamp": self.get_time_stamp_local(), "status": "Running", "start_time": start_time.strftime('%Y-%m-%d-%H-%M-%S'), - "end_time": end_time.strftime('%Y-%m-%d-%H-%M-%S'), + "end_time": end_time.strftime('%Y-%m-%d-%H-%M-%S'), "remaining_time": remaining_time, "RSSI": avg_rssi }) @@ -6327,6 +6368,8 @@ def add_live_view_images_to_report(self): It waits up to **60 seconds** for each image. If an image is found, it's added to the `report` on a new page; otherwise, it's skipped. """ + if self.robo_test: + self.total_floors = 1 for floor in range(0, int(self.total_floors)): throughput_image_path = os.path.join(self.result_dir, "live_view_images", f"{self.test_name}_throughput_{floor + 1}.png") rssi_image_path = os.path.join(self.result_dir, "live_view_images", f"{self.test_name}_rssi_{floor + 1}.png") @@ -6408,22 +6451,35 @@ def generate_report(self, config_devices=None, group_device_map=None, iot_summar "Total No. of Devices": self.station_count, } else: - test_input_info = { - "LANforge ip": self.lfmgr, - "LANforge port": self.lfmgr_port, - "Upstream": self.upstream_port, - "Test Duration": self.test_duration, - "Polling Interval": self.polling_interval, - "Total No. of Devices": self.station_count, - } + if self.robo_test: + test_input_info = { + "LANforge ip": self.lfmgr, + "LANforge port": self.lfmgr_port, + "Upstream": self.upstream_port, + "Test Duration": self.test_duration, + "Polling Interval": self.polling_interval, + "Total No. of Devices": self.station_count, + "Robot Coordinates": ", ".join(self.coordinate_list), + "Robot Rotations": ", ".join(self.rotation_list) if self.rotation_list and self.rotation_list[0] != "" else "None" + } + else: + test_input_info = { + "LANforge ip": self.lfmgr, + "LANforge port": self.lfmgr_port, + "Upstream": self.upstream_port, + "Test Duration": self.test_duration, + "Polling Interval": self.polling_interval, + "Total No. 
of Devices": self.station_count, + } self.report.set_table_title("Test Configuration") self.report.build_table_title() self.report.test_setup_table(value="Test Configuration", test_setup_data=test_input_info) - self.report.set_table_title("Radio Configuration") - self.report.build_table_title() + if not self.robo_test: + self.report.set_table_title("Radio Configuration") + self.report.build_table_title() wifi_mode_dict = { 0: 'AUTO', # 802.11g @@ -6497,138 +6553,344 @@ def generate_report(self, config_devices=None, group_device_map=None, iot_summar # try to do as a loop tos_list = ['BK', 'BE', 'VI', 'VO'] - for tos in tos_list: - # processing tos's which are included in test for real_clients ensuring no blocker for virtual - if (self.real or self.dowebgui) and tos not in self.tos: - continue - if (self.client_dict_A[tos]["ul_A"] and self.client_dict_A[tos]["dl_A"]): - min_bps_a = self.client_dict_A["min_bps_a"] - min_bps_b = self.client_dict_A["min_bps_b"] - - dataset_list = [self.client_dict_A[tos]["ul_A"], self.client_dict_A[tos]["dl_A"]] - # TODO possibly explain the wording for upload and download - dataset_length = len(self.client_dict_A[tos]["ul_A"]) - x_fig_size = 20 - y_fig_size = len(self.client_dict_A[tos]["clients_A"]) * .4 + 5 - logger.debug("length of clients_A {clients} resource_alias_A {alias_A}".format( - clients=len(self.client_dict_A[tos]["clients_A"]), alias_A=len(self.client_dict_A[tos]["resource_alias_A"]))) - logger.debug("clients_A {clients}".format(clients=self.client_dict_A[tos]["clients_A"])) - logger.debug("resource_alias_A {alias_A}".format(alias_A=self.client_dict_A[tos]["resource_alias_A"])) - - if int(min_bps_a) != 0: - self.report.set_obj_html( - _obj_title=f"Individual throughput measured upload tcp or udp bps: {min_bps_a}, download tcp, udp, or mcast bps: {min_bps_b} station for traffic {tos} (WiFi).", - _obj=f"The below graph represents individual throughput for {dataset_length} clients running {tos} " - f"(WiFi) traffic. Y- axis shows “Client names“ and X-axis shows “" - f"Throughput in Mbps”.") - else: - self.report.set_obj_html( - _obj_title=f"Individual throughput mcast download bps: {min_bps_b} traffic {tos} (WiFi).", - _obj=f"The below graph represents individual throughput for {dataset_length} clients running {tos} " - f"(WiFi) traffic. Y- axis shows “Client names“ and X-axis shows “" - f"Throughput in Mbps”.") + # Generate per-coordinate/rotation graphs and tables for robot test + if self.robo_test: + logger.info("Building per-coordinate/rotation graphs and tables for robot test (from memory dict)") + self.add_live_view_images_to_report() + if not hasattr(self, "multicast_robot_results") or not self.multicast_robot_results: + self.report.set_custom_html("

No robot test results found.
") + self.report.build_custom() + else: + # Iterate through each coordinate/rotation result + for _, result in self.multicast_robot_results.items(): + coord = result.get("coordinate", "NA") + rot = result.get("rotation", "NA") + stations = result.get("stations", []) + upstream = result.get("upstream", {}) + summary = result.get("summary", {}) + + # Section header + if rot is not None: + self.report.set_custom_html( + f"

Coordinate: {coord} | Rotation: {rot}°
" + ) + else: + self.report.set_custom_html( + f"

Coordinate: {coord}
" + ) + self.report.build_custom() + + if upstream: + tx_rate = upstream.get("tx_rate_bps", 0) + dataset_list = [[tx_rate]] + labels = ["TX (bps)"] + yaxis_categories = [upstream.get("endpoint", "eth-unknown")] + + graph = lf_graph.lf_bar_graph_horizontal( + _data_set=dataset_list, + _xaxis_name="TX Throughput (bps)", + _yaxis_name="Upstream Endpoint", + _yaxis_categories=yaxis_categories, + _graph_image_name=f"robot_coord_{coord}_rot_{rot}_upstream_tx", + _label=labels, + _color_name=["darkorange"], + _color_edge=["black"], + _graph_title=f"Upstream TX Throughput — Coord {coord}, Rot {rot}", + _title_size=10, + _figsize=(14, 4.5), + _show_bar_value=True, + _enable_csv=True, + _text_font=6, + _legend_loc="best", + _legend_box=(1.0, 1.0) + ) + graph_png = graph.build_bar_graph_horizontal() + self.report.set_graph_image(graph_png) + self.report.move_graph_image() + self.report.build_graph() + self.report.set_csv_filename(graph_png) + self.report.move_csv_file() + + df_up = pd.DataFrame([upstream]) + self.report.set_table_title("Upstream (Ethernet TX) Data") + self.report.build_table_title() + self.report.set_table_dataframe(df_up) + self.report.build_table() - self.report.build_objective() - - graph = lf_graph.lf_bar_graph_horizontal(_data_set=dataset_list, - _xaxis_name="Throughput in bps", - _yaxis_name="Client names", - # _yaxis_categories=self.client_dict_A[tos]["clients_A"], - _yaxis_categories=self.client_dict_A[tos]["resource_alias_A"], - _graph_image_name=f"{tos}_A", - _label=self.client_dict_A[tos]['labels'], - _color_name=self.client_dict_A[tos]['colors'], - _color_edge=['black'], - # traditional station side -A - _graph_title=f"Individual {tos} client side traffic measurement - side a (downstream)", - _title_size=10, - _figsize=(x_fig_size, y_fig_size), - _show_bar_value=True, - _enable_csv=True, - _text_font=8, - _legend_loc="best", - _legend_box=(1.0, 1.0) - ) - graph_png = graph.build_bar_graph_horizontal() - self.report.set_graph_image(graph_png) - self.report.move_graph_image() - self.report.build_graph() - self.report.set_csv_filename(graph_png) - self.report.move_csv_file() - if self.dowebgui and self.get_live_view: - self.add_live_view_images_to_report() - # For real devices appending the required data for pass fail criteria - if self.real: - up, down, off_up, off_down = [], [], [], [] - for i in self.client_dict_A[tos]['ul_A']: - up.append(int(i) / 1000000) - for i in self.client_dict_A[tos]['dl_A']: - down.append(int(i) / 1000000) - for i in self.client_dict_A[tos]['offered_upload_rate_A']: - off_up.append(int(i) / 1000000) - for i in self.client_dict_A[tos]['offered_download_rate_A']: - off_down.append(int(i) / 1000000) - # if either 'expected_passfail_value' or 'device_csv_name' is provided for pass/fail evaluation - if self.expected_passfail_value or self.device_csv_name: - test_input_list, pass_fail_list = self.get_pass_fail_list(tos, up, down) + if stations: + df_stations = pd.DataFrame(stations) + + dataset_list = [df_stations["rx_rate_bps"].tolist()] + labels = ["RX (bps)"] + yaxis_categories = df_stations["station"].tolist() + + graph = lf_graph.lf_bar_graph_horizontal( + _data_set=dataset_list, + _xaxis_name="RX Throughput (bps)", + _yaxis_name="Stations", + _yaxis_categories=yaxis_categories, + _graph_image_name=f"robot_coord_{coord}_rot_{rot}_station_rx", + _label=labels, + _color_name=["teal"], + _color_edge=["black"], + _graph_title=f"Receiver Station RX Throughput — Coord {coord}, Rot {rot}", + _title_size=10, + _figsize=(16, max(4.5, len(yaxis_categories) 
* 0.5)), + _show_bar_value=True, + _enable_csv=True, + _text_font=6, + _legend_loc="best", + _legend_box=(1.0, 1.0) + ) + graph_png = graph.build_bar_graph_horizontal() + self.report.set_graph_image(graph_png) + self.report.move_graph_image() + self.report.build_graph() + self.report.set_csv_filename(graph_png) + self.report.move_csv_file() + + avg_rx = df_stations["rx_rate_bps"].mean() + avg_drop = df_stations["drop_percent"].mean() + self.report.set_custom_html( + f"

Average RX Throughput: {avg_rx/1e6:.2f} Mbps " + f"Average Drop Rate: {avg_drop:.2f}% " + f"Total Stations: {len(df_stations)}
" + ) + self.report.build_custom() + + self.report.set_table_title("Receiver Station Data") + self.report.build_table_title() + self.report.set_table_dataframe(df_stations) + self.report.build_table() + + if summary: + df_summary = pd.DataFrame([summary]) + self.report.set_table_title("Coordinate Summary") + self.report.build_table_title() + self.report.set_table_dataframe(df_summary) + self.report.build_table() + + self.report.set_custom_html("
") + self.report.build_custom() + + all_summary = [] + for pos_key, res in self.multicast_robot_results.items(): + sm = res.get("summary") + if sm: + all_summary.append(sm) + # Generate aggregated rotation summary across all coordinates + if all_summary: + df_all = pd.DataFrame(all_summary) + try: + rot_summary = ( + df_all.groupby("rotation")[["total_rx_rate_bps", "total_tx_rate_bps"]] + .mean() + .reset_index() + .sort_values(by="rotation") + ) + dataset = [ + rot_summary["total_rx_rate_bps"].tolist(), + rot_summary["total_tx_rate_bps"].tolist() + ] + labels = ["Avg RX (bps)", "Avg TX (bps)"] + rotations = rot_summary["rotation"].astype(str).tolist() + + graph = lf_graph.lf_bar_graph_horizontal( + _data_set=dataset, + _xaxis_name="Average Throughput (bps)", + _yaxis_name="Rotation (°)", + _yaxis_categories=rotations, + _graph_image_name="robot_avg_rotation_summary", + _label=labels, + _color_name=["steelblue", "orange"], + _color_edge=["black"], + _graph_title="Average RX/TX Throughput vs Rotation", + _title_size=10, + _figsize=(15, max(4, len(rotations) * 0.5)), + _show_bar_value=True, + _enable_csv=True, + _text_font=6, + _legend_loc="best", + _legend_box=(1.0, 1.0) + ) + graph_png = graph.build_bar_graph_horizontal() + self.report.set_graph_image(graph_png) + self.report.move_graph_image() + self.report.build_graph() + self.report.set_csv_filename(graph_png) + self.report.move_csv_file() + + self.report.set_custom_html( + "

The above chart shows average RX/TX throughput aggregated " + "by rotation across all coordinates.
" + ) + self.report.build_custom() + except Exception as e: + logger.warning(f"Could not aggregate rotation summary: {e}") + else: + for tos in tos_list: + # processing tos's which are included in test for real_clients ensuring no blocker for virtual + if (self.real or self.dowebgui) and tos not in self.tos: + continue + if (self.client_dict_A[tos]["ul_A"] and self.client_dict_A[tos]["dl_A"]): + min_bps_a = self.client_dict_A["min_bps_a"] + min_bps_b = self.client_dict_A["min_bps_b"] + + dataset_list = [self.client_dict_A[tos]["ul_A"], self.client_dict_A[tos]["dl_A"]] + # TODO possibly explain the wording for upload and download + dataset_length = len(self.client_dict_A[tos]["ul_A"]) + x_fig_size = 20 + y_fig_size = len(self.client_dict_A[tos]["clients_A"]) * .4 + 5 + logger.debug("length of clients_A {clients} resource_alias_A {alias_A}".format( + clients=len(self.client_dict_A[tos]["clients_A"]), alias_A=len(self.client_dict_A[tos]["resource_alias_A"]))) + logger.debug("clients_A {clients}".format(clients=self.client_dict_A[tos]["clients_A"])) + logger.debug("resource_alias_A {alias_A}".format(alias_A=self.client_dict_A[tos]["resource_alias_A"])) + + if int(min_bps_a) != 0: + self.report.set_obj_html( + _obj_title=f"Individual throughput measured upload tcp or udp bps: {min_bps_a}, download tcp, udp, or mcast bps: {min_bps_b} station for traffic {tos} (WiFi).", + _obj=f"The below graph represents individual throughput for {dataset_length} clients running {tos} " + f"(WiFi) traffic. Y- axis shows “Client names“ and X-axis shows “" + f"Throughput in Mbps”.") + else: + self.report.set_obj_html( + _obj_title=f"Individual throughput mcast download bps: {min_bps_b} traffic {tos} (WiFi).", + _obj=f"The below graph represents individual throughput for {dataset_length} clients running {tos} " + f"(WiFi) traffic. 
Y- axis shows “Client names“ and X-axis shows “" + f"Throughput in Mbps”.") + + self.report.build_objective() + + graph = lf_graph.lf_bar_graph_horizontal(_data_set=dataset_list, + _xaxis_name="Throughput in bps", + _yaxis_name="Client names", + # _yaxis_categories=self.client_dict_A[tos]["clients_A"], + _yaxis_categories=self.client_dict_A[tos]["resource_alias_A"], + _graph_image_name=f"{tos}_A", + _label=self.client_dict_A[tos]['labels'], + _color_name=self.client_dict_A[tos]['colors'], + _color_edge=['black'], + # traditional station side -A + _graph_title=f"Individual {tos} client side traffic measurement - side a (downstream)", + _title_size=10, + _figsize=(x_fig_size, y_fig_size), + _show_bar_value=True, + _enable_csv=True, + _text_font=8, + _legend_loc="best", + _legend_box=(1.0, 1.0) + ) + graph_png = graph.build_bar_graph_horizontal() + self.report.set_graph_image(graph_png) + self.report.move_graph_image() + self.report.build_graph() + self.report.set_csv_filename(graph_png) + self.report.move_csv_file() + if self.dowebgui and self.get_live_view: + self.add_live_view_images_to_report() + # For real devices appending the required data for pass fail criteria + if self.real: + up, down, off_up, off_down = [], [], [], [] + for i in self.client_dict_A[tos]['ul_A']: + up.append(int(i) / 1000000) + for i in self.client_dict_A[tos]['dl_A']: + down.append(int(i) / 1000000) + for i in self.client_dict_A[tos]['offered_upload_rate_A']: + off_up.append(int(i) / 1000000) + for i in self.client_dict_A[tos]['offered_download_rate_A']: + off_down.append(int(i) / 1000000) + # if either 'expected_passfail_value' or 'device_csv_name' is provided for pass/fail evaluation + if self.expected_passfail_value or self.device_csv_name: + test_input_list, pass_fail_list = self.get_pass_fail_list(tos, up, down) - if self.real: - # When groups and profiles specifed for configuration - if self.group_name: - for key, val in group_device_map.items(): - # Generating Dataframe when Groups with their profiles and pass_fail case is specified + if self.real: + # When groups and profiles specifed for configuration + if self.group_name: + for key, val in group_device_map.items(): + # Generating Dataframe when Groups with their profiles and pass_fail case is specified + if self.expected_passfail_value or self.device_csv_name: + dataframe = self.generate_dataframe( + val, + self.client_dict_A[tos]['resource_alias_A'], + self.client_dict_A[tos]['resource_eid_A'], + self.client_dict_A[tos]['resource_host_A'], + self.client_dict_A[tos]['resource_hw_ver_A'], + self.client_dict_A[tos]["clients_A"], + self.client_dict_A[tos]['port_A'], + self.client_dict_A[tos]['mode_A'], + self.client_dict_A[tos]['mac_A'], + self.client_dict_A[tos]['ssid_A'], + self.client_dict_A[tos]['channel_A'], + self.client_dict_A[tos]['traffic_type_A'], + self.client_dict_A[tos]['traffic_protocol_A'], + off_up, + off_down, + up, + down, + test_input_list, + self.client_dict_A[tos]['download_rx_drop_percent_A'], + pass_fail_list) + # Generating Dataframe for groups when pass_fail case is not specified + else: + dataframe = self.generate_dataframe( + val, + self.client_dict_A[tos]['resource_alias_A'], + self.client_dict_A[tos]['resource_eid_A'], + self.client_dict_A[tos]['resource_host_A'], + self.client_dict_A[tos]['resource_hw_ver_A'], + self.client_dict_A[tos]["clients_A"], + self.client_dict_A[tos]['port_A'], + self.client_dict_A[tos]['mode_A'], + self.client_dict_A[tos]['mac_A'], + self.client_dict_A[tos]['ssid_A'], + 
self.client_dict_A[tos]['channel_A'], + self.client_dict_A[tos]['traffic_type_A'], + self.client_dict_A[tos]['traffic_protocol_A'], + off_up, + off_down, + up, + down, + [], + self.client_dict_A[tos]['download_rx_drop_percent_A'], + [],) + # When the client exists in either group. + if dataframe: + self.report.set_obj_html("", "Group: {}".format(key)) + self.report.build_objective() + dataframe1 = pd.DataFrame(dataframe) + self.report.set_table_dataframe(dataframe1) + self.report.build_table() + else: + tos_dataframe_A = { + " Client Alias ": self.client_dict_A[tos]['resource_alias_A'], + " Host eid ": self.client_dict_A[tos]['resource_eid_A'], + " Host Name ": self.client_dict_A[tos]['resource_host_A'], + " Device Type / Hw Ver ": self.client_dict_A[tos]['resource_hw_ver_A'], + " Endp Name": self.client_dict_A[tos]["clients_A"], + # TODO : port A being set to many times + " Port Name ": self.client_dict_A[tos]['port_A'], + " Mode ": self.client_dict_A[tos]['mode_A'], + " Mac ": self.client_dict_A[tos]['mac_A'], + " SSID ": self.client_dict_A[tos]['ssid_A'], + " Channel ": self.client_dict_A[tos]['channel_A'], + " Type of traffic ": self.client_dict_A[tos]['traffic_type_A'], + " Traffic Protocol ": self.client_dict_A[tos]['traffic_protocol_A'], + " Offered Upload Rate Per Client": self.client_dict_A[tos]['offered_upload_rate_A'], + " Offered Download Rate Per Client": self.client_dict_A[tos]['offered_download_rate_A'], + " Upload Rate Per Client": self.client_dict_A[tos]['ul_A'], + " Download Rate Per Client": self.client_dict_A[tos]['dl_A'], + " Drop Percentage (%)": self.client_dict_A[tos]['download_rx_drop_percent_A'], + } + # When pass_Fail criteria specified if self.expected_passfail_value or self.device_csv_name: - dataframe = self.generate_dataframe( - val, - self.client_dict_A[tos]['resource_alias_A'], - self.client_dict_A[tos]['resource_eid_A'], - self.client_dict_A[tos]['resource_host_A'], - self.client_dict_A[tos]['resource_hw_ver_A'], - self.client_dict_A[tos]["clients_A"], - self.client_dict_A[tos]['port_A'], - self.client_dict_A[tos]['mode_A'], - self.client_dict_A[tos]['mac_A'], - self.client_dict_A[tos]['ssid_A'], - self.client_dict_A[tos]['channel_A'], - self.client_dict_A[tos]['traffic_type_A'], - self.client_dict_A[tos]['traffic_protocol_A'], - off_up, - off_down, - up, - down, - test_input_list, - self.client_dict_A[tos]['download_rx_drop_percent_A'], - pass_fail_list) - # Generating Dataframe for groups when pass_fail case is not specified - else: - dataframe = self.generate_dataframe( - val, - self.client_dict_A[tos]['resource_alias_A'], - self.client_dict_A[tos]['resource_eid_A'], - self.client_dict_A[tos]['resource_host_A'], - self.client_dict_A[tos]['resource_hw_ver_A'], - self.client_dict_A[tos]["clients_A"], - self.client_dict_A[tos]['port_A'], - self.client_dict_A[tos]['mode_A'], - self.client_dict_A[tos]['mac_A'], - self.client_dict_A[tos]['ssid_A'], - self.client_dict_A[tos]['channel_A'], - self.client_dict_A[tos]['traffic_type_A'], - self.client_dict_A[tos]['traffic_protocol_A'], - off_up, - off_down, - up, - down, - [], - self.client_dict_A[tos]['download_rx_drop_percent_A'], - [],) - # When the client exists in either group. 
- if dataframe: - self.report.set_obj_html("", "Group: {}".format(key)) - self.report.build_objective() - dataframe1 = pd.DataFrame(dataframe) - self.report.set_table_dataframe(dataframe1) - self.report.build_table() + tos_dataframe_A[" Expected " + 'Download' + " Rate"] = [float(x) * 10**6 for x in test_input_list] + tos_dataframe_A[" Status "] = pass_fail_list + + dataframe3 = pd.DataFrame(tos_dataframe_A) + self.report.set_table_dataframe(dataframe3) + self.report.build_table() + + # For virtual clients else: tos_dataframe_A = { " Client Alias ": self.client_dict_A[tos]['resource_alias_A'], @@ -6636,7 +6898,6 @@ def generate_report(self, config_devices=None, group_device_map=None, iot_summar " Host Name ": self.client_dict_A[tos]['resource_host_A'], " Device Type / Hw Ver ": self.client_dict_A[tos]['resource_hw_ver_A'], " Endp Name": self.client_dict_A[tos]["clients_A"], - # TODO : port A being set to many times " Port Name ": self.client_dict_A[tos]['port_A'], " Mode ": self.client_dict_A[tos]['mode_A'], " Mac ": self.client_dict_A[tos]['mac_A'], @@ -6650,135 +6911,105 @@ def generate_report(self, config_devices=None, group_device_map=None, iot_summar " Download Rate Per Client": self.client_dict_A[tos]['dl_A'], " Drop Percentage (%)": self.client_dict_A[tos]['download_rx_drop_percent_A'], } - # When pass_Fail criteria specified - if self.expected_passfail_value or self.device_csv_name: - tos_dataframe_A[" Expected " + 'Download' + " Rate"] = [float(x) * 10**6 for x in test_input_list] - tos_dataframe_A[" Status "] = pass_fail_list - dataframe3 = pd.DataFrame(tos_dataframe_A) self.report.set_table_dataframe(dataframe3) self.report.build_table() - # For virtual clients - else: - tos_dataframe_A = { - " Client Alias ": self.client_dict_A[tos]['resource_alias_A'], - " Host eid ": self.client_dict_A[tos]['resource_eid_A'], - " Host Name ": self.client_dict_A[tos]['resource_host_A'], - " Device Type / Hw Ver ": self.client_dict_A[tos]['resource_hw_ver_A'], - " Endp Name": self.client_dict_A[tos]["clients_A"], - " Port Name ": self.client_dict_A[tos]['port_A'], - " Mode ": self.client_dict_A[tos]['mode_A'], - " Mac ": self.client_dict_A[tos]['mac_A'], - " SSID ": self.client_dict_A[tos]['ssid_A'], - " Channel ": self.client_dict_A[tos]['channel_A'], - " Type of traffic ": self.client_dict_A[tos]['traffic_type_A'], - " Traffic Protocol ": self.client_dict_A[tos]['traffic_protocol_A'], - " Offered Upload Rate Per Client": self.client_dict_A[tos]['offered_upload_rate_A'], - " Offered Download Rate Per Client": self.client_dict_A[tos]['offered_download_rate_A'], - " Upload Rate Per Client": self.client_dict_A[tos]['ul_A'], - " Download Rate Per Client": self.client_dict_A[tos]['dl_A'], - " Drop Percentage (%)": self.client_dict_A[tos]['download_rx_drop_percent_A'], + # TODO both client_dict_A and client_dict_B contains the same information + for tos in tos_list: + if (self.client_dict_B[tos]["ul_B"] and self.client_dict_B[tos]["dl_B"]): + min_bps_a = self.client_dict_B["min_bps_a"] + min_bps_b = self.client_dict_B["min_bps_b"] + + dataset_list = [self.client_dict_B[tos]["ul_B"], self.client_dict_B[tos]["dl_B"]] + dataset_length = len(self.client_dict_B[tos]["ul_B"]) + + x_fig_size = 20 + y_fig_size = len(self.client_dict_B[tos]["clients_B"]) * .4 + 5 + + self.report.set_obj_html( + _obj_title=f"Individual throughput upstream endp, offered upload bps: {min_bps_a} offered download bps: {min_bps_b} /station for traffic {tos} (WiFi).", + _obj=f"The below graph represents individual throughput for 
{dataset_length} clients running {tos} " + f"(WiFi) traffic. Y- axis shows “Client names“ and X-axis shows “" + f"Throughput in Mbps”.") + self.report.build_objective() + + graph = lf_graph.lf_bar_graph_horizontal(_data_set=dataset_list, + _xaxis_name="Throughput in bps", + _yaxis_name="Client names", + # _yaxis_categories=self.client_dict_B[tos]["clients_B"], + _yaxis_categories=self.client_dict_B[tos]["resource_alias_B"], + _graph_image_name=f"{tos}_B", + _label=self.client_dict_B[tos]['labels'], + _color_name=self.client_dict_B[tos]['colors'], + _color_edge=['black'], + _graph_title=f"Individual {tos} upstream side traffic measurement - side b (WIFI) traffic", + _title_size=10, + _figsize=(x_fig_size, y_fig_size), + _show_bar_value=True, + _enable_csv=True, + _text_font=8, + _legend_loc="best", + _legend_box=(1.0, 1.0) + ) + graph_png = graph.build_bar_graph_horizontal() + self.report.set_graph_image(graph_png) + self.report.move_graph_image() + self.report.build_graph() + self.report.set_csv_filename(graph_png) + self.report.move_csv_file() + + tos_dataframe_B = { + " Client Alias ": self.client_dict_B[tos]['resource_alias_B'], + " Host eid ": self.client_dict_B[tos]['resource_eid_B'], + " Host Name ": self.client_dict_B[tos]['resource_host_B'], + " Device Type / HW Ver ": self.client_dict_B[tos]['resource_hw_ver_B'], + " Endp Name": self.client_dict_B[tos]["clients_B"], + # TODO get correct size + " Port Name ": self.client_dict_B[tos]['port_B'], + " Mode ": self.client_dict_B[tos]['mode_B'], + " Mac ": self.client_dict_B[tos]['mac_B'], + " SSID ": self.client_dict_B[tos]['ssid_B'], + " Channel ": self.client_dict_B[tos]['channel_B'], + " Type of traffic ": self.client_dict_B[tos]['traffic_type_B'], + " Traffic Protocol ": self.client_dict_B[tos]['traffic_protocol_B'], + " Offered Upload Rate Per Client": self.client_dict_B[tos]['offered_upload_rate_B'], + " Offered Download Rate Per Client": self.client_dict_B[tos]['offered_download_rate_B'], + " Upload Rate Per Client": self.client_dict_B[tos]['ul_B'], + " Download Rate Per Client": self.client_dict_B[tos]['dl_B'], + " Drop Percentage (%)": self.client_dict_B[tos]['download_rx_drop_percent_B'] } - dataframe3 = pd.DataFrame(tos_dataframe_A) + + dataframe3 = pd.DataFrame(tos_dataframe_B) self.report.set_table_dataframe(dataframe3) self.report.build_table() - # TODO both client_dict_A and client_dict_B contains the same information - for tos in tos_list: - if (self.client_dict_B[tos]["ul_B"] and self.client_dict_B[tos]["dl_B"]): - min_bps_a = self.client_dict_B["min_bps_a"] - min_bps_b = self.client_dict_B["min_bps_b"] - - dataset_list = [self.client_dict_B[tos]["ul_B"], self.client_dict_B[tos]["dl_B"]] - dataset_length = len(self.client_dict_B[tos]["ul_B"]) - - x_fig_size = 20 - y_fig_size = len(self.client_dict_B[tos]["clients_B"]) * .4 + 5 - - self.report.set_obj_html( - _obj_title=f"Individual throughput upstream endp, offered upload bps: {min_bps_a} offered download bps: {min_bps_b} /station for traffic {tos} (WiFi).", - _obj=f"The below graph represents individual throughput for {dataset_length} clients running {tos} " - f"(WiFi) traffic. 
Y- axis shows “Client names“ and X-axis shows “" - f"Throughput in Mbps”.") - self.report.build_objective() - - graph = lf_graph.lf_bar_graph_horizontal(_data_set=dataset_list, - _xaxis_name="Throughput in bps", - _yaxis_name="Client names", - # _yaxis_categories=self.client_dict_B[tos]["clients_B"], - _yaxis_categories=self.client_dict_B[tos]["resource_alias_B"], - _graph_image_name=f"{tos}_B", - _label=self.client_dict_B[tos]['labels'], - _color_name=self.client_dict_B[tos]['colors'], - _color_edge=['black'], - _graph_title=f"Individual {tos} upstream side traffic measurement - side b (WIFI) traffic", - _title_size=10, - _figsize=(x_fig_size, y_fig_size), - _show_bar_value=True, - _enable_csv=True, - _text_font=8, - _legend_loc="best", - _legend_box=(1.0, 1.0) - ) - graph_png = graph.build_bar_graph_horizontal() - self.report.set_graph_image(graph_png) - self.report.move_graph_image() - self.report.build_graph() - self.report.set_csv_filename(graph_png) - self.report.move_csv_file() - - tos_dataframe_B = { - " Client Alias ": self.client_dict_B[tos]['resource_alias_B'], - " Host eid ": self.client_dict_B[tos]['resource_eid_B'], - " Host Name ": self.client_dict_B[tos]['resource_host_B'], - " Device Type / HW Ver ": self.client_dict_B[tos]['resource_hw_ver_B'], - " Endp Name": self.client_dict_B[tos]["clients_B"], - # TODO get correct size - " Port Name ": self.client_dict_B[tos]['port_B'], - " Mode ": self.client_dict_B[tos]['mode_B'], - " Mac ": self.client_dict_B[tos]['mac_B'], - " SSID ": self.client_dict_B[tos]['ssid_B'], - " Channel ": self.client_dict_B[tos]['channel_B'], - " Type of traffic ": self.client_dict_B[tos]['traffic_type_B'], - " Traffic Protocol ": self.client_dict_B[tos]['traffic_protocol_B'], - " Offered Upload Rate Per Client": self.client_dict_B[tos]['offered_upload_rate_B'], - " Offered Download Rate Per Client": self.client_dict_B[tos]['offered_download_rate_B'], - " Upload Rate Per Client": self.client_dict_B[tos]['ul_B'], - " Download Rate Per Client": self.client_dict_B[tos]['dl_B'], - " Drop Percentage (%)": self.client_dict_B[tos]['download_rx_drop_percent_B'] - } + # L3 total traffic # TODO csv_results_file present yet not readable + # self.report.set_table_title("Total Layer 3 Cross-Connect Traffic across all Stations") + # self.report.build_table_title() + # self.report.set_table_dataframe_from_csv(self.csv_results_file) + # self.report.build_table() + + # empty dictionarys evaluate to false , placing tables in output + if bool(self.dl_port_csv_files): + for key, value in self.dl_port_csv_files.items(): + if self.csv_data_to_report: + # read the csv file + self.report.set_table_title("Layer 3 Cx Traffic {key}".format(key=key)) + self.report.build_table_title() + self.report.set_table_dataframe_from_csv(value.name) + self.report.build_table() - dataframe3 = pd.DataFrame(tos_dataframe_B) - self.report.set_table_dataframe(dataframe3) - self.report.build_table() - - # L3 total traffic # TODO csv_results_file present yet not readable - # self.report.set_table_title("Total Layer 3 Cross-Connect Traffic across all Stations") - # self.report.build_table_title() - # self.report.set_table_dataframe_from_csv(self.csv_results_file) - # self.report.build_table() - - # empty dictionarys evaluate to false , placing tables in output - if bool(self.dl_port_csv_files): - for key, value in self.dl_port_csv_files.items(): - if self.csv_data_to_report: - # read the csv file - self.report.set_table_title("Layer 3 Cx Traffic {key}".format(key=key)) + # read in column heading and 
last line + df = pd.read_csv(value.name) + last_row = df.tail(1) + self.report.set_table_title( + "Layer 3 Cx Traffic Last Reporting Interval {key}".format(key=key)) self.report.build_table_title() - self.report.set_table_dataframe_from_csv(value.name) + self.report.set_table_dataframe(last_row) self.report.build_table() - - # read in column heading and last line - df = pd.read_csv(value.name) - last_row = df.tail(1) - self.report.set_table_title( - "Layer 3 Cx Traffic Last Reporting Interval {key}".format(key=key)) - self.report.build_table_title() - self.report.set_table_dataframe(last_row) - self.report.build_table() - if iot_summary: - self.build_iot_report_section(self.report, iot_summary) + if iot_summary: + self.build_iot_report_section(self.report, iot_summary) def write_report(self): """Write out HTML and PDF report as configured.""" @@ -6916,7 +7147,7 @@ def webgui_finalize(self, coord=None, rot=None): """Test report finalization run when in WebGUI mode.""" print(f"DEBUG: result_dir = {self.result_dir}") print(f"DEBUG: coord = {coord}, rot = {rot}") - + if not self.overall: logger.warning("webgui_finalize() called but self.overall is empty. Creating default entry.") last_entry = { @@ -7809,6 +8040,48 @@ def parse_args(): --debug --no_cleanup + # Example : Command Line Interface to run Multicast robo test with Rotations + ./test_l3.py + --lfmgr 192.168.207.78 + --test_duration 1m + --polling_interval 1s + --upstream_port eth1 + --endp_type mc_udp + --rates_are_totals + --side_b_min_bps=10000000 + --test_tag test_l3 + --use_existing_station_list + --existing_station_list 1.12.wlan0 + --cleanup_cx + --tos BE + --test_name Sample_test + --dowebgui True + --local_lf_report_dir /home/lanforge/local/interop-webGUI/results/Sample_test + --robot_test + --coordinate 21,29 + --rotation "10" + --robot_ip 192.168.200.179 + + # Example : Command Line Interface to run Multicast robo test without Rotations + ./test_l3.py + --lfmgr 192.168.207.78 + --test_duration 1m + --polling_interval 1s + --upstream_port eth1 + --endp_type mc_udp + --rates_are_totals + --side_b_min_bps=10000000 + --test_tag test_l3 + --use_existing_station_list + --existing_station_list 1.12.wlan0 + --cleanup_cx + --tos BE + --test_name Sample_test + --dowebgui True + --local_lf_report_dir /home/lanforge/local/interop-webGUI/results/Sample_test + --robot_test + --coordinate 21,29 + --robot_ip 192.168.200.179 SCRIPT_CLASSIFICATION: Creation & Runs Traffic @@ -8449,10 +8722,10 @@ def parse_args(): test_l3_parser.add_argument("--real", action="store_true", help='For testing on real devies') test_l3_parser.add_argument('--get_live_view', help="If true will heatmap will be generated from testhouse automation WebGui ", action='store_true') test_l3_parser.add_argument('--total_floors', help="Total floors from testhouse automation WebGui ", default="0") - test_l3_parser.add_argument('--robot_test',help='to trigger robot test', action='store_true') - test_l3_parser.add_argument('--robot_ip', type=str,default='localhost', help='hostname for where Robot server is running') - test_l3_parser.add_argument('--coordinate', type=str, default=None, help="The coordinate contains list of coordinates to be") - test_l3_parser.add_argument('--rotation', type=str, default=None, help="The rotation contains list of rotations to be") + test_l3_parser.add_argument('--robot_test', help='To trigger robot test', action='store_true') + test_l3_parser.add_argument('--robot_ip', type=str, help='IP where Robot server is running') + 
test_l3_parser.add_argument('--coordinate', type=str, default=None, help="Provide the coordinates to be placed on heatmap") + test_l3_parser.add_argument('--rotation', type=str, default=None, help="Provide the rotations involved for each coordinate") parser.add_argument('--help_summary', default=None, @@ -9300,8 +9573,8 @@ def main(): # Run test logger.info("Starting test") if (args.robot_test and any(etype in args.endp_type for etype in ["mc_udp", "mc_udp6"])): - logger.info("Multicast robot test detected") - ip_var_test.perform_robo() + logger.info("Multicast robot test detected") + ip_var_test.perform_robo() else: ip_var_test.start(False)
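
Reviewer note (illustration only, not part of the patch series): the sketch below shows the per-position result shape that _collect_position_results() stores in self.multicast_robot_results and how _generate_robot_test_report() flattens each position's "summary" into the final aggregated CSV. Endpoint names, numeric values, and output filenames are placeholders invented for this example; only the field names mirror the patch.

#!/usr/bin/env python3
# Standalone sketch of the robot multicast result aggregation (placeholder data).
import csv
import json

# Shape written incrementally to the detailed JSON file,
# keyed as coord_<coordinate>_rot_<rotation>.
multicast_robot_results = {
    "coord_21_rot_10": {
        "coordinate": "21",
        "rotation": "10",
        "upstream": {"endpoint": "MLT-mtx-example", "tx_rate_bps": 10000000,
                     "tx_bytes": 75000000, "drop_percent": 0.0},
        "stations": [
            {"station": "MLT-mrx-example", "rx_rate_bps": 9650000,
             "rx_bytes": 72300000, "drop_percent": 1.2,
             "coordinate": "21", "rotation": "10"},
        ],
        "summary": {
            "coordinate": "21", "rotation": "10",
            "total_tx_rate_bps": 10000000, "total_rx_rate_bps": 9650000,
            "throughput_mbps": 9.65, "average_drop_percent": 1.2,
            "endpoint_count": 1, "timestamp": "2026-01-08 15:20:00",
        },
    },
}

# Detailed JSON snapshot (one object per coordinate/rotation position).
with open("robot_multicast_detailed_example.json", "w") as jsonfile:
    json.dump(multicast_robot_results, jsonfile, indent=2, default=str)

# Final aggregated CSV: one row per position, taken from that position's summary.
fieldnames = ["position_key", "coordinate", "rotation", "total_tx_rate_bps",
              "total_rx_rate_bps", "throughput_mbps", "average_drop_percent",
              "endpoint_count"]
with open("robot_multicast_results_example.csv", "w", newline="") as csvfile:
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writeheader()
    for position_key, result in multicast_robot_results.items():
        summary = result.get("summary", {})
        row = {name: summary.get(name, 0) for name in fieldnames if name != "position_key"}
        row["position_key"] = position_key
        writer.writerow(row)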