Release 260111
This commit is contained in:
0
selfdrive/locationd/test/__init__.py
Normal file
0
selfdrive/locationd/test/__init__.py
Normal file
112
selfdrive/locationd/test/test_calibrationd.py
Normal file
112
selfdrive/locationd/test/test_calibrationd.py
Normal file
@@ -0,0 +1,112 @@
|
||||
import random
|
||||
|
||||
import numpy as np
|
||||
|
||||
import cereal.messaging as messaging
|
||||
from cereal import log
|
||||
from openpilot.common.params import Params
|
||||
from openpilot.selfdrive.locationd.calibrationd import Calibrator, INPUTS_NEEDED, INPUTS_WANTED, BLOCK_SIZE, MIN_SPEED_FILTER, \
|
||||
MAX_YAW_RATE_FILTER, SMOOTH_CYCLES, HEIGHT_INIT, MAX_ALLOWED_PITCH_SPREAD, MAX_ALLOWED_YAW_SPREAD
|
||||
|
||||
|
||||
def process_messages(c, cam_odo_calib, cycles,
                     cam_odo_speed=MIN_SPEED_FILTER + 1,
                     carstate_speed=MIN_SPEED_FILTER + 1,
                     cam_odo_yr=0.0,
                     cam_odo_speed_std=1e-3,
                     cam_odo_height_std=1e-3):
  """Feed `cycles` synthetic carState/cameraOdometry updates into Calibrator `c`.

  cam_odo_calib encodes the target calibration angles: its pitch (index 1) and
  yaw (index 2) entries are folded into the reported camera-odometry
  translation so the calibrator should converge to those angles.
  """
  prev_weight = 0.0
  for _ in range(cycles):
    # The old-rpy blending weight may move by at most ~1/SMOOTH_CYCLES per cycle.
    assert prev_weight - c.old_rpy_weight < 1 / SMOOTH_CYCLES + 1e-3
    prev_weight = c.old_rpy_weight
    c.handle_v_ego(carstate_speed)
    # Translation implied by the requested calibration angles.
    trans = [cam_odo_speed,
             np.sin(cam_odo_calib[2]) * cam_odo_speed,
             -np.sin(cam_odo_calib[1]) * cam_odo_speed]
    rot = [0.0, 0.0, cam_odo_yr]
    # Third positional arg of handle_cam_odom; all zeros in these tests.
    extra_euler = [0.0, 0.0, 0.0]
    trans_std = [cam_odo_speed_std] * 3
    height = [0.0, 0.0, HEIGHT_INIT.item()]
    height_std = [cam_odo_height_std] * 3
    c.handle_cam_odom(trans, rot, extra_euler, trans_std, height, height_std)
|
||||
|
||||
class TestCalibrationd:
  """Unit tests for selfdrive.locationd.calibrationd.Calibrator."""

  def test_read_saved_params(self):
    """Calibrator(param_put=True) restores rpy, height and valid_blocks from the CalibrationParams param."""
    msg = messaging.new_message('liveCalibration')
    msg.liveCalibration.validBlocks = random.randint(1, 10)
    msg.liveCalibration.rpyCalib = [random.random() for _ in range(3)]
    msg.liveCalibration.height = [random.random() for _ in range(1)]
    # Persist, then let the constructor read it back.
    Params().put("CalibrationParams", msg.to_bytes())
    c = Calibrator(param_put=True)

    np.testing.assert_allclose(msg.liveCalibration.rpyCalib, c.rpy)
    np.testing.assert_allclose(msg.liveCalibration.height, c.height)
    assert msg.liveCalibration.validBlocks == c.valid_blocks

  def test_calibration_basics(self):
    """Enough clean, zero-angle data fills all INPUTS_WANTED blocks with a zero calibration."""
    c = Calibrator(param_put=False)
    process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED)
    assert c.valid_blocks == INPUTS_WANTED
    np.testing.assert_allclose(c.rpy, np.zeros(3))
    np.testing.assert_allclose(c.height, HEIGHT_INIT)
    c.reset()

  def test_calibration_low_speed_reject(self):
    """Data below MIN_SPEED_FILTER (from either speed source) must not produce valid blocks."""
    c = Calibrator(param_put=False)
    process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, cam_odo_speed=MIN_SPEED_FILTER - 1)
    process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, carstate_speed=MIN_SPEED_FILTER - 1)
    assert c.valid_blocks == 0
    np.testing.assert_allclose(c.rpy, np.zeros(3))
    np.testing.assert_allclose(c.height, HEIGHT_INIT)

  def test_calibration_yaw_rate_reject(self):
    """Data at MAX_YAW_RATE_FILTER must not produce valid blocks (turning too fast)."""
    c = Calibrator(param_put=False)
    process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, cam_odo_yr=MAX_YAW_RATE_FILTER)
    assert c.valid_blocks == 0
    np.testing.assert_allclose(c.rpy, np.zeros(3))
    np.testing.assert_allclose(c.height, HEIGHT_INIT)

  def test_calibration_speed_std_reject(self):
    """Very high translation std stops block accumulation at INPUTS_NEEDED."""
    c = Calibrator(param_put=False)
    process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, cam_odo_speed_std=1e3)
    assert c.valid_blocks == INPUTS_NEEDED
    np.testing.assert_allclose(c.rpy, np.zeros(3))

  def test_calibration_speed_std_height_reject(self):
    """Very high height std likewise stops block accumulation at INPUTS_NEEDED."""
    c = Calibrator(param_put=False)
    process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, cam_odo_height_std=1e3)
    assert c.valid_blocks == INPUTS_NEEDED
    np.testing.assert_allclose(c.rpy, np.zeros(3))

  def test_calibration_auto_reset(self):
    """Angle spread within limits keeps calibrating; exceeding pitch/yaw spread triggers a reset."""
    # Spread just inside the allowed pitch/yaw limits: stays calibrated, blocks keep growing.
    c = Calibrator(param_put=False)
    process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_NEEDED)
    assert c.valid_blocks == INPUTS_NEEDED
    np.testing.assert_allclose(c.rpy, [0.0, 0.0, 0.0], atol=1e-3)
    process_messages(c, [0.0, MAX_ALLOWED_PITCH_SPREAD*0.9, MAX_ALLOWED_YAW_SPREAD*0.9], BLOCK_SIZE + 10)
    assert c.valid_blocks == INPUTS_NEEDED + 1
    assert c.cal_status == log.LiveCalibrationData.Status.calibrated

    # Pitch spread above the limit: auto-reset to one block and recalibrating status.
    c = Calibrator(param_put=False)
    process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_NEEDED)
    assert c.valid_blocks == INPUTS_NEEDED
    np.testing.assert_allclose(c.rpy, [0.0, 0.0, 0.0])
    process_messages(c, [0.0, MAX_ALLOWED_PITCH_SPREAD*1.1, 0.0], BLOCK_SIZE + 10)
    assert c.valid_blocks == 1
    assert c.cal_status == log.LiveCalibrationData.Status.recalibrating
    np.testing.assert_allclose(c.rpy, [0.0, MAX_ALLOWED_PITCH_SPREAD*1.1, 0.0], atol=1e-2)

    # Yaw spread above the limit: same auto-reset behavior.
    c = Calibrator(param_put=False)
    process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_NEEDED)
    assert c.valid_blocks == INPUTS_NEEDED
    np.testing.assert_allclose(c.rpy, [0.0, 0.0, 0.0])
    process_messages(c, [0.0, 0.0, MAX_ALLOWED_YAW_SPREAD*1.1], BLOCK_SIZE + 10)
    assert c.valid_blocks == 1
    assert c.cal_status == log.LiveCalibrationData.Status.recalibrating
    np.testing.assert_allclose(c.rpy, [0.0, 0.0, MAX_ALLOWED_YAW_SPREAD*1.1], atol=1e-2)
||||
190
selfdrive/locationd/test/test_locationd_scenarios.py
Normal file
190
selfdrive/locationd/test/test_locationd_scenarios.py
Normal file
@@ -0,0 +1,190 @@
|
||||
import numpy as np
|
||||
from collections import defaultdict
|
||||
from enum import Enum
|
||||
|
||||
from openpilot.tools.lib.logreader import LogReader
|
||||
from openpilot.selfdrive.test.process_replay.migration import migrate_all
|
||||
from openpilot.selfdrive.test.process_replay.process_replay import replay_process_with_name
|
||||
|
||||
# TODO find a new segment to test
TEST_ROUTE = "4019fff6e54cf1c7|00000123--4bc0d95ef6/5"  # route/segment replayed by all scenarios
GPS_MESSAGES = ['gpsLocationExternal', 'gpsLocation']
# livePose fields compared between original and replayed logs.
# Values are lookup paths: string -> attribute access, int -> indexing.
SELECT_COMPARE_FIELDS = {
  'yaw_rate': ['angularVelocityDevice', 'z'],
  'roll': ['orientationNED', 'x'],
  'inputs_flag': ['inputsOK'],
  'sensors_flag': ['sensorsOK'],
}
JUNK_IDX = 100  # number of initial warm-up samples dropped from each series
CONSISTENT_SPIKES_COUNT = 10  # how many consecutive faulty messages the "consistent" scenarios inject
|
||||
|
||||
|
||||
class Scenario(Enum):
  """Named log mutations replayed against locationd (see run_scenarios)."""
  BASE = 'base'  # unmodified log
  GYRO_OFF = 'gyro_off'  # all gyroscope messages removed
  GYRO_SPIKE_MIDWAY = 'gyro_spike_midway'  # single gyro spike mid-segment
  GYRO_CONSISTENT_SPIKES = 'gyro_consistent_spikes'  # CONSISTENT_SPIKES_COUNT consecutive gyro spikes
  ACCEL_OFF = 'accel_off'  # all accelerometer messages removed
  ACCEL_SPIKE_MIDWAY = 'accel_spike_midway'  # single accelerometer spike mid-segment
  ACCEL_CONSISTENT_SPIKES = 'accel_consistent_spikes'  # CONSISTENT_SPIKES_COUNT consecutive accel spikes
  SENSOR_TIMING_SPIKE_MIDWAY = 'timing_spikes'  # single accelerometer timestamp shifted back 150ms
  SENSOR_TIMING_CONSISTENT_SPIKES = 'timing_consistent_spikes'  # CONSISTENT_SPIKES_COUNT shifted timestamps
|
||||
|
||||
|
||||
def get_select_fields_data(logs):
  """Collect the SELECT_COMPARE_FIELDS time series from livePose messages.

  Returns a mapping field-name -> float ndarray, with the first JUNK_IDX
  warm-up samples of each series dropped.
  """
  def extract(root, path):
    node = root
    for step in path:
      # string steps are attribute lookups, integer steps are indexing
      node = getattr(node, step) if isinstance(step, str) else node[step]
    return node

  series = defaultdict(list)
  for msg in logs:
    if msg.which() != 'livePose':
      continue
    pose = msg.livePose
    for name, path in SELECT_COMPARE_FIELDS.items():
      series[name].append(extract(pose, path))
  for name in series:
    series[name] = np.array(series[name][JUNK_IDX:], dtype=float)
  return series
|
||||
|
||||
|
||||
def modify_logs_midway(logs, which, count, fn):
  """Apply `fn` to builder copies of `count` messages of type `which`, starting midway.

  Messages of other types are untouched; the result is re-sorted by logMonoTime.
  """
  matched = [m for m in logs if m.which() == which]
  others = [m for m in logs if m.which() != which]
  mid = len(matched) // 2
  # Mutate at most `count` messages, clamped to what remains past the midpoint.
  for offset in range(min(count, len(matched) - mid)):
    builder = matched[mid + offset].as_builder()
    fn(builder)
    matched[mid + offset] = builder.as_reader()
  return sorted(others + matched, key=lambda m: m.logMonoTime)
|
||||
|
||||
|
||||
def run_scenarios(scenario, logs):
  """Mutate `logs` according to `scenario`, replay locationd over them, and
  return (original, replayed) SELECT_COMPARE_FIELDS data.

  Scenario.BASE (or any unknown scenario) leaves the logs untouched.
  """
  if scenario == Scenario.GYRO_OFF:
    logs = sorted((m for m in logs if m.which() != 'gyroscope'), key=lambda m: m.logMonoTime)

  elif scenario == Scenario.ACCEL_OFF:
    logs = sorted((m for m in logs if m.which() != 'accelerometer'), key=lambda m: m.logMonoTime)

  elif scenario in (Scenario.GYRO_SPIKE_MIDWAY, Scenario.GYRO_CONSISTENT_SPIKES):
    def gyro_spike(msg):
      msg.gyroscope.gyroUncalibrated.v[0] += 3.0
    n = 1 if scenario == Scenario.GYRO_SPIKE_MIDWAY else CONSISTENT_SPIKES_COUNT
    logs = modify_logs_midway(logs, 'gyroscope', n, gyro_spike)

  elif scenario in (Scenario.ACCEL_SPIKE_MIDWAY, Scenario.ACCEL_CONSISTENT_SPIKES):
    def acc_spike(msg):
      msg.accelerometer.acceleration.v[0] += 100.0
    n = 1 if scenario == Scenario.ACCEL_SPIKE_MIDWAY else CONSISTENT_SPIKES_COUNT
    logs = modify_logs_midway(logs, 'accelerometer', n, acc_spike)

  elif scenario in (Scenario.SENSOR_TIMING_SPIKE_MIDWAY, Scenario.SENSOR_TIMING_CONSISTENT_SPIKES):
    def timing_spike(msg):
      # Shift the sensor timestamp 150ms into the past.
      msg.accelerometer.timestamp -= int(0.150 * 1e9)
    n = 1 if scenario == Scenario.SENSOR_TIMING_SPIKE_MIDWAY else CONSISTENT_SPIKES_COUNT
    logs = modify_logs_midway(logs, 'accelerometer', n, timing_spike)

  replayed_logs = replay_process_with_name(name='locationd', lr=logs)
  return get_select_fields_data(logs), get_select_fields_data(replayed_logs)
|
||||
|
||||
|
||||
class TestLocationdScenarios:
  """
  Test locationd with different scenarios. In all these scenarios, we expect the following:
  - locationd kalman filter should never go unstable (we care mostly about yaw_rate, roll, gpsOK, inputsOK, sensorsOK)
  - faulty values should be ignored, with appropriate flags set
  """

  @classmethod
  def setup_class(cls):
    # Load and migrate the test segment once for all scenario tests.
    cls.logs = migrate_all(LogReader(TEST_ROUTE))

  def test_base(self):
    """
    Test: unchanged log
    Expected Result:
    - yaw_rate: unchanged
    - roll: unchanged
    """
    orig_data, replayed_data = run_scenarios(Scenario.BASE, self.logs)
    assert np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.35))
    assert np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.55))

  def test_gyro_off(self):
    """
    Test: no gyroscope message for the entire segment
    Expected Result:
    - yaw_rate: 0
    - roll: 0
    - sensorsOK: False
    """
    _, replayed_data = run_scenarios(Scenario.GYRO_OFF, self.logs)
    assert np.allclose(replayed_data['yaw_rate'], 0.0)
    assert np.allclose(replayed_data['roll'], 0.0)
    assert np.all(replayed_data['sensors_flag'] == 0.0)

  def test_gyro_spike(self):
    """
    Test: a gyroscope spike in the middle of the segment
    Expected Result:
    - yaw_rate: unchanged
    - roll: unchanged
    - inputsOK: False for some time after the spike, True for the rest
    """
    orig_data, replayed_data = run_scenarios(Scenario.GYRO_SPIKE_MIDWAY, self.logs)
    assert np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.35))
    assert np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.55))
    assert np.all(replayed_data['inputs_flag'] == orig_data['inputs_flag'])
    assert np.all(replayed_data['sensors_flag'] == orig_data['sensors_flag'])

  def test_consistent_gyro_spikes(self):
    """
    Test: consistent timing spikes for N gyroscope messages in the middle of the segment
    Expected Result: inputsOK becomes False after N of bad measurements
    """
    orig_data, replayed_data = run_scenarios(Scenario.GYRO_CONSISTENT_SPIKES, self.logs)
    # Sample indices where inputsOK falls and recovers; specific to this segment's replay.
    assert np.diff(replayed_data['inputs_flag'])[501] == -1.0
    assert np.diff(replayed_data['inputs_flag'])[708] == 1.0

  def test_accel_off(self):
    """
    Test: no accelerometer message for the entire segment
    Expected Result:
    - yaw_rate: 0
    - roll: 0
    - sensorsOK: False
    """
    _, replayed_data = run_scenarios(Scenario.ACCEL_OFF, self.logs)
    assert np.allclose(replayed_data['yaw_rate'], 0.0)
    assert np.allclose(replayed_data['roll'], 0.0)
    assert np.all(replayed_data['sensors_flag'] == 0.0)

  def test_accel_spike(self):
    """
    ToDo:
    Test: an accelerometer spike in the middle of the segment
    Expected Result: Right now, the kalman filter is not robust to small spikes like it is to gyroscope spikes.
    """
    orig_data, replayed_data = run_scenarios(Scenario.ACCEL_SPIKE_MIDWAY, self.logs)
    assert np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.35))
    assert np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.55))

  def test_single_timing_spike(self):
    """
    Test: timing of 150ms off for the single accelerometer message in the middle of the segment
    Expected Result: the message is ignored, and inputsOK is False for that time
    """
    orig_data, replayed_data = run_scenarios(Scenario.SENSOR_TIMING_SPIKE_MIDWAY, self.logs)
    assert np.all(replayed_data['inputs_flag'] == orig_data['inputs_flag'])
    assert np.all(replayed_data['sensors_flag'] == orig_data['sensors_flag'])

  def test_consistent_timing_spikes(self):
    """
    Test: consistent timing spikes for N accelerometer messages in the middle of the segment
    Expected Result: inputsOK becomes False after N of bad measurements
    """
    orig_data, replayed_data = run_scenarios(Scenario.SENSOR_TIMING_CONSISTENT_SPIKES, self.logs)
    # Sample indices where inputsOK falls and recovers; specific to this segment's replay.
    assert np.diff(replayed_data['inputs_flag'])[501] == -1.0
    assert np.diff(replayed_data['inputs_flag'])[707] == 1.0
|
||||
Reference in New Issue
Block a user