Release 260111

This commit is contained in:
Comma Device
2026-01-11 18:23:29 +08:00
commit 3721ecbf8a
2601 changed files with 855070 additions and 0 deletions

0
selfdrive/ui/__init__.py Normal file
View File

BIN
selfdrive/ui/_spinner Executable file

Binary file not shown.

BIN
selfdrive/ui/_text Executable file

Binary file not shown.

233
selfdrive/ui/carrot.moc Normal file
View File

@@ -0,0 +1,233 @@
/****************************************************************************
** Meta object code from reading C++ file 'carrot.cc'
**
** Created by: The Qt Meta Object Compiler version 67 (Qt 5.12.8)
**
** WARNING! All changes made in this file will be lost!
*****************************************************************************/
#include <QtCore/qbytearray.h>
#include <QtCore/qmetatype.h>
#if !defined(Q_MOC_OUTPUT_REVISION)
#error "The header file 'carrot.cc' doesn't include <QObject>."
#elif Q_MOC_OUTPUT_REVISION != 67
#error "This file was generated using the moc from 5.12.8. It"
#error "cannot be used with the include files from this version of Qt."
#error "(The moc has changed too much.)"
#endif
QT_BEGIN_MOC_NAMESPACE
QT_WARNING_PUSH
QT_WARNING_DISABLE_DEPRECATED
// ---- Generated meta-object tables for DrawPlot (Qt moc output). ----
// DrawPlot declares no signals, slots, or properties, so every table below is
// the minimal stub. Do not edit by hand: moc regenerates this file.
// String table: header array of QByteArrayData followed by the raw class-name
// characters ("DrawPlot" + NUL terminator = 9 bytes).
struct qt_meta_stringdata_DrawPlot_t {
QByteArrayData data[1];
char stringdata0[9];
};
// Encodes each string's data pointer as an offset from its QByteArrayData
// header into stringdata0, allowing relocation-free static initialization.
#define QT_MOC_LITERAL(idx, ofs, len) \
Q_STATIC_BYTE_ARRAY_DATA_HEADER_INITIALIZER_WITH_OFFSET(len, \
qptrdiff(offsetof(qt_meta_stringdata_DrawPlot_t, stringdata0) + ofs \
- idx * sizeof(QByteArrayData)) \
)
static const qt_meta_stringdata_DrawPlot_t qt_meta_stringdata_DrawPlot = {
{
QT_MOC_LITERAL(0, 0, 8) // "DrawPlot"
},
"DrawPlot"
};
#undef QT_MOC_LITERAL
// Meta-data word array (revision 8 layout): all counts are zero because the
// class exposes nothing through the meta-object system.
static const uint qt_meta_data_DrawPlot[] = {
// content:
8, // revision
0, // classname
0, 0, // classinfo
0, 0, // methods
0, 0, // properties
0, 0, // enums/sets
0, 0, // constructors
0, 0, // flags
0, // signalCount
0 // eod
};
// No invokables to dispatch; all parameters intentionally unused.
void DrawPlot::qt_static_metacall(QObject *_o, QMetaObject::Call _c, int _id, void **_a)
{
Q_UNUSED(_o);
Q_UNUSED(_id);
Q_UNUSED(_c);
Q_UNUSED(_a);
}
// Static meta object: chains to QObject's meta object as the superclass.
QT_INIT_METAOBJECT const QMetaObject DrawPlot::staticMetaObject = { {
&QObject::staticMetaObject,
qt_meta_stringdata_DrawPlot.data,
qt_meta_data_DrawPlot,
qt_static_metacall,
nullptr,
nullptr
} };
// Returns a dynamic meta object if one was installed at runtime (e.g. by QML),
// otherwise the static one above.
const QMetaObject *DrawPlot::metaObject() const
{
return QObject::d_ptr->metaObject ? QObject::d_ptr->dynamicMetaObject() : &staticMetaObject;
}
// String-based cast used by qobject_cast; falls through to the base class.
void *DrawPlot::qt_metacast(const char *_clname)
{
if (!_clname) return nullptr;
if (!strcmp(_clname, qt_meta_stringdata_DrawPlot.stringdata0))
return static_cast<void*>(this);
return QObject::qt_metacast(_clname);
}
// No local methods/properties: delegate entirely to QObject.
int DrawPlot::qt_metacall(QMetaObject::Call _c, int _id, void **_a)
{
_id = QObject::qt_metacall(_c, _id, _a);
return _id;
}
// ---- Generated meta-object tables for ModelDrawer (Qt moc output; do not
// edit by hand). Identical minimal stub pattern as DrawPlot: no signals,
// slots, or properties. String storage is "ModelDrawer" + NUL (12 bytes).
struct qt_meta_stringdata_ModelDrawer_t {
QByteArrayData data[1];
char stringdata0[12];
};
#define QT_MOC_LITERAL(idx, ofs, len) \
Q_STATIC_BYTE_ARRAY_DATA_HEADER_INITIALIZER_WITH_OFFSET(len, \
qptrdiff(offsetof(qt_meta_stringdata_ModelDrawer_t, stringdata0) + ofs \
- idx * sizeof(QByteArrayData)) \
)
static const qt_meta_stringdata_ModelDrawer_t qt_meta_stringdata_ModelDrawer = {
{
QT_MOC_LITERAL(0, 0, 11) // "ModelDrawer"
},
"ModelDrawer"
};
#undef QT_MOC_LITERAL
// Revision-8 meta-data array; all counts zero (nothing exposed to moc).
static const uint qt_meta_data_ModelDrawer[] = {
// content:
8, // revision
0, // classname
0, 0, // classinfo
0, 0, // methods
0, 0, // properties
0, 0, // enums/sets
0, 0, // constructors
0, // flags
0, // signalCount
0 // eod
};
// No invokables to dispatch.
void ModelDrawer::qt_static_metacall(QObject *_o, QMetaObject::Call _c, int _id, void **_a)
{
Q_UNUSED(_o);
Q_UNUSED(_id);
Q_UNUSED(_c);
Q_UNUSED(_a);
}
QT_INIT_METAOBJECT const QMetaObject ModelDrawer::staticMetaObject = { {
&QObject::staticMetaObject,
qt_meta_stringdata_ModelDrawer.data,
qt_meta_data_ModelDrawer,
qt_static_metacall,
nullptr,
nullptr
} };
const QMetaObject *ModelDrawer::metaObject() const
{
return QObject::d_ptr->metaObject ? QObject::d_ptr->dynamicMetaObject() : &staticMetaObject;
}
void *ModelDrawer::qt_metacast(const char *_clname)
{
if (!_clname) return nullptr;
if (!strcmp(_clname, qt_meta_stringdata_ModelDrawer.stringdata0))
return static_cast<void*>(this);
return QObject::qt_metacast(_clname);
}
// Delegate entirely to QObject: no local meta methods or properties.
int ModelDrawer::qt_metacall(QMetaObject::Call _c, int _id, void **_a)
{
_id = QObject::qt_metacall(_c, _id, _a);
return _id;
}
// ---- Generated meta-object tables for DrawCarrot (Qt moc output; do not
// edit by hand). Same minimal stub pattern as the classes above: no signals,
// slots, or properties. String storage is "DrawCarrot" + NUL (11 bytes).
struct qt_meta_stringdata_DrawCarrot_t {
QByteArrayData data[1];
char stringdata0[11];
};
#define QT_MOC_LITERAL(idx, ofs, len) \
Q_STATIC_BYTE_ARRAY_DATA_HEADER_INITIALIZER_WITH_OFFSET(len, \
qptrdiff(offsetof(qt_meta_stringdata_DrawCarrot_t, stringdata0) + ofs \
- idx * sizeof(QByteArrayData)) \
)
static const qt_meta_stringdata_DrawCarrot_t qt_meta_stringdata_DrawCarrot = {
{
QT_MOC_LITERAL(0, 0, 10) // "DrawCarrot"
},
"DrawCarrot"
};
#undef QT_MOC_LITERAL
// Revision-8 meta-data array; all counts zero (nothing exposed to moc).
static const uint qt_meta_data_DrawCarrot[] = {
// content:
8, // revision
0, // classname
0, 0, // classinfo
0, 0, // methods
0, 0, // properties
0, 0, // enums/sets
0, 0, // constructors
0, // flags
0, // signalCount
0 // eod
};
// No invokables to dispatch.
void DrawCarrot::qt_static_metacall(QObject *_o, QMetaObject::Call _c, int _id, void **_a)
{
Q_UNUSED(_o);
Q_UNUSED(_id);
Q_UNUSED(_c);
Q_UNUSED(_a);
}
QT_INIT_METAOBJECT const QMetaObject DrawCarrot::staticMetaObject = { {
&QObject::staticMetaObject,
qt_meta_stringdata_DrawCarrot.data,
qt_meta_data_DrawCarrot,
qt_static_metacall,
nullptr,
nullptr
} };
const QMetaObject *DrawCarrot::metaObject() const
{
return QObject::d_ptr->metaObject ? QObject::d_ptr->dynamicMetaObject() : &staticMetaObject;
}
void *DrawCarrot::qt_metacast(const char *_clname)
{
if (!_clname) return nullptr;
if (!strcmp(_clname, qt_meta_stringdata_DrawCarrot.stringdata0))
return static_cast<void*>(this);
return QObject::qt_metacast(_clname);
}
// Delegate entirely to QObject: no local meta methods or properties.
int DrawCarrot::qt_metacall(QMetaObject::Call _c, int _id, void **_a)
{
_id = QObject::qt_metacall(_c, _id, _a);
return _id;
}
QT_WARNING_POP
QT_END_MOC_NAMESPACE

View File

@@ -0,0 +1,4 @@
#!/usr/bin/env bash
# Boot shim: enter the openpilot checkout and replace this shell with the
# real launcher. Guard the cd (ShellCheck SC2164): if /data/openpilot is
# missing we must not exec a launch script from whatever the cwd happens to be.
cd /data/openpilot || exit 1
exec ./launch_openpilot.sh

View File

@@ -0,0 +1,93 @@
/****************************************************************************
** Meta object code from reading C++ file 'onroad_home.cc'
**
** Created by: The Qt Meta Object Compiler version 67 (Qt 5.12.8)
**
** WARNING! All changes made in this file will be lost!
*****************************************************************************/
#include <QtCore/qbytearray.h>
#include <QtCore/qmetatype.h>
#if !defined(Q_MOC_OUTPUT_REVISION)
#error "The header file 'onroad_home.cc' doesn't include <QObject>."
#elif Q_MOC_OUTPUT_REVISION != 67
#error "This file was generated using the moc from 5.12.8. It"
#error "cannot be used with the include files from this version of Qt."
#error "(The moc has changed too much.)"
#endif
QT_BEGIN_MOC_NAMESPACE
QT_WARNING_PUSH
QT_WARNING_DISABLE_DEPRECATED
// ---- Generated meta-object tables for OverlayDialog (Qt moc output; do not
// edit by hand). Minimal stub: no signals, slots, or properties. Unlike the
// classes in carrot.moc, OverlayDialog derives from QWidget, so all fallback
// dispatch below goes through QWidget. String storage is "OverlayDialog" +
// NUL (14 bytes).
struct qt_meta_stringdata_OverlayDialog_t {
QByteArrayData data[1];
char stringdata0[14];
};
// Offset-encoded QByteArrayData initializer (relocation-free static data).
#define QT_MOC_LITERAL(idx, ofs, len) \
Q_STATIC_BYTE_ARRAY_DATA_HEADER_INITIALIZER_WITH_OFFSET(len, \
qptrdiff(offsetof(qt_meta_stringdata_OverlayDialog_t, stringdata0) + ofs \
- idx * sizeof(QByteArrayData)) \
)
static const qt_meta_stringdata_OverlayDialog_t qt_meta_stringdata_OverlayDialog = {
{
QT_MOC_LITERAL(0, 0, 13) // "OverlayDialog"
},
"OverlayDialog"
};
#undef QT_MOC_LITERAL
// Revision-8 meta-data array; all counts zero (nothing exposed to moc).
static const uint qt_meta_data_OverlayDialog[] = {
// content:
8, // revision
0, // classname
0, 0, // classinfo
0, 0, // methods
0, 0, // properties
0, 0, // enums/sets
0, 0, // constructors
0, // flags
0, // signalCount
0 // eod
};
// No invokables to dispatch.
void OverlayDialog::qt_static_metacall(QObject *_o, QMetaObject::Call _c, int _id, void **_a)
{
Q_UNUSED(_o);
Q_UNUSED(_id);
Q_UNUSED(_c);
Q_UNUSED(_a);
}
// Superclass link points at QWidget's meta object.
QT_INIT_METAOBJECT const QMetaObject OverlayDialog::staticMetaObject = { {
&QWidget::staticMetaObject,
qt_meta_stringdata_OverlayDialog.data,
qt_meta_data_OverlayDialog,
qt_static_metacall,
nullptr,
nullptr
} };
const QMetaObject *OverlayDialog::metaObject() const
{
return QObject::d_ptr->metaObject ? QObject::d_ptr->dynamicMetaObject() : &staticMetaObject;
}
void *OverlayDialog::qt_metacast(const char *_clname)
{
if (!_clname) return nullptr;
if (!strcmp(_clname, qt_meta_stringdata_OverlayDialog.stringdata0))
return static_cast<void*>(this);
return QWidget::qt_metacast(_clname);
}
// Delegate entirely to QWidget: no local meta methods or properties.
int OverlayDialog::qt_metacall(QMetaObject::Call _c, int _id, void **_a)
{
_id = QWidget::qt_metacall(_c, _id, _a);
return _id;
}
QT_WARNING_POP
QT_END_MOC_NAMESPACE

View File

@@ -0,0 +1,20 @@
import os
from cffi import FFI
import sip
from openpilot.common.ffi_wrapper import suffix
from openpilot.common.basedir import BASEDIR
def get_ffi():
  """Open the libpython_helpers shared library via cffi.

  Returns a (ffi, lib) pair where lib exposes set_main_window(void*).
  Indentation reconstructed: the rendered diff had stripped it.
  """
  lib = os.path.join(BASEDIR, "selfdrive", "ui", "qt", "libpython_helpers" + suffix())
  ffi = FFI()
  ffi.cdef("void set_main_window(void *w);")
  return ffi, ffi.dlopen(lib)


def set_main_window(widget):
  """Hand a PyQt widget to the native UI layer as the main window.

  sip.unwrapinstance yields the underlying C++ pointer, which is cast to
  void* for the C ABI call.
  """
  ffi, lib = get_ffi()
  lib.set_main_window(ffi.cast('void*', sip.unwrapinstance(widget)))

BIN
selfdrive/ui/qt/spinner_larch64 Executable file

Binary file not shown.

BIN
selfdrive/ui/qt/text_larch64 Executable file

Binary file not shown.

260
selfdrive/ui/soundd.py Normal file
View File

@@ -0,0 +1,260 @@
import math
import numpy as np
import time
import wave
from cereal import car, messaging
from openpilot.common.basedir import BASEDIR
from openpilot.common.filter_simple import FirstOrderFilter
from openpilot.common.params import Params
from openpilot.common.realtime import Ratekeeper
from openpilot.common.retry import retry
from openpilot.common.swaglog import cloudlog
from openpilot.system import micd
SAMPLE_RATE = 48000
SAMPLE_BUFFER = 4096 # (approx 100ms)
MAX_VOLUME = 1.0
MIN_VOLUME = 0.1
SELFDRIVE_STATE_TIMEOUT = 5 # 5 seconds
FILTER_DT = 1. / (micd.SAMPLE_RATE / micd.FFT_SAMPLES)
AMBIENT_DB = 30 # DB where MIN_VOLUME is applied
DB_SCALE = 30 # AMBIENT_DB + DB_SCALE is where MAX_VOLUME is applied

AudibleAlert = car.CarControl.HUDControl.AudibleAlert

# Engage/disengage/reverse chimes share one user-adjustable gain
# (percent param -> 0..1). Read it once at import time instead of
# constructing Params and re-reading the same key three times.
_ENGAGE_VOLUME = float(Params().get_int("SoundVolumeAdjustEngage")) / 100.

sound_list: dict[int, tuple[str, int | None, float]] = {
  # AudibleAlert, file name, play count (none for infinite)
  AudibleAlert.engage: ("engage.wav", 1, _ENGAGE_VOLUME),
  AudibleAlert.disengage: ("disengage.wav", 1, _ENGAGE_VOLUME),
  AudibleAlert.refuse: ("refuse.wav", 1, MAX_VOLUME),
  AudibleAlert.prompt: ("prompt.wav", 1, MAX_VOLUME),
  AudibleAlert.promptRepeat: ("prompt.wav", None, MAX_VOLUME),
  AudibleAlert.promptDistracted: ("prompt_distracted.wav", None, MAX_VOLUME),
  AudibleAlert.warningSoft: ("warning_soft.wav", None, MAX_VOLUME),
  AudibleAlert.warningImmediate: ("warning_immediate.wav", None, MAX_VOLUME),
  AudibleAlert.longEngaged: ("tici_engaged.wav", None, MAX_VOLUME),
  AudibleAlert.longDisengaged: ("tici_disengaged.wav", None, MAX_VOLUME),
  AudibleAlert.trafficSignGreen: ("traffic_sign_green.wav", None, MAX_VOLUME),
  AudibleAlert.trafficSignChanged: ("traffic_sign_changed.wav", None, MAX_VOLUME),
  AudibleAlert.trafficError: ("audio_traffic_error.wav", None, MAX_VOLUME),
  AudibleAlert.bsdWarning: ("audio_car_watchout.wav", None, MAX_VOLUME),
  AudibleAlert.laneChange: ("audio_lane_change.wav", None, MAX_VOLUME),
  AudibleAlert.stopStop: ("audio_stopstop.wav", None, MAX_VOLUME),
  AudibleAlert.stopping: ("audio_stopping.wav", None, MAX_VOLUME),
  AudibleAlert.autoHold: ("audio_auto_hold.wav", None, MAX_VOLUME),
  AudibleAlert.engage2: ("audio_engage.wav", None, MAX_VOLUME),
  AudibleAlert.disengage2: ("audio_disengage.wav", None, MAX_VOLUME),
  AudibleAlert.speedDown: ("audio_speed_down.wav", None, MAX_VOLUME),
  AudibleAlert.audioTurn: ("audio_turn.wav", None, MAX_VOLUME),
  AudibleAlert.reverseGear: ("reverse_gear.wav", 1, _ENGAGE_VOLUME),
  AudibleAlert.audio1: ("audio_1.wav", None, MAX_VOLUME),
  AudibleAlert.audio2: ("audio_2.wav", None, MAX_VOLUME),
  AudibleAlert.audio3: ("audio_3.wav", None, MAX_VOLUME),
  AudibleAlert.audio4: ("audio_4.wav", None, MAX_VOLUME),
  AudibleAlert.audio5: ("audio_5.wav", None, MAX_VOLUME),
  AudibleAlert.audio6: ("audio_6.wav", None, MAX_VOLUME),
  AudibleAlert.audio7: ("audio_7.wav", None, MAX_VOLUME),
  AudibleAlert.audio8: ("audio_8.wav", None, MAX_VOLUME),
  AudibleAlert.audio9: ("audio_9.wav", None, MAX_VOLUME),
  AudibleAlert.audio10: ("audio_10.wav", None, MAX_VOLUME),
}
def check_selfdrive_timeout_alert(sm):
  """Return True while selfdriveState has been silent for more than
  SELFDRIVE_STATE_TIMEOUT seconds (but less than 10 s past the timeout),
  while openpilot was engaged. Indentation reconstructed from the diff dump.
  """
  ss_missing = time.monotonic() - sm.recv_time['selfdriveState']
  if ss_missing > SELFDRIVE_STATE_TIMEOUT:
    # Only alert for a bounded window so a dead process doesn't chime forever.
    if sm['selfdriveState'].enabled and (ss_missing - SELFDRIVE_STATE_TIMEOUT) < 10:
      return True
  return False
def linear_resample(samples, original_rate, new_rate):
  """Linearly resample a 1-D signal from original_rate to new_rate.

  Returns the input unchanged when the rates match, otherwise a float32
  array of int(len(samples) * new_rate / original_rate) samples.

  Uses np.interp instead of the previous per-sample Python loop: it is
  vectorized, and it also fixes an edge-case bug where a resample position
  landing at/after the last input sample produced 0 (both interpolation
  weights collapsed) instead of the last sample's value.
  """
  if original_rate == new_rate:
    return samples
  # Position of each output sample expressed in input-sample coordinates.
  resampling_factor = float(new_rate) / original_rate
  num_resampled_samples = int(len(samples) * resampling_factor)
  positions = np.arange(num_resampled_samples, dtype=np.float64) / resampling_factor
  # np.interp clamps positions outside [0, len-1] to the edge samples.
  resampled = np.interp(positions, np.arange(len(samples)), samples)
  return resampled.astype(np.float32)
class Soundd:
  """Plays alert sounds through sounddevice, scaling volume with ambient noise.

  Indentation was stripped in the rendered diff; the structure below is
  reconstructed from the statement order. Fixes applied: wave file handles
  are now closed (previously leaked), and get_sound_data re-derives its read
  position each pass so short looping sounds wrap correctly.
  """

  def __init__(self):
    self.params = Params()
    self.soundVolumeAdjust = 1.0
    self.carrot_count_down = 0
    self.lang = self.params.get('LanguageSetting', encoding='utf8')
    self.load_sounds()
    self.current_alert = AudibleAlert.none
    self.current_volume = MIN_VOLUME
    self.current_sound_frame = 0
    self.selfdrive_timeout_alert = False
    self.spl_filter_weighted = FirstOrderFilter(0, 2.5, FILTER_DT, initialized=False)

  def load_sounds(self):
    """Decode every alert wav (language-dependent directory), mix stereo to
    mono, resample to SAMPLE_RATE, and cache as float32 in [-1, 1]."""
    self.loaded_sounds: dict[int, np.ndarray] = {}
    # Load all sounds
    for sound in sound_list:
      filename, play_count, volume = sound_list[sound]
      if self.lang == "main_ko":
        sound_path = BASEDIR + "/selfdrive/assets/sounds/" + filename
      elif self.lang == "main_zh-CHS":
        sound_path = BASEDIR + "/selfdrive/assets/sounds_chs/" + filename
      else:
        sound_path = BASEDIR + "/selfdrive/assets/sounds_eng/" + filename
      # Context manager closes the handle; the original leaked one per sound.
      with wave.open(sound_path, 'r') as wavefile:
        assert wavefile.getsampwidth() == 2
        actual_sample_rate = wavefile.getframerate()
        nchannels = wavefile.getnchannels()
        assert nchannels in [1, 2]
        length = wavefile.getnframes()
        frames = wavefile.readframes(length)
      samples = np.frombuffer(frames, dtype=np.int16)
      if nchannels == 2:
        # Average interleaved L/R channels down to mono.
        samples = samples[0::2] / 2 + samples[1::2] / 2
      resampled_samples = linear_resample(samples, actual_sample_rate, SAMPLE_RATE) * volume
      self.loaded_sounds[sound] = resampled_samples.astype(np.float32) / (2**16/2)

  def get_sound_data(self, frames): # get "frames" worth of data from the current alert sound, looping when required
    ret = np.zeros(frames, dtype=np.float32)
    if self.current_alert != AudibleAlert.none:
      num_loops = sound_list[self.current_alert][1]
      sound_data = self.loaded_sounds[self.current_alert]
      written_frames = 0
      current_sound_frame = self.current_sound_frame % len(sound_data)
      loops = self.current_sound_frame // len(sound_data)
      while written_frames < frames and (num_loops is None or loops < num_loops):
        available_frames = sound_data.shape[0] - current_sound_frame
        frames_to_write = min(available_frames, frames - written_frames)
        ret[written_frames:written_frames+frames_to_write] = sound_data[current_sound_frame:current_sound_frame+frames_to_write]
        written_frames += frames_to_write
        self.current_sound_frame += frames_to_write
        # Re-derive read position and completed-loop count so that sounds
        # shorter than one buffer wrap instead of replaying the same chunk
        # (the original never updated these inside the loop, so the
        # play-count limit was also never enforced within a callback).
        current_sound_frame = self.current_sound_frame % len(sound_data)
        loops = self.current_sound_frame // len(sound_data)
    return ret * self.current_volume

  def callback(self, data_out: np.ndarray, frames: int, time, status) -> None:
    """sounddevice output callback: fill the mono channel with alert audio."""
    if status:
      cloudlog.warning(f"soundd stream over/underflow: {status}")
    data_out[:frames, 0] = self.get_sound_data(frames)

  def update_alert(self, new_alert):
    # Don't cut off a single-shot alert before it has played at least once.
    current_alert_played_once = self.current_alert == AudibleAlert.none or self.current_sound_frame > len(self.loaded_sounds[self.current_alert])
    if self.current_alert != new_alert and (new_alert != AudibleAlert.none or current_alert_played_once):
      self.current_alert = new_alert
      self.current_sound_frame = 0

  def update_carrot_alert(self, sm, new_alert):
    """Overlay carrot navigation countdown sounds when no alert is active."""
    if new_alert == AudibleAlert.none:
      count_down = sm['carrotMan'].leftSec
      if self.carrot_count_down != count_down:
        self.carrot_count_down = count_down
        if count_down == 0:
          new_alert = AudibleAlert.longDisengaged
        elif 0 < count_down <= 10:
          # audio1..audio10 speak the remaining seconds.
          new_alert = getattr(AudibleAlert, f'audio{count_down}')
        elif count_down == 11:
          new_alert = AudibleAlert.promptDistracted
    return new_alert

  def get_audible_alert(self, sm):
    if sm.updated['selfdriveState']:
      new_alert = sm['selfdriveState'].alertSound.raw
      new_alert = self.update_carrot_alert(sm, new_alert)
      self.update_alert(new_alert)
    elif check_selfdrive_timeout_alert(sm):
      self.update_alert(AudibleAlert.warningImmediate)
      self.selfdrive_timeout_alert = True
    elif self.selfdrive_timeout_alert:
      # selfdriveState came back: silence the timeout warning once.
      self.update_alert(AudibleAlert.none)
      self.selfdrive_timeout_alert = False

  def calculate_volume(self, weighted_db):
    # Map ambient dB linearly into [MIN_VOLUME, MAX_VOLUME], then onto a
    # power-of-ten loudness curve (10**(v-1), i.e. 0.1 .. 1.0).
    volume = ((weighted_db - AMBIENT_DB) / DB_SCALE) * (MAX_VOLUME - MIN_VOLUME) + MIN_VOLUME
    return math.pow(10, (np.clip(volume, MIN_VOLUME, MAX_VOLUME) - 1))

  @retry(attempts=7, delay=3)
  def get_stream(self, sd):
    # reload sounddevice to reinitialize portaudio
    sd._terminate()
    sd._initialize()
    return sd.OutputStream(channels=1, samplerate=SAMPLE_RATE, callback=self.callback, blocksize=SAMPLE_BUFFER)

  def soundd_thread(self):
    # sounddevice must be imported after forking processes
    import sounddevice as sd
    sm = messaging.SubMaster(['selfdriveState', 'soundPressure', 'carrotMan'])
    with self.get_stream(sd) as stream:
      rk = Ratekeeper(20)
      cloudlog.info(f"soundd stream started: {stream.samplerate=} {stream.channels=} {stream.dtype=} {stream.device=}, {stream.blocksize=}")
      while True:
        sm.update(0)
        if sm.updated['soundPressure'] and self.current_alert == AudibleAlert.none: # only update volume filter when not playing alert
          self.spl_filter_weighted.update(sm["soundPressure"].soundPressureWeightedDb)
          self.current_volume = self.calculate_volume(float(self.spl_filter_weighted.x)) * self.soundVolumeAdjust
        self.get_audible_alert(sm)
        rk.keep_time()
        assert stream.active
        # Re-read the user volume setting each iteration so changes apply live.
        # NOTE(review): loop placement of these two statements reconstructed
        # from statement order (indentation was lost in the dump) -- confirm.
        self.soundVolumeAdjust = float(self.params.get_int("SoundVolumeAdjust"))/100.
def main():
  """Entry point: construct Soundd and run its blocking audio loop."""
  s = Soundd()
  s.soundd_thread()


if __name__ == "__main__":
  main()

7
selfdrive/ui/spinner Executable file
View File

@@ -0,0 +1,7 @@
#!/bin/sh
# Wrapper for the UI spinner. On a comma device (marked by /TICI), lazily
# install the prebuilt larch64 binary on first run, then exec it so the
# spinner replaces this shell. "$1" forwards the optional status text.
if [ -f /TICI ] && [ ! -f _spinner ]; then
cp qt/spinner_larch64 _spinner
fi
exec ./_spinner "$1"

View File

22
selfdrive/ui/tests/body.py Executable file
View File

@@ -0,0 +1,22 @@
#!/usr/bin/env python3
import time
import cereal.messaging as messaging
if __name__ == "__main__":
while True:
pm = messaging.PubMaster(['carParams', 'carState'])
batt = 1.
while True:
msg = messaging.new_message('carParams')
msg.carParams.brand = "body"
msg.carParams.notCar = True
pm.send('carParams', msg)
for b in range(100, 0, -1):
msg = messaging.new_message('carState')
msg.carState.charging = True
msg.carState.fuelGauge = b / 100.
pm.send('carState', msg)
time.sleep(0.1)
time.sleep(1)

View File

@@ -0,0 +1,18 @@
#!/usr/bin/env bash
# Build a throwaway English translation (main_test_en.ts/.qm) in which every
# unfinished string is replaced by a marker, so UI tests can detect
# untranslated text. All expansions are now quoted (ShellCheck SC2086):
# UI_DIR may contain spaces and previously broke rm/sed/lupdate/lrelease.
set -e

UI_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"/..
TEST_TEXT="(WRAPPED_SOURCE_TEXT)"
TEST_TS_FILE="$UI_DIR/translations/main_test_en.ts"
TEST_QM_FILE="$UI_DIR/translations/main_test_en.qm"

# translation strings
UNFINISHED="<translation type=\"unfinished\"><\/translation>"
TRANSLATED="<translation>$TEST_TEXT<\/translation>"

mkdir -p "$UI_DIR/translations"
rm -f "$TEST_TS_FILE" "$TEST_QM_FILE"
lupdate -recursive "$UI_DIR" -ts "$TEST_TS_FILE"
sed -i "s/$UNFINISHED/$TRANSLATED/" "$TEST_TS_FILE"
lrelease "$TEST_TS_FILE"

View File

@@ -0,0 +1,36 @@
#!/usr/bin/env python3
import os
import sys
import time
import json
from openpilot.common.basedir import BASEDIR
from openpilot.common.params import Params
from openpilot.selfdrive.selfdrived.alertmanager import set_offroad_alert
if __name__ == "__main__":
params = Params()
with open(os.path.join(BASEDIR, "selfdrive/selfdrived/alerts_offroad.json")) as f:
offroad_alerts = json.load(f)
t = 10 if len(sys.argv) < 2 else int(sys.argv[1])
while True:
print("setting alert update")
params.put_bool("UpdateAvailable", True)
r = open(os.path.join(BASEDIR, "RELEASES.md")).read()
r = r[:r.find('\n\n')] # Slice latest release notes
params.put("UpdaterNewReleaseNotes", r + "\n")
time.sleep(t)
params.put_bool("UpdateAvailable", False)
# cycle through normal alerts
for a in offroad_alerts:
print("setting alert:", a)
set_offroad_alert(a, True)
time.sleep(t)
set_offroad_alert(a, False)
print("no alert")
time.sleep(t)

View File

@@ -0,0 +1,35 @@
from cereal import car
from cereal import messaging
from cereal.messaging import SubMaster, PubMaster
from openpilot.selfdrive.ui.soundd import SELFDRIVE_STATE_TIMEOUT, check_selfdrive_timeout_alert
import time
AudibleAlert = car.CarControl.HUDControl.AudibleAlert
class TestSoundd:
  """Exercises soundd's selfdriveState-timeout detection over real messaging
  sockets. Indentation reconstructed from the diff dump."""

  def test_check_selfdrive_timeout_alert(self):
    sm = SubMaster(['selfdriveState'])
    pm = PubMaster(['selfdriveState'])

    # While fresh messages keep arriving, no timeout alert should fire.
    for _ in range(100):
      cs = messaging.new_message('selfdriveState')
      cs.selfdriveState.enabled = True
      pm.send("selfdriveState", cs)
      time.sleep(0.01)
      sm.update(0)
    assert not check_selfdrive_timeout_alert(sm)

    # Go silent for > SELFDRIVE_STATE_TIMEOUT seconds; the alert must fire.
    for _ in range(SELFDRIVE_STATE_TIMEOUT * 110):
      sm.update(0)
      time.sleep(0.01)
    assert check_selfdrive_timeout_alert(sm)

  # TODO: add test with micd for checking that soundd actually outputs sounds
# TODO: add test with micd for checking that soundd actually outputs sounds

View File

@@ -0,0 +1,117 @@
import pytest
import json
import os
import re
import xml.etree.ElementTree as ET
import string
import requests
from parameterized import parameterized_class
from openpilot.selfdrive.ui.update_translations import TRANSLATIONS_DIR, LANGUAGES_FILE
# Map of language name -> translation file stem, loaded once at import time.
with open(LANGUAGES_FILE) as f:
  translation_files = json.load(f)

UNFINISHED_TRANSLATION_TAG = "<translation type=\"unfinished\"" # non-empty translations can be marked unfinished
LOCATION_TAG = "<location "
FORMAT_ARG = re.compile("%[0-9]+")
@parameterized_class(("name", "file"), translation_files.items())
class TestTranslations:
name: str
file: str
@staticmethod
def _read_translation_file(path, file):
tr_file = os.path.join(path, f"{file}.ts")
with open(tr_file) as f:
return f.read()
def test_missing_translation_files(self):
assert os.path.exists(os.path.join(TRANSLATIONS_DIR, f"{self.file}.ts")), \
f"{self.name} has no XML translation file, run selfdrive/ui/update_translations.py"
@pytest.mark.skip("Only test unfinished translations before going to release")
def test_unfinished_translations(self):
cur_translations = self._read_translation_file(TRANSLATIONS_DIR, self.file)
assert UNFINISHED_TRANSLATION_TAG not in cur_translations, \
f"{self.file} ({self.name}) translation file has unfinished translations. Finish translations or mark them as completed in Qt Linguist"
def test_vanished_translations(self):
cur_translations = self._read_translation_file(TRANSLATIONS_DIR, self.file)
assert "<translation type=\"vanished\">" not in cur_translations, \
f"{self.file} ({self.name}) translation file has obsolete translations. Run selfdrive/ui/update_translations.py --vanish to remove them"
def test_finished_translations(self):
"""
Tests ran on each translation marked "finished"
Plural:
- that any numerus (plural) translations have all plural forms non-empty
- that the correct format specifier is used (%n)
Non-plural:
- that translation is not empty
- that translation format arguments are consistent
"""
tr_xml = ET.parse(os.path.join(TRANSLATIONS_DIR, f"{self.file}.ts"))
for context in tr_xml.getroot():
for message in context.iterfind("message"):
translation = message.find("translation")
source_text = message.find("source").text
# Do not test unfinished translations
if translation.get("type") == "unfinished":
continue
if message.get("numerus") == "yes":
numerusform = [t.text for t in translation.findall("numerusform")]
for nf in numerusform:
assert nf is not None, f"Ensure all plural translation forms are completed: {source_text}"
assert "%n" in nf, "Ensure numerus argument (%n) exists in translation."
assert FORMAT_ARG.search(nf) is None, f"Plural translations must use %n, not %1, %2, etc.: {numerusform}"
else:
assert translation.text is not None, f"Ensure translation is completed: {source_text}"
source_args = FORMAT_ARG.findall(source_text)
translation_args = FORMAT_ARG.findall(translation.text)
assert sorted(source_args) == sorted(translation_args), \
f"Ensure format arguments are consistent: `{source_text}` vs. `{translation.text}`"
def test_no_locations(self):
for line in self._read_translation_file(TRANSLATIONS_DIR, self.file).splitlines():
assert not line.strip().startswith(LOCATION_TAG), \
f"Line contains location tag: {line.strip()}, remove all line numbers."
def test_entities_error(self):
cur_translations = self._read_translation_file(TRANSLATIONS_DIR, self.file)
matches = re.findall(r'@(\w+);', cur_translations)
assert len(matches) == 0, f"The string(s) {matches} were found with '@' instead of '&'"
def test_bad_language(self):
IGNORED_WORDS = {'pédale'}
match = re.search(r'_([a-zA-Z]{2,3})', self.file)
assert match, f"{self.name} - could not parse language"
response = requests.get(f"https://raw.githubusercontent.com/LDNOOBW/List-of-Dirty-Naughty-Obscene-and-Otherwise-Bad-Words/master/{match.group(1)}")
response.raise_for_status()
banned_words = {line.strip() for line in response.text.splitlines()}
for context in ET.parse(os.path.join(TRANSLATIONS_DIR, f"{self.file}.ts")).getroot():
for message in context.iterfind("message"):
translation = message.find("translation")
if translation.get("type") == "unfinished":
continue
translation_text = " ".join([t.text for t in translation.findall("numerusform")]) if message.get("numerus") == "yes" else translation.text
if not translation_text:
continue
words = set(translation_text.translate(str.maketrans('', '', string.punctuation + '%n')).lower().split())
bad_words_found = words & (banned_words - IGNORED_WORDS)
assert not bad_words_found, f"Bad language found in {self.name}: '{translation_text}'. Bad word(s): {', '.join(bad_words_found)}"

310
selfdrive/ui/tests/test_ui/run.py Executable file
View File

@@ -0,0 +1,310 @@
#!/usr/bin/env python3
import capnp
import pathlib
import shutil
import sys
import os
import pywinctl
import pyautogui
import pickle
import time
from collections import namedtuple
from cereal import car, log
from msgq.visionipc import VisionIpcServer, VisionStreamType
from cereal.messaging import PubMaster, log_from_bytes, sub_sock
from openpilot.common.basedir import BASEDIR
from openpilot.common.params import Params
from openpilot.common.prefix import OpenpilotPrefix
from openpilot.common.transformations.camera import CameraConfig, DEVICE_CAMERAS
from openpilot.selfdrive.selfdrived.alertmanager import set_offroad_alert
from openpilot.selfdrive.test.helpers import with_processes
from openpilot.selfdrive.test.process_replay.migration import migrate, migrate_controlsState, migrate_carState
from openpilot.tools.lib.logreader import LogReader
from openpilot.tools.lib.framereader import FrameReader
from openpilot.tools.lib.route import Route
from openpilot.tools.lib.cache import DEFAULT_CACHE_DIR
UI_DELAY = 0.1 # may be slower on CI?
TEST_ROUTE = "a2a0ccea32023010|2023-07-27--13-01-19"
STREAMS: list[tuple[VisionStreamType, CameraConfig, bytes]] = []
OFFROAD_ALERTS = ['Offroad_StorageMissing', 'Offroad_IsTakingSnapshot']
DATA: dict[str, capnp.lib.capnp._DynamicStructBuilder] = dict.fromkeys(
["carParams", "deviceState", "pandaStates", "controlsState", "selfdriveState",
"liveCalibration", "modelV2", "radarState", "driverMonitoringState", "carState",
"driverStateV2", "roadCameraState", "wideRoadCameraState", "driverCameraState"], None)
def setup_homescreen(click, pm: PubMaster):
  # Nothing to do: the UI boots straight into the home screen.
  pass


def setup_settings_device(click, pm: PubMaster):
  # Click the sidebar settings button to open the device settings panel.
  click(100, 100)


def setup_settings_toggles(click, pm: PubMaster):
  setup_settings_device(click, pm)
  click(278, 600)
  time.sleep(UI_DELAY)


def setup_settings_software(click, pm: PubMaster):
  setup_settings_device(click, pm)
  click(278, 720)
  time.sleep(UI_DELAY)


def setup_settings_firehose(click, pm: PubMaster):
  click(1780, 730)


def setup_settings_developer(click, pm: PubMaster):
  # Persist CarParams with experimental longitudinal available so the
  # developer panel renders its toggle.
  CP = car.CarParams()
  CP.experimentalLongitudinalAvailable = True
  Params().put("CarParamsPersistent", CP.to_bytes())

  setup_settings_device(click, pm)
  click(278, 970)
  time.sleep(UI_DELAY)
def setup_onroad(click, pm: PubMaster):
  """Drive the UI into the onroad state: serve camera frames over VisionIPC
  and republish all cached DATA messages until the UI has rendered at least
  20 frames (tracked via uiDebug). Indentation reconstructed from the dump."""
  vipc_server = VisionIpcServer("camerad")
  for stream_type, cam, _ in STREAMS:
    vipc_server.create_buffers(stream_type, 5, cam.width, cam.height)
  vipc_server.start_listener()

  uidebug_received_cnt = 0
  packet_id = 0
  uidebug_sock = sub_sock('uiDebug')

  # Condition check for uiDebug processing: only a started, real-car UI
  # emits uiDebug, otherwise fall back to counting iterations.
  check_uidebug = DATA['deviceState'].deviceState.started and not DATA['carParams'].carParams.notCar

  # Loop until 20 'uiDebug' messages are received
  while uidebug_received_cnt <= 20:
    for service, data in DATA.items():
      if data:
        data.clear_write_flag()
        pm.send(service, data)

    for stream_type, _, image in STREAMS:
      vipc_server.send(stream_type, image, packet_id, packet_id, packet_id)

    if check_uidebug:
      while uidebug_sock.receive(non_blocking=True):
        uidebug_received_cnt += 1
    else:
      uidebug_received_cnt += 1

    packet_id += 1
    time.sleep(0.05)
def setup_onroad_disengaged(click, pm: PubMaster):
  # Temporarily mark openpilot disengaged for the screenshot, then restore.
  DATA['selfdriveState'].selfdriveState.enabled = False
  setup_onroad(click, pm)
  DATA['selfdriveState'].selfdriveState.enabled = True


def setup_onroad_override(click, pm: PubMaster):
  # Temporarily put openpilot in the overriding state, then restore.
  DATA['selfdriveState'].selfdriveState.state = log.SelfdriveState.OpenpilotState.overriding
  setup_onroad(click, pm)
  DATA['selfdriveState'].selfdriveState.state = log.SelfdriveState.OpenpilotState.enabled


def setup_onroad_wide(click, pm: PubMaster):
  # Experimental mode at low speed shows the wide camera.
  DATA['selfdriveState'].selfdriveState.experimentalMode = True
  DATA["carState"].carState.vEgo = 1
  setup_onroad(click, pm)


def setup_onroad_sidebar(click, pm: PubMaster):
  setup_onroad(click, pm)
  click(500, 500)  # toggle the sidebar open
  setup_onroad(click, pm)


def setup_onroad_wide_sidebar(click, pm: PubMaster):
  setup_onroad_wide(click, pm)
  click(500, 500)  # toggle the sidebar open
  setup_onroad_wide(click, pm)


def setup_body(click, pm: PubMaster):
  # Present as a comma body: notCar with a charging battery gauge.
  DATA['carParams'].carParams.brand = "body"
  DATA['carParams'].carParams.notCar = True
  DATA['carState'].carState.charging = True
  DATA['carState'].carState.fuelGauge = 50.0
  setup_onroad(click, pm)


def setup_keyboard(click, pm: PubMaster):
  setup_settings_device(click, pm)
  click(250, 965)
  click(1930, 420)


def setup_keyboard_uppercase(click, pm: PubMaster):
  setup_keyboard(click, pm)
  click(200, 800)  # shift key
def setup_driver_camera(click, pm: PubMaster):
setup_settings_device(click, pm)
click(1950, 435)
DATA['deviceState'].deviceState.started = False
setup_onroad(click, pm)
DATA['deviceState'].deviceState.started = True
def setup_onroad_alert(click, pm: PubMaster, text1, text2, size, status=log.SelfdriveState.AlertStatus.normal):
print(f'setup onroad alert, size: {size}')
state = DATA['selfdriveState']
origin_state_bytes = state.to_bytes()
cs = state.selfdriveState
cs.alertText1 = text1
cs.alertText2 = text2
cs.alertSize = size
cs.alertStatus = status
cs.alertType = "test_onroad_alert"
setup_onroad(click, pm)
DATA['selfdriveState'] = log_from_bytes(origin_state_bytes).as_builder()
def setup_onroad_alert_small(click, pm: PubMaster):
setup_onroad_alert(click, pm, 'This is a small alert message', '', log.SelfdriveState.AlertSize.small)
def setup_onroad_alert_mid(click, pm: PubMaster):
setup_onroad_alert(click, pm, 'Medium Alert', 'This is a medium alert message', log.SelfdriveState.AlertSize.mid)
def setup_onroad_alert_full(click, pm: PubMaster):
setup_onroad_alert(click, pm, 'Full Alert', 'This is a full alert message', log.SelfdriveState.AlertSize.full)
def setup_offroad_alert(click, pm: PubMaster):
for alert in OFFROAD_ALERTS:
set_offroad_alert(alert, True)
# Toggle between settings and home to refresh the offroad alert widget
setup_settings_device(click, pm)
click(240, 216)
def setup_update_available(click, pm: PubMaster):
  """Flag a pending update, using the first section of RELEASES.md as its notes."""
  params = Params()
  params.put_bool("UpdateAvailable", True)
  with open(os.path.join(BASEDIR, "RELEASES.md")) as f:
    notes = f.read().split('\n\n', 1)[0]  # first paragraph only
  params.put("UpdaterNewReleaseNotes", notes + "\n")
  # toggle settings -> home so the offroad widget re-renders with the update prompt
  setup_settings_device(click, pm)
  click(240, 216)
def setup_pair_device(click, pm: PubMaster):
  """Open the pairing dialog (runner sets an unpaired ApiCache_Device for this case)."""
  # NOTE(review): coordinates assumed to open settings then the pair prompt —
  # confirm against the homescreen layout
  click(1950, 435)
  click(1800, 900)
# Map of test-case name -> setup function; each entry produces one screenshot.
# "prime" and "pair_device" reuse setups but get extra Params from the runner.
CASES = {
  "homescreen": setup_homescreen,
  "prime": setup_homescreen,
  "pair_device": setup_pair_device,
  "settings_device": setup_settings_device,
  "settings_toggles": setup_settings_toggles,
  "settings_software": setup_settings_software,
  "settings_firehose": setup_settings_firehose,
  "settings_developer": setup_settings_developer,
  "onroad": setup_onroad,
  "onroad_disengaged": setup_onroad_disengaged,
  "onroad_override": setup_onroad_override,
  "onroad_sidebar": setup_onroad_sidebar,
  "onroad_alert_small": setup_onroad_alert_small,
  "onroad_alert_mid": setup_onroad_alert_mid,
  "onroad_alert_full": setup_onroad_alert_full,
  "onroad_wide": setup_onroad_wide,
  "onroad_wide_sidebar": setup_onroad_wide_sidebar,
  "driver_camera": setup_driver_camera,
  "body": setup_body,
  "offroad_alert": setup_offroad_alert,
  "update_available": setup_update_available,
  "keyboard": setup_keyboard,
  "keyboard_uppercase": setup_keyboard_uppercase
}
TEST_DIR = pathlib.Path(__file__).parent
TEST_OUTPUT_DIR = TEST_DIR / "report_1"  # wiped and recreated on every run
SCREENSHOTS_DIR = TEST_OUTPUT_DIR / "screenshots"
class TestUI:
  """Drives the running UI process with pyautogui and captures screenshots."""

  def __init__(self):
    os.environ["SCALE"] = "1"
    # pyautogui imports mouseinfo, which needs a display; stub it out
    sys.modules["mouseinfo"] = False

  def setup(self):
    """Publish initial deviceState repeatedly, then locate the UI window."""
    self.pm = PubMaster(list(DATA.keys()))
    DATA['deviceState'].deviceState.networkType = log.DeviceState.NetworkType.wifi
    DATA['deviceState'].deviceState.lastAthenaPingTime = 0
    for _ in range(10):
      self.pm.send('deviceState', DATA['deviceState'])
      DATA['deviceState'].clear_write_flag()
      time.sleep(0.05)
    try:
      self.ui = pywinctl.getWindowsWithTitle("ui")[0]
    except Exception as e:
      # headless Xvfb has no window manager; fall back to a fixed geometry
      print(f"failed to find ui window, assuming that it's in the top left (for Xvfb) {e}")
      self.ui = namedtuple("bb", ["left", "top", "width", "height"])(0,0,2160,1080)

  def screenshot(self, name):
    """Capture the UI window region and assert the expected 2160x1080 size."""
    im = pyautogui.screenshot(SCREENSHOTS_DIR / f"{name}.png", region=(self.ui.left, self.ui.top, self.ui.width, self.ui.height))
    assert im.width == 2160
    assert im.height == 1080

  def click(self, x, y, *args, **kwargs):
    """Click at window-relative coordinates, then wait for the UI to react."""
    pyautogui.click(self.ui.left + x, self.ui.top + y, *args, **kwargs)
    time.sleep(UI_DELAY)  # give enough time for the UI to react

  @with_processes(["ui"])
  def test_ui(self, name, setup_case):
    """Run one named case: publish/click via setup_case, then screenshot."""
    self.setup()
    setup_case(self.click, self.pm)
    self.screenshot(name)
def create_screenshots():
  """Load route fixtures and camera frames, then run every case in CASES."""
  if TEST_OUTPUT_DIR.exists():
    shutil.rmtree(TEST_OUTPUT_DIR)
  SCREENSHOTS_DIR.mkdir(parents=True)

  # seed DATA with real messages from one qlog segment of the test route
  route = Route(TEST_ROUTE)
  segnum = 2
  lr = LogReader(route.qlog_paths()[segnum])
  DATA['carParams'] = next((event.as_builder() for event in lr if event.which() == 'carParams'), None)
  for event in migrate(lr, [migrate_controlsState, migrate_carState]):
    if event.which() in DATA:
      DATA[event.which()] = event.as_builder()
    if all(DATA.values()):
      break

  cam = DEVICE_CAMERAS[("tici", "ar0231")]
  # cache decoded first frames to avoid re-downloading/decoding on reruns
  frames_cache = f'{DEFAULT_CACHE_DIR}/ui_frames'
  if os.path.isfile(frames_cache):
    with open(frames_cache, 'rb') as f:
      frames = pickle.load(f)
      road_img = frames[0]
      wide_road_img = frames[1]
      driver_img = frames[2]
  else:
    with open(frames_cache, 'wb') as f:
      road_img = FrameReader(route.camera_paths()[segnum]).get(0, pix_fmt="nv12")[0]
      wide_road_img = FrameReader(route.ecamera_paths()[segnum]).get(0, pix_fmt="nv12")[0]
      driver_img = FrameReader(route.dcamera_paths()[segnum]).get(0, pix_fmt="nv12")[0]
      pickle.dump([road_img, wide_road_img, driver_img], f)

  STREAMS.append((VisionStreamType.VISION_STREAM_ROAD, cam.fcam, road_img.flatten().tobytes()))
  STREAMS.append((VisionStreamType.VISION_STREAM_WIDE_ROAD, cam.ecam, wide_road_img.flatten().tobytes()))
  STREAMS.append((VisionStreamType.VISION_STREAM_DRIVER, cam.dcam, driver_img.flatten().tobytes()))

  t = TestUI()
  for name, setup in CASES.items():
    # each case runs under its own params prefix for isolation
    with OpenpilotPrefix():
      params = Params()
      params.put("DongleId", "123456789012345")
      if name == 'prime':
        params.put('PrimeType', '1')
      elif name == 'pair_device':
        # unpaired device state triggers the pairing prompt
        params.put('ApiCache_Device', '{"is_paired":0, "prime_type":-1}')
      t.test_ui(name, setup)
# Entry point: generates all UI screenshots into report_1/screenshots
if __name__ == "__main__":
  print("creating test screenshots")
  create_screenshots()

View File

@@ -0,0 +1,34 @@
<!-- Jinja2 template: renders one screenshot per UI test case into a simple report page. -->
<html>
<style>
.column {
float: left;
width: 50%;
padding: 5px;
}
.row::after {
content: "";
clear: both;
display: table;
}
.image {
width: 100%;
}
</style>
{% for name, (image, ref_image) in cases.items() %}
<!-- NOTE(review): ref_image is unpacked but never rendered; confirm whether a
     second reference-image column was intended for diffing -->
<h1>{{name}}</h1>
<div class="row">
<div class="column">
<img class="image" src="{{ image }}" />
</div>
</div>
<br>
{% endfor %}
</html>

7
selfdrive/ui/text Executable file
View File

@@ -0,0 +1,7 @@
#!/bin/sh
# On a comma device (/TICI marker present), stage the prebuilt larch64 binary
# as _text once, then exec it with the first argument.
if [ -f /TICI ] && [ ! -f _text ]; then
  cp qt/text_larch64 _text
fi
# NOTE(review): only "$1" is forwarded; switch to "$@" if more args are ever needed
exec ./_text "$1"

View File

@@ -0,0 +1,71 @@
# Multilanguage
[![languages](https://raw.githubusercontent.com/commaai/openpilot/badges/translation_badge.svg)](#)
## Contributing
Before getting started, make sure you have set up the openpilot Ubuntu development environment by reading the [tools README.md](/tools/README.md).
### Policy
Most of the languages supported by openpilot come from and are maintained by the community via pull requests. A pull request likely to be merged is one that [fixes a translation or adds missing translations.](https://github.com/commaai/openpilot/blob/master/selfdrive/ui/translations/README.md#improving-an-existing-language)
We also generally merge pull requests adding support for a new language if there are community members willing to maintain it. Maintaining a language is ensuring quality and completion of translations before each openpilot release.
comma may remove or hide language support from releases depending on translation quality and completeness.
### Adding a New Language
openpilot provides a few tools to help contributors manage their translations and to ensure quality. To get started:
1. Add your new language to [languages.json](/selfdrive/ui/translations/languages.json) with the appropriate [language code](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) and the localized language name (Traditional Chinese is `中文(繁體)`).
2. Generate the XML translation file (`*.ts`):
```shell
selfdrive/ui/update_translations.py
```
3. Edit the translation file, marking each translation as completed:
```shell
linguist selfdrive/ui/translations/your_language_file.ts
```
4. View your finished translations by compiling and starting the UI, then find it in the language selector:
```shell
scons -j$(nproc) selfdrive/ui && selfdrive/ui/ui
```
5. Read [Checking the UI](#checking-the-ui) to double-check your translations fit in the UI.
### Improving an Existing Language
Following step 3 above, review existing translations and add any missing ones. Once you're done, just open a pull request to openpilot.
### Checking the UI
Different languages use varying space to convey the same message, so it's a good idea to double-check that your translations do not overlap and fit into each widget. Start the UI (step 4. above) and view each page, making adjustments to translations as needed.
#### To view offroad alerts:
With the UI started, you can view the offroad alerts with:
```shell
selfdrive/ui/tests/cycle_offroad_alerts.py
```
### Updating the UI
Any time you edit source code in the UI, you need to update the translations to ensure the line numbers and contexts are up to date (first step above).
### Testing
openpilot has a few unit tests to make sure all translations are up-to-date and that all strings are wrapped in a translation marker. They are run in CI, but you can also run them locally.
Tests that translation files are up to date:
```shell
selfdrive/ui/tests/test_translations.py
```
Tests all static source strings are wrapped:
```shell
selfdrive/ui/tests/create_test_translations.sh && selfdrive/ui/tests/test_translations
```
---
![multilanguage_onroad](https://user-images.githubusercontent.com/25857203/178912800-2c798af8-78e3-498e-9e19-35906e0bafff.png)

View File

@@ -0,0 +1,138 @@
#!/usr/bin/env python3
import argparse
import json
import os
import pathlib
import xml.etree.ElementTree as ET
from typing import cast
import requests
TRANSLATIONS_DIR = pathlib.Path(__file__).resolve().parent
TRANSLATIONS_LANGUAGES = TRANSLATIONS_DIR / "languages.json"
OPENAI_MODEL = "gpt-4"
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
OPENAI_PROMPT = "You are a professional translator from English to {language} (ISO 639 language code). " + \
"The following sentence or word is in the GUI of a software called openpilot, translate it accordingly."
def get_language_files(languages: list[str] | None = None) -> dict[str, pathlib.Path]:
  """Map language codes to their .ts translation files.

  Args:
    languages: optional list of language codes to keep; None keeps all.

  Returns:
    Dict of language code (e.g. "fr") -> path to the main_<code>.ts file.
  """
  files = {}

  with open(TRANSLATIONS_LANGUAGES) as fp:
    language_dict = json.load(fp)

  for filename in language_dict.values():
    # filename is e.g. "main_fr"; the language code is the part after "main_"
    path = TRANSLATIONS_DIR / f"{filename}.ts"
    language = path.stem.split("main_")[1]
    if languages is None or language in languages:
      files[language] = path

  return files
def translate_phrase(text: str, language: str) -> str:
  """Request a translation of `text` into `language` from the OpenAI chat API."""
  payload = {
    "model": OPENAI_MODEL,
    "messages": [
      {
        "role": "system",
        "content": OPENAI_PROMPT.format(language=language),
      },
      {
        "role": "user",
        "content": text,
      },
    ],
    "temperature": 0.8,
    "max_tokens": 1024,
    "top_p": 1,
  }
  headers = {
    "Authorization": f"Bearer {OPENAI_API_KEY}",
    "Content-Type": "application/json",
  }
  response = requests.post("https://api.openai.com/v1/chat/completions", json=payload, headers=headers)

  # treat any 4xx/5xx as fatal, surfacing the API's error body
  if 400 <= response.status_code < 600:
    raise requests.HTTPError(f'Error {response.status_code}: {response.json()}', response=response)

  data = response.json()
  return cast(str, data["choices"][0]["message"]["content"])
def translate_file(path: pathlib.Path, language: str, all_: bool) -> None:
  """Translate messages in one Qt .ts file in place via the LLM.

  Args:
    path: the .ts XML file to read and rewrite.
    language: target language code for the LLM prompt.
    all_: when True retranslate every message; otherwise only "unfinished" ones.

  Raises:
    ValueError: if an expected name/source/translation node is missing.
  """
  tree = ET.parse(path)
  root = tree.getroot()

  for context in root.findall("./context"):
    name = context.find("name")
    if name is None:
      raise ValueError("name not found")
    print(f"Context: {name.text}")
    for message in context.findall("./message"):
      source = message.find("source")
      translation = message.find("translation")
      if source is None or translation is None:
        raise ValueError("source or translation not found")
      # skip already-finished translations unless a full retranslation was requested
      if not all_ and translation.attrib.get("type") != "unfinished":
        continue
      llm_translation = translate_phrase(cast(str, source.text), language)
      print(f"Source: {source.text}\n" +
            f"Current translation: {translation.text}\n" +
            f"LLM translation: {llm_translation}")
      translation.text = llm_translation

  with path.open("w", encoding="utf-8") as fp:
    # write the XML declaration and DOCTYPE manually; ET.tostring omits them
    fp.write('<?xml version="1.0" encoding="utf-8"?>\n' +
             '<!DOCTYPE TS>\n' +
             ET.tostring(root, encoding="utf-8").decode())
def main():
  """CLI entry point: parse args, validate the API key, translate selected files."""
  parser = argparse.ArgumentParser("Auto translate")

  mode_group = parser.add_mutually_exclusive_group(required=True)
  mode_group.add_argument("-a", "--all-files", action="store_true", help="Translate all files")
  mode_group.add_argument("-f", "--file", nargs="+", help="Translate the selected files. (Example: -f fr de)")
  parser.add_argument("-t", "--all-translations", action="store_true", default=False, help="Translate all sections. (Default: only unfinished)")
  args = parser.parse_args()

  # an API key is mandatory for every mode
  if OPENAI_API_KEY is None:
    print("OpenAI API key is missing. (Hint: use `export OPENAI_API_KEY=YOUR-KEY` before you run the script).\n" +
          "If you don't have one go to: https://beta.openai.com/account/api-keys.")
    exit(1)

  files = get_language_files(None if args.all_files else args.file)

  if args.file:
    # report any requested language codes with no matching .ts file
    missing = set(args.file) - set(files)
    if missing:
      print(f"No language files found: {missing}")
      exit(1)

  print(f"Translation mode: {'all' if args.all_translations else 'only unfinished'}. Files: {list(files)}")

  for lang, path in files.items():
    print(f"Translate {lang} ({path})")
    translate_file(path, lang, args.all_translations)

View File

@@ -0,0 +1,62 @@
#!/usr/bin/env python3
import json
import os
import requests
import xml.etree.ElementTree as ET
from openpilot.common.basedir import BASEDIR
from openpilot.selfdrive.ui.tests.test_translations import UNFINISHED_TRANSLATION_TAG
from openpilot.selfdrive.ui.update_translations import LANGUAGES_FILE, TRANSLATIONS_DIR
TRANSLATION_TAG = "<translation"  # every message row in a .ts file has one
BADGE_HEIGHT = 20 + 8  # shields.io badge height plus vertical spacing
SHIELDS_URL = "https://img.shields.io/badge"

# Builds a single stacked SVG of per-language completion badges.
if __name__ == "__main__":
  with open(LANGUAGES_FILE) as f:
    translation_files = json.load(f)

  badge_svg = []
  max_badge_width = 0  # keep track of max width to set parent element
  for idx, (name, file) in enumerate(translation_files.items()):
    with open(os.path.join(TRANSLATIONS_DIR, f"{file}.ts")) as tr_f:
      tr_file = tr_f.read()

    # completion = share of <translation> tags not marked unfinished
    total_translations = 0
    unfinished_translations = 0
    for line in tr_file.splitlines():
      if TRANSLATION_TAG in line:
        total_translations += 1
      if UNFINISHED_TRANSLATION_TAG in line:
        unfinished_translations += 1

    percent_finished = int(100 - (unfinished_translations / total_translations * 100.))
    # green when complete, yellow above 90%, red otherwise
    color = f"rgb{(94, 188, 0) if percent_finished == 100 else (248, 255, 50) if percent_finished > 90 else (204, 55, 27)}"

    # Download badge
    badge_label = f"LANGUAGE {name}"
    badge_message = f"{percent_finished}% complete"
    if unfinished_translations != 0:
      badge_message += f" ({unfinished_translations} unfinished)"

    r = requests.get(f"{SHIELDS_URL}/{badge_label}-{badge_message}-{color}", timeout=10)
    assert r.status_code == 200, "Error downloading badge"
    content_svg = r.content.decode("utf-8")

    xml = ET.fromstring(content_svg)
    assert "width" in xml.attrib
    max_badge_width = max(max_badge_width, int(xml.attrib["width"]))

    # Make tag ids in each badge unique to combine them into one svg
    for tag in ("r", "s"):
      content_svg = content_svg.replace(f'id="{tag}"', f'id="{tag}{idx}"')
      content_svg = content_svg.replace(f'"url(#{tag})"', f'"url(#{tag}{idx})"')

    # stack badges vertically, one row per language
    badge_svg.extend([f'<g transform="translate(0, {idx * BADGE_HEIGHT})">', content_svg, "</g>"])

  badge_svg.insert(0, '<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" ' +
                   f'height="{len(translation_files) * BADGE_HEIGHT}" width="{max_badge_width}">')
  badge_svg.append("</svg>")

  with open(os.path.join(BASEDIR, "translation_badge.svg"), "w") as badge_f:
    badge_f.write("\n".join(badge_svg))

View File

@@ -0,0 +1,14 @@
{
"English": "main_en",
"Deutsch": "main_de",
"Français": "main_fr",
"Português": "main_pt-BR",
"Español": "main_es",
"Türkçe": "main_tr",
"العربية": "main_ar",
"ไทย": "main_th",
"中文(繁體)": "main_zh-CHT",
"中文(简体)": "main_zh-CHS",
"한국어": "main_ko",
"日本語": "main_ja"
}

Binary file not shown.

File diff suppressed because it is too large Load Diff

Binary file not shown.

File diff suppressed because it is too large Load Diff

Binary file not shown.

View File

@@ -0,0 +1,38 @@
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.1" language="en_US">
<context>
<name>InputDialog</name>
<message numerus="yes">
<source>Need at least %n character(s)!</source>
<translation>
<numerusform>Need at least %n character!</numerusform>
<numerusform>Need at least %n characters!</numerusform>
</translation>
</message>
</context>
<context>
<name>QObject</name>
<message numerus="yes">
<source>%n minute(s) ago</source>
<translation>
<numerusform>%n minute ago</numerusform>
<numerusform>%n minutes ago</numerusform>
</translation>
</message>
<message numerus="yes">
<source>%n hour(s) ago</source>
<translation>
<numerusform>%n hour ago</numerusform>
<numerusform>%n hours ago</numerusform>
</translation>
</message>
<message numerus="yes">
<source>%n day(s) ago</source>
<translation>
<numerusform>%n day ago</numerusform>
<numerusform>%n days ago</numerusform>
</translation>
</message>
</context>
</TS>

Binary file not shown.

File diff suppressed because it is too large Load Diff

Binary file not shown.

File diff suppressed because it is too large Load Diff

Binary file not shown.

File diff suppressed because it is too large Load Diff

Binary file not shown.

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

Binary file not shown.

File diff suppressed because it is too large Load Diff

Binary file not shown.

File diff suppressed because it is too large Load Diff

Binary file not shown.

File diff suppressed because it is too large Load Diff

Binary file not shown.

File diff suppressed because it is too large Load Diff

Binary file not shown.

File diff suppressed because it is too large Load Diff

BIN
selfdrive/ui/ui Executable file

Binary file not shown.

View File

@@ -0,0 +1,50 @@
#!/usr/bin/env python3
import argparse
import json
import os
from openpilot.common.basedir import BASEDIR
UI_DIR = os.path.join(BASEDIR, "selfdrive", "ui")
TRANSLATIONS_DIR = os.path.join(UI_DIR, "translations")
LANGUAGES_FILE = os.path.join(TRANSLATIONS_DIR, "languages.json")
TRANSLATIONS_INCLUDE_FILE = os.path.join(TRANSLATIONS_DIR, "alerts_generated.h")
PLURAL_ONLY = ["main_en"] # base language, only create entries for strings with plural forms
def generate_translations_include():
  """Write alerts_generated.h so offroad alert texts are visible to lupdate."""
  # offroad alerts
  # TODO translate events from openpilot.selfdrive/controls/lib/events.py
  header = "// THIS IS AN AUTOGENERATED FILE, PLEASE EDIT alerts_offroad.json\n"
  with open(os.path.join(BASEDIR, "selfdrive/selfdrived/alerts_offroad.json")) as f:
    alerts = json.load(f)
  for alert in alerts.values():
    header += f'QT_TRANSLATE_NOOP("OffroadAlert", R"({alert["text"]})");\n'

  with open(TRANSLATIONS_INCLUDE_FILE, "w") as f:
    f.write(header)
def update_translations(vanish: bool = False, translation_files: None | list[str] = None, translations_dir: str = TRANSLATIONS_DIR):
  """Run Qt lupdate over the UI sources for each translation file.

  Args:
    vanish: pass -no-obsolete to drop entries whose source text is gone.
    translation_files: file stems to update; defaults to all in languages.json.
    translations_dir: directory containing the .ts files.
  """
  if translation_files is None:
    with open(LANGUAGES_FILE) as f:
      translation_files = json.load(f).values()

  for file in translation_files:
    tr_file = os.path.join(translations_dir, f"{file}.ts")
    cmd = f"lupdate -locations none -recursive {UI_DIR} -ts {tr_file} -I {BASEDIR}"
    if vanish:
      cmd += " -no-obsolete"
    if file in PLURAL_ONLY:
      cmd += " -pluralonly"  # base language only tracks plural-form strings
    ret = os.system(cmd)
    assert ret == 0
if __name__ == "__main__":
  parser = argparse.ArgumentParser(description="Update translation files for UI",
                                   formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument("--vanish", action="store_true", help="Remove translations with source text no longer found")
  args = parser.parse_args()

  # regenerate the alerts header first so new alert strings are picked up by lupdate
  generate_translations_include()
  update_translations(args.vanish)