Mirror of https://github.com/openai/codex.git (synced 2026-04-28 18:32:04 +03:00)
File size: 6.1 KiB
DOs
- Imports at top: Keep all `use` statements consolidated at the file top.
use std::{
io,
sync::{Arc, atomic::{AtomicBool, Ordering}},
time::Duration,
};
- Duration for timeouts: Use `Duration` and name fields without units.
pub struct ServerOptions {
pub login_timeout: Option<Duration>,
// ...
}
// Init with explicit units
let opts = ServerOptions {
login_timeout: Some(Duration::from_secs(10 * 60)),
..ServerOptions::new(home, client_id)
};
- Cancellable timeout watcher: Disarm the timer when login completes; use `compare_exchange` and `server.unblock()`.
fn spawn_timeout_watcher(
done_rx: std::sync::mpsc::Receiver<()>,
timeout: Duration,
shutdown: Arc<AtomicBool>,
timed_out: Arc<AtomicBool>,
server: Arc<tiny_http::Server>,
) {
std::thread::spawn(move || {
if done_rx.recv_timeout(timeout).is_err()
&& shutdown.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst).is_ok()
{
timed_out.store(true, Ordering::SeqCst);
server.unblock(); // promptly exit recv()
}
});
}
- Share with Arc, not Clone: Wrap server in `Arc` and expose a minimal `ShutdownHandle`.
pub struct ShutdownHandle {
shutdown: Arc<AtomicBool>,
server: Arc<tiny_http::Server>,
}
impl ShutdownHandle {
pub fn cancel(&self) {
self.shutdown.store(true, Ordering::SeqCst);
self.server.unblock();
}
}
- Handle `recv()` errors with shutdown flag: Treat `unblock()`-caused errors as graceful exit.
let server = server.clone(); // Arc<Server>
let handle = std::thread::spawn(move || -> io::Result<()> {
while !shutdown.load(Ordering::SeqCst) {
match server.recv() {
Ok(req) => { /* handle request */ }
Err(e) => {
if shutdown.load(Ordering::SeqCst) { break; }
return Err(io::Error::other(e));
}
}
}
Ok(())
});
- Keep the message loop non-blocking: Reply immediately, then monitor completion in background.
let login_id = Uuid::new_v4();
outgoing.send_response(request_id, LoginChatGptResponse {
login_id,
auth_url: server.auth_url.clone(),
}).await;
let outgoing = outgoing.clone();
tokio::spawn(async move {
let res = tokio::task::spawn_blocking(move || server.block_until_done()).await;
let (success, error) = match res {
Ok(Ok(())) => (true, None),
Ok(Err(e)) => (false, Some(format!("Login server error: {e}"))),
Err(join) => (false, Some(format!("Join error: {join}"))),
};
outgoing.send_notification(OutgoingNotification {
method: LOGIN_CHATGPT_COMPLETE_EVENT.to_string(),
params: serde_json::to_value(LoginChatGptCompleteNotification { login_id, success, error }).ok(),
}).await;
});
- Single active login: Store one `ActiveLogin` in `Arc<Mutex<Option<_>>>`; cancel previous and drop the lock before awaits.
struct ActiveLogin { shutdown: ShutdownHandle, login_id: Uuid }
{
let mut guard = self.active_login.lock().await;
if let Some(prev) = guard.take() { prev.shutdown.cancel(); }
*guard = Some(ActiveLogin { shutdown: server.cancel_handle(), login_id });
} // lock released here
- Explicit cancel API: Validate `login_id` and respond clearly.
let active = { self.active_login.lock().await.take() };
match active {
Some(a) if a.login_id == login_id => {
a.shutdown.cancel();
outgoing.send_response(request_id, CancelLoginChatGptResponse {}).await;
}
_ => {
outgoing.send_error(request_id, JSONRPCErrorError {
code: INVALID_REQUEST_ERROR_CODE,
message: format!("login id not found: {login_id}"),
data: None,
}).await;
}
}
- Unify send path: Use a small enum to funnel response vs. error into one send site.
enum Reply<T> { Response(T), Error(JSONRPCErrorError) }
match reply {
Reply::Response(v) => outgoing.send_response(request_id, v).await,
Reply::Error(e) => outgoing.send_error(request_id, e).await,
}
DON’Ts
- Mid-file imports: Don’t add `use` statements below type/impl blocks.
// ❌ Avoid
// ... many lines later ...
use std::mem::ManuallyDrop;
- Clone owning server types: Don’t implement `Clone` for types that own threads/handles.
// ❌ Avoid
#[derive(Clone)]
pub struct LoginServer { server_handle: std::thread::JoinHandle<()> }
- Timeout as raw integers: Don’t use `Option<u64>` with names like `*_secs`.
// ❌ Avoid
pub struct ServerOptions { pub login_timeout_secs: Option<u64> }
- Sleep-only timers: Don’t spawn a thread that always sleeps to completion.
// ❌ Avoid
std::thread::spawn(move || {
std::thread::sleep(Duration::from_secs(secs));
shutdown.store(true, Ordering::SeqCst);
// timer can’t be disarmed; lingers after success
});
- Dummy HTTP “nudge”: Don’t poke localhost to unblock; use `server.unblock()`.
// ❌ Avoid
let _ = std::net::TcpStream::connect(format!("127.0.0.1:{actual_port}"));
- Block the message processor: Don’t wait for login to finish before replying.
// ❌ Avoid
self.login_chatgpt(request_id).await; // performs blocking join inside
- Multiple concurrent logins: Don’t track a `HashMap<Uuid, ActiveLogin>` unless truly needed.
// ❌ Avoid
active_logins: Arc<Mutex<HashMap<Uuid, ActiveLogin>>>
- Hold locks across awaits: Don’t keep a mutex guard while awaiting I/O.
// ❌ Avoid
let mut guard = self.active_login.lock().await;
outgoing.send_response(request_id, resp).await; // lock held here
- Wildcard where `None` is intended: Prefer explicit `None` matches.
// ❌ Avoid
match guard.as_ref().map(|l| l.login_id) { _ => /* ... */ }
// ✅ Prefer
match guard.as_ref().map(|l| l.login_id) {
None => { /* ... */ }
Some(id) => { /* ... */ }
}
- Over-testing serialization: Don’t unit-test JSON for every message type; keep tests focused.
// ❌ Avoid boilerplate serialization snapshot for each enum variant
// Prefer a small, representative set to verify tagging/shape.