Tightly integrate WhatsApp selectors into existing UIs

2026-02-16 10:51:57 +00:00
parent a38339c809
commit 15af8af6b2
19 changed files with 2846 additions and 156 deletions


@@ -22,12 +22,7 @@ if _signal_http_url:
parsed = urlparse(
_signal_http_url if "://" in _signal_http_url else f"http://{_signal_http_url}"
)
configured_host = (parsed.hostname or "").strip().lower()
runtime = os.getenv("container", "").strip().lower()
if configured_host == "signal" and runtime == "podman":
SIGNAL_HOST = "127.0.0.1"
else:
SIGNAL_HOST = parsed.hostname or "signal"
SIGNAL_HOST = parsed.hostname or "signal"
SIGNAL_PORT = parsed.port or 8080
else:
if settings.DEBUG:
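
For context, a minimal sketch of what the simplified resolution above yields for a typical value; the sample URL is hypothetical and only the urlparse behaviour is being illustrated:

from urllib.parse import urlparse

# Illustrative value only; SIGNAL_HTTP_URL is whatever the deployment sets.
_signal_http_url = "signal:8080"
# Without a scheme, urlparse() would treat "signal" as the scheme, hence the http:// prefix.
parsed = urlparse(
    _signal_http_url if "://" in _signal_http_url else f"http://{_signal_http_url}"
)
SIGNAL_HOST = parsed.hostname or "signal"  # -> "signal"
SIGNAL_PORT = parsed.port or 8080          # -> 8080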
@@ -276,18 +271,34 @@ class HandleMessage(Command):
envelope_source_uuid = envelope.get("sourceUuid")
envelope_source_number = envelope.get("sourceNumber")
envelope_source = envelope.get("source")
destination_number = (
raw.get("envelope", {})
.get("syncMessage", {})
.get("sentMessage", {})
.get("destination")
)
primary_identifier = dest if is_from_bot else source_uuid
identifier_candidates = _identifier_candidates(
primary_identifier,
source_uuid,
source_number,
source_value,
envelope_source_uuid,
envelope_source_number,
envelope_source,
dest,
)
if is_from_bot:
# Outbound events must route only by the destination identity.
# Including the bot's own UUID/number can leak messages across
# contacts if those "self" identifiers are linked to any of them.
identifier_candidates = _identifier_candidates(
dest,
destination_number,
primary_identifier,
)
else:
identifier_candidates = _identifier_candidates(
primary_identifier,
source_uuid,
source_number,
source_value,
envelope_source_uuid,
envelope_source_number,
envelope_source,
dest,
)
if not identifier_candidates:
log.warning("No Signal identifier available for message routing.")
return
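
The _identifier_candidates helper itself is not part of this diff; as a rough sketch under that caveat, it presumably drops empty values and de-duplicates while preserving argument order, so the ordering of arguments above encodes routing priority:

def _identifier_candidates(*values) -> list[str]:
    # Assumed shape, not the actual implementation: keep the first occurrence of
    # each non-empty identifier so earlier arguments win during routing.
    seen: set[str] = set()
    candidates: list[str] = []
    for value in values:
        cleaned = str(value or "").strip()
        if cleaned and cleaned not in seen:
            seen.add(cleaned)
            candidates.append(cleaned)
    return candidates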


@@ -19,6 +19,8 @@ from core.util import logs
log = logs.get_logger("transport")
_RUNTIME_STATE_TTL = 60 * 60 * 24
_RUNTIME_COMMANDS_TTL = 60 * 15
_RUNTIME_COMMAND_RESULT_TTL = 60
_RUNTIME_CLIENTS: dict[str, Any] = {}
@@ -30,6 +32,14 @@ def _runtime_key(service: str) -> str:
return f"gia:service:runtime:{_service_key(service)}"
def _runtime_commands_key(service: str) -> str:
return f"gia:service:commands:{_service_key(service)}"
def _runtime_command_result_key(service: str, command_id: str) -> str:
return f"gia:service:command-result:{_service_key(service)}:{command_id}"
def _gateway_base(service: str) -> str:
key = f"{service.upper()}_HTTP_URL"
default = f"http://{service}:8080"
@@ -78,6 +88,59 @@ def update_runtime_state(service: str, **updates):
return state
def enqueue_runtime_command(service: str, action: str, payload: dict | None = None) -> str:
service_key = _service_key(service)
command_id = secrets.token_hex(12)
command = {
"id": command_id,
"action": str(action or "").strip(),
"payload": dict(payload or {}),
"created_at": int(time.time()),
}
key = _runtime_commands_key(service_key)
queued = list(cache.get(key) or [])
queued.append(command)
# Cap the queue so it cannot grow without bound.
if len(queued) > 200:
queued = queued[-200:]
cache.set(key, queued, timeout=_RUNTIME_COMMANDS_TTL)
return command_id
def pop_runtime_command(service: str) -> dict[str, Any] | None:
service_key = _service_key(service)
key = _runtime_commands_key(service_key)
queued = list(cache.get(key) or [])
if not queued:
return None
command = dict(queued.pop(0) or {})
cache.set(key, queued, timeout=_RUNTIME_COMMANDS_TTL)
return command
def set_runtime_command_result(service: str, command_id: str, result: dict | None = None):
service_key = _service_key(service)
result_key = _runtime_command_result_key(service_key, command_id)
payload = dict(result or {})
payload.setdefault("completed_at", int(time.time()))
cache.set(result_key, payload, timeout=_RUNTIME_COMMAND_RESULT_TTL)
async def wait_runtime_command_result(service: str, command_id: str, timeout: float = 20.0):
service_key = _service_key(service)
result_key = _runtime_command_result_key(service_key, command_id)
deadline = time.monotonic() + max(0.1, float(timeout or 0.0))
while time.monotonic() < deadline:
payload = cache.get(result_key)
if payload is not None:
cache.delete(result_key)
if isinstance(payload, dict):
return dict(payload)
return {}
await asyncio.sleep(0.2)
return None
def list_accounts(service: str):
"""
Return account identifiers for service UI list.
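
On the runtime side, which this hunk does not show, something is expected to drain the queue and publish results. A hedged sketch of such a consumer, where the function name, the handle callback, and the poll interval are assumptions:

import asyncio

async def _drain_runtime_commands(service: str, handle) -> None:
    # Hypothetical consumer loop: pop queued commands, run them, and publish a
    # result that wait_runtime_command_result() in the web process can pick up.
    while True:
        command = pop_runtime_command(service)
        if not command:
            await asyncio.sleep(0.5)
            continue
        try:
            result = await handle(command["action"], command.get("payload") or {})
            set_runtime_command_result(service, command["id"], {"ok": True, **(result or {})})
        except Exception as exc:
            # Report the failure so the waiting caller can log it instead of timing out.
            set_runtime_command_result(service, command["id"], {"ok": False, "error": str(exc)})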
@@ -365,7 +428,37 @@ async def send_message_raw(service: str, recipient: str, text=None, attachments=
return runtime_result
except Exception as exc:
log.warning("%s runtime send failed: %s", service_key, exc)
log.warning("whatsapp send skipped: runtime is unavailable or not paired")
# The web/UI process cannot reach the UR in-process runtime client directly,
# so hand the send off to UR via the shared-cache command queue.
command_attachments = []
for att in attachments or []:
row = dict(att or {})
# Keep payload cache-friendly and avoid embedding raw bytes.
for key in ("content",):
row.pop(key, None)
command_attachments.append(row)
command_id = enqueue_runtime_command(
service_key,
"send_message_raw",
{
"recipient": recipient,
"text": text or "",
"attachments": command_attachments,
},
)
command_result = await wait_runtime_command_result(
service_key,
command_id,
timeout=20.0,
)
if isinstance(command_result, dict):
if command_result.get("ok"):
ts = _parse_timestamp(command_result)
return ts if ts else True
err = str(command_result.get("error") or "").strip()
log.warning("whatsapp queued send failed: %s", err or "unknown")
return False
log.warning("whatsapp queued send timed out waiting for runtime result")
return False
if service_key == "instagram":

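From the caller's point of view the queued path is meant to be transparent: on success it returns the parsed timestamp when available (or True), and False otherwise. A small hedged usage sketch; the wrapper name, recipient, and text are illustrative:

async def _notify(recipient: str) -> bool:
    # Hypothetical call site; behaves the same whether the runtime client was
    # reachable in-process or the send went through the cache command queue.
    sent = await send_message_raw("whatsapp", recipient, text="hello")
    return bool(sent)
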
File diff suppressed because it is too large.