diff --git a/.gitignore b/.gitignore index ed8ebf583..af5429fc0 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,10 @@ -__pycache__ \ No newline at end of file +__pycache__ +.idea +.claude/ +books_database/state/ + +# Local-only development notes (kept out of the remote repository). +# All .md files except the root README.md are excluded so that human +# reviewers see a single concise document rather than scattered notes. +*.md +!/README.md \ No newline at end of file diff --git a/.idea/.gitignore b/.idea/.gitignore new file mode 100644 index 000000000..b58b603fe --- /dev/null +++ b/.idea/.gitignore @@ -0,0 +1,5 @@ +# Default ignored files +/shelf/ +/workspace.xml +# Editor-based HTTP Client requests +/httpRequests/ diff --git a/.idea/ds-practice-2026.iml b/.idea/ds-practice-2026.iml new file mode 100644 index 000000000..460d4026f --- /dev/null +++ b/.idea/ds-practice-2026.iml @@ -0,0 +1,12 @@ + + + + + + + + + + \ No newline at end of file diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml new file mode 100644 index 000000000..105ce2da2 --- /dev/null +++ b/.idea/inspectionProfiles/profiles_settings.xml @@ -0,0 +1,6 @@ + + + + \ No newline at end of file diff --git a/.idea/misc.xml b/.idea/misc.xml new file mode 100644 index 000000000..1d3ce46ba --- /dev/null +++ b/.idea/misc.xml @@ -0,0 +1,7 @@ + + + + + + \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml new file mode 100644 index 000000000..8ce804af9 --- /dev/null +++ b/.idea/modules.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml new file mode 100644 index 000000000..35eb1ddfb --- /dev/null +++ b/.idea/vcs.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/README.md b/README.md index f7f53570f..af450ffe9 100644 --- a/README.md +++ b/README.md @@ -1,46 +1,216 @@ -# Distributed Systems @ University of Tartu +# Distributed Systems Practice — 
Checkpoint 3 -This repository contains the initial code for the practice sessions of the Distributed Systems course at the University of Tartu. +This repository extends the Checkpoint 2 system with the two new distributed features required by Checkpoint 3: -## Getting started +- a **replicated books database** — three replicas under synchronous primary-backup replication (rubric R1) +- a **distributed commitment protocol** — 2PC across the books database primary and a new payment service (rubric R2) -### Overview +The Checkpoint 2 features (vector clocks, leader election, mutual exclusion) are retained. The whole submission can be verified by one PowerShell script; see [Quick demo](#quick-demo-5-minutes) and the rubric mapping in [How CP3 requirements are met](#how-cp3-requirements-are-met). -The code consists of multiple services. Each service is located in a separate folder. The `frontend` service folder contains a Dockerfile and the code for an example bookstore application. Each backend service folder (e.g. `orchestrator` or `fraud_detection`) contains a Dockerfile, a requirements.txt file and the source code of the service. During the practice sessions, you will implement the missing functionality in these backend services, or extend the backend with new services. +## Quick demo (5 minutes) -There is also a `utils` folder that contains some helper code or specifications that are used by multiple services. Check the `utils` folder for more information. +1. **Start the stack** from the repository root. -### Running the code with Docker Compose [recommended] +```powershell +docker compose up --build -d +docker compose ps +``` + +Expected: 13 services running — the 9 from CP2 (`frontend`, `orchestrator`, 3 backend services, `order_queue`, 3 executor replicas) plus 3 `books_database` replicas and `payment_service`. 
-To run the code, you need to clone this repository, make sure you have Docker and Docker Compose installed, and run the following command in the root folder of the repository: +2. **Run the verifier** — the single source of truth that this submission works. -```bash -docker compose up +```powershell +.\scripts\checkpoint3-checks.ps1 # first run +.\scripts\checkpoint3-checks.ps1 -SkipBuild # quicker rerun ``` -This will start the system with the multiple services. Each service will be restarted automatically when you make changes to the code, so you don't have to restart the system manually while developing. If you want to know how the services are started and configured, check the `docker-compose.yaml` file. +Expected: `Passed: 19 Failed: 0`. The 19 checks cover Docker plumbing, primary election, the 2PC commit and oversold-abort paths, cross-replica read convergence, DB primary failover, the participant-failure recovery bonus (B2), and the concurrent-writes bonus (B1). -The checkpoint evaluations will be done using the code that is started with Docker Compose, so make sure that your code works with Docker Compose. +3. **(Optional)** Open `http://127.0.0.1:8080` for a manual order, or POST to `http://127.0.0.1:8081/checkout` with one of the prepared payloads (`test_checkout.json`, `test_checkout_oversold.json`, `test_checkout_fraud.json`, `test_checkout_empty_items.json`, `test_checkout_terms_false.json`). -If, for some reason, changes to the code are not reflected, try to force rebuilding the Docker images with the following command: +4. **Tear down** when finished. 
-```bash -docker compose up --build +```powershell +docker compose down ``` -### Run the code locally +## How CP3 requirements are met + +| # | Rubric item | Pts | Where it lives | How to see it pass | +|---|---|---:|---|---| +| R1 | Consistency protocol + DB module | 3 | [books_database/](books_database/), design rationale in [§A.1](#a1--consistency-protocol-design-r1) | verifier checks 7, 8, 16, 17 | +| R2 | Commitment protocol + new service | 3 | [order_executor/src/app.py](order_executor/src/app.py) `run_2pc`, [payment_service/](payment_service/), design rationale in [§A.2](#a2--commitment-protocol-design-r2--b3) | verifier checks 14, 15 | +| R3 | Logging | 1 | All services emit `[SVC] event=... key=value` lines | `docker compose logs` after any demo step | +| R4 | Project organization & docs | 1 | This README + the two diagrams below | (this document) | +| R5 | Consistency-protocol diagram | 1 | [docs/diagrams/consistency-protocol.svg](docs/diagrams/consistency-protocol.svg) | rendered in [Diagrams](#diagrams) | +| R6 | Commitment-protocol diagram | 1 | [docs/diagrams/commitment-protocol.svg](docs/diagrams/commitment-protocol.svg) | rendered in [Diagrams](#diagrams) | +| B1 | Concurrent-writes bonus | 1 | [Bonus B1](#bonus-b1--concurrent-writes) | verifier check 19 ([test_concurrent_writes.py](books_database/tests/test_concurrent_writes.py)) | +| B2 | Participant-failure recovery bonus | 1 | [Bonus B2](#bonus-b2--participant-failure-recovery) | verifier check 18 ([test_2pc_fail_injection.py](order_executor/tests/test_2pc_fail_injection.py)) plus [test_2pc_crash_recovery.py](order_executor/tests/test_2pc_crash_recovery.py) | +| B3 | Coordinator-failure analysis bonus | 1 | [§A.2.1–§A.2.3](#a21--coordinator-failure-analysis-bonus-b3) | analysis only — read §A.2.1 below | + +The two non-rubric handoff items are tracked outside this README: latest changes are committed on `individual-sten-qy-li`, and the `checkpoint-3` Git tag will be applied to the merge commit 
on `master` after team-lead review. + +## Diagrams + +### Consistency protocol (R5) + +![Consistency protocol diagram](docs/diagrams/consistency-protocol.svg) + +### Commitment protocol (R6) + +![Commitment protocol diagram](docs/diagrams/commitment-protocol.svg) + +The commitment-protocol diagram shows both a COMMIT path (both participants vote commit) and an ABORT path (DB votes abort on insufficient stock). + +## Bonus B1 — Concurrent writes + +> *"How do we deal with concurrent writes by different clients? Think of a solution for the problem of two simultaneous orders trying to update the stocks of the same book."* — [Guide9](https://courses.cs.ut.ee/2026/ds/spring/Main/Guide9) + +The primary in our synchronous primary-backup design is already the single serialization point for all writes; the design choice is therefore **what granularity to lock at on the primary**. We use **per-title locks**: each book title gets its own `threading.Lock`, created lazily via `get_key_lock(title)` in [books_database/src/app.py](books_database/src/app.py). The lock is held for the full read-validate-write-replicate span of a `Write` or 2PC `Commit`, so two concurrent decrements on the same title can never observe the same `old` value. Writes on *different* titles proceed in parallel because they acquire different locks. + +The 2PC `Prepare` handler also reasons about concurrency: under `pending_lock`, it computes a `reserved` map by summing every staged order in `pending_orders`, and votes abort with `insufficient_stock` if `current - reserved < requested`. So two simultaneous `Prepare`s for the same title cannot both reserve stock that only one can fulfill — exactly the "two simultaneous orders trying to update the stocks of the same book" case from the bonus prompt. + +Per-title was chosen over a single global lock because concurrent orders for *different* books are the common case in the demo (e.g. 
"Book A" and "Book B" in the same test run); serializing them through a global lock would be an artificial bottleneck. Per-title is the narrowest correct granularity for a key-value store with whole-key reads and writes. + +**Verification:** [books_database/tests/test_concurrent_writes.py](books_database/tests/test_concurrent_writes.py) (verifier check 19) drives 5 same-key writes plus 5 different-key writes from parallel threads and asserts that (a) same-key writes produce 10 distinct sequential sequence numbers with monotonically advancing `old → new` on the primary, (b) different-key writes overlap in time, and (c) all 3 replicas read the same final value for every key. + +## Bonus B2 — Participant-failure recovery + +> *"How do we deal with failing participants? … Devise and test a mechanism for simple recoveries in one of the services."* — [Guide10](https://courses.cs.ut.ee/2026/ds/spring/Main/Guide10) + +The `books_database` participant is fully recoverable across a crash in any 2PC phase. The mechanism has three parts, all in [books_database/src/app.py](books_database/src/app.py): + +1. **Stage to disk before voting commit.** In `Prepare()`, the participant calls `persist_pending(order_id, items)` which writes `/app/state/txn_.json` via a temp-file write-then-rename **before** returning `vote_commit=True`. This guarantees that any `vote_commit` the coordinator observes is backed by an on-disk record. +2. **Reload on startup.** `serve()` calls `load_persisted_all()` to scan `STATE_DIR` for every `txn_*.json` and rebuilds `pending_orders` *before* the gRPC server starts accepting RPCs. The startup log line `recovered_pending order= items=...` makes the recovery visible. +3. **Three-way Commit semantics.** On a retried `Commit`, the participant distinguishes (a) `pending_orders[order]` exists → apply the decrement and replicate; (b) order already in `committed_orders` → return `commit_idempotent` success; (c) order in neither → return `commit_unknown` failure. 
Branch (c) is the safety guard: a freshly elected replacement primary that never saw the original `Prepare` refuses to silently mis-commit. + +The coordinator side complements this in [order_executor/src/app.py](order_executor/src/app.py) `run_2pc`: `Commit` retries up to 12 times over ~40 seconds with exponential backoff, re-discovering the DB primary between attempts via `WhoIsPrimary`. So a participant that briefly dies during phase 2 is retried until it returns, finds its persisted `txn_.json`, and lands the commit. + +**Verification:** two end-to-end tests: + +- [order_executor/tests/test_2pc_fail_injection.py](order_executor/tests/test_2pc_fail_injection.py) (verifier check 18) — injects two `Commit` failures on the books_database primary; the third retry succeeds, all 3 replicas converge to `Book A=9`. Pass output contains `PHASE 6 FAIL-INJECTION E2E: PASSED`. +- [order_executor/tests/test_2pc_crash_recovery.py](order_executor/tests/test_2pc_crash_recovery.py) — `docker kill`s the books_database primary *between* `Prepare` and `Commit`, restarts it without the fail-inject override, and verifies the staged `txn_.json` is reloaded (`recovered_pending` log line) and the retry commit lands. After the test, `books_database/state/3/` contains no leftover `txn_*.json`. + +--- + +# Design rationale + +The sections below back the rubric table above: §A.1 documents the R1 consistency-protocol choice, and §A.2 documents the R2 commitment-protocol choice plus the B3 coordinator-failure analysis. + +## A.1 Consistency protocol design (R1) + +**Choice: synchronous primary-backup replication.** + +We chose primary-backup over chain replication and quorum reads/writes because: + +- The order executor already needs a single coordinator for 2PC. Giving the database a single primary keeps the system simple — `Write`, `Prepare`, `Commit`, `Abort` all talk to the same replica. +- Primary-backup reuses the bully election we already built for the executor tier. 
The three `books_database` replicas run the same pattern, so a single mental model covers both tiers. +- Synchronous replication trades availability for simplicity of reasoning: the primary blocks until every live backup has applied the write, so there is no observable divergence window. The convergence check is a straight equality assertion rather than a bounded-staleness one. + +### A.1.1 Protocol summary + +| Operation | What the primary does | +|---|---| +| `Write(title, qty)` | Call `ReplicateWrite` on every backup in parallel. If every live backup acks, update `kv_store` locally and log `write_committed backups_acked=[...]`. If any backup is missing, log `write_failed` and return failure without updating `kv_store`. | +| `Read(title)` | Serve from `kv_store` on the primary only. Reads from a backup return `"not primary; primary=X"`. | +| `ReplicateWrite(title, qty, seq)` | On the backup: update `kv_store`, bump local `seq_counter` so ordering is observable, log `replicate_applied`. | + +### A.1.2 Leader election and failover + +Bully election on replica id: the highest live replica becomes primary and announces itself via `Coordinator(pid)` to every peer (log line `new primary is X`). Heartbeats fire every `HEARTBEAT_INTERVAL`; a backup that misses `LEADER_TIMEOUT` worth of heartbeats declares the primary dead, clears its cached leader, and starts a new election. + +If the primary dies mid-Write the Write fails on the coordinator side (`replicate_to_backups` sees the missing ack); the caller re-discovers the primary via `WhoIsPrimary` on any replica and retries. + +### A.1.3 How 2PC sits on top + +2PC `Prepare`/`Commit`/`Abort` are primary-only, same as `Write`. The primary stages items in `pending_orders` during `Prepare` (and persists per Bonus B2). On `Commit` it applies the decrement and *synchronously* replicates the new value to the backups before acking the coordinator. 
So commit-of-2PC and replicate-of-effect happen inside the same critical section: a `Read` from any replica after `2pc_commit_applied` observes the post-commit value. This is the strongest proof the consistency protocol works — an end-to-end assertion that "whatever 2PC committed is visible on every replica". + +### A.1.4 Log lines that prove convergence + +``` +[DB-3] became primary +[DB-3] write_committed primary=3 title="Book A" seq=42 old=9 new=10 backups_acked=[1, 2] +[DB-1] replicate_applied from_primary=3 title="Book A" seq=42 old=9 new=10 +[DB-2] replicate_applied from_primary=3 title="Book A" seq=42 old=9 new=10 +``` + +The `new` field on the primary's `write_committed` line equals `new` on each backup's `replicate_applied` line. Verifier check 16 (`convergence:read-all-replicas`) calls `ReadLocal` directly on each replica and asserts equality from outside. + +### A.1.5 Known limitations + +- **Availability degrades if any backup is down.** Synchronous replication blocks on every live backup, so a slow or dead backup slows down (and eventually fails) Writes on the primary. This is the expected cost of strong consistency on a small demo cluster. +- **Split-brain is not fenced by quorum.** Under a partition both halves could briefly believe they are primary. +- **`committed_orders` and `aborted_orders` grow unboundedly.** They are in-memory sets that exist to make 2PC retry semantics safe. In production they would be compacted or backed by a real log. 
+ +## A.2 Commitment protocol design (R2 + B3) + +**Choice: 2PC.** Roles in this repository: + +| Role | Service | Source | +|---|---|---| +| Coordinator | Leader `order_executor` (only the bully-elected leader runs `run_2pc`) | [order_executor/src/app.py](order_executor/src/app.py) | +| Participant 1 | `books_database` primary | [books_database/src/app.py](books_database/src/app.py) | +| Participant 2 | `payment_service` | [payment_service/src/app.py](payment_service/src/app.py) | + +### A.2.0 Happy-path trace + +``` + executor (coordinator) books_database primary payment_service + ---------------------- ---------------------- --------------- + log 2pc_start + Prepare(order, items) -----------> + persist /app/state/txn_*.json + pending_orders[order]=items + <-- vote_commit + Prepare(order, amount) ------------------------------------------> + prepared[order]=amt + <-- vote_commit + log 2pc_decision=COMMIT + Commit(order) -------------------> + apply + replicate to backups + committed_orders.add(order) + remove /app/state/txn_*.json + <-- success + Commit(order) --------------------------------------------------> + committed.add(order) + <-- success + log 2pc_commit_applied +``` + +Phase 1 decides; phase 2 enacts. The `2pc_decision=...` line is written **before** any phase-2 RPC so every round leaves a grep-friendly audit point. (The fact that this line is stdout-only — not a durable record — is the gap that motivates §A.2.1 below.) + +### A.2.1 Coordinator-failure analysis (Bonus B3) + +The B3 prompt — *"What about failure of the coordinator? … No implementation is needed, but the points will only be awarded upon good analysis, justification, and solution."* — is graded on the written analysis. The four timing windows in which the coordinator can crash are: + +| Window | When | State of participants | +|---|---|---| +| W1 | Coordinator crashes **before** any `Prepare` is sent | Nothing staged. No blocking. 
| +| W2 | Coordinator crashes **after sending some Prepares, before writing the decision** | Some participants are in `prepared`, holding reservations. | +| W3 | Coordinator crashes **after writing the decision to stdout, before sending any phase-2 RPC** | Participants are still in `prepared`. The decision exists only in the dead process's memory/log buffers. | +| W4 | Coordinator crashes **after sending the phase-2 RPC to one participant but not the other** | One participant committed (or aborted); the other is still `prepared`. Their views diverge. | + +W1 is harmless. W2/W3/W4 are variants of the classic 2PC blocking problem. + +**The blocking problem.** A participant in `prepared` knows it voted commit and the coordinator has the authority to commit or abort, but it does not know which. A unilateral commit would violate atomicity if the coordinator decided abort (`books_database` would decrement stock the payment side never billed); a unilateral abort would violate atomicity if the other participant already committed. The only safe action is to wait. While it waits, it holds its reservation, which in our system reduces the effective stock for every subsequent `Prepare` on the same title. + +**W4 is the worst case.** If the coordinator sent `Commit` to `books_database` and died before sending `Commit` to `payment_service`, `books_database` committed (stock decremented, pending entry cleared) while `payment_service` is still in `prepared`, with no way to know a commit already happened elsewhere. + +### A.2.2 What this repo handles today -Even though you can run the code locally, it is recommended to use Docker and Docker Compose to run the code. This way you don't have to install any dependencies locally and you can easily run the code on any platform. +- **Participant side is fully recoverable** via persistence + idempotent retry — the Bonus B2 mechanism in the main content. 
+- **Coordinator-side retry for participant transients.** `run_2pc` has a 12-attempt / ~40-second budget on `Commit` with primary re-discovery between attempts. +- **Hot-standby coordinators exist structurally.** The three executors run the same bully-election pattern as the databases; if the leader dies, one of the others is elected within `LEADER_TIMEOUT` (5s). -If you want to run the code locally, you need to install the following dependencies: +In practice the replacement coordinator lands on the correct outcome by retrying phase 1 and relying on participant idempotency — *as long as the original coordinator got at least one participant to commit and the order re-enters `run_2pc`*. Two honesty caveats: -backend services: -- Python 3.8 or newer -- pip -- [grpcio-tools](https://grpc.io/docs/languages/python/quickstart/) -- requirements.txt dependencies from each service +- The `2pc_decision=...` line is **stdout, not a durable record**, so a replacement coordinator cannot read what the dead one decided. +- `Dequeue` on `order_queue` is a destructive `popleft()` with no ack/nack/visibility-timeout, so an order in flight when the leader dies is **not** automatically redelivered to a new leader. Recovery in our demo therefore depends on either (a) the original leader being restarted within its retry window or (b) the user resubmitting. -frontend service: -- It's a simple static HTML page, you can open `frontend/src/index.html` in your browser. +### A.2.3 Solutions from the literature (the B3 "solution" half) -And then run each service individually. +1. **Three-phase commit (3PC).** Insert a `PreCommit` between `Prepare` and `Commit`. A participant in `pre-committed` is guaranteed every live participant voted commit, so on coordinator failure the survivors can elect a replacement and safely commit on their own. Non-blocking under crash failures, at the cost of one extra RPC round. 
Not non-blocking under network partitions, since partitioned participants cannot distinguish a partition from a crash. +2. **Replacement coordinator via bully + durable decision log.** Keep 2PC but make the coordinator side crash-recoverable. The leader writes `decision_.json` *before* phase 2; bully re-elects a new leader on `LEADER_TIMEOUT`; on promotion, the new leader scans for unfinished decisions and resumes phase 2. Participant idempotency (already implemented) makes this safe. Still blocking for ~5s while a new leader is elected, but does not block forever and avoids 3PC's extra round on the happy path. **This is the recommended mitigation for our topology**, exactly the "highest-ID replacement coordinator" pattern from Session 11. +3. **Cooperative termination.** Participants resolve W4 uncertainty peer-to-peer ("did *you* commit order X?"). Complements rather than replaces a durable decision log — only resolves cases where at least one participant already knows the decision. +4. **Consensus-based commit (Paxos Commit / Raft).** Replace the single coordinator with a replicated state machine; the decision becomes a consensus value, eliminating the single point of failure. Significantly more code; out of scope for this checkpoint and not required by the rubric. \ No newline at end of file diff --git a/books_database/Dockerfile b/books_database/Dockerfile new file mode 100644 index 000000000..b2ec714c7 --- /dev/null +++ b/books_database/Dockerfile @@ -0,0 +1,5 @@ +FROM python:3.11 +WORKDIR /app +COPY ./books_database/requirements.txt . 
+RUN pip install --no-cache-dir -r requirements.txt +CMD python utils/other/hotreload.py "books_database/src/app.py" diff --git a/books_database/requirements.txt b/books_database/requirements.txt new file mode 100644 index 000000000..c267def1b --- /dev/null +++ b/books_database/requirements.txt @@ -0,0 +1,3 @@ +grpcio==1.78.0 +grpcio-tools==1.78.0 +watchdog==6.0.0 diff --git a/books_database/src/app.py b/books_database/src/app.py new file mode 100644 index 000000000..c1aa5abae --- /dev/null +++ b/books_database/src/app.py @@ -0,0 +1,837 @@ +import json +import os +import sys +import time +import threading +from concurrent import futures + +import grpc + +FILE = __file__ if "__file__" in globals() else os.getenv("PYTHONFILE", "") + +db_grpc_path = os.path.abspath( + os.path.join(FILE, "../../../utils/pb/books_database") +) +sys.path.insert(0, db_grpc_path) + +import books_database_pb2 as db_pb2 +import books_database_pb2_grpc as db_grpc + + +REPLICA_ID = int(os.getenv("REPLICA_ID", "1")) +REPLICA_PORT = os.getenv("REPLICA_PORT", "50058") +HEARTBEAT_INTERVAL = 2.0 +LEADER_TIMEOUT = 5.0 +REPLICATE_TIMEOUT = 2.0 + +# Phase 6: participant-failure bonus. Staged transactions are persisted to +# STATE_DIR at the moment the participant votes commit so that, if the +# container restarts between Prepare and Commit, the participant can +# recover the staged state from disk and the coordinator's Commit retry +# can still succeed. +STATE_DIR = os.getenv("STATE_DIR", "/app/state") + +# Soft fail injection: make the next N Commit RPCs return UNAVAILABLE so +# we can demonstrate the coordinator's retry loop without having to crash +# the container (which would trigger a leader failover and lose the +# primary-only pending buffer). Gets decremented on each injected failure. 
+_fail_next_commit_counter = [int(os.getenv("FAIL_NEXT_COMMIT", "0"))] + +SEED_STOCK = { + "Book A": 10, + "Book B": 6, + "Book C": 20, + "Distributed Systems Basics": 5, + "Designing Data-Intensive Applications": 3, +} + + +def parse_peers(): + peers = [] + raw = os.getenv("PEERS", "") + for item in raw.split(","): + item = item.strip() + if not item: + continue + peer_id, peer_addr = item.split("@", 1) + peers.append((int(peer_id), peer_addr)) + return peers + + +PEERS = parse_peers() + + +# --- Bully election state --- + +state_lock = threading.Lock() +leader_id = None +last_heartbeat = time.time() +is_leader = False +election_in_progress = False + + +# --- KV store state --- +# +# We use fine-grained per-key locks so two writes against *different* books +# can run in parallel while two writes against the *same* book are +# serialized. This gives us the concurrent-writes bonus (§4.4) without +# breaking read-validate-write atomicity. kv_state_lock is a short meta-lock +# that only covers lookups in key_locks and kv_store; it is never held while +# we fan out to backups. + +kv_state_lock = threading.Lock() +kv_store = {} # populated in serve() from disk or SEED_STOCK +key_locks = {} # title -> threading.Lock + +seq_lock = threading.Lock() +seq_counter = 0 + +# --- 2PC participant state --- +# +# pending_orders[order_id] = list of (title, quantity) reservations. +# A Prepare inserts the reservation here and we "hold" stock against it when +# evaluating subsequent Prepares. A Commit reads the reservation, applies +# the decrement to kv_store (and replicates to backups), then drops the +# pending entry. An Abort just drops. All three handlers take pending_lock +# so concurrent 2PC ops on the same or different orders serialize cleanly. +# +# Phase 6: pending_orders is also persisted to STATE_DIR on vote_commit. +# Startup recovery re-loads the map so a container restart between +# Prepare and Commit does not lose the reservation. 
+pending_lock = threading.Lock() +pending_orders = {} + +# committed_orders lets Commit distinguish three cases during a retry: +# (a) order_id is still pending -> apply the decrement +# (b) order_id is in committed_orders -> idempotent success (safe no-op) +# (c) order_id is in neither -> uncertain; refuse with success=False so +# the coordinator keeps retrying until the right replica (the one +# that ran Prepare) becomes reachable again. +# Case (c) is what protects us during a brief failover window: a freshly +# elected primary that never saw Prepare must NOT pretend it committed +# the order. committed_orders is a set of order_ids (bounded by the +# number of orders processed this container lifetime -- fine for a demo). +committed_orders = set() +aborted_orders = set() + + +def _txn_file(order_id): + safe = order_id.replace("/", "_").replace("\\", "_") + return os.path.join(STATE_DIR, f"txn_{safe}.json") + + +def persist_pending(order_id, items): + """Atomically persist a staged transaction before voting commit. + + Write-then-rename: write the full JSON payload to `.tmp`, then + `os.replace` it onto the final path. POSIX guarantees replace is + atomic, so a crash between the two steps leaves either the old file + (no change) or the new file (complete), never a truncated half-file + that `load_persisted_all` could misread on recovery. + """ + os.makedirs(STATE_DIR, exist_ok=True) + path = _txn_file(order_id) + tmp = path + ".tmp" + with open(tmp, "w") as f: + json.dump({"items": [[t, q] for t, q in items]}, f) + os.replace(tmp, path) + + +def remove_persisted(order_id): + """Drop the on-disk staged-transaction file for `order_id`. Called + once Commit (apply + replicate) or Abort has succeeded, because the + in-memory state is now authoritative. 
Missing file is a no-op so + recovery and steady-state paths can both call this unconditionally.""" + try: + os.remove(_txn_file(order_id)) + except FileNotFoundError: + pass + + +def load_persisted_all(): + """Recovery scan: read every `txn_*.json` in STATE_DIR and return the + staged items keyed by order_id. Called once at process start before + `serve()` accepts RPCs so the replica can rebuild `pending_orders` + exactly as the previous instance left it, letting a retrying + coordinator's Commit or Abort finish the transaction.""" + if not os.path.isdir(STATE_DIR): + return {} + out = {} + for fname in os.listdir(STATE_DIR): + if not (fname.startswith("txn_") and fname.endswith(".json")): + continue + path = os.path.join(STATE_DIR, fname) + try: + with open(path) as f: + data = json.load(f) + order_id = fname[len("txn_"):-len(".json")] + out[order_id] = [(t, int(q)) for t, q in data["items"]] + except Exception as exc: + print(f"[DB-{REPLICA_ID}] recovery_skip file={fname} err={exc!r}") + return out + + +def _kv_store_path(): + return os.path.join(STATE_DIR, "kv_store.json") + + +def persist_kv_store(): + """Atomically flush the current kv_store to disk so a restarted + replica comes back with post-commit stock, not the hard-coded + SEED_STOCK. Same write-then-rename pattern as persist_pending. + + The temp file includes the thread id so that concurrent callers + (e.g. 
parallel ReplicateWrite handlers for different keys) each + write to their own temp file and never race on os.replace.""" + os.makedirs(STATE_DIR, exist_ok=True) + path = _kv_store_path() + tmp = f"{path}.{threading.get_ident()}.tmp" + with kv_state_lock: + snapshot = dict(kv_store) + with open(tmp, "w") as f: + json.dump(snapshot, f) + os.replace(tmp, path) + + +def load_kv_store(): + """Load kv_store from disk if a previous instance persisted it, + otherwise fall back to SEED_STOCK for a fresh container.""" + path = _kv_store_path() + if os.path.isfile(path): + try: + with open(path) as f: + data = json.load(f) + return {k: int(v) for k, v in data.items()} + except Exception as exc: + print(f"[DB-{REPLICA_ID}] kv_store_load_failed err={exc!r} falling_back_to=SEED_STOCK") + return dict(SEED_STOCK) + + +def get_key_lock(title): + with kv_state_lock: + lock = key_locks.get(title) + if lock is None: + lock = threading.Lock() + key_locks[title] = lock + return lock + + +def peer_addr_for(pid): + for p, addr in PEERS: + if p == pid: + return addr + return "" + + +def has_fresh_leader_locked(): + if leader_id is None: + return False + if is_leader and leader_id == REPLICA_ID: + return True + return (time.time() - last_heartbeat) <= LEADER_TIMEOUT + + +def send_rpc(addr, fn): + try: + with grpc.insecure_channel(addr) as channel: + stub = db_grpc.BooksDatabaseServiceStub(channel) + return fn(stub) + except Exception: + return None + + +def announce_coordinator(): + for pid, addr in PEERS: + if pid == REPLICA_ID: + continue + send_rpc( + addr, + lambda stub: stub.Coordinator( + db_pb2.CoordinatorRequest(leader_id=REPLICA_ID), + timeout=2.0, + ), + ) + + +def start_election(force=False): + global election_in_progress, leader_id + + with state_lock: + if election_in_progress: + return + # Normal path: if we already have a fresh leader, do nothing. 
+ # Forced path: a recovering higher-ID replica is allowed to + # challenge a lower-ID active leader, matching the bully rule + # that the highest alive replica should eventually win. + if (not force) and has_fresh_leader_locked(): + return + election_in_progress = True + + print(f"[DB-{REPLICA_ID}] starting election") + + higher_peers = [(pid, addr) for pid, addr in PEERS if pid > REPLICA_ID] + got_answer = False + + for _pid, addr in higher_peers: + response = send_rpc( + addr, + lambda stub: stub.Election( + db_pb2.ElectionRequest(candidate_id=REPLICA_ID), + timeout=2.0, + ), + ) + if response and response.alive: + got_answer = True + + if not got_answer: + become_leader() + return + + time.sleep(LEADER_TIMEOUT) + + with state_lock: + fresh_leader = has_fresh_leader_locked() + election_in_progress = False + + if not fresh_leader: + with state_lock: + leader_id = None + start_election() + + +def become_leader(): + global leader_id, is_leader, election_in_progress, last_heartbeat + with state_lock: + leader_id = REPLICA_ID + is_leader = True + election_in_progress = False + last_heartbeat = time.time() + + print(f"[DB-{REPLICA_ID}] became primary") + announce_coordinator() + + +def heartbeat_loop(): + while True: + time.sleep(HEARTBEAT_INTERVAL) + with state_lock: + leader_now = is_leader + if not leader_now: + continue + for pid, addr in PEERS: + if pid == REPLICA_ID: + continue + send_rpc( + addr, + lambda stub: stub.Heartbeat( + db_pb2.HeartbeatRequest(leader_id=REPLICA_ID), + timeout=2.0, + ), + ) + + +def timeout_loop(): + global leader_id + while True: + time.sleep(1.0) + with state_lock: + if is_leader or election_in_progress: + continue + if leader_id is None: + continue + expired = (time.time() - last_heartbeat) > LEADER_TIMEOUT + if expired: + print(f"[DB-{REPLICA_ID}] primary timeout detected") + with state_lock: + leader_id = None + start_election() + + +# --- Replication helper (called by the primary on Write) --- +# +# Persistent gRPC channels for 
backup replication. Creating a fresh +# channel per call works under low concurrency but causes a connection +# storm when several writes fan out to backups simultaneously (each +# write opens 2 new TCP connections). Caching one channel per peer lets +# gRPC multiplex all RPCs over a single HTTP/2 connection. + +_replication_channels = {} +_replication_channels_lock = threading.Lock() + + +def _get_replication_channel(addr): + with _replication_channels_lock: + ch = _replication_channels.get(addr) + if ch is None: + ch = grpc.insecure_channel(addr) + _replication_channels[addr] = ch + return ch + + +def replicate_to_backups(title, quantity, seq): + targets = [(pid, addr) for pid, addr in PEERS if pid != REPLICA_ID] + results = {} + + def do_one(pid, addr): + try: + ch = _get_replication_channel(addr) + stub = db_grpc.BooksDatabaseServiceStub(ch) + resp = stub.ReplicateWrite( + db_pb2.ReplicateWriteRequest( + title=title, + quantity=quantity, + seq=seq, + from_replica=REPLICA_ID, + ), + timeout=REPLICATE_TIMEOUT, + ) + results[pid] = resp + except Exception as exc: + print( + f"[DB-{REPLICA_ID}] replicate_rpc_error " + f"peer={pid} title=\"{title}\" seq={seq} err={exc!r}" + ) + results[pid] = None + + threads = [ + threading.Thread(target=do_one, args=(pid, addr)) + for pid, addr in targets + ] + for t in threads: + t.start() + for t in threads: + t.join(timeout=REPLICATE_TIMEOUT + 1.0) + + acked = [pid for pid, r in results.items() if r is not None and r.success] + missing = [pid for pid, _ in targets if pid not in acked] + return acked, missing + + +# --- gRPC service --- + +class BooksDatabaseService(db_grpc.BooksDatabaseServiceServicer): + + # Client-facing RPCs (Phase 2: primary-only for strong consistency). 
+ + def Read(self, request, context): + with state_lock: + if not is_leader: + msg = f"not primary; primary={leader_id}" + print(f"[DB-{REPLICA_ID}] read_rejected title={request.title} reason={msg}") + return db_pb2.ReadResponse(success=False, quantity=0, message=msg) + + key_lock = get_key_lock(request.title) + with key_lock: + with kv_state_lock: + value = kv_store.get(request.title) + + if value is None: + print(f"[DB-{REPLICA_ID}] read_miss title=\"{request.title}\"") + return db_pb2.ReadResponse( + success=False, quantity=0, message="unknown title" + ) + + print( + f"[DB-{REPLICA_ID}] read_ok title=\"{request.title}\" value={value}" + ) + return db_pb2.ReadResponse(success=True, quantity=value, message="ok") + + def ReadLocal(self, request, context): + """Debug/ops read. Returns whatever this replica currently holds, + regardless of leader status. Used only by the convergence check.""" + key_lock = get_key_lock(request.title) + with key_lock: + with kv_state_lock: + value = kv_store.get(request.title) + + if value is None: + return db_pb2.ReadResponse( + success=False, quantity=0, message="unknown title" + ) + return db_pb2.ReadResponse(success=True, quantity=value, message="ok") + + def Write(self, request, context): + global seq_counter + + with state_lock: + if not is_leader: + msg = f"not primary; primary={leader_id}" + print( + f"[DB-{REPLICA_ID}] write_rejected " + f"title=\"{request.title}\" reason={msg}" + ) + return db_pb2.WriteResponse(success=False, message=msg) + + # Per-key lock: concurrent writes on the *same* title serialize here + # while concurrent writes on *different* titles run in parallel. 
+ key_lock = get_key_lock(request.title) + with key_lock: + with kv_state_lock: + old = kv_store.get(request.title) + + with seq_lock: + seq_counter += 1 + seq = seq_counter + + acked, missing = replicate_to_backups( + request.title, request.quantity, seq + ) + + if missing: + print( + f"[DB-{REPLICA_ID}] write_failed " + f"title=\"{request.title}\" seq={seq} " + f"old={old} new={request.quantity} " + f"acked={acked} missing={missing}" + ) + return db_pb2.WriteResponse( + success=False, + message=f"replication incomplete; missing backups {missing}", + ) + + with kv_state_lock: + kv_store[request.title] = request.quantity + persist_kv_store() + + print( + f"[DB-{REPLICA_ID}] write_committed primary={REPLICA_ID} " + f"title=\"{request.title}\" seq={seq} " + f"old={old} new={request.quantity} backups_acked={acked}" + ) + return db_pb2.WriteResponse(success=True, message="ok") + + # Internal RPCs. + + def ReplicateWrite(self, request, context): + global seq_counter + + # Per-key lock on the backup too: defensive — the primary already + # serializes replicates for the same key, but this guards against + # any future path that might not. + key_lock = get_key_lock(request.title) + with key_lock: + with kv_state_lock: + old = kv_store.get(request.title) + kv_store[request.title] = request.quantity + with seq_lock: + if request.seq > seq_counter: + seq_counter = request.seq + persist_kv_store() + + print( + f"[DB-{REPLICA_ID}] replicate_applied " + f"from_primary={request.from_replica} " + f"title=\"{request.title}\" seq={request.seq} " + f"old={old} new={request.quantity}" + ) + return db_pb2.ReplicateWriteResponse(success=True, message="ok") + + def WhoIsPrimary(self, request, context): + with state_lock: + current = leader_id if leader_id is not None else 0 + addr = peer_addr_for(current) if current else "" + return db_pb2.WhoIsPrimaryResponse(leader_id=current, leader_addr=addr) + + # Bully election RPCs. 
+ + def Election(self, request, context): + global election_in_progress + + if REPLICA_ID <= request.candidate_id: + return db_pb2.ElectionResponse(alive=False) + + print(f"[DB-{REPLICA_ID}] received election from {request.candidate_id}") + + with state_lock: + already_leader = is_leader + election_running = election_in_progress + + if already_leader: + threading.Thread(target=announce_coordinator, daemon=True).start() + elif not election_running: + threading.Thread(target=start_election, daemon=True).start() + + return db_pb2.ElectionResponse(alive=True) + + def Coordinator(self, request, context): + global leader_id, is_leader, election_in_progress, last_heartbeat + with state_lock: + leader_id = request.leader_id + is_leader = leader_id == REPLICA_ID + election_in_progress = False + last_heartbeat = time.time() + + print(f"[DB-{REPLICA_ID}] new primary is {leader_id}") + return db_pb2.Ack(ok=True) + + def Heartbeat(self, request, context): + global leader_id, is_leader, last_heartbeat + with state_lock: + leader_id = request.leader_id + is_leader = leader_id == REPLICA_ID + last_heartbeat = time.time() + return db_pb2.Ack(ok=True) + + # --- 2PC participant RPCs --- + + def Prepare(self, request, context): + """Phase 1 of 2PC. Check that each requested item has enough stock + once existing reservations are subtracted, then stage the order in + pending_orders and return vote_commit=True. 
If any item is short, + return vote_commit=False and stage nothing.""" + with state_lock: + if not is_leader: + msg = f"not primary; primary={leader_id}" + print( + f"[DB-{REPLICA_ID}] prepare_rejected " + f"order={request.order_id} reason={msg}" + ) + return db_pb2.PrepareResponse(vote_commit=False, message=msg) + + order_id = request.order_id + items = [(it.title, it.quantity) for it in request.items] + + with pending_lock: + if order_id in pending_orders: + print( + f"[DB-{REPLICA_ID}] prepare_idempotent order={order_id} " + f"(already prepared)" + ) + return db_pb2.PrepareResponse( + vote_commit=True, message="already prepared" + ) + + with kv_state_lock: + stock_snapshot = {t: kv_store.get(t) for t, _ in items} + + reserved = {} + for staged in pending_orders.values(): + for t, q in staged: + reserved[t] = reserved.get(t, 0) + q + + insufficient = [] + for title, qty in items: + current = stock_snapshot.get(title) + if current is None: + insufficient.append(f"{title}(unknown)") + continue + available = current - reserved.get(title, 0) + if available < qty: + insufficient.append( + f"{title}(want={qty},avail={available})" + ) + + if insufficient: + print( + f"[DB-{REPLICA_ID}] prepare_vote_abort " + f"order={order_id} reasons={insufficient}" + ) + return db_pb2.PrepareResponse( + vote_commit=False, + message=f"insufficient stock: {insufficient}", + ) + + # Persist first, then mark pending. If persist fails (disk error) + # the pending buffer stays empty and we vote abort. 
+ try: + persist_pending(order_id, items) + except Exception as exc: + print( + f"[DB-{REPLICA_ID}] prepare_persist_failed " + f"order={order_id} err={exc!r}" + ) + return db_pb2.PrepareResponse( + vote_commit=False, + message=f"persist failed: {exc!r}", + ) + pending_orders[order_id] = items + + items_repr = ",".join(f"{t}x{q}" for t, q in items) + print( + f"[DB-{REPLICA_ID}] prepare_vote_commit " + f"order={order_id} items=[{items_repr}] persisted=yes" + ) + return db_pb2.PrepareResponse(vote_commit=True, message="ok") + + def Commit(self, request, context): + """Phase 2 of 2PC. Apply the staged decrements to kv_store, replicate + each one to the backups synchronously, then drop the pending entry. + If any backup fails to ack, leave pending in place and report failure + so the coordinator can retry.""" + global seq_counter + + with state_lock: + if not is_leader: + msg = f"not primary; primary={leader_id}" + print( + f"[DB-{REPLICA_ID}] commit_rejected " + f"order={request.order_id} reason={msg}" + ) + return db_pb2.CommitResponse(success=False, message=msg) + + order_id = request.order_id + + # Phase 6 fail injection. If the env var asked us to fail the next N + # Commits, do so without touching kv_store or the pending entry so + # the coordinator's retry (after the counter reaches zero) still + # finds the reservation and completes the transaction. + if _fail_next_commit_counter[0] > 0: + _fail_next_commit_counter[0] -= 1 + remaining = _fail_next_commit_counter[0] + print( + f"[DB-{REPLICA_ID}] commit_fail_injected " + f"order={order_id} remaining_failures={remaining}" + ) + return db_pb2.CommitResponse( + success=False, + message=f"injected failure; retry (remaining={remaining})", + ) + + with pending_lock: + items = pending_orders.get(order_id) + if items is None: + # No pending reservation. 
Distinguish "already committed" + # (safe, idempotent success) from "never heard of this + # order" (uncertain; refuse so the coordinator retries + # against the replica that did see Prepare). + if order_id in committed_orders: + print( + f"[DB-{REPLICA_ID}] commit_idempotent " + f"order={order_id} reason=already-committed" + ) + return db_pb2.CommitResponse( + success=True, message="already committed" + ) + print( + f"[DB-{REPLICA_ID}] commit_unknown " + f"order={order_id} reason=no-pending-no-record" + ) + return db_pb2.CommitResponse( + success=False, + message="unknown order; never prepared on this replica", + ) + + applied = [] + for title, qty in items: + with kv_state_lock: + old = kv_store.get(title, 0) + new_value = old - qty + with seq_lock: + seq_counter += 1 + seq = seq_counter + + acked, missing = replicate_to_backups(title, new_value, seq) + if missing: + print( + f"[DB-{REPLICA_ID}] commit_replicate_failed " + f"order={order_id} title=\"{title}\" seq={seq} " + f"missing={missing}" + ) + return db_pb2.CommitResponse( + success=False, + message=f"replication failed; missing {missing}", + ) + + with kv_state_lock: + kv_store[title] = new_value + applied.append((title, old, new_value, seq, acked)) + + del pending_orders[order_id] + committed_orders.add(order_id) + remove_persisted(order_id) + persist_kv_store() + + for title, old, new_value, seq, acked in applied: + print( + f"[DB-{REPLICA_ID}] commit_applied order={order_id} " + f"title=\"{title}\" seq={seq} old={old} new={new_value} " + f"backups_acked={acked}" + ) + return db_pb2.CommitResponse(success=True, message="ok") + + def Abort(self, request, context): + """Drop the staged reservation for this order. 
Idempotent: aborting + an order that was never prepared (or already committed/aborted) is + a successful no-op.""" + order_id = request.order_id + with pending_lock: + items = pending_orders.pop(order_id, None) + aborted_orders.add(order_id) + remove_persisted(order_id) + + if items is None: + print(f"[DB-{REPLICA_ID}] abort_noop order={order_id}") + return db_pb2.AbortResponse(success=True, message="no pending") + + items_repr = ",".join(f"{t}x{q}" for t, q in items) + print( + f"[DB-{REPLICA_ID}] abort_ok order={order_id} " + f"dropped=[{items_repr}]" + ) + return db_pb2.AbortResponse(success=True, message="ok") + + +def serve(): + global kv_store + # Load committed stock from disk (survives restarts) or fall back to + # the hard-coded SEED_STOCK for a fresh container. + kv_store = load_kv_store() + loaded_from = "disk" if os.path.isfile(_kv_store_path()) else "SEED_STOCK" + print( + f"[DB-{REPLICA_ID}] kv_store_loaded from={loaded_from} " + f"titles={list(kv_store.keys())}" + ) + + # Phase 6 recovery: reload any staged transactions the previous instance + # persisted before it died. From this point on pending_orders is + # authoritative again and the coordinator's next Commit or Abort will + # finish the transaction. 
+ recovered = load_persisted_all() + if recovered: + with pending_lock: + pending_orders.update(recovered) + for oid, items in recovered.items(): + items_repr = ",".join(f"{t}x{q}" for t, q in items) + print( + f"[DB-{REPLICA_ID}] recovered_pending " + f"order={oid} items=[{items_repr}]" + ) + + server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + db_grpc.add_BooksDatabaseServiceServicer_to_server( + BooksDatabaseService(), server + ) + server.add_insecure_port("[::]:" + REPLICA_PORT) + server.start() + print( + f"[DB-{REPLICA_ID}] listening on port {REPLICA_PORT} " + f"seeded_titles={list(SEED_STOCK.keys())} " + f"state_dir={STATE_DIR} " + f"recovered_pending={len(recovered)} " + f"fail_next_commit={_fail_next_commit_counter[0]}" + ) + + threading.Thread(target=heartbeat_loop, daemon=True).start() + threading.Thread(target=timeout_loop, daemon=True).start() + + time.sleep(1.0) + with state_lock: + current_leader = leader_id + should_start = ( + (not election_in_progress) + and ( + current_leader is None + or current_leader < REPLICA_ID + ) + ) + if should_start: + # If a lower-ID leader is already active when this replica comes + # back, proactively challenge it so the highest live replica can + # reclaim primary as expected by the tests and bully semantics. + start_election(force=(current_leader is not None and current_leader < REPLICA_ID)) + + server.wait_for_termination() + + +if __name__ == "__main__": + serve() diff --git a/books_database/tests/test_concurrent_writes.py b/books_database/tests/test_concurrent_writes.py new file mode 100644 index 000000000..4bc2b963b --- /dev/null +++ b/books_database/tests/test_concurrent_writes.py @@ -0,0 +1,258 @@ +"""Phase 3 verification: per-key locks allow concurrent writes on different +keys to proceed in parallel, while concurrent writes on the same key +serialize cleanly. 
+ +Run from host: + python books_database/tests/test_concurrent_writes.py +""" + +import os +import sys +import time +import threading + +import grpc + +HERE = os.path.dirname(os.path.abspath(__file__)) +sys.path.insert(0, os.path.abspath(os.path.join(HERE, "../../utils/pb/books_database"))) + +import books_database_pb2 as db_pb2 +import books_database_pb2_grpc as db_grpc + +# Host ports for the three replicas (see docker-compose.yaml). +PRIMARY_CANDIDATES = [ + ("127.0.0.1:50258", 1), + ("127.0.0.1:50259", 2), + ("127.0.0.1:50260", 3), +] + + +def find_primary(): + """Return (host_addr, leader_id) for the current primary. + + Hardened against the brief leader-stabilization window that can + appear right after a failover/restore cycle. The naive "first peer + that reports any leader_id wins" approach can point Writes at a + replica whose own is_leader has already flipped back to False, + yielding `not primary; primary=None` rejections. To avoid that we + require all three of the following, for three consecutive + iterations spaced ~1s apart, within a 30s deadline: + + (a) at least 2 of 3 replicas agree on the same leader_id via + WhoIsPrimary, + (b) a primary-only Read RPC against the named leader succeeds + (this is the only check that actually exercises the + `if not is_leader: reject` branch on the named node), and + (c) the named leader_id is the same as the one returned by the + previous iteration. 
+ """ + id_to_host = {rid: addr for addr, rid in PRIMARY_CANDIDATES} + required_stable = 3 + deadline = time.time() + 30.0 + last_answer = None + streak = 0 + + while time.time() < deadline: + votes = {} + for addr, _ in PRIMARY_CANDIDATES: + try: + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + r = stub.WhoIsPrimary( + db_pb2.WhoIsPrimaryRequest(), timeout=2.0 + ) + if r.leader_id: + votes[r.leader_id] = votes.get(r.leader_id, 0) + 1 + except Exception: + continue + + candidate_id = None + if votes: + # Pick the candidate with the most votes; tie-break on the + # higher leader_id to match the bully protocol's rule. + candidate_id, candidate_votes = sorted( + votes.items(), key=lambda kv: (kv[1], kv[0]), reverse=True + )[0] + if candidate_votes < 2: + candidate_id = None + + probe_ok = False + if candidate_id is not None: + addr = id_to_host[candidate_id] + try: + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + probe = stub.Read( + db_pb2.ReadRequest(title="Book A"), timeout=2.0 + ) + probe_ok = bool(probe.success) + except Exception: + probe_ok = False + + if candidate_id is not None and probe_ok: + if candidate_id == last_answer: + streak += 1 + else: + last_answer = candidate_id + streak = 1 + + if streak >= required_stable: + print( + f"find_primary stable: leader_id={candidate_id} " + f"votes={votes} streak={streak}" + ) + return id_to_host[candidate_id], candidate_id + else: + last_answer = None + streak = 0 + + time.sleep(1.0) + + raise RuntimeError( + f"no stable DB primary within 30s " + f"(last_answer={last_answer}, streak={streak})" + ) + + +def write_one(addr, title, quantity, results, idx, barrier): + barrier.wait() + t0 = time.time() + try: + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + r = stub.Write( + db_pb2.WriteRequest(title=title, quantity=quantity), + timeout=10.0, + ) + ok, msg = r.success, r.message + except Exception as 
exc: + ok, msg = False, f"rpc_error={exc}" + results[idx] = (title, quantity, ok, msg, t0, time.time()) + + +def read_one(addr, title): + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + return stub.Read(db_pb2.ReadRequest(title=title), timeout=3.0) + + +def run_concurrent(addr, plan, label): + """plan = list of (title, quantity) tuples, all fired at once.""" + n = len(plan) + results = [None] * n + barrier = threading.Barrier(n) + threads = [ + threading.Thread( + target=write_one, + args=(addr, title, qty, results, i, barrier), + ) + for i, (title, qty) in enumerate(plan) + ] + start = time.time() + for t in threads: + t.start() + for t in threads: + t.join(timeout=30.0) + elapsed = time.time() - start + + all_ok = all(r is not None and r[2] for r in results) + print(f"\n== {label}: {n} concurrent writes -> elapsed={elapsed:.2f}s all_ok={all_ok}") + for r in results: + title, qty, ok, msg, t0, t1 = r + print(f" title=\"{title}\" qty={qty} ok={ok} latency={(t1-t0):.2f}s msg={msg!r}") + return elapsed, all_ok + + +def read_local(addr, title): + """ReadLocal bypasses the primary-only guard.""" + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + return stub.ReadLocal(db_pb2.ReadRequest(title=title), timeout=3.0) + + +def main(): + addr, pid = find_primary() + print(f"primary = DB-{pid} @ {addr}") + failures = [] + + # ------------------------------------------------------------------ + # Test A: 5 concurrent writes on the SAME key. Per-key lock + # serializes them, so the final value must be one of the attempted + # values (no torn state), and all 5 writes must succeed. 
+ # ------------------------------------------------------------------ + same_plan = [("Book A", 300 + i) for i in range(5)] + elapsed_same, ok_same = run_concurrent(addr, same_plan, "TEST A (same key)") + if not ok_same: + failures.append("TEST A: not all same-key writes succeeded") + + r_a = read_one(addr, "Book A") + attempted_values = {300 + i for i in range(5)} + if not r_a.success: + failures.append(f"TEST A: Read(Book A) failed: {r_a.message}") + elif r_a.quantity not in attempted_values: + failures.append( + f"TEST A: final Book A = {r_a.quantity}, expected one of {attempted_values}" + ) + print(f" final Book A = {r_a.quantity} (in {attempted_values}? " + f"{'YES' if r_a.quantity in attempted_values else 'NO'})") + + # Verify convergence: all 3 replicas must show the same value. + id_to_host = {rid: host for host, rid in PRIMARY_CANDIDATES} + values = {} + for rid, host in id_to_host.items(): + rl = read_local(host, "Book A") + values[rid] = rl.quantity + print(f" convergence: {values}") + if len(set(values.values())) != 1: + failures.append(f"TEST A: replicas diverged after same-key writes: {values}") + + # ------------------------------------------------------------------ + # Test B: 5 concurrent writes on 5 DIFFERENT keys. With per-key + # locks these should fan out in parallel. Each key gets a unique + # value, so the final read on each key must match exactly. 
+ # ------------------------------------------------------------------ + different_plan = [ + ("Book A", 201), + ("Book B", 202), + ("Book C", 203), + ("Distributed Systems Basics", 204), + ("Designing Data-Intensive Applications", 205), + ] + elapsed_diff, ok_diff = run_concurrent(addr, different_plan, "TEST B (different keys)") + if not ok_diff: + failures.append("TEST B: not all different-key writes succeeded") + + for title, expected in different_plan: + r = read_one(addr, title) + match = r.success and r.quantity == expected + print(f" {title} = {r.quantity} (expected {expected}) {'OK' if match else 'FAIL'}") + if not match: + failures.append(f"TEST B: {title} = {r.quantity}, expected {expected}") + + # Verify convergence on all replicas for every key. + for title, expected in different_plan: + for rid, host in id_to_host.items(): + rl = read_local(host, title) + if rl.quantity != expected: + failures.append( + f"TEST B convergence: DB-{rid} {title} = {rl.quantity}, " + f"expected {expected}" + ) + + # ------------------------------------------------------------------ + # Summary + # ------------------------------------------------------------------ + print() + print(f"elapsed(same key) = {elapsed_same:.2f}s") + print(f"elapsed(different keys) = {elapsed_diff:.2f}s") + if failures: + print(f"\nFAILED ({len(failures)} assertion(s)):") + for f in failures: + print(f" - {f}") + sys.exit(1) + else: + print("\nCONCURRENT WRITES TEST: PASSED") + + +if __name__ == "__main__": + main() diff --git a/docker-compose.yaml b/docker-compose.yaml index b4a60a537..be67803fb 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -1,59 +1,221 @@ -version: '3' services: frontend: build: - # Use the current directory as the build context - # This allows us to access the files in the current directory inside the Dockerfile context: ./ dockerfile: ./frontend/Dockerfile ports: - # Expose port 8080 on the host, and map port 80 of the container to port 8080 on the host - # 
Access the application at http://localhost:8080 - "8080:80" volumes: - # Mount the frontend directory - ./frontend/src:/usr/share/nginx/html + orchestrator: build: - # Use the current directory as the build context - # This allows us to access the files in the current directory inside the Dockerfile context: ./ - # Use the Dockerfile in the orchestrator directory dockerfile: ./orchestrator/Dockerfile ports: - # Expose port 8081 on the host, and map port 5000 of the container to port 8081 on the host - - 8081:5000 + - "8081:5000" environment: - # Pass the environment variables to the container - # The PYTHONUNBUFFERED environment variable ensures that the output from the application is logged to the console - PYTHONUNBUFFERED=TRUE - # The PYTHONFILE environment variable specifies the absolute entry point of the application - # Check app.py in the orchestrator directory to see how this is used - PYTHONFILE=/app/orchestrator/src/app.py + - PYTHONPATH=/app volumes: - # Mount the utils directory in the current directory to the /app/utils directory in the container - ./utils:/app/utils - # Mount the orchestrator/src directory in the current directory to the /app/orchestrator/src directory in the container - ./orchestrator/src:/app/orchestrator/src + depends_on: + - fraud_detection + - transaction_verification + - suggestions + - order_queue + fraud_detection: build: - # Use the current directory as the build context - # This allows us to access the files in the current directory inside the Dockerfile context: ./ - # Use the Dockerfile in the fraud_detection directorys dockerfile: ./fraud_detection/Dockerfile ports: - # Expose port 50051 on the host, and map port 50051 of the container to port 50051 on the host - - 50051:50051 + - "50251:50051" environment: - # Pass the environment variables to the container - # The PYTHONUNBUFFERED environment variable ensures that the output from the application is logged to the console - PYTHONUNBUFFERED=TRUE - # The PYTHONFILE 
environment variable specifies the absolute entry point of the application - # Check app.py in the fraud_detection directory to see how this is used - PYTHONFILE=/app/fraud_detection/src/app.py + - PYTHONPATH=/app volumes: - # Mount the utils directory in the current directory to the /app/utils directory in the container - ./utils:/app/utils - # Mount the fraud_detection/src directory in the current directory to the /app/fraud_detection/src directory in the container - - ./fraud_detection/src:/app/fraud_detection/src \ No newline at end of file + - ./fraud_detection/src:/app/fraud_detection/src + + transaction_verification: + build: + context: ./ + dockerfile: ./transaction_verification/Dockerfile + ports: + - "50252:50052" + environment: + - PYTHONUNBUFFERED=TRUE + - PYTHONFILE=/app/transaction_verification/src/app.py + - PYTHONPATH=/app + volumes: + - ./utils:/app/utils + - ./transaction_verification/src:/app/transaction_verification/src + + suggestions: + build: + context: ./ + dockerfile: ./suggestions/Dockerfile + ports: + - "50253:50053" + environment: + - PYTHONUNBUFFERED=TRUE + - PYTHONFILE=/app/suggestions/src/app.py + - PYTHONPATH=/app + volumes: + - ./utils:/app/utils + - ./suggestions/src:/app/suggestions/src + + order_queue: + build: + context: ./ + dockerfile: ./order_queue/Dockerfile + ports: + - "50254:50054" + environment: + - PYTHONUNBUFFERED=TRUE + - PYTHONFILE=/app/order_queue/src/app.py + volumes: + - ./utils:/app/utils + - ./order_queue/src:/app/order_queue/src + + order_executor_1: + build: + context: ./ + dockerfile: ./order_executor/Dockerfile + ports: + - "50255:50055" + environment: + - PYTHONUNBUFFERED=TRUE + - PYTHONFILE=/app/order_executor/src/app.py + - EXECUTOR_ID=1 + - EXECUTOR_PORT=50055 + - PEERS=1@order_executor_1:50055,2@order_executor_2:50055,3@order_executor_3:50055 + volumes: + - ./utils:/app/utils + - ./order_executor/src:/app/order_executor/src + depends_on: + - order_queue + - payment_service + - books_database_1 + - 
books_database_2 + - books_database_3 + + order_executor_2: + build: + context: ./ + dockerfile: ./order_executor/Dockerfile + ports: + - "50256:50055" + environment: + - PYTHONUNBUFFERED=TRUE + - PYTHONFILE=/app/order_executor/src/app.py + - EXECUTOR_ID=2 + - EXECUTOR_PORT=50055 + - PEERS=1@order_executor_1:50055,2@order_executor_2:50055,3@order_executor_3:50055 + volumes: + - ./utils:/app/utils + - ./order_executor/src:/app/order_executor/src + depends_on: + - order_queue + - payment_service + - books_database_1 + - books_database_2 + - books_database_3 + + order_executor_3: + build: + context: ./ + dockerfile: ./order_executor/Dockerfile + ports: + - "50257:50055" + environment: + - PYTHONUNBUFFERED=TRUE + - PYTHONFILE=/app/order_executor/src/app.py + - EXECUTOR_ID=3 + - EXECUTOR_PORT=50055 + - PEERS=1@order_executor_1:50055,2@order_executor_2:50055,3@order_executor_3:50055 + volumes: + - ./utils:/app/utils + - ./order_executor/src:/app/order_executor/src + depends_on: + - order_queue + - payment_service + - books_database_1 + - books_database_2 + - books_database_3 + + payment_service: + build: + context: ./ + dockerfile: ./payment_service/Dockerfile + ports: + - "50261:50061" + environment: + - PYTHONUNBUFFERED=TRUE + - PYTHONFILE=/app/payment_service/src/app.py + - PYTHONPATH=/app + - PAYMENT_PORT=50061 + volumes: + - ./utils:/app/utils + - ./payment_service/src:/app/payment_service/src + + books_database_1: + build: + context: ./ + dockerfile: ./books_database/Dockerfile + ports: + - "50258:50058" + environment: + - PYTHONUNBUFFERED=TRUE + - PYTHONFILE=/app/books_database/src/app.py + - PYTHONPATH=/app + - REPLICA_ID=1 + - REPLICA_PORT=50058 + - PEERS=1@books_database_1:50058,2@books_database_2:50058,3@books_database_3:50058 + - STATE_DIR=/app/state + volumes: + - ./utils:/app/utils + - ./books_database/src:/app/books_database/src + - ./books_database/state/1:/app/state + + books_database_2: + build: + context: ./ + dockerfile: ./books_database/Dockerfile + 
ports: + - "50259:50058" + environment: + - PYTHONUNBUFFERED=TRUE + - PYTHONFILE=/app/books_database/src/app.py + - PYTHONPATH=/app + - REPLICA_ID=2 + - REPLICA_PORT=50058 + - PEERS=1@books_database_1:50058,2@books_database_2:50058,3@books_database_3:50058 + - STATE_DIR=/app/state + volumes: + - ./utils:/app/utils + - ./books_database/src:/app/books_database/src + - ./books_database/state/2:/app/state + + books_database_3: + build: + context: ./ + dockerfile: ./books_database/Dockerfile + ports: + - "50260:50058" + environment: + - PYTHONUNBUFFERED=TRUE + - PYTHONFILE=/app/books_database/src/app.py + - PYTHONPATH=/app + - REPLICA_ID=3 + - REPLICA_PORT=50058 + - PEERS=1@books_database_1:50058,2@books_database_2:50058,3@books_database_3:50058 + - STATE_DIR=/app/state + volumes: + - ./utils:/app/utils + - ./books_database/src:/app/books_database/src + - ./books_database/state/3:/app/state \ No newline at end of file diff --git a/docs/README.md b/docs/README.md deleted file mode 100644 index 75ae1828a..000000000 --- a/docs/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Documentation - -This folder should contain your documentation, explaining the structure and content of your project. It should also contain your diagrams, explaining the architecture. The recommended writing format is Markdown. diff --git a/docs/diagrams/architecture-diagram.jpg b/docs/diagrams/architecture-diagram.jpg new file mode 100644 index 000000000..b050c4827 Binary files /dev/null and b/docs/diagrams/architecture-diagram.jpg differ diff --git a/docs/diagrams/commitment-protocol.svg b/docs/diagrams/commitment-protocol.svg new file mode 100644 index 000000000..d496335ac --- /dev/null +++ b/docs/diagrams/commitment-protocol.svg @@ -0,0 +1,164 @@ + + Distributed commitment protocol (2PC) diagram + Sequence-style diagram showing the two-phase commit coordinator (leader order_executor) running Prepare then Commit across the books_database primary and the payment_service. 
One success path (both vote commit, decision=COMMIT) and one failure path (payment votes abort, decision=ABORT) are shown. + + + + + + + + + + + + + + + + + + + Distributed commitment protocol: two-phase commit (2PC) + The leader order_executor is the coordinator. The books_database primary and the payment_service are participants. The decision is logged before phase 2 so every round leaves an audit record. + + + + + + Executor (coordinator) + leader order_executor + books_database primary + participant 1 + payment_service + participant 2 + + + + + + Case A: happy path (both vote commit) — decision=COMMIT + + + 2pc_start + dequeue order, fan out Phase 1 + + Phase 1: Prepare / Vote + + + Prepare(order, items) + + + Prepare(order, amount) + + + stage + persist + pending_orders[order]=items + txn_<order>.json on disk + + + stage amount + prepared[order]=amount + in-memory only + + + vote_commit=true + + + vote_commit=true + + + 2pc_decision=COMMIT + participants=[db,payment] + decision record (stdout log) + + Phase 2: Commit + + + Commit(order) + + + apply + replicate + commit_applied, seq bumped + remove txn file + + + success + + + Commit(order) + + + commit_applied + committed.add(order) + + + success + + + 2pc_commit_applied + order_done status=committed + + + Case B: abort path (DB votes abort, e.g. insufficient stock) — decision=ABORT + + + Prepare(order, items) + + + Prepare(order, amount) + + + vote_abort (insufficient stock) + + + vote_commit=true + + + 2pc_decision=ABORT → Abort sent to both + + + Decision record + Logged to stdout BEFORE phase 2, + so every round leaves an audit + point naming the participants + and the outcome. + + + Participant recovery (Phase 6) + books_database writes txn_<order>.json + before voting commit, so a crashed + replica can reload pending_orders + on restart. Commit/Abort are + idempotent on both participants, + so the coordinator retries safely. 
+ diff --git a/docs/diagrams/consistency-protocol.svg b/docs/diagrams/consistency-protocol.svg new file mode 100644 index 000000000..09dc68b04 --- /dev/null +++ b/docs/diagrams/consistency-protocol.svg @@ -0,0 +1,134 @@ + + Consistency protocol diagram for the replicated books database + Sequence-style diagram showing primary-backup replication across three books_database replicas. The executor always sends Writes and Reads to the elected primary (DB-3); the primary fans each Write out to both backups and only acks the client once every backup has applied the new value. + + + + + + + + + + + + + + + + Replicated database: primary-backup consistency + Three books_database replicas elect a primary via bully. All Writes go to the primary, which replicates synchronously to every backup before acking. Reads go to the primary only (strong consistency). + + + + + + + Executor (leader) + client of the DB tier + books_database_3 + primary (highest id wins bully) + books_database_1 + backup + books_database_2 + backup + + + + + + + Phase 1: primary announces itself after election + + + became primary + logs: primary=3 + + + Coordinator(3) + + Coordinator(3) + + + Phase 2: Write from executor — primary replicates to all backups before acking + + + Write(Book A, 10) + + + stage new value + seq=42, old=? new=10 + + + ReplicateWrite(Book A, 10, seq=42) + + + ReplicateWrite(Book A, 10, seq=42) + + + apply locally + replicate_applied seq=42 + + + apply locally + replicate_applied seq=42 + + + ack(success=true) + + ack(success=true) + + + commit locally + write_committed backups_acked=[1,2] + + + ok + + + Phase 3: Read from executor — answered by the primary only + + + Read(Book A) + + + read_ok value=10 + + + Why synchronous replication? + Primary blocks until every live + backup ACKs. A later Read can + never observe a value that has + not already reached every replica. + + + Failover + If the primary dies, bully elects + a new primary (highest live id). 
+ Reads/Writes are retried against + the new primary. + diff --git a/docs/diagrams/leader-election.svg b/docs/diagrams/leader-election.svg new file mode 100644 index 000000000..46a53d1ae --- /dev/null +++ b/docs/diagrams/leader-election.svg @@ -0,0 +1,116 @@ + + Leader election diagram for the order executor replicas + Sequence-style diagram showing a bully-style leader election at startup and failover after executor 3 stops, with only the leader dequeuing from the order queue. + + + + + + + + + + Leader election and single-consumer execution + The executor replicas use a bully-style election. Only the current leader is allowed to dequeue from the order queue. + + + + + + + Executor 1 + Executor 2 + Executor 3 + Order Queue + + + + + + + Phase 1: startup election + + + Election(1) + + + Election(1) + + + Election(2) + + + Executor 3 becomes leader + highest live id wins + + + Coordinator(3) + + + Coordinator(3) + + + + heartbeats every 2s + + + leader dequeues approved order + + + Phase 2: failover after executor 3 stops + + + Executor 3 stops + followers stop receiving heartbeats + + + + + Timeout detected + Executor 1 starts election + + + Election(1) + + + Executor 2 becomes leader + no higher live peer answers + Coordinator(2) follows + + + Coordinator(2) + + + new leader dequeues the next approved order + + + Mutual exclusion + Only the leader calls `Dequeue`. + That is why one approved order + is executed only once. + + + Failure assumption + Crash-stop failures, 5s timeout, + and simple heartbeat-based recovery. 
+ diff --git a/docs/diagrams/system-flow-diagram.jpg b/docs/diagrams/system-flow-diagram.jpg new file mode 100644 index 000000000..ceacdf9ac Binary files /dev/null and b/docs/diagrams/system-flow-diagram.jpg differ diff --git a/docs/diagrams/vector-clocks.svg b/docs/diagrams/vector-clocks.svg new file mode 100644 index 000000000..6c005550e --- /dev/null +++ b/docs/diagrams/vector-clocks.svg @@ -0,0 +1,131 @@ + + Vector clocks diagram for a successful checkout + Sequence-style diagram showing one successful run across orchestrator, transaction verification, fraud detection, and suggestions, with vector clock values in the order [TV, FD, SUG]. + + + + + + + + + + Vector clocks for one successful checkout + Vector order is [TV, FD, SUG]. The diagram shows one real successful run observed from this repository's logs. + + + + + + + Orchestrator + Transaction Verification + Fraud Detection + Suggestions + + + + + + + + Create order and init services + `InitOrder` sent to TV, FD, SUG + + + InitOrder + vc = [0, 0, 0] + + + InitOrder + vc = [0, 0, 0] + + + InitOrder + vc = [0, 0, 0] + + + + + + + ValidateUserData + vc = [1, 0, 0] + + + ValidateItems + vc = [2, 0, 0] + + + CheckUserFraud + vc = [1, 1, 0] + + + PrecomputeSuggestions + vc = [2, 0, 1] + + + ValidateCardFormat + vc = [3, 0, 0] + + + CheckCardFraud + vc = [3, 2, 0] + + + FinalizeSuggestions + vc = [3, 2, 2] + + + Merge all event clocks + final_vc = [3, 2, 2] + + + ClearOrder OK + local_vc [3, 0, 0] <= final_vc + + + ClearOrder OK + local_vc [3, 2, 0] <= final_vc + + + ClearOrder OK + local_vc [3, 2, 2] <= final_vc + + + + + + + + + + + + + + + + + Bonus clear step: orchestrator broadcasts one merged final_vc. 
+ diff --git a/fraud_detection/requirements.txt b/fraud_detection/requirements.txt index a80eedef7..52b5881e3 100644 --- a/fraud_detection/requirements.txt +++ b/fraud_detection/requirements.txt @@ -1,4 +1,5 @@ -grpcio==1.60.0 -grpcio-tools==1.60.0 -protobuf==4.25.2 +grpcio==1.78.0 +grpcio-tools==1.78.0 + + watchdog==6.0.0 diff --git a/fraud_detection/src/app.py b/fraud_detection/src/app.py index b2f1d2fce..caddb0877 100644 --- a/fraud_detection/src/app.py +++ b/fraud_detection/src/app.py @@ -1,45 +1,345 @@ -import sys import os +import sys +import threading +from concurrent import futures -# This set of lines are needed to import the gRPC stubs. -# The path of the stubs is relative to the current file, or absolute inside the container. -# Change these lines only if strictly needed. -FILE = __file__ if '__file__' in globals() else os.getenv("PYTHONFILE", "") -fraud_detection_grpc_path = os.path.abspath(os.path.join(FILE, '../../../utils/pb/fraud_detection')) +FILE = __file__ if "__file__" in globals() else os.getenv("PYTHONFILE", "") +fraud_detection_grpc_path = os.path.abspath( + os.path.join(FILE, "../../../utils/pb/fraud_detection") +) sys.path.insert(0, fraud_detection_grpc_path) + +suggestions_grpc_path = os.path.abspath( + os.path.join(FILE, "../../../utils/pb/suggestions") +) +sys.path.insert(0, suggestions_grpc_path) + +import grpc import fraud_detection_pb2 as fraud_detection import fraud_detection_pb2_grpc as fraud_detection_grpc +import suggestions_pb2 as suggestions +import suggestions_pb2_grpc as suggestions_grpc -import grpc -from concurrent import futures -# Create a class to define the server functions, derived from -# fraud_detection_pb2_grpc.HelloServiceServicer -class HelloService(fraud_detection_grpc.HelloServiceServicer): - # Create an RPC function to say hello - def SayHello(self, request, context): - # Create a HelloResponse object - response = fraud_detection.HelloResponse() - # Set the greeting field of the response object - 
response.greeting = "Hello, " + request.name - # Print the greeting message - print(response.greeting) - # Return the response object - return response +SERVICE_INDEX = 1 # [transaction_verification, fraud_detection, suggestions] + +orders = {} +orders_lock = threading.Lock() + + +def merge_vc(local_vc, incoming_vc): + return [max(a, b) for a, b in zip(local_vc, incoming_vc)] + + +def tick(vc, idx): + vc = list(vc) + vc[idx] += 1 + return vc + + +def extract_card_digits(card: str) -> str: + return "".join(c for c in str(card) if c.isdigit()) + + +def get_order_state(order_id: str): + with orders_lock: + return orders.get(order_id) + + +def forward_to_sug(order_id, source_event, vc, success, message): + try: + with grpc.insecure_channel("suggestions:50053") as channel: + stub = suggestions_grpc.SuggestionsServiceStub(channel) + req = suggestions.VCForward( + order_id=order_id, + source_event=source_event, + vc=suggestions.VectorClock(values=vc), + success=success, + message=message, + ) + stub.ForwardVC(req, timeout=10.0) + except Exception as e: + print(f"[FD] order={order_id} forward_to_sug_error source={source_event} error={e}") + + +class FraudDetectionService(fraud_detection_grpc.FraudDetectionServiceServicer): + def InitOrder(self, request, context): + order = request.order + + with orders_lock: + orders[order.order_id] = { + "order": order, + "vc": [0, 0, 0], + "lock": threading.Lock(), + # Causal gating state for event e (CheckCardFraud). + # e needs BOTH d (CheckUserFraud, local) AND c (ValidateCardFormat, from TV). 
+ "d_done": False, + "d_vc": None, + "d_success": True, + "d_message": "", + "c_received": False, + "c_vc": None, + "c_success": True, + "c_message": "", + "e_triggered": False, + } + + print(f"[FD] order={order.order_id} event=InitOrder vc={[0, 0, 0]} success=True") + + return fraud_detection.EventResponse( + success=True, + message="Fraud service initialized order.", + vc=fraud_detection.VectorClock(values=[0, 0, 0]), + ) + + def _try_run_e(self, order_id, state): + """Check if both prerequisites for event e are met. If so, run CheckCardFraud.""" + with state["lock"]: + if state["e_triggered"]: + return + if not (state["d_done"] and state["c_received"]): + return + state["e_triggered"] = True + + d_vc = state["d_vc"] + d_success = state["d_success"] + d_message = state["d_message"] + c_vc = state["c_vc"] + c_success = state["c_success"] + c_message = state["c_message"] + + # If either prerequisite failed, propagate failure without running e. + if not d_success: + print(f"[FD] order={order_id} event=CheckCardFraud skipped (d failed: {d_message})") + forward_to_sug(order_id, "e", d_vc, False, d_message) + return + if not c_success: + print(f"[FD] order={order_id} event=CheckCardFraud skipped (c failed: {c_message})") + forward_to_sug(order_id, "e", c_vc, False, c_message) + return + + # Both d and c succeeded: merge their VCs and run e. + merged = merge_vc(d_vc, c_vc) + + with state["lock"]: + local_vc = state["vc"] + vc = merge_vc(local_vc, merged) + vc = tick(vc, SERVICE_INDEX) + state["vc"] = vc + + # Perform the card-fraud check. + card_digits = extract_card_digits(state["order"].card_number) + + success = True + message = "Card fraud check passed." + + if len(card_digits) != 16: + success = False + message = "Invalid card number." + elif card_digits.startswith("0000") or card_digits.endswith("0000"): + success = False + message = "Suspicious card number pattern." 
+ + print( + f"[FD] order={order_id} event=CheckCardFraud " + f"vc={vc} success={success}" + ) + + # Forward e's result to SUG (SUG needs e's VC to gate event g). + forward_to_sug(order_id, "e", vc, success, message) + + def CheckUserFraud(self, request, context): + """Event d: called by TV after event b. After processing, checks if c's VC + has arrived so that event e can run.""" + order_id = request.order_id + state = get_order_state(order_id) + if state is None: + return fraud_detection.EventResponse( + success=False, + message="Order not found in fraud service.", + vc=fraud_detection.VectorClock(values=[0, 0, 0]), + ) + + incoming_vc = list(request.vc.values) + + with state["lock"]: + local_vc = state["vc"] + vc = merge_vc(local_vc, incoming_vc) + vc = tick(vc, SERVICE_INDEX) + state["vc"] = vc + + user_name = state["order"].user_name + success = "fraud" not in user_name.lower() + message = "User fraud check passed." if success else "Suspicious user name." + + print( + f"[FD] order={order_id} event=CheckUserFraud " + f"vc={vc} success={success}" + ) + + # Record d's result and attempt to trigger e. 
+ with state["lock"]: + state["d_done"] = True + state["d_vc"] = vc + state["d_success"] = success + state["d_message"] = message + + self._try_run_e(order_id, state) + + return fraud_detection.EventResponse( + success=success, + message=message, + vc=fraud_detection.VectorClock(values=vc), + ) + + def ForwardVC(self, request, context): + """Receive a forwarded VC from another microservice (TV forwards c's VC here).""" + order_id = request.order_id + source_event = request.source_event + incoming_vc = list(request.vc.values) + success = request.success + message = request.message + + state = get_order_state(order_id) + if state is None: + return fraud_detection.EventResponse( + success=False, + message="Order not found in fraud service.", + vc=fraud_detection.VectorClock(values=[0, 0, 0]), + ) + + print( + f"[FD] order={order_id} event=ForwardVC source={source_event} " + f"vc={incoming_vc} success={success}" + ) + + if source_event == "c": + with state["lock"]: + state["c_received"] = True + state["c_vc"] = incoming_vc + state["c_success"] = success + state["c_message"] = message + + self._try_run_e(order_id, state) + elif source_event == "a": + # a failed: no c will ever come, so we treat c as failed + with state["lock"]: + state["c_received"] = True + state["c_vc"] = incoming_vc + state["c_success"] = False + state["c_message"] = message + + self._try_run_e(order_id, state) + elif source_event == "d": + # b failed: TV will not call CheckUserFraud, so d is done+failed + with state["lock"]: + state["d_done"] = True + state["d_vc"] = incoming_vc + state["d_success"] = success + state["d_message"] = message + + self._try_run_e(order_id, state) + + return fraud_detection.EventResponse( + success=True, + message="VC forwarded.", + vc=fraud_detection.VectorClock(values=incoming_vc), + ) + + def CheckCardFraud(self, request, context): + """Event e: kept as an RPC for backward compat, but now triggered internally.""" + order_id = request.order_id + state = 
get_order_state(order_id) + if state is None: + return fraud_detection.EventResponse( + success=False, + message="Order not found in fraud service.", + vc=fraud_detection.VectorClock(values=[0, 0, 0]), + ) + + incoming_vc = list(request.vc.values) + + with state["lock"]: + local_vc = state["vc"] + vc = merge_vc(local_vc, incoming_vc) + vc = tick(vc, SERVICE_INDEX) + state["vc"] = vc + + card_digits = extract_card_digits(state["order"].card_number) + + success = True + message = "Card fraud check passed." + + if len(card_digits) != 16: + success = False + message = "Invalid card number." + elif card_digits.startswith("0000") or card_digits.endswith("0000"): + success = False + message = "Suspicious card number pattern." + + print( + f"[FD] order={order_id} event=CheckCardFraud " + f"vc={vc} success={success}" + ) + + return fraud_detection.EventResponse( + success=success, + message=message, + vc=fraud_detection.VectorClock(values=vc), + ) + + def ClearOrder(self, request, context): + order_id = request.order_id + final_vc = list(request.final_vc.values) + + with orders_lock: + state = orders.get(order_id) + + if state is None: + return fraud_detection.EventResponse( + success=False, + message="Order not found in fraud service.", + vc=fraud_detection.VectorClock(values=[0, 0, 0]), + ) + + with state["lock"]: + local_vc = state["vc"] + can_clear = all(a <= b for a, b in zip(local_vc, final_vc)) + + if can_clear: + del orders[order_id] + + success = can_clear + message = ( + "Order cleared from fraud service." + if success + else "Cannot clear order: local VC is ahead of final VC." 
+ ) + + print( + f"[FD] order={order_id} event=ClearOrder " + f"local_vc={local_vc} final_vc={final_vc} success={success}" + ) + + return fraud_detection.EventResponse( + success=success, + message=message, + vc=fraud_detection.VectorClock(values=final_vc), + ) + def serve(): - # Create a gRPC server - server = grpc.server(futures.ThreadPoolExecutor()) - # Add HelloService - fraud_detection_grpc.add_HelloServiceServicer_to_server(HelloService(), server) - # Listen on port 50051 + server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + fraud_detection_grpc.add_FraudDetectionServiceServicer_to_server( + FraudDetectionService(), server + ) + port = "50051" server.add_insecure_port("[::]:" + port) - # Start the server server.start() - print("Server started. Listening on port 50051.") - # Keep thread alive + print(f"Fraud detection server started. Listening on port {port}.") server.wait_for_termination() -if __name__ == '__main__': - serve() \ No newline at end of file + +if __name__ == "__main__": + serve() diff --git a/frontend/src/index.html b/frontend/src/index.html index 15c47351f..a1785e871 100644 --- a/frontend/src/index.html +++ b/frontend/src/index.html @@ -22,26 +22,32 @@

Items

+
+
+
+
+
+
@@ -50,6 +56,7 @@

Items

+
+
+
- + + @@ -74,14 +86,14 @@

Items

- + \ No newline at end of file diff --git a/orchestrator/requirements.txt b/orchestrator/requirements.txt index 5ba8e254b..2016d74ab 100644 --- a/orchestrator/requirements.txt +++ b/orchestrator/requirements.txt @@ -1,12 +1,13 @@ blinker==1.7.0 click==8.1.7 Flask==3.0.0 -grpcio==1.60.0 -grpcio-tools==1.60.0 +grpcio==1.78.0 +grpcio-tools==1.78.0 itsdangerous==2.1.2 Jinja2==3.1.3 MarkupSafe==2.1.3 -protobuf==4.25.2 + + Werkzeug==3.0.1 Flask-CORS==4.0.0 watchdog==6.0.0 \ No newline at end of file diff --git a/orchestrator/src/app.py b/orchestrator/src/app.py index 62d5d0662..daf43965f 100644 --- a/orchestrator/src/app.py +++ b/orchestrator/src/app.py @@ -1,75 +1,488 @@ -import sys import os +import sys +import threading +import uuid + +import grpc +from flask import Flask, request +from flask_cors import CORS -# This set of lines are needed to import the gRPC stubs. -# The path of the stubs is relative to the current file, or absolute inside the container. -# Change these lines only if strictly needed. 
-FILE = __file__ if '__file__' in globals() else os.getenv("PYTHONFILE", "") -fraud_detection_grpc_path = os.path.abspath(os.path.join(FILE, '../../../utils/pb/fraud_detection')) +# Import gRPC stubs +FILE = __file__ if "__file__" in globals() else os.getenv("PYTHONFILE", "") + +fraud_detection_grpc_path = os.path.abspath( + os.path.join(FILE, "../../../utils/pb/fraud_detection") +) sys.path.insert(0, fraud_detection_grpc_path) import fraud_detection_pb2 as fraud_detection import fraud_detection_pb2_grpc as fraud_detection_grpc -import grpc +transaction_verification_grpc_path = os.path.abspath( + os.path.join(FILE, "../../../utils/pb/transaction_verification") +) +sys.path.insert(0, transaction_verification_grpc_path) +import transaction_verification_pb2 as transaction_verification +import transaction_verification_pb2_grpc as transaction_verification_grpc + +suggestions_grpc_path = os.path.abspath( + os.path.join(FILE, "../../../utils/pb/suggestions") +) +sys.path.insert(0, suggestions_grpc_path) +import suggestions_pb2 as suggestions +import suggestions_pb2_grpc as suggestions_grpc + +order_queue_grpc_path = os.path.abspath( + os.path.join(FILE, "../../../utils/pb/order_queue") +) +sys.path.insert(0, order_queue_grpc_path) +import order_queue_pb2 as order_queue +import order_queue_pb2_grpc as order_queue_grpc -def greet(name='you'): - # Establish a connection with the fraud-detection gRPC service. - with grpc.insecure_channel('fraud_detection:50051') as channel: - # Create a stub object. - stub = fraud_detection_grpc.HelloServiceStub(channel) - # Call the service through the stub object. - response = stub.SayHello(fraud_detection.HelloRequest(name=name)) - return response.greeting - -# Import Flask. -# Flask is a web framework for Python. -# It allows you to build a web application quickly. 
-# For more information, see https://flask.palletsprojects.com/en/latest/ -from flask import Flask, request -from flask_cors import CORS -import json -# Create a simple Flask app. app = Flask(__name__) -# Enable CORS for the app. -CORS(app, resources={r'/*': {'origins': '*'}}) +CORS(app, resources={r"/*": {"origins": "*"}}) + + +# CP3_EXECUTION_ONLY lets us skip the Checkpoint 2 validation pipeline +# (TV / FD / SUG + vector-clock gating + clear broadcast) and go straight +# from input validation to enqueue. It is a dev-time flag for iterating +# on the Checkpoint 3 2PC path without waiting ~second(s) for the CP2 +# pipeline on every checkout. The final demo (§6 Option A in +# Charlie-Lima-Alfa.md) keeps this flag off. +CP3_EXECUTION_ONLY = os.getenv("CP3_EXECUTION_ONLY", "").strip().lower() in ( + "1", "true", "yes", "on" +) +print( + f"[ORCH] startup cp3_execution_only={CP3_EXECUTION_ONLY} " + f"(set CP3_EXECUTION_ONLY=true to skip the CP2 validation pipeline)" +) + + +def mask_fixed(card: str) -> str: + digits = "".join(c for c in str(card) if c.isdigit()) + masked = "*" * 12 + digits[-4:].rjust(4, "*") + return " ".join(masked[i:i + 4] for i in range(0, 16, 4)) + + +def merge_vcs(*vectors): + result = [0, 0, 0] + for vc in vectors: + for i in range(3): + result[i] = max(result[i], vc[i]) + return result + + +def build_order_kwargs( + user_name, user_contact, card_number, expiration_date, cvv, item_count, terms_accepted, items +): + return { + "user_name": user_name, + "user_contact": user_contact, + "card_number": card_number, + "expiration_date": expiration_date, + "cvv": cvv, + "item_count": item_count, + "terms_accepted": terms_accepted, + "items": items, + } + + +def parse_items(raw_items): + # Accept both new "title" key and legacy "name" key so old frontends still work. 
+ parsed = [] + for it in raw_items or []: + if not isinstance(it, dict): + continue + title = (it.get("title") or it.get("name") or "").strip() + try: + qty = int(it.get("quantity", 0)) + except (TypeError, ValueError): + qty = 0 + if not title or qty <= 0: + continue + parsed.append({"title": title, "quantity": qty}) + return parsed + + +def _make_order_data(pb_module, order_id, order_kwargs): + items_raw = order_kwargs.get("items", []) + scalar_kwargs = {k: v for k, v in order_kwargs.items() if k != "items"} + item_protos = [ + pb_module.OrderItem(title=i["title"], quantity=i["quantity"]) + for i in items_raw + ] + return pb_module.OrderData(order_id=order_id, items=item_protos, **scalar_kwargs) + + +# --- Service init calls (unchanged) --- + +def init_fraud_service(order_id, order_kwargs): + with grpc.insecure_channel("fraud_detection:50051") as channel: + stub = fraud_detection_grpc.FraudDetectionServiceStub(channel) + request = fraud_detection.InitOrderRequest( + order=_make_order_data(fraud_detection, order_id, order_kwargs) + ) + return stub.InitOrder(request, timeout=5.0) + + +def init_transaction_service(order_id, order_kwargs): + with grpc.insecure_channel("transaction_verification:50052") as channel: + stub = transaction_verification_grpc.TransactionVerificationServiceStub(channel) + request = transaction_verification.InitOrderRequest( + order=_make_order_data(transaction_verification, order_id, order_kwargs) + ) + return stub.InitOrder(request, timeout=5.0) + + +def init_suggestions_service(order_id, order_kwargs): + with grpc.insecure_channel("suggestions:50053") as channel: + stub = suggestions_grpc.SuggestionsServiceStub(channel) + request = suggestions.InitOrderRequest( + order=_make_order_data(suggestions, order_id, order_kwargs) + ) + return stub.InitOrder(request, timeout=5.0) + + +# --- Root event calls: only a and b --- + +def tv_validate_items(order_id): + with grpc.insecure_channel("transaction_verification:50052") as channel: + stub = 
transaction_verification_grpc.TransactionVerificationServiceStub(channel) + req = transaction_verification.EventRequest( + order_id=order_id, + vc=transaction_verification.VectorClock(values=[0, 0, 0]), + ) + return stub.ValidateItems(req, timeout=15.0) + + +def tv_validate_user_data(order_id): + with grpc.insecure_channel("transaction_verification:50052") as channel: + stub = transaction_verification_grpc.TransactionVerificationServiceStub(channel) + req = transaction_verification.EventRequest( + order_id=order_id, + vc=transaction_verification.VectorClock(values=[0, 0, 0]), + ) + return stub.ValidateUserData(req, timeout=15.0) + + +# --- Pipeline result collection --- + +def await_pipeline_result(order_id): + with grpc.insecure_channel("suggestions:50053") as channel: + stub = suggestions_grpc.SuggestionsServiceStub(channel) + req = suggestions.PipelineResultRequest(order_id=order_id) + return stub.AwaitPipelineResult(req, timeout=30.0) + + +# --- Enqueue and clear (unchanged) --- + +def enqueue_order(order_id, order_kwargs): + with grpc.insecure_channel("order_queue:50054") as channel: + stub = order_queue_grpc.OrderQueueServiceStub(channel) + request = order_queue.EnqueueRequest( + order=_make_order_data(order_queue, order_id, order_kwargs) + ) + return stub.Enqueue(request, timeout=5.0) + -# Define a GET endpoint. 
-@app.route('/', methods=['GET']) +def clear_fraud_service(order_id, final_vc): + with grpc.insecure_channel("fraud_detection:50051") as channel: + stub = fraud_detection_grpc.FraudDetectionServiceStub(channel) + request = fraud_detection.ClearOrderRequest( + order_id=order_id, + final_vc=fraud_detection.VectorClock(values=final_vc), + ) + return stub.ClearOrder(request, timeout=5.0) + + +def clear_transaction_service(order_id, final_vc): + with grpc.insecure_channel("transaction_verification:50052") as channel: + stub = transaction_verification_grpc.TransactionVerificationServiceStub(channel) + request = transaction_verification.ClearOrderRequest( + order_id=order_id, + final_vc=transaction_verification.VectorClock(values=final_vc), + ) + return stub.ClearOrder(request, timeout=5.0) + + +def clear_suggestions_service(order_id, final_vc): + with grpc.insecure_channel("suggestions:50053") as channel: + stub = suggestions_grpc.SuggestionsServiceStub(channel) + request = suggestions.ClearOrderRequest( + order_id=order_id, + final_vc=suggestions.VectorClock(values=final_vc), + ) + return stub.ClearOrder(request, timeout=5.0) + + +def broadcast_clear(order_id, final_vc): + try: + clear_results = [ + ("transaction_verification", clear_transaction_service(order_id, final_vc)), + ("fraud_detection", clear_fraud_service(order_id, final_vc)), + ("suggestions", clear_suggestions_service(order_id, final_vc)), + ] + failed_services = [ + f"{service}: {response.message}" + for service, response in clear_results + if not response.success + ] + + if failed_services: + print( + f"[ORCH] order={order_id} clear_broadcast_warning=" + f"{'; '.join(failed_services)} final_vc={final_vc}" + ) + return False + + print(f"[ORCH] order={order_id} clear_broadcast_sent final_vc={final_vc}") + return True + except Exception as e: + print(f"[ORCH] order={order_id} clear_broadcast_warning={e}") + return False + + +@app.route("/", methods=["GET"]) def index(): - """ - Responds with 'Hello, [name]' 
when a GET request is made to '/' endpoint. - """ - # Test the fraud-detection gRPC service. - response = greet(name='orchestrator') - # Return the response. - return response - -@app.route('/checkout', methods=['POST']) + return {"message": "Orchestrator is running."}, 200 + + +@app.route("/checkout", methods=["POST"]) def checkout(): - """ - Responds with a JSON object containing the order ID, status, and suggested books. - """ - # Get request object data to json - request_data = json.loads(request.data) - # Print request object data - print("Request Data:", request_data.get('items')) - - # Dummy response following the provided YAML specification for the bookstore - order_status_response = { - 'orderId': '12345', - 'status': 'Order Approved', - 'suggestedBooks': [ - {'bookId': '123', 'title': 'The Best Book', 'author': 'Author 1'}, - {'bookId': '456', 'title': 'The Second Best Book', 'author': 'Author 2'} - ] - } + request_data = request.get_json(silent=True) + if request_data is None: + return { + "error": { + "code": "BAD_REQUEST", + "message": "Request body must be valid JSON.", + } + }, 400 + + user = request_data.get("user", {}) or {} + items = request_data.get("items", []) or [] + terms_accepted = bool(request_data.get("termsAndConditionsAccepted", False)) + + user_name = (user.get("name") or "").strip() + user_contact = (user.get("contact") or "").strip() + + credit_card = (user.get("creditCard") or {}) + card_number = (credit_card.get("number") or "").strip() + expiration_date = (credit_card.get("expirationDate") or "").strip() + cvv = (credit_card.get("cvv") or "").strip() + + if not user_name: + return { + "error": { + "code": "BAD_REQUEST", + "message": "User name is required.", + } + }, 400 + + if not user_contact: + return { + "error": { + "code": "BAD_REQUEST", + "message": "User contact is required.", + } + }, 400 + + parsed_items = parse_items(items) + item_count = len(parsed_items) + order_id = str(uuid.uuid4()) + + items_repr = 
",".join(f"{i['title']}x{i['quantity']}" for i in parsed_items) + print( + f"[ORCH] order={order_id} received_checkout " + f"user={user_name} card={mask_fixed(card_number)} " + f"item_count={item_count} items=[{items_repr}]" + ) + + order_kwargs = build_order_kwargs( + user_name=user_name, + user_contact=user_contact, + card_number=card_number, + expiration_date=expiration_date, + cvv=cvv, + item_count=item_count, + terms_accepted=terms_accepted, + items=parsed_items, + ) + + # --- Option C fast-path: skip the CP2 pipeline entirely --- + if CP3_EXECUTION_ONLY: + print( + f"[ORCH] order={order_id} cp3_execution_only=true " + f"skipping CP2 pipeline (init/root-events/await/clear)" + ) + try: + enqueue_response = enqueue_order(order_id, order_kwargs) + except Exception as e: + print(f"[ORCH] order={order_id} enqueue_error={e}") + return { + "error": { + "code": "INTERNAL_ERROR", + "message": "Order could not be queued.", + } + }, 500 + + if not enqueue_response.success: + print( + f"[ORCH] order={order_id} enqueue_failed " + f"message={enqueue_response.message}" + ) + return { + "error": { + "code": "INTERNAL_ERROR", + "message": enqueue_response.message, + } + }, 500 + + print( + f"[ORCH] order={order_id} enqueue_success " + f"final_status=APPROVED path=cp3_execution_only" + ) + return { + "orderId": order_id, + "status": "Order Approved", + "suggestedBooks": [], + }, 200 + + # --- Phase 1: Initialize all backend services --- + try: + init_tv = init_transaction_service(order_id, order_kwargs) + init_fd = init_fraud_service(order_id, order_kwargs) + init_sug = init_suggestions_service(order_id, order_kwargs) + except Exception as e: + print(f"[ORCH] order={order_id} initialization_error={e}") + return { + "error": { + "code": "INTERNAL_ERROR", + "message": "Failed to initialize backend services.", + } + }, 500 + + for name, response in [ + ("InitTransactionVerification", init_tv), + ("InitFraudDetection", init_fd), + ("InitSuggestions", init_sug), + ]: + if not 
response.success: + print( + f"[ORCH] order={order_id} step={name} success=False message={response.message}" + ) + return { + "orderId": order_id, + "status": "Order Rejected", + "suggestedBooks": [], + "reason": response.message, + }, 200 + + print(f"[ORCH] order={order_id} initialization_complete") + + # --- Phase 2: Kick off root events on TV --- + # The orchestrator only triggers the two root events (a and b). + # TV handles all downstream chaining: c internally, then forwards to FD and SUG. + # FD gates event e on both d and c's VC. + # SUG gates event g on both f and e's VC. + # The orchestrator does NOT manage any dependency graph. + root_results = {} + root_errors = {} + + def run_root(name, rpc_fn): + try: + root_results[name] = rpc_fn(order_id) + except Exception as e: + root_errors[name] = str(e) + + print(f"[ORCH] order={order_id} starting_root_events") + + threads = [ + threading.Thread(target=run_root, args=("a", tv_validate_items)), + threading.Thread(target=run_root, args=("b", tv_validate_user_data)), + ] + for t in threads: + t.start() + for t in threads: + t.join() + + if root_errors: + print(f"[ORCH] order={order_id} root_event_errors={root_errors}") + + # --- Phase 3: Wait for the full pipeline to complete --- + # SUG.AwaitPipelineResult blocks until event g finishes (or a failure propagates). + try: + pipeline_result = await_pipeline_result(order_id) + except Exception as e: + print(f"[ORCH] order={order_id} pipeline_await_error={e}") + broadcast_clear(order_id, [0, 0, 0]) + return { + "error": { + "code": "INTERNAL_ERROR", + "message": "Failed to await pipeline result.", + } + }, 500 + + final_vc = list(pipeline_result.vc.values) + + # Also merge root event VCs into final_vc for completeness. 
+ for name in ("a", "b"): + if name in root_results: + final_vc = merge_vcs(final_vc, list(root_results[name].vc.values)) + + if not pipeline_result.success: + print( + f"[ORCH] order={order_id} pipeline_rejected " + f"message={pipeline_result.message} final_vc={final_vc}" + ) + broadcast_clear(order_id, final_vc) + return { + "orderId": order_id, + "status": "Order Rejected", + "suggestedBooks": [], + "reason": pipeline_result.message, + }, 200 + + # --- Phase 4: Enqueue approved order --- + try: + enqueue_response = enqueue_order(order_id, order_kwargs) + except Exception as e: + print(f"[ORCH] order={order_id} enqueue_error={e}") + return { + "error": { + "code": "INTERNAL_ERROR", + "message": "Order was approved but could not be queued.", + } + }, 500 + + if not enqueue_response.success: + print(f"[ORCH] order={order_id} enqueue_failed message={enqueue_response.message}") + return { + "error": { + "code": "INTERNAL_ERROR", + "message": enqueue_response.message, + } + }, 500 + + print(f"[ORCH] order={order_id} enqueue_success") + broadcast_clear(order_id, final_vc) + print(f"[ORCH] order={order_id} final_status=APPROVED final_vc={final_vc}") + + books = [] + for book in pipeline_result.books: + books.append( + { + "bookId": book.bookId, + "title": book.title, + "author": book.author, + } + ) - return order_status_response + return { + "orderId": order_id, + "status": "Order Approved", + "suggestedBooks": books, + }, 200 -if __name__ == '__main__': - # Run the app in debug mode to enable hot reloading. - # This is useful for development. - # The default port is 5000. - app.run(host='0.0.0.0') +if __name__ == "__main__": + app.run(host="0.0.0.0") diff --git a/orchestrator/tests/test_cp3_execution_only.py b/orchestrator/tests/test_cp3_execution_only.py new file mode 100644 index 000000000..a8581a5f2 --- /dev/null +++ b/orchestrator/tests/test_cp3_execution_only.py @@ -0,0 +1,169 @@ +"""Phase 8 verification: CP3_EXECUTION_ONLY fast-path. 
+ +When the orchestrator is started with CP3_EXECUTION_ONLY=true, /checkout +must: + - log the `cp3_execution_only=true skipping CP2 pipeline` line, + - NOT emit initialization_complete / starting_root_events / + clear_broadcast_sent, + - still enqueue the order so the 2PC path downstream runs, + - return Order Approved with empty suggestedBooks. + +Run from host (stack must be up with the cp3-only override applied): + docker compose -f docker-compose.yaml \ + -f docker-compose.cp3-only.yaml \ + up -d --no-deps --force-recreate orchestrator + python orchestrator/tests/test_cp3_execution_only.py +""" + +import json +import os +import subprocess +import sys +import time +import urllib.request + +import grpc + +HERE = os.path.dirname(os.path.abspath(__file__)) +sys.path.insert(0, os.path.abspath(os.path.join(HERE, "../../utils/pb/books_database"))) +import books_database_pb2 as db_pb2 +import books_database_pb2_grpc as db_grpc + +DB_HOSTS = [ + ("127.0.0.1:50258", 1), + ("127.0.0.1:50259", 2), + ("127.0.0.1:50260", 3), +] +ORCH = "http://localhost:8081" +REPO_ROOT = os.path.abspath(os.path.join(HERE, "../..")) +COMPOSE = ["docker", "compose", "-f", "docker-compose.yaml"] + + +def _run(cmd, timeout=60): + return subprocess.run( + cmd, cwd=REPO_ROOT, capture_output=True, text=True, timeout=timeout + ) + + +def find_primary(): + id_to_host = {rid: addr for addr, rid in DB_HOSTS} + for addr, _ in DB_HOSTS: + try: + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + r = stub.WhoIsPrimary(db_pb2.WhoIsPrimaryRequest(), timeout=2.0) + if r.leader_id: + return id_to_host[r.leader_id] + except Exception: + continue + raise RuntimeError("no DB primary") + + +def raw_write(addr, title, qty): + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + return stub.Write( + db_pb2.WriteRequest(title=title, quantity=qty), timeout=5.0 + ) + + +def read_local(addr, title): + with grpc.insecure_channel(addr) as ch: + 
stub = db_grpc.BooksDatabaseServiceStub(ch) + return stub.ReadLocal(db_pb2.ReadRequest(title=title), timeout=3.0).quantity + + +def post_checkout(items): + payload = { + "user": { + "name": "Dave", + "contact": "dave@example.com", + "creditCard": { + "number": "4111111111111111", + "expirationDate": "12/30", + "cvv": "123", + }, + "billingAddress": { + "street": "1 Main St", "city": "Tartu", "state": "Tartumaa", + "zip": "51000", "country": "EE", + }, + }, + "items": items, + "termsAndConditionsAccepted": True, + } + data = json.dumps(payload).encode("utf-8") + req = urllib.request.Request( + f"{ORCH}/checkout", data=data, + headers={"Content-Type": "application/json"}, method="POST", + ) + with urllib.request.urlopen(req, timeout=30) as resp: + return json.loads(resp.read()) + + +def main(): + # Baseline. + primary = find_primary() + raw_write(primary, "Book A", 10) + print(f"primary={primary} baseline Book A = 10") + + # Confirm orchestrator is running in CP3_EXECUTION_ONLY=true mode. + # Wide window because the container may have been restarted earlier. + out = _run(COMPOSE + ["logs", "--since", "30m", "orchestrator"]) + assert "cp3_execution_only=True" in (out.stdout or ""), ( + "orchestrator not in CP3_EXECUTION_ONLY mode. Start it with:\n" + " docker compose -f docker-compose.yaml -f docker-compose.cp3-only.yaml " + "up -d --no-deps --force-recreate orchestrator" + ) + print("orchestrator mode = CP3_EXECUTION_ONLY (flag=True)") + + # Submit checkout. + resp = post_checkout([{"title": "Book A", "quantity": 1}]) + order_id = resp.get("orderId") + status = resp.get("status") + books = resp.get("suggestedBooks") or [] + print(f"/checkout -> orderId={order_id} status={status!r} " + f"suggestedBooks={len(books)}") + assert status == "Order Approved", f"expected Approved, got {status!r}" + assert books == [], "CP3_EXECUTION_ONLY must return empty suggestedBooks" + + # Wait for 2PC commit. 
+ deadline = time.time() + 30 + committed = False + while time.time() < deadline: + out = _run(COMPOSE + [ + "logs", "--since", "1m", + "order_executor_1", "order_executor_2", "order_executor_3", + ]) + if f"2pc_commit_applied order={order_id}" in (out.stdout or ""): + committed = True + break + time.sleep(1.0) + assert committed, "2PC never committed the order" + print("2PC commit observed in executor logs") + + # Verify orchestrator did NOT run the CP2 pipeline for this order. + out = _run(COMPOSE + ["logs", "--since", "2m", "orchestrator"]) + orch = out.stdout or "" + skip_line = f"order={order_id} cp3_execution_only=true" + assert skip_line in orch, f"expected orch skip line for {order_id}" + # None of these CP2-pipeline log points should exist for this order. + for forbidden in ( + f"order={order_id} initialization_complete", + f"order={order_id} starting_root_events", + f"order={order_id} clear_broadcast_sent", + ): + assert forbidden not in orch, f"CP2 pipeline ran: saw {forbidden!r}" + print("orchestrator skipped CP2 pipeline (init/root/clear all absent)") + + # Verify stock converged. + time.sleep(1.5) + for addr, rid in DB_HOSTS: + q = read_local(addr, "Book A") + print(f" DB-{rid}: Book A={q}") + assert q == 9, f"DB-{rid} expected 9 got {q}" + + print("\nPHASE 8 OPTION C (CP3_EXECUTION_ONLY) E2E: PASSED") + + +if __name__ == "__main__": + main() diff --git a/order_executor/Dockerfile b/order_executor/Dockerfile new file mode 100644 index 000000000..f32faaae0 --- /dev/null +++ b/order_executor/Dockerfile @@ -0,0 +1,5 @@ +FROM python:3.11 +WORKDIR /app +COPY ./order_executor/requirements.txt . 
+RUN pip install --no-cache-dir -r requirements.txt +CMD python utils/other/hotreload.py "order_executor/src/app.py" \ No newline at end of file diff --git a/order_executor/requirements.txt b/order_executor/requirements.txt new file mode 100644 index 000000000..1731b14fb --- /dev/null +++ b/order_executor/requirements.txt @@ -0,0 +1,3 @@ +grpcio==1.78.0 +grpcio-tools==1.78.0 +watchdog==6.0.0 \ No newline at end of file diff --git a/order_executor/src/app.py b/order_executor/src/app.py new file mode 100644 index 000000000..d61c6440e --- /dev/null +++ b/order_executor/src/app.py @@ -0,0 +1,581 @@ +import os +import sys +import time +import grpc +import threading +from concurrent import futures + +FILE = __file__ if "__file__" in globals() else os.getenv("PYTHONFILE", "") + +executor_grpc_path = os.path.abspath( + os.path.join(FILE, "../../../utils/pb/order_executor") +) +queue_grpc_path = os.path.abspath( + os.path.join(FILE, "../../../utils/pb/order_queue") +) +db_grpc_path = os.path.abspath( + os.path.join(FILE, "../../../utils/pb/books_database") +) +payment_grpc_path = os.path.abspath( + os.path.join(FILE, "../../../utils/pb/payment_service") +) + +sys.path.insert(0, executor_grpc_path) +sys.path.insert(0, queue_grpc_path) +sys.path.insert(0, db_grpc_path) +sys.path.insert(0, payment_grpc_path) + +import order_executor_pb2 as executor_pb2 +import order_executor_pb2_grpc as executor_grpc +import order_queue_pb2 as queue_pb2 +import order_queue_pb2_grpc as queue_grpc +import books_database_pb2 as db_pb2 +import books_database_pb2_grpc as db_grpc +import payment_pb2 as pay_pb2 +import payment_pb2_grpc as pay_grpc + + +EXECUTOR_ID = int(os.getenv("EXECUTOR_ID", "1")) +EXECUTOR_PORT = os.getenv("EXECUTOR_PORT", "50055") +HEARTBEAT_INTERVAL = 2.0 +LEADER_TIMEOUT = 5.0 + +# 2PC participants. The DB primary address is discovered dynamically because +# any of the three replicas can hold primary after a failover. 
+DB_REPLICA_ADDRS = [ + "books_database_1:50058", + "books_database_2:50058", + "books_database_3:50058", +] +PAYMENT_ADDR = "payment_service:50061" + +# Price table used only so the Prepare payload has a realistic amount. +# The demo payment service never validates the amount; this is purely for +# the log trail. +BOOK_PRICES = { + "Book A": 12.99, + "Book B": 14.99, + "Book C": 9.99, + "Distributed Systems Basics": 45.00, + "Designing Data-Intensive Applications": 52.00, +} +DEFAULT_PRICE = 15.00 + +state_lock = threading.Lock() +leader_id = None +last_heartbeat = time.time() +is_leader = False +election_in_progress = False + + +def parse_peers(): + peers = [] + raw = os.getenv("PEERS", "") + for item in raw.split(","): + item = item.strip() + if not item: + continue + peer_id, peer_addr = item.split("@", 1) + peers.append((int(peer_id), peer_addr)) + return peers + + +PEERS = parse_peers() + + +def has_fresh_leader_locked(): + if leader_id is None: + return False + if is_leader and leader_id == EXECUTOR_ID: + return True + return (time.time() - last_heartbeat) <= LEADER_TIMEOUT + + +def announce_coordinator(): + for pid, addr in PEERS: + if pid == EXECUTOR_ID: + continue + send_rpc( + addr, + lambda stub: stub.Coordinator( + executor_pb2.CoordinatorRequest(leader_id=EXECUTOR_ID), + timeout=2.0, + ), + ) + + +class ControlService(executor_grpc.OrderExecutorControlServicer): + def Election(self, request, context): + global election_in_progress + + if EXECUTOR_ID <= request.candidate_id: + return executor_pb2.ElectionResponse(alive=False) + + print(f"[EXEC-{EXECUTOR_ID}] received election from {request.candidate_id}") + + with state_lock: + already_leader = is_leader + election_running = election_in_progress + + # If I am already the leader, just re-announce myself instead of + # starting a brand new election. 
+ if already_leader: + threading.Thread(target=announce_coordinator, daemon=True).start() + elif not election_running: + threading.Thread(target=start_election, daemon=True).start() + + return executor_pb2.ElectionResponse(alive=True) + + def Coordinator(self, request, context): + global leader_id, is_leader, election_in_progress, last_heartbeat + with state_lock: + leader_id = request.leader_id + is_leader = leader_id == EXECUTOR_ID + election_in_progress = False + last_heartbeat = time.time() + + print(f"[EXEC-{EXECUTOR_ID}] new leader is {leader_id}") + return executor_pb2.Ack(ok=True) + + def Heartbeat(self, request, context): + global leader_id, is_leader, last_heartbeat + with state_lock: + leader_id = request.leader_id + is_leader = leader_id == EXECUTOR_ID + last_heartbeat = time.time() + return executor_pb2.Ack(ok=True) + + +def send_rpc(addr, fn): + try: + with grpc.insecure_channel(addr) as channel: + stub = executor_grpc.OrderExecutorControlStub(channel) + return fn(stub) + except Exception: + return None + + +def start_election(): + global election_in_progress, leader_id + + with state_lock: + if election_in_progress: + return + + # Do not start a new election if a healthy leader is already known. + if has_fresh_leader_locked(): + return + + election_in_progress = True + + print(f"[EXEC-{EXECUTOR_ID}] starting election") + + higher_peers = [(pid, addr) for pid, addr in PEERS if pid > EXECUTOR_ID] + got_answer = False + + for pid, addr in higher_peers: + response = send_rpc( + addr, + lambda stub: stub.Election( + executor_pb2.ElectionRequest(candidate_id=EXECUTOR_ID), + timeout=2.0, + ), + ) + if response and response.alive: + got_answer = True + + if not got_answer: + become_leader() + return + + # Wait for a higher node to announce a leader. 
+ time.sleep(LEADER_TIMEOUT) + + with state_lock: + fresh_leader = has_fresh_leader_locked() + election_in_progress = False + + if not fresh_leader: + with state_lock: + leader_id = None + start_election() + + +def become_leader(): + global leader_id, is_leader, election_in_progress, last_heartbeat + with state_lock: + leader_id = EXECUTOR_ID + is_leader = True + election_in_progress = False + last_heartbeat = time.time() + + print(f"[EXEC-{EXECUTOR_ID}] became leader") + announce_coordinator() + + +def heartbeat_loop(): + while True: + time.sleep(HEARTBEAT_INTERVAL) + + with state_lock: + leader_now = is_leader + + if not leader_now: + continue + + for pid, addr in PEERS: + if pid == EXECUTOR_ID: + continue + send_rpc( + addr, + lambda stub: stub.Heartbeat( + executor_pb2.HeartbeatRequest(leader_id=EXECUTOR_ID), + timeout=2.0, + ), + ) + + +def timeout_loop(): + global leader_id + + while True: + time.sleep(1.0) + + with state_lock: + if is_leader or election_in_progress: + continue + + # During startup, if no leader is known yet, do not immediately + # treat that as a timeout storm. + if leader_id is None: + continue + + expired = (time.time() - last_heartbeat) > LEADER_TIMEOUT + + if expired: + print(f"[EXEC-{EXECUTOR_ID}] leader timeout detected") + with state_lock: + leader_id = None + start_election() + + +def find_db_primary_addr(): + """Ask each replica who the current primary is and return the first + leader_addr the quorum reports. 
Returns '' if no primary is known.""" + for addr in DB_REPLICA_ADDRS: + try: + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + r = stub.WhoIsPrimary( + db_pb2.WhoIsPrimaryRequest(), timeout=2.0 + ) + if r.leader_id and r.leader_addr: + return r.leader_addr + except Exception: + continue + return "" + + +def compute_amount(items): + return sum( + BOOK_PRICES.get(i.title, DEFAULT_PRICE) * i.quantity for i in items + ) + + +def _db_prepare(primary_addr, order_id, items): + with grpc.insecure_channel(primary_addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + item_msgs = [ + db_pb2.PrepareItem(title=i.title, quantity=i.quantity) + for i in items + ] + return stub.Prepare( + db_pb2.PrepareRequest(order_id=order_id, items=item_msgs), + timeout=5.0, + ) + + +def _db_commit(primary_addr, order_id): + with grpc.insecure_channel(primary_addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + return stub.Commit( + db_pb2.CommitRequest(order_id=order_id), timeout=10.0 + ) + + +def _db_abort(primary_addr, order_id): + with grpc.insecure_channel(primary_addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + return stub.Abort( + db_pb2.AbortRequest(order_id=order_id), timeout=5.0 + ) + + +def _pay_prepare(order_id, amount, user_name): + with grpc.insecure_channel(PAYMENT_ADDR) as ch: + stub = pay_grpc.PaymentServiceStub(ch) + return stub.Prepare( + pay_pb2.PaymentPrepareRequest( + order_id=order_id, amount=amount, user_name=user_name + ), + timeout=5.0, + ) + + +def _pay_commit(order_id): + with grpc.insecure_channel(PAYMENT_ADDR) as ch: + stub = pay_grpc.PaymentServiceStub(ch) + return stub.Commit( + pay_pb2.PaymentCommitRequest(order_id=order_id), timeout=5.0 + ) + + +def _pay_abort(order_id): + with grpc.insecure_channel(PAYMENT_ADDR) as ch: + stub = pay_grpc.PaymentServiceStub(ch) + return stub.Abort( + pay_pb2.PaymentAbortRequest(order_id=order_id), timeout=5.0 + ) + + +def run_2pc(order): + """Two-Phase Commit 
coordinator for a single order. + + Phase 1: fan out Prepare to the DB primary and the payment service in + parallel, collect votes. + Decision record: log decision=COMMIT or decision=ABORT *before* sending + phase 2 RPCs so recovery (Phase 6) has a log point to resume from. + Phase 2: send Commit to both participants if both voted commit, else + send Abort to both (idempotent on either side). + """ + order_id = order.order_id + items = list(order.items) + user_name = order.user_name or "unknown" + amount = compute_amount(items) + + items_repr = ",".join(f"{i.title}x{i.quantity}" for i in items) + print( + f"[EXEC-{EXECUTOR_ID}] 2pc_start order={order_id} " + f"user=\"{user_name}\" items=[{items_repr}] amount={amount:.2f}" + ) + + primary_addr = find_db_primary_addr() + if not primary_addr: + print( + f"[EXEC-{EXECUTOR_ID}] 2pc_decision order={order_id} " + f"decision=ABORT reason=no-db-primary" + ) + try: + _pay_abort(order_id) + except Exception: + pass + return False + + # Phase 1: Prepare fan-out in parallel. 
+ results = {} + + def _call_db(): + try: + results["db"] = _db_prepare(primary_addr, order_id, items) + except Exception as e: + results["db"] = None + results["db_err"] = str(e) + + def _call_pay(): + try: + results["pay"] = _pay_prepare(order_id, amount, user_name) + except Exception as e: + results["pay"] = None + results["pay_err"] = str(e) + + t_db = threading.Thread(target=_call_db) + t_pay = threading.Thread(target=_call_pay) + t_db.start() + t_pay.start() + t_db.join(timeout=10.0) + t_pay.join(timeout=10.0) + + db_resp = results.get("db") + pay_resp = results.get("pay") + db_vote = bool(db_resp and db_resp.vote_commit) + pay_vote = bool(pay_resp and pay_resp.vote_commit) + db_msg = db_resp.message if db_resp else results.get("db_err", "no-response") + pay_msg = pay_resp.message if pay_resp else results.get( + "pay_err", "no-response" + ) + + print( + f"[EXEC-{EXECUTOR_ID}] 2pc_votes order={order_id} " + f"db=(vote_commit={db_vote},msg={db_msg!r}) " + f"payment=(vote_commit={pay_vote},msg={pay_msg!r})" + ) + + decision = "COMMIT" if (db_vote and pay_vote) else "ABORT" + + # Decision record. Written BEFORE phase 2 so a crashed coordinator can + # be re-derived from the log (Phase 6/7 material). + print( + f"[EXEC-{EXECUTOR_ID}] 2pc_decision order={order_id} " + f"decision={decision} participants=[db,payment]" + ) + + # Phase 2. + if decision == "COMMIT": + # Retry loop for Phase 6 recovery: if the DB participant returns a + # transient failure (injected or real), back off and retry. The + # participant persists the staged transaction on vote_commit so + # its state survives a restart. + # Retry budget ~40s of wall-clock: long enough to outlast a DB + # participant that is killed mid-retry, rebooted from its on-disk + # pending file, and then has to re-win the bully election to + # become primary again (commit lands on the original participant). 
+ commit_max_attempts = 12 + commit_backoffs = [0.5, 1.0, 2.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0] + db_ok = False + db_msg = "" + for attempt in range(1, commit_max_attempts + 1): + try: + db_c = _db_commit(primary_addr, order_id) + db_ok = bool(db_c and db_c.success) + db_msg = db_c.message if db_c else "no-response" + except Exception as e: + db_c = None + db_ok = False + db_msg = f"rpc_error={e!r}" + + if db_ok: + if attempt > 1: + print( + f"[EXEC-{EXECUTOR_ID}] 2pc_commit_retry_succeeded " + f"order={order_id} attempt={attempt}" + ) + break + + print( + f"[EXEC-{EXECUTOR_ID}] 2pc_commit_retry " + f"order={order_id} attempt={attempt} db_msg={db_msg!r}" + ) + if attempt == commit_max_attempts: + break + time.sleep(commit_backoffs[min(attempt - 1, len(commit_backoffs) - 1)]) + # Re-discover primary in case of failover. + new_primary = find_db_primary_addr() + if new_primary and new_primary != primary_addr: + print( + f"[EXEC-{EXECUTOR_ID}] 2pc_primary_changed " + f"order={order_id} old_primary={primary_addr} " + f"new_primary={new_primary}" + ) + primary_addr = new_primary + elif not new_primary: + print( + f"[EXEC-{EXECUTOR_ID}] 2pc_primary_unknown " + f"order={order_id} attempt={attempt}" + ) + + try: + pay_c = _pay_commit(order_id) + except Exception as e: + pay_c = None + print( + f"[EXEC-{EXECUTOR_ID}] 2pc_commit_pay_rpc_error " + f"order={order_id} err={e!r}" + ) + pay_ok = bool(pay_c and pay_c.success) + + if db_ok and pay_ok: + print(f"[EXEC-{EXECUTOR_ID}] 2pc_commit_applied order={order_id}") + return True + print( + f"[EXEC-{EXECUTOR_ID}] 2pc_commit_partial order={order_id} " + f"db_ok={db_ok} db_msg={db_msg!r} pay_ok={pay_ok}" + ) + return False + + # ABORT path: Abort is idempotent on both sides; send to both + # regardless of which (if any) voted commit. 
+ try: + _db_abort(primary_addr, order_id) + except Exception as e: + print( + f"[EXEC-{EXECUTOR_ID}] 2pc_abort_db_rpc_error " + f"order={order_id} err={e!r}" + ) + try: + _pay_abort(order_id) + except Exception as e: + print( + f"[EXEC-{EXECUTOR_ID}] 2pc_abort_pay_rpc_error " + f"order={order_id} err={e!r}" + ) + print(f"[EXEC-{EXECUTOR_ID}] 2pc_abort_applied order={order_id}") + return False + + +def consume_loop(): + while True: + time.sleep(1.0) + + with state_lock: + if not is_leader: + continue + + try: + with grpc.insecure_channel("order_queue:50054") as channel: + stub = queue_grpc.OrderQueueServiceStub(channel) + response = stub.Dequeue( + queue_pb2.DequeueRequest(executor_id=str(EXECUTOR_ID)), + timeout=2.0, + ) + except Exception as e: + print(f"[EXEC-{EXECUTOR_ID}] queue error: {e}") + continue + + if not response.success: + continue + + items_repr = ",".join( + f"{i.title}x{i.quantity}" for i in response.order.items + ) + print( + f"[EXEC-{EXECUTOR_ID}] leader={EXECUTOR_ID} " + f"executing order={response.order.order_id} " + f'user="{response.order.user_name}" ' + f"item_count={response.order.item_count} items=[{items_repr}]" + ) + + committed = run_2pc(response.order) + status = "committed" if committed else "aborted" + print( + f"[EXEC-{EXECUTOR_ID}] order_done " + f"order={response.order.order_id} status={status}" + ) + + +def serve(): + server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + executor_grpc.add_OrderExecutorControlServicer_to_server( + ControlService(), server + ) + server.add_insecure_port("[::]:" + EXECUTOR_PORT) + server.start() + print(f"[EXEC-{EXECUTOR_ID}] listening on port {EXECUTOR_PORT}") + + threading.Thread(target=heartbeat_loop, daemon=True).start() + threading.Thread(target=timeout_loop, daemon=True).start() + threading.Thread(target=consume_loop, daemon=True).start() + + # Give peers a brief moment to come up, then start election only if + # no leader is already known. 
+    time.sleep(1.0)
+    with state_lock:
+        should_start = (leader_id is None) and (not election_in_progress)
+
+    if should_start:
+        start_election()
+
+    server.wait_for_termination()
+
+
+if __name__ == "__main__":
+    serve()
\ No newline at end of file
diff --git a/order_executor/tests/test_2pc_crash_recovery.py b/order_executor/tests/test_2pc_crash_recovery.py
new file mode 100644
index 000000000..adb0b3f78
--- /dev/null
+++ b/order_executor/tests/test_2pc_crash_recovery.py
@@ -0,0 +1,295 @@
+"""Phase 6 end-to-end test: DB-primary crash between Prepare and Commit.
+
+Procedure:
+  1. Baseline stock via raw Write against the current DB primary.
+  2. Recreate books_database_3 with FAIL_NEXT_COMMIT=99 so every Commit
+     retry will be rejected as injected failure while DB-3 is up.
+  3. POST /checkout. The coordinator sends Prepare (DB-3 persists the
+     staged txn to /app/state/txn_<order_id>.json and votes commit), then starts
+     retrying Commit, which keeps failing.
+  4. After seeing at least one commit_fail_injected, `docker kill` DB-3.
+     Wait a moment so the coordinator's next attempts return transport
+     errors (UNAVAILABLE).
+  5. Restart DB-3 WITHOUT the override (FAIL_NEXT_COMMIT=0). On boot it
+     logs `recovered_pending order=<order_id>` after reading the txn file.
+  6. DB-3 reclaims primary via bully. The coordinator's next retry now
+     reaches a clean DB-3 which finds the recovered reservation and
+     applies the commit.
+  7. All three replicas converge to stock-1; /app/state/ is empty again.
+ +Run from host (Docker stack must be up): + python order_executor/tests/test_2pc_crash_recovery.py +""" + +import json +import os +import subprocess +import sys +import time +import urllib.request + +import grpc + +HERE = os.path.dirname(os.path.abspath(__file__)) +sys.path.insert(0, os.path.abspath(os.path.join(HERE, "../../utils/pb/books_database"))) + +import books_database_pb2 as db_pb2 +import books_database_pb2_grpc as db_grpc + +DB_HOSTS = [ + ("127.0.0.1:50258", 1), + ("127.0.0.1:50259", 2), + ("127.0.0.1:50260", 3), +] +ORCH = "http://localhost:8081" +REPO_ROOT = os.path.abspath(os.path.join(HERE, "../..")) +COMPOSE_BASE = ["docker", "compose", "-f", "docker-compose.yaml"] +COMPOSE_OVERRIDE = COMPOSE_BASE + ["-f", "docker-compose.fail-inject.yaml"] +STATE_DIR_HOST = os.path.join(REPO_ROOT, "books_database", "state", "3") + + +def _run(cmd, timeout=120): + return subprocess.run( + cmd, cwd=REPO_ROOT, capture_output=True, text=True, timeout=timeout + ) + + +def find_primary(): + id_to_host = {rid: addr for addr, rid in DB_HOSTS} + for addr, _ in DB_HOSTS: + try: + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + r = stub.WhoIsPrimary(db_pb2.WhoIsPrimaryRequest(), timeout=2.0) + if r.leader_id: + return id_to_host[r.leader_id], r.leader_id + except Exception: + continue + return None, None + + +def wait_for_primary(expected_id, timeout=60): + deadline = time.time() + timeout + while time.time() < deadline: + _, pid = find_primary() + if pid == expected_id: + return + time.sleep(1.0) + raise RuntimeError(f"timed out waiting for DB-{expected_id} to reclaim primary") + + +def raw_write(addr, title, qty): + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + return stub.Write( + db_pb2.WriteRequest(title=title, quantity=qty), timeout=5.0 + ) + + +def read_local(addr, title): + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + return stub.ReadLocal( + 
db_pb2.ReadRequest(title=title), timeout=3.0 + ).quantity + + +def post_checkout_async(items): + payload = { + "user": { + "name": "Carol", + "contact": "carol@example.com", + "creditCard": { + "number": "4111111111111111", + "expirationDate": "12/30", + "cvv": "123", + }, + "billingAddress": { + "street": "1 Main St", + "city": "Tartu", + "state": "Tartumaa", + "zip": "51000", + "country": "EE", + }, + }, + "items": items, + "termsAndConditionsAccepted": True, + } + data = json.dumps(payload).encode("utf-8") + req = urllib.request.Request( + f"{ORCH}/checkout", + data=data, + headers={"Content-Type": "application/json"}, + method="POST", + ) + with urllib.request.urlopen(req, timeout=90) as resp: + return json.loads(resp.read()) + + +def scrape_logs(services, since, patterns, timeout=60): + if isinstance(services, str): + services = [services] + deadline = time.time() + timeout + last_text = "" + while time.time() < deadline: + out = _run( + COMPOSE_BASE + ["logs", *services, "--since", since], timeout=30 + ) + text = (out.stdout or "") + (out.stderr or "") + last_text = text + missing = {} + for p, need in patterns.items(): + if text.count(p) < need: + missing[p] = (text.count(p), need) + if not missing: + return text + time.sleep(1.0) + raise AssertionError( + f"timed out scraping {services}; last missing: {missing}" + ) + + +def main(): + # Baseline. + addr, pid = find_primary() + assert pid, "no DB primary at test start" + print(f"initial primary = DB-{pid} @ {addr}") + raw_write(addr, "Book A", 10) + print("baseline: Book A = 10") + + # Arm FAIL_NEXT_COMMIT=99 on DB-3 so Commit keeps failing until we kill it. + print("\n-- arming FAIL_NEXT_COMMIT=99 on DB-3 --") + # Temporarily bump the override value. 
+ override_path = os.path.join(REPO_ROOT, "docker-compose.fail-inject.yaml") + original = open(override_path).read() + open(override_path, "w").write( + "services:\n" + " books_database_3:\n" + " environment:\n" + " - FAIL_NEXT_COMMIT=99\n" + ) + try: + r = _run( + COMPOSE_OVERRIDE + + ["up", "-d", "--no-deps", "--force-recreate", "books_database_3"], + timeout=120, + ) + if r.returncode != 0: + print(r.stdout, r.stderr) + raise RuntimeError("recreate DB-3 with high fail count failed") + + wait_for_primary(3, timeout=60) + print("DB-3 primary (fail_next_commit=99)") + time.sleep(2.0) + + # Fire checkout in a background thread so we can kill DB-3 mid-retry + # without blocking on the orchestrator's response. + import threading + + resp_box = {} + + def do_checkout(): + try: + resp_box["resp"] = post_checkout_async( + [{"title": "Book A", "quantity": 1}] + ) + except Exception as e: + resp_box["err"] = repr(e) + + t = threading.Thread(target=do_checkout) + t.start() + + # Wait until we see at least one commit_fail_injected on DB-3. + print("\n-- waiting for first commit_fail_injected on DB-3 --") + scrape_logs( + "books_database_3", + since="1m", + patterns={"commit_fail_injected": 1}, + timeout=60, + ) + print("saw commit_fail_injected; staged txn is persisted on disk") + + # Confirm the txn file exists on the host-mounted state dir. + staged = [ + f + for f in os.listdir(STATE_DIR_HOST) + if f.startswith("txn_") and f.endswith(".json") + ] + print(f"host state dir = {STATE_DIR_HOST} contents={staged}") + assert staged, "no txn_*.json in DB-3 state dir; persistence broken" + + # Hard-kill DB-3. 
+ print("\n-- docker kill books_database_3 --") + r = _run(COMPOSE_BASE + ["kill", "books_database_3"], timeout=30) + if r.returncode != 0: + print(r.stdout, r.stderr) + raise RuntimeError("kill books_database_3 failed") + time.sleep(3.0) + + finally: + # Restore override file to the benign FAIL_NEXT_COMMIT=2 value + # before we restart DB-3, so the recovered instance runs clean. + open(override_path, "w").write(original) + + # Restart DB-3 WITHOUT any override — fail_next_commit=0. + print("\n-- restarting DB-3 (clean, FAIL_NEXT_COMMIT=0) --") + r = _run( + COMPOSE_BASE + + ["up", "-d", "--no-deps", "--force-recreate", "books_database_3"], + timeout=120, + ) + if r.returncode != 0: + print(r.stdout, r.stderr) + raise RuntimeError("restart DB-3 failed") + + # Must see recovered_pending in logs. + print("\n-- waiting for recovered_pending --") + scrape_logs( + "books_database_3", + since="1m", + patterns={"recovered_pending": 1}, + timeout=60, + ) + print("DB-3 recovered pending reservation from disk") + + wait_for_primary(3, timeout=60) + print("DB-3 reclaimed primary") + + # Coordinator retry should now finally apply the commit. + print("\n-- waiting for 2pc_commit_applied --") + exec_services = ["order_executor_1", "order_executor_2", "order_executor_3"] + scrape_logs( + exec_services, + since="2m", + patterns={"2pc_commit_applied": 1, "2pc_commit_retry_succeeded": 1}, + timeout=120, + ) + print("coordinator committed after DB-3 recovered") + + # Wait for orchestrator response thread. 
+ t.join(timeout=60) + if "resp" in resp_box: + print(f"checkout resp = {resp_box['resp']}") + else: + print(f"checkout err = {resp_box.get('err')}") + + time.sleep(2.0) + print("\n-- convergence check --") + for addr, rid in DB_HOSTS: + q = read_local(addr, "Book A") + print(f" DB-{rid}: Book A={q}") + assert q == 9, f"DB-{rid} expected 9 got {q}" + + remaining = [ + f + for f in os.listdir(STATE_DIR_HOST) + if f.startswith("txn_") and f.endswith(".json") + ] + assert not remaining, f"state dir should be empty, still has {remaining}" + print("DB-3 state dir cleaned after commit") + + print("\nPHASE 6 CRASH-RECOVERY E2E: PASSED") + + +if __name__ == "__main__": + main() diff --git a/order_executor/tests/test_2pc_end_to_end.py b/order_executor/tests/test_2pc_end_to_end.py new file mode 100644 index 000000000..fc4a04c0a --- /dev/null +++ b/order_executor/tests/test_2pc_end_to_end.py @@ -0,0 +1,178 @@ +"""Phase 5 end-to-end test. + +Submits a checkout, waits for the pipeline to execute, then confirms that +the DB primary and all backups hold the expected decremented stock and +that the coordinator wrote a decision record. 
+ +Run from host (Docker stack must be up): + python order_executor/tests/test_2pc_end_to_end.py +""" + +import json +import os +import subprocess +import sys +import time + +import grpc +import urllib.request + +HERE = os.path.dirname(os.path.abspath(__file__)) +sys.path.insert(0, os.path.abspath(os.path.join(HERE, "../../utils/pb/books_database"))) + +import books_database_pb2 as db_pb2 +import books_database_pb2_grpc as db_grpc + +DB_HOSTS = [ + ("127.0.0.1:50258", 1), + ("127.0.0.1:50259", 2), + ("127.0.0.1:50260", 3), +] +ORCH = "http://localhost:8081" + + +def find_primary(): + id_to_host = {rid: addr for addr, rid in DB_HOSTS} + for addr, _ in DB_HOSTS: + try: + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + r = stub.WhoIsPrimary(db_pb2.WhoIsPrimaryRequest(), timeout=2.0) + if r.leader_id: + return id_to_host[r.leader_id], r.leader_id + except Exception: + continue + raise RuntimeError("no DB primary") + + +def raw_write(addr, title, qty): + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + return stub.Write( + db_pb2.WriteRequest(title=title, quantity=qty), timeout=5.0 + ) + + +def read_local(addr, title): + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + return stub.ReadLocal( + db_pb2.ReadRequest(title=title), timeout=3.0 + ).quantity + + +def post_checkout(items): + payload = { + "user": { + "name": "Alice", + "contact": "alice@example.com", + "creditCard": { + "number": "4111111111111111", + "expirationDate": "12/30", + "cvv": "123", + }, + "billingAddress": { + "street": "1 Main St", + "city": "Tartu", + "state": "Tartumaa", + "zip": "51000", + "country": "EE", + }, + }, + "items": items, + "termsAndConditionsAccepted": True, + } + data = json.dumps(payload).encode("utf-8") + req = urllib.request.Request( + f"{ORCH}/checkout", + data=data, + headers={"Content-Type": "application/json"}, + method="POST", + ) + with 
urllib.request.urlopen(req, timeout=30) as resp: + return json.loads(resp.read()) + + +def wait_for_2pc_decision(order_id, timeout=30): + """Scrape executor logs until we see a 2pc_decision for this order.""" + deadline = time.time() + timeout + while time.time() < deadline: + out = subprocess.run( + [ + "docker", + "compose", + "logs", + "order_executor_1", + "order_executor_2", + "order_executor_3", + "--since", + "2m", + ], + capture_output=True, + text=True, + ) + for line in (out.stdout or "").splitlines(): + if f"2pc_decision order={order_id}" in line: + return line + time.sleep(1.0) + return None + + +def main(): + primary_addr, primary_id = find_primary() + print(f"DB primary = DB-{primary_id} @ {primary_addr}") + + # Reset to known baseline via raw Write (skips 2PC participant path). + baseline = {"Book A": 10, "Book B": 6} + for title, qty in baseline.items(): + raw_write(primary_addr, title, qty) + print(f"baseline stock = {baseline}") + + # Happy-path checkout. + checkout_items = [{"title": "Book A", "quantity": 2}, {"title": "Book B", "quantity": 1}] + print(f"POST /checkout items={checkout_items}") + resp = post_checkout(checkout_items) + order_id = resp.get("orderId") + print(f"orchestrator response = orderId={order_id} status={resp.get('status')!r}") + + # Wait for executor to report decision. + line = wait_for_2pc_decision(order_id, timeout=45) + print(f"decision log: {line!r}") + assert line is not None, "no 2pc_decision log line observed" + assert "decision=COMMIT" in line, f"expected COMMIT, got: {line}" + + # Give commit a moment to finish replicating. + time.sleep(2.0) + + for addr, rid in DB_HOSTS: + a = read_local(addr, "Book A") + b = read_local(addr, "Book B") + print(f" DB-{rid}: Book A={a} Book B={b}") + assert a == 8, f"DB-{rid} Book A expected 8 got {a}" + assert b == 5, f"DB-{rid} Book B expected 5 got {b}" + + print("\nPHASE 5 E2E HAPPY PATH: PASSED") + + # ABORT path: request more than available stock. 
+ print("\n-- ABORT path --") + resp = post_checkout([{"title": "Book A", "quantity": 1000}]) + abort_order_id = resp.get("orderId") + print(f"ORCH resp = {resp.get('status')!r}") + + line = wait_for_2pc_decision(abort_order_id, timeout=45) + print(f"decision log: {line!r}") + assert line is not None, "no 2pc_decision log line observed" + assert "decision=ABORT" in line, f"expected ABORT, got: {line}" + + time.sleep(1.0) + + for addr, rid in DB_HOSTS: + a = read_local(addr, "Book A") + print(f" DB-{rid}: Book A={a}") + assert a == 8, f"DB-{rid} Book A stayed 8, got {a}" + + print("\nPHASE 5 E2E ABORT PATH: PASSED") + + +if __name__ == "__main__": + main() diff --git a/order_executor/tests/test_2pc_fail_injection.py b/order_executor/tests/test_2pc_fail_injection.py new file mode 100644 index 000000000..00abe31de --- /dev/null +++ b/order_executor/tests/test_2pc_fail_injection.py @@ -0,0 +1,309 @@ +"""Phase 6 end-to-end test: participant-failure recovery via 2PC commit retry. + +Procedure: + 1. Baseline stock via raw Write against the current DB primary. + 2. Recreate books_database_3 with FAIL_NEXT_COMMIT=2 (compose override). + DB-3 has the highest ID, so bully election hands primary back to it. + 3. POST /checkout for a single Book A. Coordinator sends Prepare -> DB + votes commit -> coordinator sends Commit. First two Commits are + rejected (commit_fail_injected). Third succeeds. + 4. Scrape executor logs for 2pc_commit_retry (x2) + + 2pc_commit_retry_succeeded, and DB logs for commit_fail_injected (x2) + + commit_applied. All three replicas converge to stock-1. + 5. Restart books_database_3 without the override to return to baseline. 
+ +Run from host (Docker stack must be up): + python order_executor/tests/test_2pc_fail_injection.py +""" + +import json +import os +import subprocess +import sys +import time +import urllib.request + +import grpc + +HERE = os.path.dirname(os.path.abspath(__file__)) +sys.path.insert(0, os.path.abspath(os.path.join(HERE, "../../utils/pb/books_database"))) + +import books_database_pb2 as db_pb2 +import books_database_pb2_grpc as db_grpc + +DB_HOSTS = [ + ("127.0.0.1:50258", 1), + ("127.0.0.1:50259", 2), + ("127.0.0.1:50260", 3), +] +ORCH = "http://localhost:8081" +REPO_ROOT = os.path.abspath(os.path.join(HERE, "../..")) +COMPOSE_BASE = ["docker", "compose", "-f", "docker-compose.yaml"] +COMPOSE_OVERRIDE = COMPOSE_BASE + ["-f", "docker-compose.fail-inject.yaml"] + + +def _run(cmd, timeout=120): + return subprocess.run( + cmd, cwd=REPO_ROOT, capture_output=True, text=True, timeout=timeout + ) + + +def wait_for_primary(expected_id=None, timeout=45, stable_checks=3): + """Return (addr, leader_id) once a primary is stably usable. + + The naive "first replica that reports any leader_id wins" probe is + too optimistic. Around restarts and failovers, one replica can + advertise a leader_id before the named primary is actually ready to + serve primary-only Read/Write RPCs. We therefore require: + + (a) a 2-of-3 majority via WhoIsPrimary, + (b) a primary-only Read against the named leader succeeds, and + (c) the same leader_id holds for `stable_checks` consecutive probes. 
+ """ + id_to_host = {rid: addr for addr, rid in DB_HOSTS} + deadline = time.time() + timeout + last_answer = None + streak = 0 + + while time.time() < deadline: + votes = {} + for addr, _ in DB_HOSTS: + try: + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + r = stub.WhoIsPrimary( + db_pb2.WhoIsPrimaryRequest(), timeout=2.0 + ) + if r.leader_id: + votes[r.leader_id] = votes.get(r.leader_id, 0) + 1 + except Exception: + continue + + candidate_id = None + if votes: + candidate_id, candidate_votes = sorted( + votes.items(), key=lambda kv: (kv[1], kv[0]), reverse=True + )[0] + if candidate_votes < 2: + candidate_id = None + + if expected_id is not None and candidate_id != expected_id: + candidate_id = None + + probe_ok = False + if candidate_id is not None: + addr = id_to_host[candidate_id] + try: + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + probe = stub.Read( + db_pb2.ReadRequest(title="Book A"), timeout=2.0 + ) + probe_ok = bool(probe.success) + except Exception: + probe_ok = False + + if candidate_id is not None and probe_ok: + if candidate_id == last_answer: + streak += 1 + else: + last_answer = candidate_id + streak = 1 + + if streak >= stable_checks: + return id_to_host[candidate_id], candidate_id + else: + last_answer = None + streak = 0 + + time.sleep(1.0) + + if expected_id is None: + raise RuntimeError( + f"timed out waiting for any stable DB primary " + f"(last_answer={last_answer}, streak={streak})" + ) + raise RuntimeError( + f"timed out waiting for DB-{expected_id} to reclaim stable primary " + f"(last_answer={last_answer}, streak={streak})" + ) + + +def raw_write(addr, title, qty): + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + return stub.Write( + db_pb2.WriteRequest(title=title, quantity=qty), timeout=5.0 + ) + + +def read_local(addr, title): + with grpc.insecure_channel(addr) as ch: + stub = 
db_grpc.BooksDatabaseServiceStub(ch) + return stub.ReadLocal( + db_pb2.ReadRequest(title=title), timeout=3.0 + ).quantity + + +def post_checkout(items): + payload = { + "user": { + "name": "Bob", + "contact": "bob@example.com", + "creditCard": { + "number": "4111111111111111", + "expirationDate": "12/30", + "cvv": "123", + }, + "billingAddress": { + "street": "1 Main St", + "city": "Tartu", + "state": "Tartumaa", + "zip": "51000", + "country": "EE", + }, + }, + "items": items, + "termsAndConditionsAccepted": True, + } + data = json.dumps(payload).encode("utf-8") + req = urllib.request.Request( + f"{ORCH}/checkout", + data=data, + headers={"Content-Type": "application/json"}, + method="POST", + ) + with urllib.request.urlopen(req, timeout=30) as resp: + return json.loads(resp.read()) + + +def scrape_logs(services, since, patterns, timeout=60): + """Block until every pattern in `patterns` appears at least the + expected number of times in the merged logs of `services` since + `since`. patterns is a dict {pattern_string: min_count}.""" + if isinstance(services, str): + services = [services] + deadline = time.time() + timeout + while time.time() < deadline: + out = _run( + COMPOSE_BASE + ["logs", *services, "--since", since], + timeout=30, + ) + text = (out.stdout or "") + (out.stderr or "") + missing = {} + for p, need in patterns.items(): + got = text.count(p) + if got < need: + missing[p] = (got, need) + if not missing: + return text + time.sleep(1.0) + raise AssertionError( + f"timed out scraping {services} logs; still missing: {missing}" + ) + + +def main(): + # --- Baseline --- + addr, pid = wait_for_primary(timeout=45) + print(f"initial primary = DB-{pid} @ {addr}") + raw_write(addr, "Book A", 10) + print("baseline stock: Book A = 10") + + # --- Arm fail injection on DB-3 and wait for it to reclaim primary --- + print("\n-- arming FAIL_NEXT_COMMIT=2 on DB-3 (recreate with override) --") + r = _run( + COMPOSE_OVERRIDE + + [ + "up", + "-d", + "--no-deps", + 
"--force-recreate", + "books_database_3", + ], + timeout=120, + ) + if r.returncode != 0: + print("compose up stdout:", r.stdout) + print("compose up stderr:", r.stderr) + raise RuntimeError("failed to recreate books_database_3 with override") + + primary_addr, primary_id = wait_for_primary(expected_id=3, timeout=60) + print(f"DB-{primary_id} reclaimed primary @ {primary_addr}") + # Let the post-election state settle (heartbeats, etc.) + time.sleep(2.0) + + # --- Submit the checkout --- + t_submit = time.time() + since_ts = "30s" # log scrape window + print("\n-- POST /checkout Book A x1 --") + resp = post_checkout([{"title": "Book A", "quantity": 1}]) + order_id = resp.get("orderId") + print(f"orchestrator resp: orderId={order_id} status={resp.get('status')!r}") + + # --- DB-3 should log 2 injected failures + a real commit_applied --- + print("\n-- scraping DB-3 logs --") + db_text = scrape_logs( + "books_database_3", + since=since_ts, + patterns={ + f"commit_fail_injected order={order_id}": 2, + f"commit_applied order={order_id}": 1, + }, + timeout=90, + ) + print("DB-3 log snippet:") + for line in db_text.splitlines(): + if order_id and order_id in line: + print(f" {line}") + + # --- Executor should log 2 retry lines + 1 retry_succeeded --- + print("\n-- scraping executor logs (all 3 merged; leader is unknown) --") + exec_services = ["order_executor_1", "order_executor_2", "order_executor_3"] + exec_text = scrape_logs( + exec_services, + since=since_ts, + patterns={ + f"2pc_commit_retry order={order_id}": 2, + f"2pc_commit_retry_succeeded order={order_id}": 1, + f"2pc_commit_applied order={order_id}": 1, + }, + timeout=90, + ) + for line in exec_text.splitlines(): + if order_id and order_id in line and "2pc_commit" in line: + print(f" {line}") + + # --- Convergence: all 3 replicas must show Book A = 9 --- + time.sleep(2.0) + print("\n-- convergence check --") + for addr, rid in DB_HOSTS: + q = read_local(addr, "Book A") + print(f" DB-{rid}: Book A={q}") + assert 
q == 9, f"DB-{rid} expected 9 got {q}" + + print("\nPHASE 6 FAIL-INJECTION E2E: PASSED") + + # --- Cleanup: recreate DB-3 without override so fail_next_commit=0 again --- + print("\n-- cleanup: recreate DB-3 without override --") + r = _run( + COMPOSE_BASE + + [ + "up", + "-d", + "--no-deps", + "--force-recreate", + "books_database_3", + ], + timeout=120, + ) + if r.returncode != 0: + print("cleanup stdout:", r.stdout) + print("cleanup stderr:", r.stderr) + # Wait for DB-3 to reclaim primary with the clean env. + wait_for_primary(expected_id=3, timeout=60) + print("DB-3 back to normal, fail_next_commit=0") + + +if __name__ == "__main__": + main() diff --git a/order_queue/Dockerfile b/order_queue/Dockerfile new file mode 100644 index 000000000..dd5446043 --- /dev/null +++ b/order_queue/Dockerfile @@ -0,0 +1,5 @@ +FROM python:3.11 +WORKDIR /app +COPY ./order_queue/requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt +CMD python utils/other/hotreload.py "order_queue/src/app.py" \ No newline at end of file diff --git a/order_queue/requirements.txt b/order_queue/requirements.txt new file mode 100644 index 000000000..1731b14fb --- /dev/null +++ b/order_queue/requirements.txt @@ -0,0 +1,3 @@ +grpcio==1.78.0 +grpcio-tools==1.78.0 +watchdog==6.0.0 \ No newline at end of file diff --git a/order_queue/src/app.py b/order_queue/src/app.py new file mode 100644 index 000000000..d1ce51b07 --- /dev/null +++ b/order_queue/src/app.py @@ -0,0 +1,70 @@ +import os +import sys +import threading +from collections import deque +from concurrent import futures + +FILE = __file__ if "__file__" in globals() else os.getenv("PYTHONFILE", "") + +queue_grpc_path = os.path.abspath( + os.path.join(FILE, "../../../utils/pb/order_queue") +) +sys.path.insert(0, queue_grpc_path) + +import grpc +import order_queue_pb2 as order_queue +import order_queue_pb2_grpc as order_queue_grpc + + +orders = deque() +queue_lock = threading.Lock() + + +class 
OrderQueueService(order_queue_grpc.OrderQueueServiceServicer): + def Enqueue(self, request, context): + with queue_lock: + orders.append(request.order) + + print( + f"[QUEUE] action=enqueue order={request.order.order_id} " + f"size={len(orders)}" + ) + return order_queue.QueueResponse( + success=True, + message="Order enqueued." + ) + + def Dequeue(self, request, context): + with queue_lock: + if not orders: + return order_queue.DequeueResponse( + success=False, + message="Queue is empty." + ) + order = orders.popleft() + + print( + f"[QUEUE] action=dequeue order={order.order_id} " + f"executor={request.executor_id} size={len(orders)}" + ) + return order_queue.DequeueResponse( + success=True, + message="Order dequeued.", + order=order + ) + + +def serve(): + server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + order_queue_grpc.add_OrderQueueServiceServicer_to_server( + OrderQueueService(), server + ) + port = "50054" + server.add_insecure_port("[::]:" + port) + server.start() + print(f"Order queue server started. Listening on port {port}.") + server.wait_for_termination() + + +if __name__ == "__main__": + serve() \ No newline at end of file diff --git a/payment_service/Dockerfile b/payment_service/Dockerfile new file mode 100644 index 000000000..f794b2724 --- /dev/null +++ b/payment_service/Dockerfile @@ -0,0 +1,5 @@ +FROM python:3.11 +WORKDIR /app +COPY ./payment_service/requirements.txt . 
+RUN pip install --no-cache-dir -r requirements.txt +CMD python utils/other/hotreload.py "payment_service/src/app.py" diff --git a/payment_service/requirements.txt b/payment_service/requirements.txt new file mode 100644 index 000000000..c267def1b --- /dev/null +++ b/payment_service/requirements.txt @@ -0,0 +1,3 @@ +grpcio==1.78.0 +grpcio-tools==1.78.0 +watchdog==6.0.0 diff --git a/payment_service/src/app.py b/payment_service/src/app.py new file mode 100644 index 000000000..a20e0c24d --- /dev/null +++ b/payment_service/src/app.py @@ -0,0 +1,143 @@ +"""Payment service: 2PC participant. + +This service never actually charges a card. For the Checkpoint 3 demo it is +a single-instance gRPC server that always votes commit on Prepare, then logs +the subsequent Commit or Abort. Idempotent on retries (the coordinator may +resend Commit/Abort). +""" + +import os +import sys +import threading +import time +from concurrent import futures + +import grpc + +FILE = __file__ if "__file__" in globals() else os.getenv("PYTHONFILE", "") + +pay_grpc_path = os.path.abspath( + os.path.join(FILE, "../../../utils/pb/payment_service") +) +sys.path.insert(0, pay_grpc_path) + +import payment_pb2 as pay_pb2 +import payment_pb2_grpc as pay_grpc + + +PORT = os.getenv("PAYMENT_PORT", "50061") + + +# Per-order bookkeeping. `prepared` stores the amount/user for logging on +# Commit. `committed` and `aborted` are sets used purely for idempotent +# retry handling. +state_lock = threading.Lock() +prepared = {} # order_id -> {"amount": float, "user_name": str} +committed = set() +aborted = set() + + +class PaymentService(pay_grpc.PaymentServiceServicer): + + def Prepare(self, request, context): + """Phase 1 of 2PC on the payment side. Stages the order amount in + the in-memory `prepared` map and always votes commit (the demo + does not simulate card-network rejection). 
Idempotent: replaying
+ Prepare after a committed/aborted state returns the recorded
+ outcome instead of re-staging."""
+ order_id = request.order_id
+ with state_lock:
+ if order_id in prepared:
+ print(
+ f"[PAYMENT] prepare_idempotent order={order_id} "
+ f"(already prepared)"
+ )
+ return pay_pb2.PaymentPrepareResponse(
+ vote_commit=True, message="already prepared"
+ )
+ if order_id in committed:
+ return pay_pb2.PaymentPrepareResponse(
+ vote_commit=True, message="already committed"
+ )
+ if order_id in aborted:
+ return pay_pb2.PaymentPrepareResponse(
+ vote_commit=False, message="already aborted"
+ )
+ prepared[order_id] = {
+ "amount": request.amount,
+ "user_name": request.user_name,
+ }
+
+ print(
+ f"[PAYMENT] prepare_vote_commit order={order_id} "
+ f"user=\"{request.user_name}\" amount={request.amount:.2f}"
+ )
+ return pay_pb2.PaymentPrepareResponse(vote_commit=True, message="ok")
+
+ def Commit(self, request, context):
+ """Phase 2 commit. Moves the order from `prepared` to `committed`
+ and logs the settled amount. Idempotent on retry (a second call
+ logs `commit_idempotent` and still returns success). A Commit that
+ arrives without a matching Prepare is still accepted so a retrying
+ coordinator can make progress; the authoritative decision record
+ lives on the coordinator, not here."""
+ order_id = request.order_id
+ with state_lock:
+ if order_id in committed:
+ print(f"[PAYMENT] commit_idempotent order={order_id}")
+ return pay_pb2.PaymentCommitResponse(
+ success=True, message="already committed"
+ )
+ info = prepared.pop(order_id, None)
+ committed.add(order_id)
+
+ if info is None:
+ # Commit without Prepare. Log and accept so the coordinator can
+ # make progress; the decision record lives on the coordinator. 
+ print( + f"[PAYMENT] commit_without_prepare order={order_id} " + f"(accepted)" + ) + else: + print( + f"[PAYMENT] commit_applied order={order_id} " + f"user=\"{info['user_name']}\" amount={info['amount']:.2f}" + ) + return pay_pb2.PaymentCommitResponse(success=True, message="ok") + + def Abort(self, request, context): + """Phase 2 abort. Drops any `prepared` reservation for the order + and records it in `aborted`. Idempotent; also tolerates an Abort + that arrives before Prepare (logged as `abort_without_prepare` + and treated as a success).""" + order_id = request.order_id + with state_lock: + if order_id in aborted: + print(f"[PAYMENT] abort_idempotent order={order_id}") + return pay_pb2.PaymentAbortResponse( + success=True, message="already aborted" + ) + info = prepared.pop(order_id, None) + aborted.add(order_id) + + if info is None: + print(f"[PAYMENT] abort_without_prepare order={order_id}") + else: + print( + f"[PAYMENT] abort_ok order={order_id} " + f"user=\"{info['user_name']}\" amount={info['amount']:.2f}" + ) + return pay_pb2.PaymentAbortResponse(success=True, message="ok") + + +def serve(): + server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + pay_grpc.add_PaymentServiceServicer_to_server(PaymentService(), server) + server.add_insecure_port("[::]:" + PORT) + server.start() + print(f"[PAYMENT] listening on port {PORT}") + server.wait_for_termination() + + +if __name__ == "__main__": + serve() diff --git a/payment_service/tests/smoke_test.py b/payment_service/tests/smoke_test.py new file mode 100644 index 000000000..d22ac8ded --- /dev/null +++ b/payment_service/tests/smoke_test.py @@ -0,0 +1,194 @@ +"""Phase 4 smoke tests: payment_service + books_database participant RPCs. + +Does NOT drive a 2PC coordinator - that is Phase 5. Here we just prove +each participant RPC is wired end-to-end and behaves as specified. 
+ +Run from the host: + python payment_service/tests/smoke_test.py +""" + +import os +import sys +import time + +import grpc + +HERE = os.path.dirname(os.path.abspath(__file__)) +sys.path.insert(0, os.path.abspath(os.path.join(HERE, "../../utils/pb/payment_service"))) +sys.path.insert(0, os.path.abspath(os.path.join(HERE, "../../utils/pb/books_database"))) + +import payment_pb2 as pay_pb2 +import payment_pb2_grpc as pay_grpc +import books_database_pb2 as db_pb2 +import books_database_pb2_grpc as db_grpc + +DB_HOSTS = [("127.0.0.1:50258", 1), ("127.0.0.1:50259", 2), ("127.0.0.1:50260", 3)] +PAY_HOST = "127.0.0.1:50261" + + +def find_db_primary(): + id_to_host = {rid: addr for addr, rid in DB_HOSTS} + for addr, _ in DB_HOSTS: + try: + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + r = stub.WhoIsPrimary(db_pb2.WhoIsPrimaryRequest(), timeout=2.0) + if r.leader_id: + return id_to_host[r.leader_id], r.leader_id + except Exception: + continue + raise RuntimeError("no DB primary found") + + +def read(addr, title): + """Read from the primary only (client-facing).""" + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + return stub.Read(db_pb2.ReadRequest(title=title), timeout=3.0) + + +def read_local(addr, title): + """Read from any replica's local copy (debug/ops).""" + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + return stub.ReadLocal(db_pb2.ReadRequest(title=title), timeout=3.0) + + +def db_prepare(addr, order_id, items): + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + item_msgs = [db_pb2.PrepareItem(title=t, quantity=q) for t, q in items] + return stub.Prepare( + db_pb2.PrepareRequest(order_id=order_id, items=item_msgs), + timeout=5.0, + ) + + +def db_commit(addr, order_id): + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + return 
stub.Commit(db_pb2.CommitRequest(order_id=order_id), timeout=5.0) + + +def db_abort(addr, order_id): + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + return stub.Abort(db_pb2.AbortRequest(order_id=order_id), timeout=5.0) + + +def pay_prepare(order_id, amount, user): + with grpc.insecure_channel(PAY_HOST) as ch: + stub = pay_grpc.PaymentServiceStub(ch) + return stub.Prepare( + pay_pb2.PaymentPrepareRequest( + order_id=order_id, amount=amount, user_name=user + ), + timeout=3.0, + ) + + +def pay_commit(order_id): + with grpc.insecure_channel(PAY_HOST) as ch: + stub = pay_grpc.PaymentServiceStub(ch) + return stub.Commit( + pay_pb2.PaymentCommitRequest(order_id=order_id), timeout=3.0 + ) + + +def pay_abort(order_id): + with grpc.insecure_channel(PAY_HOST) as ch: + stub = pay_grpc.PaymentServiceStub(ch) + return stub.Abort( + pay_pb2.PaymentAbortRequest(order_id=order_id), timeout=3.0 + ) + + +def main(): + db_addr, db_id = find_db_primary() + print(f"DB primary: DB-{db_id} @ {db_addr}") + + # Per-run prefix so the participant's idempotency caches don't blur runs. 
+ rp = f"smoke-{int(time.time())}" + + # --- Payment service round-trip --- + r = pay_prepare(f"{rp}-p1", 42.50, "Alice") + print(f"payment.Prepare -> vote_commit={r.vote_commit} msg={r.message!r}") + assert r.vote_commit + + r = pay_commit(f"{rp}-p1") + print(f"payment.Commit -> success={r.success} msg={r.message!r}") + assert r.success + + # Idempotent commit + r = pay_commit(f"{rp}-p1") + print(f"payment.Commit (retry) -> success={r.success} msg={r.message!r}") + assert r.success + + # Prepare + Abort + r = pay_prepare(f"{rp}-p2", 9.99, "Bob") + assert r.vote_commit + r = pay_abort(f"{rp}-p2") + print(f"payment.Abort -> success={r.success} msg={r.message!r}") + assert r.success + + # --- DB participant: reset Book A to a known value first --- + with grpc.insecure_channel(db_addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + stub.Write(db_pb2.WriteRequest(title="Book A", quantity=10), timeout=3.0) + stub.Write(db_pb2.WriteRequest(title="Book B", quantity=6), timeout=3.0) + print(f"DB reset: Book A=10 Book B=6") + + # Test 1: Prepare + Commit applies decrement everywhere. + r = db_prepare(db_addr, f"{rp}-o1", [("Book A", 2), ("Book B", 1)]) + print(f"db.Prepare(o1) -> vote={r.vote_commit} msg={r.message!r}") + assert r.vote_commit + + r = db_commit(db_addr, f"{rp}-o1") + print(f"db.Commit(o1) -> ok={r.success} msg={r.message!r}") + assert r.success + + # Convergence check across all replicas (via ReadLocal). + for addr, rid in DB_HOSTS: + a = read_local(addr, "Book A").quantity + b = read_local(addr, "Book B").quantity + print(f" DB-{rid}: Book A={a} Book B={b}") + assert a == 8 and b == 5 + + # Test 2: Prepare + Abort leaves stock unchanged. + r = db_prepare(db_addr, f"{rp}-o2", [("Book A", 3)]) + assert r.vote_commit + r = db_abort(db_addr, f"{rp}-o2") + print(f"db.Abort(o2) -> ok={r.success} msg={r.message!r}") + assert read(db_addr, "Book A").quantity == 8 + + # Test 3: Prepare more than stock -> vote_commit=False, no staging. 
+ r = db_prepare(db_addr, f"{rp}-o3", [("Book A", 1000)]) + print(f"db.Prepare(o3, huge) -> vote={r.vote_commit} msg={r.message!r}") + assert not r.vote_commit + assert read(db_addr, "Book A").quantity == 8 + + # Test 4: Two overlapping Prepares that together exceed stock. + # Book A is 8. We stage an order for 6. Then an order for 5 should fail + # because the first order has 6 reserved. + r = db_prepare(db_addr, f"{rp}-o4a", [("Book A", 6)]) + assert r.vote_commit + r = db_prepare(db_addr, f"{rp}-o4b", [("Book A", 5)]) + print(f"db.Prepare(o4b, would oversell) -> vote={r.vote_commit} msg={r.message!r}") + assert not r.vote_commit + + # Clean up: abort o4a, confirm stock back. + db_abort(db_addr, f"{rp}-o4a") + assert read(db_addr, "Book A").quantity == 8 + + # Test 5: Prepare idempotence. + r1 = db_prepare(db_addr, f"{rp}-o5", [("Book A", 1)]) + r2 = db_prepare(db_addr, f"{rp}-o5", [("Book A", 1)]) + print(f"db.Prepare(o5) x2 -> first={r1.vote_commit} second={r2.vote_commit}") + assert r1.vote_commit and r2.vote_commit + db_abort(db_addr, f"{rp}-o5") + + print("\nALL SMOKE TESTS PASSED") + + +if __name__ == "__main__": + main() diff --git a/scripts/_cp3_db_probe.py b/scripts/_cp3_db_probe.py new file mode 100644 index 000000000..169b79323 --- /dev/null +++ b/scripts/_cp3_db_probe.py @@ -0,0 +1,116 @@ +"""Helper for scripts/checkpoint3-checks.ps1. + +Subcommands: + read-stock print one line per replica: "DB-<id>=<qty>" + find-primary print "primary_id=<N>" and "primary_addr=<host>:<port>" + all-reachable exit 0 if all 3 DB replicas answer WhoIsPrimary + +All subcommands exit non-zero on failure with a short reason on stderr. + +Used by the PowerShell verification script so it can assert on the +actual gRPC state of the replicated books_database rather than going +through the orchestrator path. 
+""" + +import os +import sys + +HERE = os.path.dirname(os.path.abspath(__file__)) +sys.path.insert(0, os.path.abspath(os.path.join(HERE, "../utils/pb/books_database"))) + +import grpc # noqa: E402 +import books_database_pb2 as db_pb2 # noqa: E402 +import books_database_pb2_grpc as db_grpc # noqa: E402 + +REPLICAS = [ + (1, "127.0.0.1:50258"), + (2, "127.0.0.1:50259"), + (3, "127.0.0.1:50260"), +] + + +def _stub(addr): + return db_grpc.BooksDatabaseServiceStub(grpc.insecure_channel(addr)) + + +def cmd_read_stock(title, tolerate_missing=False): + out_lines = [] + errors = [] + for rid, addr in REPLICAS: + try: + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + r = stub.ReadLocal( + db_pb2.ReadRequest(title=title), timeout=3.0 + ) + out_lines.append(f"DB-{rid}={r.quantity}") + except Exception as exc: + if tolerate_missing: + out_lines.append(f"DB-{rid}=UNREACHABLE") + else: + errors.append(f"DB-{rid}: {exc!r}") + if errors: + sys.stderr.write("read-stock errors: " + "; ".join(errors) + "\n") + return 1 + print("\n".join(out_lines)) + return 0 + + +def cmd_find_primary(): + id_to_addr = { + 1: "127.0.0.1:50258", + 2: "127.0.0.1:50259", + 3: "127.0.0.1:50260", + } + for _, addr in REPLICAS: + try: + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + r = stub.WhoIsPrimary( + db_pb2.WhoIsPrimaryRequest(), timeout=2.0 + ) + if r.leader_id: + print(f"primary_id={r.leader_id}") + print(f"primary_addr={id_to_addr[r.leader_id]}") + return 0 + except Exception: + continue + sys.stderr.write("no DB primary reachable\n") + return 2 + + +def cmd_all_reachable(): + for rid, addr in REPLICAS: + try: + with grpc.insecure_channel(addr) as ch: + stub = db_grpc.BooksDatabaseServiceStub(ch) + stub.WhoIsPrimary(db_pb2.WhoIsPrimaryRequest(), timeout=2.0) + except Exception as exc: + sys.stderr.write(f"DB-{rid} unreachable: {exc!r}\n") + return 3 + print("all_reachable=1") + return 0 + + +def main(): + if 
len(sys.argv) < 2: + sys.stderr.write("usage: _cp3_db_probe.py <subcommand> [args...]\n") + return 64 + sub = sys.argv[1] + if sub == "read-stock": + if len(sys.argv) < 3: + sys.stderr.write("read-stock requires <title>\n") + return 64 + tolerate = "--tolerate-missing" in sys.argv[3:] + title = sys.argv[2] + return cmd_read_stock(title, tolerate_missing=tolerate) + if sub == "find-primary": + return cmd_find_primary() + if sub == "all-reachable": + return cmd_all_reachable() + sys.stderr.write(f"unknown subcommand: {sub}\n") + return 64 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts/checkpoint3-checks.ps1 b/scripts/checkpoint3-checks.ps1 new file mode 100644 index 000000000..d42d317e3 --- /dev/null +++ b/scripts/checkpoint3-checks.ps1 @@ -0,0 +1,536 @@ +param( + [switch]$SkipBuild, + [switch]$SkipFailover, + [switch]$SkipBonus +) + +Set-StrictMode -Version Latest +$ErrorActionPreference = "Stop" + +$projectRoot = Split-Path -Parent $PSScriptRoot +Set-Location $projectRoot + +$apiUrl = "http://127.0.0.1:8081/checkout" +$dbServices = @("books_database_1", "books_database_2", "books_database_3") +$executorServices = @("order_executor_1", "order_executor_2", "order_executor_3") +$logServices = @( + "orchestrator", + "payment_service", + "order_queue" +) + $executorServices + $dbServices +$pythonFiles = @( + "books_database/src/app.py", + "payment_service/src/app.py", + "order_executor/src/app.py", + "orchestrator/src/app.py" +) +$results = [System.Collections.Generic.List[object]]::new() +$script:CurrentFailureList = $null + +function Write-Section { + param([string]$Message) + Write-Host "" + Write-Host "== $Message ==" +} + +function Add-CheckResult { + param( + [string]$Name, + [bool]$Passed, + [string]$Details + ) + + $results.Add([pscustomobject]@{ + Name = $Name + Passed = $Passed + Details = $Details + }) + + $status = if ($Passed) { "PASS" } else { "FAIL" } + Write-Host ("[{0}] {1} - {2}" -f $status, $Name, $Details) +} + +function 
Run-Compose { + param([string[]]$ComposeArgs) + + # Docker compose prints container-status lines to stderr. Capturing with + # 2>&1 under $ErrorActionPreference='Stop' would turn those lines into + # terminating errors, so suppress stderr for this call only and rely on + # the exit code instead. + $prev = $ErrorActionPreference + $ErrorActionPreference = "Continue" + try { + $output = & docker compose @ComposeArgs 2>&1 + $exitCode = $LASTEXITCODE + } + finally { + $ErrorActionPreference = $prev + } + + return [pscustomobject]@{ + ExitCode = $exitCode + Output = ($output | Out-String).TrimEnd() + } +} + +function Get-ComposeLogs { + param( + [string[]]$Services, + [int]$Tail = 400, + [string]$Since + ) + + $logArgs = @("logs", "--no-color") + if ($Since) { + $logArgs += @("--since", $Since) + } + $logArgs += "--tail=$Tail" + $logArgs += $Services + + $result = Run-Compose $logArgs + if ($result.ExitCode -ne 0) { + throw "docker compose logs failed.`n$($result.Output)" + } + + return $result.Output +} + +function Invoke-Checkout { + param([string]$FilePath) + + # Invoke-WebRequest has intermittent NPE issues against our orchestrator, + # so shell out to Python which handles the HTTP dance cleanly. 
+ $absPath = (Resolve-Path $FilePath).Path + $pyCode = @" +import json, sys, urllib.request +with open(r'$absPath', 'rb') as f: + body = f.read() +req = urllib.request.Request( + '$apiUrl', data=body, + headers={'Content-Type': 'application/json'}, method='POST') +try: + with urllib.request.urlopen(req, timeout=30) as resp: + code = resp.status + text = resp.read().decode('utf-8') +except urllib.error.HTTPError as e: + code = e.code + text = e.read().decode('utf-8') +print(code) +print(text) +"@ + $out = & python -c $pyCode + if ($LASTEXITCODE -ne 0) { + throw "checkout POST failed: $($out | Out-String)" + } + $lines = $out -split "\r?\n", 2 + $code = [int]$lines[0] + $body = if ($lines.Count -gt 1) { $lines[1] } else { "" } + return [pscustomobject]@{ + StatusCode = $code + Json = ($body | ConvertFrom-Json) + Raw = $body + } +} + +function Wait-ForOrchestrator { + $pyCode = @" +import sys, time, urllib.request +deadline = time.time() + 60 +while time.time() < deadline: + try: + with urllib.request.urlopen('http://127.0.0.1:8081/', timeout=3) as r: + if r.status == 200: + sys.exit(0) + except Exception: + pass + time.sleep(2) +sys.exit(1) +"@ + & python -c $pyCode | Out-Null + if ($LASTEXITCODE -ne 0) { + throw "Orchestrator did not become ready on http://127.0.0.1:8081/." + } +} + +function Wait-ForDbPrimary { + param([int]$TimeoutSeconds = 40) + + $deadline = (Get-Date).AddSeconds($TimeoutSeconds) + while ((Get-Date) -lt $deadline) { + $out = & python scripts/_cp3_db_probe.py find-primary 2>$null + if ($LASTEXITCODE -eq 0) { + $text = ($out | Out-String) + $m = [regex]::Match($text, "primary_id=(\d+)") + if ($m.Success) { + return [int]$m.Groups[1].Value + } + } + Start-Sleep -Seconds 2 + } + throw "No DB primary was elected within ${TimeoutSeconds}s." +} + +function Assert-Condition { + param( + [bool]$Condition, + [string]$Message + ) + + if (-not $Condition) { + if ($null -eq $script:CurrentFailureList) { + throw "Current failure list is not initialized." 
+ } + $script:CurrentFailureList.Add($Message) + } +} + +function Get-OrderLogLines { + param( + [string]$Logs, + [string]$OrderId + ) + + $normalizedOrderId = $OrderId.Trim() + + return @( + ($Logs -split "\r?\n") | Where-Object { $_ -like "*$normalizedOrderId*" } + ) +} + +function Read-StockOnAllReplicas { + param( + [string]$Title, + [switch]$TolerateMissing + ) + + $probeArgs = @("scripts/_cp3_db_probe.py", "read-stock", $Title) + if ($TolerateMissing) { $probeArgs += "--tolerate-missing" } + $out = & python @probeArgs 2>&1 + if ($LASTEXITCODE -ne 0) { + throw "read-stock failed for title '$Title':`n$out" + } + $map = @{} + foreach ($line in ($out -split "\r?\n")) { + $m = [regex]::Match($line, "^DB-(\d+)=(.+)$") + if ($m.Success) { + $rid = [int]$m.Groups[1].Value + $val = $m.Groups[2].Value + if ($val -match "^-?\d+$") { + $map[$rid] = [int]$val + } + else { + $map[$rid] = $val + } + } + } + return $map +} + +function Wait-For2pcOutcome { + param( + [string]$OrderId, + [string]$ExpectedDecision, + [int]$TimeoutSeconds = 30 + ) + + $deadline = (Get-Date).AddSeconds($TimeoutSeconds) + $latestLogs = "" + while ((Get-Date) -lt $deadline) { + $latestLogs = Get-ComposeLogs -Services $logServices -Tail 1500 -Since "5m" + $orderLines = Get-OrderLogLines -Logs $latestLogs -OrderId $OrderId + $decisionLines = @($orderLines | Where-Object { + $_ -match "2pc_decision" -and $_ -match "decision=$ExpectedDecision" + }) + if ($ExpectedDecision -eq "COMMIT") { + $applied = @($orderLines | Where-Object { $_ -match "2pc_commit_applied" }) + if ($decisionLines.Count -ge 1 -and $applied.Count -ge 1) { + return $latestLogs + } + } + else { + $aborted = @($orderLines | Where-Object { $_ -match "2pc_abort_applied" }) + if ($decisionLines.Count -ge 1 -and $aborted.Count -ge 1) { + return $latestLogs + } + } + Start-Sleep -Seconds 1 + } + return $latestLogs +} + +function Test-ValidCommit { + $failures = [System.Collections.Generic.List[string]]::new() + $script:CurrentFailureList = 
$failures + + $title = "Book A" + $before = Read-StockOnAllReplicas -Title $title + $response = Invoke-Checkout -FilePath "test_checkout.json" + $orderId = ([string]$response.Json.orderId).Trim() + $logs = Wait-For2pcOutcome -OrderId $orderId -ExpectedDecision "COMMIT" + $orderLines = Get-OrderLogLines -Logs $logs -OrderId $orderId + + Assert-Condition ($response.StatusCode -eq 200) "Expected HTTP 200 but got $($response.StatusCode)." + Assert-Condition ("Order Approved" -eq [string]$response.Json.status) "Expected 'Order Approved' but got '$($response.Json.status)'." + Assert-Condition (@($orderLines | Where-Object { $_ -match "\[PAYMENT\]" -and $_ -match "prepare_vote_commit" }).Count -ge 1) "payment prepare_vote_commit log missing for $orderId." + Assert-Condition (@($orderLines | Where-Object { $_ -match "\[PAYMENT\]" -and $_ -match "commit_applied" }).Count -ge 1) "payment commit_applied log missing for $orderId." + Assert-Condition (@($orderLines | Where-Object { $_ -match "\[DB-" -and $_ -match "prepare_vote_commit" }).Count -ge 1) "DB prepare_vote_commit log missing for $orderId." + Assert-Condition (@($orderLines | Where-Object { $_ -match "\[DB-" -and $_ -match "commit_applied" -and $_ -match "backups_acked=" }).Count -ge 1) "DB commit_applied log (with backups_acked) missing for $orderId." + Assert-Condition (@($orderLines | Where-Object { $_ -match "\[EXEC-" -and $_ -match "2pc_decision" -and $_ -match "decision=COMMIT" -and $_ -match "participants=\[db,payment\]" }).Count -ge 1) "executor 2pc_decision=COMMIT (with participants) missing for $orderId." + Assert-Condition (@($orderLines | Where-Object { $_ -match "\[EXEC-" -and $_ -match "2pc_commit_applied" }).Count -ge 1) "executor 2pc_commit_applied missing for $orderId." + + # Allow a brief moment for replication-apply logs to settle before the + # direct-read convergence check. 
+ Start-Sleep -Seconds 2 + $after = Read-StockOnAllReplicas -Title $title + foreach ($rid in 1..3) { + $b = $before[$rid]; $a = $after[$rid] + Assert-Condition (($b - $a) -eq 1) "DB-$rid stock change was ($b -> $a), expected -1." + } + + $passed = $failures.Count -eq 0 + $details = if ($passed) { + "orderId=$orderId before=[$($before[1]),$($before[2]),$($before[3])] after=[$($after[1]),$($after[2]),$($after[3])]" + } + else { ($failures -join " ") } + + Add-CheckResult -Name "2pc:valid-commit" -Passed $passed -Details $details + $script:CurrentFailureList = $null + return [pscustomobject]@{ Passed = $passed; OrderId = $orderId; After = $after } +} + +function Test-OversoldAbort { + $failures = [System.Collections.Generic.List[string]]::new() + $script:CurrentFailureList = $failures + + $title = "Book A" + $before = Read-StockOnAllReplicas -Title $title + $response = Invoke-Checkout -FilePath "test_checkout_oversold.json" + $orderId = ([string]$response.Json.orderId).Trim() + $logs = Wait-For2pcOutcome -OrderId $orderId -ExpectedDecision "ABORT" + $orderLines = Get-OrderLogLines -Logs $logs -OrderId $orderId + + Assert-Condition ($response.StatusCode -eq 200) "Expected HTTP 200 but got $($response.StatusCode)." + Assert-Condition (@($orderLines | Where-Object { $_ -match "\[DB-" -and $_ -match "prepare_vote_abort" }).Count -ge 1) "DB prepare_vote_abort log missing for oversold $orderId." + Assert-Condition (@($orderLines | Where-Object { $_ -match "\[EXEC-" -and $_ -match "2pc_decision" -and $_ -match "decision=ABORT" }).Count -ge 1) "executor 2pc_decision=ABORT missing for oversold $orderId." + Assert-Condition (@($orderLines | Where-Object { $_ -match "\[EXEC-" -and $_ -match "2pc_abort_applied" }).Count -ge 1) "executor 2pc_abort_applied missing for oversold $orderId." 
+ Assert-Condition (@($orderLines | Where-Object { $_ -match "\[PAYMENT\]" -and ($_ -match "abort_ok" -or $_ -match "abort_without_prepare" -or $_ -match "abort_idempotent") }).Count -ge 1) "payment abort log missing for oversold $orderId." + Assert-Condition (@($orderLines | Where-Object { $_ -match "\[DB-" -and $_ -match "commit_applied" }).Count -eq 0) "DB commit_applied unexpectedly fired for oversold $orderId." + + Start-Sleep -Seconds 1 + $after = Read-StockOnAllReplicas -Title $title + foreach ($rid in 1..3) { + Assert-Condition ($before[$rid] -eq $after[$rid]) "DB-$rid stock moved on abort: $($before[$rid]) -> $($after[$rid])." + } + + $passed = $failures.Count -eq 0 + $details = if ($passed) { + "orderId=$orderId before=[$($before[1]),$($before[2]),$($before[3])] after=[$($after[1]),$($after[2]),$($after[3])]" + } + else { ($failures -join " ") } + + Add-CheckResult -Name "2pc:oversold-abort" -Passed $passed -Details $details + $script:CurrentFailureList = $null +} + +function Test-ReplicaConvergence { + param([string]$Title = "Book A") + + $failures = [System.Collections.Generic.List[string]]::new() + $script:CurrentFailureList = $failures + $stock = Read-StockOnAllReplicas -Title $Title + $values = @($stock[1], $stock[2], $stock[3]) + $distinct = @($values | Sort-Object -Unique) + Assert-Condition ($distinct.Count -eq 1) "Replicas disagree on '$Title': DB-1=$($stock[1]) DB-2=$($stock[2]) DB-3=$($stock[3])" + + $passed = $failures.Count -eq 0 + $details = if ($passed) { "all three replicas returned $($values[0]) for '$Title'." 
} else { ($failures -join " ") } + Add-CheckResult -Name "convergence:read-all-replicas" -Passed $passed -Details $details + $script:CurrentFailureList = $null +} + +function Test-DbPrimaryFailover { + $failures = [System.Collections.Generic.List[string]]::new() + $script:CurrentFailureList = $failures + + $primaryId = Wait-ForDbPrimary -TimeoutSeconds 20 + $primaryService = "books_database_$primaryId" + Write-Host "Stopping current DB primary $primaryService to test failover..." + + $title = "Book A" + $newPrimary = -1 + try { + $stopResult = Run-Compose @("stop", $primaryService) + if ($stopResult.ExitCode -ne 0) { throw "Failed to stop $primaryService.`n$($stopResult.Output)" } + + Start-Sleep -Seconds 8 + # Verify a new primary is elected from the surviving replicas. + $out = & python scripts/_cp3_db_probe.py find-primary 2>&1 + Assert-Condition ($LASTEXITCODE -eq 0) "no DB primary after failover: $out" + $m = [regex]::Match(($out | Out-String), "primary_id=(\d+)") + $newPrimary = if ($m.Success) { [int]$m.Groups[1].Value } else { -1 } + Assert-Condition ($newPrimary -ne $primaryId -and $newPrimary -ne -1) "Expected a different DB primary, got id=$newPrimary." + } + finally { + Write-Host "Restoring $primaryService..." + $restoreResult = Run-Compose @("up", "-d", $primaryService) + if ($restoreResult.ExitCode -ne 0) { + Add-CheckResult -Name "db-failover:restore" -Passed $false -Details $restoreResult.Output + } + else { + Start-Sleep -Seconds 10 + } + } + + # After the former primary is restored it reclaims the role via the + # Bully tie-breaker (higher replica id wins). Phase 14 added + # kv_store.json persistence (write-then-rename in STATE_DIR), so a + # restarted replica now loads committed stock from disk instead of + # reverting to SEED_STOCK. We still drive one checkout and verify + # all three replicas converge on the same value — an end-to-end + # sanity check that the re-elected primary can still serve 2PC + # after a failover+restore cycle. 
+ Start-Sleep -Seconds 6 + $response = Invoke-Checkout -FilePath "test_checkout.json" + $orderId = ([string]$response.Json.orderId).Trim() + $logs = Wait-For2pcOutcome -OrderId $orderId -ExpectedDecision "COMMIT" -TimeoutSeconds 40 + $orderLines = Get-OrderLogLines -Logs $logs -OrderId $orderId + Assert-Condition (@($orderLines | Where-Object { $_ -match "\[EXEC-" -and $_ -match "2pc_commit_applied" }).Count -ge 1) "2pc_commit_applied missing post-restore for $orderId." + + Start-Sleep -Seconds 3 + $post = Read-StockOnAllReplicas -Title $title + $postValues = @($post[1], $post[2], $post[3]) + $postDistinct = @($postValues | Sort-Object -Unique) + Assert-Condition ($postDistinct.Count -eq 1) "Replicas diverged after failover/restore/commit: DB-1=$($post[1]) DB-2=$($post[2]) DB-3=$($post[3])." + + $passed = $failures.Count -eq 0 + $details = if ($passed) { "DB primary $primaryId stopped, replica $newPrimary elected new primary, writes resumed after replica restore." } else { ($failures -join " ") } + Add-CheckResult -Name "db-failover" -Passed $passed -Details $details + $script:CurrentFailureList = $null +} + +function Test-ParticipantFailureBonus { + # Runs the existing standalone Python test which arms FAIL_NEXT_COMMIT=2 + # on DB-3, submits a checkout, and asserts the coordinator retries until + # the injected failures exhaust and the commit lands. Exit code is + # authoritative. + $failures = [System.Collections.Generic.List[string]]::new() + $script:CurrentFailureList = $failures + $out = & python "order_executor/tests/test_2pc_fail_injection.py" 2>&1 + $ec = $LASTEXITCODE + Assert-Condition ($ec -eq 0) "fail-injection test exit=$ec output=$($out | Out-String)" + $passed = $failures.Count -eq 0 + $details = if ($passed) { "coordinator retry absorbed 2 injected commit failures; commit landed; all 3 replicas converged." 
} else { ($failures -join " ") } + Add-CheckResult -Name "bonus:participant-failure-recovery" -Passed $passed -Details $details + $script:CurrentFailureList = $null +} + +function Test-ConcurrentWritesBonus { + # Runs the standalone Python test which fires 5 concurrent Write RPCs on + # the same key and then 5 on different keys, asserts no torn state, and + # asserts all 3 replicas converge on every key. Exit code is + # authoritative; stdout must also contain the "CONCURRENT WRITES TEST: + # PASSED" banner as a belt-and-braces signal. + $failures = [System.Collections.Generic.List[string]]::new() + $script:CurrentFailureList = $failures + $out = & python "books_database/tests/test_concurrent_writes.py" 2>&1 + $ec = $LASTEXITCODE + $outText = $out | Out-String + Assert-Condition ($ec -eq 0) "concurrent-writes test exit=$ec output=$outText" + Assert-Condition ($outText -match "CONCURRENT WRITES TEST: PASSED") "concurrent-writes banner missing; output=$outText" + $passed = $failures.Count -eq 0 + $details = if ($passed) { "per-key locks serialized 5 same-key writes, fanned out 5 different-key writes, and all 3 replicas converged on every key." } else { ($failures -join " ") } + Add-CheckResult -Name "bonus:concurrent-writes" -Passed $passed -Details $details + $script:CurrentFailureList = $null +} + +Write-Section "Environment" + +$dockerVersion = & docker --version +Add-CheckResult -Name "docker" -Passed ($LASTEXITCODE -eq 0) -Details $dockerVersion + +$composeVersion = & docker compose version +Add-CheckResult -Name "docker-compose" -Passed ($LASTEXITCODE -eq 0) -Details $composeVersion + +$configResult = Run-Compose @("config") +Add-CheckResult -Name "compose-config" -Passed ($configResult.ExitCode -eq 0) -Details "docker compose config exited with code $($configResult.ExitCode)." + +Write-Section "Startup" + +# Always tear down volumes first so every run starts from pristine seed state. 
+# Prior run's stock mutations on disk would otherwise leak into the next run's +# before/after assertions. +$downResult = Run-Compose @("down", "-v") +Add-CheckResult -Name "compose-down" -Passed ($downResult.ExitCode -eq 0) -Details "Cleared previous stack and volumes." + +if ($SkipBuild) { + $upResult = Run-Compose @("up", "-d") + Add-CheckResult -Name "compose-up" -Passed ($upResult.ExitCode -eq 0) -Details "Started stack without rebuild." +} +else { + $upResult = Run-Compose @("up", "--build", "-d") + Add-CheckResult -Name "compose-up" -Passed ($upResult.ExitCode -eq 0) -Details "Started stack with rebuild." +} + +Wait-ForOrchestrator +Add-CheckResult -Name "orchestrator-ready" -Passed $true -Details "HTTP endpoint is reachable." + +$reachableOut = & python scripts/_cp3_db_probe.py all-reachable 2>&1 +Add-CheckResult -Name "db-all-reachable" -Passed ($LASTEXITCODE -eq 0) -Details ($reachableOut | Out-String).Trim() + +$primaryId = Wait-ForDbPrimary -TimeoutSeconds 40 +Add-CheckResult -Name "db-primary-elected" -Passed $true -Details "DB primary is books_database_$primaryId." + +$psResult = Run-Compose @("ps") +Add-CheckResult -Name "compose-ps" -Passed ($psResult.ExitCode -eq 0) -Details "docker compose ps completed." + +Write-Section "Syntax" + +foreach ($path in $pythonFiles) { + python -m py_compile $path + Add-CheckResult -Name "py-compile:$path" -Passed ($LASTEXITCODE -eq 0) -Details "Syntax OK." 
+} + +Write-Section "2PC: happy path" + +Test-ValidCommit | Out-Null + +Write-Section "2PC: oversold -> abort" + +Test-OversoldAbort + +Write-Section "Convergence" + +Test-ReplicaConvergence -Title "Book A" + +if (-not $SkipFailover) { + Write-Section "DB primary failover" + Test-DbPrimaryFailover +} + +if (-not $SkipBonus) { + Write-Section "Bonus: participant-failure recovery" + Test-ParticipantFailureBonus + + Write-Section "Bonus: concurrent writes" + Test-ConcurrentWritesBonus +} + +Write-Section "Summary" + +$passedCount = @($results | Where-Object { $_.Passed }).Count +$failedCount = @($results | Where-Object { -not $_.Passed }).Count + +foreach ($result in $results) { + $status = if ($result.Passed) { "PASS" } else { "FAIL" } + Write-Host ("{0} {1}" -f $status, $result.Name) +} + +Write-Host "" +Write-Host ("Passed: {0}" -f $passedCount) +Write-Host ("Failed: {0}" -f $failedCount) + +if ($failedCount -gt 0) { + exit 1 +} + +exit 0 diff --git a/suggestions/Dockerfile b/suggestions/Dockerfile new file mode 100644 index 000000000..1c35664df --- /dev/null +++ b/suggestions/Dockerfile @@ -0,0 +1,8 @@ +FROM python:3.11 + +WORKDIR /app + +COPY ./suggestions/requirements.txt . 
+RUN pip install --no-cache-dir -r requirements.txt + +CMD python utils/other/hotreload.py "suggestions/src/app.py" \ No newline at end of file diff --git a/suggestions/requirements.txt b/suggestions/requirements.txt new file mode 100644 index 000000000..718548bc2 --- /dev/null +++ b/suggestions/requirements.txt @@ -0,0 +1,5 @@ +grpcio==1.78.0 +grpcio-tools==1.78.0 + + +watchdog==6.0.0 \ No newline at end of file diff --git a/suggestions/src/app.py b/suggestions/src/app.py new file mode 100644 index 000000000..b028ea4ce --- /dev/null +++ b/suggestions/src/app.py @@ -0,0 +1,419 @@ +import os +import sys +import threading +from concurrent import futures + +FILE = __file__ if "__file__" in globals() else os.getenv("PYTHONFILE", "") +suggestions_grpc_path = os.path.abspath( + os.path.join(FILE, "../../../utils/pb/suggestions") +) +sys.path.insert(0, suggestions_grpc_path) + +import grpc +import suggestions_pb2 as suggestions +import suggestions_pb2_grpc as suggestions_grpc + + +SERVICE_INDEX = 2 # [transaction_verification, fraud_detection, suggestions] + +orders = {} +orders_lock = threading.Lock() + +STATIC_BOOKS = [ + { + "bookId": "101", + "title": "Distributed Systems Basics", + "author": "A. Author", + }, + { + "bookId": "102", + "title": "Designing Data-Intensive Applications", + "author": "Martin Kleppmann", + }, + { + "bookId": "103", + "title": "Clean Code", + "author": "Robert C. 
Martin", + }, + { + "bookId": "104", + "title": "The Pragmatic Programmer", + "author": "Andrew Hunt", + }, +] + + +def merge_vc(local_vc, incoming_vc): + return [max(a, b) for a, b in zip(local_vc, incoming_vc)] + + +def tick(vc, idx): + vc = list(vc) + vc[idx] += 1 + return vc + + +def get_order_state(order_id: str): + with orders_lock: + return orders.get(order_id) + + +class SuggestionsService(suggestions_grpc.SuggestionsServiceServicer): + def InitOrder(self, request, context): + order = request.order + + with orders_lock: + orders[order.order_id] = { + "order": order, + "vc": [0, 0, 0], + "lock": threading.Lock(), + "books": [], + # Causal gating state for event g (FinalizeSuggestions). + # g needs BOTH f (PrecomputeSuggestions, local) AND e (CheckCardFraud, from FD). + "f_done": False, + "f_vc": None, + "f_success": True, + "f_message": "", + "e_received": False, + "e_vc": None, + "e_success": True, + "e_message": "", + "g_triggered": False, + # Pipeline result: set when g completes or a failure is final. + "pipeline_done": threading.Event(), + "pipeline_success": False, + "pipeline_message": "", + "pipeline_vc": [0, 0, 0], + "pipeline_books": [], + } + + print(f"[SUG] order={order.order_id} event=InitOrder vc={[0, 0, 0]} success=True") + + return suggestions.EventResponse( + success=True, + message="Suggestions service initialized order.", + vc=suggestions.VectorClock(values=[0, 0, 0]), + ) + + def _complete_pipeline(self, state, success, message, vc, books): + state["pipeline_success"] = success + state["pipeline_message"] = message + state["pipeline_vc"] = vc + state["pipeline_books"] = books + state["pipeline_done"].set() + + def _try_run_g(self, order_id, state): + """Check if both prerequisites for event g are met. 
If so, run FinalizeSuggestions.""" + with state["lock"]: + if state["g_triggered"]: + return + if not (state["f_done"] and state["e_received"]): + return + state["g_triggered"] = True + + f_vc = state["f_vc"] + f_success = state["f_success"] + f_message = state["f_message"] + e_vc = state["e_vc"] + e_success = state["e_success"] + e_message = state["e_message"] + + # If either prerequisite failed, propagate failure. + if not f_success: + print(f"[SUG] order={order_id} event=FinalizeSuggestions skipped (f failed: {f_message})") + self._complete_pipeline(state, False, f_message, f_vc, []) + return + if not e_success: + print(f"[SUG] order={order_id} event=FinalizeSuggestions skipped (e failed: {e_message})") + self._complete_pipeline(state, False, e_message, e_vc, []) + return + + # Both f and e succeeded: merge their VCs and run g. + merged = merge_vc(f_vc, e_vc) + + with state["lock"]: + local_vc = state["vc"] + vc = merge_vc(local_vc, merged) + vc = tick(vc, SERVICE_INDEX) + state["vc"] = vc + + prepared_books = state["books"] + success = len(prepared_books) > 0 + message = ( + "Suggestions finalized." + if success + else "No prepared suggestions available." + ) + + print( + f"[SUG] order={order_id} event=FinalizeSuggestions " + f"vc={vc} success={success} returned_books={len(prepared_books)}" + ) + + self._complete_pipeline(state, success, message, vc, prepared_books) + + def PrecomputeSuggestions(self, request, context): + """Event f: called by TV after event a. 
After processing, checks if e's VC + has arrived so that event g can run.""" + order_id = request.order_id + state = get_order_state(order_id) + if state is None: + return suggestions.EventResponse( + success=False, + message="Order not found in suggestions service.", + vc=suggestions.VectorClock(values=[0, 0, 0]), + ) + + incoming_vc = list(request.vc.values) + + with state["lock"]: + local_vc = state["vc"] + vc = merge_vc(local_vc, incoming_vc) + vc = tick(vc, SERVICE_INDEX) + state["vc"] = vc + + item_count = state["order"].item_count + + if item_count > 0: + state["books"] = STATIC_BOOKS[:2] + success = True + message = "Suggestions prepared." + else: + state["books"] = [] + success = False + message = "Cannot prepare suggestions for empty order." + + print( + f"[SUG] order={order_id} event=PrecomputeSuggestions " + f"vc={vc} success={success} prepared_books={len(state['books'])}" + ) + + # Record f's result and attempt to trigger g. + with state["lock"]: + state["f_done"] = True + state["f_vc"] = vc + state["f_success"] = success + state["f_message"] = message + + self._try_run_g(order_id, state) + + return suggestions.EventResponse( + success=success, + message=message, + vc=suggestions.VectorClock(values=vc), + ) + + def ForwardVC(self, request, context): + """Receive a forwarded VC from another microservice.""" + order_id = request.order_id + source_event = request.source_event + incoming_vc = list(request.vc.values) + success = request.success + message = request.message + + state = get_order_state(order_id) + if state is None: + return suggestions.EventResponse( + success=False, + message="Order not found in suggestions service.", + vc=suggestions.VectorClock(values=[0, 0, 0]), + ) + + print( + f"[SUG] order={order_id} event=ForwardVC source={source_event} " + f"vc={incoming_vc} success={success}" + ) + + if source_event == "e": + with state["lock"]: + state["e_received"] = True + state["e_vc"] = incoming_vc + state["e_success"] = success + 
state["e_message"] = message + + self._try_run_g(order_id, state) + elif source_event == "a": + # a failed: no f will ever come (since TV won't call PrecomputeSuggestions). + # Also no c→e chain, so mark both as failed. + with state["lock"]: + if not state["f_done"]: + state["f_done"] = True + state["f_vc"] = incoming_vc + state["f_success"] = False + state["f_message"] = message + if not state["e_received"]: + state["e_received"] = True + state["e_vc"] = incoming_vc + state["e_success"] = False + state["e_message"] = message + + self._try_run_g(order_id, state) + elif source_event == "f": + # f call itself failed (exception in TV calling SUG) + with state["lock"]: + if not state["f_done"]: + state["f_done"] = True + state["f_vc"] = incoming_vc + state["f_success"] = False + state["f_message"] = message + + self._try_run_g(order_id, state) + elif source_event == "d": + # d call failed (exception in TV calling FD), so e will never complete. + with state["lock"]: + if not state["e_received"]: + state["e_received"] = True + state["e_vc"] = incoming_vc + state["e_success"] = False + state["e_message"] = message + + self._try_run_g(order_id, state) + + return suggestions.EventResponse( + success=True, + message="VC forwarded.", + vc=suggestions.VectorClock(values=incoming_vc), + ) + + def AwaitPipelineResult(self, request, context): + """Block until the full event pipeline completes for this order.""" + order_id = request.order_id + state = get_order_state(order_id) + if state is None: + return suggestions.PipelineResultResponse( + success=False, + message="Order not found in suggestions service.", + vc=suggestions.VectorClock(values=[0, 0, 0]), + ) + + # Wait for pipeline completion (event g or a propagated failure). 
+ state["pipeline_done"].wait(timeout=30.0) + + if not state["pipeline_done"].is_set(): + return suggestions.PipelineResultResponse( + success=False, + message="Pipeline timed out.", + vc=suggestions.VectorClock(values=state["vc"]), + ) + + response = suggestions.PipelineResultResponse( + success=state["pipeline_success"], + message=state["pipeline_message"], + vc=suggestions.VectorClock(values=state["pipeline_vc"]), + ) + + for book in state["pipeline_books"]: + b = response.books.add() + b.bookId = book["bookId"] + b.title = book["title"] + b.author = book["author"] + + print( + f"[SUG] order={order_id} event=AwaitPipelineResult " + f"success={state['pipeline_success']} vc={state['pipeline_vc']}" + ) + + return response + + def FinalizeSuggestions(self, request, context): + """Event g: kept as an RPC for backward compat, but now triggered internally.""" + order_id = request.order_id + state = get_order_state(order_id) + if state is None: + return suggestions.SuggestionsEventResponse( + success=False, + message="Order not found in suggestions service.", + vc=suggestions.VectorClock(values=[0, 0, 0]), + books=[], + ) + + incoming_vc = list(request.vc.values) + + with state["lock"]: + local_vc = state["vc"] + vc = merge_vc(local_vc, incoming_vc) + vc = tick(vc, SERVICE_INDEX) + state["vc"] = vc + + prepared_books = state["books"] + success = len(prepared_books) > 0 + message = ( + "Suggestions finalized." + if success + else "No prepared suggestions available." 
+ ) + + response = suggestions.SuggestionsEventResponse( + success=success, + message=message, + vc=suggestions.VectorClock(values=vc), + ) + + for book in prepared_books: + b = response.books.add() + b.bookId = book["bookId"] + b.title = book["title"] + b.author = book["author"] + + print( + f"[SUG] order={order_id} event=FinalizeSuggestions " + f"vc={vc} success={success} returned_books={len(prepared_books)}" + ) + + return response + + def ClearOrder(self, request, context): + order_id = request.order_id + final_vc = list(request.final_vc.values) + + with orders_lock: + state = orders.get(order_id) + + if state is None: + return suggestions.EventResponse( + success=False, + message="Order not found in suggestions service.", + vc=suggestions.VectorClock(values=[0, 0, 0]), + ) + + with state["lock"]: + local_vc = state["vc"] + can_clear = all(a <= b for a, b in zip(local_vc, final_vc)) + + if can_clear: + del orders[order_id] + + success = can_clear + message = ( + "Order cleared from suggestions service." + if success + else "Cannot clear order: local VC is ahead of final VC." + ) + + print( + f"[SUG] order={order_id} event=ClearOrder " + f"local_vc={local_vc} final_vc={final_vc} success={success}" + ) + + return suggestions.EventResponse( + success=success, + message=message, + vc=suggestions.VectorClock(values=final_vc), + ) + + +def serve(): + server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + suggestions_grpc.add_SuggestionsServiceServicer_to_server( + SuggestionsService(), server + ) + + port = "50053" + server.add_insecure_port("[::]:" + port) + server.start() + print(f"Suggestions server started. 
Listening on port {port}.") + server.wait_for_termination() + + +if __name__ == "__main__": + serve() diff --git a/test_checkout.json b/test_checkout.json new file mode 100644 index 000000000..a78ba6f7a --- /dev/null +++ b/test_checkout.json @@ -0,0 +1,20 @@ +{ + "user": { + "name": "Test User", + "contact": "test@example.com", + "creditCard": { + "number": "4111111111111111", + "expirationDate": "12/30", + "cvv": "123" + }, + "userComment": "Please handle with care." + }, + "items": [ + { + "name": "Book A", + "quantity": 1 + } + ], + "shippingMethod": "Standard", + "termsAndConditionsAccepted": true +} diff --git a/test_checkout_empty_items.json b/test_checkout_empty_items.json new file mode 100644 index 000000000..df39e004d --- /dev/null +++ b/test_checkout_empty_items.json @@ -0,0 +1,15 @@ +{ + "user": { + "name": "Test User", + "contact": "test@example.com", + "creditCard": { + "number": "4111111111111111", + "expirationDate": "12/30", + "cvv": "123" + }, + "userComment": "Please handle with care." + }, + "items": [], + "shippingMethod": "Standard", + "termsAndConditionsAccepted": true +} diff --git a/test_checkout_fraud.json b/test_checkout_fraud.json new file mode 100644 index 000000000..8c7677348 --- /dev/null +++ b/test_checkout_fraud.json @@ -0,0 +1,20 @@ +{ + "user": { + "name": "Test User", + "contact": "test@example.com", + "creditCard": { + "number": "4111111111110000", + "expirationDate": "12/30", + "cvv": "123" + }, + "userComment": "Please handle with care." 
+ }, + "items": [ + { + "name": "Book A", + "quantity": 1 + } + ], + "shippingMethod": "Standard", + "termsAndConditionsAccepted": true +} diff --git a/test_checkout_oversold.json b/test_checkout_oversold.json new file mode 100644 index 000000000..1b63d3101 --- /dev/null +++ b/test_checkout_oversold.json @@ -0,0 +1,20 @@ +{ + "user": { + "name": "Oversold Oliver", + "contact": "oversold@example.com", + "creditCard": { + "number": "4111111111111111", + "expirationDate": "12/30", + "cvv": "123" + }, + "userComment": "Deliberately requests more copies than exist to exercise 2PC abort." + }, + "items": [ + { + "name": "Book A", + "quantity": 999 + } + ], + "shippingMethod": "Standard", + "termsAndConditionsAccepted": true +} diff --git a/test_checkout_terms_false.json b/test_checkout_terms_false.json new file mode 100644 index 000000000..82087d4b1 --- /dev/null +++ b/test_checkout_terms_false.json @@ -0,0 +1,20 @@ +{ + "user": { + "name": "Test User", + "contact": "test@example.com", + "creditCard": { + "number": "4111111111111111", + "expirationDate": "12/30", + "cvv": "123" + }, + "userComment": "Please handle with care." + }, + "items": [ + { + "name": "Book A", + "quantity": 1 + } + ], + "shippingMethod": "Standard", + "termsAndConditionsAccepted": false +} diff --git a/transaction_verification/Dockerfile b/transaction_verification/Dockerfile new file mode 100644 index 000000000..ad4936c4a --- /dev/null +++ b/transaction_verification/Dockerfile @@ -0,0 +1,8 @@ +FROM python:3.11 + +WORKDIR /app + +COPY ./transaction_verification/requirements.txt . 
+RUN pip install --no-cache-dir -r requirements.txt + +CMD python utils/other/hotreload.py "transaction_verification/src/app.py" \ No newline at end of file diff --git a/transaction_verification/requirements.txt b/transaction_verification/requirements.txt new file mode 100644 index 000000000..718548bc2 --- /dev/null +++ b/transaction_verification/requirements.txt @@ -0,0 +1,5 @@ +grpcio==1.78.0 +grpcio-tools==1.78.0 + + +watchdog==6.0.0 \ No newline at end of file diff --git a/transaction_verification/src/app.py b/transaction_verification/src/app.py new file mode 100644 index 000000000..0c8c287e0 --- /dev/null +++ b/transaction_verification/src/app.py @@ -0,0 +1,350 @@ +import os +import sys +import threading +from concurrent import futures + +FILE = __file__ if "__file__" in globals() else os.getenv("PYTHONFILE", "") +transaction_verification_grpc_path = os.path.abspath( + os.path.join(FILE, "../../../utils/pb/transaction_verification") +) +sys.path.insert(0, transaction_verification_grpc_path) + +fraud_detection_grpc_path = os.path.abspath( + os.path.join(FILE, "../../../utils/pb/fraud_detection") +) +sys.path.insert(0, fraud_detection_grpc_path) + +suggestions_grpc_path = os.path.abspath( + os.path.join(FILE, "../../../utils/pb/suggestions") +) +sys.path.insert(0, suggestions_grpc_path) + +import grpc +import transaction_verification_pb2 as transaction_verification +import transaction_verification_pb2_grpc as transaction_verification_grpc +import fraud_detection_pb2 as fraud_detection +import fraud_detection_pb2_grpc as fraud_detection_grpc +import suggestions_pb2 as suggestions +import suggestions_pb2_grpc as suggestions_grpc + + +SERVICE_INDEX = 0 # [transaction_verification, fraud_detection, suggestions] + +orders = {} +orders_lock = threading.Lock() + + +def merge_vc(local_vc, incoming_vc): + return [max(a, b) for a, b in zip(local_vc, incoming_vc)] + + +def tick(vc, idx): + vc = list(vc) + vc[idx] += 1 + return vc + + +def extract_card_digits(card: str) -> 
str: + return "".join(c for c in str(card) if c.isdigit()) + + +def mask_fixed(card: str) -> str: + digits = extract_card_digits(card) + masked = "*" * 12 + digits[-4:].rjust(4, "*") + return " ".join(masked[i:i + 4] for i in range(0, 16, 4)) + + +def get_order_state(order_id: str): + with orders_lock: + return orders.get(order_id) + + +def forward_to_fd(order_id, source_event, vc, success, message): + try: + with grpc.insecure_channel("fraud_detection:50051") as channel: + stub = fraud_detection_grpc.FraudDetectionServiceStub(channel) + req = fraud_detection.VCForward( + order_id=order_id, + source_event=source_event, + vc=fraud_detection.VectorClock(values=vc), + success=success, + message=message, + ) + stub.ForwardVC(req, timeout=10.0) + except Exception as e: + print(f"[TV] order={order_id} forward_to_fd_error source={source_event} error={e}") + + +def forward_to_sug(order_id, source_event, vc, success, message): + try: + with grpc.insecure_channel("suggestions:50053") as channel: + stub = suggestions_grpc.SuggestionsServiceStub(channel) + req = suggestions.VCForward( + order_id=order_id, + source_event=source_event, + vc=suggestions.VectorClock(values=vc), + success=success, + message=message, + ) + stub.ForwardVC(req, timeout=10.0) + except Exception as e: + print(f"[TV] order={order_id} forward_to_sug_error source={source_event} error={e}") + + +def call_fd_check_user_fraud(order_id, vc): + try: + with grpc.insecure_channel("fraud_detection:50051") as channel: + stub = fraud_detection_grpc.FraudDetectionServiceStub(channel) + req = fraud_detection.EventRequest( + order_id=order_id, + vc=fraud_detection.VectorClock(values=vc), + ) + return stub.CheckUserFraud(req, timeout=10.0) + except Exception as e: + print(f"[TV] order={order_id} call_fd_check_user_fraud_error={e}") + return None + + +def call_sug_precompute(order_id, vc): + try: + with grpc.insecure_channel("suggestions:50053") as channel: + stub = suggestions_grpc.SuggestionsServiceStub(channel) + req 
= suggestions.EventRequest( + order_id=order_id, + vc=suggestions.VectorClock(values=vc), + ) + return stub.PrecomputeSuggestions(req, timeout=10.0) + except Exception as e: + print(f"[TV] order={order_id} call_sug_precompute_error={e}") + return None + + +class TransactionVerificationService( + transaction_verification_grpc.TransactionVerificationServiceServicer +): + def InitOrder(self, request, context): + order = request.order + + with orders_lock: + orders[order.order_id] = { + "order": order, + "vc": [0, 0, 0], + "lock": threading.Lock(), + } + + print(f"[TV] order={order.order_id} event=InitOrder vc={[0, 0, 0]} success=True") + + return transaction_verification.EventResponse( + success=True, + message="Transaction verification service initialized order.", + vc=transaction_verification.VectorClock(values=[0, 0, 0]), + ) + + def _process_event(self, order_id, incoming_vc, event_name, check_fn): + state = get_order_state(order_id) + if state is None: + return None, False, "Order not found in transaction verification service.", [0, 0, 0] + + with state["lock"]: + local_vc = state["vc"] + vc = merge_vc(local_vc, incoming_vc) + vc = tick(vc, SERVICE_INDEX) + state["vc"] = vc + + success, message = check_fn(state) + + print( + f"[TV] order={order_id} event={event_name} " + f"vc={vc} success={success}" + ) + + return vc, success, message, vc + + def ValidateItems(self, request, context): + """Event a: root event. After processing, chains to c (ValidateCardFormat) + and forwards VC to SUG (PrecomputeSuggestions) and eventually to FD.""" + order_id = request.order_id + incoming_vc = list(request.vc.values) + + def check(state): + item_count = state["order"].item_count + success = item_count > 0 + message = "Items check passed." if success else "No items in order." 
+ return success, message + + vc, success, message, _ = self._process_event(order_id, incoming_vc, "ValidateItems", check) + if vc is None: + return transaction_verification.EventResponse( + success=False, message=message, + vc=transaction_verification.VectorClock(values=[0, 0, 0]), + ) + + # After event a completes, trigger downstream events in background threads. + # TV itself handles event c (ValidateCardFormat) and forwards to FD and SUG. + def chain_after_a(): + if not success: + # a failed: propagate failure to FD and SUG + forward_to_fd(order_id, "a", vc, False, message) + forward_to_sug(order_id, "a", vc, False, message) + return + + # Call SUG.PrecomputeSuggestions (event f) — TV forwards a's VC to SUG + sug_resp = call_sug_precompute(order_id, vc) + if sug_resp is None: + forward_to_sug(order_id, "f", vc, False, "PrecomputeSuggestions call failed.") + + # Process event c (ValidateCardFormat) internally on TV + c_vc, c_success, c_message, _ = self._process_event( + order_id, vc, "ValidateCardFormat", self._card_format_check + ) + + if c_vc is None: + forward_to_fd(order_id, "c", vc, False, c_message) + return + + # Forward c's result to FD (FD needs c's VC to gate event e) + forward_to_fd(order_id, "c", c_vc, c_success, c_message) + + threading.Thread(target=chain_after_a, daemon=True).start() + + return transaction_verification.EventResponse( + success=success, message=message, + vc=transaction_verification.VectorClock(values=vc), + ) + + def _card_format_check(self, state): + order = state["order"] + card_digits = extract_card_digits(order.card_number) + + success = True + message = "Card format check passed." + + if not order.card_number or not order.expiration_date or not order.cvv: + success = False + message = "Missing credit card information." + elif len(card_digits) != 16: + success = False + message = "Invalid card number." + return success, message + + def ValidateUserData(self, request, context): + """Event b: root event. 
After processing, forwards VC to FD (CheckUserFraud).""" + order_id = request.order_id + incoming_vc = list(request.vc.values) + + def check(state): + order = state["order"] + success = True + message = "User data check passed." + + if not order.user_name: + success = False + message = "Missing user name." + elif not order.user_contact: + success = False + message = "Missing user contact." + elif not order.terms_accepted: + success = False + message = "Terms and conditions not accepted." + return success, message + + vc, success, message, _ = self._process_event(order_id, incoming_vc, "ValidateUserData", check) + if vc is None: + return transaction_verification.EventResponse( + success=False, message=message, + vc=transaction_verification.VectorClock(values=[0, 0, 0]), + ) + + # After event b completes, forward to FD in a background thread. + def chain_after_b(): + if not success: + # b failed: propagate failure downstream (d will never run). + forward_to_fd(order_id, "d", vc, False, message) + return + + fd_resp = call_fd_check_user_fraud(order_id, vc) + if fd_resp is None: + # If the FD call itself failed, propagate failure to SUG + forward_to_sug(order_id, "d", vc, False, "CheckUserFraud call failed.") + + threading.Thread(target=chain_after_b, daemon=True).start() + + return transaction_verification.EventResponse( + success=success, message=message, + vc=transaction_verification.VectorClock(values=vc), + ) + + def ValidateCardFormat(self, request, context): + """Event c: kept as an RPC for backward compat, but now called internally by TV.""" + order_id = request.order_id + incoming_vc = list(request.vc.values) + + vc, success, message, _ = self._process_event( + order_id, incoming_vc, "ValidateCardFormat", self._card_format_check + ) + if vc is None: + return transaction_verification.EventResponse( + success=False, message=message, + vc=transaction_verification.VectorClock(values=[0, 0, 0]), + ) + + return transaction_verification.EventResponse( + 
success=success, message=message, + vc=transaction_verification.VectorClock(values=vc), + ) + + def ClearOrder(self, request, context): + order_id = request.order_id + final_vc = list(request.final_vc.values) + + with orders_lock: + state = orders.get(order_id) + + if state is None: + return transaction_verification.EventResponse( + success=False, + message="Order not found in transaction verification service.", + vc=transaction_verification.VectorClock(values=[0, 0, 0]), + ) + + with state["lock"]: + local_vc = state["vc"] + can_clear = all(a <= b for a, b in zip(local_vc, final_vc)) + + if can_clear: + del orders[order_id] + + success = can_clear + message = ( + "Order cleared from transaction verification service." + if success + else "Cannot clear order: local VC is ahead of final VC." + ) + + print( + f"[TV] order={order_id} event=ClearOrder " + f"local_vc={local_vc} final_vc={final_vc} success={success}" + ) + + return transaction_verification.EventResponse( + success=success, + message=message, + vc=transaction_verification.VectorClock(values=final_vc), + ) + + +def serve(): + server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + transaction_verification_grpc.add_TransactionVerificationServiceServicer_to_server( + TransactionVerificationService(), server + ) + + port = "50052" + server.add_insecure_port("[::]:" + port) + server.start() + print(f"Transaction verification server started. Listening on port {port}.") + server.wait_for_termination() + + +if __name__ == "__main__": + serve() diff --git a/utils/README.md b/utils/README.md deleted file mode 100644 index 86bb64707..000000000 --- a/utils/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# Utils - -This folder contains protocol specification files such as OpenAPI, gRPC, etc. - -## OpenAPI - -OpenAPI specification example files are located in the `api` folder. The specification is written in YAML format. You can use [Swagger Editor](https://editor.swagger.io/) to view or edit the specification. 
The specification is usually used to generate API client code and documentation for various languages. Check the practice session guide for more information on how to proceed. - -## gRPC - -gRPC protocol specification example files are located in the `pb` folder. The specification is written in Protocol Buffers format (`.proto`). Read more about it [here](https://grpc.io/docs/languages/python/quickstart/). -To generate the grpc Python code from the `.proto` file, first you need to install the gRPC tools: - -```bash -python -m pip install grpcio-tools -``` - -Then, you can generate the gRPC Python code using the following command: - -```bash -python -m grpc_tools.protoc -I. --python_out=. --pyi_out=. --grpc_python_out=. ./yourprotofile.proto -``` - -It should generate 3 files: `yourprotofile_pb2.py`, `yourprotofile_pb2_grpc.py` and `yourprotofile_pb2.pyi`. The generated code will be located in the same folder as the `.proto` file. You can use the generated code to implement the gRPC server and client code. Check the example app code (f.e. the orchestrator app) and the practice session guide for more information. - -Note: The generated code is not meant to be edited manually. If you need to make changes to the protocol, edit the `.proto` file and regenerate the code. The generated code will be overwritten. When importing the generated code from the current folder, the folder should contain an empty `__init__.py` file. Check the example app code (f.e. the orchestrator app) to see how to import the generated gRPC code. - -## Other - -In the folder `other` you can find the python script `hotreload.py` that can be used to restart a service when changes to the code are made. This script is used by each Docker container as the entrypoint, and it listens for changes in each container `/app` folder, restarting the respective service. This way, you can code without having to restart any containers manually. 
\ No newline at end of file diff --git a/utils/other/hotreload.py b/utils/other/hotreload.py index b893455c9..7fb04b2bf 100644 --- a/utils/other/hotreload.py +++ b/utils/other/hotreload.py @@ -27,6 +27,13 @@ def on_modified(self, event): if event.is_directory or '__pycache__' in event.src_path: return # Ignore directories and __pycache__ + # Skip runtime state dirs (e.g. /app/state for the 2PC participant + # write-ahead log). Writes there are transaction data, not source + # code changes, so a reload would drop the in-memory pending buffer + # the coordinator is still trying to reach. + if '/app/state/' in event.src_path or event.src_path.startswith('/app/state'): + return + # Track pending files and their last modification time self.pending_files[event.src_path] = time.time() diff --git a/utils/pb/books_database/__init__.py b/utils/pb/books_database/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/utils/pb/books_database/books_database.proto b/utils/pb/books_database/books_database.proto new file mode 100644 index 000000000..8a81edd9d --- /dev/null +++ b/utils/pb/books_database/books_database.proto @@ -0,0 +1,130 @@ +syntax = "proto3"; + +package books_database; + +service BooksDatabaseService { + // Client-facing RPCs. In Phase 2 clients should send these to the primary. + rpc Read (ReadRequest) returns (ReadResponse); + rpc Write (WriteRequest) returns (WriteResponse); + + // Debug/ops RPC that reads the local replica's copy without the + // primary-only check. Used by the convergence check in + // scripts/checkpoint3-checks.ps1 to verify that all three replicas hold + // the same value after a committed write. Never used by the orchestrator + // or the 2PC coordinator. + rpc ReadLocal (ReadRequest) returns (ReadResponse); + + // Internal: primary -> backup synchronous replication of a committed write. + rpc ReplicateWrite (ReplicateWriteRequest) returns (ReplicateWriteResponse); + + // Primary discovery. 
Any replica answers; the response points to the + // replica that currently believes itself to be primary (or replies + // leader_id=0 if no primary is known yet). + rpc WhoIsPrimary (WhoIsPrimaryRequest) returns (WhoIsPrimaryResponse); + + // Bully-style election, mirroring the pattern already used by the + // order_executor control service. + rpc Election (ElectionRequest) returns (ElectionResponse); + rpc Coordinator (CoordinatorRequest) returns (Ack); + rpc Heartbeat (HeartbeatRequest) returns (Ack); + + // 2PC participant-side RPCs. The primary acts as the participant; backups + // pick up committed state via ReplicateWrite inside Commit. + rpc Prepare (PrepareRequest) returns (PrepareResponse); + rpc Commit (CommitRequest) returns (CommitResponse); + rpc Abort (AbortRequest) returns (AbortResponse); +} + +message ReadRequest { + string title = 1; +} + +message ReadResponse { + bool success = 1; + int32 quantity = 2; + string message = 3; +} + +message WriteRequest { + string title = 1; + int32 quantity = 2; +} + +message WriteResponse { + bool success = 1; + string message = 2; +} + +message ReplicateWriteRequest { + string title = 1; + int32 quantity = 2; + int64 seq = 3; + int32 from_replica = 4; +} + +message ReplicateWriteResponse { + bool success = 1; + string message = 2; +} + +message WhoIsPrimaryRequest {} + +message WhoIsPrimaryResponse { + int32 leader_id = 1; + string leader_addr = 2; +} + +message ElectionRequest { + int32 candidate_id = 1; +} + +message ElectionResponse { + bool alive = 1; +} + +message CoordinatorRequest { + int32 leader_id = 1; +} + +message HeartbeatRequest { + int32 leader_id = 1; +} + +message Ack { + bool ok = 1; +} + +// --- 2PC participant-side messages --- + +message PrepareItem { + string title = 1; + int32 quantity = 2; +} + +message PrepareRequest { + string order_id = 1; + repeated PrepareItem items = 2; +} + +message PrepareResponse { + bool vote_commit = 1; + string message = 2; +} + +message CommitRequest { + 
string order_id = 1; +} + +message CommitResponse { + bool success = 1; + string message = 2; +} + +message AbortRequest { + string order_id = 1; +} + +message AbortResponse { + bool success = 1; + string message = 2; +} diff --git a/utils/pb/books_database/books_database_pb2.py b/utils/pb/books_database/books_database_pb2.py new file mode 100644 index 000000000..5fce39236 --- /dev/null +++ b/utils/pb/books_database/books_database_pb2.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE +# source: books_database.proto +# Protobuf Python Version: 5.29.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 0, + '', + 'books_database.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14\x62ooks_database.proto\x12\x0e\x62ooks_database\"\x1c\n\x0bReadRequest\x12\r\n\x05title\x18\x01 \x01(\t\"B\n\x0cReadResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x10\n\x08quantity\x18\x02 \x01(\x05\x12\x0f\n\x07message\x18\x03 \x01(\t\"/\n\x0cWriteRequest\x12\r\n\x05title\x18\x01 \x01(\t\x12\x10\n\x08quantity\x18\x02 \x01(\x05\"1\n\rWriteResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t\"[\n\x15ReplicateWriteRequest\x12\r\n\x05title\x18\x01 \x01(\t\x12\x10\n\x08quantity\x18\x02 \x01(\x05\x12\x0b\n\x03seq\x18\x03 \x01(\x03\x12\x14\n\x0c\x66rom_replica\x18\x04 \x01(\x05\":\n\x16ReplicateWriteResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 
\x01(\t\"\x15\n\x13WhoIsPrimaryRequest\">\n\x14WhoIsPrimaryResponse\x12\x11\n\tleader_id\x18\x01 \x01(\x05\x12\x13\n\x0bleader_addr\x18\x02 \x01(\t\"\'\n\x0f\x45lectionRequest\x12\x14\n\x0c\x63\x61ndidate_id\x18\x01 \x01(\x05\"!\n\x10\x45lectionResponse\x12\r\n\x05\x61live\x18\x01 \x01(\x08\"\'\n\x12\x43oordinatorRequest\x12\x11\n\tleader_id\x18\x01 \x01(\x05\"%\n\x10HeartbeatRequest\x12\x11\n\tleader_id\x18\x01 \x01(\x05\"\x11\n\x03\x41\x63k\x12\n\n\x02ok\x18\x01 \x01(\x08\".\n\x0bPrepareItem\x12\r\n\x05title\x18\x01 \x01(\t\x12\x10\n\x08quantity\x18\x02 \x01(\x05\"N\n\x0ePrepareRequest\x12\x10\n\x08order_id\x18\x01 \x01(\t\x12*\n\x05items\x18\x02 \x03(\x0b\x32\x1b.books_database.PrepareItem\"7\n\x0fPrepareResponse\x12\x13\n\x0bvote_commit\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t\"!\n\rCommitRequest\x12\x10\n\x08order_id\x18\x01 \x01(\t\"2\n\x0e\x43ommitResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t\" \n\x0c\x41\x62ortRequest\x12\x10\n\x08order_id\x18\x01 \x01(\t\"1\n\rAbortResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t2\xd9\x06\n\x14\x42ooksDatabaseService\x12\x41\n\x04Read\x12\x1b.books_database.ReadRequest\x1a\x1c.books_database.ReadResponse\x12\x44\n\x05Write\x12\x1c.books_database.WriteRequest\x1a\x1d.books_database.WriteResponse\x12\x46\n\tReadLocal\x12\x1b.books_database.ReadRequest\x1a\x1c.books_database.ReadResponse\x12_\n\x0eReplicateWrite\x12%.books_database.ReplicateWriteRequest\x1a&.books_database.ReplicateWriteResponse\x12Y\n\x0cWhoIsPrimary\x12#.books_database.WhoIsPrimaryRequest\x1a$.books_database.WhoIsPrimaryResponse\x12M\n\x08\x45lection\x12\x1f.books_database.ElectionRequest\x1a .books_database.ElectionResponse\x12\x46\n\x0b\x43oordinator\x12\".books_database.CoordinatorRequest\x1a\x13.books_database.Ack\x12\x42\n\tHeartbeat\x12 
.books_database.HeartbeatRequest\x1a\x13.books_database.Ack\x12J\n\x07Prepare\x12\x1e.books_database.PrepareRequest\x1a\x1f.books_database.PrepareResponse\x12G\n\x06\x43ommit\x12\x1d.books_database.CommitRequest\x1a\x1e.books_database.CommitResponse\x12\x44\n\x05\x41\x62ort\x12\x1c.books_database.AbortRequest\x1a\x1d.books_database.AbortResponseb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'books_database_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + DESCRIPTOR._loaded_options = None + _globals['_READREQUEST']._serialized_start=40 + _globals['_READREQUEST']._serialized_end=68 + _globals['_READRESPONSE']._serialized_start=70 + _globals['_READRESPONSE']._serialized_end=136 + _globals['_WRITEREQUEST']._serialized_start=138 + _globals['_WRITEREQUEST']._serialized_end=185 + _globals['_WRITERESPONSE']._serialized_start=187 + _globals['_WRITERESPONSE']._serialized_end=236 + _globals['_REPLICATEWRITEREQUEST']._serialized_start=238 + _globals['_REPLICATEWRITEREQUEST']._serialized_end=329 + _globals['_REPLICATEWRITERESPONSE']._serialized_start=331 + _globals['_REPLICATEWRITERESPONSE']._serialized_end=389 + _globals['_WHOISPRIMARYREQUEST']._serialized_start=391 + _globals['_WHOISPRIMARYREQUEST']._serialized_end=412 + _globals['_WHOISPRIMARYRESPONSE']._serialized_start=414 + _globals['_WHOISPRIMARYRESPONSE']._serialized_end=476 + _globals['_ELECTIONREQUEST']._serialized_start=478 + _globals['_ELECTIONREQUEST']._serialized_end=517 + _globals['_ELECTIONRESPONSE']._serialized_start=519 + _globals['_ELECTIONRESPONSE']._serialized_end=552 + _globals['_COORDINATORREQUEST']._serialized_start=554 + _globals['_COORDINATORREQUEST']._serialized_end=593 + _globals['_HEARTBEATREQUEST']._serialized_start=595 + _globals['_HEARTBEATREQUEST']._serialized_end=632 + _globals['_ACK']._serialized_start=634 + _globals['_ACK']._serialized_end=651 + 
_globals['_PREPAREITEM']._serialized_start=653 + _globals['_PREPAREITEM']._serialized_end=699 + _globals['_PREPAREREQUEST']._serialized_start=701 + _globals['_PREPAREREQUEST']._serialized_end=779 + _globals['_PREPARERESPONSE']._serialized_start=781 + _globals['_PREPARERESPONSE']._serialized_end=836 + _globals['_COMMITREQUEST']._serialized_start=838 + _globals['_COMMITREQUEST']._serialized_end=871 + _globals['_COMMITRESPONSE']._serialized_start=873 + _globals['_COMMITRESPONSE']._serialized_end=923 + _globals['_ABORTREQUEST']._serialized_start=925 + _globals['_ABORTREQUEST']._serialized_end=957 + _globals['_ABORTRESPONSE']._serialized_start=959 + _globals['_ABORTRESPONSE']._serialized_end=1008 + _globals['_BOOKSDATABASESERVICE']._serialized_start=1011 + _globals['_BOOKSDATABASESERVICE']._serialized_end=1868 +# @@protoc_insertion_point(module_scope) diff --git a/utils/pb/books_database/books_database_pb2.pyi b/utils/pb/books_database/books_database_pb2.pyi new file mode 100644 index 000000000..1cf10d4f6 --- /dev/null +++ b/utils/pb/books_database/books_database_pb2.pyi @@ -0,0 +1,152 @@ +from google.protobuf.internal import containers as _containers +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class ReadRequest(_message.Message): + __slots__ = ("title",) + TITLE_FIELD_NUMBER: _ClassVar[int] + title: str + def __init__(self, title: _Optional[str] = ...) -> None: ... + +class ReadResponse(_message.Message): + __slots__ = ("success", "quantity", "message") + SUCCESS_FIELD_NUMBER: _ClassVar[int] + QUANTITY_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + success: bool + quantity: int + message: str + def __init__(self, success: bool = ..., quantity: _Optional[int] = ..., message: _Optional[str] = ...) -> None: ... 
+ +class WriteRequest(_message.Message): + __slots__ = ("title", "quantity") + TITLE_FIELD_NUMBER: _ClassVar[int] + QUANTITY_FIELD_NUMBER: _ClassVar[int] + title: str + quantity: int + def __init__(self, title: _Optional[str] = ..., quantity: _Optional[int] = ...) -> None: ... + +class WriteResponse(_message.Message): + __slots__ = ("success", "message") + SUCCESS_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + success: bool + message: str + def __init__(self, success: bool = ..., message: _Optional[str] = ...) -> None: ... + +class ReplicateWriteRequest(_message.Message): + __slots__ = ("title", "quantity", "seq", "from_replica") + TITLE_FIELD_NUMBER: _ClassVar[int] + QUANTITY_FIELD_NUMBER: _ClassVar[int] + SEQ_FIELD_NUMBER: _ClassVar[int] + FROM_REPLICA_FIELD_NUMBER: _ClassVar[int] + title: str + quantity: int + seq: int + from_replica: int + def __init__(self, title: _Optional[str] = ..., quantity: _Optional[int] = ..., seq: _Optional[int] = ..., from_replica: _Optional[int] = ...) -> None: ... + +class ReplicateWriteResponse(_message.Message): + __slots__ = ("success", "message") + SUCCESS_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + success: bool + message: str + def __init__(self, success: bool = ..., message: _Optional[str] = ...) -> None: ... + +class WhoIsPrimaryRequest(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class WhoIsPrimaryResponse(_message.Message): + __slots__ = ("leader_id", "leader_addr") + LEADER_ID_FIELD_NUMBER: _ClassVar[int] + LEADER_ADDR_FIELD_NUMBER: _ClassVar[int] + leader_id: int + leader_addr: str + def __init__(self, leader_id: _Optional[int] = ..., leader_addr: _Optional[str] = ...) -> None: ... + +class ElectionRequest(_message.Message): + __slots__ = ("candidate_id",) + CANDIDATE_ID_FIELD_NUMBER: _ClassVar[int] + candidate_id: int + def __init__(self, candidate_id: _Optional[int] = ...) -> None: ... 
+ +class ElectionResponse(_message.Message): + __slots__ = ("alive",) + ALIVE_FIELD_NUMBER: _ClassVar[int] + alive: bool + def __init__(self, alive: bool = ...) -> None: ... + +class CoordinatorRequest(_message.Message): + __slots__ = ("leader_id",) + LEADER_ID_FIELD_NUMBER: _ClassVar[int] + leader_id: int + def __init__(self, leader_id: _Optional[int] = ...) -> None: ... + +class HeartbeatRequest(_message.Message): + __slots__ = ("leader_id",) + LEADER_ID_FIELD_NUMBER: _ClassVar[int] + leader_id: int + def __init__(self, leader_id: _Optional[int] = ...) -> None: ... + +class Ack(_message.Message): + __slots__ = ("ok",) + OK_FIELD_NUMBER: _ClassVar[int] + ok: bool + def __init__(self, ok: bool = ...) -> None: ... + +class PrepareItem(_message.Message): + __slots__ = ("title", "quantity") + TITLE_FIELD_NUMBER: _ClassVar[int] + QUANTITY_FIELD_NUMBER: _ClassVar[int] + title: str + quantity: int + def __init__(self, title: _Optional[str] = ..., quantity: _Optional[int] = ...) -> None: ... + +class PrepareRequest(_message.Message): + __slots__ = ("order_id", "items") + ORDER_ID_FIELD_NUMBER: _ClassVar[int] + ITEMS_FIELD_NUMBER: _ClassVar[int] + order_id: str + items: _containers.RepeatedCompositeFieldContainer[PrepareItem] + def __init__(self, order_id: _Optional[str] = ..., items: _Optional[_Iterable[_Union[PrepareItem, _Mapping]]] = ...) -> None: ... + +class PrepareResponse(_message.Message): + __slots__ = ("vote_commit", "message") + VOTE_COMMIT_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + vote_commit: bool + message: str + def __init__(self, vote_commit: bool = ..., message: _Optional[str] = ...) -> None: ... + +class CommitRequest(_message.Message): + __slots__ = ("order_id",) + ORDER_ID_FIELD_NUMBER: _ClassVar[int] + order_id: str + def __init__(self, order_id: _Optional[str] = ...) -> None: ... 
+ +class CommitResponse(_message.Message): + __slots__ = ("success", "message") + SUCCESS_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + success: bool + message: str + def __init__(self, success: bool = ..., message: _Optional[str] = ...) -> None: ... + +class AbortRequest(_message.Message): + __slots__ = ("order_id",) + ORDER_ID_FIELD_NUMBER: _ClassVar[int] + order_id: str + def __init__(self, order_id: _Optional[str] = ...) -> None: ... + +class AbortResponse(_message.Message): + __slots__ = ("success", "message") + SUCCESS_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + success: bool + message: str + def __init__(self, success: bool = ..., message: _Optional[str] = ...) -> None: ... diff --git a/utils/pb/books_database/books_database_pb2_grpc.py b/utils/pb/books_database/books_database_pb2_grpc.py new file mode 100644 index 000000000..f8d838ee6 --- /dev/null +++ b/utils/pb/books_database/books_database_pb2_grpc.py @@ -0,0 +1,541 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc +import warnings + +import books_database_pb2 as books__database__pb2 + +GRPC_GENERATED_VERSION = '1.70.0' +GRPC_VERSION = grpc.__version__ +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f'The grpc package installed is at version {GRPC_VERSION},' + + f' but the generated code in books_database_pb2_grpc.py depends on' + + f' grpcio>={GRPC_GENERATED_VERSION}.' + + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' 
+ ) + + +class BooksDatabaseServiceStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.Read = channel.unary_unary( + '/books_database.BooksDatabaseService/Read', + request_serializer=books__database__pb2.ReadRequest.SerializeToString, + response_deserializer=books__database__pb2.ReadResponse.FromString, + _registered_method=True) + self.Write = channel.unary_unary( + '/books_database.BooksDatabaseService/Write', + request_serializer=books__database__pb2.WriteRequest.SerializeToString, + response_deserializer=books__database__pb2.WriteResponse.FromString, + _registered_method=True) + self.ReadLocal = channel.unary_unary( + '/books_database.BooksDatabaseService/ReadLocal', + request_serializer=books__database__pb2.ReadRequest.SerializeToString, + response_deserializer=books__database__pb2.ReadResponse.FromString, + _registered_method=True) + self.ReplicateWrite = channel.unary_unary( + '/books_database.BooksDatabaseService/ReplicateWrite', + request_serializer=books__database__pb2.ReplicateWriteRequest.SerializeToString, + response_deserializer=books__database__pb2.ReplicateWriteResponse.FromString, + _registered_method=True) + self.WhoIsPrimary = channel.unary_unary( + '/books_database.BooksDatabaseService/WhoIsPrimary', + request_serializer=books__database__pb2.WhoIsPrimaryRequest.SerializeToString, + response_deserializer=books__database__pb2.WhoIsPrimaryResponse.FromString, + _registered_method=True) + self.Election = channel.unary_unary( + '/books_database.BooksDatabaseService/Election', + request_serializer=books__database__pb2.ElectionRequest.SerializeToString, + response_deserializer=books__database__pb2.ElectionResponse.FromString, + _registered_method=True) + self.Coordinator = channel.unary_unary( + '/books_database.BooksDatabaseService/Coordinator', + request_serializer=books__database__pb2.CoordinatorRequest.SerializeToString, + 
response_deserializer=books__database__pb2.Ack.FromString, + _registered_method=True) + self.Heartbeat = channel.unary_unary( + '/books_database.BooksDatabaseService/Heartbeat', + request_serializer=books__database__pb2.HeartbeatRequest.SerializeToString, + response_deserializer=books__database__pb2.Ack.FromString, + _registered_method=True) + self.Prepare = channel.unary_unary( + '/books_database.BooksDatabaseService/Prepare', + request_serializer=books__database__pb2.PrepareRequest.SerializeToString, + response_deserializer=books__database__pb2.PrepareResponse.FromString, + _registered_method=True) + self.Commit = channel.unary_unary( + '/books_database.BooksDatabaseService/Commit', + request_serializer=books__database__pb2.CommitRequest.SerializeToString, + response_deserializer=books__database__pb2.CommitResponse.FromString, + _registered_method=True) + self.Abort = channel.unary_unary( + '/books_database.BooksDatabaseService/Abort', + request_serializer=books__database__pb2.AbortRequest.SerializeToString, + response_deserializer=books__database__pb2.AbortResponse.FromString, + _registered_method=True) + + +class BooksDatabaseServiceServicer(object): + """Missing associated documentation comment in .proto file.""" + + def Read(self, request, context): + """Client-facing RPCs. In Phase 2 clients should send these to the primary. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Write(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReadLocal(self, request, context): + """Debug/ops RPC that reads the local replica's copy without the + primary-only check. 
Used by the convergence check in + scripts/checkpoint3-checks.ps1 to verify that all three replicas hold + the same value after a committed write. Never used by the orchestrator + or the 2PC coordinator. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReplicateWrite(self, request, context): + """Internal: primary -> backup synchronous replication of a committed write. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def WhoIsPrimary(self, request, context): + """Primary discovery. Any replica answers; the response points to the + replica that currently believes itself to be primary (or replies + leader_id=0 if no primary is known yet). + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Election(self, request, context): + """Bully-style election, mirroring the pattern already used by the + order_executor control service. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Coordinator(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Heartbeat(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Prepare(self, request, context): + """2PC participant-side RPCs. 
The primary acts as the participant; backups + pick up committed state via ReplicateWrite inside Commit. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Commit(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Abort(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_BooksDatabaseServiceServicer_to_server(servicer, server): + rpc_method_handlers = { + 'Read': grpc.unary_unary_rpc_method_handler( + servicer.Read, + request_deserializer=books__database__pb2.ReadRequest.FromString, + response_serializer=books__database__pb2.ReadResponse.SerializeToString, + ), + 'Write': grpc.unary_unary_rpc_method_handler( + servicer.Write, + request_deserializer=books__database__pb2.WriteRequest.FromString, + response_serializer=books__database__pb2.WriteResponse.SerializeToString, + ), + 'ReadLocal': grpc.unary_unary_rpc_method_handler( + servicer.ReadLocal, + request_deserializer=books__database__pb2.ReadRequest.FromString, + response_serializer=books__database__pb2.ReadResponse.SerializeToString, + ), + 'ReplicateWrite': grpc.unary_unary_rpc_method_handler( + servicer.ReplicateWrite, + request_deserializer=books__database__pb2.ReplicateWriteRequest.FromString, + response_serializer=books__database__pb2.ReplicateWriteResponse.SerializeToString, + ), + 'WhoIsPrimary': grpc.unary_unary_rpc_method_handler( + servicer.WhoIsPrimary, + request_deserializer=books__database__pb2.WhoIsPrimaryRequest.FromString, + 
response_serializer=books__database__pb2.WhoIsPrimaryResponse.SerializeToString, + ), + 'Election': grpc.unary_unary_rpc_method_handler( + servicer.Election, + request_deserializer=books__database__pb2.ElectionRequest.FromString, + response_serializer=books__database__pb2.ElectionResponse.SerializeToString, + ), + 'Coordinator': grpc.unary_unary_rpc_method_handler( + servicer.Coordinator, + request_deserializer=books__database__pb2.CoordinatorRequest.FromString, + response_serializer=books__database__pb2.Ack.SerializeToString, + ), + 'Heartbeat': grpc.unary_unary_rpc_method_handler( + servicer.Heartbeat, + request_deserializer=books__database__pb2.HeartbeatRequest.FromString, + response_serializer=books__database__pb2.Ack.SerializeToString, + ), + 'Prepare': grpc.unary_unary_rpc_method_handler( + servicer.Prepare, + request_deserializer=books__database__pb2.PrepareRequest.FromString, + response_serializer=books__database__pb2.PrepareResponse.SerializeToString, + ), + 'Commit': grpc.unary_unary_rpc_method_handler( + servicer.Commit, + request_deserializer=books__database__pb2.CommitRequest.FromString, + response_serializer=books__database__pb2.CommitResponse.SerializeToString, + ), + 'Abort': grpc.unary_unary_rpc_method_handler( + servicer.Abort, + request_deserializer=books__database__pb2.AbortRequest.FromString, + response_serializer=books__database__pb2.AbortResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'books_database.BooksDatabaseService', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers('books_database.BooksDatabaseService', rpc_method_handlers) + + + # This class is part of an EXPERIMENTAL API. 
+class BooksDatabaseService(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def Read(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/books_database.BooksDatabaseService/Read', + books__database__pb2.ReadRequest.SerializeToString, + books__database__pb2.ReadResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def Write(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/books_database.BooksDatabaseService/Write', + books__database__pb2.WriteRequest.SerializeToString, + books__database__pb2.WriteResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ReadLocal(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/books_database.BooksDatabaseService/ReadLocal', + books__database__pb2.ReadRequest.SerializeToString, + books__database__pb2.ReadResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ReplicateWrite(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + 
wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/books_database.BooksDatabaseService/ReplicateWrite', + books__database__pb2.ReplicateWriteRequest.SerializeToString, + books__database__pb2.ReplicateWriteResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def WhoIsPrimary(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/books_database.BooksDatabaseService/WhoIsPrimary', + books__database__pb2.WhoIsPrimaryRequest.SerializeToString, + books__database__pb2.WhoIsPrimaryResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def Election(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/books_database.BooksDatabaseService/Election', + books__database__pb2.ElectionRequest.SerializeToString, + books__database__pb2.ElectionResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def Coordinator(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/books_database.BooksDatabaseService/Coordinator', + 
books__database__pb2.CoordinatorRequest.SerializeToString, + books__database__pb2.Ack.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def Heartbeat(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/books_database.BooksDatabaseService/Heartbeat', + books__database__pb2.HeartbeatRequest.SerializeToString, + books__database__pb2.Ack.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def Prepare(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/books_database.BooksDatabaseService/Prepare', + books__database__pb2.PrepareRequest.SerializeToString, + books__database__pb2.PrepareResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def Commit(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/books_database.BooksDatabaseService/Commit', + books__database__pb2.CommitRequest.SerializeToString, + books__database__pb2.CommitResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + 
def Abort(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/books_database.BooksDatabaseService/Abort', + books__database__pb2.AbortRequest.SerializeToString, + books__database__pb2.AbortResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/utils/pb/fraud_detection/fraud_detection.proto b/utils/pb/fraud_detection/fraud_detection.proto index db20211d7..4115cee66 100644 --- a/utils/pb/fraud_detection/fraud_detection.proto +++ b/utils/pb/fraud_detection/fraud_detection.proto @@ -1,15 +1,64 @@ syntax = "proto3"; -package hello; +package fraud_detection; -service HelloService { - rpc SayHello (HelloRequest) returns (HelloResponse); +service FraudDetectionService { + rpc InitOrder (InitOrderRequest) returns (EventResponse); + + rpc CheckUserFraud (EventRequest) returns (EventResponse); + rpc CheckCardFraud (EventRequest) returns (EventResponse); + + // Receive a forwarded vector clock from another microservice. 
+ rpc ForwardVC (VCForward) returns (EventResponse); + + rpc ClearOrder (ClearOrderRequest) returns (EventResponse); +} + +message VectorClock { + repeated int32 values = 1; +} + +message OrderItem { + string title = 1; + int32 quantity = 2; } -message HelloRequest { - string name = 1; +message OrderData { + string order_id = 1; + string user_name = 2; + string user_contact = 3; + string card_number = 4; + string expiration_date = 5; + string cvv = 6; + int32 item_count = 7; + bool terms_accepted = 8; + repeated OrderItem items = 9; } -message HelloResponse { - string greeting = 1; +message InitOrderRequest { + OrderData order = 1; } + +message EventRequest { + string order_id = 1; + VectorClock vc = 2; +} + +message EventResponse { + bool success = 1; + string message = 2; + VectorClock vc = 3; +} + +message VCForward { + string order_id = 1; + string source_event = 2; + VectorClock vc = 3; + bool success = 4; + string message = 5; +} + +message ClearOrderRequest { + string order_id = 1; + VectorClock final_vc = 2; +} \ No newline at end of file diff --git a/utils/pb/fraud_detection/fraud_detection_pb2.py b/utils/pb/fraud_detection/fraud_detection_pb2.py index cdd0bcae8..2a9913310 100644 --- a/utils/pb/fraud_detection/fraud_detection_pb2.py +++ b/utils/pb/fraud_detection/fraud_detection_pb2.py @@ -1,12 +1,22 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
+# NO CHECKED-IN PROTOBUF GENCODE # source: fraud_detection.proto -# Protobuf Python Version: 4.25.0 +# Protobuf Python Version: 5.29.0 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 0, + '', + 'fraud_detection.proto' +) # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -14,17 +24,29 @@ -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66raud_detection.proto\x12\x05hello\"\x1c\n\x0cHelloRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"!\n\rHelloResponse\x12\x10\n\x08greeting\x18\x01 \x01(\t2E\n\x0cHelloService\x12\x35\n\x08SayHello\x12\x13.hello.HelloRequest\x1a\x14.hello.HelloResponseb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66raud_detection.proto\x12\x0f\x66raud_detection\"\x1d\n\x0bVectorClock\x12\x0e\n\x06values\x18\x01 \x03(\x05\",\n\tOrderItem\x12\r\n\x05title\x18\x01 \x01(\t\x12\x10\n\x08quantity\x18\x02 \x01(\x05\"\xd8\x01\n\tOrderData\x12\x10\n\x08order_id\x18\x01 \x01(\t\x12\x11\n\tuser_name\x18\x02 \x01(\t\x12\x14\n\x0cuser_contact\x18\x03 \x01(\t\x12\x13\n\x0b\x63\x61rd_number\x18\x04 \x01(\t\x12\x17\n\x0f\x65xpiration_date\x18\x05 \x01(\t\x12\x0b\n\x03\x63vv\x18\x06 \x01(\t\x12\x12\n\nitem_count\x18\x07 \x01(\x05\x12\x16\n\x0eterms_accepted\x18\x08 \x01(\x08\x12)\n\x05items\x18\t \x03(\x0b\x32\x1a.fraud_detection.OrderItem\"=\n\x10InitOrderRequest\x12)\n\x05order\x18\x01 \x01(\x0b\x32\x1a.fraud_detection.OrderData\"J\n\x0c\x45ventRequest\x12\x10\n\x08order_id\x18\x01 \x01(\t\x12(\n\x02vc\x18\x02 \x01(\x0b\x32\x1c.fraud_detection.VectorClock\"[\n\rEventResponse\x12\x0f\n\x07success\x18\x01 
\x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t\x12(\n\x02vc\x18\x03 \x01(\x0b\x32\x1c.fraud_detection.VectorClock\"\x7f\n\tVCForward\x12\x10\n\x08order_id\x18\x01 \x01(\t\x12\x14\n\x0csource_event\x18\x02 \x01(\t\x12(\n\x02vc\x18\x03 \x01(\x0b\x32\x1c.fraud_detection.VectorClock\x12\x0f\n\x07success\x18\x04 \x01(\x08\x12\x0f\n\x07message\x18\x05 \x01(\t\"U\n\x11\x43learOrderRequest\x12\x10\n\x08order_id\x18\x01 \x01(\t\x12.\n\x08\x66inal_vc\x18\x02 \x01(\x0b\x32\x1c.fraud_detection.VectorClock2\xa4\x03\n\x15\x46raudDetectionService\x12N\n\tInitOrder\x12!.fraud_detection.InitOrderRequest\x1a\x1e.fraud_detection.EventResponse\x12O\n\x0e\x43heckUserFraud\x12\x1d.fraud_detection.EventRequest\x1a\x1e.fraud_detection.EventResponse\x12O\n\x0e\x43heckCardFraud\x12\x1d.fraud_detection.EventRequest\x1a\x1e.fraud_detection.EventResponse\x12G\n\tForwardVC\x12\x1a.fraud_detection.VCForward\x1a\x1e.fraud_detection.EventResponse\x12P\n\nClearOrder\x12\".fraud_detection.ClearOrderRequest\x1a\x1e.fraud_detection.EventResponseb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'fraud_detection_pb2', _globals) -if _descriptor._USE_C_DESCRIPTORS == False: - DESCRIPTOR._options = None - _globals['_HELLOREQUEST']._serialized_start=32 - _globals['_HELLOREQUEST']._serialized_end=60 - _globals['_HELLORESPONSE']._serialized_start=62 - _globals['_HELLORESPONSE']._serialized_end=95 - _globals['_HELLOSERVICE']._serialized_start=97 - _globals['_HELLOSERVICE']._serialized_end=166 +if not _descriptor._USE_C_DESCRIPTORS: + DESCRIPTOR._loaded_options = None + _globals['_VECTORCLOCK']._serialized_start=42 + _globals['_VECTORCLOCK']._serialized_end=71 + _globals['_ORDERITEM']._serialized_start=73 + _globals['_ORDERITEM']._serialized_end=117 + _globals['_ORDERDATA']._serialized_start=120 + _globals['_ORDERDATA']._serialized_end=336 + _globals['_INITORDERREQUEST']._serialized_start=338 + 
_globals['_INITORDERREQUEST']._serialized_end=399 + _globals['_EVENTREQUEST']._serialized_start=401 + _globals['_EVENTREQUEST']._serialized_end=475 + _globals['_EVENTRESPONSE']._serialized_start=477 + _globals['_EVENTRESPONSE']._serialized_end=568 + _globals['_VCFORWARD']._serialized_start=570 + _globals['_VCFORWARD']._serialized_end=697 + _globals['_CLEARORDERREQUEST']._serialized_start=699 + _globals['_CLEARORDERREQUEST']._serialized_end=784 + _globals['_FRAUDDETECTIONSERVICE']._serialized_start=787 + _globals['_FRAUDDETECTIONSERVICE']._serialized_end=1207 # @@protoc_insertion_point(module_scope) diff --git a/utils/pb/fraud_detection/fraud_detection_pb2.pyi b/utils/pb/fraud_detection/fraud_detection_pb2.pyi index 30a263856..58cb72388 100644 --- a/utils/pb/fraud_detection/fraud_detection_pb2.pyi +++ b/utils/pb/fraud_detection/fraud_detection_pb2.pyi @@ -1,17 +1,88 @@ +from google.protobuf.internal import containers as _containers from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message -from typing import ClassVar as _ClassVar, Optional as _Optional +from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union DESCRIPTOR: _descriptor.FileDescriptor -class HelloRequest(_message.Message): - __slots__ = ("name",) - NAME_FIELD_NUMBER: _ClassVar[int] - name: str - def __init__(self, name: _Optional[str] = ...) -> None: ... - -class HelloResponse(_message.Message): - __slots__ = ("greeting",) - GREETING_FIELD_NUMBER: _ClassVar[int] - greeting: str - def __init__(self, greeting: _Optional[str] = ...) -> None: ... +class VectorClock(_message.Message): + __slots__ = ("values",) + VALUES_FIELD_NUMBER: _ClassVar[int] + values: _containers.RepeatedScalarFieldContainer[int] + def __init__(self, values: _Optional[_Iterable[int]] = ...) -> None: ... 
+ +class OrderItem(_message.Message): + __slots__ = ("title", "quantity") + TITLE_FIELD_NUMBER: _ClassVar[int] + QUANTITY_FIELD_NUMBER: _ClassVar[int] + title: str + quantity: int + def __init__(self, title: _Optional[str] = ..., quantity: _Optional[int] = ...) -> None: ... + +class OrderData(_message.Message): + __slots__ = ("order_id", "user_name", "user_contact", "card_number", "expiration_date", "cvv", "item_count", "terms_accepted", "items") + ORDER_ID_FIELD_NUMBER: _ClassVar[int] + USER_NAME_FIELD_NUMBER: _ClassVar[int] + USER_CONTACT_FIELD_NUMBER: _ClassVar[int] + CARD_NUMBER_FIELD_NUMBER: _ClassVar[int] + EXPIRATION_DATE_FIELD_NUMBER: _ClassVar[int] + CVV_FIELD_NUMBER: _ClassVar[int] + ITEM_COUNT_FIELD_NUMBER: _ClassVar[int] + TERMS_ACCEPTED_FIELD_NUMBER: _ClassVar[int] + ITEMS_FIELD_NUMBER: _ClassVar[int] + order_id: str + user_name: str + user_contact: str + card_number: str + expiration_date: str + cvv: str + item_count: int + terms_accepted: bool + items: _containers.RepeatedCompositeFieldContainer[OrderItem] + def __init__(self, order_id: _Optional[str] = ..., user_name: _Optional[str] = ..., user_contact: _Optional[str] = ..., card_number: _Optional[str] = ..., expiration_date: _Optional[str] = ..., cvv: _Optional[str] = ..., item_count: _Optional[int] = ..., terms_accepted: bool = ..., items: _Optional[_Iterable[_Union[OrderItem, _Mapping]]] = ...) -> None: ... + +class InitOrderRequest(_message.Message): + __slots__ = ("order",) + ORDER_FIELD_NUMBER: _ClassVar[int] + order: OrderData + def __init__(self, order: _Optional[_Union[OrderData, _Mapping]] = ...) -> None: ... + +class EventRequest(_message.Message): + __slots__ = ("order_id", "vc") + ORDER_ID_FIELD_NUMBER: _ClassVar[int] + VC_FIELD_NUMBER: _ClassVar[int] + order_id: str + vc: VectorClock + def __init__(self, order_id: _Optional[str] = ..., vc: _Optional[_Union[VectorClock, _Mapping]] = ...) -> None: ... 
+ +class EventResponse(_message.Message): + __slots__ = ("success", "message", "vc") + SUCCESS_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + VC_FIELD_NUMBER: _ClassVar[int] + success: bool + message: str + vc: VectorClock + def __init__(self, success: bool = ..., message: _Optional[str] = ..., vc: _Optional[_Union[VectorClock, _Mapping]] = ...) -> None: ... + +class VCForward(_message.Message): + __slots__ = ("order_id", "source_event", "vc", "success", "message") + ORDER_ID_FIELD_NUMBER: _ClassVar[int] + SOURCE_EVENT_FIELD_NUMBER: _ClassVar[int] + VC_FIELD_NUMBER: _ClassVar[int] + SUCCESS_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + order_id: str + source_event: str + vc: VectorClock + success: bool + message: str + def __init__(self, order_id: _Optional[str] = ..., source_event: _Optional[str] = ..., vc: _Optional[_Union[VectorClock, _Mapping]] = ..., success: bool = ..., message: _Optional[str] = ...) -> None: ... + +class ClearOrderRequest(_message.Message): + __slots__ = ("order_id", "final_vc") + ORDER_ID_FIELD_NUMBER: _ClassVar[int] + FINAL_VC_FIELD_NUMBER: _ClassVar[int] + order_id: str + final_vc: VectorClock + def __init__(self, order_id: _Optional[str] = ..., final_vc: _Optional[_Union[VectorClock, _Mapping]] = ...) -> None: ... diff --git a/utils/pb/fraud_detection/fraud_detection_pb2_grpc.py b/utils/pb/fraud_detection/fraud_detection_pb2_grpc.py index 4e7a27975..a54520a95 100644 --- a/utils/pb/fraud_detection/fraud_detection_pb2_grpc.py +++ b/utils/pb/fraud_detection/fraud_detection_pb2_grpc.py @@ -1,11 +1,31 @@ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
"""Client and server classes corresponding to protobuf-defined services.""" import grpc +import warnings import fraud_detection_pb2 as fraud__detection__pb2 +GRPC_GENERATED_VERSION = '1.70.0' +GRPC_VERSION = grpc.__version__ +_version_not_supported = False -class HelloServiceStub(object): +try: + from grpc._utilities import first_version_is_lower + _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f'The grpc package installed is at version {GRPC_VERSION},' + + f' but the generated code in fraud_detection_pb2_grpc.py depends on' + + f' grpcio>={GRPC_GENERATED_VERSION}.' + + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' + ) + + +class FraudDetectionServiceStub(object): """Missing associated documentation comment in .proto file.""" def __init__(self, channel): @@ -14,42 +34,189 @@ def __init__(self, channel): Args: channel: A grpc.Channel. 
""" - self.SayHello = channel.unary_unary( - '/hello.HelloService/SayHello', - request_serializer=fraud__detection__pb2.HelloRequest.SerializeToString, - response_deserializer=fraud__detection__pb2.HelloResponse.FromString, - ) + self.InitOrder = channel.unary_unary( + '/fraud_detection.FraudDetectionService/InitOrder', + request_serializer=fraud__detection__pb2.InitOrderRequest.SerializeToString, + response_deserializer=fraud__detection__pb2.EventResponse.FromString, + _registered_method=True) + self.CheckUserFraud = channel.unary_unary( + '/fraud_detection.FraudDetectionService/CheckUserFraud', + request_serializer=fraud__detection__pb2.EventRequest.SerializeToString, + response_deserializer=fraud__detection__pb2.EventResponse.FromString, + _registered_method=True) + self.CheckCardFraud = channel.unary_unary( + '/fraud_detection.FraudDetectionService/CheckCardFraud', + request_serializer=fraud__detection__pb2.EventRequest.SerializeToString, + response_deserializer=fraud__detection__pb2.EventResponse.FromString, + _registered_method=True) + self.ForwardVC = channel.unary_unary( + '/fraud_detection.FraudDetectionService/ForwardVC', + request_serializer=fraud__detection__pb2.VCForward.SerializeToString, + response_deserializer=fraud__detection__pb2.EventResponse.FromString, + _registered_method=True) + self.ClearOrder = channel.unary_unary( + '/fraud_detection.FraudDetectionService/ClearOrder', + request_serializer=fraud__detection__pb2.ClearOrderRequest.SerializeToString, + response_deserializer=fraud__detection__pb2.EventResponse.FromString, + _registered_method=True) -class HelloServiceServicer(object): +class FraudDetectionServiceServicer(object): """Missing associated documentation comment in .proto file.""" - def SayHello(self, request, context): + def InitOrder(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise 
NotImplementedError('Method not implemented!') + + def CheckUserFraud(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CheckCardFraud(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ForwardVC(self, request, context): + """Receive a forwarded vector clock from another microservice. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ClearOrder(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') -def add_HelloServiceServicer_to_server(servicer, server): +def add_FraudDetectionServiceServicer_to_server(servicer, server): rpc_method_handlers = { - 'SayHello': grpc.unary_unary_rpc_method_handler( - servicer.SayHello, - request_deserializer=fraud__detection__pb2.HelloRequest.FromString, - response_serializer=fraud__detection__pb2.HelloResponse.SerializeToString, + 'InitOrder': grpc.unary_unary_rpc_method_handler( + servicer.InitOrder, + request_deserializer=fraud__detection__pb2.InitOrderRequest.FromString, + response_serializer=fraud__detection__pb2.EventResponse.SerializeToString, + ), + 'CheckUserFraud': grpc.unary_unary_rpc_method_handler( + servicer.CheckUserFraud, + request_deserializer=fraud__detection__pb2.EventRequest.FromString, + response_serializer=fraud__detection__pb2.EventResponse.SerializeToString, + ), + 'CheckCardFraud': grpc.unary_unary_rpc_method_handler( + 
servicer.CheckCardFraud, + request_deserializer=fraud__detection__pb2.EventRequest.FromString, + response_serializer=fraud__detection__pb2.EventResponse.SerializeToString, + ), + 'ForwardVC': grpc.unary_unary_rpc_method_handler( + servicer.ForwardVC, + request_deserializer=fraud__detection__pb2.VCForward.FromString, + response_serializer=fraud__detection__pb2.EventResponse.SerializeToString, + ), + 'ClearOrder': grpc.unary_unary_rpc_method_handler( + servicer.ClearOrder, + request_deserializer=fraud__detection__pb2.ClearOrderRequest.FromString, + response_serializer=fraud__detection__pb2.EventResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( - 'hello.HelloService', rpc_method_handlers) + 'fraud_detection.FraudDetectionService', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers('fraud_detection.FraudDetectionService', rpc_method_handlers) # This class is part of an EXPERIMENTAL API. -class HelloService(object): +class FraudDetectionService(object): """Missing associated documentation comment in .proto file.""" @staticmethod - def SayHello(request, + def InitOrder(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/fraud_detection.FraudDetectionService/InitOrder', + fraud__detection__pb2.InitOrderRequest.SerializeToString, + fraud__detection__pb2.EventResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def CheckUserFraud(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + 
request, + target, + '/fraud_detection.FraudDetectionService/CheckUserFraud', + fraud__detection__pb2.EventRequest.SerializeToString, + fraud__detection__pb2.EventResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def CheckCardFraud(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/fraud_detection.FraudDetectionService/CheckCardFraud', + fraud__detection__pb2.EventRequest.SerializeToString, + fraud__detection__pb2.EventResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ForwardVC(request, target, options=(), channel_credentials=None, @@ -59,8 +226,45 @@ def SayHello(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/hello.HelloService/SayHello', - fraud__detection__pb2.HelloRequest.SerializeToString, - fraud__detection__pb2.HelloResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + return grpc.experimental.unary_unary( + request, + target, + '/fraud_detection.FraudDetectionService/ForwardVC', + fraud__detection__pb2.VCForward.SerializeToString, + fraud__detection__pb2.EventResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ClearOrder(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + 
return grpc.experimental.unary_unary( + request, + target, + '/fraud_detection.FraudDetectionService/ClearOrder', + fraud__detection__pb2.ClearOrderRequest.SerializeToString, + fraud__detection__pb2.EventResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/utils/pb/order_executor/__init__.py b/utils/pb/order_executor/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/utils/pb/order_executor/order_executor.proto b/utils/pb/order_executor/order_executor.proto new file mode 100644 index 000000000..51c665f98 --- /dev/null +++ b/utils/pb/order_executor/order_executor.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package order_executor; + +service OrderExecutorControl { + rpc Election (ElectionRequest) returns (ElectionResponse); + rpc Coordinator (CoordinatorRequest) returns (Ack); + rpc Heartbeat (HeartbeatRequest) returns (Ack); +} + +message ElectionRequest { + int32 candidate_id = 1; +} + +message ElectionResponse { + bool alive = 1; +} + +message CoordinatorRequest { + int32 leader_id = 1; +} + +message HeartbeatRequest { + int32 leader_id = 1; +} + +message Ack { + bool ok = 1; +} \ No newline at end of file diff --git a/utils/pb/order_executor/order_executor_pb2.py b/utils/pb/order_executor/order_executor_pb2.py new file mode 100644 index 000000000..50b3d873b --- /dev/null +++ b/utils/pb/order_executor/order_executor_pb2.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# NO CHECKED-IN PROTOBUF GENCODE +# source: order_executor.proto +# Protobuf Python Version: 5.29.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 0, + '', + 'order_executor.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14order_executor.proto\x12\x0eorder_executor\"\'\n\x0f\x45lectionRequest\x12\x14\n\x0c\x63\x61ndidate_id\x18\x01 \x01(\x05\"!\n\x10\x45lectionResponse\x12\r\n\x05\x61live\x18\x01 \x01(\x08\"\'\n\x12\x43oordinatorRequest\x12\x11\n\tleader_id\x18\x01 \x01(\x05\"%\n\x10HeartbeatRequest\x12\x11\n\tleader_id\x18\x01 \x01(\x05\"\x11\n\x03\x41\x63k\x12\n\n\x02ok\x18\x01 \x01(\x08\x32\xf1\x01\n\x14OrderExecutorControl\x12M\n\x08\x45lection\x12\x1f.order_executor.ElectionRequest\x1a .order_executor.ElectionResponse\x12\x46\n\x0b\x43oordinator\x12\".order_executor.CoordinatorRequest\x1a\x13.order_executor.Ack\x12\x42\n\tHeartbeat\x12 .order_executor.HeartbeatRequest\x1a\x13.order_executor.Ackb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'order_executor_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + DESCRIPTOR._loaded_options = None + _globals['_ELECTIONREQUEST']._serialized_start=40 + _globals['_ELECTIONREQUEST']._serialized_end=79 + _globals['_ELECTIONRESPONSE']._serialized_start=81 + _globals['_ELECTIONRESPONSE']._serialized_end=114 + _globals['_COORDINATORREQUEST']._serialized_start=116 + 
_globals['_COORDINATORREQUEST']._serialized_end=155 + _globals['_HEARTBEATREQUEST']._serialized_start=157 + _globals['_HEARTBEATREQUEST']._serialized_end=194 + _globals['_ACK']._serialized_start=196 + _globals['_ACK']._serialized_end=213 + _globals['_ORDEREXECUTORCONTROL']._serialized_start=216 + _globals['_ORDEREXECUTORCONTROL']._serialized_end=457 +# @@protoc_insertion_point(module_scope) diff --git a/utils/pb/order_executor/order_executor_pb2.pyi b/utils/pb/order_executor/order_executor_pb2.pyi new file mode 100644 index 000000000..c8ad8e7c6 --- /dev/null +++ b/utils/pb/order_executor/order_executor_pb2.pyi @@ -0,0 +1,35 @@ +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Optional as _Optional + +DESCRIPTOR: _descriptor.FileDescriptor + +class ElectionRequest(_message.Message): + __slots__ = ("candidate_id",) + CANDIDATE_ID_FIELD_NUMBER: _ClassVar[int] + candidate_id: int + def __init__(self, candidate_id: _Optional[int] = ...) -> None: ... + +class ElectionResponse(_message.Message): + __slots__ = ("alive",) + ALIVE_FIELD_NUMBER: _ClassVar[int] + alive: bool + def __init__(self, alive: bool = ...) -> None: ... + +class CoordinatorRequest(_message.Message): + __slots__ = ("leader_id",) + LEADER_ID_FIELD_NUMBER: _ClassVar[int] + leader_id: int + def __init__(self, leader_id: _Optional[int] = ...) -> None: ... + +class HeartbeatRequest(_message.Message): + __slots__ = ("leader_id",) + LEADER_ID_FIELD_NUMBER: _ClassVar[int] + leader_id: int + def __init__(self, leader_id: _Optional[int] = ...) -> None: ... + +class Ack(_message.Message): + __slots__ = ("ok",) + OK_FIELD_NUMBER: _ClassVar[int] + ok: bool + def __init__(self, ok: bool = ...) -> None: ... 
diff --git a/utils/pb/order_executor/order_executor_pb2_grpc.py b/utils/pb/order_executor/order_executor_pb2_grpc.py new file mode 100644 index 000000000..7c9c28f14 --- /dev/null +++ b/utils/pb/order_executor/order_executor_pb2_grpc.py @@ -0,0 +1,183 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc +import warnings + +import order_executor_pb2 as order__executor__pb2 + +GRPC_GENERATED_VERSION = '1.70.0' +GRPC_VERSION = grpc.__version__ +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f'The grpc package installed is at version {GRPC_VERSION},' + + f' but the generated code in order_executor_pb2_grpc.py depends on' + + f' grpcio>={GRPC_GENERATED_VERSION}.' + + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' + ) + + +class OrderExecutorControlStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.Election = channel.unary_unary( + '/order_executor.OrderExecutorControl/Election', + request_serializer=order__executor__pb2.ElectionRequest.SerializeToString, + response_deserializer=order__executor__pb2.ElectionResponse.FromString, + _registered_method=True) + self.Coordinator = channel.unary_unary( + '/order_executor.OrderExecutorControl/Coordinator', + request_serializer=order__executor__pb2.CoordinatorRequest.SerializeToString, + response_deserializer=order__executor__pb2.Ack.FromString, + _registered_method=True) + self.Heartbeat = channel.unary_unary( + '/order_executor.OrderExecutorControl/Heartbeat', + request_serializer=order__executor__pb2.HeartbeatRequest.SerializeToString, + response_deserializer=order__executor__pb2.Ack.FromString, + _registered_method=True) + + +class OrderExecutorControlServicer(object): + """Missing associated documentation comment in .proto file.""" + + def Election(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Coordinator(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Heartbeat(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_OrderExecutorControlServicer_to_server(servicer, server): + rpc_method_handlers = { + 'Election': grpc.unary_unary_rpc_method_handler( + servicer.Election, + request_deserializer=order__executor__pb2.ElectionRequest.FromString, + 
response_serializer=order__executor__pb2.ElectionResponse.SerializeToString, + ), + 'Coordinator': grpc.unary_unary_rpc_method_handler( + servicer.Coordinator, + request_deserializer=order__executor__pb2.CoordinatorRequest.FromString, + response_serializer=order__executor__pb2.Ack.SerializeToString, + ), + 'Heartbeat': grpc.unary_unary_rpc_method_handler( + servicer.Heartbeat, + request_deserializer=order__executor__pb2.HeartbeatRequest.FromString, + response_serializer=order__executor__pb2.Ack.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'order_executor.OrderExecutorControl', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers('order_executor.OrderExecutorControl', rpc_method_handlers) + + + # This class is part of an EXPERIMENTAL API. +class OrderExecutorControl(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def Election(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/order_executor.OrderExecutorControl/Election', + order__executor__pb2.ElectionRequest.SerializeToString, + order__executor__pb2.ElectionResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def Coordinator(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/order_executor.OrderExecutorControl/Coordinator', + order__executor__pb2.CoordinatorRequest.SerializeToString, + order__executor__pb2.Ack.FromString, + options, + 
channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def Heartbeat(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/order_executor.OrderExecutorControl/Heartbeat', + order__executor__pb2.HeartbeatRequest.SerializeToString, + order__executor__pb2.Ack.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/utils/pb/order_queue/__init__.py b/utils/pb/order_queue/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/utils/pb/order_queue/order_queue.proto b/utils/pb/order_queue/order_queue.proto new file mode 100644 index 000000000..edbfa6061 --- /dev/null +++ b/utils/pb/order_queue/order_queue.proto @@ -0,0 +1,44 @@ +syntax = "proto3"; + +package order_queue; + +service OrderQueueService { + rpc Enqueue (EnqueueRequest) returns (QueueResponse); + rpc Dequeue (DequeueRequest) returns (DequeueResponse); +} + +message OrderItem { + string title = 1; + int32 quantity = 2; +} + +message OrderData { + string order_id = 1; + string user_name = 2; + string user_contact = 3; + string card_number = 4; + string expiration_date = 5; + string cvv = 6; + int32 item_count = 7; + bool terms_accepted = 8; + repeated OrderItem items = 9; +} + +message EnqueueRequest { + OrderData order = 1; +} + +message DequeueRequest { + string executor_id = 1; +} + +message QueueResponse { + bool success = 1; + string message = 2; +} + +message DequeueResponse { + bool success = 1; + string message = 2; + OrderData order = 3; +} \ No newline at end of file diff --git a/utils/pb/order_queue/order_queue_pb2.py b/utils/pb/order_queue/order_queue_pb2.py new file mode 100644 
index 000000000..71cbce788 --- /dev/null +++ b/utils/pb/order_queue/order_queue_pb2.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE +# source: order_queue.proto +# Protobuf Python Version: 5.29.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 0, + '', + 'order_queue.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x11order_queue.proto\x12\x0border_queue\",\n\tOrderItem\x12\r\n\x05title\x18\x01 \x01(\t\x12\x10\n\x08quantity\x18\x02 \x01(\x05\"\xd4\x01\n\tOrderData\x12\x10\n\x08order_id\x18\x01 \x01(\t\x12\x11\n\tuser_name\x18\x02 \x01(\t\x12\x14\n\x0cuser_contact\x18\x03 \x01(\t\x12\x13\n\x0b\x63\x61rd_number\x18\x04 \x01(\t\x12\x17\n\x0f\x65xpiration_date\x18\x05 \x01(\t\x12\x0b\n\x03\x63vv\x18\x06 \x01(\t\x12\x12\n\nitem_count\x18\x07 \x01(\x05\x12\x16\n\x0eterms_accepted\x18\x08 \x01(\x08\x12%\n\x05items\x18\t \x03(\x0b\x32\x16.order_queue.OrderItem\"7\n\x0e\x45nqueueRequest\x12%\n\x05order\x18\x01 \x01(\x0b\x32\x16.order_queue.OrderData\"%\n\x0e\x44\x65queueRequest\x12\x13\n\x0b\x65xecutor_id\x18\x01 \x01(\t\"1\n\rQueueResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t\"Z\n\x0f\x44\x65queueResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t\x12%\n\x05order\x18\x03 
\x01(\x0b\x32\x16.order_queue.OrderData2\x9d\x01\n\x11OrderQueueService\x12\x42\n\x07\x45nqueue\x12\x1b.order_queue.EnqueueRequest\x1a\x1a.order_queue.QueueResponse\x12\x44\n\x07\x44\x65queue\x12\x1b.order_queue.DequeueRequest\x1a\x1c.order_queue.DequeueResponseb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'order_queue_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + DESCRIPTOR._loaded_options = None + _globals['_ORDERITEM']._serialized_start=34 + _globals['_ORDERITEM']._serialized_end=78 + _globals['_ORDERDATA']._serialized_start=81 + _globals['_ORDERDATA']._serialized_end=293 + _globals['_ENQUEUEREQUEST']._serialized_start=295 + _globals['_ENQUEUEREQUEST']._serialized_end=350 + _globals['_DEQUEUEREQUEST']._serialized_start=352 + _globals['_DEQUEUEREQUEST']._serialized_end=389 + _globals['_QUEUERESPONSE']._serialized_start=391 + _globals['_QUEUERESPONSE']._serialized_end=440 + _globals['_DEQUEUERESPONSE']._serialized_start=442 + _globals['_DEQUEUERESPONSE']._serialized_end=532 + _globals['_ORDERQUEUESERVICE']._serialized_start=535 + _globals['_ORDERQUEUESERVICE']._serialized_end=692 +# @@protoc_insertion_point(module_scope) diff --git a/utils/pb/order_queue/order_queue_pb2.pyi b/utils/pb/order_queue/order_queue_pb2.pyi new file mode 100644 index 000000000..cdeb72c7f --- /dev/null +++ b/utils/pb/order_queue/order_queue_pb2.pyi @@ -0,0 +1,66 @@ +from google.protobuf.internal import containers as _containers +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class OrderItem(_message.Message): + __slots__ = ("title", "quantity") + TITLE_FIELD_NUMBER: _ClassVar[int] + QUANTITY_FIELD_NUMBER: _ClassVar[int] + title: str + quantity: int 
+ def __init__(self, title: _Optional[str] = ..., quantity: _Optional[int] = ...) -> None: ... + +class OrderData(_message.Message): + __slots__ = ("order_id", "user_name", "user_contact", "card_number", "expiration_date", "cvv", "item_count", "terms_accepted", "items") + ORDER_ID_FIELD_NUMBER: _ClassVar[int] + USER_NAME_FIELD_NUMBER: _ClassVar[int] + USER_CONTACT_FIELD_NUMBER: _ClassVar[int] + CARD_NUMBER_FIELD_NUMBER: _ClassVar[int] + EXPIRATION_DATE_FIELD_NUMBER: _ClassVar[int] + CVV_FIELD_NUMBER: _ClassVar[int] + ITEM_COUNT_FIELD_NUMBER: _ClassVar[int] + TERMS_ACCEPTED_FIELD_NUMBER: _ClassVar[int] + ITEMS_FIELD_NUMBER: _ClassVar[int] + order_id: str + user_name: str + user_contact: str + card_number: str + expiration_date: str + cvv: str + item_count: int + terms_accepted: bool + items: _containers.RepeatedCompositeFieldContainer[OrderItem] + def __init__(self, order_id: _Optional[str] = ..., user_name: _Optional[str] = ..., user_contact: _Optional[str] = ..., card_number: _Optional[str] = ..., expiration_date: _Optional[str] = ..., cvv: _Optional[str] = ..., item_count: _Optional[int] = ..., terms_accepted: bool = ..., items: _Optional[_Iterable[_Union[OrderItem, _Mapping]]] = ...) -> None: ... + +class EnqueueRequest(_message.Message): + __slots__ = ("order",) + ORDER_FIELD_NUMBER: _ClassVar[int] + order: OrderData + def __init__(self, order: _Optional[_Union[OrderData, _Mapping]] = ...) -> None: ... + +class DequeueRequest(_message.Message): + __slots__ = ("executor_id",) + EXECUTOR_ID_FIELD_NUMBER: _ClassVar[int] + executor_id: str + def __init__(self, executor_id: _Optional[str] = ...) -> None: ... + +class QueueResponse(_message.Message): + __slots__ = ("success", "message") + SUCCESS_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + success: bool + message: str + def __init__(self, success: bool = ..., message: _Optional[str] = ...) -> None: ... 
+ +class DequeueResponse(_message.Message): + __slots__ = ("success", "message", "order") + SUCCESS_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + ORDER_FIELD_NUMBER: _ClassVar[int] + success: bool + message: str + order: OrderData + def __init__(self, success: bool = ..., message: _Optional[str] = ..., order: _Optional[_Union[OrderData, _Mapping]] = ...) -> None: ... diff --git a/utils/pb/order_queue/order_queue_pb2_grpc.py b/utils/pb/order_queue/order_queue_pb2_grpc.py new file mode 100644 index 000000000..fc1f3f8bf --- /dev/null +++ b/utils/pb/order_queue/order_queue_pb2_grpc.py @@ -0,0 +1,140 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc +import warnings + +import order_queue_pb2 as order__queue__pb2 + +GRPC_GENERATED_VERSION = '1.70.0' +GRPC_VERSION = grpc.__version__ +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f'The grpc package installed is at version {GRPC_VERSION},' + + f' but the generated code in order_queue_pb2_grpc.py depends on' + + f' grpcio>={GRPC_GENERATED_VERSION}.' + + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' + ) + + +class OrderQueueServiceStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.Enqueue = channel.unary_unary( + '/order_queue.OrderQueueService/Enqueue', + request_serializer=order__queue__pb2.EnqueueRequest.SerializeToString, + response_deserializer=order__queue__pb2.QueueResponse.FromString, + _registered_method=True) + self.Dequeue = channel.unary_unary( + '/order_queue.OrderQueueService/Dequeue', + request_serializer=order__queue__pb2.DequeueRequest.SerializeToString, + response_deserializer=order__queue__pb2.DequeueResponse.FromString, + _registered_method=True) + + +class OrderQueueServiceServicer(object): + """Missing associated documentation comment in .proto file.""" + + def Enqueue(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Dequeue(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_OrderQueueServiceServicer_to_server(servicer, server): + rpc_method_handlers = { + 'Enqueue': grpc.unary_unary_rpc_method_handler( + servicer.Enqueue, + request_deserializer=order__queue__pb2.EnqueueRequest.FromString, + response_serializer=order__queue__pb2.QueueResponse.SerializeToString, + ), + 'Dequeue': grpc.unary_unary_rpc_method_handler( + servicer.Dequeue, + request_deserializer=order__queue__pb2.DequeueRequest.FromString, + response_serializer=order__queue__pb2.DequeueResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'order_queue.OrderQueueService', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers('order_queue.OrderQueueService', rpc_method_handlers) + + + # This class is part of an EXPERIMENTAL API. 
+class OrderQueueService(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def Enqueue(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/order_queue.OrderQueueService/Enqueue', + order__queue__pb2.EnqueueRequest.SerializeToString, + order__queue__pb2.QueueResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def Dequeue(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/order_queue.OrderQueueService/Dequeue', + order__queue__pb2.DequeueRequest.SerializeToString, + order__queue__pb2.DequeueResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/utils/pb/payment_service/__init__.py b/utils/pb/payment_service/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/utils/pb/payment_service/payment.proto b/utils/pb/payment_service/payment.proto new file mode 100644 index 000000000..199924f87 --- /dev/null +++ b/utils/pb/payment_service/payment.proto @@ -0,0 +1,40 @@ +syntax = "proto3"; + +package payment_service; + +// Payment service: 2PC participant. For the demo it never actually charges +// a card - it just logs the call and returns VoteCommit on Prepare. 
+service PaymentService { + rpc Prepare (PaymentPrepareRequest) returns (PaymentPrepareResponse); + rpc Commit (PaymentCommitRequest) returns (PaymentCommitResponse); + rpc Abort (PaymentAbortRequest) returns (PaymentAbortResponse); +} + +message PaymentPrepareRequest { + string order_id = 1; + double amount = 2; + string user_name = 3; +} + +message PaymentPrepareResponse { + bool vote_commit = 1; + string message = 2; +} + +message PaymentCommitRequest { + string order_id = 1; +} + +message PaymentCommitResponse { + bool success = 1; + string message = 2; +} + +message PaymentAbortRequest { + string order_id = 1; +} + +message PaymentAbortResponse { + bool success = 1; + string message = 2; +} diff --git a/utils/pb/payment_service/payment_pb2.py b/utils/pb/payment_service/payment_pb2.py new file mode 100644 index 000000000..3e6d9ce71 --- /dev/null +++ b/utils/pb/payment_service/payment_pb2.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# NO CHECKED-IN PROTOBUF GENCODE +# source: payment.proto +# Protobuf Python Version: 5.29.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 0, + '', + 'payment.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\rpayment.proto\x12\x0fpayment_service\"L\n\x15PaymentPrepareRequest\x12\x10\n\x08order_id\x18\x01 \x01(\t\x12\x0e\n\x06\x61mount\x18\x02 \x01(\x01\x12\x11\n\tuser_name\x18\x03 \x01(\t\">\n\x16PaymentPrepareResponse\x12\x13\n\x0bvote_commit\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t\"(\n\x14PaymentCommitRequest\x12\x10\n\x08order_id\x18\x01 \x01(\t\"9\n\x15PaymentCommitResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t\"\'\n\x13PaymentAbortRequest\x12\x10\n\x08order_id\x18\x01 \x01(\t\"8\n\x14PaymentAbortResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t2\x9b\x02\n\x0ePaymentService\x12Z\n\x07Prepare\x12&.payment_service.PaymentPrepareRequest\x1a\'.payment_service.PaymentPrepareResponse\x12W\n\x06\x43ommit\x12%.payment_service.PaymentCommitRequest\x1a&.payment_service.PaymentCommitResponse\x12T\n\x05\x41\x62ort\x12$.payment_service.PaymentAbortRequest\x1a%.payment_service.PaymentAbortResponseb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'payment_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + DESCRIPTOR._loaded_options = None + 
_globals['_PAYMENTPREPAREREQUEST']._serialized_start=34 + _globals['_PAYMENTPREPAREREQUEST']._serialized_end=110 + _globals['_PAYMENTPREPARERESPONSE']._serialized_start=112 + _globals['_PAYMENTPREPARERESPONSE']._serialized_end=174 + _globals['_PAYMENTCOMMITREQUEST']._serialized_start=176 + _globals['_PAYMENTCOMMITREQUEST']._serialized_end=216 + _globals['_PAYMENTCOMMITRESPONSE']._serialized_start=218 + _globals['_PAYMENTCOMMITRESPONSE']._serialized_end=275 + _globals['_PAYMENTABORTREQUEST']._serialized_start=277 + _globals['_PAYMENTABORTREQUEST']._serialized_end=316 + _globals['_PAYMENTABORTRESPONSE']._serialized_start=318 + _globals['_PAYMENTABORTRESPONSE']._serialized_end=374 + _globals['_PAYMENTSERVICE']._serialized_start=377 + _globals['_PAYMENTSERVICE']._serialized_end=660 +# @@protoc_insertion_point(module_scope) diff --git a/utils/pb/payment_service/payment_pb2.pyi b/utils/pb/payment_service/payment_pb2.pyi new file mode 100644 index 000000000..68806210f --- /dev/null +++ b/utils/pb/payment_service/payment_pb2.pyi @@ -0,0 +1,51 @@ +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Optional as _Optional + +DESCRIPTOR: _descriptor.FileDescriptor + +class PaymentPrepareRequest(_message.Message): + __slots__ = ("order_id", "amount", "user_name") + ORDER_ID_FIELD_NUMBER: _ClassVar[int] + AMOUNT_FIELD_NUMBER: _ClassVar[int] + USER_NAME_FIELD_NUMBER: _ClassVar[int] + order_id: str + amount: float + user_name: str + def __init__(self, order_id: _Optional[str] = ..., amount: _Optional[float] = ..., user_name: _Optional[str] = ...) -> None: ... + +class PaymentPrepareResponse(_message.Message): + __slots__ = ("vote_commit", "message") + VOTE_COMMIT_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + vote_commit: bool + message: str + def __init__(self, vote_commit: bool = ..., message: _Optional[str] = ...) -> None: ... 
+ +class PaymentCommitRequest(_message.Message): + __slots__ = ("order_id",) + ORDER_ID_FIELD_NUMBER: _ClassVar[int] + order_id: str + def __init__(self, order_id: _Optional[str] = ...) -> None: ... + +class PaymentCommitResponse(_message.Message): + __slots__ = ("success", "message") + SUCCESS_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + success: bool + message: str + def __init__(self, success: bool = ..., message: _Optional[str] = ...) -> None: ... + +class PaymentAbortRequest(_message.Message): + __slots__ = ("order_id",) + ORDER_ID_FIELD_NUMBER: _ClassVar[int] + order_id: str + def __init__(self, order_id: _Optional[str] = ...) -> None: ... + +class PaymentAbortResponse(_message.Message): + __slots__ = ("success", "message") + SUCCESS_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + success: bool + message: str + def __init__(self, success: bool = ..., message: _Optional[str] = ...) -> None: ... diff --git a/utils/pb/payment_service/payment_pb2_grpc.py b/utils/pb/payment_service/payment_pb2_grpc.py new file mode 100644 index 000000000..99e9020f6 --- /dev/null +++ b/utils/pb/payment_service/payment_pb2_grpc.py @@ -0,0 +1,189 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc +import warnings + +import payment_pb2 as payment__pb2 + +GRPC_GENERATED_VERSION = '1.70.0' +GRPC_VERSION = grpc.__version__ +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f'The grpc package installed is at version {GRPC_VERSION},' + + f' but the generated code in payment_pb2_grpc.py depends on' + + f' grpcio>={GRPC_GENERATED_VERSION}.' 
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' + ) + + +class PaymentServiceStub(object): + """Payment service: 2PC participant. For the demo it never actually charges + a card - it just logs the call and returns VoteCommit on Prepare. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.Prepare = channel.unary_unary( + '/payment_service.PaymentService/Prepare', + request_serializer=payment__pb2.PaymentPrepareRequest.SerializeToString, + response_deserializer=payment__pb2.PaymentPrepareResponse.FromString, + _registered_method=True) + self.Commit = channel.unary_unary( + '/payment_service.PaymentService/Commit', + request_serializer=payment__pb2.PaymentCommitRequest.SerializeToString, + response_deserializer=payment__pb2.PaymentCommitResponse.FromString, + _registered_method=True) + self.Abort = channel.unary_unary( + '/payment_service.PaymentService/Abort', + request_serializer=payment__pb2.PaymentAbortRequest.SerializeToString, + response_deserializer=payment__pb2.PaymentAbortResponse.FromString, + _registered_method=True) + + +class PaymentServiceServicer(object): + """Payment service: 2PC participant. For the demo it never actually charges + a card - it just logs the call and returns VoteCommit on Prepare. 
+ """ + + def Prepare(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Commit(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Abort(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_PaymentServiceServicer_to_server(servicer, server): + rpc_method_handlers = { + 'Prepare': grpc.unary_unary_rpc_method_handler( + servicer.Prepare, + request_deserializer=payment__pb2.PaymentPrepareRequest.FromString, + response_serializer=payment__pb2.PaymentPrepareResponse.SerializeToString, + ), + 'Commit': grpc.unary_unary_rpc_method_handler( + servicer.Commit, + request_deserializer=payment__pb2.PaymentCommitRequest.FromString, + response_serializer=payment__pb2.PaymentCommitResponse.SerializeToString, + ), + 'Abort': grpc.unary_unary_rpc_method_handler( + servicer.Abort, + request_deserializer=payment__pb2.PaymentAbortRequest.FromString, + response_serializer=payment__pb2.PaymentAbortResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'payment_service.PaymentService', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers('payment_service.PaymentService', rpc_method_handlers) + + + # This class is part of an EXPERIMENTAL API. +class PaymentService(object): + """Payment service: 2PC participant. 
For the demo it never actually charges + a card - it just logs the call and returns VoteCommit on Prepare. + """ + + @staticmethod + def Prepare(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/payment_service.PaymentService/Prepare', + payment__pb2.PaymentPrepareRequest.SerializeToString, + payment__pb2.PaymentPrepareResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def Commit(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/payment_service.PaymentService/Commit', + payment__pb2.PaymentCommitRequest.SerializeToString, + payment__pb2.PaymentCommitResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def Abort(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/payment_service.PaymentService/Abort', + payment__pb2.PaymentAbortRequest.SerializeToString, + payment__pb2.PaymentAbortResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/utils/pb/suggestions/__init__.py b/utils/pb/suggestions/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git 
a/utils/pb/suggestions/suggestions.proto b/utils/pb/suggestions/suggestions.proto new file mode 100644 index 000000000..80b9b2e2f --- /dev/null +++ b/utils/pb/suggestions/suggestions.proto @@ -0,0 +1,91 @@ +syntax = "proto3"; + +package suggestions; + +service SuggestionsService { + rpc InitOrder (InitOrderRequest) returns (EventResponse); + + rpc PrecomputeSuggestions (EventRequest) returns (EventResponse); + rpc FinalizeSuggestions (EventRequest) returns (SuggestionsEventResponse); + + // Receive a forwarded vector clock from another microservice. + rpc ForwardVC (VCForward) returns (EventResponse); + + // Block until the full pipeline completes for this order, then return the result. + rpc AwaitPipelineResult (PipelineResultRequest) returns (PipelineResultResponse); + + rpc ClearOrder (ClearOrderRequest) returns (EventResponse); +} + +message VectorClock { + repeated int32 values = 1; +} + +message OrderItem { + string title = 1; + int32 quantity = 2; +} + +message OrderData { + string order_id = 1; + string user_name = 2; + string user_contact = 3; + string card_number = 4; + string expiration_date = 5; + string cvv = 6; + int32 item_count = 7; + bool terms_accepted = 8; + repeated OrderItem items = 9; +} + +message InitOrderRequest { + OrderData order = 1; +} + +message EventRequest { + string order_id = 1; + VectorClock vc = 2; +} + +message EventResponse { + bool success = 1; + string message = 2; + VectorClock vc = 3; +} + +message SuggestedBook { + string bookId = 1; + string title = 2; + string author = 3; +} + +message SuggestionsEventResponse { + bool success = 1; + string message = 2; + VectorClock vc = 3; + repeated SuggestedBook books = 4; +} + +message VCForward { + string order_id = 1; + string source_event = 2; + VectorClock vc = 3; + bool success = 4; + string message = 5; +} + +message PipelineResultRequest { + string order_id = 1; +} + +message PipelineResultResponse { + bool success = 1; + string message = 2; + VectorClock vc = 3; + repeated 
SuggestedBook books = 4; +} + +message ClearOrderRequest { + string order_id = 1; + VectorClock final_vc = 2; +} \ No newline at end of file diff --git a/utils/pb/suggestions/suggestions_pb2.py b/utils/pb/suggestions/suggestions_pb2.py new file mode 100644 index 000000000..bd29b44bc --- /dev/null +++ b/utils/pb/suggestions/suggestions_pb2.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE +# source: suggestions.proto +# Protobuf Python Version: 5.29.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 0, + '', + 'suggestions.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x11suggestions.proto\x12\x0bsuggestions\"\x1d\n\x0bVectorClock\x12\x0e\n\x06values\x18\x01 \x03(\x05\",\n\tOrderItem\x12\r\n\x05title\x18\x01 \x01(\t\x12\x10\n\x08quantity\x18\x02 \x01(\x05\"\xd4\x01\n\tOrderData\x12\x10\n\x08order_id\x18\x01 \x01(\t\x12\x11\n\tuser_name\x18\x02 \x01(\t\x12\x14\n\x0cuser_contact\x18\x03 \x01(\t\x12\x13\n\x0b\x63\x61rd_number\x18\x04 \x01(\t\x12\x17\n\x0f\x65xpiration_date\x18\x05 \x01(\t\x12\x0b\n\x03\x63vv\x18\x06 \x01(\t\x12\x12\n\nitem_count\x18\x07 \x01(\x05\x12\x16\n\x0eterms_accepted\x18\x08 \x01(\x08\x12%\n\x05items\x18\t \x03(\x0b\x32\x16.suggestions.OrderItem\"9\n\x10InitOrderRequest\x12%\n\x05order\x18\x01 \x01(\x0b\x32\x16.suggestions.OrderData\"F\n\x0c\x45ventRequest\x12\x10\n\x08order_id\x18\x01 \x01(\t\x12$\n\x02vc\x18\x02 
\x01(\x0b\x32\x18.suggestions.VectorClock\"W\n\rEventResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t\x12$\n\x02vc\x18\x03 \x01(\x0b\x32\x18.suggestions.VectorClock\">\n\rSuggestedBook\x12\x0e\n\x06\x62ookId\x18\x01 \x01(\t\x12\r\n\x05title\x18\x02 \x01(\t\x12\x0e\n\x06\x61uthor\x18\x03 \x01(\t\"\x8d\x01\n\x18SuggestionsEventResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t\x12$\n\x02vc\x18\x03 \x01(\x0b\x32\x18.suggestions.VectorClock\x12)\n\x05\x62ooks\x18\x04 \x03(\x0b\x32\x1a.suggestions.SuggestedBook\"{\n\tVCForward\x12\x10\n\x08order_id\x18\x01 \x01(\t\x12\x14\n\x0csource_event\x18\x02 \x01(\t\x12$\n\x02vc\x18\x03 \x01(\x0b\x32\x18.suggestions.VectorClock\x12\x0f\n\x07success\x18\x04 \x01(\x08\x12\x0f\n\x07message\x18\x05 \x01(\t\")\n\x15PipelineResultRequest\x12\x10\n\x08order_id\x18\x01 \x01(\t\"\x8b\x01\n\x16PipelineResultResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t\x12$\n\x02vc\x18\x03 \x01(\x0b\x32\x18.suggestions.VectorClock\x12)\n\x05\x62ooks\x18\x04 \x03(\x0b\x32\x1a.suggestions.SuggestedBook\"Q\n\x11\x43learOrderRequest\x12\x10\n\x08order_id\x18\x01 \x01(\t\x12*\n\x08\x66inal_vc\x18\x02 \x01(\x0b\x32\x18.suggestions.VectorClock2\xf0\x03\n\x12SuggestionsService\x12\x46\n\tInitOrder\x12\x1d.suggestions.InitOrderRequest\x1a\x1a.suggestions.EventResponse\x12N\n\x15PrecomputeSuggestions\x12\x19.suggestions.EventRequest\x1a\x1a.suggestions.EventResponse\x12W\n\x13\x46inalizeSuggestions\x12\x19.suggestions.EventRequest\x1a%.suggestions.SuggestionsEventResponse\x12?\n\tForwardVC\x12\x16.suggestions.VCForward\x1a\x1a.suggestions.EventResponse\x12^\n\x13\x41waitPipelineResult\x12\".suggestions.PipelineResultRequest\x1a#.suggestions.PipelineResultResponse\x12H\n\nClearOrder\x12\x1e.suggestions.ClearOrderRequest\x1a\x1a.suggestions.EventResponseb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) 
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'suggestions_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + DESCRIPTOR._loaded_options = None + _globals['_VECTORCLOCK']._serialized_start=34 + _globals['_VECTORCLOCK']._serialized_end=63 + _globals['_ORDERITEM']._serialized_start=65 + _globals['_ORDERITEM']._serialized_end=109 + _globals['_ORDERDATA']._serialized_start=112 + _globals['_ORDERDATA']._serialized_end=324 + _globals['_INITORDERREQUEST']._serialized_start=326 + _globals['_INITORDERREQUEST']._serialized_end=383 + _globals['_EVENTREQUEST']._serialized_start=385 + _globals['_EVENTREQUEST']._serialized_end=455 + _globals['_EVENTRESPONSE']._serialized_start=457 + _globals['_EVENTRESPONSE']._serialized_end=544 + _globals['_SUGGESTEDBOOK']._serialized_start=546 + _globals['_SUGGESTEDBOOK']._serialized_end=608 + _globals['_SUGGESTIONSEVENTRESPONSE']._serialized_start=611 + _globals['_SUGGESTIONSEVENTRESPONSE']._serialized_end=752 + _globals['_VCFORWARD']._serialized_start=754 + _globals['_VCFORWARD']._serialized_end=877 + _globals['_PIPELINERESULTREQUEST']._serialized_start=879 + _globals['_PIPELINERESULTREQUEST']._serialized_end=920 + _globals['_PIPELINERESULTRESPONSE']._serialized_start=923 + _globals['_PIPELINERESULTRESPONSE']._serialized_end=1062 + _globals['_CLEARORDERREQUEST']._serialized_start=1064 + _globals['_CLEARORDERREQUEST']._serialized_end=1145 + _globals['_SUGGESTIONSSERVICE']._serialized_start=1148 + _globals['_SUGGESTIONSSERVICE']._serialized_end=1644 +# @@protoc_insertion_point(module_scope) diff --git a/utils/pb/suggestions/suggestions_pb2.pyi b/utils/pb/suggestions/suggestions_pb2.pyi new file mode 100644 index 000000000..4c90a0686 --- /dev/null +++ b/utils/pb/suggestions/suggestions_pb2.pyi @@ -0,0 +1,128 @@ +from google.protobuf.internal import containers as _containers +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Iterable as 
_Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class VectorClock(_message.Message): + __slots__ = ("values",) + VALUES_FIELD_NUMBER: _ClassVar[int] + values: _containers.RepeatedScalarFieldContainer[int] + def __init__(self, values: _Optional[_Iterable[int]] = ...) -> None: ... + +class OrderItem(_message.Message): + __slots__ = ("title", "quantity") + TITLE_FIELD_NUMBER: _ClassVar[int] + QUANTITY_FIELD_NUMBER: _ClassVar[int] + title: str + quantity: int + def __init__(self, title: _Optional[str] = ..., quantity: _Optional[int] = ...) -> None: ... + +class OrderData(_message.Message): + __slots__ = ("order_id", "user_name", "user_contact", "card_number", "expiration_date", "cvv", "item_count", "terms_accepted", "items") + ORDER_ID_FIELD_NUMBER: _ClassVar[int] + USER_NAME_FIELD_NUMBER: _ClassVar[int] + USER_CONTACT_FIELD_NUMBER: _ClassVar[int] + CARD_NUMBER_FIELD_NUMBER: _ClassVar[int] + EXPIRATION_DATE_FIELD_NUMBER: _ClassVar[int] + CVV_FIELD_NUMBER: _ClassVar[int] + ITEM_COUNT_FIELD_NUMBER: _ClassVar[int] + TERMS_ACCEPTED_FIELD_NUMBER: _ClassVar[int] + ITEMS_FIELD_NUMBER: _ClassVar[int] + order_id: str + user_name: str + user_contact: str + card_number: str + expiration_date: str + cvv: str + item_count: int + terms_accepted: bool + items: _containers.RepeatedCompositeFieldContainer[OrderItem] + def __init__(self, order_id: _Optional[str] = ..., user_name: _Optional[str] = ..., user_contact: _Optional[str] = ..., card_number: _Optional[str] = ..., expiration_date: _Optional[str] = ..., cvv: _Optional[str] = ..., item_count: _Optional[int] = ..., terms_accepted: bool = ..., items: _Optional[_Iterable[_Union[OrderItem, _Mapping]]] = ...) -> None: ... + +class InitOrderRequest(_message.Message): + __slots__ = ("order",) + ORDER_FIELD_NUMBER: _ClassVar[int] + order: OrderData + def __init__(self, order: _Optional[_Union[OrderData, _Mapping]] = ...) -> None: ... 
+ +class EventRequest(_message.Message): + __slots__ = ("order_id", "vc") + ORDER_ID_FIELD_NUMBER: _ClassVar[int] + VC_FIELD_NUMBER: _ClassVar[int] + order_id: str + vc: VectorClock + def __init__(self, order_id: _Optional[str] = ..., vc: _Optional[_Union[VectorClock, _Mapping]] = ...) -> None: ... + +class EventResponse(_message.Message): + __slots__ = ("success", "message", "vc") + SUCCESS_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + VC_FIELD_NUMBER: _ClassVar[int] + success: bool + message: str + vc: VectorClock + def __init__(self, success: bool = ..., message: _Optional[str] = ..., vc: _Optional[_Union[VectorClock, _Mapping]] = ...) -> None: ... + +class SuggestedBook(_message.Message): + __slots__ = ("bookId", "title", "author") + BOOKID_FIELD_NUMBER: _ClassVar[int] + TITLE_FIELD_NUMBER: _ClassVar[int] + AUTHOR_FIELD_NUMBER: _ClassVar[int] + bookId: str + title: str + author: str + def __init__(self, bookId: _Optional[str] = ..., title: _Optional[str] = ..., author: _Optional[str] = ...) -> None: ... + +class SuggestionsEventResponse(_message.Message): + __slots__ = ("success", "message", "vc", "books") + SUCCESS_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + VC_FIELD_NUMBER: _ClassVar[int] + BOOKS_FIELD_NUMBER: _ClassVar[int] + success: bool + message: str + vc: VectorClock + books: _containers.RepeatedCompositeFieldContainer[SuggestedBook] + def __init__(self, success: bool = ..., message: _Optional[str] = ..., vc: _Optional[_Union[VectorClock, _Mapping]] = ..., books: _Optional[_Iterable[_Union[SuggestedBook, _Mapping]]] = ...) -> None: ... 
+ +class VCForward(_message.Message): + __slots__ = ("order_id", "source_event", "vc", "success", "message") + ORDER_ID_FIELD_NUMBER: _ClassVar[int] + SOURCE_EVENT_FIELD_NUMBER: _ClassVar[int] + VC_FIELD_NUMBER: _ClassVar[int] + SUCCESS_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + order_id: str + source_event: str + vc: VectorClock + success: bool + message: str + def __init__(self, order_id: _Optional[str] = ..., source_event: _Optional[str] = ..., vc: _Optional[_Union[VectorClock, _Mapping]] = ..., success: bool = ..., message: _Optional[str] = ...) -> None: ... + +class PipelineResultRequest(_message.Message): + __slots__ = ("order_id",) + ORDER_ID_FIELD_NUMBER: _ClassVar[int] + order_id: str + def __init__(self, order_id: _Optional[str] = ...) -> None: ... + +class PipelineResultResponse(_message.Message): + __slots__ = ("success", "message", "vc", "books") + SUCCESS_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + VC_FIELD_NUMBER: _ClassVar[int] + BOOKS_FIELD_NUMBER: _ClassVar[int] + success: bool + message: str + vc: VectorClock + books: _containers.RepeatedCompositeFieldContainer[SuggestedBook] + def __init__(self, success: bool = ..., message: _Optional[str] = ..., vc: _Optional[_Union[VectorClock, _Mapping]] = ..., books: _Optional[_Iterable[_Union[SuggestedBook, _Mapping]]] = ...) -> None: ... + +class ClearOrderRequest(_message.Message): + __slots__ = ("order_id", "final_vc") + ORDER_ID_FIELD_NUMBER: _ClassVar[int] + FINAL_VC_FIELD_NUMBER: _ClassVar[int] + order_id: str + final_vc: VectorClock + def __init__(self, order_id: _Optional[str] = ..., final_vc: _Optional[_Union[VectorClock, _Mapping]] = ...) -> None: ... diff --git a/utils/pb/suggestions/suggestions_pb2_grpc.py b/utils/pb/suggestions/suggestions_pb2_grpc.py new file mode 100644 index 000000000..46f2a50b1 --- /dev/null +++ b/utils/pb/suggestions/suggestions_pb2_grpc.py @@ -0,0 +1,314 @@ +# Generated by the gRPC Python protocol compiler plugin. 
DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc +import warnings + +import suggestions_pb2 as suggestions__pb2 + +GRPC_GENERATED_VERSION = '1.70.0' +GRPC_VERSION = grpc.__version__ +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f'The grpc package installed is at version {GRPC_VERSION},' + + f' but the generated code in suggestions_pb2_grpc.py depends on' + + f' grpcio>={GRPC_GENERATED_VERSION}.' + + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' + ) + + +class SuggestionsServiceStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.InitOrder = channel.unary_unary( + '/suggestions.SuggestionsService/InitOrder', + request_serializer=suggestions__pb2.InitOrderRequest.SerializeToString, + response_deserializer=suggestions__pb2.EventResponse.FromString, + _registered_method=True) + self.PrecomputeSuggestions = channel.unary_unary( + '/suggestions.SuggestionsService/PrecomputeSuggestions', + request_serializer=suggestions__pb2.EventRequest.SerializeToString, + response_deserializer=suggestions__pb2.EventResponse.FromString, + _registered_method=True) + self.FinalizeSuggestions = channel.unary_unary( + '/suggestions.SuggestionsService/FinalizeSuggestions', + request_serializer=suggestions__pb2.EventRequest.SerializeToString, + response_deserializer=suggestions__pb2.SuggestionsEventResponse.FromString, + _registered_method=True) + self.ForwardVC = channel.unary_unary( + '/suggestions.SuggestionsService/ForwardVC', + request_serializer=suggestions__pb2.VCForward.SerializeToString, + response_deserializer=suggestions__pb2.EventResponse.FromString, + _registered_method=True) + self.AwaitPipelineResult = channel.unary_unary( + '/suggestions.SuggestionsService/AwaitPipelineResult', + request_serializer=suggestions__pb2.PipelineResultRequest.SerializeToString, + response_deserializer=suggestions__pb2.PipelineResultResponse.FromString, + _registered_method=True) + self.ClearOrder = channel.unary_unary( + '/suggestions.SuggestionsService/ClearOrder', + request_serializer=suggestions__pb2.ClearOrderRequest.SerializeToString, + response_deserializer=suggestions__pb2.EventResponse.FromString, + _registered_method=True) + + +class SuggestionsServiceServicer(object): + """Missing associated documentation comment in .proto file.""" + + def InitOrder(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def 
PrecomputeSuggestions(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def FinalizeSuggestions(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ForwardVC(self, request, context): + """Receive a forwarded vector clock from another microservice. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def AwaitPipelineResult(self, request, context): + """Block until the full pipeline completes for this order, then return the result. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ClearOrder(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_SuggestionsServiceServicer_to_server(servicer, server): + rpc_method_handlers = { + 'InitOrder': grpc.unary_unary_rpc_method_handler( + servicer.InitOrder, + request_deserializer=suggestions__pb2.InitOrderRequest.FromString, + response_serializer=suggestions__pb2.EventResponse.SerializeToString, + ), + 'PrecomputeSuggestions': grpc.unary_unary_rpc_method_handler( + servicer.PrecomputeSuggestions, + request_deserializer=suggestions__pb2.EventRequest.FromString, + response_serializer=suggestions__pb2.EventResponse.SerializeToString, + ), + 'FinalizeSuggestions': grpc.unary_unary_rpc_method_handler( + 
servicer.FinalizeSuggestions, + request_deserializer=suggestions__pb2.EventRequest.FromString, + response_serializer=suggestions__pb2.SuggestionsEventResponse.SerializeToString, + ), + 'ForwardVC': grpc.unary_unary_rpc_method_handler( + servicer.ForwardVC, + request_deserializer=suggestions__pb2.VCForward.FromString, + response_serializer=suggestions__pb2.EventResponse.SerializeToString, + ), + 'AwaitPipelineResult': grpc.unary_unary_rpc_method_handler( + servicer.AwaitPipelineResult, + request_deserializer=suggestions__pb2.PipelineResultRequest.FromString, + response_serializer=suggestions__pb2.PipelineResultResponse.SerializeToString, + ), + 'ClearOrder': grpc.unary_unary_rpc_method_handler( + servicer.ClearOrder, + request_deserializer=suggestions__pb2.ClearOrderRequest.FromString, + response_serializer=suggestions__pb2.EventResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'suggestions.SuggestionsService', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers('suggestions.SuggestionsService', rpc_method_handlers) + + + # This class is part of an EXPERIMENTAL API. 
+class SuggestionsService(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def InitOrder(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/suggestions.SuggestionsService/InitOrder', + suggestions__pb2.InitOrderRequest.SerializeToString, + suggestions__pb2.EventResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def PrecomputeSuggestions(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/suggestions.SuggestionsService/PrecomputeSuggestions', + suggestions__pb2.EventRequest.SerializeToString, + suggestions__pb2.EventResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def FinalizeSuggestions(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/suggestions.SuggestionsService/FinalizeSuggestions', + suggestions__pb2.EventRequest.SerializeToString, + suggestions__pb2.SuggestionsEventResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ForwardVC(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + 
insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/suggestions.SuggestionsService/ForwardVC', + suggestions__pb2.VCForward.SerializeToString, + suggestions__pb2.EventResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def AwaitPipelineResult(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/suggestions.SuggestionsService/AwaitPipelineResult', + suggestions__pb2.PipelineResultRequest.SerializeToString, + suggestions__pb2.PipelineResultResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ClearOrder(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/suggestions.SuggestionsService/ClearOrder', + suggestions__pb2.ClearOrderRequest.SerializeToString, + suggestions__pb2.EventResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/utils/pb/transaction_verification/__init__.py b/utils/pb/transaction_verification/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/utils/pb/transaction_verification/transaction_verification.proto b/utils/pb/transaction_verification/transaction_verification.proto new file mode 100644 index 000000000..3864e4ed7 --- /dev/null +++ 
b/utils/pb/transaction_verification/transaction_verification.proto @@ -0,0 +1,54 @@ +syntax = "proto3"; + +package transaction_verification; + +service TransactionVerificationService { + rpc InitOrder (InitOrderRequest) returns (EventResponse); + + rpc ValidateItems (EventRequest) returns (EventResponse); + rpc ValidateUserData (EventRequest) returns (EventResponse); + rpc ValidateCardFormat (EventRequest) returns (EventResponse); + + rpc ClearOrder (ClearOrderRequest) returns (EventResponse); +} + +message VectorClock { + repeated int32 values = 1; +} + +message OrderItem { + string title = 1; + int32 quantity = 2; +} + +message OrderData { + string order_id = 1; + string user_name = 2; + string user_contact = 3; + string card_number = 4; + string expiration_date = 5; + string cvv = 6; + int32 item_count = 7; + bool terms_accepted = 8; + repeated OrderItem items = 9; +} + +message InitOrderRequest { + OrderData order = 1; +} + +message EventRequest { + string order_id = 1; + VectorClock vc = 2; +} + +message EventResponse { + bool success = 1; + string message = 2; + VectorClock vc = 3; +} + +message ClearOrderRequest { + string order_id = 1; + VectorClock final_vc = 2; +} \ No newline at end of file diff --git a/utils/pb/transaction_verification/transaction_verification_pb2.py b/utils/pb/transaction_verification/transaction_verification_pb2.py new file mode 100644 index 000000000..cd045b8f2 --- /dev/null +++ b/utils/pb/transaction_verification/transaction_verification_pb2.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# NO CHECKED-IN PROTOBUF GENCODE +# source: transaction_verification.proto +# Protobuf Python Version: 5.29.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 0, + '', + 'transaction_verification.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1etransaction_verification.proto\x12\x18transaction_verification\"\x1d\n\x0bVectorClock\x12\x0e\n\x06values\x18\x01 \x03(\x05\",\n\tOrderItem\x12\r\n\x05title\x18\x01 \x01(\t\x12\x10\n\x08quantity\x18\x02 \x01(\x05\"\xe1\x01\n\tOrderData\x12\x10\n\x08order_id\x18\x01 \x01(\t\x12\x11\n\tuser_name\x18\x02 \x01(\t\x12\x14\n\x0cuser_contact\x18\x03 \x01(\t\x12\x13\n\x0b\x63\x61rd_number\x18\x04 \x01(\t\x12\x17\n\x0f\x65xpiration_date\x18\x05 \x01(\t\x12\x0b\n\x03\x63vv\x18\x06 \x01(\t\x12\x12\n\nitem_count\x18\x07 \x01(\x05\x12\x16\n\x0eterms_accepted\x18\x08 \x01(\x08\x12\x32\n\x05items\x18\t \x03(\x0b\x32#.transaction_verification.OrderItem\"F\n\x10InitOrderRequest\x12\x32\n\x05order\x18\x01 \x01(\x0b\x32#.transaction_verification.OrderData\"S\n\x0c\x45ventRequest\x12\x10\n\x08order_id\x18\x01 \x01(\t\x12\x31\n\x02vc\x18\x02 \x01(\x0b\x32%.transaction_verification.VectorClock\"d\n\rEventResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x31\n\x02vc\x18\x03 \x01(\x0b\x32%.transaction_verification.VectorClock\"^\n\x11\x43learOrderRequest\x12\x10\n\x08order_id\x18\x01 \x01(\t\x12\x37\n\x08\x66inal_vc\x18\x02 
\x01(\x0b\x32%.transaction_verification.VectorClock2\x94\x04\n\x1eTransactionVerificationService\x12`\n\tInitOrder\x12*.transaction_verification.InitOrderRequest\x1a\'.transaction_verification.EventResponse\x12`\n\rValidateItems\x12&.transaction_verification.EventRequest\x1a\'.transaction_verification.EventResponse\x12\x63\n\x10ValidateUserData\x12&.transaction_verification.EventRequest\x1a\'.transaction_verification.EventResponse\x12\x65\n\x12ValidateCardFormat\x12&.transaction_verification.EventRequest\x1a\'.transaction_verification.EventResponse\x12\x62\n\nClearOrder\x12+.transaction_verification.ClearOrderRequest\x1a\'.transaction_verification.EventResponseb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'transaction_verification_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + DESCRIPTOR._loaded_options = None + _globals['_VECTORCLOCK']._serialized_start=60 + _globals['_VECTORCLOCK']._serialized_end=89 + _globals['_ORDERITEM']._serialized_start=91 + _globals['_ORDERITEM']._serialized_end=135 + _globals['_ORDERDATA']._serialized_start=138 + _globals['_ORDERDATA']._serialized_end=363 + _globals['_INITORDERREQUEST']._serialized_start=365 + _globals['_INITORDERREQUEST']._serialized_end=435 + _globals['_EVENTREQUEST']._serialized_start=437 + _globals['_EVENTREQUEST']._serialized_end=520 + _globals['_EVENTRESPONSE']._serialized_start=522 + _globals['_EVENTRESPONSE']._serialized_end=622 + _globals['_CLEARORDERREQUEST']._serialized_start=624 + _globals['_CLEARORDERREQUEST']._serialized_end=718 + _globals['_TRANSACTIONVERIFICATIONSERVICE']._serialized_start=721 + _globals['_TRANSACTIONVERIFICATIONSERVICE']._serialized_end=1253 +# @@protoc_insertion_point(module_scope) diff --git a/utils/pb/transaction_verification/transaction_verification_pb2.pyi b/utils/pb/transaction_verification/transaction_verification_pb2.pyi new file mode 100644 index 
000000000..0411bd824 --- /dev/null +++ b/utils/pb/transaction_verification/transaction_verification_pb2.pyi @@ -0,0 +1,74 @@ +from google.protobuf.internal import containers as _containers +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class VectorClock(_message.Message): + __slots__ = ("values",) + VALUES_FIELD_NUMBER: _ClassVar[int] + values: _containers.RepeatedScalarFieldContainer[int] + def __init__(self, values: _Optional[_Iterable[int]] = ...) -> None: ... + +class OrderItem(_message.Message): + __slots__ = ("title", "quantity") + TITLE_FIELD_NUMBER: _ClassVar[int] + QUANTITY_FIELD_NUMBER: _ClassVar[int] + title: str + quantity: int + def __init__(self, title: _Optional[str] = ..., quantity: _Optional[int] = ...) -> None: ... + +class OrderData(_message.Message): + __slots__ = ("order_id", "user_name", "user_contact", "card_number", "expiration_date", "cvv", "item_count", "terms_accepted", "items") + ORDER_ID_FIELD_NUMBER: _ClassVar[int] + USER_NAME_FIELD_NUMBER: _ClassVar[int] + USER_CONTACT_FIELD_NUMBER: _ClassVar[int] + CARD_NUMBER_FIELD_NUMBER: _ClassVar[int] + EXPIRATION_DATE_FIELD_NUMBER: _ClassVar[int] + CVV_FIELD_NUMBER: _ClassVar[int] + ITEM_COUNT_FIELD_NUMBER: _ClassVar[int] + TERMS_ACCEPTED_FIELD_NUMBER: _ClassVar[int] + ITEMS_FIELD_NUMBER: _ClassVar[int] + order_id: str + user_name: str + user_contact: str + card_number: str + expiration_date: str + cvv: str + item_count: int + terms_accepted: bool + items: _containers.RepeatedCompositeFieldContainer[OrderItem] + def __init__(self, order_id: _Optional[str] = ..., user_name: _Optional[str] = ..., user_contact: _Optional[str] = ..., card_number: _Optional[str] = ..., expiration_date: _Optional[str] = ..., cvv: _Optional[str] = ..., item_count: _Optional[int] = ..., 
terms_accepted: bool = ..., items: _Optional[_Iterable[_Union[OrderItem, _Mapping]]] = ...) -> None: ... + +class InitOrderRequest(_message.Message): + __slots__ = ("order",) + ORDER_FIELD_NUMBER: _ClassVar[int] + order: OrderData + def __init__(self, order: _Optional[_Union[OrderData, _Mapping]] = ...) -> None: ... + +class EventRequest(_message.Message): + __slots__ = ("order_id", "vc") + ORDER_ID_FIELD_NUMBER: _ClassVar[int] + VC_FIELD_NUMBER: _ClassVar[int] + order_id: str + vc: VectorClock + def __init__(self, order_id: _Optional[str] = ..., vc: _Optional[_Union[VectorClock, _Mapping]] = ...) -> None: ... + +class EventResponse(_message.Message): + __slots__ = ("success", "message", "vc") + SUCCESS_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + VC_FIELD_NUMBER: _ClassVar[int] + success: bool + message: str + vc: VectorClock + def __init__(self, success: bool = ..., message: _Optional[str] = ..., vc: _Optional[_Union[VectorClock, _Mapping]] = ...) -> None: ... + +class ClearOrderRequest(_message.Message): + __slots__ = ("order_id", "final_vc") + ORDER_ID_FIELD_NUMBER: _ClassVar[int] + FINAL_VC_FIELD_NUMBER: _ClassVar[int] + order_id: str + final_vc: VectorClock + def __init__(self, order_id: _Optional[str] = ..., final_vc: _Optional[_Union[VectorClock, _Mapping]] = ...) -> None: ... diff --git a/utils/pb/transaction_verification/transaction_verification_pb2_grpc.py b/utils/pb/transaction_verification/transaction_verification_pb2_grpc.py new file mode 100644 index 000000000..aa188d87d --- /dev/null +++ b/utils/pb/transaction_verification/transaction_verification_pb2_grpc.py @@ -0,0 +1,269 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
+"""Client and server classes corresponding to protobuf-defined services.""" +import grpc +import warnings + +import transaction_verification_pb2 as transaction__verification__pb2 + +GRPC_GENERATED_VERSION = '1.70.0' +GRPC_VERSION = grpc.__version__ +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f'The grpc package installed is at version {GRPC_VERSION},' + + f' but the generated code in transaction_verification_pb2_grpc.py depends on' + + f' grpcio>={GRPC_GENERATED_VERSION}.' + + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' + ) + + +class TransactionVerificationServiceStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.InitOrder = channel.unary_unary( + '/transaction_verification.TransactionVerificationService/InitOrder', + request_serializer=transaction__verification__pb2.InitOrderRequest.SerializeToString, + response_deserializer=transaction__verification__pb2.EventResponse.FromString, + _registered_method=True) + self.ValidateItems = channel.unary_unary( + '/transaction_verification.TransactionVerificationService/ValidateItems', + request_serializer=transaction__verification__pb2.EventRequest.SerializeToString, + response_deserializer=transaction__verification__pb2.EventResponse.FromString, + _registered_method=True) + self.ValidateUserData = channel.unary_unary( + '/transaction_verification.TransactionVerificationService/ValidateUserData', + request_serializer=transaction__verification__pb2.EventRequest.SerializeToString, + response_deserializer=transaction__verification__pb2.EventResponse.FromString, + _registered_method=True) + self.ValidateCardFormat = channel.unary_unary( + '/transaction_verification.TransactionVerificationService/ValidateCardFormat', + request_serializer=transaction__verification__pb2.EventRequest.SerializeToString, + response_deserializer=transaction__verification__pb2.EventResponse.FromString, + _registered_method=True) + self.ClearOrder = channel.unary_unary( + '/transaction_verification.TransactionVerificationService/ClearOrder', + request_serializer=transaction__verification__pb2.ClearOrderRequest.SerializeToString, + response_deserializer=transaction__verification__pb2.EventResponse.FromString, + _registered_method=True) + + +class TransactionVerificationServiceServicer(object): + """Missing associated documentation comment in .proto file.""" + + def InitOrder(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ValidateItems(self, request, 
context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ValidateUserData(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ValidateCardFormat(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ClearOrder(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_TransactionVerificationServiceServicer_to_server(servicer, server): + rpc_method_handlers = { + 'InitOrder': grpc.unary_unary_rpc_method_handler( + servicer.InitOrder, + request_deserializer=transaction__verification__pb2.InitOrderRequest.FromString, + response_serializer=transaction__verification__pb2.EventResponse.SerializeToString, + ), + 'ValidateItems': grpc.unary_unary_rpc_method_handler( + servicer.ValidateItems, + request_deserializer=transaction__verification__pb2.EventRequest.FromString, + response_serializer=transaction__verification__pb2.EventResponse.SerializeToString, + ), + 'ValidateUserData': grpc.unary_unary_rpc_method_handler( + servicer.ValidateUserData, + request_deserializer=transaction__verification__pb2.EventRequest.FromString, + response_serializer=transaction__verification__pb2.EventResponse.SerializeToString, + ), + 'ValidateCardFormat': grpc.unary_unary_rpc_method_handler( + servicer.ValidateCardFormat, + 
request_deserializer=transaction__verification__pb2.EventRequest.FromString, + response_serializer=transaction__verification__pb2.EventResponse.SerializeToString, + ), + 'ClearOrder': grpc.unary_unary_rpc_method_handler( + servicer.ClearOrder, + request_deserializer=transaction__verification__pb2.ClearOrderRequest.FromString, + response_serializer=transaction__verification__pb2.EventResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'transaction_verification.TransactionVerificationService', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers('transaction_verification.TransactionVerificationService', rpc_method_handlers) + + + # This class is part of an EXPERIMENTAL API. +class TransactionVerificationService(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def InitOrder(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/transaction_verification.TransactionVerificationService/InitOrder', + transaction__verification__pb2.InitOrderRequest.SerializeToString, + transaction__verification__pb2.EventResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ValidateItems(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/transaction_verification.TransactionVerificationService/ValidateItems', + transaction__verification__pb2.EventRequest.SerializeToString, + 
transaction__verification__pb2.EventResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ValidateUserData(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/transaction_verification.TransactionVerificationService/ValidateUserData', + transaction__verification__pb2.EventRequest.SerializeToString, + transaction__verification__pb2.EventResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ValidateCardFormat(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/transaction_verification.TransactionVerificationService/ValidateCardFormat', + transaction__verification__pb2.EventRequest.SerializeToString, + transaction__verification__pb2.EventResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ClearOrder(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/transaction_verification.TransactionVerificationService/ClearOrder', + transaction__verification__pb2.ClearOrderRequest.SerializeToString, + transaction__verification__pb2.EventResponse.FromString, + options, + channel_credentials, + 
insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True)