|
| 1 | +package session |
| 2 | + |
| 3 | +import ( |
| 4 | + "crypto/sha256" |
| 5 | + "encoding/hex" |
| 6 | + "fmt" |
| 7 | + "strings" |
| 8 | + "testing" |
| 9 | +) |
| 10 | + |
| 11 | +// TestMigrationCatalogIsContentPinned is the append-only enforcement |
| 12 | +// reviewers asked for on PR #2646: "while it looks ok in this context, |
| 13 | +// it's always dangerous to update an existing migration. A new |
| 14 | +// migration would be cheap." Once a migration has shipped, its |
| 15 | +// (ID, Name, Description, UpSQL, DownSQL) tuple is part of the on-disk |
| 16 | +// schema contract — editing any of those bytes after the fact means an |
| 17 | +// already-deployed database may run the new SQL even though the row in |
| 18 | +// schema_migrations still records the old name, leaving the schema in |
| 19 | +// a state no version of the code has ever produced. |
| 20 | +// |
| 21 | +// The mechanism is a content hash: every migration's textual fields |
| 22 | +// are SHA-256'd into the digest below. Any edit to those fields — or |
| 23 | +// any insertion ahead of an existing entry — changes the digest, and |
| 24 | +// the test fails with a clear "did you mean to add migration N+1?" |
| 25 | +// message. To grow the catalogue: |
| 26 | +// |
| 27 | +// 1. Append the new migration to getAllMigrations() with the next ID; |
| 28 | +// 2. Run the test, copy the actual digest into wantDigest below. |
| 29 | +// |
| 30 | +// Step 2 is intentionally manual so that the audit trail lives in the |
| 31 | +// commit history: every catalogue change is one diff that touches both |
| 32 | +// the migrations slice and this constant. |
| 33 | +// |
| 34 | +// Order matters. The hash is computed over the catalogue in declared |
| 35 | +// order; reordering existing entries — even without changing their |
| 36 | +// content — flips the digest, which is what we want. |
| 37 | +func TestMigrationCatalogIsContentPinned(t *testing.T) { |
| 38 | + t.Parallel() |
| 39 | + |
| 40 | + got := digestMigrationCatalog(getAllMigrations()) |
| 41 | + |
| 42 | + const wantDigest = "0c6d5df46b970104cf49988ee3931e33643d5db85c68dd41b74b639d0094cec9" |
| 43 | + if got != wantDigest { |
| 44 | + t.Fatalf(`migration catalogue content has changed. |
| 45 | +
|
| 46 | +If you added a new migration: |
| 47 | + - confirm its ID is exactly the previous max + 1, and |
| 48 | + - update wantDigest in this test to: |
| 49 | + const wantDigest = %q |
| 50 | +
|
| 51 | +If you EDITED an existing migration entry, stop. Migrations are |
| 52 | +append-only once shipped (see PR #2646). Revert your edit and add a |
| 53 | +new migration instead. |
| 54 | +
|
| 55 | +Diff: |
| 56 | + got: %s |
| 57 | + want: %s |
| 58 | +`, got, got, wantDigest) |
| 59 | + } |
| 60 | +} |
| 61 | + |
| 62 | +// digestMigrationCatalog returns a deterministic SHA-256 of the |
| 63 | +// migration list's textual content, framed so that field-order changes |
| 64 | +// inside individual entries also alter the result. UpFunc is |
| 65 | +// intentionally excluded — function-pointer identity is not stable |
| 66 | +// across builds — so data migrations defined in Go fall outside the |
| 67 | +// append-only enforcement and must rely on change-review as the |
| 68 | +// safety net. |
| 69 | +// |
| 70 | +// Migrations that carry their logic in UpFunc (empty UpSQL/DownSQL) |
| 71 | +// and are therefore NOT covered by this digest: |
| 72 | +// |
| 73 | +// - 015_migrate_messages_to_session_items (migrateMessagesToSessionItems) |
| 74 | +// |
| 75 | +// Reviewers must scrutinise changes to those functions extra |
| 76 | +// carefully, and any future Go-based migration added to the catalogue |
| 77 | +// should be appended to the list above. |
| 78 | +func digestMigrationCatalog(ms []Migration) string { |
| 79 | + var b strings.Builder |
| 80 | + for _, m := range ms { |
| 81 | + fmt.Fprintf(&b, "id=%d\nname=%q\ndesc=%q\nup=%q\ndown=%q\n--\n", |
| 82 | + m.ID, m.Name, m.Description, m.UpSQL, m.DownSQL) |
| 83 | + } |
| 84 | + sum := sha256.Sum256([]byte(b.String())) |
| 85 | + return hex.EncodeToString(sum[:]) |
| 86 | +} |
| 87 | + |
| 88 | +// TestMigrationIDsAreSequential is the construction-side companion to |
| 89 | +// TestMigrationCatalogIsContentPinned: even before the digest can fail, |
| 90 | +// a missing or out-of-order ID is itself a sign someone reused or |
| 91 | +// inserted into the middle of the list. We assert IDs are 1..N with |
| 92 | +// no gaps and that each Name starts with its zero-padded ID — the |
| 93 | +// convention the catalogue already uses for every existing entry. |
| 94 | +func TestMigrationIDsAreSequential(t *testing.T) { |
| 95 | + t.Parallel() |
| 96 | + |
| 97 | + ms := getAllMigrations() |
| 98 | + for i, m := range ms { |
| 99 | + wantID := i + 1 |
| 100 | + if m.ID != wantID { |
| 101 | + t.Errorf("migration[%d]: ID = %d, want %d (catalogue must be 1..N with no gaps)", i, m.ID, wantID) |
| 102 | + } |
| 103 | + // Names look like "001_add_tools_approved_column" — three |
| 104 | + // digits, underscore, then a slug. We only check the prefix. |
| 105 | + // The prefix is derived from wantID (not m.ID) so that this |
| 106 | + // check stays an independent guard: if a middle migration is |
| 107 | + // deleted, m.ID would still match its own "NNN_" prefix and |
| 108 | + // silently mask the gap that the ID check above flagged. |
| 109 | + wantPrefix := fmt.Sprintf("%03d_", wantID) |
| 110 | + if !strings.HasPrefix(m.Name, wantPrefix) { |
| 111 | + t.Errorf("migration[%d]: Name = %q, want prefix %q", i, m.Name, wantPrefix) |
| 112 | + } |
| 113 | + } |
| 114 | +} |