Compare commits
828 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| fe1de50ded | |||
| 2d243469ed | |||
| a23aa5e5e9 | |||
| 2286e092d9 | |||
|
28aa32ad8c
|
|||
| a9885f8b65 | |||
| 2fbe44fea1 | |||
| 80d08c1ba7 | |||
| d6866b9f72 | |||
| 3c1fc68016 | |||
| 817449afbc | |||
| 856423a221 | |||
|
3c43f1bd80
|
|||
| 4fd36c1330 | |||
| 9411a2dd7d | |||
| f1dc71163b | |||
|
08e84ac1dc
|
|||
| 3ea1ef520c | |||
| e30ab93a47 | |||
| 0eb59235c1 | |||
| 31022037ff | |||
| 085bf35d92 | |||
| 9d655e1a23 | |||
| eee1d45d8c | |||
| 782185981f | |||
| e3f156fe9e | |||
| 230876b523 | |||
| e64adc96ae | |||
| f534752e2c | |||
| 42f4c8014d | |||
| e098562e0c | |||
| 948ab167c1 | |||
| 404c4106bc | |||
| 593874ba52 | |||
| bcbdc4458e | |||
| 4b6637b027 | |||
| bc7729b702 | |||
| 942508e467 | |||
| 59e8a3c43d | |||
| d7358d8053 | |||
| 2350099e7f | |||
| ab5a8a9f6a | |||
| 369a0b050a | |||
| 3bf43210a1 | |||
| 148f8befe7 | |||
| 90b9e91d20 | |||
| 985bca2f58 | |||
| 3fce241596 | |||
| a0baa44320 | |||
| 60939c3159 | |||
| 6530408f15 | |||
| fd6694614e | |||
| 6143a96c75 | |||
|
817927cb7d
|
|||
|
e2c1803683
|
|||
| d79a10e910 | |||
| 48f0ec0bbe | |||
| b89b124a0e | |||
| d008303068 | |||
|
e328ef5a64
|
|||
| b2506188f3 | |||
| 16ce04ea86 | |||
| 1d0f82a851 | |||
| 7be533dc6c | |||
|
73eae98929
|
|||
| aa41f48b0e | |||
| bb0c020812 | |||
| f0c7415d88 | |||
| fbe180020c | |||
| 3eba214a72 | |||
| 134b571baa | |||
| a1a23d69cf | |||
| 6f1dda7be5 | |||
| d50827018a | |||
| 8a6163a921 | |||
| 054cfa1e52 | |||
| 7d25af472a | |||
| ec0d5dff74 | |||
| e3d19384a0 | |||
| 42adcb1df5 | |||
| e072ad685b | |||
| 4222eb1268 | |||
| ab3eafa331 | |||
| 86612d9e25 | |||
| abc5668d33 | |||
| 892fc29331 | |||
| bdb6e37b22 | |||
| 8d512e0290 | |||
| 3ab0a8e701 | |||
| 3bd3daccaf | |||
| 3d6016d7e2 | |||
| 31cc2e10b6 | |||
| 136623c04b | |||
| 12ff2fa1ba | |||
| 1aef6a7a31 | |||
| c02034d6b0 | |||
| dc4555a168 | |||
| 0571f4d986 | |||
| 3c194dc127 | |||
| f7ba0e3dc7 | |||
| 33667d4de1 | |||
| bbf82399e1 | |||
| f927d1c750 | |||
| ef273520b5 | |||
| c8e6287761 | |||
| 7052f0bfb1 | |||
| c6f35f324e | |||
| 08f0ee3374 | |||
| d3c7941de8 | |||
| b1a9021fee | |||
| ab32fddd2b | |||
| ded73a3065 | |||
| 4c571197b8 | |||
| 9f276d7420 | |||
| f56dfeafa7 | |||
| f6f3810ffb | |||
| 9d32174ae4 | |||
| 26073e744d | |||
| 6e38f3d5b5 | |||
| 9199be9da5 | |||
| e3dcb013b6 | |||
| 5982f3ac41 | |||
| eb23f2f62b | |||
| 1effc3937a | |||
|
b69de655e4
|
|||
| 73fdf6c896 | |||
| bf0953b520 | |||
| 95aaea7ecf | |||
| 81c3a7ff13 | |||
| e322f7f7ad | |||
| 4997c53968 | |||
| cfcd023a83 | |||
| 34e7a0b718 | |||
| 7d38c31370 | |||
| 968553b532 | |||
| 0067b42f92 | |||
| 48776b6afc | |||
| 03cf6be52c | |||
| 6b5e66b1be | |||
| 25c3bf2356 | |||
| 7e5bbf3baa | |||
| 8adae23068 | |||
|
0e8d0965b7
|
|||
| cfc5de1bb9 | |||
| 63567eaa8b | |||
| e38e7a2936 | |||
| 718585ebe8 | |||
| 6b3515ed14 | |||
| fee47b271a | |||
| 1da8439372 | |||
| 2de1324458 | |||
| 4673ecdd85 | |||
| d92f24b0a1 | |||
| f80ee9d391 | |||
| 3da293252d | |||
| 6ae6a4d6cf | |||
|
129cd8aad1
|
|||
| 8dd2e57c70 | |||
| 1914211b85 | |||
| ea8eb0a68c | |||
| e9d0e855af | |||
| 8790fd0d82 | |||
| a77a7f3a32 | |||
| fbe962a7b7 | |||
| b5bdcc9dbc | |||
|
fd1685867e
|
|||
|
114cbf89c5
|
|||
| 000ad8b4ad | |||
| 0820fb542f | |||
| f0d4285bee | |||
| e1c10f0537 | |||
| f8c593de3e | |||
| 870f29e59f | |||
| a246c236db | |||
| cebeba4461 | |||
| b77165f6c8 | |||
| 9e85ee1473 | |||
| c113eb920b | |||
| 90cc64ece9 | |||
| ea4df08beb | |||
|
ca7e063888
|
|||
| 7b9dc1456b | |||
|
49af5f0cb1
|
|||
| e347d74a39 | |||
|
ffcf41b85a
|
|||
| 335a9f3b54 | |||
| c0e790b684 | |||
| 3b47365f10 | |||
|
862060875b
|
|||
| 06aeedc3b0 | |||
| fce85782f0 | |||
| 9cd8218eb4 | |||
| 98ef62b144 | |||
| e0cdd2aa58 | |||
| e22e8b339c | |||
|
6404f7a497
|
|||
| 5dc5043d46 | |||
| bcca005256 | |||
|
a9dea19531
|
|||
|
130e92dc5f
|
|||
| c4112a005f | |||
| 549f6617df | |||
| a1b0f49aab | |||
|
4468903535
|
|||
| df054ca451 | |||
| 1e2236dc9e | |||
|
6ccd7f4f25
|
|||
| b1a46f9d4e | |||
|
47dbf827f2
|
|||
|
df44ddbb8e
|
|||
|
9368d77bc8
|
|||
|
4d18cf4175
|
|||
|
bb0c08be06
|
|||
| a9a47c1690 | |||
| de073ce2da | |||
| af045687ae | |||
| c90ee3c9b1 | |||
| 83e99e7d0a | |||
|
80daed081d
|
|||
| f6e4458efa | |||
| 11b9a46802 | |||
| d2324d27df | |||
| c496ed025e | |||
| 4773ada816 | |||
| 6447a299b3 | |||
| b8e9e0d632 | |||
| 3179bb7ae3 | |||
| 73f6fe31d9 | |||
| 87eab3a04e | |||
| e751d35e38 | |||
| 84e30c0771 | |||
| bdb3b80f4a | |||
| d08c9663fe | |||
| a17b8dd122 | |||
| ae8cf15b0a | |||
| 8fd2c1790b | |||
| fe4efa7b97 | |||
| 78599baa5b | |||
| 8ad47c2e54 | |||
| ecce66b579 | |||
| a26e66649a | |||
| b8b5951883 | |||
| f860d80a81 | |||
| d27331cef0 | |||
|
e343ff7538
|
|||
| 4fe45bf125 | |||
| 89e35c4ee7 | |||
| 0f077a53bb | |||
| 51fd889f6a | |||
| e13c9b1a28 | |||
| 576a530886 | |||
| aa9fea580f | |||
| ccb8a10f92 | |||
| d86beb8308 | |||
| 429cf6e66d | |||
| 2dd0ac9392 | |||
| 780244b165 | |||
|
540e90a577
|
|||
| 1b527bab74 | |||
| 5b64cd0165 | |||
| e2fb56f505 | |||
| 6bdbe36c7f | |||
| 2a1415bc35 | |||
| 8e53b1fbf4 | |||
|
2cf992c948
|
|||
| ca9e7c3aa0 | |||
| db762cb496 | |||
| c33885a0ab | |||
| 1135d77c35 | |||
| d1d14c1097 | |||
| 41075e06a3 | |||
| a7d4c01089 | |||
| 7c30a66144 | |||
| 98c17196c8 | |||
| 6635019035 | |||
| 1ae73e7203 | |||
| 91c02eb499 | |||
| 32fc6b2641 | |||
| ce86af4486 | |||
| 5258a68682 | |||
| 1d69be641c | |||
| 3c571a1dc3 | |||
| 4c16e293c0 | |||
| 18fb8da472 | |||
| c9fa0ecc2c | |||
| 35db2fc74e | |||
| e10ab9d75f | |||
| d6100bcb76 | |||
| 47d122aa8d | |||
| b089801216 | |||
| 3c071cb300 | |||
| 8fe56f6ce0 | |||
| c9aa447a9f | |||
| 575f97e8d1 | |||
| dde2a965ec | |||
| b6ae9826b2 | |||
| 787f388168 | |||
| c4b01dbe07 | |||
| 992a95ff8d | |||
| 0c31b8c2b9 | |||
| b75b61f724 | |||
| 0e76377865 | |||
|
52e58f8df8
|
|||
| 35ba7679c9 | |||
| 9d5dc75c2b | |||
| 83cb25a2bb | |||
|
e7a09e8322
|
|||
| 2eae446669 | |||
| 2e86f80727 | |||
| 1311a07dcb | |||
| cd080189a9 | |||
| 2d86c77c39 | |||
| eea7f86bbe | |||
| baccd12e63 | |||
| 7fe90ee9af | |||
| a3b9cae8eb | |||
| 769cbd3f14 | |||
| ecc4da28ff | |||
| 4a14f41324 | |||
| 3f8fdce292 | |||
| 335c419a44 | |||
| 82d2aa1812 | |||
| 24bbb39c3d | |||
| d98a20afff | |||
| 737b133b8b | |||
| 177f923fc2 | |||
| fd0c89dce9 | |||
| 1e58f8e0d5 | |||
| 53349071aa | |||
| efe9e0a5a0 | |||
| 406cb8e4d0 | |||
|
6e7ee0110b
|
|||
| 6eac3a7796 | |||
| 890a6fd50e | |||
| e1cf0d8cb3 | |||
| 63d70d7e35 | |||
| 35a454f8b1 | |||
| 6651553246 | |||
| 311ef3f530 | |||
| d8cce2fb05 | |||
| 309b9423a4 | |||
| 323145a076 | |||
| 28b4dc5572 | |||
| c1173c20ae | |||
| 2d12077016 | |||
| c38db83cd1 | |||
| 25bd438d05 | |||
| f4355d620f | |||
| 887ba69c5e | |||
| 8e6c7aade7 | |||
| 0f0f50111f | |||
| 81a895e5a6 | |||
|
0fab1d5098
|
|||
| 4fbfc0f42e | |||
| 0ba3706c12 | |||
| c44ac87b5e | |||
| deefcd7045 | |||
| 4933857351 | |||
| 18616e8346 | |||
| dd075afb8d | |||
| 003bd3cd50 | |||
| a68fb437dc | |||
| 8aa57e8f68 | |||
| 84e8764054 | |||
| c40bbad892 | |||
| c159c6d20e | |||
| ae0d796e93 | |||
| 1ec0f9a3a7 | |||
| 0dad651959 | |||
| 24fcc4e6a2 | |||
| 4c7406d97b | |||
| 51affc5a55 | |||
| 10871a9b32 | |||
| 411b51f895 | |||
| 6a478209ea | |||
| 043ca65698 | |||
| 8131042a1c | |||
| 2fb2c1947a | |||
| 1cf4faa17f | |||
| 8bb6cb7279 | |||
|
376ae41b4f
|
|||
| dc6e57e815 | |||
| f8c7de447a | |||
| 92050aa31f | |||
| e20829bb2b | |||
| 1918ec3da4 | |||
| 735c387c58 | |||
| 98e2f660a6 | |||
| 7a0159a33f | |||
|
b4447bb15e
|
|||
|
2948905005
|
|||
| 53141720ca | |||
|
e84df1db08
|
|||
| 9539e6bb1b | |||
| 291ef08ad7 | |||
| 98b84772df | |||
| 26278066b8 | |||
| 5bdacce71a | |||
|
0b4bbdeef0
|
|||
| 3c3c939447 | |||
|
93a12a2909
|
|||
| c36802570a | |||
| 5d64a3a45c | |||
| e55d3400e6 | |||
| 7da95e7566 | |||
| b8ea2690fc | |||
| 3689486fa8 | |||
| 1787815299 | |||
| 2886835d18 | |||
| 097e1274db | |||
| 640ede7de2 | |||
| 122c87dab4 | |||
| 4647d7ad1e | |||
| f2c73e8bf6 | |||
| ba7bbd082a | |||
| 2466d2a4ab | |||
| 86a61a1a64 | |||
| 75e85c0339 | |||
| d654ef1b81 | |||
|
55114c3d39
|
|||
| 0575c45d66 | |||
| d004745244 | |||
| 36ef6a85c2 | |||
| aa8363428f | |||
| 36a387040f | |||
| 5cf10efe15 | |||
| 73624e21d7 | |||
|
f53321368d
|
|||
|
59e0f17769
|
|||
| 8661c931a1 | |||
| f484bea8c4 | |||
| 8c4f579cef | |||
| 30ccbf9aab | |||
| 5973c1341d | |||
| 0285549431 | |||
|
3256819705
|
|||
| 578354ddeb | |||
| 12adc9bc48 | |||
| a44937e130 | |||
| 04a88bf310 | |||
| c1e5ee97ee | |||
| 66dd948181 | |||
| 6fc87e952b | |||
| dade16a10c | |||
| aa617ae11d | |||
|
a87b480345
|
|||
| 69e232cd7c | |||
| 5adfe0e711 | |||
| 7fa947a945 | |||
| a4552cb418 | |||
| c15371c236 | |||
|
aaa111dd20
|
|||
|
8e02bfb0a2
|
|||
| 1e34fe77eb | |||
| 30ca9091d2 | |||
|
4dd79e3d73
|
|||
| bdfbf6c22e | |||
| b6a281b5f2 | |||
| e8305441bb | |||
|
dd5c0f3dc0
|
|||
| f35be93cb6 | |||
| 3840c40c90 | |||
|
2ba10d763d
|
|||
| ba099eff51 | |||
| 2196f5a417 | |||
| 3e26aa0166 | |||
| fdf6402b7c | |||
| c6a5711dff | |||
| f82935e71a | |||
| b8053b6273 | |||
| 4086ddf73c | |||
| 8d5ff3863d | |||
| 19233af4a7 | |||
| b6ed3c7ad9 | |||
| 05eaa96d37 | |||
| 9b8f98c2d2 | |||
| d6a862ee94 | |||
| 574e6c8926 | |||
|
15eda6f0de
|
|||
| 46b624568e | |||
| 72bbf92265 | |||
| 313e43016b | |||
| 570a0114b3 | |||
|
a9c07bf4c2
|
|||
| f126c23b45 | |||
| 65dc35a5fc | |||
| b57080c9d0 | |||
| 19a826608c | |||
| 316d4a4c34 | |||
| 48873bf5ee | |||
| f9cce092b5 | |||
| ae8aa21388 | |||
| 7ba1d3ae08 | |||
| fd6e4a0cbd | |||
| 856b140538 | |||
| fdfbf823a6 | |||
| 697e6c3d75 | |||
| fdbb57e33a | |||
| 07057d7206 | |||
| d0afaa7815 | |||
|
ee378dc6a3
|
|||
|
7ffa9a3881
|
|||
|
683bf6bd85
|
|||
|
17a3edbc07
|
|||
|
318e211196
|
|||
| bf0b8f5935 | |||
| 1232ca62b2 | |||
| f6a1e47361 | |||
| f96e6e860b | |||
| 44cc6ccd16 | |||
| 6abc49a3fc | |||
| 7d7836fedf | |||
|
f62a8037bc
|
|||
| 08bba81ba0 | |||
| 881ff122f8 | |||
| 0d384e8a3f | |||
|
f49156f637
|
|||
| 62869e4a77 | |||
| ab2c55fb39 | |||
| ef271a88a9 | |||
|
14d3733bf5
|
|||
| 2cc4e379bc | |||
| d39427c9ff | |||
| 2cb2ba9754 | |||
| 53d5c21b77 | |||
| 2b09f6c3c5 | |||
| 9f2be264c3 | |||
| 31e8d8cd49 | |||
| b4e4c9e2e3 | |||
| 35bf4eb6dc | |||
| 7ddca97cce | |||
| 3a78d90c4b | |||
|
84b7d2bf74
|
|||
| 16008218a5 | |||
| 0908099d34 | |||
|
74823a8947
|
|||
| c5dcc6f87a | |||
| 8f2a88f405 | |||
| d88b6f2ee8 | |||
| 21e32b5b09 | |||
| cd08e429e8 | |||
| 73fdd8a2e7 | |||
| c73a6a2c1f | |||
| 22e0952012 | |||
| fe52ddbb3a | |||
| e229458c39 | |||
| c64f5ce1df | |||
| 7a95c98573 | |||
| 03ba8a774c | |||
| 23da66078a | |||
| 1e50301c3c | |||
| 8de129c925 | |||
| 660f07f3cd | |||
| fed6e512d3 | |||
| f1b7a36103 | |||
| 74ee419e56 | |||
| f8aa95ccd5 | |||
| e013762fd9 | |||
| f1ca04fb07 | |||
|
4be0518209
|
|||
| 649c297d5b | |||
| bc43aa3fae | |||
|
2861e9d067
|
|||
| cc668505e6 | |||
| 7af4548a8b | |||
| 8ce0fffb02 | |||
| 03cd2ab9a3 | |||
|
6bf0e2c12b
|
|||
| 6fc27915c9 | |||
| f18dbcae92 | |||
| c4ac7c6b6c | |||
| d8681ac484 | |||
| 75d9b9d719 | |||
| 09909ed26c | |||
| 68b80c51c1 | |||
| feec5b7bf2 | |||
|
b1c31afed3
|
|||
|
7fc195e44e
|
|||
| 8cb62f47b3 | |||
|
52ffdde1ca
|
|||
|
75909a3797
|
|||
| d605d8fa63 | |||
| 6809635518 | |||
| ffde98ffa7 | |||
| e14974623b | |||
| 1ac137a27d | |||
| b6fe4a34a3 | |||
| 8c269381d5 | |||
| 523be169ad | |||
| d3d197ab65 | |||
| 3992e6f61d | |||
| 92c7880271 | |||
| 1017c29527 | |||
| 1016a20fc3 | |||
| 9b388ef2eb | |||
| d8287c0797 | |||
| 1258c47ef5 | |||
| 01a6a79fd9 | |||
| dd7786d690 | |||
| d717fddec9 | |||
| cfa48a2b81 | |||
| 49cc4b392f | |||
| f9625ac4f5 | |||
| 91f232c54c | |||
| a97ed6018a | |||
| 9ccea01c4c | |||
| ad68feb8af | |||
|
9200aad878
|
|||
| 8cfa8a7e75 | |||
| 2720e14e8b | |||
| aed67aa0a3 | |||
| a7ce266a41 | |||
| 2235427f48 | |||
|
d07a714d3c
|
|||
| ce366061fc | |||
| 6200594ac6 | |||
| fcda9f1c46 | |||
| 6c505385e4 | |||
| 765a9abc04 | |||
| e8c1a56f40 | |||
| 06bb2d9ea8 | |||
|
e6152b379f
|
|||
| d5a171eb76 | |||
| 5472216ae8 | |||
| f364b7c8c7 | |||
| 9ceecd52f6 | |||
| 846614ca2f | |||
| 26614c9270 | |||
|
75307f53ca
|
|||
| bf2248c53c | |||
| 1f071316d1 | |||
| 694434219c | |||
| 456c749db7 | |||
| c28dbd5a61 | |||
| 4274303441 | |||
| b51ad6526f | |||
| bd41057595 | |||
| b091326684 | |||
| 114ce7f43d | |||
| 0eb67f6144 | |||
| 42d13ff3c0 | |||
| 9203cc3396 | |||
| 93ea2f1627 | |||
| 6bf3ee8348 | |||
| e5ad0ba9cf | |||
| 84ef5b1750 | |||
| 7ea24199c0 | |||
| 5cc4ed6f9d | |||
| ab7e4ed2cf | |||
| 1ae6af6163 | |||
|
c6748fc1ad
|
|||
| 903738da9f | |||
| e0c775cba1 | |||
| e55acc3c6f | |||
| 09066e17d2 | |||
|
1d1041c6bf
|
|||
| 0fd1a1ec2b | |||
| 03ab5a77c7 | |||
| a268493733 | |||
| 9f043af220 | |||
| 35f3d16da8 | |||
| ac87dcaf95 | |||
| 31b7497bc3 | |||
| aa7f44233b | |||
| f52983b305 | |||
| dd94ae193d | |||
| e7e09b70aa | |||
| 9393a199fa | |||
| c4e30881d6 | |||
| 07ca26768e | |||
|
a7491261a0
|
|||
| f36d5df9ad | |||
| 6152399901 | |||
| bb4972abf9 | |||
| a7fc4d9189 | |||
| d941c83bce | |||
| 4fef4d9130 | |||
| 69df8b4f77 | |||
| f41e2307cd | |||
| 5eb524e2eb | |||
| 6b8332293c | |||
| e897686b12 | |||
| 144d01bec5 | |||
| e90226aa77 | |||
| a0af1955ca | |||
| 6190438540 | |||
| 14489eed7f | |||
| 49e4486cac | |||
| 190e70723b | |||
| c887dc969f | |||
| d0c553dd9c | |||
| eed427e50b | |||
| bb90aec4c4 | |||
| 44d966cc0d | |||
| 9b52ac4d6b | |||
| ae74889f56 | |||
| 83886cf762 | |||
| 4b9d7f7b1f | |||
| 9c64a7adce | |||
| 14cab92b11 | |||
| 28a069a8bb | |||
| a07c304a6d | |||
| f6dee4b4af | |||
| 21fc644d39 | |||
| b4d4ce9031 | |||
| 038adeeddb | |||
| c5ec82b20a | |||
| 622fa3fca6 | |||
|
087ddaa7c1
|
|||
| 4b21d4366c | |||
|
27da6e122d
|
|||
|
4a8dc7d944
|
|||
|
df13983b09
|
|||
|
9ce480e745
|
|||
| 82d843f5ce | |||
| 9f11184d14 | |||
| 6cd3b935e4 | |||
| be878a3f2a | |||
| 8f9cf64e87 | |||
| bfcd75c5eb | |||
| 72704d8da4 | |||
| 4ae55c588d | |||
| b8c940c49a | |||
| 914f446924 | |||
| 9aa02a4de5 | |||
| f31c91468a | |||
| dc2e1e72f4 | |||
| 5eb0a261b7 | |||
| f841e383f4 | |||
| 542c44e02d | |||
| 5b6910386c | |||
| 17ae6953f8 | |||
| 9f388bddf4 | |||
| 86bc1801b8 | |||
| 04750fca3c | |||
| 35b5550061 | |||
| 7e84fe35eb | |||
| 2fd263332a | |||
| 25e4a8f253 | |||
| ace54c334a | |||
| 2c6f9937cd | |||
| 831f5b974e | |||
| 9ef81d8b49 | |||
| 8bd6310837 | |||
| 660f5d06c4 | |||
| 6387c29c10 | |||
| 9b351731d6 | |||
| d7bab5ca17 | |||
| 8e3670e4ce | |||
| 37cfb1568b | |||
| 7c9b7ee0ad | |||
| 70bb3eb24a | |||
| 25abefe04f | |||
| 627d88cb38 | |||
| fa2626ba28 | |||
| 92d33b078c | |||
| bed52ea25c | |||
| 1b1f7c7e03 | |||
| 5474d7ebfa | |||
| 16347b17b3 | |||
| 99fd174228 | |||
| ad87e668a9 | |||
| e2ab56ce1e | |||
| e6f9aedae7 | |||
| 8d62f987d9 | |||
| d34fb6472a | |||
| 9c4c28404e | |||
| 692cdb1328 | |||
| 4b0319d164 | |||
| 3cefbd2095 | |||
| 5d39cf3a40 | |||
| a18e53b6ea | |||
| 766b4de009 | |||
| 6c24d11ac2 | |||
| 916b9ed121 | |||
| 55f49c7500 | |||
| 873aaec785 | |||
| 3fd15e414e | |||
| ef5beccd4b | |||
| 25941a82f2 | |||
| 70ecb12234 | |||
| 06e71fac68 | |||
| 8685fcbfdc | |||
| 1728a363dd | |||
| bb8cecb3ce | |||
| 4063d2cb06 | |||
| b6de9ceb45 | |||
| 476ebab80a | |||
| c8bc2e2e9e | |||
| c82ebc9501 | |||
| 9475e9f686 | |||
| 8e6593da55 | |||
| 54289fc5bf | |||
| d52469c5bc | |||
| cb2b4ce867 | |||
| 0d97bce67e | |||
| a59083fc9b | |||
| c3bf827cac | |||
| d6c7ccb429 | |||
| 1ed6a2ad8f | |||
| 9bee6ab605 | |||
| e8837aa51c | |||
| 6ec0771741 | |||
| 82a768e989 | |||
| 4f29428d6f | |||
| 20ce86c7b7 | |||
| 283e45f34d | |||
| 5a108a6bbf | |||
| 9cc7d466ba | |||
| 640a98b4ca | |||
| 22855cec52 | |||
| 006e10b39f | |||
| 6b6a799696 | |||
| 129784cb47 | |||
| b976114489 | |||
| 28eb8fa9e1 | |||
| d4453319ee | |||
| 4646a8e3ab | |||
| ab3e0a3891 | |||
| 52beaf9cc6 | |||
| 24b5693f22 | |||
| 8db459e636 | |||
| 162c155002 | |||
| 56317eb04b | |||
| f611d9ffb8 | |||
| f44b62bf08 | |||
| 35a8907d81 | |||
| a5c3ac78de |
+1
-2
@@ -1,6 +1,5 @@
|
||||
.gitignore
|
||||
/.gitlab
|
||||
.gitlab-ci.yml
|
||||
/.gitea
|
||||
.graphqlconfig
|
||||
/exported
|
||||
/k8s
|
||||
|
||||
@@ -0,0 +1,94 @@
|
||||
name: schemas
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
tags:
|
||||
- 'v*'
|
||||
pull_request:
|
||||
branches: [main]
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
deploy_prod:
|
||||
description: 'Deploy to production'
|
||||
required: false
|
||||
default: 'false'
|
||||
type: boolean
|
||||
|
||||
jobs:
|
||||
check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
- uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version: 'stable'
|
||||
- name: Generate and format check
|
||||
run: |
|
||||
go install mvdan.cc/gofumpt@latest
|
||||
go install golang.org/x/tools/cmd/goimports@latest
|
||||
go generate ./...
|
||||
git diff --stat --exit-code
|
||||
- name: Run tests
|
||||
run: go test -race -coverprofile=coverage.txt ./...
|
||||
|
||||
vulnerabilities:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
- uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version: 'stable'
|
||||
- name: Check vulnerabilities
|
||||
run: |
|
||||
go install golang.org/x/vuln/cmd/govulncheck@latest
|
||||
govulncheck ./...
|
||||
|
||||
check-release:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version: '24'
|
||||
- uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version: 'stable'
|
||||
- name: Check goreleaser config
|
||||
uses: goreleaser/goreleaser-action@v7
|
||||
with:
|
||||
version: '~> v2'
|
||||
args: check
|
||||
- name: Test release build
|
||||
uses: goreleaser/goreleaser-action@v7
|
||||
with:
|
||||
version: '~> v2'
|
||||
args: release --snapshot --clean
|
||||
|
||||
build:
|
||||
needs: [check, vulnerabilities, check-release]
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
BUILDTOOLS_CONTENT: ${{ secrets.BUILDTOOLS_CONTENT }}
|
||||
GITEA_REPOSITORY: ${{ gitea.repository }}
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
- uses: buildtool/setup-buildtools-action@v1
|
||||
- name: Build and push
|
||||
run: unset GITEA_TOKEN && build && push
|
||||
|
||||
deploy-prod:
|
||||
needs: build
|
||||
if: gitea.ref == 'refs/heads/main'
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
BUILDTOOLS_CONTENT: ${{ secrets.BUILDTOOLS_CONTENT }}
|
||||
GITEA_REPOSITORY: ${{ gitea.repository }}
|
||||
environment: prod
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
- uses: buildtool/setup-buildtools-action@v1
|
||||
- name: Deploy to production
|
||||
run: deploy prod
|
||||
@@ -0,0 +1,33 @@
|
||||
name: Goreleaser
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*'
|
||||
|
||||
env:
|
||||
RELEASE_TOKEN_FILE: /runner-secrets/release-token
|
||||
|
||||
jobs:
|
||||
release:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version: '24'
|
||||
- uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version: 'stable'
|
||||
- name: Install goreleaser
|
||||
uses: goreleaser/goreleaser-action@v7
|
||||
with:
|
||||
version: '~> v2'
|
||||
install-only: true
|
||||
- name: Release
|
||||
run: |
|
||||
GITEA_TOKEN=$(cat "${RELEASE_TOKEN_FILE}")
|
||||
export GITEA_TOKEN
|
||||
goreleaser release --clean
|
||||
@@ -0,0 +1,25 @@
|
||||
name: pre-commit
|
||||
permissions: read-all
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
jobs:
|
||||
pre-commit:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
SKIP: no-commit-to-branch
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
- uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version: 'stable'
|
||||
- uses: actions/setup-python@v6
|
||||
with:
|
||||
python-version: '3.14'
|
||||
- name: Install goimports
|
||||
run: go install golang.org/x/tools/cmd/goimports@latest
|
||||
- uses: pre-commit/action@v3.0.1
|
||||
@@ -0,0 +1,11 @@
|
||||
name: Release
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
|
||||
jobs:
|
||||
release:
|
||||
uses: unboundsoftware/shared-workflows/.gitea/workflows/Release.yml@main
|
||||
with:
|
||||
tag_only: true
|
||||
@@ -1,7 +1,12 @@
|
||||
.idea
|
||||
.claude
|
||||
.testCoverage.txt
|
||||
.testCoverage.txt.tmp
|
||||
coverage.html
|
||||
coverage.out
|
||||
/exported
|
||||
/release
|
||||
/schemactl
|
||||
/service
|
||||
CHANGES.md
|
||||
VERSION
|
||||
|
||||
@@ -1,90 +0,0 @@
|
||||
include:
|
||||
- template: 'Workflows/MergeRequest-Pipelines.gitlab-ci.yml'
|
||||
|
||||
stages:
|
||||
- build
|
||||
- deploy-prod
|
||||
- release
|
||||
|
||||
variables:
|
||||
DOCKER_HOST: tcp://docker:2376
|
||||
DOCKER_TLS_CERTDIR: "/certs"
|
||||
DOCKER_TLS_VERIFY: 1
|
||||
DOCKER_CERT_PATH: "$DOCKER_TLS_CERTDIR/client"
|
||||
DOCKER_DRIVER: overlay2
|
||||
|
||||
.buildtools:
|
||||
image: buildtool/build-tools:${BUILDTOOLS_VERSION}
|
||||
services:
|
||||
- docker:${DOCKER_DIND_VERSION}
|
||||
|
||||
run-pre-commit:
|
||||
stage: .pre
|
||||
image: unbound/pre-commit
|
||||
variables:
|
||||
PRE_COMMIT_HOME: ${CI_PROJECT_DIR}/.cache/pre-commit
|
||||
cache:
|
||||
- key:
|
||||
files:
|
||||
- .pre-commit-config.yaml
|
||||
paths:
|
||||
- ${PRE_COMMIT_HOME}
|
||||
script:
|
||||
- pre-commit run --all-files
|
||||
|
||||
build:
|
||||
extends: .buildtools
|
||||
stage: build
|
||||
script:
|
||||
- build
|
||||
- curl -Os https://uploader.codecov.io/latest/linux/codecov
|
||||
- chmod +x codecov
|
||||
- ./codecov -t ${CODECOV_TOKEN} -R $CI_PROJECT_DIR -C $CI_COMMIT_SHA -r $CI_PROJECT_PATH
|
||||
- push
|
||||
|
||||
vulnerabilities:
|
||||
stage: build
|
||||
image: golang:1.20.4
|
||||
script:
|
||||
- go install golang.org/x/vuln/cmd/govulncheck@latest
|
||||
- govulncheck ./...
|
||||
|
||||
deploy-prod:
|
||||
extends: .buildtools
|
||||
stage: deploy-prod
|
||||
before_script:
|
||||
- echo Deploy to prod
|
||||
script:
|
||||
- deploy prod
|
||||
rules:
|
||||
- if: $CI_COMMIT_BRANCH == "main"
|
||||
environment:
|
||||
name: prod
|
||||
|
||||
release:
|
||||
stage: release
|
||||
image: docker:stable
|
||||
services:
|
||||
- docker:${DOCKER_DIND_VERSION}
|
||||
|
||||
variables:
|
||||
GORELEASER_IMAGE: goreleaser/goreleaser:v1.11.5-amd64
|
||||
# Disable shallow cloning so that goreleaser can diff between tags to
|
||||
# generate a changelog.
|
||||
GIT_DEPTH: 0
|
||||
|
||||
# Only run this release job for tags, not every commit (for example).
|
||||
rules:
|
||||
- if: $CI_COMMIT_TAG
|
||||
|
||||
script: |
|
||||
docker pull $GORELEASER_IMAGE
|
||||
|
||||
# GITLAB_TOKEN is needed to create GitLab releases.
|
||||
# DOCKER_* are needed to push Docker images.
|
||||
docker run --rm --privileged \
|
||||
-v $PWD:/src \
|
||||
-w /src \
|
||||
-v /var/run/docker.sock:/var/run/docker.sock \
|
||||
-e GITLAB_TOKEN \
|
||||
$GORELEASER_IMAGE release --rm-dist
|
||||
@@ -1,19 +0,0 @@
|
||||
# To get started with Dependabot version updates, you'll need to specify which
|
||||
# package ecosystems to update and where the package manifests are located.
|
||||
# Please see the documentation for all configuration options:
|
||||
# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
|
||||
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: "gomod"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "daily"
|
||||
open-pull-requests-limit: 5
|
||||
rebase-strategy: none
|
||||
- package-ecosystem: "docker"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "daily"
|
||||
open-pull-requests-limit: 5
|
||||
rebase-strategy: none
|
||||
@@ -1,2 +1,22 @@
|
||||
version: "2"
|
||||
run:
|
||||
allow-parallel-runners: true
|
||||
linters:
|
||||
exclusions:
|
||||
generated: lax
|
||||
presets:
|
||||
- comments
|
||||
- common-false-positives
|
||||
- legacy
|
||||
- std-error-handling
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
formatters:
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
|
||||
+10
-12
@@ -1,4 +1,9 @@
|
||||
project_name: unbound-schemas
|
||||
version: 2
|
||||
|
||||
gitea_urls:
|
||||
api: http://gitea-http.gitea.svc.cluster.local:3000/api/v1
|
||||
download: https://gitea.unbound.se
|
||||
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
@@ -15,14 +20,13 @@ builds:
|
||||
- amd64
|
||||
- arm64
|
||||
|
||||
brews:
|
||||
homebrew_casks:
|
||||
- name: unbound-schemas
|
||||
tap:
|
||||
repository:
|
||||
owner: unboundsoftware
|
||||
name: homebrew-taps
|
||||
folder: Formula
|
||||
install: |
|
||||
bin.install "schemactl"
|
||||
binaries: [schemactl]
|
||||
directory: Casks
|
||||
commit_author:
|
||||
name: "Joakim Olsson"
|
||||
email: joakim@unbound.se
|
||||
@@ -30,18 +34,12 @@ brews:
|
||||
|
||||
archives:
|
||||
- id: unbound-schemas
|
||||
replacements:
|
||||
darwin: Darwin
|
||||
linux: Linux
|
||||
windows: Windows
|
||||
386: i386
|
||||
amd64: x86_64
|
||||
|
||||
checksum:
|
||||
name_template: 'checksums.txt'
|
||||
|
||||
snapshot:
|
||||
name_template: "{{ .Tag }}-next"
|
||||
version_template: "{{ .Tag }}-next"
|
||||
|
||||
changelog:
|
||||
sort: asc
|
||||
|
||||
+11
-18
@@ -2,7 +2,7 @@
|
||||
# See https://pre-commit.com/hooks.html for more hooks
|
||||
repos:
|
||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||
rev: v4.4.0
|
||||
rev: v6.0.0
|
||||
hooks:
|
||||
- id: trailing-whitespace
|
||||
- id: end-of-file-fixer
|
||||
@@ -10,19 +10,8 @@ repos:
|
||||
args:
|
||||
- --allow-multiple-documents
|
||||
- id: check-added-large-files
|
||||
- repo: https://github.com/jumanjihouse/pre-commit-hooks
|
||||
rev: 3.0.0
|
||||
hooks:
|
||||
- id: markdownlint
|
||||
- repo: https://gitlab.com/devopshq/gitlab-ci-linter
|
||||
rev: v1.0.3
|
||||
hooks:
|
||||
- id: gitlab-ci-linter
|
||||
args:
|
||||
- --project
|
||||
- unboundsoftware/schemas
|
||||
- repo: https://github.com/alessandrojcm/commitlint-pre-commit-hook
|
||||
rev: v9.5.0
|
||||
rev: v9.24.0
|
||||
hooks:
|
||||
- id: commitlint
|
||||
stages: [ commit-msg ]
|
||||
@@ -34,14 +23,18 @@ repos:
|
||||
- id: go-imports
|
||||
args:
|
||||
- -local
|
||||
- gitlab.com/unboundsoftware/schemas
|
||||
- git.unbound.se/unboundsoftware/schemas
|
||||
- repo: https://github.com/lietu/go-pre-commit
|
||||
rev: v0.0.1
|
||||
rev: v1.0.0
|
||||
hooks:
|
||||
- id: go-test
|
||||
- id: gofumpt
|
||||
- repo: https://github.com/golangci/golangci-lint
|
||||
rev: v1.52.2
|
||||
rev: v2.10.1
|
||||
hooks:
|
||||
- id: golangci-lint
|
||||
exclude: '^graph/generated/.*$|^graph/model/models_gen.go|^tools/.*$$'
|
||||
- id: golangci-lint-full
|
||||
- repo: https://github.com/gitleaks/gitleaks
|
||||
rev: v8.30.0
|
||||
hooks:
|
||||
- id: gitleaks
|
||||
exclude: '^ctl/generated.go|graph/generated/.*$|^graph/model/models_gen.go|^tools/.*$$'
|
||||
|
||||
+984
@@ -0,0 +1,984 @@
|
||||
# Changelog
|
||||
|
||||
All notable changes to this project will be documented in this file.
|
||||
|
||||
## [0.9.3] - 2026-02-23
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.249 (#702)
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.250 (#704)
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.251
|
||||
- *(deps)* Update module gitlab.com/unboundsoftware/eventsourced/pg to v1.18.3
|
||||
- *(deps)* Update module github.com/vektah/gqlparser/v2 to v2.5.32
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.252 (#714)
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.253 (#716)
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.87
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.254 (#718)
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.255
|
||||
- *(deps)* Update module github.com/pressly/goose/v3 to v3.27.0
|
||||
- *(deps)* Update module gitlab.com/unboundsoftware/eventsourced/pg to v1.18.4 (#727)
|
||||
- Prevent OOM on rapid schema publishing
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v2.10.1 (#706)
|
||||
- *(deps)* Update goreleaser/goreleaser-action action to v7
|
||||
- *(deps)* Update actions/setup-node action to v6
|
||||
|
||||
## [0.9.2] - 2026-02-13
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- *(deps)* Update module github.com/auth0/go-jwt-middleware/v2 to v3
|
||||
- Migrate to go-jwt-middleware v3 API
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.243 (#680)
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.244 (#681)
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.245 (#682)
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.246 (#683)
|
||||
- *(deps)* Update module gitlab.com/unboundsoftware/eventsourced/pg to v1.18.0 (#685)
|
||||
- *(deps)* Update module gitlab.com/unboundsoftware/eventsourced/pg to v1.18.1 (#686)
|
||||
- *(deps)* Update opentelemetry-go monorepo (#687)
|
||||
- *(deps)* Update module go.opentelemetry.io/contrib/bridges/otelslog to v0.15.0 (#688)
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.247 (#691)
|
||||
- *(deps)* Update module github.com/alecthomas/kong to v1.14.0 (#692)
|
||||
- *(deps)* Update eventsourced (#693)
|
||||
- *(deps)* Update module golang.org/x/crypto to v0.48.0 (#694)
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.248 (#698)
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- *(deps)* Update node.js to cd6fb7e (#684)
|
||||
- *(deps)* Update golang:1.25.6 docker digest to ceda080 (#689)
|
||||
- *(deps)* Update golang docker tag to v1.25.7
|
||||
- *(deps)* Update golang:1.25.7 docker digest to d2819ff (#695)
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v2.9.0 (#697)
|
||||
- *(deps)* Update golang docker tag to v1.26.0 (#696)
|
||||
- *(deps)* Update node.js to v24.13.1 (#699)
|
||||
|
||||
## [0.9.1] - 2026-01-18
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- *(ci)* Run build job on tags for Docker images
|
||||
|
||||
## [0.9.0] - 2026-01-18
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
- Add commands for managing organizations and users
|
||||
- Migrate from GitLab CI to Gitea Actions
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.239
|
||||
- *(k8s)* Update ingress class configuration for schema
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.84
|
||||
- *(docker)* Update Node.js version to 24.11.1-alpine
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.240
|
||||
- *(deps)* Update opentelemetry-go monorepo
|
||||
- *(deps)* Update module golang.org/x/crypto to v0.46.0
|
||||
- *(deps)* Update module go.opentelemetry.io/contrib/bridges/otelslog to v0.14.0
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.241
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.85
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.242
|
||||
- *(deps)* Update module golang.org/x/crypto to v0.47.0
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.86
|
||||
|
||||
### 🚜 Refactor
|
||||
|
||||
- *(cache)* Optimize test setup and reduce iterations
|
||||
|
||||
### 🧪 Testing
|
||||
|
||||
- Add validation and event tests for organization and API key
|
||||
- *(cache)* Update tests to use legacy hash for speed
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.30.0
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.13.0
|
||||
- *(deps)* Update golang docker tag to v1.25.5
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v2.7.0
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v2.7.1
|
||||
- *(deps)* Update node.js to 682368d
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v2.7.2
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.13.1
|
||||
- *(deps)* Update golang:1.25.5 docker digest to 0c27bcf
|
||||
- *(deps)* Update node.js to v24.12.0
|
||||
- *(deps)* Update node.js to c921b97
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.13.2
|
||||
- *(deps)* Update golang:1.25.5 docker digest to ad03ba9
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v2.8.0
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.13.3
|
||||
- *(deps)* Update golang:1.25.5 docker digest to 3a01526
|
||||
- *(deps)* Update pre-commit hook alessandrojcm/commitlint-pre-commit-hook to v9.24.0
|
||||
- *(deps)* Update node.js to v24.13.0
|
||||
- *(deps)* Update golang docker tag to v1.25.6
|
||||
|
||||
## [0.8.0] - 2025-11-21
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
- *(tests)* Add unit tests for WebSocket initialization logic
|
||||
- Add latestSchema query for retrieving schema updates
|
||||
- Add CLAUDE.md for project documentation and guidelines
|
||||
- *(cache)* Implement hashed API key storage and retrieval
|
||||
- *(health)* Add health checking endpoints and logic
|
||||
- *(cache)* Add concurrency safety and logging improvements
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- Enhance API key handling and logging in middleware
|
||||
- Add command executor interface for better testing
|
||||
- *(deps)* Update module golang.org/x/crypto to v0.45.0
|
||||
- *(deps)* Update module github.com/auth0/go-jwt-middleware/v2 to v2.3.1
|
||||
|
||||
### 🧪 Testing
|
||||
|
||||
- Enhance assertions for version and subscription config
|
||||
- *(cache)* Reduce goroutines for race detector stability
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.29.1
|
||||
- *(deps)* Update node.js to v24
|
||||
|
||||
## [0.7.0] - 2025-11-19
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
- Add Cosmo Router config generation and PubSub support
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.231
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.232
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.233
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.234
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.235
|
||||
- *(deps)* Update module github.com/vektah/gqlparser/v2 to v2.5.31
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.236
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.82
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.237
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.83
|
||||
- *(deps)* Update module github.com/alecthomas/kong to v1.13.0
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.238
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- Add git-cliff configuration for changelog generation
|
||||
- *(deps)* Update golang:1.25.3 docker digest to 69d1009
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.12.6
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.12.7
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v2.6.0
|
||||
- *(deps)* Update golang:1.25.3 docker digest to 9ac0edc
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v2.6.1
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.29.0
|
||||
- *(deps)* Update golang docker tag to v1.25.4
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v2.6.2
|
||||
- *(deps)* Update golang:1.25.4 docker digest to efe81fa
|
||||
|
||||
## [0.6.6] - 2025-10-14
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.227
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.228
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.229
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.81
|
||||
- *(deps)* Update module github.com/pressly/goose/v3 to v3.26.0
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.230
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v2.5.0
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.12.3
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.12.4
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.12.5
|
||||
- *(deps)* Update golang:1.25.1 docker digest to 12640a4
|
||||
- *(deps)* Update pre-commit hook alessandrojcm/commitlint-pre-commit-hook to v9.23.0
|
||||
- *(deps)* Update golang docker tag to v1.25.2
|
||||
- *(deps)* Update golang docker tag to v1.25.3
|
||||
|
||||
## [0.6.5] - 2025-09-18
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.226
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.79
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.80
|
||||
- *(deps)* Update module gitlab.com/unboundsoftware/eventsourced/eventsourced to v1.19.3
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- *(deps)* Update pre-commit hook lietu/go-pre-commit to v1
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.12.1
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.12.2
|
||||
|
||||
## [0.6.4] - 2025-09-11
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- *(deps)* Update module gitlab.com/unboundsoftware/eventsourced/amqp to v1.9.0
|
||||
|
||||
## [0.6.3] - 2025-09-09
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- *(deps)* Update module github.com/stretchr/testify to v1.11.0
|
||||
- *(deps)* Update module github.com/pressly/goose/v3 to v3.25.0
|
||||
- *(deps)* Update module github.com/stretchr/testify to v1.11.1
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.222
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.223
|
||||
- *(deps)* Update opentelemetry-go monorepo
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.224
|
||||
- *(deps)* Update module go.opentelemetry.io/contrib/bridges/otelslog to v0.13.0
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.225
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- *(deps)* Update golang:1.25.0 docker digest to f6b9e1a
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.12.0
|
||||
- *(deps)* Update golang docker tag to v1.25.1
|
||||
- *(deps)* Update golang:1.25.1 docker digest to 53f7808
|
||||
|
||||
## [0.6.2] - 2025-08-22
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- Remove conflicts entry from homebrew-taps config
|
||||
|
||||
## [0.6.1] - 2025-08-22
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.195
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.196
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.197
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.198
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.199
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.200
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.202
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.203
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.204
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.207
|
||||
- *(deps)* Update module github.com/golang-jwt/jwt/v5 to v5.2.3
|
||||
- *(deps)* Update module github.com/alecthomas/kong to v1.12.1
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.208
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.210
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.78
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.212
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.213
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.215
|
||||
- *(deps)* Update module github.com/golang-jwt/jwt/v5 to v5.3.0
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.216
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.217
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.218
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.219
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.220
|
||||
- *(deps)* Update module github.com/sparetimecoders/goamqp to v0.3.3
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.221
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v2.2.1
|
||||
- *(deps)* Update golang:1.24.4 docker digest to 9f820b6
|
||||
- *(deps)* Update golang docker tag to v1.24.5
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.11.0
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v2.2.2
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.28.0
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v2.3.0
|
||||
- *(deps)* Update golang:1.24.5 docker digest to 0a156a4
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.11.1
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.11.2
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v2.3.1
|
||||
- *(deps)* Update golang docker tag to v1.24.6
|
||||
- *(deps)* Update pre-commit hook pre-commit/pre-commit-hooks to v6
|
||||
- *(deps)* Update golang:1.24.6 docker digest to 958bfd1
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v2.4.0
|
||||
- *(deps)* Update golang:1.24.6 docker digest to cd8f653
|
||||
- *(deps)* Update golang docker tag to v1.25.0
|
||||
|
||||
## [0.6.0] - 2025-06-29
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
- *(k8s)* Add OpenTelemetry exporter endpoint to deploy.yaml
|
||||
- Add build version injection via CI_COMMIT argument
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.190
|
||||
- *(deps)* Update module github.com/vektah/gqlparser/v2 to v2.5.28
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.75
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.192
|
||||
- *(deps)* Update module github.com/alecthomas/kong to v1.12.0
|
||||
- *(deps)* Update opentelemetry-go monorepo
|
||||
- *(deps)* Update module go.opentelemetry.io/contrib/bridges/otelslog to v0.12.0
|
||||
- *(deps)* Update module github.com/vektah/gqlparser/v2 to v2.5.29
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.194
|
||||
- *(deps)* Update module github.com/vektah/gqlparser/v2 to v2.5.30
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.76
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v2.2.0
|
||||
|
||||
## [0.5.3] - 2025-06-13
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.187
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.188
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.189
|
||||
|
||||
### 🚜 Refactor
|
||||
|
||||
- Remove Sentry integration and replace with OpenTelemetry
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- *(deps)* Update golang:1.24.4 docker digest to 3494bbe
|
||||
|
||||
## [0.5.2] - 2025-06-09
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- *(goreleaser)* Specify binary name in configuration
|
||||
|
||||
## [0.5.1] - 2025-06-09
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- *(deps)* Update module github.com/alecthomas/kong to v1.11.0
|
||||
- *(deps)* Update module github.com/getsentry/sentry-go to v0.33.0
|
||||
- *(deps)* Update module github.com/khan/genqlient to v0.8.1
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.179
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.180
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.181
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.182
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.183
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.74
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.184
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.185
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.186
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- *(deps)* Update golang:1.24.3 docker digest to f255a7d
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.27.0
|
||||
- *(deps)* Update golang docker tag to v1.24.4
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.27.1
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.27.2
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.10.2
|
||||
|
||||
## [0.5.0] - 2025-05-15
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
- *(k8s)* Add PodDisruptionBudget for schemas service
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- *(deps)* Update module github.com/vektah/gqlparser/v2 to v2.5.26
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.173
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.174
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.175
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.176
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.73
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.177
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.178
|
||||
- *(deps)* Update module github.com/pressly/goose/v3 to v3.24.3
|
||||
- *(deps)* Update module github.com/vektah/gqlparser/v2 to v2.5.27
|
||||
- *(k8s)* Update apiVersion for external secrets
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v2.1.4
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v2.1.5
|
||||
- *(deps)* Update golang:1.24.2 docker digest to bf7899c
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.25.0
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.9.0
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.25.1
|
||||
- *(ci)* Update GitLab CI configuration for templates
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v2.1.6
|
||||
- *(deps)* Update golang docker tag to v1.24.3
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.26.0
|
||||
|
||||
## [0.4.1] - 2025-04-24
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- *(deps)* Update module github.com/vektah/gqlparser/v2 to v2.5.25
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.170
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.71
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.72
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.171
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.172
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v2.1.1
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v2.1.2
|
||||
|
||||
## [0.4.0] - 2025-04-12
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
- *(service)* Implement graceful shutdown for HTTP server
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- *(secrets)* Remove namespace from ExternalSecret definition
|
||||
- *(deps)* Update module github.com/sparetimecoders/goamqp to v0.3.2
|
||||
- *(deps)* Update module github.com/vektah/gqlparser/v2 to v2.5.24
|
||||
- *(deps)* Update module github.com/getsentry/sentry-go to v0.32.0
|
||||
- *(k8s)* Increase CPU request for better performance
|
||||
- *(deps)* Update module gitlab.com/unboundsoftware/eventsourced/amqp to v1.8.1
|
||||
|
||||
### 🚜 Refactor
|
||||
|
||||
- *(deploy)* Remove cpu and memory limits for schemas
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.24.3
|
||||
|
||||
## [0.3.0] - 2025-04-08
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
- *(k8s)* Add RabbitMQ configurations and update secrets
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.161
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.162
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.163
|
||||
- *(deps)* Update module github.com/auth0/go-jwt-middleware/v2 to v2.3.0
|
||||
- *(deps)* Update module gitlab.com/unboundsoftware/eventsourced/eventsourced to v1.18.0
|
||||
- *(deps)* Update module gitlab.com/unboundsoftware/eventsourced/eventsourced to v1.18.1
|
||||
- *(deps)* Update module github.com/alecthomas/kong to v1.9.0
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.164
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.165
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.68
|
||||
- *(deps)* Update module github.com/golang-jwt/jwt/v5 to v5.2.2
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.166
|
||||
- *(deps)* Update module gitlab.com/unboundsoftware/eventsourced/eventsourced to v1.19.0
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.69
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.70
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.167
|
||||
- *(deps)* Update module gitlab.com/unboundsoftware/eventsourced/eventsourced to v1.19.1
|
||||
- *(deps)* Update module github.com/pressly/goose/v3 to v3.24.2
|
||||
- *(deps)* Update module github.com/alecthomas/kong to v1.10.0
|
||||
- *(deps)* Update eventsourced
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.168
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools/v2 to v2.0.0-rc.169
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.64.6
|
||||
- *(deps)* Update golang docker tag to v1.24.1
|
||||
- *(deps)* Update pre-commit hook alessandrojcm/commitlint-pre-commit-hook to v9.22.0
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.64.7
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.8.0
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.8.1
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.64.8
|
||||
- *(deps)* Update golang:1.24.1 docker digest to 5ecf333
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.24.2
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v2
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v2.0.1
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v2.0.2
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.8.2
|
||||
- *(deps)* Update golang docker tag to v1.24.2
|
||||
- *(deps)* Update golang:1.24.2 docker digest to aebb7df
|
||||
|
||||
## [0.2.0] - 2025-02-28
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
- *(dependencies)* Add Eventsourced package group for updates
|
||||
- *(sdlmerge)* Add shared types for GraphQL schema handling
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.64
|
||||
- *(deps)* Update module github.com/alecthomas/kong to v1.7.0
|
||||
- *(deps)* Update module github.com/khan/genqlient to v0.8.0
|
||||
- *(deps)* Update module github.com/alecthomas/kong to v1.8.0
|
||||
- *(ci)* Update golang image to improve compatibility
|
||||
- *(ci)* Add resource group to production deployment configuration
|
||||
- *(deps)* Update module github.com/alecthomas/kong to v1.8.1
|
||||
- *(deps)* Update dependencies to latest versions
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.65
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.66
|
||||
- *(deps)* Update eventsourced
|
||||
- *(deps)* Update module github.com/vektah/gqlparser/v2 to v2.5.23
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.23.3
|
||||
- *(deps)* Update golang:1.23.5 docker digest to e213430
|
||||
- *(deps)* Update dependency go to v1.23.6
|
||||
- *(deps)* Update golang docker tag to v1.23.6
|
||||
- *(deps)* Update golang:1.23.6 docker digest to 958bd2e
|
||||
- *(deps)* Update golang:1.23.6 docker digest to 9271129
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.7.0
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.64.2
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.64.3
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.64.4
|
||||
- *(go)* Update go version to 1.23.6 and remove toolchain
|
||||
- *(deps)* Update golang docker tag to v1.24.0
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.64.5
|
||||
- *(deps)* Update pre-commit hook alessandrojcm/commitlint-pre-commit-hook to v9.21.0
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.24.0
|
||||
- *(deps)* Update golang:1.24.0 docker digest
|
||||
- *(deps)* Update golang:1.24.0 docker digest to a14c5a6
|
||||
- *(deps)* Update golang:1.24.0 docker digest to 58cf31c
|
||||
- *(docker)* Update base image architecture to amd64
|
||||
|
||||
## [0.1.1] - 2025-01-24
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- *(deps)* Update module github.com/getsentry/sentry-go to v0.31.0
|
||||
- *(deps)* Update module github.com/getsentry/sentry-go to v0.31.1
|
||||
- *(deps)* Update module github.com/alecthomas/kong to v1.6.1
|
||||
- *(deps)* Update module github.com/pressly/goose/v3 to v3.24.1
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.63
|
||||
- *(k8s)* Standardize app label to app.kubernetes.io/name
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.63.1
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.63.2
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.5.1
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.63.3
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.63.4
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.23.0
|
||||
- *(deps)* Update golang:1.23.4 docker digest to 3b1a7de
|
||||
- *(deps)* Update golang:1.23.4 docker digest to 08e1417
|
||||
- *(deps)* Update golang:1.23.4 docker digest to 585103a
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.23.1
|
||||
- *(deps)* Update golang:1.23.4 docker digest to 5305905
|
||||
- *(deps)* Update golang:1.23.4 docker digest to 9820aca
|
||||
- *(deps)* Update dependency go to v1.23.5
|
||||
- *(deps)* Update golang docker tag to v1.23.5
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.6.0
|
||||
- *(deps)* Update golang:1.23.5 docker digest to 8c10f21
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.6.1
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.23.2
|
||||
|
||||
## [0.1.0] - 2025-01-01
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
- Add timeout configuration to golangci-lint
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- *(deps)* Update module github.com/alecthomas/kong to v1.3.0
|
||||
- No digest pinning of own image
|
||||
- *(deps)* Update module github.com/alecthomas/kong to v1.4.0
|
||||
- *(deps)* Update module github.com/vektah/gqlparser/v2 to v2.5.19
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.56
|
||||
- *(deps)* Update module github.com/pressly/goose/v3 to v3.23.0
|
||||
- *(deps)* Update module github.com/stretchr/testify to v1.10.0
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.57
|
||||
- *(deps)* Update module github.com/vektah/gqlparser/v2 to v2.5.20
|
||||
- *(deps)* Update module gitlab.com/unboundsoftware/eventsourced/eventsourced to v1.16.0
|
||||
- *(deps)* Update module gitlab.com/unboundsoftware/eventsourced/amqp to v1.7.0
|
||||
- *(deps)* Update module gitlab.com/unboundsoftware/eventsourced/pg to v1.15.0
|
||||
- *(deps)* Update module github.com/alecthomas/kong to v1.5.0
|
||||
- *(deps)* Update module github.com/alecthomas/kong to v1.5.1
|
||||
- *(deps)* Update module github.com/getsentry/sentry-go to v0.30.0
|
||||
- *(deps)* Update module github.com/alecthomas/kong to v1.6.0
|
||||
- *(k8s)* Adjust CPU requests for better resource allocation
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.58
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.59
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.60
|
||||
- *(deps)* Update module github.com/pressly/goose/v3 to v3.23.1
|
||||
- *(deps)* Update module github.com/pressly/goose/v3 to v3.24.0
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.61
|
||||
- *(deps)* Update module github.com/vektah/gqlparser/v2 to v2.5.21
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.62
|
||||
|
||||
### 🚜 Refactor
|
||||
|
||||
- Use common pre-commit job
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.21.2
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.4.1
|
||||
- *(deps)* Pin dependencies
|
||||
- *(deps)* Pin dependencies
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.4.2
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.4.4
|
||||
- *(deps)* Update dependency go to v1.23.3
|
||||
- *(deps)* Update golang docker tag to v1.23.3
|
||||
- *(deps)* Update unbound/pre-commit docker digest to 596abf5
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.62.0
|
||||
- *(deps)* Update golang:1.23.3 docker digest to 8956c08
|
||||
- *(deps)* Update unbound/pre-commit docker digest to e78425c
|
||||
- *(deps)* Update golang:1.23.3 docker digest to 3694e36
|
||||
- *(deps)* Update golang:1.23.3 docker digest to b2ca381
|
||||
- *(deps)* Update golang:1.23.3 docker digest to 2660218
|
||||
- *(deps)* Update golang:1.23.3 docker digest to c2d828f
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.4.5
|
||||
- *(deps)* Update golang:1.23.3 docker digest to 73f06be
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.4.6
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.4.7
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.4.8
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.62.2
|
||||
- *(deps)* Update pre-commit hook alessandrojcm/commitlint-pre-commit-hook to v9.19.0
|
||||
- *(deps)* Update golang:1.23.3 docker digest to ee5f0ad
|
||||
- *(deps)* Update golang:1.23.3 docker digest to b4aabba
|
||||
- *(deps)* Update golang:1.23.3 docker digest to 2b01164
|
||||
- *(deps)* Update golang:1.23.3 docker digest to 017ec6b
|
||||
- *(deps)* Update dependency go to v1.23.4
|
||||
- *(deps)* Update golang docker tag to v1.23.4
|
||||
- *(deps)* Update golang:1.23.4 docker digest to 574185e
|
||||
- Remove unnecessary Docker variables from configuration
|
||||
- *(ci)* Remove unused docker service from buildtools
|
||||
- *(deps)* Update golang:1.23.4 docker digest to 7003184
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.5.0
|
||||
- *(deps)* Update pre-commit hook alessandrojcm/commitlint-pre-commit-hook to v9.20.0
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.21.3
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.21.4
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.22.0
|
||||
- *(deps)* Update golang:1.23.4 docker digest to 7ea4c9d
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.22.1
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.63.0
|
||||
|
||||
## [0.0.7] - 2024-10-22
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- *(deps)* Update module github.com/pressly/goose/v3 to v3.20.0
|
||||
- *(deps)* Update module github.com/jmoiron/sqlx to v1.4.0
|
||||
- *(deps)* Update module github.com/rs/cors to v1.11.0
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.46
|
||||
- *(deps)* Update module github.com/vektah/gqlparser/v2 to v2.5.12
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.47
|
||||
- *(deps)* Update module github.com/getsentry/sentry-go to v0.28.0
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.48
|
||||
- *(deps)* Update module github.com/vektah/gqlparser/v2 to v2.5.16
|
||||
- *(deps)* Update module github.com/getsentry/sentry-go to v0.28.1
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools to v1.67.3
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.49
|
||||
- *(deps)* Update module github.com/pressly/goose/v3 to v3.21.0
|
||||
- *(deps)* Update module github.com/pressly/goose/v3 to v3.21.1
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools to v1.67.4
|
||||
- *(deps)* Update module github.com/sparetimecoders/goamqp to v0.3.1
|
||||
- *(deps)* Update module github.com/auth0/go-jwt-middleware/v2 to v2.2.2
|
||||
- *(deps)* Update module github.com/rs/cors to v1.11.1
|
||||
- *(deps)* Update module github.com/pressly/goose/v3 to v3.22.0
|
||||
- *(deps)* Update module github.com/getsentry/sentry-go to v0.29.0
|
||||
- *(deps)* Update module github.com/alecthomas/kong to v1
|
||||
- *(deps)* Update module github.com/alecthomas/kong to v1.2.1
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.50
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.51
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.52
|
||||
- *(deps)* Update module github.com/pressly/goose/v3 to v3.22.1
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.53
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.54
|
||||
- *(deps)* Update module github.com/vektah/gqlparser/v2 to v2.5.17
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.55
|
||||
- *(deps)* Update module gitlab.com/unboundsoftware/eventsourced/amqp to v1.6.5
|
||||
- *(deps)* Update module gitlab.com/unboundsoftware/eventsourced/eventsourced to v1.15.0
|
||||
- *(deps)* Update module gitlab.com/unboundsoftware/eventsourced/pg to v1.14.4
|
||||
- *(deps)* Update module gitlab.com/unboundsoftware/eventsourced/amqp to v1.6.6
|
||||
- *(deps)* Update module github.com/getsentry/sentry-go to v0.29.1
|
||||
- *(deps)* Update module github.com/vektah/gqlparser/v2 to v2.5.18
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- *(deps)* Update pre-commit hook pre-commit/pre-commit-hooks to v4.6.0
|
||||
- *(deps)* Update pre-commit hook alessandrojcm/commitlint-pre-commit-hook to v9.15.0
|
||||
- *(deps)* Update dependency go to v1.22.2
|
||||
- *(deps)* Update pre-commit hook alessandrojcm/commitlint-pre-commit-hook to v9.16.0
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.58.0
|
||||
- *(deps)* Update dependency go to v1.22.3
|
||||
- *(deps)* Update golang docker tag to v1.22.3
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.58.1
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.58.2
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.59.0
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.18.3
|
||||
- *(deps)* Update dependency go to v1.22.4
|
||||
- *(deps)* Update golang docker tag to v1.22.4
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.59.1
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.18.4
|
||||
- *(deps)* Update dependency go to v1.22.5
|
||||
- *(deps)* Update golang docker tag to v1.22.5
|
||||
- *(deps)* Update dependency go to v1.22.6
|
||||
- *(deps)* Update golang docker tag to v1.22.6
|
||||
- *(deps)* Update dependency go to v1.23.0
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.60.1
|
||||
- *(deps)* Update golang docker tag to v1.23.0
|
||||
- Update golangci-lint hook identifier in pre-commit config
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.60.2
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.60.3
|
||||
- *(deps)* Update pre-commit hook alessandrojcm/commitlint-pre-commit-hook to v9.17.0
|
||||
- *(deps)* Update dependency go to v1.23.1
|
||||
- *(deps)* Update golang docker tag to v1.23.1
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.61.0
|
||||
- *(deps)* Update pre-commit hook alessandrojcm/commitlint-pre-commit-hook to v9.18.0
|
||||
- Update goreleaser image to v2.3.1
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.19.1
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.19.2
|
||||
- Add generate check
|
||||
- *(deps)* Update goreleaser/goreleaser docker tag to v2.3.2
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.19.3
|
||||
- *(deps)* Update dependency go to v1.23.2
|
||||
- *(deps)* Update golang docker tag to v1.23.2
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.20.0
|
||||
- *(deps)* Update pre-commit hook pre-commit/pre-commit-hooks to v5
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.20.1
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.21.0
|
||||
- *(deps)* Update pre-commit hook gitleaks/gitleaks to v8.21.1
|
||||
- Add release notes for goreleaser command in .gitlab-ci.yml
|
||||
|
||||
## [0.0.6] - 2024-04-04
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- Don't set fixed global_sequence_no
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- Add step for checking release
|
||||
- *(deps)* Update golang docker tag to v1.22.2
|
||||
|
||||
## [0.0.5] - 2024-04-03
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
- Add full SDL to SupGraph response
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- *(deps)* Update module github.com/stretchr/testify to v1.9.0
|
||||
- *(deps)* Update module github.com/golang-jwt/jwt/v4 to v5
|
||||
- Update default migrations
|
||||
- *(deps)* Update module github.com/golang-jwt/jwt/v5 to v5.2.1
|
||||
- *(deps)* Update module github.com/khan/genqlient to v0.7.0
|
||||
- *(deps)* Update module github.com/alecthomas/kong to v0.9.0
|
||||
- *(deps)* Update module github.com/pressly/goose/v3 to v3.19.1
|
||||
- *(deps)* Update module github.com/99designs/gqlgen to v0.17.45
|
||||
- *(deps)* Update module github.com/pressly/goose/v3 to v3.19.2
|
||||
- *(deps)* Update module gitlab.com/unboundsoftware/eventsourced/pg to v1.14.0
|
||||
- *(deps)* Update module gitlab.com/unboundsoftware/eventsourced/pg to v1.14.3
|
||||
- *(deps)* Update module github.com/wundergraph/graphql-go-tools to v1.67.2
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- *(deps)* Bump github.com/stretchr/testify from 1.8.3 to 1.8.4
|
||||
- Update golangci-lint
|
||||
- *(deps)* Bump golang from 1.20.4 to 1.20.5
|
||||
- Update Go version for vulnerabilities
|
||||
- *(deps)* Bump github.com/99designs/gqlgen from 0.17.31 to 0.17.32
|
||||
- *(deps)* Bump github.com/wundergraph/graphql-go-tools
|
||||
- *(deps)* Bump github.com/vektah/gqlparser/v2
|
||||
- *(deps)* Bump github.com/99designs/gqlgen from 0.17.32 to 0.17.33
|
||||
- *(deps)* Bump github.com/getsentry/sentry-go from 0.21.0 to 0.22.0
|
||||
- *(deps)* Bump github.com/alecthomas/kong from 0.7.1 to 0.8.0
|
||||
- *(deps)* Bump github.com/99designs/gqlgen from 0.17.33 to 0.17.34
|
||||
- *(deps)* Bump github.com/vektah/gqlparser/v2 from 2.5.3 to 2.5.5
|
||||
- *(deps)* Bump github.com/vektah/gqlparser/v2 from 2.5.5 to 2.5.6
|
||||
- *(deps)* Bump github.com/pressly/goose/v3 from 3.11.2 to 3.13.0
|
||||
- *(deps)* Bump github.com/sparetimecoders/goamqp from 0.1.4 to 0.1.5
|
||||
- *(deps)* Bump github.com/pressly/goose/v3 from 3.13.0 to 3.13.1
|
||||
- *(deps)* Bump github.com/pressly/goose/v3 from 3.13.1 to 3.13.4
|
||||
- *(deps)* Bump gitlab.com/unboundsoftware/eventsourced/pg
|
||||
- *(deps)* Bump github.com/wundergraph/graphql-go-tools
|
||||
- *(deps)* Bump golang from 1.20.5 to 1.20.6
|
||||
- *(deps)* Bump github.com/vektah/gqlparser/v2 from 2.5.6 to 2.5.7
|
||||
- *(deps)* Bump github.com/99designs/gqlgen from 0.17.34 to 0.17.35
|
||||
- *(deps)* Bump github.com/vektah/gqlparser/v2 from 2.5.7 to 2.5.8
|
||||
- *(deps)* Bump github.com/wundergraph/graphql-go-tools
|
||||
- *(deps)* Bump github.com/pressly/goose/v3 from 3.13.4 to 3.14.0
|
||||
- *(deps)* Bump github.com/wundergraph/graphql-go-tools
|
||||
- *(deps)* Bump github.com/99designs/gqlgen from 0.17.35 to 0.17.36
|
||||
- *(deps)* Bump github.com/wundergraph/graphql-go-tools
|
||||
- *(deps)* Bump github.com/getsentry/sentry-go from 0.22.0 to 0.23.0
|
||||
- *(deps)* Bump golang from 1.20.6 to 1.20.7
|
||||
- Update to Go 1.20.7
|
||||
- *(deps)* Bump github.com/wundergraph/graphql-go-tools
|
||||
- *(deps)* Bump github.com/wundergraph/graphql-go-tools
|
||||
- *(deps)* Bump golang from 1.20.7 to 1.21.0
|
||||
- *(deps)* Bump github.com/pressly/goose/v3 from 3.14.0 to 3.15.0
|
||||
- *(deps)* Bump github.com/wundergraph/graphql-go-tools
|
||||
- Update to Golang 1.21.0 for vulnerabilities
|
||||
- Update pre-commit versions
|
||||
- *(deps)* Bump github.com/getsentry/sentry-go from 0.23.0 to 0.24.0
|
||||
- *(deps)* Bump github.com/rs/cors from 1.9.0 to 1.10.0
|
||||
- *(deps)* Bump golang from 1.21.0 to 1.21.1
|
||||
- Update to Go 1.21.1 for vulnerabilities
|
||||
- *(deps)* Bump github.com/vektah/gqlparser/v2 from 2.5.8 to 2.5.9
|
||||
- *(deps)* Bump github.com/99designs/gqlgen from 0.17.36 to 0.17.37
|
||||
- *(deps)* Bump github.com/getsentry/sentry-go from 0.24.0 to 0.24.1
|
||||
- *(deps)* Bump github.com/vektah/gqlparser/v2 from 2.5.9 to 2.5.10
|
||||
- *(deps)* Bump github.com/99designs/gqlgen from 0.17.37 to 0.17.38
|
||||
- *(deps)* Bump github.com/wundergraph/graphql-go-tools
|
||||
- *(deps)* Bump github.com/rs/cors from 1.10.0 to 1.10.1
|
||||
- *(deps)* Bump github.com/getsentry/sentry-go from 0.24.1 to 0.25.0
|
||||
- *(deps)* Bump github.com/sparetimecoders/goamqp from 0.1.5 to 0.2.0
|
||||
- *(deps)* Bump github.com/99designs/gqlgen from 0.17.38 to 0.17.39
|
||||
- *(deps)* Bump golang from 1.21.1 to 1.21.2
|
||||
- *(deps)* Bump github.com/pressly/goose/v3 from 3.15.0 to 3.15.1
|
||||
- *(deps)* Bump golang from 1.21.2 to 1.21.3
|
||||
- *(deps)* Bump github.com/alecthomas/kong from 0.8.0 to 0.8.1
|
||||
- *(deps)* Bump github.com/wundergraph/graphql-go-tools
|
||||
- *(deps)* Bump github.com/99designs/gqlgen from 0.17.39 to 0.17.40
|
||||
- *(deps)* Bump gitlab.com/unboundsoftware/eventsourced/eventsourced
|
||||
- *(deps)* Bump gitlab.com/unboundsoftware/eventsourced/pg
|
||||
- *(deps)* Bump golang from 1.21.3 to 1.21.4
|
||||
- *(deps)* Bump gitlab.com/unboundsoftware/eventsourced/pg
|
||||
- *(deps)* Bump github.com/pressly/goose/v3 from 3.15.1 to 3.16.0
|
||||
- *(deps)* Bump gitlab.com/unboundsoftware/eventsourced/pg
|
||||
- *(deps)* Bump gitlab.com/unboundsoftware/eventsourced/pg
|
||||
- *(deps)* Bump github.com/99designs/gqlgen from 0.17.40 to 0.17.41
|
||||
- *(deps)* Bump github.com/auth0/go-jwt-middleware/v2
|
||||
- *(deps)* Bump golang from 1.21.4 to 1.21.5
|
||||
- *(deps)* Bump gitlab.com/unboundsoftware/eventsourced/pg
|
||||
- *(deps)* Bump github.com/pressly/goose/v3 from 3.16.0 to 3.17.0
|
||||
- *(deps)* Bump gitlab.com/unboundsoftware/eventsourced/pg
|
||||
- *(deps)* Bump github.com/sparetimecoders/goamqp from 0.2.0 to 0.2.1
|
||||
- *(deps)* Bump github.com/99designs/gqlgen from 0.17.41 to 0.17.42
|
||||
- *(deps)* Bump golang from 1.21.5 to 1.21.6
|
||||
- *(deps)* Bump github.com/getsentry/sentry-go from 0.25.0 to 0.26.0
|
||||
- *(deps)* Bump github.com/sparetimecoders/goamqp from 0.2.1 to 0.3.0
|
||||
- *(deps)* Bump github.com/vektah/gqlparser/v2 from 2.5.10 to 2.5.11
|
||||
- *(deps)* Bump github.com/99designs/gqlgen from 0.17.42 to 0.17.43
|
||||
- *(deps)* Bump github.com/auth0/go-jwt-middleware/v2
|
||||
- *(deps)* Bump github.com/wundergraph/graphql-go-tools
|
||||
- *(deps)* Bump github.com/pressly/goose/v3 from 3.17.0 to 3.18.0
|
||||
- *(deps)* Bump gitlab.com/unboundsoftware/eventsourced/eventsourced
|
||||
- *(deps)* Bump golang from 1.21.6 to 1.22.0
|
||||
- *(deps)* Bump github.com/getsentry/sentry-go from 0.26.0 to 0.27.0
|
||||
- *(deps)* Bump github.com/99designs/gqlgen from 0.17.43 to 0.17.44
|
||||
- *(deps)* Update pre-commit hook devopshq/gitlab-ci-linter to v1.0.6
|
||||
- *(deps)* Update pre-commit hook alessandrojcm/commitlint-pre-commit-hook to v9.11.0
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.56.2
|
||||
- *(deps)* Update pre-commit hook lietu/go-pre-commit to v0.1.0
|
||||
- *(deps)* Update pre-commit hook pre-commit/pre-commit-hooks to v4.5.0
|
||||
- *(deps)* Update pre-commit hook alessandrojcm/commitlint-pre-commit-hook to v9.12.0
|
||||
- *(deps)* Update pre-commit hook alessandrojcm/commitlint-pre-commit-hook to v9.13.0
|
||||
- Use OrbStack for local dev
|
||||
- *(deps)* Update golang docker tag to v1.22.1
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.57.0
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.57.1
|
||||
- Add gitleaks to pre-commit setup
|
||||
- Update resources
|
||||
- *(deps)* Update pre-commit hook golangci/golangci-lint to v1.57.2
|
||||
- *(deps)* Update pre-commit hook alessandrojcm/commitlint-pre-commit-hook to v9.14.0
|
||||
- Run release on medium instance
|
||||
- Back to small and upgrade goreleaser
|
||||
- Remove deprecated replacements
|
||||
|
||||
## [0.0.4] - 2023-05-29
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- Explicitly set dialect to make goose use correct version table
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- *(deps)* Bump golang from 1.20.3 to 1.20.4
|
||||
- *(deps)* Bump github.com/pressly/goose/v3 from 3.10.0 to 3.11.0
|
||||
- Update Go version for vulnerabilities
|
||||
- *(deps)* Bump github.com/99designs/gqlgen from 0.17.30 to 0.17.31
|
||||
- *(deps)* Bump github.com/Khan/genqlient from 0.5.0 to 0.6.0
|
||||
- *(deps)* Bump github.com/getsentry/sentry-go from 0.20.0 to 0.21.0
|
||||
- *(deps)* Bump github.com/pressly/goose/v3 from 3.11.0 to 3.11.2
|
||||
- *(deps)* Bump github.com/wundergraph/graphql-go-tools
|
||||
- *(deps)* Bump github.com/sparetimecoders/goamqp from 0.1.3 to 0.1.4
|
||||
- *(deps)* Bump github.com/wundergraph/graphql-go-tools
|
||||
- Update pre-commit and fix golangci-lint
|
||||
- *(deps)* Bump github.com/stretchr/testify from 1.8.2 to 1.8.3
|
||||
- Actually validate API key privileges and refs
|
||||
|
||||
## [0.0.3] - 2023-04-27
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
- Add Sentry setup
|
||||
- Add Sentry setup
|
||||
- Organizations and API keys
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- Use correct healthcheck path
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- Update schema if URLs have changed
|
||||
- Add pre-commit and remove those checks from Dockerfile
|
||||
- *(deps)* Bump github.com/alecthomas/kong from 0.6.1 to 0.7.1
|
||||
- *(deps)* Bump github.com/stretchr/testify from 1.8.0 to 1.8.1
|
||||
- *(deps)* Bump github.com/99designs/gqlgen from 0.17.20 to 0.17.22
|
||||
- *(deps)* Bump github.com/getsentry/sentry-go from 0.14.0 to 0.16.0
|
||||
- *(deps)* Bump gitlab.com/unboundsoftware/eventsourced/eventsourced
|
||||
- *(deps)* Bump gitlab.com/unboundsoftware/eventsourced/amqp
|
||||
- *(deps)* Bump gitlab.com/unboundsoftware/eventsourced/pg
|
||||
- Add context and error handling
|
||||
- *(deps)* Bump github.com/rs/cors from 1.8.2 to 1.8.3
|
||||
- Move to default ingress group
|
||||
- Decrease trace sample rate
|
||||
- Improve docker caching
|
||||
- *(deps)* Bump golang from 1.19.4 to 1.19.5
|
||||
- *(deps)* Bump github.com/getsentry/sentry-go from 0.16.0 to 0.17.0
|
||||
- Add local module to pre-commit config
|
||||
- Only ignore generated files with do not edit
|
||||
- Default ingress group
|
||||
- *(deps)* Bump gitlab.com/unboundsoftware/eventsourced/eventsourced
|
||||
- *(deps)* Bump gitlab.com/unboundsoftware/eventsourced/eventsourced
|
||||
- *(deps)* Bump github.com/99designs/gqlgen from 0.17.22 to 0.17.24
|
||||
- *(deps)* Bump gitlab.com/unboundsoftware/eventsourced/eventsourced
|
||||
- *(deps)* Bump gitlab.com/unboundsoftware/eventsourced/pg
|
||||
- *(deps)* Bump gitlab.com/unboundsoftware/eventsourced/pg
|
||||
- *(deps)* Bump gitlab.com/unboundsoftware/eventsourced/eventsourced
|
||||
- *(deps)* Bump golang from 1.19.5 to 1.20.0
|
||||
- Use Docker DinD version from variable
|
||||
- *(deps)* Bump github.com/getsentry/sentry-go from 0.17.0 to 0.18.0
|
||||
- Switch to manual rebases for Dependabot
|
||||
- *(deps)* Bump golang from 1.20.0 to 1.20.1
|
||||
- Update to golang 1.20.1
|
||||
- *(deps)* Bump github.com/stretchr/testify from 1.8.1 to 1.8.2
|
||||
- *(deps)* Bump github.com/99designs/gqlgen from 0.17.24 to 0.17.25
|
||||
- *(deps)* Bump github.com/getsentry/sentry-go from 0.18.0 to 0.19.0
|
||||
- *(deps)* Bump golang from 1.20.1 to 1.20.2
|
||||
- *(deps)* Bump github.com/99designs/gqlgen from 0.17.25 to 0.17.26
|
||||
- Update Go version for vulnerabilities scan
|
||||
- *(deps)* Bump github.com/99designs/gqlgen from 0.17.26 to 0.17.27
|
||||
- Reduce sample rate
|
||||
- *(deps)* Bump github.com/getsentry/sentry-go from 0.19.0 to 0.20.0
|
||||
- *(deps)* Bump github.com/99designs/gqlgen from 0.17.27 to 0.17.28
|
||||
- *(deps)* Bump golang from 1.20.2 to 1.20.3
|
||||
- Update to Go 1.20.3
|
||||
- *(deps)* Bump github.com/99designs/gqlgen from 0.17.28 to 0.17.29
|
||||
- *(deps)* Bump github.com/rs/cors from 1.8.3 to 1.9.0
|
||||
- *(deps)* Bump gitlab.com/unboundsoftware/eventsourced/pg
|
||||
- *(deps)* Bump github.com/99designs/gqlgen from 0.17.29 to 0.17.30
|
||||
- Fix Gitlab CI lint
|
||||
|
||||
## [0.0.2] - 2022-10-14
|
||||
|
||||
### ⚙️ Miscellaneous Tasks
|
||||
|
||||
- Add docker ignore
|
||||
- Handle push of unchanged schema
|
||||
|
||||
## [0.0.1] - 2022-10-09
|
||||
|
||||
### 🚀 Features
|
||||
|
||||
- Initial commit
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- Remove GITLAB_TOKEN
|
||||
|
||||
<!-- generated by git-cliff -->
|
||||
@@ -0,0 +1,136 @@
|
||||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
|
||||
## Project Overview
|
||||
|
||||
This is a GraphQL schema registry service that manages federated GraphQL schemas for microservices. It allows services to publish their subgraph schemas and provides merged supergraphs with Cosmo Router configuration for federated GraphQL gateways.
|
||||
|
||||
## Architecture
|
||||
|
||||
### Event Sourcing
|
||||
The system uses event sourcing via `gitlab.com/unboundsoftware/eventsourced`. Key domain aggregates are:
|
||||
- **Organization** (domain/aggregates.go): Manages organizations, users, and API keys
|
||||
- **SubGraph** (domain/aggregates.go): Tracks subgraph schemas with versioning
|
||||
|
||||
All state changes flow through events (domain/events.go) and commands (domain/commands.go). The EventStore persists events to PostgreSQL, and events are published to RabbitMQ for downstream consumers.
|
||||
|
||||
### GraphQL Layer
|
||||
- **Schema**: graph/schema.graphqls defines the API
|
||||
- **Resolvers**: graph/schema.resolvers.go implements mutations/queries
|
||||
- **Generated Code**: graph/generated/ and graph/model/ (auto-generated by gqlgen)
|
||||
|
||||
The resolver (graph/resolver.go) coordinates between the EventStore, Publisher (RabbitMQ), Cache, and PubSub for subscriptions.
|
||||
|
||||
### Schema Merging
|
||||
The sdlmerge/ package handles GraphQL schema federation:
|
||||
- Merges multiple subgraph SDL schemas into a unified supergraph
|
||||
- Uses wundergraph/graphql-go-tools for AST manipulation
|
||||
- Removes duplicates, extends types, and applies federation directives
|
||||
|
||||
### Authentication & Authorization
|
||||
- **Auth0 JWT** (middleware/auth0.go): Validates user tokens from Auth0
|
||||
- **API Keys** (middleware/apikey.go): Validates service API keys
|
||||
- **Auth Middleware** (middleware/auth.go): Routes auth based on context
|
||||
|
||||
The @auth directive controls field-level access (user vs organization API key).
|
||||
|
||||
### Cosmo Router Integration
|
||||
The service generates Cosmo Router configuration (graph/cosmo.go) using the wgc CLI tool installed in the Docker container. This config enables federated query execution across subgraphs.
|
||||
|
||||
### PubSub for Real-time Updates
|
||||
graph/pubsub.go implements subscription support for schemaUpdates, allowing clients to receive real-time notifications when schemas change.
|
||||
|
||||
## Commands
|
||||
|
||||
### Code Generation
|
||||
```bash
|
||||
# Generate GraphQL server code (gqlgen), format, and organize imports
|
||||
go generate ./...
|
||||
```
|
||||
|
||||
Always run this after modifying graph/schema.graphqls. The go:generate directives are in:
|
||||
- graph/resolver.go: runs gqlgen, gofumpt, and goimports
|
||||
- ctl/ctl.go: generates genqlient client code
|
||||
|
||||
### Testing
|
||||
```bash
|
||||
# Run all tests
|
||||
go test ./... -v
|
||||
|
||||
# Run tests with race detection and coverage (as used in CI)
|
||||
CGO_ENABLED=1 go test -race -coverprofile=coverage.txt -covermode=atomic ./...
|
||||
|
||||
# Run specific package tests
|
||||
go test ./middleware -v
|
||||
go test ./graph -v -run TestGenerateCosmoRouterConfig
|
||||
|
||||
# Run single test
|
||||
go test ./cmd/service -v -run TestWebSocket
|
||||
```
|
||||
|
||||
### Building
|
||||
```bash
|
||||
# Build the service binary
|
||||
go build -o service ./cmd/service/service.go
|
||||
|
||||
# Build the CLI tool
|
||||
go build -o schemactl ./cmd/schemactl/schemactl.go
|
||||
|
||||
# Docker build (multi-stage)
|
||||
docker build -t schemas .
|
||||
```
|
||||
|
||||
The Dockerfile runs tests with coverage before building the production binary.
|
||||
|
||||
### Running the Service
|
||||
```bash
|
||||
# Start the service (requires PostgreSQL and RabbitMQ)
|
||||
go run ./cmd/service/service.go \
|
||||
--postgres-url="postgres://user:pass@localhost:5432/schemas?sslmode=disable" \
|
||||
--amqp-url="amqp://user:pass@localhost:5672/" \
|
||||
--issuer="your-auth0-domain.auth0.com"
|
||||
|
||||
# The service listens on port 8080 by default
|
||||
# GraphQL Playground available at http://localhost:8080/
|
||||
```
|
||||
|
||||
### Using the schemactl CLI
|
||||
```bash
|
||||
# Publish a subgraph schema
|
||||
schemactl publish \
|
||||
--api-key="your-api-key" \
|
||||
--schema-ref="production" \
|
||||
--service="users" \
|
||||
--url="http://users-service:8080/query" \
|
||||
--sdl=schema.graphql
|
||||
|
||||
# List subgraphs for a ref
|
||||
schemactl list \
|
||||
--api-key="your-api-key" \
|
||||
--schema-ref="production"
|
||||
```
|
||||
|
||||
## Development Workflow
|
||||
|
||||
1. **Schema Changes**: Edit graph/schema.graphqls → run `go generate ./...`
|
||||
2. **Resolver Implementation**: Implement in graph/schema.resolvers.go
|
||||
3. **Testing**: Write tests, run `go test ./...`
|
||||
4. **Pre-commit**: Hooks run go-mod-tidy, goimports, gofumpt, golangci-lint, and tests
|
||||
|
||||
## Key Dependencies
|
||||
|
||||
- **gqlgen**: GraphQL server generation
|
||||
- **genqlient**: GraphQL client generation (for ctl package)
|
||||
- **eventsourced**: Event sourcing framework
|
||||
- **wundergraph/graphql-go-tools**: Schema federation and composition
|
||||
- **wgc CLI**: Cosmo Router config generation (Node.js tool)
|
||||
- **Auth0**: JWT authentication
|
||||
- **OpenTelemetry**: Observability (traces, metrics, logs)
|
||||
|
||||
## Important Files
|
||||
|
||||
- gqlgen.yml: gqlgen configuration
|
||||
- graph/tools.go: Declares build-time tool dependencies
|
||||
- .pre-commit-config.yaml: Pre-commit hooks configuration
|
||||
- cliff.toml: Changelog generation config
|
||||
+12
-3
@@ -1,9 +1,10 @@
|
||||
FROM golang:1.20.4 as modules
|
||||
FROM amd64/golang:1.26.0@sha256:e7479dbd4918090d893b97245fd8c0bcf767677f8ede2e60e7fb2c2f38c94215 as modules
|
||||
WORKDIR /build
|
||||
ADD go.* /build
|
||||
RUN go mod download
|
||||
|
||||
FROM modules as build
|
||||
ARG CI_COMMIT
|
||||
WORKDIR /build
|
||||
ENV CGO_ENABLED=0
|
||||
ADD . /build
|
||||
@@ -17,15 +18,23 @@ RUN GOOS=linux GOARCH=amd64 go build \
|
||||
-a -installsuffix cgo \
|
||||
-mod=readonly \
|
||||
-o /release/service \
|
||||
-ldflags '-w -s' \
|
||||
-ldflags "-w -s -X main.buildVersion=${CI_COMMIT}" \
|
||||
./cmd/service/service.go
|
||||
|
||||
FROM scratch as export
|
||||
COPY --from=build /build/coverage.txt /
|
||||
|
||||
FROM scratch
|
||||
FROM node:24.13.1-alpine@sha256:4f696fbf39f383c1e486030ba6b289a5d9af541642fc78ab197e584a113b9c03
|
||||
ENV TZ Europe/Stockholm
|
||||
|
||||
# Install wgc CLI globally for Cosmo Router composition
|
||||
RUN npm install -g wgc@latest
|
||||
|
||||
# Copy timezone data and certificates
|
||||
COPY --from=build /usr/share/zoneinfo /usr/share/zoneinfo
|
||||
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
||||
|
||||
# Copy the service binary
|
||||
COPY --from=build /release/service /
|
||||
|
||||
CMD ["/service"]
|
||||
|
||||
Vendored
+152
-27
@@ -2,38 +2,49 @@ package cache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/apex/log"
|
||||
"github.com/sparetimecoders/goamqp"
|
||||
"gitlab.com/unboundsoftware/eventsourced/eventsourced"
|
||||
|
||||
"gitlab.com/unboundsoftware/schemas/domain"
|
||||
"gitlab.com/unboundsoftware/schemas/hash"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/domain"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/hash"
|
||||
)
|
||||
|
||||
type Cache struct {
|
||||
mu sync.RWMutex
|
||||
organizations map[string]domain.Organization
|
||||
users map[string][]string
|
||||
apiKeys map[string]domain.APIKey
|
||||
apiKeys map[string]domain.APIKey // keyed by organizationId-name
|
||||
services map[string]map[string]map[string]struct{}
|
||||
subGraphs map[string]string
|
||||
lastUpdate map[string]string
|
||||
logger log.Interface
|
||||
logger *slog.Logger
|
||||
}
|
||||
|
||||
func (c *Cache) OrganizationByAPIKey(apiKey string) *domain.Organization {
|
||||
key, exists := c.apiKeys[apiKey]
|
||||
if !exists {
|
||||
return nil
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
|
||||
// Find the API key by comparing hashes
|
||||
for _, key := range c.apiKeys {
|
||||
if hash.CompareAPIKey(key.Key, apiKey) {
|
||||
org, exists := c.organizations[key.OrganizationId]
|
||||
if !exists {
|
||||
return nil
|
||||
}
|
||||
return &org
|
||||
}
|
||||
}
|
||||
org, exists := c.organizations[key.OrganizationId]
|
||||
if !exists {
|
||||
return nil
|
||||
}
|
||||
return &org
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) OrganizationsByUser(sub string) []domain.Organization {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
|
||||
orgIds := c.users[sub]
|
||||
orgs := make([]domain.Organization, len(orgIds))
|
||||
for i, id := range orgIds {
|
||||
@@ -42,15 +53,34 @@ func (c *Cache) OrganizationsByUser(sub string) []domain.Organization {
|
||||
return orgs
|
||||
}
|
||||
|
||||
func (c *Cache) ApiKeyByKey(key string) *domain.APIKey {
|
||||
k, exists := c.apiKeys[hash.String(key)]
|
||||
if !exists {
|
||||
return nil
|
||||
func (c *Cache) AllOrganizations() []domain.Organization {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
|
||||
orgs := make([]domain.Organization, 0, len(c.organizations))
|
||||
for _, org := range c.organizations {
|
||||
orgs = append(orgs, org)
|
||||
}
|
||||
return &k
|
||||
return orgs
|
||||
}
|
||||
|
||||
func (c *Cache) ApiKeyByKey(key string) *domain.APIKey {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
|
||||
// Find the API key by comparing hashes
|
||||
for _, apiKey := range c.apiKeys {
|
||||
if hash.CompareAPIKey(apiKey.Key, key) {
|
||||
return &apiKey
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) Services(orgId, ref, lastUpdate string) ([]string, string) {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
|
||||
key := refKey(orgId, ref)
|
||||
var services []string
|
||||
if lastUpdate == "" || c.lastUpdate[key] > lastUpdate {
|
||||
@@ -62,43 +92,125 @@ func (c *Cache) Services(orgId, ref, lastUpdate string) ([]string, string) {
|
||||
}
|
||||
|
||||
func (c *Cache) SubGraphId(orgId, ref, service string) string {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
|
||||
return c.subGraphs[subGraphKey(orgId, ref, service)]
|
||||
}
|
||||
|
||||
func (c *Cache) Update(msg any, _ goamqp.Headers) (any, error) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
switch m := msg.(type) {
|
||||
case *domain.OrganizationAdded:
|
||||
o := domain.Organization{}
|
||||
o := domain.Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString(m.ID.String()),
|
||||
}
|
||||
m.UpdateOrganization(&o)
|
||||
c.organizations[m.ID.String()] = o
|
||||
c.addUser(m.Initiator, o)
|
||||
c.logger.With("org_id", m.ID.String(), "event", "OrganizationAdded").Debug("cache updated")
|
||||
case *domain.UserAddedToOrganization:
|
||||
org, exists := c.organizations[m.ID.String()]
|
||||
if exists {
|
||||
m.UpdateOrganization(&org)
|
||||
c.organizations[m.ID.String()] = org
|
||||
c.addUser(m.UserId, org)
|
||||
c.logger.With("org_id", m.ID.String(), "user_id", m.UserId, "event", "UserAddedToOrganization").Debug("cache updated")
|
||||
} else {
|
||||
c.logger.With("org_id", m.ID.String(), "event", "UserAddedToOrganization").Warn("organization not found in cache")
|
||||
}
|
||||
case *domain.APIKeyAdded:
|
||||
key := domain.APIKey{
|
||||
Name: m.Name,
|
||||
OrganizationId: m.OrganizationId,
|
||||
Key: m.Key,
|
||||
Key: m.Key, // This is now the hashed key
|
||||
Refs: m.Refs,
|
||||
Read: m.Read,
|
||||
Publish: m.Publish,
|
||||
CreatedBy: m.Initiator,
|
||||
CreatedAt: m.When(),
|
||||
}
|
||||
c.apiKeys[m.Key] = key
|
||||
// Use composite key: organizationId-name
|
||||
c.apiKeys[apiKeyId(m.OrganizationId, m.Name)] = key
|
||||
org := c.organizations[m.OrganizationId]
|
||||
org.APIKeys = append(org.APIKeys, key)
|
||||
c.organizations[m.OrganizationId] = org
|
||||
c.logger.With("org_id", m.OrganizationId, "key_name", m.Name, "event", "APIKeyAdded").Debug("cache updated")
|
||||
case *domain.APIKeyRemoved:
|
||||
orgId := m.ID.String()
|
||||
org, exists := c.organizations[orgId]
|
||||
if exists {
|
||||
// Remove from organization's API keys list
|
||||
for i, key := range org.APIKeys {
|
||||
if key.Name == m.KeyName {
|
||||
org.APIKeys = append(org.APIKeys[:i], org.APIKeys[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
c.organizations[orgId] = org
|
||||
// Remove from apiKeys map
|
||||
delete(c.apiKeys, apiKeyId(orgId, m.KeyName))
|
||||
c.logger.With("org_id", orgId, "key_name", m.KeyName, "event", "APIKeyRemoved").Debug("cache updated")
|
||||
} else {
|
||||
c.logger.With("org_id", orgId, "event", "APIKeyRemoved").Warn("organization not found in cache")
|
||||
}
|
||||
case *domain.OrganizationRemoved:
|
||||
orgId := m.ID.String()
|
||||
org, exists := c.organizations[orgId]
|
||||
if exists {
|
||||
// Remove all API keys for this organization
|
||||
for _, key := range org.APIKeys {
|
||||
delete(c.apiKeys, apiKeyId(orgId, key.Name))
|
||||
}
|
||||
// Remove organization from all users
|
||||
for userId, userOrgs := range c.users {
|
||||
for i, userOrgId := range userOrgs {
|
||||
if userOrgId == orgId {
|
||||
c.users[userId] = append(userOrgs[:i], userOrgs[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
// If user has no more organizations, remove from map
|
||||
if len(c.users[userId]) == 0 {
|
||||
delete(c.users, userId)
|
||||
}
|
||||
}
|
||||
// Remove services for this organization
|
||||
if refs, exists := c.services[orgId]; exists {
|
||||
for ref := range refs {
|
||||
// Remove all subgraphs for this org/ref combination
|
||||
for service := range refs[ref] {
|
||||
delete(c.subGraphs, subGraphKey(orgId, ref, service))
|
||||
}
|
||||
// Remove lastUpdate for this org/ref
|
||||
delete(c.lastUpdate, refKey(orgId, ref))
|
||||
}
|
||||
delete(c.services, orgId)
|
||||
}
|
||||
// Remove organization
|
||||
delete(c.organizations, orgId)
|
||||
c.logger.With("org_id", orgId, "event", "OrganizationRemoved").Debug("cache updated")
|
||||
} else {
|
||||
c.logger.With("org_id", orgId, "event", "OrganizationRemoved").Warn("organization not found in cache")
|
||||
}
|
||||
case *domain.SubGraphUpdated:
|
||||
c.updateSubGraph(m.OrganizationId, m.Ref, m.ID.String(), m.Service, m.Time)
|
||||
c.logger.With("org_id", m.OrganizationId, "ref", m.Ref, "service", m.Service, "event", "SubGraphUpdated").Debug("cache updated")
|
||||
case *domain.Organization:
|
||||
c.organizations[m.ID.String()] = *m
|
||||
c.addUser(m.CreatedBy, *m)
|
||||
for _, k := range m.APIKeys {
|
||||
c.apiKeys[k.Key] = k
|
||||
// Use composite key: organizationId-name
|
||||
c.apiKeys[apiKeyId(k.OrganizationId, k.Name)] = k
|
||||
}
|
||||
c.logger.With("org_id", m.ID.String(), "event", "Organization aggregate loaded").Debug("cache updated")
|
||||
case *domain.SubGraph:
|
||||
c.updateSubGraph(m.OrganizationId, m.Ref, m.ID.String(), m.Service, m.ChangedAt)
|
||||
c.logger.With("org_id", m.OrganizationId, "ref", m.Ref, "service", m.Service, "event", "SubGraph aggregate loaded").Debug("cache updated")
|
||||
default:
|
||||
c.logger.Warnf("unexpected message received: %+v", msg)
|
||||
c.logger.With("msg", msg).Warn("unexpected message received")
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
@@ -117,14 +229,23 @@ func (c *Cache) updateSubGraph(orgId string, ref string, subGraphId string, serv
|
||||
|
||||
func (c *Cache) addUser(sub string, organization domain.Organization) {
|
||||
user, exists := c.users[sub]
|
||||
orgId := organization.ID.String()
|
||||
if !exists {
|
||||
c.users[sub] = []string{organization.ID.String()}
|
||||
} else {
|
||||
c.users[sub] = append(user, organization.ID.String())
|
||||
c.users[sub] = []string{orgId}
|
||||
return
|
||||
}
|
||||
|
||||
// Check if organization already exists for this user
|
||||
for _, id := range user {
|
||||
if id == orgId {
|
||||
return // Already exists, no need to add
|
||||
}
|
||||
}
|
||||
|
||||
c.users[sub] = append(user, orgId)
|
||||
}
|
||||
|
||||
func New(logger log.Interface) *Cache {
|
||||
func New(logger *slog.Logger) *Cache {
|
||||
return &Cache{
|
||||
organizations: make(map[string]domain.Organization),
|
||||
users: make(map[string][]string),
|
||||
@@ -143,3 +264,7 @@ func refKey(orgId string, ref string) string {
|
||||
func subGraphKey(orgId string, ref string, service string) string {
|
||||
return fmt.Sprintf("%s<->%s<->%s", orgId, ref, service)
|
||||
}
|
||||
|
||||
func apiKeyId(orgId string, name string) string {
|
||||
return fmt.Sprintf("%s<->%s", orgId, name)
|
||||
}
|
||||
|
||||
Vendored
+645
@@ -0,0 +1,645 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"log/slog"
|
||||
"os"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gitlab.com/unboundsoftware/eventsourced/eventsourced"
|
||||
|
||||
"gitea.unbound.se/unboundsoftware/schemas/domain"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/hash"
|
||||
)
|
||||
|
||||
func TestCache_OrganizationByAPIKey(t *testing.T) {
|
||||
logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
|
||||
c := New(logger)
|
||||
|
||||
orgID := uuid.New().String()
|
||||
apiKey := "test-api-key-123" // gitleaks:allow
|
||||
hashedKey, err := hash.APIKey(apiKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Add organization to cache
|
||||
org := domain.Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString(orgID),
|
||||
Name: "Test Org",
|
||||
}
|
||||
c.organizations[orgID] = org
|
||||
|
||||
// Add API key to cache
|
||||
c.apiKeys[apiKeyId(orgID, "test-key")] = domain.APIKey{
|
||||
Name: "test-key",
|
||||
OrganizationId: orgID,
|
||||
Key: hashedKey,
|
||||
Refs: []string{"main"},
|
||||
Read: true,
|
||||
Publish: true,
|
||||
}
|
||||
|
||||
// Test finding organization by plaintext API key
|
||||
foundOrg := c.OrganizationByAPIKey(apiKey)
|
||||
require.NotNil(t, foundOrg)
|
||||
assert.Equal(t, org.Name, foundOrg.Name)
|
||||
assert.Equal(t, orgID, foundOrg.ID.String())
|
||||
|
||||
// Test with wrong API key
|
||||
notFoundOrg := c.OrganizationByAPIKey("wrong-key")
|
||||
assert.Nil(t, notFoundOrg)
|
||||
}
|
||||
|
||||
func TestCache_OrganizationByAPIKey_Legacy(t *testing.T) {
|
||||
logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
|
||||
c := New(logger)
|
||||
|
||||
orgID := uuid.New().String()
|
||||
apiKey := "legacy-api-key-456" // gitleaks:allow
|
||||
legacyHash := hash.String(apiKey)
|
||||
|
||||
// Add organization to cache
|
||||
org := domain.Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString(orgID),
|
||||
Name: "Legacy Org",
|
||||
}
|
||||
c.organizations[orgID] = org
|
||||
|
||||
// Add API key with legacy SHA256 hash
|
||||
c.apiKeys[apiKeyId(orgID, "legacy-key")] = domain.APIKey{
|
||||
Name: "legacy-key",
|
||||
OrganizationId: orgID,
|
||||
Key: legacyHash,
|
||||
Refs: []string{"main"},
|
||||
Read: true,
|
||||
Publish: false,
|
||||
}
|
||||
|
||||
// Test finding organization with legacy hash
|
||||
foundOrg := c.OrganizationByAPIKey(apiKey)
|
||||
require.NotNil(t, foundOrg)
|
||||
assert.Equal(t, org.Name, foundOrg.Name)
|
||||
}
|
||||
|
||||
func TestCache_OrganizationsByUser(t *testing.T) {
|
||||
logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
|
||||
c := New(logger)
|
||||
|
||||
userSub := "user-123"
|
||||
org1ID := uuid.New().String()
|
||||
org2ID := uuid.New().String()
|
||||
|
||||
org1 := domain.Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString(org1ID),
|
||||
Name: "Org 1",
|
||||
}
|
||||
org2 := domain.Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString(org2ID),
|
||||
Name: "Org 2",
|
||||
}
|
||||
|
||||
c.organizations[org1ID] = org1
|
||||
c.organizations[org2ID] = org2
|
||||
c.users[userSub] = []string{org1ID, org2ID}
|
||||
|
||||
orgs := c.OrganizationsByUser(userSub)
|
||||
assert.Len(t, orgs, 2)
|
||||
assert.Contains(t, []string{orgs[0].Name, orgs[1].Name}, "Org 1")
|
||||
assert.Contains(t, []string{orgs[0].Name, orgs[1].Name}, "Org 2")
|
||||
}
|
||||
|
||||
func TestCache_ApiKeyByKey(t *testing.T) {
|
||||
logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
|
||||
c := New(logger)
|
||||
|
||||
orgID := uuid.New().String()
|
||||
apiKey := "test-api-key-789" // gitleaks:allow
|
||||
hashedKey, err := hash.APIKey(apiKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedKey := domain.APIKey{
|
||||
Name: "test-key",
|
||||
OrganizationId: orgID,
|
||||
Key: hashedKey,
|
||||
Refs: []string{"main", "dev"},
|
||||
Read: true,
|
||||
Publish: true,
|
||||
}
|
||||
|
||||
c.apiKeys[apiKeyId(orgID, "test-key")] = expectedKey
|
||||
|
||||
foundKey := c.ApiKeyByKey(apiKey)
|
||||
require.NotNil(t, foundKey)
|
||||
assert.Equal(t, expectedKey.Name, foundKey.Name)
|
||||
assert.Equal(t, expectedKey.OrganizationId, foundKey.OrganizationId)
|
||||
assert.Equal(t, expectedKey.Refs, foundKey.Refs)
|
||||
|
||||
// Test with wrong key
|
||||
notFoundKey := c.ApiKeyByKey("wrong-key")
|
||||
assert.Nil(t, notFoundKey)
|
||||
}
|
||||
|
||||
func TestCache_Services(t *testing.T) {
|
||||
logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
|
||||
c := New(logger)
|
||||
|
||||
orgID := uuid.New().String()
|
||||
ref := "main"
|
||||
service1 := "service-1"
|
||||
service2 := "service-2"
|
||||
lastUpdate := "2024-01-01T12:00:00Z"
|
||||
|
||||
c.services[orgID] = map[string]map[string]struct{}{
|
||||
ref: {
|
||||
service1: {},
|
||||
service2: {},
|
||||
},
|
||||
}
|
||||
c.lastUpdate[refKey(orgID, ref)] = lastUpdate
|
||||
|
||||
// Test getting services with empty lastUpdate
|
||||
services, returnedLastUpdate := c.Services(orgID, ref, "")
|
||||
assert.Len(t, services, 2)
|
||||
assert.Contains(t, services, service1)
|
||||
assert.Contains(t, services, service2)
|
||||
assert.Equal(t, lastUpdate, returnedLastUpdate)
|
||||
|
||||
// Test with older lastUpdate (should return services)
|
||||
services, returnedLastUpdate = c.Services(orgID, ref, "2023-12-31T12:00:00Z")
|
||||
assert.Len(t, services, 2)
|
||||
assert.Equal(t, lastUpdate, returnedLastUpdate)
|
||||
|
||||
// Test with newer lastUpdate (should return empty)
|
||||
services, returnedLastUpdate = c.Services(orgID, ref, "2024-01-02T12:00:00Z")
|
||||
assert.Len(t, services, 0)
|
||||
assert.Equal(t, lastUpdate, returnedLastUpdate)
|
||||
}
|
||||
|
||||
func TestCache_SubGraphId(t *testing.T) {
|
||||
logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
|
||||
c := New(logger)
|
||||
|
||||
orgID := uuid.New().String()
|
||||
ref := "main"
|
||||
service := "test-service"
|
||||
subGraphID := uuid.New().String()
|
||||
|
||||
c.subGraphs[subGraphKey(orgID, ref, service)] = subGraphID
|
||||
|
||||
foundID := c.SubGraphId(orgID, ref, service)
|
||||
assert.Equal(t, subGraphID, foundID)
|
||||
|
||||
// Test with non-existent key
|
||||
notFoundID := c.SubGraphId("wrong-org", ref, service)
|
||||
assert.Empty(t, notFoundID)
|
||||
}
|
||||
|
||||
func TestCache_Update_OrganizationAdded(t *testing.T) {
|
||||
logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
|
||||
c := New(logger)
|
||||
|
||||
orgID := uuid.New().String()
|
||||
event := &domain.OrganizationAdded{
|
||||
Name: "New Org",
|
||||
Initiator: "user-123",
|
||||
}
|
||||
event.ID = *eventsourced.IdFromString(orgID)
|
||||
|
||||
_, err := c.Update(event, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify organization was added
|
||||
org, exists := c.organizations[orgID]
|
||||
assert.True(t, exists)
|
||||
assert.Equal(t, "New Org", org.Name)
|
||||
|
||||
// Verify user was added
|
||||
assert.Contains(t, c.users["user-123"], orgID)
|
||||
}
|
||||
|
||||
func TestCache_Update_APIKeyAdded(t *testing.T) {
|
||||
logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
|
||||
c := New(logger)
|
||||
|
||||
orgID := uuid.New().String()
|
||||
keyName := "test-key"
|
||||
hashedKey := "hashed-key-value"
|
||||
|
||||
// Add organization first
|
||||
org := domain.Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString(orgID),
|
||||
Name: "Test Org",
|
||||
APIKeys: []domain.APIKey{},
|
||||
}
|
||||
c.organizations[orgID] = org
|
||||
|
||||
event := &domain.APIKeyAdded{
|
||||
OrganizationId: orgID,
|
||||
Name: keyName,
|
||||
Key: hashedKey,
|
||||
Refs: []string{"main"},
|
||||
Read: true,
|
||||
Publish: false,
|
||||
Initiator: "user-123",
|
||||
}
|
||||
event.ID = *eventsourced.IdFromString(uuid.New().String())
|
||||
|
||||
_, err := c.Update(event, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify API key was added to cache
|
||||
key, exists := c.apiKeys[apiKeyId(orgID, keyName)]
|
||||
assert.True(t, exists)
|
||||
assert.Equal(t, keyName, key.Name)
|
||||
assert.Equal(t, hashedKey, key.Key)
|
||||
assert.Equal(t, []string{"main"}, key.Refs)
|
||||
|
||||
// Verify API key was added to organization
|
||||
updatedOrg := c.organizations[orgID]
|
||||
assert.Len(t, updatedOrg.APIKeys, 1)
|
||||
assert.Equal(t, keyName, updatedOrg.APIKeys[0].Name)
|
||||
}
|
||||
|
||||
func TestCache_Update_SubGraphUpdated(t *testing.T) {
|
||||
logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
|
||||
c := New(logger)
|
||||
|
||||
orgID := uuid.New().String()
|
||||
ref := "main"
|
||||
service := "test-service"
|
||||
subGraphID := uuid.New().String()
|
||||
|
||||
event := &domain.SubGraphUpdated{
|
||||
OrganizationId: orgID,
|
||||
Ref: ref,
|
||||
Service: service,
|
||||
Initiator: "user-123",
|
||||
}
|
||||
event.ID = *eventsourced.IdFromString(subGraphID)
|
||||
event.SetWhen(time.Now())
|
||||
|
||||
_, err := c.Update(event, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify subgraph was added to services
|
||||
assert.Contains(t, c.services[orgID][ref], subGraphID)
|
||||
|
||||
// Verify subgraph ID was stored
|
||||
assert.Equal(t, subGraphID, c.subGraphs[subGraphKey(orgID, ref, service)])
|
||||
|
||||
// Verify lastUpdate was set
|
||||
assert.NotEmpty(t, c.lastUpdate[refKey(orgID, ref)])
|
||||
}
|
||||
|
||||
func TestCache_AddUser_NoDuplicates(t *testing.T) {
|
||||
logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
|
||||
c := New(logger)
|
||||
|
||||
userSub := "user-123"
|
||||
orgID := uuid.New().String()
|
||||
org := domain.Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString(orgID),
|
||||
Name: "Test Org",
|
||||
}
|
||||
|
||||
// Add user first time
|
||||
c.addUser(userSub, org)
|
||||
assert.Len(t, c.users[userSub], 1)
|
||||
assert.Equal(t, orgID, c.users[userSub][0])
|
||||
|
||||
// Add same user/org again - should not create duplicate
|
||||
c.addUser(userSub, org)
|
||||
assert.Len(t, c.users[userSub], 1, "Should not add duplicate organization")
|
||||
assert.Equal(t, orgID, c.users[userSub][0])
|
||||
}
|
||||
|
||||
func TestCache_ConcurrentReads(t *testing.T) {
|
||||
logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
|
||||
c := New(logger)
|
||||
|
||||
// Setup test data - use legacy hash to avoid slow bcrypt
|
||||
orgID := uuid.New().String()
|
||||
userSub := "test-user"
|
||||
|
||||
org := domain.Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString(orgID),
|
||||
Name: "Concurrent Test Org",
|
||||
}
|
||||
c.organizations[orgID] = org
|
||||
c.users[userSub] = []string{orgID}
|
||||
|
||||
// Run concurrent reads using fast OrganizationsByUser
|
||||
var wg sync.WaitGroup
|
||||
numGoroutines := 20
|
||||
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
orgs := c.OrganizationsByUser(userSub)
|
||||
assert.NotEmpty(t, orgs)
|
||||
assert.Equal(t, "Concurrent Test Org", orgs[0].Name)
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestCache_ConcurrentWrites(t *testing.T) {
|
||||
logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
|
||||
c := New(logger)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
numGoroutines := 10 // Reduced for race detector
|
||||
|
||||
// Concurrent organization additions
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
wg.Add(1)
|
||||
go func(index int) {
|
||||
defer wg.Done()
|
||||
orgID := uuid.New().String()
|
||||
event := &domain.OrganizationAdded{
|
||||
Name: "Org " + string(rune(index)),
|
||||
Initiator: "user-" + string(rune(index)),
|
||||
}
|
||||
event.ID = *eventsourced.IdFromString(orgID)
|
||||
_, err := c.Update(event, nil)
|
||||
assert.NoError(t, err)
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
// Verify all organizations were added
|
||||
assert.Equal(t, numGoroutines, len(c.organizations))
|
||||
}
|
||||
|
||||
func TestCache_ConcurrentReadsAndWrites(t *testing.T) {
|
||||
logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
|
||||
c := New(logger)
|
||||
|
||||
// Setup initial data - use legacy hash to avoid slow bcrypt in concurrent test
|
||||
orgID := uuid.New().String()
|
||||
legacyKey := "test-rw-key" // gitleaks:allow
|
||||
legacyHash := hash.String(legacyKey)
|
||||
|
||||
org := domain.Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString(orgID),
|
||||
Name: "RW Test Org",
|
||||
}
|
||||
c.organizations[orgID] = org
|
||||
c.apiKeys[apiKeyId(orgID, "test-key")] = domain.APIKey{
|
||||
Name: "test-key",
|
||||
OrganizationId: orgID,
|
||||
Key: legacyHash,
|
||||
}
|
||||
c.users["user-initial"] = []string{orgID}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
numReaders := 5
|
||||
numWriters := 3
|
||||
|
||||
// Concurrent readers - use OrganizationsByUser which is fast
|
||||
for i := 0; i < numReaders; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
orgs := c.OrganizationsByUser("user-initial")
|
||||
assert.NotEmpty(t, orgs)
|
||||
}()
|
||||
}
|
||||
|
||||
// Concurrent writers
|
||||
for i := 0; i < numWriters; i++ {
|
||||
wg.Add(1)
|
||||
go func(index int) {
|
||||
defer wg.Done()
|
||||
newOrgID := uuid.New().String()
|
||||
event := &domain.OrganizationAdded{
|
||||
Name: "New Org " + string(rune(index)),
|
||||
Initiator: "user-new-" + string(rune(index)),
|
||||
}
|
||||
event.ID = *eventsourced.IdFromString(newOrgID)
|
||||
_, err := c.Update(event, nil)
|
||||
assert.NoError(t, err)
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
// Verify cache is in consistent state
|
||||
assert.GreaterOrEqual(t, len(c.organizations), numWriters)
|
||||
}
|
||||
|
||||
func TestCache_Update_APIKeyRemoved(t *testing.T) {
|
||||
logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
|
||||
c := New(logger)
|
||||
|
||||
orgID := uuid.New().String()
|
||||
keyName := "test-key"
|
||||
hashedKey := "hashed-key-value"
|
||||
|
||||
// Add organization with API key
|
||||
org := domain.Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString(orgID),
|
||||
Name: "Test Org",
|
||||
APIKeys: []domain.APIKey{
|
||||
{
|
||||
Name: keyName,
|
||||
OrganizationId: orgID,
|
||||
Key: hashedKey,
|
||||
Refs: []string{"main"},
|
||||
Read: true,
|
||||
Publish: false,
|
||||
},
|
||||
},
|
||||
}
|
||||
c.organizations[orgID] = org
|
||||
c.apiKeys[apiKeyId(orgID, keyName)] = org.APIKeys[0]
|
||||
|
||||
// Verify key exists before removal
|
||||
_, exists := c.apiKeys[apiKeyId(orgID, keyName)]
|
||||
assert.True(t, exists)
|
||||
|
||||
// Remove the API key
|
||||
event := &domain.APIKeyRemoved{
|
||||
KeyName: keyName,
|
||||
Initiator: "user-123",
|
||||
}
|
||||
event.ID = *eventsourced.IdFromString(orgID)
|
||||
|
||||
_, err := c.Update(event, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify API key was removed from cache
|
||||
_, exists = c.apiKeys[apiKeyId(orgID, keyName)]
|
||||
assert.False(t, exists, "API key should be removed from cache")
|
||||
|
||||
// Verify API key was removed from organization
|
||||
updatedOrg := c.organizations[orgID]
|
||||
assert.Len(t, updatedOrg.APIKeys, 0, "API key should be removed from organization")
|
||||
}
|
||||
|
||||
func TestCache_Update_APIKeyRemoved_MultipleKeys(t *testing.T) {
|
||||
logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
|
||||
c := New(logger)
|
||||
|
||||
orgID := uuid.New().String()
|
||||
|
||||
// Add organization with multiple API keys
|
||||
org := domain.Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString(orgID),
|
||||
Name: "Test Org",
|
||||
APIKeys: []domain.APIKey{
|
||||
{
|
||||
Name: "key1",
|
||||
OrganizationId: orgID,
|
||||
Key: "hash1",
|
||||
},
|
||||
{
|
||||
Name: "key2",
|
||||
OrganizationId: orgID,
|
||||
Key: "hash2",
|
||||
},
|
||||
{
|
||||
Name: "key3",
|
||||
OrganizationId: orgID,
|
||||
Key: "hash3",
|
||||
},
|
||||
},
|
||||
}
|
||||
c.organizations[orgID] = org
|
||||
c.apiKeys[apiKeyId(orgID, "key1")] = org.APIKeys[0]
|
||||
c.apiKeys[apiKeyId(orgID, "key2")] = org.APIKeys[1]
|
||||
c.apiKeys[apiKeyId(orgID, "key3")] = org.APIKeys[2]
|
||||
|
||||
// Remove the middle key
|
||||
event := &domain.APIKeyRemoved{
|
||||
KeyName: "key2",
|
||||
Initiator: "user-123",
|
||||
}
|
||||
event.ID = *eventsourced.IdFromString(orgID)
|
||||
|
||||
_, err := c.Update(event, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify only key2 was removed
|
||||
_, exists := c.apiKeys[apiKeyId(orgID, "key1")]
|
||||
assert.True(t, exists, "key1 should still exist")
|
||||
|
||||
_, exists = c.apiKeys[apiKeyId(orgID, "key2")]
|
||||
assert.False(t, exists, "key2 should be removed")
|
||||
|
||||
_, exists = c.apiKeys[apiKeyId(orgID, "key3")]
|
||||
assert.True(t, exists, "key3 should still exist")
|
||||
|
||||
// Verify organization has 2 keys remaining
|
||||
updatedOrg := c.organizations[orgID]
|
||||
assert.Len(t, updatedOrg.APIKeys, 2)
|
||||
assert.Equal(t, "key1", updatedOrg.APIKeys[0].Name)
|
||||
assert.Equal(t, "key3", updatedOrg.APIKeys[1].Name)
|
||||
}
|
||||
|
||||
func TestCache_Update_OrganizationRemoved(t *testing.T) {
|
||||
logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
|
||||
c := New(logger)
|
||||
|
||||
orgID := uuid.New().String()
|
||||
userSub := "user-123"
|
||||
|
||||
// Add organization with API keys, users, and subgraphs
|
||||
org := domain.Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString(orgID),
|
||||
Name: "Test Org",
|
||||
APIKeys: []domain.APIKey{
|
||||
{
|
||||
Name: "key1",
|
||||
OrganizationId: orgID,
|
||||
Key: "hash1",
|
||||
},
|
||||
},
|
||||
}
|
||||
c.organizations[orgID] = org
|
||||
c.apiKeys[apiKeyId(orgID, "key1")] = org.APIKeys[0]
|
||||
c.users[userSub] = []string{orgID}
|
||||
c.services[orgID] = map[string]map[string]struct{}{
|
||||
"main": {
|
||||
"service1": {},
|
||||
},
|
||||
}
|
||||
c.subGraphs[subGraphKey(orgID, "main", "service1")] = "subgraph-id"
|
||||
c.lastUpdate[refKey(orgID, "main")] = "2024-01-01T12:00:00Z"
|
||||
|
||||
// Remove the organization
|
||||
event := &domain.OrganizationRemoved{
|
||||
Initiator: userSub,
|
||||
}
|
||||
event.ID = *eventsourced.IdFromString(orgID)
|
||||
|
||||
_, err := c.Update(event, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify organization was removed
|
||||
_, exists := c.organizations[orgID]
|
||||
assert.False(t, exists, "Organization should be removed from cache")
|
||||
|
||||
// Verify API keys were removed
|
||||
_, exists = c.apiKeys[apiKeyId(orgID, "key1")]
|
||||
assert.False(t, exists, "API keys should be removed from cache")
|
||||
|
||||
// Verify user association was removed
|
||||
userOrgs := c.users[userSub]
|
||||
assert.NotContains(t, userOrgs, orgID, "User should not be associated with removed organization")
|
||||
|
||||
// Verify services were removed
|
||||
_, exists = c.services[orgID]
|
||||
assert.False(t, exists, "Services should be removed from cache")
|
||||
|
||||
// Verify subgraphs were removed
|
||||
_, exists = c.subGraphs[subGraphKey(orgID, "main", "service1")]
|
||||
assert.False(t, exists, "Subgraphs should be removed from cache")
|
||||
|
||||
// Verify lastUpdate was removed
|
||||
_, exists = c.lastUpdate[refKey(orgID, "main")]
|
||||
assert.False(t, exists, "LastUpdate should be removed from cache")
|
||||
}
|
||||
|
||||
func TestCache_Update_OrganizationRemoved_MultipleUsers(t *testing.T) {
|
||||
logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
|
||||
c := New(logger)
|
||||
|
||||
orgID := uuid.New().String()
|
||||
user1 := "user-1"
|
||||
user2 := "user-2"
|
||||
otherOrgID := uuid.New().String()
|
||||
|
||||
// Add organization
|
||||
org := domain.Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString(orgID),
|
||||
Name: "Test Org",
|
||||
}
|
||||
c.organizations[orgID] = org
|
||||
|
||||
// Add users with multiple org associations
|
||||
c.users[user1] = []string{orgID, otherOrgID}
|
||||
c.users[user2] = []string{orgID}
|
||||
|
||||
// Remove the organization
|
||||
event := &domain.OrganizationRemoved{
|
||||
Initiator: user1,
|
||||
}
|
||||
event.ID = *eventsourced.IdFromString(orgID)
|
||||
|
||||
_, err := c.Update(event, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify user1 still has otherOrgID but not removed orgID
|
||||
assert.Len(t, c.users[user1], 1)
|
||||
assert.Equal(t, otherOrgID, c.users[user1][0])
|
||||
|
||||
// Verify user2 has no organizations
|
||||
assert.Len(t, c.users[user2], 0)
|
||||
}
|
||||
+80
@@ -0,0 +1,80 @@
|
||||
# git-cliff ~ default configuration file
|
||||
# https://git-cliff.org/docs/configuration
|
||||
#
|
||||
# Lines starting with "#" are comments.
|
||||
# Configuration options are organized into tables and keys.
|
||||
# See documentation for more information on available options.
|
||||
|
||||
[changelog]
|
||||
# template for the changelog header
|
||||
header = """
|
||||
# Changelog\n
|
||||
All notable changes to this project will be documented in this file.\n
|
||||
"""
|
||||
# template for the changelog body
|
||||
# https://keats.github.io/tera/docs/#introduction
|
||||
body = """
|
||||
{% if version %}\
|
||||
## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }}
|
||||
{% else %}\
|
||||
## [unreleased]
|
||||
{% endif %}\
|
||||
{% for group, commits in commits | group_by(attribute="group") %}
|
||||
### {{ group | striptags | trim | upper_first }}
|
||||
{% for commit in commits %}
|
||||
- {% if commit.scope %}*({{ commit.scope }})* {% endif %}\
|
||||
{% if commit.breaking %}[**breaking**] {% endif %}\
|
||||
{{ commit.message | upper_first }}\
|
||||
{% endfor %}
|
||||
{% endfor %}\n
|
||||
"""
|
||||
# template for the changelog footer
|
||||
footer = """
|
||||
<!-- generated by git-cliff -->
|
||||
"""
|
||||
# remove the leading and trailing s
|
||||
trim = true
|
||||
# postprocessors
|
||||
postprocessors = [
|
||||
# { pattern = '<REPO>', replace = "https://github.com/orhun/git-cliff" }, # replace repository URL
|
||||
]
|
||||
# render body even when there are no releases to process
|
||||
# render_always = true
|
||||
# output file path
|
||||
# output = "test.md"
|
||||
|
||||
[git]
|
||||
# parse the commits based on https://www.conventionalcommits.org
|
||||
conventional_commits = true
|
||||
# filter out the commits that are not conventional
|
||||
filter_unconventional = true
|
||||
# process each line of a commit as an individual commit
|
||||
split_commits = false
|
||||
# regex for preprocessing the commit messages
|
||||
commit_preprocessors = [
|
||||
# Replace issue numbers
|
||||
#{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](<REPO>/issues/${2}))"},
|
||||
# Check spelling of the commit with https://github.com/crate-ci/typos
|
||||
# If the spelling is incorrect, it will be automatically fixed.
|
||||
#{ pattern = '.*', replace_command = 'typos --write-changes -' },
|
||||
]
|
||||
# regex for parsing and grouping commits
|
||||
commit_parsers = [
|
||||
{ message = "^feat", group = "<!-- 0 -->🚀 Features" },
|
||||
{ message = "^fix", group = "<!-- 1 -->🐛 Bug Fixes" },
|
||||
{ message = "^doc", group = "<!-- 3 -->📚 Documentation" },
|
||||
{ message = "^perf", group = "<!-- 4 -->⚡ Performance" },
|
||||
{ message = "^refactor", group = "<!-- 2 -->🚜 Refactor" },
|
||||
{ message = "^style", group = "<!-- 5 -->🎨 Styling" },
|
||||
{ message = "^test", group = "<!-- 6 -->🧪 Testing" },
|
||||
{ message = "^chore\\(release\\): prepare for", skip = true },
|
||||
{ message = "^chore|^ci", group = "<!-- 7 -->⚙️ Miscellaneous Tasks" },
|
||||
{ body = ".*security", group = "<!-- 8 -->🛡️ Security" },
|
||||
{ message = "^revert", group = "<!-- 9 -->◀️ Revert" },
|
||||
]
|
||||
# filter out the commits that are not matched by commit parsers
|
||||
filter_commits = false
|
||||
# sort the tags topologically
|
||||
topo_order = false
|
||||
# sort the commits inside sections by oldest/newest order
|
||||
sort_commits = "oldest"
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
"github.com/alecthomas/kong"
|
||||
"github.com/apex/log"
|
||||
|
||||
"gitlab.com/unboundsoftware/schemas/ctl"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/ctl"
|
||||
)
|
||||
|
||||
type Context struct {
|
||||
|
||||
+101
-99
@@ -2,7 +2,9 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
@@ -12,40 +14,39 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/99designs/gqlgen/graphql/handler"
|
||||
"github.com/99designs/gqlgen/graphql/handler/extension"
|
||||
"github.com/99designs/gqlgen/graphql/handler/lru"
|
||||
"github.com/99designs/gqlgen/graphql/handler/transport"
|
||||
"github.com/99designs/gqlgen/graphql/playground"
|
||||
"github.com/alecthomas/kong"
|
||||
"github.com/apex/log"
|
||||
"github.com/apex/log/handlers/json"
|
||||
"github.com/getsentry/sentry-go"
|
||||
sentryhttp "github.com/getsentry/sentry-go/http"
|
||||
"github.com/rs/cors"
|
||||
"github.com/sparetimecoders/goamqp"
|
||||
"github.com/vektah/gqlparser/v2/ast"
|
||||
"gitlab.com/unboundsoftware/eventsourced/amqp"
|
||||
"gitlab.com/unboundsoftware/eventsourced/eventsourced"
|
||||
"gitlab.com/unboundsoftware/eventsourced/pg"
|
||||
|
||||
"gitlab.com/unboundsoftware/schemas/cache"
|
||||
"gitlab.com/unboundsoftware/schemas/domain"
|
||||
"gitlab.com/unboundsoftware/schemas/graph"
|
||||
"gitlab.com/unboundsoftware/schemas/graph/generated"
|
||||
"gitlab.com/unboundsoftware/schemas/middleware"
|
||||
"gitlab.com/unboundsoftware/schemas/store"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/cache"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/domain"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/graph"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/graph/generated"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/health"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/logging"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/middleware"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/monitoring"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/store"
|
||||
)
|
||||
|
||||
type CLI struct {
|
||||
AmqpURL string `name:"amqp-url" env:"AMQP_URL" help:"URL to use to connect to RabbitMQ" default:"amqp://user:password@localhost:5672/"`
|
||||
AmqpURL string `name:"amqp-url" env:"AMQP_URL" help:"URL to use to connect to RabbitMQ" default:"amqp://user:password@unbound-control-plane.orb.local:5672/"`
|
||||
Port int `name:"port" env:"PORT" help:"Listen-port for GraphQL API" default:"8080"`
|
||||
LogLevel string `name:"log-level" env:"LOG_LEVEL" help:"The level of logging to use (debug, info, warn, error, fatal)" default:"info"`
|
||||
DatabaseURL string `name:"postgres-url" env:"POSTGRES_URL" help:"URL to use to connect to Postgres" default:"postgres://postgres:postgres@:5432/schemas?sslmode=disable"`
|
||||
LogFormat string `name:"log-format" env:"LOG_FORMAT" help:"The format of logs" default:"text" enum:"otel,json,text"`
|
||||
DatabaseURL string `name:"postgres-url" env:"POSTGRES_URL" help:"URL to use to connect to Postgres" default:"postgres://postgres:postgres@unbound-control-plane.orb.local:5432/schemas?sslmode=disable"`
|
||||
DatabaseDriverName string `name:"db-driver" env:"DB_DRIVER" help:"Driver to use to connect to db" default:"postgres"`
|
||||
Issuer string `name:"issuer" env:"ISSUER" help:"The JWT token issuer to use" default:"unbound.eu.auth0.com"`
|
||||
StrictSSL bool `name:"strict-ssl" env:"STRICT_SSL" help:"Should strict SSL handling be enabled" default:"true"`
|
||||
SentryConfig
|
||||
}
|
||||
|
||||
type SentryConfig struct {
|
||||
DSN string `name:"sentry-dsn" env:"SENTRY_DSN" help:"Sentry dsn" default:""`
|
||||
Environment string `name:"sentry-environment" env:"SENTRY_ENVIRONMENT" help:"Sentry environment" default:"development"`
|
||||
Environment string `name:"environment" env:"ENVIRONMENT" help:"The environment we are running in" default:"development" enum:"development,staging,production"`
|
||||
}
|
||||
|
||||
var buildVersion = "none"
|
||||
@@ -55,9 +56,7 @@ const serviceName = "schemas"
|
||||
func main() {
|
||||
var cli CLI
|
||||
_ = kong.Parse(&cli)
|
||||
log.SetHandler(json.New(os.Stdout))
|
||||
log.SetLevelFromString(cli.LogLevel)
|
||||
logger := log.WithField("service", serviceName)
|
||||
logger := logging.SetupLogger(cli.LogLevel, cli.LogFormat, serviceName, buildVersion)
|
||||
closeEvents := make(chan error)
|
||||
|
||||
if err := start(
|
||||
@@ -66,19 +65,22 @@ func main() {
|
||||
ConnectAMQP,
|
||||
cli,
|
||||
); err != nil {
|
||||
logger.WithError(err).Error("process error")
|
||||
logger.With("error", err).Error("process error")
|
||||
}
|
||||
}
|
||||
|
||||
func start(closeEvents chan error, logger *log.Entry, connectToAmqpFunc func(url string) (Connection, error), cli CLI) error {
|
||||
if err := setupSentry(logger, cli.SentryConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
defer sentry.Flush(2 * time.Second)
|
||||
|
||||
func start(closeEvents chan error, logger *slog.Logger, connectToAmqpFunc func(url string) (Connection, error), cli CLI) error {
|
||||
rootCtx, rootCancel := context.WithCancel(context.Background())
|
||||
defer rootCancel()
|
||||
|
||||
shutdownFn, err := monitoring.SetupOTelSDK(rootCtx, cli.LogFormat == "otel", serviceName, buildVersion, cli.Environment)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
_ = errors.Join(shutdownFn(context.Background()))
|
||||
}()
|
||||
|
||||
db, err := store.SetupDB(cli.DatabaseDriverName, cli.DatabaseURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to setup DB: %v", err)
|
||||
@@ -90,7 +92,10 @@ func start(closeEvents chan error, logger *log.Entry, connectToAmqpFunc func(url
|
||||
pg.WithEventTypes(
|
||||
&domain.SubGraphUpdated{},
|
||||
&domain.OrganizationAdded{},
|
||||
&domain.UserAddedToOrganization{},
|
||||
&domain.APIKeyAdded{},
|
||||
&domain.APIKeyRemoved{},
|
||||
&domain.OrganizationRemoved{},
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
@@ -101,23 +106,7 @@ func start(closeEvents chan error, logger *log.Entry, connectToAmqpFunc func(url
|
||||
return fmt.Errorf("event migrations: %w", err)
|
||||
}
|
||||
|
||||
publisher, err := goamqp.NewPublisher(
|
||||
goamqp.Route{
|
||||
Type: domain.SubGraphUpdated{},
|
||||
Key: "SubGraph.Updated",
|
||||
},
|
||||
goamqp.Route{
|
||||
Type: domain.OrganizationAdded{},
|
||||
Key: "Organization.Added",
|
||||
},
|
||||
goamqp.Route{
|
||||
Type: domain.APIKeyAdded{},
|
||||
Key: "Organization.APIKeyAdded",
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create publisher: %v", err)
|
||||
}
|
||||
publisher := goamqp.NewPublisher()
|
||||
eventPublisher, err := amqp.New(publisher)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create event publisher: %v", err)
|
||||
@@ -135,13 +124,22 @@ func start(closeEvents chan error, logger *log.Entry, connectToAmqpFunc func(url
|
||||
return fmt.Errorf("caching subgraphs: %w", err)
|
||||
}
|
||||
setups := []goamqp.Setup{
|
||||
goamqp.UseLogger(logger.Errorf),
|
||||
goamqp.UseLogger(func(s string) { logger.Error(s) }),
|
||||
goamqp.CloseListener(closeEvents),
|
||||
goamqp.WithPrefetchLimit(20),
|
||||
goamqp.EventStreamPublisher(publisher),
|
||||
goamqp.TransientEventStreamConsumer("SubGraph.Updated", serviceCache.Update, domain.SubGraphUpdated{}),
|
||||
goamqp.TransientEventStreamConsumer("Organization.Added", serviceCache.Update, domain.OrganizationAdded{}),
|
||||
goamqp.TransientEventStreamConsumer("Organization.UserAdded", serviceCache.Update, domain.UserAddedToOrganization{}),
|
||||
goamqp.TransientEventStreamConsumer("Organization.APIKeyAdded", serviceCache.Update, domain.APIKeyAdded{}),
|
||||
goamqp.TransientEventStreamConsumer("Organization.APIKeyRemoved", serviceCache.Update, domain.APIKeyRemoved{}),
|
||||
goamqp.TransientEventStreamConsumer("Organization.Removed", serviceCache.Update, domain.OrganizationRemoved{}),
|
||||
goamqp.WithTypeMapping("SubGraph.Updated", domain.SubGraphUpdated{}),
|
||||
goamqp.WithTypeMapping("Organization.Added", domain.OrganizationAdded{}),
|
||||
goamqp.WithTypeMapping("Organization.UserAdded", domain.UserAddedToOrganization{}),
|
||||
goamqp.WithTypeMapping("Organization.APIKeyAdded", domain.APIKeyAdded{}),
|
||||
goamqp.WithTypeMapping("Organization.APIKeyRemoved", domain.APIKeyRemoved{}),
|
||||
goamqp.WithTypeMapping("Organization.Removed", domain.OrganizationRemoved{}),
|
||||
}
|
||||
if err := conn.Start(rootCtx, setups...); err != nil {
|
||||
return fmt.Errorf("failed to setup AMQP: %v", err)
|
||||
@@ -178,7 +176,7 @@ func start(closeEvents chan error, logger *log.Entry, connectToAmqpFunc func(url
|
||||
defer wg.Done()
|
||||
err := <-closeEvents
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("received close from AMQP")
|
||||
logger.With("error", err).Error("received close from AMQP")
|
||||
rootCancel()
|
||||
}
|
||||
}()
|
||||
@@ -188,8 +186,11 @@ func start(closeEvents chan error, logger *log.Entry, connectToAmqpFunc func(url
|
||||
defer wg.Done()
|
||||
<-rootCtx.Done()
|
||||
|
||||
if err := httpSrv.Close(); err != nil {
|
||||
logger.WithError(err).Error("close http server")
|
||||
shutdownCtx, shutdownRelease := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer shutdownRelease()
|
||||
|
||||
if err := httpSrv.Shutdown(shutdownCtx); err != nil {
|
||||
logger.With("error", err).Error("close http server")
|
||||
}
|
||||
close(sigint)
|
||||
close(closeEvents)
|
||||
@@ -200,10 +201,13 @@ func start(closeEvents chan error, logger *log.Entry, connectToAmqpFunc func(url
|
||||
defer rootCancel()
|
||||
|
||||
resolver := &graph.Resolver{
|
||||
EventStore: eventStore,
|
||||
Publisher: eventPublisher,
|
||||
Logger: logger,
|
||||
Cache: serviceCache,
|
||||
EventStore: eventStore,
|
||||
Publisher: eventPublisher,
|
||||
Logger: logger,
|
||||
Cache: serviceCache,
|
||||
PubSub: graph.NewPubSub(),
|
||||
CosmoGenerator: graph.NewCosmoGenerator(&graph.DefaultCommandExecutor{}, 60*time.Second),
|
||||
Debouncer: graph.NewDebouncer(500 * time.Millisecond),
|
||||
}
|
||||
|
||||
config := generated.Config{
|
||||
@@ -214,15 +218,49 @@ func start(closeEvents chan error, logger *log.Entry, connectToAmqpFunc func(url
|
||||
mw := middleware.NewAuth0("https://schemas.unbound.se", cli.Issuer, cli.StrictSSL)
|
||||
authMiddleware := middleware.NewAuth(serviceCache)
|
||||
config.Directives.Auth = authMiddleware.Directive
|
||||
srv := handler.NewDefaultServer(generated.NewExecutableSchema(
|
||||
config,
|
||||
))
|
||||
srv := handler.New(generated.NewExecutableSchema(config))
|
||||
|
||||
sentryHandler := sentryhttp.New(sentryhttp.Options{Repanic: true})
|
||||
mux.Handle("/", sentryHandler.HandleFunc(playground.Handler("GraphQL playground", "/query")))
|
||||
mux.Handle("/health", http.HandlerFunc(healthFunc))
|
||||
srv.AddTransport(transport.Websocket{
|
||||
KeepAlivePingInterval: 10 * time.Second,
|
||||
InitFunc: func(ctx context.Context, initPayload transport.InitPayload) (context.Context, *transport.InitPayload, error) {
|
||||
// Extract API key from WebSocket connection_init payload
|
||||
if apiKey, ok := initPayload["X-Api-Key"].(string); ok && apiKey != "" {
|
||||
logger.Info("WebSocket connection with API key", "has_key", true)
|
||||
ctx = context.WithValue(ctx, middleware.ApiKey, apiKey)
|
||||
|
||||
// Look up organization by API key (cache handles hash comparison)
|
||||
if organization := serviceCache.OrganizationByAPIKey(apiKey); organization != nil {
|
||||
logger.Info("WebSocket: Organization found for API key", "org_id", organization.ID.String())
|
||||
ctx = context.WithValue(ctx, middleware.OrganizationKey, *organization)
|
||||
} else {
|
||||
logger.Warn("WebSocket: No organization found for API key")
|
||||
}
|
||||
} else {
|
||||
logger.Info("WebSocket connection without API key")
|
||||
}
|
||||
return ctx, &initPayload, nil
|
||||
},
|
||||
})
|
||||
srv.AddTransport(transport.Options{})
|
||||
srv.AddTransport(transport.GET{})
|
||||
srv.AddTransport(transport.POST{})
|
||||
srv.AddTransport(transport.MultipartForm{})
|
||||
|
||||
srv.SetQueryCache(lru.New[*ast.QueryDocument](1000))
|
||||
|
||||
srv.Use(extension.Introspection{})
|
||||
srv.Use(extension.AutomaticPersistedQuery{
|
||||
Cache: lru.New[string](100),
|
||||
})
|
||||
|
||||
healthChecker := health.New(db.DB, logger)
|
||||
|
||||
mux.Handle("/", monitoring.Handler(playground.Handler("GraphQL playground", "/query")))
|
||||
mux.Handle("/health", http.HandlerFunc(healthChecker.LivenessHandler))
|
||||
mux.Handle("/health/live", http.HandlerFunc(healthChecker.LivenessHandler))
|
||||
mux.Handle("/health/ready", http.HandlerFunc(healthChecker.ReadinessHandler))
|
||||
mux.Handle("/query", cors.AllowAll().Handler(
|
||||
sentryHandler.Handle(
|
||||
monitoring.Handler(
|
||||
mw.Middleware().CheckJWT(
|
||||
apiKeyMiddleware.Handler(
|
||||
authMiddleware.Handler(srv),
|
||||
@@ -231,10 +269,10 @@ func start(closeEvents chan error, logger *log.Entry, connectToAmqpFunc func(url
|
||||
),
|
||||
))
|
||||
|
||||
logger.Infof("connect to http://localhost:%d/ for GraphQL playground", cli.Port)
|
||||
logger.Info(fmt.Sprintf("connect to http://localhost:%d/ for GraphQL playground", cli.Port))
|
||||
|
||||
if err := httpSrv.ListenAndServe(); err != nil {
|
||||
logger.WithError(err).Error("listen http")
|
||||
if err := httpSrv.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) {
|
||||
logger.With("error", err).Error("listen http")
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -279,42 +317,6 @@ func loadSubGraphs(ctx context.Context, eventStore eventsourced.EventStore, serv
|
||||
return nil
|
||||
}
|
||||
|
||||
func healthFunc(w http.ResponseWriter, _ *http.Request) {
|
||||
_, _ = w.Write([]byte("OK"))
|
||||
}
|
||||
|
||||
func setupSentry(logger log.Interface, args SentryConfig) error {
|
||||
if args.Environment == "" {
|
||||
return fmt.Errorf("no Sentry environment supplied, exiting")
|
||||
}
|
||||
cfg := sentry.ClientOptions{
|
||||
Dsn: args.DSN,
|
||||
Environment: args.Environment,
|
||||
Release: fmt.Sprintf("%s-%s", serviceName, buildVersion),
|
||||
}
|
||||
switch args.Environment {
|
||||
case "development":
|
||||
cfg.Debug = true
|
||||
cfg.EnableTracing = false
|
||||
cfg.TracesSampleRate = 0.0
|
||||
case "production":
|
||||
if args.DSN == "" {
|
||||
return fmt.Errorf("no DSN supplied for non-dev environment, exiting")
|
||||
}
|
||||
cfg.Debug = false
|
||||
cfg.EnableTracing = true
|
||||
cfg.TracesSampleRate = 0.01
|
||||
default:
|
||||
return fmt.Errorf("illegal environment %s", args.Environment)
|
||||
}
|
||||
|
||||
if err := sentry.Init(cfg); err != nil {
|
||||
return fmt.Errorf("sentry setup: %w", err)
|
||||
}
|
||||
logger.Infof("configured Sentry for env: %s", args.Environment)
|
||||
return nil
|
||||
}
|
||||
|
||||
func ConnectAMQP(url string) (Connection, error) {
|
||||
return goamqp.NewFromURL(serviceName, url)
|
||||
}
|
||||
|
||||
@@ -0,0 +1,362 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/99designs/gqlgen/graphql/handler/transport"
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gitlab.com/unboundsoftware/eventsourced/eventsourced"
|
||||
|
||||
"gitea.unbound.se/unboundsoftware/schemas/domain"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/hash"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/middleware"
|
||||
)
|
||||
|
||||
// MockCache is a mock implementation for testing
|
||||
type MockCache struct {
|
||||
organizations map[string]*domain.Organization // keyed by orgId-name composite
|
||||
apiKeys map[string]string // maps orgId-name to hashed key
|
||||
}
|
||||
|
||||
func (m *MockCache) OrganizationByAPIKey(plainKey string) *domain.Organization {
|
||||
// Find organization by comparing plaintext key with stored hash
|
||||
for compositeKey, hashedKey := range m.apiKeys {
|
||||
if hash.CompareAPIKey(hashedKey, plainKey) {
|
||||
return m.organizations[compositeKey]
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestWebSocketInitFunc_WithValidAPIKey(t *testing.T) {
|
||||
// Setup
|
||||
orgID := uuid.New()
|
||||
org := &domain.Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregate{
|
||||
ID: eventsourced.IdFromString(orgID.String()),
|
||||
},
|
||||
Name: "Test Organization",
|
||||
}
|
||||
|
||||
apiKey := "test-api-key-123"
|
||||
hashedKey, err := hash.APIKey(apiKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
compositeKey := orgID.String() + "-test-key"
|
||||
|
||||
mockCache := &MockCache{
|
||||
organizations: map[string]*domain.Organization{
|
||||
compositeKey: org,
|
||||
},
|
||||
apiKeys: map[string]string{
|
||||
compositeKey: hashedKey,
|
||||
},
|
||||
}
|
||||
|
||||
// Create InitFunc (simulating the WebSocket InitFunc logic)
|
||||
initFunc := func(ctx context.Context, initPayload transport.InitPayload) (context.Context, *transport.InitPayload, error) {
|
||||
// Extract API key from WebSocket connection_init payload
|
||||
if apiKey, ok := initPayload["X-Api-Key"].(string); ok && apiKey != "" {
|
||||
ctx = context.WithValue(ctx, middleware.ApiKey, apiKey)
|
||||
|
||||
// Look up organization by API key (cache handles hash comparison)
|
||||
if organization := mockCache.OrganizationByAPIKey(apiKey); organization != nil {
|
||||
ctx = context.WithValue(ctx, middleware.OrganizationKey, *organization)
|
||||
}
|
||||
}
|
||||
return ctx, &initPayload, nil
|
||||
}
|
||||
|
||||
// Test
|
||||
ctx := context.Background()
|
||||
initPayload := transport.InitPayload{
|
||||
"X-Api-Key": apiKey,
|
||||
}
|
||||
|
||||
resultCtx, resultPayload, err := initFunc(ctx, initPayload)
|
||||
|
||||
// Assert
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resultPayload)
|
||||
|
||||
// Check API key is in context
|
||||
if value := resultCtx.Value(middleware.ApiKey); value != nil {
|
||||
assert.Equal(t, apiKey, value.(string))
|
||||
} else {
|
||||
t.Fatal("API key not found in context")
|
||||
}
|
||||
|
||||
// Check organization is in context
|
||||
if value := resultCtx.Value(middleware.OrganizationKey); value != nil {
|
||||
capturedOrg, ok := value.(domain.Organization)
|
||||
require.True(t, ok, "Organization should be of correct type")
|
||||
assert.Equal(t, org.Name, capturedOrg.Name)
|
||||
assert.Equal(t, org.ID.String(), capturedOrg.ID.String())
|
||||
} else {
|
||||
t.Fatal("Organization not found in context")
|
||||
}
|
||||
}
|
||||
|
||||
func TestWebSocketInitFunc_WithInvalidAPIKey(t *testing.T) {
|
||||
// Setup
|
||||
mockCache := &MockCache{
|
||||
organizations: map[string]*domain.Organization{},
|
||||
apiKeys: map[string]string{},
|
||||
}
|
||||
|
||||
apiKey := "invalid-api-key"
|
||||
|
||||
// Create InitFunc
|
||||
initFunc := func(ctx context.Context, initPayload transport.InitPayload) (context.Context, *transport.InitPayload, error) {
|
||||
// Extract API key from WebSocket connection_init payload
|
||||
if apiKey, ok := initPayload["X-Api-Key"].(string); ok && apiKey != "" {
|
||||
ctx = context.WithValue(ctx, middleware.ApiKey, apiKey)
|
||||
|
||||
// Look up organization by API key (cache handles hash comparison)
|
||||
if organization := mockCache.OrganizationByAPIKey(apiKey); organization != nil {
|
||||
ctx = context.WithValue(ctx, middleware.OrganizationKey, *organization)
|
||||
}
|
||||
}
|
||||
return ctx, &initPayload, nil
|
||||
}
|
||||
|
||||
// Test
|
||||
ctx := context.Background()
|
||||
initPayload := transport.InitPayload{
|
||||
"X-Api-Key": apiKey,
|
||||
}
|
||||
|
||||
resultCtx, resultPayload, err := initFunc(ctx, initPayload)
|
||||
|
||||
// Assert
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resultPayload)
|
||||
|
||||
// Check API key is in context
|
||||
if value := resultCtx.Value(middleware.ApiKey); value != nil {
|
||||
assert.Equal(t, apiKey, value.(string))
|
||||
} else {
|
||||
t.Fatal("API key not found in context")
|
||||
}
|
||||
|
||||
// Check organization is NOT in context (since API key is invalid)
|
||||
value := resultCtx.Value(middleware.OrganizationKey)
|
||||
assert.Nil(t, value, "Organization should not be set for invalid API key")
|
||||
}
|
||||
|
||||
func TestWebSocketInitFunc_WithoutAPIKey(t *testing.T) {
|
||||
// Setup
|
||||
mockCache := &MockCache{
|
||||
organizations: map[string]*domain.Organization{},
|
||||
apiKeys: map[string]string{},
|
||||
}
|
||||
|
||||
// Create InitFunc
|
||||
initFunc := func(ctx context.Context, initPayload transport.InitPayload) (context.Context, *transport.InitPayload, error) {
|
||||
// Extract API key from WebSocket connection_init payload
|
||||
if apiKey, ok := initPayload["X-Api-Key"].(string); ok && apiKey != "" {
|
||||
ctx = context.WithValue(ctx, middleware.ApiKey, apiKey)
|
||||
|
||||
// Look up organization by API key (cache handles hash comparison)
|
||||
if organization := mockCache.OrganizationByAPIKey(apiKey); organization != nil {
|
||||
ctx = context.WithValue(ctx, middleware.OrganizationKey, *organization)
|
||||
}
|
||||
}
|
||||
return ctx, &initPayload, nil
|
||||
}
|
||||
|
||||
// Test
|
||||
ctx := context.Background()
|
||||
initPayload := transport.InitPayload{}
|
||||
|
||||
resultCtx, resultPayload, err := initFunc(ctx, initPayload)
|
||||
|
||||
// Assert
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resultPayload)
|
||||
|
||||
// Check API key is NOT in context
|
||||
value := resultCtx.Value(middleware.ApiKey)
|
||||
assert.Nil(t, value, "API key should not be set when not provided")
|
||||
|
||||
// Check organization is NOT in context
|
||||
value = resultCtx.Value(middleware.OrganizationKey)
|
||||
assert.Nil(t, value, "Organization should not be set when API key is not provided")
|
||||
}
|
||||
|
||||
func TestWebSocketInitFunc_WithEmptyAPIKey(t *testing.T) {
|
||||
// Setup
|
||||
mockCache := &MockCache{
|
||||
organizations: map[string]*domain.Organization{},
|
||||
apiKeys: map[string]string{},
|
||||
}
|
||||
|
||||
// Create InitFunc
|
||||
initFunc := func(ctx context.Context, initPayload transport.InitPayload) (context.Context, *transport.InitPayload, error) {
|
||||
// Extract API key from WebSocket connection_init payload
|
||||
if apiKey, ok := initPayload["X-Api-Key"].(string); ok && apiKey != "" {
|
||||
ctx = context.WithValue(ctx, middleware.ApiKey, apiKey)
|
||||
|
||||
// Look up organization by API key (cache handles hash comparison)
|
||||
if organization := mockCache.OrganizationByAPIKey(apiKey); organization != nil {
|
||||
ctx = context.WithValue(ctx, middleware.OrganizationKey, *organization)
|
||||
}
|
||||
}
|
||||
return ctx, &initPayload, nil
|
||||
}
|
||||
|
||||
// Test
|
||||
ctx := context.Background()
|
||||
initPayload := transport.InitPayload{
|
||||
"X-Api-Key": "", // Empty string
|
||||
}
|
||||
|
||||
resultCtx, resultPayload, err := initFunc(ctx, initPayload)
|
||||
|
||||
// Assert
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resultPayload)
|
||||
|
||||
// Check API key is NOT in context (because empty string fails the condition)
|
||||
value := resultCtx.Value(middleware.ApiKey)
|
||||
assert.Nil(t, value, "API key should not be set when empty")
|
||||
|
||||
// Check organization is NOT in context
|
||||
value = resultCtx.Value(middleware.OrganizationKey)
|
||||
assert.Nil(t, value, "Organization should not be set when API key is empty")
|
||||
}
|
||||
|
||||
func TestWebSocketInitFunc_WithWrongTypeAPIKey(t *testing.T) {
|
||||
// Setup
|
||||
mockCache := &MockCache{
|
||||
organizations: map[string]*domain.Organization{},
|
||||
apiKeys: map[string]string{},
|
||||
}
|
||||
|
||||
// Create InitFunc
|
||||
initFunc := func(ctx context.Context, initPayload transport.InitPayload) (context.Context, *transport.InitPayload, error) {
|
||||
// Extract API key from WebSocket connection_init payload
|
||||
if apiKey, ok := initPayload["X-Api-Key"].(string); ok && apiKey != "" {
|
||||
ctx = context.WithValue(ctx, middleware.ApiKey, apiKey)
|
||||
|
||||
// Look up organization by API key (cache handles hash comparison)
|
||||
if organization := mockCache.OrganizationByAPIKey(apiKey); organization != nil {
|
||||
ctx = context.WithValue(ctx, middleware.OrganizationKey, *organization)
|
||||
}
|
||||
}
|
||||
return ctx, &initPayload, nil
|
||||
}
|
||||
|
||||
// Test
|
||||
ctx := context.Background()
|
||||
initPayload := transport.InitPayload{
|
||||
"X-Api-Key": 12345, // Wrong type (int instead of string)
|
||||
}
|
||||
|
||||
resultCtx, resultPayload, err := initFunc(ctx, initPayload)
|
||||
|
||||
// Assert
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resultPayload)
|
||||
|
||||
// Check API key is NOT in context (type assertion fails)
|
||||
value := resultCtx.Value(middleware.ApiKey)
|
||||
assert.Nil(t, value, "API key should not be set when wrong type")
|
||||
|
||||
// Check organization is NOT in context
|
||||
value = resultCtx.Value(middleware.OrganizationKey)
|
||||
assert.Nil(t, value, "Organization should not be set when API key has wrong type")
|
||||
}
|
||||
|
||||
func TestWebSocketInitFunc_WithMultipleOrganizations(t *testing.T) {
|
||||
// Setup - create multiple organizations
|
||||
org1ID := uuid.New()
|
||||
org1 := &domain.Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregate{
|
||||
ID: eventsourced.IdFromString(org1ID.String()),
|
||||
},
|
||||
Name: "Organization 1",
|
||||
}
|
||||
|
||||
org2ID := uuid.New()
|
||||
org2 := &domain.Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregate{
|
||||
ID: eventsourced.IdFromString(org2ID.String()),
|
||||
},
|
||||
Name: "Organization 2",
|
||||
}
|
||||
|
||||
apiKey1 := "api-key-org-1"
|
||||
apiKey2 := "api-key-org-2"
|
||||
hashedKey1, err := hash.APIKey(apiKey1)
|
||||
require.NoError(t, err)
|
||||
hashedKey2, err := hash.APIKey(apiKey2)
|
||||
require.NoError(t, err)
|
||||
|
||||
compositeKey1 := org1ID.String() + "-key1"
|
||||
compositeKey2 := org2ID.String() + "-key2"
|
||||
|
||||
mockCache := &MockCache{
|
||||
organizations: map[string]*domain.Organization{
|
||||
compositeKey1: org1,
|
||||
compositeKey2: org2,
|
||||
},
|
||||
apiKeys: map[string]string{
|
||||
compositeKey1: hashedKey1,
|
||||
compositeKey2: hashedKey2,
|
||||
},
|
||||
}
|
||||
|
||||
// Create InitFunc
|
||||
initFunc := func(ctx context.Context, initPayload transport.InitPayload) (context.Context, *transport.InitPayload, error) {
|
||||
// Extract API key from WebSocket connection_init payload
|
||||
if apiKey, ok := initPayload["X-Api-Key"].(string); ok && apiKey != "" {
|
||||
ctx = context.WithValue(ctx, middleware.ApiKey, apiKey)
|
||||
|
||||
// Look up organization by API key (cache handles hash comparison)
|
||||
if organization := mockCache.OrganizationByAPIKey(apiKey); organization != nil {
|
||||
ctx = context.WithValue(ctx, middleware.OrganizationKey, *organization)
|
||||
}
|
||||
}
|
||||
return ctx, &initPayload, nil
|
||||
}
|
||||
|
||||
// Test with first API key
|
||||
ctx1 := context.Background()
|
||||
initPayload1 := transport.InitPayload{
|
||||
"X-Api-Key": apiKey1,
|
||||
}
|
||||
|
||||
resultCtx1, _, err := initFunc(ctx1, initPayload1)
|
||||
require.NoError(t, err)
|
||||
|
||||
if value := resultCtx1.Value(middleware.OrganizationKey); value != nil {
|
||||
capturedOrg, ok := value.(domain.Organization)
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, org1.Name, capturedOrg.Name)
|
||||
assert.Equal(t, org1.ID.String(), capturedOrg.ID.String())
|
||||
} else {
|
||||
t.Fatal("Organization 1 not found in context")
|
||||
}
|
||||
|
||||
// Test with second API key
|
||||
ctx2 := context.Background()
|
||||
initPayload2 := transport.InitPayload{
|
||||
"X-Api-Key": apiKey2,
|
||||
}
|
||||
|
||||
resultCtx2, _, err := initFunc(ctx2, initPayload2)
|
||||
require.NoError(t, err)
|
||||
|
||||
if value := resultCtx2.Value(middleware.OrganizationKey); value != nil {
|
||||
capturedOrg, ok := value.(domain.Organization)
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, org2.Name, capturedOrg.Name)
|
||||
assert.Equal(t, org2.ID.String(), capturedOrg.ID.String())
|
||||
} else {
|
||||
t.Fatal("Organization 2 not found in context")
|
||||
}
|
||||
}
|
||||
+26
-26
@@ -43,6 +43,7 @@ type SubGraphsResponse struct {
|
||||
func (v *SubGraphsResponse) GetSupergraph() SubGraphsSupergraph { return v.Supergraph }
|
||||
|
||||
func (v *SubGraphsResponse) UnmarshalJSON(b []byte) error {
|
||||
|
||||
if string(b) == "null" {
|
||||
return nil
|
||||
}
|
||||
@@ -148,6 +149,7 @@ func __unmarshalSubGraphsSupergraph(b []byte, v *SubGraphsSupergraph) error {
|
||||
}
|
||||
|
||||
func __marshalSubGraphsSupergraph(v *SubGraphsSupergraph) ([]byte, error) {
|
||||
|
||||
var typename string
|
||||
switch v := (*v).(type) {
|
||||
case *SubGraphsSupergraphSubGraphs:
|
||||
@@ -270,7 +272,7 @@ type __UpdateSubGraphInput struct {
|
||||
// GetInput returns __UpdateSubGraphInput.Input, and is useful for accessing the field via an interface.
|
||||
func (v *__UpdateSubGraphInput) GetInput() *InputSubGraph { return v.Input }
|
||||
|
||||
// The query or mutation executed by SubGraphs.
|
||||
// The query executed by SubGraphs.
|
||||
const SubGraphs_Operation = `
|
||||
query SubGraphs ($ref: String!) {
|
||||
supergraph(ref: $ref) {
|
||||
@@ -289,32 +291,31 @@ query SubGraphs ($ref: String!) {
|
||||
`
|
||||
|
||||
func SubGraphs(
|
||||
ctx context.Context,
|
||||
client graphql.Client,
|
||||
ctx_ context.Context,
|
||||
client_ graphql.Client,
|
||||
ref string,
|
||||
) (*SubGraphsResponse, error) {
|
||||
req := &graphql.Request{
|
||||
) (data_ *SubGraphsResponse, err_ error) {
|
||||
req_ := &graphql.Request{
|
||||
OpName: "SubGraphs",
|
||||
Query: SubGraphs_Operation,
|
||||
Variables: &__SubGraphsInput{
|
||||
Ref: ref,
|
||||
},
|
||||
}
|
||||
var err error
|
||||
|
||||
var data SubGraphsResponse
|
||||
resp := &graphql.Response{Data: &data}
|
||||
data_ = &SubGraphsResponse{}
|
||||
resp_ := &graphql.Response{Data: data_}
|
||||
|
||||
err = client.MakeRequest(
|
||||
ctx,
|
||||
req,
|
||||
resp,
|
||||
err_ = client_.MakeRequest(
|
||||
ctx_,
|
||||
req_,
|
||||
resp_,
|
||||
)
|
||||
|
||||
return &data, err
|
||||
return data_, err_
|
||||
}
|
||||
|
||||
// The query or mutation executed by UpdateSubGraph.
|
||||
// The mutation executed by UpdateSubGraph.
|
||||
const UpdateSubGraph_Operation = `
|
||||
mutation UpdateSubGraph ($input: InputSubGraph!) {
|
||||
updateSubGraph(input: $input) {
|
||||
@@ -328,27 +329,26 @@ mutation UpdateSubGraph ($input: InputSubGraph!) {
|
||||
`
|
||||
|
||||
func UpdateSubGraph(
|
||||
ctx context.Context,
|
||||
client graphql.Client,
|
||||
ctx_ context.Context,
|
||||
client_ graphql.Client,
|
||||
input *InputSubGraph,
|
||||
) (*UpdateSubGraphResponse, error) {
|
||||
req := &graphql.Request{
|
||||
) (data_ *UpdateSubGraphResponse, err_ error) {
|
||||
req_ := &graphql.Request{
|
||||
OpName: "UpdateSubGraph",
|
||||
Query: UpdateSubGraph_Operation,
|
||||
Variables: &__UpdateSubGraphInput{
|
||||
Input: input,
|
||||
},
|
||||
}
|
||||
var err error
|
||||
|
||||
var data UpdateSubGraphResponse
|
||||
resp := &graphql.Response{Data: &data}
|
||||
data_ = &UpdateSubGraphResponse{}
|
||||
resp_ := &graphql.Response{Data: data_}
|
||||
|
||||
err = client.MakeRequest(
|
||||
ctx,
|
||||
req,
|
||||
resp,
|
||||
err_ = client_.MakeRequest(
|
||||
ctx_,
|
||||
req_,
|
||||
resp_,
|
||||
)
|
||||
|
||||
return &data, err
|
||||
return data_, err_
|
||||
}
|
||||
|
||||
@@ -23,6 +23,8 @@ func (o *Organization) Apply(event eventsourced.Event) error {
|
||||
switch e := event.(type) {
|
||||
case *OrganizationAdded:
|
||||
e.UpdateOrganization(o)
|
||||
case *UserAddedToOrganization:
|
||||
e.UpdateOrganization(o)
|
||||
case *APIKeyAdded:
|
||||
o.APIKeys = append(o.APIKeys, APIKey{
|
||||
Name: e.Name,
|
||||
@@ -36,6 +38,10 @@ func (o *Organization) Apply(event eventsourced.Event) error {
|
||||
})
|
||||
o.ChangedBy = e.Initiator
|
||||
o.ChangedAt = e.When()
|
||||
case *APIKeyRemoved:
|
||||
e.UpdateOrganization(o)
|
||||
case *OrganizationRemoved:
|
||||
e.UpdateOrganization(o)
|
||||
default:
|
||||
return fmt.Errorf("unexpected event type: %+v", event)
|
||||
}
|
||||
|
||||
+95
-2
@@ -7,7 +7,7 @@ import (
|
||||
|
||||
"gitlab.com/unboundsoftware/eventsourced/eventsourced"
|
||||
|
||||
"gitlab.com/unboundsoftware/schemas/hash"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/hash"
|
||||
)
|
||||
|
||||
type AddOrganization struct {
|
||||
@@ -34,6 +34,37 @@ func (a AddOrganization) Event(context.Context) eventsourced.Event {
|
||||
|
||||
var _ eventsourced.Command = AddOrganization{}
|
||||
|
||||
type AddUserToOrganization struct {
|
||||
UserId string
|
||||
Initiator string
|
||||
}
|
||||
|
||||
func (a AddUserToOrganization) Validate(_ context.Context, aggregate eventsourced.Aggregate) error {
|
||||
if aggregate.Identity() == nil {
|
||||
return fmt.Errorf("organization does not exist")
|
||||
}
|
||||
if len(a.UserId) == 0 {
|
||||
return fmt.Errorf("userId is required")
|
||||
}
|
||||
// Check if user is already in the organization
|
||||
org := aggregate.(*Organization)
|
||||
for _, user := range org.Users {
|
||||
if user == a.UserId {
|
||||
return fmt.Errorf("user is already a member of this organization")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a AddUserToOrganization) Event(context.Context) eventsourced.Event {
|
||||
return &UserAddedToOrganization{
|
||||
UserId: a.UserId,
|
||||
Initiator: a.Initiator,
|
||||
}
|
||||
}
|
||||
|
||||
var _ eventsourced.Command = AddUserToOrganization{}
|
||||
|
||||
type AddAPIKey struct {
|
||||
Name string
|
||||
Key string
|
||||
@@ -56,9 +87,20 @@ func (a AddAPIKey) Validate(_ context.Context, aggregate eventsourced.Aggregate)
|
||||
}
|
||||
|
||||
func (a AddAPIKey) Event(context.Context) eventsourced.Event {
|
||||
// Hash the API key using bcrypt for secure storage
|
||||
// Note: We can't return an error here, but bcrypt errors are extremely rare
|
||||
// (only if system runs out of memory or bcrypt cost is invalid)
|
||||
// We use a fixed cost of 12 which is always valid
|
||||
hashedKey, err := hash.APIKey(a.Key)
|
||||
if err != nil {
|
||||
// This should never happen with bcrypt cost 12, but if it does,
|
||||
// we'll store an empty hash which will fail validation later
|
||||
hashedKey = ""
|
||||
}
|
||||
|
||||
return &APIKeyAdded{
|
||||
Name: a.Name,
|
||||
Key: hash.String(a.Key),
|
||||
Key: hashedKey,
|
||||
Refs: a.Refs,
|
||||
Read: a.Read,
|
||||
Publish: a.Publish,
|
||||
@@ -68,6 +110,57 @@ func (a AddAPIKey) Event(context.Context) eventsourced.Event {
|
||||
|
||||
var _ eventsourced.Command = AddAPIKey{}
|
||||
|
||||
type RemoveAPIKey struct {
|
||||
KeyName string
|
||||
Initiator string
|
||||
}
|
||||
|
||||
func (r RemoveAPIKey) Validate(_ context.Context, aggregate eventsourced.Aggregate) error {
|
||||
if aggregate.Identity() == nil {
|
||||
return fmt.Errorf("organization does not exist")
|
||||
}
|
||||
org := aggregate.(*Organization)
|
||||
found := false
|
||||
for _, k := range org.APIKeys {
|
||||
if k.Name == r.KeyName {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return fmt.Errorf("API key '%s' not found", r.KeyName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r RemoveAPIKey) Event(context.Context) eventsourced.Event {
|
||||
return &APIKeyRemoved{
|
||||
KeyName: r.KeyName,
|
||||
Initiator: r.Initiator,
|
||||
}
|
||||
}
|
||||
|
||||
var _ eventsourced.Command = RemoveAPIKey{}
|
||||
|
||||
type RemoveOrganization struct {
|
||||
Initiator string
|
||||
}
|
||||
|
||||
func (r RemoveOrganization) Validate(_ context.Context, aggregate eventsourced.Aggregate) error {
|
||||
if aggregate.Identity() == nil {
|
||||
return fmt.Errorf("organization does not exist")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r RemoveOrganization) Event(context.Context) eventsourced.Event {
|
||||
return &OrganizationRemoved{
|
||||
Initiator: r.Initiator,
|
||||
}
|
||||
}
|
||||
|
||||
var _ eventsourced.Command = RemoveOrganization{}
|
||||
|
||||
type UpdateSubGraph struct {
|
||||
OrganizationId string
|
||||
Ref string
|
||||
|
||||
+524
-10
@@ -2,12 +2,73 @@ package domain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gitlab.com/unboundsoftware/eventsourced/eventsourced"
|
||||
|
||||
"gitea.unbound.se/unboundsoftware/schemas/hash"
|
||||
)
|
||||
|
||||
// AddOrganization tests
|
||||
|
||||
func TestAddOrganization_Validate_Success(t *testing.T) {
|
||||
cmd := AddOrganization{
|
||||
Name: "Test Org",
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
org := &Organization{} // New organization with no identity
|
||||
err := cmd.Validate(context.Background(), org)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestAddOrganization_Validate_AlreadyExists(t *testing.T) {
|
||||
cmd := AddOrganization{
|
||||
Name: "Test Org",
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
org := &Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("existing-org-id"),
|
||||
}
|
||||
|
||||
err := cmd.Validate(context.Background(), org)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "already exists")
|
||||
}
|
||||
|
||||
func TestAddOrganization_Validate_EmptyName(t *testing.T) {
|
||||
cmd := AddOrganization{
|
||||
Name: "",
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
org := &Organization{}
|
||||
err := cmd.Validate(context.Background(), org)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "name is required")
|
||||
}
|
||||
|
||||
func TestAddOrganization_Event(t *testing.T) {
|
||||
cmd := AddOrganization{
|
||||
Name: "Test Org",
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
event := cmd.Event(context.Background())
|
||||
require.NotNil(t, event)
|
||||
|
||||
orgEvent, ok := event.(*OrganizationAdded)
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, "Test Org", orgEvent.Name)
|
||||
assert.Equal(t, "user@example.com", orgEvent.Initiator)
|
||||
}
|
||||
|
||||
// AddAPIKey tests
|
||||
|
||||
func TestAddAPIKey_Event(t *testing.T) {
|
||||
type fields struct {
|
||||
Name string
|
||||
@@ -24,7 +85,6 @@ func TestAddAPIKey_Event(t *testing.T) {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
want eventsourced.Event
|
||||
}{
|
||||
{
|
||||
name: "event",
|
||||
@@ -37,14 +97,6 @@ func TestAddAPIKey_Event(t *testing.T) {
|
||||
Initiator: "jim@example.org",
|
||||
},
|
||||
args: args{},
|
||||
want: &APIKeyAdded{
|
||||
Name: "test",
|
||||
Key: "dXNfYWtfMTIzNDU2Nzg5MDEyMzQ1NuOwxEKY/BwUmvv0yJlvuSQnrkHkZJuTTKSVmRt4UrhV",
|
||||
Refs: []string{"Example@dev"},
|
||||
Read: true,
|
||||
Publish: true,
|
||||
Initiator: "jim@example.org",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
@@ -57,7 +109,469 @@ func TestAddAPIKey_Event(t *testing.T) {
|
||||
Publish: tt.fields.Publish,
|
||||
Initiator: tt.fields.Initiator,
|
||||
}
|
||||
assert.Equalf(t, tt.want, a.Event(tt.args.in0), "Event(%v)", tt.args.in0)
|
||||
event := a.Event(tt.args.in0)
|
||||
require.NotNil(t, event)
|
||||
|
||||
// Cast to APIKeyAdded to verify fields
|
||||
apiKeyEvent, ok := event.(*APIKeyAdded)
|
||||
require.True(t, ok, "Event should be *APIKeyAdded")
|
||||
|
||||
// Verify non-key fields match exactly
|
||||
assert.Equal(t, tt.fields.Name, apiKeyEvent.Name)
|
||||
assert.Equal(t, tt.fields.Refs, apiKeyEvent.Refs)
|
||||
assert.Equal(t, tt.fields.Read, apiKeyEvent.Read)
|
||||
assert.Equal(t, tt.fields.Publish, apiKeyEvent.Publish)
|
||||
assert.Equal(t, tt.fields.Initiator, apiKeyEvent.Initiator)
|
||||
|
||||
// Verify the key is hashed correctly (bcrypt format)
|
||||
assert.True(t, strings.HasPrefix(apiKeyEvent.Key, "$2"), "Key should be bcrypt hashed")
|
||||
assert.NotEqual(t, tt.fields.Key, apiKeyEvent.Key, "Key should be hashed, not plaintext")
|
||||
|
||||
// Verify the hash matches the original key
|
||||
assert.True(t, hash.CompareAPIKey(apiKeyEvent.Key, tt.fields.Key), "Hashed key should match original")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddAPIKey_Validate_Success(t *testing.T) {
|
||||
cmd := AddAPIKey{
|
||||
Name: "production-key",
|
||||
Key: "us_ak_1234567890123456",
|
||||
Refs: []string{"main"},
|
||||
Read: true,
|
||||
Publish: false,
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
org := &Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("org-123"),
|
||||
APIKeys: []APIKey{},
|
||||
}
|
||||
|
||||
err := cmd.Validate(context.Background(), org)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestAddAPIKey_Validate_OrganizationNotExists(t *testing.T) {
|
||||
cmd := AddAPIKey{
|
||||
Name: "production-key",
|
||||
Key: "us_ak_1234567890123456",
|
||||
Refs: []string{"main"},
|
||||
Read: true,
|
||||
Publish: false,
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
org := &Organization{} // No identity means it doesn't exist
|
||||
err := cmd.Validate(context.Background(), org)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "does not exist")
|
||||
}
|
||||
|
||||
func TestAddAPIKey_Validate_DuplicateKeyName(t *testing.T) {
|
||||
cmd := AddAPIKey{
|
||||
Name: "existing-key",
|
||||
Key: "us_ak_1234567890123456",
|
||||
Refs: []string{"main"},
|
||||
Read: true,
|
||||
Publish: false,
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
org := &Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("org-123"),
|
||||
APIKeys: []APIKey{
|
||||
{
|
||||
Name: "existing-key",
|
||||
Key: "hashed-key",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := cmd.Validate(context.Background(), org)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "already exist")
|
||||
assert.Contains(t, err.Error(), "existing-key")
|
||||
}
|
||||
|
||||
// UpdateSubGraph tests
|
||||
|
||||
func TestUpdateSubGraph_Validate_Success(t *testing.T) {
|
||||
url := "http://example.com/graphql"
|
||||
cmd := UpdateSubGraph{
|
||||
OrganizationId: "org-123",
|
||||
Ref: "main",
|
||||
Service: "users",
|
||||
Url: &url,
|
||||
Sdl: "type Query { hello: String }",
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
subGraph := &SubGraph{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("subgraph-123"),
|
||||
}
|
||||
|
||||
err := cmd.Validate(context.Background(), subGraph)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestUpdateSubGraph_Validate_MissingRef(t *testing.T) {
|
||||
url := "http://example.com/graphql"
|
||||
cmd := UpdateSubGraph{
|
||||
OrganizationId: "org-123",
|
||||
Ref: "",
|
||||
Service: "users",
|
||||
Url: &url,
|
||||
Sdl: "type Query { hello: String }",
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
subGraph := &SubGraph{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("subgraph-123"),
|
||||
}
|
||||
|
||||
err := cmd.Validate(context.Background(), subGraph)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "ref is missing")
|
||||
}
|
||||
|
||||
func TestUpdateSubGraph_Validate_RefWhitespaceOnly(t *testing.T) {
|
||||
url := "http://example.com/graphql"
|
||||
cmd := UpdateSubGraph{
|
||||
OrganizationId: "org-123",
|
||||
Ref: " ",
|
||||
Service: "users",
|
||||
Url: &url,
|
||||
Sdl: "type Query { hello: String }",
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
subGraph := &SubGraph{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("subgraph-123"),
|
||||
}
|
||||
|
||||
err := cmd.Validate(context.Background(), subGraph)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "ref is missing")
|
||||
}
|
||||
|
||||
func TestUpdateSubGraph_Validate_MissingService(t *testing.T) {
|
||||
url := "http://example.com/graphql"
|
||||
cmd := UpdateSubGraph{
|
||||
OrganizationId: "org-123",
|
||||
Ref: "main",
|
||||
Service: "",
|
||||
Url: &url,
|
||||
Sdl: "type Query { hello: String }",
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
subGraph := &SubGraph{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("subgraph-123"),
|
||||
}
|
||||
|
||||
err := cmd.Validate(context.Background(), subGraph)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "service is missing")
|
||||
}
|
||||
|
||||
func TestUpdateSubGraph_Validate_ServiceWhitespaceOnly(t *testing.T) {
|
||||
url := "http://example.com/graphql"
|
||||
cmd := UpdateSubGraph{
|
||||
OrganizationId: "org-123",
|
||||
Ref: "main",
|
||||
Service: " ",
|
||||
Url: &url,
|
||||
Sdl: "type Query { hello: String }",
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
subGraph := &SubGraph{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("subgraph-123"),
|
||||
}
|
||||
|
||||
err := cmd.Validate(context.Background(), subGraph)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "service is missing")
|
||||
}
|
||||
|
||||
func TestUpdateSubGraph_Validate_MissingSDL(t *testing.T) {
|
||||
url := "http://example.com/graphql"
|
||||
cmd := UpdateSubGraph{
|
||||
OrganizationId: "org-123",
|
||||
Ref: "main",
|
||||
Service: "users",
|
||||
Url: &url,
|
||||
Sdl: "",
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
subGraph := &SubGraph{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("subgraph-123"),
|
||||
}
|
||||
|
||||
err := cmd.Validate(context.Background(), subGraph)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "SDL is missing")
|
||||
}
|
||||
|
||||
func TestUpdateSubGraph_Validate_SDLWhitespaceOnly(t *testing.T) {
|
||||
url := "http://example.com/graphql"
|
||||
cmd := UpdateSubGraph{
|
||||
OrganizationId: "org-123",
|
||||
Ref: "main",
|
||||
Service: "users",
|
||||
Url: &url,
|
||||
Sdl: " \n\t ",
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
subGraph := &SubGraph{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("subgraph-123"),
|
||||
}
|
||||
|
||||
err := cmd.Validate(context.Background(), subGraph)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "SDL is missing")
|
||||
}
|
||||
|
||||
func TestUpdateSubGraph_Validate_MissingURL_NoExistingURL(t *testing.T) {
|
||||
cmd := UpdateSubGraph{
|
||||
OrganizationId: "org-123",
|
||||
Ref: "main",
|
||||
Service: "users",
|
||||
Url: nil,
|
||||
Sdl: "type Query { hello: String }",
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
subGraph := &SubGraph{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("subgraph-123"),
|
||||
Url: nil, // No existing URL
|
||||
}
|
||||
|
||||
err := cmd.Validate(context.Background(), subGraph)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "url is missing")
|
||||
}
|
||||
|
||||
func TestUpdateSubGraph_Validate_MissingURL_HasExistingURL(t *testing.T) {
|
||||
existingURL := "http://example.com/graphql"
|
||||
cmd := UpdateSubGraph{
|
||||
OrganizationId: "org-123",
|
||||
Ref: "main",
|
||||
Service: "users",
|
||||
Url: nil,
|
||||
Sdl: "type Query { hello: String }",
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
subGraph := &SubGraph{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("subgraph-123"),
|
||||
Url: &existingURL, // Has existing URL, so nil is OK
|
||||
}
|
||||
|
||||
err := cmd.Validate(context.Background(), subGraph)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestUpdateSubGraph_Validate_EmptyURL_NoExistingURL(t *testing.T) {
|
||||
emptyURL := ""
|
||||
cmd := UpdateSubGraph{
|
||||
OrganizationId: "org-123",
|
||||
Ref: "main",
|
||||
Service: "users",
|
||||
Url: &emptyURL,
|
||||
Sdl: "type Query { hello: String }",
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
subGraph := &SubGraph{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("subgraph-123"),
|
||||
Url: nil,
|
||||
}
|
||||
|
||||
err := cmd.Validate(context.Background(), subGraph)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "url is missing")
|
||||
}
|
||||
|
||||
func TestUpdateSubGraph_Validate_URLWhitespaceOnly_NoExistingURL(t *testing.T) {
|
||||
whitespaceURL := " "
|
||||
cmd := UpdateSubGraph{
|
||||
OrganizationId: "org-123",
|
||||
Ref: "main",
|
||||
Service: "users",
|
||||
Url: &whitespaceURL,
|
||||
Sdl: "type Query { hello: String }",
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
subGraph := &SubGraph{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("subgraph-123"),
|
||||
Url: nil,
|
||||
}
|
||||
|
||||
err := cmd.Validate(context.Background(), subGraph)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "url is missing")
|
||||
}
|
||||
|
||||
func TestUpdateSubGraph_Validate_WrongAggregateType(t *testing.T) {
|
||||
url := "http://example.com/graphql"
|
||||
cmd := UpdateSubGraph{
|
||||
OrganizationId: "org-123",
|
||||
Ref: "main",
|
||||
Service: "users",
|
||||
Url: &url,
|
||||
Sdl: "type Query { hello: String }",
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
// Pass wrong aggregate type
|
||||
org := &Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("org-123"),
|
||||
}
|
||||
|
||||
err := cmd.Validate(context.Background(), org)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "not a SubGraph")
|
||||
}
|
||||
|
||||
func TestUpdateSubGraph_Event(t *testing.T) {
|
||||
url := "http://example.com/graphql"
|
||||
wsURL := "ws://example.com/graphql"
|
||||
cmd := UpdateSubGraph{
|
||||
OrganizationId: "org-123",
|
||||
Ref: "main",
|
||||
Service: "users",
|
||||
Url: &url,
|
||||
WSUrl: &wsURL,
|
||||
Sdl: "type Query { hello: String }",
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
event := cmd.Event(context.Background())
|
||||
require.NotNil(t, event)
|
||||
|
||||
subGraphEvent, ok := event.(*SubGraphUpdated)
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, "org-123", subGraphEvent.OrganizationId)
|
||||
assert.Equal(t, "main", subGraphEvent.Ref)
|
||||
assert.Equal(t, "users", subGraphEvent.Service)
|
||||
assert.Equal(t, url, *subGraphEvent.Url)
|
||||
assert.Equal(t, wsURL, *subGraphEvent.WSUrl)
|
||||
assert.Equal(t, "type Query { hello: String }", subGraphEvent.Sdl)
|
||||
assert.Equal(t, "user@example.com", subGraphEvent.Initiator)
|
||||
}
|
||||
|
||||
// RemoveAPIKey tests
|
||||
|
||||
func TestRemoveAPIKey_Validate_Success(t *testing.T) {
|
||||
cmd := RemoveAPIKey{
|
||||
KeyName: "production-key",
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
org := &Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("org-123"),
|
||||
APIKeys: []APIKey{
|
||||
{
|
||||
Name: "production-key",
|
||||
Key: "hashed-key",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := cmd.Validate(context.Background(), org)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestRemoveAPIKey_Validate_OrganizationNotExists(t *testing.T) {
|
||||
cmd := RemoveAPIKey{
|
||||
KeyName: "production-key",
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
org := &Organization{} // No identity means it doesn't exist
|
||||
err := cmd.Validate(context.Background(), org)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "does not exist")
|
||||
}
|
||||
|
||||
func TestRemoveAPIKey_Validate_KeyNotFound(t *testing.T) {
|
||||
cmd := RemoveAPIKey{
|
||||
KeyName: "non-existent-key",
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
org := &Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("org-123"),
|
||||
APIKeys: []APIKey{
|
||||
{
|
||||
Name: "production-key",
|
||||
Key: "hashed-key",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := cmd.Validate(context.Background(), org)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "not found")
|
||||
assert.Contains(t, err.Error(), "non-existent-key")
|
||||
}
|
||||
|
||||
func TestRemoveAPIKey_Event(t *testing.T) {
|
||||
cmd := RemoveAPIKey{
|
||||
KeyName: "production-key",
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
event := cmd.Event(context.Background())
|
||||
require.NotNil(t, event)
|
||||
|
||||
keyEvent, ok := event.(*APIKeyRemoved)
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, "production-key", keyEvent.KeyName)
|
||||
assert.Equal(t, "user@example.com", keyEvent.Initiator)
|
||||
}
|
||||
|
||||
// RemoveOrganization tests
|
||||
|
||||
func TestRemoveOrganization_Validate_Success(t *testing.T) {
|
||||
cmd := RemoveOrganization{
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
org := &Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("org-123"),
|
||||
Name: "Test Org",
|
||||
}
|
||||
|
||||
err := cmd.Validate(context.Background(), org)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestRemoveOrganization_Validate_OrganizationNotExists(t *testing.T) {
|
||||
cmd := RemoveOrganization{
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
org := &Organization{} // No identity means it doesn't exist
|
||||
err := cmd.Validate(context.Background(), org)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "does not exist")
|
||||
}
|
||||
|
||||
func TestRemoveOrganization_Event(t *testing.T) {
|
||||
cmd := RemoveOrganization{
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
event := cmd.Event(context.Background())
|
||||
require.NotNil(t, event)
|
||||
|
||||
orgEvent, ok := event.(*OrganizationRemoved)
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, "user@example.com", orgEvent.Initiator)
|
||||
}
|
||||
|
||||
+51
-6
@@ -3,8 +3,7 @@ package domain
|
||||
import "gitlab.com/unboundsoftware/eventsourced/eventsourced"
|
||||
|
||||
type OrganizationAdded struct {
|
||||
eventsourced.EventAggregateId
|
||||
eventsourced.EventTime
|
||||
eventsourced.BaseEvent
|
||||
Name string `json:"name"`
|
||||
Initiator string `json:"initiator"`
|
||||
}
|
||||
@@ -18,9 +17,26 @@ func (a *OrganizationAdded) UpdateOrganization(o *Organization) {
|
||||
o.ChangedAt = a.When()
|
||||
}
|
||||
|
||||
type UserAddedToOrganization struct {
|
||||
eventsourced.BaseEvent
|
||||
UserId string `json:"userId"`
|
||||
Initiator string `json:"initiator"`
|
||||
}
|
||||
|
||||
func (a *UserAddedToOrganization) UpdateOrganization(o *Organization) {
|
||||
// Check if user is already in the organization
|
||||
for _, user := range o.Users {
|
||||
if user == a.UserId {
|
||||
return // User already exists, no need to add
|
||||
}
|
||||
}
|
||||
o.Users = append(o.Users, a.UserId)
|
||||
o.ChangedBy = a.Initiator
|
||||
o.ChangedAt = a.When()
|
||||
}
|
||||
|
||||
type APIKeyAdded struct {
|
||||
eventsourced.EventAggregateId
|
||||
eventsourced.EventTime
|
||||
eventsourced.BaseEvent
|
||||
OrganizationId string `json:"organizationId"`
|
||||
Name string `json:"name"`
|
||||
Key string `json:"key"`
|
||||
@@ -36,9 +52,38 @@ func (a *APIKeyAdded) EnrichFromAggregate(aggregate eventsourced.Aggregate) {
|
||||
|
||||
var _ eventsourced.EnrichableEvent = &APIKeyAdded{}
|
||||
|
||||
type APIKeyRemoved struct {
|
||||
eventsourced.BaseEvent
|
||||
KeyName string `json:"keyName"`
|
||||
Initiator string `json:"initiator"`
|
||||
}
|
||||
|
||||
func (a *APIKeyRemoved) UpdateOrganization(o *Organization) {
|
||||
// Remove the API key from the organization
|
||||
for i, key := range o.APIKeys {
|
||||
if key.Name == a.KeyName {
|
||||
o.APIKeys = append(o.APIKeys[:i], o.APIKeys[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
o.ChangedBy = a.Initiator
|
||||
o.ChangedAt = a.When()
|
||||
}
|
||||
|
||||
type OrganizationRemoved struct {
|
||||
eventsourced.BaseEvent
|
||||
Initiator string `json:"initiator"`
|
||||
}
|
||||
|
||||
func (a *OrganizationRemoved) UpdateOrganization(o *Organization) {
|
||||
// Mark organization as removed by clearing critical fields
|
||||
// The aggregate will still exist in the event store, but it's logically deleted
|
||||
o.ChangedBy = a.Initiator
|
||||
o.ChangedAt = a.When()
|
||||
}
|
||||
|
||||
type SubGraphUpdated struct {
|
||||
eventsourced.EventAggregateId
|
||||
eventsourced.EventTime
|
||||
eventsourced.BaseEvent
|
||||
OrganizationId string `json:"organizationId"`
|
||||
Ref string `json:"ref"`
|
||||
Service string `json:"service"`
|
||||
|
||||
@@ -0,0 +1,254 @@
|
||||
package domain
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gitlab.com/unboundsoftware/eventsourced/eventsourced"
|
||||
)
|
||||
|
||||
func TestOrganizationAdded_UpdateOrganization(t *testing.T) {
|
||||
event := &OrganizationAdded{
|
||||
BaseEvent: eventsourced.BaseEvent{
|
||||
EventTime: eventsourced.EventTime{
|
||||
Time: time.Now(),
|
||||
},
|
||||
},
|
||||
Name: "Test Organization",
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
org := &Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("org-123"),
|
||||
}
|
||||
|
||||
event.UpdateOrganization(org)
|
||||
|
||||
assert.Equal(t, "Test Organization", org.Name)
|
||||
assert.Equal(t, []string{"user@example.com"}, org.Users)
|
||||
assert.Equal(t, "user@example.com", org.CreatedBy)
|
||||
assert.Equal(t, "user@example.com", org.ChangedBy)
|
||||
assert.Equal(t, event.When(), org.CreatedAt)
|
||||
assert.Equal(t, event.When(), org.ChangedAt)
|
||||
}
|
||||
|
||||
func TestUserAddedToOrganization_UpdateOrganization(t *testing.T) {
|
||||
event := &UserAddedToOrganization{
|
||||
BaseEvent: eventsourced.BaseEvent{
|
||||
EventTime: eventsourced.EventTime{
|
||||
Time: time.Now(),
|
||||
},
|
||||
},
|
||||
UserId: "new-user@example.com",
|
||||
Initiator: "admin@example.com",
|
||||
}
|
||||
|
||||
org := &Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("org-123"),
|
||||
Users: []string{"existing-user@example.com"},
|
||||
}
|
||||
|
||||
event.UpdateOrganization(org)
|
||||
|
||||
assert.Len(t, org.Users, 2)
|
||||
assert.Contains(t, org.Users, "existing-user@example.com")
|
||||
assert.Contains(t, org.Users, "new-user@example.com")
|
||||
assert.Equal(t, "admin@example.com", org.ChangedBy)
|
||||
assert.Equal(t, event.When(), org.ChangedAt)
|
||||
}
|
||||
|
||||
func TestUserAddedToOrganization_UpdateOrganization_DuplicateUser(t *testing.T) {
|
||||
event := &UserAddedToOrganization{
|
||||
BaseEvent: eventsourced.BaseEvent{
|
||||
EventTime: eventsourced.EventTime{
|
||||
Time: time.Now(),
|
||||
},
|
||||
},
|
||||
UserId: "existing-user@example.com",
|
||||
Initiator: "admin@example.com",
|
||||
}
|
||||
|
||||
org := &Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("org-123"),
|
||||
Users: []string{"existing-user@example.com"},
|
||||
ChangedBy: "previous-admin@example.com",
|
||||
}
|
||||
originalChangedBy := org.ChangedBy
|
||||
originalChangedAt := org.ChangedAt
|
||||
|
||||
event.UpdateOrganization(org)
|
||||
|
||||
// User should not be added twice
|
||||
assert.Len(t, org.Users, 1)
|
||||
assert.Equal(t, "existing-user@example.com", org.Users[0])
|
||||
|
||||
// ChangedBy and ChangedAt should NOT be updated when user already exists (idempotent)
|
||||
assert.Equal(t, originalChangedBy, org.ChangedBy)
|
||||
assert.Equal(t, originalChangedAt, org.ChangedAt)
|
||||
}
|
||||
|
||||
func TestAPIKeyRemoved_UpdateOrganization(t *testing.T) {
|
||||
event := &APIKeyRemoved{
|
||||
BaseEvent: eventsourced.BaseEvent{
|
||||
EventTime: eventsourced.EventTime{
|
||||
Time: time.Now(),
|
||||
},
|
||||
},
|
||||
KeyName: "production-key",
|
||||
Initiator: "admin@example.com",
|
||||
}
|
||||
|
||||
org := &Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("org-123"),
|
||||
APIKeys: []APIKey{
|
||||
{Name: "dev-key", Key: "hashed-key-1"},
|
||||
{Name: "production-key", Key: "hashed-key-2"},
|
||||
{Name: "staging-key", Key: "hashed-key-3"},
|
||||
},
|
||||
}
|
||||
|
||||
event.UpdateOrganization(org)
|
||||
|
||||
assert.Len(t, org.APIKeys, 2)
|
||||
assert.Equal(t, "dev-key", org.APIKeys[0].Name)
|
||||
assert.Equal(t, "staging-key", org.APIKeys[1].Name)
|
||||
assert.Equal(t, "admin@example.com", org.ChangedBy)
|
||||
assert.Equal(t, event.When(), org.ChangedAt)
|
||||
}
|
||||
|
||||
func TestAPIKeyRemoved_UpdateOrganization_KeyNotFound(t *testing.T) {
|
||||
event := &APIKeyRemoved{
|
||||
BaseEvent: eventsourced.BaseEvent{
|
||||
EventTime: eventsourced.EventTime{
|
||||
Time: time.Now(),
|
||||
},
|
||||
},
|
||||
KeyName: "non-existent-key",
|
||||
Initiator: "admin@example.com",
|
||||
}
|
||||
|
||||
org := &Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("org-123"),
|
||||
APIKeys: []APIKey{
|
||||
{Name: "dev-key", Key: "hashed-key-1"},
|
||||
{Name: "production-key", Key: "hashed-key-2"},
|
||||
},
|
||||
}
|
||||
|
||||
event.UpdateOrganization(org)
|
||||
|
||||
// No keys should be removed
|
||||
assert.Len(t, org.APIKeys, 2)
|
||||
assert.Equal(t, "dev-key", org.APIKeys[0].Name)
|
||||
assert.Equal(t, "production-key", org.APIKeys[1].Name)
|
||||
|
||||
// But metadata should still be updated
|
||||
assert.Equal(t, "admin@example.com", org.ChangedBy)
|
||||
assert.Equal(t, event.When(), org.ChangedAt)
|
||||
}
|
||||
|
||||
func TestAPIKeyRemoved_UpdateOrganization_OnlyKey(t *testing.T) {
|
||||
event := &APIKeyRemoved{
|
||||
BaseEvent: eventsourced.BaseEvent{
|
||||
EventTime: eventsourced.EventTime{
|
||||
Time: time.Now(),
|
||||
},
|
||||
},
|
||||
KeyName: "only-key",
|
||||
Initiator: "admin@example.com",
|
||||
}
|
||||
|
||||
org := &Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("org-123"),
|
||||
APIKeys: []APIKey{
|
||||
{Name: "only-key", Key: "hashed-key"},
|
||||
},
|
||||
}
|
||||
|
||||
event.UpdateOrganization(org)
|
||||
|
||||
// All keys should be removed
|
||||
assert.Len(t, org.APIKeys, 0)
|
||||
assert.Equal(t, "admin@example.com", org.ChangedBy)
|
||||
assert.Equal(t, event.When(), org.ChangedAt)
|
||||
}
|
||||
|
||||
func TestOrganizationRemoved_UpdateOrganization(t *testing.T) {
|
||||
event := &OrganizationRemoved{
|
||||
BaseEvent: eventsourced.BaseEvent{
|
||||
EventTime: eventsourced.EventTime{
|
||||
Time: time.Now(),
|
||||
},
|
||||
},
|
||||
Initiator: "admin@example.com",
|
||||
}
|
||||
|
||||
org := &Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString("org-123"),
|
||||
Name: "Test Organization",
|
||||
Users: []string{"user1@example.com", "user2@example.com"},
|
||||
APIKeys: []APIKey{
|
||||
{Name: "key1", Key: "hashed-key-1"},
|
||||
},
|
||||
CreatedBy: "creator@example.com",
|
||||
CreatedAt: time.Now().Add(-24 * time.Hour),
|
||||
}
|
||||
|
||||
event.UpdateOrganization(org)
|
||||
|
||||
// Organization data remains (soft delete), but metadata is updated
|
||||
assert.Equal(t, "Test Organization", org.Name)
|
||||
assert.Len(t, org.Users, 2)
|
||||
assert.Len(t, org.APIKeys, 1)
|
||||
|
||||
// Metadata should be updated to reflect removal
|
||||
assert.Equal(t, "admin@example.com", org.ChangedBy)
|
||||
assert.Equal(t, event.When(), org.ChangedAt)
|
||||
}
|
||||
|
||||
func TestAPIKeyAdded_EnrichFromAggregate(t *testing.T) {
|
||||
orgId := "org-123"
|
||||
aggregate := &Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregateFromString(orgId),
|
||||
}
|
||||
|
||||
event := &APIKeyAdded{
|
||||
Name: "test-key",
|
||||
Key: "hashed-key",
|
||||
Refs: []string{"main"},
|
||||
Read: true,
|
||||
Publish: false,
|
||||
Initiator: "user@example.com",
|
||||
}
|
||||
|
||||
event.EnrichFromAggregate(aggregate)
|
||||
|
||||
assert.Equal(t, orgId, event.OrganizationId)
|
||||
}
|
||||
|
||||
func TestSubGraphUpdated_Event(t *testing.T) {
|
||||
// Verify SubGraphUpdated event structure
|
||||
url := "http://service.example.com"
|
||||
wsUrl := "ws://service.example.com"
|
||||
|
||||
event := &SubGraphUpdated{
|
||||
OrganizationId: "org-123",
|
||||
Ref: "main",
|
||||
Service: "users-service",
|
||||
Url: &url,
|
||||
WSUrl: &wsUrl,
|
||||
Sdl: "type Query { user: User }",
|
||||
Initiator: "system",
|
||||
}
|
||||
|
||||
require.NotNil(t, event)
|
||||
assert.Equal(t, "org-123", event.OrganizationId)
|
||||
assert.Equal(t, "main", event.Ref)
|
||||
assert.Equal(t, "users-service", event.Service)
|
||||
assert.Equal(t, url, *event.Url)
|
||||
assert.Equal(t, wsUrl, *event.WSUrl)
|
||||
assert.Equal(t, "type Query { user: User }", event.Sdl)
|
||||
assert.Equal(t, "system", event.Initiator)
|
||||
}
|
||||
@@ -1,55 +1,91 @@
|
||||
module gitlab.com/unboundsoftware/schemas
|
||||
module gitea.unbound.se/unboundsoftware/schemas
|
||||
|
||||
go 1.19
|
||||
go 1.25.0
|
||||
|
||||
require (
|
||||
github.com/99designs/gqlgen v0.17.31
|
||||
github.com/Khan/genqlient v0.6.0
|
||||
github.com/alecthomas/kong v0.7.1
|
||||
github.com/99designs/gqlgen v0.17.87
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.2
|
||||
github.com/Khan/genqlient v0.8.1
|
||||
github.com/alecthomas/kong v1.14.0
|
||||
github.com/apex/log v1.9.0
|
||||
github.com/auth0/go-jwt-middleware/v2 v2.1.0
|
||||
github.com/getsentry/sentry-go v0.21.0
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0
|
||||
github.com/jmoiron/sqlx v1.3.5
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/pressly/goose/v3 v3.11.2
|
||||
github.com/rs/cors v1.9.0
|
||||
github.com/sparetimecoders/goamqp v0.1.4
|
||||
github.com/stretchr/testify v1.8.3
|
||||
github.com/vektah/gqlparser/v2 v2.5.1
|
||||
github.com/wundergraph/graphql-go-tools v1.63.1
|
||||
gitlab.com/unboundsoftware/eventsourced/amqp v1.6.4
|
||||
gitlab.com/unboundsoftware/eventsourced/eventsourced v1.13.0
|
||||
gitlab.com/unboundsoftware/eventsourced/pg v1.11.2
|
||||
github.com/auth0/go-jwt-middleware/v3 v3.0.0
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/jmoiron/sqlx v1.4.0
|
||||
github.com/pressly/goose/v3 v3.27.0
|
||||
github.com/rs/cors v1.11.1
|
||||
github.com/sparetimecoders/goamqp v0.3.3
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/vektah/gqlparser/v2 v2.5.32
|
||||
github.com/wundergraph/graphql-go-tools/v2 v2.0.0-rc.255
|
||||
gitlab.com/unboundsoftware/eventsourced/amqp v1.9.1
|
||||
gitlab.com/unboundsoftware/eventsourced/eventsourced v1.19.4
|
||||
gitlab.com/unboundsoftware/eventsourced/pg v1.18.4
|
||||
go.opentelemetry.io/contrib/bridges/otelslog v0.15.0
|
||||
go.opentelemetry.io/otel v1.40.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.40.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.16.0
|
||||
go.opentelemetry.io/otel/log v0.16.0
|
||||
go.opentelemetry.io/otel/sdk v1.40.0
|
||||
go.opentelemetry.io/otel/sdk/log v0.16.0
|
||||
go.opentelemetry.io/otel/sdk/metric v1.40.0
|
||||
go.opentelemetry.io/otel/trace v1.40.0
|
||||
golang.org/x/crypto v0.48.0
|
||||
golang.org/x/sync v0.19.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/agnivade/levenshtein v1.1.1 // indirect
|
||||
github.com/agnivade/levenshtein v1.2.1 // indirect
|
||||
github.com/buger/jsonparser v1.1.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/golang/mock v1.4.4 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.1 // indirect
|
||||
github.com/jensneuse/byte-template v0.0.0-20200214152254-4f3cf06e5c68 // indirect
|
||||
github.com/lib/pq v1.10.9 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.5.0 // indirect
|
||||
github.com/goccy/go-json v0.10.5 // indirect
|
||||
github.com/goccy/go-yaml v1.19.2 // indirect
|
||||
github.com/gorilla/websocket v1.5.1 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
|
||||
github.com/lestrrat-go/blackmagic v1.0.4 // indirect
|
||||
github.com/lestrrat-go/dsig v1.0.0 // indirect
|
||||
github.com/lestrrat-go/dsig-secp256k1 v1.0.0 // indirect
|
||||
github.com/lestrrat-go/httpcc v1.0.1 // indirect
|
||||
github.com/lestrrat-go/httprc/v3 v3.0.3 // indirect
|
||||
github.com/lestrrat-go/jwx/v3 v3.0.12 // indirect
|
||||
github.com/lestrrat-go/option/v2 v2.0.0 // indirect
|
||||
github.com/lib/pq v1.11.2 // indirect
|
||||
github.com/mfridman/interpolate v0.0.2 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/rabbitmq/amqp091-go v1.8.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/santhosh-tekuri/jsonschema/v5 v5.3.0 // indirect
|
||||
github.com/tidwall/gjson v1.14.3 // indirect
|
||||
github.com/rabbitmq/amqp091-go v1.10.0 // indirect
|
||||
github.com/segmentio/asm v1.2.1 // indirect
|
||||
github.com/sethvargo/go-retry v0.3.0 // indirect
|
||||
github.com/sosodev/duration v1.3.1 // indirect
|
||||
github.com/stretchr/objx v0.5.2 // indirect
|
||||
github.com/tidwall/gjson v1.17.0 // indirect
|
||||
github.com/tidwall/match v1.1.1 // indirect
|
||||
github.com/tidwall/pretty v1.2.1 // indirect
|
||||
github.com/tidwall/sjson v1.2.5 // indirect
|
||||
github.com/urfave/cli/v2 v2.24.4 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
golang.org/x/mod v0.10.0 // indirect
|
||||
golang.org/x/sys v0.8.0 // indirect
|
||||
golang.org/x/text v0.9.0 // indirect
|
||||
golang.org/x/tools v0.8.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
github.com/urfave/cli/v3 v3.6.2 // indirect
|
||||
github.com/valyala/fastjson v1.6.7 // indirect
|
||||
github.com/wundergraph/astjson v1.1.0 // indirect
|
||||
github.com/wundergraph/go-arena v1.1.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.40.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.9.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/mod v0.33.0 // indirect
|
||||
golang.org/x/net v0.50.0 // indirect
|
||||
golang.org/x/sys v0.41.0 // indirect
|
||||
golang.org/x/text v0.34.0 // indirect
|
||||
golang.org/x/tools v0.42.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260217215200-42d3e9bedb6d // indirect
|
||||
google.golang.org/grpc v1.79.1 // indirect
|
||||
google.golang.org/protobuf v1.36.11 // indirect
|
||||
)
|
||||
|
||||
@@ -1,18 +1,26 @@
|
||||
github.com/99designs/gqlgen v0.17.31 h1:VncSQ82VxieHkea8tz11p7h/zSbvHSxSDZfywqWt158=
|
||||
github.com/99designs/gqlgen v0.17.31/go.mod h1:i4rEatMrzzu6RXaHydq1nmEPZkb3bKQsnxNRHS4DQB4=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
|
||||
github.com/Khan/genqlient v0.6.0 h1:Bwb1170ekuNIVIwTJEqvO8y7RxBxXu639VJOkKSrwAk=
|
||||
github.com/Khan/genqlient v0.6.0/go.mod h1:rvChwWVTqXhiapdhLDV4bp9tz/Xvtewwkon4DpWWCRM=
|
||||
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
|
||||
github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
|
||||
github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
|
||||
github.com/alecthomas/assert/v2 v2.1.0 h1:tbredtNcQnoSd3QBhQWI7QZ3XHOVkw1Moklp2ojoH/0=
|
||||
github.com/alecthomas/kong v0.7.1 h1:azoTh0IOfwlAX3qN9sHWTxACE2oV8Bg2gAwBsMwDQY4=
|
||||
github.com/alecthomas/kong v0.7.1/go.mod h1:n1iCIO2xS46oE8ZfYCNDqdR0b0wZNrXAIAqro/2132U=
|
||||
github.com/alecthomas/repr v0.1.0 h1:ENn2e1+J3k09gyj2shc0dHr/yjaWSHRlrJ4DPMevDqE=
|
||||
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
|
||||
filippo.io/edwards25519 v1.2.0 h1:crnVqOiS4jqYleHd9vaKZ+HKtHfllngJIiOpNpoJsjo=
|
||||
filippo.io/edwards25519 v1.2.0/go.mod h1:xzAOLCNug/yB62zG1bQ8uziwrIqIuxhctzJT18Q77mc=
|
||||
github.com/99designs/gqlgen v0.17.87 h1:pSnCIMhBQezAE8bc1GNmfdLXFmnWtWl1GRDFEE/nHP8=
|
||||
github.com/99designs/gqlgen v0.17.87/go.mod h1:fK05f1RqSNfQpd4CfW5qk/810Tqi4/56Wf6Nem0khAg=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
|
||||
github.com/Khan/genqlient v0.8.1 h1:wtOCc8N9rNynRLXN3k3CnfzheCUNKBcvXmVv5zt6WCs=
|
||||
github.com/Khan/genqlient v0.8.1/go.mod h1:R2G6DzjBvCbhjsEajfRjbWdVglSH/73kSivC9TLWVjU=
|
||||
github.com/PuerkitoBio/goquery v1.11.0 h1:jZ7pwMQXIITcUXNH83LLk+txlaEy6NVOfTuP43xxfqw=
|
||||
github.com/PuerkitoBio/goquery v1.11.0/go.mod h1:wQHgxUOU3JGuj3oD/QFfxUdlzW6xPHfqyHre6VMY4DQ=
|
||||
github.com/agnivade/levenshtein v1.2.1 h1:EHBY3UOn1gwdy/VbFwgo4cxecRznFk7fKWN1KOX7eoM=
|
||||
github.com/agnivade/levenshtein v1.2.1/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU=
|
||||
github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0=
|
||||
github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
|
||||
github.com/alecthomas/kong v1.14.0 h1:gFgEUZWu2ZmZ+UhyZ1bDhuutbKN1nTtJTwh19Wsn21s=
|
||||
github.com/alecthomas/kong v1.14.0/go.mod h1:wrlbXem1CWqUV5Vbmss5ISYhsVPkBb1Yo7YKJghju2I=
|
||||
github.com/alecthomas/repr v0.5.2 h1:SU73FTI9D1P5UNtvseffFSGmdNci/O6RsqzeXJtP0Qs=
|
||||
github.com/alecthomas/repr v0.5.2/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
|
||||
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
|
||||
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
|
||||
github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM=
|
||||
github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA=
|
||||
github.com/apex/log v1.9.0 h1:FHtw/xuaM8AgmvDDTI9fiwoAL25Sq2cxojnZICUU8l0=
|
||||
github.com/apex/log v1.9.0/go.mod h1:m82fZlWIuiWzWP04XCTXmnX0xRkYYbCdYn8jbJeLBEA=
|
||||
github.com/apex/logs v1.0.0/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo=
|
||||
@@ -20,129 +28,152 @@ github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy
|
||||
github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys=
|
||||
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
|
||||
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
|
||||
github.com/auth0/go-jwt-middleware/v2 v2.1.0 h1:VU4LsC3aFPoqXVyEp8EixU6FNM+ZNIjECszRTvtGQI8=
|
||||
github.com/auth0/go-jwt-middleware/v2 v2.1.0/go.mod h1:CpzcJoleayAACpv+vt0AP8/aYn5TDngsqzLapV1nM4c=
|
||||
github.com/auth0/go-jwt-middleware/v3 v3.0.0 h1:+rvUPCT+VbAuK4tpS13fWfZrMyqTwLopt3VoY0Y7kvA=
|
||||
github.com/auth0/go-jwt-middleware/v3 v3.0.0/go.mod h1:iU42jqjRyeKbf9YYSnRnolr836gk6Ty/jnUNuVq2b0o=
|
||||
github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
|
||||
github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
|
||||
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
|
||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g=
|
||||
github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
|
||||
github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7cNTs5R6Hk4V2lcmLz2NsG2VnInyNo=
|
||||
github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/evanphx/json-patch/v5 v5.1.0 h1:B0aXl1o/1cP8NbviYiBMkcHBtUjIJ1/Ccg6b+SwCLQg=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/getsentry/sentry-go v0.21.0 h1:c9l5F1nPF30JIppulk4veau90PK6Smu3abgVtVQWon4=
|
||||
github.com/getsentry/sentry-go v0.21.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
|
||||
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc=
|
||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
|
||||
github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
|
||||
github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
|
||||
github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro=
|
||||
github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
|
||||
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
||||
github.com/goccy/go-yaml v1.19.2 h1:PmFC1S6h8ljIz6gMRBopkjP1TVT7xuwrButHID66PoM=
|
||||
github.com/goccy/go-yaml v1.19.2/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.1 h1:5pv5N1lT1fjLg2VQ5KWc7kmucp2x/kvFOnxuVTqZ6x4=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
|
||||
github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
|
||||
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/jensneuse/byte-template v0.0.0-20200214152254-4f3cf06e5c68 h1:E80wOd3IFQcoBxLkAUpUQ3BoGrZ4DxhQdP21+HH1s6A=
|
||||
github.com/jensneuse/byte-template v0.0.0-20200214152254-4f3cf06e5c68/go.mod h1:0D5r/VSW6D/o65rKLL9xk7sZxL2+oku2HvFPYeIMFr4=
|
||||
github.com/jensneuse/diffview v1.0.0 h1:4b6FQJ7y3295JUHU3tRko6euyEboL825ZsXeZZM47Z4=
|
||||
github.com/jensneuse/diffview v1.0.0/go.mod h1:i6IacuD8LnEaPuiyzMHA+Wfz5mAuycMOf3R/orUY9y4=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
|
||||
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
|
||||
github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
|
||||
github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
|
||||
github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
|
||||
github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.10.8/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/lestrrat-go/blackmagic v1.0.4 h1:IwQibdnf8l2KoO+qC3uT4OaTWsW7tuRQXy9TRN9QanA=
|
||||
github.com/lestrrat-go/blackmagic v1.0.4/go.mod h1:6AWFyKNNj0zEXQYfTMPfZrAXUWUfTIZ5ECEUEJaijtw=
|
||||
github.com/lestrrat-go/dsig v1.0.0 h1:OE09s2r9Z81kxzJYRn07TFM9XA4akrUdoMwr0L8xj38=
|
||||
github.com/lestrrat-go/dsig v1.0.0/go.mod h1:dEgoOYYEJvW6XGbLasr8TFcAxoWrKlbQvmJgCR0qkDo=
|
||||
github.com/lestrrat-go/dsig-secp256k1 v1.0.0 h1:JpDe4Aybfl0soBvoVwjqDbp+9S1Y2OM7gcrVVMFPOzY=
|
||||
github.com/lestrrat-go/dsig-secp256k1 v1.0.0/go.mod h1:CxUgAhssb8FToqbL8NjSPoGQlnO4w3LG1P0qPWQm/NU=
|
||||
github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE=
|
||||
github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
|
||||
github.com/lestrrat-go/httprc/v3 v3.0.3 h1:WjLHWkDkgWXeIUrKi/7lS/sGq2DjkSAwdTbH5RHXAKs=
|
||||
github.com/lestrrat-go/httprc/v3 v3.0.3/go.mod h1:mSMtkZW92Z98M5YoNNztbRGxbXHql7tSitCvaxvo9l0=
|
||||
github.com/lestrrat-go/jwx/v3 v3.0.12 h1:p25r68Y4KrbBdYjIsQweYxq794CtGCzcrc5dGzJIRjg=
|
||||
github.com/lestrrat-go/jwx/v3 v3.0.12/go.mod h1:HiUSaNmMLXgZ08OmGBaPVvoZQgJVOQphSrGr5zMamS8=
|
||||
github.com/lestrrat-go/option/v2 v2.0.0 h1:XxrcaJESE1fokHy3FpaQ/cXW8ZsIdWcdFzzLOcID3Ss=
|
||||
github.com/lestrrat-go/option/v2 v2.0.0/go.mod h1:oSySsmzMoR0iRzCDCaUfsCzxQHUEuhOViQObyy7S6Vg=
|
||||
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/lib/pq v1.11.2 h1:x6gxUeu39V0BHZiugWe8LXZYZ+Utk7hSJGThs8sdzfs=
|
||||
github.com/lib/pq v1.11.2/go.mod h1:/p+8NSbOcwzAEI7wiMXFlgydTwcgTr3OSKMsD2BitpA=
|
||||
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
|
||||
github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg=
|
||||
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
|
||||
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY=
|
||||
github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg=
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
||||
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pressly/goose/v3 v3.11.2 h1:QgTP45FhBBHdmf7hWKlbWFHtwPtxo0phSDkwDKGUrYs=
|
||||
github.com/pressly/goose/v3 v3.11.2/go.mod h1:LWQzSc4vwfHA/3B8getTp8g3J5Z8tFBxgxinmGlMlJk=
|
||||
github.com/rabbitmq/amqp091-go v1.5.0/go.mod h1:JsV0ofX5f1nwOGafb8L5rBItt9GyhfQfcJj+oyz0dGg=
|
||||
github.com/rabbitmq/amqp091-go v1.8.1 h1:RejT1SBUim5doqcL6s7iN6SBmsQqyTgXb1xMlH0h1hA=
|
||||
github.com/rabbitmq/amqp091-go v1.8.1/go.mod h1:+jPrT9iY2eLjRaMSRHUhc3z14E/l85kv/f+6luSD3pc=
|
||||
github.com/pressly/goose/v3 v3.27.0 h1:/D30gVTuQhu0WsNZYbJi4DMOsx1lNq+6SkLe+Wp59BM=
|
||||
github.com/pressly/goose/v3 v3.27.0/go.mod h1:3ZBeCXqzkgIRvrEMDkYh1guvtoJTU5oMMuDdkutoM78=
|
||||
github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw=
|
||||
github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rs/cors v1.9.0 h1:l9HGsTsHJcvW14Nk7J9KFz8bzeAWXn3CG6bgt7LsrAE=
|
||||
github.com/rs/cors v1.9.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sanity-io/litter v1.5.5 h1:iE+sBxPBzoK6uaEP5Lt3fHNgpKcHXc/A2HGETy0uJQo=
|
||||
github.com/sanity-io/litter v1.5.5/go.mod h1:9gzJgR2i4ZpjZHsKvUXIRQVk7P+yM3e+jAF7bU2UI5U=
|
||||
github.com/santhosh-tekuri/jsonschema/v5 v5.3.0 h1:uIkTLo0AGRc8l7h5l9r+GcYi9qfVPt6lD4/bhmzfiKo=
|
||||
github.com/santhosh-tekuri/jsonschema/v5 v5.3.0/go.mod h1:FKdcjfQW6rpZSnxxUvEA5H/cDPdvJ/SZJQLWWXWGrZ0=
|
||||
github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
|
||||
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
|
||||
github.com/sanity-io/litter v1.5.8 h1:uM/2lKrWdGbRXDrIq08Lh9XtVYoeGtcQxk9rtQ7+rYg=
|
||||
github.com/sanity-io/litter v1.5.8/go.mod h1:9gzJgR2i4ZpjZHsKvUXIRQVk7P+yM3e+jAF7bU2UI5U=
|
||||
github.com/sebdah/goldie/v2 v2.7.1 h1:PkBHymaYdtvEkZV7TmyqKxdmn5/Vcj+8TpATWZjnG5E=
|
||||
github.com/sebdah/goldie/v2 v2.7.1/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
|
||||
github.com/segmentio/asm v1.2.1 h1:DTNbBqs57ioxAD4PrArqftgypG4/qNpXoJx8TVXxPR0=
|
||||
github.com/segmentio/asm v1.2.1/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
|
||||
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
|
||||
github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
|
||||
github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE=
|
||||
github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas=
|
||||
github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
|
||||
github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM=
|
||||
github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs=
|
||||
github.com/sparetimecoders/goamqp v0.1.3/go.mod h1:BKUl32yHsxpKEZEn7oEgyKB8Y0C4dk5n+17FModO6iM=
|
||||
github.com/sparetimecoders/goamqp v0.1.4 h1:zNvnCJYb5vraMx+OJCCuPIaXP8ub3Et15ff8ylZrPkY=
|
||||
github.com/sparetimecoders/goamqp v0.1.4/go.mod h1:WUJIWrbwl6rWxbfQTsy/doY7yHQL55L7M89k7ry6ouU=
|
||||
github.com/sosodev/duration v1.3.1 h1:qtHBDMQ6lvMQsL15g4aopM4HEfOaYuhWBw3NPTtlqq4=
|
||||
github.com/sosodev/duration v1.3.1/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg=
|
||||
github.com/sparetimecoders/goamqp v0.3.3 h1:z/nfTPmrjeU/rIVuNOgsVLCimp3WFoNFvS3ZzXRJ6HE=
|
||||
github.com/sparetimecoders/goamqp v0.3.3/go.mod h1:W9NRCpWLE+Vruv2dcRSbszNil2O826d2Nv6kAkETW5o=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
|
||||
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw=
|
||||
github.com/tidwall/gjson v1.14.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM=
|
||||
github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
|
||||
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
||||
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
@@ -157,93 +188,114 @@ github.com/tj/go-buffer v1.1.0/go.mod h1:iyiJpfFcR2B9sXu7KvjbT9fpM4mOelRSDTbntVj
|
||||
github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0=
|
||||
github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao=
|
||||
github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4=
|
||||
github.com/urfave/cli/v2 v2.24.4 h1:0gyJJEBYtCV87zI/x2nZCPyDxD51K6xM8SkwjHFCNEU=
|
||||
github.com/urfave/cli/v2 v2.24.4/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
|
||||
github.com/vektah/gqlparser/v2 v2.5.1 h1:ZGu+bquAY23jsxDRcYpWjttRZrUz07LbiY77gUOHcr4=
|
||||
github.com/vektah/gqlparser/v2 v2.5.1/go.mod h1:mPgqFBu/woKTVYWyNk8cO3kh4S/f4aRFZrvOnp3hmCs=
|
||||
github.com/wundergraph/graphql-go-tools v1.63.1 h1:N151K4dnVhjG0RgDQ5EW8WS64ehlh6l2qC2cW4vFecg=
|
||||
github.com/wundergraph/graphql-go-tools v1.63.1/go.mod h1:44QXSTiT0Jn82792Qjr7wHEJruC+RnofVJb5hM8Gins=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
gitlab.com/unboundsoftware/eventsourced/amqp v1.6.4 h1:k9xA5fo3zvP2W5GZseAytAivJvVvBKezn5acZssrgvk=
|
||||
gitlab.com/unboundsoftware/eventsourced/amqp v1.6.4/go.mod h1:XHg6Men3GHsA/x9ln+atApW4ST2ZHVMp3NPnxW51JoA=
|
||||
gitlab.com/unboundsoftware/eventsourced/eventsourced v1.11.2/go.mod h1:vGYGhwwjQjal7d+niWo4wKZ6ZI1zc1ehHPBfPbc2ICg=
|
||||
gitlab.com/unboundsoftware/eventsourced/eventsourced v1.13.0 h1:hGJzgND2DQ+ONiettDmk6VaAGyUTqsPv2oT3AEGZsLo=
|
||||
gitlab.com/unboundsoftware/eventsourced/eventsourced v1.13.0/go.mod h1:vGYGhwwjQjal7d+niWo4wKZ6ZI1zc1ehHPBfPbc2ICg=
|
||||
gitlab.com/unboundsoftware/eventsourced/pg v1.11.2 h1:Yf9ZzzxoU98nM377QBQaoeNFQ349d3/04cETZF/UX/U=
|
||||
gitlab.com/unboundsoftware/eventsourced/pg v1.11.2/go.mod h1:+cfXfP8PUR1DbICf3gV/UQijK2Tbajjvkj4BV6CJeHs=
|
||||
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
|
||||
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
|
||||
github.com/urfave/cli/v3 v3.6.2 h1:lQuqiPrZ1cIz8hz+HcrG0TNZFxU70dPZ3Yl+pSrH9A8=
|
||||
github.com/urfave/cli/v3 v3.6.2/go.mod h1:ysVLtOEmg2tOy6PknnYVhDoouyC/6N42TMeoMzskhso=
|
||||
github.com/valyala/fastjson v1.6.7 h1:ZE4tRy0CIkh+qDc5McjatheGX2czdn8slQjomexVpBM=
|
||||
github.com/valyala/fastjson v1.6.7/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
|
||||
github.com/vektah/gqlparser/v2 v2.5.32 h1:k9QPJd4sEDTL+qB4ncPLflqTJ3MmjB9SrVzJrawpFSc=
|
||||
github.com/vektah/gqlparser/v2 v2.5.32/go.mod h1:c1I28gSOVNzlfc4WuDlqU7voQnsqI6OG2amkBAFmgts=
|
||||
github.com/wundergraph/astjson v1.1.0 h1:xORDosrZ87zQFJwNGe/HIHXqzpdHOFmqWgykCLVL040=
|
||||
github.com/wundergraph/astjson v1.1.0/go.mod h1:h12D/dxxnedtLzsKyBLK7/Oe4TAoGpRVC9nDpDrZSWw=
|
||||
github.com/wundergraph/go-arena v1.1.0 h1:9+wSRkJAkA2vbYHp6s8tEGhPViRGQNGXqPHT0QzhdIc=
|
||||
github.com/wundergraph/go-arena v1.1.0/go.mod h1:ROOysEHWJjLQ8FSfNxZCziagb7Qw2nXY3/vgKRh7eWw=
|
||||
github.com/wundergraph/graphql-go-tools/v2 v2.0.0-rc.255 h1:lN+D5OWay3U1mwtRlA+j7kJqP5ksKdRFMvYA+8XLJ1E=
|
||||
github.com/wundergraph/graphql-go-tools/v2 v2.0.0-rc.255/go.mod h1:gfmmrPd2khZONmwYE8RIfnGjwIG+RqL52jYiBzcUST8=
|
||||
gitlab.com/unboundsoftware/eventsourced/amqp v1.9.1 h1:X6269JoAzHIKCVmtgMHZH3m7xOpACSp37ca3eODe9iU=
|
||||
gitlab.com/unboundsoftware/eventsourced/amqp v1.9.1/go.mod h1:EAs0d6Eh0aDiQkUJlSWErHqgHFQdxx0e8I7aG/2FarY=
|
||||
gitlab.com/unboundsoftware/eventsourced/eventsourced v1.19.4 h1:+yZkhi9/sTyBEN5vJTfvycyXgGrm07QKGSh3jiWiQdM=
|
||||
gitlab.com/unboundsoftware/eventsourced/eventsourced v1.19.4/go.mod h1:LrA7I7etRmhIC1PjO8c26BHm+gWsy2rC3eSMe5+XUWE=
|
||||
gitlab.com/unboundsoftware/eventsourced/pg v1.18.4 h1:ei0xdaACXw6/54w5hPscGUlJUzHJm6MQoeUP7hPqbJA=
|
||||
gitlab.com/unboundsoftware/eventsourced/pg v1.18.4/go.mod h1:IryGlvRa02/IAASbGqoMHTC2Q4WHXr2QY7fLUVN3mL0=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
|
||||
go.opentelemetry.io/contrib/bridges/otelslog v0.15.0 h1:yOYhGNPZseueTTvWp5iBD3/CthrmvayUXYEX862dDi4=
|
||||
go.opentelemetry.io/contrib/bridges/otelslog v0.15.0/go.mod h1:CvaNVqIfcybc+7xqZNubbE+26K6P7AKZF/l0lE2kdCk=
|
||||
go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
|
||||
go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.40.0 h1:9y5sHvAxWzft1WQ4BwqcvA+IFVUJ1Ya75mSAUnFEVwE=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.40.0/go.mod h1:eQqT90eR3X5Dbs1g9YSM30RavwLF725Ris5/XSXWvqE=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 h1:wVZXIWjQSeSmMoxF74LzAnpVQOAFDo3pPji9Y4SOFKc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0/go.mod h1:khvBS2IggMFNwZK/6lEeHg/W57h/IX6J4URh57fuI40=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.16.0 h1:ivlbaajBWJqhcCPniDqDJmRwj4lc6sRT+dCAVKNmxlQ=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.16.0/go.mod h1:u/G56dEKDDwXNCVLsbSrllB2o8pbtFLUC4HpR66r2dc=
|
||||
go.opentelemetry.io/otel/log v0.16.0 h1:DeuBPqCi6pQwtCK0pO4fvMB5eBq6sNxEnuTs88pjsN4=
|
||||
go.opentelemetry.io/otel/log v0.16.0/go.mod h1:rWsmqNVTLIA8UnwYVOItjyEZDbKIkMxdQunsIhpUMes=
|
||||
go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
|
||||
go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
|
||||
go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
|
||||
go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=
|
||||
go.opentelemetry.io/otel/sdk/log v0.16.0 h1:e/b4bdlQwC5fnGtG3dlXUrNOnP7c8YLVSpSfEBIkTnI=
|
||||
go.opentelemetry.io/otel/sdk/log v0.16.0/go.mod h1:JKfP3T6ycy7QEuv3Hj8oKDy7KItrEkus8XJE6EoSzw4=
|
||||
go.opentelemetry.io/otel/sdk/log/logtest v0.16.0 h1:/XVkpZ41rVRTP4DfMgYv1nEtNmf65XPPyAdqV90TMy4=
|
||||
go.opentelemetry.io/otel/sdk/log/logtest v0.16.0/go.mod h1:iOOPgQr5MY9oac/F5W86mXdeyWZGleIx3uXO98X2R6Y=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
|
||||
go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
|
||||
go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
|
||||
go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
|
||||
go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
|
||||
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
|
||||
golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
|
||||
golang.org/x/exp v0.0.0-20260218203240-3dfff04db8fa h1:Zt3DZoOFFYkKhDT3v7Lm9FDMEV06GpzjG2jrqW+QTE0=
|
||||
golang.org/x/exp v0.0.0-20260218203240-3dfff04db8fa/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA=
|
||||
golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8=
|
||||
golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60=
|
||||
golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
|
||||
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
|
||||
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
|
||||
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618=
|
||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k=
|
||||
golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0=
|
||||
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 h1:merA0rdPeUV3YIIfHHcH4qBkiQAc1nfCKSI7lB4cV2M=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409/go.mod h1:fl8J1IvUjCilwZzQowmw2b7HQB2eAuYBabMXzWurF+I=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260217215200-42d3e9bedb6d h1:t/LOSXPJ9R0B6fnZNyALBRfZBH0Uy0gT+uR+SJ6syqQ=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260217215200-42d3e9bedb6d/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8=
|
||||
google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY=
|
||||
google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=
|
||||
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
|
||||
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo=
|
||||
modernc.org/cc/v3 v3.40.0 h1:P3g79IUS/93SYhtoeaHW+kRCIrYaxJ27MFPv+7kaTOw=
|
||||
modernc.org/ccgo/v3 v3.16.13 h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw=
|
||||
modernc.org/libc v1.22.5 h1:91BNch/e5B0uPbJFgqbxXuOnxBQjlS//icfQEGmvyjE=
|
||||
modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ=
|
||||
modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds=
|
||||
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
|
||||
modernc.org/sqlite v1.22.1 h1:P2+Dhp5FR1RlVRkQ3dDfCiv3Ok8XPxqpe70IjYVA9oE=
|
||||
modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
modernc.org/libc v1.68.0 h1:PJ5ikFOV5pwpW+VqCK1hKJuEWsonkIJhhIXyuF/91pQ=
|
||||
modernc.org/libc v1.68.0/go.mod h1:NnKCYeoYgsEqnY3PgvNgAeaJnso968ygU8Z0DxjoEc0=
|
||||
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
|
||||
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
|
||||
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
|
||||
modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
|
||||
modernc.org/sqlite v1.46.1 h1:eFJ2ShBLIEnUWlLy12raN0Z1plqmFX9Qe3rjQTKt6sU=
|
||||
modernc.org/sqlite v1.46.1/go.mod h1:CzbrU2lSB1DKUusvwGz7rqEKIq+NUd8GWuBBZDs9/nA=
|
||||
|
||||
+3
-3
@@ -1,8 +1,8 @@
|
||||
package graph
|
||||
|
||||
import (
|
||||
"gitlab.com/unboundsoftware/schemas/domain"
|
||||
"gitlab.com/unboundsoftware/schemas/graph/model"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/domain"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/graph/model"
|
||||
)
|
||||
|
||||
func ToGqlOrganizations(orgs []domain.Organization) []*model.Organization {
|
||||
@@ -38,7 +38,7 @@ func ToGqlAPIKeys(keys []domain.APIKey) []*model.APIKey {
|
||||
result[i] = &model.APIKey{
|
||||
ID: apiKeyId(k.OrganizationId, k.Name),
|
||||
Name: k.Name,
|
||||
Key: &k.Key,
|
||||
Key: nil, // Never return the hashed key - only return plaintext on creation
|
||||
Organization: nil,
|
||||
Refs: k.Refs,
|
||||
Read: k.Read,
|
||||
|
||||
+161
@@ -0,0 +1,161 @@
|
||||
package graph
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sync/semaphore"
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
"gitea.unbound.se/unboundsoftware/schemas/graph/model"
|
||||
)
|
||||
|
||||
// CommandExecutor is an interface for executing external commands
|
||||
// This allows for mocking in tests
|
||||
type CommandExecutor interface {
|
||||
Execute(name string, args ...string) ([]byte, error)
|
||||
}
|
||||
|
||||
// DefaultCommandExecutor implements CommandExecutor using os/exec
|
||||
type DefaultCommandExecutor struct{}
|
||||
|
||||
// Execute runs a command and returns its combined output
|
||||
func (e *DefaultCommandExecutor) Execute(name string, args ...string) ([]byte, error) {
|
||||
cmd := exec.Command(name, args...)
|
||||
return cmd.CombinedOutput()
|
||||
}
|
||||
|
||||
// GenerateCosmoRouterConfig generates a Cosmo Router execution config from subgraphs
|
||||
// using the official wgc CLI tool via npx
|
||||
func GenerateCosmoRouterConfig(subGraphs []*model.SubGraph) (string, error) {
|
||||
return GenerateCosmoRouterConfigWithExecutor(subGraphs, &DefaultCommandExecutor{})
|
||||
}
|
||||
|
||||
// GenerateCosmoRouterConfigWithExecutor generates a Cosmo Router execution config from subgraphs
|
||||
// using the provided command executor (useful for testing)
|
||||
func GenerateCosmoRouterConfigWithExecutor(subGraphs []*model.SubGraph, executor CommandExecutor) (string, error) {
|
||||
if len(subGraphs) == 0 {
|
||||
return "", fmt.Errorf("no subgraphs provided")
|
||||
}
|
||||
|
||||
// Create a temporary directory for composition
|
||||
tmpDir, err := os.MkdirTemp("", "cosmo-compose-*")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("create temp dir: %w", err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
// Write each subgraph SDL to a file
|
||||
type SubgraphConfig struct {
|
||||
Name string `yaml:"name"`
|
||||
RoutingURL string `yaml:"routing_url,omitempty"`
|
||||
Schema map[string]string `yaml:"schema"`
|
||||
Subscription map[string]interface{} `yaml:"subscription,omitempty"`
|
||||
}
|
||||
|
||||
type InputConfig struct {
|
||||
Version int `yaml:"version"`
|
||||
Subgraphs []SubgraphConfig `yaml:"subgraphs"`
|
||||
}
|
||||
|
||||
inputConfig := InputConfig{
|
||||
Version: 1,
|
||||
Subgraphs: make([]SubgraphConfig, 0, len(subGraphs)),
|
||||
}
|
||||
|
||||
for _, sg := range subGraphs {
|
||||
// Write SDL to a temp file
|
||||
schemaFile := filepath.Join(tmpDir, fmt.Sprintf("%s.graphql", sg.Service))
|
||||
if err := os.WriteFile(schemaFile, []byte(sg.Sdl), 0o644); err != nil {
|
||||
return "", fmt.Errorf("write schema file for %s: %w", sg.Service, err)
|
||||
}
|
||||
|
||||
subgraphCfg := SubgraphConfig{
|
||||
Name: sg.Service,
|
||||
Schema: map[string]string{
|
||||
"file": schemaFile,
|
||||
},
|
||||
}
|
||||
|
||||
if sg.URL != nil {
|
||||
subgraphCfg.RoutingURL = *sg.URL
|
||||
}
|
||||
|
||||
if sg.WsURL != nil {
|
||||
subgraphCfg.Subscription = map[string]interface{}{
|
||||
"url": *sg.WsURL,
|
||||
"protocol": "ws",
|
||||
"websocket_subprotocol": "graphql-ws",
|
||||
}
|
||||
}
|
||||
|
||||
inputConfig.Subgraphs = append(inputConfig.Subgraphs, subgraphCfg)
|
||||
}
|
||||
|
||||
// Write input config YAML
|
||||
inputFile := filepath.Join(tmpDir, "input.yaml")
|
||||
inputYAML, err := yaml.Marshal(inputConfig)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("marshal input config: %w", err)
|
||||
}
|
||||
if err := os.WriteFile(inputFile, inputYAML, 0o644); err != nil {
|
||||
return "", fmt.Errorf("write input config: %w", err)
|
||||
}
|
||||
|
||||
// Execute wgc router compose
|
||||
// wgc is installed globally in the Docker image
|
||||
outputFile := filepath.Join(tmpDir, "config.json")
|
||||
output, err := executor.Execute("wgc", "router", "compose",
|
||||
"--input", inputFile,
|
||||
"--out", outputFile,
|
||||
"--suppress-warnings",
|
||||
)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("wgc router compose failed: %w\nOutput: %s", err, string(output))
|
||||
}
|
||||
|
||||
// Read the generated config
|
||||
configJSON, err := os.ReadFile(outputFile)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("read output config: %w", err)
|
||||
}
|
||||
|
||||
return string(configJSON), nil
|
||||
}
|
||||
|
||||
// CosmoGenerator wraps config generation with a concurrency limit and timeout
|
||||
// to prevent unbounded wgc process spawning under rapid schema updates.
|
||||
type CosmoGenerator struct {
|
||||
sem *semaphore.Weighted
|
||||
executor CommandExecutor
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
// NewCosmoGenerator creates a CosmoGenerator that allows at most one concurrent
|
||||
// wgc process and applies the given timeout to each generation attempt.
|
||||
func NewCosmoGenerator(executor CommandExecutor, timeout time.Duration) *CosmoGenerator {
|
||||
return &CosmoGenerator{
|
||||
sem: semaphore.NewWeighted(1),
|
||||
executor: executor,
|
||||
timeout: timeout,
|
||||
}
|
||||
}
|
||||
|
||||
// Generate produces a Cosmo Router config, blocking if another generation is
|
||||
// already in progress. The provided context (plus the configured timeout)
|
||||
// controls cancellation.
|
||||
func (g *CosmoGenerator) Generate(ctx context.Context, subGraphs []*model.SubGraph) (string, error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, g.timeout)
|
||||
defer cancel()
|
||||
|
||||
if err := g.sem.Acquire(ctx, 1); err != nil {
|
||||
return "", fmt.Errorf("acquire cosmo generator: %w", err)
|
||||
}
|
||||
defer g.sem.Release(1)
|
||||
|
||||
return GenerateCosmoRouterConfigWithExecutor(subGraphs, g.executor)
|
||||
}
|
||||
@@ -0,0 +1,577 @@
|
||||
package graph
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
"gitea.unbound.se/unboundsoftware/schemas/graph/model"
|
||||
)
|
||||
|
||||
// MockCommandExecutor implements CommandExecutor for testing
|
||||
type MockCommandExecutor struct {
|
||||
// CallCount tracks how many times Execute was called
|
||||
CallCount int
|
||||
// LastArgs stores the arguments from the last call
|
||||
LastArgs []string
|
||||
// Error can be set to simulate command failures
|
||||
Error error
|
||||
}
|
||||
|
||||
// Execute mocks the wgc command by generating a realistic config.json file
|
||||
func (m *MockCommandExecutor) Execute(name string, args ...string) ([]byte, error) {
|
||||
m.CallCount++
|
||||
m.LastArgs = append([]string{name}, args...)
|
||||
|
||||
if m.Error != nil {
|
||||
return nil, m.Error
|
||||
}
|
||||
|
||||
// Parse the input file to understand what subgraphs we're composing
|
||||
var inputFile, outputFile string
|
||||
for i, arg := range args {
|
||||
if arg == "--input" && i+1 < len(args) {
|
||||
inputFile = args[i+1]
|
||||
}
|
||||
if arg == "--out" && i+1 < len(args) {
|
||||
outputFile = args[i+1]
|
||||
}
|
||||
}
|
||||
|
||||
if inputFile == "" || outputFile == "" {
|
||||
return nil, fmt.Errorf("missing required arguments")
|
||||
}
|
||||
|
||||
// Read the input YAML to get subgraph information
|
||||
inputData, err := os.ReadFile(inputFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read input file: %w", err)
|
||||
}
|
||||
|
||||
var input struct {
|
||||
Version int `yaml:"version"`
|
||||
Subgraphs []struct {
|
||||
Name string `yaml:"name"`
|
||||
RoutingURL string `yaml:"routing_url,omitempty"`
|
||||
Schema map[string]string `yaml:"schema"`
|
||||
Subscription map[string]interface{} `yaml:"subscription,omitempty"`
|
||||
} `yaml:"subgraphs"`
|
||||
}
|
||||
|
||||
if err := yaml.Unmarshal(inputData, &input); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse input YAML: %w", err)
|
||||
}
|
||||
|
||||
// Generate a realistic Cosmo Router config based on the input
|
||||
config := map[string]interface{}{
|
||||
"version": "mock-version-uuid",
|
||||
"subgraphs": func() []map[string]interface{} {
|
||||
subgraphs := make([]map[string]interface{}, len(input.Subgraphs))
|
||||
for i, sg := range input.Subgraphs {
|
||||
subgraph := map[string]interface{}{
|
||||
"id": fmt.Sprintf("mock-id-%d", i),
|
||||
"name": sg.Name,
|
||||
}
|
||||
if sg.RoutingURL != "" {
|
||||
subgraph["routingUrl"] = sg.RoutingURL
|
||||
}
|
||||
subgraphs[i] = subgraph
|
||||
}
|
||||
return subgraphs
|
||||
}(),
|
||||
"engineConfig": map[string]interface{}{
|
||||
"graphqlSchema": generateMockSchema(input.Subgraphs),
|
||||
"datasourceConfigurations": func() []map[string]interface{} {
|
||||
dsConfigs := make([]map[string]interface{}, len(input.Subgraphs))
|
||||
for i, sg := range input.Subgraphs {
|
||||
// Read SDL from file
|
||||
sdl := ""
|
||||
if schemaFile, ok := sg.Schema["file"]; ok {
|
||||
if sdlData, err := os.ReadFile(schemaFile); err == nil {
|
||||
sdl = string(sdlData)
|
||||
}
|
||||
}
|
||||
|
||||
dsConfig := map[string]interface{}{
|
||||
"id": fmt.Sprintf("datasource-%d", i),
|
||||
"kind": "GRAPHQL",
|
||||
"customGraphql": map[string]interface{}{
|
||||
"federation": map[string]interface{}{
|
||||
"enabled": true,
|
||||
"serviceSdl": sdl,
|
||||
},
|
||||
"subscription": func() map[string]interface{} {
|
||||
if len(sg.Subscription) > 0 {
|
||||
return map[string]interface{}{
|
||||
"enabled": true,
|
||||
"url": map[string]interface{}{
|
||||
"staticVariableContent": sg.Subscription["url"],
|
||||
},
|
||||
"protocol": sg.Subscription["protocol"],
|
||||
"websocketSubprotocol": sg.Subscription["websocket_subprotocol"],
|
||||
}
|
||||
}
|
||||
return map[string]interface{}{
|
||||
"enabled": false,
|
||||
}
|
||||
}(),
|
||||
},
|
||||
}
|
||||
dsConfigs[i] = dsConfig
|
||||
}
|
||||
return dsConfigs
|
||||
}(),
|
||||
},
|
||||
}
|
||||
|
||||
// Write the config to the output file
|
||||
configJSON, err := json.Marshal(config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal config: %w", err)
|
||||
}
|
||||
|
||||
if err := os.WriteFile(outputFile, configJSON, 0o644); err != nil {
|
||||
return nil, fmt.Errorf("failed to write output file: %w", err)
|
||||
}
|
||||
|
||||
return []byte("Success"), nil
|
||||
}
|
||||
|
||||
// generateMockSchema creates a simple merged schema from subgraphs
|
||||
func generateMockSchema(subgraphs []struct {
|
||||
Name string `yaml:"name"`
|
||||
RoutingURL string `yaml:"routing_url,omitempty"`
|
||||
Schema map[string]string `yaml:"schema"`
|
||||
Subscription map[string]interface{} `yaml:"subscription,omitempty"`
|
||||
},
|
||||
) string {
|
||||
schema := strings.Builder{}
|
||||
schema.WriteString("schema {\n query: Query\n")
|
||||
|
||||
// Check if any subgraph has subscriptions
|
||||
hasSubscriptions := false
|
||||
for _, sg := range subgraphs {
|
||||
if len(sg.Subscription) > 0 {
|
||||
hasSubscriptions = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if hasSubscriptions {
|
||||
schema.WriteString(" subscription: Subscription\n")
|
||||
}
|
||||
schema.WriteString("}\n\n")
|
||||
|
||||
// Add types by reading SDL files
|
||||
for _, sg := range subgraphs {
|
||||
if schemaFile, ok := sg.Schema["file"]; ok {
|
||||
if sdlData, err := os.ReadFile(schemaFile); err == nil {
|
||||
schema.WriteString(string(sdlData))
|
||||
schema.WriteString("\n")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return schema.String()
|
||||
}
|
||||
|
||||
// TestGenerateCosmoRouterConfig is a table-driven test that exercises
// GenerateCosmoRouterConfigWithExecutor with a MockCommandExecutor and checks
// the shape of the produced JSON router config (subgraphs, engineConfig,
// datasource subscription settings). No real wgc binary is invoked.
func TestGenerateCosmoRouterConfig(t *testing.T) {
	tests := []struct {
		name      string
		subGraphs []*model.SubGraph
		wantErr   bool
		// validate inspects the generated config; nil for error cases.
		validate func(t *testing.T, config string)
	}{
		{
			name: "single subgraph with all fields",
			subGraphs: []*model.SubGraph{
				{
					Service: "test-service",
					URL:     stringPtr("http://localhost:4001/query"),
					WsURL:   stringPtr("ws://localhost:4001/query"),
					Sdl:     "type Query { test: String }",
				},
			},
			wantErr: false,
			validate: func(t *testing.T, config string) {
				var result map[string]interface{}
				err := json.Unmarshal([]byte(config), &result)
				require.NoError(t, err, "Config should be valid JSON")

				// Version is a UUID string from wgc
				version, ok := result["version"].(string)
				require.True(t, ok, "Version should be a string")
				assert.NotEmpty(t, version, "Version should not be empty")

				subgraphs, ok := result["subgraphs"].([]interface{})
				require.True(t, ok, "subgraphs should be an array")
				require.Len(t, subgraphs, 1, "Should have 1 subgraph")

				sg := subgraphs[0].(map[string]interface{})
				assert.Equal(t, "test-service", sg["name"])
				assert.Equal(t, "http://localhost:4001/query", sg["routingUrl"])

				// Check that datasource configurations include subscription settings
				engineConfig, ok := result["engineConfig"].(map[string]interface{})
				require.True(t, ok, "Should have engineConfig")

				dsConfigs, ok := engineConfig["datasourceConfigurations"].([]interface{})
				require.True(t, ok && len(dsConfigs) > 0, "Should have datasource configurations")

				ds := dsConfigs[0].(map[string]interface{})
				customGraphql, ok := ds["customGraphql"].(map[string]interface{})
				require.True(t, ok, "Should have customGraphql config")

				subscription, ok := customGraphql["subscription"].(map[string]interface{})
				require.True(t, ok, "Should have subscription config")
				assert.True(t, subscription["enabled"].(bool), "Subscription should be enabled")

				subUrl, ok := subscription["url"].(map[string]interface{})
				require.True(t, ok, "Should have subscription URL")
				assert.Equal(t, "ws://localhost:4001/query", subUrl["staticVariableContent"])
			},
		},
		{
			name: "multiple subgraphs",
			subGraphs: []*model.SubGraph{
				{
					Service: "service-1",
					URL:     stringPtr("http://localhost:4001/query"),
					Sdl:     "type Query { field1: String }",
				},
				{
					Service: "service-2",
					URL:     stringPtr("http://localhost:4002/query"),
					Sdl:     "type Query { field2: String }",
				},
				{
					Service: "service-3",
					URL:     stringPtr("http://localhost:4003/query"),
					WsURL:   stringPtr("ws://localhost:4003/query"),
					Sdl:     "type Subscription { updates: String }",
				},
			},
			wantErr: false,
			validate: func(t *testing.T, config string) {
				var result map[string]interface{}
				err := json.Unmarshal([]byte(config), &result)
				require.NoError(t, err)

				subgraphs := result["subgraphs"].([]interface{})
				assert.Len(t, subgraphs, 3, "Should have 3 subgraphs")

				// Check service names
				sg1 := subgraphs[0].(map[string]interface{})
				assert.Equal(t, "service-1", sg1["name"])

				sg3 := subgraphs[2].(map[string]interface{})
				assert.Equal(t, "service-3", sg3["name"])

				// Check that datasource configurations include subscription for service-3
				engineConfig, ok := result["engineConfig"].(map[string]interface{})
				require.True(t, ok, "Should have engineConfig")

				dsConfigs, ok := engineConfig["datasourceConfigurations"].([]interface{})
				require.True(t, ok && len(dsConfigs) == 3, "Should have 3 datasource configurations")

				// Find service-3's datasource config (should have subscription enabled)
				ds3 := dsConfigs[2].(map[string]interface{})
				customGraphql, ok := ds3["customGraphql"].(map[string]interface{})
				require.True(t, ok, "Service-3 should have customGraphql config")

				subscription, ok := customGraphql["subscription"].(map[string]interface{})
				require.True(t, ok, "Service-3 should have subscription config")
				assert.True(t, subscription["enabled"].(bool), "Service-3 subscription should be enabled")
			},
		},
		{
			name: "subgraph with no URL",
			subGraphs: []*model.SubGraph{
				{
					Service: "test-service",
					URL:     nil,
					WsURL:   nil,
					Sdl:     "type Query { test: String }",
				},
			},
			wantErr: false,
			validate: func(t *testing.T, config string) {
				var result map[string]interface{}
				err := json.Unmarshal([]byte(config), &result)
				require.NoError(t, err)

				subgraphs := result["subgraphs"].([]interface{})
				sg := subgraphs[0].(map[string]interface{})

				// Should not have routing URL when URL is nil
				_, hasRoutingURL := sg["routingUrl"]
				assert.False(t, hasRoutingURL, "Should not have routingUrl when URL is nil")

				// Check datasource configurations don't have subscription enabled
				engineConfig, ok := result["engineConfig"].(map[string]interface{})
				require.True(t, ok, "Should have engineConfig")

				dsConfigs, ok := engineConfig["datasourceConfigurations"].([]interface{})
				require.True(t, ok && len(dsConfigs) > 0, "Should have datasource configurations")

				ds := dsConfigs[0].(map[string]interface{})
				customGraphql, ok := ds["customGraphql"].(map[string]interface{})
				require.True(t, ok, "Should have customGraphql config")

				subscription, ok := customGraphql["subscription"].(map[string]interface{})
				if ok {
					// wgc always enables subscription but URL should be empty when WsURL is nil
					subUrl, hasUrl := subscription["url"].(map[string]interface{})
					if hasUrl {
						_, hasStaticContent := subUrl["staticVariableContent"]
						assert.False(t, hasStaticContent, "Subscription URL should be empty when WsURL is nil")
					}
				}
			},
		},
		{
			name:      "empty subgraphs",
			subGraphs: []*model.SubGraph{},
			wantErr:   true,
			validate:  nil,
		},
		{
			name:      "nil subgraphs",
			subGraphs: nil,
			wantErr:   true,
			validate:  nil,
		},
		{
			name: "complex SDL with multiple types",
			subGraphs: []*model.SubGraph{
				{
					Service: "complex-service",
					URL:     stringPtr("http://localhost:4001/query"),
					Sdl: `
					type Query {
						user(id: ID!): User
						users: [User!]!
					}

					type User {
						id: ID!
						name: String!
						email: String!
					}
					`,
				},
			},
			wantErr: false,
			validate: func(t *testing.T, config string) {
				var result map[string]interface{}
				err := json.Unmarshal([]byte(config), &result)
				require.NoError(t, err)

				// Check the composed graphqlSchema contains the types
				engineConfig, ok := result["engineConfig"].(map[string]interface{})
				require.True(t, ok, "Should have engineConfig")

				graphqlSchema, ok := engineConfig["graphqlSchema"].(string)
				require.True(t, ok, "Should have graphqlSchema")

				assert.Contains(t, graphqlSchema, "Query", "Schema should contain Query type")
				assert.Contains(t, graphqlSchema, "User", "Schema should contain User type")

				// Check datasource has the original SDL
				dsConfigs, ok := engineConfig["datasourceConfigurations"].([]interface{})
				require.True(t, ok && len(dsConfigs) > 0, "Should have datasource configurations")

				ds := dsConfigs[0].(map[string]interface{})
				customGraphql, ok := ds["customGraphql"].(map[string]interface{})
				require.True(t, ok, "Should have customGraphql config")

				federation, ok := customGraphql["federation"].(map[string]interface{})
				require.True(t, ok, "Should have federation config")

				serviceSdl, ok := federation["serviceSdl"].(string)
				require.True(t, ok, "Should have serviceSdl")
				assert.Contains(t, serviceSdl, "email: String!", "SDL should contain email field")
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Use mock executor for all tests
			mockExecutor := &MockCommandExecutor{}
			config, err := GenerateCosmoRouterConfigWithExecutor(tt.subGraphs, mockExecutor)

			if tt.wantErr {
				assert.Error(t, err)
				// Verify executor was not called for error cases
				if len(tt.subGraphs) == 0 {
					assert.Equal(t, 0, mockExecutor.CallCount, "Should not call executor for empty subgraphs")
				}
				return
			}

			require.NoError(t, err)
			assert.NotEmpty(t, config, "Config should not be empty")

			// Verify executor was called correctly
			assert.Equal(t, 1, mockExecutor.CallCount, "Should call executor once")
			assert.Equal(t, "wgc", mockExecutor.LastArgs[0], "Should call wgc command")
			assert.Contains(t, mockExecutor.LastArgs, "router", "Should include 'router' arg")
			assert.Contains(t, mockExecutor.LastArgs, "compose", "Should include 'compose' arg")

			if tt.validate != nil {
				tt.validate(t, config)
			}
		})
	}
}
|
||||
|
||||
// TestGenerateCosmoRouterConfig_MockError tests error handling with mock executor
// — it verifies that an executor failure is wrapped with the "wgc router compose
// failed" prefix, that the original error text is preserved, and that the
// returned config is empty.
func TestGenerateCosmoRouterConfig_MockError(t *testing.T) {
	subGraphs := []*model.SubGraph{
		{
			Service: "test-service",
			URL:     stringPtr("http://localhost:4001/query"),
			Sdl:     "type Query { test: String }",
		},
	}

	// Create a mock executor that returns an error
	mockExecutor := &MockCommandExecutor{
		Error: fmt.Errorf("simulated wgc failure"),
	}

	config, err := GenerateCosmoRouterConfigWithExecutor(subGraphs, mockExecutor)

	// Verify error is propagated
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "wgc router compose failed")
	assert.Contains(t, err.Error(), "simulated wgc failure")
	assert.Empty(t, config)

	// Verify executor was called
	assert.Equal(t, 1, mockExecutor.CallCount, "Should have attempted to call executor")
}
|
||||
|
||||
// SlowMockExecutor simulates a slow wgc command for concurrency testing.
// It embeds MockCommandExecutor and records how many Execute calls overlap,
// so tests can assert on the concurrency ceiling enforced by the generator.
type SlowMockExecutor struct {
	MockCommandExecutor
	delay      time.Duration // artificial duration of each Execute call
	mu         sync.Mutex    // serializes the embedded (non-thread-safe) mock
	concurrent atomic.Int32  // number of Execute calls currently in flight
	maxSeen    atomic.Int32  // high-water mark of concurrent
}
|
||||
|
||||
// Execute sleeps for m.delay while counting in-flight calls, then delegates to
// the embedded MockCommandExecutor under m.mu (the embedded mock mutates
// shared counters and is not assumed to be thread-safe).
func (m *SlowMockExecutor) Execute(name string, args ...string) ([]byte, error) {
	cur := m.concurrent.Add(1)
	// Track the maximum concurrent executions observed.
	// Lock-free max update: retry the CAS until either our value is no longer
	// larger than the stored maximum or we successfully store it.
	for {
		old := m.maxSeen.Load()
		if cur <= old || m.maxSeen.CompareAndSwap(old, cur) {
			break
		}
	}
	defer m.concurrent.Add(-1)

	// Simulate a long-running wgc invocation so overlapping callers are visible.
	time.Sleep(m.delay)

	m.mu.Lock()
	defer m.mu.Unlock()
	return m.MockCommandExecutor.Execute(name, args...)
}
|
||||
|
||||
// TestCosmoGenerator_ConcurrencyLimit launches 5 concurrent Generate calls and
// asserts (via SlowMockExecutor's high-water mark) that the generator
// serializes wgc invocations to at most one at a time.
func TestCosmoGenerator_ConcurrencyLimit(t *testing.T) {
	executor := &SlowMockExecutor{delay: 100 * time.Millisecond}
	gen := NewCosmoGenerator(executor, 5*time.Second)

	subGraphs := []*model.SubGraph{
		{
			Service: "svc",
			URL:     stringPtr("http://localhost:4001/query"),
			Sdl:     "type Query { hello: String }",
		},
	}

	var wg sync.WaitGroup
	for range 5 {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_, _ = gen.Generate(context.Background(), subGraphs)
		}()
	}
	wg.Wait()

	assert.Equal(t, int32(1), executor.maxSeen.Load(),
		"at most 1 wgc process should run concurrently")
}
|
||||
|
||||
// TestCosmoGenerator_Timeout verifies that a Generate call which cannot
// acquire the generator's semaphore within the configured timeout fails with
// an "acquire cosmo generator" error.
// NOTE(review): relies on sleep-based scheduling (20ms vs 50ms/500ms); could
// be flaky on a heavily loaded CI machine.
func TestCosmoGenerator_Timeout(t *testing.T) {
	// Executor that takes longer than the timeout.
	executor := &SlowMockExecutor{delay: 500 * time.Millisecond}
	gen := NewCosmoGenerator(executor, 50*time.Millisecond)

	subGraphs := []*model.SubGraph{
		{
			Service: "svc",
			URL:     stringPtr("http://localhost:4001/query"),
			Sdl:     "type Query { hello: String }",
		},
	}

	// First call: occupies the semaphore for 500ms.
	go func() {
		_, _ = gen.Generate(context.Background(), subGraphs)
	}()

	// Give the first goroutine time to acquire the semaphore.
	time.Sleep(20 * time.Millisecond)

	// Second call: should timeout waiting for the semaphore.
	_, err := gen.Generate(context.Background(), subGraphs)
	require.Error(t, err)
	assert.Contains(t, err.Error(), "acquire cosmo generator")
}
|
||||
|
||||
// TestCosmoGenerator_ContextCancellation verifies that Generate fails fast with
// an "acquire cosmo generator" error when called with an already-cancelled
// context while the semaphore is held by another caller.
func TestCosmoGenerator_ContextCancellation(t *testing.T) {
	executor := &SlowMockExecutor{delay: 500 * time.Millisecond}
	gen := NewCosmoGenerator(executor, 5*time.Second)

	subGraphs := []*model.SubGraph{
		{
			Service: "svc",
			URL:     stringPtr("http://localhost:4001/query"),
			Sdl:     "type Query { hello: String }",
		},
	}

	// First call: occupies the semaphore.
	go func() {
		_, _ = gen.Generate(context.Background(), subGraphs)
	}()

	// Give the first goroutine time to acquire the semaphore.
	time.Sleep(20 * time.Millisecond)

	// Second call with an already-cancelled context.
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	_, err := gen.Generate(ctx, subGraphs)
	require.Error(t, err)
	assert.Contains(t, err.Error(), "acquire cosmo generator")
}
|
||||
|
||||
// stringPtr is a test helper that returns a pointer to a copy of s, for
// populating optional *string fields in fixtures.
func stringPtr(s string) *string {
	value := s
	return &value
}
|
||||
@@ -0,0 +1,42 @@
|
||||
package graph
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Debouncer coalesces rapid calls with the same key, executing only the last
|
||||
// one after a configurable delay. This prevents redundant work when multiple
|
||||
// updates arrive in quick succession (e.g., rapid schema publishing).
|
||||
type Debouncer struct {
|
||||
mu sync.Mutex
|
||||
delay time.Duration
|
||||
timers map[string]*time.Timer
|
||||
}
|
||||
|
||||
// NewDebouncer creates a Debouncer with the given delay window.
|
||||
func NewDebouncer(delay time.Duration) *Debouncer {
|
||||
return &Debouncer{
|
||||
delay: delay,
|
||||
timers: make(map[string]*time.Timer),
|
||||
}
|
||||
}
|
||||
|
||||
// Debounce resets the timer for key. When the timer fires (after delay with no
|
||||
// new calls for the same key), fn is executed in a new goroutine.
|
||||
func (d *Debouncer) Debounce(key string, fn func()) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
if t, ok := d.timers[key]; ok {
|
||||
t.Stop()
|
||||
}
|
||||
|
||||
d.timers[key] = time.AfterFunc(d.delay, func() {
|
||||
d.mu.Lock()
|
||||
delete(d.timers, key)
|
||||
d.mu.Unlock()
|
||||
|
||||
fn()
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,57 @@
|
||||
package graph
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestDebouncer_Coalesces verifies that many rapid Debounce calls for a single
// key result in exactly one execution after the delay elapses.
func TestDebouncer_Coalesces(t *testing.T) {
	d := NewDebouncer(50 * time.Millisecond)
	var calls atomic.Int32

	// Fire 10 rapid calls for the same key — only the last should execute.
	for range 10 {
		d.Debounce("key1", func() {
			calls.Add(1)
		})
	}

	// Wait for the debounce delay plus some margin.
	time.Sleep(150 * time.Millisecond)

	assert.Equal(t, int32(1), calls.Load(), "rapid calls should coalesce into a single execution")
}
|
||||
|
||||
// TestDebouncer_DifferentKeys verifies that debouncing is per-key: calls with
// distinct keys do not coalesce with each other and all fire.
func TestDebouncer_DifferentKeys(t *testing.T) {
	d := NewDebouncer(50 * time.Millisecond)
	var calls atomic.Int32

	d.Debounce("key-a", func() { calls.Add(1) })
	d.Debounce("key-b", func() { calls.Add(1) })
	d.Debounce("key-c", func() { calls.Add(1) })

	// Wait past the delay so all three timers have fired.
	time.Sleep(150 * time.Millisecond)

	assert.Equal(t, int32(3), calls.Load(), "different keys should fire independently")
}
|
||||
|
||||
// TestDebouncer_TimerReset verifies that a second Debounce call within the
// delay window cancels the first fn and restarts the timer, so only the
// later fn executes.
// NOTE(review): sleep-based (60ms within a 100ms window); could be flaky
// under heavy scheduler load.
func TestDebouncer_TimerReset(t *testing.T) {
	d := NewDebouncer(100 * time.Millisecond)
	var value atomic.Int32

	// First call sets value to 1.
	d.Debounce("key", func() { value.Store(1) })

	// Wait 60ms (less than the 100ms delay), then replace with value 2.
	time.Sleep(60 * time.Millisecond)
	d.Debounce("key", func() { value.Store(2) })

	// At 60ms the first timer hasn't fired yet. Wait for the second timer.
	time.Sleep(150 * time.Millisecond)

	require.Equal(t, int32(2), value.Load(), "later call should replace the earlier one")
}
|
||||
+2742
-2924
File diff suppressed because it is too large
Load Diff
@@ -36,6 +36,9 @@ type InputSubGraph struct {
|
||||
Sdl string `json:"sdl"`
|
||||
}
|
||||
|
||||
type Mutation struct {
|
||||
}
|
||||
|
||||
type Organization struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
@@ -43,6 +46,16 @@ type Organization struct {
|
||||
APIKeys []*APIKey `json:"apiKeys"`
|
||||
}
|
||||
|
||||
type Query struct {
|
||||
}
|
||||
|
||||
type SchemaUpdate struct {
|
||||
Ref string `json:"ref"`
|
||||
ID string `json:"id"`
|
||||
SubGraphs []*SubGraph `json:"subGraphs"`
|
||||
CosmoRouterConfig *string `json:"cosmoRouterConfig,omitempty"`
|
||||
}
|
||||
|
||||
type SubGraph struct {
|
||||
ID string `json:"id"`
|
||||
Service string `json:"service"`
|
||||
@@ -56,11 +69,15 @@ type SubGraph struct {
|
||||
type SubGraphs struct {
|
||||
ID string `json:"id"`
|
||||
MinDelaySeconds int `json:"minDelaySeconds"`
|
||||
Sdl string `json:"sdl"`
|
||||
SubGraphs []*SubGraph `json:"subGraphs"`
|
||||
}
|
||||
|
||||
func (SubGraphs) IsSupergraph() {}
|
||||
|
||||
type Subscription struct {
|
||||
}
|
||||
|
||||
type Unchanged struct {
|
||||
ID string `json:"id"`
|
||||
MinDelaySeconds int `json:"minDelaySeconds"`
|
||||
|
||||
@@ -0,0 +1,66 @@
|
||||
package graph
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"gitea.unbound.se/unboundsoftware/schemas/graph/model"
|
||||
)
|
||||
|
||||
// PubSub handles publishing schema updates to subscribers.
// Fan-out is per schema ref: each ref maps to the list of buffered channels
// currently subscribed to it.
type PubSub struct {
	mu          sync.RWMutex                          // guards subscribers
	subscribers map[string][]chan *model.SchemaUpdate // subscriber channels keyed by schema ref
}
|
||||
|
||||
func NewPubSub() *PubSub {
|
||||
return &PubSub{
|
||||
subscribers: make(map[string][]chan *model.SchemaUpdate),
|
||||
}
|
||||
}
|
||||
|
||||
// Subscribe creates a new subscription channel for a given schema ref
|
||||
func (ps *PubSub) Subscribe(ref string) chan *model.SchemaUpdate {
|
||||
ps.mu.Lock()
|
||||
defer ps.mu.Unlock()
|
||||
|
||||
ch := make(chan *model.SchemaUpdate, 10)
|
||||
ps.subscribers[ref] = append(ps.subscribers[ref], ch)
|
||||
|
||||
return ch
|
||||
}
|
||||
|
||||
// Unsubscribe removes a subscription channel
|
||||
func (ps *PubSub) Unsubscribe(ref string, ch chan *model.SchemaUpdate) {
|
||||
ps.mu.Lock()
|
||||
defer ps.mu.Unlock()
|
||||
|
||||
subs := ps.subscribers[ref]
|
||||
for i, sub := range subs {
|
||||
if sub == ch {
|
||||
// Remove this subscriber
|
||||
ps.subscribers[ref] = append(subs[:i], subs[i+1:]...)
|
||||
close(sub)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up empty subscriber lists
|
||||
if len(ps.subscribers[ref]) == 0 {
|
||||
delete(ps.subscribers, ref)
|
||||
}
|
||||
}
|
||||
|
||||
// Publish sends a schema update to all subscribers of a given ref.
// Delivery is best-effort: each send is non-blocking, so a subscriber whose
// buffer (capacity 10) is full silently misses this update. Holding only the
// read lock allows concurrent Publish calls; Unsubscribe's close happens
// under the write lock, so Publish never sends on a closed channel.
func (ps *PubSub) Publish(ref string, update *model.SchemaUpdate) {
	ps.mu.RLock()
	defer ps.mu.RUnlock()

	for _, ch := range ps.subscribers[ref] {
		// Non-blocking send - if subscriber is slow, skip
		select {
		case ch <- update:
		default:
			// Channel full, subscriber is too slow - skip this update
		}
	}
}
|
||||
@@ -0,0 +1,256 @@
|
||||
package graph
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"gitea.unbound.se/unboundsoftware/schemas/graph/model"
|
||||
)
|
||||
|
||||
// TestPubSub_SubscribeAndPublish covers the happy path: a subscriber for a ref
// receives a published update with its fields intact.
func TestPubSub_SubscribeAndPublish(t *testing.T) {
	ps := NewPubSub()
	ref := "Test@dev"

	// Subscribe
	ch := ps.Subscribe(ref)
	require.NotNil(t, ch, "Subscribe should return a channel")

	// Publish
	update := &model.SchemaUpdate{
		Ref: ref,
		ID:  "test-id-1",
		SubGraphs: []*model.SubGraph{
			{
				ID:      "sg1",
				Service: "test-service",
				Sdl:     "type Query { test: String }",
			},
		},
	}

	go ps.Publish(ref, update)

	// Receive
	select {
	case received := <-ch:
		assert.Equal(t, update.Ref, received.Ref, "Ref should match")
		assert.Equal(t, update.ID, received.ID, "ID should match")
		assert.Equal(t, len(update.SubGraphs), len(received.SubGraphs), "SubGraphs count should match")
	case <-time.After(1 * time.Second):
		t.Fatal("Timeout waiting for published update")
	}
}
|
||||
|
||||
// TestPubSub_MultipleSubscribers verifies fan-out: one Publish reaches every
// subscriber registered for the same ref.
func TestPubSub_MultipleSubscribers(t *testing.T) {
	ps := NewPubSub()
	ref := "Test@dev"

	// Create multiple subscribers
	ch1 := ps.Subscribe(ref)
	ch2 := ps.Subscribe(ref)
	ch3 := ps.Subscribe(ref)

	update := &model.SchemaUpdate{
		Ref: ref,
		ID:  "test-id-2",
	}

	// Publish once
	ps.Publish(ref, update)

	// All subscribers should receive the update
	var wg sync.WaitGroup
	wg.Add(3)

	// checkReceived drains one channel and asserts the update made it there.
	// (t.Errorf is safe to call from multiple goroutines.)
	checkReceived := func(ch <-chan *model.SchemaUpdate, name string) {
		defer wg.Done()
		select {
		case received := <-ch:
			assert.Equal(t, update.ID, received.ID, "%s should receive correct update", name)
		case <-time.After(1 * time.Second):
			t.Errorf("%s: Timeout waiting for update", name)
		}
	}

	go checkReceived(ch1, "Subscriber 1")
	go checkReceived(ch2, "Subscriber 2")
	go checkReceived(ch3, "Subscriber 3")

	wg.Wait()
}
|
||||
|
||||
// TestPubSub_DifferentRefs verifies ref isolation: a publish for one ref is
// delivered only to that ref's subscribers.
func TestPubSub_DifferentRefs(t *testing.T) {
	ps := NewPubSub()

	ref1 := "Test1@dev"
	ref2 := "Test2@dev"

	ch1 := ps.Subscribe(ref1)
	ch2 := ps.Subscribe(ref2)

	update1 := &model.SchemaUpdate{Ref: ref1, ID: "id1"}
	update2 := &model.SchemaUpdate{Ref: ref2, ID: "id2"}

	// Publish to ref1
	ps.Publish(ref1, update1)

	// Only ch1 should receive
	select {
	case received := <-ch1:
		assert.Equal(t, "id1", received.ID)
	case <-time.After(100 * time.Millisecond):
		t.Fatal("ch1 should have received update")
	}

	// ch2 should not receive ref1's update
	select {
	case <-ch2:
		t.Fatal("ch2 should not receive ref1's update")
	case <-time.After(100 * time.Millisecond):
		// Expected - no update
	}

	// Publish to ref2
	ps.Publish(ref2, update2)

	// Now ch2 should receive
	select {
	case received := <-ch2:
		assert.Equal(t, "id2", received.ID)
	case <-time.After(100 * time.Millisecond):
		t.Fatal("ch2 should have received update")
	}
}
|
||||
|
||||
// TestPubSub_Unsubscribe verifies that Unsubscribe closes the channel and that
// publishing to a ref with no remaining subscribers is safe.
func TestPubSub_Unsubscribe(t *testing.T) {
	ps := NewPubSub()
	ref := "Test@dev"

	ch := ps.Subscribe(ref)

	// Unsubscribe
	ps.Unsubscribe(ref, ch)

	// Channel should be closed
	_, ok := <-ch
	assert.False(t, ok, "Channel should be closed after unsubscribe")

	// Publishing after unsubscribe should not panic
	assert.NotPanics(t, func() {
		ps.Publish(ref, &model.SchemaUpdate{Ref: ref})
	})
}
|
||||
|
||||
// TestPubSub_BufferedChannel verifies the subscription channel buffers up to
// 10 updates, so a subscriber that drains later still receives all of them.
func TestPubSub_BufferedChannel(t *testing.T) {
	ps := NewPubSub()
	ref := "Test@dev"

	ch := ps.Subscribe(ref)

	// Publish multiple updates quickly (up to buffer size of 10)
	for i := 0; i < 10; i++ {
		update := &model.SchemaUpdate{
			Ref: ref,
			ID:  string(rune('a' + i)),
		}
		ps.Publish(ref, update)
	}

	// All 10 should be buffered and receivable
	received := 0
	timeout := time.After(1 * time.Second)

	for received < 10 {
		select {
		case <-ch:
			received++
		case <-timeout:
			t.Fatalf("Only received %d out of 10 updates", received)
		}
	}

	assert.Equal(t, 10, received, "Should receive all buffered updates")
}
|
||||
|
||||
// TestPubSub_SlowSubscriber verifies the drop-on-full policy: once the
// subscriber's 10-slot buffer is full, additional publishes are silently
// discarded rather than blocking the publisher.
func TestPubSub_SlowSubscriber(t *testing.T) {
	ps := NewPubSub()
	ref := "Test@dev"

	ch := ps.Subscribe(ref)

	// Fill the buffer (10 items)
	for i := 0; i < 10; i++ {
		ps.Publish(ref, &model.SchemaUpdate{Ref: ref})
	}

	// Publish one more - this should be dropped (channel full, non-blocking send)
	ps.Publish(ref, &model.SchemaUpdate{Ref: ref, ID: "should-be-dropped"})

	// Drain the channel
	count := 0
	timeout := time.After(500 * time.Millisecond)

drainLoop:
	for {
		select {
		case update := <-ch:
			count++
			// Should not receive the dropped update
			assert.NotEqual(t, "should-be-dropped", update.ID, "Should not receive dropped update")
		case <-timeout:
			break drainLoop
		}
	}

	// Should have received exactly 10 (the buffer size), not 11
	assert.Equal(t, 10, count, "Should only receive buffered updates, not the dropped one")
}
|
||||
|
||||
// TestPubSub_ConcurrentPublish is a smoke/race test: many goroutines publish
// to the same ref concurrently. It asserts no panic/deadlock and that the
// subscriber receives at least some updates (the drop-on-full policy makes
// an exact count nondeterministic).
func TestPubSub_ConcurrentPublish(t *testing.T) {
	ps := NewPubSub()
	ref := "Test@dev"

	ch := ps.Subscribe(ref)

	numPublishers := 10
	updatesPerPublisher := 10

	var wg sync.WaitGroup
	wg.Add(numPublishers)

	// Multiple goroutines publishing concurrently
	for i := 0; i < numPublishers; i++ {
		go func(publisherID int) {
			defer wg.Done()
			for j := 0; j < updatesPerPublisher; j++ {
				ps.Publish(ref, &model.SchemaUpdate{
					Ref: ref,
					ID:  string(rune('a' + publisherID)),
				})
			}
		}(i)
	}

	wg.Wait()

	// Should not panic and subscriber should receive updates
	// (exact count may vary due to buffer and timing)
	timeout := time.After(1 * time.Second)
	received := 0

receiveLoop:
	for {
		select {
		case <-ch:
			received++
		case <-timeout:
			break receiveLoop
		}
	}

	assert.Greater(t, received, 0, "Should have received some updates")
}
|
||||
+12
-7
@@ -3,15 +3,17 @@ package graph
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
|
||||
"github.com/apex/log"
|
||||
"gitlab.com/unboundsoftware/eventsourced/eventsourced"
|
||||
|
||||
"gitlab.com/unboundsoftware/schemas/cache"
|
||||
"gitlab.com/unboundsoftware/schemas/middleware"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/cache"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/middleware"
|
||||
)
|
||||
|
||||
//go:generate go run github.com/99designs/gqlgen
|
||||
//go:generate gofumpt -w .
|
||||
//go:generate goimports -w -local gitea.unbound.se/unboundsoftware/schemas .
|
||||
|
||||
// This file will not be regenerated automatically.
|
||||
//
|
||||
@@ -22,10 +24,13 @@ type Publisher interface {
|
||||
}
|
||||
|
||||
type Resolver struct {
|
||||
EventStore eventsourced.EventStore
|
||||
Publisher Publisher
|
||||
Logger log.Interface
|
||||
Cache *cache.Cache
|
||||
EventStore eventsourced.EventStore
|
||||
Publisher Publisher
|
||||
Logger *slog.Logger
|
||||
Cache *cache.Cache
|
||||
PubSub *PubSub
|
||||
CosmoGenerator *CosmoGenerator
|
||||
Debouncer *Debouncer
|
||||
}
|
||||
|
||||
func (r *Resolver) apiKeyCanAccessRef(ctx context.Context, ref string, publish bool) (string, error) {
|
||||
|
||||
+18
-1
@@ -1,14 +1,23 @@
|
||||
type Query {
|
||||
organizations: [Organization!]! @auth(user: true)
|
||||
supergraph(ref: String!, isAfter: String): Supergraph! @auth(organization: true)
|
||||
allOrganizations: [Organization!]! @auth(user: true)
|
||||
supergraph(ref: String!, isAfter: String): Supergraph! @auth(user: true, organization: true)
|
||||
latestSchema(ref: String!): SchemaUpdate! @auth(user: true, organization: true)
|
||||
}
|
||||
|
||||
type Mutation {
|
||||
addOrganization(name: String!): Organization! @auth(user: true)
|
||||
addUserToOrganization(organizationId: ID!, userId: String!): Organization! @auth(user: true)
|
||||
addAPIKey(input: InputAPIKey): APIKey! @auth(user: true)
|
||||
removeAPIKey(organizationId: ID!, keyName: String!): Organization! @auth(user: true)
|
||||
removeOrganization(organizationId: ID!): Boolean! @auth(user: true)
|
||||
updateSubGraph(input: InputSubGraph!): SubGraph! @auth(organization: true)
|
||||
}
|
||||
|
||||
type Subscription {
|
||||
schemaUpdates(ref: String!): SchemaUpdate! @auth(organization: true)
|
||||
}
|
||||
|
||||
type Organization {
|
||||
id: ID!
|
||||
name: String!
|
||||
@@ -40,6 +49,7 @@ type Unchanged {
|
||||
type SubGraphs {
|
||||
id: ID!
|
||||
minDelaySeconds: Int!
|
||||
sdl: String!
|
||||
subGraphs: [SubGraph!]!
|
||||
}
|
||||
|
||||
@@ -53,6 +63,13 @@ type SubGraph {
|
||||
changedAt: Time!
|
||||
}
|
||||
|
||||
type SchemaUpdate {
|
||||
ref: String!
|
||||
id: ID!
|
||||
subGraphs: [SubGraph!]!
|
||||
cosmoRouterConfig: String
|
||||
}
|
||||
|
||||
input InputAPIKey {
|
||||
name: String!
|
||||
organizationId: ID!
|
||||
|
||||
@@ -5,8 +5,8 @@ import (
|
||||
|
||||
"gitlab.com/unboundsoftware/eventsourced/eventsourced"
|
||||
|
||||
"gitlab.com/unboundsoftware/schemas/domain"
|
||||
"gitlab.com/unboundsoftware/schemas/graph/model"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/domain"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/graph/model"
|
||||
)
|
||||
|
||||
func (r *Resolver) fetchSubGraph(ctx context.Context, subGraphId string) (*domain.SubGraph, error) {
|
||||
|
||||
+318
-17
@@ -1,6 +1,7 @@
|
||||
package graph
|
||||
|
||||
// This file will be automatically regenerated based on the schema, any resolver implementations
|
||||
// This file will be automatically regenerated based on the schema, any resolver
|
||||
// implementations
|
||||
// will be copied through when generating and any unknown code will be moved to the end.
|
||||
// Code generated by github.com/99designs/gqlgen
|
||||
|
||||
@@ -9,14 +10,14 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/wundergraph/graphql-go-tools/pkg/federation/sdlmerge"
|
||||
"gitlab.com/unboundsoftware/eventsourced/eventsourced"
|
||||
|
||||
"gitlab.com/unboundsoftware/schemas/domain"
|
||||
"gitlab.com/unboundsoftware/schemas/graph/generated"
|
||||
"gitlab.com/unboundsoftware/schemas/graph/model"
|
||||
"gitlab.com/unboundsoftware/schemas/middleware"
|
||||
"gitlab.com/unboundsoftware/schemas/rand"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/domain"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/graph/generated"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/graph/model"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/middleware"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/rand"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/sdlmerge"
|
||||
)
|
||||
|
||||
// AddOrganization is the resolver for the addOrganization field.
|
||||
@@ -37,6 +38,24 @@ func (r *mutationResolver) AddOrganization(ctx context.Context, name string) (*m
|
||||
return ToGqlOrganization(*org), nil
|
||||
}
|
||||
|
||||
// AddUserToOrganization is the resolver for the addUserToOrganization field.
|
||||
func (r *mutationResolver) AddUserToOrganization(ctx context.Context, organizationID string, userID string) (*model.Organization, error) {
|
||||
sub := middleware.UserFromContext(ctx)
|
||||
org := &domain.Organization{BaseAggregate: eventsourced.BaseAggregateFromString(organizationID)}
|
||||
h, err := r.handler(ctx, org)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = h.Handle(ctx, &domain.AddUserToOrganization{
|
||||
UserId: userID,
|
||||
Initiator: sub,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ToGqlOrganization(*org), nil
|
||||
}
|
||||
|
||||
// AddAPIKey is the resolver for the addAPIKey field.
|
||||
func (r *mutationResolver) AddAPIKey(ctx context.Context, input *model.InputAPIKey) (*model.APIKey, error) {
|
||||
sub := middleware.UserFromContext(ctx)
|
||||
@@ -71,6 +90,41 @@ func (r *mutationResolver) AddAPIKey(ctx context.Context, input *model.InputAPIK
|
||||
}, nil
|
||||
}
|
||||
|
||||
// RemoveAPIKey is the resolver for the removeAPIKey field.
|
||||
func (r *mutationResolver) RemoveAPIKey(ctx context.Context, organizationID string, keyName string) (*model.Organization, error) {
|
||||
sub := middleware.UserFromContext(ctx)
|
||||
org := &domain.Organization{BaseAggregate: eventsourced.BaseAggregateFromString(organizationID)}
|
||||
h, err := r.handler(ctx, org)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = h.Handle(ctx, &domain.RemoveAPIKey{
|
||||
KeyName: keyName,
|
||||
Initiator: sub,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ToGqlOrganization(*org), nil
|
||||
}
|
||||
|
||||
// RemoveOrganization is the resolver for the removeOrganization field.
|
||||
func (r *mutationResolver) RemoveOrganization(ctx context.Context, organizationID string) (bool, error) {
|
||||
sub := middleware.UserFromContext(ctx)
|
||||
org := &domain.Organization{BaseAggregate: eventsourced.BaseAggregateFromString(organizationID)}
|
||||
h, err := r.handler(ctx, org)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
_, err = h.Handle(ctx, &domain.RemoveOrganization{
|
||||
Initiator: sub,
|
||||
})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// UpdateSubGraph is the resolver for the updateSubGraph field.
|
||||
func (r *mutationResolver) UpdateSubGraph(ctx context.Context, input model.InputSubGraph) (*model.SubGraph, error) {
|
||||
orgId := middleware.OrganizationFromContext(ctx)
|
||||
@@ -119,6 +173,53 @@ func (r *mutationResolver) UpdateSubGraph(ctx context.Context, input model.Input
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Debounce schema update publishing so rapid successive updates for the
|
||||
// same org+ref only trigger one config generation.
|
||||
r.Debouncer.Debounce(orgId+":"+input.Ref, func() {
|
||||
services, lastUpdate := r.Cache.Services(orgId, input.Ref, "")
|
||||
r.Logger.Info("Publishing schema update after subgraph change",
|
||||
"ref", input.Ref,
|
||||
"orgId", orgId,
|
||||
"lastUpdate", lastUpdate,
|
||||
"servicesCount", len(services),
|
||||
)
|
||||
|
||||
subGraphs := make([]*model.SubGraph, len(services))
|
||||
for i, id := range services {
|
||||
sg, err := r.fetchSubGraph(context.Background(), id)
|
||||
if err != nil {
|
||||
r.Logger.Error("fetch subgraph for update notification", "error", err)
|
||||
continue
|
||||
}
|
||||
subGraphs[i] = r.toGqlSubGraph(sg)
|
||||
}
|
||||
|
||||
// Generate Cosmo router config (concurrency-limited)
|
||||
cosmoConfig, err := r.CosmoGenerator.Generate(context.Background(), subGraphs)
|
||||
if err != nil {
|
||||
r.Logger.Error("generate cosmo config for update", "error", err)
|
||||
cosmoConfig = "" // Send empty if generation fails
|
||||
}
|
||||
|
||||
// Publish to all subscribers of this ref
|
||||
update := &model.SchemaUpdate{
|
||||
Ref: input.Ref,
|
||||
ID: lastUpdate,
|
||||
SubGraphs: subGraphs,
|
||||
CosmoRouterConfig: &cosmoConfig,
|
||||
}
|
||||
|
||||
r.Logger.Info("Publishing schema update to subscribers",
|
||||
"ref", update.Ref,
|
||||
"id", update.ID,
|
||||
"subGraphsCount", len(update.SubGraphs),
|
||||
"cosmoConfigLength", len(cosmoConfig),
|
||||
)
|
||||
|
||||
r.PubSub.Publish(input.Ref, update)
|
||||
})
|
||||
|
||||
return r.toGqlSubGraph(subGraph), nil
|
||||
}
|
||||
|
||||
@@ -129,13 +230,49 @@ func (r *queryResolver) Organizations(ctx context.Context) ([]*model.Organizatio
|
||||
return ToGqlOrganizations(orgs), nil
|
||||
}
|
||||
|
||||
// AllOrganizations is the resolver for the allOrganizations field.
|
||||
func (r *queryResolver) AllOrganizations(ctx context.Context) ([]*model.Organization, error) {
|
||||
// Check if user has admin role
|
||||
if !middleware.UserHasRole(ctx, "admin") {
|
||||
return nil, fmt.Errorf("unauthorized: admin role required")
|
||||
}
|
||||
|
||||
orgs := r.Cache.AllOrganizations()
|
||||
return ToGqlOrganizations(orgs), nil
|
||||
}
|
||||
|
||||
// Supergraph is the resolver for the supergraph field.
|
||||
func (r *queryResolver) Supergraph(ctx context.Context, ref string, isAfter *string) (model.Supergraph, error) {
|
||||
orgId := middleware.OrganizationFromContext(ctx)
|
||||
_, err := r.apiKeyCanAccessRef(ctx, ref, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
userId := middleware.UserFromContext(ctx)
|
||||
|
||||
r.Logger.Info("Supergraph query",
|
||||
"ref", ref,
|
||||
"orgId", orgId,
|
||||
"userId", userId,
|
||||
)
|
||||
|
||||
// If authenticated with API key (organization), check access
|
||||
if orgId != "" {
|
||||
_, err := r.apiKeyCanAccessRef(ctx, ref, false)
|
||||
if err != nil {
|
||||
r.Logger.Error("API key cannot access ref", "error", err, "ref", ref)
|
||||
return nil, err
|
||||
}
|
||||
} else if userId != "" {
|
||||
// For user authentication, check if user has access to ref through their organizations
|
||||
userOrgs := r.Cache.OrganizationsByUser(userId)
|
||||
if len(userOrgs) == 0 {
|
||||
r.Logger.Error("User has no organizations", "userId", userId)
|
||||
return nil, fmt.Errorf("user has no access to any organizations")
|
||||
}
|
||||
// Use the first organization's ID for querying
|
||||
orgId = userOrgs[0].ID.String()
|
||||
r.Logger.Info("Using organization from user context", "orgId", orgId)
|
||||
} else {
|
||||
return nil, fmt.Errorf("no authentication provided")
|
||||
}
|
||||
|
||||
after := ""
|
||||
if isAfter != nil {
|
||||
after = *isAfter
|
||||
@@ -148,11 +285,77 @@ func (r *queryResolver) Supergraph(ctx context.Context, ref string, isAfter *str
|
||||
}, nil
|
||||
}
|
||||
subGraphs := make([]*model.SubGraph, len(services))
|
||||
serviceSDLs := make([]string, len(services))
|
||||
for i, id := range services {
|
||||
sg, err := r.fetchSubGraph(ctx, id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
subGraphs[i] = r.toGqlSubGraph(sg)
|
||||
serviceSDLs[i] = sg.Sdl
|
||||
}
|
||||
|
||||
sdl, err := sdlmerge.MergeSDLs(serviceSDLs...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &model.SubGraphs{
|
||||
ID: lastUpdate,
|
||||
SubGraphs: subGraphs,
|
||||
Sdl: sdl,
|
||||
MinDelaySeconds: 10,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// LatestSchema is the resolver for the latestSchema field.
|
||||
func (r *queryResolver) LatestSchema(ctx context.Context, ref string) (*model.SchemaUpdate, error) {
|
||||
orgId := middleware.OrganizationFromContext(ctx)
|
||||
userId := middleware.UserFromContext(ctx)
|
||||
|
||||
r.Logger.Info("LatestSchema query",
|
||||
"ref", ref,
|
||||
"orgId", orgId,
|
||||
"userId", userId,
|
||||
)
|
||||
|
||||
// If authenticated with API key (organization), check access
|
||||
if orgId != "" {
|
||||
_, err := r.apiKeyCanAccessRef(ctx, ref, false)
|
||||
if err != nil {
|
||||
r.Logger.Error("API key cannot access ref", "error", err, "ref", ref)
|
||||
return nil, err
|
||||
}
|
||||
} else if userId != "" {
|
||||
// For user authentication, check if user has access to ref through their organizations
|
||||
userOrgs := r.Cache.OrganizationsByUser(userId)
|
||||
if len(userOrgs) == 0 {
|
||||
r.Logger.Error("User has no organizations", "userId", userId)
|
||||
return nil, fmt.Errorf("user has no access to any organizations")
|
||||
}
|
||||
// Use the first organization's ID for querying
|
||||
// In a real-world scenario, you might want to check which org has access to this ref
|
||||
orgId = userOrgs[0].ID.String()
|
||||
r.Logger.Info("Using organization from user context", "orgId", orgId)
|
||||
} else {
|
||||
return nil, fmt.Errorf("no authentication provided")
|
||||
}
|
||||
|
||||
// Get current services and schema
|
||||
services, lastUpdate := r.Cache.Services(orgId, ref, "")
|
||||
r.Logger.Info("Fetching latest schema",
|
||||
"ref", ref,
|
||||
"orgId", orgId,
|
||||
"lastUpdate", lastUpdate,
|
||||
"servicesCount", len(services),
|
||||
)
|
||||
|
||||
subGraphs := make([]*model.SubGraph, len(services))
|
||||
for i, id := range services {
|
||||
sg, err := r.fetchSubGraph(ctx, id)
|
||||
if err != nil {
|
||||
r.Logger.Error("fetch subgraph", "error", err, "id", id)
|
||||
return nil, err
|
||||
}
|
||||
subGraphs[i] = &model.SubGraph{
|
||||
ID: sg.ID.String(),
|
||||
Service: sg.Service,
|
||||
@@ -163,11 +366,105 @@ func (r *queryResolver) Supergraph(ctx context.Context, ref string, isAfter *str
|
||||
ChangedAt: sg.ChangedAt,
|
||||
}
|
||||
}
|
||||
return &model.SubGraphs{
|
||||
ID: lastUpdate,
|
||||
SubGraphs: subGraphs,
|
||||
MinDelaySeconds: 10,
|
||||
}, nil
|
||||
|
||||
// Generate Cosmo router config (concurrency-limited)
|
||||
cosmoConfig, err := r.CosmoGenerator.Generate(ctx, subGraphs)
|
||||
if err != nil {
|
||||
r.Logger.Error("generate cosmo config", "error", err)
|
||||
cosmoConfig = "" // Return empty if generation fails
|
||||
}
|
||||
|
||||
update := &model.SchemaUpdate{
|
||||
Ref: ref,
|
||||
ID: lastUpdate,
|
||||
SubGraphs: subGraphs,
|
||||
CosmoRouterConfig: &cosmoConfig,
|
||||
}
|
||||
|
||||
r.Logger.Info("Latest schema fetched",
|
||||
"ref", update.Ref,
|
||||
"id", update.ID,
|
||||
"subGraphsCount", len(update.SubGraphs),
|
||||
"cosmoConfigLength", len(cosmoConfig),
|
||||
)
|
||||
|
||||
return update, nil
|
||||
}
|
||||
|
||||
// SchemaUpdates is the resolver for the schemaUpdates field.
|
||||
func (r *subscriptionResolver) SchemaUpdates(ctx context.Context, ref string) (<-chan *model.SchemaUpdate, error) {
|
||||
orgId := middleware.OrganizationFromContext(ctx)
|
||||
|
||||
r.Logger.Info("SchemaUpdates subscription started",
|
||||
"ref", ref,
|
||||
"orgId", orgId,
|
||||
)
|
||||
|
||||
_, err := r.apiKeyCanAccessRef(ctx, ref, false)
|
||||
if err != nil {
|
||||
r.Logger.Error("API key cannot access ref", "error", err, "ref", ref)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Subscribe to updates for this ref
|
||||
ch := r.PubSub.Subscribe(ref)
|
||||
|
||||
// Send initial state immediately
|
||||
go func() {
|
||||
services, lastUpdate := r.Cache.Services(orgId, ref, "")
|
||||
r.Logger.Info("Preparing initial schema update",
|
||||
"ref", ref,
|
||||
"orgId", orgId,
|
||||
"lastUpdate", lastUpdate,
|
||||
"servicesCount", len(services),
|
||||
)
|
||||
|
||||
subGraphs := make([]*model.SubGraph, len(services))
|
||||
for i, id := range services {
|
||||
sg, err := r.fetchSubGraph(ctx, id)
|
||||
if err != nil {
|
||||
r.Logger.Error("fetch subgraph for initial update", "error", err, "id", id)
|
||||
continue
|
||||
}
|
||||
subGraphs[i] = r.toGqlSubGraph(sg)
|
||||
}
|
||||
|
||||
// Generate Cosmo router config (concurrency-limited)
|
||||
cosmoConfig, err := r.CosmoGenerator.Generate(ctx, subGraphs)
|
||||
if err != nil {
|
||||
r.Logger.Error("generate cosmo config", "error", err)
|
||||
cosmoConfig = "" // Send empty if generation fails
|
||||
}
|
||||
|
||||
// Send initial update
|
||||
update := &model.SchemaUpdate{
|
||||
Ref: ref,
|
||||
ID: lastUpdate,
|
||||
SubGraphs: subGraphs,
|
||||
CosmoRouterConfig: &cosmoConfig,
|
||||
}
|
||||
|
||||
r.Logger.Info("Sending initial schema update",
|
||||
"ref", update.Ref,
|
||||
"id", update.ID,
|
||||
"subGraphsCount", len(update.SubGraphs),
|
||||
"cosmoConfigLength", len(cosmoConfig),
|
||||
)
|
||||
|
||||
select {
|
||||
case ch <- update:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
// Clean up subscription when context is done
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
r.PubSub.Unsubscribe(ref, ch)
|
||||
}()
|
||||
|
||||
return ch, nil
|
||||
}
|
||||
|
||||
// Mutation returns generated.MutationResolver implementation.
|
||||
@@ -176,7 +473,11 @@ func (r *Resolver) Mutation() generated.MutationResolver { return &mutationResol
|
||||
// Query returns generated.QueryResolver implementation.
|
||||
func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} }
|
||||
|
||||
// Subscription returns generated.SubscriptionResolver implementation.
|
||||
func (r *Resolver) Subscription() generated.SubscriptionResolver { return &subscriptionResolver{r} }
|
||||
|
||||
type (
|
||||
mutationResolver struct{ *Resolver }
|
||||
queryResolver struct{ *Resolver }
|
||||
mutationResolver struct{ *Resolver }
|
||||
queryResolver struct{ *Resolver }
|
||||
subscriptionResolver struct{ *Resolver }
|
||||
)
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build tools
|
||||
// +build tools
|
||||
|
||||
package graph
|
||||
|
||||
|
||||
@@ -3,9 +3,72 @@ package hash
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
)
|
||||
|
||||
// String creates a SHA256 hash of a string (legacy, for non-sensitive data)
|
||||
func String(s string) string {
|
||||
encoded := sha256.New().Sum([]byte(s))
|
||||
return base64.StdEncoding.EncodeToString(encoded)
|
||||
}
|
||||
|
||||
// APIKey hashes an API key using bcrypt for secure storage
|
||||
// Cost of 12 provides a good balance between security and performance
|
||||
func APIKey(key string) (string, error) {
|
||||
hash, err := bcrypt.GenerateFromPassword([]byte(key), 12)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(hash), nil
|
||||
}
|
||||
|
||||
// CompareAPIKey compares a plaintext API key with a hash
|
||||
// Supports both bcrypt (new) and SHA256 (legacy) hashes for backwards compatibility
|
||||
// Returns true if they match, false otherwise
|
||||
//
|
||||
// Migration Strategy:
|
||||
// Old API keys stored with SHA256 will continue to work. To upgrade them to bcrypt:
|
||||
// 1. Keys are automatically upgraded when users re-authenticate (if implemented)
|
||||
// 2. Or, run a one-time migration using MigrateAPIKeyHash when convenient
|
||||
func CompareAPIKey(hashedKey, plainKey string) bool {
|
||||
// Bcrypt hashes start with $2a$, $2b$, or $2y$
|
||||
// If the hash starts with $2, it's a bcrypt hash
|
||||
if len(hashedKey) > 2 && hashedKey[0] == '$' && hashedKey[1] == '2' {
|
||||
// New bcrypt hash
|
||||
err := bcrypt.CompareHashAndPassword([]byte(hashedKey), []byte(plainKey))
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// Legacy SHA256 hash - compare using the old method
|
||||
legacyHash := String(plainKey)
|
||||
return hashedKey == legacyHash
|
||||
}
|
||||
|
||||
// IsLegacyHash returns true if the hash is a legacy SHA256 hash (not bcrypt)
|
||||
func IsLegacyHash(hashedKey string) bool {
|
||||
return len(hashedKey) <= 2 || hashedKey[0] != '$' || hashedKey[1] != '2'
|
||||
}
|
||||
|
||||
// MigrateAPIKeyHash can be used to upgrade a legacy SHA256 hash to bcrypt
|
||||
// This is useful for one-time migrations of existing keys
|
||||
// Returns the new bcrypt hash if the key is legacy, otherwise returns the original
|
||||
func MigrateAPIKeyHash(currentHash, plainKey string) (string, bool, error) {
|
||||
// If already bcrypt, no migration needed
|
||||
if !IsLegacyHash(currentHash) {
|
||||
return currentHash, false, nil
|
||||
}
|
||||
|
||||
// Verify the legacy hash is correct before migrating
|
||||
if !CompareAPIKey(currentHash, plainKey) {
|
||||
return "", false, nil // Invalid key, don't migrate
|
||||
}
|
||||
|
||||
// Generate new bcrypt hash
|
||||
newHash, err := APIKey(plainKey)
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
|
||||
return newHash, true, nil
|
||||
}
|
||||
|
||||
@@ -0,0 +1,169 @@
|
||||
package hash
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAPIKey(t *testing.T) {
|
||||
key := "test_api_key_12345" // gitleaks:allow
|
||||
|
||||
hash1, err := APIKey(key)
|
||||
require.NoError(t, err)
|
||||
assert.NotEmpty(t, hash1)
|
||||
assert.NotEqual(t, key, hash1, "Hash should not equal plaintext")
|
||||
|
||||
// Bcrypt hashes should start with $2
|
||||
assert.True(t, strings.HasPrefix(hash1, "$2"), "Should be a bcrypt hash")
|
||||
|
||||
// Same key should produce different hashes (due to salt)
|
||||
hash2, err := APIKey(key)
|
||||
require.NoError(t, err)
|
||||
assert.NotEqual(t, hash1, hash2, "Bcrypt should produce different hashes with different salts")
|
||||
}
|
||||
|
||||
func TestCompareAPIKey_Bcrypt(t *testing.T) {
|
||||
key := "test_api_key_12345" // gitleaks:allow
|
||||
|
||||
hash, err := APIKey(key)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Correct key should match
|
||||
assert.True(t, CompareAPIKey(hash, key))
|
||||
|
||||
// Wrong key should not match
|
||||
assert.False(t, CompareAPIKey(hash, "wrong_key"))
|
||||
}
|
||||
|
||||
func TestCompareAPIKey_Legacy(t *testing.T) {
|
||||
key := "test_api_key_12345" // gitleaks:allow
|
||||
|
||||
// Create a legacy SHA256 hash
|
||||
legacyHash := String(key)
|
||||
|
||||
// Should still work with legacy hashes
|
||||
assert.True(t, CompareAPIKey(legacyHash, key))
|
||||
|
||||
// Wrong key should not match
|
||||
assert.False(t, CompareAPIKey(legacyHash, "wrong_key"))
|
||||
}
|
||||
|
||||
func TestCompareAPIKey_BackwardCompatibility(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
hashFunc func(string) string
|
||||
expectOK bool
|
||||
}{
|
||||
{
|
||||
name: "bcrypt hash",
|
||||
hashFunc: func(k string) string {
|
||||
h, _ := APIKey(k)
|
||||
return h
|
||||
},
|
||||
expectOK: true,
|
||||
},
|
||||
{
|
||||
name: "legacy SHA256 hash",
|
||||
hashFunc: func(k string) string {
|
||||
return String(k)
|
||||
},
|
||||
expectOK: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
key := "test_key_123"
|
||||
hash := tt.hashFunc(key)
|
||||
|
||||
result := CompareAPIKey(hash, key)
|
||||
assert.Equal(t, tt.expectOK, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestString(t *testing.T) {
|
||||
// Test that String function still works (for non-sensitive data)
|
||||
input := "test_string"
|
||||
hash1 := String(input)
|
||||
hash2 := String(input)
|
||||
|
||||
// SHA256 should be deterministic
|
||||
assert.Equal(t, hash1, hash2)
|
||||
assert.NotEmpty(t, hash1)
|
||||
assert.NotEqual(t, input, hash1)
|
||||
}
|
||||
|
||||
func TestIsLegacyHash(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
hash string
|
||||
isLegacy bool
|
||||
}{
|
||||
{
|
||||
name: "bcrypt hash",
|
||||
hash: "$2a$12$abcdefghijklmnopqrstuv",
|
||||
isLegacy: false,
|
||||
},
|
||||
{
|
||||
name: "SHA256 hash",
|
||||
hash: "dXNfYWtfMTIzNDU2Nzg5MDEyMzQ1NuOwxEKY",
|
||||
isLegacy: true,
|
||||
},
|
||||
{
|
||||
name: "empty string",
|
||||
hash: "",
|
||||
isLegacy: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.isLegacy, IsLegacyHash(tt.hash))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMigrateAPIKeyHash(t *testing.T) {
|
||||
plainKey := "test_api_key_123"
|
||||
|
||||
t.Run("migrate legacy hash", func(t *testing.T) {
|
||||
// Create a legacy SHA256 hash
|
||||
legacyHash := String(plainKey)
|
||||
|
||||
// Migrate it
|
||||
newHash, migrated, err := MigrateAPIKeyHash(legacyHash, plainKey)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, migrated, "Should indicate migration occurred")
|
||||
assert.NotEqual(t, legacyHash, newHash, "New hash should differ from legacy")
|
||||
assert.True(t, strings.HasPrefix(newHash, "$2"), "New hash should be bcrypt")
|
||||
|
||||
// Verify new hash works
|
||||
assert.True(t, CompareAPIKey(newHash, plainKey))
|
||||
})
|
||||
|
||||
t.Run("no migration needed for bcrypt", func(t *testing.T) {
|
||||
// Create a bcrypt hash
|
||||
bcryptHash, err := APIKey(plainKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Try to migrate it
|
||||
newHash, migrated, err := MigrateAPIKeyHash(bcryptHash, plainKey)
|
||||
require.NoError(t, err)
|
||||
assert.False(t, migrated, "Should not migrate bcrypt hash")
|
||||
assert.Equal(t, bcryptHash, newHash, "Hash should remain unchanged")
|
||||
})
|
||||
|
||||
t.Run("invalid key does not migrate", func(t *testing.T) {
|
||||
legacyHash := String("correct_key")
|
||||
|
||||
// Try to migrate with wrong plaintext
|
||||
newHash, migrated, err := MigrateAPIKeyHash(legacyHash, "wrong_key")
|
||||
require.NoError(t, err)
|
||||
assert.False(t, migrated, "Should not migrate invalid key")
|
||||
assert.Empty(t, newHash, "Should return empty for invalid key")
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,73 @@
|
||||
package health
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Checker struct {
|
||||
db *sql.DB
|
||||
logger *slog.Logger
|
||||
}
|
||||
|
||||
func New(db *sql.DB, logger *slog.Logger) *Checker {
|
||||
return &Checker{
|
||||
db: db,
|
||||
logger: logger,
|
||||
}
|
||||
}
|
||||
|
||||
type HealthStatus struct {
|
||||
Status string `json:"status"`
|
||||
Checks map[string]string `json:"checks,omitempty"`
|
||||
}
|
||||
|
||||
// LivenessHandler checks if the application is running
|
||||
// This is a simple check that always returns OK if the handler is reached
|
||||
func (h *Checker) LivenessHandler(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_ = json.NewEncoder(w).Encode(HealthStatus{
|
||||
Status: "UP",
|
||||
})
|
||||
}
|
||||
|
||||
// ReadinessHandler checks if the application is ready to accept traffic
|
||||
// This checks database connectivity and other critical dependencies
|
||||
func (h *Checker) ReadinessHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
checks := make(map[string]string)
|
||||
allHealthy := true
|
||||
|
||||
// Check database connectivity
|
||||
if err := h.db.PingContext(ctx); err != nil {
|
||||
h.logger.With("error", err).Warn("database health check failed")
|
||||
checks["database"] = "DOWN"
|
||||
allHealthy = false
|
||||
} else {
|
||||
checks["database"] = "UP"
|
||||
}
|
||||
|
||||
status := HealthStatus{
|
||||
Status: "UP",
|
||||
Checks: checks,
|
||||
}
|
||||
|
||||
if !allHealthy {
|
||||
status.Status = "DOWN"
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusServiceUnavailable)
|
||||
_ = json.NewEncoder(w).Encode(status)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_ = json.NewEncoder(w).Encode(status)
|
||||
}
|
||||
@@ -0,0 +1,75 @@
|
||||
package health
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/DATA-DOG/go-sqlmock"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestLivenessHandler(t *testing.T) {
|
||||
logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
|
||||
db, _, err := sqlmock.New()
|
||||
require.NoError(t, err)
|
||||
defer db.Close()
|
||||
|
||||
checker := New(db, logger)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet, "/health/live", nil)
|
||||
rec := httptest.NewRecorder()
|
||||
|
||||
checker.LivenessHandler(rec, req)
|
||||
|
||||
assert.Equal(t, http.StatusOK, rec.Code)
|
||||
assert.Contains(t, rec.Body.String(), `"status":"UP"`)
|
||||
}
|
||||
|
||||
func TestReadinessHandler_Healthy(t *testing.T) {
|
||||
logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
|
||||
db, mock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
|
||||
require.NoError(t, err)
|
||||
defer db.Close()
|
||||
|
||||
// Expect a ping and return success
|
||||
mock.ExpectPing().WillReturnError(nil)
|
||||
|
||||
checker := New(db, logger)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet, "/health/ready", nil)
|
||||
rec := httptest.NewRecorder()
|
||||
|
||||
checker.ReadinessHandler(rec, req)
|
||||
|
||||
assert.Equal(t, http.StatusOK, rec.Code)
|
||||
assert.Contains(t, rec.Body.String(), `"status":"UP"`)
|
||||
assert.Contains(t, rec.Body.String(), `"database":"UP"`)
|
||||
assert.NoError(t, mock.ExpectationsWereMet())
|
||||
}
|
||||
|
||||
func TestReadinessHandler_DatabaseDown(t *testing.T) {
|
||||
logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
|
||||
db, mock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
|
||||
require.NoError(t, err)
|
||||
defer db.Close()
|
||||
|
||||
// Expect a ping and return error
|
||||
mock.ExpectPing().WillReturnError(sql.ErrConnDone)
|
||||
|
||||
checker := New(db, logger)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet, "/health/ready", nil)
|
||||
rec := httptest.NewRecorder()
|
||||
|
||||
checker.ReadinessHandler(rec, req)
|
||||
|
||||
assert.Equal(t, http.StatusServiceUnavailable, rec.Code)
|
||||
assert.Contains(t, rec.Body.String(), `"status":"DOWN"`)
|
||||
assert.Contains(t, rec.Body.String(), `"database":"DOWN"`)
|
||||
assert.NoError(t, mock.ExpectationsWereMet())
|
||||
}
|
||||
+1
-1
@@ -2,7 +2,7 @@ apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
labels:
|
||||
app: schemas
|
||||
app.kubernetes.io/name: schemas
|
||||
name: schemas
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
|
||||
@@ -0,0 +1,7 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: schemas
|
||||
data:
|
||||
LOG_FORMAT: "otel"
|
||||
ENVIRONMENT: "production"
|
||||
@@ -0,0 +1,6 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: schemas
|
||||
data:
|
||||
ENVIRONMENT: "development"
|
||||
+23
-12
@@ -7,7 +7,7 @@ apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: schemas
|
||||
app.kubernetes.io/name: schemas
|
||||
name: schemas
|
||||
annotations:
|
||||
kubernetes.io/change-cause: "${TIMESTAMP} Deployed commit id: ${COMMIT}"
|
||||
@@ -15,7 +15,7 @@ spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: schemas
|
||||
app.kubernetes.io/name: schemas
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxSurge: 1
|
||||
@@ -24,7 +24,7 @@ spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: schemas
|
||||
app.kubernetes.io/name: schemas
|
||||
spec:
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
@@ -33,7 +33,7 @@ spec:
|
||||
podAffinityTerm:
|
||||
labelSelector:
|
||||
matchExpressions:
|
||||
- key: "app"
|
||||
- key: "app.kubernetes.io/name"
|
||||
operator: In
|
||||
values:
|
||||
- schemas
|
||||
@@ -41,27 +41,38 @@ spec:
|
||||
containers:
|
||||
- name: schemas
|
||||
resources:
|
||||
limits:
|
||||
memory: "100Mi"
|
||||
requests:
|
||||
memory: "100Mi"
|
||||
cpu: "20m"
|
||||
memory: "20Mi"
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /health/live
|
||||
port: 8080
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 3
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
path: /health/ready
|
||||
port: 8080
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 5
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 3
|
||||
imagePullPolicy: IfNotPresent
|
||||
image: registry.gitlab.com/unboundsoftware/schemas:${COMMIT}
|
||||
image: oci.unbound.se/unboundsoftware/schemas:${COMMIT}
|
||||
ports:
|
||||
- name: api
|
||||
containerPort: 8080
|
||||
env:
|
||||
- name: OTEL_EXPORTER_OTLP_ENDPOINT
|
||||
value: http://k8s-monitoring-alloy-receiver.monitoring.svc.cluster.local:4318
|
||||
envFrom:
|
||||
- secretRef:
|
||||
- configMapRef:
|
||||
name: schemas
|
||||
- secretRef:
|
||||
name: rabbitmq
|
||||
name: schemas
|
||||
restartPolicy: Always
|
||||
serviceAccountName: schemas
|
||||
---
|
||||
@@ -77,5 +88,5 @@ spec:
|
||||
protocol: TCP
|
||||
targetPort: 8080
|
||||
selector:
|
||||
app: schemas
|
||||
app.kubernetes.io/name: schemas
|
||||
type: NodePort
|
||||
|
||||
@@ -3,7 +3,6 @@ kind: Ingress
|
||||
metadata:
|
||||
name: schemas-ingress
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: "alb"
|
||||
alb.ingress.kubernetes.io/group.name: "default"
|
||||
alb.ingress.kubernetes.io/scheme: internet-facing
|
||||
alb.ingress.kubernetes.io/target-type: instance
|
||||
@@ -11,6 +10,7 @@ metadata:
|
||||
alb.ingress.kubernetes.io/ssl-redirect: "443"
|
||||
alb.ingress.kubernetes.io/healthcheck-path: '/health'
|
||||
spec:
|
||||
ingressClassName: "alb"
|
||||
rules:
|
||||
- host: "schemas.unbound.se"
|
||||
http:
|
||||
|
||||
@@ -0,0 +1,9 @@
|
||||
apiVersion: policy/v1
|
||||
kind: PodDisruptionBudget
|
||||
metadata:
|
||||
name: schemas-pdb
|
||||
spec:
|
||||
minAvailable: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: schemas
|
||||
@@ -5,3 +5,4 @@ metadata:
|
||||
stringData:
|
||||
API_KEY: supersecret123!
|
||||
POSTGRES_URL: "postgres://postgres:postgres@postgres:5432/schemas?sslmode=disable"
|
||||
AMQP_URL: "amqp://user:password@rabbitmq:5672/"
|
||||
|
||||
+5
-4
@@ -1,8 +1,7 @@
|
||||
apiVersion: external-secrets.io/v1beta1
|
||||
apiVersion: external-secrets.io/v1
|
||||
kind: ExternalSecret
|
||||
metadata:
|
||||
name: schemas
|
||||
namespace: default
|
||||
spec:
|
||||
refreshInterval: 1h
|
||||
secretStoreRef:
|
||||
@@ -11,13 +10,15 @@ spec:
|
||||
target:
|
||||
creationPolicy: Owner
|
||||
template:
|
||||
mergePolicy: Merge
|
||||
engineVersion: 'v2'
|
||||
data:
|
||||
POSTGRES_URL: "postgres://{{ .DB_USERNAME }}:{{ .DB_PASSWORD }}@{{ .DB_HOST }}:{{ .DB_PORT }}/schemas?sslmode=disable"
|
||||
API_KEY: "{{ .API_KEY }}"
|
||||
SENTRY_DSN: "{{ .SENTRY_DSN }}"
|
||||
SENTRY_ENVIRONMENT: "{{ .SENTRY_ENVIRONMENT }}"
|
||||
dataFrom:
|
||||
- extract:
|
||||
key: services/schemas
|
||||
- extract:
|
||||
key: rds/postgres/prod-psql
|
||||
- extract:
|
||||
key: mq/rabbit/prod
|
||||
|
||||
@@ -0,0 +1,65 @@
|
||||
package logging
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
"os"
|
||||
|
||||
"go.opentelemetry.io/contrib/bridges/otelslog"
|
||||
)
|
||||
|
||||
type Logger interface {
|
||||
Info(msg string, args ...any)
|
||||
Warn(msg string, args ...any)
|
||||
Error(msg string, args ...any)
|
||||
}
|
||||
|
||||
var defaultLogger *slog.Logger
|
||||
|
||||
type contextKey string
|
||||
|
||||
const loggerKey = contextKey("logger")
|
||||
|
||||
func SetupLogger(logLevel, logFormat, serviceName, buildVersion string) *slog.Logger {
|
||||
var leveler slog.LevelVar
|
||||
|
||||
err := leveler.UnmarshalText([]byte(logLevel))
|
||||
|
||||
handlerOpts := &slog.HandlerOptions{
|
||||
AddSource: false,
|
||||
Level: leveler.Level(),
|
||||
ReplaceAttr: nil,
|
||||
}
|
||||
|
||||
var handler slog.Handler
|
||||
switch logFormat {
|
||||
case "json":
|
||||
handler = slog.NewJSONHandler(os.Stdout, handlerOpts)
|
||||
case "text":
|
||||
handler = slog.NewTextHandler(os.Stdout, handlerOpts)
|
||||
case "otel":
|
||||
handler = otelslog.NewHandler(serviceName,
|
||||
otelslog.WithVersion(buildVersion))
|
||||
}
|
||||
defaultLogger = slog.New(handler).With("service", serviceName).With("version", buildVersion)
|
||||
if err != nil {
|
||||
defaultLogger.With("err", err).Error("Failed to parse log level")
|
||||
os.Exit(1)
|
||||
}
|
||||
slog.SetDefault(defaultLogger)
|
||||
return defaultLogger
|
||||
}
|
||||
|
||||
// ContextWithLogger returns a new Context with the logger attached
|
||||
func ContextWithLogger(ctx context.Context, logger *slog.Logger) context.Context {
|
||||
return context.WithValue(ctx, loggerKey, logger)
|
||||
}
|
||||
|
||||
// LoggerFromContext returns a logger from the passed context or the default logger
|
||||
func LoggerFromContext(ctx context.Context) *slog.Logger {
|
||||
logger := ctx.Value(loggerKey)
|
||||
if l, ok := logger.(*slog.Logger); ok {
|
||||
return l
|
||||
}
|
||||
return defaultLogger
|
||||
}
|
||||
@@ -0,0 +1,48 @@
|
||||
package logging
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"log/slog"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func NewMockLogger() *MockLogger {
|
||||
logged := &bytes.Buffer{}
|
||||
|
||||
return &MockLogger{
|
||||
logged: logged,
|
||||
logger: slog.New(slog.NewTextHandler(logged, &slog.HandlerOptions{
|
||||
ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {
|
||||
if a.Key == "time" {
|
||||
return slog.Attr{}
|
||||
}
|
||||
return a
|
||||
},
|
||||
})),
|
||||
}
|
||||
}
|
||||
|
||||
type MockLogger struct {
|
||||
logger *slog.Logger
|
||||
logged *bytes.Buffer
|
||||
}
|
||||
|
||||
func (m *MockLogger) Logger() *slog.Logger {
|
||||
return m.logger
|
||||
}
|
||||
|
||||
func (m *MockLogger) Check(t testing.TB, wantLogged []string) {
|
||||
var gotLogged []string
|
||||
if m.logged.String() != "" {
|
||||
gotLogged = strings.Split(m.logged.String(), "\n")
|
||||
gotLogged = gotLogged[:len(gotLogged)-1]
|
||||
}
|
||||
if len(wantLogged) == 0 {
|
||||
assert.Empty(t, gotLogged)
|
||||
return
|
||||
}
|
||||
assert.Equal(t, wantLogged, gotLogged)
|
||||
}
|
||||
+58
-16
@@ -6,10 +6,8 @@ import (
|
||||
"net/http"
|
||||
|
||||
"github.com/99designs/gqlgen/graphql"
|
||||
"github.com/golang-jwt/jwt/v4"
|
||||
|
||||
"gitlab.com/unboundsoftware/schemas/domain"
|
||||
"gitlab.com/unboundsoftware/schemas/hash"
|
||||
"gitea.unbound.se/unboundsoftware/schemas/domain"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -34,14 +32,9 @@ type AuthMiddleware struct {
|
||||
func (m *AuthMiddleware) Handler(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
token, err := TokenFromContext(r.Context())
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
_, _ = w.Write([]byte("Invalid JWT token format"))
|
||||
return
|
||||
}
|
||||
if token != nil {
|
||||
ctx = context.WithValue(ctx, UserKey, token.Claims.(jwt.MapClaims)["sub"])
|
||||
claims := ClaimsFromContext(r.Context())
|
||||
if claims != nil {
|
||||
ctx = context.WithValue(ctx, UserKey, claims.RegisteredClaims.Subject)
|
||||
}
|
||||
apiKey, err := ApiKeyFromContext(r.Context())
|
||||
if err != nil {
|
||||
@@ -49,7 +42,9 @@ func (m *AuthMiddleware) Handler(next http.Handler) http.Handler {
|
||||
_, _ = w.Write([]byte("Invalid API Key format"))
|
||||
return
|
||||
}
|
||||
if organization := m.cache.OrganizationByAPIKey(hash.String(apiKey)); organization != nil {
|
||||
// Cache handles hash comparison internally
|
||||
organization := m.cache.OrganizationByAPIKey(apiKey)
|
||||
if organization != nil {
|
||||
ctx = context.WithValue(ctx, OrganizationKey, *organization)
|
||||
}
|
||||
|
||||
@@ -66,6 +61,26 @@ func UserFromContext(ctx context.Context) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func UserHasRole(ctx context.Context, role string) bool {
|
||||
claims := ClaimsFromContext(ctx)
|
||||
if claims == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
customClaims, ok := claims.CustomClaims.(*CustomClaims)
|
||||
if !ok || customClaims == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, r := range customClaims.Roles {
|
||||
if r == role {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func OrganizationFromContext(ctx context.Context) string {
|
||||
if value := ctx.Value(OrganizationKey); value != nil {
|
||||
if u, ok := value.(domain.Organization); ok {
|
||||
@@ -76,15 +91,42 @@ func OrganizationFromContext(ctx context.Context) string {
|
||||
}
|
||||
|
||||
func (m *AuthMiddleware) Directive(ctx context.Context, _ interface{}, next graphql.Resolver, user *bool, organization *bool) (res interface{}, err error) {
|
||||
if user != nil && *user {
|
||||
if u := UserFromContext(ctx); u == "" {
|
||||
userRequired := user != nil && *user
|
||||
orgRequired := organization != nil && *organization
|
||||
|
||||
u := UserFromContext(ctx)
|
||||
orgId := OrganizationFromContext(ctx)
|
||||
|
||||
fmt.Printf("[Auth Directive] userRequired=%v, orgRequired=%v, hasUser=%v, hasOrg=%v\n",
|
||||
userRequired, orgRequired, u != "", orgId != "")
|
||||
|
||||
// If both are required, it means EITHER is acceptable (OR logic)
|
||||
if userRequired && orgRequired {
|
||||
if u == "" && orgId == "" {
|
||||
fmt.Printf("[Auth Directive] REJECTED: Neither user nor organization available\n")
|
||||
return nil, fmt.Errorf("authentication required: provide either user token or organization API key")
|
||||
}
|
||||
fmt.Printf("[Auth Directive] ACCEPTED: Has user=%v OR organization=%v\n", u != "", orgId != "")
|
||||
return next(ctx)
|
||||
}
|
||||
|
||||
// Only user required
|
||||
if userRequired {
|
||||
if u == "" {
|
||||
fmt.Printf("[Auth Directive] REJECTED: No user available\n")
|
||||
return nil, fmt.Errorf("no user available in request")
|
||||
}
|
||||
fmt.Printf("[Auth Directive] ACCEPTED: User authenticated\n")
|
||||
}
|
||||
if organization != nil && *organization {
|
||||
if orgId := OrganizationFromContext(ctx); orgId == "" {
|
||||
|
||||
// Only organization required
|
||||
if orgRequired {
|
||||
if orgId == "" {
|
||||
fmt.Printf("[Auth Directive] REJECTED: No organization available\n")
|
||||
return nil, fmt.Errorf("no organization available in request")
|
||||
}
|
||||
fmt.Printf("[Auth Directive] ACCEPTED: Organization authenticated\n")
|
||||
}
|
||||
|
||||
return next(ctx)
|
||||
}
|
||||
|
||||
+47
-153
@@ -2,39 +2,34 @@ package middleware
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"log"
|
||||
"net/url"
|
||||
|
||||
mw "github.com/auth0/go-jwt-middleware/v2"
|
||||
"github.com/golang-jwt/jwt/v4"
|
||||
"github.com/pkg/errors"
|
||||
jwtmiddleware "github.com/auth0/go-jwt-middleware/v3"
|
||||
"github.com/auth0/go-jwt-middleware/v3/jwks"
|
||||
"github.com/auth0/go-jwt-middleware/v3/validator"
|
||||
)
|
||||
|
||||
// CustomClaims contains custom claims from the JWT token.
|
||||
type CustomClaims struct {
|
||||
Roles []string `json:"https://unbound.se/roles"`
|
||||
}
|
||||
|
||||
// Validate implements the validator.CustomClaims interface.
|
||||
func (c CustomClaims) Validate(_ context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type Auth0 struct {
|
||||
domain string
|
||||
audience string
|
||||
client *http.Client
|
||||
cache JwksCache
|
||||
}
|
||||
|
||||
func NewAuth0(audience, domain string, strictSsl bool) *Auth0 {
|
||||
customTransport := http.DefaultTransport.(*http.Transport).Clone()
|
||||
customTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: !strictSsl}
|
||||
client := &http.Client{Transport: customTransport}
|
||||
|
||||
func NewAuth0(audience, domain string, _ bool) *Auth0 {
|
||||
return &Auth0{
|
||||
domain: domain,
|
||||
audience: audience,
|
||||
client: client,
|
||||
cache: JwksCache{
|
||||
RWMutex: &sync.RWMutex{},
|
||||
cache: make(map[string]cacheItem),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -42,148 +37,47 @@ type Response struct {
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
type Jwks struct {
|
||||
Keys []JSONWebKeys `json:"keys"`
|
||||
}
|
||||
|
||||
type JSONWebKeys struct {
|
||||
Kty string `json:"kty"`
|
||||
Kid string `json:"kid"`
|
||||
Use string `json:"use"`
|
||||
N string `json:"n"`
|
||||
E string `json:"e"`
|
||||
X5c []string `json:"x5c"`
|
||||
}
|
||||
|
||||
func (a *Auth0) ValidationKeyGetter() func(token *jwt.Token) (interface{}, error) {
|
||||
func (a *Auth0) Middleware() *jwtmiddleware.JWTMiddleware {
|
||||
issuer := fmt.Sprintf("https://%s/", a.domain)
|
||||
return func(token *jwt.Token) (interface{}, error) {
|
||||
// Verify 'aud' claim
|
||||
aud := a.audience
|
||||
checkAud := token.Claims.(jwt.MapClaims).VerifyAudience(aud, false)
|
||||
if !checkAud {
|
||||
return token, errors.New("Invalid audience.")
|
||||
}
|
||||
// Verify 'iss' claim
|
||||
iss := issuer
|
||||
checkIss := token.Claims.(jwt.MapClaims).VerifyIssuer(iss, false)
|
||||
if !checkIss {
|
||||
return token, errors.New("Invalid issuer.")
|
||||
}
|
||||
|
||||
cert, err := a.getPemCert(token)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
|
||||
result, _ := jwt.ParseRSAPublicKeyFromPEM([]byte(cert))
|
||||
return result, nil
|
||||
issuerURL, err := url.Parse(issuer)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to parse issuer URL: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Auth0) Middleware() *mw.JWTMiddleware {
|
||||
jwtMiddleware := mw.New(func(ctx context.Context, token string) (interface{}, error) {
|
||||
jwtToken, err := jwt.Parse(token, a.ValidationKeyGetter())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, ok := jwtToken.Method.(*jwt.SigningMethodRSA); !ok {
|
||||
return nil, fmt.Errorf("unexpected signing method: %v", jwtToken.Header["alg"])
|
||||
}
|
||||
err = jwtToken.Claims.Valid()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return jwtToken, nil
|
||||
},
|
||||
mw.WithTokenExtractor(func(r *http.Request) (string, error) {
|
||||
token := r.Header.Get("Authorization")
|
||||
if strings.HasPrefix(token, "Bearer ") {
|
||||
return token[7:], nil
|
||||
}
|
||||
return "", nil
|
||||
provider, err := jwks.NewCachingProvider(jwks.WithIssuerURL(issuerURL))
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create JWKS provider: %v", err)
|
||||
}
|
||||
|
||||
jwtValidator, err := validator.New(
|
||||
validator.WithKeyFunc(provider.KeyFunc),
|
||||
validator.WithAlgorithm(validator.RS256),
|
||||
validator.WithIssuer(issuer),
|
||||
validator.WithAudience(a.audience),
|
||||
validator.WithCustomClaims(func() validator.CustomClaims {
|
||||
return &CustomClaims{}
|
||||
}),
|
||||
mw.WithCredentialsOptional(true),
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create JWT validator: %v", err)
|
||||
}
|
||||
|
||||
jwtMiddleware, err := jwtmiddleware.New(
|
||||
jwtmiddleware.WithValidator(jwtValidator),
|
||||
jwtmiddleware.WithCredentialsOptional(true),
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create JWT middleware: %v", err)
|
||||
}
|
||||
|
||||
return jwtMiddleware
|
||||
}
|
||||
|
||||
func TokenFromContext(ctx context.Context) (*jwt.Token, error) {
|
||||
if value := ctx.Value(mw.ContextKey{}); value != nil {
|
||||
if u, ok := value.(*jwt.Token); ok {
|
||||
return u, nil
|
||||
}
|
||||
return nil, fmt.Errorf("token is in wrong format")
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (a *Auth0) cacheGetWellknown(url string) (*Jwks, error) {
|
||||
if value := a.cache.get(url); value != nil {
|
||||
return value, nil
|
||||
}
|
||||
jwks := &Jwks{}
|
||||
resp, err := a.client.Get(url)
|
||||
func ClaimsFromContext(ctx context.Context) *validator.ValidatedClaims {
|
||||
claims, err := jwtmiddleware.GetClaims[*validator.ValidatedClaims](ctx)
|
||||
if err != nil {
|
||||
return jwks, err
|
||||
}
|
||||
defer func() {
|
||||
_ = resp.Body.Close()
|
||||
}()
|
||||
err = json.NewDecoder(resp.Body).Decode(jwks)
|
||||
if err == nil && jwks != nil {
|
||||
a.cache.put(url, jwks)
|
||||
}
|
||||
return jwks, err
|
||||
}
|
||||
|
||||
func (a *Auth0) getPemCert(token *jwt.Token) (string, error) {
|
||||
jwks, err := a.cacheGetWellknown(fmt.Sprintf("https://%s/.well-known/jwks.json", a.domain))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
var cert string
|
||||
for k := range jwks.Keys {
|
||||
if token.Header["kid"] == jwks.Keys[k].Kid {
|
||||
cert = "-----BEGIN CERTIFICATE-----\n" + jwks.Keys[k].X5c[0] + "\n-----END CERTIFICATE-----"
|
||||
}
|
||||
}
|
||||
|
||||
if cert == "" {
|
||||
err := errors.New("Unable to find appropriate key.")
|
||||
return cert, err
|
||||
}
|
||||
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
type JwksCache struct {
|
||||
*sync.RWMutex
|
||||
cache map[string]cacheItem
|
||||
}
|
||||
type cacheItem struct {
|
||||
data *Jwks
|
||||
expiration time.Time
|
||||
}
|
||||
|
||||
func (c *JwksCache) get(url string) *Jwks {
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
if value, ok := c.cache[url]; ok {
|
||||
if time.Now().After(value.expiration) {
|
||||
return nil
|
||||
}
|
||||
return value.data
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *JwksCache) put(url string, jwks *Jwks) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
c.cache[url] = cacheItem{
|
||||
data: jwks,
|
||||
expiration: time.Now().Add(time.Minute * 60),
|
||||
return nil
|
||||
}
|
||||
return claims
|
||||
}
|
||||
|
||||
@@ -0,0 +1,566 @@
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/auth0/go-jwt-middleware/v3/core"
|
||||
"github.com/auth0/go-jwt-middleware/v3/validator"
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gitlab.com/unboundsoftware/eventsourced/eventsourced"
|
||||
|
||||
"gitea.unbound.se/unboundsoftware/schemas/domain"
|
||||
)
|
||||
|
||||
// MockCache is a mock implementation of the Cache interface
|
||||
type MockCache struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
func (m *MockCache) OrganizationByAPIKey(apiKey string) *domain.Organization {
|
||||
args := m.Called(apiKey)
|
||||
if args.Get(0) == nil {
|
||||
return nil
|
||||
}
|
||||
return args.Get(0).(*domain.Organization)
|
||||
}
|
||||
|
||||
func TestAuthMiddleware_Handler_WithValidAPIKey(t *testing.T) {
|
||||
// Setup
|
||||
mockCache := new(MockCache)
|
||||
authMiddleware := NewAuth(mockCache)
|
||||
|
||||
orgID := uuid.New()
|
||||
expectedOrg := &domain.Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregate{
|
||||
ID: eventsourced.IdFromString(orgID.String()),
|
||||
},
|
||||
Name: "Test Organization",
|
||||
}
|
||||
|
||||
apiKey := "test-api-key-123"
|
||||
|
||||
// Mock expects plaintext key (cache handles hashing internally)
|
||||
mockCache.On("OrganizationByAPIKey", apiKey).Return(expectedOrg)
|
||||
|
||||
// Create a test handler that checks the context
|
||||
var capturedOrg *domain.Organization
|
||||
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if org := r.Context().Value(OrganizationKey); org != nil {
|
||||
if o, ok := org.(domain.Organization); ok {
|
||||
capturedOrg = &o
|
||||
}
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})
|
||||
|
||||
// Create request with API key in context
|
||||
req := httptest.NewRequest(http.MethodGet, "/test", nil)
|
||||
ctx := context.WithValue(req.Context(), ApiKey, apiKey)
|
||||
req = req.WithContext(ctx)
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
|
||||
// Execute
|
||||
authMiddleware.Handler(testHandler).ServeHTTP(rec, req)
|
||||
|
||||
// Assert
|
||||
assert.Equal(t, http.StatusOK, rec.Code)
|
||||
require.NotNil(t, capturedOrg)
|
||||
assert.Equal(t, expectedOrg.Name, capturedOrg.Name)
|
||||
assert.Equal(t, expectedOrg.ID.String(), capturedOrg.ID.String())
|
||||
mockCache.AssertExpectations(t)
|
||||
}
|
||||
|
||||
func TestAuthMiddleware_Handler_WithInvalidAPIKey(t *testing.T) {
|
||||
// Setup
|
||||
mockCache := new(MockCache)
|
||||
authMiddleware := NewAuth(mockCache)
|
||||
|
||||
apiKey := "invalid-api-key"
|
||||
|
||||
// Mock expects plaintext key (cache handles hashing internally)
|
||||
mockCache.On("OrganizationByAPIKey", apiKey).Return(nil)
|
||||
|
||||
// Create a test handler that checks the context
|
||||
var capturedOrg *domain.Organization
|
||||
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if org := r.Context().Value(OrganizationKey); org != nil {
|
||||
if o, ok := org.(domain.Organization); ok {
|
||||
capturedOrg = &o
|
||||
}
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})
|
||||
|
||||
// Create request with API key in context
|
||||
req := httptest.NewRequest(http.MethodGet, "/test", nil)
|
||||
ctx := context.WithValue(req.Context(), ApiKey, apiKey)
|
||||
req = req.WithContext(ctx)
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
|
||||
// Execute
|
||||
authMiddleware.Handler(testHandler).ServeHTTP(rec, req)
|
||||
|
||||
// Assert
|
||||
assert.Equal(t, http.StatusOK, rec.Code)
|
||||
assert.Nil(t, capturedOrg, "Organization should not be set for invalid API key")
|
||||
mockCache.AssertExpectations(t)
|
||||
}
|
||||
|
||||
func TestAuthMiddleware_Handler_WithoutAPIKey(t *testing.T) {
|
||||
// Setup
|
||||
mockCache := new(MockCache)
|
||||
authMiddleware := NewAuth(mockCache)
|
||||
|
||||
// The middleware passes the plaintext API key (cache handles hashing)
|
||||
mockCache.On("OrganizationByAPIKey", "").Return(nil)
|
||||
|
||||
// Create a test handler that checks the context
|
||||
var capturedOrg *domain.Organization
|
||||
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if org := r.Context().Value(OrganizationKey); org != nil {
|
||||
if o, ok := org.(domain.Organization); ok {
|
||||
capturedOrg = &o
|
||||
}
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})
|
||||
|
||||
// Create request without API key
|
||||
req := httptest.NewRequest(http.MethodGet, "/test", nil)
|
||||
rec := httptest.NewRecorder()
|
||||
|
||||
// Execute
|
||||
authMiddleware.Handler(testHandler).ServeHTTP(rec, req)
|
||||
|
||||
// Assert
|
||||
assert.Equal(t, http.StatusOK, rec.Code)
|
||||
assert.Nil(t, capturedOrg, "Organization should not be set without API key")
|
||||
mockCache.AssertExpectations(t)
|
||||
}
|
||||
|
||||
func TestAuthMiddleware_Handler_WithValidJWT(t *testing.T) {
|
||||
// Setup
|
||||
mockCache := new(MockCache)
|
||||
authMiddleware := NewAuth(mockCache)
|
||||
|
||||
// The middleware passes the plaintext API key (cache handles hashing)
|
||||
mockCache.On("OrganizationByAPIKey", "").Return(nil)
|
||||
|
||||
userID := "user-123"
|
||||
claims := &validator.ValidatedClaims{
|
||||
RegisteredClaims: validator.RegisteredClaims{
|
||||
Subject: userID,
|
||||
},
|
||||
}
|
||||
|
||||
// Create a test handler that checks the context
|
||||
var capturedUser string
|
||||
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if user := r.Context().Value(UserKey); user != nil {
|
||||
if u, ok := user.(string); ok {
|
||||
capturedUser = u
|
||||
}
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})
|
||||
|
||||
// Create request with JWT claims in context
|
||||
req := httptest.NewRequest(http.MethodGet, "/test", nil)
|
||||
ctx := core.SetClaims(req.Context(), claims)
|
||||
req = req.WithContext(ctx)
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
|
||||
// Execute
|
||||
authMiddleware.Handler(testHandler).ServeHTTP(rec, req)
|
||||
|
||||
// Assert
|
||||
assert.Equal(t, http.StatusOK, rec.Code)
|
||||
assert.Equal(t, userID, capturedUser)
|
||||
}
|
||||
|
||||
func TestAuthMiddleware_Handler_APIKeyErrorHandling(t *testing.T) {
|
||||
// Setup
|
||||
mockCache := new(MockCache)
|
||||
authMiddleware := NewAuth(mockCache)
|
||||
|
||||
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})
|
||||
|
||||
// Create request with invalid API key type in context
|
||||
req := httptest.NewRequest(http.MethodGet, "/test", nil)
|
||||
ctx := context.WithValue(req.Context(), ApiKey, 12345) // Invalid type
|
||||
req = req.WithContext(ctx)
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
|
||||
// Execute
|
||||
authMiddleware.Handler(testHandler).ServeHTTP(rec, req)
|
||||
|
||||
// Assert
|
||||
assert.Equal(t, http.StatusInternalServerError, rec.Code)
|
||||
assert.Contains(t, rec.Body.String(), "Invalid API Key format")
|
||||
}
|
||||
|
||||
func TestAuthMiddleware_Handler_JWTMissingClaims(t *testing.T) {
|
||||
// Setup
|
||||
mockCache := new(MockCache)
|
||||
authMiddleware := NewAuth(mockCache)
|
||||
|
||||
// The middleware passes the plaintext API key (cache handles hashing)
|
||||
mockCache.On("OrganizationByAPIKey", "").Return(nil)
|
||||
|
||||
// Create a test handler that checks the context
|
||||
var capturedUser string
|
||||
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if user := r.Context().Value(UserKey); user != nil {
|
||||
if u, ok := user.(string); ok {
|
||||
capturedUser = u
|
||||
}
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})
|
||||
|
||||
// Create request without JWT claims - user should not be set
|
||||
req := httptest.NewRequest(http.MethodGet, "/test", nil)
|
||||
rec := httptest.NewRecorder()
|
||||
|
||||
// Execute
|
||||
authMiddleware.Handler(testHandler).ServeHTTP(rec, req)
|
||||
|
||||
// Assert
|
||||
assert.Equal(t, http.StatusOK, rec.Code)
|
||||
assert.Empty(t, capturedUser, "User should not be set when no claims in context")
|
||||
}
|
||||
|
||||
func TestAuthMiddleware_Handler_BothJWTAndAPIKey(t *testing.T) {
|
||||
// Setup
|
||||
mockCache := new(MockCache)
|
||||
authMiddleware := NewAuth(mockCache)
|
||||
|
||||
orgID := uuid.New()
|
||||
expectedOrg := &domain.Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregate{
|
||||
ID: eventsourced.IdFromString(orgID.String()),
|
||||
},
|
||||
Name: "Test Organization",
|
||||
}
|
||||
|
||||
userID := "user-123"
|
||||
apiKey := "test-api-key-123"
|
||||
|
||||
claims := &validator.ValidatedClaims{
|
||||
RegisteredClaims: validator.RegisteredClaims{
|
||||
Subject: userID,
|
||||
},
|
||||
}
|
||||
|
||||
// Mock expects plaintext key (cache handles hashing internally)
|
||||
mockCache.On("OrganizationByAPIKey", apiKey).Return(expectedOrg)
|
||||
|
||||
// Create a test handler that checks both user and organization in context
|
||||
var capturedUser string
|
||||
var capturedOrg *domain.Organization
|
||||
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if user := r.Context().Value(UserKey); user != nil {
|
||||
if u, ok := user.(string); ok {
|
||||
capturedUser = u
|
||||
}
|
||||
}
|
||||
if org := r.Context().Value(OrganizationKey); org != nil {
|
||||
if o, ok := org.(domain.Organization); ok {
|
||||
capturedOrg = &o
|
||||
}
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})
|
||||
|
||||
// Create request with both JWT claims and API key in context
|
||||
req := httptest.NewRequest(http.MethodGet, "/test", nil)
|
||||
ctx := core.SetClaims(req.Context(), claims)
|
||||
ctx = context.WithValue(ctx, ApiKey, apiKey)
|
||||
req = req.WithContext(ctx)
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
|
||||
// Execute
|
||||
authMiddleware.Handler(testHandler).ServeHTTP(rec, req)
|
||||
|
||||
// Assert
|
||||
assert.Equal(t, http.StatusOK, rec.Code)
|
||||
assert.Equal(t, userID, capturedUser)
|
||||
require.NotNil(t, capturedOrg)
|
||||
assert.Equal(t, expectedOrg.Name, capturedOrg.Name)
|
||||
mockCache.AssertExpectations(t)
|
||||
}
|
||||
|
||||
func TestUserFromContext(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
ctx context.Context
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "with valid user",
|
||||
ctx: context.WithValue(context.Background(), UserKey, "user-123"),
|
||||
expected: "user-123",
|
||||
},
|
||||
{
|
||||
name: "without user",
|
||||
ctx: context.Background(),
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "with invalid type",
|
||||
ctx: context.WithValue(context.Background(), UserKey, 123),
|
||||
expected: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := UserFromContext(tt.ctx)
|
||||
assert.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestOrganizationFromContext(t *testing.T) {
|
||||
orgID := uuid.New()
|
||||
org := domain.Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregate{
|
||||
ID: eventsourced.IdFromString(orgID.String()),
|
||||
},
|
||||
Name: "Test Org",
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
ctx context.Context
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "with valid organization",
|
||||
ctx: context.WithValue(context.Background(), OrganizationKey, org),
|
||||
expected: orgID.String(),
|
||||
},
|
||||
{
|
||||
name: "without organization",
|
||||
ctx: context.Background(),
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "with invalid type",
|
||||
ctx: context.WithValue(context.Background(), OrganizationKey, "not-an-org"),
|
||||
expected: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := OrganizationFromContext(tt.ctx)
|
||||
assert.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAuthMiddleware_Directive_RequiresUser(t *testing.T) {
|
||||
mockCache := new(MockCache)
|
||||
authMiddleware := NewAuth(mockCache)
|
||||
|
||||
requireUser := true
|
||||
|
||||
// Test with user present
|
||||
ctx := context.WithValue(context.Background(), UserKey, "user-123")
|
||||
_, err := authMiddleware.Directive(ctx, nil, func(ctx context.Context) (interface{}, error) {
|
||||
return "success", nil
|
||||
}, &requireUser, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Test without user
|
||||
ctx = context.Background()
|
||||
_, err = authMiddleware.Directive(ctx, nil, func(ctx context.Context) (interface{}, error) {
|
||||
return "success", nil
|
||||
}, &requireUser, nil)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "no user available in request")
|
||||
}
|
||||
|
||||
func TestAuthMiddleware_Directive_RequiresOrganization(t *testing.T) {
|
||||
mockCache := new(MockCache)
|
||||
authMiddleware := NewAuth(mockCache)
|
||||
|
||||
requireOrg := true
|
||||
orgID := uuid.New()
|
||||
org := domain.Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregate{
|
||||
ID: eventsourced.IdFromString(orgID.String()),
|
||||
},
|
||||
Name: "Test Org",
|
||||
}
|
||||
|
||||
// Test with organization present
|
||||
ctx := context.WithValue(context.Background(), OrganizationKey, org)
|
||||
_, err := authMiddleware.Directive(ctx, nil, func(ctx context.Context) (interface{}, error) {
|
||||
return "success", nil
|
||||
}, nil, &requireOrg)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Test without organization
|
||||
ctx = context.Background()
|
||||
_, err = authMiddleware.Directive(ctx, nil, func(ctx context.Context) (interface{}, error) {
|
||||
return "success", nil
|
||||
}, nil, &requireOrg)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "no organization available in request")
|
||||
}
|
||||
|
||||
func TestAuthMiddleware_Directive_RequiresBoth(t *testing.T) {
|
||||
mockCache := new(MockCache)
|
||||
authMiddleware := NewAuth(mockCache)
|
||||
|
||||
requireUser := true
|
||||
requireOrg := true
|
||||
orgID := uuid.New()
|
||||
org := domain.Organization{
|
||||
BaseAggregate: eventsourced.BaseAggregate{
|
||||
ID: eventsourced.IdFromString(orgID.String()),
|
||||
},
|
||||
Name: "Test Org",
|
||||
}
|
||||
|
||||
// When both user and organization are marked as acceptable,
|
||||
// the directive uses OR logic - either one is sufficient
|
||||
|
||||
// Test with both present - should succeed
|
||||
ctx := context.WithValue(context.Background(), UserKey, "user-123")
|
||||
ctx = context.WithValue(ctx, OrganizationKey, org)
|
||||
_, err := authMiddleware.Directive(ctx, nil, func(ctx context.Context) (interface{}, error) {
|
||||
return "success", nil
|
||||
}, &requireUser, &requireOrg)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Test with only user - should succeed (OR logic)
|
||||
ctx = context.WithValue(context.Background(), UserKey, "user-123")
|
||||
_, err = authMiddleware.Directive(ctx, nil, func(ctx context.Context) (interface{}, error) {
|
||||
return "success", nil
|
||||
}, &requireUser, &requireOrg)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Test with only organization - should succeed (OR logic)
|
||||
ctx = context.WithValue(context.Background(), OrganizationKey, org)
|
||||
_, err = authMiddleware.Directive(ctx, nil, func(ctx context.Context) (interface{}, error) {
|
||||
return "success", nil
|
||||
}, &requireUser, &requireOrg)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Test with neither - should fail
|
||||
ctx = context.Background()
|
||||
_, err = authMiddleware.Directive(ctx, nil, func(ctx context.Context) (interface{}, error) {
|
||||
return "success", nil
|
||||
}, &requireUser, &requireOrg)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "authentication required")
|
||||
}
|
||||
|
||||
func TestAuthMiddleware_Directive_NoRequirements(t *testing.T) {
|
||||
mockCache := new(MockCache)
|
||||
authMiddleware := NewAuth(mockCache)
|
||||
|
||||
// Test with no requirements
|
||||
ctx := context.Background()
|
||||
result, err := authMiddleware.Directive(ctx, nil, func(ctx context.Context) (interface{}, error) {
|
||||
return "success", nil
|
||||
}, nil, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "success", result)
|
||||
}
|
||||
|
||||
func TestUserHasRole_WithValidRole(t *testing.T) {
|
||||
// Create claims with roles
|
||||
claims := &validator.ValidatedClaims{
|
||||
RegisteredClaims: validator.RegisteredClaims{
|
||||
Subject: "user-123",
|
||||
},
|
||||
CustomClaims: &CustomClaims{
|
||||
Roles: []string{"admin", "user"},
|
||||
},
|
||||
}
|
||||
|
||||
ctx := core.SetClaims(context.Background(), claims)
|
||||
|
||||
// Test for existing role
|
||||
hasRole := UserHasRole(ctx, "admin")
|
||||
assert.True(t, hasRole)
|
||||
|
||||
hasRole = UserHasRole(ctx, "user")
|
||||
assert.True(t, hasRole)
|
||||
}
|
||||
|
||||
func TestUserHasRole_WithoutRole(t *testing.T) {
|
||||
// Create claims with roles
|
||||
claims := &validator.ValidatedClaims{
|
||||
RegisteredClaims: validator.RegisteredClaims{
|
||||
Subject: "user-123",
|
||||
},
|
||||
CustomClaims: &CustomClaims{
|
||||
Roles: []string{"user"},
|
||||
},
|
||||
}
|
||||
|
||||
ctx := core.SetClaims(context.Background(), claims)
|
||||
|
||||
// Test for non-existing role
|
||||
hasRole := UserHasRole(ctx, "admin")
|
||||
assert.False(t, hasRole)
|
||||
}
|
||||
|
||||
func TestUserHasRole_WithoutRolesClaim(t *testing.T) {
|
||||
// Create claims without custom claims
|
||||
claims := &validator.ValidatedClaims{
|
||||
RegisteredClaims: validator.RegisteredClaims{
|
||||
Subject: "user-123",
|
||||
},
|
||||
}
|
||||
|
||||
ctx := core.SetClaims(context.Background(), claims)
|
||||
|
||||
// Test should return false when custom claims is missing
|
||||
hasRole := UserHasRole(ctx, "admin")
|
||||
assert.False(t, hasRole)
|
||||
}
|
||||
|
||||
func TestUserHasRole_WithoutClaims(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
// Test should return false when no claims in context
|
||||
hasRole := UserHasRole(ctx, "admin")
|
||||
assert.False(t, hasRole)
|
||||
}
|
||||
|
||||
func TestUserHasRole_WithEmptyRoles(t *testing.T) {
|
||||
// Create claims with empty roles
|
||||
claims := &validator.ValidatedClaims{
|
||||
RegisteredClaims: validator.RegisteredClaims{
|
||||
Subject: "user-123",
|
||||
},
|
||||
CustomClaims: &CustomClaims{
|
||||
Roles: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
ctx := core.SetClaims(context.Background(), claims)
|
||||
|
||||
// Test should return false when roles array is empty
|
||||
hasRole := UserHasRole(ctx, "admin")
|
||||
assert.False(t, hasRole)
|
||||
}
|
||||
@@ -0,0 +1,41 @@
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/99designs/gqlgen/graphql"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
func AroundOperations(ctx context.Context, next graphql.OperationHandler) graphql.ResponseHandler {
|
||||
op := graphql.GetOperationContext(ctx)
|
||||
spanName := fmt.Sprintf("graphql:operation:%s", op.OperationName)
|
||||
// Span always injected in the http handler above
|
||||
sp := trace.SpanFromContext(ctx)
|
||||
if sp != nil {
|
||||
sp.SetName(spanName)
|
||||
}
|
||||
return next(ctx)
|
||||
}
|
||||
|
||||
func AroundRootFields(ctx context.Context, next graphql.RootResolver) graphql.Marshaler {
|
||||
oc := graphql.GetRootFieldContext(ctx)
|
||||
spanCtx, span := StartSpan(ctx, fmt.Sprintf("graphql:rootfield:%s", oc.Field.Name))
|
||||
defer span.Finish()
|
||||
return next(spanCtx)
|
||||
}
|
||||
|
||||
func AroundFields(ctx context.Context, next graphql.Resolver) (res any, err error) {
|
||||
oc := graphql.GetFieldContext(ctx)
|
||||
var span Span
|
||||
if oc.IsResolver {
|
||||
ctx, span = StartSpan(ctx, fmt.Sprintf("graphql:field:%s", oc.Field.Name))
|
||||
}
|
||||
defer func() {
|
||||
if span != nil {
|
||||
span.Finish()
|
||||
}
|
||||
}()
|
||||
return next(ctx)
|
||||
}
|
||||
@@ -0,0 +1,100 @@
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
|
||||
"go.opentelemetry.io/otel/exporters/stdout/stdoutlog"
|
||||
"go.opentelemetry.io/otel/log/global"
|
||||
"go.opentelemetry.io/otel/propagation"
|
||||
"go.opentelemetry.io/otel/sdk/log"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/trace"
|
||||
)
|
||||
|
||||
// SetupOTelSDK bootstraps the OpenTelemetry pipeline.
|
||||
func SetupOTelSDK(ctx context.Context, enabled bool, serviceName, buildVersion, environment string) (func(context.Context) error, error) {
|
||||
if os.Getenv("OTEL_RESOURCE_ATTRIBUTES") == "" {
|
||||
if err := os.Setenv("OTEL_RESOURCE_ATTRIBUTES", fmt.Sprintf("service.name=%s,service.version=%s,service.environment=%s", serviceName, buildVersion, environment)); err != nil {
|
||||
return func(context.Context) error {
|
||||
return nil
|
||||
}, err
|
||||
}
|
||||
}
|
||||
var shutdownFuncs []func(context.Context) error
|
||||
if !enabled {
|
||||
return func(context.Context) error {
|
||||
return nil
|
||||
}, nil
|
||||
}
|
||||
shutdown := func(ctx context.Context) error {
|
||||
var err error
|
||||
for _, fn := range shutdownFuncs {
|
||||
err = errors.Join(err, fn(ctx))
|
||||
}
|
||||
shutdownFuncs = nil
|
||||
return err
|
||||
}
|
||||
|
||||
// handleErr calls shutdown for cleanup and makes sure that all errors are returned.
|
||||
handleErr := func(inErr error) (func(context.Context) error, error) {
|
||||
return nil, errors.Join(inErr, shutdown(ctx))
|
||||
}
|
||||
|
||||
// Set up the propagator.
|
||||
prop := propagation.NewCompositeTextMapPropagator(
|
||||
propagation.TraceContext{},
|
||||
propagation.Baggage{},
|
||||
)
|
||||
otel.SetTextMapPropagator(prop)
|
||||
|
||||
traceExporter, err := otlptracehttp.New(ctx)
|
||||
if err != nil {
|
||||
return handleErr(err)
|
||||
}
|
||||
shutdownFuncs = append(shutdownFuncs, traceExporter.Shutdown)
|
||||
|
||||
tracerProvider := trace.NewTracerProvider(
|
||||
trace.WithBatcher(traceExporter,
|
||||
trace.WithBatchTimeout(5*time.Second)),
|
||||
)
|
||||
shutdownFuncs = append(shutdownFuncs, tracerProvider.Shutdown)
|
||||
otel.SetTracerProvider(tracerProvider)
|
||||
|
||||
logExporter, err := stdoutlog.New()
|
||||
if err != nil {
|
||||
return handleErr(err)
|
||||
}
|
||||
processor := log.NewSimpleProcessor(logExporter)
|
||||
logProvider := log.NewLoggerProvider(log.WithProcessor(processor))
|
||||
|
||||
global.SetLoggerProvider(logProvider)
|
||||
shutdownFuncs = append(shutdownFuncs, logProvider.Shutdown)
|
||||
|
||||
exp, err := otlpmetrichttp.New(ctx)
|
||||
if err != nil {
|
||||
return handleErr(err)
|
||||
}
|
||||
meterProvider := metric.NewMeterProvider(metric.WithReader(metric.NewPeriodicReader(exp)))
|
||||
shutdownFuncs = append(shutdownFuncs, meterProvider.Shutdown)
|
||||
|
||||
otel.SetMeterProvider(meterProvider)
|
||||
return shutdown, err
|
||||
}
|
||||
|
||||
func Handler(h http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := otel.GetTextMapPropagator().Extract(r.Context(), propagation.HeaderCarrier(r.Header))
|
||||
spanCtx, s := StartSpan(ctx, "http")
|
||||
defer s.Finish()
|
||||
|
||||
h.ServeHTTP(w, r.WithContext(spanCtx))
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,46 @@
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
type Span interface {
|
||||
Context() context.Context
|
||||
Finish()
|
||||
}
|
||||
|
||||
type span struct {
|
||||
otelSpan trace.Span
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func (s *span) Finish() {
|
||||
s.otelSpan.End()
|
||||
}
|
||||
|
||||
func (s *span) Context() context.Context {
|
||||
return s.ctx
|
||||
}
|
||||
|
||||
func StartSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, Span) {
|
||||
ctx, otelSpan := otel.Tracer("").Start(ctx, name, opts...)
|
||||
|
||||
return ctx, &span{
|
||||
otelSpan: otelSpan,
|
||||
ctx: ctx,
|
||||
}
|
||||
}
|
||||
|
||||
// TraceHandlerFunc adapts a plain function to a Trace method, in the spirit
// of http.HandlerFunc.
type TraceHandlerFunc func(ctx context.Context, name string) (context.Context, func())

// Trace invokes the wrapped function.
func (t TraceHandlerFunc) Trace(ctx context.Context, name string) (context.Context, func()) {
	return t(ctx, name)
}
|
||||
|
||||
func Trace(ctx context.Context, name string) (context.Context, func()) {
|
||||
ctx, s := StartSpan(ctx, name)
|
||||
return ctx, s.Finish
|
||||
}
|
||||
@@ -0,0 +1,23 @@
|
||||
{
|
||||
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
|
||||
"extends": [
|
||||
"config:recommended"
|
||||
],
|
||||
"packageRules": [
|
||||
{
|
||||
"matchManagers": [
|
||||
"kubernetes"
|
||||
],
|
||||
"matchPackageNames": [
|
||||
"oci.unbound.se/unboundsoftware/schemas"
|
||||
],
|
||||
"enabled": false
|
||||
},
|
||||
{
|
||||
"groupName": "Eventsourced",
|
||||
"matchPackageNames": [
|
||||
"gitlab.com/unboundsoftware/eventsourced/**"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,61 @@
|
||||
package sdlmerge
|
||||
|
||||
import (
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/ast"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/astvisitor"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/operationreport"
|
||||
)
|
||||
|
||||
type collectEntitiesVisitor struct {
|
||||
*astvisitor.Walker
|
||||
document *ast.Document
|
||||
collectedEntities entitySet
|
||||
}
|
||||
|
||||
func newCollectEntitiesVisitor(collectedEntities entitySet) *collectEntitiesVisitor {
|
||||
return &collectEntitiesVisitor{
|
||||
collectedEntities: collectedEntities,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *collectEntitiesVisitor) Register(walker *astvisitor.Walker) {
|
||||
c.Walker = walker
|
||||
walker.RegisterEnterDocumentVisitor(c)
|
||||
walker.RegisterEnterInterfaceTypeDefinitionVisitor(c)
|
||||
walker.RegisterEnterObjectTypeDefinitionVisitor(c)
|
||||
}
|
||||
|
||||
func (c *collectEntitiesVisitor) EnterDocument(operation, _ *ast.Document) {
|
||||
c.document = operation
|
||||
}
|
||||
|
||||
func (c *collectEntitiesVisitor) EnterInterfaceTypeDefinition(ref int) {
|
||||
interfaceType := c.document.InterfaceTypeDefinitions[ref]
|
||||
name := c.document.InterfaceTypeDefinitionNameString(ref)
|
||||
if err := c.resolvePotentialEntity(name, interfaceType.Directives.Refs); err != nil {
|
||||
c.StopWithExternalErr(*err)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *collectEntitiesVisitor) EnterObjectTypeDefinition(ref int) {
|
||||
objectType := c.document.ObjectTypeDefinitions[ref]
|
||||
name := c.document.ObjectTypeDefinitionNameString(ref)
|
||||
if err := c.resolvePotentialEntity(name, objectType.Directives.Refs); err != nil {
|
||||
c.StopWithExternalErr(*err)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *collectEntitiesVisitor) resolvePotentialEntity(name string, directiveRefs []int) *operationreport.ExternalError {
|
||||
if _, exists := c.collectedEntities[name]; exists {
|
||||
err := operationreport.ErrEntitiesMustNotBeDuplicated(name)
|
||||
return &err
|
||||
}
|
||||
for _, directiveRef := range directiveRefs {
|
||||
if c.document.DirectiveNameString(directiveRef) != "key" {
|
||||
continue
|
||||
}
|
||||
c.collectedEntities[name] = struct{}{}
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,50 @@
|
||||
package sdlmerge
|
||||
|
||||
import (
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/ast"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/astvisitor"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/operationreport"
|
||||
)
|
||||
|
||||
type extendEnumTypeDefinitionVisitor struct {
|
||||
*astvisitor.Walker
|
||||
document *ast.Document
|
||||
}
|
||||
|
||||
func newExtendEnumTypeDefinition() *extendEnumTypeDefinitionVisitor {
|
||||
return &extendEnumTypeDefinitionVisitor{}
|
||||
}
|
||||
|
||||
func (e *extendEnumTypeDefinitionVisitor) Register(walker *astvisitor.Walker) {
|
||||
e.Walker = walker
|
||||
walker.RegisterEnterDocumentVisitor(e)
|
||||
walker.RegisterEnterEnumTypeExtensionVisitor(e)
|
||||
}
|
||||
|
||||
func (e *extendEnumTypeDefinitionVisitor) EnterDocument(operation, _ *ast.Document) {
|
||||
e.document = operation
|
||||
}
|
||||
|
||||
func (e *extendEnumTypeDefinitionVisitor) EnterEnumTypeExtension(ref int) {
|
||||
nodes, exists := e.document.Index.NodesByNameBytes(e.document.EnumTypeExtensionNameBytes(ref))
|
||||
if !exists {
|
||||
return
|
||||
}
|
||||
|
||||
hasExtended := false
|
||||
for i := range nodes {
|
||||
if nodes[i].Kind != ast.NodeKindEnumTypeDefinition {
|
||||
continue
|
||||
}
|
||||
if hasExtended {
|
||||
e.StopWithExternalErr(operationreport.ErrSharedTypesMustNotBeExtended(e.document.EnumTypeExtensionNameString(ref)))
|
||||
return
|
||||
}
|
||||
e.document.ExtendEnumTypeDefinitionByEnumTypeExtension(nodes[i].Ref, ref)
|
||||
hasExtended = true
|
||||
}
|
||||
|
||||
if !hasExtended {
|
||||
e.StopWithExternalErr(operationreport.ErrExtensionOrphansMustResolveInSupergraph(e.document.EnumTypeExtensionNameBytes(ref)))
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,50 @@
|
||||
package sdlmerge
|
||||
|
||||
import (
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/ast"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/astvisitor"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/operationreport"
|
||||
)
|
||||
|
||||
func newExtendInputObjectTypeDefinition() *extendInputObjectTypeDefinitionVisitor {
|
||||
return &extendInputObjectTypeDefinitionVisitor{}
|
||||
}
|
||||
|
||||
type extendInputObjectTypeDefinitionVisitor struct {
|
||||
*astvisitor.Walker
|
||||
document *ast.Document
|
||||
}
|
||||
|
||||
func (e *extendInputObjectTypeDefinitionVisitor) Register(walker *astvisitor.Walker) {
|
||||
e.Walker = walker
|
||||
walker.RegisterEnterDocumentVisitor(e)
|
||||
walker.RegisterEnterInputObjectTypeExtensionVisitor(e)
|
||||
}
|
||||
|
||||
func (e *extendInputObjectTypeDefinitionVisitor) EnterDocument(operation, _ *ast.Document) {
|
||||
e.document = operation
|
||||
}
|
||||
|
||||
func (e *extendInputObjectTypeDefinitionVisitor) EnterInputObjectTypeExtension(ref int) {
|
||||
nodes, exists := e.document.Index.NodesByNameBytes(e.document.InputObjectTypeExtensionNameBytes(ref))
|
||||
if !exists {
|
||||
return
|
||||
}
|
||||
|
||||
hasExtended := false
|
||||
for i := range nodes {
|
||||
if nodes[i].Kind != ast.NodeKindInputObjectTypeDefinition {
|
||||
continue
|
||||
}
|
||||
if hasExtended {
|
||||
e.StopWithExternalErr(operationreport.ErrSharedTypesMustNotBeExtended(e.document.InputObjectTypeExtensionNameString(ref)))
|
||||
return
|
||||
}
|
||||
e.document.ExtendInputObjectTypeDefinitionByInputObjectTypeExtension(nodes[i].Ref, ref)
|
||||
hasExtended = true
|
||||
}
|
||||
|
||||
if !hasExtended {
|
||||
e.StopWithExternalErr(operationreport.ErrExtensionOrphansMustResolveInSupergraph(e.document.InputObjectTypeExtensionNameBytes(ref)))
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,63 @@
|
||||
package sdlmerge
|
||||
|
||||
import (
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/ast"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/astvisitor"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/operationreport"
|
||||
)
|
||||
|
||||
func newExtendInterfaceTypeDefinition(collectedEntities entitySet) *extendInterfaceTypeDefinitionVisitor {
|
||||
return &extendInterfaceTypeDefinitionVisitor{
|
||||
collectedEntities: collectedEntities,
|
||||
}
|
||||
}
|
||||
|
||||
type extendInterfaceTypeDefinitionVisitor struct {
|
||||
*astvisitor.Walker
|
||||
document *ast.Document
|
||||
collectedEntities entitySet
|
||||
}
|
||||
|
||||
func (e *extendInterfaceTypeDefinitionVisitor) Register(walker *astvisitor.Walker) {
|
||||
e.Walker = walker
|
||||
walker.RegisterEnterDocumentVisitor(e)
|
||||
walker.RegisterEnterInterfaceTypeExtensionVisitor(e)
|
||||
}
|
||||
|
||||
func (e *extendInterfaceTypeDefinitionVisitor) EnterDocument(operation, _ *ast.Document) {
|
||||
e.document = operation
|
||||
}
|
||||
|
||||
func (e *extendInterfaceTypeDefinitionVisitor) EnterInterfaceTypeExtension(ref int) {
|
||||
nameBytes := e.document.InterfaceTypeExtensionNameBytes(ref)
|
||||
nodes, exists := e.document.Index.NodesByNameBytes(nameBytes)
|
||||
if !exists {
|
||||
return
|
||||
}
|
||||
|
||||
var nodeToExtend *ast.Node
|
||||
isEntity := false
|
||||
for i := range nodes {
|
||||
if nodes[i].Kind != ast.NodeKindInterfaceTypeDefinition {
|
||||
continue
|
||||
}
|
||||
if nodeToExtend != nil {
|
||||
e.StopWithExternalErr(*multipleExtensionError(isEntity, nameBytes))
|
||||
return
|
||||
}
|
||||
var err *operationreport.ExternalError
|
||||
extension := e.document.InterfaceTypeExtensions[ref]
|
||||
if isEntity, err = e.collectedEntities.isExtensionForEntity(nameBytes, extension.Directives.Refs, e.document); err != nil {
|
||||
e.StopWithExternalErr(*err)
|
||||
return
|
||||
}
|
||||
nodeToExtend = &nodes[i]
|
||||
}
|
||||
|
||||
if nodeToExtend == nil {
|
||||
e.StopWithExternalErr(operationreport.ErrExtensionOrphansMustResolveInSupergraph(e.document.InterfaceTypeExtensionNameBytes(ref)))
|
||||
return
|
||||
}
|
||||
|
||||
e.document.ExtendInterfaceTypeDefinitionByInterfaceTypeExtension(nodeToExtend.Ref, ref)
|
||||
}
|
||||
@@ -0,0 +1,62 @@
|
||||
package sdlmerge
|
||||
|
||||
import (
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/ast"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/astvisitor"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/operationreport"
|
||||
)
|
||||
|
||||
type mergeDuplicatedFieldsVisitor struct {
|
||||
*astvisitor.Walker
|
||||
document *ast.Document
|
||||
}
|
||||
|
||||
func newMergeDuplicatedFieldsVisitor() *mergeDuplicatedFieldsVisitor {
|
||||
return &mergeDuplicatedFieldsVisitor{
|
||||
nil,
|
||||
nil,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *mergeDuplicatedFieldsVisitor) Register(walker *astvisitor.Walker) {
|
||||
m.Walker = walker
|
||||
walker.RegisterEnterDocumentVisitor(m)
|
||||
walker.RegisterLeaveObjectTypeDefinitionVisitor(m)
|
||||
}
|
||||
|
||||
func (m *mergeDuplicatedFieldsVisitor) EnterDocument(document, _ *ast.Document) {
|
||||
m.document = document
|
||||
}
|
||||
|
||||
func (m *mergeDuplicatedFieldsVisitor) LeaveObjectTypeDefinition(ref int) {
|
||||
var refsForDeletion []int
|
||||
fieldByTypeRefSet := make(map[string]int)
|
||||
for _, fieldRef := range m.document.ObjectTypeDefinitions[ref].FieldsDefinition.Refs {
|
||||
fieldName := m.document.FieldDefinitionNameString(fieldRef)
|
||||
newTypeRef := m.document.FieldDefinitions[fieldRef].Type
|
||||
if oldTypeRef, ok := fieldByTypeRefSet[fieldName]; ok {
|
||||
if m.document.TypesAreEqualDeep(oldTypeRef, newTypeRef) {
|
||||
refsForDeletion = append(refsForDeletion, fieldRef)
|
||||
continue
|
||||
}
|
||||
oldFieldTypeNameBytes, err := m.document.PrintTypeBytes(oldTypeRef, nil)
|
||||
if err != nil {
|
||||
m.StopWithInternalErr(err)
|
||||
return
|
||||
}
|
||||
newFieldTypeNameBytes, err := m.document.PrintTypeBytes(newTypeRef, nil)
|
||||
if err != nil {
|
||||
m.StopWithInternalErr(err)
|
||||
return
|
||||
}
|
||||
m.StopWithExternalErr(operationreport.ErrDuplicateFieldsMustBeIdentical(
|
||||
fieldName, m.document.ObjectTypeDefinitionNameString(ref), string(oldFieldTypeNameBytes), string(newFieldTypeNameBytes),
|
||||
))
|
||||
return
|
||||
}
|
||||
|
||||
fieldByTypeRefSet[fieldName] = newTypeRef
|
||||
}
|
||||
|
||||
m.document.RemoveFieldDefinitionsFromObjectTypeDefinition(refsForDeletion, ref)
|
||||
}
|
||||
@@ -0,0 +1,66 @@
|
||||
package sdlmerge
|
||||
|
||||
import (
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/ast"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/astvisitor"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/operationreport"
|
||||
)
|
||||
|
||||
func newExtendObjectTypeDefinition(collectedEntities entitySet) *extendObjectTypeDefinitionVisitor {
|
||||
return &extendObjectTypeDefinitionVisitor{
|
||||
collectedEntities: collectedEntities,
|
||||
}
|
||||
}
|
||||
|
||||
type extendObjectTypeDefinitionVisitor struct {
|
||||
*astvisitor.Walker
|
||||
document *ast.Document
|
||||
collectedEntities entitySet
|
||||
}
|
||||
|
||||
func (e *extendObjectTypeDefinitionVisitor) Register(walker *astvisitor.Walker) {
|
||||
e.Walker = walker
|
||||
walker.RegisterEnterDocumentVisitor(e)
|
||||
walker.RegisterEnterObjectTypeExtensionVisitor(e)
|
||||
}
|
||||
|
||||
func (e *extendObjectTypeDefinitionVisitor) EnterDocument(operation, _ *ast.Document) {
|
||||
e.document = operation
|
||||
}
|
||||
|
||||
func (e *extendObjectTypeDefinitionVisitor) EnterObjectTypeExtension(ref int) {
|
||||
nameBytes := e.document.ObjectTypeExtensionNameBytes(ref)
|
||||
nodes, exists := e.document.Index.NodesByNameBytes(nameBytes)
|
||||
if !exists {
|
||||
return
|
||||
}
|
||||
|
||||
var nodeToExtend *ast.Node
|
||||
isEntity := false
|
||||
for i := range nodes {
|
||||
if nodes[i].Kind != ast.NodeKindObjectTypeDefinition {
|
||||
continue
|
||||
}
|
||||
if nodeToExtend != nil {
|
||||
e.StopWithExternalErr(*multipleExtensionError(isEntity, nameBytes))
|
||||
return
|
||||
}
|
||||
var err *operationreport.ExternalError
|
||||
extension := e.document.ObjectTypeExtensions[ref]
|
||||
if isEntity, err = e.collectedEntities.isExtensionForEntity(nameBytes, extension.Directives.Refs, e.document); err != nil {
|
||||
e.StopWithExternalErr(*err)
|
||||
return
|
||||
}
|
||||
nodeToExtend = &nodes[i]
|
||||
if ast.IsRootType(nameBytes) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if nodeToExtend == nil {
|
||||
e.StopWithExternalErr(operationreport.ErrExtensionOrphansMustResolveInSupergraph(nameBytes))
|
||||
return
|
||||
}
|
||||
|
||||
e.document.ExtendObjectTypeDefinitionByObjectTypeExtension(nodeToExtend.Ref, ref)
|
||||
}
|
||||
@@ -0,0 +1,107 @@
|
||||
package sdlmerge
|
||||
|
||||
import (
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/ast"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/astvisitor"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/operationreport"
|
||||
)
|
||||
|
||||
type removeDuplicateFieldedSharedTypesVisitor struct {
|
||||
*astvisitor.Walker
|
||||
document *ast.Document
|
||||
sharedTypeSet map[string]fieldedSharedType
|
||||
rootNodesToRemove []ast.Node
|
||||
lastInputRef int
|
||||
lastInterfaceRef int
|
||||
lastObjectRef int
|
||||
}
|
||||
|
||||
func newRemoveDuplicateFieldedSharedTypesVisitor() *removeDuplicateFieldedSharedTypesVisitor {
|
||||
return &removeDuplicateFieldedSharedTypesVisitor{
|
||||
nil,
|
||||
nil,
|
||||
make(map[string]fieldedSharedType),
|
||||
nil,
|
||||
ast.InvalidRef,
|
||||
ast.InvalidRef,
|
||||
ast.InvalidRef,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *removeDuplicateFieldedSharedTypesVisitor) Register(walker *astvisitor.Walker) {
|
||||
r.Walker = walker
|
||||
walker.RegisterEnterDocumentVisitor(r)
|
||||
walker.RegisterEnterInputObjectTypeDefinitionVisitor(r)
|
||||
walker.RegisterEnterInterfaceTypeDefinitionVisitor(r)
|
||||
walker.RegisterEnterObjectTypeDefinitionVisitor(r)
|
||||
walker.RegisterLeaveDocumentVisitor(r)
|
||||
}
|
||||
|
||||
func (r *removeDuplicateFieldedSharedTypesVisitor) EnterDocument(operation, _ *ast.Document) {
|
||||
r.document = operation
|
||||
}
|
||||
|
||||
func (r *removeDuplicateFieldedSharedTypesVisitor) EnterInputObjectTypeDefinition(ref int) {
|
||||
if ref <= r.lastInputRef {
|
||||
return
|
||||
}
|
||||
name := r.document.InputObjectTypeDefinitionNameString(ref)
|
||||
refs := r.document.InputObjectTypeDefinitions[ref].InputFieldsDefinition.Refs
|
||||
input, exists := r.sharedTypeSet[name]
|
||||
if exists {
|
||||
if !input.areFieldsIdentical(refs) {
|
||||
r.StopWithExternalErr(operationreport.ErrSharedTypesMustBeIdenticalToFederate(name))
|
||||
return
|
||||
}
|
||||
r.rootNodesToRemove = append(r.rootNodesToRemove, ast.Node{Kind: ast.NodeKindInputObjectTypeDefinition, Ref: ref})
|
||||
} else {
|
||||
r.sharedTypeSet[name] = newFieldedSharedType(r.document, ast.NodeKindInputValueDefinition, refs)
|
||||
}
|
||||
r.lastInputRef = ref
|
||||
}
|
||||
|
||||
func (r *removeDuplicateFieldedSharedTypesVisitor) EnterInterfaceTypeDefinition(ref int) {
|
||||
if ref <= r.lastInterfaceRef {
|
||||
return
|
||||
}
|
||||
name := r.document.InterfaceTypeDefinitionNameString(ref)
|
||||
interfaceType := r.document.InterfaceTypeDefinitions[ref]
|
||||
refs := interfaceType.FieldsDefinition.Refs
|
||||
iFace, exists := r.sharedTypeSet[name]
|
||||
if exists {
|
||||
if !iFace.areFieldsIdentical(refs) {
|
||||
r.StopWithExternalErr(operationreport.ErrSharedTypesMustBeIdenticalToFederate(name))
|
||||
return
|
||||
}
|
||||
r.rootNodesToRemove = append(r.rootNodesToRemove, ast.Node{Kind: ast.NodeKindInterfaceTypeDefinition, Ref: ref})
|
||||
} else {
|
||||
r.sharedTypeSet[name] = newFieldedSharedType(r.document, ast.NodeKindFieldDefinition, refs)
|
||||
}
|
||||
r.lastInterfaceRef = ref
|
||||
}
|
||||
|
||||
func (r *removeDuplicateFieldedSharedTypesVisitor) EnterObjectTypeDefinition(ref int) {
|
||||
if ref <= r.lastObjectRef {
|
||||
return
|
||||
}
|
||||
name := r.document.ObjectTypeDefinitionNameString(ref)
|
||||
objectType := r.document.ObjectTypeDefinitions[ref]
|
||||
refs := objectType.FieldsDefinition.Refs
|
||||
object, exists := r.sharedTypeSet[name]
|
||||
if exists {
|
||||
if !object.areFieldsIdentical(refs) {
|
||||
r.StopWithExternalErr(operationreport.ErrSharedTypesMustBeIdenticalToFederate(name))
|
||||
return
|
||||
}
|
||||
r.rootNodesToRemove = append(r.rootNodesToRemove, ast.Node{Kind: ast.NodeKindObjectTypeDefinition, Ref: ref})
|
||||
} else {
|
||||
r.sharedTypeSet[name] = newFieldedSharedType(r.document, ast.NodeKindFieldDefinition, refs)
|
||||
}
|
||||
r.lastObjectRef = ref
|
||||
}
|
||||
|
||||
func (r *removeDuplicateFieldedSharedTypesVisitor) LeaveDocument(_, _ *ast.Document) {
|
||||
if r.rootNodesToRemove != nil {
|
||||
r.document.DeleteRootNodes(r.rootNodesToRemove)
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,98 @@
|
||||
package sdlmerge
|
||||
|
||||
import (
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/ast"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/astvisitor"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/operationreport"
|
||||
)
|
||||
|
||||
type removeDuplicateFieldlessSharedTypesVisitor struct {
|
||||
*astvisitor.Walker
|
||||
document *ast.Document
|
||||
sharedTypeSet map[string]fieldlessSharedType
|
||||
rootNodesToRemove []ast.Node
|
||||
lastEnumRef int
|
||||
lastUnionRef int
|
||||
lastScalarRef int
|
||||
}
|
||||
|
||||
func newRemoveDuplicateFieldlessSharedTypesVisitor() *removeDuplicateFieldlessSharedTypesVisitor {
|
||||
return &removeDuplicateFieldlessSharedTypesVisitor{
|
||||
nil,
|
||||
nil,
|
||||
make(map[string]fieldlessSharedType),
|
||||
nil,
|
||||
ast.InvalidRef,
|
||||
ast.InvalidRef,
|
||||
ast.InvalidRef,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *removeDuplicateFieldlessSharedTypesVisitor) Register(walker *astvisitor.Walker) {
|
||||
r.Walker = walker
|
||||
walker.RegisterEnterDocumentVisitor(r)
|
||||
walker.RegisterEnterEnumTypeDefinitionVisitor(r)
|
||||
walker.RegisterEnterScalarTypeDefinitionVisitor(r)
|
||||
walker.RegisterEnterUnionTypeDefinitionVisitor(r)
|
||||
walker.RegisterLeaveDocumentVisitor(r)
|
||||
}
|
||||
|
||||
func (r *removeDuplicateFieldlessSharedTypesVisitor) EnterDocument(operation, _ *ast.Document) {
|
||||
r.document = operation
|
||||
}
|
||||
|
||||
func (r *removeDuplicateFieldlessSharedTypesVisitor) EnterEnumTypeDefinition(ref int) {
|
||||
if ref <= r.lastEnumRef {
|
||||
return
|
||||
}
|
||||
name := r.document.EnumTypeDefinitionNameString(ref)
|
||||
enum, exists := r.sharedTypeSet[name]
|
||||
if exists {
|
||||
if !enum.areValuesIdentical(r.document.EnumTypeDefinitions[ref].EnumValuesDefinition.Refs) {
|
||||
r.StopWithExternalErr(operationreport.ErrSharedTypesMustBeIdenticalToFederate(name))
|
||||
return
|
||||
}
|
||||
r.rootNodesToRemove = append(r.rootNodesToRemove, ast.Node{Kind: ast.NodeKindEnumTypeDefinition, Ref: ref})
|
||||
} else {
|
||||
r.sharedTypeSet[name] = newEnumSharedType(r.document, ref)
|
||||
}
|
||||
r.lastEnumRef = ref
|
||||
}
|
||||
|
||||
func (r *removeDuplicateFieldlessSharedTypesVisitor) EnterScalarTypeDefinition(ref int) {
|
||||
if ref <= r.lastScalarRef {
|
||||
return
|
||||
}
|
||||
name := r.document.ScalarTypeDefinitionNameString(ref)
|
||||
_, exists := r.sharedTypeSet[name]
|
||||
if exists {
|
||||
r.rootNodesToRemove = append(r.rootNodesToRemove, ast.Node{Kind: ast.NodeKindScalarTypeDefinition, Ref: ref})
|
||||
} else {
|
||||
r.sharedTypeSet[name] = scalarSharedType{}
|
||||
}
|
||||
r.lastScalarRef = ref
|
||||
}
|
||||
|
||||
func (r *removeDuplicateFieldlessSharedTypesVisitor) EnterUnionTypeDefinition(ref int) {
|
||||
if ref <= r.lastUnionRef {
|
||||
return
|
||||
}
|
||||
name := r.document.UnionTypeDefinitionNameString(ref)
|
||||
union, exists := r.sharedTypeSet[name]
|
||||
if exists {
|
||||
if !union.areValuesIdentical(r.document.UnionTypeDefinitions[ref].UnionMemberTypes.Refs) {
|
||||
r.StopWithExternalErr(operationreport.ErrSharedTypesMustBeIdenticalToFederate(name))
|
||||
return
|
||||
}
|
||||
r.rootNodesToRemove = append(r.rootNodesToRemove, ast.Node{Kind: ast.NodeKindUnionTypeDefinition, Ref: ref})
|
||||
} else {
|
||||
r.sharedTypeSet[name] = newUnionSharedType(r.document, ref)
|
||||
}
|
||||
r.lastUnionRef = ref
|
||||
}
|
||||
|
||||
func (r *removeDuplicateFieldlessSharedTypesVisitor) LeaveDocument(_, _ *ast.Document) {
|
||||
if r.rootNodesToRemove != nil {
|
||||
r.document.DeleteRootNodes(r.rootNodesToRemove)
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,32 @@
|
||||
package sdlmerge
|
||||
|
||||
import (
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/ast"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/astvisitor"
|
||||
)
|
||||
|
||||
func newRemoveEmptyObjectTypeDefinition() *removeEmptyObjectTypeDefinition {
|
||||
return &removeEmptyObjectTypeDefinition{}
|
||||
}
|
||||
|
||||
type removeEmptyObjectTypeDefinition struct{}
|
||||
|
||||
func (r *removeEmptyObjectTypeDefinition) Register(walker *astvisitor.Walker) {
|
||||
walker.RegisterLeaveDocumentVisitor(r)
|
||||
}
|
||||
|
||||
func (r *removeEmptyObjectTypeDefinition) LeaveDocument(operation, _ *ast.Document) {
|
||||
for ref := range operation.ObjectTypeDefinitions {
|
||||
if operation.ObjectTypeDefinitions[ref].HasFieldDefinitions {
|
||||
continue
|
||||
}
|
||||
|
||||
name := operation.ObjectTypeDefinitionNameString(ref)
|
||||
node, ok := operation.Index.FirstNodeByNameStr(name)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
operation.RemoveRootNode(node)
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,46 @@
|
||||
package sdlmerge
|
||||
|
||||
import (
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/ast"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/astvisitor"
|
||||
)
|
||||
|
||||
func newRemoveFieldDefinitions(directives ...string) *removeFieldDefinitionByDirective {
|
||||
directivesSet := make(map[string]struct{}, len(directives))
|
||||
for _, directive := range directives {
|
||||
directivesSet[directive] = struct{}{}
|
||||
}
|
||||
|
||||
return &removeFieldDefinitionByDirective{
|
||||
directives: directivesSet,
|
||||
}
|
||||
}
|
||||
|
||||
type removeFieldDefinitionByDirective struct {
|
||||
operation *ast.Document
|
||||
directives map[string]struct{}
|
||||
}
|
||||
|
||||
func (r *removeFieldDefinitionByDirective) Register(walker *astvisitor.Walker) {
|
||||
walker.RegisterEnterDocumentVisitor(r)
|
||||
walker.RegisterLeaveObjectTypeDefinitionVisitor(r)
|
||||
}
|
||||
|
||||
func (r *removeFieldDefinitionByDirective) EnterDocument(operation, _ *ast.Document) {
|
||||
r.operation = operation
|
||||
}
|
||||
|
||||
func (r *removeFieldDefinitionByDirective) LeaveObjectTypeDefinition(ref int) {
|
||||
var refsForDeletion []int
|
||||
// select fields for deletion
|
||||
for _, fieldRef := range r.operation.ObjectTypeDefinitions[ref].FieldsDefinition.Refs {
|
||||
for _, directiveRef := range r.operation.FieldDefinitions[fieldRef].Directives.Refs {
|
||||
directiveName := r.operation.DirectiveNameString(directiveRef)
|
||||
if _, ok := r.directives[directiveName]; ok {
|
||||
refsForDeletion = append(refsForDeletion, fieldRef)
|
||||
}
|
||||
}
|
||||
}
|
||||
// delete fields
|
||||
r.operation.RemoveFieldDefinitionsFromObjectTypeDefinition(refsForDeletion, ref)
|
||||
}
|
||||
@@ -0,0 +1,44 @@
|
||||
package sdlmerge
|
||||
|
||||
import (
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/ast"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/astvisitor"
|
||||
)
|
||||
|
||||
func newRemoveFieldDefinitionDirective(directives ...string) *removeFieldDefinitionDirective {
|
||||
directivesSet := make(map[string]struct{}, len(directives))
|
||||
for _, directive := range directives {
|
||||
directivesSet[directive] = struct{}{}
|
||||
}
|
||||
|
||||
return &removeFieldDefinitionDirective{
|
||||
directives: directivesSet,
|
||||
}
|
||||
}
|
||||
|
||||
type removeFieldDefinitionDirective struct {
|
||||
operation *ast.Document
|
||||
directives map[string]struct{}
|
||||
}
|
||||
|
||||
func (r *removeFieldDefinitionDirective) Register(walker *astvisitor.Walker) {
|
||||
walker.RegisterEnterDocumentVisitor(r)
|
||||
walker.RegisterEnterFieldDefinitionVisitor(r)
|
||||
}
|
||||
|
||||
func (r *removeFieldDefinitionDirective) EnterDocument(operation, _ *ast.Document) {
|
||||
r.operation = operation
|
||||
}
|
||||
|
||||
func (r *removeFieldDefinitionDirective) EnterFieldDefinition(ref int) {
|
||||
var refsForDeletion []int
|
||||
// select directives for deletion
|
||||
for _, directiveRef := range r.operation.FieldDefinitions[ref].Directives.Refs {
|
||||
directiveName := r.operation.DirectiveNameString(directiveRef)
|
||||
if _, ok := r.directives[directiveName]; ok {
|
||||
refsForDeletion = append(refsForDeletion, directiveRef)
|
||||
}
|
||||
}
|
||||
// delete directives
|
||||
r.operation.RemoveDirectivesFromNode(ast.Node{Kind: ast.NodeKindFieldDefinition, Ref: ref}, refsForDeletion)
|
||||
}
|
||||
@@ -0,0 +1,45 @@
|
||||
package sdlmerge
|
||||
|
||||
import (
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/ast"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/astvisitor"
|
||||
)
|
||||
|
||||
func newRemoveInterfaceDefinitionDirective(directives ...string) *removeInterfaceDefinitionDirective {
|
||||
directivesSet := make(map[string]struct{}, len(directives))
|
||||
for _, directive := range directives {
|
||||
directivesSet[directive] = struct{}{}
|
||||
}
|
||||
|
||||
return &removeInterfaceDefinitionDirective{
|
||||
directives: directivesSet,
|
||||
}
|
||||
}
|
||||
|
||||
// removeInterfaceDefinitionDirective removes a configured set of directives
// from every interface type definition in the walked document.
type removeInterfaceDefinitionDirective struct {
	// NOTE(review): this embedded walker is never assigned in the visible
	// code (Register does not set it, unlike the extend-scalar/union
	// visitors), so it appears unused here — confirm before relying on it.
	*astvisitor.Walker
	operation *ast.Document // document currently being walked; set in EnterDocument
	directives map[string]struct{} // directive names (without '@') to remove
}
|
||||
|
||||
// Register subscribes this visitor to document-entry and
// interface-type-definition-entry events.
func (r *removeInterfaceDefinitionDirective) Register(walker *astvisitor.Walker) {
	walker.RegisterEnterDocumentVisitor(r)
	walker.RegisterEnterInterfaceTypeDefinitionVisitor(r)
}
|
||||
|
||||
// EnterDocument stores the walked document for use by EnterInterfaceTypeDefinition.
func (r *removeInterfaceDefinitionDirective) EnterDocument(operation, _ *ast.Document) {
	r.operation = operation
}
|
||||
|
||||
// EnterInterfaceTypeDefinition deletes from the interface definition every
// directive whose name is in the configured removal set.
func (r *removeInterfaceDefinitionDirective) EnterInterfaceTypeDefinition(ref int) {
	var refsForDeletion []int
	// select directives for deletion (the loop inspects directives, not fields)
	for _, directiveRef := range r.operation.InterfaceTypeDefinitions[ref].Directives.Refs {
		directiveName := r.operation.DirectiveNameString(directiveRef)
		if _, ok := r.directives[directiveName]; ok {
			refsForDeletion = append(refsForDeletion, directiveRef)
		}
	}
	// delete directives
	r.operation.RemoveDirectivesFromNode(ast.Node{Kind: ast.NodeKindInterfaceTypeDefinition, Ref: ref}, refsForDeletion)
}
|
||||
@@ -0,0 +1,44 @@
|
||||
package sdlmerge
|
||||
|
||||
import (
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/ast"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/astvisitor"
|
||||
)
|
||||
|
||||
func newRemoveObjectTypeDefinitionDirective(directives ...string) *removeObjectTypeDefinitionDirective {
|
||||
directivesSet := make(map[string]struct{}, len(directives))
|
||||
for _, directive := range directives {
|
||||
directivesSet[directive] = struct{}{}
|
||||
}
|
||||
|
||||
return &removeObjectTypeDefinitionDirective{
|
||||
directives: directivesSet,
|
||||
}
|
||||
}
|
||||
|
||||
// removeObjectTypeDefinitionDirective removes a configured set of directives
// from every object type definition in the walked document.
type removeObjectTypeDefinitionDirective struct {
	operation *ast.Document // document currently being walked; set in EnterDocument
	directives map[string]struct{} // directive names (without '@') to remove
}
|
||||
|
||||
// Register subscribes this visitor to document-entry and
// object-type-definition-entry events.
func (r *removeObjectTypeDefinitionDirective) Register(walker *astvisitor.Walker) {
	walker.RegisterEnterDocumentVisitor(r)
	walker.RegisterEnterObjectTypeDefinitionVisitor(r)
}
|
||||
|
||||
// EnterDocument stores the walked document for use by EnterObjectTypeDefinition.
func (r *removeObjectTypeDefinitionDirective) EnterDocument(operation, _ *ast.Document) {
	r.operation = operation
}
|
||||
|
||||
// EnterObjectTypeDefinition deletes from the object type definition every
// directive whose name is in the configured removal set.
func (r *removeObjectTypeDefinitionDirective) EnterObjectTypeDefinition(ref int) {
	var refsForDeletion []int
	// select directives for deletion (the loop inspects directives, not fields)
	for _, directiveRef := range r.operation.ObjectTypeDefinitions[ref].Directives.Refs {
		directiveName := r.operation.DirectiveNameString(directiveRef)
		if _, ok := r.directives[directiveName]; ok {
			refsForDeletion = append(refsForDeletion, directiveRef)
		}
	}
	// delete directives
	r.operation.RemoveDirectivesFromNode(ast.Node{Kind: ast.NodeKindObjectTypeDefinition, Ref: ref}, refsForDeletion)
}
|
||||
@@ -0,0 +1,20 @@
|
||||
package sdlmerge
|
||||
|
||||
import (
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/ast"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/astvisitor"
|
||||
)
|
||||
|
||||
// newRemoveMergedTypeExtensions returns a visitor that, at the end of a walk,
// removes type extensions that were already merged into their definitions.
func newRemoveMergedTypeExtensions() *removeMergedTypeExtensionsVisitor {
	return &removeMergedTypeExtensionsVisitor{}
}
|
||||
|
||||
// removeMergedTypeExtensionsVisitor is stateless; all work happens in LeaveDocument.
type removeMergedTypeExtensionsVisitor struct{}
|
||||
|
||||
// Register subscribes the visitor to document-leave only, so cleanup runs
// after every other visitor on the same walker has finished.
func (r *removeMergedTypeExtensionsVisitor) Register(walker *astvisitor.Walker) {
	walker.RegisterLeaveDocumentVisitor(r)
}
|
||||
|
||||
// LeaveDocument drops all type extensions that were merged during the walk.
func (r *removeMergedTypeExtensionsVisitor) LeaveDocument(operation, definition *ast.Document) {
	operation.RemoveMergedTypeExtensions()
}
|
||||
@@ -0,0 +1,49 @@
|
||||
package sdlmerge
|
||||
|
||||
import (
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/ast"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/astvisitor"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/operationreport"
|
||||
)
|
||||
|
||||
// newExtendScalarTypeDefinition returns a visitor that merges scalar type
// extensions into their matching scalar definitions.
func newExtendScalarTypeDefinition() *extendScalarTypeDefinitionVisitor {
	return &extendScalarTypeDefinitionVisitor{}
}
|
||||
|
||||
// extendScalarTypeDefinitionVisitor merges scalar type extensions into their
// definitions; the embedded walker is used to abort with external errors.
type extendScalarTypeDefinitionVisitor struct {
	*astvisitor.Walker // assigned in Register; provides StopWithExternalErr
	document *ast.Document // document currently being walked; set in EnterDocument
}
|
||||
|
||||
// Register keeps a reference to the walker (for error reporting) and
// subscribes to document-entry and scalar-type-extension-entry events.
func (e *extendScalarTypeDefinitionVisitor) Register(walker *astvisitor.Walker) {
	e.Walker = walker
	walker.RegisterEnterDocumentVisitor(e)
	walker.RegisterEnterScalarTypeExtensionVisitor(e)
}
|
||||
|
||||
// EnterDocument stores the walked document for use by EnterScalarTypeExtension.
func (e *extendScalarTypeDefinitionVisitor) EnterDocument(operation, _ *ast.Document) {
	e.document = operation
}
|
||||
|
||||
func (e *extendScalarTypeDefinitionVisitor) EnterScalarTypeExtension(ref int) {
|
||||
nodes, exists := e.document.Index.NodesByNameBytes(e.document.ScalarTypeExtensionNameBytes(ref))
|
||||
if !exists {
|
||||
return
|
||||
}
|
||||
|
||||
hasExtended := false
|
||||
for i := range nodes {
|
||||
if nodes[i].Kind != ast.NodeKindScalarTypeDefinition {
|
||||
continue
|
||||
}
|
||||
if hasExtended {
|
||||
e.StopWithExternalErr(operationreport.ErrSharedTypesMustNotBeExtended(e.document.ScalarTypeExtensionNameString(ref)))
|
||||
return
|
||||
}
|
||||
e.document.ExtendScalarTypeDefinitionByScalarTypeExtension(nodes[i].Ref, ref)
|
||||
hasExtended = true
|
||||
}
|
||||
if !hasExtended {
|
||||
e.StopWithExternalErr(operationreport.ErrExtensionOrphansMustResolveInSupergraph(e.document.ScalarTypeExtensionNameBytes(ref)))
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,207 @@
|
||||
package sdlmerge
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/asttransform"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/astvalidation"
|
||||
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/ast"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/astnormalization"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/astparser"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/astprinter"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/astvisitor"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/operationreport"
|
||||
)
|
||||
|
||||
const (
	// rootOperationTypeDefinitions is prepended to the joined subgraph SDLs
	// so that extensions of Query/Mutation/Subscription always have a base
	// definition to merge into.
	rootOperationTypeDefinitions = `
type Query {}
type Mutation {}
type Subscription {}
`

	// parseDocumentError is the shared wrap format for parser failures.
	parseDocumentError = "parse graphql document string: %w"
)
|
||||
|
||||
// Visitor is implemented by every sdlmerge pass. Register subscribes the
// pass to the walker events it needs.
type Visitor interface {
	Register(walker *astvisitor.Walker)
}
|
||||
|
||||
func MergeAST(ast *ast.Document) error {
|
||||
normalizer := normalizer{}
|
||||
normalizer.setupWalkers()
|
||||
|
||||
return normalizer.normalize(ast)
|
||||
}
|
||||
|
||||
func MergeSDLs(SDLs ...string) (string, error) {
|
||||
rawDocs := make([]string, 0, len(SDLs)+1)
|
||||
rawDocs = append(rawDocs, rootOperationTypeDefinitions)
|
||||
rawDocs = append(rawDocs, SDLs...)
|
||||
if validationError := validateSubgraphs(rawDocs[1:]); validationError != nil {
|
||||
return "", validationError
|
||||
}
|
||||
if normalizationError := normalizeSubgraphs(rawDocs[1:]); normalizationError != nil {
|
||||
return "", normalizationError
|
||||
}
|
||||
|
||||
doc, report := astparser.ParseGraphqlDocumentString(strings.Join(rawDocs, "\n"))
|
||||
if report.HasErrors() {
|
||||
return "", fmt.Errorf("parse graphql document string: %w", report)
|
||||
}
|
||||
|
||||
astnormalization.NormalizeSubgraphSDL(&doc, &report)
|
||||
if report.HasErrors() {
|
||||
return "", fmt.Errorf("merge ast: %w", report)
|
||||
}
|
||||
|
||||
if err := MergeAST(&doc); err != nil {
|
||||
return "", fmt.Errorf("merge ast: %w", err)
|
||||
}
|
||||
|
||||
// Format with indentation for better readability
|
||||
buf := &bytes.Buffer{}
|
||||
if err := astprinter.PrintIndent(&doc, []byte(" "), buf); err != nil {
|
||||
return "", fmt.Errorf("stringify schema: %w", err)
|
||||
}
|
||||
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
func validateSubgraphs(subgraphs []string) error {
|
||||
validator := astvalidation.NewDefinitionValidator(
|
||||
astvalidation.PopulatedTypeBodies(), astvalidation.KnownTypeNames(),
|
||||
)
|
||||
for _, subgraph := range subgraphs {
|
||||
doc, report := astparser.ParseGraphqlDocumentString(subgraph)
|
||||
if err := asttransform.MergeDefinitionWithBaseSchema(&doc); err != nil {
|
||||
return err
|
||||
}
|
||||
if report.HasErrors() {
|
||||
return fmt.Errorf(parseDocumentError, report)
|
||||
}
|
||||
validator.Validate(&doc, &report)
|
||||
if report.HasErrors() {
|
||||
return fmt.Errorf("validate schema: %w", report)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func normalizeSubgraphs(subgraphs []string) error {
|
||||
subgraphNormalizer := astnormalization.NewSubgraphDefinitionNormalizer()
|
||||
for i, subgraph := range subgraphs {
|
||||
doc, report := astparser.ParseGraphqlDocumentString(subgraph)
|
||||
if report.HasErrors() {
|
||||
return fmt.Errorf(parseDocumentError, report)
|
||||
}
|
||||
subgraphNormalizer.NormalizeDefinition(&doc, &report)
|
||||
if report.HasErrors() {
|
||||
return fmt.Errorf("normalize schema: %w", report)
|
||||
}
|
||||
out, err := astprinter.PrintString(&doc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("stringify schema: %w", err)
|
||||
}
|
||||
subgraphs[i] = out
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// normalizer owns the ordered list of walkers that implement the merge pipeline.
type normalizer struct {
	walkers []*astvisitor.Walker // executed in order by normalize
}
|
||||
|
||||
// entitySet records the names of collected federation entities.
type entitySet map[string]struct{}
|
||||
|
||||
func (m *normalizer) setupWalkers() {
|
||||
collectedEntities := make(entitySet)
|
||||
visitorGroups := [][]Visitor{
|
||||
{
|
||||
newCollectEntitiesVisitor(collectedEntities),
|
||||
},
|
||||
{
|
||||
newExtendEnumTypeDefinition(),
|
||||
newExtendInputObjectTypeDefinition(),
|
||||
newExtendInterfaceTypeDefinition(collectedEntities),
|
||||
newExtendScalarTypeDefinition(),
|
||||
newExtendUnionTypeDefinition(),
|
||||
newExtendObjectTypeDefinition(collectedEntities),
|
||||
newRemoveEmptyObjectTypeDefinition(),
|
||||
newRemoveMergedTypeExtensions(),
|
||||
},
|
||||
// visitors for cleaning up federated duplicated fields and directives
|
||||
{
|
||||
newRemoveFieldDefinitions("external"),
|
||||
newRemoveDuplicateFieldedSharedTypesVisitor(),
|
||||
newRemoveDuplicateFieldlessSharedTypesVisitor(),
|
||||
newMergeDuplicatedFieldsVisitor(),
|
||||
newRemoveInterfaceDefinitionDirective("key"),
|
||||
newRemoveObjectTypeDefinitionDirective("key"),
|
||||
newRemoveFieldDefinitionDirective("provides", "requires"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, visitorGroup := range visitorGroups {
|
||||
walker := astvisitor.NewWalker(48)
|
||||
for _, visitor := range visitorGroup {
|
||||
visitor.Register(&walker)
|
||||
m.walkers = append(m.walkers, &walker)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *normalizer) normalize(operation *ast.Document) error {
|
||||
report := operationreport.Report{}
|
||||
|
||||
for _, walker := range m.walkers {
|
||||
walker.Walk(operation, nil, &report)
|
||||
if report.HasErrors() {
|
||||
return fmt.Errorf("walk: %w", report)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// isExtensionForEntity reports whether a type extension with the given name
// and directives extends a known federation entity.
// Outcomes:
//   - (true, nil): the name is a collected entity and the extension carries
//     a @key directive — a valid entity extension.
//   - (false, nil): the name is not an entity and the extension does not
//     carry @key — a plain shared-type extension.
//   - (false, err): invalid combinations — @key on a non-entity, or an
//     extension of a known entity without @key.
func (e entitySet) isExtensionForEntity(nameBytes []byte, directiveRefs []int, document *ast.Document) (bool, *operationreport.ExternalError) {
	name := string(nameBytes)
	hasDirectives := len(directiveRefs) > 0
	if _, exists := e[name]; !exists {
		// Unknown type: only an error if it pretends to be an entity
		// extension by carrying a @key directive.
		if !hasDirectives || !isEntityExtension(directiveRefs, document) {
			return false, nil
		}
		err := operationreport.ErrExtensionWithKeyDirectiveMustExtendEntity(name)
		return false, &err
	}
	// Known entity: the extension must carry a @key directive.
	if !hasDirectives {
		err := operationreport.ErrEntityExtensionMustHaveKeyDirective(name)
		return false, &err
	}
	if isEntityExtension(directiveRefs, document) {
		return true, nil
	}
	// Directives present but none of them is @key.
	err := operationreport.ErrEntityExtensionMustHaveKeyDirective(name)
	return false, &err
}
|
||||
|
||||
func isEntityExtension(directiveRefs []int, document *ast.Document) bool {
|
||||
for _, directiveRef := range directiveRefs {
|
||||
if document.DirectiveNameString(directiveRef) == "key" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func multipleExtensionError(isEntity bool, nameBytes []byte) *operationreport.ExternalError {
|
||||
if isEntity {
|
||||
err := operationreport.ErrEntitiesMustNotBeDuplicated(string(nameBytes))
|
||||
return &err
|
||||
}
|
||||
err := operationreport.ErrSharedTypesMustNotBeExtended(string(nameBytes))
|
||||
return &err
|
||||
}
|
||||
@@ -0,0 +1,434 @@
|
||||
package sdlmerge
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestMergeSDLs_Success(t *testing.T) {
|
||||
// Both types need to be in the same subgraph or properly federated
|
||||
sdl1 := `
|
||||
type User {
|
||||
id: ID!
|
||||
name: String!
|
||||
}
|
||||
|
||||
type Post {
|
||||
id: ID!
|
||||
title: String!
|
||||
}
|
||||
`
|
||||
|
||||
result, err := MergeSDLs(sdl1)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, result, "User")
|
||||
assert.Contains(t, result, "Post")
|
||||
assert.Contains(t, result, "id")
|
||||
assert.Contains(t, result, "name")
|
||||
assert.Contains(t, result, "title")
|
||||
}
|
||||
|
||||
func TestMergeSDLs_SingleSchema(t *testing.T) {
|
||||
sdl := `
|
||||
type Query {
|
||||
hello: String
|
||||
}
|
||||
`
|
||||
|
||||
result, err := MergeSDLs(sdl)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, result, "Query")
|
||||
assert.Contains(t, result, "hello")
|
||||
}
|
||||
|
||||
// TestMergeSDLs_EmptySchemas calls MergeSDLs with no arguments and only
// asserts that it does not error or crash; the merged output itself is
// not inspected.
func TestMergeSDLs_EmptySchemas(t *testing.T) {
	result, err := MergeSDLs()
	require.NoError(t, err)
	// With no schemas, result will be empty after processing
	// This is valid - just verifies no crash
	_ = result
}
|
||||
|
||||
func TestMergeSDLs_InvalidSyntax(t *testing.T) {
|
||||
invalidSDL := `
|
||||
type User {
|
||||
id: ID!
|
||||
name: String!
|
||||
// Missing closing brace
|
||||
`
|
||||
|
||||
_, err := MergeSDLs(invalidSDL)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "parse graphql document string")
|
||||
}
|
||||
|
||||
func TestMergeSDLs_UnknownType(t *testing.T) {
|
||||
sdl := `
|
||||
type User {
|
||||
id: ID!
|
||||
profile: UnknownType
|
||||
}
|
||||
`
|
||||
|
||||
_, err := MergeSDLs(sdl)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "validate schema")
|
||||
}
|
||||
|
||||
func TestMergeSDLs_DuplicateTypes_DifferentFields(t *testing.T) {
|
||||
// Same type with different fields in different subgraphs - should fail
|
||||
// In federation, shared types must be identical
|
||||
sdl1 := `
|
||||
type User {
|
||||
id: ID!
|
||||
}
|
||||
`
|
||||
sdl2 := `
|
||||
type User {
|
||||
name: String!
|
||||
}
|
||||
`
|
||||
|
||||
_, err := MergeSDLs(sdl1, sdl2)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "shared type")
|
||||
}
|
||||
|
||||
func TestMergeSDLs_ExtendType(t *testing.T) {
|
||||
sdl1 := `
|
||||
type User {
|
||||
id: ID!
|
||||
}
|
||||
`
|
||||
sdl2 := `
|
||||
extend type User {
|
||||
email: String!
|
||||
}
|
||||
`
|
||||
|
||||
result, err := MergeSDLs(sdl1, sdl2)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, result, "User")
|
||||
assert.Contains(t, result, "id")
|
||||
assert.Contains(t, result, "email")
|
||||
}
|
||||
|
||||
func TestMergeSDLs_Scalars(t *testing.T) {
|
||||
sdl := `
|
||||
scalar DateTime
|
||||
|
||||
type Event {
|
||||
id: ID!
|
||||
createdAt: DateTime!
|
||||
}
|
||||
`
|
||||
|
||||
result, err := MergeSDLs(sdl)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, result, "DateTime")
|
||||
assert.Contains(t, result, "Event")
|
||||
}
|
||||
|
||||
func TestMergeSDLs_Enums(t *testing.T) {
|
||||
sdl := `
|
||||
enum Role {
|
||||
ADMIN
|
||||
USER
|
||||
GUEST
|
||||
}
|
||||
|
||||
type User {
|
||||
id: ID!
|
||||
role: Role!
|
||||
}
|
||||
`
|
||||
|
||||
result, err := MergeSDLs(sdl)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, result, "Role")
|
||||
assert.Contains(t, result, "ADMIN")
|
||||
assert.Contains(t, result, "USER")
|
||||
}
|
||||
|
||||
func TestMergeSDLs_Interfaces(t *testing.T) {
|
||||
sdl := `
|
||||
interface Node {
|
||||
id: ID!
|
||||
}
|
||||
|
||||
type User implements Node {
|
||||
id: ID!
|
||||
name: String!
|
||||
}
|
||||
`
|
||||
|
||||
result, err := MergeSDLs(sdl)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, result, "Node")
|
||||
assert.Contains(t, result, "implements")
|
||||
}
|
||||
|
||||
func TestMergeSDLs_Unions(t *testing.T) {
|
||||
sdl := `
|
||||
type User {
|
||||
id: ID!
|
||||
name: String!
|
||||
}
|
||||
|
||||
type Bot {
|
||||
id: ID!
|
||||
version: String!
|
||||
}
|
||||
|
||||
union Actor = User | Bot
|
||||
`
|
||||
|
||||
result, err := MergeSDLs(sdl)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, result, "Actor")
|
||||
assert.Contains(t, result, "User")
|
||||
assert.Contains(t, result, "Bot")
|
||||
}
|
||||
|
||||
func TestMergeSDLs_InputTypes(t *testing.T) {
|
||||
sdl := `
|
||||
input CreateUserInput {
|
||||
name: String!
|
||||
email: String!
|
||||
}
|
||||
|
||||
type Mutation {
|
||||
createUser(input: CreateUserInput!): User
|
||||
}
|
||||
|
||||
type User {
|
||||
id: ID!
|
||||
name: String!
|
||||
}
|
||||
`
|
||||
|
||||
result, err := MergeSDLs(sdl)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, result, "CreateUserInput")
|
||||
assert.Contains(t, result, "createUser")
|
||||
}
|
||||
|
||||
func TestMergeSDLs_Directives(t *testing.T) {
|
||||
sdl := `
|
||||
type User {
|
||||
id: ID!
|
||||
name: String! @deprecated(reason: "Use fullName instead")
|
||||
fullName: String!
|
||||
}
|
||||
`
|
||||
|
||||
result, err := MergeSDLs(sdl)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, result, "User")
|
||||
assert.Contains(t, result, "name")
|
||||
assert.Contains(t, result, "fullName")
|
||||
}
|
||||
|
||||
func TestMergeSDLs_FederationKeys(t *testing.T) {
|
||||
// Federation @key directive
|
||||
sdl := `
|
||||
type User @key(fields: "id") {
|
||||
id: ID!
|
||||
name: String!
|
||||
}
|
||||
`
|
||||
|
||||
result, err := MergeSDLs(sdl)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, result, "User")
|
||||
// @key directive should be removed during merge
|
||||
assert.NotContains(t, result, "@key")
|
||||
}
|
||||
|
||||
func TestMergeSDLs_ExternalFields(t *testing.T) {
|
||||
// Federation @external directive
|
||||
sdl1 := `
|
||||
type User @key(fields: "id") {
|
||||
id: ID!
|
||||
name: String!
|
||||
}
|
||||
`
|
||||
sdl2 := `
|
||||
extend type User @key(fields: "id") {
|
||||
id: ID! @external
|
||||
posts: [Post!]!
|
||||
}
|
||||
|
||||
type Post {
|
||||
id: ID!
|
||||
title: String!
|
||||
}
|
||||
`
|
||||
|
||||
result, err := MergeSDLs(sdl1, sdl2)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, result, "User")
|
||||
assert.Contains(t, result, "Post")
|
||||
// @external fields should be removed
|
||||
assert.NotContains(t, result, "@external")
|
||||
}
|
||||
|
||||
func TestMergeSDLs_ComplexSchema(t *testing.T) {
|
||||
// Multiple subgraphs with various types - simplified to avoid cross-references
|
||||
users := `
|
||||
type User @key(fields: "id") {
|
||||
id: ID!
|
||||
username: String!
|
||||
email: String!
|
||||
}
|
||||
|
||||
type Query {
|
||||
user(id: ID!): User
|
||||
users: [User!]!
|
||||
}
|
||||
`
|
||||
|
||||
posts := `
|
||||
extend type User @key(fields: "id") {
|
||||
id: ID! @external
|
||||
posts: [Post!]!
|
||||
}
|
||||
|
||||
type Post @key(fields: "id") {
|
||||
id: ID!
|
||||
title: String!
|
||||
content: String!
|
||||
}
|
||||
|
||||
extend type Query {
|
||||
post(id: ID!): Post
|
||||
posts: [Post!]!
|
||||
}
|
||||
`
|
||||
|
||||
comments := `
|
||||
extend type Post @key(fields: "id") {
|
||||
id: ID! @external
|
||||
comments: [Comment!]!
|
||||
}
|
||||
|
||||
type Comment {
|
||||
id: ID!
|
||||
text: String!
|
||||
}
|
||||
|
||||
extend type Query {
|
||||
comment(id: ID!): Comment
|
||||
}
|
||||
`
|
||||
|
||||
result, err := MergeSDLs(users, posts, comments)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify all types are present
|
||||
assert.Contains(t, result, "User")
|
||||
assert.Contains(t, result, "Post")
|
||||
assert.Contains(t, result, "Comment")
|
||||
assert.Contains(t, result, "Query")
|
||||
|
||||
// Verify fields from all subgraphs
|
||||
assert.Contains(t, result, "username")
|
||||
assert.Contains(t, result, "posts")
|
||||
assert.Contains(t, result, "comments")
|
||||
}
|
||||
|
||||
func TestMergeSDLs_EmptyTypeDefinition(t *testing.T) {
|
||||
sdl := `
|
||||
type Empty {}
|
||||
`
|
||||
|
||||
_, err := MergeSDLs(sdl)
|
||||
require.Error(t, err)
|
||||
// Empty types are invalid in GraphQL
|
||||
assert.Contains(t, err.Error(), "empty body")
|
||||
}
|
||||
|
||||
func TestMergeSDLs_MultipleValidationErrors(t *testing.T) {
|
||||
// Schema with multiple errors
|
||||
sdl := `
|
||||
type User {
|
||||
id: ID!
|
||||
profile: NonExistentType1
|
||||
settings: NonExistentType2
|
||||
}
|
||||
`
|
||||
|
||||
_, err := MergeSDLs(sdl)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestMergeSDLs_ListTypes(t *testing.T) {
|
||||
sdl := `
|
||||
type User {
|
||||
id: ID!
|
||||
tags: [String!]!
|
||||
friends: [User!]
|
||||
}
|
||||
`
|
||||
|
||||
result, err := MergeSDLs(sdl)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, result, "User")
|
||||
assert.Contains(t, result, "tags")
|
||||
assert.Contains(t, result, "friends")
|
||||
}
|
||||
|
||||
func TestMergeSDLs_NonNullTypes(t *testing.T) {
|
||||
sdl := `
|
||||
type User {
|
||||
id: ID!
|
||||
name: String!
|
||||
email: String
|
||||
}
|
||||
`
|
||||
|
||||
result, err := MergeSDLs(sdl)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, result, "User")
|
||||
assert.Contains(t, result, "id")
|
||||
assert.Contains(t, result, "name")
|
||||
assert.Contains(t, result, "email")
|
||||
}
|
||||
|
||||
func TestMergeSDLs_Comments(t *testing.T) {
|
||||
sdl := `
|
||||
# This is a user type
|
||||
type User {
|
||||
# User ID
|
||||
id: ID!
|
||||
# User name
|
||||
name: String!
|
||||
}
|
||||
`
|
||||
|
||||
result, err := MergeSDLs(sdl)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, result, "User")
|
||||
}
|
||||
|
||||
func TestMergeSDLs_LargeSchema(t *testing.T) {
|
||||
// Test with a reasonably large schema to ensure performance
|
||||
var sdlBuilder strings.Builder
|
||||
for i := 0; i < 50; i++ {
|
||||
sdlBuilder.WriteString("type Type")
|
||||
sdlBuilder.WriteString(strings.Repeat(string(rune('A'+i%26)), 1))
|
||||
sdlBuilder.WriteString(string(rune('0' + i/26)))
|
||||
sdlBuilder.WriteString(" { id: ID }\n")
|
||||
}
|
||||
|
||||
result, err := MergeSDLs(sdlBuilder.String())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify some types are present
|
||||
assert.Contains(t, result, "TypeA0")
|
||||
assert.Contains(t, result, "TypeB0")
|
||||
assert.Contains(t, result, "TypeC0")
|
||||
}
|
||||
@@ -0,0 +1,167 @@
|
||||
package sdlmerge
|
||||
|
||||
import "github.com/wundergraph/graphql-go-tools/v2/pkg/ast"
|
||||
|
||||
type fieldlessSharedType interface {
|
||||
areValuesIdentical(valueRefsToCompare []int) bool
|
||||
valueRefs() []int
|
||||
valueName(ref int) string
|
||||
}
|
||||
|
||||
func createValueSet(f fieldlessSharedType) map[string]bool {
|
||||
valueSet := make(map[string]bool)
|
||||
for _, valueRef := range f.valueRefs() {
|
||||
valueSet[f.valueName(valueRef)] = true
|
||||
}
|
||||
return valueSet
|
||||
}
|
||||
|
||||
type fieldedSharedType struct {
|
||||
document *ast.Document
|
||||
fieldKind ast.NodeKind
|
||||
fieldRefs []int
|
||||
fieldSet map[string]int
|
||||
}
|
||||
|
||||
func newFieldedSharedType(document *ast.Document, fieldKind ast.NodeKind, fieldRefs []int) fieldedSharedType {
|
||||
f := fieldedSharedType{
|
||||
document,
|
||||
fieldKind,
|
||||
fieldRefs,
|
||||
nil,
|
||||
}
|
||||
f.createFieldSet()
|
||||
return f
|
||||
}
|
||||
|
||||
// areFieldsIdentical reports whether the given field refs describe exactly
// the same fields as this type: same count, same names, and a deeply
// compatible type for each name.
func (f fieldedSharedType) areFieldsIdentical(fieldRefsToCompare []int) bool {
	// Equal counts plus every compared name found in the set implies the
	// name sets match (fieldSet keys are unique).
	if len(f.fieldRefs) != len(fieldRefsToCompare) {
		return false
	}
	for _, fieldRef := range fieldRefsToCompare {
		actualFieldName := f.fieldName(fieldRef)
		expectedTypeRef, exists := f.fieldSet[actualFieldName]
		if !exists {
			return false
		}
		actualTypeRef := f.fieldTypeRef(fieldRef)
		if !f.document.TypesAreCompatibleDeep(expectedTypeRef, actualTypeRef) {
			return false
		}
	}
	return true
}
|
||||
|
||||
func (f *fieldedSharedType) createFieldSet() {
|
||||
fieldSet := make(map[string]int)
|
||||
for _, fieldRef := range f.fieldRefs {
|
||||
fieldSet[f.fieldName(fieldRef)] = f.fieldTypeRef(fieldRef)
|
||||
}
|
||||
f.fieldSet = fieldSet
|
||||
}
|
||||
|
||||
func (f fieldedSharedType) fieldName(ref int) string {
|
||||
switch f.fieldKind {
|
||||
case ast.NodeKindInputValueDefinition:
|
||||
return f.document.InputValueDefinitionNameString(ref)
|
||||
default:
|
||||
return f.document.FieldDefinitionNameString(ref)
|
||||
}
|
||||
}
|
||||
|
||||
func (f fieldedSharedType) fieldTypeRef(ref int) int {
|
||||
switch f.fieldKind {
|
||||
case ast.NodeKindInputValueDefinition:
|
||||
return f.document.InputValueDefinitions[ref].Type
|
||||
default:
|
||||
return f.document.FieldDefinitions[ref].Type
|
||||
}
|
||||
}
|
||||
|
||||
type enumSharedType struct {
|
||||
*ast.EnumTypeDefinition
|
||||
document *ast.Document
|
||||
valueSet map[string]bool
|
||||
}
|
||||
|
||||
func newEnumSharedType(document *ast.Document, ref int) enumSharedType {
|
||||
e := enumSharedType{
|
||||
&document.EnumTypeDefinitions[ref],
|
||||
document,
|
||||
nil,
|
||||
}
|
||||
e.valueSet = createValueSet(e)
|
||||
return e
|
||||
}
|
||||
|
||||
func (e enumSharedType) areValuesIdentical(valueRefsToCompare []int) bool {
|
||||
if len(e.valueRefs()) != len(valueRefsToCompare) {
|
||||
return false
|
||||
}
|
||||
for _, valueRefToCompare := range valueRefsToCompare {
|
||||
name := e.valueName(valueRefToCompare)
|
||||
if !e.valueSet[name] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (e enumSharedType) valueRefs() []int {
|
||||
return e.EnumValuesDefinition.Refs
|
||||
}
|
||||
|
||||
func (e enumSharedType) valueName(ref int) string {
|
||||
return e.document.EnumValueDefinitionNameString(ref)
|
||||
}
|
||||
|
||||
type unionSharedType struct {
|
||||
*ast.UnionTypeDefinition
|
||||
document *ast.Document
|
||||
valueSet map[string]bool
|
||||
}
|
||||
|
||||
func newUnionSharedType(document *ast.Document, ref int) unionSharedType {
|
||||
u := unionSharedType{
|
||||
&document.UnionTypeDefinitions[ref],
|
||||
document,
|
||||
nil,
|
||||
}
|
||||
u.valueSet = createValueSet(u)
|
||||
return u
|
||||
}
|
||||
|
||||
func (u unionSharedType) areValuesIdentical(valueRefsToCompare []int) bool {
|
||||
if len(u.valueRefs()) != len(valueRefsToCompare) {
|
||||
return false
|
||||
}
|
||||
for _, refToCompare := range valueRefsToCompare {
|
||||
name := u.valueName(refToCompare)
|
||||
if !u.valueSet[name] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (u unionSharedType) valueRefs() []int {
|
||||
return u.UnionMemberTypes.Refs
|
||||
}
|
||||
|
||||
func (u unionSharedType) valueName(ref int) string {
|
||||
return u.document.TypeNameString(ref)
|
||||
}
|
||||
|
||||
type scalarSharedType struct{}
|
||||
|
||||
func (scalarSharedType) areValuesIdentical(_ []int) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (scalarSharedType) valueRefs() []int {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (scalarSharedType) valueName(_ int) string {
|
||||
return ""
|
||||
}
|
||||
@@ -0,0 +1,50 @@
|
||||
package sdlmerge
|
||||
|
||||
import (
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/ast"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/astvisitor"
|
||||
"github.com/wundergraph/graphql-go-tools/v2/pkg/operationreport"
|
||||
)
|
||||
|
||||
func newExtendUnionTypeDefinition() *extendUnionTypeDefinitionVisitor {
|
||||
return &extendUnionTypeDefinitionVisitor{}
|
||||
}
|
||||
|
||||
type extendUnionTypeDefinitionVisitor struct {
|
||||
*astvisitor.Walker
|
||||
document *ast.Document
|
||||
}
|
||||
|
||||
func (e *extendUnionTypeDefinitionVisitor) Register(walker *astvisitor.Walker) {
|
||||
e.Walker = walker
|
||||
walker.RegisterEnterDocumentVisitor(e)
|
||||
walker.RegisterEnterUnionTypeExtensionVisitor(e)
|
||||
}
|
||||
|
||||
func (e *extendUnionTypeDefinitionVisitor) EnterDocument(operation, _ *ast.Document) {
|
||||
e.document = operation
|
||||
}
|
||||
|
||||
func (e *extendUnionTypeDefinitionVisitor) EnterUnionTypeExtension(ref int) {
|
||||
nodes, exists := e.document.Index.NodesByNameBytes(e.document.UnionTypeExtensionNameBytes(ref))
|
||||
if !exists {
|
||||
return
|
||||
}
|
||||
|
||||
hasExtended := false
|
||||
for i := range nodes {
|
||||
if nodes[i].Kind != ast.NodeKindUnionTypeDefinition {
|
||||
continue
|
||||
}
|
||||
if hasExtended {
|
||||
e.StopWithExternalErr(operationreport.ErrSharedTypesMustNotBeExtended(e.document.UnionTypeExtensionNameString(ref)))
|
||||
return
|
||||
}
|
||||
e.document.ExtendUnionTypeDefinitionByUnionTypeExtension(nodes[i].Ref, ref)
|
||||
hasExtended = true
|
||||
}
|
||||
|
||||
if !hasExtended {
|
||||
e.StopWithExternalErr(operationreport.ErrExtensionOrphansMustResolveInSupergraph(e.document.UnionTypeExtensionNameBytes(ref)))
|
||||
}
|
||||
}
|
||||
@@ -3,26 +3,26 @@
|
||||
-- Seed data: one organization aggregate plus its event stream.
-- NOTE: the scraped diff left both the old (sequence_no) and the new
-- (aggregate_sequence_no) column lists before each values clause, which is
-- invalid SQL; only the current aggregate_sequence_no form is kept here.

insert into aggregates (id, name)
values ('d46ffcb0-19e8-4769-8697-590326ef7b51', 'domain.Organization');

-- Event 1: organization created.
insert into events (name, aggregate_id, aggregate_sequence_no, payload, tstamp, aggregate_name)
values ('domain.OrganizationAdded', 'd46ffcb0-19e8-4769-8697-590326ef7b51', 1, '{"id":"d46ffcb0-19e8-4769-8697-590326ef7b51","time":"2023-04-26T14:46:04.43462+02:00","name":"Unbound Software Development","initiator":"google-oauth2|101953650269257914934"}', '2023-04-26T14:46:04.43462+02:00', 'domain.Organization');

-- Add API keys
insert into events (name, aggregate_id, aggregate_sequence_no, payload, tstamp, aggregate_name)
values ('domain.APIKeyAdded', 'd46ffcb0-19e8-4769-8697-590326ef7b51', 2,
        '{"id":"d46ffcb0-19e8-4769-8697-590326ef7b51","time":"2023-04-26T15:46:54.181929+02:00","organizationId":"","name":"CI","key":"dXNfYWtfeUl2R3RRQUJQTmJzVEFrUeOwxEKY/BwUmvv0yJlvuSQnrkHkZJuTTKSVmRt4UrhV","refs":["Shiny@staging","Shiny@prod"],"read":false,"publish":true,"initiator":"google-oauth2|101953650269257914934"}',
        '2023-04-26 15:46:54.181929 +02:00', 'domain.Organization');

insert into events (name, aggregate_id, aggregate_sequence_no, payload, tstamp, aggregate_name)
values ('domain.APIKeyAdded', 'd46ffcb0-19e8-4769-8697-590326ef7b51', 3,
        '{"id":"d46ffcb0-19e8-4769-8697-590326ef7b51","time":"2023-04-26T15:52:55.955203+02:00","organizationId":"","name":"Gateway","key":"dXNfYWtfdnkzSkRseDNlSDNjcnZzOeOwxEKY/BwUmvv0yJlvuSQnrkHkZJuTTKSVmRt4UrhV","refs":["Shiny@staging","Shiny@prod"],"read":true,"publish":false,"initiator":"google-oauth2|101953650269257914934"}',
        '2023-04-26 15:52:55.955203 +02:00', 'domain.Organization');

insert into events (name, aggregate_id, aggregate_sequence_no, payload, tstamp, aggregate_name)
values ('domain.APIKeyAdded', 'd46ffcb0-19e8-4769-8697-590326ef7b51', 4,
        '{"id":"d46ffcb0-19e8-4769-8697-590326ef7b51","time":"2023-04-26T16:30:00.0011+02:00","organizationId":"","name":"Local dev","key":"dXNfYWtfM0kzaGZndmVaQllyQzdjVOOwxEKY/BwUmvv0yJlvuSQnrkHkZJuTTKSVmRt4UrhV","refs":["Shiny@dev"],"read":true,"publish":true,"initiator":"google-oauth2|101953650269257914934"}',
        '2023-04-26 16:30:00.001100 +02:00', 'domain.Organization');

insert into events (name, aggregate_id, aggregate_sequence_no, payload, tstamp, aggregate_name)
values ('domain.APIKeyAdded', 'd46ffcb0-19e8-4769-8697-590326ef7b51', 5,
        '{"id":"d46ffcb0-19e8-4769-8697-590326ef7b51","time":"2023-04-27T07:43:26.599544+02:00","organizationId":"","name":"Acctest","key":"dXNfYWtfdlVqMzdBMXVraklmaGtKSOOwxEKY/BwUmvv0yJlvuSQnrkHkZJuTTKSVmRt4UrhV","refs":["Shiny@test"],"read":true,"publish":true,"initiator":"google-oauth2|101953650269257914934"}',
        '2023-04-27 07:43:26.599544 +02:00', 'domain.Organization');
|
||||
|
||||
Reference in New Issue
Block a user