Compare commits

...

285 Commits

Author SHA1 Message Date
Tony Garnock-Jones 6468e16790 Bump preserves dep 2024-04-12 19:57:23 +02:00
Tony Garnock-Jones 65101e900e Release independent packages
syndicate@0.40.0
syndicate-macros@0.32.0
syndicate-schema-plugin@0.9.0
syndicate-server@0.45.0
syndicate-tools@0.18.0

Generated by cargo-workspaces
2024-04-10 17:04:25 +02:00
Tony Garnock-Jones 581886835a New dataspace pattern implementation; update HTTP server 2024-04-10 17:03:09 +02:00
Tony Garnock-Jones dcb1aec142 Merge latest changes from the syndicate-protocols repository 2024-04-10 15:43:06 +02:00
Tony Garnock-Jones c0239cf322 And with that we are almost back where we started with http.prs! 2024-04-10 15:16:35 +02:00
Tony Garnock-Jones 9cc4175f24 Cope with HTTP/1.0's optional Host header 2024-04-10 14:54:19 +02:00
Tony Garnock-Jones 70f42dd931 Another revision of http.prs 2024-04-10 14:31:27 +02:00
Tony Garnock-Jones ef1ebe6412 Sigh. <done> turns out to be a good idea in addition to <processing> 2024-04-10 13:24:25 +02:00
Tony Garnock-Jones deec008c66 No taskset on osx 2024-04-10 11:07:22 +02:00
Tony Garnock-Jones 008671d0b2 Bump deps incl preserves-schema for a keyword-avoiding fix 2024-04-09 22:41:58 +02:00
Tony Garnock-Jones 9fcf22e1b5 Merge latest changes from the syndicate-protocols repository 2024-04-09 15:16:46 +02:00
Tony Garnock-Jones ca18ca08df Alternative representation of dataspacePatterns 2024-04-09 09:15:21 +02:00
Tony Garnock-Jones 40ca168eac Repair typo 2024-04-09 09:13:51 +02:00
Tony Garnock-Jones 5a73e8d4c3 Alter dataspacePatterns language to make rec and arr more like dict 2024-04-04 16:31:09 +02:00
Tony Garnock-Jones 91b26001d8 There isn't an /etc/mime.types on OSX 2024-04-03 22:32:54 +02:00
Tony Garnock-Jones b83b39515d Release independent packages
syndicate@0.39.0
syndicate-macros@0.31.0
syndicate-schema-plugin@0.8.0
syndicate-server@0.44.0
syndicate-tools@0.17.0

Generated by cargo-workspaces
2024-04-01 16:53:42 +02:00
Tony Garnock-Jones d9fa6362af Merge latest changes from the syndicate-protocols repository 2024-04-01 16:52:57 +02:00
Tony Garnock-Jones 94598a574b Update HTTP service protocol 2024-04-01 16:52:24 +02:00
Tony Garnock-Jones 80ad0914ed Revise http protocol 2024-04-01 16:52:24 +02:00
Tony Garnock-Jones bdb0cc1023 Repair severe error in turn rollback 2024-04-01 16:52:24 +02:00
Tony Garnock-Jones 710ff91a64 Revise http protocol 2024-04-01 15:56:07 +02:00
Tony Garnock-Jones d3748a286b Release independent packages
syndicate-server@0.43.1

Generated by cargo-workspaces
2024-04-01 15:08:11 +02:00
Tony Garnock-Jones a56aec2c30 Tweak tracing in http_router 2024-04-01 15:01:33 +02:00
Tony Garnock-Jones 0c06ae9601 Repair path matching where no explicit PathPatternElement::Rest is present 2024-04-01 14:58:55 +02:00
Tony Garnock-Jones 1f0c9d2883 Dep bump 2024-03-30 11:36:42 +01:00
Tony Garnock-Jones 615830f799 Release independent packages
syndicate@0.38.0

Generated by cargo-workspaces
2024-03-30 11:02:01 +01:00
Tony Garnock-Jones 3c44768a72 Convenience syndicate::relay::stdio_service 2024-03-30 11:00:22 +01:00
Tony Garnock-Jones 04bb8c2f23 Release independent packages
syndicate@0.37.1

Generated by cargo-workspaces
2024-03-29 10:23:40 +01:00
Tony Garnock-Jones 9084c1781e Repair nested-panic situation 2024-03-29 10:23:21 +01:00
Tony Garnock-Jones 8a817fcb4f Release independent packages
syndicate@0.37.0
syndicate-macros@0.30.0
syndicate-schema-plugin@0.7.0
syndicate-server@0.43.0
syndicate-tools@0.16.0

Generated by cargo-workspaces
2024-03-28 16:33:56 +01:00
Tony Garnock-Jones 2ed2b38edc Repair noise session introduction 2024-03-28 16:32:46 +01:00
Tony Garnock-Jones 5090625f47 Bump deps 2024-03-28 15:50:36 +01:00
Tony Garnock-Jones a7ede65bad Merge latest changes from the syndicate-protocols repository 2024-03-28 15:50:12 +01:00
Tony Garnock-Jones c59e044695 Set embeddedType for noise 2024-03-28 15:49:48 +01:00
Tony Garnock-Jones ef98217a3a Merge latest changes from the syndicate-protocols repository 2024-03-28 15:17:37 +01:00
Tony Garnock-Jones bf0d47f1b7 Repair noise protocol 2024-03-28 15:17:28 +01:00
Tony Garnock-Jones fef41f39eb Release independent packages
syndicate@0.36.1

Generated by cargo-workspaces
2024-03-22 20:51:30 +01:00
Tony Garnock-Jones 0b72b4029b Repair reimported, attenuated references. 2024-03-22 20:51:02 +01:00
Tony Garnock-Jones 40a239c9eb Release independent packages
syndicate-server@0.42.0

Generated by cargo-workspaces
2024-03-22 11:24:21 +01:00
Tony Garnock-Jones 55456621d4 Handle refinement to gatekeeper protocol allowing JIT binding and/or direct rejection 2024-03-22 11:22:58 +01:00
Tony Garnock-Jones 7797a3cd09 Updated description of gatekeeper protocol 2024-03-22 10:11:57 +01:00
Tony Garnock-Jones eb9d9bed0f Generalize target-stompling-avoidance originally only for docker 2024-03-08 10:59:45 +01:00
Tony Garnock-Jones b96c469ef5 Put release profile settings back the way they should be 2024-03-08 10:51:04 +01:00
Tony Garnock-Jones 34f611f4fe Release independent packages
syndicate@0.36.0
syndicate-macros@0.29.0
syndicate-schema-plugin@0.6.0
syndicate-server@0.41.0
syndicate-tools@0.15.0

Generated by cargo-workspaces
2024-03-08 10:48:11 +01:00
Tony Garnock-Jones 58c24c30c4 Update Preserves to 0.995 2024-03-08 10:47:52 +01:00
Tony Garnock-Jones fa990bc042 Implement a $control entity, a message <exit n>, and a --control command-line flag. 2024-03-07 09:27:58 +01:00
Tony Garnock-Jones 060ba36d2e Release independent packages
syndicate-macros@0.28.1

Generated by cargo-workspaces
2024-03-04 10:15:51 +01:00
Tony Garnock-Jones ecd5e87823 Bump deps 2024-03-04 10:15:36 +01:00
Tony Garnock-Jones a401e5fcd1 A little fairer 2024-03-04 10:11:17 +01:00
Tony Garnock-Jones 5db05b2df2 Release independent packages
syndicate@0.35.0
syndicate-macros@0.28.0
syndicate-schema-plugin@0.5.0
syndicate-server@0.40.0
syndicate-tools@0.14.0

Generated by cargo-workspaces
2024-03-04 10:08:47 +01:00
Tony Garnock-Jones f4a4b4d595 Reuse a single Activation per actor: this merges RunningActor with Activation 2024-03-04 10:07:31 +01:00
Tony Garnock-Jones b7d4bd4b58 Avoid uselessly computing turn descriptions when there is no listener for them 2024-03-03 14:15:56 +01:00
Tony Garnock-Jones 41cf85f865 tokio-ring.rs 2024-03-03 10:34:25 +01:00
Tony Garnock-Jones 4fcb14d63e Latency-mode for syndicate-macros/example/ring.rs 2024-03-03 10:34:18 +01:00
Tony Garnock-Jones b4f355aa0d Oops, had ExitStatus without derive Debug 2024-02-24 21:58:56 +01:00
Tony Garnock-Jones 5a431b2060 Clean up imports 2024-02-24 21:58:46 +01:00
Tony Garnock-Jones 1ff222b291 Demote terminate-on-drop to a debug message rather than an error 2024-02-24 13:08:32 +01:00
Tony Garnock-Jones e501d0f76a Repair warnings 2024-02-24 13:06:22 +01:00
Tony Garnock-Jones 2e65d31d5d Release independent packages
syndicate@0.34.0
syndicate-macros@0.27.0
syndicate-schema-plugin@0.4.0
syndicate-server@0.39.0
syndicate-tools@0.13.0

Generated by cargo-workspaces
2024-02-05 23:41:53 +01:00
Tony Garnock-Jones 852f0f4722 Switch embedded from `#!` to `#:` 2024-02-05 23:40:44 +01:00
Tony Garnock-Jones 9850c73993 Merge latest changes from the syndicate-protocols repository 2024-02-05 23:34:05 +01:00
Tony Garnock-Jones 9864ce0ec8 Switch `#!` to `#:` 2024-02-05 23:14:19 +01:00
Tony Garnock-Jones 19b1e84e43 Update deps 2024-02-03 15:25:51 +01:00
Tony Garnock-Jones 3649cc1237 Release independent packages
syndicate@0.33.0
syndicate-macros@0.26.0
syndicate-schema-plugin@0.3.0
syndicate-server@0.38.0
syndicate-tools@0.12.0

Generated by cargo-workspaces
2024-02-03 15:24:55 +01:00
Tony Garnock-Jones 0f2d9239f9 Remove now-retired Float references 2024-02-03 15:24:28 +01:00
Tony Garnock-Jones 0514f11d0f Merge latest changes from the syndicate-protocols repository 2024-02-03 15:17:56 +01:00
Tony Garnock-Jones 12428bbdf6 Switch to Preserves 0.993 2024-02-03 15:17:14 +01:00
Tony Garnock-Jones 5dd68e87c1 Preserves 0.993 lacks float 2024-02-03 15:16:23 +01:00
Tony Garnock-Jones e2a32b891d Release independent packages
syndicate@0.32.0
syndicate-macros@0.25.1
syndicate-schema-plugin@0.2.2
syndicate-server@0.37.0
syndicate-tools@0.11.0

Generated by cargo-workspaces
2024-02-03 15:03:21 +01:00
Tony Garnock-Jones 461ac034f8 Avoid double-execution within a round; see syndicate-lang/syndicate-js#3 2023-12-19 23:12:13 +13:00
Tony Garnock-Jones 19cbceda7a Merge latest changes from the syndicate-protocols repository 2023-12-19 21:38:12 +13:00
Tony Garnock-Jones 97876335ba Save a few bytes on the wire. If not now, never I guess 2023-12-19 21:37:41 +13:00
Tony Garnock-Jones d7b330e6dd stdenv.prs 2023-12-04 22:25:40 +01:00
Tony Garnock-Jones 3cbe17790d Release independent packages
syndicate-server@0.36.1

Generated by cargo-workspaces
2023-11-26 00:27:58 +01:00
Tony Garnock-Jones 1d97ed1b55 Retract request assertions for completed HTTP requests 2023-11-26 00:27:45 +01:00
Tony Garnock-Jones 15914aa153 Another way to do it 2023-11-24 14:38:25 +01:00
Tony Garnock-Jones 4f42bbe7b6 Bump deps (specifically preserves) 2023-11-24 14:26:47 +01:00
Tony Garnock-Jones 9c32a4a4b8 Release independent packages
syndicate@0.31.1
syndicate-schema-plugin@0.2.1
syndicate-server@0.36.0
syndicate-tools@0.10.0

Generated by cargo-workspaces
2023-11-24 14:05:05 +01:00
Tony Garnock-Jones 56f04786ab New gatekeeper internal-service, for partitioning access 2023-11-24 14:04:33 +01:00
Tony Garnock-Jones 545e247c21 Add `--caveat` option to `syndicate-macaroon mint` 2023-11-24 13:23:20 +01:00
Tony Garnock-Jones 06f16d42ec Bump preserves-schema dep 2023-11-18 16:29:25 +01:00
Tony Garnock-Jones fe861e516f Release independent packages
syndicate-server@0.35.2

Generated by cargo-workspaces
2023-11-17 12:55:38 +01:00
Tony Garnock-Jones 13c841ce6e Don't enable HTTP from the command-line -p flag. Closes #3. 2023-11-17 12:55:04 +01:00
Tony Garnock-Jones 9ae1be6f56 Further tweak logging 2023-11-17 12:53:49 +01:00
Tony Garnock-Jones 9786bcb285 Release independent packages
syndicate-server@0.35.1

Generated by cargo-workspaces
2023-11-17 12:50:32 +01:00
Tony Garnock-Jones abb2978b9a Clean up logging 2023-11-17 12:50:17 +01:00
Tony Garnock-Jones b1e20ac706 Update README instructions 2023-11-15 21:06:19 +01:00
Tony Garnock-Jones 34b59cff3b Mention exposed port in Dockerfile 2023-11-15 17:52:16 +01:00
Tony Garnock-Jones d514a5178f Release independent packages
syndicate@0.31.0
syndicate-macros@0.25.0
syndicate-schema-plugin@0.2.0
syndicate-server@0.35.0
syndicate-tools@0.9.0

Generated by cargo-workspaces
2023-11-15 12:07:44 +01:00
Tony Garnock-Jones e88c335735 Bump version 2023-11-15 12:06:03 +01:00
Tony Garnock-Jones a38765affa Static file service 2023-11-14 00:56:10 +01:00
Tony Garnock-Jones 65dae05890 Multiplex regular HTTP on existing TCP/WebSocket connections 2023-11-13 21:52:27 +01:00
Tony Garnock-Jones 090ac8780f Add "KeepAlive" for when a driver is still getting ready to expose an Entity but hasn't done so yet. 2023-11-12 10:14:54 +01:00
Tony Garnock-Jones bbaacd3038 Cargo.lock 2023-11-11 01:36:26 +01:00
Tony Garnock-Jones 1d61ea0c8e Generic pattern_plugin implementation 2023-11-10 23:19:22 +01:00
Tony Garnock-Jones 1e9e60207b Release independent packages
syndicate@0.30.0
syndicate-macros@0.25.0
syndicate-schema-plugin@0.1.0
syndicate-server@0.34.1

Generated by cargo-workspaces
2023-11-10 22:55:47 +01:00
Tony Garnock-Jones 702057023d Split out syndicate-schema-plugin 2023-11-10 22:54:29 +01:00
Tony Garnock-Jones 1f7930d31a ring.rs 2023-11-08 19:30:26 +01:00
Tony Garnock-Jones 764fb3b866 Remove (trivial) unnecessary clone 2023-11-07 00:40:43 +01:00
Tony Garnock-Jones 726265132f Small initial capacity 2023-11-07 00:11:59 +01:00
Tony Garnock-Jones f6b6dd25f1 Small performance win from avoiding use of HashMap in single-receiver case 2023-11-06 23:54:59 +01:00
Tony Garnock-Jones 94c7de2a08 Bump deps 2023-11-01 00:20:50 +01:00
Tony Garnock-Jones e4c2634088 Release independent packages
syndicate@0.30.0
syndicate-macros@0.25.0
syndicate-server@0.34.0
syndicate-tools@0.9.0

Generated by cargo-workspaces
2023-10-31 22:58:28 +01:00
Tony Garnock-Jones cbaeba7bba Update for Preserves 0.991 2023-10-31 22:58:02 +01:00
Tony Garnock-Jones f8c76e9230 Merge latest changes from the syndicate-protocols repository 2023-10-31 22:54:40 +01:00
Tony Garnock-Jones fe9ceaf65c Update comment syntax for Preserves 0.991 2023-10-31 21:56:44 +01:00
Tony Garnock-Jones 60e6c6badf Avoid spurious "Invalid Preserves tag 0" message when server quits before sending anything 2023-10-19 12:40:38 +02:00
Tony Garnock-Jones 2bf2e29dc2 Release independent packages
syndicate@0.29.1
syndicate-server@0.33.2
syndicate-tools@0.8.1

Generated by cargo-workspaces
2023-10-18 22:51:15 +02:00
Tony Garnock-Jones 9a148ecfcc Good grief, I forgot to update the preserves crate versions 2023-10-18 22:50:54 +02:00
Tony Garnock-Jones 2104bc1ff0 Release independent packages
syndicate-server@0.33.1

Generated by cargo-workspaces
2023-10-18 14:22:50 +02:00
Tony Garnock-Jones 17a9c96342 Update protocols for preserves 0.990 2023-10-18 14:22:18 +02:00
Tony Garnock-Jones 3c4ba48624 Release independent packages
syndicate@0.29.0
syndicate-macros@0.24.0
syndicate-server@0.33.0
syndicate-tools@0.8.0

Generated by cargo-workspaces
2023-10-18 14:03:54 +02:00
Tony Garnock-Jones e063a3f84d Merge latest changes from the syndicate-protocols repository 2023-10-18 14:02:38 +02:00
Tony Garnock-Jones 72566ac223 Update for Preserves 0.990 2023-10-18 14:02:28 +02:00
Tony Garnock-Jones 4e30ef48dc Add syndicate-tools to fixtags.sh 2023-10-05 10:01:09 +02:00
Tony Garnock-Jones d66840bae7 Update internal dependencies 2023-10-05 09:59:31 +02:00
Tony Garnock-Jones 768fdd6448 Release independent packages
syndicate@0.28.3
syndicate-macros@0.23.2
syndicate-server@0.32.2
syndicate-tools@0.7.1

Generated by cargo-workspaces
2023-10-05 09:57:24 +02:00
Tony Garnock-Jones 8055895319 BUMP_ARGS 2023-10-05 09:56:39 +02:00
Tony Garnock-Jones a83999d6ed Build each docker image with a separate target directory, because it turns out they seem to pollute each other if they all share one! 2023-10-05 09:53:53 +02:00
Tony Garnock-Jones 1f7b7a02b1 Enable jemalloc feature for simple benchmarking 2023-10-05 09:53:27 +02:00
Tony Garnock-Jones 24b6217897 Make jemalloc optional 2023-10-05 09:47:22 +02:00
Tony Garnock-Jones d517fc4e92 Bump deps 2023-10-05 09:44:07 +02:00
Tony Garnock-Jones a0c40eadd0 Update lockfile 2023-10-05 08:01:55 +02:00
Tony Garnock-Jones fc420d1a86 Bump to pick up macro version bump 2023-10-04 23:24:12 +02:00
Tony Garnock-Jones f3e5652eee New release of syndicate-macros to pick up syn feature flag changes 2023-10-04 22:41:17 +02:00
Tony Garnock-Jones 538ad4244c Hmm the perf increase from mold may have been illusory 2023-10-04 22:00:01 +02:00
Tony Garnock-Jones 1cb2eba0e4 Release independent packages
syndicate-server@0.32.0

Generated by cargo-workspaces
2023-10-04 21:48:35 +02:00
Tony Garnock-Jones a9971fc35a Note about `mold` 2023-10-04 21:48:14 +02:00
Tony Garnock-Jones 8dead81cef 50% performance boost from jemalloc! 2023-10-04 21:28:47 +02:00
Tony Garnock-Jones 16681841a7 Bump version 2023-09-29 14:56:55 +02:00
Tony Garnock-Jones 97fdfe6136 noise mode for syndicate-macaroon 2023-09-29 14:56:35 +02:00
Tony Garnock-Jones c26b67f286 docker-compose.yml 2023-09-29 13:56:09 +02:00
Tony Garnock-Jones 65db64fce1 Update quickstart 2023-09-29 13:55:44 +02:00
Tony Garnock-Jones 0432f8a04a Multiarch docker builds 2023-09-29 13:54:05 +02:00
Tony Garnock-Jones dd69d5caaa A different workaround for https://github.com/dtolnay/proc-macro2/issues/402 2023-09-29 09:42:12 +02:00
Tony Garnock-Jones e6bc6d091f Bump dependencies 2023-09-27 23:31:51 +02:00
Tony Garnock-Jones 4c9505d28e Get the project building again 2023-09-27 23:28:06 +02:00
Tony Garnock-Jones a74cd19526 Remove apparently-useless drop() call 2023-05-26 13:52:31 +02:00
Tony Garnock-Jones 5f3558817e Workaround for rust-embedded/cross issue 598 is no longer required 2023-05-12 11:07:10 +02:00
Tony Garnock-Jones b4a3f743b5 Bump deps; enable extra-traits in syn for Debug impl for syn::Expr and syn::Type 2023-05-12 10:33:15 +02:00
Tony Garnock-Jones a340b127d7 Release independent packages
syndicate@0.28.2

Generated by cargo-workspaces
2023-02-11 21:53:28 +01:00
Tony Garnock-Jones 08486b4b1c Merge latest changes from the syndicate-protocols repository 2023-02-11 21:52:34 +01:00
Tony Garnock-Jones d8a139b23a Switch back to transport sequence representation 2023-02-11 21:49:49 +01:00
Tony Garnock-Jones 990f3fe4cb Release independent packages
syndicate@0.28.1

Generated by cargo-workspaces
2023-02-11 17:45:50 +01:00
Tony Garnock-Jones 3a3c3c0ee4 Merge latest changes from the syndicate-protocols repository 2023-02-11 17:44:34 +01:00
Tony Garnock-Jones 46fd2dec3b Set of any for transports in gatekeeper.Route 2023-02-11 17:43:42 +01:00
Tony Garnock-Jones 7d7b3135ba Release independent packages
syndicate@0.28.0
syndicate-macros@0.23.0
syndicate-server@0.31.0
syndicate-tools@0.6.0

Generated by cargo-workspaces
2023-02-10 16:44:38 +01:00
Tony Garnock-Jones 06d52c43da Merge latest changes from the syndicate-protocols repository 2023-02-09 23:07:58 +01:00
Tony Garnock-Jones 1ae2583414 Remove accidental self-qualification 2023-02-09 23:07:43 +01:00
Tony Garnock-Jones 4dca1b1615 More updates to gatekeeper protocol 2023-02-09 00:17:12 +01:00
Tony Garnock-Jones 45406c75ac Merge latest changes from the syndicate-protocols repository 2023-02-08 23:44:22 +01:00
Tony Garnock-Jones f3c9662607 Another small error 2023-02-08 23:43:51 +01:00
Tony Garnock-Jones f134d0227d Merge latest changes from the syndicate-protocols repository 2023-02-08 23:39:53 +01:00
Tony Garnock-Jones 82624d3007 Another small error 2023-02-08 23:39:42 +01:00
Tony Garnock-Jones 8de00045e6 Merge latest changes from the syndicate-protocols repository 2023-02-08 23:36:37 +01:00
Tony Garnock-Jones 8b690b9103 Repair minor error 2023-02-08 23:36:21 +01:00
Tony Garnock-Jones f8d1acfa3e Merge latest changes from the syndicate-protocols repository 2023-02-08 23:11:49 +01:00
Tony Garnock-Jones 5a52f243e5 Adjust steps in noise and sturdy 2023-02-08 23:11:05 +01:00
Tony Garnock-Jones 6224baa2b6 Avoid variable-arity steps 2023-02-08 23:04:42 +01:00
Tony Garnock-Jones 00c99d96df Simplify 2023-02-08 22:35:34 +01:00
Tony Garnock-Jones 6ec6bbaf41 Incorporate Step, Description 2023-02-08 22:27:41 +01:00
Tony Garnock-Jones ddc94bfa60 Merge latest changes from the syndicate-protocols repository 2023-02-08 22:12:01 +01:00
Tony Garnock-Jones 8619342e5e Refinements 2023-02-08 22:11:45 +01:00
Tony Garnock-Jones 5bcb268ff8 Adjust ResolvePath/TransportConnection/PathStep 2023-02-08 20:36:14 +01:00
Tony Garnock-Jones 7e8dcef0e2 Refactor gatekeeper implementation for new protocols. 2023-02-08 18:01:51 +01:00
Tony Garnock-Jones 9a5d452754 Merge latest changes from the syndicate-protocols repository 2023-02-08 17:47:01 +01:00
Tony Garnock-Jones 9cd2e6776c Refactor gatekeeper protocols. 2023-02-08 17:46:47 +01:00
Tony Garnock-Jones c0d4b535a3 Merge latest changes from the syndicate-protocols repository 2023-02-08 14:35:19 +01:00
Tony Garnock-Jones 3c1cb11779 Allow override of PROTOCOLS_BRANCH 2023-02-08 14:35:15 +01:00
Tony Garnock-Jones a086c1d721 Repair typo 2023-02-07 13:18:18 +01:00
Tony Garnock-Jones bc41182533 Another small repair 2023-02-07 13:11:14 +01:00
Tony Garnock-Jones 2ad99b56b8 Be more precise about HMAC-BLAKE2s-256 and the key length 2023-02-07 12:44:47 +01:00
Tony Garnock-Jones a2013287db Release independent packages
syndicate@0.27.0
syndicate-macros@0.22.0
syndicate-server@0.30.0
syndicate-tools@0.5.0

Generated by cargo-workspaces
2023-02-06 18:15:03 +01:00
Tony Garnock-Jones 7de2752068 Switch to HMAC-BLAKE2s 2023-02-06 17:09:17 +01:00
Tony Garnock-Jones d2c783927c Merge latest changes from the syndicate-protocols repository 2023-02-06 16:31:50 +01:00
Tony Garnock-Jones f6b88ee3fb Switch to HMAC-BLAKE2s 2023-02-06 16:19:03 +01:00
Tony Garnock-Jones ee8a23aa2e Switch from milliseconds to seconds. Fixes #1 2023-02-06 15:36:17 +01:00
Tony Garnock-Jones 833be7b293 Update attenuations 2023-02-06 14:48:18 +01:00
Tony Garnock-Jones 12eaeb8f62 Merge latest changes from the syndicate-protocols repository 2023-02-06 13:35:51 +01:00
Tony Garnock-Jones 5cd0335a79 Argh, previous commit won't work 2023-02-06 11:06:02 +01:00
Tony Garnock-Jones b52da09081 More usable (?) rewrite language 2023-02-06 10:58:16 +01:00
Tony Garnock-Jones 9ca618268e Simplify attenuations 2023-02-06 10:45:41 +01:00
Tony Garnock-Jones 1879c52963 Merge latest changes from the syndicate-protocols repository 2023-02-04 17:09:55 +01:00
Tony Garnock-Jones 9f1f76d0ca Remove racketEvent.prs 2023-02-04 16:30:27 +01:00
Tony Garnock-Jones f4078aabaa Update binary bundle 2023-02-04 13:46:49 +01:00
Tony Garnock-Jones 557a36756f First step of cleanup of protocols 2023-02-04 13:46:34 +01:00
Tony Garnock-Jones 9f88765cf7 Release independent packages
syndicate@0.26.2
syndicate-server@0.29.2

Generated by cargo-workspaces
2023-01-31 15:12:12 +01:00
Tony Garnock-Jones 2a11bc6bbb Use bundled bundle, rather than external file, which isn't found in published crate build 2023-01-31 15:11:50 +01:00
Tony Garnock-Jones 1dac3e5a19 Release independent packages
syndicate@0.26.1
syndicate-server@0.29.1

Generated by cargo-workspaces
2023-01-31 14:21:29 +01:00
Tony Garnock-Jones 2382157039 Oops. Wrong dep on preserves-schema 2023-01-31 14:21:18 +01:00
Tony Garnock-Jones 69c526436f Release independent packages
syndicate@0.26.0
syndicate-macros@0.21.0
syndicate-server@0.29.0
syndicate-tools@0.4.0

Generated by cargo-workspaces
2023-01-31 14:13:06 +01:00
Tony Garnock-Jones 9761e68bd0 Bump 2023-01-31 14:10:57 +01:00
Tony Garnock-Jones 4becf23caa Switch from snow to noise-protocol; Noise responder implementation 2023-01-30 17:30:44 +01:00
Tony Garnock-Jones 94040ae566 More ergonomic guard api 2023-01-30 17:29:25 +01:00
Tony Garnock-Jones c3571a2faf Expose a more flexible interface to relays 2023-01-30 17:28:20 +01:00
Tony Garnock-Jones dbbbc8c1c6 Breaking change: much improved error API 2023-01-30 14:25:58 +01:00
Tony Garnock-Jones 3dea29ffe4 Repair macro for syndicate patterns involving dicts and seqs 2023-01-30 09:38:43 +01:00
Tony Garnock-Jones f3424c160d Groundwork for handling noise connects 2023-01-28 22:45:48 +01:00
Tony Garnock-Jones 049ef9aea7 Merge latest changes from the syndicate-protocols repository 2023-01-27 12:52:58 +01:00
Tony Garnock-Jones 07a5f688be Repair binary bundle 2023-01-27 12:52:07 +01:00
Tony Garnock-Jones 48c61098c4 Merge latest changes from the syndicate-protocols repository 2023-01-27 12:49:17 +01:00
Tony Garnock-Jones fff84d4c2a Update noise mapping 2023-01-27 12:45:02 +01:00
Tony Garnock-Jones bc62cab348 Bump deps 2023-01-27 09:42:41 +01:00
Tony Garnock-Jones 5983cd01f1 Another note re noise 2023-01-23 13:08:12 +01:00
Tony Garnock-Jones e8881f5980 Now I have actually implemented Noise, revise the schema 2023-01-19 12:18:58 +01:00
Tony Garnock-Jones 40b4681a6e Ugh, xsalsa20poly1305 as an AEAD isn't a thing 2023-01-16 16:21:12 +01:00
Tony Garnock-Jones 0f5e033174 noise 2023-01-16 15:52:46 +01:00
Tony Garnock-Jones aae53b5525 Update precompiled form 2023-01-16 15:51:57 +01:00
Tony Garnock-Jones fce32a589c Release independent packages
syndicate@0.25.0
syndicate-macros@0.20.0
syndicate-server@0.28.0
syndicate-tools@0.3.0

Generated by cargo-workspaces
2023-01-16 15:05:48 +01:00
Tony Garnock-Jones bae21fb69b Update deps; in particular, get preserves 3.0, which has the fixed numerics/symbols syntax 2023-01-16 15:03:35 +01:00
Tony Garnock-Jones 25ef92f78e Include syndicate package version in syndicate-server version display 2023-01-09 09:30:46 +01:00
Tony Garnock-Jones 2f6f1dde26 Release independent packages
syndicate@0.24.3

Generated by cargo-workspaces
2023-01-09 09:21:13 +01:00
Tony Garnock-Jones b5564979f0 Repair error in sync handling 2023-01-09 09:20:58 +01:00
Tony Garnock-Jones 5ca6bdb3bb Release independent packages
syndicate@0.24.2

Generated by cargo-workspaces
2023-01-08 13:19:21 +01:00
Tony Garnock-Jones 11b5a187b9 Fix tag format template 2023-01-08 13:19:06 +01:00
Tony Garnock-Jones 1cb89f0b6b Pick up preserves bugfix around schematized embedded-ref deserialization 2023-01-08 13:17:46 +01:00
Tony Garnock-Jones 4c03646567 HTTP 2022-12-13 18:08:34 +13:00
Tony Garnock-Jones 90940b3c3d Bump preserves version 2022-10-26 16:03:30 +02:00
Tony Garnock-Jones eb2bd3cf8e Release independent packages
syndicate@0.24.1
syndicate-macros@0.19.1
syndicate-server@0.27.1
syndicate-tools@0.2.1

Generated by cargo-workspaces
2022-10-26 13:46:28 +02:00
Tony Garnock-Jones 451a298f94 Oops, want independent versioning 2022-10-26 13:45:48 +02:00
Tony Garnock-Jones 181523d05c Redo using clap derive instead of builder 2022-10-26 13:44:31 +02:00
Tony Garnock-Jones 4ce2093e52 Bump deps (specifically to get preserves hex bugfix) 2022-10-26 13:42:44 +02:00
Tony Garnock-Jones 2f3b186262 Switch to cargo-workspaces 2022-10-26 13:41:46 +02:00
Tony Garnock-Jones e21485c44d (cargo-release) version 0.2.0 2022-10-24 15:14:07 +02:00
Tony Garnock-Jones 86347412e7 (cargo-release) version 0.19.0 2022-10-24 15:14:07 +02:00
Tony Garnock-Jones 2d46d87f58 (cargo-release) version 0.27.0 2022-10-24 15:14:07 +02:00
Tony Garnock-Jones 54103f87eb (cargo-release) version 0.24.0 2022-10-24 15:14:06 +02:00
Tony Garnock-Jones 4a6bb3e143 Bump preserves-schema 2022-10-24 15:10:37 +02:00
Tony Garnock-Jones cdfe157fd9 Cargo update 2022-10-18 20:54:51 +02:00
Tony Garnock-Jones fbfafc1d1d (cargo-release) version 0.1.0 2022-10-18 14:14:30 +02:00
Tony Garnock-Jones e1eb7ae3dd Prepare for syndicate-tools v0.1.0 release 2022-10-18 14:14:10 +02:00
Tony Garnock-Jones f2be0d5e62 Cosmetic: remove unwanted comment 2022-10-18 14:06:07 +02:00
Tony Garnock-Jones fc930059d3 syndicate-macaroon 2022-10-18 14:05:12 +02:00
Tony Garnock-Jones bcaf08c602 (cargo-release) version 0.26.0 2022-07-22 18:14:08 +02:00
Tony Garnock-Jones 9293bd3904 (cargo-release) version 0.25.0 2022-07-22 18:13:24 +02:00
Tony Garnock-Jones bf1552d9a8 Use busybox as base rather than a completely empty image, for convenience 2022-05-25 11:02:33 +02:00
Tony Garnock-Jones a7ec157437 Update docker scripting 2022-05-24 17:00:02 +02:00
Tony Garnock-Jones ccfcf6ec26 Docker syndicate-server 2022-05-24 16:51:54 +02:00
Tony Garnock-Jones af679531b4 Bump deps for a ~1% speed boost from tracing 0.1.32 2022-03-09 19:20:39 +01:00
Tony Garnock-Jones ec8ba36d6a Add `stringify` quasi-function 2022-03-01 10:02:30 +01:00
Tony Garnock-Jones ec453b7db7 (cargo-release) version 0.24.0 2022-02-06 23:03:51 +01:00
Tony Garnock-Jones efb76bfe91 Add "never" restart policy 2022-02-06 23:03:21 +01:00
Tony Garnock-Jones fb31ea44cf fixtags.sh 2022-02-04 17:06:18 +01:00
Tony Garnock-Jones d75bfe4e35 (cargo-release) version 0.18.0 2022-02-04 17:00:18 +01:00
Tony Garnock-Jones 393514fb3a (cargo-release) version 0.23.0 2022-02-04 17:00:18 +01:00
Tony Garnock-Jones 406f22703b (cargo-release) version 0.23.0 2022-02-04 17:00:18 +01:00
Tony Garnock-Jones 4f0145e161 Sort directory entries in config scan 2022-02-04 16:59:29 +01:00
Tony Garnock-Jones b09fbdceec Remove hardcoded milestones and system-layer notions 2022-02-04 16:00:15 +01:00
Tony Garnock-Jones b556414fec Merge latest changes from the syndicate-protocols repository 2022-02-04 14:27:02 +01:00
Tony Garnock-Jones ca92d99c52 Remove notion of "system-layer-service" from core protocols 2022-02-04 14:26:50 +01:00
Tony Garnock-Jones 98c76df2f7 Repair accidentally-committed reference to local path (!) 2022-02-04 14:15:28 +01:00
Tony Garnock-Jones 0a0d977a48 Bump deps 2022-02-04 14:13:08 +01:00
Tony Garnock-Jones 8a0675d8ee (cargo-release) version 0.22.0 2022-02-04 14:02:10 +01:00
Tony Garnock-Jones af2578f887 (cargo-release) version 0.17.0 2022-02-04 14:02:10 +01:00
Tony Garnock-Jones 84ebf530d3 (cargo-release) version 0.22.0 2022-02-04 14:02:10 +01:00
Tony Garnock-Jones f88592282d MAJOR REFACTORING OF CORE ASSERTION-TRACKING STRUCTURES. Little impact on API. Read on for details.
2022-02-01 15:22:30 Two problems.

 - If a stop action panics (in `_terminate_facet`), the Facet is dropped before its outbound
   handles are removed. With the code as it stands, this leaks assertions (!!).

 - The logic for removing an outbound handle seems to be running in the wrong facet context???
   (See `f.outbound_handles.remove(&handle)` in the cleanup actions
    - I think I need to remove the for_myself mechanism
    - and add some callbacks to run only on successful commit

2022-02-02 12:12:33 This is hard.

Here's the current implementation:

 - assert
    - inserts into outbound_handles of active facet
    - adds cleanup action describing how to do the retraction
    - enqueues the assert action, which
       - calls e.assert()

 - retract
    - looks up & removes the cleanup action, which
       - enqueues the retract action, which
          - removes from outbound_handles of the WRONG facet in the WRONG actor
          - calls e.retract()

 - _terminate_facet
    - uses outbound_handles to retract the facet's assertions
    - doesn't directly touch cleanup actions, relying on retract to do that
    - if one of a facet's stop actions panics, will drop the facet, leaking its assertions
    - actually, even if a stop action yields `Err`, it will drop the facet and leak assertions
    - yikes

 - facet drop
    - panics if outbound_handles is nonempty

 - actor cleanup
    - relies on facet tree to find assertions to retract

Revised plan:

 - ✓ revise Activation/PendingEvents structures
    - rename `cleanup_actions` to `outbound_assertions`
    - remove `for_myself` queues and `final_actions`
    - add `pre_commit_actions`, `rollback_actions` and `commit_actions`

 - ✓ assert
    - as before
    - but on rollback, removes from `outbound_handles` (if the facet still exists) and
      `outbound_assertions` (always)
    - marks the new assertion as "established" on commit

 - ✓ retract
    - lookup in `outbound_assertions` by handle, using presence as indication it hasn't been
      scheduled in this turn
    - on rollback, put it back in `outbound_assertions` ONLY IF IT IS MARKED ESTABLISHED -
      otherwise it is a retraction of an `assert` that has *also* been rolled back in this turn
    - on commit, remove it from `outbound_handles`
    - enqueue the retract action, which just calls e.retract()

 - ✓ _terminate_facet
    - revised quite a bit now we rely on `RunningActor::cleanup` to use `outbound_assertions`
      rather than the facet tree.
    - still drops Facets on panic, but this is now mostly harmless (reorders retractions a bit)
    - handles `Err` from a stop action more gracefully
    - slightly cleverer tracking of what needs doing based on a `TerminationDirection`
    - now ONLY applies to ORDERLY cleanup of the facet tree. Disorderly cleanup ignores the
      facet tree and just retracts the assertions willy-nilly.

 - ✓ facet drop
    - warn if outbound_handles is nonempty, but don't do anything about it

 - ✓ actor cleanup
    - doesn't use the facet tree at all.
    - cleanly shutting down is done elsewhere
    - uses the remaining entries in `outbound_assertions` (previously `cleanup_actions`) to
      deal with retractions for dropped facets as well as any other facets that haven't been
      cleanly shut down

 - ✓ activate
    - now has a panic_guard::PanicGuard RAII for conveying a crash to an actor in case the
      activation is happening from a linked task or another thread (this wasn't the case in the
      examples that provoked this work, though)
    - simplified
    - explicit commit/rollback decision

 - ✓ Actor::run
    - no longer uses the same path for crash-termination and success-termination
    - instead, for success-termination, takes a turn that calls Activation::stop_root
       - this cleans up the facet tree using _terminate_facet
       - when the turn ends, it notices that the root facet is gone and shuts down the actor
       - so in principle there will be nothing for actor cleanup to do

2022-02-04 13:52:34 This took days. :-(
2022-02-04 13:59:37 +01:00
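The turn-commit bookkeeping sketched in the plan above can be illustrated with a small, self-contained Rust sketch. This is illustrative only and does not reproduce the actual syndicate-rs types; it just models the idea of marking assertions as established on commit, and of undoing unestablished asserts (and their retractions) on rollback:

    use std::collections::HashMap;

    type Handle = u64;

    #[derive(Debug, Clone, Copy, PartialEq)]
    enum AssertionState { Pending, Established }

    #[derive(Default)]
    struct TurnState {
        // Corresponds loosely to `outbound_assertions` in the plan above.
        outbound_assertions: HashMap<Handle, AssertionState>,
        asserted_this_turn: Vec<Handle>,
        retracted_this_turn: Vec<(Handle, AssertionState)>,
    }

    impl TurnState {
        fn assert(&mut self, handle: Handle) {
            self.outbound_assertions.insert(handle, AssertionState::Pending);
            self.asserted_this_turn.push(handle);
        }

        fn retract(&mut self, handle: Handle) {
            // Presence in the map indicates the retraction hasn't already been scheduled this turn.
            if let Some(state) = self.outbound_assertions.remove(&handle) {
                self.retracted_this_turn.push((handle, state));
            }
        }

        fn commit(&mut self) {
            // Assertions made this turn survive it and are marked "established".
            for h in self.asserted_this_turn.drain(..) {
                if let Some(s) = self.outbound_assertions.get_mut(&h) {
                    *s = AssertionState::Established;
                }
            }
            self.retracted_this_turn.clear();
        }

        fn rollback(&mut self) {
            // Assertions made this turn are undone...
            for h in self.asserted_this_turn.drain(..) {
                self.outbound_assertions.remove(&h);
            }
            // ...and retractions are put back ONLY if they were established before this turn;
            // otherwise they retracted an assert that has itself been rolled back.
            for (h, state) in self.retracted_this_turn.drain(..) {
                if state == AssertionState::Established {
                    self.outbound_assertions.insert(h, AssertionState::Established);
                }
            }
        }
    }

    fn main() {
        let mut t = TurnState::default();
        t.assert(1);
        t.commit();      // handle 1 is now established
        t.retract(1);
        t.assert(2);
        t.rollback();    // the retraction of 1 is undone; 2 never happened
        assert!(t.outbound_assertions.contains_key(&1));
        assert!(!t.outbound_assertions.contains_key(&2));
    }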
Tony Garnock-Jones 98731ba968 Merge latest changes from the syndicate-protocols repository 2022-02-03 22:57:58 +01:00
Tony Garnock-Jones d820601eea Better trace messages from dependency tracking 2022-02-03 22:57:21 +01:00
Tony Garnock-Jones 28b0c5b4d5 One-shot daemons shouldn't be considered ready at all, just complete 2022-02-03 22:56:20 +01:00
Tony Garnock-Jones 19c96bdef2 Allow userDefined states 2022-02-03 22:55:06 +01:00
Tony Garnock-Jones 99a027dc26 Remove unwanted commented-out code 2022-02-03 15:59:19 +01:00
Tony Garnock-Jones 9add501124 Remove the (no-op) rollback entirely 2022-02-02 12:21:43 +01:00
Tony Garnock-Jones 38a5279827 Include facet ID in panic message when nonempty outbound_handles at drop time 2022-02-02 12:10:33 +01:00
Tony Garnock-Jones 1244e416d0 clear/deliver -> rollback/commit, and don't commit on drop 2022-02-02 12:10:13 +01:00
Tony Garnock-Jones d7a847de37 Refactor with_facet 2022-02-02 11:52:13 +01:00
Tony Garnock-Jones 4ea07cdd6b Further simplify supervision protocols 2022-01-26 23:37:43 +01:00
Tony Garnock-Jones 70c442ad47 Use a named unit struct instead of () 2022-01-26 23:37:21 +01:00
Tony Garnock-Jones 7e4654c8f7 Simplify and repair stdout/stderr logging in daemons 2022-01-26 23:37:04 +01:00
Tony Garnock-Jones 1111776754 Eliminate need for awkward boot_fn transmission subprotocol 2022-01-26 22:30:47 +01:00
Tony Garnock-Jones cc11120f23 Avoid erasing information immediately prior to it being needed (!) (when we can) 2022-01-26 22:12:45 +01:00
Tony Garnock-Jones e600d59f6e Conditional match expressions. I can't help but feel I'm committing some kind of crime against programming language design here. 2022-01-20 10:17:15 +01:00
Tony Garnock-Jones 9080dc6f1e Fill in the rest of the jolly owl 2022-01-20 10:12:04 +01:00
Tony Garnock-Jones a9f83e0a9d Merge latest changes from the syndicate-protocols repository 2022-01-20 10:12:00 +01:00
Tony Garnock-Jones ab34b62cf1 Refine the trace protocol a bit 2022-01-20 09:40:53 +01:00
Tony Garnock-Jones 4dc613a091 Foundations for causal tracing 2022-01-19 14:40:50 +01:00
Tony Garnock-Jones f7a5edff39 Merge latest changes from the syndicate-protocols repository 2022-01-19 14:36:09 +01:00
Tony Garnock-Jones 5a65256cf3 Syndicate traces 2022-01-19 14:24:21 +01:00
Tony Garnock-Jones 650463ff20 Accommodate extension point 2022-01-17 00:32:16 +01:00
Tony Garnock-Jones c951cea508 Merge latest changes from the syndicate-protocols repository 2022-01-17 00:26:10 +01:00
Tony Garnock-Jones 257c604e2b Repair bad record pattern 2022-01-17 00:22:10 +01:00
Tony Garnock-Jones a06d532006 Extension point. Closes #2 2022-01-16 21:17:36 +01:00
Tony Garnock-Jones 45f9abfd97 (cargo-release) version 0.21.0 2022-01-16 15:15:51 +01:00
Tony Garnock-Jones 894f0a648a (cargo-release) version 0.16.0 2022-01-16 15:15:51 +01:00
Tony Garnock-Jones e6a2a25f62 (cargo-release) version 0.21.0 2022-01-16 15:15:51 +01:00
Tony Garnock-Jones 3d3c1ebf70 Better handling of activation after termination, which repairs a scary-looking-but-harmless panic in config_watcher's private thread 2022-01-16 00:02:33 +01:00
97 changed files with 6730 additions and 2813 deletions

.gitignore (vendored)

@@ -1,3 +1,4 @@
 /target
 **/*.rs.bk
+localdev/
 scratch/

Cargo.lock (generated)

File diff suppressed because it is too large (1709 changed lines).

Cargo.toml

@@ -4,7 +4,9 @@ cargo-features = ["strip"]
 members = [
     "syndicate",
     "syndicate-macros",
+    "syndicate-schema-plugin",
     "syndicate-server",
+    "syndicate-tools",
 ]
 
 # [patch.crates-io]
@@ -24,3 +26,9 @@ strip = true
 
 [profile.bench]
 debug = true
+
+# [patch.crates-io]
+# # Unfortunately, until [1] is fixed (perhaps via [2]), we have to use a patched proc-macro2.
+# # [1]: https://github.com/dtolnay/proc-macro2/issues/402
+# # [2]: https://github.com/dtolnay/proc-macro2/pull/407
+# proc-macro2 = { git = "https://github.com/tonyg/proc-macro2", branch = "repair_span_start_end" }

Makefile

@@ -11,33 +11,28 @@ test:
 test-all:
         cargo test --all-targets
 
-# Try
-#
-#   make release-minor
-#
-# to check things, and
-#
-#   make release-minor RELEASE_DRY_RUN=
-#
-# to do things for real.
-
-RELEASE_DRY_RUN=--dry-run
-release-%:
-        PUBLISH_GRACE_SLEEP=15 cargo release \
-                $(RELEASE_DRY_RUN) \
-                -vv --no-dev-version --exclude-unchanged \
-                $*
+ws-bump:
+        cargo workspaces version \
+                --no-global-tag \
+                --individual-tag-prefix '%n-v' \
+                --allow-branch 'main' \
+                $(BUMP_ARGS)
+
+ws-publish:
+        cargo workspaces publish \
+                --from-git
 
+PROTOCOLS_BRANCH=main
 pull-protocols:
         git subtree pull -P syndicate/protocols \
                 -m 'Merge latest changes from the syndicate-protocols repository' \
                 git@git.syndicate-lang.org:syndicate-lang/syndicate-protocols \
-                main
+                $(PROTOCOLS_BRANCH)
 
 static: static-x86_64
 
 static-%:
-        cross build --target $*-unknown-linux-musl --features vendored-openssl
+        CARGO_TARGET_DIR=target/target.$* cross build --target $*-unknown-linux-musl --features vendored-openssl,jemalloc
 
 ###########################################################################
@@ -59,28 +54,27 @@ static-%:
 x86_64-binary: x86_64-binary-release
 
 x86_64-binary-release:
-        cross build --target x86_64-unknown-linux-musl --release --all-targets --features vendored-openssl
+        CARGO_TARGET_DIR=target/target.x86_64 cross build --target x86_64-unknown-linux-musl --release --all-targets --features vendored-openssl,jemalloc
 
 x86_64-binary-debug:
-        cross build --target x86_64-unknown-linux-musl --all-targets --features vendored-openssl
+        CARGO_TARGET_DIR=target/target.x86_64 cross build --target x86_64-unknown-linux-musl --all-targets --features vendored-openssl
 
 armv7-binary: armv7-binary-release
 
 armv7-binary-release:
-        cross build --target=armv7-unknown-linux-musleabihf --release --all-targets --features vendored-openssl
+        CARGO_TARGET_DIR=target/target.armv7 cross build --target=armv7-unknown-linux-musleabihf --release --all-targets --features vendored-openssl
 
 armv7-binary-debug:
-        cross build --target=armv7-unknown-linux-musleabihf --all-targets --features vendored-openssl
+        CARGO_TARGET_DIR=target/target.armv7 cross build --target=armv7-unknown-linux-musleabihf --all-targets --features vendored-openssl
 
-# Hack to workaround https://github.com/rust-embedded/cross/issues/598
-HACK_WORKAROUND_ISSUE_598=CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_RUSTFLAGS="-C link-arg=/usr/local/aarch64-linux-musl/lib/libc.a"
+# As of 2023-05-12 (and probably earlier!) this is no longer required with current Rust nightlies
+# # Hack to workaround https://github.com/rust-embedded/cross/issues/598
+# HACK_WORKAROUND_ISSUE_598=CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_RUSTFLAGS="-C link-arg=/usr/local/aarch64-linux-musl/lib/libc.a"
 
 aarch64-binary: aarch64-binary-release
 
 aarch64-binary-release:
-        $(HACK_WORKAROUND_ISSUE_598) \
-        cross build --target=aarch64-unknown-linux-musl --release --all-targets --features vendored-openssl
+        CARGO_TARGET_DIR=target/target.aarch64 cross build --target=aarch64-unknown-linux-musl --release --all-targets --features vendored-openssl,jemalloc
 
 aarch64-binary-debug:
-        $(HACK_WORKAROUND_ISSUE_598) \
-        cross build --target=aarch64-unknown-linux-musl --all-targets --features vendored-openssl
+        CARGO_TARGET_DIR=target/target.aarch64 cross build --target=aarch64-unknown-linux-musl --all-targets --features vendored-openssl

README.md

@@ -23,16 +23,30 @@ A Rust implementation of:
 ## Quickstart
 
+From docker or podman:
+
+    docker run -it --rm leastfixedpoint/syndicate-server /syndicate-server -p 8001
+
+Build and run from source:
+
     git clone https://git.syndicate-lang.org/syndicate-lang/syndicate-rs
     cd syndicate-rs
     cargo build --release
     ./target/release/syndicate-server -p 8001
 
+If you have [`mold`](https://github.com/rui314/mold) available (`apt install mold`), you may be
+able to get faster linking by creating `.cargo/config.toml` as follows:
+
+    [build]
+    rustflags = ["-C", "link-arg=-fuse-ld=mold"]
+
+Enabling the `jemalloc` feature can get a *substantial* (~20%-50%) improvement in throughput.
+
 ## Running the examples
 
-In one window, start the server:
+In one window, start the server with a basic configuration:
 
-    ./target/release/syndicate-server -p 8001
+    ./target/release/syndicate-server -c dev-scripts/benchmark-config.pr
 
 Then, choose one of the examples below.
@@ -70,7 +84,7 @@ about who kicks off the pingpong session.
 You may find better performance by restricting the server to fewer
 cores than you have available. For example, for me, running
 
-    taskset -c 0,1 ./target/release/syndicate-server -p 8001
+    taskset -c 0,1 ./target/release/syndicate-server -c dev-scripts/benchmark-config.pr
 
-roughly *quadruples* throughput for a single producer/consumer pair,
+roughly *doubles* throughput for a single producer/consumer pair,
 on my 48-core AMD CPU.

@@ -1,3 +1,3 @@
 let ?root_ds = dataspace
-<require-service <relay-listener <tcp "0.0.0.0" 8001> $gatekeeper>>
-<bind "syndicate" #x"" $root_ds>
+<require-service <relay-listener <tcp "0.0.0.0" 9001> $gatekeeper>>
+<bind <ref { oid: "syndicate" key: #x"" }> $root_ds #f>

@@ -1,2 +1,7 @@
 #!/bin/sh
-make -C ../syndicate-server binary && exec taskset -c 0,1 ../target/release/syndicate-server -c benchmark-config.pr "$@"
+TASKSET='taskset -c 0,1'
+if [ $(uname -s) = 'Darwin' ]
+then
+    TASKSET=
+fi
+make -C ../syndicate-server binary && exec $TASKSET ../target/release/syndicate-server -c benchmark-config.pr "$@"

docker/.gitignore (vendored, new file)

@@ -0,0 +1 @@
syndicate-server.*

docker/Dockerfile (new file)

@@ -0,0 +1,6 @@
FROM busybox
RUN mkdir /data
ARG TARGETARCH
COPY ./syndicate-server.$TARGETARCH /syndicate-server
EXPOSE 1
CMD ["/syndicate-server", "-c", "/data", "-p", "1"]

docker/Makefile (new file)

@@ -0,0 +1,37 @@
U=leastfixedpoint
I=syndicate-server
ARCHITECTURES:=amd64 arm arm64
SERVERS:=$(patsubst %,syndicate-server.%,$(ARCHITECTURES))
VERSION=$(shell ./syndicate-server.$(shell ./docker-architecture $$(uname -m)) --version | cut -d' ' -f2)
all:
.PHONY: all clean image push push-only
clean:
rm -f syndicate-server.*
-podman images -q $(U)/$(I) | sort -u | xargs podman rmi -f
image: $(SERVERS)
for A in $(ARCHITECTURES); do set -x; \
podman build --platform=linux/$$A \
-t $(U)/$(I):$(VERSION)-$$A \
-t $(U)/$(I):latest-$$A \
.; \
done
rm -f tmp.image
push: image push-only
push-only:
$(patsubst %,podman push $(U)/$(I):$(VERSION)-%;,$(ARCHITECTURES))
$(patsubst %,podman push $(U)/$(I):latest-%;,$(ARCHITECTURES))
podman rmi -f $(U)/$(I):$(VERSION) $(U)/$(I):latest
podman manifest create $(U)/$(I):$(VERSION) $(patsubst %,$(U)/$(I):$(VERSION)-%,$(ARCHITECTURES))
podman manifest create $(U)/$(I):latest $(patsubst %,$(U)/$(I):latest-%,$(ARCHITECTURES))
podman manifest push $(U)/$(I):$(VERSION)
podman manifest push $(U)/$(I):latest
syndicate-server.%:
make -C .. $$(./alpine-architecture $*)-binary-release
cp -a ../target/target.$$(./alpine-architecture $*)/$$(./alpine-architecture $*)-unknown-linux-musl*/release/syndicate-server $@

docker/README.md (new file)

@@ -0,0 +1,9 @@
# Docker images for syndicate-server
Build using podman:
apt install podman
and at least until the dependencies are fixed (?),
apt install uidmap slirp4netns

docker/alpine-architecture (new executable file)

@@ -0,0 +1,6 @@
#!/bin/sh
case $1 in
amd64) echo x86_64;;
arm) echo armv7;;
arm64) echo aarch64;;
esac

docker/docker-architecture (new executable file)

@@ -0,0 +1,6 @@
#!/bin/sh
case $1 in
x86_64) echo amd64;;
armv7) echo arm;;
aarch64) echo arm64;;
esac

docker-compose.yml (new file)

@@ -0,0 +1,9 @@
version: "3"
services:
syndicate:
image: leastfixedpoint/syndicate-server
ports:
- "1:1"
volumes:
- "/etc/syndicate:/data"

fixtags.sh (new executable file)

@@ -0,0 +1,12 @@
#!/bin/sh
buildtag() {
name=$(grep '^name' "$1" | head -1 | sed -e 's:^.*"\([^"]*\)":\1:')
version=$(grep '^version' "$1" | head -1 | sed -e 's:^.*"\([^"]*\)":\1:')
echo "$name-v$version"
}
git tag "$(buildtag syndicate/Cargo.toml)"
git tag "$(buildtag syndicate-macros/Cargo.toml)"
git tag "$(buildtag syndicate-server/Cargo.toml)"
git tag "$(buildtag syndicate-tools/Cargo.toml)"

gatekeeper-config.pr (new file)

@@ -0,0 +1,152 @@
# We will create a TCP listener on port 9222, which speaks unencrypted
# protocol and allows interaction with the default/system gatekeeper, which
# has a single noise binding for introducing encrypted interaction with a
# *second* gatekeeper, which finally allows resolution of references to
# other objects.
# First, build a space where we place bindings for the inner gatekeeper to
# expose.
let ?inner-bindings = dataspace
# Next, start the inner gatekeeper.
<require-service <gatekeeper $inner-bindings>>
? <service-object <gatekeeper $inner-bindings> ?inner-gatekeeper> [
# Expose it via a noise binding at the outer/system gatekeeper.
<bind <noise { key: #[z1w/OLy0wi3Veyk8/D+2182YxcrKpgc8y0ZJEBDrmWs],
secretKey: #[qLkyuJw/K4yobr4XVKExbinDwEx9QTt9PfDWyx14/kg],
service: world }>
$inner-gatekeeper #f>
]
# Now, expose the outer gatekeeper to the world, via TCP. The system
# gatekeeper is a primordial syndicate-server object bound to $gatekeeper.
<require-service <relay-listener <tcp "0.0.0.0" 9222> $gatekeeper>>
# Finally, let's expose some behaviour accessible via the inner gatekeeper.
#
# We will create a service dataspace called $world.
let ?world = dataspace
# Running `syndicate-macaroon mint --oid a-service --phrase hello` yields:
#
# <ref {oid: a-service, sig: #[JTTGQeYCgohMXW/2S2XH8g]}>
#
# That's a root capability for the service. We use the corresponding
# sturdy.SturdyDescriptionDetail to bind it to $world.
#
$inner-bindings += <bind <ref {oid: a-service, key: #"hello"}>
$world #f>
# Now, we can hand out paths to our services involving an initial noise
# step and a subsequent sturdyref/macaroon step.
#
# For example, running `syndicate-macaroon` like this:
#
# syndicate-macaroon mint --oid a-service --phrase hello \
# --caveat '<rewrite <bind <_>> <rec labelled [<lit "alice"> <ref 0>]>>'
#
# generates
#
# <ref {caveats: [<rewrite <bind <_>> <rec labelled [<lit "alice">, <ref 0>]>>],
# oid: a-service,
# sig: #[CXn7+rAoO3Xr6Y6Laap3OA]}>
#
# which is an attenuation of the root capability we bound that wraps all
# assertions and messages in a `<labelled "alice" _>` wrapper.
#
# All together, the `gatekeeper.Route` that Alice would use would be
# something like:
#
# <route [<ws "wss://generic-dataspace.demo.leastfixedpoint.com/">]
# <noise { key: #[z1w/OLy0wi3Veyk8/D+2182YxcrKpgc8y0ZJEBDrmWs],
# service: world }>
# <ref { caveats: [<rewrite <bind <_>> <rec labelled [<lit "alice">, <ref 0>]>>],
# oid: a-service,
# sig: #[CXn7+rAoO3Xr6Y6Laap3OA] }>>
#
# Here's one for "bob":
#
# syndicate-macaroon mint --oid a-service --phrase hello \
# --caveat '<rewrite <bind <_>> <rec labelled [<lit "bob"> <ref 0>]>>'
#
# <ref {caveats: [<rewrite <bind <_>> <rec labelled [<lit "bob">, <ref 0>]>>],
# oid: a-service,
# sig: #[/75BbF77LOiqNcvpzNHf0g]}>
#
# <route [<ws "wss://generic-dataspace.demo.leastfixedpoint.com/">]
# <noise { key: #[z1w/OLy0wi3Veyk8/D+2182YxcrKpgc8y0ZJEBDrmWs],
# service: world }>
# <ref { caveats: [<rewrite <bind <_>> <rec labelled [<lit "bob">, <ref 0>]>>],
# oid: a-service,
# sig: #[/75BbF77LOiqNcvpzNHf0g] }>>
#
# We relay labelled to unlabelled information, enacting a chat protocol
# that enforces usernames.
$world [
# Assertions of presence have the username wiped out and replaced with the label.
? <labelled ?who <Present _>> <Present $who>
# Likewise utterance messages.
?? <labelled ?who <Says _ ?what>> ! <Says $who $what>
# We allow anyone to subscribe to presence and utterances.
? <labelled _ <Observe <rec Present ?p> ?o>> <Observe <rec Present $p> $o>
? <labelled _ <Observe <rec Says ?p> ?o>> <Observe <rec Says $p> $o>
]
# We can also use sturdyref rewrites to directly handle `Says` and
# `Present` values, rather than wrapping with `<labelled ...>` and
# unwrapping using the script fragment just above.
#
# The multiply-quoted patterns in the `Observe` cases start to get unwieldy
# at this point!
#
# For Alice:
#
# syndicate-macaroon mint --oid a-service --phrase hello --caveat '<or [
# <rewrite <rec Present [<_>]> <rec Present [<lit "alice">]>>
# <rewrite <rec Says [<_> <bind String>]> <rec Says [<lit "alice"> <ref 0>]>>
# <rewrite <bind <rec Observe [<rec rec [<lit Present> <_>]> <_>]>> <ref 0>>
# <rewrite <bind <rec Observe [<rec rec [<lit Says> <_>]> <_>]>> <ref 0>>
# ]>'
#
# <ref { oid: a-service sig: #[s918Jk6As8AWJ9rtozOTlg] caveats: [<or [
# <rewrite <rec Present [<_>]> <rec Present [<lit "alice">]>>
# <rewrite <rec Says [<_>, <bind String>]> <rec Says [<lit "alice">, <ref 0>]>>
# <rewrite <bind <rec Observe [<rec rec [<lit Present>, <_>]>, <_>]>> <ref 0>>
# <rewrite <bind <rec Observe [<rec rec [<lit Says>, <_>]>, <_>]>> <ref 0>> ]>]}>
#
# <route [<ws "wss://generic-dataspace.demo.leastfixedpoint.com/">]
# <noise { key: #[z1w/OLy0wi3Veyk8/D+2182YxcrKpgc8y0ZJEBDrmWs],
# service: world }>
# <ref { oid: a-service sig: #[s918Jk6As8AWJ9rtozOTlg] caveats: [<or [
# <rewrite <rec Present [<_>]> <rec Present [<lit "alice">]>>
# <rewrite <rec Says [<_>, <bind String>]> <rec Says [<lit "alice">, <ref 0>]>>
# <rewrite <bind <rec Observe [<rec rec [<lit Present>, <_>]>, <_>]>> <ref 0>>
# <rewrite <bind <rec Observe [<rec rec [<lit Says>, <_>]>, <_>]>> <ref 0>> ]>]}>>
#
# For Bob:
#
# syndicate-macaroon mint --oid a-service --phrase hello --caveat '<or [
# <rewrite <rec Present [<_>]> <rec Present [<lit "bob">]>>
# <rewrite <rec Says [<_> <bind String>]> <rec Says [<lit "bob"> <ref 0>]>>
# <rewrite <bind <rec Observe [<rec rec [<lit Present> <_>]> <_>]>> <ref 0>>
# <rewrite <bind <rec Observe [<rec rec [<lit Says> <_>]> <_>]>> <ref 0>>
# ]>'
#
# <ref { oid: a-service sig: #[QBbV4LrS0i3BG6OyCPJl+A] caveats: [<or [
# <rewrite <rec Present [<_>]> <rec Present [<lit "bob">]>>
# <rewrite <rec Says [<_>, <bind String>]> <rec Says [<lit "bob">, <ref 0>]>>
# <rewrite <bind <rec Observe [<rec rec [<lit Present>, <_>]>, <_>]>> <ref 0>>
# <rewrite <bind <rec Observe [<rec rec [<lit Says>, <_>]>, <_>]>> <ref 0>> ]>]}>
#
# <route [<ws "wss://generic-dataspace.demo.leastfixedpoint.com/">]
# <noise { key: #[z1w/OLy0wi3Veyk8/D+2182YxcrKpgc8y0ZJEBDrmWs],
# service: world }>
# <ref { oid: a-service sig: #[QBbV4LrS0i3BG6OyCPJl+A] caveats: [<or [
# <rewrite <rec Present [<_>]> <rec Present [<lit "bob">]>>
# <rewrite <rec Says [<_>, <bind String>]> <rec Says [<lit "bob">, <ref 0>]>>
# <rewrite <bind <rec Observe [<rec rec [<lit Present>, <_>]>, <_>]>> <ref 0>>
# <rewrite <bind <rec Observe [<rec rec [<lit Says>, <_>]>, <_>]>> <ref 0>> ]>]}>>

http-config.pr (new file)

@@ -0,0 +1,65 @@
# We use $root_ds as the httpd space.
let ?root_ds = dataspace
# Supplying $root_ds as the last parameter in this relay-listener enables httpd service.
<require-service <relay-listener <tcp "0.0.0.0" 9001> $gatekeeper $root_ds>>
# Regular gatekeeper stuff works too.
<bind <ref { oid: "syndicate" key: #x"" }> $root_ds #f>
# Create an httpd router monitoring $root_ds for requests and bind requests.
<require-service <http-router $root_ds>>
# Create a static file server. When it gets a request, it ignores the first n (here, 1)
# elements of the path, and takes the remainder as relative to its configured directory (here,
# ".").
#
<require-service <http-static-files "." 1>>
#
# It publishes a service object: requests should be asserted to this.
# The http-bind record establishes this mapping.
#
? <service-object <http-static-files "." 1> ?handler> [
$root_ds += <http-bind #f 9001 get ["files" ...] $handler>
]
# Separately, bind path /d to $index, and respond there.
#
let ?index = dataspace
$root_ds += <http-bind #f 9001 get ["d"] $index>
$index ? <request _ ?k> [
$k ! <status 200 "OK">
$k ! <header content-type "text/html">
$k ! <chunk "<!DOCTYPE html>">
$k ! <done "<html><body>D</body></html>">
]
# Similarly, bind three paths, /d, /e and /t to $index2
# Because /d doubles up, the httpd router gives a warning when it is accessed.
# Accessing /e works fine.
# Accessing /t results in wasted work because of the hijacking listeners below.
#
let ?index2 = dataspace
$root_ds += <http-bind #f 9001 get ["d"] $index2>
$root_ds += <http-bind #f 9001 get ["e"] $index2>
$root_ds += <http-bind #f 9001 get ["t"] $index2>
$index2 ? <request _ ?k> [
$k ! <status 200 "OK">
$k ! <header content-type "text/html">
$k ! <chunk "<!DOCTYPE html>">
$k ! <done "<html><body>D2</body></html>">
]
# These two hijack /t by listening for raw incoming requests the same way the httpd router
# does. They respond quicker and so win the race. The httpd router's responses are lost.
#
$root_ds ? <request <http-request _ _ _ get ["t"] _ _ _> ?k> [
$k ! <status 200 "OK">
$k ! <header content-type "text/html">
$k ! <done "<html><body>T</body></html>">
]
$root_ds ? <request <http-request _ _ _ get ["t"] _ _ _> ?k> [
$k ! <status 200 "OK">
$k ! <header content-type "text/html">
$k ! <done "<html><body>T2</body></html>">
]

syndicate-macros/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "syndicate-macros"
-version = "0.15.0"
+version = "0.32.0"
 authors = ["Tony Garnock-Jones <tonyg@leastfixedpoint.com>"]
 edition = "2018"
@@ -13,12 +13,15 @@ license = "Apache-2.0"
 proc-macro = true
 
 [dependencies]
-syndicate = { path = "../syndicate", version = "^0.20.0"}
+syndicate = { path = "../syndicate", version = "0.40.0"}
 proc-macro2 = { version = "^1.0", features = ["span-locations"] }
 quote = "^1.0"
-syn = "^1.0"
+syn = { version = "^1.0", features = ["extra-traits"] } # for impl Debug for syn::Expr
 
 [dev-dependencies]
 tokio = { version = "1.10", features = ["io-std"] }
 tracing = "0.1"
+
+[package.metadata.workspaces]
+independent = true

@@ -6,13 +6,13 @@ use syndicate::schemas::dataspace::Observe;
 use syndicate::value::NestedValue;
 
 #[tokio::main]
-async fn main() -> Result<(), Box<dyn std::error::Error>> {
+async fn main() -> ActorResult {
     syndicate::convenient_logging()?;
-    Actor::new(None).boot(tracing::Span::current(), |t| {
-        let ds = Cap::new(&t.create(Dataspace::new()));
+    Actor::top(None, |t| {
+        let ds = Cap::new(&t.create(Dataspace::new(None)));
         let _ = t.prevent_inert_check();
-        t.spawn(syndicate::name!("box"), enclose!((ds) move |t| {
+        t.spawn(Some(AnyValue::symbol("box")), enclose!((ds) move |t| {
             let current_value = t.named_field("current_value", 0u64);
             t.dataflow({
@@ -49,7 +49,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
             Ok(())
         }));
 
-        t.spawn(syndicate::name!("client"), enclose!((ds) move |t| {
+        t.spawn(Some(AnyValue::symbol("client")), enclose!((ds) move |t| {
             let box_state_handler = syndicate::entity(0u32)
                 .on_asserted(enclose!((ds) move |count, t, captures: AnyValue| {
                     *count = *count + 1;
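For orientation, a minimal top-level program in the newer style shown on the right-hand side of this diff might look roughly as follows. This is a hedged sketch assuming the `Actor::top`, `AnyValue::symbol`, and `ActorResult` names visible elsewhere in this comparison; it is not taken from the repository:

    use syndicate::actor::*;

    #[tokio::main]
    async fn main() -> ActorResult {
        syndicate::convenient_logging()?;
        // Boot a top-level actor with Actor::top instead of Actor::new(...).boot(...).
        Actor::top(None, |t| {
            // Actors/facets are now named with AnyValue rather than syndicate::name!.
            t.spawn(Some(AnyValue::symbol("greeter")), |_t| {
                tracing::info!("hello from the spawned actor");
                Ok(())
            });
            Ok(())
        }).await??;
        Ok(())
    }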

ring.rs (new file)

@@ -0,0 +1,133 @@
use syndicate::actor::*;
use std::env;
use std::sync::Arc;
#[derive(Debug)]
enum Instruction {
SetPeer(Arc<Ref<Instruction>>),
HandleMessage(u64),
}
struct Forwarder {
hop_limit: u64,
supervisor: Arc<Ref<Instruction>>,
peer: Option<Arc<Ref<Instruction>>>,
}
impl Drop for Forwarder {
fn drop(&mut self) {
let r = self.peer.take();
let _ = tokio::spawn(async move {
drop(r);
});
}
}
impl Entity<Instruction> for Forwarder {
fn message(&mut self, turn: &mut Activation, message: Instruction) -> ActorResult {
match message {
Instruction::SetPeer(r) => {
tracing::info!("Setting peer {:?}", r);
self.peer = Some(r);
}
Instruction::HandleMessage(n) => {
let target = if n >= self.hop_limit { &self.supervisor } else { self.peer.as_ref().expect("peer") };
turn.message(target, Instruction::HandleMessage(n + 1));
}
}
Ok(())
}
}
struct Supervisor {
latency_mode: bool,
total_transfers: u64,
remaining_to_receive: u32,
start_time: Option<std::time::Instant>,
}
impl Entity<Instruction> for Supervisor {
fn message(&mut self, turn: &mut Activation, message: Instruction) -> ActorResult {
match message {
Instruction::SetPeer(_) => {
tracing::info!("Start");
self.start_time = Some(std::time::Instant::now());
},
Instruction::HandleMessage(_n) => {
self.remaining_to_receive -= 1;
if self.remaining_to_receive == 0 {
let stop_time = std::time::Instant::now();
let duration = stop_time - self.start_time.unwrap();
tracing::info!("Stop after {:?}; {:?} messages, so {:?} Hz ({} mode)",
duration,
self.total_transfers,
(1000.0 * self.total_transfers as f64) / duration.as_millis() as f64,
if self.latency_mode { "latency" } else { "throughput" });
turn.stop_root();
}
},
}
Ok(())
}
}
#[tokio::main]
async fn main() -> ActorResult {
syndicate::convenient_logging()?;
Actor::top(None, |t| {
let args: Vec<String> = env::args().collect();
let n_actors: u32 = args.get(1).unwrap_or(&"1000000".to_string()).parse()?;
let n_rounds: u32 = args.get(2).unwrap_or(&"200".to_string()).parse()?;
let latency_mode: bool = match args.get(3).unwrap_or(&"throughput".to_string()).as_str() {
"latency" => true,
"throughput" => false,
_other => return Err("Invalid throughput/latency mode".into()),
};
tracing::info!("Will run {:?} actors for {:?} rounds", n_actors, n_rounds);
let total_transfers: u64 = n_actors as u64 * n_rounds as u64;
let (hop_limit, injection_count) = if latency_mode {
(total_transfers, 1)
} else {
(n_rounds as u64, n_actors)
};
let me = t.create(Supervisor {
latency_mode,
total_transfers,
remaining_to_receive: injection_count,
start_time: None,
});
let mut forwarders: Vec<Arc<Ref<Instruction>>> = Vec::new();
for _i in 0 .. n_actors {
if _i % 10000 == 0 { tracing::info!("Actor {:?}", _i); }
forwarders.push(
t.spawn_for_entity(None, true, Box::new(
Forwarder {
hop_limit,
supervisor: me.clone(),
peer: forwarders.last().cloned(),
}))
.0.expect("an entity"));
}
t.message(&forwarders[0], Instruction::SetPeer(forwarders.last().expect("an entity").clone()));
t.later(move |t| {
t.message(&me, Instruction::SetPeer(me.clone()));
t.later(move |t| {
let mut injected: u32 = 0;
for f in forwarders.into_iter() {
if injected >= injection_count {
break;
}
t.message(&f, Instruction::HandleMessage(0));
injected += 1;
}
Ok(())
});
Ok(())
});
Ok(())
}).await??;
Ok(())
}

View File

@ -0,0 +1,175 @@
use std::env;
use std::sync::Arc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
use tokio::sync::mpsc::{unbounded_channel, UnboundedSender};
type Ref<T> = UnboundedSender<Box<T>>;
#[derive(Debug)]
enum Instruction {
SetPeer(Arc<Ref<Instruction>>),
HandleMessage(u64),
}
struct Forwarder {
hop_limit: u64,
supervisor: Arc<Ref<Instruction>>,
peer: Option<Arc<Ref<Instruction>>>,
}
impl Drop for Forwarder {
fn drop(&mut self) {
let r = self.peer.take();
let _ = tokio::spawn(async move {
drop(r);
});
}
}
enum Action { Continue, Stop }
trait Actor<T> {
fn message(&mut self, message: T) -> Action;
}
fn send<T: std::marker::Send + 'static>(ch: &Arc<Ref<T>>, message: T) -> () {
match ch.send(Box::new(message)) {
Ok(()) => (),
Err(v) => panic!("Aiee! Could not send {:?}", v),
}
}
fn spawn<T: std::marker::Send + 'static, R: Actor<T> + std::marker::Send + 'static>(rt: Option<Arc<AtomicU64>>, mut ac: R) -> Arc<Ref<T>> {
let (tx, mut rx) = unbounded_channel::<Box<T>>();
if let Some(ref c) = rt {
c.fetch_add(1, Ordering::SeqCst);
}
tokio::spawn(async move {
loop {
match rx.recv().await {
None => break,
Some(message) => {
match ac.message(*message) {
Action::Continue => continue,
Action::Stop => break,
}
}
}
}
if let Some(c) = rt {
c.fetch_sub(1, Ordering::SeqCst);
}
});
Arc::new(tx)
}
impl Actor<Instruction> for Forwarder {
fn message(&mut self, message: Instruction) -> Action {
match message {
Instruction::SetPeer(r) => {
tracing::info!("Setting peer {:?}", r);
self.peer = Some(r);
}
Instruction::HandleMessage(n) => {
let target = if n >= self.hop_limit { &self.supervisor } else { self.peer.as_ref().expect("peer") };
send(target, Instruction::HandleMessage(n + 1));
}
}
Action::Continue
}
}
struct Supervisor {
latency_mode: bool,
total_transfers: u64,
remaining_to_receive: u32,
start_time: Option<std::time::Instant>,
}
impl Actor<Instruction> for Supervisor {
fn message(&mut self, message: Instruction) -> Action {
match message {
Instruction::SetPeer(_) => {
tracing::info!("Start");
self.start_time = Some(std::time::Instant::now());
},
Instruction::HandleMessage(_n) => {
self.remaining_to_receive -= 1;
if self.remaining_to_receive == 0 {
let stop_time = std::time::Instant::now();
let duration = stop_time - self.start_time.unwrap();
tracing::info!("Stop after {:?}; {:?} messages, so {:?} Hz ({} mode)",
duration,
self.total_transfers,
(1000.0 * self.total_transfers as f64) / duration.as_millis() as f64,
if self.latency_mode { "latency" } else { "throughput" });
return Action::Stop;
}
},
}
Action::Continue
}
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + std::marker::Send + std::marker::Sync>> {
syndicate::convenient_logging()?;
let args: Vec<String> = env::args().collect();
let n_actors: u32 = args.get(1).unwrap_or(&"1000000".to_string()).parse()?;
let n_rounds: u32 = args.get(2).unwrap_or(&"200".to_string()).parse()?;
let latency_mode: bool = match args.get(3).unwrap_or(&"throughput".to_string()).as_str() {
"latency" => true,
"throughput" => false,
_other => return Err("Invalid throughput/latency mode".into()),
};
tracing::info!("Will run {:?} actors for {:?} rounds", n_actors, n_rounds);
let count = Arc::new(AtomicU64::new(0));
let total_transfers: u64 = n_actors as u64 * n_rounds as u64;
let (hop_limit, injection_count) = if latency_mode {
(total_transfers, 1)
} else {
(n_rounds as u64, n_actors)
};
let me = spawn(Some(count.clone()), Supervisor {
latency_mode,
total_transfers,
remaining_to_receive: injection_count,
start_time: None,
});
let mut forwarders: Vec<Arc<Ref<Instruction>>> = Vec::new();
for _i in 0 .. n_actors {
if _i % 10000 == 0 { tracing::info!("Actor {:?}", _i); }
forwarders.push(spawn(None, Forwarder {
hop_limit,
supervisor: me.clone(),
peer: forwarders.last().cloned(),
}));
}
send(&forwarders[0], Instruction::SetPeer(forwarders.last().expect("an entity").clone()));
send(&me, Instruction::SetPeer(me.clone()));
let mut injected: u32 = 0;
for f in forwarders.into_iter() {
if injected >= injection_count {
break;
}
send(&f, Instruction::HandleMessage(0));
injected += 1;
}
loop {
if count.load(Ordering::SeqCst) == 0 {
break;
}
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
}
Ok(())
}

View File

@ -27,6 +27,7 @@ use pat::lit;
enum SymbolVariant<'a> { enum SymbolVariant<'a> {
Normal(&'a str), Normal(&'a str),
#[allow(dead_code)] // otherwise we get 'warning: field `0` is never read'
Binder(&'a str), Binder(&'a str),
Substitution(&'a str), Substitution(&'a str),
Discard, Discard,
@ -35,7 +36,7 @@ enum SymbolVariant<'a> {
fn compile_sequence_members(vs: &[IOValue]) -> Vec<TokenStream> { fn compile_sequence_members(vs: &[IOValue]) -> Vec<TokenStream> {
vs.iter().enumerate().map(|(i, f)| { vs.iter().enumerate().map(|(i, f)| {
let p = compile_pattern(f); let p = compile_pattern(f);
quote!((#i .into(), #p)) quote!((syndicate::value::Value::from(#i).wrap(), #p))
}).collect::<Vec<_>>() }).collect::<Vec<_>>()
} }
@ -79,10 +80,6 @@ impl ValueCompiler {
match v.value() { match v.value() {
Value::Boolean(b) => Value::Boolean(b) =>
quote!(#V_::Value::from(#b).wrap()), quote!(#V_::Value::from(#b).wrap()),
Value::Float(f) => {
let f = f.0;
quote!(#V_::Value::from(#f).wrap())
}
Value::Double(d) => { Value::Double(d) => {
let d = d.0; let d = d.0;
quote!(#V_::Value::from(#d).wrap()) quote!(#V_::Value::from(#d).wrap())
@ -154,16 +151,14 @@ fn compile_pattern(v: &IOValue) -> TokenStream {
#[allow(non_snake_case)] #[allow(non_snake_case)]
let V_: TokenStream = quote!(syndicate::value); let V_: TokenStream = quote!(syndicate::value);
#[allow(non_snake_case)] #[allow(non_snake_case)]
let MapFromIterator_: TokenStream = quote!(<#V_::Map<_, _> as std::iter::FromIterator<_>>::from_iter); let MapFrom_: TokenStream = quote!(<#V_::Map<_, _>>::from);
match v.value() { match v.value() {
Value::Symbol(s) => match analyze_symbol(&s, true) { Value::Symbol(s) => match analyze_symbol(&s, true) {
SymbolVariant::Binder(_) => SymbolVariant::Binder(_) =>
quote!(#P_::Pattern::DBind(Box::new(#P_::DBind { quote!(#P_::Pattern::Bind{ pattern: Box::new(#P_::Pattern::Discard) }),
pattern: #P_::Pattern::DDiscard(Box::new(#P_::DDiscard))
}))),
SymbolVariant::Discard => SymbolVariant::Discard =>
quote!(#P_::Pattern::DDiscard(Box::new(#P_::DDiscard))), quote!(#P_::Pattern::Discard),
SymbolVariant::Substitution(s) => SymbolVariant::Substitution(s) =>
lit(Ident::new(s, Span::call_site())), lit(Ident::new(s, Span::call_site())),
SymbolVariant::Normal(_) => SymbolVariant::Normal(_) =>
@ -175,9 +170,7 @@ fn compile_pattern(v: &IOValue) -> TokenStream {
Some(label) => Some(label) =>
if label.starts_with("$") && r.arity() == 1 { if label.starts_with("$") && r.arity() == 1 {
let nested = compile_pattern(&r.fields()[0]); let nested = compile_pattern(&r.fields()[0]);
quote!(#P_::Pattern::DBind(Box::new(#P_::DBind { quote!(#P_::Pattern::Bind{ pattern: Box::new(#nested) })
pattern: #nested
})))
} else { } else {
let label_stx = if label.starts_with("=") { let label_stx = if label.starts_with("=") {
let id = Ident::new(&label[1..], Span::call_site()); let id = Ident::new(&label[1..], Span::call_site());
@ -186,18 +179,19 @@ fn compile_pattern(v: &IOValue) -> TokenStream {
quote!(#V_::Value::symbol(#label).wrap()) quote!(#V_::Value::symbol(#label).wrap())
}; };
let members = compile_sequence_members(r.fields()); let members = compile_sequence_members(r.fields());
quote!(#P_::Pattern::DCompound(Box::new(#P_::DCompound::Rec { quote!(#P_::Pattern::Group {
label: #label_stx, type_: Box::new(#P_::GroupType::Rec { label: #label_stx }),
fields: vec![#(#members),*], entries: #MapFrom_([#(#members),*]),
}))) })
} }
} }
} }
Value::Sequence(vs) => { Value::Sequence(vs) => {
let members = compile_sequence_members(vs); let members = compile_sequence_members(vs);
quote!(#P_::Pattern::DCompound(Box::new(#P_::DCompound::Arr { quote!(#P_::Pattern::Group {
items: vec![#(#members),*], type_: Box::new(#P_::GroupType::Arr),
}))) entries: #MapFrom_([#(#members),*]),
})
} }
Value::Set(_) => Value::Set(_) =>
panic!("Cannot match sets in patterns"), panic!("Cannot match sets in patterns"),
@ -207,9 +201,10 @@ fn compile_pattern(v: &IOValue) -> TokenStream {
let v = compile_pattern(v); let v = compile_pattern(v);
quote!((#k, #v)) quote!((#k, #v))
}).collect::<Vec<_>>(); }).collect::<Vec<_>>();
quote!(#P_::Pattern::DCompound(Box::new(#P_::DCompound::Dict { quote!(#P_::Pattern::Group {
entries: #MapFromIterator_(vec![#(#members),*]) type_: Box::new(#P_::GroupType::Dict),
}))) entries: #MapFrom_([#(#members),*]),
})
} }
_ => lit(ValueCompiler::for_patterns().compile(v)), _ => lit(ValueCompiler::for_patterns().compile(v)),
} }

View File

@ -15,10 +15,9 @@ pub fn lit<T: ToTokens>(e: T) -> TokenStream2 {
} }
fn compile_sequence_members(stxs: &Vec<Stx>) -> Result<Vec<TokenStream2>, &'static str> { fn compile_sequence_members(stxs: &Vec<Stx>) -> Result<Vec<TokenStream2>, &'static str> {
stxs.iter().map(|stx| { stxs.iter().enumerate().map(|(i, stx)| {
// let p = to_pattern_expr(stx)?; let p = to_pattern_expr(stx)?;
// Ok(quote!(#p)) Ok(quote!((syndicate::value::Value::from(#i).wrap(), #p)))
to_pattern_expr(stx)
}).collect() }).collect()
} }
@ -28,7 +27,7 @@ pub fn to_pattern_expr(stx: &Stx) -> Result<TokenStream2, &'static str> {
#[allow(non_snake_case)] #[allow(non_snake_case)]
let V_: TokenStream2 = quote!(syndicate::value); let V_: TokenStream2 = quote!(syndicate::value);
#[allow(non_snake_case)] #[allow(non_snake_case)]
let MapFromIterator_: TokenStream2 = quote!(<#V_::Map<_, _> as std::iter::FromIterator<_>>::from_iter); let MapFrom_: TokenStream2 = quote!(<#V_::Map<_, _>>::from);
match stx { match stx {
Stx::Atom(v) => Stx::Atom(v) =>
@ -41,26 +40,27 @@ pub fn to_pattern_expr(stx: &Stx) -> Result<TokenStream2, &'static str> {
None => to_pattern_expr(&Stx::Discard)?, None => to_pattern_expr(&Stx::Discard)?,
} }
}; };
Ok(quote!(#P_::Pattern::DBind(Box::new(#P_::DBind { pattern: #inner_pat_expr })))) Ok(quote!(#P_::Pattern::Bind { pattern: Box::new(#inner_pat_expr) }))
} }
Stx::Subst(e) => Stx::Subst(e) =>
Ok(lit(e)), Ok(lit(e)),
Stx::Discard => Stx::Discard =>
Ok(quote!(#P_::Pattern::DDiscard(Box::new(#P_::DDiscard)))), Ok(quote!(#P_::Pattern::Discard)),
Stx::Rec(l, fs) => { Stx::Rec(l, fs) => {
let label = to_value_expr(&*l)?; let label = to_value_expr(&*l)?;
let members = compile_sequence_members(fs)?; let members = compile_sequence_members(fs)?;
Ok(quote!(#P_::Pattern::DCompound(Box::new(#P_::DCompound::Rec { Ok(quote!(#P_::Pattern::Group {
label: #label, type_: Box::new(#P_::GroupType::Rec { label: #label }),
fields: vec![#(#members),*], entries: #MapFrom_([#(#members),*]),
})))) }))
}, },
Stx::Seq(stxs) => { Stx::Seq(stxs) => {
let members = compile_sequence_members(stxs)?; let members = compile_sequence_members(stxs)?;
Ok(quote!(#P_::Pattern::DCompound(Box::new(#P_::DCompound::Arr { Ok(quote!(#P_::Pattern::Group {
items: vec![#(#members),*], type_: Box::new(#P_::GroupType::Arr),
})))) entries: #MapFrom_([#(#members),*]),
}))
} }
Stx::Set(_stxs) => Stx::Set(_stxs) =>
Err("Set literals not supported in patterns"), Err("Set literals not supported in patterns"),
@ -70,9 +70,10 @@ pub fn to_pattern_expr(stx: &Stx) -> Result<TokenStream2, &'static str> {
let v = to_pattern_expr(v)?; let v = to_pattern_expr(v)?;
Ok(quote!((#k, #v))) Ok(quote!((#k, #v)))
}).collect::<Result<Vec<_>, &'static str>>()?; }).collect::<Result<Vec<_>, &'static str>>()?;
Ok(quote!(#P_::Pattern::DCompound(Box::new(#P_::DCompound::Dict { Ok(quote!(#P_::Pattern::Group {
entries: #MapFromIterator_(vec![#(#members),*]) type_: Box::new(#P_::GroupType::Dict),
})))) entries: #MapFrom_([#(#members),*])
}))
} }
} }
} }
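To make the shape of the new code generation concrete: where the old macros built nested DCompound/DBind/DDiscard values, both compilers above now emit the Group/GroupType form of the revised dataspacePatterns schema. A hand-written sketch of the kind of value a two-field record pattern with two binders now expands to (the Says label and field count are illustrative, not generated output; the construction mirrors the quote! templates above):

use syndicate::schemas::dataspace_patterns as P;
use syndicate::value::{Map, NestedValue, Value};

// Illustrative expansion of a pattern such as <Says $who $what> under the new scheme:
// a Rec group whose entries map field index -> sub-pattern, each binder becoming
// Pattern::Bind wrapping Pattern::Discard.
fn says_pattern() -> P::Pattern {
    P::Pattern::Group {
        type_: Box::new(P::GroupType::Rec { label: Value::symbol("Says").wrap() }),
        entries: <Map<_, _>>::from([
            (Value::from(0usize).wrap(), P::Pattern::Bind { pattern: Box::new(P::Pattern::Discard) }),
            (Value::from(1usize).wrap(), P::Pattern::Bind { pattern: Box::new(P::Pattern::Discard) }),
        ]),
    }
}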

View File

@ -1,5 +1,6 @@
use proc_macro2::Delimiter; use proc_macro2::Delimiter;
use proc_macro2::LineColumn; use proc_macro2::LineColumn;
use proc_macro2::Span;
use proc_macro2::TokenStream; use proc_macro2::TokenStream;
use syn::ExprLit; use syn::ExprLit;
@ -14,7 +15,6 @@ use syn::parse::Parser;
use syn::parse::ParseStream; use syn::parse::ParseStream;
use syn::parse_str; use syn::parse_str;
use syndicate::value::Float;
use syndicate::value::Double; use syndicate::value::Double;
use syndicate::value::IOValue; use syndicate::value::IOValue;
use syndicate::value::NestedValue; use syndicate::value::NestedValue;
@ -70,24 +70,41 @@ fn punct_char(c: Cursor) -> Option<(char, Cursor)> {
c.punct().map(|(p, c)| (p.as_char(), c)) c.punct().map(|(p, c)| (p.as_char(), c))
} }
fn start_pos(s: Span) -> LineColumn {
// We would like to write
// s.start()
// here, but until [1] is fixed (perhaps via [2]), we have to go the unsafe route
// and assume we are in procedural macro context.
// [1]: https://github.com/dtolnay/proc-macro2/issues/402
// [2]: https://github.com/dtolnay/proc-macro2/pull/407
let u = s.unwrap().start();
LineColumn { column: u.column(), line: u.line() }
}
fn end_pos(s: Span) -> LineColumn {
// See start_pos
let u = s.unwrap().end();
LineColumn { column: u.column(), line: u.line() }
}
fn parse_id(mut c: Cursor) -> Result<(String, Cursor)> { fn parse_id(mut c: Cursor) -> Result<(String, Cursor)> {
let mut id = String::new(); let mut id = String::new();
let mut prev_pos = c.span().start(); let mut prev_pos = start_pos(c.span());
loop { loop {
if c.eof() || c.span().start() != prev_pos { if c.eof() || start_pos(c.span()) != prev_pos {
return Ok((id, c)); return Ok((id, c));
} else if let Some((p, next)) = c.punct() { } else if let Some((p, next)) = c.punct() {
match p.as_char() { match p.as_char() {
'<' | '>' | '(' | ')' | '{' | '}' | '[' | ']' | ',' | ':' => return Ok((id, c)), '<' | '>' | '(' | ')' | '{' | '}' | '[' | ']' | ',' | ':' => return Ok((id, c)),
ch => { ch => {
id.push(ch); id.push(ch);
prev_pos = c.span().end(); prev_pos = end_pos(c.span());
c = next; c = next;
} }
} }
} else if let Some((i, next)) = c.ident() { } else if let Some((i, next)) = c.ident() {
id.push_str(&i.to_string()); id.push_str(&i.to_string());
prev_pos = i.span().end(); prev_pos = end_pos(i.span());
c = next; c = next;
} else { } else {
return Ok((id, c)); return Ok((id, c));
@ -126,7 +143,7 @@ fn skip_commas(mut c: Cursor) -> Cursor {
} }
} }
fn parse_group_inner<'c, R, F: Fn(Cursor<'c>) -> Result<(R, Cursor<'c>)>>( fn parse_group<'c, R, F: Fn(Cursor<'c>) -> Result<(R, Cursor<'c>)>>(
mut c: Cursor<'c>, mut c: Cursor<'c>,
f: F, f: F,
after: Cursor<'c>, after: Cursor<'c>,
@ -143,15 +160,6 @@ fn parse_group_inner<'c, R, F: Fn(Cursor<'c>) -> Result<(R, Cursor<'c>)>>(
} }
} }
fn parse_group<'c, R, F: Fn(Cursor<'c>) -> Result<(R, Cursor<'c>)>>(
d: Delimiter,
f: F,
c: Cursor<'c>,
) -> Result<(Vec<R>, Cursor<'c>)> {
let (inner, _, after) = c.group(d).unwrap();
parse_group_inner(inner, f, after)
}
fn parse_kv(c: Cursor) -> Result<((Stx, Stx), Cursor)> { fn parse_kv(c: Cursor) -> Result<((Stx, Stx), Cursor)> {
let (k, c) = parse1(c)?; let (k, c) = parse1(c)?;
if let Some((':', c)) = punct_char(c) { if let Some((':', c)) = punct_char(c) {
@ -162,7 +170,7 @@ fn parse_kv(c: Cursor) -> Result<((Stx, Stx), Cursor)> {
} }
fn adjacent_ident(pos: LineColumn, c: Cursor) -> (Option<Ident>, Cursor) { fn adjacent_ident(pos: LineColumn, c: Cursor) -> (Option<Ident>, Cursor) {
if c.span().start() != pos { if start_pos(c.span()) != pos {
(None, c) (None, c)
} else if let Some((id, next)) = c.ident() { } else if let Some((id, next)) = c.ident() {
(Some(id), next) (Some(id), next)
@ -186,8 +194,8 @@ fn parse_generic<T: Parse>(mut c: Cursor) -> Option<(T, Cursor)> {
// OK, because parse2 checks for end-of-stream, let's chop // OK, because parse2 checks for end-of-stream, let's chop
// the input at the position of the error and try again (!). // the input at the position of the error and try again (!).
let mut collected = Vec::new(); let mut collected = Vec::new();
let upto = e.span().start(); let upto = start_pos(e.span());
while !c.eof() && c.span().start() != upto { while !c.eof() && start_pos(c.span()) != upto {
let (tt, next) = c.token_tree().unwrap(); let (tt, next) = c.token_tree().unwrap();
collected.push(tt); collected.push(tt);
c = next; c = next;
@ -208,10 +216,8 @@ fn parse1(c: Cursor) -> Result<(Stx, Cursor)> {
} else { } else {
Ok((Stx::Rec(Box::new(q.remove(0)), q), c)) Ok((Stx::Rec(Box::new(q.remove(0)), q), c))
}), }),
'{' => parse_group(Delimiter::Brace, parse_kv, c).map(|(q,c)| (Stx::Dict(q),c)),
'[' => parse_group(Delimiter::Bracket, parse1, c).map(|(q,c)| (Stx::Seq(q),c)),
'$' => { '$' => {
let (maybe_id, next) = adjacent_ident(p.span().end(), next); let (maybe_id, next) = adjacent_ident(end_pos(p.span()), next);
let (maybe_type, next) = if let Some((':', next)) = punct_char(next) { let (maybe_type, next) = if let Some((':', next)) = punct_char(next) {
match parse_generic::<Type>(next) { match parse_generic::<Type>(next) {
Some((t, next)) => (Some(t), next), Some((t, next)) => (Some(t), next),
@ -229,7 +235,7 @@ fn parse1(c: Cursor) -> Result<(Stx, Cursor)> {
} }
'#' => { '#' => {
if let Some((inner, _, next)) = next.group(Delimiter::Brace) { if let Some((inner, _, next)) = next.group(Delimiter::Brace) {
parse_group_inner(inner, parse1, next).map(|(q,c)| (Stx::Set(q),c)) parse_group(inner, parse1, next).map(|(q,c)| (Stx::Set(q),c))
} else if let Some((inner, _, next)) = next.group(Delimiter::Parenthesis) { } else if let Some((inner, _, next)) = next.group(Delimiter::Parenthesis) {
Ok((Stx::Subst(inner.token_stream()), next)) Ok((Stx::Subst(inner.token_stream()), next))
} else if let Some((tt, next)) = next.token_tree() { } else if let Some((tt, next)) = next.token_tree() {
@ -259,7 +265,7 @@ fn parse1(c: Cursor) -> Result<(Stx, Cursor)> {
IOValue::new(i.base10_parse::<i128>()?) IOValue::new(i.base10_parse::<i128>()?)
} }
Lit::Float(f) => if f.suffix() == "f32" { Lit::Float(f) => if f.suffix() == "f32" {
IOValue::new(&Float(f.base10_parse::<f32>()?)) IOValue::new(&Double(f.base10_parse::<f32>()? as f64))
} else { } else {
IOValue::new(&Double(f.base10_parse::<f64>()?)) IOValue::new(&Double(f.base10_parse::<f64>()?))
} }
@ -267,6 +273,10 @@ fn parse1(c: Cursor) -> Result<(Stx, Cursor)> {
Lit::Verbatim(_) => return Err(Error::new(c.span(), "Verbatim literals not supported")), Lit::Verbatim(_) => return Err(Error::new(c.span(), "Verbatim literals not supported")),
}; };
Ok((Stx::Atom(v), next)) Ok((Stx::Atom(v), next))
} else if let Some((inner, _, after)) = c.group(Delimiter::Brace) {
parse_group(inner, parse_kv, after).map(|(q,c)| (Stx::Dict(q),c))
} else if let Some((inner, _, after)) = c.group(Delimiter::Bracket) {
parse_group(inner, parse1, after).map(|(q,c)| (Stx::Seq(q),c))
} else { } else {
Err(Error::new(c.span(), "Unexpected input")) Err(Error::new(c.span(), "Unexpected input"))
} }

View File

@ -50,10 +50,6 @@ pub fn value_to_value_expr(v: &IOValue) -> TokenStream2 {
match v.value() { match v.value() {
Value::Boolean(b) => Value::Boolean(b) =>
quote!(#V_::Value::from(#b).wrap()), quote!(#V_::Value::from(#b).wrap()),
Value::Float(f) => {
let f = f.0;
quote!(#V_::Value::from(#f).wrap())
}
Value::Double(d) => { Value::Double(d) => {
let d = d.0; let d = d.0;
quote!(#V_::Value::from(#d).wrap()) quote!(#V_::Value::from(#d).wrap())

View File

@ -0,0 +1,15 @@
{
"folders": [
{
"path": "."
},
{
"path": "../syndicate-protocols"
}
],
"settings": {
"files.exclude": {
"target": true
}
}
}

View File

@ -0,0 +1,19 @@
[package]
name = "syndicate-schema-plugin"
version = "0.9.0"
authors = ["Tony Garnock-Jones <tonyg@leastfixedpoint.com>"]
edition = "2018"
description = "Support for using Preserves Schema with Syndicate macros."
homepage = "https://syndicate-lang.org/"
repository = "https://git.syndicate-lang.org/syndicate-lang/syndicate-rs"
license = "Apache-2.0"
[lib]
[dependencies]
preserves-schema = "5.995"
syndicate = { path = "../syndicate", version = "0.40.0"}
[package.metadata.workspaces]
independent = true

View File

@ -0,0 +1,3 @@
mod pattern_plugin;
pub use pattern_plugin::PatternPlugin;

View File

@ -0,0 +1,164 @@
use preserves_schema::*;
use preserves_schema::compiler::*;
use preserves_schema::compiler::context::ModuleContext;
use preserves_schema::compiler::types::definition_type;
use preserves_schema::compiler::types::Purpose;
use preserves_schema::gen::schema::*;
use preserves_schema::syntax::block::escape_string;
use preserves_schema::syntax::block::constructors::*;
use std::iter::FromIterator;
use syndicate::pattern::lift_literal;
use syndicate::schemas::dataspace_patterns as P;
use syndicate::value::IOValue;
use syndicate::value::Map;
use syndicate::value::NestedValue;
#[derive(Debug)]
pub struct PatternPlugin;
type WalkState<'a, 'm, 'b> =
preserves_schema::compiler::cycles::WalkState<&'a ModuleContext<'m, 'b>>;
impl Plugin for PatternPlugin {
fn generate_definition(
&self,
ctxt: &mut ModuleContext,
definition_name: &str,
definition: &Definition,
) {
if ctxt.mode == context::ModuleContextMode::TargetGeneric {
let mut s = WalkState::new(ctxt, ctxt.module_path.clone());
if let Some(p) = definition.wc(&mut s) {
let ty = definition_type(&ctxt.module_path,
Purpose::Codegen,
definition_name,
definition);
let v = syndicate::language().unparse(&p);
let v = preserves_schema::support::preserves::value::TextWriter::encode(
&mut preserves_schema::support::preserves::value::NoEmbeddedDomainCodec,
&v).unwrap();
ctxt.define_type(item(seq![
"impl",
ty.generic_decl(ctxt),
" ",
names::render_constructor(definition_name),
ty.generic_arg(ctxt),
" ", codeblock![
seq!["#[allow(unused)] pub fn wildcard_dataspace_pattern() ",
"-> syndicate::schemas::dataspace_patterns::Pattern ",
codeblock![
"use syndicate::schemas::dataspace_patterns::*;",
"use preserves_schema::Codec;",
seq!["let _v = syndicate::value::text::from_str(",
escape_string(&v),
", syndicate::value::ViaCodec::new(syndicate::value::NoEmbeddedDomainCodec)).unwrap();"],
"syndicate::language().parse(&_v).unwrap()"]]]]));
}
}
}
}
fn discard() -> P::Pattern {
P::Pattern::Discard
}
trait WildcardPattern {
fn wc(&self, s: &mut WalkState) -> Option<P::Pattern>;
}
impl WildcardPattern for Definition {
fn wc(&self, s: &mut WalkState) -> Option<P::Pattern> {
match self {
Definition::Or { .. } => None,
Definition::And { .. } => None,
Definition::Pattern(p) => p.wc(s),
}
}
}
impl WildcardPattern for Pattern {
fn wc(&self, s: &mut WalkState) -> Option<P::Pattern> {
match self {
Pattern::CompoundPattern(p) => p.wc(s),
Pattern::SimplePattern(p) => p.wc(s),
}
}
}
fn from_io(v: &IOValue) -> Option<P::_Any> {
Some(v.value().copy_via(&mut |_| Err(())).ok()?.wrap())
}
impl WildcardPattern for CompoundPattern {
fn wc(&self, s: &mut WalkState) -> Option<P::Pattern> {
match self {
CompoundPattern::Tuple { patterns } |
CompoundPattern::TuplePrefix { fixed: patterns, .. }=>
Some(P::Pattern::Group {
type_: Box::new(P::GroupType::Arr),
entries: patterns.iter().enumerate()
.map(|(i, p)| Some((P::_Any::new(i), unname(p).wc(s)?)))
.collect::<Option<Map<P::_Any, P::Pattern>>>()?,
}),
CompoundPattern::Dict { entries } =>
Some(P::Pattern::Group {
type_: Box::new(P::GroupType::Dict),
entries: Map::from_iter(
entries.0.iter()
.map(|(k, p)| Some((from_io(k)?, unname_simple(p).wc(s)?)))
.filter(|e| discard() != e.as_ref().unwrap().1)
.collect::<Option<Vec<(P::_Any, P::Pattern)>>>()?
.into_iter()),
}),
CompoundPattern::Rec { label, fields } => match (unname(label), unname(fields)) {
(Pattern::SimplePattern(label), Pattern::CompoundPattern(fields)) =>
match (*label, *fields) {
(SimplePattern::Lit { value }, CompoundPattern::Tuple { patterns }) =>
Some(P::Pattern::Group{
type_: Box::new(P::GroupType::Rec { label: from_io(&value)? }),
entries: patterns.iter().enumerate()
.map(|(i, p)| Some((P::_Any::new(i), unname(p).wc(s)?)))
.collect::<Option<Map<P::_Any, P::Pattern>>>()?,
}),
_ => None,
},
_ => None,
},
}
}
}
impl WildcardPattern for SimplePattern {
fn wc(&self, s: &mut WalkState) -> Option<P::Pattern> {
match self {
SimplePattern::Any |
SimplePattern::Atom { .. } |
SimplePattern::Embedded { .. } |
SimplePattern::Seqof { .. } |
SimplePattern::Setof { .. } |
SimplePattern::Dictof { .. } => Some(discard()),
SimplePattern::Lit { value } => Some(lift_literal(&from_io(value)?)),
SimplePattern::Ref(r) => s.cycle_check(
r,
|ctxt, r| ctxt.bundle.lookup_definition(r).map(|v| v.0),
|s, d| d.and_then(|d| d.wc(s)).or_else(|| Some(discard())),
|| Some(discard())),
}
}
}
fn unname(np: &NamedPattern) -> Pattern {
match np {
NamedPattern::Anonymous(p) => (**p).clone(),
NamedPattern::Named(b) => Pattern::SimplePattern(Box::new(b.pattern.clone())),
}
}
fn unname_simple(np: &NamedSimplePattern) -> &SimplePattern {
match np {
NamedSimplePattern::Anonymous(p) => p,
NamedSimplePattern::Named(b) => &b.pattern,
}
}
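For reference, a consuming crate registers this plugin from its build script. A condensed sketch, mirroring the syndicate-server build.rs change further down this comparison (the schema-bundle path is that project's and is illustrative here; preserves-schema, syndicate and syndicate-schema-plugin must be listed as build-dependencies):

// build.rs sketch, condensed from the syndicate-server build script shown below.
use preserves_schema::compiler::*;

fn main() -> std::io::Result<()> {
    let buildroot = std::path::PathBuf::from(std::env::var_os("OUT_DIR").unwrap());
    let mut gen_dir = buildroot.clone();
    gen_dir.push("src/schemas");

    let mut c = CompilerConfig::new("crate::schemas".to_owned());
    c.plugins.push(Box::new(syndicate_schema_plugin::PatternPlugin));
    let inputs = expand_inputs(&vec!["protocols/schema-bundle.bin".to_owned()])?;
    c.load_schemas_and_bundles(&inputs, &vec![])?;
    c.load_xref_bin("syndicate", syndicate::schemas::_bundle())?;
    compile(&c, &mut CodeCollector::files(gen_dir))
}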

View File

@ -1,6 +1,6 @@
[package] [package]
name = "syndicate-server" name = "syndicate-server"
version = "0.20.1" version = "0.45.0"
authors = ["Tony Garnock-Jones <tonyg@leastfixedpoint.com>"] authors = ["Tony Garnock-Jones <tonyg@leastfixedpoint.com>"]
edition = "2018" edition = "2018"
@ -9,27 +9,39 @@ homepage = "https://syndicate-lang.org/"
repository = "https://git.syndicate-lang.org/syndicate-lang/syndicate-rs" repository = "https://git.syndicate-lang.org/syndicate-lang/syndicate-rs"
license = "Apache-2.0" license = "Apache-2.0"
[features]
jemalloc = ["dep:tikv-jemallocator"]
[build-dependencies] [build-dependencies]
preserves-schema = "^2" preserves-schema = "5.995"
syndicate = { path = "../syndicate", version = "^0.20.0"} syndicate = { path = "../syndicate", version = "0.40.0"}
syndicate-schema-plugin = { path = "../syndicate-schema-plugin", version = "0.9.0"}
[dependencies] [dependencies]
preserves-schema = "^2" preserves-schema = "5.995"
syndicate = { path = "../syndicate", version = "^0.20.0"} syndicate = { path = "../syndicate", version = "0.40.0"}
syndicate-macros = { path = "../syndicate-macros", version = "^0.15.0"} syndicate-macros = { path = "../syndicate-macros", version = "0.32.0"}
chrono = "0.4" chrono = "0.4"
futures = "0.3" futures = "0.3"
lazy_static = "1.4" lazy_static = "1.4"
noise-protocol = "0.1"
noise-rust-crypto = "0.5"
notify = "4.0" notify = "4.0"
structopt = "0.3" structopt = "0.3"
tungstenite = "0.13" tikv-jemallocator = { version = "0.5.0", optional = true }
tokio-tungstenite = "0.14"
tokio = { version = "1.10", features = ["io-std", "time", "process"] } tokio = { version = "1.10", features = ["io-std", "time", "process"] }
tokio-util = "0.6" tokio-util = "0.6"
tokio-stream = "0.1"
tracing = "0.1" tracing = "0.1"
tracing-subscriber = "0.2" tracing-subscriber = "0.2"
tracing-futures = "0.2" tracing-futures = "0.2"
hyper = { version = "0.14.27", features = ["server", "http1", "stream"] }
hyper-tungstenite = "0.11.1"
parking_lot = "0.12.1"
[package.metadata.workspaces]
independent = true

View File

@ -13,7 +13,7 @@ inotifytest:
binary: binary-release binary: binary-release
binary-release: binary-release:
cargo build --release --all-targets cargo build --release --all-targets --features jemalloc
binary-debug: binary-debug:
cargo build --all-targets cargo build --all-targets
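The new jemalloc feature above makes tikv-jemallocator an optional dependency, and the release target now builds with --features jemalloc. The program-side wiring for such a feature is not part of these hunks; the conventional form, shown for illustration only, is:

// Illustrative, feature-gated global-allocator setup; the actual syndicate-server
// source for this is not shown in this comparison.
#[cfg(feature = "jemalloc")]
#[global_allocator]
static ALLOCATOR: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;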

View File

@ -1,176 +1,32 @@
use preserves_schema::compiler::*; use preserves_schema::compiler::*;
mod pattern_plugin {
use preserves_schema::*;
use preserves_schema::compiler::*;
use preserves_schema::compiler::context::ModuleContext;
use preserves_schema::gen::schema::*;
use preserves_schema::syntax::block::escape_string;
use preserves_schema::syntax::block::constructors::*;
use std::iter::FromIterator;
use syndicate::pattern::lift_literal;
use syndicate::schemas::dataspace_patterns as P;
use syndicate::value::IOValue;
use syndicate::value::Map;
use syndicate::value::NestedValue;
#[derive(Debug)]
pub struct PatternPlugin;
type WalkState<'a, 'm, 'b> =
preserves_schema::compiler::cycles::WalkState<&'a ModuleContext<'m, 'b>>;
impl Plugin for PatternPlugin {
fn generate_definition(
&self,
ctxt: &mut ModuleContext,
definition_name: &str,
definition: &Definition,
) {
if ctxt.mode == context::ModuleContextMode::TargetGeneric {
let mut s = WalkState::new(ctxt, ctxt.module_path.clone());
if let Some(p) = definition.wc(&mut s) {
let v = syndicate::language().unparse(&p);
let v = preserves_schema::support::preserves::value::TextWriter::encode(
&mut preserves_schema::support::preserves::value::NoEmbeddedDomainCodec,
&v).unwrap();
ctxt.define_type(item(seq![
"impl ", definition_name.to_owned(), " ", codeblock![
seq!["#[allow(unused)] pub fn wildcard_dataspace_pattern() ",
"-> syndicate::schemas::dataspace_patterns::Pattern ",
codeblock![
"use syndicate::schemas::dataspace_patterns::*;",
"use preserves_schema::Codec;",
seq!["let _v = syndicate::value::text::from_str(",
escape_string(&v),
", syndicate::value::ViaCodec::new(syndicate::value::NoEmbeddedDomainCodec)).unwrap();"],
"syndicate::language().parse(&_v).unwrap()"]]]]));
}
}
}
}
fn discard() -> P::Pattern {
P::Pattern::DDiscard(Box::new(P::DDiscard))
}
trait WildcardPattern {
fn wc(&self, s: &mut WalkState) -> Option<P::Pattern>;
}
impl WildcardPattern for Definition {
fn wc(&self, s: &mut WalkState) -> Option<P::Pattern> {
match self {
Definition::Or { .. } => None,
Definition::And { .. } => None,
Definition::Pattern(p) => p.wc(s),
}
}
}
impl WildcardPattern for Pattern {
fn wc(&self, s: &mut WalkState) -> Option<P::Pattern> {
match self {
Pattern::CompoundPattern(p) => p.wc(s),
Pattern::SimplePattern(p) => p.wc(s),
}
}
}
fn from_io(v: &IOValue) -> Option<P::_Any> {
Some(v.value().copy_via(&mut |_| Err(())).ok()?.wrap())
}
impl WildcardPattern for CompoundPattern {
fn wc(&self, s: &mut WalkState) -> Option<P::Pattern> {
match self {
CompoundPattern::Tuple { patterns } =>
Some(P::Pattern::DCompound(Box::new(P::DCompound::Arr {
items: patterns.iter()
.map(|p| unname(p).wc(s))
.collect::<Option<Vec<P::Pattern>>>()?,
}))),
CompoundPattern::TuplePrefix { .. } =>
Some(discard()),
CompoundPattern::Dict { entries } =>
Some(P::Pattern::DCompound(Box::new(P::DCompound::Dict {
entries: Map::from_iter(
entries.0.iter()
.map(|(k, p)| Some((from_io(k)?, unname_simple(p).wc(s)?)))
.filter(|e| discard() != e.as_ref().unwrap().1)
.collect::<Option<Vec<(P::_Any, P::Pattern)>>>()?
.into_iter()),
}))),
CompoundPattern::Rec { label, fields } => match (unname(label), unname(fields)) {
(Pattern::SimplePattern(label), Pattern::CompoundPattern(fields)) =>
match (*label, *fields) {
(SimplePattern::Lit { value }, CompoundPattern::Tuple { patterns }) =>
Some(P::Pattern::DCompound(Box::new(P::DCompound::Rec {
label: from_io(&value)?,
fields: patterns.iter()
.map(|p| unname(p).wc(s))
.collect::<Option<Vec<P::Pattern>>>()?,
}))),
_ => None,
},
_ => None,
},
}
}
}
impl WildcardPattern for SimplePattern {
fn wc(&self, s: &mut WalkState) -> Option<P::Pattern> {
match self {
SimplePattern::Any |
SimplePattern::Atom { .. } |
SimplePattern::Embedded { .. } |
SimplePattern::Seqof { .. } |
SimplePattern::Setof { .. } |
SimplePattern::Dictof { .. } => Some(discard()),
SimplePattern::Lit { value } => Some(lift_literal(&from_io(value)?)),
SimplePattern::Ref(r) => s.cycle_check(
r,
|ctxt, r| ctxt.bundle.lookup_definition(r),
|s, d| d.and_then(|d| d.wc(s)).or_else(|| Some(discard())),
|| Some(discard())),
}
}
}
fn unname(np: &NamedPattern) -> Pattern {
match np {
NamedPattern::Anonymous(p) => (**p).clone(),
NamedPattern::Named(b) => Pattern::SimplePattern(Box::new(b.pattern.clone())),
}
}
fn unname_simple(np: &NamedSimplePattern) -> &SimplePattern {
match np {
NamedSimplePattern::Anonymous(p) => p,
NamedSimplePattern::Named(b) => &b.pattern,
}
}
}
fn main() -> std::io::Result<()> { fn main() -> std::io::Result<()> {
let buildroot = std::path::PathBuf::from(std::env::var_os("OUT_DIR").unwrap()); let buildroot = std::path::PathBuf::from(std::env::var_os("OUT_DIR").unwrap());
let mut gen_dir = buildroot.clone(); let mut gen_dir = buildroot.clone();
gen_dir.push("src/schemas"); gen_dir.push("src/schemas");
let mut c = CompilerConfig::new(gen_dir, "crate::schemas".to_owned()); let mut c = CompilerConfig::new("crate::schemas".to_owned());
c.plugins.push(Box::new(pattern_plugin::PatternPlugin)); c.plugins.push(Box::new(syndicate_schema_plugin::PatternPlugin));
c.add_external_module(ExternalModule::new(vec!["EntityRef".to_owned()], "syndicate::actor")); c.add_external_module(ExternalModule::new(vec!["EntityRef".to_owned()], "syndicate::actor"));
c.add_external_module( c.add_external_module(
ExternalModule::new(vec!["TransportAddress".to_owned()], ExternalModule::new(vec!["TransportAddress".to_owned()],
"syndicate::schemas::transport_address") "syndicate::schemas::transport_address")
.set_fallback_language_types( .set_fallback_language_types(
|v| vec![format!("syndicate::schemas::Language<{}>", v)].into_iter().collect())); |v| vec![format!("syndicate::schemas::Language<{}>", v)].into_iter().collect()));
c.add_external_module(
ExternalModule::new(vec!["gatekeeper".to_owned()], "syndicate::schemas::gatekeeper")
.set_fallback_language_types(
|v| vec![format!("syndicate::schemas::Language<{}>", v)].into_iter().collect())
);
c.add_external_module(
ExternalModule::new(vec!["noise".to_owned()], "syndicate::schemas::noise")
.set_fallback_language_types(
|v| vec![format!("syndicate::schemas::Language<{}>", v)].into_iter().collect())
);
let inputs = expand_inputs(&vec!["protocols/schema-bundle.bin".to_owned()])?; let inputs = expand_inputs(&vec!["protocols/schema-bundle.bin".to_owned()])?;
c.load_schemas_and_bundles(&inputs)?; c.load_schemas_and_bundles(&inputs, &vec![])?;
compile(&c) c.load_xref_bin("syndicate", syndicate::schemas::_bundle())?;
compile(&c, &mut CodeCollector::files(gen_dir))
} }

View File

@ -12,21 +12,20 @@ use syndicate::value::NestedValue;
use tokio::net::TcpStream; use tokio::net::TcpStream;
use core::time::Duration; use core::time::Duration;
use tokio::time::interval;
#[derive(Clone, Debug, StructOpt)] #[derive(Clone, Debug, StructOpt)]
pub struct Config { pub struct Config {
#[structopt(short = "d", default_value = "b4b303726566b10973796e646963617465b584b210a6480df5306611ddd0d3882b546e197784")] #[structopt(short = "d", default_value = "b4b303726566b7b3036f6964b10973796e646963617465b303736967b21069ca300c1dbfa08fba692102dd82311a8484")]
dataspace: String, dataspace: String,
} }
#[tokio::main] #[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> { async fn main() -> ActorResult {
syndicate::convenient_logging()?; syndicate::convenient_logging()?;
let config = Config::from_args(); let config = Config::from_args();
let sturdyref = sturdy::SturdyRef::from_hex(&config.dataspace)?; let sturdyref = sturdy::SturdyRef::from_hex(&config.dataspace)?;
let (i, o) = TcpStream::connect("127.0.0.1:8001").await?.into_split(); let (i, o) = TcpStream::connect("127.0.0.1:9001").await?.into_split();
Actor::new(None).boot(syndicate::name!("consumer"), |t| { Actor::top(None, |t| {
relay::connect_stream(t, i, o, false, sturdyref, (), |_state, t, ds| { relay::connect_stream(t, i, o, false, sturdyref, (), |_state, t, ds| {
let consumer = syndicate::entity(0) let consumer = syndicate::entity(0)
.on_message(|message_count, _t, m: AnyValue| { .on_message(|message_count, _t, m: AnyValue| {
@ -44,21 +43,13 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
observer: Arc::clone(&consumer), observer: Arc::clone(&consumer),
}); });
t.linked_task(syndicate::name!("tick"), async move { t.every(Duration::from_secs(1), move |t| {
let mut stats_timer = interval(Duration::from_secs(1)); consumer.message(t, &(), &AnyValue::new(true));
loop { Ok(())
stats_timer.tick().await; })?;
let consumer = Arc::clone(&consumer);
external_event(&Arc::clone(&consumer.underlying.mailbox),
&Account::new(syndicate::name!("account")),
Box::new(move |t| t.with_entity(
&consumer.underlying,
|t, e| e.message(t, AnyValue::new(true)))))?;
}
});
Ok(None) Ok(None)
}); })
Ok(())
}).await??; }).await??;
Ok(()) Ok(())
} }

View File

@ -26,14 +26,14 @@ mod dirty;
#[derive(Clone, Debug, StructOpt)] #[derive(Clone, Debug, StructOpt)]
pub struct Config { pub struct Config {
#[structopt(short = "d", default_value = "b4b303726566b10973796e646963617465b584b210a6480df5306611ddd0d3882b546e197784")] #[structopt(short = "d", default_value = "b4b303726566b7b3036f6964b10973796e646963617465b303736967b21069ca300c1dbfa08fba692102dd82311a8484")]
dataspace: String, dataspace: String,
} }
fn main() -> Result<(), Box<dyn std::error::Error>> { fn main() -> Result<(), Box<dyn std::error::Error>> {
let config = Config::from_args(); let config = Config::from_args();
let mut stream = TcpStream::connect("127.0.0.1:8001")?; let mut stream = TcpStream::connect("127.0.0.1:9001")?;
dirty::dirty_resolve(&mut stream, &config.dataspace)?; dirty::dirty_resolve(&mut stream, &config.dataspace)?;
let iolang = Language::<IOValue>::default(); let iolang = Language::<IOValue>::default();
@ -58,7 +58,10 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
let mut buf = [0; 131072]; let mut buf = [0; 131072];
let turn_size = { let turn_size = {
stream.read(&mut buf)?; let n = stream.read(&mut buf)?;
if n == 0 {
return Ok(());
}
let mut src = BytesBinarySource::new(&buf); let mut src = BytesBinarySource::new(&buf);
src.packed_iovalues().demand_next(false)?; src.packed_iovalues().demand_next(false)?;
src.index src.index

View File

@ -25,7 +25,7 @@ pub struct Config {
#[structopt(short = "b", default_value = "0")] #[structopt(short = "b", default_value = "0")]
bytes_padding: usize, bytes_padding: usize,
#[structopt(short = "d", default_value = "b4b303726566b10973796e646963617465b584b210a6480df5306611ddd0d3882b546e197784")] #[structopt(short = "d", default_value = "b4b303726566b7b3036f6964b10973796e646963617465b303736967b21069ca300c1dbfa08fba692102dd82311a8484")]
dataspace: String, dataspace: String,
} }
@ -40,7 +40,7 @@ fn says(who: IOValue, what: IOValue) -> IOValue {
fn main() -> Result<(), Box<dyn std::error::Error>> { fn main() -> Result<(), Box<dyn std::error::Error>> {
let config = Config::from_args(); let config = Config::from_args();
let mut stream = TcpStream::connect("127.0.0.1:8001")?; let mut stream = TcpStream::connect("127.0.0.1:9001")?;
dirty::dirty_resolve(&mut stream, &config.dataspace)?; dirty::dirty_resolve(&mut stream, &config.dataspace)?;
let padding: IOValue = Value::ByteString(vec![0; config.bytes_padding]).wrap(); let padding: IOValue = Value::ByteString(vec![0; config.bytes_padding]).wrap();

View File

@ -16,15 +16,16 @@ pub fn dirty_resolve(stream: &mut TcpStream, dataspace: &str) -> Result<(), Box<
let iolang = Language::<IOValue>::default(); let iolang = Language::<IOValue>::default();
let sturdyref = sturdy::SturdyRef::from_hex(dataspace)?; let sturdyref = sturdy::SturdyRef::from_hex(dataspace)?;
let sturdyref = iolang.parse(&syndicate::language().unparse(&sturdyref) let sturdyref = iolang.parse::<gatekeeper::Step<IOValue>>(
.copy_via(&mut |_| Err("no!"))?)?; &syndicate::language().unparse(&sturdyref)
.copy_via(&mut |_| Err("no!"))?)?;
let resolve_turn = P::Turn(vec![ let resolve_turn = P::Turn(vec![
P::TurnEvent { P::TurnEvent {
oid: P::Oid(0.into()), oid: P::Oid(0.into()),
event: P::Event::Assert(Box::new(P::Assert { event: P::Event::Assert(Box::new(P::Assert {
assertion: P::Assertion(iolang.unparse(&gatekeeper::Resolve::<IOValue> { assertion: P::Assertion(iolang.unparse(&gatekeeper::Resolve::<IOValue> {
sturdyref, step: sturdyref,
observer: iolang.unparse(&sturdy::WireRef::Mine { observer: iolang.unparse(&sturdy::WireRef::Mine {
oid: Box::new(sturdy::Oid(0.into())), oid: Box::new(sturdy::Oid(0.into())),
}), }),

View File

@ -1,9 +1,11 @@
use std::sync::Arc; use std::sync::Arc;
use std::sync::Mutex;
use std::time::SystemTime; use std::time::SystemTime;
use structopt::StructOpt; use structopt::StructOpt;
use syndicate::actor::*; use syndicate::actor::*;
use syndicate::enclose;
use syndicate::language; use syndicate::language;
use syndicate::relay; use syndicate::relay;
use syndicate::schemas::dataspace::Observe; use syndicate::schemas::dataspace::Observe;
@ -14,7 +16,6 @@ use syndicate::value::Value;
use tokio::net::TcpStream; use tokio::net::TcpStream;
use core::time::Duration; use core::time::Duration;
use tokio::time::interval;
#[derive(Clone, Debug, StructOpt)] #[derive(Clone, Debug, StructOpt)]
pub struct PingConfig { pub struct PingConfig {
@ -42,7 +43,7 @@ pub struct Config {
#[structopt(subcommand)] #[structopt(subcommand)]
mode: PingPongMode, mode: PingPongMode,
#[structopt(short = "d", default_value = "b4b303726566b10973796e646963617465b584b210a6480df5306611ddd0d3882b546e197784")] #[structopt(short = "d", default_value = "b4b303726566b7b3036f6964b10973796e646963617465b303736967b21069ca300c1dbfa08fba692102dd82311a8484")]
dataspace: String, dataspace: String,
} }
@ -88,12 +89,12 @@ fn report_latencies(rtt_ns_samples: &Vec<u64>) {
} }
#[tokio::main] #[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> { async fn main() -> ActorResult {
syndicate::convenient_logging()?; syndicate::convenient_logging()?;
let config = Config::from_args(); let config = Config::from_args();
let sturdyref = sturdy::SturdyRef::from_hex(&config.dataspace)?; let sturdyref = sturdy::SturdyRef::from_hex(&config.dataspace)?;
let (i, o) = TcpStream::connect("127.0.0.1:8001").await?.into_split(); let (i, o) = TcpStream::connect("127.0.0.1:9001").await?.into_split();
Actor::new(None).boot(syndicate::name!("pingpong"), |t| { Actor::top(None, |t| {
relay::connect_stream(t, i, o, false, sturdyref, (), move |_state, t, ds| { relay::connect_stream(t, i, o, false, sturdyref, (), move |_state, t, ds| {
let (send_label, recv_label, report_latency_every, should_echo, bytes_padding) = let (send_label, recv_label, report_latency_every, should_echo, bytes_padding) =
@ -110,22 +111,18 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
let mut event_counter: u64 = 0; let mut event_counter: u64 = 0;
let mut rtt_ns_samples: Vec<u64> = vec![0; report_latency_every]; let mut rtt_ns_samples: Vec<u64> = vec![0; report_latency_every];
let mut rtt_batch_count: usize = 0; let mut rtt_batch_count: usize = 0;
let mut current_reply = None; let current_reply = Arc::new(Mutex::new(None));
let self_ref = t.create_inert(); Cap::new(&t.create(
self_ref.become_entity( syndicate::entity(())
syndicate::entity(Arc::clone(&self_ref)) .on_message(move |(), t, m: AnyValue| {
.on_message(move |self_ref, t, m: AnyValue| {
match m.value().as_boolean() { match m.value().as_boolean() {
Some(true) => { Some(_) => {
tracing::info!("{:?} turns, {:?} events in the last second", tracing::info!("{:?} turns, {:?} events in the last second",
turn_counter, turn_counter,
event_counter); event_counter);
turn_counter = 0; turn_counter = 0;
event_counter = 0; event_counter = 0;
} }
Some(false) => {
current_reply = None;
}
None => { None => {
event_counter += 1; event_counter += 1;
let bindings = m.value().to_sequence()?; let bindings = m.value().to_sequence()?;
@ -137,9 +134,13 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
timestamp.clone(), timestamp.clone(),
padding.clone())); padding.clone()));
} else { } else {
if let None = current_reply { let mut g = current_reply.lock().expect("unpoisoned");
if let None = *g {
turn_counter += 1; turn_counter += 1;
t.message_for_myself(&self_ref, AnyValue::new(false)); t.pre_commit(enclose!((current_reply) move |_| {
*current_reply.lock().expect("unpoisoned") = None;
Ok(())
}));
let rtt_ns = now() - timestamp.value().to_u64()?; let rtt_ns = now() - timestamp.value().to_u64()?;
rtt_ns_samples[rtt_batch_count] = rtt_ns; rtt_ns_samples[rtt_batch_count] = rtt_ns;
rtt_batch_count += 1; rtt_batch_count += 1;
@ -150,18 +151,16 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
rtt_batch_count = 0; rtt_batch_count = 0;
} }
current_reply = Some( *g = Some(simple_record2(&send_label,
simple_record2(&send_label, Value::from(now()).wrap(),
Value::from(now()).wrap(), padding.clone()));
padding.clone()));
} }
ds.message(t, &(), current_reply.as_ref().expect("some reply")); ds.message(t, &(), g.as_ref().expect("some reply"));
} }
} }
} }
Ok(()) Ok(())
})); })))
Cap::new(&self_ref)
}; };
ds.assert(t, language(), &Observe { ds.assert(t, language(), &Observe {
@ -172,46 +171,35 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
observer: Arc::clone(&consumer), observer: Arc::clone(&consumer),
}); });
t.linked_task(syndicate::name!("tick"), async move { t.every(Duration::from_secs(1), move |t| {
let mut stats_timer = interval(Duration::from_secs(1)); consumer.message(t, &(), &AnyValue::new(true));
loop { Ok(())
stats_timer.tick().await; })?;
let consumer = Arc::clone(&consumer);
external_event(&Arc::clone(&consumer.underlying.mailbox),
&Account::new(syndicate::name!("account")),
Box::new(move |t| t.with_entity(
&consumer.underlying,
|t, e| e.message(t, AnyValue::new(true)))))?;
}
});
if let PingPongMode::Ping(c) = &config.mode { if let PingPongMode::Ping(c) = &config.mode {
let facet = t.facet_ref();
let turn_count = c.turn_count; let turn_count = c.turn_count;
let action_count = c.action_count; let action_count = c.action_count;
let account = Arc::clone(t.account()); let account = Arc::clone(t.account());
t.linked_task(syndicate::name!("boot-ping"), async move { t.linked_task(Some(AnyValue::symbol("boot-ping")), async move {
let padding = AnyValue::bytestring(vec![0; bytes_padding]); let padding = AnyValue::bytestring(vec![0; bytes_padding]);
for _ in 0..turn_count { for _ in 0..turn_count {
let mut events: PendingEventQueue = vec![];
let current_rec = simple_record2(send_label, let current_rec = simple_record2(send_label,
Value::from(now()).wrap(), Value::from(now()).wrap(),
padding.clone()); padding.clone());
for _ in 0..action_count { facet.activate(&account, None, |t| {
let ds = Arc::clone(&ds); for _ in 0..action_count {
let current_rec = current_rec.clone(); ds.message(t, &(), &current_rec);
events.push(Box::new(move |t| t.with_entity( }
&ds.underlying, Ok(())
|t, e| e.message(t, current_rec)))); });
}
external_events(&ds.underlying.mailbox, &account, events)?
} }
Ok(LinkedTaskTermination::KeepFacet) Ok(LinkedTaskTermination::KeepFacet)
}); });
} }
Ok(None) Ok(None)
}); })
Ok(())
}).await??; }).await??;
Ok(()) Ok(())
} }
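The notable change in this file is dropping the message_for_myself-based reset of current_reply in favour of clearing the shared slot when the turn commits. Isolated, with illustrative names and types, the idiom reads:

use std::sync::{Arc, Mutex};
use syndicate::actor::*;
use syndicate::enclose;

// Illustrative helper: record a per-turn reply and arrange for it to be cleared
// once the current turn commits, mirroring the pre_commit call in the diff above.
fn remember_reply(t: &mut Activation,
                  current_reply: Arc<Mutex<Option<AnyValue>>>,
                  reply: AnyValue) {
    let mut g = current_reply.lock().expect("unpoisoned");
    if g.is_none() {
        t.pre_commit(enclose!((current_reply) move |_| {
            *current_reply.lock().expect("unpoisoned") = None;
            Ok(())
        }));
    }
    *g = Some(reply);
}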

View File

@ -1,10 +1,10 @@
use structopt::StructOpt; use structopt::StructOpt;
use syndicate::actor::*; use syndicate::actor::*;
use syndicate::enclose; use syndicate::preserves::rec;
use syndicate::relay; use syndicate::relay;
use syndicate::sturdy; use syndicate::sturdy;
use syndicate::value::Value; use syndicate::value::NestedValue;
use tokio::net::TcpStream; use tokio::net::TcpStream;
@ -16,44 +16,37 @@ pub struct Config {
#[structopt(short = "b", default_value = "0")] #[structopt(short = "b", default_value = "0")]
bytes_padding: usize, bytes_padding: usize,
#[structopt(short = "d", default_value = "b4b303726566b10973796e646963617465b584b210a6480df5306611ddd0d3882b546e197784")] #[structopt(short = "d", default_value = "b4b303726566b7b3036f6964b10973796e646963617465b303736967b21069ca300c1dbfa08fba692102dd82311a8484")]
dataspace: String, dataspace: String,
} }
#[inline]
fn says(who: AnyValue, what: AnyValue) -> AnyValue {
let mut r = Value::simple_record("Says", 2);
r.fields_vec_mut().push(who);
r.fields_vec_mut().push(what);
r.finish().wrap()
}
#[tokio::main] #[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> { async fn main() -> ActorResult {
syndicate::convenient_logging()?; syndicate::convenient_logging()?;
let config = Config::from_args(); let config = Config::from_args();
let sturdyref = sturdy::SturdyRef::from_hex(&config.dataspace)?; let sturdyref = sturdy::SturdyRef::from_hex(&config.dataspace)?;
let (i, o) = TcpStream::connect("127.0.0.1:8001").await?.into_split(); let (i, o) = TcpStream::connect("127.0.0.1:9001").await?.into_split();
Actor::new(None).boot(syndicate::name!("producer"), |t| { Actor::top(None, |t| {
relay::connect_stream(t, i, o, false, sturdyref, (), move |_state, t, ds| { relay::connect_stream(t, i, o, false, sturdyref, (), move |_state, t, ds| {
let padding: AnyValue = Value::ByteString(vec![0; config.bytes_padding]).wrap(); let facet = t.facet_ref();
let padding = AnyValue::new(&vec![0u8; config.bytes_padding][..]);
let action_count = config.action_count; let action_count = config.action_count;
let account = Account::new(syndicate::name!("account")); let account = Account::new(None, None);
t.linked_task(syndicate::name!("sender"), async move { t.linked_task(Some(AnyValue::symbol("sender")), async move {
loop { loop {
account.ensure_clear_funds().await; account.ensure_clear_funds().await;
let mut events: PendingEventQueue = Vec::new(); facet.activate(&account, None, |t| {
for _ in 0..action_count { for _ in 0..action_count {
events.push(Box::new(enclose!((ds, padding) move |t| t.with_entity( ds.message(t, &(), &rec![AnyValue::symbol("Says"),
&ds.underlying, |t, e| e.message( AnyValue::new("producer"),
t, says(Value::from("producer").wrap(), padding)))))); padding.clone()]);
} }
external_events(&ds.underlying.mailbox, &account, events)?; Ok(())
});
} }
}); });
Ok(None) Ok(None)
}); })
Ok(())
}).await??; }).await??;
Ok(()) Ok(())
} }

View File

@ -12,21 +12,20 @@ use syndicate::value::NestedValue;
use tokio::net::TcpStream; use tokio::net::TcpStream;
use core::time::Duration; use core::time::Duration;
use tokio::time::interval;
#[derive(Clone, Debug, StructOpt)] #[derive(Clone, Debug, StructOpt)]
pub struct Config { pub struct Config {
#[structopt(short = "d", default_value = "b4b303726566b10973796e646963617465b584b210a6480df5306611ddd0d3882b546e197784")] #[structopt(short = "d", default_value = "b4b303726566b7b3036f6964b10973796e646963617465b303736967b21069ca300c1dbfa08fba692102dd82311a8484")]
dataspace: String, dataspace: String,
} }
#[tokio::main] #[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> { async fn main() -> ActorResult {
syndicate::convenient_logging()?; syndicate::convenient_logging()?;
let config = Config::from_args(); let config = Config::from_args();
let sturdyref = sturdy::SturdyRef::from_hex(&config.dataspace)?; let sturdyref = sturdy::SturdyRef::from_hex(&config.dataspace)?;
let (i, o) = TcpStream::connect("127.0.0.1:8001").await?.into_split(); let (i, o) = TcpStream::connect("127.0.0.1:9001").await?.into_split();
Actor::new(None).boot(syndicate::name!("state-consumer"), |t| { Actor::top(None, |t| {
relay::connect_stream(t, i, o, false, sturdyref, (), |_state, t, ds| { relay::connect_stream(t, i, o, false, sturdyref, (), |_state, t, ds| {
let consumer = { let consumer = {
#[derive(Default)] #[derive(Default)]
@ -65,21 +64,13 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
observer: Arc::clone(&consumer), observer: Arc::clone(&consumer),
}); });
t.linked_task(syndicate::name!("tick"), async move { t.every(Duration::from_secs(1), move |t| {
let mut stats_timer = interval(Duration::from_secs(1)); consumer.message(t, &(), &AnyValue::new(true));
loop { Ok(())
stats_timer.tick().await; })?;
let consumer = Arc::clone(&consumer);
external_event(&Arc::clone(&consumer.underlying.mailbox),
&Account::new(syndicate::name!("account")),
Box::new(move |t| t.with_entity(
&consumer.underlying,
|t, e| e.message(t, AnyValue::new(true)))))?;
}
});
Ok(None) Ok(None)
}); })
Ok(())
}).await??; }).await??;
Ok(()) Ok(())
} }

View File

@ -1,57 +1,48 @@
use std::sync::Arc;
use structopt::StructOpt; use structopt::StructOpt;
use syndicate::actor::*; use syndicate::actor::*;
use syndicate::enclose; use syndicate::preserves::rec;
use syndicate::relay; use syndicate::relay;
use syndicate::sturdy; use syndicate::sturdy;
use syndicate::value::Value; use syndicate::value::NestedValue;
use tokio::net::TcpStream; use tokio::net::TcpStream;
#[derive(Clone, Debug, StructOpt)] #[derive(Clone, Debug, StructOpt)]
pub struct Config { pub struct Config {
#[structopt(short = "d", default_value = "b4b303726566b10973796e646963617465b584b210a6480df5306611ddd0d3882b546e197784")] #[structopt(short = "d", default_value = "b4b303726566b7b3036f6964b10973796e646963617465b303736967b21069ca300c1dbfa08fba692102dd82311a8484")]
dataspace: String, dataspace: String,
} }
#[tokio::main] #[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> { async fn main() -> ActorResult {
syndicate::convenient_logging()?; syndicate::convenient_logging()?;
let config = Config::from_args(); let config = Config::from_args();
let sturdyref = sturdy::SturdyRef::from_hex(&config.dataspace)?; let sturdyref = sturdy::SturdyRef::from_hex(&config.dataspace)?;
let (i, o) = TcpStream::connect("127.0.0.1:8001").await?.into_split(); let (i, o) = TcpStream::connect("127.0.0.1:9001").await?.into_split();
Actor::new(None).boot(syndicate::name!("state-producer"), |t| { Actor::top(None, |t| {
relay::connect_stream(t, i, o, false, sturdyref, (), move |_state, t, ds| { relay::connect_stream(t, i, o, false, sturdyref, (), move |_state, t, ds| {
let account = Account::new(syndicate::name!("account")); let facet = t.facet_ref();
t.linked_task(syndicate::name!("sender"), async move { let account = Account::new(None, None);
let presence: AnyValue = Value::simple_record1( t.linked_task(Some(AnyValue::symbol("sender")), async move {
"Present", let presence = rec![AnyValue::symbol("Present"), AnyValue::new(std::process::id())];
Value::from(std::process::id()).wrap()).wrap();
let handle = syndicate::actor::next_handle();
let assert_e = || {
external_event(
&Arc::clone(&ds.underlying.mailbox), &account, Box::new(enclose!(
(ds, presence, handle) move |t| t.with_entity(
&ds.underlying, |t, e| e.assert(t, presence, handle)))))
};
let retract_e = || {
external_event(
&Arc::clone(&ds.underlying.mailbox), &account, Box::new(enclose!(
(ds, handle) move |t| t.with_entity(
&ds.underlying, |t, e| e.retract(t, handle)))))
};
assert_e()?;
loop { loop {
let mut handle = None;
facet.activate(&account, None, |t| {
handle = ds.assert(t, &(), &presence);
Ok(())
});
account.ensure_clear_funds().await; account.ensure_clear_funds().await;
retract_e()?; facet.activate(&account, None, |t| {
assert_e()?; if let Some(h) = handle {
t.retract(h);
}
Ok(())
});
} }
}); });
Ok(None) Ok(None)
}); })
Ok(())
}).await??; }).await??;
Ok(()) Ok(())
} }

View File

@ -1,10 +1,17 @@
(Diff of the binary Preserves schema bundle, which the compare view renders as raw text. The legible fragments show a new control schema module defining an ExitServer record with a code field, a never alternative added to RestartPolicy, and updated per-module version markers; the remainder is binary encoding and is omitted here.)
Gatekeeper´³rec´³lit³
gatekeeper„´³tupleµ´³named³ bindspace´³embedded´³refµ³
gatekeeper„³Bind„„„„„„³
HttpRouter´³rec´³lit³ http-router„´³tupleµ´³named³httpd´³embedded³any„„„„„³ TcpWithHttp´³rec´³lit³relay-listener„´³tupleµ´³named³addr´³refµ³TransportAddress„³Tcp„„´³named³
gatekeeper´³embedded´³refµ³ gatekeeper´³embedded´³refµ³
gatekeeper„³Resolve„„„„„„³UnixRelayListener´³rec´³lit³relay-listener„´³tupleµ´³named³addr´³refµ³TransportAddress„³Unix„„´³named³ gatekeeper„³Resolve„„„´³named³httpd´³embedded´³refµ³http„³ HttpContext„„„„„„³ DebtReporter´³rec´³lit³ debt-reporter„´³tupleµ´³named³intervalSeconds´³atom³Double„„„„„³ ConfigWatcher´³rec´³lit³config-watcher„´³tupleµ´³named³path´³atom³String„„´³named³env´³refµ„³ ConfigEnv„„„„„³TcpWithoutHttp´³rec´³lit³relay-listener„´³tupleµ´³named³addr´³refµ³TransportAddress„³Tcp„„´³named³
gatekeeper´³embedded´³refµ³ gatekeeper´³embedded´³refµ³
gatekeeper„³Resolve„„„„„„„³ embeddedType´³refµ³ EntityRef„³Cap„„„„„ gatekeeper„³Resolve„„„„„„³TcpRelayListener´³orµµ±TcpWithoutHttp´³refµ„³TcpWithoutHttp„„µ± TcpWithHttp´³refµ„³ TcpWithHttp„„„„³UnixRelayListener´³rec´³lit³relay-listener„´³tupleµ´³named³addr´³refµ³TransportAddress„³Unix„„´³named³
gatekeeper´³embedded´³refµ³
gatekeeper„³Resolve„„„„„„³HttpStaticFileServer´³rec´³lit³http-static-files„´³tupleµ´³named³dir´³atom³String„„´³named³pathPrefixElements´³atom³ SignedInteger„„„„„„³ embeddedType´³refµ³ EntityRef„³Cap„„„„„


@ -0,0 +1,12 @@
version 1 .
# Messages and assertions relating to the `$control` entity enabled in syndicate-server when
# the `--control` flag is supplied.
#
# For example, placing the following into `control-config.pr` and starting the server with
# `syndicate-server --control -c control-config.pr` will result in the server exiting with
# exit code 2:
#
# $control ! <exit 2>
ExitServer = <exit @code int> .
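
A minimal sketch (not part of this changeset) of building the same `<exit CODE>` message value from Rust, using the `rec!` and `AnyValue` helpers that appear elsewhere in this diff; in practice the message is simply written into `control-config.pr` as shown above.

// Hypothetical sketch: constructing the `<exit 2>` message value for $control.
// Imports follow the conventions of the Rust files later in this diff.
use syndicate::actor::*;            // AnyValue and friends
use syndicate::preserves::rec;      // the rec! record constructor
use syndicate::value::NestedValue;  // AnyValue::symbol / AnyValue::new

fn exit_message(code: u32) -> AnyValue {
    rec![AnyValue::symbol("exit"), AnyValue::new(code)]  // textual form: <exit 2> when code == 2
}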


@ -1,11 +1,11 @@
version 1 . version 1 .
; Assertion. Describes `object`. # Assertion. Describes `object`.
Metadata = <metadata @object any @info { symbol: any ...:... }> . Metadata = <metadata @object any @info { symbol: any ...:... }> .
; Projections of the `info` in a `Metadata` record. # Projections of the `info` in a `Metadata` record.
Description = @present { description: IOList } / @invalid { description: any } / @absent {} . Description = @present { description: IOList } / @invalid { description: any } / @absent {} .
Url = @present { url: string } / @invalid { url: any } / @absent {} . Url = @present { url: string } / @invalid { url: any } / @absent {} .
; Data type. From preserves' `conventions.md`. # Data type. From preserves' `conventions.md`.
IOList = @bytes bytes / @string string / @nested [IOList ...] . IOList = @bytes bytes / @string string / @nested [IOList ...] .
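
As an illustration only (not part of this changeset), one way to flatten the recursive `IOList` type defined above into a single byte vector, assuming the preserves-schema generator produces an enum with `Bytes`, `String` and `Nested` variants:

// Hypothetical sketch: flattening an IOList. Variant names are assumptions
// about the generated Rust bindings for the schema definition above.
fn flatten_iolist(io: &IOList, out: &mut Vec<u8>) {
    match io {
        IOList::Bytes(bs) => out.extend_from_slice(bs),
        IOList::String(s) => out.extend_from_slice(s.as_bytes()),
        IOList::Nested(items) => {
            for item in items {
                flatten_iolist(item, out);
            }
        }
    }
}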


@ -30,23 +30,26 @@ EnvVariable = @string string / @symbol symbol / @invalid any .
EnvValue = @set string / @remove #f / @invalid any . EnvValue = @set string / @remove #f / @invalid any .
RestartPolicy = RestartPolicy =
/ ; Whether the process terminates normally or abnormally, restart it / # Whether the process terminates normally or abnormally, restart it
; without affecting any peer processes within the service. # without affecting any peer processes within the service.
=always =always
/ ; If the process terminates normally, leave everything alone; if it / # If the process terminates normally, leave everything alone; if it
; terminates abnormally, restart it without affecting peers. # terminates abnormally, restart it without affecting peers.
@onError =on-error @onError =on-error
/ ; If the process terminates normally, leave everything alone; if it / # If the process terminates normally, leave everything alone; if it
; terminates abnormally, restart the whole daemon (all processes # terminates abnormally, restart the whole daemon (all processes
; within the daemon). # within the daemon).
=all =all
/ # Treat both normal and abnormal termination as normal termination; that is, never restart,
# and enter state "complete" even if the process fails.
=never
. .
Protocol = Protocol =
/ ; stdin is /dev/null, output and error are logged / # stdin is /dev/null, output and error are logged
=none =none
/ ; stdin and stdout are *binary* Syndicate-protocol channels / # stdin and stdout are *binary* Syndicate-protocol channels
@binarySyndicate =application/syndicate @binarySyndicate =application/syndicate
/ ; stdin and stdout are *text* Syndicate-protocol channels / # stdin and stdout are *text* Syndicate-protocol channels
@textSyndicate =text/syndicate @textSyndicate =text/syndicate
. .
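
To make the four policies above concrete, here is a hedged sketch (not from this changeset) of the restart decision a supervisor could derive from them; the `RestartPolicy` variant names are assumptions about the generated schema bindings.

// Hypothetical sketch of the behaviour described in the comments above.
fn should_restart_process(policy: &RestartPolicy, exited_normally: bool) -> bool {
    match policy {
        RestartPolicy::Always => true,              // restart after any termination
        RestartPolicy::OnError => !exited_normally, // restart only after abnormal exit
        RestartPolicy::All => !exited_normally,     // abnormal exit restarts the whole daemon
        RestartPolicy::Never => false,              // never restart; treat failure as completion
    }
}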


@ -1,11 +1,18 @@
version 1 . version 1 .
embeddedType EntityRef.Cap . embeddedType EntityRef.Cap .
Gatekeeper = <gatekeeper @bindspace #:gatekeeper.Bind> .
DebtReporter = <debt-reporter @intervalSeconds double>. DebtReporter = <debt-reporter @intervalSeconds double>.
TcpRelayListener = <relay-listener @addr TransportAddress.Tcp @gatekeeper #!gatekeeper.Resolve> . TcpRelayListener = TcpWithoutHttp / TcpWithHttp .
UnixRelayListener = <relay-listener @addr TransportAddress.Unix @gatekeeper #!gatekeeper.Resolve> . TcpWithoutHttp = <relay-listener @addr TransportAddress.Tcp @gatekeeper #:gatekeeper.Resolve> .
TcpWithHttp = <relay-listener @addr TransportAddress.Tcp @gatekeeper #:gatekeeper.Resolve @httpd #:http.HttpContext> .
UnixRelayListener = <relay-listener @addr TransportAddress.Unix @gatekeeper #:gatekeeper.Resolve> .
ConfigWatcher = <config-watcher @path string @env ConfigEnv>. ConfigWatcher = <config-watcher @path string @env ConfigEnv>.
Milestone = <milestone @name any>.
ConfigEnv = { symbol: any ...:... }. ConfigEnv = { symbol: any ...:... }.
HttpRouter = <http-router @httpd #:any> .
HttpStaticFileServer = <http-static-files @dir string @pathPrefixElements int> .
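
A hedged sketch (not part of this changeset) of requiring the new `http-static-files` service from Rust, mirroring the `RunService`/`unparse` pattern that `main.rs` uses later in this diff for `TcpWithoutHttp`; the generated field names and the example values here are assumptions.

// Hypothetical sketch only: the field names of the generated
// internal_services::HttpStaticFileServer struct are assumed.
fn require_static_files(t: &mut Activation, config_ds: &Arc<Cap>) {
    config_ds.assert(t, language(), &service::RunService {
        service_name: language().unparse(&internal_services::HttpStaticFileServer {
            dir: "./public".to_owned(),           // hypothetical directory
            path_prefix_elements: (0 as i32).into(),  // value type per the schema's `int`
        }),
    });
}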


@ -3,23 +3,21 @@ use preserves_schema::Codec;
use std::sync::Arc; use std::sync::Arc;
use syndicate::actor::*; use syndicate::actor::*;
use syndicate::during::entity;
use syndicate::enclose; use syndicate::enclose;
use syndicate::schemas::dataspace::Observe; use syndicate::preserves::rec;
use syndicate::schemas::service; use syndicate::schemas::service;
use syndicate::value::NestedValue; use syndicate::value::NestedValue;
use crate::counter; use crate::counter;
use crate::language::language; use crate::language::language;
use crate::schemas::internal_services;
use syndicate_macros::during; use syndicate_macros::during;
pub fn boot(t: &mut Activation, ds: Arc<Cap>) { pub fn boot(t: &mut Activation, ds: Arc<Cap>) {
t.spawn(syndicate::name!("dependencies"), move |t| { t.spawn(Some(AnyValue::symbol("dependencies_listener")), move |t| {
Ok(during!(t, ds, language(), <require-service $spec>, |t: &mut Activation| { Ok(during!(t, ds, language(), <require-service $spec>, |t: &mut Activation| {
tracing::debug!(?spec, "tracking dependencies"); tracing::debug!(?spec, "tracking dependencies");
t.spawn_link(syndicate::name!(parent: None, "dependencies", spec = ?spec), t.spawn_link(Some(rec![AnyValue::symbol("dependencies"), language().unparse(&spec)]),
enclose!((ds) |t| run(t, ds, spec))); enclose!((ds) |t| run(t, ds, spec)));
Ok(()) Ok(())
})) }))
@ -27,34 +25,9 @@ pub fn boot(t: &mut Activation, ds: Arc<Cap>) {
} }
fn run(t: &mut Activation, ds: Arc<Cap>, service_name: AnyValue) -> ActorResult { fn run(t: &mut Activation, ds: Arc<Cap>, service_name: AnyValue) -> ActorResult {
if !service_name.value().is_simple_record("milestone", Some(1)) {
let system_layer_dep = service::ServiceDependency {
depender: service_name.clone(),
dependee: service::ServiceState {
service_name: language().unparse(&internal_services::Milestone {
name: AnyValue::symbol("system-layer"),
}),
state: service::State::Ready,
},
};
let milestone_monitor = entity(ds.assert(t, language(), &system_layer_dep))
.on_asserted(enclose!((ds) move |handle, t, _captures: AnyValue| {
ds.update::<_, service::ServiceDependency>(t, handle, language(), None);
Ok(Some(Box::new(enclose!((ds, system_layer_dep) move |handle, t| {
ds.update(t, handle, language(), Some(&system_layer_dep));
Ok(())
}))))
}))
.create_cap(t);
ds.assert(t, language(), &Observe {
pattern: syndicate_macros::pattern!{<system-layer-service #(&service_name)>},
observer: milestone_monitor,
});
}
let obstacle_count = t.named_field("obstacle_count", 1isize); let obstacle_count = t.named_field("obstacle_count", 1isize);
t.dataflow(enclose!((obstacle_count) move |t| { t.dataflow(enclose!((service_name, obstacle_count) move |t| {
tracing::trace!(obstacle_count = ?t.get(&obstacle_count)); tracing::trace!(?service_name, obstacle_count = ?t.get(&obstacle_count));
Ok(()) Ok(())
}))?; }))?;
@ -73,24 +46,25 @@ fn run(t: &mut Activation, ds: Arc<Cap>, service_name: AnyValue) -> ActorResult
}) })
})?; })?;
let depender = service_name.clone();
enclose!((ds, obstacle_count) during!( enclose!((ds, obstacle_count) during!(
t, ds, language(), <depends-on #(&service_name) $dependee>, t, ds, language(), <depends-on #(&depender) $dependee>,
enclose!((ds, obstacle_count) move |t: &mut Activation| { enclose!((service_name, ds, obstacle_count) move |t: &mut Activation| {
if let Ok(dependee) = language().parse::<service::ServiceState>(&dependee) { if let Ok(dependee) = language().parse::<service::ServiceState>(&dependee) {
tracing::trace!(on = ?dependee, "new dependency"); tracing::trace!(?service_name, ?dependee, "new dependency");
ds.assert(t, language(), &service::RequireService { ds.assert(t, language(), &service::RequireService {
service_name: dependee.service_name, service_name: dependee.service_name,
}); });
} else { } else {
tracing::warn!(on = ?dependee, "cannot deduce dependee service name"); tracing::warn!(?service_name, ?dependee, "cannot deduce dependee service name");
} }
counter::adjust(t, &obstacle_count, 1); counter::adjust(t, &obstacle_count, 1);
let d = &dependee.clone(); let d = &dependee.clone();
during!(t, ds, language(), #d, enclose!( during!(t, ds, language(), #d, enclose!(
(obstacle_count, dependee) move |t: &mut Activation| { (service_name, obstacle_count, dependee) move |t: &mut Activation| {
tracing::trace!(on = ?dependee, "dependency satisfied"); tracing::trace!(?service_name, ?dependee, "dependency satisfied");
counter::adjust(t, &obstacle_count, -1); counter::adjust(t, &obstacle_count, -1);
Ok(()) Ok(())
})); }));


@ -1,37 +1,179 @@
use noise_protocol::CipherState;
use noise_protocol::U8Array;
use noise_protocol::patterns::HandshakePattern;
use noise_rust_crypto::Blake2s;
use noise_rust_crypto::ChaCha20Poly1305;
use noise_rust_crypto::X25519;
use preserves_schema::Codec; use preserves_schema::Codec;
use syndicate::relay::Mutex;
use syndicate::relay::TunnelRelay;
use syndicate::trace::TurnCause;
use syndicate::value::NoEmbeddedDomainCodec;
use syndicate::value::packed::PackedWriter;
use std::convert::TryInto;
use std::sync::Arc; use std::sync::Arc;
use syndicate::actor::*; use syndicate::actor::*;
use syndicate::during::DuringResult; use syndicate::enclose;
use syndicate::schemas::gatekeeper;
use syndicate::value::NestedValue; use syndicate::value::NestedValue;
use syndicate::schemas::dataspace;
use syndicate::schemas::gatekeeper;
use syndicate::schemas::noise;
use syndicate::schemas::sturdy;
use crate::language::language; use crate::language::language;
// pub fn bind( use syndicate_macros::during;
// t: &mut Activation, use syndicate_macros::pattern;
// ds: &Arc<Cap>,
// oid: syndicate::schemas::sturdy::_Any,
// key: [u8; 16],
// target: Arc<Cap>,
// ) {
// let sr = sturdy::SturdyRef::mint(oid.clone(), &key);
// tracing::info!(cap = ?language().unparse(&sr), hex = %sr.to_hex());
// ds.assert(t, language(), &gatekeeper::Bind { oid, key: key.to_vec(), target });
// }
pub fn handle_resolve( fn sturdy_step_type() -> String {
language().unparse(&sturdy::SturdyStepType).value().to_symbol().unwrap().clone()
}
fn noise_step_type() -> String {
language().unparse(&noise::NoiseStepType).value().to_symbol().unwrap().clone()
}
pub fn handle_binds(t: &mut Activation, ds: &Arc<Cap>) -> ActorResult {
during!(t, ds, language(), <bind <ref $desc> $target $observer>, |t: &mut Activation| {
t.spawn_link(None, move |t| {
target.value().to_embedded()?;
let observer = language().parse::<gatekeeper::BindObserver>(&observer)?;
let desc = language().parse::<sturdy::SturdyDescriptionDetail>(&desc)?;
let sr = sturdy::SturdyRef::mint(desc.oid, &desc.key);
if let gatekeeper::BindObserver::Present(o) = observer {
o.assert(t, language(), &gatekeeper::Bound::Bound {
path_step: Box::new(gatekeeper::PathStep {
step_type: sturdy_step_type(),
detail: language().unparse(&sr.parameters),
}),
});
}
Ok(())
});
Ok(())
});
during!(t, ds, language(), <bind <noise $desc> $target $observer>, |t: &mut Activation| {
t.spawn_link(None, move |t| {
target.value().to_embedded()?;
let observer = language().parse::<gatekeeper::BindObserver>(&observer)?;
let spec = language().parse::<noise::NoiseDescriptionDetail<AnyValue>>(&desc)?.0;
match validate_noise_spec(spec) {
Ok(spec) => if let gatekeeper::BindObserver::Present(o) = observer {
o.assert(t, language(), &gatekeeper::Bound::Bound {
path_step: Box::new(gatekeeper::PathStep {
step_type: noise_step_type(),
detail: language().unparse(&noise::NoisePathStepDetail(noise::NoiseSpec {
key: spec.public_key,
service: noise::ServiceSelector(spec.service),
protocol: if spec.protocol == default_noise_protocol() {
noise::NoiseProtocol::Absent
} else {
noise::NoiseProtocol::Present {
protocol: spec.protocol,
}
},
pre_shared_keys: if spec.psks.is_empty() {
noise::NoisePreSharedKeys::Absent
} else {
noise::NoisePreSharedKeys::Present {
pre_shared_keys: spec.psks,
}
},
})),
}),
});
},
Err(e) => {
if let gatekeeper::BindObserver::Present(o) = observer {
o.assert(t, language(), &gatekeeper::Bound::Rejected(
Box::new(gatekeeper::Rejected {
detail: AnyValue::new(format!("{}", &e)),
})));
}
tracing::error!("Invalid noise bind description: {}", e);
}
}
Ok(())
});
Ok(())
});
Ok(())
}
pub fn facet_handle_resolve(
ds: &mut Arc<Cap>, ds: &mut Arc<Cap>,
t: &mut Activation, t: &mut Activation,
a: gatekeeper::Resolve, a: gatekeeper::Resolve,
) -> DuringResult<Arc<Cap>> { ) -> ActorResult {
use syndicate::schemas::dataspace; let mut detail: &'static str = "unsupported";
let gatekeeper::Resolve { sturdyref, observer } = a; if a.step.step_type == sturdy_step_type() {
let queried_oid = sturdyref.oid.clone(); detail = "invalid";
if let Ok(s) = language().parse::<sturdy::SturdyStepDetail>(&a.step.detail) {
t.facet(|t| {
let f = handle_direct_resolution(ds, t, a.clone())?;
await_bind_sturdyref(ds, t, sturdy::SturdyRef { parameters: s.0 }, a.observer, f)
})?;
return Ok(());
}
}
if a.step.step_type == noise_step_type() {
detail = "invalid";
if let Ok(s) = language().parse::<noise::NoiseStepDetail<AnyValue>>(&a.step.detail) {
t.facet(|t| {
let f = handle_direct_resolution(ds, t, a.clone())?;
await_bind_noise(ds, t, s.0.0, a.observer, f)
})?;
return Ok(());
}
}
a.observer.assert(t, language(), &gatekeeper::Rejected {
detail: AnyValue::symbol(detail),
});
Ok(())
}
fn handle_direct_resolution(
ds: &mut Arc<Cap>,
t: &mut Activation,
a: gatekeeper::Resolve,
) -> Result<FacetId, ActorError> {
let outer_facet = t.facet_id();
t.facet(move |t| {
let handler = syndicate::entity(a.observer)
.on_asserted(move |observer, t, a: AnyValue| {
t.stop_facet_and_continue(outer_facet, Some(
enclose!((observer, a) move |t: &mut Activation| {
observer.assert(t, language(), &a);
Ok(())
})))?;
Ok(None)
})
.create_cap(t);
ds.assert(t, language(), &gatekeeper::Resolve {
step: a.step.clone(),
observer: handler,
});
Ok(())
})
}
fn await_bind_sturdyref(
ds: &mut Arc<Cap>,
t: &mut Activation,
sturdyref: sturdy::SturdyRef,
observer: Arc<Cap>,
direct_resolution_facet: FacetId,
) -> ActorResult {
let queried_oid = sturdyref.parameters.oid.clone();
let handler = syndicate::entity(observer) let handler = syndicate::entity(observer)
.on_asserted(move |observer, t, a: AnyValue| { .on_asserted(move |observer, t, a: AnyValue| {
t.stop_facet(direct_resolution_facet);
let bindings = a.value().to_sequence()?; let bindings = a.value().to_sequence()?;
let key = bindings[0].value().to_bytestring()?; let key = bindings[0].value().to_bytestring()?;
let unattenuated_target = bindings[1].value().to_embedded()?; let unattenuated_target = bindings[1].value().to_embedded()?;
@ -39,28 +181,320 @@ pub fn handle_resolve(
Err(e) => { Err(e) => {
tracing::warn!(sturdyref = ?language().unparse(&sturdyref), tracing::warn!(sturdyref = ?language().unparse(&sturdyref),
"sturdyref failed validation: {}", e); "sturdyref failed validation: {}", e);
Ok(None) observer.assert(t, language(), &gatekeeper::Resolved::Rejected(
Box::new(gatekeeper::Rejected {
detail: AnyValue::symbol("sturdyref-failed-validation"),
})));
}, },
Ok(target) => { Ok(target) => {
tracing::trace!(sturdyref = ?language().unparse(&sturdyref), tracing::trace!(sturdyref = ?language().unparse(&sturdyref),
?target, ?target,
"sturdyref resolved"); "sturdyref resolved");
if let Some(h) = observer.assert(t, &(), &AnyValue::domain(target)) { observer.assert(t, language(), &gatekeeper::Resolved::Accepted {
Ok(Some(Box::new(move |_observer, t| Ok(t.retract(h))))) responder_session: target,
} else { });
Ok(None)
}
} }
} }
Ok(None)
}) })
.create_cap(t); .create_cap(t);
if let Some(oh) = ds.assert(t, language(), &dataspace::Observe { ds.assert(t, language(), &dataspace::Observe {
// TODO: codegen plugin to generate pattern constructors // TODO: codegen plugin to generate pattern constructors
pattern: syndicate_macros::pattern!{<bind #(&queried_oid) $ $>}, pattern: pattern!{<bind <ref { oid: #(&queried_oid), key: $ }> $ _>},
observer: handler, observer: handler,
}) { });
Ok(Some(Box::new(move |_ds, t| Ok(t.retract(oh))))) Ok(())
} else { }
Ok(None)
struct ValidatedNoiseSpec {
service: AnyValue,
protocol: String,
pattern: HandshakePattern,
psks: Vec<Vec<u8>>,
secret_key: Option<Vec<u8>>,
public_key: Vec<u8>,
}
fn default_noise_protocol() -> String {
language().unparse(&noise::DefaultProtocol).value().to_string().unwrap().clone()
}
fn validate_noise_spec(
spec: noise::NoiseServiceSpec<AnyValue>,
) -> Result<ValidatedNoiseSpec, ActorError> {
let protocol = match spec.base.protocol {
noise::NoiseProtocol::Present { protocol } => protocol,
noise::NoiseProtocol::Invalid { protocol } =>
Err(format!("Invalid noise protocol {:?}", protocol))?,
noise::NoiseProtocol::Absent => default_noise_protocol(),
};
const PREFIX: &'static str = "Noise_";
const SUFFIX: &'static str = "_25519_ChaChaPoly_BLAKE2s";
if !protocol.starts_with(PREFIX) || !protocol.ends_with(SUFFIX) {
Err(format!("Unsupported protocol {:?}", protocol))?;
}
let pattern_name = &protocol[PREFIX.len()..(protocol.len()-SUFFIX.len())];
let pattern = lookup_pattern(pattern_name).ok_or_else::<ActorError, _>(
|| format!("Unsupported handshake pattern {:?}", pattern_name).into())?;
let psks = match spec.base.pre_shared_keys {
noise::NoisePreSharedKeys::Present { pre_shared_keys } => pre_shared_keys,
noise::NoisePreSharedKeys::Invalid { pre_shared_keys } =>
Err(format!("Invalid pre-shared-keys {:?}", pre_shared_keys))?,
noise::NoisePreSharedKeys::Absent => vec![],
};
let secret_key = match spec.secret_key {
noise::SecretKeyField::Present { secret_key } => Some(secret_key),
noise::SecretKeyField::Invalid { secret_key } =>
Err(format!("Invalid secret key {:?}", secret_key))?,
noise::SecretKeyField::Absent => None,
};
Ok(ValidatedNoiseSpec {
service: spec.base.service.0,
protocol,
pattern,
psks,
secret_key,
public_key: spec.base.key,
})
}
fn await_bind_noise(
ds: &mut Arc<Cap>,
t: &mut Activation,
service_selector: AnyValue,
observer: Arc<Cap>,
direct_resolution_facet: FacetId,
) -> ActorResult {
let handler = syndicate::entity(())
.on_asserted_facet(move |_state, t, a: AnyValue| {
t.stop_facet(direct_resolution_facet);
let observer = Arc::clone(&observer);
t.spawn_link(None, move |t| {
let bindings = a.value().to_sequence()?;
let spec = validate_noise_spec(language().parse(&bindings[0])?)?;
let service = bindings[1].value().to_embedded()?;
run_noise_responder(t, spec, observer, Arc::clone(service))
});
Ok(())
})
.create_cap(t);
ds.assert(t, language(), &dataspace::Observe {
// TODO: codegen plugin to generate pattern constructors
pattern: pattern!{
<bind <noise $spec:NoiseServiceSpec{ { service: #(&service_selector) } }> $service _>
},
observer: handler,
});
Ok(())
}
type HandshakeState = noise_protocol::HandshakeState<X25519, ChaCha20Poly1305, Blake2s>;
enum ResponderState {
Invalid, // used during state transitions
Introduction {
service: Arc<Cap>,
hs: HandshakeState,
},
Handshake {
initiator_session: Arc<Cap>,
service: Arc<Cap>,
hs: HandshakeState,
},
Transport {
relay_input: Arc<Mutex<Option<TunnelRelay>>>,
c_recv: CipherState<ChaCha20Poly1305>,
},
}
impl Entity<noise::SessionItem> for ResponderState {
fn assert(&mut self, _t: &mut Activation, item: noise::SessionItem, _handle: Handle) -> ActorResult {
let initiator_session = match item {
noise::SessionItem::Initiator(i_box) => i_box.initiator_session,
noise::SessionItem::Packet(_) => Err("Unexpected Packet assertion")?,
};
match std::mem::replace(self, ResponderState::Invalid) {
ResponderState::Introduction { service, hs } => {
*self = ResponderState::Handshake { initiator_session, service, hs };
Ok(())
}
_ =>
Err("Received second Initiator")?,
}
}
fn message(&mut self, t: &mut Activation, item: noise::SessionItem) -> ActorResult {
let p = match item {
noise::SessionItem::Initiator(_) => Err("Unexpected Initiator message")?,
noise::SessionItem::Packet(p_box) => *p_box,
};
match self {
ResponderState::Invalid | ResponderState::Introduction { .. } =>
Err("Received Packet in invalid ResponderState")?,
ResponderState::Handshake { initiator_session, service, hs } => match p {
noise::Packet::Complete(bs) => {
if bs.len() < hs.get_next_message_overhead() {
Err("Invalid handshake message for pattern")?;
}
if bs.len() > hs.get_next_message_overhead() {
Err("Cannot accept payload during handshake")?;
}
hs.read_message(&bs, &mut [])?;
let mut reply = vec![0u8; hs.get_next_message_overhead()];
hs.write_message(&[], &mut reply[..])?;
initiator_session.message(t, language(), &noise::Packet::Complete(reply.into()));
if hs.completed() {
let (c_recv, mut c_send) = hs.get_ciphers();
let (_, relay_input, mut relay_output) =
TunnelRelay::_run(t, Some(Arc::clone(service)), None, false);
let trace_collector = t.trace_collector();
let initiator_session = Arc::clone(initiator_session);
let relay_output_name = Some(AnyValue::symbol("relay_output"));
let transport_facet = t.facet_ref();
t.linked_task(relay_output_name.clone(), async move {
let account = Account::new(relay_output_name, trace_collector);
let cause = TurnCause::external("relay_output");
loop {
match relay_output.recv().await {
None => return Ok(LinkedTaskTermination::KeepFacet),
Some(loaned_item) => {
const MAXSIZE: usize = 65535 - 16; /* Noise tag length is 16 */
let p = if loaned_item.item.len() > MAXSIZE {
noise::Packet::Fragmented(
loaned_item.item
.chunks(MAXSIZE)
.map(|c| c_send.encrypt_vec(c))
.collect())
} else {
noise::Packet::Complete(c_send.encrypt_vec(&loaned_item.item))
};
if !transport_facet.activate(&account, Some(cause.clone()), |t| {
initiator_session.message(t, language(), &p);
Ok(())
}) {
break;
}
}
}
}
Ok(LinkedTaskTermination::Normal)
});
*self = ResponderState::Transport { relay_input, c_recv };
}
}
_ => Err("Fragmented handshake is not allowed")?,
},
ResponderState::Transport { relay_input, c_recv } => {
let bs = match p {
noise::Packet::Complete(bs) =>
c_recv.decrypt_vec(&bs[..]).map_err(|_| "Cannot decrypt packet")?,
noise::Packet::Fragmented(pieces) => {
let mut result = Vec::with_capacity(1024);
for piece in pieces {
result.extend(c_recv.decrypt_vec(&piece[..])
.map_err(|_| "Cannot decrypt packet fragment")?);
}
result
}
};
let mut g = relay_input.lock();
let tr = g.as_mut().expect("initialized");
tr.handle_inbound_datagram(t, &bs[..])?;
}
}
Ok(())
} }
} }
fn lookup_pattern(name: &str) -> Option<HandshakePattern> {
use noise_protocol::patterns::*;
Some(match name {
"N" => noise_n(),
"K" => noise_k(),
"X" => noise_x(),
"NN" => noise_nn(),
"NK" => noise_nk(),
"NX" => noise_nx(),
"XN" => noise_xn(),
"XK" => noise_xk(),
"XX" => noise_xx(),
"KN" => noise_kn(),
"KK" => noise_kk(),
"KX" => noise_kx(),
"IN" => noise_in(),
"IK" => noise_ik(),
"IX" => noise_ix(),
"Npsk0" => noise_n_psk0(),
"Kpsk0" => noise_k_psk0(),
"Xpsk1" => noise_x_psk1(),
"NNpsk0" => noise_nn_psk0(),
"NNpsk2" => noise_nn_psk2(),
"NKpsk0" => noise_nk_psk0(),
"NKpsk2" => noise_nk_psk2(),
"NXpsk2" => noise_nx_psk2(),
"XNpsk3" => noise_xn_psk3(),
"XKpsk3" => noise_xk_psk3(),
"XXpsk3" => noise_xx_psk3(),
"KNpsk0" => noise_kn_psk0(),
"KNpsk2" => noise_kn_psk2(),
"KKpsk0" => noise_kk_psk0(),
"KKpsk2" => noise_kk_psk2(),
"KXpsk2" => noise_kx_psk2(),
"INpsk1" => noise_in_psk1(),
"INpsk2" => noise_in_psk2(),
"IKpsk1" => noise_ik_psk1(),
"IKpsk2" => noise_ik_psk2(),
"IXpsk2" => noise_ix_psk2(),
"NNpsk0+psk2" => noise_nn_psk0_psk2(),
"NXpsk0+psk1+psk2" => noise_nx_psk0_psk1_psk2(),
"XNpsk1+psk3" => noise_xn_psk1_psk3(),
"XKpsk0+psk3" => noise_xk_psk0_psk3(),
"KNpsk1+psk2" => noise_kn_psk1_psk2(),
"KKpsk0+psk2" => noise_kk_psk0_psk2(),
"INpsk1+psk2" => noise_in_psk1_psk2(),
"IKpsk0+psk2" => noise_ik_psk0_psk2(),
"IXpsk0+psk2" => noise_ix_psk0_psk2(),
"XXpsk0+psk1" => noise_xx_psk0_psk1(),
"XXpsk0+psk2" => noise_xx_psk0_psk2(),
"XXpsk0+psk3" => noise_xx_psk0_psk3(),
"XXpsk0+psk1+psk2+psk3" => noise_xx_psk0_psk1_psk2_psk3(),
_ => return None,
})
}
fn run_noise_responder(
t: &mut Activation,
spec: ValidatedNoiseSpec,
observer: Arc<Cap>,
service: Arc<Cap>,
) -> ActorResult {
let hs = {
let mut builder = noise_protocol::HandshakeStateBuilder::new();
builder.set_pattern(spec.pattern);
builder.set_is_initiator(false);
let prologue = PackedWriter::encode(&mut NoEmbeddedDomainCodec, &spec.service)?;
builder.set_prologue(&prologue);
match spec.secret_key {
None => (),
Some(sk) => {
let sk: [u8; 32] = sk.try_into().map_err(|_| "Bad secret key length")?;
builder.set_s(U8Array::from_slice(&sk));
},
}
let mut hs = builder.build_handshake_state();
for psk in spec.psks.into_iter() {
hs.push_psk(&psk);
}
hs
};
let responder_session =
Cap::guard(crate::Language::arc(), t.create(ResponderState::Introduction{ service, hs }));
observer.assert(t, language(), &gatekeeper::Resolved::Accepted { responder_session });
Ok(())
}


@ -0,0 +1,195 @@
use std::convert::TryInto;
use std::sync::Arc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
use hyper::{Request, Response, Body, StatusCode};
use hyper::body;
use hyper::header::HeaderName;
use hyper::header::HeaderValue;
use syndicate::actor::*;
use syndicate::error::Error;
use syndicate::trace;
use syndicate::value::Map;
use syndicate::value::NestedValue;
use syndicate::schemas::http;
use tokio::sync::oneshot;
use tokio::sync::mpsc::{UnboundedSender, unbounded_channel};
use tokio_stream::wrappers::UnboundedReceiverStream;
use crate::language;
static NEXT_SEQ: AtomicU64 = AtomicU64::new(0);
pub fn empty_response(code: StatusCode) -> Response<Body> {
let mut r = Response::new(Body::empty());
*r.status_mut() = code;
r
}
type ChunkItem = Result<body::Bytes, Box<dyn std::error::Error + Send + Sync>>;
struct ResponseCollector {
tx_res: Option<(oneshot::Sender<Response<Body>>, Response<Body>)>,
body_tx: Option<UnboundedSender<ChunkItem>>,
}
impl ResponseCollector {
fn new(tx: oneshot::Sender<Response<Body>>) -> Self {
let (body_tx, body_rx) = unbounded_channel();
let body_stream: Box<dyn futures::Stream<Item = ChunkItem> + Send> =
Box::new(UnboundedReceiverStream::new(body_rx));
let mut res = Response::new(body_stream.into());
*res.status_mut() = StatusCode::OK;
ResponseCollector {
tx_res: Some((tx, res)),
body_tx: Some(body_tx),
}
}
fn with_res<F: FnOnce(&mut Response<Body>) -> ActorResult>(&mut self, f: F) -> ActorResult {
if let Some((_, res)) = &mut self.tx_res {
f(res)?;
}
Ok(())
}
fn deliver_res(&mut self) {
if let Some((tx, res)) = std::mem::replace(&mut self.tx_res, None) {
let _ = tx.send(res);
}
}
fn add_chunk(&mut self, value: http::Chunk) -> ActorResult {
self.deliver_res();
if let Some(body_tx) = self.body_tx.as_mut() {
body_tx.send(Ok(match value {
http::Chunk::Bytes(bs) => bs.into(),
http::Chunk::String(s) => s.as_bytes().to_vec().into(),
}))?;
}
Ok(())
}
fn finish(&mut self, t: &mut Activation) -> ActorResult {
self.deliver_res();
self.body_tx = None;
t.stop();
Ok(())
}
}
impl Entity<http::HttpResponse> for ResponseCollector {
fn message(&mut self, t: &mut Activation, message: http::HttpResponse) -> ActorResult {
match message {
http::HttpResponse::Status { code, .. } => self.with_res(|r| {
*r.status_mut() = StatusCode::from_u16(
(&code).try_into().map_err(|_| "bad status code")?)?;
Ok(())
}),
http::HttpResponse::Header { name, value } => self.with_res(|r| {
r.headers_mut().insert(HeaderName::from_bytes(name.as_bytes())?,
HeaderValue::from_str(value.as_str())?);
Ok(())
}),
http::HttpResponse::Chunk { chunk } => {
self.add_chunk(*chunk)
}
http::HttpResponse::Done { chunk } => {
self.add_chunk(*chunk)?;
self.finish(t)
}
}
}
}
pub async fn serve(
trace_collector: Option<trace::TraceCollector>,
facet: FacetRef,
httpd: Arc<Cap>,
mut req: Request<Body>,
port: u16,
) -> Result<Response<Body>, Error> {
let host = match req.headers().get("host").and_then(|v| v.to_str().ok()) {
None => http::RequestHost::Absent,
Some(h) => http::RequestHost::Present(match h.rsplit_once(':') {
None => h.to_string(),
Some((h, _port)) => h.to_string(),
})
};
let uri = req.uri();
let mut path: Vec<String> = uri.path().split('/').map(|s| s.to_string()).collect();
path.remove(0);
let mut query: Map<String, Vec<http::QueryValue>> = Map::new();
for piece in uri.query().unwrap_or("").split('&').into_iter() {
match piece.split_once('=') {
Some((k, v)) => {
let key = k.to_string();
let value = v.to_string();
match query.get_mut(&key) {
None => { query.insert(key, vec![http::QueryValue::String(value)]); },
Some(vs) => { vs.push(http::QueryValue::String(value)); },
}
}
None => {
if piece.len() > 0 {
let key = piece.to_string();
if !query.contains_key(&key) {
query.insert(key, vec![]);
}
}
}
}
}
let mut headers: Map<String, String> = Map::new();
for h in req.headers().into_iter() {
match h.1.to_str() {
Ok(v) => { headers.insert(h.0.as_str().to_string().to_lowercase(), v.to_string()); },
Err(_) => return Ok(empty_response(StatusCode::BAD_REQUEST)),
}
}
let body = match body::to_bytes(req.body_mut()).await {
Ok(b) => http::RequestBody::Present(b.to_vec()),
Err(_) => return Ok(empty_response(StatusCode::BAD_REQUEST)),
};
let account = Account::new(Some(AnyValue::symbol("http")), trace_collector);
let (tx, rx) = oneshot::channel();
facet.activate(&account, Some(trace::TurnCause::external("http")), |t| {
t.facet(move |t| {
let sreq = http::HttpRequest {
sequence_number: NEXT_SEQ.fetch_add(1, Ordering::Relaxed).into(),
host,
port: port.into(),
method: req.method().to_string().to_lowercase(),
path,
headers: http::Headers(headers),
query,
body,
};
tracing::debug!(?sreq);
let srep = Cap::guard(&language().syndicate, t.create(ResponseCollector::new(tx)));
httpd.assert(t, language(), &http::HttpContext { req: sreq, res: srep });
Ok(())
})?;
Ok(())
});
let response_result = rx.await;
match response_result {
Ok(response) => Ok(response),
Err(_ /* sender dropped */) => Ok(empty_response(StatusCode::INTERNAL_SERVER_ERROR)),
}
}


@ -1,5 +1,7 @@
use preserves_schema::Codec; use preserves_schema::Codec;
use std::convert::TryInto;
use std::io;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::Arc; use std::sync::Arc;
@ -11,6 +13,7 @@ use syndicate::enclose;
use syndicate::relay; use syndicate::relay;
use syndicate::schemas::service; use syndicate::schemas::service;
use syndicate::schemas::transport_address; use syndicate::schemas::transport_address;
use syndicate::trace;
use syndicate::value::Map; use syndicate::value::Map;
use syndicate::value::NestedValue; use syndicate::value::NestedValue;
@ -18,16 +21,22 @@ use syndicate::value::NestedValue;
mod counter; mod counter;
mod dependencies; mod dependencies;
mod gatekeeper; mod gatekeeper;
mod http;
mod language; mod language;
mod lifecycle; mod lifecycle;
mod protocol; mod protocol;
mod script; mod script;
mod services; mod services;
#[cfg(feature = "jemalloc")]
#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
mod schemas { mod schemas {
include!(concat!(env!("OUT_DIR"), "/src/schemas/mod.rs")); include!(concat!(env!("OUT_DIR"), "/src/schemas/mod.rs"));
} }
use language::Language;
use language::language; use language::language;
use schemas::internal_services; use schemas::internal_services;
@ -50,10 +59,17 @@ struct ServerConfig {
#[structopt(long)] #[structopt(long)]
no_banner: bool, no_banner: bool,
#[structopt(short = "t", long)]
trace_file: Option<PathBuf>,
/// Enable `$control` entity.
#[structopt(long)]
control: bool,
} }
#[tokio::main] #[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> { async fn main() -> ActorResult {
let config = Arc::new(ServerConfig::from_args()); let config = Arc::new(ServerConfig::from_args());
syndicate::convenient_logging()?; syndicate::convenient_logging()?;
@ -74,7 +90,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
eprintln!(r"{} \____{}/{}_/ {} /____/\__, /_/ /_/\____/_/\___/\__/_/\__/\___/", GREEN, BRIGHT_GREEN, GREEN, NORMAL); eprintln!(r"{} \____{}/{}_/ {} /____/\__, /_/ /_/\____/_/\___/\__/_/\__/\___/", GREEN, BRIGHT_GREEN, GREEN, NORMAL);
eprintln!(r" /____/"); eprintln!(r" /____/");
eprintln!(r""); eprintln!(r"");
eprintln!(r" {}version {}{}", BRIGHT_YELLOW, env!("CARGO_PKG_VERSION"), NORMAL); eprintln!(r" {}version {} [syndicate {}]{}", BRIGHT_YELLOW, env!("CARGO_PKG_VERSION"), syndicate::syndicate_package_version(), NORMAL);
eprintln!(r""); eprintln!(r"");
eprintln!(r" documentation & reference material: https://syndicate-lang.org/"); eprintln!(r" documentation & reference material: https://syndicate-lang.org/");
eprintln!(r" source code & bugs: https://git.syndicate-lang.org/syndicate-lang/syndicate-rs"); eprintln!(r" source code & bugs: https://git.syndicate-lang.org/syndicate-lang/syndicate-rs");
@ -83,13 +99,18 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
tracing::trace!("startup"); tracing::trace!("startup");
Actor::new(None).boot(tracing::Span::current(), move |t| { let trace_collector = config.trace_file.clone().map(
let server_config_ds = Cap::new(&t.create(Dataspace::new(Some(syndicate::name!("config"))))); |p| Ok::<trace::TraceCollector, io::Error>(trace::TraceCollector::ascii(
let log_ds = Cap::new(&t.create(Dataspace::new(Some(syndicate::name!("log"))))); io::BufWriter::new(std::fs::File::create(p)?))))
.transpose()?;
Actor::top(trace_collector, move |t| {
let server_config_ds = Cap::new(&t.create(Dataspace::new(Some(AnyValue::symbol("config")))));
let log_ds = Cap::new(&t.create(Dataspace::new(Some(AnyValue::symbol("log")))));
if config.inferior { if config.inferior {
tracing::info!("inferior server instance"); tracing::info!("inferior server instance");
t.spawn(syndicate::name!("parent"), enclose!((server_config_ds) move |t| { t.spawn(Some(AnyValue::symbol("parent")), enclose!((server_config_ds) move |t| {
protocol::run_io_relay(t, protocol::run_io_relay(t,
relay::Input::Bytes(Box::pin(tokio::io::stdin())), relay::Input::Bytes(Box::pin(tokio::io::stdin())),
relay::Output::Bytes(Box::pin(tokio::io::stdout())), relay::Output::Bytes(Box::pin(tokio::io::stdout())),
@ -97,20 +118,36 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
})); }));
} }
let gatekeeper = Cap::guard(Arc::clone(&language().syndicate), t.create( let gatekeeper = Cap::guard(Language::arc(), t.create(
syndicate::entity(Arc::clone(&server_config_ds)) syndicate::entity(Arc::clone(&server_config_ds))
.on_asserted(gatekeeper::handle_resolve))); .on_asserted_facet(gatekeeper::facet_handle_resolve)));
gatekeeper::handle_binds(t, &server_config_ds)?;
let mut env = Map::new(); let mut env = Map::new();
env.insert("config".to_owned(), AnyValue::domain(Arc::clone(&server_config_ds))); env.insert("config".to_owned(), AnyValue::domain(Arc::clone(&server_config_ds)));
env.insert("log".to_owned(), AnyValue::domain(Arc::clone(&log_ds))); env.insert("log".to_owned(), AnyValue::domain(Arc::clone(&log_ds)));
env.insert("gatekeeper".to_owned(), AnyValue::domain(Arc::clone(&gatekeeper))); env.insert("gatekeeper".to_owned(), AnyValue::domain(Arc::clone(&gatekeeper)));
if config.control {
env.insert("control".to_owned(), AnyValue::domain(Cap::guard(Language::arc(), t.create(
syndicate::entity(())
.on_message(|_, _t, m: crate::schemas::control::ExitServer| {
tracing::info!("$control received exit request with code {}", m.code);
std::process::exit((&m.code).try_into().unwrap_or_else(|_| {
tracing::warn!(
"exit code {} out-of-range of 32-bit signed integer, using 1 instead",
m.code);
1
}))
})))));
}
dependencies::boot(t, Arc::clone(&server_config_ds)); dependencies::boot(t, Arc::clone(&server_config_ds));
services::config_watcher::on_demand(t, Arc::clone(&server_config_ds)); services::config_watcher::on_demand(t, Arc::clone(&server_config_ds));
services::daemon::on_demand(t, Arc::clone(&server_config_ds), Arc::clone(&log_ds)); services::daemon::on_demand(t, Arc::clone(&server_config_ds), Arc::clone(&log_ds));
services::debt_reporter::on_demand(t, Arc::clone(&server_config_ds)); services::debt_reporter::on_demand(t, Arc::clone(&server_config_ds));
services::milestone::on_demand(t, Arc::clone(&server_config_ds)); services::gatekeeper::on_demand(t, Arc::clone(&server_config_ds));
services::http_router::on_demand(t, Arc::clone(&server_config_ds));
services::tcp_relay_listener::on_demand(t, Arc::clone(&server_config_ds)); services::tcp_relay_listener::on_demand(t, Arc::clone(&server_config_ds));
services::unix_relay_listener::on_demand(t, Arc::clone(&server_config_ds)); services::unix_relay_listener::on_demand(t, Arc::clone(&server_config_ds));
@ -124,7 +161,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
for port in config.ports.clone() { for port in config.ports.clone() {
server_config_ds.assert(t, language(), &service::RunService { server_config_ds.assert(t, language(), &service::RunService {
service_name: language().unparse(&internal_services::TcpRelayListener { service_name: language().unparse(&internal_services::TcpWithoutHttp {
addr: transport_address::Tcp { addr: transport_address::Tcp {
host: "0.0.0.0".to_owned(), host: "0.0.0.0".to_owned(),
port: (port as i32).into(), port: (port as i32).into(),
@ -154,7 +191,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
}); });
} }
t.spawn(tracing::Span::current(), enclose!((log_ds) move |t| { t.spawn(Some(AnyValue::symbol("logger")), enclose!((log_ds) move |t| {
let n_unknown: AnyValue = AnyValue::symbol("-"); let n_unknown: AnyValue = AnyValue::symbol("-");
let n_pid: AnyValue = AnyValue::symbol("pid"); let n_pid: AnyValue = AnyValue::symbol("pid");
let n_line: AnyValue = AnyValue::symbol("line"); let n_line: AnyValue = AnyValue::symbol("line");


@ -1,26 +1,30 @@
use futures::SinkExt; use futures::SinkExt;
use futures::StreamExt; use futures::StreamExt;
use hyper::header::HeaderValue;
use hyper::service::service_fn;
use std::future::ready; use std::future::ready;
use std::io;
use std::sync::Arc; use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use syndicate::actor::*; use syndicate::actor::*;
use syndicate::enclose;
use syndicate::error::Error; use syndicate::error::Error;
use syndicate::error::error; use syndicate::error::error;
use syndicate::relay; use syndicate::relay;
use syndicate::trace;
use syndicate::value::NestedValue; use syndicate::value::NestedValue;
use tokio::net::TcpStream; use tokio::net::TcpStream;
use tungstenite::Message; use hyper_tungstenite::tungstenite::Message;
struct ExitListener; struct ExitListener;
impl Entity<()> for ExitListener { impl Entity<()> for ExitListener {
fn exit_hook(&mut self, _t: &mut Activation, exit_status: &Arc<ActorResult>) -> ActorResult { fn exit_hook(&mut self, _t: &mut Activation, exit_status: &Arc<ExitStatus>) {
tracing::info!(?exit_status, "disconnect"); tracing::info!(?exit_status, "disconnect");
Ok(())
} }
} }
@ -31,52 +35,97 @@ pub fn run_io_relay(
initial_ref: Arc<Cap>, initial_ref: Arc<Cap>,
) -> ActorResult { ) -> ActorResult {
let exit_listener = t.create(ExitListener); let exit_listener = t.create(ExitListener);
t.state.add_exit_hook(&exit_listener); t.add_exit_hook(&exit_listener);
relay::TunnelRelay::run(t, i, o, Some(initial_ref), None, false); relay::TunnelRelay::run(t, i, o, Some(initial_ref), None, false);
Ok(()) Ok(())
} }
pub fn run_connection( pub fn run_connection(
trace_collector: Option<trace::TraceCollector>,
facet: FacetRef, facet: FacetRef,
i: relay::Input, i: relay::Input,
o: relay::Output, o: relay::Output,
initial_ref: Arc<Cap>, initial_ref: Arc<Cap>,
) -> ActorResult { ) {
facet.activate(Account::new(syndicate::name!("start-session")), let cause = trace_collector.as_ref().map(|_| trace::TurnCause::external("start-session"));
|t| run_io_relay(t, i, o, initial_ref)) let account = Account::new(Some(AnyValue::symbol("start-session")), trace_collector);
facet.activate(&account, cause, |t| run_io_relay(t, i, o, initial_ref));
} }
pub async fn detect_protocol( pub async fn detect_protocol(
trace_collector: Option<trace::TraceCollector>,
facet: FacetRef, facet: FacetRef,
stream: TcpStream, stream: TcpStream,
gateway: Arc<Cap>, gateway: Arc<Cap>,
httpd: Option<Arc<Cap>>,
addr: std::net::SocketAddr, addr: std::net::SocketAddr,
server_port: u16,
) -> ActorResult { ) -> ActorResult {
let (i, o) = { let mut buf = [0; 1]; // peek at the first byte to see what kind of connection to expect
let mut buf = [0; 1]; // peek at the first byte to see what kind of connection to expect match stream.peek(&mut buf).await? {
match stream.peek(&mut buf).await? { 1 => match buf[0] {
1 => match buf[0] { v if v == b'[' /* Turn */ || v == b'<' /* Error and Extension */ || v >= 128 => {
b'G' /* ASCII 'G' for "GET" */ => { tracing::info!(protocol = %(if v >= 128 { "application/syndicate" } else { "text/syndicate" }), peer = ?addr);
tracing::info!(protocol = %"websocket", peer = ?addr); let (i, o) = stream.into_split();
let s = tokio_tungstenite::accept_async(stream).await let i = relay::Input::Bytes(Box::pin(i));
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; let o = relay::Output::Bytes(Box::pin(o /* BufWriter::new(o) */));
let (o, i) = s.split(); run_connection(trace_collector, facet, i, o, gateway);
let i = i.filter_map(|r| ready(extract_binary_packets(r).transpose())); Ok(())
let o = o.sink_map_err(message_error).with(|bs| ready(Ok(Message::Binary(bs))));
(relay::Input::Packets(Box::pin(i)), relay::Output::Packets(Box::pin(o)))
},
_ => {
tracing::info!(protocol = %"raw", peer = ?addr);
let (i, o) = stream.into_split();
(relay::Input::Bytes(Box::pin(i)),
relay::Output::Bytes(Box::pin(o /* BufWriter::new(o) */)))
}
} }
0 => Err(error("closed before starting", AnyValue::new(false)))?, _ => {
_ => unreachable!() let upgraded = Arc::new(AtomicBool::new(false));
let keepalive = facet.actor.keep_alive();
let mut http = hyper::server::conn::Http::new();
http.http1_keep_alive(true);
http.http1_only(true);
let service = service_fn(|mut req| enclose!(
(upgraded, keepalive, trace_collector, facet, gateway, httpd) async move {
if hyper_tungstenite::is_upgrade_request(&req) {
tracing::info!(protocol = %"websocket",
method=%req.method(),
uri=?req.uri(),
host=?req.headers().get("host").unwrap_or(&HeaderValue::from_static("")));
let (response, websocket) = hyper_tungstenite::upgrade(&mut req, None)
.map_err(|e| message_error(e))?;
upgraded.store(true, Ordering::SeqCst);
tokio::spawn(enclose!(() async move {
let (o, i) = websocket.await?.split();
let i = i.filter_map(|r| ready(extract_binary_packets(r).transpose()));
let o = o.sink_map_err(message_error).with(|bs| ready(Ok(Message::Binary(bs))));
let i = relay::Input::Packets(Box::pin(i));
let o = relay::Output::Packets(Box::pin(o));
run_connection(trace_collector, facet, i, o, gateway);
drop(keepalive);
Ok(()) as ActorResult
}));
Ok(response)
} else {
match httpd {
None => Ok(crate::http::empty_response(
hyper::StatusCode::SERVICE_UNAVAILABLE)),
Some(httpd) => {
tracing::info!(protocol = %"http",
method=%req.method(),
uri=?req.uri(),
host=?req.headers().get("host").unwrap_or(&HeaderValue::from_static("")));
crate::http::serve(trace_collector, facet, httpd, req, server_port).await
}
}
}
}));
http.serve_connection(stream, service).with_upgrades().await?;
if upgraded.load(Ordering::SeqCst) {
tracing::debug!("serve_connection completed after upgrade to websocket");
} else {
tracing::debug!("serve_connection completed after regular HTTP session");
facet.activate(&Account::new(None, None), None, |t| Ok(t.stop()));
}
Ok(())
},
} }
}; 0 => Err(error("closed before starting", AnyValue::new(false)))?,
run_connection(facet, i, o, gateway) _ => unreachable!()
}
} }
fn message_error<E: std::fmt::Display>(e: E) -> Error { fn message_error<E: std::fmt::Display>(e: E) -> Error {
@ -84,7 +133,7 @@ fn message_error<E: std::fmt::Display>(e: E) -> Error {
} }
fn extract_binary_packets( fn extract_binary_packets(
r: Result<Message, tungstenite::Error>, r: Result<Message, hyper_tungstenite::tungstenite::Error>,
) -> Result<Option<Vec<u8>>, Error> { ) -> Result<Option<Vec<u8>>, Error> {
match r { match r {
Ok(m) => match m { Ok(m) => match m {
@ -98,6 +147,8 @@ fn extract_binary_packets(
Ok(None), // unsolicited pongs are to be ignored Ok(None), // unsolicited pongs are to be ignored
Message::Close(_) => Message::Close(_) =>
Ok(None), // we're about to see the end of the stream, so ignore this Ok(None), // we're about to see the end of the stream, so ignore this
Message::Frame(_) =>
Err("Raw frames are not accepted")?,
}, },
Err(e) => Err(message_error(e)), Err(e) => Err(message_error(e)),
} }


@ -9,14 +9,16 @@ use syndicate::actor::*;
use syndicate::dataspace::Dataspace; use syndicate::dataspace::Dataspace;
use syndicate::during; use syndicate::during;
use syndicate::enclose; use syndicate::enclose;
use syndicate::pattern::{lift_literal, drop_literal}; use syndicate::pattern::{lift_literal, drop_literal, pattern_seq_from_dictionary};
use syndicate::schemas::dataspace; use syndicate::schemas::dataspace;
use syndicate::schemas::dataspace_patterns as P; use syndicate::schemas::dataspace_patterns as P;
use syndicate::schemas::sturdy; use syndicate::schemas::sturdy;
use syndicate::value::Map; use syndicate::value::Map;
use syndicate::value::NestedValue; use syndicate::value::NestedValue;
use syndicate::value::NoEmbeddedDomainCodec;
use syndicate::value::Record; use syndicate::value::Record;
use syndicate::value::Set; use syndicate::value::Set;
use syndicate::value::TextWriter;
use syndicate::value::Value; use syndicate::value::Value;
use crate::language::language; use crate::language::language;
@ -76,6 +78,12 @@ pub enum Instruction {
pattern_template: AnyValue, pattern_template: AnyValue,
expr: Expr, expr: Expr,
}, },
Cond {
value_var: String,
pattern_template: AnyValue,
on_match: Box<Instruction>,
on_nomatch: Box<Instruction>,
},
} }
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
@ -86,11 +94,14 @@ pub enum Expr {
Dataspace, Dataspace,
Timestamp, Timestamp,
Facet, Facet,
Stringify {
expr: Box<Expr>,
},
} }
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
enum RewriteTemplate { enum RewriteTemplate {
Filter { Accept {
pattern_template: AnyValue, pattern_template: AnyValue,
}, },
Rewrite { Rewrite {
@ -99,6 +110,16 @@ enum RewriteTemplate {
}, },
} }
#[derive(Debug, Clone)]
enum CaveatTemplate {
Alts {
alternatives: Vec<RewriteTemplate>,
},
Reject {
pattern_template: AnyValue,
},
}
#[derive(Debug)] #[derive(Debug)]
enum Symbolic { enum Symbolic {
Reference(String), Reference(String),
@ -152,7 +173,7 @@ fn bad_instruction(message: &str) -> io::Error {
} }
fn discard() -> P::Pattern { fn discard() -> P::Pattern {
P::Pattern::DDiscard(Box::new(P::DDiscard)) P::Pattern::Discard
} }
fn dlit(value: AnyValue) -> P::Pattern { fn dlit(value: AnyValue) -> P::Pattern {
@ -163,62 +184,83 @@ fn tlit(value: AnyValue) -> sturdy::Template {
sturdy::Template::Lit(Box::new(sturdy::Lit { value })) sturdy::Template::Lit(Box::new(sturdy::Lit { value }))
} }
fn parse_attenuation(r: &Record<AnyValue>) -> io::Result<Option<(String, Vec<RewriteTemplate>)>> { fn parse_rewrite(raw_base_name: &AnyValue, e: &AnyValue) -> io::Result<RewriteTemplate> {
if let Some(fields) = e.value().as_simple_record("accept", Some(1)) {
return Ok(RewriteTemplate::Accept {
pattern_template: fields[0].clone(),
});
}
if let Some(fields) = e.value().as_simple_record("rewrite", Some(2)) {
return Ok(RewriteTemplate::Rewrite {
pattern_template: fields[0].clone(),
template_template: fields[1].clone(),
});
}
Err(bad_instruction(&format!("Bad rewrite in attenuation of {:?}: {:?}", raw_base_name, e)))
}
fn parse_caveat(raw_base_name: &AnyValue, e: &AnyValue) -> io::Result<CaveatTemplate> {
if let Some(fields) = e.value().as_simple_record("or", Some(1)) {
let raw_rewrites = match fields[0].value().as_sequence() {
None => Err(bad_instruction(&format!(
"Alternatives in <or> in attenuation of {:?} must have sequence of rewrites; got {:?}",
raw_base_name,
fields[0])))?,
Some(vs) => vs,
};
let alternatives =
raw_rewrites.iter().map(|r| parse_rewrite(raw_base_name, r)).collect::<Result<Vec<_>, _>>()?;
return Ok(CaveatTemplate::Alts{ alternatives });
}
if let Some(fields) = e.value().as_simple_record("reject", Some(1)) {
return Ok(CaveatTemplate::Reject{ pattern_template: fields[0].clone() });
}
if let Ok(r) = parse_rewrite(raw_base_name, e) {
return Ok(CaveatTemplate::Alts { alternatives: vec![r] });
}
Err(bad_instruction(&format!("Bad caveat in attenuation of {:?}: {:?}", raw_base_name, e)))
}
fn parse_attenuation(r: &Record<AnyValue>) -> io::Result<Option<(String, Vec<CaveatTemplate>)>> {
if r.label() != &AnyValue::symbol("*") { if r.label() != &AnyValue::symbol("*") {
return Ok(None); return Ok(None);
} }
if r.fields().len() != 2 { if r.fields().len() != 2 {
Err(bad_instruction(&format!( Err(bad_instruction(&format!(
"Attenuation requires a reference and a sequence of rewrites; got {:?}", "Attenuation requires a reference and a sequence of caveats; got {:?}",
r)))?; r)))?;
} }
let base_name = match r.fields()[0].value().as_symbol().map(|s| analyze(&s)) { let raw_base_name = &r.fields()[0];
let base_name = match raw_base_name.value().as_symbol().map(|s| analyze(&s)) {
Some(Symbolic::Reference(s)) => s, Some(Symbolic::Reference(s)) => s,
_ => Err(bad_instruction(&format!( _ => Err(bad_instruction(&format!(
"Attenuation must have variable reference as first argument; got {:?}", "Attenuation must have variable reference as first argument; got {:?}",
r.fields()[0])))?, raw_base_name)))?,
}; };
let raw_alternatives = match r.fields()[1].value().as_sequence() { let raw_caveats = match r.fields()[1].value().as_sequence() {
None => Err(bad_instruction(&format!( None => Err(bad_instruction(&format!(
"Attenuation of {:?} must have sequence of rewrites; got {:?}", "Attenuation of {:?} must have sequence of caveats; got {:?}",
r.fields()[0], raw_base_name,
r.fields()[1])))?, r.fields()[1])))?,
Some(vs) => vs, Some(vs) => vs,
}; };
let mut alternatives = Vec::new(); let caveats = raw_caveats.iter().map(|c| parse_caveat(raw_base_name, c)).collect::<Result<Vec<_>, _>>()?;
Ok(Some((base_name, caveats)))
for e in raw_alternatives.iter() {
match e.value().as_simple_record("filter", Some(1)) {
Some(fields) =>
alternatives.push(RewriteTemplate::Filter {
pattern_template: fields[0].clone()
}),
None => match e.value().as_simple_record("rewrite", Some(2)) {
Some(fields) =>
alternatives.push(RewriteTemplate::Rewrite {
pattern_template: fields[0].clone(),
template_template: fields[1].clone(),
}),
None => Err(bad_instruction(&format!(
"Bad rewrite in attenuation of {:?}: {:?}",
r.fields()[0],
e)))?,
}
}
}
Ok(Some((base_name, alternatives)))
} }
impl<'env> PatternInstantiator<'env> { impl<'env> PatternInstantiator<'env> {
fn instantiate_pattern(&mut self, template: &AnyValue) -> io::Result<P::Pattern> { fn instantiate_pattern(&mut self, template: &AnyValue) -> io::Result<P::Pattern> {
Ok(match template.value() { Ok(match template.value() {
Value::Boolean(_) | Value::Boolean(_) |
Value::Float(_) |
Value::Double(_) | Value::Double(_) |
Value::SignedInteger(_) | Value::SignedInteger(_) |
Value::String(_) | Value::String(_) |
@ -230,7 +272,7 @@ impl<'env> PatternInstantiator<'env> {
Symbolic::Discard => discard(), Symbolic::Discard => discard(),
Symbolic::Binder(s) => { Symbolic::Binder(s) => {
self.binding_names.push(s); self.binding_names.push(s);
P::Pattern::DBind(Box::new(P::DBind { pattern: discard() })) P::Pattern::Bind { pattern: Box::new(discard()) }
} }
Symbolic::Reference(s) => Symbolic::Reference(s) =>
dlit(self.env.lookup(&s, "pattern-template variable")?.clone()), dlit(self.env.lookup(&s, "pattern-template variable")?.clone()),
@ -239,49 +281,53 @@ impl<'env> PatternInstantiator<'env> {
}, },
Value::Record(r) => match parse_attenuation(r)? { Value::Record(r) => match parse_attenuation(r)? {
Some((base_name, alternatives)) => Some((base_name, caveats)) =>
dlit(self.env.eval_attenuation(base_name, alternatives)?), dlit(self.env.eval_attenuation(base_name, caveats)?),
None => match self.maybe_binder_with_pattern(r)? { None => match self.maybe_binder_with_pattern(r)? {
Some(pat) => pat, Some(pat) => pat,
None => { None => {
let label = self.instantiate_pattern(r.label())?; let label = self.instantiate_pattern(r.label())?;
let fields = r.fields().iter().map(|p| self.instantiate_pattern(p)) let entries = r.fields().iter().enumerate()
.collect::<io::Result<Vec<P::Pattern>>>()?; .map(|(i, p)| Ok((AnyValue::new(i), self.instantiate_pattern(p)?)))
P::Pattern::DCompound(Box::new(P::DCompound::Rec { .collect::<io::Result<Map<AnyValue, P::Pattern>>>()?;
label: drop_literal(&label) P::Pattern::Group {
.ok_or(bad_instruction("Record pattern must have literal label"))?, type_: Box::new(P::GroupType::Rec {
fields, label: drop_literal(&label)
})) .ok_or(bad_instruction("Record pattern must have literal label"))?,
}),
entries,
}
} }
} }
}, },
Value::Sequence(v) => Value::Sequence(v) =>
P::Pattern::DCompound(Box::new(P::DCompound::Arr { P::Pattern::Group {
items: v.iter() type_: Box::new(P::GroupType::Arr),
.map(|p| self.instantiate_pattern(p)) entries: v.iter().enumerate()
.collect::<io::Result<Vec<P::Pattern>>>()?, .map(|(i, p)| Ok((AnyValue::new(i), self.instantiate_pattern(p)?)))
})), .collect::<io::Result<Map<AnyValue, P::Pattern>>>()?,
},
Value::Set(_) => Value::Set(_) =>
Err(bad_instruction(&format!("Sets not permitted in patterns: {:?}", template)))?, Err(bad_instruction(&format!("Sets not permitted in patterns: {:?}", template)))?,
Value::Dictionary(v) => Value::Dictionary(v) =>
P::Pattern::DCompound(Box::new(P::DCompound::Dict { P::Pattern::Group {
type_: Box::new(P::GroupType::Dict),
entries: v.iter() entries: v.iter()
.map(|(a, b)| Ok((a.clone(), self.instantiate_pattern(b)?))) .map(|(a, b)| Ok((a.clone(), self.instantiate_pattern(b)?)))
.collect::<io::Result<Map<AnyValue, P::Pattern>>>()?, .collect::<io::Result<Map<AnyValue, P::Pattern>>>()?,
})), },
}) })
} }
fn maybe_binder_with_pattern(&mut self, r: &Record<AnyValue>) -> io::Result<Option<P::Pattern>> { fn maybe_binder_with_pattern(&mut self, r: &Record<AnyValue>) -> io::Result<Option<P::Pattern>> {
match r.label().value().as_symbol().map(|s| analyze(&s)) { match r.label().value().as_symbol().map(|s| analyze(&s)) {
Some(Symbolic::Binder(formal)) => if r.fields().len() == 1 { Some(Symbolic::Binder(formal)) if r.fields().len() == 1 => {
let pattern = self.instantiate_pattern(&r.fields()[0])?; let pattern = self.instantiate_pattern(&r.fields()[0])?;
self.binding_names.push(formal); self.binding_names.push(formal);
return Ok(Some(P::Pattern::DBind(Box::new(P::DBind { pattern })))); Ok(Some(P::Pattern::Bind { pattern: Box::new(pattern) }))
}, },
_ => (), _ => Ok(None),
} }
Ok(None)
} }
} }
@ -329,7 +375,6 @@ impl Env {
fn instantiate_value(&self, template: &AnyValue) -> io::Result<AnyValue> { fn instantiate_value(&self, template: &AnyValue) -> io::Result<AnyValue> {
Ok(match template.value() { Ok(match template.value() {
Value::Boolean(_) | Value::Boolean(_) |
Value::Float(_) |
Value::Double(_) | Value::Double(_) |
Value::SignedInteger(_) | Value::SignedInteger(_) |
Value::String(_) | Value::String(_) |
@ -348,8 +393,8 @@ impl Env {
}, },
Value::Record(r) => match parse_attenuation(r)? { Value::Record(r) => match parse_attenuation(r)? {
Some((base_name, alternatives)) => Some((base_name, caveats)) =>
self.eval_attenuation(base_name, alternatives)?, self.eval_attenuation(base_name, caveats)?,
None => None =>
Value::Record(Record(r.fields_vec().iter().map(|a| self.instantiate_value(a)) Value::Record(Record(r.fields_vec().iter().map(|a| self.instantiate_value(a))
.collect::<Result<Vec<_>, _>>()?)).wrap(), .collect::<Result<Vec<_>, _>>()?)).wrap(),
@ -387,7 +432,7 @@ impl Env {
fn eval_attenuation( fn eval_attenuation(
&self, &self,
base_name: String, base_name: String,
alternatives: Vec<RewriteTemplate>, caveats: Vec<CaveatTemplate>,
) -> io::Result<AnyValue> { ) -> io::Result<AnyValue> {
let base_value = self.lookup(&base_name, "attenuation-base variable")?; let base_value = self.lookup(&base_name, "attenuation-base variable")?;
match base_value.value().as_embedded() { match base_value.value().as_embedded() {
@ -395,9 +440,7 @@ impl Env {
"Value to be attenuated is {:?} but must be capability", "Value to be attenuated is {:?} but must be capability",
base_value))), base_value))),
Some(base_cap) => { Some(base_cap) => {
match base_cap.attenuate(&sturdy::Attenuation(vec![ match base_cap.attenuate(&caveats.iter().map(|c| self.instantiate_caveat(c)).collect::<Result<Vec<_>, _>>()?) {
self.instantiate_caveat(&alternatives)?]))
{
Ok(derived_cap) => Ok(AnyValue::domain(derived_cap)), Ok(derived_cap) => Ok(AnyValue::domain(derived_cap)),
Err(caveat_error) => Err(caveat_error) =>
Err(bad_instruction(&format!("Attenuation of {:?} failed: {:?}", Err(bad_instruction(&format!("Attenuation of {:?} failed: {:?}",
@ -477,6 +520,17 @@ impl Env {
} }
} }
} }
Instruction::Cond { value_var, pattern_template, on_match, on_nomatch } => {
let (binding_names, pattern) = self.instantiate_pattern(pattern_template)?;
let value = self.lookup(value_var, "value in conditional expression")?;
match pattern.match_value(&value) {
None => self.eval(t, on_nomatch)?,
Some(captures) => {
self.extend(&binding_names, captures);
self.eval(t, on_match)?
}
}
}
} }
Ok(()) Ok(())
} }
@ -487,38 +541,58 @@ impl Env {
Expr::Dataspace => Ok(AnyValue::domain(Cap::new(&t.create(Dataspace::new(None))))), Expr::Dataspace => Ok(AnyValue::domain(Cap::new(&t.create(Dataspace::new(None))))),
Expr::Timestamp => Ok(AnyValue::new(chrono::Utc::now().to_rfc3339())), Expr::Timestamp => Ok(AnyValue::new(chrono::Utc::now().to_rfc3339())),
Expr::Facet => Ok(AnyValue::domain(Cap::new(&t.create(FacetHandle::new())))), Expr::Facet => Ok(AnyValue::domain(Cap::new(&t.create(FacetHandle::new())))),
Expr::Stringify { expr } => {
let v = self.eval_expr(t, expr)?;
let s = TextWriter::encode(&mut NoEmbeddedDomainCodec, &v)?;
Ok(AnyValue::new(s))
}
}
}
fn instantiate_rewrite(
&self,
rw: &RewriteTemplate,
) -> io::Result<sturdy::Rewrite> {
match rw {
RewriteTemplate::Accept { pattern_template } => {
let (_binding_names, pattern) = self.instantiate_pattern(pattern_template)?;
Ok(sturdy::Rewrite {
pattern: embed_pattern(&P::Pattern::Bind { pattern: Box::new(pattern) }),
template: sturdy::Template::TRef(Box::new(sturdy::TRef { binding: 0.into() })),
})
}
RewriteTemplate::Rewrite { pattern_template, template_template } => {
let (binding_names, pattern) = self.instantiate_pattern(pattern_template)?;
Ok(sturdy::Rewrite {
pattern: embed_pattern(&pattern),
template: self.instantiate_template(&binding_names, template_template)?,
})
}
} }
} }
fn instantiate_caveat( fn instantiate_caveat(
&self, &self,
alternatives: &Vec<RewriteTemplate>, c: &CaveatTemplate,
) -> io::Result<sturdy::Caveat> { ) -> io::Result<sturdy::Caveat> {
let mut rewrites = Vec::new(); match c {
for rw in alternatives { CaveatTemplate::Alts { alternatives } => {
match rw { let mut rewrites =
RewriteTemplate::Filter { pattern_template } => { alternatives.iter().map(|r| self.instantiate_rewrite(r)).collect::<Result<Vec<_>, _>>()?;
let (_binding_names, pattern) = self.instantiate_pattern(pattern_template)?; if rewrites.len() == 1 {
rewrites.push(sturdy::Rewrite { Ok(sturdy::Caveat::Rewrite(Box::new(rewrites.pop().unwrap())))
pattern: embed_pattern(&P::Pattern::DBind(Box::new(P::DBind { pattern }))), } else {
template: sturdy::Template::TRef(Box::new(sturdy::TRef { binding: 0.into() })), Ok(sturdy::Caveat::Alts(Box::new(sturdy::Alts {
}) alternatives: rewrites,
} })))
RewriteTemplate::Rewrite { pattern_template, template_template } => {
let (binding_names, pattern) = self.instantiate_pattern(pattern_template)?;
rewrites.push(sturdy::Rewrite {
pattern: embed_pattern(&pattern),
template: self.instantiate_template(&binding_names, template_template)?,
})
} }
} }
} CaveatTemplate::Reject { pattern_template } => {
if rewrites.len() == 1 { Ok(sturdy::Caveat::Reject(Box::new(
Ok(sturdy::Caveat::Rewrite(Box::new(rewrites.pop().unwrap()))) sturdy::Reject {
} else { pattern: embed_pattern(&self.instantiate_pattern(pattern_template)?.1),
Ok(sturdy::Caveat::Alts(Box::new(sturdy::Alts { })))
alternatives: rewrites, }
})))
} }
} }
@ -533,7 +607,6 @@ impl Env {
Ok(match template.value() { Ok(match template.value() {
Value::Boolean(_) | Value::Boolean(_) |
Value::Float(_) |
Value::Double(_) | Value::Double(_) |
Value::SignedInteger(_) | Value::SignedInteger(_) |
Value::String(_) | Value::String(_) |
@ -557,18 +630,19 @@ impl Env {
}, },
Value::Record(r) => match parse_attenuation(r)? { Value::Record(r) => match parse_attenuation(r)? {
Some((base_name, alternatives)) => Some((base_name, caveats)) =>
match find_bound(&base_name) { match find_bound(&base_name) {
Some(i) => Some(i) =>
sturdy::Template::TAttenuate(Box::new(sturdy::TAttenuate { sturdy::Template::TAttenuate(Box::new(sturdy::TAttenuate {
template: sturdy::Template::TRef(Box::new(sturdy::TRef { template: sturdy::Template::TRef(Box::new(sturdy::TRef {
binding: i.into(), binding: i.into(),
})), })),
attenuation: sturdy::Attenuation(vec![ attenuation: caveats.iter()
self.instantiate_caveat(&alternatives)?]), .map(|c| self.instantiate_caveat(c))
.collect::<Result<Vec<_>, _>>()?,
})), })),
None => None =>
tlit(self.eval_attenuation(base_name, alternatives)?), tlit(self.eval_attenuation(base_name, caveats)?),
}, },
None => { None => {
// TODO: properly consolidate constant templates into literals. // TODO: properly consolidate constant templates into literals.
@ -604,24 +678,26 @@ impl Env {
fn embed_pattern(p: &P::Pattern) -> sturdy::Pattern { fn embed_pattern(p: &P::Pattern) -> sturdy::Pattern {
match p { match p {
P::Pattern::DDiscard(_) => sturdy::Pattern::PDiscard(Box::new(sturdy::PDiscard)), P::Pattern::Discard => sturdy::Pattern::PDiscard(Box::new(sturdy::PDiscard)),
P::Pattern::DBind(b) => sturdy::Pattern::PBind(Box::new(sturdy::PBind { P::Pattern::Bind { pattern } => sturdy::Pattern::PBind(Box::new(sturdy::PBind {
pattern: embed_pattern(&b.pattern), pattern: embed_pattern(&**pattern),
})), })),
P::Pattern::DLit(b) => sturdy::Pattern::Lit(Box::new(sturdy::Lit { P::Pattern::Lit { value } => sturdy::Pattern::Lit(Box::new(sturdy::Lit {
value: language().unparse(&b.value), value: language().unparse(&**value),
})), })),
P::Pattern::DCompound(b) => sturdy::Pattern::PCompound(Box::new(match &**b { P::Pattern::Group { type_, entries } => sturdy::Pattern::PCompound(Box::new(match &**type_ {
P::DCompound::Rec { label, fields } => P::GroupType::Rec { label } =>
sturdy::PCompound::Rec { sturdy::PCompound::Rec {
label: label.clone(), label: label.clone(),
fields: fields.iter().map(embed_pattern).collect(), fields: pattern_seq_from_dictionary(entries).expect("correct field entries")
.into_iter().map(embed_pattern).collect(),
}, },
P::DCompound::Arr { items } => P::GroupType::Arr =>
sturdy::PCompound::Arr { sturdy::PCompound::Arr {
items: items.iter().map(embed_pattern).collect(), items: pattern_seq_from_dictionary(entries).expect("correct element entries")
.into_iter().map(embed_pattern).collect(),
}, },
P::DCompound::Dict { entries } => P::GroupType::Dict =>
sturdy::PCompound::Dict { sturdy::PCompound::Dict {
entries: entries.iter().map(|(k, v)| (k.clone(), embed_pattern(v))).collect(), entries: entries.iter().map(|(k, v)| (k.clone(), embed_pattern(v))).collect(),
}, },
@ -664,7 +740,7 @@ impl<'t> Parser<'t> {
T::default() T::default()
} }
pub fn parse(&mut self, target: &str) -> Parsed<Instruction> { pub fn parse(&mut self, target: &str, outer_target: &str) -> Parsed<Instruction> {
if self.ateof() { if self.ateof() {
return Parsed::Eof; return Parsed::Eof;
} }
@ -679,7 +755,7 @@ impl<'t> Parser<'t> {
if let Some(tokens) = self.peek().as_sequence() { if let Some(tokens) = self.peek().as_sequence() {
self.drop(); self.drop();
let mut inner_parser = Parser::new(tokens); let mut inner_parser = Parser::new(tokens);
let instructions = inner_parser.parse_all(target); let instructions = inner_parser.parse_all(target, outer_target);
self.errors.extend(inner_parser.errors); self.errors.extend(inner_parser.errors);
return Parsed::Value(Instruction::Sequence { instructions }); return Parsed::Value(Instruction::Sequence { instructions });
} }
@ -694,7 +770,7 @@ impl<'t> Parser<'t> {
Instruction::During { target, pattern_template, body } }, Instruction::During { target, pattern_template, body } },
"?" => |target, pattern_template, body| { // "??" "?" => |target, pattern_template, body| { // "??"
Instruction::OnMessage { target, pattern_template, body } }, Instruction::OnMessage { target, pattern_template, body } },
"-" => match self.parse(target) { // "?-" "-" => match self.parse(target, outer_target) { // "?-"
Parsed::Value(i) => return Parsed::Value(Instruction::OnStop { Parsed::Value(i) => return Parsed::Value(Instruction::OnStop {
body: Box::new(i), body: Box::new(i),
}), }),
@ -709,7 +785,7 @@ impl<'t> Parser<'t> {
} }
let pattern_template = self.shift(); let pattern_template = self.shift();
return match self.parse(target) { return match self.parse(target, outer_target) {
Parsed::Eof => Parsed::Eof =>
self.error(format!( self.error(format!(
"Missing instruction in react with pattern {:?}", "Missing instruction in react with pattern {:?}",
@ -733,7 +809,7 @@ impl<'t> Parser<'t> {
let m = format!("Missing instruction after retarget: {:?}", self.peek()); let m = format!("Missing instruction after retarget: {:?}", self.peek());
return self.error(m); return self.error(m);
} }
return self.parse(&s); return self.parse(&s, target);
} }
Symbolic::Bare(s) => { Symbolic::Bare(s) => {
if s == "let" { if s == "let" {
@ -774,26 +850,30 @@ impl<'t> Parser<'t> {
} }
Symbolic::Literal(s) => { Symbolic::Literal(s) => {
if s == "~" { // "=~" if s == "~" { // "=~"
// s.drop(); self.drop();
// if self.ateof() { if self.ateof() {
// return self.error("Missing pattern, true-instruction and false-continuation in match"); return self.error("Missing pattern, true-instruction and false-continuation in match");
// } }
// let match_template = self.shift(); let match_template = self.shift();
// return match self.parse(target) { return match self.parse(outer_target, outer_target) {
// Parsed::Eof => Parsed::Eof =>
// self.error(format!( self.error(format!(
// "Missing true-instruction in conditional with pattern {:?}", "Missing true-instruction in conditional with pattern {:?}",
// match_template)), match_template)),
// Parsed::Skip => Parsed::Skip =>
// Parsed::Skip, Parsed::Skip,
// Parsed::Value(true_instruction) => { Parsed::Value(true_instruction) => {
// let false_instructions = self.parse_all(); let false_instructions = self.parse_all(outer_target, outer_target);
// Parsed::Value(Instruction::Cond { Parsed::Value(Instruction::Cond {
// value: target.to_owned(), value_var: target.to_owned(),
// pattern: match_template, pattern_template: match_template,
// on_match: true_instruction, on_match: Box::new(true_instruction),
// on_nomatch: self.parse_all( on_nomatch: Box::new(Instruction::Sequence {
// }; instructions: false_instructions,
}),
})
}
};
} else { } else {
/* fall through */ /* fall through */
} }
@ -807,10 +887,10 @@ impl<'t> Parser<'t> {
} }
} }
pub fn parse_all(&mut self, target: &str) -> Vec<Instruction> { pub fn parse_all(&mut self, target: &str, outer_target: &str) -> Vec<Instruction> {
let mut instructions = Vec::new(); let mut instructions = Vec::new();
loop { loop {
match self.parse(target) { match self.parse(target, outer_target) {
Parsed::Value(i) => instructions.push(i), Parsed::Value(i) => instructions.push(i),
Parsed::Skip => (), Parsed::Skip => (),
Parsed::Eof => break, Parsed::Eof => break,
@ -820,7 +900,7 @@ impl<'t> Parser<'t> {
} }
pub fn parse_top(&mut self, target: &str) -> Result<Option<Instruction>, Vec<String>> { pub fn parse_top(&mut self, target: &str) -> Result<Option<Instruction>, Vec<String>> {
let instructions = self.parse_all(target); let instructions = self.parse_all(target, target);
if self.errors.is_empty() { if self.errors.is_empty() {
match instructions.len() { match instructions.len() {
0 => Ok(None), 0 => Ok(None),
@ -851,6 +931,11 @@ impl<'t> Parser<'t> {
return Some(Expr::Facet); return Some(Expr::Facet);
} }
if self.peek() == &Value::symbol("stringify") {
self.drop();
return Some(Expr::Stringify { expr: Box::new(self.parse_expr()?) });
}
return Some(Expr::Template{ template: self.shift() }); return Some(Expr::Template{ template: self.shift() });
} }
} }

View File

@ -3,6 +3,8 @@ use notify::Watcher;
use notify::RecursiveMode; use notify::RecursiveMode;
use notify::watcher; use notify::watcher;
use syndicate::preserves::rec;
use std::fs; use std::fs;
use std::future; use std::future;
use std::io; use std::io;
@ -16,6 +18,7 @@ use syndicate::actor::*;
use syndicate::error::Error; use syndicate::error::Error;
use syndicate::enclose; use syndicate::enclose;
use syndicate::supervise::{Supervisor, SupervisorConfiguration}; use syndicate::supervise::{Supervisor, SupervisorConfiguration};
use syndicate::trace;
use syndicate::value::BinarySource; use syndicate::value::BinarySource;
use syndicate::value::BytesBinarySource; use syndicate::value::BytesBinarySource;
use syndicate::value::Map; use syndicate::value::Map;
@ -32,11 +35,11 @@ use crate::script;
use syndicate_macros::during; use syndicate_macros::during;
pub fn on_demand(t: &mut Activation, config_ds: Arc<Cap>) { pub fn on_demand(t: &mut Activation, config_ds: Arc<Cap>) {
t.spawn(syndicate::name!("config_watcher"), move |t| { t.spawn(Some(AnyValue::symbol("config_watcher")), move |t| {
Ok(during!(t, config_ds, language(), <run-service $spec: internal_services::ConfigWatcher>, |t| { Ok(during!(t, config_ds, language(), <run-service $spec: internal_services::ConfigWatcher::<AnyValue>>, |t| {
Supervisor::start( Supervisor::start(
t, t,
syndicate::name!(parent: None, "config", path = ?spec.path), Some(rec![AnyValue::symbol("config"), AnyValue::new(spec.path.clone())]),
SupervisorConfiguration::default(), SupervisorConfiguration::default(),
enclose!((config_ds, spec) lifecycle::updater(config_ds, spec)), enclose!((config_ds, spec) lifecycle::updater(config_ds, spec)),
enclose!((config_ds) move |t| enclose!((config_ds, spec) run(t, config_ds, spec)))) enclose!((config_ds) move |t| enclose!((config_ds, spec) run(t, config_ds, spec))))
@ -142,13 +145,20 @@ fn initial_scan(
scan_file(t, path_state, env); scan_file(t, path_state, env);
} else { } else {
match fs::read_dir(&env.path) { match fs::read_dir(&env.path) {
Ok(entries) => for er in entries { Ok(unsorted_entries) => {
match er { let mut entries: Vec<fs::DirEntry> = Vec::new();
Ok(e) => for er in unsorted_entries {
initial_scan(t, path_state, config_ds, env.clone_with_path(e.path())), match er {
Err(e) => Ok(e) =>
tracing::warn!( entries.push(e),
"initial_scan: transient during scan of {:?}: {:?}", &env.path, e), Err(e) =>
tracing::warn!(
"initial_scan: transient during scan of {:?}: {:?}", &env.path, e),
}
}
entries.sort_by_key(|e| e.file_name());
for e in entries {
initial_scan(t, path_state, config_ds, env.clone_with_path(e.path()));
} }
} }
Err(e) => tracing::warn!("initial_scan: enumerating {:?}: {:?}", &env.path, e), Err(e) => tracing::warn!("initial_scan: enumerating {:?}: {:?}", &env.path, e),
@ -174,7 +184,8 @@ fn run(
let mut watcher = watcher(tx, Duration::from_millis(100)).map_err(convert_notify_error)?; let mut watcher = watcher(tx, Duration::from_millis(100)).map_err(convert_notify_error)?;
watcher.watch(&env.path, RecursiveMode::Recursive).map_err(convert_notify_error)?; watcher.watch(&env.path, RecursiveMode::Recursive).map_err(convert_notify_error)?;
let facet = t.facet.clone(); let facet = t.facet_ref();
let trace_collector = t.trace_collector();
let span = tracing::Span::current(); let span = tracing::Span::current();
thread::spawn(move || { thread::spawn(move || {
let _entry = span.enter(); let _entry = span.enter();
@ -182,16 +193,24 @@ fn run(
let mut path_state: Map<PathBuf, FacetId> = Map::new(); let mut path_state: Map<PathBuf, FacetId> = Map::new();
{ {
facet.activate(Account::new(syndicate::name!("initial_scan")), |t| { let cause = trace_collector.as_ref().map(|_| trace::TurnCause::external("initial_scan"));
initial_scan(t, &mut path_state, &config_ds, env.clone()); let account = Account::new(Some(AnyValue::symbol("initial_scan")), trace_collector.clone());
config_ds.assert(t, language(), &lifecycle::ready(&spec)); if !facet.activate(
Ok(()) &account, cause, |t| {
}).unwrap(); initial_scan(t, &mut path_state, &config_ds, env.clone());
tracing::trace!("initial_scan complete"); config_ds.assert(t, language(), &lifecycle::ready(&spec));
Ok(())
})
{
return;
}
} }
tracing::trace!("initial_scan complete");
let mut rescan = |paths: Vec<PathBuf>| { let mut rescan = |paths: Vec<PathBuf>| {
facet.activate(Account::new(syndicate::name!("rescan")), |t| { let cause = trace_collector.as_ref().map(|_| trace::TurnCause::external("rescan"));
let account = Account::new(Some(AnyValue::symbol("rescan")), trace_collector.clone());
facet.activate(&account, cause, |t| {
let mut to_stop = Vec::new(); let mut to_stop = Vec::new();
for path in paths.into_iter() { for path in paths.into_iter() {
let maybe_facet_id = path_state.remove(&path); let maybe_facet_id = path_state.remove(&path);
@ -209,15 +228,15 @@ fn run(
t.stop_facet(facet_id); t.stop_facet(facet_id);
} }
Ok(()) Ok(())
}).unwrap() })
}; };
while let Ok(event) = rx.recv() { while let Ok(event) = rx.recv() {
tracing::trace!("notification: {:?}", &event); tracing::trace!("notification: {:?}", &event);
match event { let keep_running = match event {
DebouncedEvent::NoticeWrite(_p) | DebouncedEvent::NoticeWrite(_p) |
DebouncedEvent::NoticeRemove(_p) => DebouncedEvent::NoticeRemove(_p) =>
(), true,
DebouncedEvent::Create(p) | DebouncedEvent::Create(p) |
DebouncedEvent::Write(p) | DebouncedEvent::Write(p) |
DebouncedEvent::Chmod(p) | DebouncedEvent::Chmod(p) |
@ -225,20 +244,27 @@ fn run(
rescan(vec![p]), rescan(vec![p]),
DebouncedEvent::Rename(p, q) => DebouncedEvent::Rename(p, q) =>
rescan(vec![p, q]), rescan(vec![p, q]),
_ => _ => {
tracing::info!("{:?}", event), tracing::info!("{:?}", event);
} true
}
};
if !keep_running { break; }
} }
let _ = facet.activate(Account::new(syndicate::name!("termination")), |t| { {
tracing::trace!("linked thread terminating associated facet"); let cause = trace_collector.as_ref().map(|_| trace::TurnCause::external("termination"));
Ok(t.stop()) let account = Account::new(Some(AnyValue::symbol("termination")), trace_collector);
}); facet.activate(&account, cause, |t| {
tracing::trace!("linked thread terminating associated facet");
Ok(t.stop())
});
}
tracing::trace!("linked thread done"); tracing::trace!("linked thread done");
}); });
t.linked_task(syndicate::name!("cancel-wait"), async move { t.linked_task(Some(AnyValue::symbol("cancel-wait")), async move {
future::pending::<()>().await; future::pending::<()>().await;
drop(watcher); drop(watcher);
Ok(LinkedTaskTermination::KeepFacet) Ok(LinkedTaskTermination::KeepFacet)

View File

@ -4,8 +4,10 @@ use std::sync::Arc;
use syndicate::actor::*; use syndicate::actor::*;
use syndicate::enclose; use syndicate::enclose;
use syndicate::preserves::rec;
use syndicate::schemas::service; use syndicate::schemas::service;
use syndicate::supervise::{Supervisor, SupervisorConfiguration}; use syndicate::supervise::{Supervisor, SupervisorConfiguration};
use syndicate::trace;
use syndicate::value::NestedValue; use syndicate::value::NestedValue;
use tokio::io::AsyncRead; use tokio::io::AsyncRead;
@ -21,8 +23,8 @@ use crate::schemas::external_services::*;
use syndicate_macros::during; use syndicate_macros::during;
pub fn on_demand(t: &mut Activation, config_ds: Arc<Cap>, root_ds: Arc<Cap>) { pub fn on_demand(t: &mut Activation, config_ds: Arc<Cap>, root_ds: Arc<Cap>) {
t.spawn(syndicate::name!("daemon"), move |t| { t.spawn(Some(AnyValue::symbol("daemon_listener")), move |t| {
Ok(during!(t, config_ds, language(), <run-service $spec: DaemonService>, Ok(during!(t, config_ds, language(), <run-service $spec: DaemonService::<AnyValue>>,
enclose!((config_ds, root_ds) move |t: &mut Activation| { enclose!((config_ds, root_ds) move |t: &mut Activation| {
supervise_daemon(t, config_ds, root_ds, spec) supervise_daemon(t, config_ds, root_ds, spec)
}))) })))
@ -39,14 +41,14 @@ fn supervise_daemon(
lifecycle::on_service_restart(t, &config_ds, &spec, enclose!( lifecycle::on_service_restart(t, &config_ds, &spec, enclose!(
(config_ds, root_ds, spec) move |t| { (config_ds, root_ds, spec) move |t| {
tracing::info!(id = ?spec.id, "Terminating to restart"); tracing::info!(id = ?spec.id, "Terminating to restart");
t.stop_facet_and_continue(t.facet.facet_id, Some( t.stop_facet_and_continue(t.facet_id(), Some(
enclose!((config_ds, root_ds, spec) move |t: &mut Activation| { enclose!((config_ds, root_ds, spec) move |t: &mut Activation| {
supervise_daemon(t, config_ds, root_ds, spec) supervise_daemon(t, config_ds, root_ds, spec)
}))) })))
})); }));
Supervisor::start( Supervisor::start(
t, t,
syndicate::name!(parent: None, "daemon", id = ?spec.id), Some(language().unparse(&spec)),
SupervisorConfiguration::on_error_only(), SupervisorConfiguration::on_error_only(),
enclose!((config_ds, spec) lifecycle::updater(config_ds, spec)), enclose!((config_ds, spec) lifecycle::updater(config_ds, spec)),
enclose!((config_ds, root_ds) move |t| enclose!((config_ds, root_ds) move |t|
@ -137,7 +139,7 @@ impl DaemonProcessSpec {
}, },
DaemonProcessSpec::OneShot { setup } => FullDaemonProcess { DaemonProcessSpec::OneShot { setup } => FullDaemonProcess {
process: Process::Simple(setup).elaborate(), process: Process::Simple(setup).elaborate(),
ready_on_start: ReadyOnStart::Absent, ready_on_start: ReadyOnStart::Present { ready_on_start: false },
restart: RestartField::Present { restart: Box::new(RestartPolicy::OnError) }, restart: RestartField::Present { restart: Box::new(RestartPolicy::OnError) },
protocol: ProtocolField::Absent, protocol: ProtocolField::Absent,
}, },
@ -162,7 +164,6 @@ struct DaemonInstance {
config_ds: Arc<Cap>, config_ds: Arc<Cap>,
log_ds: Arc<Cap>, log_ds: Arc<Cap>,
service: AnyValue, service: AnyValue,
name: tracing::Span,
cmd: process::Command, cmd: process::Command,
announce_presumed_readiness: bool, announce_presumed_readiness: bool,
unready_configs: Arc<Field<isize>>, unready_configs: Arc<Field<isize>>,
@ -175,7 +176,7 @@ impl DaemonInstance {
fn handle_exit(self, t: &mut Activation, error_message: Option<String>) -> ActorResult { fn handle_exit(self, t: &mut Activation, error_message: Option<String>) -> ActorResult {
let delay = let delay =
std::time::Duration::from_millis(if let None = error_message { 200 } else { 1000 }); std::time::Duration::from_millis(if let None = error_message { 200 } else { 1000 });
t.stop_facet_and_continue(t.facet.facet_id, Some(move |t: &mut Activation| { t.stop_facet_and_continue(t.facet_id(), Some(move |t: &mut Activation| {
#[derive(Debug)] #[derive(Debug)]
enum NextStep { enum NextStep {
SleepAndRestart, SleepAndRestart,
@ -198,6 +199,7 @@ impl DaemonInstance {
Err(s.as_str())? Err(s.as_str())?
} }
}, },
RestartPolicy::Never => SignalSuccessfulCompletion,
}; };
match error_message { match error_message {
@ -211,7 +213,6 @@ impl DaemonInstance {
t.facet(|t| { t.facet(|t| {
let _ = t.prevent_inert_check(); let _ = t.prevent_inert_check();
counter::adjust(t, &self.completed_processes, 1); counter::adjust(t, &self.completed_processes, 1);
counter::adjust(t, &self.unready_configs, -1);
Ok(()) Ok(())
})?; })?;
() ()
@ -224,43 +225,47 @@ impl DaemonInstance {
fn log<R: 'static + Send + AsyncRead + Unpin>( fn log<R: 'static + Send + AsyncRead + Unpin>(
&self, &self,
t: &mut Activation, t: &mut Activation,
facet: FacetRef,
pid: Option<u32>, pid: Option<u32>,
r: R, r: R,
kind: &str kind: &str
) { ) -> ActorResult {
let log_ds = self.log_ds.clone(); t.facet(|t| {
let service = self.service.clone(); let facet = t.facet_ref();
let kind = AnyValue::symbol(kind); let log_ds = self.log_ds.clone();
let pid = match pid { let service = self.service.clone();
Some(n) => AnyValue::new(n), let kind = AnyValue::symbol(kind);
None => AnyValue::symbol("unknown"), let pid = match pid {
}; Some(n) => AnyValue::new(n),
t.spawn(syndicate::name!(parent: self.name.clone(), "log"), move |t| { None => AnyValue::symbol("unknown"),
t.linked_task(tracing::Span::current(), async move { };
let trace_collector = t.trace_collector();
t.linked_task(None, async move {
let mut r = BufReader::new(r); let mut r = BufReader::new(r);
let cause = trace_collector.as_ref().map(
|_| trace::TurnCause::external(kind.value().as_symbol().unwrap()));
let account = Account::new(None, trace_collector);
loop { loop {
let mut buf = Vec::new(); let mut buf = Vec::new();
if r.read_until(b'\n', &mut buf).await? == 0 { match r.read_until(b'\n', &mut buf).await {
return Ok(LinkedTaskTermination::Normal); Ok(0) | Err(_) => break,
Ok(_) => (),
} }
let buf = match std::str::from_utf8(&buf) { let buf = match std::str::from_utf8(&buf) {
Ok(s) => AnyValue::new(s), Ok(s) => AnyValue::new(s),
Err(_) => AnyValue::bytestring(buf), Err(_) => AnyValue::bytestring(buf),
}; };
let now = AnyValue::new(chrono::Utc::now().to_rfc3339()); let now = AnyValue::new(chrono::Utc::now().to_rfc3339());
if facet.activate( if !facet.activate(
Account::new(tracing::Span::current()), &account, cause.clone(), enclose!((pid, service, kind) |t| {
enclose!((pid, service, kind) |t| {
log_ds.message(t, &(), &syndicate_macros::template!( log_ds.message(t, &(), &syndicate_macros::template!(
"<log =now { "<log =now {
pid: =pid, pid: =pid,
service: =service, service: =service,
stream: =kind, stream: =kind,
line: =buf, line: =buf,
}>")); }>"));
Ok(()) Ok(())
})).is_err() }))
{ {
break; break;
} }
@ -268,7 +273,8 @@ impl DaemonInstance {
Ok(LinkedTaskTermination::Normal) Ok(LinkedTaskTermination::Normal)
}); });
Ok(()) Ok(())
}); })?;
Ok(())
} }
fn start(mut self, t: &mut Activation) -> ActorResult { fn start(mut self, t: &mut Activation) -> ActorResult {
@ -284,10 +290,10 @@ impl DaemonInstance {
let pid = child.id(); let pid = child.id();
tracing::debug!(?pid, cmd = ?self.cmd, "started"); tracing::debug!(?pid, cmd = ?self.cmd, "started");
let facet = t.facet.clone(); let facet = t.facet_ref();
if let Some(r) = child.stderr.take() { if let Some(r) = child.stderr.take() {
self.log(t, facet.clone(), pid, r, "stderr"); self.log(t, pid, r, "stderr")?;
} }
match self.protocol { match self.protocol {
@ -295,7 +301,7 @@ impl DaemonInstance {
Protocol::BinarySyndicate => self.relay_facet(t, &mut child, false)?, Protocol::BinarySyndicate => self.relay_facet(t, &mut child, false)?,
Protocol::None => { Protocol::None => {
if let Some(r) = child.stdout.take() { if let Some(r) = child.stdout.take() {
self.log(t, facet.clone(), pid, r, "stdout"); self.log(t, pid, r, "stdout")?;
} }
} }
} }
@ -304,16 +310,20 @@ impl DaemonInstance {
counter::adjust(t, &self.unready_configs, -1); counter::adjust(t, &self.unready_configs, -1);
} }
let trace_collector = t.trace_collector();
t.linked_task( t.linked_task(
syndicate::name!(parent: self.name.clone(), "wait"), Some(rec![AnyValue::symbol("wait"), self.service.clone()]),
enclose!((facet) async move { enclose!((facet) async move {
tracing::trace!("waiting for process exit"); tracing::trace!("waiting for process exit");
let status = child.wait().await?; let status = child.wait().await?;
tracing::debug!(?status); tracing::debug!(?status);
facet.activate(Account::new(syndicate::name!("instance-terminated")), |t| { let cause = trace_collector.as_ref().map(
|_| trace::TurnCause::external("instance-terminated"));
let account = Account::new(Some(AnyValue::symbol("instance-terminated")), trace_collector);
facet.activate(&account, cause, |t| {
let m = if status.success() { None } else { Some(format!("{}", status)) }; let m = if status.success() { None } else { Some(format!("{}", status)) };
self.handle_exit(t, m) self.handle_exit(t, m)
})?; });
Ok(LinkedTaskTermination::Normal) Ok(LinkedTaskTermination::Normal)
})); }));
Ok(()) Ok(())
@ -378,9 +388,10 @@ fn run(
Ok(()) Ok(())
}))?; }))?;
let trace_collector = t.trace_collector();
enclose!((config_ds, unready_configs, completed_processes) enclose!((config_ds, unready_configs, completed_processes)
during!(t, config_ds.clone(), language(), <daemon #(&service.id) $config>, { during!(t, config_ds.clone(), language(), <daemon #(&service.id) $config>, {
enclose!((spec, config_ds, root_ds, unready_configs, completed_processes) enclose!((spec, config_ds, root_ds, unready_configs, completed_processes, trace_collector)
|t: &mut Activation| { |t: &mut Activation| {
tracing::debug!(?config, "new config"); tracing::debug!(?config, "new config");
counter::adjust(t, &unready_configs, 1); counter::adjust(t, &unready_configs, 1);
@ -390,8 +401,8 @@ fn run(
Ok(config) => { Ok(config) => {
tracing::info!(?config); tracing::info!(?config);
let config = config.elaborate(); let config = config.elaborate();
let facet = t.facet.clone(); let facet = t.facet_ref();
t.linked_task(syndicate::name!("subprocess"), async move { t.linked_task(Some(AnyValue::symbol("subprocess")), async move {
let mut cmd = config.process.build_command().ok_or("Cannot start daemon process")?; let mut cmd = config.process.build_command().ok_or("Cannot start daemon process")?;
let announce_presumed_readiness = match config.ready_on_start { let announce_presumed_readiness = match config.ready_on_start {
@ -432,7 +443,6 @@ fn run(
config_ds, config_ds,
log_ds: root_ds, log_ds: root_ds,
service: spec, service: spec,
name: tracing::Span::current(),
cmd, cmd,
announce_presumed_readiness, announce_presumed_readiness,
unready_configs, unready_configs,
@ -441,9 +451,12 @@ fn run(
protocol, protocol,
}; };
facet.activate(Account::new(syndicate::name!("instance-startup")), |t| { let cause = trace_collector.as_ref().map(
|_| trace::TurnCause::external("instance-startup"));
let account = Account::new(Some(AnyValue::symbol("instance-startup")), trace_collector);
facet.activate(&account, cause, |t| {
daemon_instance.start(t) daemon_instance.start(t)
})?; });
Ok(LinkedTaskTermination::KeepFacet) Ok(LinkedTaskTermination::KeepFacet)
}); });
Ok(()) Ok(())

View File

@ -1,7 +1,12 @@
use preserves_schema::Codec;
use std::sync::Arc; use std::sync::Arc;
use std::sync::atomic::Ordering;
use syndicate::actor::*; use syndicate::actor::*;
use syndicate::enclose; use syndicate::enclose;
use syndicate::preserves::rec;
use syndicate::preserves::value::NestedValue;
use crate::language::language; use crate::language::language;
use crate::lifecycle; use crate::lifecycle;
@ -10,9 +15,10 @@ use crate::schemas::internal_services::DebtReporter;
use syndicate_macros::during; use syndicate_macros::during;
pub fn on_demand(t: &mut Activation, ds: Arc<Cap>) { pub fn on_demand(t: &mut Activation, ds: Arc<Cap>) {
t.spawn(syndicate::name!("debt_reporter"), move |t| { t.spawn(Some(AnyValue::symbol("debt_reporter_listener")), move |t| {
Ok(during!(t, ds, language(), <run-service $spec: DebtReporter>, |t: &mut Activation| { Ok(during!(t, ds, language(), <run-service $spec: DebtReporter>, |t: &mut Activation| {
t.spawn_link(tracing::Span::current(), enclose!((ds) |t| run(t, ds, spec))); t.spawn_link(Some(rec![AnyValue::symbol("debt_reporter"), language().unparse(&spec)]),
enclose!((ds) |t| run(t, ds, spec)));
Ok(()) Ok(())
})) }))
}); });
@ -22,10 +28,38 @@ fn run(t: &mut Activation, ds: Arc<Cap>, spec: DebtReporter) -> ActorResult {
ds.assert(t, language(), &lifecycle::started(&spec)); ds.assert(t, language(), &lifecycle::started(&spec));
ds.assert(t, language(), &lifecycle::ready(&spec)); ds.assert(t, language(), &lifecycle::ready(&spec));
t.every(core::time::Duration::from_millis((spec.interval_seconds.0 * 1000.0) as u64), |_t| { t.every(core::time::Duration::from_millis((spec.interval_seconds.0 * 1000.0) as u64), |_t| {
for (id, (name, debt)) in syndicate::actor::ACCOUNTS.read().iter() { for (account_id, (name, debt)) in syndicate::actor::ACCOUNTS.read().iter() {
let _enter = name.enter(); tracing::info!(account_id, ?name, debt = ?debt.load(Ordering::Relaxed));
tracing::info!(id, debt = ?debt.load(std::sync::atomic::Ordering::Relaxed));
} }
// let snapshot = syndicate::actor::ACTORS.read().clone();
// for (id, (name, ac_ref)) in snapshot.iter() {
// if *id == _t.state.actor_id {
// tracing::debug!("skipping report on the reporting actor, to avoid deadlock");
// continue;
// }
// tracing::trace!(?id, "about to lock");
// tracing::info_span!("actor", id, ?name).in_scope(|| match &*ac_ref.state.lock() {
// ActorState::Terminated { exit_status } =>
// tracing::info!(?exit_status, "terminated"),
// ActorState::Running(state) => {
// tracing::info!(field_count = ?state.fields.len(),
// outbound_assertion_count = ?state.outbound_assertions.len(),
// facet_count = ?state.facet_nodes.len());
// tracing::info_span!("facets").in_scope(|| {
// for (facet_id, f) in state.facet_nodes.iter() {
// tracing::info!(
// ?facet_id,
// parent_id = ?f.parent_facet_id,
// outbound_handle_count = ?f.outbound_handles.len(),
// linked_task_count = ?f.linked_tasks.len(),
// inert_check_preventers = ?f.inert_check_preventers.load(Ordering::Relaxed));
// }
// });
// }
// });
// }
Ok(()) Ok(())
}) })
} }

View File

@ -0,0 +1,39 @@
use preserves_schema::Codec;
use std::sync::Arc;
use syndicate::actor::*;
use syndicate::enclose;
use syndicate::preserves::rec;
use syndicate::preserves::value::NestedValue;
use crate::gatekeeper;
use crate::language::Language;
use crate::language::language;
use crate::lifecycle;
use crate::schemas::internal_services::Gatekeeper;
use syndicate_macros::during;
pub fn on_demand(t: &mut Activation, ds: Arc<Cap>) {
t.spawn(Some(AnyValue::symbol("gatekeeper_listener")), move |t| {
Ok(during!(t, ds, language(), <run-service $spec: Gatekeeper::<AnyValue>>, |t: &mut Activation| {
t.spawn_link(Some(rec![AnyValue::symbol("gatekeeper"), language().unparse(&spec)]),
enclose!((ds) |t| run(t, ds, spec)));
Ok(())
}))
});
}
fn run(t: &mut Activation, ds: Arc<Cap>, spec: Gatekeeper<AnyValue>) -> ActorResult {
let resolver = t.create(syndicate::entity(Arc::clone(&spec.bindspace))
.on_asserted_facet(gatekeeper::facet_handle_resolve));
ds.assert(t, language(), &syndicate::schemas::service::ServiceObject {
service_name: language().unparse(&spec),
object: AnyValue::domain(Cap::guard(Language::arc(), resolver)),
});
gatekeeper::handle_binds(t, &spec.bindspace)?;
ds.assert(t, language(), &lifecycle::started(&spec));
ds.assert(t, language(), &lifecycle::ready(&spec));
Ok(())
}

View File

@ -0,0 +1,348 @@
use preserves_schema::Codec;
use std::convert::TryFrom;
use std::io::Read;
use std::sync::Arc;
use syndicate::actor::*;
use syndicate::enclose;
use syndicate::error::Error;
use syndicate::preserves::rec;
use syndicate::preserves::value::Map;
use syndicate::preserves::value::NestedValue;
use syndicate::schemas::http;
use syndicate::value::signed_integer::SignedInteger;
use crate::language::language;
use crate::lifecycle;
use crate::schemas::internal_services::HttpRouter;
use crate::schemas::internal_services::HttpStaticFileServer;
use syndicate_macros::during;
lazy_static::lazy_static! {
pub static ref MIME_TABLE: Map<String, String> = load_mime_table("/etc/mime.types").unwrap_or_default();
}
pub fn load_mime_table(path: &str) -> Result<Map<String, String>, std::io::Error> {
let mut table = Map::new();
let file = std::fs::read_to_string(path)?;
for line in file.split('\n') {
if line.starts_with('#') {
continue;
}
let pieces = line.split(&[' ', '\t'][..]).collect::<Vec<&str>>();
for i in 1..pieces.len() {
table.insert(pieces[i].to_string(), pieces[0].to_string());
}
}
Ok(table)
}
pub fn on_demand(t: &mut Activation, ds: Arc<Cap>) {
t.spawn(Some(AnyValue::symbol("http_router_listener")), move |t| {
enclose!((ds) during!(t, ds, language(), <run-service $spec: HttpRouter::<AnyValue>>, |t: &mut Activation| {
t.spawn_link(Some(rec![AnyValue::symbol("http_router"), language().unparse(&spec)]),
enclose!((ds) |t| run(t, ds, spec)));
Ok(())
}));
enclose!((ds) during!(t, ds, language(), <run-service $spec: HttpStaticFileServer>, |t: &mut Activation| {
t.spawn_link(Some(rec![AnyValue::symbol("http_static_file_server"), language().unparse(&spec)]),
enclose!((ds) |t| run_static_file_server(t, ds, spec)));
Ok(())
}));
Ok(())
});
}
#[derive(Debug, Clone)]
struct ActiveHandler {
cap: Arc<Cap>,
terminated: Arc<Field<bool>>,
}
type MethodTable = Map<http::MethodPattern, Vec<ActiveHandler>>;
type HostTable = Map<http::HostPattern, Map<http::PathPattern, MethodTable>>;
type RoutingTable = Map<SignedInteger, HostTable>;
fn request_host(value: &http::RequestHost) -> Option<String> {
match value {
http::RequestHost::Present(h) => Some(h.to_owned()),
http::RequestHost::Absent => None,
}
}
fn run(t: &mut Activation, ds: Arc<Cap>, spec: HttpRouter) -> ActorResult {
ds.assert(t, language(), &lifecycle::started(&spec));
ds.assert(t, language(), &lifecycle::ready(&spec));
let httpd = spec.httpd;
let routes: Arc<Field<RoutingTable>> = t.named_field("routes", Map::new());
enclose!((httpd, routes) during!(t, httpd, language(), <http-bind _ $port _ _ _>, |t: &mut Activation| {
let port1 = port.clone();
enclose!((httpd, routes) during!(t, httpd, language(), <http-listener #(&port1)>, enclose!((routes, port) |t: &mut Activation| {
let port2 = port.clone();
during!(t, httpd, language(), <http-bind $host #(&port2) $method $path $handler>, |t: &mut Activation| {
tracing::debug!("+HTTP binding {:?} {:?} {:?} {:?} {:?}", host, port, method, path, handler);
let port = port.value().to_signedinteger()?;
let host = language().parse::<http::HostPattern>(&host)?;
let path = language().parse::<http::PathPattern>(&path)?;
let method = language().parse::<http::MethodPattern>(&method)?;
let handler_cap = handler.value().to_embedded()?.clone();
let handler_terminated = t.named_field("handler-terminated", false);
t.get_mut(&routes)
.entry(port.clone()).or_default()
.entry(host.clone()).or_default()
.entry(path.clone()).or_default()
.entry(method.clone()).or_default()
.push(ActiveHandler {
cap: handler_cap.clone(),
terminated: handler_terminated,
});
t.on_stop(enclose!((routes, method, path, host, port) move |t| {
tracing::debug!("-HTTP binding {:?} {:?} {:?} {:?} {:?}", host, port, method, path, handler);
let port_map = t.get_mut(&routes);
let host_map = port_map.entry(port.clone()).or_default();
let path_map = host_map.entry(host.clone()).or_default();
let method_map = path_map.entry(path.clone()).or_default();
let handler_vec = method_map.entry(method.clone()).or_default();
let handler = {
let i = handler_vec.iter().position(|a| a.cap == handler_cap)
.expect("Expected an index of an active handler to remove");
handler_vec.swap_remove(i)
};
if handler_vec.is_empty() {
method_map.remove(&method);
}
if method_map.is_empty() {
path_map.remove(&path);
}
if path_map.is_empty() {
host_map.remove(&host);
}
if host_map.is_empty() {
port_map.remove(&port);
}
*t.get_mut(&handler.terminated) = true;
Ok(())
}));
Ok(())
});
Ok(())
})));
Ok(())
}));
during!(t, httpd, language(), <request $req $res>, |t: &mut Activation| {
let req = match language().parse::<http::HttpRequest>(&req) { Ok(v) => v, Err(_) => return Ok(()) };
let res = match res.value().to_embedded() { Ok(v) => v, Err(_) => return Ok(()) };
tracing::trace!("Looking up handler for {:#?} in {:#?}", &req, &t.get(&routes));
let host_map = match t.get(&routes).get(&req.port) {
Some(host_map) => host_map,
None => return send_empty(t, res, 404, "Not found"),
};
let methods = match request_host(&req.host).and_then(|h| try_hostname(host_map, http::HostPattern::Host(h), &req.path).transpose()).transpose()? {
Some(methods) => methods,
None => match try_hostname(host_map, http::HostPattern::Any, &req.path)? {
Some(methods) => methods,
None => return send_empty(t, res, 404, "Not found"),
}
};
let handlers = match methods.get(&http::MethodPattern::Specific(req.method.clone())) {
Some(handlers) => handlers,
None => match methods.get(&http::MethodPattern::Any) {
Some(handlers) => handlers,
None => {
let allowed = methods.keys().map(|k| match k {
http::MethodPattern::Specific(m) => m.to_uppercase(),
http::MethodPattern::Any => unreachable!(),
}).collect::<Vec<String>>().join(", ");
res.message(t, language(), &http::HttpResponse::Status {
code: 405.into(), message: "Method Not Allowed".into() });
res.message(t, language(), &http::HttpResponse::Header {
name: "allow".into(), value: allowed });
return send_done(t, res);
}
}
};
if handlers.len() > 1 {
tracing::warn!(?req, "Too many handlers available");
}
let ActiveHandler { cap, terminated } = handlers.first().expect("Nonempty handler set").clone();
tracing::trace!("Handler for {:?} is {:?}", &req, &cap);
t.dataflow(enclose!((terminated, req, res) move |t| {
if *t.get(&terminated) {
tracing::trace!("Handler for {:?} terminated", &req);
send_empty(t, &res, 500, "Internal Server Error")?;
}
Ok(())
}))?;
cap.assert(t, language(), &http::HttpContext { req, res: res.clone() });
Ok(())
});
Ok(())
}
fn send_done(t: &mut Activation, res: &Arc<Cap>) -> ActorResult {
res.message(t, language(), &http::HttpResponse::Done {
chunk: Box::new(http::Chunk::Bytes(vec![])) });
Ok(())
}
fn send_empty(t: &mut Activation, res: &Arc<Cap>, code: u16, message: &str) -> ActorResult {
res.message(t, language(), &http::HttpResponse::Status {
code: code.into(), message: message.into() });
send_done(t, res)
}
fn path_pattern_matches(path_pat: &http::PathPattern, path: &Vec<String>) -> bool {
let mut path_iter = path.iter();
for pat_elem in path_pat.0.iter() {
match pat_elem {
http::PathPatternElement::Label(v) => match path_iter.next() {
Some(path_elem) => {
if v != path_elem {
return false;
}
}
None => return false,
},
http::PathPatternElement::Wildcard => match path_iter.next() {
Some(_) => (),
None => return false,
},
http::PathPatternElement::Rest => return true,
}
}
match path_iter.next() {
Some(_more) => false,
None => true,
}
}
fn try_hostname<'table>(
host_map: &'table HostTable,
host_pat: http::HostPattern,
path: &Vec<String>,
) -> Result<Option<&'table MethodTable>, Error> {
match host_map.get(&host_pat) {
None => Ok(None),
Some(path_table) => {
for (path_pat, method_table) in path_table.iter() {
tracing::trace!("Checking path {:?} against pattern {:?}", &path, &path_pat);
if path_pattern_matches(path_pat, path) {
return Ok(Some(method_table));
}
}
Ok(None)
}
}
}
fn render_dir(path: std::path::PathBuf) -> Result<(Vec<u8>, Option<&'static str>), Error> {
let mut body = String::new();
for entry in std::fs::read_dir(&path)? {
if let Ok(entry) = entry {
let is_dir = entry.metadata().map(|m| m.is_dir()).unwrap_or(false);
let name = entry.file_name().to_string_lossy()
.replace('&', "&amp;")
.replace('<', "&lt;")
.replace('>', "&gt;")
.replace('\'', "&apos;")
.replace('"', "&quot;") + (if is_dir { "/" } else { "" });
body.push_str(&format!("<a href=\"{}\">{}</a><br>\n", name, name));
}
}
Ok((body.into_bytes(), Some("text/html")))
}
impl HttpStaticFileServer {
fn respond(&mut self, t: &mut Activation, req: &http::HttpRequest, res: &Arc<Cap>) -> ActorResult {
let path_prefix_elements = usize::try_from(&self.path_prefix_elements)
.map_err(|_| "Bad pathPrefixElements")?;
let mut is_index = false;
let mut path = req.path[path_prefix_elements..].iter().cloned().collect::<Vec<String>>();
if let Some(e) = path.last_mut() {
if e.len() == 0 {
*e = "index.html".into();
is_index = true;
}
}
let mut realpath = std::path::PathBuf::from(&self.dir);
for element in path.into_iter() {
if element.contains('/') || element.starts_with('.') { Err("Invalid path element")?; }
realpath.push(element);
}
let (body, mime_type) = match std::fs::File::open(&realpath) {
Err(_) => {
if is_index {
realpath.pop();
}
if std::fs::metadata(&realpath).is_ok_and(|m| m.is_dir()) {
render_dir(realpath)?
} else {
return send_empty(t, res, 404, "Not found")
}
},
Ok(mut fh) => {
if fh.metadata().is_ok_and(|m| m.is_dir()) {
drop(fh);
res.message(t, language(), &http::HttpResponse::Status {
code: 301.into(), message: "Moved permanently".into() });
res.message(t, language(), &http::HttpResponse::Header {
name: "location".into(), value: format!("/{}/", req.path.join("/")) });
return send_done(t, res);
} else {
let mut buf = Vec::new();
fh.read_to_end(&mut buf)?;
if let Some(extension) = realpath.extension().and_then(|e| e.to_str()) {
(buf, MIME_TABLE.get(extension).map(|m| m.as_str()))
} else {
(buf, None)
}
}
}
};
res.message(t, language(), &http::HttpResponse::Status {
code: 200.into(), message: "OK".into() });
if let Some(mime_type) = mime_type {
res.message(t, language(), &http::HttpResponse::Header {
name: "content-type".into(), value: mime_type.to_owned() });
}
res.message(t, language(), &http::HttpResponse::Done {
chunk: Box::new(http::Chunk::Bytes(body)) });
Ok(())
}
}
impl Entity<http::HttpContext<AnyValue>> for HttpStaticFileServer {
fn assert(&mut self, t: &mut Activation, assertion: http::HttpContext<AnyValue>, _handle: Handle) -> ActorResult {
let http::HttpContext { req, res } = assertion;
if let Err(e) = self.respond(t, &req, &res) {
tracing::error!(?req, error=?e);
send_empty(t, &res, 500, "Internal server error")?;
}
Ok(())
}
}
fn run_static_file_server(t: &mut Activation, ds: Arc<Cap>, spec: HttpStaticFileServer) -> ActorResult {
let object = Cap::guard(&language().syndicate, t.create(spec.clone()));
ds.assert(t, language(), &syndicate::schemas::service::ServiceObject {
service_name: language().unparse(&spec),
object: AnyValue::domain(object),
});
Ok(())
}

View File

@ -1,37 +0,0 @@
use std::sync::Arc;
use syndicate::actor::*;
use syndicate::enclose;
use syndicate::supervise::{Supervisor, SupervisorConfiguration};
use crate::language::language;
use crate::lifecycle;
use crate::schemas::internal_services::Milestone;
use syndicate_macros::during;
pub fn on_demand(t: &mut Activation, ds: Arc<Cap>) {
t.spawn(syndicate::name!("milestone"), move |t| {
Ok(during!(t, ds, language(), <run-service $spec: Milestone>, |t: &mut Activation| {
Supervisor::start(
t,
syndicate::name!(parent: None, "milestone", name = ?spec.name),
SupervisorConfiguration::default(),
|_, _| Ok(()),
enclose!((ds) move |t| enclose!((ds, spec) run(t, ds, spec))))
}))
});
}
fn run(
t: &mut Activation,
ds: Arc<Cap>,
spec: Milestone,
) -> ActorResult {
lifecycle::terminate_on_service_restart(t, &ds, &spec);
tracing::info!(milestone = ?spec.name, "entered");
ds.assert(t, language(), &lifecycle::started(&spec));
ds.assert(t, language(), &lifecycle::ready(&spec));
t.on_stop(move |_| { tracing::info!(milestone = ?spec.name, "exited"); Ok(()) });
Ok(())
}

View File

@ -1,6 +1,7 @@
pub mod config_watcher; pub mod config_watcher;
pub mod daemon; pub mod daemon;
pub mod debt_reporter; pub mod debt_reporter;
pub mod milestone; pub mod gatekeeper;
pub mod http_router;
pub mod tcp_relay_listener; pub mod tcp_relay_listener;
pub mod unix_relay_listener; pub mod unix_relay_listener;

View File

@ -1,62 +1,119 @@
use preserves_schema::Codec;
use std::convert::TryFrom; use std::convert::TryFrom;
use std::sync::Arc; use std::sync::Arc;
use syndicate::actor::*; use syndicate::actor::*;
use syndicate::enclose; use syndicate::enclose;
use syndicate::preserves::rec;
use syndicate::preserves::value::NestedValue;
use syndicate::supervise::{Supervisor, SupervisorConfiguration}; use syndicate::supervise::{Supervisor, SupervisorConfiguration};
use syndicate::trace;
use tokio::net::TcpListener; use tokio::net::TcpListener;
use crate::language::language; use crate::language::language;
use crate::lifecycle; use crate::lifecycle;
use crate::protocol::detect_protocol; use crate::protocol::detect_protocol;
use crate::schemas::internal_services::TcpRelayListener; use crate::schemas::internal_services::TcpWithoutHttp;
use syndicate_macros::during; use syndicate_macros::during;
pub fn on_demand(t: &mut Activation, ds: Arc<Cap>) { pub fn on_demand(t: &mut Activation, ds: Arc<Cap>) {
t.spawn(syndicate::name!("tcp_relay_listener"), move |t| { t.spawn(Some(AnyValue::symbol("tcp_relay_listener")), move |t| {
Ok(during!(t, ds, language(), <run-service $spec: TcpRelayListener>, |t| { enclose!((ds) during!(t, ds, language(), <run-service $spec: TcpWithoutHttp::<AnyValue>>, |t| {
Supervisor::start( run_supervisor(t, ds.clone(), spec)
t, }));
syndicate::name!(parent: None, "relay", addr = ?spec), Ok(())
SupervisorConfiguration::default(),
enclose!((ds, spec) lifecycle::updater(ds, spec)),
enclose!((ds) move |t| enclose!((ds, spec) run(t, ds, spec))))
}))
}); });
} }
fn run(t: &mut Activation, ds: Arc<Cap>, spec: TcpRelayListener) -> ActorResult { fn run_supervisor(t: &mut Activation, ds: Arc<Cap>, spec: TcpWithoutHttp) -> ActorResult {
Supervisor::start(
t,
Some(rec![AnyValue::symbol("relay"), language().unparse(&spec)]),
SupervisorConfiguration::default(),
enclose!((ds, spec) lifecycle::updater(ds, spec)),
enclose!((ds) move |t| enclose!((ds, spec) run(t, ds, spec))))
}
fn run(t: &mut Activation, ds: Arc<Cap>, spec: TcpWithoutHttp) -> ActorResult {
lifecycle::terminate_on_service_restart(t, &ds, &spec); lifecycle::terminate_on_service_restart(t, &ds, &spec);
let host = spec.addr.host.clone();
let port = u16::try_from(&spec.addr.port).map_err(|_| "Invalid TCP port number")?; let httpd = t.named_field("httpd", None::<Arc<Cap>>);
let parent_span = tracing::Span::current();
let facet = t.facet.clone(); {
t.linked_task(syndicate::name!("listener"), async move { let ad = spec.addr.clone();
let ad2 = ad.clone();
let gk = spec.gatekeeper.clone();
enclose!((ds, httpd) during!(t, ds, language(),
<run-service <relay-listener #(&language().unparse(&ad)) #(&AnyValue::domain(gk)) $h>>, |t: &mut Activation| {
if let Some(h) = h.value().as_embedded().cloned() {
h.assert(t, language(), &syndicate::schemas::http::HttpListener { port: ad2.port.clone() });
*t.get_mut(&httpd) = Some(h.clone());
t.on_stop(enclose!((httpd) move |t| {
let f = t.get_mut(&httpd);
if *f == Some(h.clone()) { *f = None; }
Ok(())
}));
}
Ok(())
}));
}
let TcpWithoutHttp { addr, gatekeeper } = spec.clone();
let host = addr.host.clone();
let port = u16::try_from(&addr.port).map_err(|_| "Invalid TCP port number")?;
let facet = t.facet_ref();
let trace_collector = t.trace_collector();
t.linked_task(Some(AnyValue::symbol("listener")), async move {
let listen_addr = format!("{}:{}", host, port); let listen_addr = format!("{}:{}", host, port);
let listener = TcpListener::bind(listen_addr).await?; let listener = TcpListener::bind(listen_addr).await?;
facet.activate(Account::new(syndicate::name!("readiness")), |t| {
tracing::info!("listening"); {
ds.assert(t, language(), &lifecycle::ready(&spec)); let cause = trace_collector.as_ref().map(|_| trace::TurnCause::external("readiness"));
Ok(()) let account = Account::new(Some(AnyValue::symbol("readiness")), trace_collector.clone());
})?; if !facet.activate(
&account, cause, |t| {
tracing::info!("listening");
ds.assert(t, language(), &lifecycle::ready(&spec));
Ok(())
})
{
return Ok(LinkedTaskTermination::Normal);
}
}
loop { loop {
let (stream, addr) = listener.accept().await?; let (stream, addr) = listener.accept().await?;
let gatekeeper = spec.gatekeeper.clone(); let gatekeeper = gatekeeper.clone();
let name = syndicate::name!(parent: parent_span.clone(), "conn"); let name = Some(rec![AnyValue::symbol("tcp"), AnyValue::new(format!("{}", &addr))]);
facet.activate(Account::new(name.clone()), move |t| { let cause = trace_collector.as_ref().map(|_| trace::TurnCause::external("connect"));
t.spawn(name, move |t| { let account = Account::new(name.clone(), trace_collector.clone());
Ok(t.linked_task(tracing::Span::current(), { if !facet.activate(
let facet = t.facet.clone(); &account, cause, enclose!((trace_collector, httpd) move |t| {
async move { let httpd = t.get(&httpd).clone();
detect_protocol(facet, stream, gatekeeper, addr).await?; t.spawn(name, move |t| {
Ok(LinkedTaskTermination::KeepFacet) Ok(t.linked_task(None, {
} let facet = t.facet_ref();
})) async move {
}); detect_protocol(trace_collector,
Ok(()) facet,
})?; stream,
gatekeeper,
httpd,
addr,
port).await?;
Ok(LinkedTaskTermination::KeepFacet)
}
}))
});
Ok(())
}))
{
return Ok(LinkedTaskTermination::Normal);
}
} }
}); });
Ok(()) Ok(())

View File

@ -1,3 +1,5 @@
use preserves_schema::Codec;
use std::io; use std::io;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::Arc; use std::sync::Arc;
@ -5,8 +7,11 @@ use std::sync::Arc;
use syndicate::actor::*; use syndicate::actor::*;
use syndicate::enclose; use syndicate::enclose;
use syndicate::error::Error; use syndicate::error::Error;
use syndicate::preserves::rec;
use syndicate::preserves::value::NestedValue;
use syndicate::relay; use syndicate::relay;
use syndicate::supervise::{Supervisor, SupervisorConfiguration}; use syndicate::supervise::{Supervisor, SupervisorConfiguration};
use syndicate::trace;
use tokio::net::UnixListener; use tokio::net::UnixListener;
use tokio::net::UnixStream; use tokio::net::UnixStream;
@ -19,11 +24,11 @@ use crate::schemas::internal_services::UnixRelayListener;
use syndicate_macros::during; use syndicate_macros::during;
pub fn on_demand(t: &mut Activation, ds: Arc<Cap>) { pub fn on_demand(t: &mut Activation, ds: Arc<Cap>) {
t.spawn(syndicate::name!("unix_relay_listener"), move |t| { t.spawn(Some(AnyValue::symbol("unix_relay_listener")), move |t| {
Ok(during!(t, ds, language(), <run-service $spec: UnixRelayListener>, |t| { Ok(during!(t, ds, language(), <run-service $spec: UnixRelayListener::<AnyValue>>, |t| {
Supervisor::start( Supervisor::start(
t, t,
syndicate::name!(parent: None, "relay", addr = ?spec), Some(rec![AnyValue::symbol("relay"), language().unparse(&spec)]),
SupervisorConfiguration::default(), SupervisorConfiguration::default(),
enclose!((ds, spec) lifecycle::updater(ds, spec)), enclose!((ds, spec) lifecycle::updater(ds, spec)),
enclose!((ds) move |t| enclose!((ds, spec) run(t, ds, spec)))) enclose!((ds) move |t| enclose!((ds, spec) run(t, ds, spec))))
@ -34,39 +39,56 @@ pub fn on_demand(t: &mut Activation, ds: Arc<Cap>) {
fn run(t: &mut Activation, ds: Arc<Cap>, spec: UnixRelayListener) -> ActorResult { fn run(t: &mut Activation, ds: Arc<Cap>, spec: UnixRelayListener) -> ActorResult {
lifecycle::terminate_on_service_restart(t, &ds, &spec); lifecycle::terminate_on_service_restart(t, &ds, &spec);
let path_str = spec.addr.path.clone(); let path_str = spec.addr.path.clone();
let parent_span = tracing::Span::current(); let facet = t.facet_ref();
let facet = t.facet.clone(); let trace_collector = t.trace_collector();
t.linked_task(syndicate::name!("listener"), async move { t.linked_task(Some(AnyValue::symbol("listener")), async move {
let listener = bind_unix_listener(&PathBuf::from(path_str)).await?; let listener = bind_unix_listener(&PathBuf::from(path_str)).await?;
facet.activate(Account::new(syndicate::name!("readiness")), |t| {
tracing::info!("listening"); {
ds.assert(t, language(), &lifecycle::ready(&spec)); let cause = trace_collector.as_ref().map(|_| trace::TurnCause::external("readiness"));
Ok(()) let account = Account::new(Some(AnyValue::symbol("readiness")), trace_collector.clone());
})?; if !facet.activate(
&account, cause, |t| {
tracing::info!("listening");
ds.assert(t, language(), &lifecycle::ready(&spec));
Ok(())
})
{
return Ok(LinkedTaskTermination::Normal);
}
}
loop { loop {
let (stream, _addr) = listener.accept().await?; let (stream, _addr) = listener.accept().await?;
let peer = stream.peer_cred()?; let peer = stream.peer_cred()?;
let gatekeeper = spec.gatekeeper.clone(); let gatekeeper = spec.gatekeeper.clone();
let name = syndicate::name!(parent: parent_span.clone(), "conn", let name = Some(rec![AnyValue::symbol("unix"),
pid = ?peer.pid().unwrap_or(-1), AnyValue::new(peer.pid().unwrap_or(-1)),
uid = peer.uid()); AnyValue::new(peer.uid())]);
facet.activate(Account::new(name.clone()), move |t| { let cause = trace_collector.as_ref().map(|_| trace::TurnCause::external("connect"));
t.spawn(name, |t| { let account = Account::new(name.clone(), trace_collector.clone());
Ok(t.linked_task(tracing::Span::current(), { if !facet.activate(
let facet = t.facet.clone(); &account, cause, enclose!((trace_collector) move |t| {
async move { t.spawn(name, |t| {
tracing::info!(protocol = %"unix"); Ok(t.linked_task(None, {
let (i, o) = stream.into_split(); let facet = t.facet_ref();
run_connection(facet, async move {
relay::Input::Bytes(Box::pin(i)), tracing::info!(protocol = %"unix");
relay::Output::Bytes(Box::pin(o)), let (i, o) = stream.into_split();
gatekeeper)?; run_connection(trace_collector,
Ok(LinkedTaskTermination::KeepFacet) facet,
} relay::Input::Bytes(Box::pin(i)),
})) relay::Output::Bytes(Box::pin(o)),
}); gatekeeper);
Ok(()) Ok(LinkedTaskTermination::KeepFacet)
})?; }
}))
});
Ok(())
}))
{
return Ok(LinkedTaskTermination::Normal);
}
} }
}); });
Ok(()) Ok(())

View File

@ -0,0 +1,23 @@
[package]
name = "syndicate-tools"
version = "0.18.0"
authors = ["Tony Garnock-Jones <tonyg@leastfixedpoint.com>"]
edition = "2018"
description = "Syndicate command-line utilities."
homepage = "https://syndicate-lang.org/"
repository = "https://git.syndicate-lang.org/syndicate-lang/syndicate-rs"
license = "Apache-2.0"
[dependencies]
preserves = "4.995"
syndicate = { path = "../syndicate", version = "0.40.0"}
clap = { version = "^4.0", features = ["derive"] }
clap_complete = "^4.0"
noise-protocol = "0.1"
noise-rust-crypto = "0.5"
[package.metadata.workspaces]
independent = true

View File

@ -0,0 +1,168 @@
use std::io;
use std::str::FromStr;
use clap::ArgGroup;
use clap::CommandFactory;
use clap::Parser;
use clap::Subcommand;
use clap::arg;
use clap_complete::{generate, Shell};
use noise_protocol::DH;
use noise_protocol::Hash;
use noise_rust_crypto::Blake2s;
use noise_rust_crypto::X25519;
use preserves::hex::HexParser;
use preserves::value::BytesBinarySource;
use preserves::value::NestedValue;
use preserves::value::NoEmbeddedDomainCodec;
use preserves::value::Reader;
use preserves::value::TextReader;
use preserves::value::ViaCodec;
use preserves::value::TextWriter;
use syndicate::language;
use syndicate::preserves_schema::Codec;
use syndicate::preserves_schema::ParseError;
use syndicate::schemas::noise;
use syndicate::sturdy::Caveat;
use syndicate::sturdy::SturdyRef;
use syndicate::sturdy::_Any;
#[derive(Clone, Debug)]
struct Preserves<N: NestedValue>(N);
#[derive(Subcommand, Debug)]
enum Action {
#[command(group(ArgGroup::new("key").required(true)))]
/// Generate a fresh SturdyRef from an OID value and a key
Mint {
#[arg(long, value_name="VALUE")]
/// Preserves value to use as SturdyRef OID
oid: Preserves<_Any>,
#[arg(long, group="key")]
/// Key phrase
phrase: Option<String>,
#[arg(long, group="key")]
/// Key bytes, encoded as hex
hex: Option<String>,
#[arg(long)]
/// Caveats to add
caveat: Vec<Preserves<_Any>>,
},
#[command(group(ArgGroup::new("key").required(true)))]
/// Generate a fresh NoiseServiceSpec from a service selector and a key
Noise {
#[arg(long, value_name="VALUE")]
/// Preserves value to use as the service selector
service: Preserves<_Any>,
#[arg(long, value_name="PROTOCOL")]
/// Noise handshake protocol name
protocol: Option<String>,
#[arg(long, group="key")]
/// Key phrase
phrase: Option<String>,
#[arg(long, group="key")]
/// Key bytes, encoded as hex
hex: Option<String>,
#[arg(long, group="key")]
/// Generate a random key
random: bool,
},
/// Emit shell completion code
Completions {
/// Shell dialect to generate
shell: Shell,
}
}
#[derive(Parser, Debug)]
#[command(version)]
struct Cli {
#[command(subcommand)]
action: Action,
}
impl<N: NestedValue> FromStr for Preserves<N> {
type Err = ParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Preserves(TextReader::new(&mut BytesBinarySource::new(s.as_bytes()),
ViaCodec::new(NoEmbeddedDomainCodec)).demand_next(false)?))
}
}
fn main() -> io::Result<()> {
let args = <Cli as Parser>::parse();
match args.action {
Action::Completions { shell } => {
let mut cmd = <Cli as CommandFactory>::command();
let name = cmd.get_name().to_string();
generate(shell, &mut cmd, name, &mut io::stdout());
}
Action::Noise { service, protocol, phrase, hex, random } => {
let key =
if random {
X25519::genkey()
} else if let Some(hex) = hex {
let mut hash = Blake2s::default();
hash.input(hex.as_bytes());
hash.result()
} else if let Some(phrase) = phrase {
let mut hash = Blake2s::default();
hash.input(phrase.as_bytes());
hash.result()
} else {
unreachable!()
};
let n = noise::NoiseServiceSpec {
base: noise::NoiseSpec {
key: X25519::pubkey(&key).to_vec(),
service: noise::ServiceSelector(service.0),
pre_shared_keys: noise::NoisePreSharedKeys::Absent,
protocol: if let Some(p) = protocol {
noise::NoiseProtocol::Present { protocol: p }
} else {
noise::NoiseProtocol::Absent
},
},
secret_key: noise::SecretKeyField::Present {
secret_key: key.to_vec(),
},
};
println!("{}", TextWriter::encode(&mut NoEmbeddedDomainCodec,
&language().unparse(&n))?);
}
Action::Mint { oid, phrase, hex, caveat: caveats } => {
let key =
if let Some(hex) = hex {
HexParser::Liberal.decode(&hex).expect("hex encoded sturdyref")
} else if let Some(phrase) = phrase {
phrase.as_bytes().to_owned()
} else {
unreachable!()
};
let attenuation = caveats.into_iter().map(|c| {
let r = language().parse(&c.0);
if let Ok(Caveat::Unknown(_)) = &r {
eprintln!("Warning: Unknown caveat format: {:?}", &c.0);
}
r
}).collect::<Result<Vec<Caveat>, _>>()?;
let m = SturdyRef::mint(oid.0, &key).attenuate(&attenuation)?;
println!("{}", TextWriter::encode(&mut NoEmbeddedDomainCodec,
&language().unparse(&m))?);
}
}
Ok(())
}
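The `noise` subcommand above derives its static key either randomly or by hashing the supplied phrase or hex string with BLAKE2s and using the digest as the X25519 private key. A minimal standalone sketch of that derivation, reusing only the noise-protocol/noise-rust-crypto calls the file already imports (the helper name and example phrase are illustrative, not part of the tool):

use noise_protocol::{DH, Hash};
use noise_rust_crypto::{Blake2s, X25519};

// Sketch: mirror the phrase-hashing branch of the `noise` subcommand above.
fn static_keypair_from_phrase(phrase: &str) -> (Vec<u8>, Vec<u8>) {
    let mut hash = Blake2s::default();
    hash.input(phrase.as_bytes());
    let secret = hash.result();             // the BLAKE2s digest doubles as the X25519 private key
    let public = X25519::pubkey(&secret);   // corresponding static public key
    (secret.to_vec(), public.to_vec())
}

fn main() {
    let (secret, public) = static_keypair_from_phrase("example phrase");
    assert_eq!((secret.len(), public.len()), (32, 32));
}

The resulting public key is what the subcommand places in the `key` field of the emitted NoiseServiceSpec, while the private key goes into `secretKey`.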

View File

@ -1,6 +1,6 @@
[package]
name = "syndicate"
-version = "0.20.0"
+version = "0.40.0"
authors = ["Tony Garnock-Jones <tonyg@leastfixedpoint.com>"]
edition = "2018"
@ -13,23 +13,23 @@ license = "Apache-2.0"
vendored-openssl = ["openssl/vendored"]
[build-dependencies]
-preserves-schema = "^2"
+preserves-schema = "5.995"
[dependencies]
-preserves = "^2"
+preserves = "4.995"
-preserves-schema = "^2"
+preserves-schema = "5.995"
-tokio = { version = "1.10", features = ["io-util", "macros", "rt", "rt-multi-thread", "time"] }
+tokio = { version = "1.10", features = ["io-std", "io-util", "macros", "rt", "rt-multi-thread", "time"] }
tokio-util = "0.6"
bytes = "1.0"
futures = "0.3"
+blake2 = "0.10"
getrandom = "0.2"
-hmac = "0.11"
+hmac = "0.12"
lazy_static = "1.4"
parking_lot = "0.11"
-sha2 = "0.9"
tracing = "0.1"
tracing-subscriber = "0.2"
@ -48,3 +48,6 @@ harness = false
[[bench]]
name = "ring"
harness = false
+[package.metadata.workspaces]
+independent = true

View File

@ -9,9 +9,9 @@ use syndicate::language;
use syndicate::actor::*; use syndicate::actor::*;
use syndicate::during::entity; use syndicate::during::entity;
use syndicate::dataspace::Dataspace; use syndicate::dataspace::Dataspace;
use syndicate::enclose;
use syndicate::schemas::dataspace::Observe; use syndicate::schemas::dataspace::Observe;
use syndicate::schemas::dataspace_patterns as p; use syndicate::schemas::dataspace_patterns as p;
use syndicate::value::Map;
use syndicate::value::NestedValue; use syndicate::value::NestedValue;
use syndicate::value::Value; use syndicate::value::Value;
@ -52,24 +52,18 @@ pub fn bench_pub(c: &mut Criterion) {
b.iter_custom(|iters| { b.iter_custom(|iters| {
let start = Instant::now(); let start = Instant::now();
rt.block_on(async move { rt.block_on(async move {
Actor::new(None).boot(syndicate::name!("dataspace"), move |t| { Actor::top(None, move |t| {
let ds = t.create(Dataspace::new()); let _ = t.prevent_inert_check();
// The reason this works is that all the messages to `ds` will be delivered
// before the message to `shutdown`, because `ds` and `shutdown` are in the
// same Actor.
let ds = t.create(Dataspace::new(None));
let shutdown = t.create(ShutdownEntity); let shutdown = t.create(ShutdownEntity);
let account = Account::new(syndicate::name!("sender-account")); for _ in 0..iters {
t.linked_task(syndicate::name!("sender"), async move { t.message(&ds, says(AnyValue::new("bench_pub"),
for _ in 0..iters { Value::ByteString(vec![]).wrap()));
external_event(&ds.mailbox, &account, Box::new( }
enclose!((ds) move |t| t.with_entity( t.message(&shutdown, AnyValue::new(true));
&ds,
|t, e| e.message(t, says(AnyValue::new("bench_pub"),
Value::ByteString(vec![]).wrap()))))))?
}
external_event(&shutdown.mailbox, &account, Box::new(
enclose!((shutdown) move |t| t.with_entity(
&shutdown,
|t, e| e.message(t, AnyValue::new(true))))))?;
Ok(LinkedTaskTermination::KeepFacet)
});
Ok(()) Ok(())
}).await.unwrap().unwrap(); }).await.unwrap().unwrap();
}); });
@ -83,27 +77,27 @@ pub fn bench_pub(c: &mut Criterion) {
rt.block_on(async move { rt.block_on(async move {
let turn_count = Arc::new(AtomicU64::new(0)); let turn_count = Arc::new(AtomicU64::new(0));
Actor::new(None).boot(syndicate::name!("dataspace"), { Actor::top(None, {
let iters = iters.clone(); let iters = iters.clone();
let turn_count = Arc::clone(&turn_count); let turn_count = Arc::clone(&turn_count);
move |t| { move |t| {
let ds = Cap::new(&t.create(Dataspace::new())); let ds = Cap::new(&t.create(Dataspace::new(None)));
let shutdown = entity(()) let shutdown = entity(())
.on_asserted(|_, _, _| Ok(Some(Box::new(|_, t| Ok(t.stop()))))) .on_asserted(|_, _, _| Ok(Some(Box::new(|_, t| Ok(t.stop())))))
.create_cap(t); .create_cap(t);
ds.assert(t, language(), &Observe { ds.assert(t, language(), &Observe {
pattern: p::Pattern::DBind(Box::new(p::DBind { pattern: p::Pattern::Bind {
pattern: p::Pattern::DLit(Box::new(p::DLit { pattern: Box::new(p::Pattern::Lit {
value: p::AnyAtom::Symbol("consumer".to_owned()), value: Box::new(p::AnyAtom::Symbol("consumer".to_owned())),
})), }),
})), },
observer: shutdown, observer: shutdown,
}); });
t.spawn(syndicate::name!("consumer"), move |t| { t.spawn(Some(AnyValue::symbol("consumer")), move |t| {
struct Receiver(Arc<AtomicU64>); struct Receiver(Arc<AtomicU64>);
impl Entity<AnyValue> for Receiver { impl Entity<AnyValue> for Receiver {
fn message(&mut self, _t: &mut Activation, _m: AnyValue) -> ActorResult { fn message(&mut self, _t: &mut Activation, _m: AnyValue) -> ActorResult {
@ -117,46 +111,39 @@ pub fn bench_pub(c: &mut Criterion) {
ds.assert(t, &(), &AnyValue::symbol("consumer")); ds.assert(t, &(), &AnyValue::symbol("consumer"));
ds.assert(t, language(), &Observe { ds.assert(t, language(), &Observe {
pattern: p::Pattern::DCompound(Box::new(p::DCompound::Rec { pattern: p::Pattern::Group {
label: AnyValue::symbol("Says"), type_: Box::new(p::GroupType::Rec {
fields: vec![ label: AnyValue::symbol("Says"),
p::Pattern::DLit(Box::new(p::DLit { }),
value: p::AnyAtom::String("bench_pub".to_owned()), entries: Map::from([
})), (p::_Any::new(0), p::Pattern::Lit {
p::Pattern::DBind(Box::new(p::DBind { value: Box::new(p::AnyAtom::String("bench_pub".to_owned())),
pattern: p::Pattern::DDiscard(Box::new(p::DDiscard)), }),
})), (p::_Any::new(1), p::Pattern::Bind {
]})), pattern: Box::new(p::Pattern::Discard),
}),
]),
},
observer: receiver, observer: receiver,
}); });
ds.assert(t, language(), &Observe { ds.assert(t, language(), &Observe {
pattern: p::Pattern::DBind(Box::new(p::DBind { pattern: p::Pattern::Bind {
pattern: p::Pattern::DLit(Box::new(p::DLit { pattern: Box::new(p::Pattern::Lit {
value: p::AnyAtom::Bool(true), value: Box::new(p::AnyAtom::Bool(true)),
})), }),
})), },
observer: shutdown, observer: shutdown,
}); });
let account = Arc::clone(t.account()); t.after(core::time::Duration::from_secs(0), move |t| {
t.linked_task(syndicate::name!("sender"), async move {
for _i in 0..iters { for _i in 0..iters {
let ds = Arc::clone(&ds); ds.message(t, &(), &says(AnyValue::new("bench_pub"),
external_event(&Arc::clone(&ds.underlying.mailbox), &account, Box::new( Value::ByteString(vec![]).wrap()));
move |t| t.with_entity(
&ds.underlying,
|t, e| e.message(t, says(AnyValue::new("bench_pub"),
Value::ByteString(vec![]).wrap())))))?
} }
{ ds.message(t, &(), &AnyValue::new(true));
let ds = Arc::clone(&ds); Ok(())
external_event(&Arc::clone(&ds.underlying.mailbox), &account, Box::new(
move |t| t.with_entity(
&ds.underlying,
|t, e| e.message(t, AnyValue::new(true)))))?;
}
Ok(LinkedTaskTermination::KeepFacet)
}); });
Ok(()) Ok(())
}); });
Ok(()) Ok(())

View File

@ -7,6 +7,8 @@ use std::time::Duration;
use std::time::Instant;
use syndicate::actor::*;
+use syndicate::preserves::rec;
+use syndicate::value::NestedValue;
use tokio::runtime::Runtime;
@ -88,14 +90,16 @@ pub fn bench_ring(c: &mut Criterion) {
self.i += 1;
let spawner_ref = Arc::clone(&self.self_ref);
ACTORS_CREATED.fetch_add(1, Ordering::Relaxed);
-t.spawn(syndicate::name!("forwarder", ?i), move |t| {
-    let _ = t.prevent_inert_check();
-    let f = t.create(Forwarder {
-        next,
-    });
-    t.message(&spawner_ref, f);
-    Ok(())
-});
+t.spawn(
+    Some(rec![AnyValue::symbol("forwarder"), AnyValue::new(i)]),
+    move |t| {
+        let _ = t.prevent_inert_check();
+        let f = t.create(Forwarder {
+            next,
+        });
+        t.message(&spawner_ref, f);
+        Ok(())
+    });
} else {
let mut c_state = Counter {
start: Instant::now(),
@ -118,7 +122,7 @@ pub fn bench_ring(c: &mut Criterion) {
}
ACTORS_CREATED.fetch_add(1, Ordering::Relaxed);
-Actor::new(None).boot(syndicate::name!("counter"), move |t| {
+Actor::top(None, move |t| {
let _ = t.prevent_inert_check();
let mut s = Spawner {
self_ref: t.create_inert(),

View File

@ -26,11 +26,11 @@ fn main() -> std::io::Result<()> {
let mut gen_dir = buildroot.clone();
gen_dir.push("src/schemas");
-let mut c = CompilerConfig::new(gen_dir, "crate::schemas".to_owned());
+let mut c = CompilerConfig::new("crate::schemas".to_owned());
c.plugins.push(Box::new(syndicate_plugins::PatternPlugin));
c.add_external_module(ExternalModule::new(vec!["EntityRef".to_owned()], "crate::actor"));
let inputs = expand_inputs(&vec!["protocols/schema-bundle.bin".to_owned()])?;
-c.load_schemas_and_bundles(&inputs)?;
+c.load_schemas_and_bundles(&inputs, &vec![])?;
-compile(&c)
+compile(&c, &mut CodeCollector::files(gen_dir))
}

View File

@ -1,17 +1,44 @@
[binary file: protocols/schema-bundle.bin, the regenerated Preserves schema bundle (tcp, timer, http, noise, stream, sturdy, stdenv, gatekeeper, worker, service, protocol, dataspace, dataspacePatterns, transportAddress, and the chat example schemas). The raw binary diff does not render as readable text and is omitted here.]

View File

@ -1,4 +1,4 @@
version 1 .
embeddedType EntityRef.Cap .
-Observe = <Observe @pattern dataspacePatterns.Pattern @observer #!any>.
+Observe = <Observe @pattern dataspacePatterns.Pattern @observer #:any>.

View File

@ -1,23 +1,30 @@
version 1 .
embeddedType EntityRef.Cap .
-; Dataspace patterns: a sublanguage of attenuation patterns.
-Pattern = DDiscard / DBind / DLit / DCompound .
-DDiscard = <_>.
-DBind = <bind @pattern Pattern>.
-DLit = <lit @value AnyAtom>.
-DCompound = <rec @label any @fields [Pattern ...]>
-/ <arr @items [Pattern ...]>
-/ <dict @entries { any: Pattern ...:... }> .
+# Dataspace patterns: *almost* a sublanguage of attenuation patterns.
+#
+# One key difference is that Dataspace patterns are extensible, in that
+# they ignore fields not mentioned in group patterns.
+Pattern =
+/ @discard <_>
+/ <bind @pattern Pattern>
+/ <lit @value AnyAtom>
+/ <group @type GroupType @entries { any: Pattern ...:... }>
+.
+GroupType =
+/ <rec @label any>
+/ <arr>
+/ <dict>
+.
AnyAtom =
/ @bool bool
-/ @float float
/ @double double
/ @int int
/ @string string
/ @bytes bytes
/ @symbol symbol
-/ @embedded #!any
+/ @embedded #:any
.
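The updated benchmark earlier in this diff already builds a pattern in this new encoding; isolated here as a sketch (assuming the generated `dataspace_patterns` module from this workspace's syndicate crate), the observation pattern `<Says "bench_pub" $>` looks like this:

use syndicate::actor::AnyValue;
use syndicate::schemas::dataspace_patterns as p;
use syndicate::value::{Map, NestedValue};

// Sketch: a group pattern over the `Says` record label, with a literal at field 0
// and a binding of a discard at field 1, mirroring the benchmark above.
fn says_pattern() -> p::Pattern {
    p::Pattern::Group {
        type_: Box::new(p::GroupType::Rec {
            label: AnyValue::symbol("Says"),
        }),
        entries: Map::from([
            (p::_Any::new(0), p::Pattern::Lit {
                value: Box::new(p::AnyAtom::String("bench_pub".to_owned())),
            }),
            (p::_Any::new(1), p::Pattern::Bind {
                pattern: Box::new(p::Pattern::Discard),
            }),
        ]),
    }
}

Because group patterns ignore unmentioned fields, the same pattern would also match `Says` records carrying extra trailing fields.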

View File

@ -1,5 +1,87 @@
version 1 .
embeddedType EntityRef.Cap .
-Resolve = <resolve @sturdyref sturdy.SturdyRef @observer #!#!any>.
-Bind = <bind @oid any @key bytes @target #!any>.
# ---------------------------------------------------------------------------
# Protocol at *gatekeeper* entities
# Assertion. Gatekeeper will attempt to resolve `step`, responding with a `Resolved` to
# `observer`.
Resolve = <resolve @step Step @observer #:Resolved> .
Resolved = <accepted @responderSession #:any> / Rejected .
Step = <<rec> @stepType symbol [@detail any]> .
# ---------------------------------------------------------------------------
# Protocol at dataspaces *associated* with gatekeeper entities
# ## Handling `Resolve` requests
#
# When the gatekeeper entity receives a `Resolve` assertion (call it R1), it
#
# 1. asserts a `Resolve` (call it R2) into its associated dataspace that
# is the same as R1 except it has a different `observer`; and
#
# 2. observes a `Bind` with `description` matching the `step` of R1/R2
# according to `stepType` (e.g. treatment of SturdyStepType is not the
# same as treatment of NoiseStepType).
#
# Normally, an appropriate `Bind` is expected to exist. If the gatekeeper
# sees the `Bind` first, it takes the `target` from it and does whatever
# `stepType` mandates before replying to R1's observer.
#
# However, if a `Resolved` is asserted to R2's observer before a `Bind`
# appears, that resolution is relayed on to R1's observer directly, be it
# positive or negative, and the gatekeeper stops waiting for a `Bind`.
#
# This way, entities can keep an eye out for `Resolve` requests that will
# never complete, and answer `Rejected` to them even when no matching
# `Bind` exists. Entities could also use `Resolve` requests to synthesize a
# `Bind` in a "just-in-time" fashion.
#
# ## General treatment of `Bind` assertions
#
# When the gatekeeper sees a `Bind`, independently of any potential
# `Resolve` requests, it computes an appropriate PathStep from
# `description` pointing at `target`, and responds with a `Bound` to
# `observer` (if supplied).
#
Bind = <bind @description Description @target #:any @observer BindObserver> .
Description = <<rec> @stepType symbol [@detail any]> .
BindObserver = @present #:Bound / @absent #f .
Bound = <bound @pathStep PathStep> / Rejected .
# ---------------------------------------------------------------------------
# Protocol at client-side dataspaces, for resolution utilities
# Assertion. In response to observation of this with appropriate captures/wildcards in `addr`
# and `resolved`, respondent will follow `route.pathSteps` starting from one of the
# `route.transports`, asserting `ResolvePath` with the final `Resolved` as well as the selected
# transport `addr` and a `control` for it.
ResolvePath = <resolve-path @route Route @addr any @control #:TransportControl @resolved Resolved> .
TransportConnection = <connect-transport @addr any @control #:TransportControl @resolved Resolved> .
ResolvedPathStep = <path-step @origin #:Resolve @pathStep PathStep @resolved Resolved> .
PathStep = <<rec> @stepType symbol [@detail any]> .
# A `Route` describes a network path that can be followed to reach some target entity.
#
# It starts with a set of zero or more possible non-Syndicate `transports`. These could be
# `transportAddress.Tcp` values or similar. They are just suggestions; it's quite possible the
# endpoint is reachable by some means not listed. The network outside Syndicate is, after all,
# pretty diverse! In particular, *zero* `transports` may be provided, in which case some
# out-of-band means has to be used to make that first connection.
#
# The `transports` give instructions for contacting the first entity in the `Route` path. Often
# this will be a `gatekeeper`, or a `noise` protocol endpoint, or both. Occasionally, it may
# even be the desired target entity. Subsequent `pathSteps` describe how to proceed from the
# initial entity to the target.
#
# (`transports` should by rights be a set, not a sequence, but that opens up a Can Of Worms
# regarding dataspace patterns including literal sets that I can't deal with right now.)
Route = <route @transports [any ...] @pathSteps PathStep ...> .
TransportControl = ForceDisconnect .
ForceDisconnect = <force-disconnect> .
# ---------------------------------------------------------------------------
Rejected = <rejected @detail any> .
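The resolution behaviour described in the comments above can be summarised as: answer R1's observer from a matching `Bind` if one is seen first, otherwise relay whatever `Resolved` reaches R2's observer. A toy sketch of that rule (types and names are illustrative stand-ins, not the server implementation):

// Sketch: model of the gatekeeper's relaying rule for a single Resolve request.
#[derive(Clone, Debug, PartialEq)]
enum Resolved {
    Accepted { responder_session: String }, // stand-in for <accepted @responderSession #:any>
    Rejected { detail: String },            // stand-in for <rejected @detail any>
}

enum FirstObservation {
    MatchingBind { target: String },  // a <bind ...> whose description matches the step
    EarlyResolution(Resolved),        // a Resolved asserted to R2's observer before any Bind
}

// Whatever the gatekeeper sees first determines the answer sent to R1's observer.
fn answer_for_r1(seen: FirstObservation) -> Resolved {
    match seen {
        // Normal case: take the target from the Bind; stepType-specific processing
        // (sturdy attenuation, noise session setup, ...) is elided here.
        FirstObservation::MatchingBind { target } =>
            Resolved::Accepted { responder_session: target },
        // An early Resolved, positive or negative, is relayed on unchanged.
        FirstObservation::EarlyResolution(r) => r,
    }
}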

View File

@ -0,0 +1,62 @@
version 1 .
# Assertion in driver DS
# Causes creation of server and route
HttpBinding = <http-bind @host HostPattern @port int @method MethodPattern @path PathPattern @handler #:HttpRequest> .
# Assertion in driver DS
# Describes active server and route
HttpService = <http-service @host HostPattern @port int @method MethodPattern @path PathPattern> .
# Assertion in driver DS
# Describes active listener
HttpListener = <http-listener @port int> .
HostPattern = @host string / @any #f .
PathPattern = [PathPatternElement ...] .
PathPatternElement = @label string / @wildcard =_ / @rest =... .
MethodPattern = @any #f / @specific @"Lowercase" symbol .
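The matching semantics of `PathPattern` are not spelled out in the schema; the sketch below encodes one natural reading (exact match for labels, any single segment for `_`, the whole remainder for `...`) and is an assumption rather than the server's actual routing code:

// Sketch: match a request's path segments against a PathPattern.
#[derive(Clone, Debug)]
enum PathPatternElement {
    Label(String), // must equal the segment exactly
    Wildcard,      // matches any single segment
    Rest,          // matches all remaining segments, including none
}

fn path_matches(pattern: &[PathPatternElement], path: &[String]) -> bool {
    let mut i = 0;
    for elem in pattern {
        match elem {
            PathPatternElement::Rest => return true,
            PathPatternElement::Label(l) =>
                if path.get(i).map(|s| s == l) != Some(true) { return false; },
            PathPatternElement::Wildcard =>
                if path.get(i).is_none() { return false; },
        }
        i += 1;
    }
    // With no explicit Rest element, the pattern must consume the whole path.
    i == path.len()
}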
# Assertion in driver DS
HttpRequest = <http-request
@sequenceNumber int
@host RequestHost
@port int
@method @"Lowercase" symbol
@path [string ...]
@headers Headers
@query {symbol: [QueryValue ...] ...:...}
@body RequestBody> .
Headers = {@"Lowercase" symbol: string ...:...} .
QueryValue = @string string / <file @filename string @headers Headers @body bytes> .
RequestBody = @present bytes / @absent #f .
RequestHost = @present string / @absent #f .
# Assertion to handler entity
HttpContext = <request @req HttpRequest @res #:HttpResponse> .
# HttpResponse protocol. Delivered to the `res` ref in `HttpContext`.
#
# (status | header)* . chunk* . done
#
# Done triggers completion of the response and retraction of the frame by the peer. If the
# HttpBinding responsible for the request is withdrawn mid-way through a response (i.e. when
# chunked transfer is used and at least one chunk has been sent) the request is abruptly
# closed; if it is withdrawn at any other moment in the lifetime of the request, a 500 Internal
# Server Error is sent to the client.
#
@<TODO "trailers?">
HttpResponse =
# Messages.
/ <status @code int @message string>
/ <header @name symbol @value string>
/ <chunk @chunk Chunk>
/ <done @chunk Chunk>
.
Chunk = @string string / @bytes bytes .
# e.g. text/plain, text/html, application/json
MimeType = symbol .
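The `(status | header)* . chunk* . done` ordering can be captured as a small validator; the event types below are stand-ins for the generated schema structs, so treat this as an illustrative sketch only:

// Sketch: check that a recorded sequence of response events respects the ordering above.
#[allow(dead_code)]
enum ResponseEvent {
    Status { code: i64, message: String },
    Header { name: String, value: String },
    Chunk(Vec<u8>),
    Done(Vec<u8>),
}

fn well_ordered(events: &[ResponseEvent]) -> bool {
    let mut body_started = false;
    let mut finished = false;
    for e in events {
        if finished {
            return false;                          // nothing may follow <done ...>
        }
        match e {
            ResponseEvent::Status { .. } | ResponseEvent::Header { .. } =>
                if body_started { return false; }, // head items only before the first chunk
            ResponseEvent::Chunk(_) => body_started = true,
            ResponseEvent::Done(_) => finished = true,
        }
    }
    finished                                       // a complete response ends with <done ...>
}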

View File

@ -0,0 +1,83 @@
version 1 .
embeddedType EntityRef.Cap .
# https://noiseprotocol.org/
# ---------------------------------------------------------------------------
# Binding and connection
NoiseStepType = =noise .
# In a gatekeeper.Step, use ServiceSelector as detail.
NoiseStepDetail = ServiceSelector .
# In a gatekeeper.PathStep, use a NoiseSpec as detail.
NoisePathStepDetail = NoiseSpec .
# In a gatekeeper.Description, use a NoiseServiceSpec as detail.
NoiseDescriptionDetail = NoiseServiceSpec .
# ---------------------------------------------------------------------------
# Specification of target and bind addresses
ServiceSelector = any .
NoiseSpec = {
# The `serviceSelector` to use in a `NoiseStep` for `gatekeeper.Resolve`.
service: ServiceSelector,
# The responder's static public key. If not required (uncommon!), supply the empty ByteString.
key: bytes,
}
& @protocol NoiseProtocol
& @preSharedKeys NoisePreSharedKeys
.
NoiseServiceSpec = @base NoiseSpec & @secretKey SecretKeyField .
SecretKeyField = @present { secretKey: bytes } / @invalid { secretKey: any } / @absent {} .
# If absent, a default of DefaultProtocol is used. Most services will speak the default.
NoiseProtocol = @present { protocol: string } / @invalid { protocol: any } / @absent {} .
DefaultProtocol = "Noise_NK_25519_ChaChaPoly_BLAKE2s" .
# If present, Noise pre-shared-keys (PSKs) are drawn from the sequence as required; if the
# sequence is exhausted or not supplied, an all-zeros key is used each time a PSK is needed.
NoisePreSharedKeys = @present { preSharedKeys: [bytes ...] } / @invalid { preSharedKeys: any } / @absent {} .
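A sketch of the two defaulting rules just stated, with illustrative helper names (an absent `protocol` means `DefaultProtocol`; PSK requests past the end of the supplied sequence yield an all-zeros key, 32 bytes being the Noise PSK size):

const DEFAULT_PROTOCOL: &str = "Noise_NK_25519_ChaChaPoly_BLAKE2s";

// Absent protocol field: fall back to the default handshake pattern.
fn effective_protocol(protocol: Option<&str>) -> &str {
    protocol.unwrap_or(DEFAULT_PROTOCOL)
}

// PSKs are drawn from the supplied sequence; once exhausted (or if none were
// supplied), an all-zeros key is used.
fn nth_pre_shared_key(supplied: &[Vec<u8>], n: usize) -> Vec<u8> {
    supplied.get(n).cloned().unwrap_or_else(|| vec![0u8; 32])
}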
# ---------------------------------------------------------------------------
# Handshaking and running a session
# 1. initiator asserts <resolve <noise ServiceSelector> #:A> at Gatekeeper
# 2. gatekeeper asserts <accepted #:B> at #:A
# 3. initiator asserts <initiator #:C> at #:B and then sends `Packet`s to #:B
# 4. responder sends `Packet`s to #:C
#
# Sessions begin with introduction of initiator (#:C) and responder (#:B) to each other, and
# then proceed by sending `Packet`s (from #:C) to #:B and (from #:B) to #:C according to
# the Noise protocol definition. Each `Packet` represents a complete logical unit of
# communication; for example, a complete Turn when layering the Syndicate protocol over Noise.
# Note well the restriction on Noise messages: no individual complete packet or packet fragment
# may exceed 65535 bytes (N.B. not 65536!). When `fragmented`, each portion of a `Packet` is a
# complete Noise "transport message"; when `complete`, the whole thing is likewise a complete
# "transport message".
#
# Retraction of the `Initiator` ends the session from the initiator-side; retraction of the
# `<accepted ...>` assertion ends the session from the responder-side.
SessionItem = Initiator / Packet .
# Assertion
Initiator = <initiator @initiatorSession #:Packet> .
# Message
Packet = @complete bytes / @fragmented [bytes ...] .
# When layering Syndicate protocol over noise,
#
# - the canonical encoding of the serviceSelector is the prologue
# - protocol.Packets MUST be encoded using the machine-oriented Preserves syntax
# - zero or more Turns are permitted per noise.Packet
# - each Turn must fit inside a single noise.Packet (fragment if needed)
# - payloads inside a noise.Packet may be padded at the end with byte 0x80 (128), which
# encodes `#f` in the machine-oriented Preserves syntax.
#
# In summary, each noise.Packet, once (reassembled and) decrypted, will be a sequence of zero
# or more machine-encoded protocol.Packets, followed by zero or more 0x80 bytes.
.
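
As a rough illustration of the 65535-byte transport-message limit and the trailing-0x80 padding rule described above, here is a standalone Rust sketch. It assumes encryption has already produced the Noise transport messages and that parsing of the decrypted payload happens elsewhere; a real reader would more likely parse values directly and ignore trailing `#f`s rather than trim bytes.

// Hedged sketch; mirrors `Packet = @complete bytes / @fragmented [bytes ...]`.
const MAX_TRANSPORT_MESSAGE: usize = 65535; // N.B. not 65536

enum Packet {
    Complete(Vec<u8>),
    Fragmented(Vec<Vec<u8>>),
}

// Wrap already-encrypted Noise transport messages into one logical Packet,
// refusing any fragment that exceeds the Noise message-size limit.
fn to_packet(transport_messages: Vec<Vec<u8>>) -> Result<Packet, String> {
    for m in &transport_messages {
        if m.len() > MAX_TRANSPORT_MESSAGE {
            return Err(format!("transport message of {} bytes exceeds the Noise limit", m.len()));
        }
    }
    if transport_messages.len() == 1 {
        Ok(Packet::Complete(transport_messages.into_iter().next().unwrap()))
    } else {
        Ok(Packet::Fragmented(transport_messages))
    }
}

// After reassembly and decryption, drop trailing 0x80 bytes (each encodes `#f`
// in the machine-oriented Preserves syntax) before reading protocol.Packets.
fn strip_padding(decrypted: &[u8]) -> &[u8] {
    let end = decrypted
        .iter()
        .rposition(|&b| b != 0x80)
        .map(|i| i + 1)
        .unwrap_or(0);
    &decrypted[..end]
}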

View File

@ -1,6 +1,8 @@
version 1 . version 1 .
Packet = Turn / Error . Packet = Turn / Error / Extension .
Extension = <<rec> @label any @fields [any ...]> .
Error = <error @message string @detail any>. Error = <error @message string @detail any>.
@ -11,7 +13,7 @@ Oid = int .
Turn = [TurnEvent ...]. Turn = [TurnEvent ...].
TurnEvent = [@oid Oid @event Event]. TurnEvent = [@oid Oid @event Event].
Assert = <assert @assertion Assertion @handle Handle>. Assert = <A @assertion Assertion @handle Handle>.
Retract = <retract @handle Handle>. Retract = <R @handle Handle>.
Message = <message @body Assertion>. Message = <M @body Assertion>.
Sync = <sync @peer #!#t>. Sync = <S @peer #:#t>.

View File

@ -1,3 +0,0 @@
version 1 .
RacketEvent = <racket-event @source #!any @event #!any>.

View File

@ -1,21 +0,0 @@
version 1 .
embeddedType EntityRef.Cap .
UserId = int .
Join = <joinedUser @uid UserId @handle #!Session>.
Session = @observeUsers <Observe =user @observer #!UserInfo>
/ @observeSpeech <Observe =says @observer #!Says>
/ NickClaim
/ Says
.
NickClaim = <claimNick @uid UserId @name string @k #!NickClaimResponse>.
NickClaimResponse = #t / NickConflict .
UserInfo = <user @uid UserId @name string>.
Says = <says @who UserId @what string>.
NickConflict = <nickConflict>.

View File

@ -1,54 +1,51 @@
version 1 . version 1 .
embeddedType EntityRef.Cap . embeddedType EntityRef.Cap .
; Asserts that a service should begin (and stay) running after waiting # Asserts that a service should begin (and stay) running after waiting
; for its dependencies and considering reverse-dependencies, blocks, # for its dependencies and considering reverse-dependencies, blocks,
; and so on. # and so on.
RequireService = <require-service @serviceName any>. RequireService = <require-service @serviceName any>.
; Asserts that a service should begin (and stay) running RIGHT NOW, # Asserts that a service should begin (and stay) running RIGHT NOW,
; without considering its dependencies. # without considering its dependencies.
RunService = <run-service @serviceName any>. RunService = <run-service @serviceName any>.
; Asserts one or more current states of service `serviceName`. The # Asserts one or more current states of service `serviceName`. The
; overall state of the service is the union of asserted `state`s. # overall state of the service is the union of asserted `state`s.
; #
; Only a few combinations make sense: # Only a few combinations make sense:
; - `started` # - `started`
; - `started` + `ready` # - `started` + `ready`
; - `failed` # - `failed`
; - `complete` # - `complete`
; #
ServiceState = <service-state @serviceName any @state State>. ServiceState = <service-state @serviceName any @state State>.
; A running service publishes zero or more of these. The details of # A running service publishes zero or more of these. The details of
; the object vary by service. # the object vary by service.
; #
ServiceObject = <service-object @serviceName any @object any>. ServiceObject = <service-object @serviceName any @object any>.
; Possible service states. # Possible service states.
State = State =
/ ; The service has begun its startup routine, and may or may not be / # The service has begun its startup routine, and may or may not be
; ready to take requests from other parties. # ready to take requests from other parties.
=started =started
/ ; The service is ready to take requests from other parties. / # The service is ready to take requests from other parties.
; (This state is special in that it is asserted *in addition* to `started`.) # (This state is special in that it is asserted *in addition* to `started`.)
=ready =ready
/ ; The service has failed. / # The service has failed.
=failed =failed
/ ; The service has completed execution. / # The service has completed execution.
=complete =complete
/ # Extension or user-defined state
@userDefined any
. .
; Asserts that, when `depender` is `require-service`d, it should not # Asserts that, when `depender` is `require-service`d, it should not be started until
; be started until `dependee` has been asserted. # `dependee` has been asserted, and also that `dependee`'s `serviceName` should be
# `require-service`d.
ServiceDependency = <depends-on @depender any @dependee ServiceState>. ServiceDependency = <depends-on @depender any @dependee ServiceState>.
; Asserts that the service is a "system layer" service. If *not* # Message. Triggers a service restart.
; specified for a service X, where X is not `<milestone Y>` for some
; Y, the system acts as if `<depends-on X <service-state <milestone
; system-layer> ready>>` were asserted.
SystemLayerService = <system-layer-service @serviceName any>.
; Message. Triggers a service restart.
RestartService = <restart-service @serviceName any>. RestartService = <restart-service @serviceName any>.
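
For illustration only, the "only a few combinations make sense" rule in the comments above can be written as a check over the union of asserted states. The state names follow the schema; everything else here is hypothetical and omits the new `userDefined` alternative.

use std::collections::BTreeSet;

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum State { Started, Ready, Failed, Complete }

// Does the union of currently-asserted states form a sensible combination?
fn is_sensible(states: &BTreeSet<State>) -> bool {
    use State::*;
    let sensible: [&[State]; 4] = [&[Started], &[Started, Ready], &[Failed], &[Complete]];
    sensible.iter().any(|combo| {
        let combo: BTreeSet<State> = combo.iter().copied().collect();
        *states == combo
    })
}

fn main() {
    let mut current = BTreeSet::new();
    current.insert(State::Started);
    assert!(is_sensible(&current));
    current.insert(State::Ready);
    assert!(is_sensible(&current));
    current.insert(State::Failed);
    assert!(!is_sensible(&current));
}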

View File

@ -1,5 +0,0 @@
version 1 .
embeddedType EntityRef.Cap .
Present = <Present @username string>.
Says = <Says @who string @what string>.

View File

@ -0,0 +1,31 @@
version 1 .
# A "standard" route is
#
# - a collection of websocket urls, for transport.
# - a noise tunnel, for server authentication, confidentiality and integrity.
# - a macaroon, for authorization.
#
# Making these choices allows a compact representation. Encoding a binary-syntax representation
# of a standard route using base64 produces a somewhat-convenient blob of text representing
# access to a network object that users can cut and paste.
#
# A `stdenv.StandardRoute.standard` can be rewritten to a `gatekeeper.Route` like this (with
# `$caveats`, if any, added as appropriate):
#
# <route $transports <noise { service: $service key: $key }> <ref { sig: $sig oid: $oid }>>
#
StandardRoute =
/ @standard [@transports [StandardTransport ...]
@key bytes
@service any
@sig bytes
@oid any
@caveats sturdy.Caveat ...]
/ @general gatekeeper.Route
.
StandardTransport =
/ @wsUrl string
/ @other any
.
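
The "compact representation" mentioned above is base64 applied to the canonical binary (packed) Preserves encoding of the route value. A minimal sketch of that last step, assuming the route has already been serialized to packed bytes elsewhere and assuming the `base64` crate's Engine API (0.21-style):

use base64::{engine::general_purpose::STANDARD, Engine as _};

// Turn already-packed StandardRoute bytes into the cut-and-pasteable blob.
fn route_to_text(packed_route_bytes: &[u8]) -> String {
    STANDARD.encode(packed_route_bytes)
}

// Recover the packed bytes from such a blob; Preserves decoding happens elsewhere.
fn route_from_text(blob: &str) -> Result<Vec<u8>, base64::DecodeError> {
    STANDARD.decode(blob)
}

fn main() {
    let placeholder_bytes = vec![0xb4, 0x84]; // placeholder, not a real packed route
    let blob = route_to_text(&placeholder_bytes);
    assert_eq!(route_from_text(&blob).unwrap(), placeholder_bytes);
    println!("{blob}");
}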

View File

@ -1,38 +1,38 @@
version 1 . version 1 .
embeddedType EntityRef.Cap . embeddedType EntityRef.Cap .
; Assertion: # Assertion:
StreamConnection = <stream-connection @source #!Source @sink #!Sink @spec any>. StreamConnection = <stream-connection @source #:Source @sink #:Sink @spec any>.
; Assertions: # Assertions:
StreamListenerReady = <stream-listener-ready @spec any>. StreamListenerReady = <stream-listener-ready @spec any>.
StreamListenerError = <stream-listener-error @spec any @message string>. StreamListenerError = <stream-listener-error @spec any @message string>.
; Assertion: # Assertion:
StreamError = <error @message string>. StreamError = <error @message string>.
Source = Source =
; Assertions: # Assertions:
/ <sink @controller #!Sink> / <sink @controller #:Sink>
/ StreamError / StreamError
; Messages: # Messages:
/ <credit @amount CreditAmount @mode Mode> / <credit @amount CreditAmount @mode Mode>
. .
Sink = Sink =
; Assertions: # Assertions:
/ <source @controller #!Source> / <source @controller #:Source>
/ StreamError / StreamError
; Messages: # Messages:
/ <data @payload any @mode Mode> / <data @payload any @mode Mode>
/ <eof> / <eof>
. .
; Value: # Value:
CreditAmount = @count int / @unbounded =unbounded . CreditAmount = @count int / @unbounded =unbounded .
; Value: # Value:
Mode = =bytes / @lines LineMode / <packet @size int> / <object @description any>. Mode = =bytes / @lines LineMode / <packet @size int> / <object @description any>.
LineMode = =lf / =crlf . LineMode = =lf / =crlf .

View File

@ -1,30 +1,57 @@
version 1 . version 1 .
embeddedType EntityRef.Cap . embeddedType EntityRef.Cap .
; Each Attenuation is a stage. The sequence of Attenuations is run RIGHT-TO-LEFT. # ---------------------------------------------------------------------------
; That is, the newest Attenuations are at the right. # Binding and connection
SturdyRef = <ref @oid any @caveatChain [Attenuation ...] @sig bytes>.
; An individual Attenuation is run RIGHT-TO-LEFT. SturdyStepType = =ref .
; That is, the newest Caveats are at the right.
Attenuation = [Caveat ...].
; embodies 1st-party caveats over assertion structure, but nothing else # In a gatekeeper.Step or gatekeeper.PathStep, use Parameters as detail.
; can add 3rd-party caveats and richer predicates later SturdyStepDetail = Parameters .
Caveat = Rewrite / Alts . SturdyPathStepDetail = Parameters .
Rewrite = <rewrite @pattern Pattern @template Template>.
# In a gatekeeper.Description, use the following detail.
SturdyDescriptionDetail = {
oid: any,
key: bytes,
} .
# ---------------------------------------------------------------------------
# Macaroons
# The sequence of Caveats is run RIGHT-TO-LEFT.
# That is, the newest Caveats are at the right.
#
# Let f(k,d) = HMAC-BLAKE2s-256(k,d)[0..16),
# e = canonical machine-oriented serialization of some preserves value, and
# k = the original secret key for the ref.
#
# The `sig` is then f(f(f(f(k, e(oid)), ...), e(Caveat)), ...).
#
SturdyRef = <ref @parameters Parameters> .
Parameters = {
oid: any,
sig: bytes,
} & @caveats CaveatsField .
CaveatsField = @present { caveats: [Caveat ...] } / @invalid { caveats: any } / @absent {} .
# embodies 1st-party caveats over assertion structure, but nothing else
# can add 3rd-party caveats and richer predicates later
Caveat = Rewrite / Alts / Reject / @unknown any .
Rewrite = <rewrite @pattern Pattern @template Template> .
Reject = <reject @pattern Pattern> .
Alts = <or @alternatives [Rewrite ...]>. Alts = <or @alternatives [Rewrite ...]>.
Oid = int . Oid = int .
WireRef = @mine [0 @oid Oid] / @yours [1 @oid Oid @attenuation Caveat ...]. WireRef = @mine [0 @oid Oid] / @yours [1 @oid Oid @attenuation Caveat ...].
;--------------------------------------------------------------------------- # ---------------------------------------------------------------------------
Lit = <lit @value any>. Lit = <lit @value any>.
Pattern = PDiscard / PAtom / PEmbedded / PBind / PAnd / PNot / Lit / PCompound . Pattern = PDiscard / PAtom / PEmbedded / PBind / PAnd / PNot / Lit / PCompound .
PDiscard = <_>. PDiscard = <_>.
PAtom = =Boolean / =Float / =Double / =SignedInteger / =String / =ByteString / =Symbol . PAtom = =Boolean / =Double / =SignedInteger / =String / =ByteString / =Symbol .
PEmbedded = =Embedded . PEmbedded = =Embedded .
PBind = <bind @pattern Pattern>. PBind = <bind @pattern Pattern>.
PAnd = <and @patterns [Pattern ...]>. PAnd = <and @patterns [Pattern ...]>.
@ -35,7 +62,7 @@ PCompound =
/ @dict <dict @entries { any: Pattern ...:... }> . / @dict <dict @entries { any: Pattern ...:... }> .
Template = TAttenuate / TRef / Lit / TCompound . Template = TAttenuate / TRef / Lit / TCompound .
TAttenuate = <attenuate @template Template @attenuation Attenuation>. TAttenuate = <attenuate @template Template @attenuation [Caveat ...]>.
TRef = <ref @binding int>. TRef = <ref @binding int>.
TCompound = TCompound =
/ @rec <rec @label any @fields [Template ...]> / @rec <rec @label any @fields [Template ...]>
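
The `sig` chaining described in the macaroon comments above is naturally a left fold. The sketch below assumes the `hmac` (0.12) and `blake2` (0.10) crates, using `SimpleHmac` because BLAKE2 does not expose the block-level API that plain `Hmac` expects; it also assumes the canonical machine-oriented serializations `e(...)` are produced elsewhere. It illustrates the formula and is not a claim about how syndicate-rs implements it.

use blake2::Blake2s256;
use hmac::{Mac, SimpleHmac};

// f(k, d) = HMAC-BLAKE2s-256(k, d)[0..16)
fn f(key: &[u8], data: &[u8]) -> Vec<u8> {
    let mut mac = SimpleHmac::<Blake2s256>::new_from_slice(key)
        .expect("HMAC accepts keys of any length");
    mac.update(data);
    mac.finalize().into_bytes()[..16].to_vec()
}

// sig = f(... f(f(k, e(oid)), e(Caveat1)) ..., e(CaveatN))
// `encoded_oid` and `encoded_caveats` are the packed Preserves encodings e(...).
fn chain_sig(secret_key: &[u8], encoded_oid: &[u8], encoded_caveats: &[&[u8]]) -> Vec<u8> {
    encoded_caveats
        .iter()
        .fold(f(secret_key, encoded_oid), |key_so_far, c| f(&key_so_far, c))
}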

View File

@ -4,4 +4,4 @@ embeddedType EntityRef.Cap .
TcpRemote = <tcp-remote @host string @port int>. TcpRemote = <tcp-remote @host string @port int>.
TcpLocal = <tcp-local @host string @port int>. TcpLocal = <tcp-local @host string @port int>.
TcpPeerInfo = <tcp-peer @handle #!any @local TcpLocal @remote TcpRemote>. TcpPeerInfo = <tcp-peer @handle #:any @local TcpLocal @remote TcpRemote>.

View File

@ -1,7 +1,7 @@
version 1 . version 1 .
SetTimer = <set-timer @label any @msecs double @kind TimerKind>. SetTimer = <set-timer @label any @seconds double @kind TimerKind>.
TimerExpired = <timer-expired @label any @msecs double>. TimerExpired = <timer-expired @label any @seconds double>.
TimerKind = =relative / =absolute / =clear . TimerKind = =relative / =absolute / =clear .
LaterThan = <later-than @msecs double>. LaterThan = <later-than @seconds double>.

View File

@ -0,0 +1,96 @@
version 1 .
embeddedType EntityRef.Cap .
TraceEntry = <trace
@timestamp @"seconds since Unix epoch" double
@actor ActorId
@item ActorActivation> .
ActorActivation =
/ <start @actorName Name>
/ @turn TurnDescription
/ <stop @status ExitStatus>
.
Name =
/ <anonymous>
/ <named @name any>
.
ActorId = any .
FacetId = any .
Oid = any .
TaskId = any .
TurnId = any .
ExitStatus = =ok / protocol.Error .
# Trace information associated with a turn.
TurnDescription = <turn @id TurnId @cause TurnCause @actions [ActionDescription ...]> .
# The cause of a turn.
TurnCause =
/ @turn <caused-by @id TurnId>
/ <cleanup>
/ @linkedTaskRelease <linked-task-release @id TaskId @reason LinkedTaskReleaseReason>
/ @periodicActivation <periodic-activation @"`period` is in seconds" @period double>
/ <delay @causingTurn TurnId @"`amount` is in seconds" @amount double>
/ <external @description any>
.
LinkedTaskReleaseReason = =cancelled / =normal .
# An actual event carried within a turn.
TurnEvent =
/ <assert @assertion AssertionDescription @handle protocol.Handle>
/ <retract @handle protocol.Handle>
/ <message @body AssertionDescription>
/ <sync @peer Target>
/ # A souped-up, disguised, special-purpose `retract` event.
@breakLink <break-link @source ActorId @handle protocol.Handle>
.
TargetedTurnEvent = <event @target Target @detail TurnEvent> .
# An action taken during a turn.
ActionDescription =
/ # The active party is processing a new `event` for `target` from the received Turn.
<dequeue @event TargetedTurnEvent>
/ # The active party has queued a new `event` to be processed later by `target`.
<enqueue @event TargetedTurnEvent>
/ # The active party is processing an internally-queued event for one of its own entities.
@dequeueInternal <dequeue-internal @event TargetedTurnEvent>
/ # The active party has scheduled an internally-queued event for one of its own entities.
@enqueueInternal <enqueue-internal @event TargetedTurnEvent>
/ <spawn @link bool @id ActorId>
/ <link
@parentActor ActorId
@childToParent protocol.Handle
@childActor ActorId
@parentToChild protocol.Handle>
/ @facetStart <facet-start @path [FacetId ...]>
/ @facetStop <facet-stop @path [FacetId ...] @reason FacetStopReason>
/ @linkedTaskStart <linked-task-start @taskName Name @id TaskId>
.
# An assertion or the body of a message: either a Preserves value, or
# some opaque system-internal value, represented according to the
# system concerned.
AssertionDescription =
/ <value @value any>
/ <opaque @description any>
.
FacetStopReason =
/ @explicitAction =explicit-action
/ =inert
/ @parentStopping =parent-stopping
/ @actorStopping =actor-stopping
.
Target = <entity @actor ActorId @facet FacetId @oid Oid> .
# For the future: consider including information about `protocol`-level `Turn`s etc sent to
# peers over e.g. Websockets or TCP/IP, allowing cross-correlation of traces from different
# processes and implementations with each other to form a large overall picture.
.

File diff suppressed because it is too large

View File

@ -4,7 +4,7 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::collections::btree_map::{Iter, Keys, Entry}; use std::collections::btree_map::{Iter, Keys, Entry};
use std::iter::{FromIterator, IntoIterator}; use std::iter::FromIterator;
/// Element counts in [`BTreeBag`]s are 32-bit signed integers. /// Element counts in [`BTreeBag`]s are 32-bit signed integers.
pub type Count = i32; pub type Count = i32;

View File

@ -12,7 +12,6 @@ use super::language;
use super::skeleton; use super::skeleton;
use super::actor::*; use super::actor::*;
use super::schemas::dataspace::*; use super::schemas::dataspace::*;
use super::schemas::dataspace::_Any;
use preserves::value::Map; use preserves::value::Map;
use preserves_schema::Codec; use preserves_schema::Codec;
@ -20,7 +19,7 @@ use preserves_schema::Codec;
/// A Dataspace object (entity). /// A Dataspace object (entity).
#[derive(Debug)] #[derive(Debug)]
pub struct Dataspace { pub struct Dataspace {
pub name: tracing::Span, pub name: Name,
/// Index over assertions placed in the dataspace; used to /// Index over assertions placed in the dataspace; used to
/// efficiently route assertion changes and messages to observers. /// efficiently route assertion changes and messages to observers.
pub index: skeleton::Index, pub index: skeleton::Index,
@ -31,10 +30,9 @@ pub struct Dataspace {
impl Dataspace { impl Dataspace {
/// Construct a new, empty dataspace. /// Construct a new, empty dataspace.
pub fn new(name: Option<tracing::Span>) -> Self { pub fn new(name: Name) -> Self {
Self { Self {
name: name.map_or_else(|| crate::name!("anonymous_dataspace"), name,
|n| crate::name!(parent: &n, "dataspace")),
index: skeleton::Index::new(), index: skeleton::Index::new(),
handle_map: Map::new(), handle_map: Map::new(),
} }
@ -62,10 +60,8 @@ impl Dataspace {
impl Entity<_Any> for Dataspace { impl Entity<_Any> for Dataspace {
fn assert(&mut self, t: &mut Activation, a: _Any, h: Handle) -> ActorResult { fn assert(&mut self, t: &mut Activation, a: _Any, h: Handle) -> ActorResult {
let _guard = self.name.enter();
let is_new = self.index.insert(t, &a); let is_new = self.index.insert(t, &a);
tracing::trace!(assertion = ?a, handle = ?h, ?is_new, "assert"); tracing::trace!(dataspace = ?self.name, assertion = ?a, handle = ?h, ?is_new, "assert");
if is_new { if is_new {
if let Ok(o) = language().parse::<Observe>(&a) { if let Ok(o) = language().parse::<Observe>(&a) {
@ -78,13 +74,11 @@ impl Entity<_Any> for Dataspace {
} }
fn retract(&mut self, t: &mut Activation, h: Handle) -> ActorResult { fn retract(&mut self, t: &mut Activation, h: Handle) -> ActorResult {
let _guard = self.name.enter();
match self.handle_map.remove(&h) { match self.handle_map.remove(&h) {
None => tracing::warn!(handle = ?h, "retract of unknown handle"), None => tracing::warn!(dataspace = ?self.name, handle = ?h, "retract of unknown handle"),
Some(a) => { Some(a) => {
let is_last = self.index.remove(t, &a); let is_last = self.index.remove(t, &a);
tracing::trace!(assertion = ?a, handle = ?h, ?is_last, "retract"); tracing::trace!(dataspace = ?self.name, assertion = ?a, handle = ?h, ?is_last, "retract");
if is_last { if is_last {
if let Ok(o) = language().parse::<Observe>(&a) { if let Ok(o) = language().parse::<Observe>(&a) {
@ -97,9 +91,7 @@ impl Entity<_Any> for Dataspace {
} }
fn message(&mut self, t: &mut Activation, m: _Any) -> ActorResult { fn message(&mut self, t: &mut Activation, m: _Any) -> ActorResult {
let _guard = self.name.enter(); tracing::trace!(dataspace = ?self.name, body = ?m, "message");
tracing::trace!(body = ?m, "message");
self.index.send(t, &m); self.index.send(t, &m);
Ok(()) Ok(())
} }

View File

@ -17,7 +17,7 @@ where
Fa: 'static + Send + FnMut(&mut E, &mut Activation, M) -> DuringResult<E>, Fa: 'static + Send + FnMut(&mut E, &mut Activation, M) -> DuringResult<E>,
Fm: 'static + Send + FnMut(&mut E, &mut Activation, M) -> ActorResult, Fm: 'static + Send + FnMut(&mut E, &mut Activation, M) -> ActorResult,
Fs: 'static + Send + FnMut(&mut E, &mut Activation) -> ActorResult, Fs: 'static + Send + FnMut(&mut E, &mut Activation) -> ActorResult,
Fx: 'static + Send + FnMut(&mut E, &mut Activation, &Arc<ActorResult>) -> ActorResult, Fx: 'static + Send + FnMut(&mut E, &mut Activation, &Arc<ExitStatus>),
{ {
state: E, state: E,
assertion_handler: Option<Fa>, assertion_handler: Option<Fa>,
@ -54,7 +54,7 @@ pub fn entity<M: 'static + Send, E>(
fn (&mut E, &mut Activation, M) -> DuringResult<E>, fn (&mut E, &mut Activation, M) -> DuringResult<E>,
fn (&mut E, &mut Activation, M) -> ActorResult, fn (&mut E, &mut Activation, M) -> ActorResult,
fn (&mut E, &mut Activation) -> ActorResult, fn (&mut E, &mut Activation) -> ActorResult,
fn (&mut E, &mut Activation, &Arc<ActorResult>) -> ActorResult> fn (&mut E, &mut Activation, &Arc<ExitStatus>)>
where where
E: 'static + Send, E: 'static + Send,
{ {
@ -68,7 +68,7 @@ where
Fa: 'static + Send + FnMut(&mut E, &mut Activation, M) -> DuringResult<E>, Fa: 'static + Send + FnMut(&mut E, &mut Activation, M) -> DuringResult<E>,
Fm: 'static + Send + FnMut(&mut E, &mut Activation, M) -> ActorResult, Fm: 'static + Send + FnMut(&mut E, &mut Activation, M) -> ActorResult,
Fs: 'static + Send + FnMut(&mut E, &mut Activation) -> ActorResult, Fs: 'static + Send + FnMut(&mut E, &mut Activation) -> ActorResult,
Fx: 'static + Send + FnMut(&mut E, &mut Activation, &Arc<ActorResult>) -> ActorResult, Fx: 'static + Send + FnMut(&mut E, &mut Activation, &Arc<ExitStatus>),
{ {
pub fn new( pub fn new(
state: E, state: E,
@ -154,7 +154,7 @@ where
pub fn on_exit<Fx1>(self, exit_handler: Fx1) -> DuringEntity<M, E, Fa, Fm, Fs, Fx1> pub fn on_exit<Fx1>(self, exit_handler: Fx1) -> DuringEntity<M, E, Fa, Fm, Fs, Fx1>
where where
Fx1: 'static + Send + FnMut(&mut E, &mut Activation, &Arc<ActorResult>) -> ActorResult, Fx1: 'static + Send + FnMut(&mut E, &mut Activation, &Arc<ExitStatus>),
{ {
DuringEntity { DuringEntity {
state: self.state, state: self.state,
@ -175,7 +175,7 @@ where
t.on_stop_notify(&r); t.on_stop_notify(&r);
} }
if should_register_exit_hook { if should_register_exit_hook {
t.state.add_exit_hook(&r); t.add_exit_hook(&r);
} }
r r
} }
@ -187,7 +187,7 @@ where
Fa: 'static + Send + FnMut(&mut E, &mut Activation, AnyValue) -> DuringResult<E>, Fa: 'static + Send + FnMut(&mut E, &mut Activation, AnyValue) -> DuringResult<E>,
Fm: 'static + Send + FnMut(&mut E, &mut Activation, AnyValue) -> ActorResult, Fm: 'static + Send + FnMut(&mut E, &mut Activation, AnyValue) -> ActorResult,
Fs: 'static + Send + FnMut(&mut E, &mut Activation) -> ActorResult, Fs: 'static + Send + FnMut(&mut E, &mut Activation) -> ActorResult,
Fx: 'static + Send + FnMut(&mut E, &mut Activation, &Arc<ActorResult>) -> ActorResult, Fx: 'static + Send + FnMut(&mut E, &mut Activation, &Arc<ExitStatus>),
{ {
pub fn create_cap(self, t: &mut Activation) -> Arc<Cap> pub fn create_cap(self, t: &mut Activation) -> Arc<Cap>
{ {
@ -202,7 +202,7 @@ where
Fa: 'static + Send + FnMut(&mut E, &mut Activation, M) -> DuringResult<E>, Fa: 'static + Send + FnMut(&mut E, &mut Activation, M) -> DuringResult<E>,
Fm: 'static + Send + FnMut(&mut E, &mut Activation, M) -> ActorResult, Fm: 'static + Send + FnMut(&mut E, &mut Activation, M) -> ActorResult,
Fs: 'static + Send + FnMut(&mut E, &mut Activation) -> ActorResult, Fs: 'static + Send + FnMut(&mut E, &mut Activation) -> ActorResult,
Fx: 'static + Send + FnMut(&mut E, &mut Activation, &Arc<ActorResult>) -> ActorResult, Fx: 'static + Send + FnMut(&mut E, &mut Activation, &Arc<ExitStatus>),
{ {
fn assert(&mut self, t: &mut Activation, a: M, h: Handle) -> ActorResult { fn assert(&mut self, t: &mut Activation, a: M, h: Handle) -> ActorResult {
match &mut self.assertion_handler { match &mut self.assertion_handler {
@ -232,10 +232,9 @@ where
} }
} }
fn exit_hook(&mut self, t: &mut Activation, exit_status: &Arc<ActorResult>) -> ActorResult { fn exit_hook(&mut self, t: &mut Activation, exit_status: &Arc<ExitStatus>) {
match &mut self.exit_handler { if let Some(handler) = &mut self.exit_handler {
Some(handler) => handler(&mut self.state, t, exit_status), handler(&mut self.state, t, exit_status);
None => Ok(()),
} }
} }
} }

View File

@ -73,3 +73,21 @@ impl From<preserves::error::Error> for Error {
error(&format!("{}", v), AnyValue::new(false)) error(&format!("{}", v), AnyValue::new(false))
} }
} }
impl From<Box<dyn std::error::Error>> for Error {
fn from(v: Box<dyn std::error::Error>) -> Self {
match v.downcast::<Error>() {
Ok(e) => *e,
Err(v) => error(&format!("{}", v), AnyValue::new(false)),
}
}
}
impl From<Box<dyn std::error::Error + Send + Sync + 'static>> for Error {
fn from(v: Box<dyn std::error::Error + Send + Sync + 'static>) -> Self {
match v.downcast::<Error>() {
Ok(e) => *e,
Err(v) => error(&format!("{}", v), AnyValue::new(false)),
}
}
}

View File

@ -1,4 +1,5 @@
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
#![feature(min_specialization)]
#[doc(inline)] #[doc(inline)]
pub use preserves::value; pub use preserves::value;
@ -29,14 +30,54 @@ pub mod schemas {
pub mod skeleton; pub mod skeleton;
pub mod sturdy; pub mod sturdy;
pub mod tracer; pub mod trace;
#[doc(inline)] #[doc(inline)]
pub use during::entity; pub use during::entity;
#[doc(inline)] /// Sets up [`tracing`] logging in a reasonable way.
pub use tracer::convenient_logging; ///
/// Useful at the top of `main` functions.
pub fn convenient_logging() -> actor::ActorResult {
let filter = match std::env::var(tracing_subscriber::filter::EnvFilter::DEFAULT_ENV) {
Err(std::env::VarError::NotPresent) =>
tracing_subscriber::filter::EnvFilter::default()
.add_directive(tracing_subscriber::filter::LevelFilter::INFO.into()),
_ =>
tracing_subscriber::filter::EnvFilter::try_from_default_env()?,
};
let subscriber = tracing_subscriber::fmt()
.with_ansi(true)
.with_thread_ids(true)
.with_max_level(tracing::Level::TRACE)
.with_env_filter(filter)
.with_writer(std::io::stderr)
.finish();
tracing::subscriber::set_global_default(subscriber)
.expect("Could not set tracing global subscriber");
Ok(())
}
/// Retrieve the version of the Syndicate crate.
pub fn syndicate_package_version() -> &'static str {
env!("CARGO_PKG_VERSION")
}
preserves_schema::define_language!(language(): Language<actor::AnyValue> { preserves_schema::define_language!(language(): Language<actor::AnyValue> {
syndicate: schemas::Language, syndicate: schemas::Language,
}); });
#[cfg(test)]
mod protocol_test {
use crate::*;
use preserves::value::{BytesBinarySource, BinarySource, IOValueDomainCodec, ViaCodec, IOValue};
use preserves_schema::Deserialize;
#[test] fn decode_sync() {
let input_str = "[[2 <S #:[0 11]>]]";
let mut src = BytesBinarySource::new(input_str.as_bytes());
let mut r = src.text::<IOValue, _>(ViaCodec::new(IOValueDomainCodec));
let packet: schemas::protocol::Packet<IOValue> = schemas::protocol::Packet::deserialize(&mut r).unwrap();
println!("{:?}", packet);
}
}

View File

@ -1,3 +1,5 @@
use std::sync::Arc;
use crate::schemas::dataspace_patterns::*; use crate::schemas::dataspace_patterns::*;
use super::language; use super::language;
@ -8,23 +10,25 @@ use preserves::value::Record;
use preserves::value::Value; use preserves::value::Value;
use preserves_schema::Codec; use preserves_schema::Codec;
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)] pub type PathStep = _Any;
pub enum PathStep {
Index(usize),
Key(_Any),
}
pub type Path = Vec<PathStep>; pub type Path = Vec<PathStep>;
pub type Paths = Vec<Path>; pub type Paths = Vec<Path>;
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct ConstantPositions {
pub with_values: Paths,
pub required_to_exist: Paths,
}
struct Analyzer { struct Analyzer {
pub const_paths: Paths, pub const_paths: Paths,
pub const_values: Vec<_Any>, pub const_values: Vec<_Any>,
pub checked_paths: Paths,
pub capture_paths: Paths, pub capture_paths: Paths,
} }
pub struct PatternAnalysis { pub struct PatternAnalysis {
pub const_paths: Paths, pub const_positions: Arc<ConstantPositions>,
pub const_values: _Any, pub const_values: _Any,
pub capture_paths: Paths, pub capture_paths: Paths,
} }
@ -38,11 +42,15 @@ impl PatternAnalysis {
let mut analyzer = Analyzer { let mut analyzer = Analyzer {
const_paths: Vec::new(), const_paths: Vec::new(),
const_values: Vec::new(), const_values: Vec::new(),
checked_paths: Vec::new(),
capture_paths: Vec::new(), capture_paths: Vec::new(),
}; };
analyzer.walk(&mut Vec::new(), p); analyzer.walk(&mut Vec::new(), p);
PatternAnalysis { PatternAnalysis {
const_paths: analyzer.const_paths, const_positions: Arc::new(ConstantPositions {
with_values: analyzer.const_paths,
required_to_exist: analyzer.checked_paths,
}),
const_values: _Any::new(analyzer.const_values), const_values: _Any::new(analyzer.const_values),
capture_paths: analyzer.capture_paths, capture_paths: analyzer.capture_paths,
} }
@ -58,34 +66,21 @@ impl Analyzer {
fn walk(&mut self, path: &mut Path, p: &Pattern) { fn walk(&mut self, path: &mut Path, p: &Pattern) {
match p { match p {
Pattern::DCompound(b) => match &**b { Pattern::Group { entries, .. } => {
DCompound::Rec { fields, .. } => { for (k, p) in entries {
for (i, p) in fields.iter().enumerate() { self.walk_step(path, k.clone(), p)
self.walk_step(path, PathStep::Index(i), p);
}
}
DCompound::Arr { items, .. } => {
for (i, p) in items.iter().enumerate() {
self.walk_step(path, PathStep::Index(i), p);
}
}
DCompound::Dict { entries, .. } => {
for (k, p) in entries {
self.walk_step(path, PathStep::Key(k.clone()), p);
}
} }
} }
Pattern::DBind(b) => { Pattern::Bind { pattern } => {
let DBind { pattern, .. } = &**b;
self.capture_paths.push(path.clone()); self.capture_paths.push(path.clone());
self.walk(path, pattern) self.walk(path, &**pattern);
} }
Pattern::DDiscard(_) => Pattern::Discard => {
(), self.checked_paths.push(path.clone());
Pattern::DLit(b) => { }
let DLit { value } = &**b; Pattern::Lit { value } => {
self.const_paths.push(path.clone()); self.const_paths.push(path.clone());
self.const_values.push(language().unparse(value)); self.const_values.push(language().unparse(&**value));
} }
} }
} }
@ -109,52 +104,47 @@ impl PatternMatcher {
} }
} }
fn run_seq<'a, F: 'a + Fn(usize) -> &'a _Any>(&mut self, entries: &Map<_Any, Pattern<_Any>>, values: F) -> bool {
for (k, p) in entries {
match k.value().as_usize() {
None => return false,
Some(i) => if !self.run(p, values(i)) {
return false;
}
}
}
true
}
fn run(&mut self, pattern: &Pattern<_Any>, value: &_Any) -> bool { fn run(&mut self, pattern: &Pattern<_Any>, value: &_Any) -> bool {
match pattern { match pattern {
Pattern::DDiscard(_) => true, Pattern::Discard => true,
Pattern::DBind(b) => { Pattern::Bind { pattern } => {
self.captures.push(value.clone()); self.captures.push(value.clone());
self.run(&b.pattern, value) self.run(&**pattern, value)
} }
Pattern::DLit(b) => value == &language().unparse(&b.value), Pattern::Lit { value: expected } => value == &language().unparse(&**expected),
Pattern::DCompound(b) => match &**b { Pattern::Group { type_, entries } => match &**type_ {
DCompound::Rec { label, fields } => { GroupType::Rec { label } => {
match value.value().as_record(Some(fields.len())) { match value.value().as_record(None) {
None => false, None => false,
Some(r) => { Some(r) =>
if r.label() != label { r.label() == label &&
return false; self.run_seq(entries, |i| &r.fields()[i])
}
for (i, p) in fields.iter().enumerate() {
if !self.run(p, &r.fields()[i]) {
return false;
}
}
true
}
} }
} }
DCompound::Arr { items } => { GroupType::Arr => {
match value.value().as_sequence() { match value.value().as_sequence() {
None => false, None => false,
Some(vs) => { Some(vs) =>
if vs.len() != items.len() { self.run_seq(entries, |i| &vs[i])
return false;
}
for (i, p) in items.iter().enumerate() {
if !self.run(p, &vs[i]) {
return false;
}
}
true
}
} }
} }
DCompound::Dict { entries: expected_entries } => { GroupType::Dict => {
match value.value().as_dictionary() { match value.value().as_dictionary() {
None => false, None => false,
Some(actual_entries) => { Some(actual_entries) => {
for (k, p) in expected_entries.iter() { for (k, p) in entries {
if !actual_entries.get(k).map(|v| self.run(p, v)).unwrap_or(false) { if !actual_entries.get(k).map(|v| self.run(p, v)).unwrap_or(false) {
return false; return false;
} }
@ -170,42 +160,68 @@ impl PatternMatcher {
pub fn lift_literal(v: &_Any) -> Pattern { pub fn lift_literal(v: &_Any) -> Pattern {
match v.value() { match v.value() {
Value::Record(r) => Pattern::DCompound(Box::new(DCompound::Rec { Value::Record(r) => Pattern::Group {
label: r.label().clone(), type_: Box::new(GroupType::Rec { label: r.label().clone() }),
fields: r.fields().iter().map(lift_literal).collect(), entries: r.fields().iter().enumerate()
})), .map(|(i, v)| (_Any::new(i), lift_literal(v)))
Value::Sequence(items) => Pattern::DCompound(Box::new(DCompound::Arr { .collect(),
items: items.iter().map(lift_literal).collect(), },
})), Value::Sequence(items) => Pattern::Group {
type_: Box::new(GroupType::Arr),
entries: items.iter().enumerate()
.map(|(i, v)| (_Any::new(i), lift_literal(v)))
.collect(),
},
Value::Set(_members) => panic!("Cannot express literal set in pattern"), Value::Set(_members) => panic!("Cannot express literal set in pattern"),
Value::Dictionary(entries) => Pattern::DCompound(Box::new(DCompound::Dict { Value::Dictionary(entries) => Pattern::Group {
entries: entries.iter().map(|(k, v)| (k.clone(), lift_literal(v))).collect(), type_: Box::new(GroupType::Dict),
})), entries: entries.iter()
_other => Pattern::DLit(Box::new(DLit { .map(|(k, v)| (k.clone(), lift_literal(v)))
value: language().parse(v).expect("Non-compound datum can be converted to AnyAtom"), .collect(),
})), },
_other => Pattern::Lit {
value: Box::new(language().parse(v).expect("Non-compound datum can be converted to AnyAtom")),
},
} }
} }
const DISCARD: Pattern = Pattern::Discard;
pub fn pattern_seq_from_dictionary(entries: &Map<_Any, Pattern>) -> Option<Vec<&Pattern>> {
let mut max_k: Option<usize> = None;
for k in entries.keys() {
max_k = max_k.max(Some(k.value().as_usize()?));
}
let mut seq = vec![];
if let Some(max_k) = max_k {
seq.reserve(max_k + 1);
for i in 0..=max_k {
seq.push(entries.get(&_Any::new(i)).unwrap_or(&DISCARD));
}
}
return Some(seq);
}
fn drop_literal_entries_seq(mut seq: Vec<_Any>, entries: &Map<_Any, Pattern>) -> Option<Vec<_Any>> {
for p in pattern_seq_from_dictionary(entries)?.into_iter() {
seq.push(drop_literal(p)?);
}
Some(seq)
}
pub fn drop_literal(p: &Pattern) -> Option<_Any> { pub fn drop_literal(p: &Pattern) -> Option<_Any> {
match p { match p {
Pattern::DCompound(b) => match &**b { Pattern::Group { type_, entries } => match &**type_ {
DCompound::Rec { label, fields } => { GroupType::Rec { label } =>
let mut r = vec![label.clone()]; Some(Value::Record(Record(drop_literal_entries_seq(vec![label.clone()], entries)?)).wrap()),
for f in fields.iter() { GroupType::Arr =>
r.push(drop_literal(f)?); Some(Value::Sequence(drop_literal_entries_seq(vec![], entries)?).wrap()),
} GroupType::Dict =>
Some(Value::Record(Record(r)).wrap()) Some(Value::Dictionary(entries.iter()
} .map(|(k, p)| Some((k.clone(), drop_literal(p)?)))
DCompound::Arr { items } => .collect::<Option<Map<_Any, _Any>>>()?).wrap()),
Some(Value::Sequence(items.iter().map(drop_literal)
.collect::<Option<Vec<_Any>>>()?).wrap()),
DCompound::Dict { entries } =>
Some(Value::Dictionary(entries.iter()
.map(|(k, p)| Some((k.clone(), drop_literal(p)?)))
.collect::<Option<Map<_Any, _Any>>>()?).wrap()),
}, },
Pattern::DLit(b) => Some(language().unparse(&b.value)), Pattern::Lit { value } => Some(language().unparse(&**value)),
_ => None, _ => None,
} }
} }

View File

@ -1,6 +1,7 @@
use bytes::Buf; use bytes::Buf;
use bytes::BytesMut; use bytes::BytesMut;
use crate::Language;
use crate::language; use crate::language;
use crate::actor::*; use crate::actor::*;
use crate::during; use crate::during;
@ -9,13 +10,14 @@ use crate::error::error;
use crate::schemas::gatekeeper; use crate::schemas::gatekeeper;
use crate::schemas::protocol as P; use crate::schemas::protocol as P;
use crate::schemas::sturdy; use crate::schemas::sturdy;
use crate::trace;
use futures::Sink; use futures::Sink;
use futures::SinkExt; use futures::SinkExt;
use futures::Stream; use futures::Stream;
use futures::StreamExt; use futures::StreamExt;
use parking_lot::Mutex; pub use parking_lot::Mutex;
use preserves::error::Error as PreservesError; use preserves::error::Error as PreservesError;
use preserves::error::is_eof_io_error; use preserves::error::is_eof_io_error;
@ -27,6 +29,7 @@ use preserves::value::Map;
use preserves::value::NestedValue; use preserves::value::NestedValue;
use preserves::value::NoEmbeddedDomainCodec; use preserves::value::NoEmbeddedDomainCodec;
use preserves::value::PackedWriter; use preserves::value::PackedWriter;
use preserves::value::Set;
use preserves::value::TextWriter; use preserves::value::TextWriter;
use preserves::value::ViaCodec; use preserves::value::ViaCodec;
use preserves::value::Writer; use preserves::value::Writer;
@ -35,6 +38,7 @@ use preserves::value::signed_integer::SignedInteger;
use preserves_schema::Codec; use preserves_schema::Codec;
use preserves_schema::Deserialize; use preserves_schema::Deserialize;
use preserves_schema::ParseError; use preserves_schema::ParseError;
use preserves_schema::support::Unparse;
use std::io; use std::io;
use std::pin::Pin; use std::pin::Pin;
@ -73,6 +77,7 @@ struct Membranes {
exported: Membrane, exported: Membrane,
imported: Membrane, imported: Membrane,
next_export_oid: usize, next_export_oid: usize,
reimported_attenuations: Map<sturdy::Oid, Set<Arc<Cap>>>,
} }
pub enum Input { pub enum Input {
@ -88,6 +93,7 @@ pub enum Output {
type TunnelRelayRef = Arc<Mutex<Option<TunnelRelay>>>; type TunnelRelayRef = Arc<Mutex<Option<TunnelRelay>>>;
// There are other kinds of relay. This one has exactly two participants connected to each other. // There are other kinds of relay. This one has exactly two participants connected to each other.
#[derive(Debug)]
pub struct TunnelRelay pub struct TunnelRelay
{ {
self_ref: TunnelRelayRef, self_ref: TunnelRelayRef,
@ -95,7 +101,6 @@ pub struct TunnelRelay
outbound_assertions: Map<P::Handle, Vec<Arc<WireSymbol>>>, outbound_assertions: Map<P::Handle, Vec<Arc<WireSymbol>>>,
membranes: Membranes, membranes: Membranes,
pending_outbound: Vec<P::TurnEvent<AnyValue>>, pending_outbound: Vec<P::TurnEvent<AnyValue>>,
self_entity: Arc<Ref<()>>,
output: UnboundedSender<LoanedItem<Vec<u8>>>, output: UnboundedSender<LoanedItem<Vec<u8>>>,
output_text: bool, output_text: bool,
} }
@ -109,8 +114,8 @@ struct TunnelRefEntity {
relay_ref: TunnelRelayRef, relay_ref: TunnelRelayRef,
} }
struct ActivatedMembranes<'a, 'activation, 'm> { struct ActivatedMembranes<'a, 'm> {
turn: &'a mut Activation<'activation>, turn: &'a mut Activation,
tr_ref: &'m TunnelRelayRef, tr_ref: &'m TunnelRelayRef,
membranes: &'m mut Membranes, membranes: &'m mut Membranes,
} }
@ -169,36 +174,46 @@ impl Membrane {
ws ws
} }
fn remove(&mut self, ws: &Arc<WireSymbol>) {
self.oid_map.remove(&ws.oid);
self.ref_map.remove(&ws.obj);
}
fn insert_inert_entity(&mut self, t: &mut Activation, oid: sturdy::Oid) -> Arc<WireSymbol> { fn insert_inert_entity(&mut self, t: &mut Activation, oid: sturdy::Oid) -> Arc<WireSymbol> {
self.insert(oid, Cap::new(&t.inert_entity())) self.insert(oid, Cap::new(&t.inert_entity()))
} }
} }
pub fn connect_stream<I, O, E, F>( pub fn connect_stream<I, O, Step, E, F>(
t: &mut Activation, t: &mut Activation,
i: I, i: I,
o: O, o: O,
output_text: bool, output_text: bool,
sturdyref: sturdy::SturdyRef, step: Step,
initial_state: E, initial_state: E,
mut f: F, mut f: F,
) where ) -> ActorResult where
I: 'static + Send + AsyncRead, I: 'static + Send + AsyncRead,
O: 'static + Send + AsyncWrite, O: 'static + Send + AsyncWrite,
Step: for<'a> Unparse<&'a Language<AnyValue>, AnyValue>,
E: 'static + Send, E: 'static + Send,
F: 'static + Send + FnMut(&mut E, &mut Activation, Arc<Cap>) -> during::DuringResult<E> F: 'static + Send + FnMut(&mut E, &mut Activation, Arc<Cap>) -> during::DuringResult<E>
{ {
let i = Input::Bytes(Box::pin(i)); let i = Input::Bytes(Box::pin(i));
let o = Output::Bytes(Box::pin(o)); let o = Output::Bytes(Box::pin(o));
let gatekeeper = TunnelRelay::run(t, i, o, None, Some(sturdy::Oid(0.into())), output_text).unwrap(); let gatekeeper = TunnelRelay::run(t, i, o, None, Some(sturdy::Oid(0.into())), output_text).unwrap();
let main_entity = t.create(during::entity(initial_state).on_asserted(move |state, t, a: AnyValue| { let main_entity = t.create(during::entity(initial_state).on_asserted(move |state, t, a: gatekeeper::Resolved| {
let denotation = a.value().to_embedded()?; match a {
f(state, t, Arc::clone(denotation)) gatekeeper::Resolved::Accepted { responder_session } => f(state, t, responder_session),
gatekeeper::Resolved::Rejected(r) => Err(error("Resolve rejected", r.detail))?,
}
})); }));
gatekeeper.assert(t, language(), &gatekeeper::Resolve { let step = language().parse::<gatekeeper::Step>(&language().unparse(&step))?;
sturdyref, gatekeeper.assert(t, language(), &gatekeeper::Resolve::<AnyValue> {
observer: Cap::new(&main_entity), step,
observer: Cap::guard(Language::arc(), main_entity),
}); });
Ok(())
} }
impl std::fmt::Debug for Membrane { impl std::fmt::Debug for Membrane {
@ -213,7 +228,57 @@ impl std::fmt::Debug for Membrane {
macro_rules! dump_membranes { ($e:expr) => { tracing::trace!("membranes: {:#?}", $e); } } macro_rules! dump_membranes { ($e:expr) => { tracing::trace!("membranes: {:#?}", $e); } }
// macro_rules! dump_membranes { ($e:expr) => { (); } } // macro_rules! dump_membranes { ($e:expr) => { (); } }
/// Main entry point for stdio-based Syndicate services.
pub async fn stdio_service<F>(f: F) -> !
where
F: 'static + Send + FnOnce(&mut Activation) -> Result<Arc<Cap>, ActorError>
{
let result = Actor::top(None, move |t| {
let service = f(t)?;
Ok(TunnelRelay::stdio_service(t, service))
}).await;
// Because we're currently using tokio::io::stdin(), which can prevent shutdown of the
// runtime, this routine uses std::process::exit directly as a special case. It's a
// stopgap: eventually, we'd like to do things Properly, as indicated in the comment
// attached (at the time of writing) to tokio::io::stdin(), which reads in part:
//
// This handle is best used for non-interactive uses, such as when a file
// is piped into the application. For technical reasons, `stdin` is
// implemented by using an ordinary blocking read on a separate thread, and
// it is impossible to cancel that read. This can make shutdown of the
// runtime hang until the user presses enter.
//
// For interactive uses, it is recommended to spawn a thread dedicated to
// user input and use blocking IO directly in that thread.
//
// TODO: Revisit this.
match result {
Ok(Ok(())) => {
std::process::exit(0);
}
Ok(Err(e)) => {
tracing::error!("Main stdio_service actor failed: {}", e);
std::process::exit(1);
},
Err(e) => {
tracing::error!("Join of main stdio_service actor failed: {}", e);
std::process::exit(2);
}
}
}
impl TunnelRelay { impl TunnelRelay {
pub fn stdio_service(t: &mut Activation, service: Arc<Cap>) -> () {
TunnelRelay::run(t,
Input::Bytes(Box::pin(tokio::io::stdin())),
Output::Bytes(Box::pin(tokio::io::stdout())),
Some(service),
None,
false);
}
pub fn run( pub fn run(
t: &mut Activation, t: &mut Activation,
i: Input, i: Input,
@ -222,6 +287,20 @@ impl TunnelRelay {
initial_oid: Option<sturdy::Oid>, initial_oid: Option<sturdy::Oid>,
output_text: bool, output_text: bool,
) -> Option<Arc<Cap>> { ) -> Option<Arc<Cap>> {
let (result, tr_ref, output_rx) = TunnelRelay::_run(t, initial_ref, initial_oid, output_text);
t.linked_task(Some(AnyValue::symbol("writer")),
output_loop(o, output_rx));
t.linked_task(Some(AnyValue::symbol("reader")),
input_loop(t.trace_collector(), t.facet_ref(), i, tr_ref));
result
}
pub fn _run(
t: &mut Activation,
initial_ref: Option<Arc<Cap>>,
initial_oid: Option<sturdy::Oid>,
output_text: bool,
) -> (Option<Arc<Cap>>, Arc<Mutex<Option<TunnelRelay>>>, UnboundedReceiver<LoanedItem<Vec<u8>>>) {
let (output_tx, output_rx) = unbounded_channel(); let (output_tx, output_rx) = unbounded_channel();
let tr_ref = Arc::new(Mutex::new(None)); let tr_ref = Arc::new(Mutex::new(None));
let self_entity = t.create(TunnelRefEntity { let self_entity = t.create(TunnelRefEntity {
@ -237,9 +316,9 @@ impl TunnelRelay {
exported: Membrane::new(WireSymbolSide::Exported), exported: Membrane::new(WireSymbolSide::Exported),
imported: Membrane::new(WireSymbolSide::Imported), imported: Membrane::new(WireSymbolSide::Imported),
next_export_oid: 0, next_export_oid: 0,
reimported_attenuations: Map::new(),
}, },
pending_outbound: Vec::new(), pending_outbound: Vec::new(),
self_entity: self_entity.clone(),
}; };
if let Some(ir) = initial_ref { if let Some(ir) = initial_ref {
tr.membranes.export_ref(ir).inc_ref(); tr.membranes.export_ref(ir).inc_ref();
@ -248,10 +327,8 @@ impl TunnelRelay {
|io| Arc::clone(&tr.membranes.import_oid(t, &tr_ref, io).inc_ref().obj)); |io| Arc::clone(&tr.membranes.import_oid(t, &tr_ref, io).inc_ref().obj));
dump_membranes!(tr.membranes); dump_membranes!(tr.membranes);
*tr_ref.lock() = Some(tr); *tr_ref.lock() = Some(tr);
t.linked_task(crate::name!("writer"), output_loop(o, output_rx)); t.add_exit_hook(&self_entity);
t.linked_task(crate::name!("reader"), input_loop(t.facet.clone(), i, tr_ref)); (result, tr_ref, output_rx)
t.state.add_exit_hook(&self_entity);
result
} }
fn deserialize_one(&mut self, t: &mut Activation, bs: &[u8]) -> (Result<P::Packet<AnyValue>, ParseError>, usize) { fn deserialize_one(&mut self, t: &mut Activation, bs: &[u8]) -> (Result<P::Packet<AnyValue>, ParseError>, usize) {
@ -278,13 +355,13 @@ impl TunnelRelay {
} }
} }
fn handle_inbound_datagram(&mut self, t: &mut Activation, bs: &[u8]) -> ActorResult { pub fn handle_inbound_datagram(&mut self, t: &mut Activation, bs: &[u8]) -> ActorResult {
tracing::trace!(bytes = ?bs, "inbound datagram"); tracing::trace!(bytes = ?bs, "inbound datagram");
let item = self.deserialize_one(t, bs).0?; let item = self.deserialize_one(t, bs).0?;
self.handle_inbound_packet(t, item) self.handle_inbound_packet(t, item)
} }
fn handle_inbound_stream(&mut self, t: &mut Activation, buf: &mut BytesMut) -> ActorResult { pub fn handle_inbound_stream(&mut self, t: &mut Activation, buf: &mut BytesMut) -> ActorResult {
loop { loop {
tracing::trace!(buffer = ?buf, "inbound stream"); tracing::trace!(buffer = ?buf, "inbound stream");
let (result, count) = self.deserialize_one(t, buf); let (result, count) = self.deserialize_one(t, buf);
@ -300,15 +377,20 @@ impl TunnelRelay {
} }
} }
fn handle_inbound_packet(&mut self, t: &mut Activation, p: P::Packet<AnyValue>) -> ActorResult { pub fn handle_inbound_packet(&mut self, t: &mut Activation, p: P::Packet<AnyValue>) -> ActorResult {
tracing::debug!(packet = ?p, "-->"); tracing::debug!(packet = ?p, "-->");
match p { match p {
P::Packet::Extension(b) => {
let P::Extension { label, fields } = *b;
tracing::info!(?label, ?fields, "received Extension from peer");
Ok(())
}
P::Packet::Error(b) => { P::Packet::Error(b) => {
tracing::info!(message = ?b.message.clone(), tracing::info!(message = ?b.message.clone(),
detail = ?b.detail.clone(), detail = ?b.detail.clone(),
"received Error from peer"); "received Error from peer");
Err(*b) Err(*b)?
}, }
P::Packet::Turn(b) => { P::Packet::Turn(b) => {
let P::Turn(events) = *b; let P::Turn(events) = *b;
for P::TurnEvent { oid, event } in events { for P::TurnEvent { oid, event } in events {
@ -332,7 +414,7 @@ impl TunnelRelay {
&mut |r| Ok(pins.push(self.membranes.lookup_ref(r))))?; &mut |r| Ok(pins.push(self.membranes.lookup_ref(r))))?;
if let Some(local_handle) = target.assert(t, &(), &a) { if let Some(local_handle) = target.assert(t, &(), &a) {
if let Some(_) = self.inbound_assertions.insert(remote_handle, (local_handle, pins)) { if let Some(_) = self.inbound_assertions.insert(remote_handle, (local_handle, pins)) {
return Err(error("Assertion with duplicate handle", AnyValue::new(false))); return Err(error("Assertion with duplicate handle", AnyValue::new(false)))?;
} }
} else { } else {
self.membranes.release(pins); self.membranes.release(pins);
@ -342,7 +424,7 @@ impl TunnelRelay {
P::Event::Retract(b) => { P::Event::Retract(b) => {
let P::Retract { handle: remote_handle } = *b; let P::Retract { handle: remote_handle } = *b;
let (local_handle, previous_pins) = match self.inbound_assertions.remove(&remote_handle) { let (local_handle, previous_pins) = match self.inbound_assertions.remove(&remote_handle) {
None => return Err(error("Retraction of nonexistent handle", language().unparse(&remote_handle))), None => return Err(error("Retraction of nonexistent handle", language().unparse(&remote_handle)))?,
Some(wss) => wss, Some(wss) => wss,
}; };
self.membranes.release(previous_pins); self.membranes.release(previous_pins);
@ -389,12 +471,11 @@ impl TunnelRelay {
peer: Arc::clone(&peer), peer: Arc::clone(&peer),
pins, pins,
}); });
t.sync(&peer.underlying, k); target.sync(t, k);
} }
} }
} }
t.deliver(); t.commit()
Ok(())
} }
} }
} }
@ -466,6 +547,7 @@ impl TunnelRelay {
} else { } else {
PackedWriter::encode(&mut self.membranes, &item)? PackedWriter::encode(&mut self.membranes, &item)?
}; };
tracing::trace!(buffer = ?bs, "outbound bytes");
let _ = self.output.send(LoanedItem::new(account, cost, bs)); let _ = self.output.send(LoanedItem::new(account, cost, bs));
Ok(()) Ok(())
@ -473,7 +555,19 @@ impl TunnelRelay {
pub fn send_event(&mut self, t: &mut Activation, remote_oid: sturdy::Oid, event: P::Event<AnyValue>) -> ActorResult { pub fn send_event(&mut self, t: &mut Activation, remote_oid: sturdy::Oid, event: P::Event<AnyValue>) -> ActorResult {
if self.pending_outbound.is_empty() { if self.pending_outbound.is_empty() {
t.message_for_myself(&self.self_entity, ()); let self_ref = Arc::clone(&self.self_ref);
t.pre_commit(move |t| {
let mut g = self_ref.lock();
let tr = g.as_mut().expect("initialized");
let events = std::mem::take(&mut tr.pending_outbound);
tr.send_packet(&t.account(),
events.len(),
P::Packet::Turn(Box::new(P::Turn(events.clone()))))?;
for P::TurnEvent { oid, event } in events.into_iter() {
tr.outbound_event_bookkeeping(t, sturdy::Oid(oid.0), &event)?;
}
Ok(())
});
} }
self.pending_outbound.push(P::TurnEvent { oid: P::Oid(remote_oid.0), event }); self.pending_outbound.push(P::TurnEvent { oid: P::Oid(remote_oid.0), event });
Ok(()) Ok(())
@ -514,9 +608,10 @@ impl Membranes {
#[inline] #[inline]
fn release_one(&mut self, ws: Arc<WireSymbol>) -> bool { fn release_one(&mut self, ws: Arc<WireSymbol>) -> bool {
if ws.dec_ref() { if ws.dec_ref() {
let membrane = self.membrane(ws.side); if let WireSymbolSide::Exported = ws.side {
membrane.oid_map.remove(&ws.oid); self.reimported_attenuations.remove(&ws.oid);
membrane.ref_map.remove(&ws.obj); }
self.membrane(ws.side).remove(&ws);
true true
} else { } else {
false false
@ -537,38 +632,47 @@ impl Membranes {
src: &'src mut S, src: &'src mut S,
_read_annotations: bool, _read_annotations: bool,
) -> io::Result<Arc<Cap>> { ) -> io::Result<Arc<Cap>> {
let ws = match sturdy::WireRef::deserialize(&mut src.packed(NoEmbeddedDomainCodec))? { match sturdy::WireRef::deserialize(&mut src.packed(NoEmbeddedDomainCodec))? {
sturdy::WireRef::Mine{ oid: b } => { sturdy::WireRef::Mine{ oid: b } => {
let oid = *b; let oid = *b;
self.imported.oid_map.get(&oid).map(Arc::clone) let ws = self.imported.oid_map.get(&oid).map(Arc::clone)
.unwrap_or_else(|| self.import_oid(t, relay_ref, oid)) .unwrap_or_else(|| self.import_oid(t, relay_ref, oid));
Ok(Arc::clone(&ws.inc_ref().obj))
} }
sturdy::WireRef::Yours { oid: b, attenuation } => { sturdy::WireRef::Yours { oid: b, attenuation } => {
let oid = *b; let oid = *b;
let ws = self.exported.oid_map.get(&oid).map(Arc::clone)
.unwrap_or_else(|| self.exported.insert_inert_entity(t, oid.clone()));
if attenuation.is_empty() { if attenuation.is_empty() {
self.exported.oid_map.get(&oid).map(Arc::clone).unwrap_or_else( Ok(Arc::clone(&ws.inc_ref().obj))
|| self.exported.insert_inert_entity(t, oid))
} else { } else {
match self.exported.oid_map.get(&oid) { let attenuated_obj = ws.obj.attenuate(&attenuation)
None => self.exported.insert_inert_entity(t, oid), .map_err(|e| {
Some(ws) => { io::Error::new(
let attenuated_obj = ws.obj.attenuate(&sturdy::Attenuation(attenuation)) io::ErrorKind::InvalidInput,
.map_err(|e| { format!("Invalid capability attenuation: {:?}", e))
io::Error::new( })?;
io::ErrorKind::InvalidInput,
format!("Invalid capability attenuation: {:?}", e)) ws.inc_ref();
})?;
self.exported.insert(oid, attenuated_obj) let variations = self.reimported_attenuations.entry(oid).or_default();
match variations.get(&attenuated_obj) {
None => {
variations.insert(Arc::clone(&attenuated_obj));
self.exported.ref_map.insert(Arc::clone(&attenuated_obj), Arc::clone(&ws));
Ok(attenuated_obj)
} }
Some(existing) =>
Ok(Arc::clone(existing))
} }
} }
} }
}; }
Ok(Arc::clone(&ws.inc_ref().obj))
} }
} }
impl<'a, 'activation, 'm> DomainDecode<Arc<Cap>> for ActivatedMembranes<'a, 'activation, 'm> { impl<'a, 'm> DomainDecode<Arc<Cap>> for ActivatedMembranes<'a, 'm> {
fn decode_embedded<'de, 'src, S: BinarySource<'de>>( fn decode_embedded<'de, 'src, S: BinarySource<'de>>(
&mut self, &mut self,
src: &'src mut S, src: &'src mut S,
@ -614,22 +718,30 @@ impl DomainEncode<Arc<Cap>> for Membranes {
} }
async fn input_loop( async fn input_loop(
trace_collector: Option<trace::TraceCollector>,
facet: FacetRef, facet: FacetRef,
i: Input, i: Input,
relay: TunnelRelayRef, relay: TunnelRelayRef,
) -> Result<LinkedTaskTermination, Error> { ) -> Result<LinkedTaskTermination, Error> {
let account = Account::new(crate::name!("input-loop")); let account = Account::new(Some(AnyValue::symbol("input-loop")), trace_collector);
let cause = trace::TurnCause::external("input-loop");
match i { match i {
Input::Packets(mut src) => { Input::Packets(mut src) => {
loop { loop {
account.ensure_clear_funds().await; account.ensure_clear_funds().await;
match src.next().await { match src.next().await {
None => return Ok(LinkedTaskTermination::Normal), None => break,
Some(bs) => facet.activate(Arc::clone(&account), |t| { Some(bs) => {
let mut g = relay.lock(); if !facet.activate(
let tr = g.as_mut().expect("initialized"); &account, Some(cause.clone()), |t| {
tr.handle_inbound_datagram(t, &bs?) let mut g = relay.lock();
})?, let tr = g.as_mut().expect("initialized");
tr.handle_inbound_datagram(t, &bs?)
})
{
break;
}
}
} }
} }
} }
@ -641,24 +753,31 @@ async fn input_loop(
buf.reserve(BUFSIZE); buf.reserve(BUFSIZE);
let n = match r.read_buf(&mut buf).await { let n = match r.read_buf(&mut buf).await {
Ok(n) => n, Ok(n) => n,
Err(e) => Err(e) => {
if e.kind() == io::ErrorKind::ConnectionReset { if e.kind() == io::ErrorKind::ConnectionReset {
return Ok(LinkedTaskTermination::Normal); break;
} else { }
return Err(e)?; return Err(e)?;
}, }
}; };
match n { match n {
0 => return Ok(LinkedTaskTermination::Normal), 0 => break,
_ => facet.activate(Arc::clone(&account), |t| { _ => {
let mut g = relay.lock(); if !facet.activate(
let tr = g.as_mut().expect("initialized"); &account, Some(cause.clone()), |t| {
tr.handle_inbound_stream(t, &mut buf) let mut g = relay.lock();
})?, let tr = g.as_mut().expect("initialized");
tr.handle_inbound_stream(t, &mut buf)
})
{
break;
}
}
} }
} }
} }
} }
Ok(LinkedTaskTermination::Normal)
} }
async fn output_loop( async fn output_loop(
@ -683,25 +802,15 @@ async fn output_loop(
} }
impl Entity<()> for TunnelRefEntity { impl Entity<()> for TunnelRefEntity {
fn message(&mut self, t: &mut Activation, _m: ()) -> ActorResult { fn exit_hook(&mut self, t: &mut Activation, exit_status: &Arc<ExitStatus>) {
let mut g = self.relay_ref.lock(); if let ExitStatus::Error(e) = &**exit_status {
let tr = g.as_mut().expect("initialized");
let events = std::mem::take(&mut tr.pending_outbound);
tr.send_packet(&t.account(), events.len(), P::Packet::Turn(Box::new(P::Turn(events.clone()))))?;
for P::TurnEvent { oid, event } in events.into_iter() {
tr.outbound_event_bookkeeping(t, sturdy::Oid(oid.0), &event)?;
}
Ok(())
}
fn exit_hook(&mut self, t: &mut Activation, exit_status: &Arc<ActorResult>) -> ActorResult {
if let Err(e) = &**exit_status {
let e = e.clone(); let e = e.clone();
let mut g = self.relay_ref.lock(); let mut g = self.relay_ref.lock();
let tr = g.as_mut().expect("initialized"); let tr = g.as_mut().expect("initialized");
tr.send_packet(&t.account(), 1, P::Packet::Error(Box::new(e)))?; if let Err(f) = tr.send_packet(&t.account(), 1, P::Packet::Error(Box::new(e))) {
tracing::error!("Failed to send error packet: {:?}", f);
}
} }
Ok(())
} }
} }
@ -728,7 +837,7 @@ impl Entity<AnyValue> for RelayEntity {
fn sync(&mut self, t: &mut Activation, peer: Arc<Ref<Synced>>) -> ActorResult { fn sync(&mut self, t: &mut Activation, peer: Arc<Ref<Synced>>) -> ActorResult {
self.relay_ref.lock().as_mut().expect("initialized") self.relay_ref.lock().as_mut().expect("initialized")
.send_event(t, self.oid.clone(), P::Event::Sync(Box::new(P::Sync { .send_event(t, self.oid.clone(), P::Event::Sync(Box::new(P::Sync {
peer: Cap::guard(Arc::new(()), peer) peer: Cap::guard(&Arc::new(()), peer)
}))) })))
} }
} }


@ -16,7 +16,10 @@ pub type CheckedRewrite = (usize, Pattern, Template);
/// A safety-checked [`Caveat`]: none of the errors enumerated in /// A safety-checked [`Caveat`]: none of the errors enumerated in
/// `CaveatError` apply. /// `CaveatError` apply.
#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] #[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct CheckedCaveat { alts: Vec<CheckedRewrite> } pub enum CheckedCaveat {
Alts(Vec<CheckedRewrite>),
Reject(Pattern),
}
/// Represents any detected error in a [`Caveat`]; that is, in a /// Represents any detected error in a [`Caveat`]; that is, in a
/// [`Pattern`] or a [`Template`]. /// [`Pattern`] or a [`Template`].
@ -28,43 +31,44 @@ pub enum CaveatError {
BindingUnderNegation, BindingUnderNegation,
} }
impl Attenuation { impl Caveat {
/// Yields `Ok(())` iff `self` has no [`CaveatError`]. /// Yields `Ok(())` iff `caveats` have no [`CaveatError`].
pub fn validate(&self) -> Result<(), CaveatError> { pub fn validate_many(caveats: &[Caveat]) -> Result<(), CaveatError> {
for c in &self.0 { c.validate()? } for c in caveats { c.validate()? }
Ok(()) Ok(())
} }
/// Yields a vector of [`CheckedCaveat`s][CheckedCaveat] iff
/// `self` has no [`CaveatError`].
pub fn check(&self) -> Result<Vec<CheckedCaveat>, CaveatError> {
self.0.iter().map(Caveat::check).collect()
}
}
impl Caveat {
/// Yields `Ok(())` iff `self` has no [`CaveatError`]. /// Yields `Ok(())` iff `self` has no [`CaveatError`].
pub fn validate(&self) -> Result<(), CaveatError> { pub fn validate(&self) -> Result<(), CaveatError> {
match self { match self {
Caveat::Rewrite(b) => (&**b).validate(), Caveat::Rewrite(b) => (&**b).validate(),
Caveat::Alts(b) => (&**b).alternatives.iter().map(Rewrite::validate).collect::<Result<(), _>>(), Caveat::Alts(b) => (&**b).alternatives.iter().map(Rewrite::validate).collect::<Result<(), _>>(),
Caveat::Reject(_) => Ok(()),
Caveat::Unknown(_) => Ok(()), /* it's valid to have unknown caveats, they just won't pass anything */
} }
} }
/// Yields a vector of [`CheckedCaveat`s][CheckedCaveat] iff
/// `caveats` have no [`CaveatError`].
pub fn check_many(caveats: &[Caveat]) -> Result<Vec<CheckedCaveat>, CaveatError> {
caveats.iter().map(Caveat::check).collect()
}
/// Yields a [`CheckedCaveat`] iff `self` has no [`CaveatError`]. /// Yields a [`CheckedCaveat`] iff `self` has no [`CaveatError`].
pub fn check(&self) -> Result<CheckedCaveat, CaveatError> { pub fn check(&self) -> Result<CheckedCaveat, CaveatError> {
match self { match self {
Caveat::Rewrite(b) => Caveat::Rewrite(b) =>
Ok(CheckedCaveat { Ok(CheckedCaveat::Alts(vec![ (*b).check()? ])),
alts: vec![ (*b).check()? ]
}),
Caveat::Alts(b) => { Caveat::Alts(b) => {
let Alts { alternatives } = &**b; let Alts { alternatives } = &**b;
Ok(CheckedCaveat { Ok(CheckedCaveat::Alts(
alts: alternatives.into_iter().map(Rewrite::check) alternatives.into_iter().map(Rewrite::check)
.collect::<Result<Vec<CheckedRewrite>, CaveatError>>()? .collect::<Result<Vec<CheckedRewrite>, CaveatError>>()?))
})
} }
Caveat::Reject(b) =>
Ok(CheckedCaveat::Reject(b.pattern.clone())),
Caveat::Unknown(_) =>
Ok(CheckedCaveat::Reject(Pattern::PDiscard(Box::new(PDiscard)))),
} }
} }
} }
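`validate_many`/`check_many` replace the old `Attenuation` wrapper with slice-level helpers that fold per-caveat results into a single `Result`. A dependency-free sketch of that shape, using hypothetical stand-in types rather than the crate's real `Caveat`:

```rust
// Minimal sketch of the validate_many/check_many pattern above.
#[derive(Debug)]
struct CaveatError;
#[derive(Debug)]
struct Caveat(bool);
#[derive(Debug)]
struct CheckedCaveat;

impl Caveat {
    fn validate(&self) -> Result<(), CaveatError> {
        if self.0 { Ok(()) } else { Err(CaveatError) }
    }
    fn check(&self) -> Result<CheckedCaveat, CaveatError> {
        self.validate().map(|()| CheckedCaveat)
    }
    /// Ok(()) iff every caveat validates.
    fn validate_many(caveats: &[Caveat]) -> Result<(), CaveatError> {
        caveats.iter().try_for_each(Caveat::validate)
    }
    /// All checked caveats, or the first error encountered.
    fn check_many(caveats: &[Caveat]) -> Result<Vec<CheckedCaveat>, CaveatError> {
        caveats.iter().map(Caveat::check).collect()
    }
}

fn main() {
    let good = [Caveat(true), Caveat(true)];
    assert!(Caveat::validate_many(&good).is_ok());
    assert_eq!(Caveat::check_many(&good).unwrap().len(), 2);
}
```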
@ -117,7 +121,6 @@ impl Pattern {
Pattern::PDiscard(_) => true, Pattern::PDiscard(_) => true,
Pattern::PAtom(b) => match &**b { Pattern::PAtom(b) => match &**b {
PAtom::Boolean => a.value().is_boolean(), PAtom::Boolean => a.value().is_boolean(),
PAtom::Float => a.value().is_float(),
PAtom::Double => a.value().is_double(), PAtom::Double => a.value().is_double(),
PAtom::SignedInteger => a.value().is_signedinteger(), PAtom::SignedInteger => a.value().is_signedinteger(),
PAtom::String => a.value().is_string(), PAtom::String => a.value().is_string(),
@ -187,7 +190,7 @@ impl Template {
match self { match self {
Template::TAttenuate(b) => { Template::TAttenuate(b) => {
let TAttenuate { template, attenuation } = &**b; let TAttenuate { template, attenuation } = &**b;
attenuation.validate()?; Caveat::validate_many(attenuation)?;
Ok(template.implied_binding_count()?) Ok(template.implied_binding_count()?)
} }
Template::TRef(b) => match usize::try_from(&(&**b).binding) { Template::TRef(b) => match usize::try_from(&(&**b).binding) {
@ -273,12 +276,24 @@ impl Rewrite {
impl CheckedCaveat { impl CheckedCaveat {
/// Rewrites `a` using the patterns/templates contained in `self`. /// Rewrites `a` using the patterns/templates contained in `self`.
pub fn rewrite(&self, a: &_Any) -> Option<_Any> { pub fn rewrite(&self, a: &_Any) -> Option<_Any> {
for (n, p, t) in &self.alts { match self {
let mut bindings = Vec::with_capacity(*n); CheckedCaveat::Alts(alts) => {
if let true = p.matches(a, &mut bindings) { for (n, p, t) in alts {
return t.instantiate(&bindings); let mut bindings = Vec::with_capacity(*n);
if p.matches(a, &mut bindings) {
return t.instantiate(&bindings);
}
}
None
},
CheckedCaveat::Reject(pat) => {
let mut bindings = Vec::with_capacity(0);
if pat.matches(a, &mut bindings) {
None
} else {
Some(a.clone())
}
} }
} }
None
} }
} }
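The two `CheckedCaveat` behaviours differ in their defaults: `Alts` drops anything no alternative rewrites, while `Reject` passes everything through except what its pattern matches. A hedged, self-contained sketch with Preserves patterns replaced by plain string prefixes:

```rust
// Illustrative stand-ins, not the crate's real types: patterns are prefixes.
enum CheckedCaveat {
    /// Each alternative is (prefix to match, replacement prefix).
    Alts(Vec<(String, String)>),
    /// Pass a value through only if it does NOT start with this prefix.
    Reject(String),
}

impl CheckedCaveat {
    fn rewrite(&self, a: &str) -> Option<String> {
        match self {
            CheckedCaveat::Alts(alts) => {
                for (pat, tmpl) in alts {
                    if let Some(rest) = a.strip_prefix(pat.as_str()) {
                        return Some(format!("{}{}", tmpl, rest)); // first match wins
                    }
                }
                None // nothing matched: value is filtered out
            }
            CheckedCaveat::Reject(pat) =>
                if a.starts_with(pat.as_str()) { None } else { Some(a.to_owned()) },
        }
    }
}

fn main() {
    let alts = CheckedCaveat::Alts(vec![("cmd:".to_owned(), "audited-cmd:".to_owned())]);
    assert_eq!(alts.rewrite("cmd:ls"), Some("audited-cmd:ls".to_owned()));
    assert_eq!(alts.rewrite("other"), None);

    let reject = CheckedCaveat::Reject("secret:".to_owned());
    assert_eq!(reject.rewrite("public:42"), Some("public:42".to_owned()));
    assert_eq!(reject.rewrite("secret:42"), None);
}
```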


@ -16,19 +16,12 @@ use crate::actor::Activation;
use crate::actor::Handle; use crate::actor::Handle;
use crate::actor::Cap; use crate::actor::Cap;
use crate::schemas::dataspace_patterns as ds; use crate::schemas::dataspace_patterns as ds;
use crate::pattern::{self, PathStep, Path, Paths}; use crate::pattern::{self, ConstantPositions, PathStep, Path, Paths};
type Bag<A> = bag::BTreeBag<A>; type Bag<A> = bag::BTreeBag<A>;
type Captures = AnyValue; type Captures = AnyValue;
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
enum Guard {
Rec(AnyValue, usize),
Seq(usize),
Map,
}
/// Index of assertions and [`Observe`rs][crate::schemas::dataspace::Observe]. /// Index of assertions and [`Observe`rs][crate::schemas::dataspace::Observe].
/// ///
/// Generally speaking, you will not need to use this structure; /// Generally speaking, you will not need to use this structure;
@ -44,13 +37,13 @@ pub struct Index {
#[derive(Debug)] #[derive(Debug)]
struct Node { struct Node {
continuation: Continuation, continuation: Continuation,
edges: Map<Selector, Map<Guard, Node>>, edges: Map<Selector, Map<ds::GroupType, Node>>,
} }
#[derive(Debug)] #[derive(Debug)]
struct Continuation { struct Continuation {
cached_assertions: Set<AnyValue>, cached_assertions: Set<AnyValue>,
leaf_map: Map<Paths, Map<Captures, Leaf>>, leaf_map: Map<Arc<ConstantPositions>, Map<Captures, Leaf>>,
} }
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] #[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
@ -205,7 +198,7 @@ impl Node {
} }
fn extend(&mut self, pat: &ds::Pattern) -> &mut Continuation { fn extend(&mut self, pat: &ds::Pattern) -> &mut Continuation {
let (_pop_count, final_node) = self.extend_walk(&mut Vec::new(), 0, PathStep::Index(0), pat); let (_pop_count, final_node) = self.extend_walk(&mut Vec::new(), 0, PathStep::new(0), pat);
&mut final_node.continuation &mut final_node.continuation
} }
@ -216,23 +209,13 @@ impl Node {
step: PathStep, step: PathStep,
pat: &ds::Pattern, pat: &ds::Pattern,
) -> (usize, &mut Node) { ) -> (usize, &mut Node) {
let (guard, members): (Guard, Vec<(PathStep, &ds::Pattern)>) = match pat { let (guard, members): (ds::GroupType, Vec<(PathStep, &ds::Pattern)>) = match pat {
ds::Pattern::DCompound(b) => match &**b { ds::Pattern::Group { type_, entries } =>
ds::DCompound::Arr { items } => ((&**type_).clone(),
(Guard::Seq(items.len()), entries.iter().map(|(k, p)| (k.clone(), p)).collect()),
items.iter().enumerate().map(|(i, p)| (PathStep::Index(i), p)).collect()), ds::Pattern::Bind { pattern } =>
ds::DCompound::Rec { label, fields } => return self.extend_walk(path, pop_count, step, &**pattern),
(Guard::Rec(label.clone(), fields.len()), ds::Pattern::Discard | ds::Pattern::Lit { .. } =>
fields.iter().enumerate().map(|(i, p)| (PathStep::Index(i), p)).collect()),
ds::DCompound::Dict { entries, .. } =>
(Guard::Map,
entries.iter().map(|(k, p)| (PathStep::Key(k.clone()), p)).collect()),
}
ds::Pattern::DBind(b) => {
let ds::DBind { pattern, .. } = &**b;
return self.extend_walk(path, pop_count, step, pattern);
}
ds::Pattern::DDiscard(_) | ds::Pattern::DLit(_) =>
return (pop_count, self), return (pop_count, self),
}; };
@ -336,41 +319,46 @@ where FCont: FnMut(&mut Continuation, &AnyValue) -> (),
fn continuation(&mut self, c: &mut Continuation) { fn continuation(&mut self, c: &mut Continuation) {
(self.m_cont)(c, self.outer_value); (self.m_cont)(c, self.outer_value);
let mut empty_const_paths = Vec::new(); let mut empty_const_positions = Vec::new();
for (const_paths, const_val_map) in &mut c.leaf_map { for (const_positions, const_val_map) in &mut c.leaf_map {
if let Some(const_vals) = project_paths(self.outer_value, const_paths) { if project_paths(self.outer_value, &const_positions.required_to_exist).is_none() {
let leaf_opt = if self.create_leaf_if_absent { continue;
Some(const_val_map.entry(const_vals.clone()).or_insert_with(Leaf::new)) }
} else { let const_vals = match project_paths(self.outer_value, &const_positions.with_values) {
const_val_map.get_mut(&const_vals) Some(vs) => vs,
}; None => continue,
if let Some(leaf) = leaf_opt { };
(self.m_leaf)(leaf, self.outer_value); let leaf_opt = if self.create_leaf_if_absent {
for (capture_paths, endpoints) in &mut leaf.endpoints_map { Some(const_val_map.entry(const_vals.clone()).or_insert_with(Leaf::new))
if let Some(cs) = project_paths(self.outer_value, &capture_paths) { } else {
(self.m_endpoints)(endpoints, cs); const_val_map.get_mut(&const_vals)
} };
if let Some(leaf) = leaf_opt {
(self.m_leaf)(leaf, self.outer_value);
for (capture_paths, endpoints) in &mut leaf.endpoints_map {
if let Some(cs) = project_paths(self.outer_value, &capture_paths) {
(self.m_endpoints)(endpoints, cs);
} }
if leaf.is_empty() { }
const_val_map.remove(&const_vals); if leaf.is_empty() {
if const_val_map.is_empty() { const_val_map.remove(&const_vals);
empty_const_paths.push(const_paths.clone()); if const_val_map.is_empty() {
} empty_const_positions.push(const_positions.clone());
} }
} }
} }
} }
for const_paths in empty_const_paths { for const_positions in empty_const_positions {
c.leaf_map.remove(&const_paths); c.leaf_map.remove(&const_positions);
} }
} }
} }
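`ConstantPositions` (from `crate::pattern`) splits the old constant paths into `required_to_exist`, which only gate whether an assertion is indexed at all, and `with_values`, whose projections form the leaf-map key. An illustrative toy model of that split, not the crate's real types:

```rust
// Toy nested values and path projection, mimicking the required_to_exist /
// with_values distinction used by the revised skeleton index.
use std::collections::BTreeMap;

#[derive(Clone, Debug, PartialEq)]
enum Toy {
    Leaf(String),
    Map(BTreeMap<String, Toy>),
}

type Path = Vec<String>;

fn project(v: &Toy, path: &Path) -> Option<Toy> {
    let mut cur = v;
    for step in path {
        match cur {
            Toy::Map(m) => cur = m.get(step)?,
            Toy::Leaf(_) => return None,
        }
    }
    Some(cur.clone())
}

/// None when a required path is missing (the assertion is skipped entirely,
/// like the `continue` above); otherwise the key built from the valued paths.
fn index_key(v: &Toy, required_to_exist: &[Path], with_values: &[Path]) -> Option<Vec<Toy>> {
    if required_to_exist.iter().any(|p| project(v, p).is_none()) {
        return None;
    }
    with_values.iter().map(|p| project(v, p)).collect()
}

fn main() {
    let mut m = BTreeMap::new();
    m.insert("kind".to_owned(), Toy::Leaf("sensor".to_owned()));
    m.insert("id".to_owned(), Toy::Leaf("7".to_owned()));
    let v = Toy::Map(m);

    let required = vec![vec!["id".to_owned()]];
    let keyed = vec![vec!["kind".to_owned()]];
    assert_eq!(index_key(&v, &required, &keyed),
               Some(vec![Toy::Leaf("sensor".to_owned())]));
}
```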
fn class_of(v: &AnyValue) -> Option<Guard> { fn class_of(v: &AnyValue) -> Option<ds::GroupType> {
match v.value() { match v.value() {
Value::Sequence(vs) => Some(Guard::Seq(vs.len())), Value::Sequence(_) => Some(ds::GroupType::Arr),
Value::Record(r) => Some(Guard::Rec(r.label().clone(), r.arity())), Value::Record(r) => Some(ds::GroupType::Rec { label: r.label().clone() }),
Value::Dictionary(_) => Some(Guard::Map), Value::Dictionary(_) => Some(ds::GroupType::Dict),
_ => None, _ => None,
} }
} }
@ -398,15 +386,17 @@ fn project_paths<'a>(v: &'a AnyValue, ps: &Paths) -> Option<Captures> {
} }
fn step<'a>(v: &'a AnyValue, s: &PathStep) -> Option<&'a AnyValue> { fn step<'a>(v: &'a AnyValue, s: &PathStep) -> Option<&'a AnyValue> {
match (v.value(), s) { match v.value() {
(Value::Sequence(vs), PathStep::Index(i)) => Value::Sequence(vs) => {
if *i < vs.len() { Some(&vs[*i]) } else { None }, let i = s.value().as_usize()?;
(Value::Record(r), PathStep::Index(i)) => if i < vs.len() { Some(&vs[i]) } else { None }
if *i < r.arity() { Some(&r.fields()[*i]) } else { None }, }
(Value::Dictionary(m), PathStep::Key(k)) => Value::Record(r) => {
m.get(k), let i = s.value().as_usize()?;
_ => if i < r.arity() { Some(&r.fields()[i]) } else { None }
None, }
Value::Dictionary(m) => m.get(s),
_ => None,
} }
} }
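The index now classifies values with the schema's `GroupType` and treats a `PathStep` as a single value: an integer index into sequences and records, a key into dictionaries. A simplified stand-in (records omitted) showing the same classify-then-step shape:

```rust
// Toy value type and the classify/step pair, mirroring class_of/step above.
use std::collections::BTreeMap;

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum V {
    Int(usize),
    Str(String),
    Seq(Vec<V>),
    Dict(BTreeMap<V, V>),
}

#[derive(Debug, PartialEq)]
enum GroupType { Arr, Dict }

fn class_of(v: &V) -> Option<GroupType> {
    match v {
        V::Seq(_) => Some(GroupType::Arr),
        V::Dict(_) => Some(GroupType::Dict),
        _ => None, // atoms have no group class
    }
}

fn step<'a>(v: &'a V, s: &V) -> Option<&'a V> {
    match v {
        V::Seq(vs) => match s {
            V::Int(i) => vs.get(*i), // single step value read as an index
            _ => None,
        },
        V::Dict(m) => m.get(s),      // ...or as a dictionary key
        _ => None,
    }
}

fn main() {
    let seq = V::Seq(vec![V::Str("a".into()), V::Str("b".into())]);
    assert_eq!(class_of(&seq), Some(GroupType::Arr));
    assert_eq!(step(&seq, &V::Int(1)), Some(&V::Str("b".into())));

    let mut m = BTreeMap::new();
    m.insert(V::Str("k".into()), V::Int(42));
    let dict = V::Dict(m);
    assert_eq!(class_of(&dict), Some(GroupType::Dict));
    assert_eq!(step(&dict, &V::Str("k".into())), Some(&V::Int(42)));
}
```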
@ -423,11 +413,14 @@ impl Continuation {
) { ) {
let cached_assertions = &self.cached_assertions; let cached_assertions = &self.cached_assertions;
let const_val_map = let const_val_map =
self.leaf_map.entry(analysis.const_paths.clone()).or_insert_with({ self.leaf_map.entry(analysis.const_positions.clone()).or_insert_with({
|| { || {
let mut cvm = Map::new(); let mut cvm = Map::new();
for a in cached_assertions { for a in cached_assertions {
if let Some(key) = project_paths(a, &analysis.const_paths) { if project_paths(a, &analysis.const_positions.required_to_exist).is_none() {
continue;
}
if let Some(key) = project_paths(a, &analysis.const_positions.with_values) {
cvm.entry(key).or_insert_with(Leaf::new) cvm.entry(key).or_insert_with(Leaf::new)
.cached_assertions.insert(a.clone()); .cached_assertions.insert(a.clone());
} }
@ -462,7 +455,7 @@ impl Continuation {
observer: &Arc<Cap>, observer: &Arc<Cap>,
) { ) {
if let Entry::Occupied(mut const_val_map_entry) if let Entry::Occupied(mut const_val_map_entry)
= self.leaf_map.entry(analysis.const_paths) = self.leaf_map.entry(analysis.const_positions)
{ {
let const_val_map = const_val_map_entry.get_mut(); let const_val_map = const_val_map_entry.get_mut();
if let Entry::Occupied(mut leaf_entry) if let Entry::Occupied(mut leaf_entry)


@ -1,7 +1,8 @@
use blake2::Blake2s256;
use getrandom::getrandom; use getrandom::getrandom;
use hmac::{SimpleHmac, Mac};
use hmac::{Hmac, Mac, NewMac, crypto_mac::MacError}; use preserves::error::io_syntax_error;
use preserves::hex::HexParser; use preserves::hex::HexParser;
use preserves::hex::HexFormatter; use preserves::hex::HexFormatter;
use preserves::value::NestedValue; use preserves::value::NestedValue;
@ -10,8 +11,6 @@ use preserves::value::packed::PackedWriter;
use preserves::value::packed::from_bytes; use preserves::value::packed::from_bytes;
use preserves_schema::Codec; use preserves_schema::Codec;
use sha2::Sha256;
use std::io; use std::io;
use super::language; use super::language;
@ -21,33 +20,50 @@ pub use super::schemas::sturdy::*;
#[derive(Debug)] #[derive(Debug)]
pub enum ValidationError { pub enum ValidationError {
SignatureError(MacError), SignatureError,
AttenuationError(CaveatError), AttenuationError(CaveatError),
BadCaveatsField,
} }
impl std::fmt::Display for ValidationError { impl std::fmt::Display for ValidationError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> { fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
match self { match self {
ValidationError::SignatureError(_) => ValidationError::SignatureError =>
write!(f, "Invalid SturdyRef signature"), write!(f, "Invalid SturdyRef signature"),
ValidationError::AttenuationError(e) => ValidationError::AttenuationError(e) =>
write!(f, "Invalid SturdyRef attenuation: {:?}", e), write!(f, "Invalid SturdyRef attenuation: {:?}", e),
ValidationError::BadCaveatsField =>
write!(f, "Invalid caveats field in SturdyRef parameters"),
} }
} }
} }
impl From<ValidationError> for io::Error {
fn from(v: ValidationError) -> Self {
io_syntax_error(&v.to_string())
}
}
impl std::error::Error for ValidationError {} impl std::error::Error for ValidationError {}
const KEY_LENGTH: usize = 16; // bytes; 128 bits const KEY_LENGTH: usize = 16; // bytes; 128 bits
fn signature(key: &[u8], data: &[u8]) -> Vec<u8> { fn signature(key: &[u8], data: &[u8]) -> Vec<u8> {
let mut m = Hmac::<Sha256>::new_from_slice(key).expect("valid key length"); let mut m = SimpleHmac::<Blake2s256>::new_from_slice(key).expect("valid key length");
m.update(data); m.update(data);
let mut result = m.finalize().into_bytes().to_vec(); let mut result = m.finalize().into_bytes().to_vec();
result.truncate(KEY_LENGTH); result.truncate(KEY_LENGTH);
result result
} }
fn chain_signature(key: &[u8], chain: &[Caveat]) -> Vec<u8> {
let mut key = key.to_vec();
for c in chain {
key = signature(&key, &encode(&language().unparse(c)));
}
key
}
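`chain_signature` folds each caveat's encoding into the running MAC key, so signing a whole chain at once agrees with attenuating one caveat at a time; that property is what lets `attenuate` extend `sig` without access to the original service key. A toy demonstration of the chaining property (the `mac` below is not cryptographic; it merely stands in for the truncated HMAC-BLAKE2s `signature` above):

```rust
// NOT cryptography: an order-sensitive mixing function, enough to show
// how signature chaining composes.
fn mac(key: &[u8], data: &[u8]) -> Vec<u8> {
    let mut out = vec![0u8; 16];
    for (i, b) in key.iter().chain(data.iter()).enumerate() {
        out[i % 16] = out[i % 16].wrapping_mul(31).wrapping_add(*b);
    }
    out
}

fn chain_signature(key: &[u8], chain: &[Vec<u8>]) -> Vec<u8> {
    let mut key = key.to_vec();
    for c in chain {
        key = mac(&key, c);
    }
    key
}

fn main() {
    let root = mac(b"service-key", b"oid");
    let caveats = vec![b"only-reads".to_vec(), b"only-today".to_vec()];

    // Signing the whole chain at once...
    let all_at_once = chain_signature(&root, &caveats);

    // ...matches attenuating one caveat at a time.
    let step1 = chain_signature(&root, &caveats[..1]);
    let step2 = chain_signature(&step1, &caveats[1..]);
    assert_eq!(all_at_once, step2);
}
```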
pub fn new_key() -> Vec<u8> { pub fn new_key() -> Vec<u8> {
let mut buf = vec![0; KEY_LENGTH]; let mut buf = vec![0; KEY_LENGTH];
getrandom(&mut buf).expect("successful random number generation"); getrandom(&mut buf).expect("successful random number generation");
@ -65,7 +81,21 @@ pub fn decode<N: NestedValue>(bs: &[u8]) -> io::Result<N> {
impl SturdyRef { impl SturdyRef {
pub fn mint(oid: _Any, key: &[u8]) -> Self { pub fn mint(oid: _Any, key: &[u8]) -> Self {
let sig = signature(key, &encode(&oid)); let sig = signature(key, &encode(&oid));
SturdyRef { oid, caveat_chain: Vec::new(), sig } SturdyRef::from_parts(oid, vec![], sig)
}
pub fn from_parts(oid: _Any, caveats: Vec<Caveat>, sig: Vec<u8>) -> Self {
SturdyRef {
parameters: Parameters {
oid,
sig,
caveats: if caveats.is_empty() {
CaveatsField::Absent
} else {
CaveatsField::Present { caveats }
}
}
}
} }
pub fn from_hex(s: &str) -> Result<Self, Error> { pub fn from_hex(s: &str) -> Result<Self, Error> {
@ -77,44 +107,44 @@ impl SturdyRef {
HexFormatter::Packed.encode(&encode(&language().unparse(self))) HexFormatter::Packed.encode(&encode(&language().unparse(self)))
} }
pub fn caveat_chain(&self) -> Result<&[Caveat], ValidationError> {
match &self.parameters.caveats {
CaveatsField::Absent => Ok(&[]),
CaveatsField::Invalid { .. } => Err(ValidationError::BadCaveatsField),
CaveatsField::Present { caveats } => Ok(caveats),
}
}
pub fn validate_and_attenuate( pub fn validate_and_attenuate(
&self, &self,
key: &[u8], key: &[u8],
unattenuated_target: &_Ptr, unattenuated_target: &_Ptr,
) -> Result<_Ptr, ValidationError> { ) -> Result<_Ptr, ValidationError> {
self.validate(key).map_err(ValidationError::SignatureError)?; self.validate(key).map_err(|_| ValidationError::SignatureError)?;
let mut attenuation = Vec::new();
// TODO:: Make sure of the ordering here!!
for a in self.caveat_chain.iter().rev() {
attenuation.extend(a.0.iter().rev().cloned());
}
let target = unattenuated_target let target = unattenuated_target
.attenuate(&Attenuation(attenuation)) .attenuate(self.caveat_chain()?)
.map_err(ValidationError::AttenuationError)?; .map_err(ValidationError::AttenuationError)?;
Ok(target) Ok(target)
} }
pub fn validate(&self, key: &[u8]) -> Result<(), MacError> { pub fn validate(&self, key: &[u8]) -> Result<(), ()> {
let SturdyRef { oid, caveat_chain, sig } = self; let SturdyRef { parameters: Parameters { oid, sig, .. } } = self;
let mut key = key.to_vec(); let key = chain_signature(&signature(&key, &encode(oid)),
key = signature(&key, &encode(oid)); self.caveat_chain().map_err(|_| ())?);
for c in caveat_chain {
key = signature(&key, &encode(&language().unparse(c)));
}
if &key == sig { if &key == sig {
Ok(()) Ok(())
} else { } else {
Err(MacError) Err(())
} }
} }
pub fn attenuate(&self, attenuation: &Attenuation) -> Result<Self, CaveatError> { pub fn attenuate(&self, attenuation: &[Caveat]) -> Result<Self, ValidationError> {
attenuation.validate()?; Caveat::validate_many(attenuation).map_err(ValidationError::AttenuationError)?;
let SturdyRef { oid, caveat_chain, sig } = self; let SturdyRef { parameters: Parameters { oid, sig, .. } } = self;
let oid = oid.clone(); let oid = oid.clone();
let mut caveat_chain = caveat_chain.clone(); let mut caveat_chain = self.caveat_chain()?.to_vec();
caveat_chain.push(attenuation.clone()); caveat_chain.extend(attenuation.iter().cloned());
let sig = signature(&sig, &encode(&language().unparse(attenuation))); let sig = chain_signature(&sig, attenuation);
Ok(SturdyRef { oid, caveat_chain, sig }) Ok(SturdyRef::from_parts(oid, caveat_chain, sig))
} }
} }
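`caveat_chain()` normalises the new optional caveats field: absent means an empty chain, present yields the caveats, and a malformed field surfaces as `BadCaveatsField`. A sketch of that accessor pattern with illustrative stand-in types, not the generated schema types:

```rust
// An optional-or-invalid field surfaced as Result<&[T], E>.
enum CaveatsField<T> {
    Absent,
    Present { caveats: Vec<T> },
    Invalid { raw: String },
}

#[derive(Debug, PartialEq)]
struct BadCaveatsField;

fn caveat_chain<T>(f: &CaveatsField<T>) -> Result<&[T], BadCaveatsField> {
    match f {
        CaveatsField::Absent => Ok(&[]),                     // no caveats at all
        CaveatsField::Present { caveats } => Ok(caveats),    // the actual chain
        CaveatsField::Invalid { .. } => Err(BadCaveatsField) // present but malformed
    }
}

fn main() {
    let none: CaveatsField<&str> = CaveatsField::Absent;
    assert_eq!(caveat_chain(&none).unwrap().len(), 0);

    let some = CaveatsField::Present { caveats: vec!["only-reads"] };
    assert_eq!(caveat_chain(&some).unwrap(), &["only-reads"]);

    let bad: CaveatsField<&str> = CaveatsField::Invalid { raw: "???".to_owned() };
    assert_eq!(caveat_chain(&bad), Err(BadCaveatsField));
}
```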


@ -1,23 +1,19 @@
//! Extremely simple single-actor supervision. Vastly simplified compared to the available //! Extremely simple single-actor supervision. Vastly simplified compared to the available
//! options in [Erlang/OTP](https://erlang.org/doc/man/supervisor.html). //! options in [Erlang/OTP](https://erlang.org/doc/man/supervisor.html).
use preserves::value::NestedValue;
use std::collections::VecDeque; use std::collections::VecDeque;
use std::sync::Arc; use std::sync::Arc;
use std::sync::Mutex;
use std::time::Duration; use std::time::Duration;
use tokio::time::Instant; use tokio::time::Instant;
use crate::actor::*; use crate::actor::*;
use crate::enclose;
use crate::schemas::service::State; use crate::schemas::service::State;
pub type Boot = Box<dyn Send + FnMut(&mut Activation) -> ActorResult>; pub type Boot = Arc<Mutex<Box<dyn Send + FnMut(&mut Activation) -> ActorResult>>>;
enum Protocol {
SuperviseeStarted, // assertion
BootFunction(Boot), // message
Retry, // message
}
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
pub enum RestartPolicy { pub enum RestartPolicy {
@ -34,26 +30,19 @@ pub struct SupervisorConfiguration {
pub restart_policy: RestartPolicy, pub restart_policy: RestartPolicy,
} }
#[derive(Debug)]
struct StartNow;
pub struct Supervisor { pub struct Supervisor {
self_ref: Arc<Ref<Protocol>>, self_ref: Arc<Ref<StartNow>>,
name: tracing::Span, child_name: Name,
config: SupervisorConfiguration, config: SupervisorConfiguration,
boot_fn: Option<Boot>, boot_fn: Boot,
restarts: VecDeque<Instant>, restarts: VecDeque<Instant>,
state: Arc<Field<State>>, state: Arc<Field<State>>,
ac_ref: Option<ActorRef>, ac_ref: Option<ActorRef>,
} }
impl std::fmt::Debug for Protocol {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Protocol::SuperviseeStarted => write!(f, "Protocol::SuperviseeStarted"),
Protocol::BootFunction(_) => write!(f, "Protocol::BootFunction(_)"),
Protocol::Retry => write!(f, "Protocol::Retry"),
}
}
}
impl Default for SupervisorConfiguration { impl Default for SupervisorConfiguration {
fn default() -> Self { fn default() -> Self {
Self { Self {
@ -75,30 +64,23 @@ impl SupervisorConfiguration {
} }
} }
impl Entity<Protocol> for Supervisor impl Entity<StartNow> for Supervisor
{ {
fn assert(&mut self, t: &mut Activation, m: Protocol, _h: Handle) -> ActorResult { fn message(&mut self, t: &mut Activation, _m: StartNow) -> ActorResult {
match m { self.start_now(t)
Protocol::SuperviseeStarted => t.set(&self.state, State::Started),
_ => Err(format!("Unexpected assertion: {:?}", m).as_str())?,
}
Ok(())
} }
fn retract(&mut self, t: &mut Activation, _h: Handle) -> ActorResult { fn stop(&mut self, t: &mut Activation) -> ActorResult {
let _name = self.name.clone(); let _entry = tracing::info_span!("supervisor", name = ?self.child_name).entered();
let _entry = _name.enter(); match self.ac_ref.take().expect("valid supervisee ActorRef").exit_status() {
let exit_status = None =>
self.ac_ref.take().expect("valid supervisee ActorRef") tracing::debug!("Supervisor shut down; supervisee will exit soon"),
.exit_status() Some(Ok(())) if self.config.restart_policy == RestartPolicy::OnErrorOnly => {
.expect("supervisee to have terminated");
tracing::debug!(?exit_status);
match exit_status {
Ok(()) if self.config.restart_policy == RestartPolicy::OnErrorOnly => {
tracing::trace!("Not restarting: normal exit, restart_policy is OnErrorOnly"); tracing::trace!("Not restarting: normal exit, restart_policy is OnErrorOnly");
t.set(&self.state, State::Complete); t.set(&self.state, State::Complete);
}, },
_ => { Some(exit_status) => {
tracing::debug!(?exit_status);
tracing::trace!("Restarting: restart_policy is Always or exit was abnormal"); tracing::trace!("Restarting: restart_policy is Always or exit was abnormal");
t.set(&self.state, t.set(&self.state,
if exit_status.is_ok() { State::Complete } else { State::Failed }); if exit_status.is_ok() { State::Complete } else { State::Failed });
@ -122,105 +104,63 @@ impl Entity<Protocol> for Supervisor
}; };
t.after(wait_time, move |t| { t.after(wait_time, move |t| {
tracing::trace!("Sending retry trigger"); tracing::trace!("Sending retry trigger");
t.message(&self_ref, Protocol::Retry); t.message(&self_ref, StartNow);
Ok(()) Ok(())
}); });
}, },
} }
Ok(()) Ok(())
} }
fn message(&mut self, t: &mut Activation, m: Protocol) -> ActorResult {
match m {
Protocol::BootFunction(b) => {
self.boot_fn = Some(b);
Ok(())
}
Protocol::Retry => {
self.ensure_started(t)
}
_ => Ok(())
}
}
fn stop(&mut self, _t: &mut Activation) -> ActorResult {
let _entry = self.name.enter();
tracing::info!(self_ref = ?self.self_ref, "Supervisor terminating");
Ok(())
}
} }
impl Supervisor { impl Supervisor {
pub fn start<C: 'static + Send + FnMut(&mut Activation, State) -> ActorResult, pub fn start<C: 'static + Send + FnMut(&mut Activation, State) -> ActorResult,
B: 'static + Send + FnMut(&mut Activation) -> ActorResult>( B: 'static + Send + FnMut(&mut Activation) -> ActorResult>(
t: &mut Activation, t: &mut Activation,
name: tracing::Span, name: Name,
config: SupervisorConfiguration, config: SupervisorConfiguration,
mut state_cb: C, mut state_cb: C,
boot_fn: B, boot_fn: B,
) -> ActorResult { ) -> ActorResult {
let _entry = name.enter(); let _entry = tracing::info_span!("supervisor", ?name).entered();
tracing::trace!(?config); tracing::trace!(?config);
let self_ref = t.create_inert(); let self_ref = t.create_inert();
let state_field = t.named_field("supervisee_state", State::Started); let state_field = t.named_field("supervisee_state", State::Started);
let my_name = name.as_ref().map(
|n| preserves::rec![AnyValue::symbol("supervisor"), n.clone()]);
let mut supervisor = Supervisor { let mut supervisor = Supervisor {
self_ref: Arc::clone(&self_ref), self_ref: Arc::clone(&self_ref),
name: name.clone(), child_name: name,
config, config,
boot_fn: Some(Box::new(boot_fn)), boot_fn: Arc::new(Mutex::new(Box::new(boot_fn))),
restarts: VecDeque::new(), restarts: VecDeque::new(),
state: Arc::clone(&state_field), state: Arc::clone(&state_field),
ac_ref: None, ac_ref: None,
}; };
tracing::info!(self_ref = ?supervisor.self_ref, "Supervisor starting"); tracing::info!(self_ref = ?supervisor.self_ref, "Supervisor starting");
supervisor.ensure_started(t)?; supervisor.start_now(t)?;
t.dataflow(enclose!((name) move |t| { t.dataflow(move |t| {
let state = t.get(&state_field).clone(); let state = t.get(&state_field).clone();
{ tracing::debug!(name = ?my_name, ?state);
let _entry = name.enter();
tracing::debug!(?state);
}
state_cb(t, state) state_cb(t, state)
}))?; })?;
self_ref.become_entity(supervisor); self_ref.become_entity(supervisor);
t.on_stop_notify(&self_ref);
Ok(()) Ok(())
} }
fn ensure_started(&mut self, t: &mut Activation) -> ActorResult { fn start_now(&mut self, t: &mut Activation) -> ActorResult {
match self.boot_fn.take() { let boot_cell = Arc::clone(&self.boot_fn);
None => { t.facet(|t: &mut Activation| {
let _entry = self.name.enter(); t.on_stop_notify(&self.self_ref);
t.set(&self.state, State::Failed); self.ac_ref = Some(t.spawn_link(
tracing::error!("Cannot restart supervisee, because it panicked at startup") self.child_name.clone(),
} move |t| boot_cell.lock().expect("Unpoisoned boot_fn mutex")(t)));
Some(mut boot_fn) => { tracing::debug!(self_ref = ?self.self_ref,
let self_ref = Arc::clone(&self.self_ref); supervisee = ?self.ac_ref,
t.facet(|t: &mut Activation| { "Supervisee started");
t.assert(&self.self_ref, Protocol::SuperviseeStarted); Ok(())
self.ac_ref = Some(t.spawn_link( })?;
crate::name!(parent: &self.name, "supervisee"), t.set(&self.state, State::Started);
move |t| {
match boot_fn(t) {
Ok(()) => {
t.message(&self_ref, Protocol::BootFunction(boot_fn));
Ok(())
}
Err(e) => {
t.clear();
t.message(&self_ref, Protocol::BootFunction(boot_fn));
t.deliver();
Err(e)
}
}
}));
tracing::debug!(self_ref = ?self.self_ref,
supervisee = ?self.ac_ref,
"Supervisee started");
Ok(())
})?;
}
}
Ok(()) Ok(())
} }
} }
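The restart decision in the new `stop` hook depends only on the supervisee's exit status and the configured `RestartPolicy`. A toy decision helper mirroring that logic; names are simplified stand-ins, not the crate's `State` schema:

```rust
#[derive(Debug, Clone, PartialEq, Eq)]
enum RestartPolicy { Always, OnErrorOnly }

#[derive(Debug, PartialEq)]
enum SuperviseeState { Failed, Complete }

/// Returns the state to publish and whether to schedule a restart.
fn decide(policy: &RestartPolicy, exit_status: &Result<(), String>) -> (SuperviseeState, bool) {
    match exit_status {
        // Normal exit under OnErrorOnly: leave the supervisee be.
        Ok(()) if *policy == RestartPolicy::OnErrorOnly => (SuperviseeState::Complete, false),
        // Always restarts even after a normal exit.
        Ok(()) => (SuperviseeState::Complete, true),
        // Abnormal exit: restart under either policy.
        Err(_) => (SuperviseeState::Failed, true),
    }
}

fn main() {
    assert_eq!(decide(&RestartPolicy::OnErrorOnly, &Ok(())), (SuperviseeState::Complete, false));
    assert_eq!(decide(&RestartPolicy::Always, &Ok(())), (SuperviseeState::Complete, true));
    assert_eq!(decide(&RestartPolicy::OnErrorOnly, &Err("boom".into())), (SuperviseeState::Failed, true));
}
```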

syndicate/src/trace.rs (new file, 174 lines)

@ -0,0 +1,174 @@
//! Records *describing* actions committed at the end of a turn and
//! events triggering the start of a turn. These are not the actions
//! or events themselves: they are reflective information on the
//! action of the system, enough to reconstruct interesting
//! projections of system activity.
pub use super::schemas::trace::*;
use preserves::value::NestedValue;
use preserves::value::Writer;
use preserves_schema::Codec;
use super::actor::{self, AnyValue, Ref, Cap};
use super::language;
use std::num::NonZeroU64;
use std::sync::Arc;
use std::time::SystemTime;
use tokio::select;
use tokio::sync::mpsc::{unbounded_channel, UnboundedSender};
#[derive(Debug, Clone)]
pub struct TraceCollector {
pub tx: UnboundedSender<TraceEntry>,
}
impl<M> From<&Ref<M>> for Target {
fn from(v: &Ref<M>) -> Target {
Target {
actor: v.mailbox.actor_id.into(),
facet: v.facet_id.into(),
oid: Oid(AnyValue::new(v.oid())),
}
}
}
impl<M: std::fmt::Debug> From<&M> for AssertionDescription {
default fn from(v: &M) -> Self {
Self::Opaque { description: AnyValue::new(format!("{:?}", v)) }
}
}
impl From<&AnyValue> for AssertionDescription {
fn from(v: &AnyValue) -> Self {
Self::Value { value: v.clone() }
}
}
impl TraceCollector {
pub fn record(&self, id: actor::ActorId, a: ActorActivation) {
let _ = self.tx.send(TraceEntry {
timestamp: SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)
.expect("Time after Unix epoch").as_secs_f64().into(),
actor: id.into(),
item: a,
});
}
}
impl TurnDescription {
pub fn new(activation_id: u64, cause: TurnCause) -> Self {
Self {
id: TurnId(AnyValue::new(activation_id)),
cause,
actions: Vec::new(),
}
}
pub fn record(&mut self, a: ActionDescription) {
self.actions.push(a)
}
pub fn take(&mut self) -> Self {
Self {
id: self.id.clone(),
cause: self.cause.clone(),
actions: std::mem::take(&mut self.actions),
}
}
}
impl TurnCause {
pub fn external(description: &str) -> Self {
Self::External { description: AnyValue::new(description) }
}
}
struct CapEncoder;
impl preserves::value::DomainEncode<Arc<Cap>> for CapEncoder {
fn encode_embedded<W: Writer>(
&mut self,
w: &mut W,
d: &Arc<Cap>,
) -> std::io::Result<()> {
w.write_string(&d.debug_str())
}
}
pub enum CollectorEvent {
Event(TraceEntry),
PeriodicFlush,
}
impl TraceCollector {
pub fn new<F: 'static + Send + FnMut(CollectorEvent)>(mut f: F) -> TraceCollector {
let (tx, mut rx) = unbounded_channel::<TraceEntry>();
tokio::spawn(async move {
let mut timer = tokio::time::interval(std::time::Duration::from_millis(100));
loop {
select! {
maybe_entry = rx.recv() => {
match maybe_entry {
None => break,
Some(entry) => {
tracing::trace!(?entry);
f(CollectorEvent::Event(entry));
}
}
},
_ = timer.tick() => f(CollectorEvent::PeriodicFlush),
}
}
});
TraceCollector { tx }
}
pub fn ascii<W: 'static + std::io::Write + Send>(w: W) -> TraceCollector {
let mut writer = preserves::value::TextWriter::new(w);
Self::new(move |event| match event {
CollectorEvent::Event(entry) => {
writer.write(&mut CapEncoder, &language().unparse(&entry))
.expect("failed to write TraceCollector entry");
writer.borrow_write().write_all(b"\n")
.expect("failed to write TraceCollector newline");
},
CollectorEvent::PeriodicFlush =>
writer.flush().expect("failed to flush TraceCollector output"),
})
}
pub fn packed<W: 'static + std::io::Write + Send>(w: W) -> TraceCollector {
let mut writer = preserves::value::PackedWriter::new(w);
Self::new(move |event| match event {
CollectorEvent::Event(entry) =>
writer.write(&mut CapEncoder, &language().unparse(&entry))
.expect("failed to write TraceCollector entry"),
CollectorEvent::PeriodicFlush =>
writer.flush().expect("failed to flush TraceCollector output"),
})
}
}
impl From<actor::Name> for Name {
fn from(v: actor::Name) -> Name {
match v {
None => Name::Anonymous,
Some(n) => Name::Named { name: n.clone() },
}
}
}
impl From<NonZeroU64> for ActorId {
fn from(v: NonZeroU64) -> Self {
ActorId(AnyValue::new(u64::from(v)))
}
}
impl From<NonZeroU64> for FacetId {
fn from(v: NonZeroU64) -> Self {
FacetId(AnyValue::new(u64::from(v)))
}
}
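A hedged usage sketch for the new module, assuming it is exposed as `syndicate::trace` and that Tokio's `macros`/`rt` features are enabled for `#[tokio::main]`: construct a collector that appends textual trace entries to a file, plus a `TurnCause` for externally triggered turns like the relay input loop above.

```rust
use std::fs::File;
use syndicate::trace::{TraceCollector, TurnCause};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Textual trace output; TraceCollector::packed would emit binary Preserves.
    // The collector spawns a background task, so a Tokio runtime must be running.
    let collector = TraceCollector::ascii(File::create("trace.log")?);

    // Causes label the origin of externally triggered turns.
    let _cause = TurnCause::external("example-input");

    // In a real program, `collector` is threaded into the actor machinery
    // (e.g. when creating Accounts) so that each turn is recorded.
    let _ = collector;
    Ok(())
}
```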


@ -1,66 +0,0 @@
use crate::actor::*;
use std::fmt::Debug;
use std::io;
use std::sync::Arc;
struct Tracer(tracing::Span);
fn set_name_oid<M>(t: &mut Tracer, r: &Arc<Ref<M>>) {
t.0.record("oid", &tracing::field::display(&r.oid()));
}
pub fn tracer<M: Debug>(t: &mut Activation, name: tracing::Span) -> Arc<Ref<M>> {
let mut e = Tracer(name);
let r = t.create_inert();
set_name_oid(&mut e, &r);
r.become_entity(e);
r
}
impl<M: Debug> Entity<M> for Tracer {
fn assert(&mut self, _t: &mut Activation, a: M, h: Handle) -> ActorResult {
let _guard = self.0.enter();
tracing::trace!(?a, ?h, "assert");
Ok(())
}
fn retract(&mut self, _t: &mut Activation, h: Handle) -> ActorResult {
let _guard = self.0.enter();
tracing::trace!(?h, "retract");
Ok(())
}
fn message(&mut self, _t: &mut Activation, m: M) -> ActorResult {
let _guard = self.0.enter();
tracing::trace!(?m, "message");
Ok(())
}
fn sync(&mut self, t: &mut Activation, peer: Arc<Ref<Synced>>) -> ActorResult {
let _guard = self.0.enter();
tracing::trace!(?peer, "sync");
t.message(&peer, Synced);
Ok(())
}
}
/// Sets up [`tracing`] logging in a reasonable way.
///
/// Useful at the top of `main` functions.
pub fn convenient_logging() -> Result<(), Box<dyn std::error::Error>> {
let filter = match std::env::var(tracing_subscriber::filter::EnvFilter::DEFAULT_ENV) {
Err(std::env::VarError::NotPresent) =>
tracing_subscriber::filter::EnvFilter::default()
.add_directive(tracing_subscriber::filter::LevelFilter::INFO.into()),
_ =>
tracing_subscriber::filter::EnvFilter::try_from_default_env()?,
};
let subscriber = tracing_subscriber::fmt()
.with_ansi(true)
.with_thread_ids(true)
.with_max_level(tracing::Level::TRACE)
.with_env_filter(filter)
.with_writer(io::stderr)
.finish();
tracing::subscriber::set_global_default(subscriber)
.expect("Could not set tracing global subscriber");
Ok(())
}