Compare commits


1 commit

97 changed files with 2873 additions and 6724 deletions

.gitignore vendored (1 line changed)
View File

@ -1,4 +1,3 @@
/target
**/*.rs.bk
localdev/
scratch/

Cargo.lock generated (1687 lines changed)

File diff suppressed because it is too large.

View File

@ -4,9 +4,7 @@ cargo-features = ["strip"]
members = [
"syndicate",
"syndicate-macros",
"syndicate-schema-plugin",
"syndicate-server",
"syndicate-tools",
]
# [patch.crates-io]
@ -26,9 +24,3 @@ strip = true
[profile.bench]
debug = true
# [patch.crates-io]
# # Unfortunately, until [1] is fixed (perhaps via [2]), we have to use a patched proc-macro2.
# # [1]: https://github.com/dtolnay/proc-macro2/issues/402
# # [2]: https://github.com/dtolnay/proc-macro2/pull/407
# proc-macro2 = { git = "https://github.com/tonyg/proc-macro2", branch = "repair_span_start_end" }

View File

@ -11,28 +11,33 @@ test:
test-all:
cargo test --all-targets
ws-bump:
cargo workspaces version \
--no-global-tag \
--individual-tag-prefix '%n-v' \
--allow-branch 'main' \
$(BUMP_ARGS)
# Try
#
# make release-minor
#
# to check things, and
#
# make release-minor RELEASE_DRY_RUN=
#
# to do things for real.
ws-publish:
cargo workspaces publish \
--from-git
RELEASE_DRY_RUN=--dry-run
release-%:
PUBLISH_GRACE_SLEEP=15 cargo release \
$(RELEASE_DRY_RUN) \
-vv --no-dev-version --exclude-unchanged \
$*
PROTOCOLS_BRANCH=main
pull-protocols:
git subtree pull -P syndicate/protocols \
-m 'Merge latest changes from the syndicate-protocols repository' \
git@git.syndicate-lang.org:syndicate-lang/syndicate-protocols \
$(PROTOCOLS_BRANCH)
main
static: static-x86_64
static-%:
CARGO_TARGET_DIR=target/target.$* cross build --target $*-unknown-linux-musl --features vendored-openssl,jemalloc
cross build --target $*-unknown-linux-musl --features vendored-openssl
###########################################################################
@ -54,27 +59,28 @@ static-%:
x86_64-binary: x86_64-binary-release
x86_64-binary-release:
CARGO_TARGET_DIR=target/target.x86_64 cross build --target x86_64-unknown-linux-musl --release --all-targets --features vendored-openssl,jemalloc
cross build --target x86_64-unknown-linux-musl --release --all-targets --features vendored-openssl
x86_64-binary-debug:
CARGO_TARGET_DIR=target/target.x86_64 cross build --target x86_64-unknown-linux-musl --all-targets --features vendored-openssl
cross build --target x86_64-unknown-linux-musl --all-targets --features vendored-openssl
armv7-binary: armv7-binary-release
armv7-binary-release:
CARGO_TARGET_DIR=target/target.armv7 cross build --target=armv7-unknown-linux-musleabihf --release --all-targets --features vendored-openssl
cross build --target=armv7-unknown-linux-musleabihf --release --all-targets --features vendored-openssl
armv7-binary-debug:
CARGO_TARGET_DIR=target/target.armv7 cross build --target=armv7-unknown-linux-musleabihf --all-targets --features vendored-openssl
cross build --target=armv7-unknown-linux-musleabihf --all-targets --features vendored-openssl
# As of 2023-05-12 (and probably earlier!) this is no longer required with current Rust nightlies
# # Hack to workaround https://github.com/rust-embedded/cross/issues/598
# HACK_WORKAROUND_ISSUE_598=CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_RUSTFLAGS="-C link-arg=/usr/local/aarch64-linux-musl/lib/libc.a"
# Hack to workaround https://github.com/rust-embedded/cross/issues/598
HACK_WORKAROUND_ISSUE_598=CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_RUSTFLAGS="-C link-arg=/usr/local/aarch64-linux-musl/lib/libc.a"
aarch64-binary: aarch64-binary-release
aarch64-binary-release:
CARGO_TARGET_DIR=target/target.aarch64 cross build --target=aarch64-unknown-linux-musl --release --all-targets --features vendored-openssl,jemalloc
$(HACK_WORKAROUND_ISSUE_598) \
cross build --target=aarch64-unknown-linux-musl --release --all-targets --features vendored-openssl
aarch64-binary-debug:
CARGO_TARGET_DIR=target/target.aarch64 cross build --target=aarch64-unknown-linux-musl --all-targets --features vendored-openssl
$(HACK_WORKAROUND_ISSUE_598) \
cross build --target=aarch64-unknown-linux-musl --all-targets --features vendored-openssl

View File

@ -23,30 +23,16 @@ A Rust implementation of:
## Quickstart
From docker or podman:
docker run -it --rm leastfixedpoint/syndicate-server /syndicate-server -p 8001
Build and run from source:
git clone https://git.syndicate-lang.org/syndicate-lang/syndicate-rs
cd syndicate-rs
cargo build --release
./target/release/syndicate-server -p 8001
If you have [`mold`](https://github.com/rui314/mold) available (`apt install mold`), you may be
able to get faster linking by creating `.cargo/config.toml` as follows:
[build]
rustflags = ["-C", "link-arg=-fuse-ld=mold"]
Enabling the `jemalloc` feature can get a *substantial* (~20%-50%) improvement in throughput.
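As a hedged aside on the `jemalloc` feature mentioned here: the optional `tikv-jemallocator` dependency appears in the syndicate-server Cargo.toml hunk further down, and the conventional way such a feature is wired into a Rust binary is a feature-gated global allocator. A sketch under that assumption (the actual wiring in syndicate-server may differ):

    // Sketch only: conventional feature-gated jemalloc setup, assuming the
    // optional `tikv-jemallocator` dependency shown in the Cargo.toml diff below.
    #[cfg(feature = "jemalloc")]
    #[global_allocator]
    static ALLOCATOR: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

    fn main() {
        // ... start the server as usual; the allocator choice applies process-wide ...
    }

Building with `cargo build --release --features jemalloc` (as the Makefile hunks above do) then links jemalloc in place of the system allocator.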
## Running the examples
In one window, start the server with a basic configuration:
In one window, start the server:
./target/release/syndicate-server -c dev-scripts/benchmark-config.pr
./target/release/syndicate-server -p 8001
Then, choose one of the examples below.
@ -84,7 +70,7 @@ about who kicks off the pingpong session.
You may find better performance by restricting the server to fewer
cores than you have available. For example, for me, running
taskset -c 0,1 ./target/release/syndicate-server -c dev-scripts/benchmark-config.pr
taskset -c 0,1 ./target/release/syndicate-server -p 8001
roughly *doubles* throughput for a single producer/consumer pair,
roughly *quadruples* throughput for a single producer/consumer pair,
on my 48-core AMD CPU.

View File

@ -1,3 +1,3 @@
let ?root_ds = dataspace
<require-service <relay-listener <tcp "0.0.0.0" 9001> $gatekeeper>>
<bind <ref { oid: "syndicate" key: #x"" }> $root_ds #f>
<require-service <relay-listener <tcp "0.0.0.0" 8001> $gatekeeper>>
<bind "syndicate" #x"" $root_ds>

View File

@ -1,7 +1,2 @@
#!/bin/sh
TASKSET='taskset -c 0,1'
if [ $(uname -s) = 'Darwin' ]
then
TASKSET=
fi
make -C ../syndicate-server binary && exec $TASKSET ../target/release/syndicate-server -c benchmark-config.pr "$@"
make -C ../syndicate-server binary && exec taskset -c 0,1 ../target/release/syndicate-server -c benchmark-config.pr "$@"

docker/.gitignore vendored (1 line changed)
View File

@ -1 +0,0 @@
syndicate-server.*

View File

@ -1,6 +0,0 @@
FROM busybox
RUN mkdir /data
ARG TARGETARCH
COPY ./syndicate-server.$TARGETARCH /syndicate-server
EXPOSE 1
CMD ["/syndicate-server", "-c", "/data", "-p", "1"]

View File

@ -1,37 +0,0 @@
U=leastfixedpoint
I=syndicate-server
ARCHITECTURES:=amd64 arm arm64
SERVERS:=$(patsubst %,syndicate-server.%,$(ARCHITECTURES))
VERSION=$(shell ./syndicate-server.$(shell ./docker-architecture $$(uname -m)) --version | cut -d' ' -f2)
all:
.PHONY: all clean image push push-only
clean:
rm -f syndicate-server.*
-podman images -q $(U)/$(I) | sort -u | xargs podman rmi -f
image: $(SERVERS)
for A in $(ARCHITECTURES); do set -x; \
podman build --platform=linux/$$A \
-t $(U)/$(I):$(VERSION)-$$A \
-t $(U)/$(I):latest-$$A \
.; \
done
rm -f tmp.image
push: image push-only
push-only:
$(patsubst %,podman push $(U)/$(I):$(VERSION)-%;,$(ARCHITECTURES))
$(patsubst %,podman push $(U)/$(I):latest-%;,$(ARCHITECTURES))
podman rmi -f $(U)/$(I):$(VERSION) $(U)/$(I):latest
podman manifest create $(U)/$(I):$(VERSION) $(patsubst %,$(U)/$(I):$(VERSION)-%,$(ARCHITECTURES))
podman manifest create $(U)/$(I):latest $(patsubst %,$(U)/$(I):latest-%,$(ARCHITECTURES))
podman manifest push $(U)/$(I):$(VERSION)
podman manifest push $(U)/$(I):latest
syndicate-server.%:
make -C .. $$(./alpine-architecture $*)-binary-release
cp -a ../target/target.$$(./alpine-architecture $*)/$$(./alpine-architecture $*)-unknown-linux-musl*/release/syndicate-server $@

View File

@ -1,9 +0,0 @@
# Docker images for syndicate-server
Build using podman:
apt install podman
and at least until the dependencies are fixed (?),
apt install uidmap slirp4netns

View File

@ -1,6 +0,0 @@
#!/bin/sh
case $1 in
amd64) echo x86_64;;
arm) echo armv7;;
arm64) echo aarch64;;
esac

View File

@ -1,6 +0,0 @@
#!/bin/sh
case $1 in
x86_64) echo amd64;;
armv7) echo arm;;
aarch64) echo arm64;;
esac

View File

@ -1,9 +0,0 @@
version: "3"
services:
syndicate:
image: leastfixedpoint/syndicate-server
ports:
- "1:1"
volumes:
- "/etc/syndicate:/data"

View File

@ -1,12 +0,0 @@
#!/bin/sh
buildtag() {
name=$(grep '^name' "$1" | head -1 | sed -e 's:^.*"\([^"]*\)":\1:')
version=$(grep '^version' "$1" | head -1 | sed -e 's:^.*"\([^"]*\)":\1:')
echo "$name-v$version"
}
git tag "$(buildtag syndicate/Cargo.toml)"
git tag "$(buildtag syndicate-macros/Cargo.toml)"
git tag "$(buildtag syndicate-server/Cargo.toml)"
git tag "$(buildtag syndicate-tools/Cargo.toml)"

View File

@ -1,152 +0,0 @@
# We will create a TCP listener on port 9222, which speaks unencrypted
# protocol and allows interaction with the default/system gatekeeper, which
# has a single noise binding for introducing encrypted interaction with a
# *second* gatekeeper, which finally allows resolution of references to
# other objects.
# First, build a space where we place bindings for the inner gatekeeper to
# expose.
let ?inner-bindings = dataspace
# Next, start the inner gatekeeper.
<require-service <gatekeeper $inner-bindings>>
? <service-object <gatekeeper $inner-bindings> ?inner-gatekeeper> [
# Expose it via a noise binding at the outer/system gatekeeper.
<bind <noise { key: #[z1w/OLy0wi3Veyk8/D+2182YxcrKpgc8y0ZJEBDrmWs],
secretKey: #[qLkyuJw/K4yobr4XVKExbinDwEx9QTt9PfDWyx14/kg],
service: world }>
$inner-gatekeeper #f>
]
# Now, expose the outer gatekeeper to the world, via TCP. The system
# gatekeeper is a primordial syndicate-server object bound to $gatekeeper.
<require-service <relay-listener <tcp "0.0.0.0" 9222> $gatekeeper>>
# Finally, let's expose some behaviour accessible via the inner gatekeeper.
#
# We will create a service dataspace called $world.
let ?world = dataspace
# Running `syndicate-macaroon mint --oid a-service --phrase hello` yields:
#
# <ref {oid: a-service, sig: #[JTTGQeYCgohMXW/2S2XH8g]}>
#
# That's a root capability for the service. We use the corresponding
# sturdy.SturdyDescriptionDetail to bind it to $world.
#
$inner-bindings += <bind <ref {oid: a-service, key: #"hello"}>
$world #f>
# Now, we can hand out paths to our services involving an initial noise
# step and a subsequent sturdyref/macaroon step.
#
# For example, running `syndicate-macaroon` like this:
#
# syndicate-macaroon mint --oid a-service --phrase hello \
# --caveat '<rewrite <bind <_>> <rec labelled [<lit "alice"> <ref 0>]>>'
#
# generates
#
# <ref {caveats: [<rewrite <bind <_>> <rec labelled [<lit "alice">, <ref 0>]>>],
# oid: a-service,
# sig: #[CXn7+rAoO3Xr6Y6Laap3OA]}>
#
# which is an attenuation of the root capability we bound that wraps all
# assertions and messages in a `<labelled "alice" _>` wrapper.
#
# All together, the `gatekeeper.Route` that Alice would use would be
# something like:
#
# <route [<ws "wss://generic-dataspace.demo.leastfixedpoint.com/">]
# <noise { key: #[z1w/OLy0wi3Veyk8/D+2182YxcrKpgc8y0ZJEBDrmWs],
# service: world }>
# <ref { caveats: [<rewrite <bind <_>> <rec labelled [<lit "alice">, <ref 0>]>>],
# oid: a-service,
# sig: #[CXn7+rAoO3Xr6Y6Laap3OA] }>>
#
# Here's one for "bob":
#
# syndicate-macaroon mint --oid a-service --phrase hello \
# --caveat '<rewrite <bind <_>> <rec labelled [<lit "bob"> <ref 0>]>>'
#
# <ref {caveats: [<rewrite <bind <_>> <rec labelled [<lit "bob">, <ref 0>]>>],
# oid: a-service,
# sig: #[/75BbF77LOiqNcvpzNHf0g]}>
#
# <route [<ws "wss://generic-dataspace.demo.leastfixedpoint.com/">]
# <noise { key: #[z1w/OLy0wi3Veyk8/D+2182YxcrKpgc8y0ZJEBDrmWs],
# service: world }>
# <ref { caveats: [<rewrite <bind <_>> <rec labelled [<lit "bob">, <ref 0>]>>],
# oid: a-service,
# sig: #[/75BbF77LOiqNcvpzNHf0g] }>>
#
# We relay labelled to unlabelled information, enacting a chat protocol
# that enforces usernames.
$world [
# Assertions of presence have the username wiped out and replaced with the label.
? <labelled ?who <Present _>> <Present $who>
# Likewise utterance messages.
?? <labelled ?who <Says _ ?what>> ! <Says $who $what>
# We allow anyone to subscribe to presence and utterances.
? <labelled _ <Observe <rec Present ?p> ?o>> <Observe <rec Present $p> $o>
? <labelled _ <Observe <rec Says ?p> ?o>> <Observe <rec Says $p> $o>
]
# We can also use sturdyref rewrites to directly handle `Says` and
# `Present` values, rather than wrapping with `<labelled ...>` and
# unwrapping using the script fragment just above.
#
# The multiply-quoted patterns in the `Observe` cases start to get unwieldy
# at this point!
#
# For Alice:
#
# syndicate-macaroon mint --oid a-service --phrase hello --caveat '<or [
# <rewrite <rec Present [<_>]> <rec Present [<lit "alice">]>>
# <rewrite <rec Says [<_> <bind String>]> <rec Says [<lit "alice"> <ref 0>]>>
# <rewrite <bind <rec Observe [<rec rec [<lit Present> <_>]> <_>]>> <ref 0>>
# <rewrite <bind <rec Observe [<rec rec [<lit Says> <_>]> <_>]>> <ref 0>>
# ]>'
#
# <ref { oid: a-service sig: #[s918Jk6As8AWJ9rtozOTlg] caveats: [<or [
# <rewrite <rec Present [<_>]> <rec Present [<lit "alice">]>>
# <rewrite <rec Says [<_>, <bind String>]> <rec Says [<lit "alice">, <ref 0>]>>
# <rewrite <bind <rec Observe [<rec rec [<lit Present>, <_>]>, <_>]>> <ref 0>>
# <rewrite <bind <rec Observe [<rec rec [<lit Says>, <_>]>, <_>]>> <ref 0>> ]>]}>
#
# <route [<ws "wss://generic-dataspace.demo.leastfixedpoint.com/">]
# <noise { key: #[z1w/OLy0wi3Veyk8/D+2182YxcrKpgc8y0ZJEBDrmWs],
# service: world }>
# <ref { oid: a-service sig: #[s918Jk6As8AWJ9rtozOTlg] caveats: [<or [
# <rewrite <rec Present [<_>]> <rec Present [<lit "alice">]>>
# <rewrite <rec Says [<_>, <bind String>]> <rec Says [<lit "alice">, <ref 0>]>>
# <rewrite <bind <rec Observe [<rec rec [<lit Present>, <_>]>, <_>]>> <ref 0>>
# <rewrite <bind <rec Observe [<rec rec [<lit Says>, <_>]>, <_>]>> <ref 0>> ]>]}>>
#
# For Bob:
#
# syndicate-macaroon mint --oid a-service --phrase hello --caveat '<or [
# <rewrite <rec Present [<_>]> <rec Present [<lit "bob">]>>
# <rewrite <rec Says [<_> <bind String>]> <rec Says [<lit "bob"> <ref 0>]>>
# <rewrite <bind <rec Observe [<rec rec [<lit Present> <_>]> <_>]>> <ref 0>>
# <rewrite <bind <rec Observe [<rec rec [<lit Says> <_>]> <_>]>> <ref 0>>
# ]>'
#
# <ref { oid: a-service sig: #[QBbV4LrS0i3BG6OyCPJl+A] caveats: [<or [
# <rewrite <rec Present [<_>]> <rec Present [<lit "bob">]>>
# <rewrite <rec Says [<_>, <bind String>]> <rec Says [<lit "bob">, <ref 0>]>>
# <rewrite <bind <rec Observe [<rec rec [<lit Present>, <_>]>, <_>]>> <ref 0>>
# <rewrite <bind <rec Observe [<rec rec [<lit Says>, <_>]>, <_>]>> <ref 0>> ]>]}>
#
# <route [<ws "wss://generic-dataspace.demo.leastfixedpoint.com/">]
# <noise { key: #[z1w/OLy0wi3Veyk8/D+2182YxcrKpgc8y0ZJEBDrmWs],
# service: world }>
# <ref { oid: a-service sig: #[QBbV4LrS0i3BG6OyCPJl+A] caveats: [<or [
# <rewrite <rec Present [<_>]> <rec Present [<lit "bob">]>>
# <rewrite <rec Says [<_>, <bind String>]> <rec Says [<lit "bob">, <ref 0>]>>
# <rewrite <bind <rec Observe [<rec rec [<lit Present>, <_>]>, <_>]>> <ref 0>>
# <rewrite <bind <rec Observe [<rec rec [<lit Says>, <_>]>, <_>]>> <ref 0>> ]>]}>>

View File

@ -1,65 +0,0 @@
# We use $root_ds as the httpd space.
let ?root_ds = dataspace
# Supplying $root_ds as the last parameter in this relay-listener enables httpd service.
<require-service <relay-listener <tcp "0.0.0.0" 9001> $gatekeeper $root_ds>>
# Regular gatekeeper stuff works too.
<bind <ref { oid: "syndicate" key: #x"" }> $root_ds #f>
# Create an httpd router monitoring $root_ds for requests and bind requests.
<require-service <http-router $root_ds>>
# Create a static file server. When it gets a request, it ignores the first n (here, 1)
# elements of the path, and takes the remainder as relative to its configured directory (here,
# ".").
#
<require-service <http-static-files "." 1>>
#
# It publishes a service object: requests should be asserted to this.
# The http-bind record establishes this mapping.
#
? <service-object <http-static-files "." 1> ?handler> [
$root_ds += <http-bind #f 9001 get ["files" ...] $handler>
]
# Separately, bind path /d to $index, and respond there.
#
let ?index = dataspace
$root_ds += <http-bind #f 9001 get ["d"] $index>
$index ? <request _ ?k> [
$k ! <status 200 "OK">
$k ! <header content-type "text/html">
$k ! <chunk "<!DOCTYPE html>">
$k ! <done "<html><body>D</body></html>">
]
# Similarly, bind three paths, /d, /e and /t to $index2
# Because /d doubles up, the httpd router gives a warning when it is accessed.
# Accessing /e works fine.
# Accessing /t results in wasted work because of the hijacking listeners below.
#
let ?index2 = dataspace
$root_ds += <http-bind #f 9001 get ["d"] $index2>
$root_ds += <http-bind #f 9001 get ["e"] $index2>
$root_ds += <http-bind #f 9001 get ["t"] $index2>
$index2 ? <request _ ?k> [
$k ! <status 200 "OK">
$k ! <header content-type "text/html">
$k ! <chunk "<!DOCTYPE html>">
$k ! <done "<html><body>D2</body></html>">
]
# These two hijack /t by listening for raw incoming requests the same way the httpd router
# does. They respond quicker and so win the race. The httpd router's responses are lost.
#
$root_ds ? <request <http-request _ _ _ get ["t"] _ _ _> ?k> [
$k ! <status 200 "OK">
$k ! <header content-type "text/html">
$k ! <done "<html><body>T</body></html>">
]
$root_ds ? <request <http-request _ _ _ get ["t"] _ _ _> ?k> [
$k ! <status 200 "OK">
$k ! <header content-type "text/html">
$k ! <done "<html><body>T2</body></html>">
]

View File

@ -1,6 +1,6 @@
[package]
name = "syndicate-macros"
version = "0.32.0"
version = "0.15.0"
authors = ["Tony Garnock-Jones <tonyg@leastfixedpoint.com>"]
edition = "2018"
@ -13,15 +13,12 @@ license = "Apache-2.0"
proc-macro = true
[dependencies]
syndicate = { path = "../syndicate", version = "0.40.0"}
syndicate = { path = "../syndicate", version = "^0.20.0"}
proc-macro2 = { version = "^1.0", features = ["span-locations"] }
quote = "^1.0"
syn = { version = "^1.0", features = ["extra-traits"] } # for impl Debug for syn::Expr
syn = "^1.0"
[dev-dependencies]
tokio = { version = "1.10", features = ["io-std"] }
tracing = "0.1"
[package.metadata.workspaces]
independent = true

View File

@ -6,13 +6,13 @@ use syndicate::schemas::dataspace::Observe;
use syndicate::value::NestedValue;
#[tokio::main]
async fn main() -> ActorResult {
async fn main() -> Result<(), Box<dyn std::error::Error>> {
syndicate::convenient_logging()?;
Actor::top(None, |t| {
let ds = Cap::new(&t.create(Dataspace::new(None)));
Actor::new(None).boot(tracing::Span::current(), |t| {
let ds = Cap::new(&t.create(Dataspace::new()));
let _ = t.prevent_inert_check();
t.spawn(Some(AnyValue::symbol("box")), enclose!((ds) move |t| {
t.spawn(syndicate::name!("box"), enclose!((ds) move |t| {
let current_value = t.named_field("current_value", 0u64);
t.dataflow({
@ -49,7 +49,7 @@ async fn main() -> ActorResult {
Ok(())
}));
t.spawn(Some(AnyValue::symbol("client")), enclose!((ds) move |t| {
t.spawn(syndicate::name!("client"), enclose!((ds) move |t| {
let box_state_handler = syndicate::entity(0u32)
.on_asserted(enclose!((ds) move |count, t, captures: AnyValue| {
*count = *count + 1;

View File

@ -1,133 +0,0 @@
use syndicate::actor::*;
use std::env;
use std::sync::Arc;
#[derive(Debug)]
enum Instruction {
SetPeer(Arc<Ref<Instruction>>),
HandleMessage(u64),
}
struct Forwarder {
hop_limit: u64,
supervisor: Arc<Ref<Instruction>>,
peer: Option<Arc<Ref<Instruction>>>,
}
impl Drop for Forwarder {
fn drop(&mut self) {
let r = self.peer.take();
let _ = tokio::spawn(async move {
drop(r);
});
}
}
impl Entity<Instruction> for Forwarder {
fn message(&mut self, turn: &mut Activation, message: Instruction) -> ActorResult {
match message {
Instruction::SetPeer(r) => {
tracing::info!("Setting peer {:?}", r);
self.peer = Some(r);
}
Instruction::HandleMessage(n) => {
let target = if n >= self.hop_limit { &self.supervisor } else { self.peer.as_ref().expect("peer") };
turn.message(target, Instruction::HandleMessage(n + 1));
}
}
Ok(())
}
}
struct Supervisor {
latency_mode: bool,
total_transfers: u64,
remaining_to_receive: u32,
start_time: Option<std::time::Instant>,
}
impl Entity<Instruction> for Supervisor {
fn message(&mut self, turn: &mut Activation, message: Instruction) -> ActorResult {
match message {
Instruction::SetPeer(_) => {
tracing::info!("Start");
self.start_time = Some(std::time::Instant::now());
},
Instruction::HandleMessage(_n) => {
self.remaining_to_receive -= 1;
if self.remaining_to_receive == 0 {
let stop_time = std::time::Instant::now();
let duration = stop_time - self.start_time.unwrap();
tracing::info!("Stop after {:?}; {:?} messages, so {:?} Hz ({} mode)",
duration,
self.total_transfers,
(1000.0 * self.total_transfers as f64) / duration.as_millis() as f64,
if self.latency_mode { "latency" } else { "throughput" });
turn.stop_root();
}
},
}
Ok(())
}
}
#[tokio::main]
async fn main() -> ActorResult {
syndicate::convenient_logging()?;
Actor::top(None, |t| {
let args: Vec<String> = env::args().collect();
let n_actors: u32 = args.get(1).unwrap_or(&"1000000".to_string()).parse()?;
let n_rounds: u32 = args.get(2).unwrap_or(&"200".to_string()).parse()?;
let latency_mode: bool = match args.get(3).unwrap_or(&"throughput".to_string()).as_str() {
"latency" => true,
"throughput" => false,
_other => return Err("Invalid throughput/latency mode".into()),
};
tracing::info!("Will run {:?} actors for {:?} rounds", n_actors, n_rounds);
let total_transfers: u64 = n_actors as u64 * n_rounds as u64;
let (hop_limit, injection_count) = if latency_mode {
(total_transfers, 1)
} else {
(n_rounds as u64, n_actors)
};
let me = t.create(Supervisor {
latency_mode,
total_transfers,
remaining_to_receive: injection_count,
start_time: None,
});
let mut forwarders: Vec<Arc<Ref<Instruction>>> = Vec::new();
for _i in 0 .. n_actors {
if _i % 10000 == 0 { tracing::info!("Actor {:?}", _i); }
forwarders.push(
t.spawn_for_entity(None, true, Box::new(
Forwarder {
hop_limit,
supervisor: me.clone(),
peer: forwarders.last().cloned(),
}))
.0.expect("an entity"));
}
t.message(&forwarders[0], Instruction::SetPeer(forwarders.last().expect("an entity").clone()));
t.later(move |t| {
t.message(&me, Instruction::SetPeer(me.clone()));
t.later(move |t| {
let mut injected: u32 = 0;
for f in forwarders.into_iter() {
if injected >= injection_count {
break;
}
t.message(&f, Instruction::HandleMessage(0));
injected += 1;
}
Ok(())
});
Ok(())
});
Ok(())
}).await??;
Ok(())
}

View File

@ -1,175 +0,0 @@
use std::env;
use std::sync::Arc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
use tokio::sync::mpsc::{unbounded_channel, UnboundedSender};
type Ref<T> = UnboundedSender<Box<T>>;
#[derive(Debug)]
enum Instruction {
SetPeer(Arc<Ref<Instruction>>),
HandleMessage(u64),
}
struct Forwarder {
hop_limit: u64,
supervisor: Arc<Ref<Instruction>>,
peer: Option<Arc<Ref<Instruction>>>,
}
impl Drop for Forwarder {
fn drop(&mut self) {
let r = self.peer.take();
let _ = tokio::spawn(async move {
drop(r);
});
}
}
enum Action { Continue, Stop }
trait Actor<T> {
fn message(&mut self, message: T) -> Action;
}
fn send<T: std::marker::Send + 'static>(ch: &Arc<Ref<T>>, message: T) -> () {
match ch.send(Box::new(message)) {
Ok(()) => (),
Err(v) => panic!("Aiee! Could not send {:?}", v),
}
}
fn spawn<T: std::marker::Send + 'static, R: Actor<T> + std::marker::Send + 'static>(rt: Option<Arc<AtomicU64>>, mut ac: R) -> Arc<Ref<T>> {
let (tx, mut rx) = unbounded_channel::<Box<T>>();
if let Some(ref c) = rt {
c.fetch_add(1, Ordering::SeqCst);
}
tokio::spawn(async move {
loop {
match rx.recv().await {
None => break,
Some(message) => {
match ac.message(*message) {
Action::Continue => continue,
Action::Stop => break,
}
}
}
}
if let Some(c) = rt {
c.fetch_sub(1, Ordering::SeqCst);
}
});
Arc::new(tx)
}
impl Actor<Instruction> for Forwarder {
fn message(&mut self, message: Instruction) -> Action {
match message {
Instruction::SetPeer(r) => {
tracing::info!("Setting peer {:?}", r);
self.peer = Some(r);
}
Instruction::HandleMessage(n) => {
let target = if n >= self.hop_limit { &self.supervisor } else { self.peer.as_ref().expect("peer") };
send(target, Instruction::HandleMessage(n + 1));
}
}
Action::Continue
}
}
struct Supervisor {
latency_mode: bool,
total_transfers: u64,
remaining_to_receive: u32,
start_time: Option<std::time::Instant>,
}
impl Actor<Instruction> for Supervisor {
fn message(&mut self, message: Instruction) -> Action {
match message {
Instruction::SetPeer(_) => {
tracing::info!("Start");
self.start_time = Some(std::time::Instant::now());
},
Instruction::HandleMessage(_n) => {
self.remaining_to_receive -= 1;
if self.remaining_to_receive == 0 {
let stop_time = std::time::Instant::now();
let duration = stop_time - self.start_time.unwrap();
tracing::info!("Stop after {:?}; {:?} messages, so {:?} Hz ({} mode)",
duration,
self.total_transfers,
(1000.0 * self.total_transfers as f64) / duration.as_millis() as f64,
if self.latency_mode { "latency" } else { "throughput" });
return Action::Stop;
}
},
}
Action::Continue
}
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + std::marker::Send + std::marker::Sync>> {
syndicate::convenient_logging()?;
let args: Vec<String> = env::args().collect();
let n_actors: u32 = args.get(1).unwrap_or(&"1000000".to_string()).parse()?;
let n_rounds: u32 = args.get(2).unwrap_or(&"200".to_string()).parse()?;
let latency_mode: bool = match args.get(3).unwrap_or(&"throughput".to_string()).as_str() {
"latency" => true,
"throughput" => false,
_other => return Err("Invalid throughput/latency mode".into()),
};
tracing::info!("Will run {:?} actors for {:?} rounds", n_actors, n_rounds);
let count = Arc::new(AtomicU64::new(0));
let total_transfers: u64 = n_actors as u64 * n_rounds as u64;
let (hop_limit, injection_count) = if latency_mode {
(total_transfers, 1)
} else {
(n_rounds as u64, n_actors)
};
let me = spawn(Some(count.clone()), Supervisor {
latency_mode,
total_transfers,
remaining_to_receive: injection_count,
start_time: None,
});
let mut forwarders: Vec<Arc<Ref<Instruction>>> = Vec::new();
for _i in 0 .. n_actors {
if _i % 10000 == 0 { tracing::info!("Actor {:?}", _i); }
forwarders.push(spawn(None, Forwarder {
hop_limit,
supervisor: me.clone(),
peer: forwarders.last().cloned(),
}));
}
send(&forwarders[0], Instruction::SetPeer(forwarders.last().expect("an entity").clone()));
send(&me, Instruction::SetPeer(me.clone()));
let mut injected: u32 = 0;
for f in forwarders.into_iter() {
if injected >= injection_count {
break;
}
send(&f, Instruction::HandleMessage(0));
injected += 1;
}
loop {
if count.load(Ordering::SeqCst) == 0 {
break;
}
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
}
Ok(())
}

View File

@ -27,7 +27,6 @@ use pat::lit;
enum SymbolVariant<'a> {
Normal(&'a str),
#[allow(dead_code)] // otherwise we get 'warning: field `0` is never read'
Binder(&'a str),
Substitution(&'a str),
Discard,
@ -36,7 +35,7 @@ enum SymbolVariant<'a> {
fn compile_sequence_members(vs: &[IOValue]) -> Vec<TokenStream> {
vs.iter().enumerate().map(|(i, f)| {
let p = compile_pattern(f);
quote!((syndicate::value::Value::from(#i).wrap(), #p))
quote!((#i .into(), #p))
}).collect::<Vec<_>>()
}
@ -80,6 +79,10 @@ impl ValueCompiler {
match v.value() {
Value::Boolean(b) =>
quote!(#V_::Value::from(#b).wrap()),
Value::Float(f) => {
let f = f.0;
quote!(#V_::Value::from(#f).wrap())
}
Value::Double(d) => {
let d = d.0;
quote!(#V_::Value::from(#d).wrap())
@ -151,14 +154,16 @@ fn compile_pattern(v: &IOValue) -> TokenStream {
#[allow(non_snake_case)]
let V_: TokenStream = quote!(syndicate::value);
#[allow(non_snake_case)]
let MapFrom_: TokenStream = quote!(<#V_::Map<_, _>>::from);
let MapFromIterator_: TokenStream = quote!(<#V_::Map<_, _> as std::iter::FromIterator<_>>::from_iter);
match v.value() {
Value::Symbol(s) => match analyze_symbol(&s, true) {
SymbolVariant::Binder(_) =>
quote!(#P_::Pattern::Bind{ pattern: Box::new(#P_::Pattern::Discard) }),
quote!(#P_::Pattern::DBind(Box::new(#P_::DBind {
pattern: #P_::Pattern::DDiscard(Box::new(#P_::DDiscard))
}))),
SymbolVariant::Discard =>
quote!(#P_::Pattern::Discard),
quote!(#P_::Pattern::DDiscard(Box::new(#P_::DDiscard))),
SymbolVariant::Substitution(s) =>
lit(Ident::new(s, Span::call_site())),
SymbolVariant::Normal(_) =>
@ -170,7 +175,9 @@ fn compile_pattern(v: &IOValue) -> TokenStream {
Some(label) =>
if label.starts_with("$") && r.arity() == 1 {
let nested = compile_pattern(&r.fields()[0]);
quote!(#P_::Pattern::Bind{ pattern: Box::new(#nested) })
quote!(#P_::Pattern::DBind(Box::new(#P_::DBind {
pattern: #nested
})))
} else {
let label_stx = if label.starts_with("=") {
let id = Ident::new(&label[1..], Span::call_site());
@ -179,19 +186,18 @@ fn compile_pattern(v: &IOValue) -> TokenStream {
quote!(#V_::Value::symbol(#label).wrap())
};
let members = compile_sequence_members(r.fields());
quote!(#P_::Pattern::Group {
type_: Box::new(#P_::GroupType::Rec { label: #label_stx }),
entries: #MapFrom_([#(#members),*]),
})
quote!(#P_::Pattern::DCompound(Box::new(#P_::DCompound::Rec {
label: #label_stx,
fields: vec![#(#members),*],
})))
}
}
}
Value::Sequence(vs) => {
let members = compile_sequence_members(vs);
quote!(#P_::Pattern::Group {
type_: Box::new(#P_::GroupType::Arr),
entries: #MapFrom_([#(#members),*]),
})
quote!(#P_::Pattern::DCompound(Box::new(#P_::DCompound::Arr {
items: vec![#(#members),*],
})))
}
Value::Set(_) =>
panic!("Cannot match sets in patterns"),
@ -201,10 +207,9 @@ fn compile_pattern(v: &IOValue) -> TokenStream {
let v = compile_pattern(v);
quote!((#k, #v))
}).collect::<Vec<_>>();
quote!(#P_::Pattern::Group {
type_: Box::new(#P_::GroupType::Dict),
entries: #MapFrom_([#(#members),*]),
})
quote!(#P_::Pattern::DCompound(Box::new(#P_::DCompound::Dict {
entries: #MapFromIterator_(vec![#(#members),*])
})))
}
_ => lit(ValueCompiler::for_patterns().compile(v)),
}

View File

@ -15,9 +15,10 @@ pub fn lit<T: ToTokens>(e: T) -> TokenStream2 {
}
fn compile_sequence_members(stxs: &Vec<Stx>) -> Result<Vec<TokenStream2>, &'static str> {
stxs.iter().enumerate().map(|(i, stx)| {
let p = to_pattern_expr(stx)?;
Ok(quote!((syndicate::value::Value::from(#i).wrap(), #p)))
stxs.iter().map(|stx| {
// let p = to_pattern_expr(stx)?;
// Ok(quote!(#p))
to_pattern_expr(stx)
}).collect()
}
@ -27,7 +28,7 @@ pub fn to_pattern_expr(stx: &Stx) -> Result<TokenStream2, &'static str> {
#[allow(non_snake_case)]
let V_: TokenStream2 = quote!(syndicate::value);
#[allow(non_snake_case)]
let MapFrom_: TokenStream2 = quote!(<#V_::Map<_, _>>::from);
let MapFromIterator_: TokenStream2 = quote!(<#V_::Map<_, _> as std::iter::FromIterator<_>>::from_iter);
match stx {
Stx::Atom(v) =>
@ -40,27 +41,26 @@ pub fn to_pattern_expr(stx: &Stx) -> Result<TokenStream2, &'static str> {
None => to_pattern_expr(&Stx::Discard)?,
}
};
Ok(quote!(#P_::Pattern::Bind { pattern: Box::new(#inner_pat_expr) }))
Ok(quote!(#P_::Pattern::DBind(Box::new(#P_::DBind { pattern: #inner_pat_expr }))))
}
Stx::Subst(e) =>
Ok(lit(e)),
Stx::Discard =>
Ok(quote!(#P_::Pattern::Discard)),
Ok(quote!(#P_::Pattern::DDiscard(Box::new(#P_::DDiscard)))),
Stx::Rec(l, fs) => {
let label = to_value_expr(&*l)?;
let members = compile_sequence_members(fs)?;
Ok(quote!(#P_::Pattern::Group {
type_: Box::new(#P_::GroupType::Rec { label: #label }),
entries: #MapFrom_([#(#members),*]),
}))
Ok(quote!(#P_::Pattern::DCompound(Box::new(#P_::DCompound::Rec {
label: #label,
fields: vec![#(#members),*],
}))))
},
Stx::Seq(stxs) => {
let members = compile_sequence_members(stxs)?;
Ok(quote!(#P_::Pattern::Group {
type_: Box::new(#P_::GroupType::Arr),
entries: #MapFrom_([#(#members),*]),
}))
Ok(quote!(#P_::Pattern::DCompound(Box::new(#P_::DCompound::Arr {
items: vec![#(#members),*],
}))))
}
Stx::Set(_stxs) =>
Err("Set literals not supported in patterns"),
@ -70,10 +70,9 @@ pub fn to_pattern_expr(stx: &Stx) -> Result<TokenStream2, &'static str> {
let v = to_pattern_expr(v)?;
Ok(quote!((#k, #v)))
}).collect::<Result<Vec<_>, &'static str>>()?;
Ok(quote!(#P_::Pattern::Group {
type_: Box::new(#P_::GroupType::Dict),
entries: #MapFrom_([#(#members),*])
}))
Ok(quote!(#P_::Pattern::DCompound(Box::new(#P_::DCompound::Dict {
entries: #MapFromIterator_(vec![#(#members),*])
}))))
}
}
}

View File

@ -1,6 +1,5 @@
use proc_macro2::Delimiter;
use proc_macro2::LineColumn;
use proc_macro2::Span;
use proc_macro2::TokenStream;
use syn::ExprLit;
@ -15,6 +14,7 @@ use syn::parse::Parser;
use syn::parse::ParseStream;
use syn::parse_str;
use syndicate::value::Float;
use syndicate::value::Double;
use syndicate::value::IOValue;
use syndicate::value::NestedValue;
@ -70,41 +70,24 @@ fn punct_char(c: Cursor) -> Option<(char, Cursor)> {
c.punct().map(|(p, c)| (p.as_char(), c))
}
fn start_pos(s: Span) -> LineColumn {
// We would like to write
// s.start()
// here, but until [1] is fixed (perhaps via [2]), we have to go the unsafe route
// and assume we are in procedural macro context.
// [1]: https://github.com/dtolnay/proc-macro2/issues/402
// [2]: https://github.com/dtolnay/proc-macro2/pull/407
let u = s.unwrap().start();
LineColumn { column: u.column(), line: u.line() }
}
fn end_pos(s: Span) -> LineColumn {
// See start_pos
let u = s.unwrap().end();
LineColumn { column: u.column(), line: u.line() }
}
fn parse_id(mut c: Cursor) -> Result<(String, Cursor)> {
let mut id = String::new();
let mut prev_pos = start_pos(c.span());
let mut prev_pos = c.span().start();
loop {
if c.eof() || start_pos(c.span()) != prev_pos {
if c.eof() || c.span().start() != prev_pos {
return Ok((id, c));
} else if let Some((p, next)) = c.punct() {
match p.as_char() {
'<' | '>' | '(' | ')' | '{' | '}' | '[' | ']' | ',' | ':' => return Ok((id, c)),
ch => {
id.push(ch);
prev_pos = end_pos(c.span());
prev_pos = c.span().end();
c = next;
}
}
} else if let Some((i, next)) = c.ident() {
id.push_str(&i.to_string());
prev_pos = end_pos(i.span());
prev_pos = i.span().end();
c = next;
} else {
return Ok((id, c));
@ -143,7 +126,7 @@ fn skip_commas(mut c: Cursor) -> Cursor {
}
}
fn parse_group<'c, R, F: Fn(Cursor<'c>) -> Result<(R, Cursor<'c>)>>(
fn parse_group_inner<'c, R, F: Fn(Cursor<'c>) -> Result<(R, Cursor<'c>)>>(
mut c: Cursor<'c>,
f: F,
after: Cursor<'c>,
@ -160,6 +143,15 @@ fn parse_group<'c, R, F: Fn(Cursor<'c>) -> Result<(R, Cursor<'c>)>>(
}
}
fn parse_group<'c, R, F: Fn(Cursor<'c>) -> Result<(R, Cursor<'c>)>>(
d: Delimiter,
f: F,
c: Cursor<'c>,
) -> Result<(Vec<R>, Cursor<'c>)> {
let (inner, _, after) = c.group(d).unwrap();
parse_group_inner(inner, f, after)
}
fn parse_kv(c: Cursor) -> Result<((Stx, Stx), Cursor)> {
let (k, c) = parse1(c)?;
if let Some((':', c)) = punct_char(c) {
@ -170,7 +162,7 @@ fn parse_kv(c: Cursor) -> Result<((Stx, Stx), Cursor)> {
}
fn adjacent_ident(pos: LineColumn, c: Cursor) -> (Option<Ident>, Cursor) {
if start_pos(c.span()) != pos {
if c.span().start() != pos {
(None, c)
} else if let Some((id, next)) = c.ident() {
(Some(id), next)
@ -194,8 +186,8 @@ fn parse_generic<T: Parse>(mut c: Cursor) -> Option<(T, Cursor)> {
// OK, because parse2 checks for end-of-stream, let's chop
// the input at the position of the error and try again (!).
let mut collected = Vec::new();
let upto = start_pos(e.span());
while !c.eof() && start_pos(c.span()) != upto {
let upto = e.span().start();
while !c.eof() && c.span().start() != upto {
let (tt, next) = c.token_tree().unwrap();
collected.push(tt);
c = next;
@ -216,8 +208,10 @@ fn parse1(c: Cursor) -> Result<(Stx, Cursor)> {
} else {
Ok((Stx::Rec(Box::new(q.remove(0)), q), c))
}),
'{' => parse_group(Delimiter::Brace, parse_kv, c).map(|(q,c)| (Stx::Dict(q),c)),
'[' => parse_group(Delimiter::Bracket, parse1, c).map(|(q,c)| (Stx::Seq(q),c)),
'$' => {
let (maybe_id, next) = adjacent_ident(end_pos(p.span()), next);
let (maybe_id, next) = adjacent_ident(p.span().end(), next);
let (maybe_type, next) = if let Some((':', next)) = punct_char(next) {
match parse_generic::<Type>(next) {
Some((t, next)) => (Some(t), next),
@ -235,7 +229,7 @@ fn parse1(c: Cursor) -> Result<(Stx, Cursor)> {
}
'#' => {
if let Some((inner, _, next)) = next.group(Delimiter::Brace) {
parse_group(inner, parse1, next).map(|(q,c)| (Stx::Set(q),c))
parse_group_inner(inner, parse1, next).map(|(q,c)| (Stx::Set(q),c))
} else if let Some((inner, _, next)) = next.group(Delimiter::Parenthesis) {
Ok((Stx::Subst(inner.token_stream()), next))
} else if let Some((tt, next)) = next.token_tree() {
@ -265,7 +259,7 @@ fn parse1(c: Cursor) -> Result<(Stx, Cursor)> {
IOValue::new(i.base10_parse::<i128>()?)
}
Lit::Float(f) => if f.suffix() == "f32" {
IOValue::new(&Double(f.base10_parse::<f32>()? as f64))
IOValue::new(&Float(f.base10_parse::<f32>()?))
} else {
IOValue::new(&Double(f.base10_parse::<f64>()?))
}
@ -273,10 +267,6 @@ fn parse1(c: Cursor) -> Result<(Stx, Cursor)> {
Lit::Verbatim(_) => return Err(Error::new(c.span(), "Verbatim literals not supported")),
};
Ok((Stx::Atom(v), next))
} else if let Some((inner, _, after)) = c.group(Delimiter::Brace) {
parse_group(inner, parse_kv, after).map(|(q,c)| (Stx::Dict(q),c))
} else if let Some((inner, _, after)) = c.group(Delimiter::Bracket) {
parse_group(inner, parse1, after).map(|(q,c)| (Stx::Seq(q),c))
} else {
Err(Error::new(c.span(), "Unexpected input"))
}

View File

@ -50,6 +50,10 @@ pub fn value_to_value_expr(v: &IOValue) -> TokenStream2 {
match v.value() {
Value::Boolean(b) =>
quote!(#V_::Value::from(#b).wrap()),
Value::Float(f) => {
let f = f.0;
quote!(#V_::Value::from(#f).wrap())
}
Value::Double(d) => {
let d = d.0;
quote!(#V_::Value::from(#d).wrap())

View File

@ -1,15 +0,0 @@
{
"folders": [
{
"path": "."
},
{
"path": "../syndicate-protocols"
}
],
"settings": {
"files.exclude": {
"target": true
}
}
}

View File

@ -1,19 +0,0 @@
[package]
name = "syndicate-schema-plugin"
version = "0.9.0"
authors = ["Tony Garnock-Jones <tonyg@leastfixedpoint.com>"]
edition = "2018"
description = "Support for using Preserves Schema with Syndicate macros."
homepage = "https://syndicate-lang.org/"
repository = "https://git.syndicate-lang.org/syndicate-lang/syndicate-rs"
license = "Apache-2.0"
[lib]
[dependencies]
preserves-schema = "5.995"
syndicate = { path = "../syndicate", version = "0.40.0"}
[package.metadata.workspaces]
independent = true

View File

@ -1,3 +0,0 @@
mod pattern_plugin;
pub use pattern_plugin::PatternPlugin;

View File

@ -1,164 +0,0 @@
use preserves_schema::*;
use preserves_schema::compiler::*;
use preserves_schema::compiler::context::ModuleContext;
use preserves_schema::compiler::types::definition_type;
use preserves_schema::compiler::types::Purpose;
use preserves_schema::gen::schema::*;
use preserves_schema::syntax::block::escape_string;
use preserves_schema::syntax::block::constructors::*;
use std::iter::FromIterator;
use syndicate::pattern::lift_literal;
use syndicate::schemas::dataspace_patterns as P;
use syndicate::value::IOValue;
use syndicate::value::Map;
use syndicate::value::NestedValue;
#[derive(Debug)]
pub struct PatternPlugin;
type WalkState<'a, 'm, 'b> =
preserves_schema::compiler::cycles::WalkState<&'a ModuleContext<'m, 'b>>;
impl Plugin for PatternPlugin {
fn generate_definition(
&self,
ctxt: &mut ModuleContext,
definition_name: &str,
definition: &Definition,
) {
if ctxt.mode == context::ModuleContextMode::TargetGeneric {
let mut s = WalkState::new(ctxt, ctxt.module_path.clone());
if let Some(p) = definition.wc(&mut s) {
let ty = definition_type(&ctxt.module_path,
Purpose::Codegen,
definition_name,
definition);
let v = syndicate::language().unparse(&p);
let v = preserves_schema::support::preserves::value::TextWriter::encode(
&mut preserves_schema::support::preserves::value::NoEmbeddedDomainCodec,
&v).unwrap();
ctxt.define_type(item(seq![
"impl",
ty.generic_decl(ctxt),
" ",
names::render_constructor(definition_name),
ty.generic_arg(ctxt),
" ", codeblock![
seq!["#[allow(unused)] pub fn wildcard_dataspace_pattern() ",
"-> syndicate::schemas::dataspace_patterns::Pattern ",
codeblock![
"use syndicate::schemas::dataspace_patterns::*;",
"use preserves_schema::Codec;",
seq!["let _v = syndicate::value::text::from_str(",
escape_string(&v),
", syndicate::value::ViaCodec::new(syndicate::value::NoEmbeddedDomainCodec)).unwrap();"],
"syndicate::language().parse(&_v).unwrap()"]]]]));
}
}
}
}
fn discard() -> P::Pattern {
P::Pattern::Discard
}
trait WildcardPattern {
fn wc(&self, s: &mut WalkState) -> Option<P::Pattern>;
}
impl WildcardPattern for Definition {
fn wc(&self, s: &mut WalkState) -> Option<P::Pattern> {
match self {
Definition::Or { .. } => None,
Definition::And { .. } => None,
Definition::Pattern(p) => p.wc(s),
}
}
}
impl WildcardPattern for Pattern {
fn wc(&self, s: &mut WalkState) -> Option<P::Pattern> {
match self {
Pattern::CompoundPattern(p) => p.wc(s),
Pattern::SimplePattern(p) => p.wc(s),
}
}
}
fn from_io(v: &IOValue) -> Option<P::_Any> {
Some(v.value().copy_via(&mut |_| Err(())).ok()?.wrap())
}
impl WildcardPattern for CompoundPattern {
fn wc(&self, s: &mut WalkState) -> Option<P::Pattern> {
match self {
CompoundPattern::Tuple { patterns } |
CompoundPattern::TuplePrefix { fixed: patterns, .. }=>
Some(P::Pattern::Group {
type_: Box::new(P::GroupType::Arr),
entries: patterns.iter().enumerate()
.map(|(i, p)| Some((P::_Any::new(i), unname(p).wc(s)?)))
.collect::<Option<Map<P::_Any, P::Pattern>>>()?,
}),
CompoundPattern::Dict { entries } =>
Some(P::Pattern::Group {
type_: Box::new(P::GroupType::Dict),
entries: Map::from_iter(
entries.0.iter()
.map(|(k, p)| Some((from_io(k)?, unname_simple(p).wc(s)?)))
.filter(|e| discard() != e.as_ref().unwrap().1)
.collect::<Option<Vec<(P::_Any, P::Pattern)>>>()?
.into_iter()),
}),
CompoundPattern::Rec { label, fields } => match (unname(label), unname(fields)) {
(Pattern::SimplePattern(label), Pattern::CompoundPattern(fields)) =>
match (*label, *fields) {
(SimplePattern::Lit { value }, CompoundPattern::Tuple { patterns }) =>
Some(P::Pattern::Group{
type_: Box::new(P::GroupType::Rec { label: from_io(&value)? }),
entries: patterns.iter().enumerate()
.map(|(i, p)| Some((P::_Any::new(i), unname(p).wc(s)?)))
.collect::<Option<Map<P::_Any, P::Pattern>>>()?,
}),
_ => None,
},
_ => None,
},
}
}
}
impl WildcardPattern for SimplePattern {
fn wc(&self, s: &mut WalkState) -> Option<P::Pattern> {
match self {
SimplePattern::Any |
SimplePattern::Atom { .. } |
SimplePattern::Embedded { .. } |
SimplePattern::Seqof { .. } |
SimplePattern::Setof { .. } |
SimplePattern::Dictof { .. } => Some(discard()),
SimplePattern::Lit { value } => Some(lift_literal(&from_io(value)?)),
SimplePattern::Ref(r) => s.cycle_check(
r,
|ctxt, r| ctxt.bundle.lookup_definition(r).map(|v| v.0),
|s, d| d.and_then(|d| d.wc(s)).or_else(|| Some(discard())),
|| Some(discard())),
}
}
}
fn unname(np: &NamedPattern) -> Pattern {
match np {
NamedPattern::Anonymous(p) => (**p).clone(),
NamedPattern::Named(b) => Pattern::SimplePattern(Box::new(b.pattern.clone())),
}
}
fn unname_simple(np: &NamedSimplePattern) -> &SimplePattern {
match np {
NamedSimplePattern::Anonymous(p) => p,
NamedSimplePattern::Named(b) => &b.pattern,
}
}

View File

@ -1,6 +1,6 @@
[package]
name = "syndicate-server"
version = "0.45.0"
version = "0.20.1"
authors = ["Tony Garnock-Jones <tonyg@leastfixedpoint.com>"]
edition = "2018"
@ -9,39 +9,27 @@ homepage = "https://syndicate-lang.org/"
repository = "https://git.syndicate-lang.org/syndicate-lang/syndicate-rs"
license = "Apache-2.0"
[features]
jemalloc = ["dep:tikv-jemallocator"]
[build-dependencies]
preserves-schema = "5.995"
syndicate = { path = "../syndicate", version = "0.40.0"}
syndicate-schema-plugin = { path = "../syndicate-schema-plugin", version = "0.9.0"}
preserves-schema = "^2"
syndicate = { path = "../syndicate", version = "^0.20.0"}
[dependencies]
preserves-schema = "5.995"
syndicate = { path = "../syndicate", version = "0.40.0"}
syndicate-macros = { path = "../syndicate-macros", version = "0.32.0"}
preserves-schema = "^2"
syndicate = { path = "../syndicate", version = "^0.20.0"}
syndicate-macros = { path = "../syndicate-macros", version = "^0.15.0"}
chrono = "0.4"
futures = "0.3"
lazy_static = "1.4"
noise-protocol = "0.1"
noise-rust-crypto = "0.5"
notify = "4.0"
structopt = "0.3"
tikv-jemallocator = { version = "0.5.0", optional = true }
tungstenite = "0.13"
tokio-tungstenite = "0.14"
tokio = { version = "1.10", features = ["io-std", "time", "process"] }
tokio-util = "0.6"
tokio-stream = "0.1"
tracing = "0.1"
tracing-subscriber = "0.2"
tracing-futures = "0.2"
hyper = { version = "0.14.27", features = ["server", "http1", "stream"] }
hyper-tungstenite = "0.11.1"
parking_lot = "0.12.1"
[package.metadata.workspaces]
independent = true

View File

@ -13,7 +13,7 @@ inotifytest:
binary: binary-release
binary-release:
cargo build --release --all-targets --features jemalloc
cargo build --release --all-targets
binary-debug:
cargo build --all-targets

View File

@ -1,32 +1,176 @@
use preserves_schema::compiler::*;
mod pattern_plugin {
use preserves_schema::*;
use preserves_schema::compiler::*;
use preserves_schema::compiler::context::ModuleContext;
use preserves_schema::gen::schema::*;
use preserves_schema::syntax::block::escape_string;
use preserves_schema::syntax::block::constructors::*;
use std::iter::FromIterator;
use syndicate::pattern::lift_literal;
use syndicate::schemas::dataspace_patterns as P;
use syndicate::value::IOValue;
use syndicate::value::Map;
use syndicate::value::NestedValue;
#[derive(Debug)]
pub struct PatternPlugin;
type WalkState<'a, 'm, 'b> =
preserves_schema::compiler::cycles::WalkState<&'a ModuleContext<'m, 'b>>;
impl Plugin for PatternPlugin {
fn generate_definition(
&self,
ctxt: &mut ModuleContext,
definition_name: &str,
definition: &Definition,
) {
if ctxt.mode == context::ModuleContextMode::TargetGeneric {
let mut s = WalkState::new(ctxt, ctxt.module_path.clone());
if let Some(p) = definition.wc(&mut s) {
let v = syndicate::language().unparse(&p);
let v = preserves_schema::support::preserves::value::TextWriter::encode(
&mut preserves_schema::support::preserves::value::NoEmbeddedDomainCodec,
&v).unwrap();
ctxt.define_type(item(seq![
"impl ", definition_name.to_owned(), " ", codeblock![
seq!["#[allow(unused)] pub fn wildcard_dataspace_pattern() ",
"-> syndicate::schemas::dataspace_patterns::Pattern ",
codeblock![
"use syndicate::schemas::dataspace_patterns::*;",
"use preserves_schema::Codec;",
seq!["let _v = syndicate::value::text::from_str(",
escape_string(&v),
", syndicate::value::ViaCodec::new(syndicate::value::NoEmbeddedDomainCodec)).unwrap();"],
"syndicate::language().parse(&_v).unwrap()"]]]]));
}
}
}
}
fn discard() -> P::Pattern {
P::Pattern::DDiscard(Box::new(P::DDiscard))
}
trait WildcardPattern {
fn wc(&self, s: &mut WalkState) -> Option<P::Pattern>;
}
impl WildcardPattern for Definition {
fn wc(&self, s: &mut WalkState) -> Option<P::Pattern> {
match self {
Definition::Or { .. } => None,
Definition::And { .. } => None,
Definition::Pattern(p) => p.wc(s),
}
}
}
impl WildcardPattern for Pattern {
fn wc(&self, s: &mut WalkState) -> Option<P::Pattern> {
match self {
Pattern::CompoundPattern(p) => p.wc(s),
Pattern::SimplePattern(p) => p.wc(s),
}
}
}
fn from_io(v: &IOValue) -> Option<P::_Any> {
Some(v.value().copy_via(&mut |_| Err(())).ok()?.wrap())
}
impl WildcardPattern for CompoundPattern {
fn wc(&self, s: &mut WalkState) -> Option<P::Pattern> {
match self {
CompoundPattern::Tuple { patterns } =>
Some(P::Pattern::DCompound(Box::new(P::DCompound::Arr {
items: patterns.iter()
.map(|p| unname(p).wc(s))
.collect::<Option<Vec<P::Pattern>>>()?,
}))),
CompoundPattern::TuplePrefix { .. } =>
Some(discard()),
CompoundPattern::Dict { entries } =>
Some(P::Pattern::DCompound(Box::new(P::DCompound::Dict {
entries: Map::from_iter(
entries.0.iter()
.map(|(k, p)| Some((from_io(k)?, unname_simple(p).wc(s)?)))
.filter(|e| discard() != e.as_ref().unwrap().1)
.collect::<Option<Vec<(P::_Any, P::Pattern)>>>()?
.into_iter()),
}))),
CompoundPattern::Rec { label, fields } => match (unname(label), unname(fields)) {
(Pattern::SimplePattern(label), Pattern::CompoundPattern(fields)) =>
match (*label, *fields) {
(SimplePattern::Lit { value }, CompoundPattern::Tuple { patterns }) =>
Some(P::Pattern::DCompound(Box::new(P::DCompound::Rec {
label: from_io(&value)?,
fields: patterns.iter()
.map(|p| unname(p).wc(s))
.collect::<Option<Vec<P::Pattern>>>()?,
}))),
_ => None,
},
_ => None,
},
}
}
}
impl WildcardPattern for SimplePattern {
fn wc(&self, s: &mut WalkState) -> Option<P::Pattern> {
match self {
SimplePattern::Any |
SimplePattern::Atom { .. } |
SimplePattern::Embedded { .. } |
SimplePattern::Seqof { .. } |
SimplePattern::Setof { .. } |
SimplePattern::Dictof { .. } => Some(discard()),
SimplePattern::Lit { value } => Some(lift_literal(&from_io(value)?)),
SimplePattern::Ref(r) => s.cycle_check(
r,
|ctxt, r| ctxt.bundle.lookup_definition(r),
|s, d| d.and_then(|d| d.wc(s)).or_else(|| Some(discard())),
|| Some(discard())),
}
}
}
fn unname(np: &NamedPattern) -> Pattern {
match np {
NamedPattern::Anonymous(p) => (**p).clone(),
NamedPattern::Named(b) => Pattern::SimplePattern(Box::new(b.pattern.clone())),
}
}
fn unname_simple(np: &NamedSimplePattern) -> &SimplePattern {
match np {
NamedSimplePattern::Anonymous(p) => p,
NamedSimplePattern::Named(b) => &b.pattern,
}
}
}
fn main() -> std::io::Result<()> {
let buildroot = std::path::PathBuf::from(std::env::var_os("OUT_DIR").unwrap());
let mut gen_dir = buildroot.clone();
gen_dir.push("src/schemas");
let mut c = CompilerConfig::new("crate::schemas".to_owned());
c.plugins.push(Box::new(syndicate_schema_plugin::PatternPlugin));
let mut c = CompilerConfig::new(gen_dir, "crate::schemas".to_owned());
c.plugins.push(Box::new(pattern_plugin::PatternPlugin));
c.add_external_module(ExternalModule::new(vec!["EntityRef".to_owned()], "syndicate::actor"));
c.add_external_module(
ExternalModule::new(vec!["TransportAddress".to_owned()],
"syndicate::schemas::transport_address")
.set_fallback_language_types(
|v| vec![format!("syndicate::schemas::Language<{}>", v)].into_iter().collect()));
c.add_external_module(
ExternalModule::new(vec!["gatekeeper".to_owned()], "syndicate::schemas::gatekeeper")
.set_fallback_language_types(
|v| vec![format!("syndicate::schemas::Language<{}>", v)].into_iter().collect())
);
c.add_external_module(
ExternalModule::new(vec!["noise".to_owned()], "syndicate::schemas::noise")
.set_fallback_language_types(
|v| vec![format!("syndicate::schemas::Language<{}>", v)].into_iter().collect())
);
let inputs = expand_inputs(&vec!["protocols/schema-bundle.bin".to_owned()])?;
c.load_schemas_and_bundles(&inputs, &vec![])?;
c.load_xref_bin("syndicate", syndicate::schemas::_bundle())?;
compile(&c, &mut CodeCollector::files(gen_dir))
c.load_schemas_and_bundles(&inputs)?;
compile(&c)
}
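For orientation, a hedged sketch of what this plugin's output enables: each schema definition it matches gains an inherent `wildcard_dataspace_pattern()` method on the generated type. `SomeSchemaType` below is a hypothetical stand-in, and the method body is simplified to the discard pattern used elsewhere in this file; the generated body actually parses a serialized pattern literal as shown in the `codeblock!` template above:

    use syndicate::schemas::dataspace_patterns as P;

    // Hypothetical stand-in for a preserves-schema-generated type.
    struct SomeSchemaType;

    impl SomeSchemaType {
        // Shape of the method PatternPlugin emits; real bodies are generated.
        #[allow(unused)]
        pub fn wildcard_dataspace_pattern() -> P::Pattern {
            P::Pattern::DDiscard(Box::new(P::DDiscard))
        }
    }

    // A caller can then observe any instance of the type:
    fn observe_any() -> P::Pattern {
        SomeSchemaType::wildcard_dataspace_pattern()
    }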

View File

@ -12,20 +12,21 @@ use syndicate::value::NestedValue;
use tokio::net::TcpStream;
use core::time::Duration;
use tokio::time::interval;
#[derive(Clone, Debug, StructOpt)]
pub struct Config {
#[structopt(short = "d", default_value = "b4b303726566b7b3036f6964b10973796e646963617465b303736967b21069ca300c1dbfa08fba692102dd82311a8484")]
#[structopt(short = "d", default_value = "b4b303726566b10973796e646963617465b584b210a6480df5306611ddd0d3882b546e197784")]
dataspace: String,
}
#[tokio::main]
async fn main() -> ActorResult {
async fn main() -> Result<(), Box<dyn std::error::Error>> {
syndicate::convenient_logging()?;
let config = Config::from_args();
let sturdyref = sturdy::SturdyRef::from_hex(&config.dataspace)?;
let (i, o) = TcpStream::connect("127.0.0.1:9001").await?.into_split();
Actor::top(None, |t| {
let (i, o) = TcpStream::connect("127.0.0.1:8001").await?.into_split();
Actor::new(None).boot(syndicate::name!("consumer"), |t| {
relay::connect_stream(t, i, o, false, sturdyref, (), |_state, t, ds| {
let consumer = syndicate::entity(0)
.on_message(|message_count, _t, m: AnyValue| {
@ -43,13 +44,21 @@ async fn main() -> ActorResult {
observer: Arc::clone(&consumer),
});
t.every(Duration::from_secs(1), move |t| {
consumer.message(t, &(), &AnyValue::new(true));
Ok(())
})?;
t.linked_task(syndicate::name!("tick"), async move {
let mut stats_timer = interval(Duration::from_secs(1));
loop {
stats_timer.tick().await;
let consumer = Arc::clone(&consumer);
external_event(&Arc::clone(&consumer.underlying.mailbox),
&Account::new(syndicate::name!("account")),
Box::new(move |t| t.with_entity(
&consumer.underlying,
|t, e| e.message(t, AnyValue::new(true)))))?;
}
});
Ok(None)
})
});
Ok(())
}).await??;
Ok(())
}

View File

@ -26,14 +26,14 @@ mod dirty;
#[derive(Clone, Debug, StructOpt)]
pub struct Config {
#[structopt(short = "d", default_value = "b4b303726566b7b3036f6964b10973796e646963617465b303736967b21069ca300c1dbfa08fba692102dd82311a8484")]
#[structopt(short = "d", default_value = "b4b303726566b10973796e646963617465b584b210a6480df5306611ddd0d3882b546e197784")]
dataspace: String,
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
let config = Config::from_args();
let mut stream = TcpStream::connect("127.0.0.1:9001")?;
let mut stream = TcpStream::connect("127.0.0.1:8001")?;
dirty::dirty_resolve(&mut stream, &config.dataspace)?;
let iolang = Language::<IOValue>::default();
@ -58,10 +58,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
let mut buf = [0; 131072];
let turn_size = {
let n = stream.read(&mut buf)?;
if n == 0 {
return Ok(());
}
stream.read(&mut buf)?;
let mut src = BytesBinarySource::new(&buf);
src.packed_iovalues().demand_next(false)?;
src.index

View File

@ -25,7 +25,7 @@ pub struct Config {
#[structopt(short = "b", default_value = "0")]
bytes_padding: usize,
#[structopt(short = "d", default_value = "b4b303726566b7b3036f6964b10973796e646963617465b303736967b21069ca300c1dbfa08fba692102dd82311a8484")]
#[structopt(short = "d", default_value = "b4b303726566b10973796e646963617465b584b210a6480df5306611ddd0d3882b546e197784")]
dataspace: String,
}
@ -40,7 +40,7 @@ fn says(who: IOValue, what: IOValue) -> IOValue {
fn main() -> Result<(), Box<dyn std::error::Error>> {
let config = Config::from_args();
let mut stream = TcpStream::connect("127.0.0.1:9001")?;
let mut stream = TcpStream::connect("127.0.0.1:8001")?;
dirty::dirty_resolve(&mut stream, &config.dataspace)?;
let padding: IOValue = Value::ByteString(vec![0; config.bytes_padding]).wrap();

View File

@ -16,16 +16,15 @@ pub fn dirty_resolve(stream: &mut TcpStream, dataspace: &str) -> Result<(), Box<
let iolang = Language::<IOValue>::default();
let sturdyref = sturdy::SturdyRef::from_hex(dataspace)?;
let sturdyref = iolang.parse::<gatekeeper::Step<IOValue>>(
&syndicate::language().unparse(&sturdyref)
.copy_via(&mut |_| Err("no!"))?)?;
let sturdyref = iolang.parse(&syndicate::language().unparse(&sturdyref)
.copy_via(&mut |_| Err("no!"))?)?;
let resolve_turn = P::Turn(vec![
P::TurnEvent {
oid: P::Oid(0.into()),
event: P::Event::Assert(Box::new(P::Assert {
assertion: P::Assertion(iolang.unparse(&gatekeeper::Resolve::<IOValue> {
step: sturdyref,
sturdyref,
observer: iolang.unparse(&sturdy::WireRef::Mine {
oid: Box::new(sturdy::Oid(0.into())),
}),

View File

@ -1,11 +1,9 @@
use std::sync::Arc;
use std::sync::Mutex;
use std::time::SystemTime;
use structopt::StructOpt;
use syndicate::actor::*;
use syndicate::enclose;
use syndicate::language;
use syndicate::relay;
use syndicate::schemas::dataspace::Observe;
@ -16,6 +14,7 @@ use syndicate::value::Value;
use tokio::net::TcpStream;
use core::time::Duration;
use tokio::time::interval;
#[derive(Clone, Debug, StructOpt)]
pub struct PingConfig {
@ -43,7 +42,7 @@ pub struct Config {
#[structopt(subcommand)]
mode: PingPongMode,
#[structopt(short = "d", default_value = "b4b303726566b7b3036f6964b10973796e646963617465b303736967b21069ca300c1dbfa08fba692102dd82311a8484")]
#[structopt(short = "d", default_value = "b4b303726566b10973796e646963617465b584b210a6480df5306611ddd0d3882b546e197784")]
dataspace: String,
}
@ -89,12 +88,12 @@ fn report_latencies(rtt_ns_samples: &Vec<u64>) {
}
#[tokio::main]
async fn main() -> ActorResult {
async fn main() -> Result<(), Box<dyn std::error::Error>> {
syndicate::convenient_logging()?;
let config = Config::from_args();
let sturdyref = sturdy::SturdyRef::from_hex(&config.dataspace)?;
let (i, o) = TcpStream::connect("127.0.0.1:9001").await?.into_split();
Actor::top(None, |t| {
let (i, o) = TcpStream::connect("127.0.0.1:8001").await?.into_split();
Actor::new(None).boot(syndicate::name!("pingpong"), |t| {
relay::connect_stream(t, i, o, false, sturdyref, (), move |_state, t, ds| {
let (send_label, recv_label, report_latency_every, should_echo, bytes_padding) =
@ -111,18 +110,22 @@ async fn main() -> ActorResult {
let mut event_counter: u64 = 0;
let mut rtt_ns_samples: Vec<u64> = vec![0; report_latency_every];
let mut rtt_batch_count: usize = 0;
let current_reply = Arc::new(Mutex::new(None));
Cap::new(&t.create(
syndicate::entity(())
.on_message(move |(), t, m: AnyValue| {
let mut current_reply = None;
let self_ref = t.create_inert();
self_ref.become_entity(
syndicate::entity(Arc::clone(&self_ref))
.on_message(move |self_ref, t, m: AnyValue| {
match m.value().as_boolean() {
Some(_) => {
Some(true) => {
tracing::info!("{:?} turns, {:?} events in the last second",
turn_counter,
event_counter);
turn_counter = 0;
event_counter = 0;
}
Some(false) => {
current_reply = None;
}
None => {
event_counter += 1;
let bindings = m.value().to_sequence()?;
@ -134,13 +137,9 @@ async fn main() -> ActorResult {
timestamp.clone(),
padding.clone()));
} else {
let mut g = current_reply.lock().expect("unpoisoned");
if let None = *g {
if let None = current_reply {
turn_counter += 1;
t.pre_commit(enclose!((current_reply) move |_| {
*current_reply.lock().expect("unpoisoned") = None;
Ok(())
}));
t.message_for_myself(&self_ref, AnyValue::new(false));
let rtt_ns = now() - timestamp.value().to_u64()?;
rtt_ns_samples[rtt_batch_count] = rtt_ns;
rtt_batch_count += 1;
@ -151,16 +150,18 @@ async fn main() -> ActorResult {
rtt_batch_count = 0;
}
*g = Some(simple_record2(&send_label,
Value::from(now()).wrap(),
padding.clone()));
current_reply = Some(
simple_record2(&send_label,
Value::from(now()).wrap(),
padding.clone()));
}
ds.message(t, &(), g.as_ref().expect("some reply"));
ds.message(t, &(), current_reply.as_ref().expect("some reply"));
}
}
}
Ok(())
})))
}));
Cap::new(&self_ref)
};
ds.assert(t, language(), &Observe {
@ -171,35 +172,46 @@ async fn main() -> ActorResult {
observer: Arc::clone(&consumer),
});
t.every(Duration::from_secs(1), move |t| {
consumer.message(t, &(), &AnyValue::new(true));
Ok(())
})?;
t.linked_task(syndicate::name!("tick"), async move {
let mut stats_timer = interval(Duration::from_secs(1));
loop {
stats_timer.tick().await;
let consumer = Arc::clone(&consumer);
external_event(&Arc::clone(&consumer.underlying.mailbox),
&Account::new(syndicate::name!("account")),
Box::new(move |t| t.with_entity(
&consumer.underlying,
|t, e| e.message(t, AnyValue::new(true)))))?;
}
});
if let PingPongMode::Ping(c) = &config.mode {
let facet = t.facet_ref();
let turn_count = c.turn_count;
let action_count = c.action_count;
let account = Arc::clone(t.account());
t.linked_task(Some(AnyValue::symbol("boot-ping")), async move {
t.linked_task(syndicate::name!("boot-ping"), async move {
let padding = AnyValue::bytestring(vec![0; bytes_padding]);
for _ in 0..turn_count {
let mut events: PendingEventQueue = vec![];
let current_rec = simple_record2(send_label,
Value::from(now()).wrap(),
padding.clone());
facet.activate(&account, None, |t| {
for _ in 0..action_count {
ds.message(t, &(), &current_rec);
}
Ok(())
});
for _ in 0..action_count {
let ds = Arc::clone(&ds);
let current_rec = current_rec.clone();
events.push(Box::new(move |t| t.with_entity(
&ds.underlying,
|t, e| e.message(t, current_rec))));
}
external_events(&ds.underlying.mailbox, &account, events)?
}
Ok(LinkedTaskTermination::KeepFacet)
});
}
Ok(None)
})
});
Ok(())
}).await??;
Ok(())
}

View File

@ -1,10 +1,10 @@
use structopt::StructOpt;
use syndicate::actor::*;
use syndicate::preserves::rec;
use syndicate::enclose;
use syndicate::relay;
use syndicate::sturdy;
use syndicate::value::NestedValue;
use syndicate::value::Value;
use tokio::net::TcpStream;
@ -16,37 +16,44 @@ pub struct Config {
#[structopt(short = "b", default_value = "0")]
bytes_padding: usize,
#[structopt(short = "d", default_value = "b4b303726566b7b3036f6964b10973796e646963617465b303736967b21069ca300c1dbfa08fba692102dd82311a8484")]
#[structopt(short = "d", default_value = "b4b303726566b10973796e646963617465b584b210a6480df5306611ddd0d3882b546e197784")]
dataspace: String,
}
#[inline]
fn says(who: AnyValue, what: AnyValue) -> AnyValue {
let mut r = Value::simple_record("Says", 2);
r.fields_vec_mut().push(who);
r.fields_vec_mut().push(what);
r.finish().wrap()
}
#[tokio::main]
async fn main() -> ActorResult {
async fn main() -> Result<(), Box<dyn std::error::Error>> {
syndicate::convenient_logging()?;
let config = Config::from_args();
let sturdyref = sturdy::SturdyRef::from_hex(&config.dataspace)?;
let (i, o) = TcpStream::connect("127.0.0.1:9001").await?.into_split();
Actor::top(None, |t| {
let (i, o) = TcpStream::connect("127.0.0.1:8001").await?.into_split();
Actor::new(None).boot(syndicate::name!("producer"), |t| {
relay::connect_stream(t, i, o, false, sturdyref, (), move |_state, t, ds| {
let facet = t.facet_ref();
let padding = AnyValue::new(&vec![0u8; config.bytes_padding][..]);
let padding: AnyValue = Value::ByteString(vec![0; config.bytes_padding]).wrap();
let action_count = config.action_count;
let account = Account::new(None, None);
t.linked_task(Some(AnyValue::symbol("sender")), async move {
let account = Account::new(syndicate::name!("account"));
t.linked_task(syndicate::name!("sender"), async move {
loop {
account.ensure_clear_funds().await;
facet.activate(&account, None, |t| {
for _ in 0..action_count {
ds.message(t, &(), &rec![AnyValue::symbol("Says"),
AnyValue::new("producer"),
padding.clone()]);
}
Ok(())
});
let mut events: PendingEventQueue = Vec::new();
for _ in 0..action_count {
events.push(Box::new(enclose!((ds, padding) move |t| t.with_entity(
&ds.underlying, |t, e| e.message(
t, says(Value::from("producer").wrap(), padding))))));
}
external_events(&ds.underlying.mailbox, &account, events)?;
}
});
Ok(None)
})
});
Ok(())
}).await??;
Ok(())
}

View File

@ -12,20 +12,21 @@ use syndicate::value::NestedValue;
use tokio::net::TcpStream;
use core::time::Duration;
use tokio::time::interval;
#[derive(Clone, Debug, StructOpt)]
pub struct Config {
#[structopt(short = "d", default_value = "b4b303726566b7b3036f6964b10973796e646963617465b303736967b21069ca300c1dbfa08fba692102dd82311a8484")]
#[structopt(short = "d", default_value = "b4b303726566b10973796e646963617465b584b210a6480df5306611ddd0d3882b546e197784")]
dataspace: String,
}
#[tokio::main]
async fn main() -> ActorResult {
async fn main() -> Result<(), Box<dyn std::error::Error>> {
syndicate::convenient_logging()?;
let config = Config::from_args();
let sturdyref = sturdy::SturdyRef::from_hex(&config.dataspace)?;
let (i, o) = TcpStream::connect("127.0.0.1:9001").await?.into_split();
Actor::top(None, |t| {
let (i, o) = TcpStream::connect("127.0.0.1:8001").await?.into_split();
Actor::new(None).boot(syndicate::name!("state-consumer"), |t| {
relay::connect_stream(t, i, o, false, sturdyref, (), |_state, t, ds| {
let consumer = {
#[derive(Default)]
@ -64,13 +65,21 @@ async fn main() -> ActorResult {
observer: Arc::clone(&consumer),
});
t.every(Duration::from_secs(1), move |t| {
consumer.message(t, &(), &AnyValue::new(true));
Ok(())
})?;
t.linked_task(syndicate::name!("tick"), async move {
let mut stats_timer = interval(Duration::from_secs(1));
loop {
stats_timer.tick().await;
let consumer = Arc::clone(&consumer);
external_event(&Arc::clone(&consumer.underlying.mailbox),
&Account::new(syndicate::name!("account")),
Box::new(move |t| t.with_entity(
&consumer.underlying,
|t, e| e.message(t, AnyValue::new(true)))))?;
}
});
Ok(None)
})
});
Ok(())
}).await??;
Ok(())
}

View File

@ -1,48 +1,57 @@
use std::sync::Arc;
use structopt::StructOpt;
use syndicate::actor::*;
use syndicate::preserves::rec;
use syndicate::enclose;
use syndicate::relay;
use syndicate::sturdy;
use syndicate::value::NestedValue;
use syndicate::value::Value;
use tokio::net::TcpStream;
#[derive(Clone, Debug, StructOpt)]
pub struct Config {
#[structopt(short = "d", default_value = "b4b303726566b7b3036f6964b10973796e646963617465b303736967b21069ca300c1dbfa08fba692102dd82311a8484")]
#[structopt(short = "d", default_value = "b4b303726566b10973796e646963617465b584b210a6480df5306611ddd0d3882b546e197784")]
dataspace: String,
}
#[tokio::main]
async fn main() -> ActorResult {
async fn main() -> Result<(), Box<dyn std::error::Error>> {
syndicate::convenient_logging()?;
let config = Config::from_args();
let sturdyref = sturdy::SturdyRef::from_hex(&config.dataspace)?;
let (i, o) = TcpStream::connect("127.0.0.1:9001").await?.into_split();
Actor::top(None, |t| {
let (i, o) = TcpStream::connect("127.0.0.1:8001").await?.into_split();
Actor::new(None).boot(syndicate::name!("state-producer"), |t| {
relay::connect_stream(t, i, o, false, sturdyref, (), move |_state, t, ds| {
let facet = t.facet_ref();
let account = Account::new(None, None);
t.linked_task(Some(AnyValue::symbol("sender")), async move {
let presence = rec![AnyValue::symbol("Present"), AnyValue::new(std::process::id())];
let account = Account::new(syndicate::name!("account"));
t.linked_task(syndicate::name!("sender"), async move {
let presence: AnyValue = Value::simple_record1(
"Present",
Value::from(std::process::id()).wrap()).wrap();
let handle = syndicate::actor::next_handle();
let assert_e = || {
external_event(
&Arc::clone(&ds.underlying.mailbox), &account, Box::new(enclose!(
(ds, presence, handle) move |t| t.with_entity(
&ds.underlying, |t, e| e.assert(t, presence, handle)))))
};
let retract_e = || {
external_event(
&Arc::clone(&ds.underlying.mailbox), &account, Box::new(enclose!(
(ds, handle) move |t| t.with_entity(
&ds.underlying, |t, e| e.retract(t, handle)))))
};
assert_e()?;
loop {
let mut handle = None;
facet.activate(&account, None, |t| {
handle = ds.assert(t, &(), &presence);
Ok(())
});
account.ensure_clear_funds().await;
facet.activate(&account, None, |t| {
if let Some(h) = handle {
t.retract(h);
}
Ok(())
});
retract_e()?;
assert_e()?;
}
});
Ok(None)
})
});
Ok(())
}).await??;
Ok(())
}

View File

@ -1,17 +1,10 @@
(Binary diff of the compiled Preserves schema bundle; not human-readable. It covers the bundled control, documentation, externalServices, and internalServices schema modules — the same changes that appear in source form in the .prs files below, including the gatekeeper, relay-listener, http-router, and http-static-files definitions.)

View File

@ -1,12 +0,0 @@
version 1 .
# Messages and assertions relating to the `$control` entity enabled in syndicate-server when
# the `--control` flag is supplied.
#
# For example, placing the following into `control-config.pr` and starting the server with
# `syndicate-server --control -c control-config.pr` will result in the server exiting with
# exit code 2:
#
# $control ! <exit 2>
ExitServer = <exit @code int> .
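For reference, the configuration described in the comment above, written out as a file — both lines are taken directly from the comment, nothing here is new:
# control-config.pr
$control ! <exit 2>
Starting the server with `syndicate-server --control -c control-config.pr` then makes it exit with code 2.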

View File

@ -1,11 +1,11 @@
version 1 .
# Assertion. Describes `object`.
; Assertion. Describes `object`.
Metadata = <metadata @object any @info { symbol: any ...:... }> .
# Projections of the `info` in a `Metadata` record.
; Projections of the `info` in a `Metadata` record.
Description = @present { description: IOList } / @invalid { description: any } / @absent {} .
Url = @present { url: string } / @invalid { url: any } / @absent {} .
# Data type. From preserves' `conventions.md`.
; Data type. From preserves' `conventions.md`.
IOList = @bytes bytes / @string string / @nested [IOList ...] .
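A minimal sketch of data matching this schema; the object `<frobnicator>` and the field values are invented for illustration:
<metadata <frobnicator> { description: "An example service" url: "https://example.org/frobnicator" }>
The `description` and `url` keys line up with the `Description` and `Url` projections above, and the `...:...` tail in `Metadata` admits any further symbol-keyed entries.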

View File

@ -30,26 +30,23 @@ EnvVariable = @string string / @symbol symbol / @invalid any .
EnvValue = @set string / @remove #f / @invalid any .
RestartPolicy =
/ # Whether the process terminates normally or abnormally, restart it
# without affecting any peer processes within the service.
/ ; Whether the process terminates normally or abnormally, restart it
; without affecting any peer processes within the service.
=always
/ # If the process terminates normally, leave everything alone; if it
# terminates abnormally, restart it without affecting peers.
/ ; If the process terminates normally, leave everything alone; if it
; terminates abnormally, restart it without affecting peers.
@onError =on-error
/ # If the process terminates normally, leave everything alone; if it
# terminates abnormally, restart the whole daemon (all processes
# within the daemon).
/ ; If the process terminates normally, leave everything alone; if it
; terminates abnormally, restart the whole daemon (all processes
; within the daemon).
=all
/ # Treat both normal and abnormal termination as normal termination; that is, never restart,
# and enter state "complete" even if the process fails.
=never
.
Protocol =
/ # stdin is /dev/null, output and error are logged
/ ; stdin is /dev/null, output and error are logged
=none
/ # stdin and stdout are *binary* Syndicate-protocol channels
/ ; stdin and stdout are *binary* Syndicate-protocol channels
@binarySyndicate =application/syndicate
/ # stdin and stdout are *text* Syndicate-protocol channels
/ ; stdin and stdout are *text* Syndicate-protocol channels
@textSyndicate =text/syndicate
.
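To make the two enumerations concrete, here is a hedged sketch of a daemon configuration that picks one value from each. The service id `frobnicator` and the command line are invented, and the `argv`, `restart`, and `protocol` keys are assumed to follow the FullProcess/RestartField/ProtocolField definitions elsewhere in this schema file:
<daemon frobnicator {
  argv: ["/usr/local/bin/frobnicator" "--verbose"]
  restart: on-error
  protocol: application/syndicate
}>
Per the comments above, `restart: always` keeps the single process alive without touching its peers, while `restart: all` restarts the whole daemon on abnormal exit.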

View File

@ -1,18 +1,11 @@
version 1 .
embeddedType EntityRef.Cap .
Gatekeeper = <gatekeeper @bindspace #:gatekeeper.Bind> .
DebtReporter = <debt-reporter @intervalSeconds double>.
TcpRelayListener = TcpWithoutHttp / TcpWithHttp .
TcpWithoutHttp = <relay-listener @addr TransportAddress.Tcp @gatekeeper #:gatekeeper.Resolve> .
TcpWithHttp = <relay-listener @addr TransportAddress.Tcp @gatekeeper #:gatekeeper.Resolve @httpd #:http.HttpContext> .
UnixRelayListener = <relay-listener @addr TransportAddress.Unix @gatekeeper #:gatekeeper.Resolve> .
TcpRelayListener = <relay-listener @addr TransportAddress.Tcp @gatekeeper #!gatekeeper.Resolve> .
UnixRelayListener = <relay-listener @addr TransportAddress.Unix @gatekeeper #!gatekeeper.Resolve> .
ConfigWatcher = <config-watcher @path string @env ConfigEnv>.
Milestone = <milestone @name any>.
ConfigEnv = { symbol: any ...:... }.
HttpRouter = <http-router @httpd #:any> .
HttpStaticFileServer = <http-static-files @dir string @pathPrefixElements int> .
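For illustration, a hedged sketch of a server config asserting two of the services defined above; the interval, path, and empty environment are invented values, and `<require-service ...>` is the request form handled by the dependency tracker further down in this diff:
<require-service <debt-reporter 10.0>>
<require-service <config-watcher "/etc/syndicate/conf.d" {}>>
Each record names a service by the shape given in its definition above.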

View File

@ -3,21 +3,23 @@ use preserves_schema::Codec;
use std::sync::Arc;
use syndicate::actor::*;
use syndicate::during::entity;
use syndicate::enclose;
use syndicate::preserves::rec;
use syndicate::schemas::dataspace::Observe;
use syndicate::schemas::service;
use syndicate::value::NestedValue;
use crate::counter;
use crate::language::language;
use crate::schemas::internal_services;
use syndicate_macros::during;
pub fn boot(t: &mut Activation, ds: Arc<Cap>) {
t.spawn(Some(AnyValue::symbol("dependencies_listener")), move |t| {
t.spawn(syndicate::name!("dependencies"), move |t| {
Ok(during!(t, ds, language(), <require-service $spec>, |t: &mut Activation| {
tracing::debug!(?spec, "tracking dependencies");
t.spawn_link(Some(rec![AnyValue::symbol("dependencies"), language().unparse(&spec)]),
t.spawn_link(syndicate::name!(parent: None, "dependencies", spec = ?spec),
enclose!((ds) |t| run(t, ds, spec)));
Ok(())
}))
@ -25,9 +27,34 @@ pub fn boot(t: &mut Activation, ds: Arc<Cap>) {
}
fn run(t: &mut Activation, ds: Arc<Cap>, service_name: AnyValue) -> ActorResult {
if !service_name.value().is_simple_record("milestone", Some(1)) {
let system_layer_dep = service::ServiceDependency {
depender: service_name.clone(),
dependee: service::ServiceState {
service_name: language().unparse(&internal_services::Milestone {
name: AnyValue::symbol("system-layer"),
}),
state: service::State::Ready,
},
};
let milestone_monitor = entity(ds.assert(t, language(), &system_layer_dep))
.on_asserted(enclose!((ds) move |handle, t, _captures: AnyValue| {
ds.update::<_, service::ServiceDependency>(t, handle, language(), None);
Ok(Some(Box::new(enclose!((ds, system_layer_dep) move |handle, t| {
ds.update(t, handle, language(), Some(&system_layer_dep));
Ok(())
}))))
}))
.create_cap(t);
ds.assert(t, language(), &Observe {
pattern: syndicate_macros::pattern!{<system-layer-service #(&service_name)>},
observer: milestone_monitor,
});
}
let obstacle_count = t.named_field("obstacle_count", 1isize);
t.dataflow(enclose!((service_name, obstacle_count) move |t| {
tracing::trace!(?service_name, obstacle_count = ?t.get(&obstacle_count));
t.dataflow(enclose!((obstacle_count) move |t| {
tracing::trace!(obstacle_count = ?t.get(&obstacle_count));
Ok(())
}))?;
@ -46,25 +73,24 @@ fn run(t: &mut Activation, ds: Arc<Cap>, service_name: AnyValue) -> ActorResult
})
})?;
let depender = service_name.clone();
enclose!((ds, obstacle_count) during!(
t, ds, language(), <depends-on #(&depender) $dependee>,
enclose!((service_name, ds, obstacle_count) move |t: &mut Activation| {
t, ds, language(), <depends-on #(&service_name) $dependee>,
enclose!((ds, obstacle_count) move |t: &mut Activation| {
if let Ok(dependee) = language().parse::<service::ServiceState>(&dependee) {
tracing::trace!(?service_name, ?dependee, "new dependency");
tracing::trace!(on = ?dependee, "new dependency");
ds.assert(t, language(), &service::RequireService {
service_name: dependee.service_name,
});
} else {
tracing::warn!(?service_name, ?dependee, "cannot deduce dependee service name");
tracing::warn!(on = ?dependee, "cannot deduce dependee service name");
}
counter::adjust(t, &obstacle_count, 1);
let d = &dependee.clone();
during!(t, ds, language(), #d, enclose!(
(service_name, obstacle_count, dependee) move |t: &mut Activation| {
tracing::trace!(?service_name, ?dependee, "dependency satisfied");
(obstacle_count, dependee) move |t: &mut Activation| {
tracing::trace!(on = ?dependee, "dependency satisfied");
counter::adjust(t, &obstacle_count, -1);
Ok(())
}));

View File

@ -1,179 +1,37 @@
use noise_protocol::CipherState;
use noise_protocol::U8Array;
use noise_protocol::patterns::HandshakePattern;
use noise_rust_crypto::Blake2s;
use noise_rust_crypto::ChaCha20Poly1305;
use noise_rust_crypto::X25519;
use preserves_schema::Codec;
use syndicate::relay::Mutex;
use syndicate::relay::TunnelRelay;
use syndicate::trace::TurnCause;
use syndicate::value::NoEmbeddedDomainCodec;
use syndicate::value::packed::PackedWriter;
use std::convert::TryInto;
use std::sync::Arc;
use syndicate::actor::*;
use syndicate::enclose;
use syndicate::value::NestedValue;
use syndicate::schemas::dataspace;
use syndicate::during::DuringResult;
use syndicate::schemas::gatekeeper;
use syndicate::schemas::noise;
use syndicate::schemas::sturdy;
use syndicate::value::NestedValue;
use crate::language::language;
use syndicate_macros::during;
use syndicate_macros::pattern;
// pub fn bind(
// t: &mut Activation,
// ds: &Arc<Cap>,
// oid: syndicate::schemas::sturdy::_Any,
// key: [u8; 16],
// target: Arc<Cap>,
// ) {
// let sr = sturdy::SturdyRef::mint(oid.clone(), &key);
// tracing::info!(cap = ?language().unparse(&sr), hex = %sr.to_hex());
// ds.assert(t, language(), &gatekeeper::Bind { oid, key: key.to_vec(), target });
// }
fn sturdy_step_type() -> String {
language().unparse(&sturdy::SturdyStepType).value().to_symbol().unwrap().clone()
}
fn noise_step_type() -> String {
language().unparse(&noise::NoiseStepType).value().to_symbol().unwrap().clone()
}
pub fn handle_binds(t: &mut Activation, ds: &Arc<Cap>) -> ActorResult {
during!(t, ds, language(), <bind <ref $desc> $target $observer>, |t: &mut Activation| {
t.spawn_link(None, move |t| {
target.value().to_embedded()?;
let observer = language().parse::<gatekeeper::BindObserver>(&observer)?;
let desc = language().parse::<sturdy::SturdyDescriptionDetail>(&desc)?;
let sr = sturdy::SturdyRef::mint(desc.oid, &desc.key);
if let gatekeeper::BindObserver::Present(o) = observer {
o.assert(t, language(), &gatekeeper::Bound::Bound {
path_step: Box::new(gatekeeper::PathStep {
step_type: sturdy_step_type(),
detail: language().unparse(&sr.parameters),
}),
});
}
Ok(())
});
Ok(())
});
during!(t, ds, language(), <bind <noise $desc> $target $observer>, |t: &mut Activation| {
t.spawn_link(None, move |t| {
target.value().to_embedded()?;
let observer = language().parse::<gatekeeper::BindObserver>(&observer)?;
let spec = language().parse::<noise::NoiseDescriptionDetail<AnyValue>>(&desc)?.0;
match validate_noise_spec(spec) {
Ok(spec) => if let gatekeeper::BindObserver::Present(o) = observer {
o.assert(t, language(), &gatekeeper::Bound::Bound {
path_step: Box::new(gatekeeper::PathStep {
step_type: noise_step_type(),
detail: language().unparse(&noise::NoisePathStepDetail(noise::NoiseSpec {
key: spec.public_key,
service: noise::ServiceSelector(spec.service),
protocol: if spec.protocol == default_noise_protocol() {
noise::NoiseProtocol::Absent
} else {
noise::NoiseProtocol::Present {
protocol: spec.protocol,
}
},
pre_shared_keys: if spec.psks.is_empty() {
noise::NoisePreSharedKeys::Absent
} else {
noise::NoisePreSharedKeys::Present {
pre_shared_keys: spec.psks,
}
},
})),
}),
});
},
Err(e) => {
if let gatekeeper::BindObserver::Present(o) = observer {
o.assert(t, language(), &gatekeeper::Bound::Rejected(
Box::new(gatekeeper::Rejected {
detail: AnyValue::new(format!("{}", &e)),
})));
}
tracing::error!("Invalid noise bind description: {}", e);
}
}
Ok(())
});
Ok(())
});
Ok(())
}
pub fn facet_handle_resolve(
pub fn handle_resolve(
ds: &mut Arc<Cap>,
t: &mut Activation,
a: gatekeeper::Resolve,
) -> ActorResult {
let mut detail: &'static str = "unsupported";
) -> DuringResult<Arc<Cap>> {
use syndicate::schemas::dataspace;
if a.step.step_type == sturdy_step_type() {
detail = "invalid";
if let Ok(s) = language().parse::<sturdy::SturdyStepDetail>(&a.step.detail) {
t.facet(|t| {
let f = handle_direct_resolution(ds, t, a.clone())?;
await_bind_sturdyref(ds, t, sturdy::SturdyRef { parameters: s.0 }, a.observer, f)
})?;
return Ok(());
}
}
if a.step.step_type == noise_step_type() {
detail = "invalid";
if let Ok(s) = language().parse::<noise::NoiseStepDetail<AnyValue>>(&a.step.detail) {
t.facet(|t| {
let f = handle_direct_resolution(ds, t, a.clone())?;
await_bind_noise(ds, t, s.0.0, a.observer, f)
})?;
return Ok(());
}
}
a.observer.assert(t, language(), &gatekeeper::Rejected {
detail: AnyValue::symbol(detail),
});
Ok(())
}
fn handle_direct_resolution(
ds: &mut Arc<Cap>,
t: &mut Activation,
a: gatekeeper::Resolve,
) -> Result<FacetId, ActorError> {
let outer_facet = t.facet_id();
t.facet(move |t| {
let handler = syndicate::entity(a.observer)
.on_asserted(move |observer, t, a: AnyValue| {
t.stop_facet_and_continue(outer_facet, Some(
enclose!((observer, a) move |t: &mut Activation| {
observer.assert(t, language(), &a);
Ok(())
})))?;
Ok(None)
})
.create_cap(t);
ds.assert(t, language(), &gatekeeper::Resolve {
step: a.step.clone(),
observer: handler,
});
Ok(())
})
}
fn await_bind_sturdyref(
ds: &mut Arc<Cap>,
t: &mut Activation,
sturdyref: sturdy::SturdyRef,
observer: Arc<Cap>,
direct_resolution_facet: FacetId,
) -> ActorResult {
let queried_oid = sturdyref.parameters.oid.clone();
let gatekeeper::Resolve { sturdyref, observer } = a;
let queried_oid = sturdyref.oid.clone();
let handler = syndicate::entity(observer)
.on_asserted(move |observer, t, a: AnyValue| {
t.stop_facet(direct_resolution_facet);
let bindings = a.value().to_sequence()?;
let key = bindings[0].value().to_bytestring()?;
let unattenuated_target = bindings[1].value().to_embedded()?;
@ -181,320 +39,28 @@ fn await_bind_sturdyref(
Err(e) => {
tracing::warn!(sturdyref = ?language().unparse(&sturdyref),
"sturdyref failed validation: {}", e);
observer.assert(t, language(), &gatekeeper::Resolved::Rejected(
Box::new(gatekeeper::Rejected {
detail: AnyValue::symbol("sturdyref-failed-validation"),
})));
Ok(None)
},
Ok(target) => {
tracing::trace!(sturdyref = ?language().unparse(&sturdyref),
?target,
"sturdyref resolved");
observer.assert(t, language(), &gatekeeper::Resolved::Accepted {
responder_session: target,
});
}
}
Ok(None)
})
.create_cap(t);
ds.assert(t, language(), &dataspace::Observe {
// TODO: codegen plugin to generate pattern constructors
pattern: pattern!{<bind <ref { oid: #(&queried_oid), key: $ }> $ _>},
observer: handler,
});
Ok(())
}
struct ValidatedNoiseSpec {
service: AnyValue,
protocol: String,
pattern: HandshakePattern,
psks: Vec<Vec<u8>>,
secret_key: Option<Vec<u8>>,
public_key: Vec<u8>,
}
fn default_noise_protocol() -> String {
language().unparse(&noise::DefaultProtocol).value().to_string().unwrap().clone()
}
fn validate_noise_spec(
spec: noise::NoiseServiceSpec<AnyValue>,
) -> Result<ValidatedNoiseSpec, ActorError> {
let protocol = match spec.base.protocol {
noise::NoiseProtocol::Present { protocol } => protocol,
noise::NoiseProtocol::Invalid { protocol } =>
Err(format!("Invalid noise protocol {:?}", protocol))?,
noise::NoiseProtocol::Absent => default_noise_protocol(),
};
const PREFIX: &'static str = "Noise_";
const SUFFIX: &'static str = "_25519_ChaChaPoly_BLAKE2s";
if !protocol.starts_with(PREFIX) || !protocol.ends_with(SUFFIX) {
Err(format!("Unsupported protocol {:?}", protocol))?;
}
let pattern_name = &protocol[PREFIX.len()..(protocol.len()-SUFFIX.len())];
let pattern = lookup_pattern(pattern_name).ok_or_else::<ActorError, _>(
|| format!("Unsupported handshake pattern {:?}", pattern_name).into())?;
let psks = match spec.base.pre_shared_keys {
noise::NoisePreSharedKeys::Present { pre_shared_keys } => pre_shared_keys,
noise::NoisePreSharedKeys::Invalid { pre_shared_keys } =>
Err(format!("Invalid pre-shared-keys {:?}", pre_shared_keys))?,
noise::NoisePreSharedKeys::Absent => vec![],
};
let secret_key = match spec.secret_key {
noise::SecretKeyField::Present { secret_key } => Some(secret_key),
noise::SecretKeyField::Invalid { secret_key } =>
Err(format!("Invalid secret key {:?}", secret_key))?,
noise::SecretKeyField::Absent => None,
};
Ok(ValidatedNoiseSpec {
service: spec.base.service.0,
protocol,
pattern,
psks,
secret_key,
public_key: spec.base.key,
})
}
fn await_bind_noise(
ds: &mut Arc<Cap>,
t: &mut Activation,
service_selector: AnyValue,
observer: Arc<Cap>,
direct_resolution_facet: FacetId,
) -> ActorResult {
let handler = syndicate::entity(())
.on_asserted_facet(move |_state, t, a: AnyValue| {
t.stop_facet(direct_resolution_facet);
let observer = Arc::clone(&observer);
t.spawn_link(None, move |t| {
let bindings = a.value().to_sequence()?;
let spec = validate_noise_spec(language().parse(&bindings[0])?)?;
let service = bindings[1].value().to_embedded()?;
run_noise_responder(t, spec, observer, Arc::clone(service))
});
Ok(())
})
.create_cap(t);
ds.assert(t, language(), &dataspace::Observe {
// TODO: codegen plugin to generate pattern constructors
pattern: pattern!{
<bind <noise $spec:NoiseServiceSpec{ { service: #(&service_selector) } }> $service _>
},
observer: handler,
});
Ok(())
}
type HandshakeState = noise_protocol::HandshakeState<X25519, ChaCha20Poly1305, Blake2s>;
enum ResponderState {
Invalid, // used during state transitions
Introduction {
service: Arc<Cap>,
hs: HandshakeState,
},
Handshake {
initiator_session: Arc<Cap>,
service: Arc<Cap>,
hs: HandshakeState,
},
Transport {
relay_input: Arc<Mutex<Option<TunnelRelay>>>,
c_recv: CipherState<ChaCha20Poly1305>,
},
}
impl Entity<noise::SessionItem> for ResponderState {
fn assert(&mut self, _t: &mut Activation, item: noise::SessionItem, _handle: Handle) -> ActorResult {
let initiator_session = match item {
noise::SessionItem::Initiator(i_box) => i_box.initiator_session,
noise::SessionItem::Packet(_) => Err("Unexpected Packet assertion")?,
};
match std::mem::replace(self, ResponderState::Invalid) {
ResponderState::Introduction { service, hs } => {
*self = ResponderState::Handshake { initiator_session, service, hs };
Ok(())
}
_ =>
Err("Received second Initiator")?,
}
}
fn message(&mut self, t: &mut Activation, item: noise::SessionItem) -> ActorResult {
let p = match item {
noise::SessionItem::Initiator(_) => Err("Unexpected Initiator message")?,
noise::SessionItem::Packet(p_box) => *p_box,
};
match self {
ResponderState::Invalid | ResponderState::Introduction { .. } =>
Err("Received Packet in invalid ResponderState")?,
ResponderState::Handshake { initiator_session, service, hs } => match p {
noise::Packet::Complete(bs) => {
if bs.len() < hs.get_next_message_overhead() {
Err("Invalid handshake message for pattern")?;
}
if bs.len() > hs.get_next_message_overhead() {
Err("Cannot accept payload during handshake")?;
}
hs.read_message(&bs, &mut [])?;
let mut reply = vec![0u8; hs.get_next_message_overhead()];
hs.write_message(&[], &mut reply[..])?;
initiator_session.message(t, language(), &noise::Packet::Complete(reply.into()));
if hs.completed() {
let (c_recv, mut c_send) = hs.get_ciphers();
let (_, relay_input, mut relay_output) =
TunnelRelay::_run(t, Some(Arc::clone(service)), None, false);
let trace_collector = t.trace_collector();
let initiator_session = Arc::clone(initiator_session);
let relay_output_name = Some(AnyValue::symbol("relay_output"));
let transport_facet = t.facet_ref();
t.linked_task(relay_output_name.clone(), async move {
let account = Account::new(relay_output_name, trace_collector);
let cause = TurnCause::external("relay_output");
loop {
match relay_output.recv().await {
None => return Ok(LinkedTaskTermination::KeepFacet),
Some(loaned_item) => {
const MAXSIZE: usize = 65535 - 16; /* Noise tag length is 16 */
let p = if loaned_item.item.len() > MAXSIZE {
noise::Packet::Fragmented(
loaned_item.item
.chunks(MAXSIZE)
.map(|c| c_send.encrypt_vec(c))
.collect())
} else {
noise::Packet::Complete(c_send.encrypt_vec(&loaned_item.item))
};
if !transport_facet.activate(&account, Some(cause.clone()), |t| {
initiator_session.message(t, language(), &p);
Ok(())
}) {
break;
}
}
}
}
Ok(LinkedTaskTermination::Normal)
});
*self = ResponderState::Transport { relay_input, c_recv };
if let Some(h) = observer.assert(t, &(), &AnyValue::domain(target)) {
Ok(Some(Box::new(move |_observer, t| Ok(t.retract(h)))))
} else {
Ok(None)
}
}
_ => Err("Fragmented handshake is not allowed")?,
},
ResponderState::Transport { relay_input, c_recv } => {
let bs = match p {
noise::Packet::Complete(bs) =>
c_recv.decrypt_vec(&bs[..]).map_err(|_| "Cannot decrypt packet")?,
noise::Packet::Fragmented(pieces) => {
let mut result = Vec::with_capacity(1024);
for piece in pieces {
result.extend(c_recv.decrypt_vec(&piece[..])
.map_err(|_| "Cannot decrypt packet fragment")?);
}
result
}
};
let mut g = relay_input.lock();
let tr = g.as_mut().expect("initialized");
tr.handle_inbound_datagram(t, &bs[..])?;
}
}
Ok(())
})
.create_cap(t);
if let Some(oh) = ds.assert(t, language(), &dataspace::Observe {
// TODO: codegen plugin to generate pattern constructors
pattern: syndicate_macros::pattern!{<bind #(&queried_oid) $ $>},
observer: handler,
}) {
Ok(Some(Box::new(move |_ds, t| Ok(t.retract(oh)))))
} else {
Ok(None)
}
}
fn lookup_pattern(name: &str) -> Option<HandshakePattern> {
use noise_protocol::patterns::*;
Some(match name {
"N" => noise_n(),
"K" => noise_k(),
"X" => noise_x(),
"NN" => noise_nn(),
"NK" => noise_nk(),
"NX" => noise_nx(),
"XN" => noise_xn(),
"XK" => noise_xk(),
"XX" => noise_xx(),
"KN" => noise_kn(),
"KK" => noise_kk(),
"KX" => noise_kx(),
"IN" => noise_in(),
"IK" => noise_ik(),
"IX" => noise_ix(),
"Npsk0" => noise_n_psk0(),
"Kpsk0" => noise_k_psk0(),
"Xpsk1" => noise_x_psk1(),
"NNpsk0" => noise_nn_psk0(),
"NNpsk2" => noise_nn_psk2(),
"NKpsk0" => noise_nk_psk0(),
"NKpsk2" => noise_nk_psk2(),
"NXpsk2" => noise_nx_psk2(),
"XNpsk3" => noise_xn_psk3(),
"XKpsk3" => noise_xk_psk3(),
"XXpsk3" => noise_xx_psk3(),
"KNpsk0" => noise_kn_psk0(),
"KNpsk2" => noise_kn_psk2(),
"KKpsk0" => noise_kk_psk0(),
"KKpsk2" => noise_kk_psk2(),
"KXpsk2" => noise_kx_psk2(),
"INpsk1" => noise_in_psk1(),
"INpsk2" => noise_in_psk2(),
"IKpsk1" => noise_ik_psk1(),
"IKpsk2" => noise_ik_psk2(),
"IXpsk2" => noise_ix_psk2(),
"NNpsk0+psk2" => noise_nn_psk0_psk2(),
"NXpsk0+psk1+psk2" => noise_nx_psk0_psk1_psk2(),
"XNpsk1+psk3" => noise_xn_psk1_psk3(),
"XKpsk0+psk3" => noise_xk_psk0_psk3(),
"KNpsk1+psk2" => noise_kn_psk1_psk2(),
"KKpsk0+psk2" => noise_kk_psk0_psk2(),
"INpsk1+psk2" => noise_in_psk1_psk2(),
"IKpsk0+psk2" => noise_ik_psk0_psk2(),
"IXpsk0+psk2" => noise_ix_psk0_psk2(),
"XXpsk0+psk1" => noise_xx_psk0_psk1(),
"XXpsk0+psk2" => noise_xx_psk0_psk2(),
"XXpsk0+psk3" => noise_xx_psk0_psk3(),
"XXpsk0+psk1+psk2+psk3" => noise_xx_psk0_psk1_psk2_psk3(),
_ => return None,
})
}
fn run_noise_responder(
t: &mut Activation,
spec: ValidatedNoiseSpec,
observer: Arc<Cap>,
service: Arc<Cap>,
) -> ActorResult {
let hs = {
let mut builder = noise_protocol::HandshakeStateBuilder::new();
builder.set_pattern(spec.pattern);
builder.set_is_initiator(false);
let prologue = PackedWriter::encode(&mut NoEmbeddedDomainCodec, &spec.service)?;
builder.set_prologue(&prologue);
match spec.secret_key {
None => (),
Some(sk) => {
let sk: [u8; 32] = sk.try_into().map_err(|_| "Bad secret key length")?;
builder.set_s(U8Array::from_slice(&sk));
},
}
let mut hs = builder.build_handshake_state();
for psk in spec.psks.into_iter() {
hs.push_psk(&psk);
}
hs
};
let responder_session =
Cap::guard(crate::Language::arc(), t.create(ResponderState::Introduction{ service, hs }));
observer.assert(t, language(), &gatekeeper::Resolved::Accepted { responder_session });
Ok(())
}

View File

@ -1,195 +0,0 @@
use std::convert::TryInto;
use std::sync::Arc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
use hyper::{Request, Response, Body, StatusCode};
use hyper::body;
use hyper::header::HeaderName;
use hyper::header::HeaderValue;
use syndicate::actor::*;
use syndicate::error::Error;
use syndicate::trace;
use syndicate::value::Map;
use syndicate::value::NestedValue;
use syndicate::schemas::http;
use tokio::sync::oneshot;
use tokio::sync::mpsc::{UnboundedSender, unbounded_channel};
use tokio_stream::wrappers::UnboundedReceiverStream;
use crate::language;
static NEXT_SEQ: AtomicU64 = AtomicU64::new(0);
pub fn empty_response(code: StatusCode) -> Response<Body> {
let mut r = Response::new(Body::empty());
*r.status_mut() = code;
r
}
type ChunkItem = Result<body::Bytes, Box<dyn std::error::Error + Send + Sync>>;
struct ResponseCollector {
tx_res: Option<(oneshot::Sender<Response<Body>>, Response<Body>)>,
body_tx: Option<UnboundedSender<ChunkItem>>,
}
impl ResponseCollector {
fn new(tx: oneshot::Sender<Response<Body>>) -> Self {
let (body_tx, body_rx) = unbounded_channel();
let body_stream: Box<dyn futures::Stream<Item = ChunkItem> + Send> =
Box::new(UnboundedReceiverStream::new(body_rx));
let mut res = Response::new(body_stream.into());
*res.status_mut() = StatusCode::OK;
ResponseCollector {
tx_res: Some((tx, res)),
body_tx: Some(body_tx),
}
}
fn with_res<F: FnOnce(&mut Response<Body>) -> ActorResult>(&mut self, f: F) -> ActorResult {
if let Some((_, res)) = &mut self.tx_res {
f(res)?;
}
Ok(())
}
fn deliver_res(&mut self) {
if let Some((tx, res)) = std::mem::replace(&mut self.tx_res, None) {
let _ = tx.send(res);
}
}
fn add_chunk(&mut self, value: http::Chunk) -> ActorResult {
self.deliver_res();
if let Some(body_tx) = self.body_tx.as_mut() {
body_tx.send(Ok(match value {
http::Chunk::Bytes(bs) => bs.into(),
http::Chunk::String(s) => s.as_bytes().to_vec().into(),
}))?;
}
Ok(())
}
fn finish(&mut self, t: &mut Activation) -> ActorResult {
self.deliver_res();
self.body_tx = None;
t.stop();
Ok(())
}
}
impl Entity<http::HttpResponse> for ResponseCollector {
fn message(&mut self, t: &mut Activation, message: http::HttpResponse) -> ActorResult {
match message {
http::HttpResponse::Status { code, .. } => self.with_res(|r| {
*r.status_mut() = StatusCode::from_u16(
(&code).try_into().map_err(|_| "bad status code")?)?;
Ok(())
}),
http::HttpResponse::Header { name, value } => self.with_res(|r| {
r.headers_mut().insert(HeaderName::from_bytes(name.as_bytes())?,
HeaderValue::from_str(value.as_str())?);
Ok(())
}),
http::HttpResponse::Chunk { chunk } => {
self.add_chunk(*chunk)
}
http::HttpResponse::Done { chunk } => {
self.add_chunk(*chunk)?;
self.finish(t)
}
}
}
}
pub async fn serve(
trace_collector: Option<trace::TraceCollector>,
facet: FacetRef,
httpd: Arc<Cap>,
mut req: Request<Body>,
port: u16,
) -> Result<Response<Body>, Error> {
let host = match req.headers().get("host").and_then(|v| v.to_str().ok()) {
None => http::RequestHost::Absent,
Some(h) => http::RequestHost::Present(match h.rsplit_once(':') {
None => h.to_string(),
Some((h, _port)) => h.to_string(),
})
};
let uri = req.uri();
let mut path: Vec<String> = uri.path().split('/').map(|s| s.to_string()).collect();
path.remove(0);
let mut query: Map<String, Vec<http::QueryValue>> = Map::new();
for piece in uri.query().unwrap_or("").split('&').into_iter() {
match piece.split_once('=') {
Some((k, v)) => {
let key = k.to_string();
let value = v.to_string();
match query.get_mut(&key) {
None => { query.insert(key, vec![http::QueryValue::String(value)]); },
Some(vs) => { vs.push(http::QueryValue::String(value)); },
}
}
None => {
if piece.len() > 0 {
let key = piece.to_string();
if !query.contains_key(&key) {
query.insert(key, vec![]);
}
}
}
}
}
let mut headers: Map<String, String> = Map::new();
for h in req.headers().into_iter() {
match h.1.to_str() {
Ok(v) => { headers.insert(h.0.as_str().to_string().to_lowercase(), v.to_string()); },
Err(_) => return Ok(empty_response(StatusCode::BAD_REQUEST)),
}
}
let body = match body::to_bytes(req.body_mut()).await {
Ok(b) => http::RequestBody::Present(b.to_vec()),
Err(_) => return Ok(empty_response(StatusCode::BAD_REQUEST)),
};
let account = Account::new(Some(AnyValue::symbol("http")), trace_collector);
let (tx, rx) = oneshot::channel();
facet.activate(&account, Some(trace::TurnCause::external("http")), |t| {
t.facet(move |t| {
let sreq = http::HttpRequest {
sequence_number: NEXT_SEQ.fetch_add(1, Ordering::Relaxed).into(),
host,
port: port.into(),
method: req.method().to_string().to_lowercase(),
path,
headers: http::Headers(headers),
query,
body,
};
tracing::debug!(?sreq);
let srep = Cap::guard(&language().syndicate, t.create(ResponseCollector::new(tx)));
httpd.assert(t, language(), &http::HttpContext { req: sreq, res: srep });
Ok(())
})?;
Ok(())
});
let response_result = rx.await;
match response_result {
Ok(response) => Ok(response),
Err(_ /* sender dropped */) => Ok(empty_response(StatusCode::INTERNAL_SERVER_ERROR)),
}
}

View File

@ -1,7 +1,5 @@
use preserves_schema::Codec;
use std::convert::TryInto;
use std::io;
use std::path::PathBuf;
use std::sync::Arc;
@ -13,7 +11,6 @@ use syndicate::enclose;
use syndicate::relay;
use syndicate::schemas::service;
use syndicate::schemas::transport_address;
use syndicate::trace;
use syndicate::value::Map;
use syndicate::value::NestedValue;
@ -21,22 +18,16 @@ use syndicate::value::NestedValue;
mod counter;
mod dependencies;
mod gatekeeper;
mod http;
mod language;
mod lifecycle;
mod protocol;
mod script;
mod services;
#[cfg(feature = "jemalloc")]
#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
mod schemas {
include!(concat!(env!("OUT_DIR"), "/src/schemas/mod.rs"));
}
use language::Language;
use language::language;
use schemas::internal_services;
@ -59,17 +50,10 @@ struct ServerConfig {
#[structopt(long)]
no_banner: bool,
#[structopt(short = "t", long)]
trace_file: Option<PathBuf>,
/// Enable `$control` entity.
#[structopt(long)]
control: bool,
}
#[tokio::main]
async fn main() -> ActorResult {
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let config = Arc::new(ServerConfig::from_args());
syndicate::convenient_logging()?;
@ -90,7 +74,7 @@ async fn main() -> ActorResult {
eprintln!(r"{} \____{}/{}_/ {} /____/\__, /_/ /_/\____/_/\___/\__/_/\__/\___/", GREEN, BRIGHT_GREEN, GREEN, NORMAL);
eprintln!(r" /____/");
eprintln!(r"");
eprintln!(r" {}version {} [syndicate {}]{}", BRIGHT_YELLOW, env!("CARGO_PKG_VERSION"), syndicate::syndicate_package_version(), NORMAL);
eprintln!(r" {}version {}{}", BRIGHT_YELLOW, env!("CARGO_PKG_VERSION"), NORMAL);
eprintln!(r"");
eprintln!(r" documentation & reference material: https://syndicate-lang.org/");
eprintln!(r" source code & bugs: https://git.syndicate-lang.org/syndicate-lang/syndicate-rs");
@ -99,18 +83,13 @@ async fn main() -> ActorResult {
tracing::trace!("startup");
let trace_collector = config.trace_file.clone().map(
|p| Ok::<trace::TraceCollector, io::Error>(trace::TraceCollector::ascii(
io::BufWriter::new(std::fs::File::create(p)?))))
.transpose()?;
Actor::top(trace_collector, move |t| {
let server_config_ds = Cap::new(&t.create(Dataspace::new(Some(AnyValue::symbol("config")))));
let log_ds = Cap::new(&t.create(Dataspace::new(Some(AnyValue::symbol("log")))));
Actor::new(None).boot(tracing::Span::current(), move |t| {
let server_config_ds = Cap::new(&t.create(Dataspace::new(Some(syndicate::name!("config")))));
let log_ds = Cap::new(&t.create(Dataspace::new(Some(syndicate::name!("log")))));
if config.inferior {
tracing::info!("inferior server instance");
t.spawn(Some(AnyValue::symbol("parent")), enclose!((server_config_ds) move |t| {
t.spawn(syndicate::name!("parent"), enclose!((server_config_ds) move |t| {
protocol::run_io_relay(t,
relay::Input::Bytes(Box::pin(tokio::io::stdin())),
relay::Output::Bytes(Box::pin(tokio::io::stdout())),
@ -118,36 +97,20 @@ async fn main() -> ActorResult {
}));
}
let gatekeeper = Cap::guard(Language::arc(), t.create(
let gatekeeper = Cap::guard(Arc::clone(&language().syndicate), t.create(
syndicate::entity(Arc::clone(&server_config_ds))
.on_asserted_facet(gatekeeper::facet_handle_resolve)));
gatekeeper::handle_binds(t, &server_config_ds)?;
.on_asserted(gatekeeper::handle_resolve)));
let mut env = Map::new();
env.insert("config".to_owned(), AnyValue::domain(Arc::clone(&server_config_ds)));
env.insert("log".to_owned(), AnyValue::domain(Arc::clone(&log_ds)));
env.insert("gatekeeper".to_owned(), AnyValue::domain(Arc::clone(&gatekeeper)));
if config.control {
env.insert("control".to_owned(), AnyValue::domain(Cap::guard(Language::arc(), t.create(
syndicate::entity(())
.on_message(|_, _t, m: crate::schemas::control::ExitServer| {
tracing::info!("$control received exit request with code {}", m.code);
std::process::exit((&m.code).try_into().unwrap_or_else(|_| {
tracing::warn!(
"exit code {} out-of-range of 32-bit signed integer, using 1 instead",
m.code);
1
}))
})))));
}
dependencies::boot(t, Arc::clone(&server_config_ds));
services::config_watcher::on_demand(t, Arc::clone(&server_config_ds));
services::daemon::on_demand(t, Arc::clone(&server_config_ds), Arc::clone(&log_ds));
services::debt_reporter::on_demand(t, Arc::clone(&server_config_ds));
services::gatekeeper::on_demand(t, Arc::clone(&server_config_ds));
services::http_router::on_demand(t, Arc::clone(&server_config_ds));
services::milestone::on_demand(t, Arc::clone(&server_config_ds));
services::tcp_relay_listener::on_demand(t, Arc::clone(&server_config_ds));
services::unix_relay_listener::on_demand(t, Arc::clone(&server_config_ds));
@ -161,7 +124,7 @@ async fn main() -> ActorResult {
for port in config.ports.clone() {
server_config_ds.assert(t, language(), &service::RunService {
service_name: language().unparse(&internal_services::TcpWithoutHttp {
service_name: language().unparse(&internal_services::TcpRelayListener {
addr: transport_address::Tcp {
host: "0.0.0.0".to_owned(),
port: (port as i32).into(),
@ -191,7 +154,7 @@ async fn main() -> ActorResult {
});
}
t.spawn(Some(AnyValue::symbol("logger")), enclose!((log_ds) move |t| {
t.spawn(tracing::Span::current(), enclose!((log_ds) move |t| {
let n_unknown: AnyValue = AnyValue::symbol("-");
let n_pid: AnyValue = AnyValue::symbol("pid");
let n_line: AnyValue = AnyValue::symbol("line");

View File

@ -1,30 +1,26 @@
use futures::SinkExt;
use futures::StreamExt;
use hyper::header::HeaderValue;
use hyper::service::service_fn;
use std::future::ready;
use std::io;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use syndicate::actor::*;
use syndicate::enclose;
use syndicate::error::Error;
use syndicate::error::error;
use syndicate::relay;
use syndicate::trace;
use syndicate::value::NestedValue;
use tokio::net::TcpStream;
use hyper_tungstenite::tungstenite::Message;
use tungstenite::Message;
struct ExitListener;
impl Entity<()> for ExitListener {
fn exit_hook(&mut self, _t: &mut Activation, exit_status: &Arc<ExitStatus>) {
fn exit_hook(&mut self, _t: &mut Activation, exit_status: &Arc<ActorResult>) -> ActorResult {
tracing::info!(?exit_status, "disconnect");
Ok(())
}
}
@ -35,97 +31,52 @@ pub fn run_io_relay(
initial_ref: Arc<Cap>,
) -> ActorResult {
let exit_listener = t.create(ExitListener);
t.add_exit_hook(&exit_listener);
t.state.add_exit_hook(&exit_listener);
relay::TunnelRelay::run(t, i, o, Some(initial_ref), None, false);
Ok(())
}
pub fn run_connection(
trace_collector: Option<trace::TraceCollector>,
facet: FacetRef,
i: relay::Input,
o: relay::Output,
initial_ref: Arc<Cap>,
) {
let cause = trace_collector.as_ref().map(|_| trace::TurnCause::external("start-session"));
let account = Account::new(Some(AnyValue::symbol("start-session")), trace_collector);
facet.activate(&account, cause, |t| run_io_relay(t, i, o, initial_ref));
) -> ActorResult {
facet.activate(Account::new(syndicate::name!("start-session")),
|t| run_io_relay(t, i, o, initial_ref)).into()
}
pub async fn detect_protocol(
trace_collector: Option<trace::TraceCollector>,
facet: FacetRef,
stream: TcpStream,
gateway: Arc<Cap>,
httpd: Option<Arc<Cap>>,
addr: std::net::SocketAddr,
server_port: u16,
) -> ActorResult {
let mut buf = [0; 1]; // peek at the first byte to see what kind of connection to expect
match stream.peek(&mut buf).await? {
1 => match buf[0] {
v if v == b'[' /* Turn */ || v == b'<' /* Error and Extension */ || v >= 128 => {
tracing::info!(protocol = %(if v >= 128 { "application/syndicate" } else { "text/syndicate" }), peer = ?addr);
let (i, o) = stream.into_split();
let i = relay::Input::Bytes(Box::pin(i));
let o = relay::Output::Bytes(Box::pin(o /* BufWriter::new(o) */));
run_connection(trace_collector, facet, i, o, gateway);
Ok(())
}
_ => {
let upgraded = Arc::new(AtomicBool::new(false));
let keepalive = facet.actor.keep_alive();
let mut http = hyper::server::conn::Http::new();
http.http1_keep_alive(true);
http.http1_only(true);
let service = service_fn(|mut req| enclose!(
(upgraded, keepalive, trace_collector, facet, gateway, httpd) async move {
if hyper_tungstenite::is_upgrade_request(&req) {
tracing::info!(protocol = %"websocket",
method=%req.method(),
uri=?req.uri(),
host=?req.headers().get("host").unwrap_or(&HeaderValue::from_static("")));
let (response, websocket) = hyper_tungstenite::upgrade(&mut req, None)
.map_err(|e| message_error(e))?;
upgraded.store(true, Ordering::SeqCst);
tokio::spawn(enclose!(() async move {
let (o, i) = websocket.await?.split();
let i = i.filter_map(|r| ready(extract_binary_packets(r).transpose()));
let o = o.sink_map_err(message_error).with(|bs| ready(Ok(Message::Binary(bs))));
let i = relay::Input::Packets(Box::pin(i));
let o = relay::Output::Packets(Box::pin(o));
run_connection(trace_collector, facet, i, o, gateway);
drop(keepalive);
Ok(()) as ActorResult
}));
Ok(response)
} else {
match httpd {
None => Ok(crate::http::empty_response(
hyper::StatusCode::SERVICE_UNAVAILABLE)),
Some(httpd) => {
tracing::info!(protocol = %"http",
method=%req.method(),
uri=?req.uri(),
host=?req.headers().get("host").unwrap_or(&HeaderValue::from_static("")));
crate::http::serve(trace_collector, facet, httpd, req, server_port).await
}
}
}
}));
http.serve_connection(stream, service).with_upgrades().await?;
if upgraded.load(Ordering::SeqCst) {
tracing::debug!("serve_connection completed after upgrade to websocket");
} else {
tracing::debug!("serve_connection completed after regular HTTP session");
facet.activate(&Account::new(None, None), None, |t| Ok(t.stop()));
let (i, o) = {
let mut buf = [0; 1]; // peek at the first byte to see what kind of connection to expect
match stream.peek(&mut buf).await? {
1 => match buf[0] {
b'G' /* ASCII 'G' for "GET" */ => {
tracing::info!(protocol = %"websocket", peer = ?addr);
let s = tokio_tungstenite::accept_async(stream).await
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
let (o, i) = s.split();
let i = i.filter_map(|r| ready(extract_binary_packets(r).transpose()));
let o = o.sink_map_err(message_error).with(|bs| ready(Ok(Message::Binary(bs))));
(relay::Input::Packets(Box::pin(i)), relay::Output::Packets(Box::pin(o)))
},
_ => {
tracing::info!(protocol = %"raw", peer = ?addr);
let (i, o) = stream.into_split();
(relay::Input::Bytes(Box::pin(i)),
relay::Output::Bytes(Box::pin(o /* BufWriter::new(o) */)))
}
Ok(())
},
}
0 => Err(error("closed before starting", AnyValue::new(false)))?,
_ => unreachable!()
}
0 => Err(error("closed before starting", AnyValue::new(false)))?,
_ => unreachable!()
}
};
run_connection(facet, i, o, gateway)
}
fn message_error<E: std::fmt::Display>(e: E) -> Error {
@ -133,7 +84,7 @@ fn message_error<E: std::fmt::Display>(e: E) -> Error {
}
fn extract_binary_packets(
r: Result<Message, hyper_tungstenite::tungstenite::Error>,
r: Result<Message, tungstenite::Error>,
) -> Result<Option<Vec<u8>>, Error> {
match r {
Ok(m) => match m {
@ -147,8 +98,6 @@ fn extract_binary_packets(
Ok(None), // unsolicited pongs are to be ignored
Message::Close(_) =>
Ok(None), // we're about to see the end of the stream, so ignore this
Message::Frame(_) =>
Err("Raw frames are not accepted")?,
},
Err(e) => Err(message_error(e)),
}

View File

@ -9,16 +9,14 @@ use syndicate::actor::*;
use syndicate::dataspace::Dataspace;
use syndicate::during;
use syndicate::enclose;
use syndicate::pattern::{lift_literal, drop_literal, pattern_seq_from_dictionary};
use syndicate::pattern::{lift_literal, drop_literal};
use syndicate::schemas::dataspace;
use syndicate::schemas::dataspace_patterns as P;
use syndicate::schemas::sturdy;
use syndicate::value::Map;
use syndicate::value::NestedValue;
use syndicate::value::NoEmbeddedDomainCodec;
use syndicate::value::Record;
use syndicate::value::Set;
use syndicate::value::TextWriter;
use syndicate::value::Value;
use crate::language::language;
@ -78,12 +76,6 @@ pub enum Instruction {
pattern_template: AnyValue,
expr: Expr,
},
Cond {
value_var: String,
pattern_template: AnyValue,
on_match: Box<Instruction>,
on_nomatch: Box<Instruction>,
},
}
#[derive(Debug, Clone)]
@ -94,14 +86,11 @@ pub enum Expr {
Dataspace,
Timestamp,
Facet,
Stringify {
expr: Box<Expr>,
},
}
#[derive(Debug, Clone)]
enum RewriteTemplate {
Accept {
Filter {
pattern_template: AnyValue,
},
Rewrite {
@ -110,16 +99,6 @@ enum RewriteTemplate {
},
}
#[derive(Debug, Clone)]
enum CaveatTemplate {
Alts {
alternatives: Vec<RewriteTemplate>,
},
Reject {
pattern_template: AnyValue,
},
}
#[derive(Debug)]
enum Symbolic {
Reference(String),
@ -173,7 +152,7 @@ fn bad_instruction(message: &str) -> io::Error {
}
fn discard() -> P::Pattern {
P::Pattern::Discard
P::Pattern::DDiscard(Box::new(P::DDiscard))
}
fn dlit(value: AnyValue) -> P::Pattern {
@ -184,83 +163,62 @@ fn tlit(value: AnyValue) -> sturdy::Template {
sturdy::Template::Lit(Box::new(sturdy::Lit { value }))
}
fn parse_rewrite(raw_base_name: &AnyValue, e: &AnyValue) -> io::Result<RewriteTemplate> {
if let Some(fields) = e.value().as_simple_record("accept", Some(1)) {
return Ok(RewriteTemplate::Accept {
pattern_template: fields[0].clone(),
});
}
if let Some(fields) = e.value().as_simple_record("rewrite", Some(2)) {
return Ok(RewriteTemplate::Rewrite {
pattern_template: fields[0].clone(),
template_template: fields[1].clone(),
});
}
Err(bad_instruction(&format!("Bad rewrite in attenuation of {:?}: {:?}", raw_base_name, e)))
}
fn parse_caveat(raw_base_name: &AnyValue, e: &AnyValue) -> io::Result<CaveatTemplate> {
if let Some(fields) = e.value().as_simple_record("or", Some(1)) {
let raw_rewrites = match fields[0].value().as_sequence() {
None => Err(bad_instruction(&format!(
"Alternatives in <or> in attenuation of {:?} must have sequence of rewrites; got {:?}",
raw_base_name,
fields[0])))?,
Some(vs) => vs,
};
let alternatives =
raw_rewrites.iter().map(|r| parse_rewrite(raw_base_name, r)).collect::<Result<Vec<_>, _>>()?;
return Ok(CaveatTemplate::Alts{ alternatives });
}
if let Some(fields) = e.value().as_simple_record("reject", Some(1)) {
return Ok(CaveatTemplate::Reject{ pattern_template: fields[0].clone() });
}
if let Ok(r) = parse_rewrite(raw_base_name, e) {
return Ok(CaveatTemplate::Alts { alternatives: vec![r] });
}
Err(bad_instruction(&format!("Bad caveat in attenuation of {:?}: {:?}", raw_base_name, e)))
}
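
For reference, the parser above accepts caveats written in any of these shapes (shown in Preserves record syntax; the concrete patterns are placeholders): a bare <accept <pattern>> or <rewrite <pattern> <template>>, which is wrapped up as a single-alternative Alts; an explicit <or [<rewrite ...> ...]>, whose single field must be a sequence of rewrites; and <reject <pattern>>, naming values the attenuated capability should refuse.
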
fn parse_attenuation(r: &Record<AnyValue>) -> io::Result<Option<(String, Vec<CaveatTemplate>)>> {
fn parse_attenuation(r: &Record<AnyValue>) -> io::Result<Option<(String, Vec<RewriteTemplate>)>> {
if r.label() != &AnyValue::symbol("*") {
return Ok(None);
}
if r.fields().len() != 2 {
Err(bad_instruction(&format!(
"Attenuation requires a reference and a sequence of caveats; got {:?}",
"Attenuation requires a reference and a sequence of rewrites; got {:?}",
r)))?;
}
let raw_base_name = &r.fields()[0];
let base_name = match raw_base_name.value().as_symbol().map(|s| analyze(&s)) {
let base_name = match r.fields()[0].value().as_symbol().map(|s| analyze(&s)) {
Some(Symbolic::Reference(s)) => s,
_ => Err(bad_instruction(&format!(
"Attenuation must have variable reference as first argument; got {:?}",
raw_base_name)))?,
r.fields()[0])))?,
};
let raw_caveats = match r.fields()[1].value().as_sequence() {
let raw_alternatives = match r.fields()[1].value().as_sequence() {
None => Err(bad_instruction(&format!(
"Attenuation of {:?} must have sequence of caveats; got {:?}",
raw_base_name,
"Attenuation of {:?} must have sequence of rewrites; got {:?}",
r.fields()[0],
r.fields()[1])))?,
Some(vs) => vs,
};
let caveats = raw_caveats.iter().map(|c| parse_caveat(raw_base_name, c)).collect::<Result<Vec<_>, _>>()?;
Ok(Some((base_name, caveats)))
let mut alternatives = Vec::new();
for e in raw_alternatives.iter() {
match e.value().as_simple_record("filter", Some(1)) {
Some(fields) =>
alternatives.push(RewriteTemplate::Filter {
pattern_template: fields[0].clone()
}),
None => match e.value().as_simple_record("rewrite", Some(2)) {
Some(fields) =>
alternatives.push(RewriteTemplate::Rewrite {
pattern_template: fields[0].clone(),
template_template: fields[1].clone(),
}),
None => Err(bad_instruction(&format!(
"Bad rewrite in attenuation of {:?}: {:?}",
r.fields()[0],
e)))?,
}
}
}
Ok(Some((base_name, alternatives)))
}
impl<'env> PatternInstantiator<'env> {
fn instantiate_pattern(&mut self, template: &AnyValue) -> io::Result<P::Pattern> {
Ok(match template.value() {
Value::Boolean(_) |
Value::Float(_) |
Value::Double(_) |
Value::SignedInteger(_) |
Value::String(_) |
@ -272,7 +230,7 @@ impl<'env> PatternInstantiator<'env> {
Symbolic::Discard => discard(),
Symbolic::Binder(s) => {
self.binding_names.push(s);
P::Pattern::Bind { pattern: Box::new(discard()) }
P::Pattern::DBind(Box::new(P::DBind { pattern: discard() }))
}
Symbolic::Reference(s) =>
dlit(self.env.lookup(&s, "pattern-template variable")?.clone()),
@ -281,53 +239,49 @@ impl<'env> PatternInstantiator<'env> {
},
Value::Record(r) => match parse_attenuation(r)? {
Some((base_name, caveats)) =>
dlit(self.env.eval_attenuation(base_name, caveats)?),
Some((base_name, alternatives)) =>
dlit(self.env.eval_attenuation(base_name, alternatives)?),
None => match self.maybe_binder_with_pattern(r)? {
Some(pat) => pat,
None => {
let label = self.instantiate_pattern(r.label())?;
let entries = r.fields().iter().enumerate()
.map(|(i, p)| Ok((AnyValue::new(i), self.instantiate_pattern(p)?)))
.collect::<io::Result<Map<AnyValue, P::Pattern>>>()?;
P::Pattern::Group {
type_: Box::new(P::GroupType::Rec {
label: drop_literal(&label)
.ok_or(bad_instruction("Record pattern must have literal label"))?,
}),
entries,
}
let fields = r.fields().iter().map(|p| self.instantiate_pattern(p))
.collect::<io::Result<Vec<P::Pattern>>>()?;
P::Pattern::DCompound(Box::new(P::DCompound::Rec {
label: drop_literal(&label)
.ok_or(bad_instruction("Record pattern must have literal label"))?,
fields,
}))
}
}
},
Value::Sequence(v) =>
P::Pattern::Group {
type_: Box::new(P::GroupType::Arr),
entries: v.iter().enumerate()
.map(|(i, p)| Ok((AnyValue::new(i), self.instantiate_pattern(p)?)))
.collect::<io::Result<Map<AnyValue, P::Pattern>>>()?,
},
P::Pattern::DCompound(Box::new(P::DCompound::Arr {
items: v.iter()
.map(|p| self.instantiate_pattern(p))
.collect::<io::Result<Vec<P::Pattern>>>()?,
})),
Value::Set(_) =>
Err(bad_instruction(&format!("Sets not permitted in patterns: {:?}", template)))?,
Value::Dictionary(v) =>
P::Pattern::Group {
type_: Box::new(P::GroupType::Dict),
P::Pattern::DCompound(Box::new(P::DCompound::Dict {
entries: v.iter()
.map(|(a, b)| Ok((a.clone(), self.instantiate_pattern(b)?)))
.collect::<io::Result<Map<AnyValue, P::Pattern>>>()?,
},
})),
})
}
fn maybe_binder_with_pattern(&mut self, r: &Record<AnyValue>) -> io::Result<Option<P::Pattern>> {
match r.label().value().as_symbol().map(|s| analyze(&s)) {
Some(Symbolic::Binder(formal)) if r.fields().len() == 1 => {
Some(Symbolic::Binder(formal)) => if r.fields().len() == 1 {
let pattern = self.instantiate_pattern(&r.fields()[0])?;
self.binding_names.push(formal);
Ok(Some(P::Pattern::Bind { pattern: Box::new(pattern) }))
return Ok(Some(P::Pattern::DBind(Box::new(P::DBind { pattern }))));
},
_ => Ok(None),
_ => (),
}
Ok(None)
}
}
@ -375,6 +329,7 @@ impl Env {
fn instantiate_value(&self, template: &AnyValue) -> io::Result<AnyValue> {
Ok(match template.value() {
Value::Boolean(_) |
Value::Float(_) |
Value::Double(_) |
Value::SignedInteger(_) |
Value::String(_) |
@ -393,8 +348,8 @@ impl Env {
},
Value::Record(r) => match parse_attenuation(r)? {
Some((base_name, caveats)) =>
self.eval_attenuation(base_name, caveats)?,
Some((base_name, alternatives)) =>
self.eval_attenuation(base_name, alternatives)?,
None =>
Value::Record(Record(r.fields_vec().iter().map(|a| self.instantiate_value(a))
.collect::<Result<Vec<_>, _>>()?)).wrap(),
@ -432,7 +387,7 @@ impl Env {
fn eval_attenuation(
&self,
base_name: String,
caveats: Vec<CaveatTemplate>,
alternatives: Vec<RewriteTemplate>,
) -> io::Result<AnyValue> {
let base_value = self.lookup(&base_name, "attenuation-base variable")?;
match base_value.value().as_embedded() {
@ -440,7 +395,9 @@ impl Env {
"Value to be attenuated is {:?} but must be capability",
base_value))),
Some(base_cap) => {
match base_cap.attenuate(&caveats.iter().map(|c| self.instantiate_caveat(c)).collect::<Result<Vec<_>, _>>()?) {
match base_cap.attenuate(&sturdy::Attenuation(vec![
self.instantiate_caveat(&alternatives)?]))
{
Ok(derived_cap) => Ok(AnyValue::domain(derived_cap)),
Err(caveat_error) =>
Err(bad_instruction(&format!("Attenuation of {:?} failed: {:?}",
@ -520,17 +477,6 @@ impl Env {
}
}
}
Instruction::Cond { value_var, pattern_template, on_match, on_nomatch } => {
let (binding_names, pattern) = self.instantiate_pattern(pattern_template)?;
let value = self.lookup(value_var, "value in conditional expression")?;
match pattern.match_value(&value) {
None => self.eval(t, on_nomatch)?,
Some(captures) => {
self.extend(&binding_names, captures);
self.eval(t, on_match)?
}
}
}
}
Ok(())
}
@ -541,58 +487,38 @@ impl Env {
Expr::Dataspace => Ok(AnyValue::domain(Cap::new(&t.create(Dataspace::new(None))))),
Expr::Timestamp => Ok(AnyValue::new(chrono::Utc::now().to_rfc3339())),
Expr::Facet => Ok(AnyValue::domain(Cap::new(&t.create(FacetHandle::new())))),
Expr::Stringify { expr } => {
let v = self.eval_expr(t, expr)?;
let s = TextWriter::encode(&mut NoEmbeddedDomainCodec, &v)?;
Ok(AnyValue::new(s))
}
}
}
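
The Stringify arm above is plain Preserves text encoding. A free-standing sketch of the same call, assuming the syndicate::value re-exports this file imports (TextWriter, NoEmbeddedDomainCodec, NestedValue); the function name is illustrative:

use std::io;
use syndicate::actor::AnyValue;
use syndicate::value::{NestedValue, NoEmbeddedDomainCodec, TextWriter};

fn stringify(v: &AnyValue) -> io::Result<AnyValue> {
    // Render any Preserves value as its text syntax, then wrap the String back up as a value.
    let s = TextWriter::encode(&mut NoEmbeddedDomainCodec, v)?;
    Ok(AnyValue::new(s))
}
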
fn instantiate_rewrite(
&self,
rw: &RewriteTemplate,
) -> io::Result<sturdy::Rewrite> {
match rw {
RewriteTemplate::Accept { pattern_template } => {
let (_binding_names, pattern) = self.instantiate_pattern(pattern_template)?;
Ok(sturdy::Rewrite {
pattern: embed_pattern(&P::Pattern::Bind { pattern: Box::new(pattern) }),
template: sturdy::Template::TRef(Box::new(sturdy::TRef { binding: 0.into() })),
})
}
RewriteTemplate::Rewrite { pattern_template, template_template } => {
let (binding_names, pattern) = self.instantiate_pattern(pattern_template)?;
Ok(sturdy::Rewrite {
pattern: embed_pattern(&pattern),
template: self.instantiate_template(&binding_names, template_template)?,
})
}
}
}
fn instantiate_caveat(
&self,
c: &CaveatTemplate,
alternatives: &Vec<RewriteTemplate>,
) -> io::Result<sturdy::Caveat> {
match c {
CaveatTemplate::Alts { alternatives } => {
let mut rewrites =
alternatives.iter().map(|r| self.instantiate_rewrite(r)).collect::<Result<Vec<_>, _>>()?;
if rewrites.len() == 1 {
Ok(sturdy::Caveat::Rewrite(Box::new(rewrites.pop().unwrap())))
} else {
Ok(sturdy::Caveat::Alts(Box::new(sturdy::Alts {
alternatives: rewrites,
})))
let mut rewrites = Vec::new();
for rw in alternatives {
match rw {
RewriteTemplate::Filter { pattern_template } => {
let (_binding_names, pattern) = self.instantiate_pattern(pattern_template)?;
rewrites.push(sturdy::Rewrite {
pattern: embed_pattern(&P::Pattern::DBind(Box::new(P::DBind { pattern }))),
template: sturdy::Template::TRef(Box::new(sturdy::TRef { binding: 0.into() })),
})
}
RewriteTemplate::Rewrite { pattern_template, template_template } => {
let (binding_names, pattern) = self.instantiate_pattern(pattern_template)?;
rewrites.push(sturdy::Rewrite {
pattern: embed_pattern(&pattern),
template: self.instantiate_template(&binding_names, template_template)?,
})
}
}
CaveatTemplate::Reject { pattern_template } => {
Ok(sturdy::Caveat::Reject(Box::new(
sturdy::Reject {
pattern: embed_pattern(&self.instantiate_pattern(pattern_template)?.1),
})))
}
}
if rewrites.len() == 1 {
Ok(sturdy::Caveat::Rewrite(Box::new(rewrites.pop().unwrap())))
} else {
Ok(sturdy::Caveat::Alts(Box::new(sturdy::Alts {
alternatives: rewrites,
})))
}
}
@ -607,6 +533,7 @@ impl Env {
Ok(match template.value() {
Value::Boolean(_) |
Value::Float(_) |
Value::Double(_) |
Value::SignedInteger(_) |
Value::String(_) |
@ -630,19 +557,18 @@ impl Env {
},
Value::Record(r) => match parse_attenuation(r)? {
Some((base_name, caveats)) =>
Some((base_name, alternatives)) =>
match find_bound(&base_name) {
Some(i) =>
sturdy::Template::TAttenuate(Box::new(sturdy::TAttenuate {
template: sturdy::Template::TRef(Box::new(sturdy::TRef {
binding: i.into(),
})),
attenuation: caveats.iter()
.map(|c| self.instantiate_caveat(c))
.collect::<Result<Vec<_>, _>>()?,
attenuation: sturdy::Attenuation(vec![
self.instantiate_caveat(&alternatives)?]),
})),
None =>
tlit(self.eval_attenuation(base_name, caveats)?),
tlit(self.eval_attenuation(base_name, alternatives)?),
},
None => {
// TODO: properly consolidate constant templates into literals.
@ -678,26 +604,24 @@ impl Env {
fn embed_pattern(p: &P::Pattern) -> sturdy::Pattern {
match p {
P::Pattern::Discard => sturdy::Pattern::PDiscard(Box::new(sturdy::PDiscard)),
P::Pattern::Bind { pattern } => sturdy::Pattern::PBind(Box::new(sturdy::PBind {
pattern: embed_pattern(&**pattern),
P::Pattern::DDiscard(_) => sturdy::Pattern::PDiscard(Box::new(sturdy::PDiscard)),
P::Pattern::DBind(b) => sturdy::Pattern::PBind(Box::new(sturdy::PBind {
pattern: embed_pattern(&b.pattern),
})),
P::Pattern::Lit { value } => sturdy::Pattern::Lit(Box::new(sturdy::Lit {
value: language().unparse(&**value),
P::Pattern::DLit(b) => sturdy::Pattern::Lit(Box::new(sturdy::Lit {
value: language().unparse(&b.value),
})),
P::Pattern::Group { type_, entries } => sturdy::Pattern::PCompound(Box::new(match &**type_ {
P::GroupType::Rec { label } =>
P::Pattern::DCompound(b) => sturdy::Pattern::PCompound(Box::new(match &**b {
P::DCompound::Rec { label, fields } =>
sturdy::PCompound::Rec {
label: label.clone(),
fields: pattern_seq_from_dictionary(entries).expect("correct field entries")
.into_iter().map(embed_pattern).collect(),
fields: fields.iter().map(embed_pattern).collect(),
},
P::GroupType::Arr =>
P::DCompound::Arr { items } =>
sturdy::PCompound::Arr {
items: pattern_seq_from_dictionary(entries).expect("correct element entries")
.into_iter().map(embed_pattern).collect(),
items: items.iter().map(embed_pattern).collect(),
},
P::GroupType::Dict =>
P::DCompound::Dict { entries } =>
sturdy::PCompound::Dict {
entries: entries.iter().map(|(k, v)| (k.clone(), embed_pattern(v))).collect(),
},
@ -740,7 +664,7 @@ impl<'t> Parser<'t> {
T::default()
}
pub fn parse(&mut self, target: &str, outer_target: &str) -> Parsed<Instruction> {
pub fn parse(&mut self, target: &str) -> Parsed<Instruction> {
if self.ateof() {
return Parsed::Eof;
}
@ -755,7 +679,7 @@ impl<'t> Parser<'t> {
if let Some(tokens) = self.peek().as_sequence() {
self.drop();
let mut inner_parser = Parser::new(tokens);
let instructions = inner_parser.parse_all(target, outer_target);
let instructions = inner_parser.parse_all(target);
self.errors.extend(inner_parser.errors);
return Parsed::Value(Instruction::Sequence { instructions });
}
@ -770,7 +694,7 @@ impl<'t> Parser<'t> {
Instruction::During { target, pattern_template, body } },
"?" => |target, pattern_template, body| { // "??"
Instruction::OnMessage { target, pattern_template, body } },
"-" => match self.parse(target, outer_target) { // "?-"
"-" => match self.parse(target) { // "?-"
Parsed::Value(i) => return Parsed::Value(Instruction::OnStop {
body: Box::new(i),
}),
@ -785,7 +709,7 @@ impl<'t> Parser<'t> {
}
let pattern_template = self.shift();
return match self.parse(target, outer_target) {
return match self.parse(target) {
Parsed::Eof =>
self.error(format!(
"Missing instruction in react with pattern {:?}",
@ -809,7 +733,7 @@ impl<'t> Parser<'t> {
let m = format!("Missing instruction after retarget: {:?}", self.peek());
return self.error(m);
}
return self.parse(&s, target);
return self.parse(&s);
}
Symbolic::Bare(s) => {
if s == "let" {
@ -850,30 +774,26 @@ impl<'t> Parser<'t> {
}
Symbolic::Literal(s) => {
if s == "~" { // "=~"
self.drop();
if self.ateof() {
return self.error("Missing pattern, true-instruction and false-continuation in match");
}
let match_template = self.shift();
return match self.parse(outer_target, outer_target) {
Parsed::Eof =>
self.error(format!(
"Missing true-instruction in conditional with pattern {:?}",
match_template)),
Parsed::Skip =>
Parsed::Skip,
Parsed::Value(true_instruction) => {
let false_instructions = self.parse_all(outer_target, outer_target);
Parsed::Value(Instruction::Cond {
value_var: target.to_owned(),
pattern_template: match_template,
on_match: Box::new(true_instruction),
on_nomatch: Box::new(Instruction::Sequence {
instructions: false_instructions,
}),
})
}
};
// s.drop();
// if self.ateof() {
// return self.error("Missing pattern, true-instruction and false-continuation in match");
// }
// let match_template = self.shift();
// return match self.parse(target) {
// Parsed::Eof =>
// self.error(format!(
// "Missing true-instruction in conditional with pattern {:?}",
// match_template)),
// Parsed::Skip =>
// Parsed::Skip,
// Parsed::Value(true_instruction) => {
// let false_instructions = self.parse_all();
// Parsed::Value(Instruction::Cond {
// value: target.to_owned(),
// pattern: match_template,
// on_match: true_instruction,
// on_nomatch: self.parse_all(
// };
} else {
/* fall through */
}
@ -887,10 +807,10 @@ impl<'t> Parser<'t> {
}
}
pub fn parse_all(&mut self, target: &str, outer_target: &str) -> Vec<Instruction> {
pub fn parse_all(&mut self, target: &str) -> Vec<Instruction> {
let mut instructions = Vec::new();
loop {
match self.parse(target, outer_target) {
match self.parse(target) {
Parsed::Value(i) => instructions.push(i),
Parsed::Skip => (),
Parsed::Eof => break,
@ -900,7 +820,7 @@ impl<'t> Parser<'t> {
}
pub fn parse_top(&mut self, target: &str) -> Result<Option<Instruction>, Vec<String>> {
let instructions = self.parse_all(target, target);
let instructions = self.parse_all(target);
if self.errors.is_empty() {
match instructions.len() {
0 => Ok(None),
@ -931,11 +851,6 @@ impl<'t> Parser<'t> {
return Some(Expr::Facet);
}
if self.peek() == &Value::symbol("stringify") {
self.drop();
return Some(Expr::Stringify { expr: Box::new(self.parse_expr()?) });
}
return Some(Expr::Template{ template: self.shift() });
}
}

View File

@ -3,8 +3,6 @@ use notify::Watcher;
use notify::RecursiveMode;
use notify::watcher;
use syndicate::preserves::rec;
use std::fs;
use std::future;
use std::io;
@ -18,7 +16,6 @@ use syndicate::actor::*;
use syndicate::error::Error;
use syndicate::enclose;
use syndicate::supervise::{Supervisor, SupervisorConfiguration};
use syndicate::trace;
use syndicate::value::BinarySource;
use syndicate::value::BytesBinarySource;
use syndicate::value::Map;
@ -35,11 +32,11 @@ use crate::script;
use syndicate_macros::during;
pub fn on_demand(t: &mut Activation, config_ds: Arc<Cap>) {
t.spawn(Some(AnyValue::symbol("config_watcher")), move |t| {
Ok(during!(t, config_ds, language(), <run-service $spec: internal_services::ConfigWatcher::<AnyValue>>, |t| {
t.spawn(syndicate::name!("config_watcher"), move |t| {
Ok(during!(t, config_ds, language(), <run-service $spec: internal_services::ConfigWatcher>, |t| {
Supervisor::start(
t,
Some(rec![AnyValue::symbol("config"), AnyValue::new(spec.path.clone())]),
syndicate::name!(parent: None, "config", path = ?spec.path),
SupervisorConfiguration::default(),
enclose!((config_ds, spec) lifecycle::updater(config_ds, spec)),
enclose!((config_ds) move |t| enclose!((config_ds, spec) run(t, config_ds, spec))))
@ -145,20 +142,13 @@ fn initial_scan(
scan_file(t, path_state, env);
} else {
match fs::read_dir(&env.path) {
Ok(unsorted_entries) => {
let mut entries: Vec<fs::DirEntry> = Vec::new();
for er in unsorted_entries {
match er {
Ok(e) =>
entries.push(e),
Err(e) =>
tracing::warn!(
"initial_scan: transient during scan of {:?}: {:?}", &env.path, e),
}
}
entries.sort_by_key(|e| e.file_name());
for e in entries {
initial_scan(t, path_state, config_ds, env.clone_with_path(e.path()));
Ok(entries) => for er in entries {
match er {
Ok(e) =>
initial_scan(t, path_state, config_ds, env.clone_with_path(e.path())),
Err(e) =>
tracing::warn!(
"initial_scan: transient during scan of {:?}: {:?}", &env.path, e),
}
}
Err(e) => tracing::warn!("initial_scan: enumerating {:?}: {:?}", &env.path, e),
@ -184,33 +174,26 @@ fn run(
let mut watcher = watcher(tx, Duration::from_millis(100)).map_err(convert_notify_error)?;
watcher.watch(&env.path, RecursiveMode::Recursive).map_err(convert_notify_error)?;
let facet = t.facet_ref();
let trace_collector = t.trace_collector();
let facet = t.facet.clone();
let span = tracing::Span::current();
thread::spawn(move || {
let _entry = span.enter();
let mut path_state: Map<PathBuf, FacetId> = Map::new();
{
let cause = trace_collector.as_ref().map(|_| trace::TurnCause::external("initial_scan"));
let account = Account::new(Some(AnyValue::symbol("initial_scan")), trace_collector.clone());
if !facet.activate(
&account, cause, |t| {
initial_scan(t, &mut path_state, &config_ds, env.clone());
config_ds.assert(t, language(), &lifecycle::ready(&spec));
Ok(())
})
{
return;
}
let initial_scan_result = facet.activate(
Account::new(syndicate::name!("initial_scan")), |t| {
initial_scan(t, &mut path_state, &config_ds, env.clone());
config_ds.assert(t, language(), &lifecycle::ready(&spec));
Ok(())
});
if initial_scan_result.is_already_terminated() {
return;
}
tracing::trace!("initial_scan complete");
let mut rescan = |paths: Vec<PathBuf>| {
let cause = trace_collector.as_ref().map(|_| trace::TurnCause::external("rescan"));
let account = Account::new(Some(AnyValue::symbol("rescan")), trace_collector.clone());
facet.activate(&account, cause, |t| {
facet.activate(Account::new(syndicate::name!("rescan")), |t| {
let mut to_stop = Vec::new();
for path in paths.into_iter() {
let maybe_facet_id = path_state.remove(&path);
@ -228,43 +211,42 @@ fn run(
t.stop_facet(facet_id);
}
Ok(())
})
}).as_result()
};
while let Ok(event) = rx.recv() {
tracing::trace!("notification: {:?}", &event);
let keep_running = match event {
DebouncedEvent::NoticeWrite(_p) |
DebouncedEvent::NoticeRemove(_p) =>
true,
DebouncedEvent::Create(p) |
DebouncedEvent::Write(p) |
DebouncedEvent::Chmod(p) |
DebouncedEvent::Remove(p) =>
rescan(vec![p]),
DebouncedEvent::Rename(p, q) =>
rescan(vec![p, q]),
_ => {
tracing::info!("{:?}", event);
true
}
};
if !keep_running { break; }
if
match event {
DebouncedEvent::NoticeWrite(_p) |
DebouncedEvent::NoticeRemove(_p) =>
Ok(()),
DebouncedEvent::Create(p) |
DebouncedEvent::Write(p) |
DebouncedEvent::Chmod(p) |
DebouncedEvent::Remove(p) =>
rescan(vec![p]),
DebouncedEvent::Rename(p, q) =>
rescan(vec![p, q]),
_ => {
tracing::info!("{:?}", event);
Ok(())
}
}.is_err()
{
return;
}
}
{
let cause = trace_collector.as_ref().map(|_| trace::TurnCause::external("termination"));
let account = Account::new(Some(AnyValue::symbol("termination")), trace_collector);
facet.activate(&account, cause, |t| {
tracing::trace!("linked thread terminating associated facet");
Ok(t.stop())
});
}
let _ = facet.activate(Account::new(syndicate::name!("termination")), |t| {
tracing::trace!("linked thread terminating associated facet");
Ok(t.stop())
});
tracing::trace!("linked thread done");
});
t.linked_task(Some(AnyValue::symbol("cancel-wait")), async move {
t.linked_task(syndicate::name!("cancel-wait"), async move {
future::pending::<()>().await;
drop(watcher);
Ok(LinkedTaskTermination::KeepFacet)

View File

@ -4,10 +4,8 @@ use std::sync::Arc;
use syndicate::actor::*;
use syndicate::enclose;
use syndicate::preserves::rec;
use syndicate::schemas::service;
use syndicate::supervise::{Supervisor, SupervisorConfiguration};
use syndicate::trace;
use syndicate::value::NestedValue;
use tokio::io::AsyncRead;
@ -23,8 +21,8 @@ use crate::schemas::external_services::*;
use syndicate_macros::during;
pub fn on_demand(t: &mut Activation, config_ds: Arc<Cap>, root_ds: Arc<Cap>) {
t.spawn(Some(AnyValue::symbol("daemon_listener")), move |t| {
Ok(during!(t, config_ds, language(), <run-service $spec: DaemonService::<AnyValue>>,
t.spawn(syndicate::name!("daemon"), move |t| {
Ok(during!(t, config_ds, language(), <run-service $spec: DaemonService>,
enclose!((config_ds, root_ds) move |t: &mut Activation| {
supervise_daemon(t, config_ds, root_ds, spec)
})))
@ -41,14 +39,14 @@ fn supervise_daemon(
lifecycle::on_service_restart(t, &config_ds, &spec, enclose!(
(config_ds, root_ds, spec) move |t| {
tracing::info!(id = ?spec.id, "Terminating to restart");
t.stop_facet_and_continue(t.facet_id(), Some(
t.stop_facet_and_continue(t.facet.facet_id, Some(
enclose!((config_ds, root_ds, spec) move |t: &mut Activation| {
supervise_daemon(t, config_ds, root_ds, spec)
})))
}));
Supervisor::start(
t,
Some(language().unparse(&spec)),
syndicate::name!(parent: None, "daemon", id = ?spec.id),
SupervisorConfiguration::on_error_only(),
enclose!((config_ds, spec) lifecycle::updater(config_ds, spec)),
enclose!((config_ds, root_ds) move |t|
@ -139,7 +137,7 @@ impl DaemonProcessSpec {
},
DaemonProcessSpec::OneShot { setup } => FullDaemonProcess {
process: Process::Simple(setup).elaborate(),
ready_on_start: ReadyOnStart::Present { ready_on_start: false },
ready_on_start: ReadyOnStart::Absent,
restart: RestartField::Present { restart: Box::new(RestartPolicy::OnError) },
protocol: ProtocolField::Absent,
},
@ -164,6 +162,7 @@ struct DaemonInstance {
config_ds: Arc<Cap>,
log_ds: Arc<Cap>,
service: AnyValue,
name: tracing::Span,
cmd: process::Command,
announce_presumed_readiness: bool,
unready_configs: Arc<Field<isize>>,
@ -176,7 +175,7 @@ impl DaemonInstance {
fn handle_exit(self, t: &mut Activation, error_message: Option<String>) -> ActorResult {
let delay =
std::time::Duration::from_millis(if let None = error_message { 200 } else { 1000 });
t.stop_facet_and_continue(t.facet_id(), Some(move |t: &mut Activation| {
t.stop_facet_and_continue(t.facet.facet_id, Some(move |t: &mut Activation| {
#[derive(Debug)]
enum NextStep {
SleepAndRestart,
@ -199,7 +198,6 @@ impl DaemonInstance {
Err(s.as_str())?
}
},
RestartPolicy::Never => SignalSuccessfulCompletion,
};
match error_message {
@ -213,6 +211,7 @@ impl DaemonInstance {
t.facet(|t| {
let _ = t.prevent_inert_check();
counter::adjust(t, &self.completed_processes, 1);
counter::adjust(t, &self.unready_configs, -1);
Ok(())
})?;
()
@ -225,30 +224,25 @@ impl DaemonInstance {
fn log<R: 'static + Send + AsyncRead + Unpin>(
&self,
t: &mut Activation,
facet: FacetRef,
pid: Option<u32>,
r: R,
kind: &str
) -> ActorResult {
t.facet(|t| {
let facet = t.facet_ref();
let log_ds = self.log_ds.clone();
let service = self.service.clone();
let kind = AnyValue::symbol(kind);
let pid = match pid {
Some(n) => AnyValue::new(n),
None => AnyValue::symbol("unknown"),
};
let trace_collector = t.trace_collector();
t.linked_task(None, async move {
) {
let log_ds = self.log_ds.clone();
let service = self.service.clone();
let kind = AnyValue::symbol(kind);
let pid = match pid {
Some(n) => AnyValue::new(n),
None => AnyValue::symbol("unknown"),
};
t.spawn(syndicate::name!(parent: self.name.clone(), "log"), move |t| {
t.linked_task(tracing::Span::current(), async move {
let mut r = BufReader::new(r);
let cause = trace_collector.as_ref().map(
|_| trace::TurnCause::external(kind.value().as_symbol().unwrap()));
let account = Account::new(None, trace_collector);
loop {
let mut buf = Vec::new();
match r.read_until(b'\n', &mut buf).await {
Ok(0) | Err(_) => break,
Ok(_) => (),
if r.read_until(b'\n', &mut buf).await? == 0 {
return Ok(LinkedTaskTermination::Normal);
}
let buf = match std::str::from_utf8(&buf) {
Ok(s) => AnyValue::new(s),
@ -256,16 +250,17 @@ impl DaemonInstance {
};
let now = AnyValue::new(chrono::Utc::now().to_rfc3339());
if !facet.activate(
&account, cause.clone(), enclose!((pid, service, kind) |t| {
Account::new(tracing::Span::current()),
enclose!((pid, service, kind) |t| {
log_ds.message(t, &(), &syndicate_macros::template!(
"<log =now {
pid: =pid,
service: =service,
stream: =kind,
line: =buf,
}>"));
pid: =pid,
service: =service,
stream: =kind,
line: =buf,
}>"));
Ok(())
}))
})).is_success()
{
break;
}
@ -273,8 +268,7 @@ impl DaemonInstance {
Ok(LinkedTaskTermination::Normal)
});
Ok(())
})?;
Ok(())
});
}
fn start(mut self, t: &mut Activation) -> ActorResult {
@ -290,10 +284,10 @@ impl DaemonInstance {
let pid = child.id();
tracing::debug!(?pid, cmd = ?self.cmd, "started");
let facet = t.facet_ref();
let facet = t.facet.clone();
if let Some(r) = child.stderr.take() {
self.log(t, pid, r, "stderr")?;
self.log(t, facet.clone(), pid, r, "stderr");
}
match self.protocol {
@ -301,7 +295,7 @@ impl DaemonInstance {
Protocol::BinarySyndicate => self.relay_facet(t, &mut child, false)?,
Protocol::None => {
if let Some(r) = child.stdout.take() {
self.log(t, pid, r, "stdout")?;
self.log(t, facet.clone(), pid, r, "stdout");
}
}
}
@ -310,20 +304,16 @@ impl DaemonInstance {
counter::adjust(t, &self.unready_configs, -1);
}
let trace_collector = t.trace_collector();
t.linked_task(
Some(rec![AnyValue::symbol("wait"), self.service.clone()]),
syndicate::name!(parent: self.name.clone(), "wait"),
enclose!((facet) async move {
tracing::trace!("waiting for process exit");
let status = child.wait().await?;
tracing::debug!(?status);
let cause = trace_collector.as_ref().map(
|_| trace::TurnCause::external("instance-terminated"));
let account = Account::new(Some(AnyValue::symbol("instance-terminated")), trace_collector);
facet.activate(&account, cause, |t| {
facet.activate(Account::new(syndicate::name!("instance-terminated")), |t| {
let m = if status.success() { None } else { Some(format!("{}", status)) };
self.handle_exit(t, m)
});
}).ignore_termination()?;
Ok(LinkedTaskTermination::Normal)
}));
Ok(())
@ -388,10 +378,9 @@ fn run(
Ok(())
}))?;
let trace_collector = t.trace_collector();
enclose!((config_ds, unready_configs, completed_processes)
during!(t, config_ds.clone(), language(), <daemon #(&service.id) $config>, {
enclose!((spec, config_ds, root_ds, unready_configs, completed_processes, trace_collector)
enclose!((spec, config_ds, root_ds, unready_configs, completed_processes)
|t: &mut Activation| {
tracing::debug!(?config, "new config");
counter::adjust(t, &unready_configs, 1);
@ -401,8 +390,8 @@ fn run(
Ok(config) => {
tracing::info!(?config);
let config = config.elaborate();
let facet = t.facet_ref();
t.linked_task(Some(AnyValue::symbol("subprocess")), async move {
let facet = t.facet.clone();
t.linked_task(syndicate::name!("subprocess"), async move {
let mut cmd = config.process.build_command().ok_or("Cannot start daemon process")?;
let announce_presumed_readiness = match config.ready_on_start {
@ -443,6 +432,7 @@ fn run(
config_ds,
log_ds: root_ds,
service: spec,
name: tracing::Span::current(),
cmd,
announce_presumed_readiness,
unready_configs,
@ -451,12 +441,9 @@ fn run(
protocol,
};
let cause = trace_collector.as_ref().map(
|_| trace::TurnCause::external("instance-startup"));
let account = Account::new(Some(AnyValue::symbol("instance-startup")), trace_collector);
facet.activate(&account, cause, |t| {
facet.activate(Account::new(syndicate::name!("instance-startup")), |t| {
daemon_instance.start(t)
});
}).ignore_termination()?;
Ok(LinkedTaskTermination::KeepFacet)
});
Ok(())

View File

@ -1,12 +1,7 @@
use preserves_schema::Codec;
use std::sync::Arc;
use std::sync::atomic::Ordering;
use syndicate::actor::*;
use syndicate::enclose;
use syndicate::preserves::rec;
use syndicate::preserves::value::NestedValue;
use crate::language::language;
use crate::lifecycle;
@ -15,10 +10,9 @@ use crate::schemas::internal_services::DebtReporter;
use syndicate_macros::during;
pub fn on_demand(t: &mut Activation, ds: Arc<Cap>) {
t.spawn(Some(AnyValue::symbol("debt_reporter_listener")), move |t| {
t.spawn(syndicate::name!("debt_reporter"), move |t| {
Ok(during!(t, ds, language(), <run-service $spec: DebtReporter>, |t: &mut Activation| {
t.spawn_link(Some(rec![AnyValue::symbol("debt_reporter"), language().unparse(&spec)]),
enclose!((ds) |t| run(t, ds, spec)));
t.spawn_link(tracing::Span::current(), enclose!((ds) |t| run(t, ds, spec)));
Ok(())
}))
});
@ -28,38 +22,10 @@ fn run(t: &mut Activation, ds: Arc<Cap>, spec: DebtReporter) -> ActorResult {
ds.assert(t, language(), &lifecycle::started(&spec));
ds.assert(t, language(), &lifecycle::ready(&spec));
t.every(core::time::Duration::from_millis((spec.interval_seconds.0 * 1000.0) as u64), |_t| {
for (account_id, (name, debt)) in syndicate::actor::ACCOUNTS.read().iter() {
tracing::info!(account_id, ?name, debt = ?debt.load(Ordering::Relaxed));
for (id, (name, debt)) in syndicate::actor::ACCOUNTS.read().iter() {
let _enter = name.enter();
tracing::info!(id, debt = ?debt.load(std::sync::atomic::Ordering::Relaxed));
}
// let snapshot = syndicate::actor::ACTORS.read().clone();
// for (id, (name, ac_ref)) in snapshot.iter() {
// if *id == _t.state.actor_id {
// tracing::debug!("skipping report on the reporting actor, to avoid deadlock");
// continue;
// }
// tracing::trace!(?id, "about to lock");
// tracing::info_span!("actor", id, ?name).in_scope(|| match &*ac_ref.state.lock() {
// ActorState::Terminated { exit_status } =>
// tracing::info!(?exit_status, "terminated"),
// ActorState::Running(state) => {
// tracing::info!(field_count = ?state.fields.len(),
// outbound_assertion_count = ?state.outbound_assertions.len(),
// facet_count = ?state.facet_nodes.len());
// tracing::info_span!("facets").in_scope(|| {
// for (facet_id, f) in state.facet_nodes.iter() {
// tracing::info!(
// ?facet_id,
// parent_id = ?f.parent_facet_id,
// outbound_handle_count = ?f.outbound_handles.len(),
// linked_task_count = ?f.linked_tasks.len(),
// inert_check_preventers = ?f.inert_check_preventers.load(Ordering::Relaxed));
// }
// });
// }
// });
// }
Ok(())
})
}

View File

@ -1,39 +0,0 @@
use preserves_schema::Codec;
use std::sync::Arc;
use syndicate::actor::*;
use syndicate::enclose;
use syndicate::preserves::rec;
use syndicate::preserves::value::NestedValue;
use crate::gatekeeper;
use crate::language::Language;
use crate::language::language;
use crate::lifecycle;
use crate::schemas::internal_services::Gatekeeper;
use syndicate_macros::during;
pub fn on_demand(t: &mut Activation, ds: Arc<Cap>) {
t.spawn(Some(AnyValue::symbol("gatekeeper_listener")), move |t| {
Ok(during!(t, ds, language(), <run-service $spec: Gatekeeper::<AnyValue>>, |t: &mut Activation| {
t.spawn_link(Some(rec![AnyValue::symbol("gatekeeper"), language().unparse(&spec)]),
enclose!((ds) |t| run(t, ds, spec)));
Ok(())
}))
});
}
fn run(t: &mut Activation, ds: Arc<Cap>, spec: Gatekeeper<AnyValue>) -> ActorResult {
let resolver = t.create(syndicate::entity(Arc::clone(&spec.bindspace))
.on_asserted_facet(gatekeeper::facet_handle_resolve));
ds.assert(t, language(), &syndicate::schemas::service::ServiceObject {
service_name: language().unparse(&spec),
object: AnyValue::domain(Cap::guard(Language::arc(), resolver)),
});
gatekeeper::handle_binds(t, &spec.bindspace)?;
ds.assert(t, language(), &lifecycle::started(&spec));
ds.assert(t, language(), &lifecycle::ready(&spec));
Ok(())
}

View File

@ -1,348 +0,0 @@
use preserves_schema::Codec;
use std::convert::TryFrom;
use std::io::Read;
use std::sync::Arc;
use syndicate::actor::*;
use syndicate::enclose;
use syndicate::error::Error;
use syndicate::preserves::rec;
use syndicate::preserves::value::Map;
use syndicate::preserves::value::NestedValue;
use syndicate::schemas::http;
use syndicate::value::signed_integer::SignedInteger;
use crate::language::language;
use crate::lifecycle;
use crate::schemas::internal_services::HttpRouter;
use crate::schemas::internal_services::HttpStaticFileServer;
use syndicate_macros::during;
lazy_static::lazy_static! {
pub static ref MIME_TABLE: Map<String, String> = load_mime_table("/etc/mime.types").unwrap_or_default();
}
pub fn load_mime_table(path: &str) -> Result<Map<String, String>, std::io::Error> {
let mut table = Map::new();
let file = std::fs::read_to_string(path)?;
for line in file.split('\n') {
if line.starts_with('#') {
continue;
}
let pieces = line.split(&[' ', '\t'][..]).collect::<Vec<&str>>();
for i in 1..pieces.len() {
table.insert(pieces[i].to_string(), pieces[0].to_string());
}
}
Ok(table)
}
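
load_mime_table treats each line of /etc/mime.types as a MIME type followed by whitespace-separated extensions, skips comments, and indexes the result by extension. A toy sketch of that parse and lookup; the sample input is invented and a plain BTreeMap stands in for the crate's Map alias:

use std::collections::BTreeMap;

fn main() {
    // Invented sample in /etc/mime.types format: "<mime-type> <ext> <ext> ...".
    let sample = "# comment lines are skipped\ntext/html\thtml htm\nimage/png\tpng\n";
    let mut table: BTreeMap<String, String> = BTreeMap::new();
    for line in sample.split('\n') {
        if line.starts_with('#') || line.trim().is_empty() {
            continue;
        }
        let pieces: Vec<&str> = line.split(&[' ', '\t'][..]).collect();
        for ext in &pieces[1..] {
            // Map each extension back to its MIME type.
            table.insert(ext.to_string(), pieces[0].to_string());
        }
    }
    assert_eq!(table.get("htm").map(String::as_str), Some("text/html"));
}
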
pub fn on_demand(t: &mut Activation, ds: Arc<Cap>) {
t.spawn(Some(AnyValue::symbol("http_router_listener")), move |t| {
enclose!((ds) during!(t, ds, language(), <run-service $spec: HttpRouter::<AnyValue>>, |t: &mut Activation| {
t.spawn_link(Some(rec![AnyValue::symbol("http_router"), language().unparse(&spec)]),
enclose!((ds) |t| run(t, ds, spec)));
Ok(())
}));
enclose!((ds) during!(t, ds, language(), <run-service $spec: HttpStaticFileServer>, |t: &mut Activation| {
t.spawn_link(Some(rec![AnyValue::symbol("http_static_file_server"), language().unparse(&spec)]),
enclose!((ds) |t| run_static_file_server(t, ds, spec)));
Ok(())
}));
Ok(())
});
}
#[derive(Debug, Clone)]
struct ActiveHandler {
cap: Arc<Cap>,
terminated: Arc<Field<bool>>,
}
type MethodTable = Map<http::MethodPattern, Vec<ActiveHandler>>;
type HostTable = Map<http::HostPattern, Map<http::PathPattern, MethodTable>>;
type RoutingTable = Map<SignedInteger, HostTable>;
fn request_host(value: &http::RequestHost) -> Option<String> {
match value {
http::RequestHost::Present(h) => Some(h.to_owned()),
http::RequestHost::Absent => None,
}
}
fn run(t: &mut Activation, ds: Arc<Cap>, spec: HttpRouter) -> ActorResult {
ds.assert(t, language(), &lifecycle::started(&spec));
ds.assert(t, language(), &lifecycle::ready(&spec));
let httpd = spec.httpd;
let routes: Arc<Field<RoutingTable>> = t.named_field("routes", Map::new());
enclose!((httpd, routes) during!(t, httpd, language(), <http-bind _ $port _ _ _>, |t: &mut Activation| {
let port1 = port.clone();
enclose!((httpd, routes) during!(t, httpd, language(), <http-listener #(&port1)>, enclose!((routes, port) |t: &mut Activation| {
let port2 = port.clone();
during!(t, httpd, language(), <http-bind $host #(&port2) $method $path $handler>, |t: &mut Activation| {
tracing::debug!("+HTTP binding {:?} {:?} {:?} {:?} {:?}", host, port, method, path, handler);
let port = port.value().to_signedinteger()?;
let host = language().parse::<http::HostPattern>(&host)?;
let path = language().parse::<http::PathPattern>(&path)?;
let method = language().parse::<http::MethodPattern>(&method)?;
let handler_cap = handler.value().to_embedded()?.clone();
let handler_terminated = t.named_field("handler-terminated", false);
t.get_mut(&routes)
.entry(port.clone()).or_default()
.entry(host.clone()).or_default()
.entry(path.clone()).or_default()
.entry(method.clone()).or_default()
.push(ActiveHandler {
cap: handler_cap.clone(),
terminated: handler_terminated,
});
t.on_stop(enclose!((routes, method, path, host, port) move |t| {
tracing::debug!("-HTTP binding {:?} {:?} {:?} {:?} {:?}", host, port, method, path, handler);
let port_map = t.get_mut(&routes);
let host_map = port_map.entry(port.clone()).or_default();
let path_map = host_map.entry(host.clone()).or_default();
let method_map = path_map.entry(path.clone()).or_default();
let handler_vec = method_map.entry(method.clone()).or_default();
let handler = {
let i = handler_vec.iter().position(|a| a.cap == handler_cap)
.expect("Expected an index of an active handler to remove");
handler_vec.swap_remove(i)
};
if handler_vec.is_empty() {
method_map.remove(&method);
}
if method_map.is_empty() {
path_map.remove(&path);
}
if path_map.is_empty() {
host_map.remove(&host);
}
if host_map.is_empty() {
port_map.remove(&port);
}
*t.get_mut(&handler.terminated) = true;
Ok(())
}));
Ok(())
});
Ok(())
})));
Ok(())
}));
during!(t, httpd, language(), <request $req $res>, |t: &mut Activation| {
let req = match language().parse::<http::HttpRequest>(&req) { Ok(v) => v, Err(_) => return Ok(()) };
let res = match res.value().to_embedded() { Ok(v) => v, Err(_) => return Ok(()) };
tracing::trace!("Looking up handler for {:#?} in {:#?}", &req, &t.get(&routes));
let host_map = match t.get(&routes).get(&req.port) {
Some(host_map) => host_map,
None => return send_empty(t, res, 404, "Not found"),
};
let methods = match request_host(&req.host).and_then(|h| try_hostname(host_map, http::HostPattern::Host(h), &req.path).transpose()).transpose()? {
Some(methods) => methods,
None => match try_hostname(host_map, http::HostPattern::Any, &req.path)? {
Some(methods) => methods,
None => return send_empty(t, res, 404, "Not found"),
}
};
let handlers = match methods.get(&http::MethodPattern::Specific(req.method.clone())) {
Some(handlers) => handlers,
None => match methods.get(&http::MethodPattern::Any) {
Some(handlers) => handlers,
None => {
let allowed = methods.keys().map(|k| match k {
http::MethodPattern::Specific(m) => m.to_uppercase(),
http::MethodPattern::Any => unreachable!(),
}).collect::<Vec<String>>().join(", ");
res.message(t, language(), &http::HttpResponse::Status {
code: 405.into(), message: "Method Not Allowed".into() });
res.message(t, language(), &http::HttpResponse::Header {
name: "allow".into(), value: allowed });
return send_done(t, res);
}
}
};
if handlers.len() > 1 {
tracing::warn!(?req, "Too many handlers available");
}
let ActiveHandler { cap, terminated } = handlers.first().expect("Nonempty handler set").clone();
tracing::trace!("Handler for {:?} is {:?}", &req, &cap);
t.dataflow(enclose!((terminated, req, res) move |t| {
if *t.get(&terminated) {
tracing::trace!("Handler for {:?} terminated", &req);
send_empty(t, &res, 500, "Internal Server Error")?;
}
Ok(())
}))?;
cap.assert(t, language(), &http::HttpContext { req, res: res.clone() });
Ok(())
});
Ok(())
}
fn send_done(t: &mut Activation, res: &Arc<Cap>) -> ActorResult {
res.message(t, language(), &http::HttpResponse::Done {
chunk: Box::new(http::Chunk::Bytes(vec![])) });
Ok(())
}
fn send_empty(t: &mut Activation, res: &Arc<Cap>, code: u16, message: &str) -> ActorResult {
res.message(t, language(), &http::HttpResponse::Status {
code: code.into(), message: message.into() });
send_done(t, res)
}
fn path_pattern_matches(path_pat: &http::PathPattern, path: &Vec<String>) -> bool {
let mut path_iter = path.iter();
for pat_elem in path_pat.0.iter() {
match pat_elem {
http::PathPatternElement::Label(v) => match path_iter.next() {
Some(path_elem) => {
if v != path_elem {
return false;
}
}
None => return false,
},
http::PathPatternElement::Wildcard => match path_iter.next() {
Some(_) => (),
None => return false,
},
http::PathPatternElement::Rest => return true,
}
}
match path_iter.next() {
Some(_more) => false,
None => true,
}
}
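
A self-contained mirror of the matching rules above, with a local enum standing in for http::PathPatternElement (all names here are hypothetical; only the logic follows the function above): a Label must equal the next path segment, a Wildcard consumes exactly one segment, Rest matches any remaining suffix, and otherwise the path must be fully consumed.

enum Elem { Label(String), Wildcard, Rest }

fn matches(pattern: &[Elem], path: &[&str]) -> bool {
    let mut path_iter = path.iter();
    for pat_elem in pattern {
        match pat_elem {
            Elem::Label(v) => match path_iter.next() {
                Some(p) if *p == v.as_str() => (),
                _ => return false,
            },
            Elem::Wildcard => if path_iter.next().is_none() { return false },
            Elem::Rest => return true,
        }
    }
    path_iter.next().is_none() // no unmatched trailing segments allowed
}

fn main() {
    let exact = [Elem::Label("docs".into()), Elem::Wildcard]; // "docs/<one segment>"
    assert!(matches(&exact, &["docs", "intro"]));
    assert!(!matches(&exact, &["docs", "intro", "deeper"]));

    let prefix = [Elem::Label("static".into()), Elem::Rest]; // "static/..." at any depth
    assert!(matches(&prefix, &["static", "css", "site.css"]));
}
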
fn try_hostname<'table>(
host_map: &'table HostTable,
host_pat: http::HostPattern,
path: &Vec<String>,
) -> Result<Option<&'table MethodTable>, Error> {
match host_map.get(&host_pat) {
None => Ok(None),
Some(path_table) => {
for (path_pat, method_table) in path_table.iter() {
tracing::trace!("Checking path {:?} against pattern {:?}", &path, &path_pat);
if path_pattern_matches(path_pat, path) {
return Ok(Some(method_table));
}
}
Ok(None)
}
}
}
fn render_dir(path: std::path::PathBuf) -> Result<(Vec<u8>, Option<&'static str>), Error> {
let mut body = String::new();
for entry in std::fs::read_dir(&path)? {
if let Ok(entry) = entry {
let is_dir = entry.metadata().map(|m| m.is_dir()).unwrap_or(false);
let name = entry.file_name().to_string_lossy()
.replace('&', "&amp;")
.replace('<', "&lt;")
.replace('>', "&gt;")
.replace('\'', "&apos;")
.replace('"', "&quot;") + (if is_dir { "/" } else { "" });
body.push_str(&format!("<a href=\"{}\">{}</a><br>\n", name, name));
}
}
Ok((body.into_bytes(), Some("text/html")))
}
impl HttpStaticFileServer {
fn respond(&mut self, t: &mut Activation, req: &http::HttpRequest, res: &Arc<Cap>) -> ActorResult {
let path_prefix_elements = usize::try_from(&self.path_prefix_elements)
.map_err(|_| "Bad pathPrefixElements")?;
let mut is_index = false;
let mut path = req.path[path_prefix_elements..].iter().cloned().collect::<Vec<String>>();
if let Some(e) = path.last_mut() {
if e.len() == 0 {
*e = "index.html".into();
is_index = true;
}
}
let mut realpath = std::path::PathBuf::from(&self.dir);
for element in path.into_iter() {
if element.contains('/') || element.starts_with('.') { Err("Invalid path element")?; }
realpath.push(element);
}
let (body, mime_type) = match std::fs::File::open(&realpath) {
Err(_) => {
if is_index {
realpath.pop();
}
if std::fs::metadata(&realpath).is_ok_and(|m| m.is_dir()) {
render_dir(realpath)?
} else {
return send_empty(t, res, 404, "Not found")
}
},
Ok(mut fh) => {
if fh.metadata().is_ok_and(|m| m.is_dir()) {
drop(fh);
res.message(t, language(), &http::HttpResponse::Status {
code: 301.into(), message: "Moved permanently".into() });
res.message(t, language(), &http::HttpResponse::Header {
name: "location".into(), value: format!("/{}/", req.path.join("/")) });
return send_done(t, res);
} else {
let mut buf = Vec::new();
fh.read_to_end(&mut buf)?;
if let Some(extension) = realpath.extension().and_then(|e| e.to_str()) {
(buf, MIME_TABLE.get(extension).map(|m| m.as_str()))
} else {
(buf, None)
}
}
}
};
res.message(t, language(), &http::HttpResponse::Status {
code: 200.into(), message: "OK".into() });
if let Some(mime_type) = mime_type {
res.message(t, language(), &http::HttpResponse::Header {
name: "content-type".into(), value: mime_type.to_owned() });
}
res.message(t, language(), &http::HttpResponse::Done {
chunk: Box::new(http::Chunk::Bytes(body)) });
Ok(())
}
}
impl Entity<http::HttpContext<AnyValue>> for HttpStaticFileServer {
fn assert(&mut self, t: &mut Activation, assertion: http::HttpContext<AnyValue>, _handle: Handle) -> ActorResult {
let http::HttpContext { req, res } = assertion;
if let Err(e) = self.respond(t, &req, &res) {
tracing::error!(?req, error=?e);
send_empty(t, &res, 500, "Internal server error")?;
}
Ok(())
}
}
fn run_static_file_server(t: &mut Activation, ds: Arc<Cap>, spec: HttpStaticFileServer) -> ActorResult {
let object = Cap::guard(&language().syndicate, t.create(spec.clone()));
ds.assert(t, language(), &syndicate::schemas::service::ServiceObject {
service_name: language().unparse(&spec),
object: AnyValue::domain(object),
});
Ok(())
}

View File

@ -0,0 +1,37 @@
use std::sync::Arc;
use syndicate::actor::*;
use syndicate::enclose;
use syndicate::supervise::{Supervisor, SupervisorConfiguration};
use crate::language::language;
use crate::lifecycle;
use crate::schemas::internal_services::Milestone;
use syndicate_macros::during;
pub fn on_demand(t: &mut Activation, ds: Arc<Cap>) {
t.spawn(syndicate::name!("milestone"), move |t| {
Ok(during!(t, ds, language(), <run-service $spec: Milestone>, |t: &mut Activation| {
Supervisor::start(
t,
syndicate::name!(parent: None, "milestone", name = ?spec.name),
SupervisorConfiguration::default(),
|_, _| Ok(()),
enclose!((ds) move |t| enclose!((ds, spec) run(t, ds, spec))))
}))
});
}
fn run(
t: &mut Activation,
ds: Arc<Cap>,
spec: Milestone,
) -> ActorResult {
lifecycle::terminate_on_service_restart(t, &ds, &spec);
tracing::info!(milestone = ?spec.name, "entered");
ds.assert(t, language(), &lifecycle::started(&spec));
ds.assert(t, language(), &lifecycle::ready(&spec));
t.on_stop(move |_| { tracing::info!(milestone = ?spec.name, "exited"); Ok(()) });
Ok(())
}

View File

@ -1,7 +1,6 @@
pub mod config_watcher;
pub mod daemon;
pub mod debt_reporter;
pub mod gatekeeper;
pub mod http_router;
pub mod milestone;
pub mod tcp_relay_listener;
pub mod unix_relay_listener;

View File

@ -1,119 +1,62 @@
use preserves_schema::Codec;
use std::convert::TryFrom;
use std::sync::Arc;
use syndicate::actor::*;
use syndicate::enclose;
use syndicate::preserves::rec;
use syndicate::preserves::value::NestedValue;
use syndicate::supervise::{Supervisor, SupervisorConfiguration};
use syndicate::trace;
use tokio::net::TcpListener;
use crate::language::language;
use crate::lifecycle;
use crate::protocol::detect_protocol;
use crate::schemas::internal_services::TcpWithoutHttp;
use crate::schemas::internal_services::TcpRelayListener;
use syndicate_macros::during;
pub fn on_demand(t: &mut Activation, ds: Arc<Cap>) {
t.spawn(Some(AnyValue::symbol("tcp_relay_listener")), move |t| {
enclose!((ds) during!(t, ds, language(), <run-service $spec: TcpWithoutHttp::<AnyValue>>, |t| {
run_supervisor(t, ds.clone(), spec)
}));
Ok(())
t.spawn(syndicate::name!("tcp_relay_listener"), move |t| {
Ok(during!(t, ds, language(), <run-service $spec: TcpRelayListener>, |t| {
Supervisor::start(
t,
syndicate::name!(parent: None, "relay", addr = ?spec),
SupervisorConfiguration::default(),
enclose!((ds, spec) lifecycle::updater(ds, spec)),
enclose!((ds) move |t| enclose!((ds, spec) run(t, ds, spec))))
}))
});
}
fn run_supervisor(t: &mut Activation, ds: Arc<Cap>, spec: TcpWithoutHttp) -> ActorResult {
Supervisor::start(
t,
Some(rec![AnyValue::symbol("relay"), language().unparse(&spec)]),
SupervisorConfiguration::default(),
enclose!((ds, spec) lifecycle::updater(ds, spec)),
enclose!((ds) move |t| enclose!((ds, spec) run(t, ds, spec))))
}
fn run(t: &mut Activation, ds: Arc<Cap>, spec: TcpWithoutHttp) -> ActorResult {
fn run(t: &mut Activation, ds: Arc<Cap>, spec: TcpRelayListener) -> ActorResult {
lifecycle::terminate_on_service_restart(t, &ds, &spec);
let httpd = t.named_field("httpd", None::<Arc<Cap>>);
{
let ad = spec.addr.clone();
let ad2 = ad.clone();
let gk = spec.gatekeeper.clone();
enclose!((ds, httpd) during!(t, ds, language(),
<run-service <relay-listener #(&language().unparse(&ad)) #(&AnyValue::domain(gk)) $h>>, |t: &mut Activation| {
if let Some(h) = h.value().as_embedded().cloned() {
h.assert(t, language(), &syndicate::schemas::http::HttpListener { port: ad2.port.clone() });
*t.get_mut(&httpd) = Some(h.clone());
t.on_stop(enclose!((httpd) move |t| {
let f = t.get_mut(&httpd);
if *f == Some(h.clone()) { *f = None; }
Ok(())
}));
}
Ok(())
}));
}
let TcpWithoutHttp { addr, gatekeeper } = spec.clone();
let host = addr.host.clone();
let port = u16::try_from(&addr.port).map_err(|_| "Invalid TCP port number")?;
let facet = t.facet_ref();
let trace_collector = t.trace_collector();
t.linked_task(Some(AnyValue::symbol("listener")), async move {
let host = spec.addr.host.clone();
let port = u16::try_from(&spec.addr.port).map_err(|_| "Invalid TCP port number")?;
let parent_span = tracing::Span::current();
let facet = t.facet.clone();
t.linked_task(syndicate::name!("listener"), async move {
let listen_addr = format!("{}:{}", host, port);
let listener = TcpListener::bind(listen_addr).await?;
{
let cause = trace_collector.as_ref().map(|_| trace::TurnCause::external("readiness"));
let account = Account::new(Some(AnyValue::symbol("readiness")), trace_collector.clone());
if !facet.activate(
&account, cause, |t| {
tracing::info!("listening");
ds.assert(t, language(), &lifecycle::ready(&spec));
Ok(())
})
{
return Ok(LinkedTaskTermination::Normal);
}
}
if !facet.activate(Account::new(syndicate::name!("readiness")), |t| {
tracing::info!("listening");
ds.assert(t, language(), &lifecycle::ready(&spec));
Ok(())
}).is_success() { return Ok(LinkedTaskTermination::Normal); }
loop {
let (stream, addr) = listener.accept().await?;
let gatekeeper = gatekeeper.clone();
let name = Some(rec![AnyValue::symbol("tcp"), AnyValue::new(format!("{}", &addr))]);
let cause = trace_collector.as_ref().map(|_| trace::TurnCause::external("connect"));
let account = Account::new(name.clone(), trace_collector.clone());
if !facet.activate(
&account, cause, enclose!((trace_collector, httpd) move |t| {
let httpd = t.get(&httpd).clone();
t.spawn(name, move |t| {
Ok(t.linked_task(None, {
let facet = t.facet_ref();
async move {
detect_protocol(trace_collector,
facet,
stream,
gatekeeper,
httpd,
addr,
port).await?;
Ok(LinkedTaskTermination::KeepFacet)
}
}))
});
Ok(())
}))
{
return Ok(LinkedTaskTermination::Normal);
}
let gatekeeper = spec.gatekeeper.clone();
let name = syndicate::name!(parent: parent_span.clone(), "conn");
if !facet.activate(Account::new(name.clone()), move |t| {
t.spawn(name, move |t| {
Ok(t.linked_task(tracing::Span::current(), {
let facet = t.facet.clone();
async move {
detect_protocol(facet, stream, gatekeeper, addr).await?;
Ok(LinkedTaskTermination::KeepFacet)
}
}))
});
Ok(())
}).is_success() { return Ok(LinkedTaskTermination::Normal); }
}
});
Ok(())

View File

@ -1,5 +1,3 @@
use preserves_schema::Codec;
use std::io;
use std::path::PathBuf;
use std::sync::Arc;
@ -7,11 +5,8 @@ use std::sync::Arc;
use syndicate::actor::*;
use syndicate::enclose;
use syndicate::error::Error;
use syndicate::preserves::rec;
use syndicate::preserves::value::NestedValue;
use syndicate::relay;
use syndicate::supervise::{Supervisor, SupervisorConfiguration};
use syndicate::trace;
use tokio::net::UnixListener;
use tokio::net::UnixStream;
@ -24,11 +19,11 @@ use crate::schemas::internal_services::UnixRelayListener;
use syndicate_macros::during;
pub fn on_demand(t: &mut Activation, ds: Arc<Cap>) {
t.spawn(Some(AnyValue::symbol("unix_relay_listener")), move |t| {
Ok(during!(t, ds, language(), <run-service $spec: UnixRelayListener::<AnyValue>>, |t| {
t.spawn(syndicate::name!("unix_relay_listener"), move |t| {
Ok(during!(t, ds, language(), <run-service $spec: UnixRelayListener>, |t| {
Supervisor::start(
t,
Some(rec![AnyValue::symbol("relay"), language().unparse(&spec)]),
syndicate::name!(parent: None, "relay", addr = ?spec),
SupervisorConfiguration::default(),
enclose!((ds, spec) lifecycle::updater(ds, spec)),
enclose!((ds) move |t| enclose!((ds, spec) run(t, ds, spec))))
@ -39,56 +34,39 @@ pub fn on_demand(t: &mut Activation, ds: Arc<Cap>) {
fn run(t: &mut Activation, ds: Arc<Cap>, spec: UnixRelayListener) -> ActorResult {
lifecycle::terminate_on_service_restart(t, &ds, &spec);
let path_str = spec.addr.path.clone();
let facet = t.facet_ref();
let trace_collector = t.trace_collector();
t.linked_task(Some(AnyValue::symbol("listener")), async move {
let parent_span = tracing::Span::current();
let facet = t.facet.clone();
t.linked_task(syndicate::name!("listener"), async move {
let listener = bind_unix_listener(&PathBuf::from(path_str)).await?;
{
let cause = trace_collector.as_ref().map(|_| trace::TurnCause::external("readiness"));
let account = Account::new(Some(AnyValue::symbol("readiness")), trace_collector.clone());
if !facet.activate(
&account, cause, |t| {
tracing::info!("listening");
ds.assert(t, language(), &lifecycle::ready(&spec));
Ok(())
})
{
return Ok(LinkedTaskTermination::Normal);
}
}
if !facet.activate(Account::new(syndicate::name!("readiness")), |t| {
tracing::info!("listening");
ds.assert(t, language(), &lifecycle::ready(&spec));
Ok(())
}).is_success() { return Ok(LinkedTaskTermination::Normal); }
loop {
let (stream, _addr) = listener.accept().await?;
let peer = stream.peer_cred()?;
let gatekeeper = spec.gatekeeper.clone();
let name = Some(rec![AnyValue::symbol("unix"),
AnyValue::new(peer.pid().unwrap_or(-1)),
AnyValue::new(peer.uid())]);
let cause = trace_collector.as_ref().map(|_| trace::TurnCause::external("connect"));
let account = Account::new(name.clone(), trace_collector.clone());
if !facet.activate(
&account, cause, enclose!((trace_collector) move |t| {
t.spawn(name, |t| {
Ok(t.linked_task(None, {
let facet = t.facet_ref();
async move {
tracing::info!(protocol = %"unix");
let (i, o) = stream.into_split();
run_connection(trace_collector,
facet,
relay::Input::Bytes(Box::pin(i)),
relay::Output::Bytes(Box::pin(o)),
gatekeeper);
Ok(LinkedTaskTermination::KeepFacet)
}
}))
});
Ok(())
}))
{
return Ok(LinkedTaskTermination::Normal);
}
let name = syndicate::name!(parent: parent_span.clone(), "conn",
pid = ?peer.pid().unwrap_or(-1),
uid = peer.uid());
if !facet.activate(Account::new(name.clone()), move |t| {
t.spawn(name, |t| {
Ok(t.linked_task(tracing::Span::current(), {
let facet = t.facet.clone();
async move {
tracing::info!(protocol = %"unix");
let (i, o) = stream.into_split();
run_connection(facet,
relay::Input::Bytes(Box::pin(i)),
relay::Output::Bytes(Box::pin(o)),
gatekeeper)?;
Ok(LinkedTaskTermination::KeepFacet)
}
}))
});
Ok(())
}).is_success() { return Ok(LinkedTaskTermination::Normal); }
}
});
Ok(())

View File

@ -1,23 +0,0 @@
[package]
name = "syndicate-tools"
version = "0.18.0"
authors = ["Tony Garnock-Jones <tonyg@leastfixedpoint.com>"]
edition = "2018"
description = "Syndicate command-line utilities."
homepage = "https://syndicate-lang.org/"
repository = "https://git.syndicate-lang.org/syndicate-lang/syndicate-rs"
license = "Apache-2.0"
[dependencies]
preserves = "4.995"
syndicate = { path = "../syndicate", version = "0.40.0"}
clap = { version = "^4.0", features = ["derive"] }
clap_complete = "^4.0"
noise-protocol = "0.1"
noise-rust-crypto = "0.5"
[package.metadata.workspaces]
independent = true

View File

@ -1,168 +0,0 @@
use std::io;
use std::str::FromStr;
use clap::ArgGroup;
use clap::CommandFactory;
use clap::Parser;
use clap::Subcommand;
use clap::arg;
use clap_complete::{generate, Shell};
use noise_protocol::DH;
use noise_protocol::Hash;
use noise_rust_crypto::Blake2s;
use noise_rust_crypto::X25519;
use preserves::hex::HexParser;
use preserves::value::BytesBinarySource;
use preserves::value::NestedValue;
use preserves::value::NoEmbeddedDomainCodec;
use preserves::value::Reader;
use preserves::value::TextReader;
use preserves::value::ViaCodec;
use preserves::value::TextWriter;
use syndicate::language;
use syndicate::preserves_schema::Codec;
use syndicate::preserves_schema::ParseError;
use syndicate::schemas::noise;
use syndicate::sturdy::Caveat;
use syndicate::sturdy::SturdyRef;
use syndicate::sturdy::_Any;
#[derive(Clone, Debug)]
struct Preserves<N: NestedValue>(N);
#[derive(Subcommand, Debug)]
enum Action {
#[command(group(ArgGroup::new("key").required(true)))]
/// Generate a fresh SturdyRef from an OID value and a key
Mint {
#[arg(long, value_name="VALUE")]
/// Preserves value to use as SturdyRef OID
oid: Preserves<_Any>,
#[arg(long, group="key")]
/// Key phrase
phrase: Option<String>,
#[arg(long, group="key")]
/// Key bytes, encoded as hex
hex: Option<String>,
#[arg(long)]
/// Caveats to add
caveat: Vec<Preserves<_Any>>,
},
#[command(group(ArgGroup::new("key").required(true)))]
/// Generate a fresh NoiseServiceSpec from a service selector and a key
Noise {
#[arg(long, value_name="VALUE")]
/// Preserves value to use as the service selector
service: Preserves<_Any>,
#[arg(long, value_name="PROTOCOL")]
/// Noise handshake protocol name
protocol: Option<String>,
#[arg(long, group="key")]
/// Key phrase
phrase: Option<String>,
#[arg(long, group="key")]
/// Key bytes, encoded as hex
hex: Option<String>,
#[arg(long, group="key")]
/// Generate a random key
random: bool,
},
/// Emit shell completion code
Completions {
/// Shell dialect to generate
shell: Shell,
}
}
#[derive(Parser, Debug)]
#[command(version)]
struct Cli {
#[command(subcommand)]
action: Action,
}
impl<N: NestedValue> FromStr for Preserves<N> {
type Err = ParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Preserves(TextReader::new(&mut BytesBinarySource::new(s.as_bytes()),
ViaCodec::new(NoEmbeddedDomainCodec)).demand_next(false)?))
}
}
fn main() -> io::Result<()> {
let args = <Cli as Parser>::parse();
match args.action {
Action::Completions { shell } => {
let mut cmd = <Cli as CommandFactory>::command();
let name = cmd.get_name().to_string();
generate(shell, &mut cmd, name, &mut io::stdout());
}
Action::Noise { service, protocol, phrase, hex, random } => {
let key =
if random {
X25519::genkey()
} else if let Some(hex) = hex {
let mut hash = Blake2s::default();
hash.input(hex.as_bytes());
hash.result()
} else if let Some(phrase) = phrase {
let mut hash = Blake2s::default();
hash.input(phrase.as_bytes());
hash.result()
} else {
unreachable!()
};
let n = noise::NoiseServiceSpec {
base: noise::NoiseSpec {
key: X25519::pubkey(&key).to_vec(),
service: noise::ServiceSelector(service.0),
pre_shared_keys: noise::NoisePreSharedKeys::Absent,
protocol: if let Some(p) = protocol {
noise::NoiseProtocol::Present { protocol: p }
} else {
noise::NoiseProtocol::Absent
},
},
secret_key: noise::SecretKeyField::Present {
secret_key: key.to_vec(),
},
};
println!("{}", TextWriter::encode(&mut NoEmbeddedDomainCodec,
&language().unparse(&n))?);
}
Action::Mint { oid, phrase, hex, caveat: caveats } => {
let key =
if let Some(hex) = hex {
HexParser::Liberal.decode(&hex).expect("hex encoded sturdyref")
} else if let Some(phrase) = phrase {
phrase.as_bytes().to_owned()
} else {
unreachable!()
};
let attenuation = caveats.into_iter().map(|c| {
let r = language().parse(&c.0);
if let Ok(Caveat::Unknown(_)) = &r {
eprintln!("Warning: Unknown caveat format: {:?}", &c.0);
}
r
}).collect::<Result<Vec<Caveat>, _>>()?;
let m = SturdyRef::mint(oid.0, &key).attenuate(&attenuation)?;
println!("{}", TextWriter::encode(&mut NoEmbeddedDomainCodec,
&language().unparse(&m))?);
}
}
Ok(())
}

View File

@ -1,6 +1,6 @@
[package]
name = "syndicate"
version = "0.40.0"
version = "0.20.0"
authors = ["Tony Garnock-Jones <tonyg@leastfixedpoint.com>"]
edition = "2018"
@ -13,23 +13,23 @@ license = "Apache-2.0"
vendored-openssl = ["openssl/vendored"]
[build-dependencies]
preserves-schema = "5.995"
preserves-schema = "^2"
[dependencies]
preserves = "4.995"
preserves-schema = "5.995"
preserves = "^2"
preserves-schema = "^2"
tokio = { version = "1.10", features = ["io-std", "io-util", "macros", "rt", "rt-multi-thread", "time"] }
tokio = { version = "1.10", features = ["io-util", "macros", "rt", "rt-multi-thread", "time"] }
tokio-util = "0.6"
bytes = "1.0"
futures = "0.3"
blake2 = "0.10"
getrandom = "0.2"
hmac = "0.12"
hmac = "0.11"
lazy_static = "1.4"
parking_lot = "0.11"
sha2 = "0.9"
tracing = "0.1"
tracing-subscriber = "0.2"
@ -48,6 +48,3 @@ harness = false
[[bench]]
name = "ring"
harness = false
[package.metadata.workspaces]
independent = true

View File

@ -9,9 +9,9 @@ use syndicate::language;
use syndicate::actor::*;
use syndicate::during::entity;
use syndicate::dataspace::Dataspace;
use syndicate::enclose;
use syndicate::schemas::dataspace::Observe;
use syndicate::schemas::dataspace_patterns as p;
use syndicate::value::Map;
use syndicate::value::NestedValue;
use syndicate::value::Value;
@ -52,18 +52,24 @@ pub fn bench_pub(c: &mut Criterion) {
b.iter_custom(|iters| {
let start = Instant::now();
rt.block_on(async move {
Actor::top(None, move |t| {
let _ = t.prevent_inert_check();
// The reason this works is that all the messages to `ds` will be delivered
// before the message to `shutdown`, because `ds` and `shutdown` are in the
// same Actor.
let ds = t.create(Dataspace::new(None));
Actor::new(None).boot(syndicate::name!("dataspace"), move |t| {
let ds = t.create(Dataspace::new());
let shutdown = t.create(ShutdownEntity);
for _ in 0..iters {
t.message(&ds, says(AnyValue::new("bench_pub"),
Value::ByteString(vec![]).wrap()));
}
t.message(&shutdown, AnyValue::new(true));
let account = Account::new(syndicate::name!("sender-account"));
t.linked_task(syndicate::name!("sender"), async move {
for _ in 0..iters {
external_event(&ds.mailbox, &account, Box::new(
enclose!((ds) move |t| t.with_entity(
&ds,
|t, e| e.message(t, says(AnyValue::new("bench_pub"),
Value::ByteString(vec![]).wrap()))))))?
}
external_event(&shutdown.mailbox, &account, Box::new(
enclose!((shutdown) move |t| t.with_entity(
&shutdown,
|t, e| e.message(t, AnyValue::new(true))))))?;
Ok(LinkedTaskTermination::KeepFacet)
});
Ok(())
}).await.unwrap().unwrap();
});
@ -77,27 +83,27 @@ pub fn bench_pub(c: &mut Criterion) {
rt.block_on(async move {
let turn_count = Arc::new(AtomicU64::new(0));
Actor::top(None, {
Actor::new(None).boot(syndicate::name!("dataspace"), {
let iters = iters.clone();
let turn_count = Arc::clone(&turn_count);
move |t| {
let ds = Cap::new(&t.create(Dataspace::new(None)));
let ds = Cap::new(&t.create(Dataspace::new()));
let shutdown = entity(())
.on_asserted(|_, _, _| Ok(Some(Box::new(|_, t| Ok(t.stop())))))
.create_cap(t);
ds.assert(t, language(), &Observe {
pattern: p::Pattern::Bind {
pattern: Box::new(p::Pattern::Lit {
value: Box::new(p::AnyAtom::Symbol("consumer".to_owned())),
}),
},
pattern: p::Pattern::DBind(Box::new(p::DBind {
pattern: p::Pattern::DLit(Box::new(p::DLit {
value: p::AnyAtom::Symbol("consumer".to_owned()),
})),
})),
observer: shutdown,
});
t.spawn(Some(AnyValue::symbol("consumer")), move |t| {
t.spawn(syndicate::name!("consumer"), move |t| {
struct Receiver(Arc<AtomicU64>);
impl Entity<AnyValue> for Receiver {
fn message(&mut self, _t: &mut Activation, _m: AnyValue) -> ActorResult {
@ -111,39 +117,46 @@ pub fn bench_pub(c: &mut Criterion) {
ds.assert(t, &(), &AnyValue::symbol("consumer"));
ds.assert(t, language(), &Observe {
pattern: p::Pattern::Group {
type_: Box::new(p::GroupType::Rec {
label: AnyValue::symbol("Says"),
}),
entries: Map::from([
(p::_Any::new(0), p::Pattern::Lit {
value: Box::new(p::AnyAtom::String("bench_pub".to_owned())),
}),
(p::_Any::new(1), p::Pattern::Bind {
pattern: Box::new(p::Pattern::Discard),
}),
]),
},
pattern: p::Pattern::DCompound(Box::new(p::DCompound::Rec {
label: AnyValue::symbol("Says"),
fields: vec![
p::Pattern::DLit(Box::new(p::DLit {
value: p::AnyAtom::String("bench_pub".to_owned()),
})),
p::Pattern::DBind(Box::new(p::DBind {
pattern: p::Pattern::DDiscard(Box::new(p::DDiscard)),
})),
]})),
observer: receiver,
});
ds.assert(t, language(), &Observe {
pattern: p::Pattern::Bind {
pattern: Box::new(p::Pattern::Lit {
value: Box::new(p::AnyAtom::Bool(true)),
}),
},
pattern: p::Pattern::DBind(Box::new(p::DBind {
pattern: p::Pattern::DLit(Box::new(p::DLit {
value: p::AnyAtom::Bool(true),
})),
})),
observer: shutdown,
});
t.after(core::time::Duration::from_secs(0), move |t| {
let account = Arc::clone(t.account());
t.linked_task(syndicate::name!("sender"), async move {
for _i in 0..iters {
ds.message(t, &(), &says(AnyValue::new("bench_pub"),
Value::ByteString(vec![]).wrap()));
let ds = Arc::clone(&ds);
external_event(&Arc::clone(&ds.underlying.mailbox), &account, Box::new(
move |t| t.with_entity(
&ds.underlying,
|t, e| e.message(t, says(AnyValue::new("bench_pub"),
Value::ByteString(vec![]).wrap())))))?
}
ds.message(t, &(), &AnyValue::new(true));
Ok(())
{
let ds = Arc::clone(&ds);
external_event(&Arc::clone(&ds.underlying.mailbox), &account, Box::new(
move |t| t.with_entity(
&ds.underlying,
|t, e| e.message(t, AnyValue::new(true)))))?;
}
Ok(LinkedTaskTermination::KeepFacet)
});
Ok(())
});
Ok(())

View File

@ -7,8 +7,6 @@ use std::time::Duration;
use std::time::Instant;
use syndicate::actor::*;
use syndicate::preserves::rec;
use syndicate::value::NestedValue;
use tokio::runtime::Runtime;
@ -90,16 +88,14 @@ pub fn bench_ring(c: &mut Criterion) {
self.i += 1;
let spawner_ref = Arc::clone(&self.self_ref);
ACTORS_CREATED.fetch_add(1, Ordering::Relaxed);
t.spawn(
Some(rec![AnyValue::symbol("forwarder"), AnyValue::new(i)]),
move |t| {
let _ = t.prevent_inert_check();
let f = t.create(Forwarder {
next,
});
t.message(&spawner_ref, f);
Ok(())
t.spawn(syndicate::name!("forwarder", ?i), move |t| {
let _ = t.prevent_inert_check();
let f = t.create(Forwarder {
next,
});
t.message(&spawner_ref, f);
Ok(())
});
} else {
let mut c_state = Counter {
start: Instant::now(),
@ -122,7 +118,7 @@ pub fn bench_ring(c: &mut Criterion) {
}
ACTORS_CREATED.fetch_add(1, Ordering::Relaxed);
Actor::top(None, move |t| {
Actor::new(None).boot(syndicate::name!("counter"), move |t| {
let _ = t.prevent_inert_check();
let mut s = Spawner {
self_ref: t.create_inert(),

View File

@ -26,11 +26,11 @@ fn main() -> std::io::Result<()> {
let mut gen_dir = buildroot.clone();
gen_dir.push("src/schemas");
let mut c = CompilerConfig::new("crate::schemas".to_owned());
let mut c = CompilerConfig::new(gen_dir, "crate::schemas".to_owned());
c.plugins.push(Box::new(syndicate_plugins::PatternPlugin));
c.add_external_module(ExternalModule::new(vec!["EntityRef".to_owned()], "crate::actor"));
let inputs = expand_inputs(&vec!["protocols/schema-bundle.bin".to_owned()])?;
c.load_schemas_and_bundles(&inputs, &vec![])?;
compile(&c, &mut CodeCollector::files(gen_dir))
c.load_schemas_and_bundles(&inputs)?;
compile(&c)
}

View File

@ -1,44 +1,17 @@
(Binary content of protocols/schema-bundle.bin: the machine-oriented Preserves encoding of the compiled protocol schema bundle, shown in both its newer and older forms. The raw encoding is not legible as text and is omitted here; the corresponding per-module textual schemas appear in the .prs diffs below.)

View File

@ -1,4 +1,4 @@
version 1 .
embeddedType EntityRef.Cap .
Observe = <Observe @pattern dataspacePatterns.Pattern @observer #:any>.
Observe = <Observe @pattern dataspacePatterns.Pattern @observer #!any>.

View File

@ -1,30 +1,23 @@
version 1 .
embeddedType EntityRef.Cap .
# Dataspace patterns: *almost* a sublanguage of attenuation patterns.
#
# One key difference is that Dataspace patterns are extensible, in that
# they ignore fields not mentioned in group patterns.
; Dataspace patterns: a sublanguage of attenuation patterns.
Pattern = DDiscard / DBind / DLit / DCompound .
Pattern =
/ @discard <_>
/ <bind @pattern Pattern>
/ <lit @value AnyAtom>
/ <group @type GroupType @entries { any: Pattern ...:... }>
.
GroupType =
/ <rec @label any>
/ <arr>
/ <dict>
.
DDiscard = <_>.
DBind = <bind @pattern Pattern>.
DLit = <lit @value AnyAtom>.
DCompound = <rec @label any @fields [Pattern ...]>
/ <arr @items [Pattern ...]>
/ <dict @entries { any: Pattern ...:... }> .
AnyAtom =
/ @bool bool
/ @float float
/ @double double
/ @int int
/ @string string
/ @bytes bytes
/ @symbol symbol
/ @embedded #:any
/ @embedded #!any
.
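
For orientation, the group/bind/lit pattern language above is exactly what the benchmark diff earlier in this comparison constructs in Rust. A minimal sketch of the observation pattern <group <rec Says> { 0: <lit "bench_pub">, 1: <bind <_>> }>, reusing only constructors that appear in this diff (the textual rendering of the pattern is an inference from the schema, not taken from the repository):

use syndicate::actor::AnyValue;
use syndicate::schemas::dataspace_patterns as p;
use syndicate::value::Map;

// Matches <Says "bench_pub" _>, binding the second field and ignoring any extra fields.
fn says_pattern() -> p::Pattern {
    p::Pattern::Group {
        type_: Box::new(p::GroupType::Rec { label: AnyValue::symbol("Says") }),
        entries: Map::from([
            (p::_Any::new(0), p::Pattern::Lit {
                value: Box::new(p::AnyAtom::String("bench_pub".to_owned())),
            }),
            (p::_Any::new(1), p::Pattern::Bind {
                pattern: Box::new(p::Pattern::Discard),
            }),
        ]),
    }
}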

View File

@ -1,87 +1,5 @@
version 1 .
embeddedType EntityRef.Cap .
# ---------------------------------------------------------------------------
# Protocol at *gatekeeper* entities
# Assertion. Gatekeeper will attempt to resolve `step`, responding with a `Resolved` to
# `observer`.
Resolve = <resolve @step Step @observer #:Resolved> .
Resolved = <accepted @responderSession #:any> / Rejected .
Step = <<rec> @stepType symbol [@detail any]> .
# ---------------------------------------------------------------------------
# Protocol at dataspaces *associated* with gatekeeper entities
# ## Handling `Resolve` requests
#
# When the gatekeeper entity receives a `Resolve` assertion (call it R1), it
#
# 1. asserts a `Resolve` (call it R2) into its associated dataspace that
# is the same as R1 except it has a different `observer`; and
#
# 2. observes a `Bind` with `description` matching the `step` of R1/R2
# according to `stepType` (e.g. treatment of SturdyStepType is not the
# same as treatment of NoiseStepType).
#
# Normally, an appropriate `Bind` is expected to exist. If the gatekeeper
# sees the `Bind` first, it takes the `target` from it and does whatever
# `stepType` mandates before replying to R1's observer.
#
# However, if a `Resolved` is asserted to R2's observer before a `Bind`
# appears, that resolution is relayed on to R1's observer directly, be it
# positive or negative, and the gatekeeper stops waiting for a `Bind`.
#
# This way, entities can keep an eye out for `Resolve` requests that will
# never complete, and answer `Rejected` to them even when no matching
# `Bind` exists. Entities could also use `Resolve` requests to synthesize a
# `Bind` in a "just-in-time" fashion.
#
# ## General treatment of `Bind` assertions
#
# When the gatekeeper sees a `Bind`, independently of any potential
# `Resolve` requests, it computes an appropriate PathStep from
# `description` pointing at `target`, and responds with a `Bound` to
# `observer` (if supplied).
#
Bind = <bind @description Description @target #:any @observer BindObserver> .
Description = <<rec> @stepType symbol [@detail any]> .
BindObserver = @present #:Bound / @absent #f .
Bound = <bound @pathStep PathStep> / Rejected .
# ---------------------------------------------------------------------------
# Protocol at client-side dataspaces, for resolution utilities
# Assertion. In response to observation of this with appropriate captures/wildcards in `addr`
# and `resolved`, respondent will follow `route.pathSteps` starting from one of the
# `route.transports`, asserting `ResolvePath` with the final `Resolved` as well as the selected
# transport `addr` and a `control` for it.
ResolvePath = <resolve-path @route Route @addr any @control #:TransportControl @resolved Resolved> .
TransportConnection = <connect-transport @addr any @control #:TransportControl @resolved Resolved> .
ResolvedPathStep = <path-step @origin #:Resolve @pathStep PathStep @resolved Resolved> .
PathStep = <<rec> @stepType symbol [@detail any]> .
# A `Route` describes a network path that can be followed to reach some target entity.
#
# It starts with a set of zero or more possible non-Syndicate `transports`. These could be
# `transportAddress.Tcp` values or similar. They are just suggestions; it's quite possible the
# endpoint is reachable by some means not listed. The network outside Syndicate is, after all,
# pretty diverse! In particular, *zero* `transports` may be provided, in which case some
# out-of-band means has to be used to make that first connection.
#
# The `transports` give instructions for contacting the first entity in the `Route` path. Often
# this will be a `gatekeeper`, or a `noise` protocol endpoint, or both. Occasionally, it may
# even be the desired target entity. Subsequent `pathSteps` describe how to proceed from the
# initial entity to the target.
#
# (`transports` should by rights be a set, not a sequence, but that opens up a Can Of Worms
# regarding dataspace patterns including literal sets that I can't deal with right now.)
Route = <route @transports [any ...] @pathSteps PathStep ...> .
TransportControl = ForceDisconnect .
ForceDisconnect = <force-disconnect> .
# ---------------------------------------------------------------------------
Rejected = <rejected @detail any> .
Resolve = <resolve @sturdyref sturdy.SturdyRef @observer #!#!any>.
Bind = <bind @oid any @key bytes @target #!any>.
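
As a rough illustration of the Route semantics described in the comments above (connect via one of the suggested transports, then resolve each PathStep in turn until the target entity is reached), here is a self-contained sketch with stand-in types; it is not the generated schema bindings and not the server's resolver:

// Stand-ins for gatekeeper.Route and gatekeeper.PathStep; field shapes are illustrative only.
struct PathStep { step_type: &'static str } // e.g. "ref" (sturdy) or "noise"
struct Route { transports: Vec<String>, path_steps: Vec<PathStep> }

fn follow(route: &Route) {
    // Zero transports is allowed: the first connection is then made by some out-of-band means.
    if let Some(addr) = route.transports.first() {
        println!("connect to the first entity via {addr}");
    }
    // Each step is resolved at the entity reached by the previous step.
    for step in &route.path_steps {
        println!("assert <resolve <{} ...> ...> at the current entity", step.step_type);
    }
}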

View File

@ -1,62 +0,0 @@
version 1 .
# Assertion in driver DS
# Causes creation of server and route
HttpBinding = <http-bind @host HostPattern @port int @method MethodPattern @path PathPattern @handler #:HttpRequest> .
# Assertion in driver DS
# Describes active server and route
HttpService = <http-service @host HostPattern @port int @method MethodPattern @path PathPattern> .
# Assertion in driver DS
# Describes active listener
HttpListener = <http-listener @port int> .
HostPattern = @host string / @any #f .
PathPattern = [PathPatternElement ...] .
PathPatternElement = @label string / @wildcard =_ / @rest =... .
MethodPattern = @any #f / @specific @"Lowercase" symbol .
# Assertion in driver DS
HttpRequest = <http-request
@sequenceNumber int
@host RequestHost
@port int
@method @"Lowercase" symbol
@path [string ...]
@headers Headers
@query {symbol: [QueryValue ...] ...:...}
@body RequestBody> .
Headers = {@"Lowercase" symbol: string ...:...} .
QueryValue = @string string / <file @filename string @headers Headers @body bytes> .
RequestBody = @present bytes / @absent #f .
RequestHost = @present string / @absent #f .
# Assertion to handler entity
HttpContext = <request @req HttpRequest @res #:HttpResponse> .
# HttpResponse protocol. Delivered to the `res` ref in `HttpContext`.
#
# (status | header)* . chunk* . done
#
# Done triggers completion of the response and retraction of the frame by the peer. If the
# HttpBinding responsible for the request is withdrawn mid-way through a response (i.e. when
# chunked transfer is used and at least one chunk has been sent) the request is abruptly
# closed; if it is withdrawn at any other moment in the lifetime of the request, a 500 Internal
# Server Error is sent to the client.
#
@<TODO "trailers?">
HttpResponse =
# Messages.
/ <status @code int @message string>
/ <header @name symbol @value string>
/ <chunk @chunk Chunk>
/ <done @chunk Chunk>
.
Chunk = @string string / @bytes bytes .
# e.g. text/plain, text/html, application/json
MimeType = symbol .
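
To make the (status | header)* . chunk* . done grammar above concrete, here is a small self-contained sketch of a typical response sequence. The enums are informal stand-ins for the schema's HttpResponse and Chunk, not the generated types:

enum Chunk { String(String), Bytes(Vec<u8>) }

enum HttpResponse {
    Status { code: i64, message: String },
    // In the schema the header name is a lowercase symbol; a &str stands in for it here.
    Header { name: &'static str, value: String },
    Chunk { chunk: Chunk },
    Done { chunk: Chunk },
}

// A minimal 200 response: status and headers first, then a single terminating chunk.
fn ok_text_response(body: &str) -> Vec<HttpResponse> {
    vec![
        HttpResponse::Status { code: 200, message: "OK".to_owned() },
        HttpResponse::Header { name: "content-type", value: "text/plain".to_owned() },
        HttpResponse::Done { chunk: Chunk::String(body.to_owned()) },
    ]
}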

View File

@ -1,83 +0,0 @@
version 1 .
embeddedType EntityRef.Cap .
# https://noiseprotocol.org/
# ---------------------------------------------------------------------------
# Binding and connection
NoiseStepType = =noise .
# In a gatekeeper.Step, use ServiceSelector as detail.
NoiseStepDetail = ServiceSelector .
# In a gatekeeper.PathStep, use a NoiseSpec as detail.
NoisePathStepDetail = NoiseSpec .
# In a gatekeeper.Description, use a NoiseServiceSpec as detail.
NoiseDescriptionDetail = NoiseServiceSpec .
# ---------------------------------------------------------------------------
# Specification of target and bind addresses
ServiceSelector = any .
NoiseSpec = {
# The `serviceSelector` to use in a `NoiseStep` for `gatekeeper.Resolve`.
service: ServiceSelector,
# The responder's static public key. If not required (uncommon!), supply the empty ByteString.
key: bytes,
}
& @protocol NoiseProtocol
& @preSharedKeys NoisePreSharedKeys
.
NoiseServiceSpec = @base NoiseSpec & @secretKey SecretKeyField .
SecretKeyField = @present { secretKey: bytes } / @invalid { secretKey: any } / @absent {} .
# If absent, a default of DefaultProtocol is used. Most services will speak the default.
NoiseProtocol = @present { protocol: string } / @invalid { protocol: any } / @absent {} .
DefaultProtocol = "Noise_NK_25519_ChaChaPoly_BLAKE2s" .
# If present, Noise pre-shared-keys (PSKs) are drawn from the sequence as required; if the
# sequence is exhausted or not supplied, an all-zeros key is used each time a PSK is needed.
NoisePreSharedKeys = @present { preSharedKeys: [bytes ...] } / @invalid { preSharedKeys: any } / @absent {} .
# ---------------------------------------------------------------------------
# Handshaking and running a session
# 1. initiator asserts <resolve <noise ServiceSelector> #:A> at Gatekeeper
# 2. gatekeeper asserts <accepted #:B> at #:A
# 3. initiator asserts <initiator #:C> at #:B and then sends `Packet`s to #:B
# 4. responder sends `Packet`s to #:C
#
# Sessions begin with introduction of initiator (#:C) and responder (#:B) to each other, and
# then proceed by sending `Packet`s (from #:C) to #:B and (from #:B) to #:C according to
# the Noise protocol definition. Each `Packet` represents a complete logical unit of
# communication; for example, a complete Turn when layering the Syndicate protocol over Noise.
# Note well the restriction on Noise messages: no individual complete packet or packet fragment
# may exceed 65535 bytes (N.B. not 65536!). When `fragmented`, each portion of a `Packet` is a
# complete Noise "transport message"; when `complete`, the whole thing is likewise a complete
# "transport message".
#
# Retraction of the `Initiator` ends the session from the initiator-side; retraction of the
# `<accepted ...>` assertion ends the session from the responder-side.
SessionItem = Initiator / Packet .
# Assertion
Initiator = <initiator @initiatorSession #:Packet> .
# Message
Packet = @complete bytes / @fragmented [bytes ...] .
# When layering Syndicate protocol over noise,
#
# - the canonical encoding of the serviceSelector is the prologue
# - protocol.Packets MUST be encoded using the machine-oriented Preserves syntax
# - zero or more Turns are permitted per noise.Packet
# - each Turn must fit inside a single noise.Packet (fragment if needed)
# - payloads inside a noise.Packet may be padded at the end with byte 0x80 (128), which
# encodes `#f` in the machine-oriented Preserves syntax.
#
# In summary, each noise.Packet, once (reassembled and) decrypted, will be a sequence of zero
# or more machine-encoded protocol.Packets, followed by zero or more 0x80 bytes.
.
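
A rough sketch of honouring the 65535-byte transport-message limit noted above: split the plaintext so that each piece plus the 16-byte ChaChaPoly authentication tag fits in one Noise transport message, then encrypt each piece separately. The split size is an inference from the Noise specification; this is not the relay's implementation:

const MAX_TRANSPORT_MESSAGE: usize = 65535;
const AEAD_TAG_LEN: usize = 16; // ChaChaPoly authentication tag added per transport message

// Returns the plaintext pieces; each one is then encrypted as its own Noise
// "transport message". A single piece becomes a `complete` Packet, several
// pieces become a `fragmented` Packet.
fn fragment_plaintext(payload: &[u8]) -> Vec<Vec<u8>> {
    payload
        .chunks(MAX_TRANSPORT_MESSAGE - AEAD_TAG_LEN)
        .map(|c| c.to_vec())
        .collect()
}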

View File

@ -1,8 +1,6 @@
version 1 .
Packet = Turn / Error / Extension .
Extension = <<rec> @label any @fields [any ...]> .
Packet = Turn / Error .
Error = <error @message string @detail any>.
@ -13,7 +11,7 @@ Oid = int .
Turn = [TurnEvent ...].
TurnEvent = [@oid Oid @event Event].
Assert = <A @assertion Assertion @handle Handle>.
Retract = <R @handle Handle>.
Message = <M @body Assertion>.
Sync = <S @peer #:#t>.
Assert = <assert @assertion Assertion @handle Handle>.
Retract = <retract @handle Handle>.
Message = <message @body Assertion>.
Sync = <sync @peer #!#t>.

View File

@ -0,0 +1,3 @@
version 1 .
RacketEvent = <racket-event @source #!any @event #!any>.

View File

@ -0,0 +1,21 @@
version 1 .
embeddedType EntityRef.Cap .
UserId = int .
Join = <joinedUser @uid UserId @handle #!Session>.
Session = @observeUsers <Observe =user @observer #!UserInfo>
/ @observeSpeech <Observe =says @observer #!Says>
/ NickClaim
/ Says
.
NickClaim = <claimNick @uid UserId @name string @k #!NickClaimResponse>.
NickClaimResponse = #t / NickConflict .
UserInfo = <user @uid UserId @name string>.
Says = <says @who UserId @what string>.
NickConflict = <nickConflict>.

View File

@ -1,51 +1,54 @@
version 1 .
embeddedType EntityRef.Cap .
# Asserts that a service should begin (and stay) running after waiting
# for its dependencies and considering reverse-dependencies, blocks,
# and so on.
; Asserts that a service should begin (and stay) running after waiting
; for its dependencies and considering reverse-dependencies, blocks,
; and so on.
RequireService = <require-service @serviceName any>.
# Asserts that a service should begin (and stay) running RIGHT NOW,
# without considering its dependencies.
; Asserts that a service should begin (and stay) running RIGHT NOW,
; without considering its dependencies.
RunService = <run-service @serviceName any>.
# Asserts one or more current states of service `serviceName`. The
# overall state of the service is the union of asserted `state`s.
#
# Only a few combinations make sense:
# - `started`
# - `started` + `ready`
# - `failed`
# - `complete`
#
; Asserts one or more current states of service `serviceName`. The
; overall state of the service is the union of asserted `state`s.
;
; Only a few combinations make sense:
; - `started`
; - `started` + `ready`
; - `failed`
; - `complete`
;
ServiceState = <service-state @serviceName any @state State>.
# A running service publishes zero or more of these. The details of
# the object vary by service.
#
; A running service publishes zero or more of these. The details of
; the object vary by service.
;
ServiceObject = <service-object @serviceName any @object any>.
# Possible service states.
; Possible service states.
State =
/ # The service has begun its startup routine, and may or may not be
# ready to take requests from other parties.
/ ; The service has begun its startup routine, and may or may not be
; ready to take requests from other parties.
=started
/ # The service is ready to take requests from other parties.
# (This state is special in that it is asserted *in addition* to `started`.)
/ ; The service is ready to take requests from other parties.
; (This state is special in that it is asserted *in addition* to `started`.)
=ready
/ # The service has failed.
/ ; The service has failed.
=failed
/ # The service has completed execution.
/ ; The service has completed execution.
=complete
/ # Extension or user-defined state
@userDefined any
.
# Asserts that, when `depender` is `require-service`d, it should not be started until
# `dependee` has been asserted, and also that `dependee`'s `serviceName` should be
# `require-service`d.
; Asserts that, when `depender` is `require-service`d, it should not
; be started until `dependee` has been asserted.
ServiceDependency = <depends-on @depender any @dependee ServiceState>.
# Message. Triggers a service restart.
; Asserts that the service is a "system layer" service. If *not*
; specified for a service X, where X is not `<milestone Y>` for some
; Y, the system acts as if `<depends-on X <service-state <milestone
; system-layer> ready>>` were asserted.
SystemLayerService = <system-layer-service @serviceName any>.
; Message. Triggers a service restart.
RestartService = <restart-service @serviceName any>.
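
A small self-contained illustration of the "overall state is the union of asserted states" rule and the combinations the comments above call sensible; it is not part of the crate:

use std::collections::BTreeSet;

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum State { Started, Ready, Failed, Complete }

// True only for the documented combinations: started, started+ready, failed, complete.
fn is_sensible(asserted: &BTreeSet<State>) -> bool {
    use State::*;
    asserted == &BTreeSet::from([Started])
        || asserted == &BTreeSet::from([Started, Ready])
        || asserted == &BTreeSet::from([Failed])
        || asserted == &BTreeSet::from([Complete])
}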

View File

@ -0,0 +1,5 @@
version 1 .
embeddedType EntityRef.Cap .
Present = <Present @username string>.
Says = <Says @who string @what string>.

View File

@ -1,31 +0,0 @@
version 1 .
# A "standard" route is
#
# - a collection of websocket urls, for transport.
# - a noise tunnel, for server authentication, confidentiality and integrity.
# - a macaroon, for authorization.
#
# Making these choices allows a compact representation. Encoding a binary-syntax representation
# of a standard route using base64 produces a somewhat-convenient blob of text representing
# access to a network object that users can cut and paste.
#
# A `stdenv.StandardRoute.standard` can be rewritten to a `gatekeeper.Route` like this (with
# `$caveats`, if any, added as appropriate):
#
# <route $transports <noise { service: $service key: $key }> <ref { sig: $sig oid: $oid }>>
#
StandardRoute =
/ @standard [@transports [StandardTransport ...]
@key bytes
@service any
@sig bytes
@oid any
@caveats sturdy.Caveat ...]
/ @general gatekeeper.Route
.
StandardTransport =
/ @wsUrl string
/ @other any
.

View File

@ -1,38 +1,38 @@
version 1 .
embeddedType EntityRef.Cap .
# Assertion:
StreamConnection = <stream-connection @source #:Source @sink #:Sink @spec any>.
; Assertion:
StreamConnection = <stream-connection @source #!Source @sink #!Sink @spec any>.
# Assertions:
; Assertions:
StreamListenerReady = <stream-listener-ready @spec any>.
StreamListenerError = <stream-listener-error @spec any @message string>.
# Assertion:
; Assertion:
StreamError = <error @message string>.
Source =
# Assertions:
/ <sink @controller #:Sink>
; Assertions:
/ <sink @controller #!Sink>
/ StreamError
# Messages:
; Messages:
/ <credit @amount CreditAmount @mode Mode>
.
Sink =
# Assertions:
/ <source @controller #:Source>
; Assertions:
/ <source @controller #!Source>
/ StreamError
# Messages:
; Messages:
/ <data @payload any @mode Mode>
/ <eof>
.
# Value:
; Value:
CreditAmount = @count int / @unbounded =unbounded .
# Value:
; Value:
Mode = =bytes / @lines LineMode / <packet @size int> / <object @description any>.
LineMode = =lf / =crlf .

View File

@ -1,57 +1,30 @@
version 1 .
embeddedType EntityRef.Cap .
# ---------------------------------------------------------------------------
# Binding and connection
; Each Attenuation is a stage. The sequence of Attenuations is run RIGHT-TO-LEFT.
; That is, the newest Attenuations are at the right.
SturdyRef = <ref @oid any @caveatChain [Attenuation ...] @sig bytes>.
SturdyStepType = =ref .
; An individual Attenuation is run RIGHT-TO-LEFT.
; That is, the newest Caveats are at the right.
Attenuation = [Caveat ...].
# In a gatekeeper.Step or gatekeeper.PathStep, use Parameters as detail.
SturdyStepDetail = Parameters .
SturdyPathStepDetail = Parameters .
# In a gatekeeper.Description, use the following detail.
SturdyDescriptionDetail = {
oid: any,
key: bytes,
} .
# ---------------------------------------------------------------------------
# Macaroons
# The sequence of Caveats is run RIGHT-TO-LEFT.
# That is, the newest Caveats are at the right.
#
# Let f(k,d) = HMAC-BLAKE2s-256(k,d)[0..16),
# e = canonical machine-oriented serialization of some preserves value, and
# k = the original secret key for the ref.
#
# The `sig` is then f(f(f(f(k, e(oid)), ...), e(Caveat)), ...).
#
SturdyRef = <ref @parameters Parameters> .
Parameters = {
oid: any,
sig: bytes,
} & @caveats CaveatsField .
CaveatsField = @present { caveats: [Caveat ...] } / @invalid { caveats: any } / @absent {} .
# embodies 1st-party caveats over assertion structure, but nothing else
# can add 3rd-party caveats and richer predicates later
Caveat = Rewrite / Alts / Reject / @unknown any .
Rewrite = <rewrite @pattern Pattern @template Template> .
Reject = <reject @pattern Pattern> .
; embodies 1st-party caveats over assertion structure, but nothing else
; can add 3rd-party caveats and richer predicates later
Caveat = Rewrite / Alts .
Rewrite = <rewrite @pattern Pattern @template Template>.
Alts = <or @alternatives [Rewrite ...]>.
Oid = int .
WireRef = @mine [0 @oid Oid] / @yours [1 @oid Oid @attenuation Caveat ...].
# ---------------------------------------------------------------------------
;---------------------------------------------------------------------------
Lit = <lit @value any>.
Pattern = PDiscard / PAtom / PEmbedded / PBind / PAnd / PNot / Lit / PCompound .
PDiscard = <_>.
PAtom = =Boolean / =Double / =SignedInteger / =String / =ByteString / =Symbol .
PAtom = =Boolean / =Float / =Double / =SignedInteger / =String / =ByteString / =Symbol .
PEmbedded = =Embedded .
PBind = <bind @pattern Pattern>.
PAnd = <and @patterns [Pattern ...]>.
@ -62,7 +35,7 @@ PCompound =
/ @dict <dict @entries { any: Pattern ...:... }> .
Template = TAttenuate / TRef / Lit / TCompound .
TAttenuate = <attenuate @template Template @attenuation [Caveat ...]>.
TAttenuate = <attenuate @template Template @attenuation Attenuation>.
TRef = <ref @binding int>.
TCompound =
/ @rec <rec @label any @fields [Template ...]>
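
To make the chained-signature construction in the macaroon comments above concrete, here is a minimal sketch using the hmac and blake2 crates already present in the workspace dependencies; the encoded byte strings stand for canonical machine-oriented Preserves serializations, which are assumed inputs rather than APIs shown in this diff:

use blake2::Blake2s256;
use hmac::{Mac, SimpleHmac};

// f(k, d) = HMAC-BLAKE2s-256(k, d), truncated to its first 16 bytes.
fn f(k: &[u8], d: &[u8]) -> Vec<u8> {
    let mut mac = SimpleHmac::<Blake2s256>::new_from_slice(k)
        .expect("HMAC accepts keys of any length");
    mac.update(d);
    mac.finalize().into_bytes()[..16].to_vec()
}

// sig = f(...f(f(k, e(oid)), e(Caveat))..., e(Caveat)): start from the original
// secret key and the encoded oid, then fold in each encoded caveat in sequence.
fn chain_sig(key: &[u8], encoded_oid: &[u8], encoded_caveats: &[Vec<u8>]) -> Vec<u8> {
    let mut sig = f(key, encoded_oid);
    for c in encoded_caveats {
        sig = f(&sig, c);
    }
    sig
}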

View File

@ -4,4 +4,4 @@ embeddedType EntityRef.Cap .
TcpRemote = <tcp-remote @host string @port int>.
TcpLocal = <tcp-local @host string @port int>.
TcpPeerInfo = <tcp-peer @handle #:any @local TcpLocal @remote TcpRemote>.
TcpPeerInfo = <tcp-peer @handle #!any @local TcpLocal @remote TcpRemote>.

View File

@ -1,7 +1,7 @@
version 1 .
SetTimer = <set-timer @label any @seconds double @kind TimerKind>.
TimerExpired = <timer-expired @label any @seconds double>.
SetTimer = <set-timer @label any @msecs double @kind TimerKind>.
TimerExpired = <timer-expired @label any @msecs double>.
TimerKind = =relative / =absolute / =clear .
LaterThan = <later-than @seconds double>.
LaterThan = <later-than @msecs double>.

View File

@ -1,96 +0,0 @@
version 1 .
embeddedType EntityRef.Cap .
TraceEntry = <trace
@timestamp @"seconds since Unix epoch" double
@actor ActorId
@item ActorActivation> .
ActorActivation =
/ <start @actorName Name>
/ @turn TurnDescription
/ <stop @status ExitStatus>
.
Name =
/ <anonymous>
/ <named @name any>
.
ActorId = any .
FacetId = any .
Oid = any .
TaskId = any .
TurnId = any .
ExitStatus = =ok / protocol.Error .
# Trace information associated with a turn.
TurnDescription = <turn @id TurnId @cause TurnCause @actions [ActionDescription ...]> .
# The cause of a turn.
TurnCause =
/ @turn <caused-by @id TurnId>
/ <cleanup>
/ @linkedTaskRelease <linked-task-release @id TaskId @reason LinkedTaskReleaseReason>
/ @periodicActivation <periodic-activation @"`period` is in seconds" @period double>
/ <delay @causingTurn TurnId @"`amount` is in seconds" @amount double>
/ <external @description any>
.
LinkedTaskReleaseReason = =cancelled / =normal .
# An actual event carried within a turn.
TurnEvent =
/ <assert @assertion AssertionDescription @handle protocol.Handle>
/ <retract @handle protocol.Handle>
/ <message @body AssertionDescription>
/ <sync @peer Target>
/ # A souped-up, disguised, special-purpose `retract` event.
@breakLink <break-link @source ActorId @handle protocol.Handle>
.
TargetedTurnEvent = <event @target Target @detail TurnEvent> .
# An action taken during a turn.
ActionDescription =
/ # The active party is processing a new `event` for `target` from the received Turn.
<dequeue @event TargetedTurnEvent>
/ # The active party has queued a new `event` to be processed later by `target`.
<enqueue @event TargetedTurnEvent>
/ # The active party is processing an internally-queued event for one of its own entities.
@dequeueInternal <dequeue-internal @event TargetedTurnEvent>
/ # The active party has scheduled an internally-queued event for one of its own entities.
@enqueueInternal <enqueue-internal @event TargetedTurnEvent>
/ <spawn @link bool @id ActorId>
/ <link
@parentActor ActorId
@childToParent protocol.Handle
@childActor ActorId
@parentToChild protocol.Handle>
/ @facetStart <facet-start @path [FacetId ...]>
/ @facetStop <facet-stop @path [FacetId ...] @reason FacetStopReason>
/ @linkedTaskStart <linked-task-start @taskName Name @id TaskId>
.
# An assertion or the body of a message: either a Preserves value, or
# some opaque system-internal value, represented according to the
# system concerned.
AssertionDescription =
/ <value @value any>
/ <opaque @description any>
.
FacetStopReason =
/ @explicitAction =explicit-action
/ =inert
/ @parentStopping =parent-stopping
/ @actorStopping =actor-stopping
.
Target = <entity @actor ActorId @facet FacetId @oid Oid> .
# For the future: consider including information about `protocol`-level `Turn`s etc sent to
# peers over e.g. Websockets or TCP/IP, allowing cross-correlation of traces from different
# processes and implementations with each other to form a large overall picture.
.

File diff suppressed because it is too large.

View File

@ -4,7 +4,7 @@
use std::collections::BTreeMap;
use std::collections::btree_map::{Iter, Keys, Entry};
use std::iter::FromIterator;
use std::iter::{FromIterator, IntoIterator};
/// Element counts in [`BTreeBag`]s are 32-bit signed integers.
pub type Count = i32;

View File

@ -12,6 +12,7 @@ use super::language;
use super::skeleton;
use super::actor::*;
use super::schemas::dataspace::*;
use super::schemas::dataspace::_Any;
use preserves::value::Map;
use preserves_schema::Codec;
@ -19,7 +20,7 @@ use preserves_schema::Codec;
/// A Dataspace object (entity).
#[derive(Debug)]
pub struct Dataspace {
pub name: Name,
pub name: tracing::Span,
/// Index over assertions placed in the dataspace; used to
/// efficiently route assertion changes and messages to observers.
pub index: skeleton::Index,
@ -30,9 +31,10 @@ pub struct Dataspace {
impl Dataspace {
/// Construct a new, empty dataspace.
pub fn new(name: Name) -> Self {
pub fn new(name: Option<tracing::Span>) -> Self {
Self {
name,
name: name.map_or_else(|| crate::name!("anonymous_dataspace"),
|n| crate::name!(parent: &n, "dataspace")),
index: skeleton::Index::new(),
handle_map: Map::new(),
}
@ -60,8 +62,10 @@ impl Dataspace {
impl Entity<_Any> for Dataspace {
fn assert(&mut self, t: &mut Activation, a: _Any, h: Handle) -> ActorResult {
let _guard = self.name.enter();
let is_new = self.index.insert(t, &a);
tracing::trace!(dataspace = ?self.name, assertion = ?a, handle = ?h, ?is_new, "assert");
tracing::trace!(assertion = ?a, handle = ?h, ?is_new, "assert");
if is_new {
if let Ok(o) = language().parse::<Observe>(&a) {
@ -74,11 +78,13 @@ impl Entity<_Any> for Dataspace {
}
fn retract(&mut self, t: &mut Activation, h: Handle) -> ActorResult {
let _guard = self.name.enter();
match self.handle_map.remove(&h) {
None => tracing::warn!(dataspace = ?self.name, handle = ?h, "retract of unknown handle"),
None => tracing::warn!(handle = ?h, "retract of unknown handle"),
Some(a) => {
let is_last = self.index.remove(t, &a);
tracing::trace!(dataspace = ?self.name, assertion = ?a, handle = ?h, ?is_last, "retract");
tracing::trace!(assertion = ?a, handle = ?h, ?is_last, "retract");
if is_last {
if let Ok(o) = language().parse::<Observe>(&a) {
@ -91,7 +97,9 @@ impl Entity<_Any> for Dataspace {
}
fn message(&mut self, t: &mut Activation, m: _Any) -> ActorResult {
tracing::trace!(dataspace = ?self.name, body = ?m, "message");
let _guard = self.name.enter();
tracing::trace!(body = ?m, "message");
self.index.send(t, &m);
Ok(())
}
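As a usage note (not part of the diff): a dataspace is normally created as an entity inside a running actor and handed out as a capability. A minimal sketch; `None` type-checks against either constructor signature shown in this hunk, and `Cap::new` is the same wrapping helper used elsewhere in this diff.

use std::sync::Arc;
use syndicate::actor::*;
use syndicate::dataspace::Dataspace;

// Sketch: create a Dataspace entity in the current facet and wrap it as a
// capability. Observers assert Observe records at this capability, and the
// skeleton::Index routes matching assertions and messages back to them.
fn new_dataspace_cap(t: &mut Activation) -> Arc<Cap> {
    let ds = t.create(Dataspace::new(None));
    Cap::new(&ds)
}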

View File

@ -17,7 +17,7 @@ where
Fa: 'static + Send + FnMut(&mut E, &mut Activation, M) -> DuringResult<E>,
Fm: 'static + Send + FnMut(&mut E, &mut Activation, M) -> ActorResult,
Fs: 'static + Send + FnMut(&mut E, &mut Activation) -> ActorResult,
Fx: 'static + Send + FnMut(&mut E, &mut Activation, &Arc<ExitStatus>),
Fx: 'static + Send + FnMut(&mut E, &mut Activation, &Arc<ActorResult>) -> ActorResult,
{
state: E,
assertion_handler: Option<Fa>,
@ -54,7 +54,7 @@ pub fn entity<M: 'static + Send, E>(
fn (&mut E, &mut Activation, M) -> DuringResult<E>,
fn (&mut E, &mut Activation, M) -> ActorResult,
fn (&mut E, &mut Activation) -> ActorResult,
fn (&mut E, &mut Activation, &Arc<ExitStatus>)>
fn (&mut E, &mut Activation, &Arc<ActorResult>) -> ActorResult>
where
E: 'static + Send,
{
@ -68,7 +68,7 @@ where
Fa: 'static + Send + FnMut(&mut E, &mut Activation, M) -> DuringResult<E>,
Fm: 'static + Send + FnMut(&mut E, &mut Activation, M) -> ActorResult,
Fs: 'static + Send + FnMut(&mut E, &mut Activation) -> ActorResult,
Fx: 'static + Send + FnMut(&mut E, &mut Activation, &Arc<ExitStatus>),
Fx: 'static + Send + FnMut(&mut E, &mut Activation, &Arc<ActorResult>) -> ActorResult,
{
pub fn new(
state: E,
@ -154,7 +154,7 @@ where
pub fn on_exit<Fx1>(self, exit_handler: Fx1) -> DuringEntity<M, E, Fa, Fm, Fs, Fx1>
where
Fx1: 'static + Send + FnMut(&mut E, &mut Activation, &Arc<ExitStatus>),
Fx1: 'static + Send + FnMut(&mut E, &mut Activation, &Arc<ActorResult>) -> ActorResult,
{
DuringEntity {
state: self.state,
@ -175,7 +175,7 @@ where
t.on_stop_notify(&r);
}
if should_register_exit_hook {
t.add_exit_hook(&r);
t.state.add_exit_hook(&r);
}
r
}
@ -187,7 +187,7 @@ where
Fa: 'static + Send + FnMut(&mut E, &mut Activation, AnyValue) -> DuringResult<E>,
Fm: 'static + Send + FnMut(&mut E, &mut Activation, AnyValue) -> ActorResult,
Fs: 'static + Send + FnMut(&mut E, &mut Activation) -> ActorResult,
Fx: 'static + Send + FnMut(&mut E, &mut Activation, &Arc<ExitStatus>),
Fx: 'static + Send + FnMut(&mut E, &mut Activation, &Arc<ActorResult>) -> ActorResult,
{
pub fn create_cap(self, t: &mut Activation) -> Arc<Cap>
{
@ -202,7 +202,7 @@ where
Fa: 'static + Send + FnMut(&mut E, &mut Activation, M) -> DuringResult<E>,
Fm: 'static + Send + FnMut(&mut E, &mut Activation, M) -> ActorResult,
Fs: 'static + Send + FnMut(&mut E, &mut Activation) -> ActorResult,
Fx: 'static + Send + FnMut(&mut E, &mut Activation, &Arc<ExitStatus>),
Fx: 'static + Send + FnMut(&mut E, &mut Activation, &Arc<ActorResult>) -> ActorResult,
{
fn assert(&mut self, t: &mut Activation, a: M, h: Handle) -> ActorResult {
match &mut self.assertion_handler {
@ -232,9 +232,10 @@ where
}
}
fn exit_hook(&mut self, t: &mut Activation, exit_status: &Arc<ExitStatus>) {
if let Some(handler) = &mut self.exit_handler {
handler(&mut self.state, t, exit_status);
fn exit_hook(&mut self, t: &mut Activation, exit_status: &Arc<ActorResult>) -> ActorResult {
match &mut self.exit_handler {
Some(handler) => handler(&mut self.state, t, exit_status),
None => Ok(()),
}
}
}
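For orientation, a minimal usage sketch of the builder this file defines, assuming the `&Arc<ActorResult>`-returning flavour of the exit hook (the signature this diff changes) and `AnyValue` assertions; the counter state is a placeholder.

use std::sync::Arc;
use syndicate::actor::*;
use syndicate::during;

// Sketch: a small stateful entity assembled from closures. `on_asserted`
// runs once per incoming assertion (returning Ok(None) registers no
// retraction handler); the exit hook observes the actor's final status.
fn counter_cap(t: &mut Activation) -> Arc<Cap> {
    during::entity(0u32)
        .on_asserted(|count, _t, _a: AnyValue| {
            *count += 1;
            Ok(None)
        })
        .on_exit(|count, _t, _status| {
            tracing::info!(final_count = *count, "counter exiting");
            Ok(())
        })
        .create_cap(t)
}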

View File

@ -73,21 +73,3 @@ impl From<preserves::error::Error> for Error {
error(&format!("{}", v), AnyValue::new(false))
}
}
impl From<Box<dyn std::error::Error>> for Error {
fn from(v: Box<dyn std::error::Error>) -> Self {
match v.downcast::<Error>() {
Ok(e) => *e,
Err(v) => error(&format!("{}", v), AnyValue::new(false)),
}
}
}
impl From<Box<dyn std::error::Error + Send + Sync + 'static>> for Error {
fn from(v: Box<dyn std::error::Error + Send + Sync + 'static>) -> Self {
match v.downcast::<Error>() {
Ok(e) => *e,
Err(v) => error(&format!("{}", v), AnyValue::new(false)),
}
}
}

View File

@ -1,5 +1,4 @@
#![doc = include_str!("../README.md")]
#![feature(min_specialization)]
#[doc(inline)]
pub use preserves::value;
@ -30,54 +29,14 @@ pub mod schemas {
pub mod skeleton;
pub mod sturdy;
pub mod trace;
pub mod tracer;
#[doc(inline)]
pub use during::entity;
/// Sets up [`tracing`] logging in a reasonable way.
///
/// Useful at the top of `main` functions.
pub fn convenient_logging() -> actor::ActorResult {
let filter = match std::env::var(tracing_subscriber::filter::EnvFilter::DEFAULT_ENV) {
Err(std::env::VarError::NotPresent) =>
tracing_subscriber::filter::EnvFilter::default()
.add_directive(tracing_subscriber::filter::LevelFilter::INFO.into()),
_ =>
tracing_subscriber::filter::EnvFilter::try_from_default_env()?,
};
let subscriber = tracing_subscriber::fmt()
.with_ansi(true)
.with_thread_ids(true)
.with_max_level(tracing::Level::TRACE)
.with_env_filter(filter)
.with_writer(std::io::stderr)
.finish();
tracing::subscriber::set_global_default(subscriber)
.expect("Could not set tracing global subscriber");
Ok(())
}
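Typical use, as the doc comment says, is a single call at the top of `main`; a sketch assuming a tokio entry point, which is the runtime the rest of the crate already uses.

#[tokio::main]
async fn main() {
    // RUST_LOG (tracing_subscriber's default env var) selects the filter;
    // without it, INFO and above are written to stderr.
    syndicate::convenient_logging().expect("logging setup");
    // ... boot actors here ...
}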
/// Retrieve the version of the Syndicate crate.
pub fn syndicate_package_version() -> &'static str {
env!("CARGO_PKG_VERSION")
}
#[doc(inline)]
pub use tracer::convenient_logging;
preserves_schema::define_language!(language(): Language<actor::AnyValue> {
syndicate: schemas::Language,
});
#[cfg(test)]
mod protocol_test {
use crate::*;
use preserves::value::{BytesBinarySource, BinarySource, IOValueDomainCodec, ViaCodec, IOValue};
use preserves_schema::Deserialize;
#[test] fn decode_sync() {
let input_str = "[[2 <S #:[0 11]>]]";
let mut src = BytesBinarySource::new(input_str.as_bytes());
let mut r = src.text::<IOValue, _>(ViaCodec::new(IOValueDomainCodec));
let packet: schemas::protocol::Packet<IOValue> = schemas::protocol::Packet::deserialize(&mut r).unwrap();
println!("{:?}", packet);
}
}

View File

@ -1,5 +1,3 @@
use std::sync::Arc;
use crate::schemas::dataspace_patterns::*;
use super::language;
@ -10,25 +8,23 @@ use preserves::value::Record;
use preserves::value::Value;
use preserves_schema::Codec;
pub type PathStep = _Any;
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub enum PathStep {
Index(usize),
Key(_Any),
}
pub type Path = Vec<PathStep>;
pub type Paths = Vec<Path>;
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct ConstantPositions {
pub with_values: Paths,
pub required_to_exist: Paths,
}
struct Analyzer {
pub const_paths: Paths,
pub const_values: Vec<_Any>,
pub checked_paths: Paths,
pub capture_paths: Paths,
}
pub struct PatternAnalysis {
pub const_positions: Arc<ConstantPositions>,
pub const_paths: Paths,
pub const_values: _Any,
pub capture_paths: Paths,
}
@ -42,15 +38,11 @@ impl PatternAnalysis {
let mut analyzer = Analyzer {
const_paths: Vec::new(),
const_values: Vec::new(),
checked_paths: Vec::new(),
capture_paths: Vec::new(),
};
analyzer.walk(&mut Vec::new(), p);
PatternAnalysis {
const_positions: Arc::new(ConstantPositions {
with_values: analyzer.const_paths,
required_to_exist: analyzer.checked_paths,
}),
const_paths: analyzer.const_paths,
const_values: _Any::new(analyzer.const_values),
capture_paths: analyzer.capture_paths,
}
@ -66,21 +58,34 @@ impl Analyzer {
fn walk(&mut self, path: &mut Path, p: &Pattern) {
match p {
Pattern::Group { entries, .. } => {
for (k, p) in entries {
self.walk_step(path, k.clone(), p)
Pattern::DCompound(b) => match &**b {
DCompound::Rec { fields, .. } => {
for (i, p) in fields.iter().enumerate() {
self.walk_step(path, PathStep::Index(i), p);
}
}
DCompound::Arr { items, .. } => {
for (i, p) in items.iter().enumerate() {
self.walk_step(path, PathStep::Index(i), p);
}
}
DCompound::Dict { entries, .. } => {
for (k, p) in entries {
self.walk_step(path, PathStep::Key(k.clone()), p);
}
}
}
Pattern::Bind { pattern } => {
Pattern::DBind(b) => {
let DBind { pattern, .. } = &**b;
self.capture_paths.push(path.clone());
self.walk(path, &**pattern);
self.walk(path, pattern)
}
Pattern::Discard => {
self.checked_paths.push(path.clone());
}
Pattern::Lit { value } => {
Pattern::DDiscard(_) =>
(),
Pattern::DLit(b) => {
let DLit { value } = &**b;
self.const_paths.push(path.clone());
self.const_values.push(language().unparse(&**value));
self.const_values.push(language().unparse(value));
}
}
}
@ -104,47 +109,52 @@ impl PatternMatcher {
}
}
fn run_seq<'a, F: 'a + Fn(usize) -> &'a _Any>(&mut self, entries: &Map<_Any, Pattern<_Any>>, values: F) -> bool {
for (k, p) in entries {
match k.value().as_usize() {
None => return false,
Some(i) => if !self.run(p, values(i)) {
return false;
}
}
}
true
}
fn run(&mut self, pattern: &Pattern<_Any>, value: &_Any) -> bool {
match pattern {
Pattern::Discard => true,
Pattern::Bind { pattern } => {
Pattern::DDiscard(_) => true,
Pattern::DBind(b) => {
self.captures.push(value.clone());
self.run(&**pattern, value)
self.run(&b.pattern, value)
}
Pattern::Lit { value: expected } => value == &language().unparse(&**expected),
Pattern::Group { type_, entries } => match &**type_ {
GroupType::Rec { label } => {
match value.value().as_record(None) {
Pattern::DLit(b) => value == &language().unparse(&b.value),
Pattern::DCompound(b) => match &**b {
DCompound::Rec { label, fields } => {
match value.value().as_record(Some(fields.len())) {
None => false,
Some(r) =>
r.label() == label &&
self.run_seq(entries, |i| &r.fields()[i])
Some(r) => {
if r.label() != label {
return false;
}
for (i, p) in fields.iter().enumerate() {
if !self.run(p, &r.fields()[i]) {
return false;
}
}
true
}
}
}
GroupType::Arr => {
DCompound::Arr { items } => {
match value.value().as_sequence() {
None => false,
Some(vs) =>
self.run_seq(entries, |i| &vs[i])
Some(vs) => {
if vs.len() != items.len() {
return false;
}
for (i, p) in items.iter().enumerate() {
if !self.run(p, &vs[i]) {
return false;
}
}
true
}
}
}
GroupType::Dict => {
DCompound::Dict { entries: expected_entries } => {
match value.value().as_dictionary() {
None => false,
Some(actual_entries) => {
for (k, p) in entries {
for (k, p) in expected_entries.iter() {
if !actual_entries.get(k).map(|v| self.run(p, v)).unwrap_or(false) {
return false;
}
@ -160,68 +170,42 @@ impl PatternMatcher {
pub fn lift_literal(v: &_Any) -> Pattern {
match v.value() {
Value::Record(r) => Pattern::Group {
type_: Box::new(GroupType::Rec { label: r.label().clone() }),
entries: r.fields().iter().enumerate()
.map(|(i, v)| (_Any::new(i), lift_literal(v)))
.collect(),
},
Value::Sequence(items) => Pattern::Group {
type_: Box::new(GroupType::Arr),
entries: items.iter().enumerate()
.map(|(i, v)| (_Any::new(i), lift_literal(v)))
.collect(),
},
Value::Record(r) => Pattern::DCompound(Box::new(DCompound::Rec {
label: r.label().clone(),
fields: r.fields().iter().map(lift_literal).collect(),
})),
Value::Sequence(items) => Pattern::DCompound(Box::new(DCompound::Arr {
items: items.iter().map(lift_literal).collect(),
})),
Value::Set(_members) => panic!("Cannot express literal set in pattern"),
Value::Dictionary(entries) => Pattern::Group {
type_: Box::new(GroupType::Dict),
entries: entries.iter()
.map(|(k, v)| (k.clone(), lift_literal(v)))
.collect(),
},
_other => Pattern::Lit {
value: Box::new(language().parse(v).expect("Non-compound datum can be converted to AnyAtom")),
},
Value::Dictionary(entries) => Pattern::DCompound(Box::new(DCompound::Dict {
entries: entries.iter().map(|(k, v)| (k.clone(), lift_literal(v))).collect(),
})),
_other => Pattern::DLit(Box::new(DLit {
value: language().parse(v).expect("Non-compound datum can be converted to AnyAtom"),
})),
}
}
const DISCARD: Pattern = Pattern::Discard;
pub fn pattern_seq_from_dictionary(entries: &Map<_Any, Pattern>) -> Option<Vec<&Pattern>> {
let mut max_k: Option<usize> = None;
for k in entries.keys() {
max_k = max_k.max(Some(k.value().as_usize()?));
}
let mut seq = vec![];
if let Some(max_k) = max_k {
seq.reserve(max_k + 1);
for i in 0..=max_k {
seq.push(entries.get(&_Any::new(i)).unwrap_or(&DISCARD));
}
}
return Some(seq);
}
fn drop_literal_entries_seq(mut seq: Vec<_Any>, entries: &Map<_Any, Pattern>) -> Option<Vec<_Any>> {
for p in pattern_seq_from_dictionary(entries)?.into_iter() {
seq.push(drop_literal(p)?);
}
Some(seq)
}
pub fn drop_literal(p: &Pattern) -> Option<_Any> {
match p {
Pattern::Group { type_, entries } => match &**type_ {
GroupType::Rec { label } =>
Some(Value::Record(Record(drop_literal_entries_seq(vec![label.clone()], entries)?)).wrap()),
GroupType::Arr =>
Some(Value::Sequence(drop_literal_entries_seq(vec![], entries)?).wrap()),
GroupType::Dict =>
Some(Value::Dictionary(entries.iter()
.map(|(k, p)| Some((k.clone(), drop_literal(p)?)))
.collect::<Option<Map<_Any, _Any>>>()?).wrap()),
Pattern::DCompound(b) => match &**b {
DCompound::Rec { label, fields } => {
let mut r = vec![label.clone()];
for f in fields.iter() {
r.push(drop_literal(f)?);
}
Some(Value::Record(Record(r)).wrap())
}
DCompound::Arr { items } =>
Some(Value::Sequence(items.iter().map(drop_literal)
.collect::<Option<Vec<_Any>>>()?).wrap()),
DCompound::Dict { entries } =>
Some(Value::Dictionary(entries.iter()
.map(|(k, p)| Some((k.clone(), drop_literal(p)?)))
.collect::<Option<Map<_Any, _Any>>>()?).wrap()),
},
Pattern::Lit { value } => Some(language().unparse(&**value)),
Pattern::DLit(b) => Some(language().unparse(&b.value)),
_ => None,
}
}
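A small sketch of the two helpers at the end of this file: `lift_literal` turns a concrete value into a pattern built only from literal and compound nodes, and `drop_literal` recovers the value, returning `None` as soon as a binding or discard appears. It assumes `pattern` is exported as a public module (it is imported as `crate::pattern` elsewhere in this diff) and reuses the `Record` constructor style seen above.

use preserves::value::{NestedValue, Record, Value};
use syndicate::actor::AnyValue;
use syndicate::pattern::{drop_literal, lift_literal};

// Sketch: lifting a compound literal into a pattern and dropping it back
// round-trips to the original value.
fn round_trip() {
    let v: AnyValue = Value::Record(Record(vec![
        AnyValue::symbol("msg"),
        AnyValue::new("hi"),
    ])).wrap();
    let p = lift_literal(&v);
    assert_eq!(drop_literal(&p), Some(v));
}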

View File

@ -1,7 +1,6 @@
use bytes::Buf;
use bytes::BytesMut;
use crate::Language;
use crate::language;
use crate::actor::*;
use crate::during;
@ -10,14 +9,13 @@ use crate::error::error;
use crate::schemas::gatekeeper;
use crate::schemas::protocol as P;
use crate::schemas::sturdy;
use crate::trace;
use futures::Sink;
use futures::SinkExt;
use futures::Stream;
use futures::StreamExt;
pub use parking_lot::Mutex;
use parking_lot::Mutex;
use preserves::error::Error as PreservesError;
use preserves::error::is_eof_io_error;
@ -29,7 +27,6 @@ use preserves::value::Map;
use preserves::value::NestedValue;
use preserves::value::NoEmbeddedDomainCodec;
use preserves::value::PackedWriter;
use preserves::value::Set;
use preserves::value::TextWriter;
use preserves::value::ViaCodec;
use preserves::value::Writer;
@ -38,7 +35,6 @@ use preserves::value::signed_integer::SignedInteger;
use preserves_schema::Codec;
use preserves_schema::Deserialize;
use preserves_schema::ParseError;
use preserves_schema::support::Unparse;
use std::io;
use std::pin::Pin;
@ -77,7 +73,6 @@ struct Membranes {
exported: Membrane,
imported: Membrane,
next_export_oid: usize,
reimported_attenuations: Map<sturdy::Oid, Set<Arc<Cap>>>,
}
pub enum Input {
@ -93,7 +88,6 @@ pub enum Output {
type TunnelRelayRef = Arc<Mutex<Option<TunnelRelay>>>;
// There are other kinds of relay. This one has exactly two participants connected to each other.
#[derive(Debug)]
pub struct TunnelRelay
{
self_ref: TunnelRelayRef,
@ -101,6 +95,7 @@ pub struct TunnelRelay
outbound_assertions: Map<P::Handle, Vec<Arc<WireSymbol>>>,
membranes: Membranes,
pending_outbound: Vec<P::TurnEvent<AnyValue>>,
self_entity: Arc<Ref<()>>,
output: UnboundedSender<LoanedItem<Vec<u8>>>,
output_text: bool,
}
@ -114,8 +109,8 @@ struct TunnelRefEntity {
relay_ref: TunnelRelayRef,
}
struct ActivatedMembranes<'a, 'm> {
turn: &'a mut Activation,
struct ActivatedMembranes<'a, 'activation, 'm> {
turn: &'a mut Activation<'activation>,
tr_ref: &'m TunnelRelayRef,
membranes: &'m mut Membranes,
}
@ -174,46 +169,36 @@ impl Membrane {
ws
}
fn remove(&mut self, ws: &Arc<WireSymbol>) {
self.oid_map.remove(&ws.oid);
self.ref_map.remove(&ws.obj);
}
fn insert_inert_entity(&mut self, t: &mut Activation, oid: sturdy::Oid) -> Arc<WireSymbol> {
self.insert(oid, Cap::new(&t.inert_entity()))
}
}
pub fn connect_stream<I, O, Step, E, F>(
pub fn connect_stream<I, O, E, F>(
t: &mut Activation,
i: I,
o: O,
output_text: bool,
step: Step,
sturdyref: sturdy::SturdyRef,
initial_state: E,
mut f: F,
) -> ActorResult where
) where
I: 'static + Send + AsyncRead,
O: 'static + Send + AsyncWrite,
Step: for<'a> Unparse<&'a Language<AnyValue>, AnyValue>,
E: 'static + Send,
F: 'static + Send + FnMut(&mut E, &mut Activation, Arc<Cap>) -> during::DuringResult<E>
{
let i = Input::Bytes(Box::pin(i));
let o = Output::Bytes(Box::pin(o));
let gatekeeper = TunnelRelay::run(t, i, o, None, Some(sturdy::Oid(0.into())), output_text).unwrap();
let main_entity = t.create(during::entity(initial_state).on_asserted(move |state, t, a: gatekeeper::Resolved| {
match a {
gatekeeper::Resolved::Accepted { responder_session } => f(state, t, responder_session),
gatekeeper::Resolved::Rejected(r) => Err(error("Resolve rejected", r.detail))?,
}
let main_entity = t.create(during::entity(initial_state).on_asserted(move |state, t, a: AnyValue| {
let denotation = a.value().to_embedded()?;
f(state, t, Arc::clone(denotation))
}));
let step = language().parse::<gatekeeper::Step>(&language().unparse(&step))?;
gatekeeper.assert(t, language(), &gatekeeper::Resolve::<AnyValue> {
step,
observer: Cap::guard(Language::arc(), main_entity),
gatekeeper.assert(t, language(), &gatekeeper::Resolve {
sturdyref,
observer: Cap::new(&main_entity),
});
Ok(())
}
impl std::fmt::Debug for Membrane {
@ -228,57 +213,7 @@ impl std::fmt::Debug for Membrane {
macro_rules! dump_membranes { ($e:expr) => { tracing::trace!("membranes: {:#?}", $e); } }
// macro_rules! dump_membranes { ($e:expr) => { (); } }
/// Main entry point for stdio-based Syndicate services.
pub async fn stdio_service<F>(f: F) -> !
where
F: 'static + Send + FnOnce(&mut Activation) -> Result<Arc<Cap>, ActorError>
{
let result = Actor::top(None, move |t| {
let service = f(t)?;
Ok(TunnelRelay::stdio_service(t, service))
}).await;
// Because we're currently using tokio::io::stdin(), which can prevent shutdown of the
// runtime, this routine uses std::process::exit directly as a special case. It's a
// stopgap: eventually, we'd like to do things Properly, as indicated in the comment
// attached (at the time of writing) to tokio::io::stdin(), which reads in part:
//
// This handle is best used for non-interactive uses, such as when a file
// is piped into the application. For technical reasons, `stdin` is
// implemented by using an ordinary blocking read on a separate thread, and
// it is impossible to cancel that read. This can make shutdown of the
// runtime hang until the user presses enter.
//
// For interactive uses, it is recommended to spawn a thread dedicated to
// user input and use blocking IO directly in that thread.
//
// TODO: Revisit this.
match result {
Ok(Ok(())) => {
std::process::exit(0);
}
Ok(Err(e)) => {
tracing::error!("Main stdio_service actor failed: {}", e);
std::process::exit(1);
},
Err(e) => {
tracing::error!("Join of main stdio_service actor failed: {}", e);
std::process::exit(2);
}
}
}
impl TunnelRelay {
pub fn stdio_service(t: &mut Activation, service: Arc<Cap>) -> () {
TunnelRelay::run(t,
Input::Bytes(Box::pin(tokio::io::stdin())),
Output::Bytes(Box::pin(tokio::io::stdout())),
Some(service),
None,
false);
}
pub fn run(
t: &mut Activation,
i: Input,
@ -287,20 +222,6 @@ impl TunnelRelay {
initial_oid: Option<sturdy::Oid>,
output_text: bool,
) -> Option<Arc<Cap>> {
let (result, tr_ref, output_rx) = TunnelRelay::_run(t, initial_ref, initial_oid, output_text);
t.linked_task(Some(AnyValue::symbol("writer")),
output_loop(o, output_rx));
t.linked_task(Some(AnyValue::symbol("reader")),
input_loop(t.trace_collector(), t.facet_ref(), i, tr_ref));
result
}
pub fn _run(
t: &mut Activation,
initial_ref: Option<Arc<Cap>>,
initial_oid: Option<sturdy::Oid>,
output_text: bool,
) -> (Option<Arc<Cap>>, Arc<Mutex<Option<TunnelRelay>>>, UnboundedReceiver<LoanedItem<Vec<u8>>>) {
let (output_tx, output_rx) = unbounded_channel();
let tr_ref = Arc::new(Mutex::new(None));
let self_entity = t.create(TunnelRefEntity {
@ -316,9 +237,9 @@ impl TunnelRelay {
exported: Membrane::new(WireSymbolSide::Exported),
imported: Membrane::new(WireSymbolSide::Imported),
next_export_oid: 0,
reimported_attenuations: Map::new(),
},
pending_outbound: Vec::new(),
self_entity: self_entity.clone(),
};
if let Some(ir) = initial_ref {
tr.membranes.export_ref(ir).inc_ref();
@ -327,8 +248,10 @@ impl TunnelRelay {
|io| Arc::clone(&tr.membranes.import_oid(t, &tr_ref, io).inc_ref().obj));
dump_membranes!(tr.membranes);
*tr_ref.lock() = Some(tr);
t.add_exit_hook(&self_entity);
(result, tr_ref, output_rx)
t.linked_task(crate::name!("writer"), output_loop(o, output_rx));
t.linked_task(crate::name!("reader"), input_loop(t.facet.clone(), i, tr_ref));
t.state.add_exit_hook(&self_entity);
result
}
fn deserialize_one(&mut self, t: &mut Activation, bs: &[u8]) -> (Result<P::Packet<AnyValue>, ParseError>, usize) {
@ -355,13 +278,13 @@ impl TunnelRelay {
}
}
pub fn handle_inbound_datagram(&mut self, t: &mut Activation, bs: &[u8]) -> ActorResult {
fn handle_inbound_datagram(&mut self, t: &mut Activation, bs: &[u8]) -> ActorResult {
tracing::trace!(bytes = ?bs, "inbound datagram");
let item = self.deserialize_one(t, bs).0?;
self.handle_inbound_packet(t, item)
}
pub fn handle_inbound_stream(&mut self, t: &mut Activation, buf: &mut BytesMut) -> ActorResult {
fn handle_inbound_stream(&mut self, t: &mut Activation, buf: &mut BytesMut) -> ActorResult {
loop {
tracing::trace!(buffer = ?buf, "inbound stream");
let (result, count) = self.deserialize_one(t, buf);
@ -377,20 +300,15 @@ impl TunnelRelay {
}
}
pub fn handle_inbound_packet(&mut self, t: &mut Activation, p: P::Packet<AnyValue>) -> ActorResult {
fn handle_inbound_packet(&mut self, t: &mut Activation, p: P::Packet<AnyValue>) -> ActorResult {
tracing::debug!(packet = ?p, "-->");
match p {
P::Packet::Extension(b) => {
let P::Extension { label, fields } = *b;
tracing::info!(?label, ?fields, "received Extension from peer");
Ok(())
}
P::Packet::Error(b) => {
tracing::info!(message = ?b.message.clone(),
detail = ?b.detail.clone(),
"received Error from peer");
Err(*b)?
}
Err(*b)
},
P::Packet::Turn(b) => {
let P::Turn(events) = *b;
for P::TurnEvent { oid, event } in events {
@ -414,7 +332,7 @@ impl TunnelRelay {
&mut |r| Ok(pins.push(self.membranes.lookup_ref(r))))?;
if let Some(local_handle) = target.assert(t, &(), &a) {
if let Some(_) = self.inbound_assertions.insert(remote_handle, (local_handle, pins)) {
return Err(error("Assertion with duplicate handle", AnyValue::new(false)))?;
return Err(error("Assertion with duplicate handle", AnyValue::new(false)));
}
} else {
self.membranes.release(pins);
@ -424,7 +342,7 @@ impl TunnelRelay {
P::Event::Retract(b) => {
let P::Retract { handle: remote_handle } = *b;
let (local_handle, previous_pins) = match self.inbound_assertions.remove(&remote_handle) {
None => return Err(error("Retraction of nonexistent handle", language().unparse(&remote_handle)))?,
None => return Err(error("Retraction of nonexistent handle", language().unparse(&remote_handle))),
Some(wss) => wss,
};
self.membranes.release(previous_pins);
@ -471,11 +389,12 @@ impl TunnelRelay {
peer: Arc::clone(&peer),
pins,
});
target.sync(t, k);
t.sync(&peer.underlying, k);
}
}
}
t.commit()
t.deliver();
Ok(())
}
}
}
@ -547,7 +466,6 @@ impl TunnelRelay {
} else {
PackedWriter::encode(&mut self.membranes, &item)?
};
tracing::trace!(buffer = ?bs, "outbound bytes");
let _ = self.output.send(LoanedItem::new(account, cost, bs));
Ok(())
@ -555,19 +473,7 @@ impl TunnelRelay {
pub fn send_event(&mut self, t: &mut Activation, remote_oid: sturdy::Oid, event: P::Event<AnyValue>) -> ActorResult {
if self.pending_outbound.is_empty() {
let self_ref = Arc::clone(&self.self_ref);
t.pre_commit(move |t| {
let mut g = self_ref.lock();
let tr = g.as_mut().expect("initialized");
let events = std::mem::take(&mut tr.pending_outbound);
tr.send_packet(&t.account(),
events.len(),
P::Packet::Turn(Box::new(P::Turn(events.clone()))))?;
for P::TurnEvent { oid, event } in events.into_iter() {
tr.outbound_event_bookkeeping(t, sturdy::Oid(oid.0), &event)?;
}
Ok(())
});
t.message_for_myself(&self.self_entity, ());
}
self.pending_outbound.push(P::TurnEvent { oid: P::Oid(remote_oid.0), event });
Ok(())
@ -608,10 +514,9 @@ impl Membranes {
#[inline]
fn release_one(&mut self, ws: Arc<WireSymbol>) -> bool {
if ws.dec_ref() {
if let WireSymbolSide::Exported = ws.side {
self.reimported_attenuations.remove(&ws.oid);
}
self.membrane(ws.side).remove(&ws);
let membrane = self.membrane(ws.side);
membrane.oid_map.remove(&ws.oid);
membrane.ref_map.remove(&ws.obj);
true
} else {
false
@ -632,47 +537,38 @@ impl Membranes {
src: &'src mut S,
_read_annotations: bool,
) -> io::Result<Arc<Cap>> {
match sturdy::WireRef::deserialize(&mut src.packed(NoEmbeddedDomainCodec))? {
let ws = match sturdy::WireRef::deserialize(&mut src.packed(NoEmbeddedDomainCodec))? {
sturdy::WireRef::Mine{ oid: b } => {
let oid = *b;
let ws = self.imported.oid_map.get(&oid).map(Arc::clone)
.unwrap_or_else(|| self.import_oid(t, relay_ref, oid));
Ok(Arc::clone(&ws.inc_ref().obj))
self.imported.oid_map.get(&oid).map(Arc::clone)
.unwrap_or_else(|| self.import_oid(t, relay_ref, oid))
}
sturdy::WireRef::Yours { oid: b, attenuation } => {
let oid = *b;
let ws = self.exported.oid_map.get(&oid).map(Arc::clone)
.unwrap_or_else(|| self.exported.insert_inert_entity(t, oid.clone()));
if attenuation.is_empty() {
Ok(Arc::clone(&ws.inc_ref().obj))
self.exported.oid_map.get(&oid).map(Arc::clone).unwrap_or_else(
|| self.exported.insert_inert_entity(t, oid))
} else {
let attenuated_obj = ws.obj.attenuate(&attenuation)
.map_err(|e| {
io::Error::new(
io::ErrorKind::InvalidInput,
format!("Invalid capability attenuation: {:?}", e))
})?;
ws.inc_ref();
let variations = self.reimported_attenuations.entry(oid).or_default();
match variations.get(&attenuated_obj) {
None => {
variations.insert(Arc::clone(&attenuated_obj));
self.exported.ref_map.insert(Arc::clone(&attenuated_obj), Arc::clone(&ws));
Ok(attenuated_obj)
match self.exported.oid_map.get(&oid) {
None => self.exported.insert_inert_entity(t, oid),
Some(ws) => {
let attenuated_obj = ws.obj.attenuate(&sturdy::Attenuation(attenuation))
.map_err(|e| {
io::Error::new(
io::ErrorKind::InvalidInput,
format!("Invalid capability attenuation: {:?}", e))
})?;
self.exported.insert(oid, attenuated_obj)
}
Some(existing) =>
Ok(Arc::clone(existing))
}
}
}
}
};
Ok(Arc::clone(&ws.inc_ref().obj))
}
}
impl<'a, 'm> DomainDecode<Arc<Cap>> for ActivatedMembranes<'a, 'm> {
impl<'a, 'activation, 'm> DomainDecode<Arc<Cap>> for ActivatedMembranes<'a, 'activation, 'm> {
fn decode_embedded<'de, 'src, S: BinarySource<'de>>(
&mut self,
src: &'src mut S,
@ -718,29 +614,24 @@ impl DomainEncode<Arc<Cap>> for Membranes {
}
async fn input_loop(
trace_collector: Option<trace::TraceCollector>,
facet: FacetRef,
i: Input,
relay: TunnelRelayRef,
) -> Result<LinkedTaskTermination, Error> {
let account = Account::new(Some(AnyValue::symbol("input-loop")), trace_collector);
let cause = trace::TurnCause::external("input-loop");
let account = Account::new(crate::name!("input-loop"));
match i {
Input::Packets(mut src) => {
loop {
account.ensure_clear_funds().await;
match src.next().await {
None => break,
None => return Ok(LinkedTaskTermination::Normal),
Some(bs) => {
if !facet.activate(
&account, Some(cause.clone()), |t| {
let mut g = relay.lock();
let tr = g.as_mut().expect("initialized");
tr.handle_inbound_datagram(t, &bs?)
})
{
break;
}
let r = facet.activate(Arc::clone(&account), |t| {
let mut g = relay.lock();
let tr = g.as_mut().expect("initialized");
tr.handle_inbound_datagram(t, &bs?)
});
if !r.is_success() { return Ok(LinkedTaskTermination::Normal); }
}
}
}
@ -753,31 +644,27 @@ async fn input_loop(
buf.reserve(BUFSIZE);
let n = match r.read_buf(&mut buf).await {
Ok(n) => n,
Err(e) => {
Err(e) =>
if e.kind() == io::ErrorKind::ConnectionReset {
break;
}
return Err(e)?;
}
return Ok(LinkedTaskTermination::Normal);
} else {
return Err(e)?;
},
};
match n {
0 => break,
0 => return Ok(LinkedTaskTermination::Normal),
_ => {
if !facet.activate(
&account, Some(cause.clone()), |t| {
let mut g = relay.lock();
let tr = g.as_mut().expect("initialized");
tr.handle_inbound_stream(t, &mut buf)
})
{
break;
}
let r = facet.activate(Arc::clone(&account), |t| {
let mut g = relay.lock();
let tr = g.as_mut().expect("initialized");
tr.handle_inbound_stream(t, &mut buf)
});
if !r.is_success() { return Ok(LinkedTaskTermination::Normal); }
}
}
}
}
}
Ok(LinkedTaskTermination::Normal)
}
async fn output_loop(
@ -802,15 +689,25 @@ async fn output_loop(
}
impl Entity<()> for TunnelRefEntity {
fn exit_hook(&mut self, t: &mut Activation, exit_status: &Arc<ExitStatus>) {
if let ExitStatus::Error(e) = &**exit_status {
fn message(&mut self, t: &mut Activation, _m: ()) -> ActorResult {
let mut g = self.relay_ref.lock();
let tr = g.as_mut().expect("initialized");
let events = std::mem::take(&mut tr.pending_outbound);
tr.send_packet(&t.account(), events.len(), P::Packet::Turn(Box::new(P::Turn(events.clone()))))?;
for P::TurnEvent { oid, event } in events.into_iter() {
tr.outbound_event_bookkeeping(t, sturdy::Oid(oid.0), &event)?;
}
Ok(())
}
fn exit_hook(&mut self, t: &mut Activation, exit_status: &Arc<ActorResult>) -> ActorResult {
if let Err(e) = &**exit_status {
let e = e.clone();
let mut g = self.relay_ref.lock();
let tr = g.as_mut().expect("initialized");
if let Err(f) = tr.send_packet(&t.account(), 1, P::Packet::Error(Box::new(e))) {
tracing::error!("Failed to send error packet: {:?}", f);
}
tr.send_packet(&t.account(), 1, P::Packet::Error(Box::new(e)))?;
}
Ok(())
}
}
@ -837,7 +734,7 @@ impl Entity<AnyValue> for RelayEntity {
fn sync(&mut self, t: &mut Activation, peer: Arc<Ref<Synced>>) -> ActorResult {
self.relay_ref.lock().as_mut().expect("initialized")
.send_event(t, self.oid.clone(), P::Event::Sync(Box::new(P::Sync {
peer: Cap::guard(&Arc::new(()), peer)
peer: Cap::guard(Arc::new(()), peer)
})))
}
}
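As a usage note: the `stdio_service` entry point carried by one side of this diff is intended to be the entire `main` of a stdio-speaking Syndicate service. A minimal sketch, assuming a tokio runtime and reusing the dataspace constructor sketched earlier; it never returns, because `stdio_service` exits the process itself.

// Sketch only: serve a single dataspace capability over stdin/stdout.
#[tokio::main]
async fn main() {
    syndicate::relay::stdio_service(|t| {
        let ds = t.create(syndicate::dataspace::Dataspace::new(None));
        Ok(syndicate::actor::Cap::new(&ds))
    }).await;
}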

View File

@ -16,10 +16,7 @@ pub type CheckedRewrite = (usize, Pattern, Template);
/// A safety-checked [`Caveat`]: none of the errors enumerated in
/// `CaveatError` apply.
#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub enum CheckedCaveat {
Alts(Vec<CheckedRewrite>),
Reject(Pattern),
}
pub struct CheckedCaveat { alts: Vec<CheckedRewrite> }
/// Represents any detected error in a [`Caveat`]; that is, in a
/// [`Pattern`] or a [`Template`].
@ -31,44 +28,43 @@ pub enum CaveatError {
BindingUnderNegation,
}
impl Caveat {
/// Yields `Ok(())` iff `caveats` have no [`CaveatError`].
pub fn validate_many(caveats: &[Caveat]) -> Result<(), CaveatError> {
for c in caveats { c.validate()? }
impl Attenuation {
/// Yields `Ok(())` iff `self` has no [`CaveatError`].
pub fn validate(&self) -> Result<(), CaveatError> {
for c in &self.0 { c.validate()? }
Ok(())
}
/// Yields a vector of [`CheckedCaveat`s][CheckedCaveat] iff
/// `self` has no [`CaveatError`].
pub fn check(&self) -> Result<Vec<CheckedCaveat>, CaveatError> {
self.0.iter().map(Caveat::check).collect()
}
}
impl Caveat {
/// Yields `Ok(())` iff `self` has no [`CaveatError`].
pub fn validate(&self) -> Result<(), CaveatError> {
match self {
Caveat::Rewrite(b) => (&**b).validate(),
Caveat::Alts(b) => (&**b).alternatives.iter().map(Rewrite::validate).collect::<Result<(), _>>(),
Caveat::Reject(_) => Ok(()),
Caveat::Unknown(_) => Ok(()), /* it's valid to have unknown caveats, they just won't pass anything */
}
}
/// Yields a vector of [`CheckedCaveat`s][CheckedCaveat] iff
/// `caveats` have no [`CaveatError`].
pub fn check_many(caveats: &[Caveat]) -> Result<Vec<CheckedCaveat>, CaveatError> {
caveats.iter().map(Caveat::check).collect()
}
/// Yields a [`CheckedCaveat`] iff `self` has no [`CaveatError`].
pub fn check(&self) -> Result<CheckedCaveat, CaveatError> {
match self {
Caveat::Rewrite(b) =>
Ok(CheckedCaveat::Alts(vec![ (*b).check()? ])),
Ok(CheckedCaveat {
alts: vec![ (*b).check()? ]
}),
Caveat::Alts(b) => {
let Alts { alternatives } = &**b;
Ok(CheckedCaveat::Alts(
alternatives.into_iter().map(Rewrite::check)
.collect::<Result<Vec<CheckedRewrite>, CaveatError>>()?))
Ok(CheckedCaveat {
alts: alternatives.into_iter().map(Rewrite::check)
.collect::<Result<Vec<CheckedRewrite>, CaveatError>>()?
})
}
Caveat::Reject(b) =>
Ok(CheckedCaveat::Reject(b.pattern.clone())),
Caveat::Unknown(_) =>
Ok(CheckedCaveat::Reject(Pattern::PDiscard(Box::new(PDiscard)))),
}
}
}
@ -121,6 +117,7 @@ impl Pattern {
Pattern::PDiscard(_) => true,
Pattern::PAtom(b) => match &**b {
PAtom::Boolean => a.value().is_boolean(),
PAtom::Float => a.value().is_float(),
PAtom::Double => a.value().is_double(),
PAtom::SignedInteger => a.value().is_signedinteger(),
PAtom::String => a.value().is_string(),
@ -190,7 +187,7 @@ impl Template {
match self {
Template::TAttenuate(b) => {
let TAttenuate { template, attenuation } = &**b;
Caveat::validate_many(attenuation)?;
attenuation.validate()?;
Ok(template.implied_binding_count()?)
}
Template::TRef(b) => match usize::try_from(&(&**b).binding) {
@ -276,24 +273,12 @@ impl Rewrite {
impl CheckedCaveat {
/// Rewrites `a` using the patterns/templates contained in `self`.
pub fn rewrite(&self, a: &_Any) -> Option<_Any> {
match self {
CheckedCaveat::Alts(alts) => {
for (n, p, t) in alts {
let mut bindings = Vec::with_capacity(*n);
if p.matches(a, &mut bindings) {
return t.instantiate(&bindings);
}
}
None
},
CheckedCaveat::Reject(pat) => {
let mut bindings = Vec::with_capacity(0);
if pat.matches(a, &mut bindings) {
None
} else {
Some(a.clone())
}
for (n, p, t) in &self.alts {
let mut bindings = Vec::with_capacity(*n);
if let true = p.matches(a, &mut bindings) {
return t.instantiate(&bindings);
}
}
None
}
}
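To illustrate the check/rewrite pair, a sketch using the simplest possible caveat: the identity rewrite `<rewrite <bind <_>> <ref 0>>`, which captures the whole assertion and re-emits it unchanged. The types come from `schemas::sturdy`; the value being rewritten is an arbitrary placeholder.

use preserves::value::NestedValue;
use syndicate::actor::AnyValue;
use syndicate::schemas::sturdy::*;

// Sketch: check a caveat, then apply it. A rewrite whose pattern binds the
// whole input and whose template is <ref 0> passes any assertion through
// untouched.
fn identity_caveat() {
    let caveat = Caveat::Rewrite(Box::new(Rewrite {
        pattern: Pattern::PBind(Box::new(PBind {
            pattern: Pattern::PDiscard(Box::new(PDiscard)),
        })),
        template: Template::TRef(Box::new(TRef { binding: 0.into() })),
    }));
    let checked = caveat.check().expect("caveat is well-formed");
    let v = AnyValue::new(42u64);
    assert_eq!(checked.rewrite(&v), Some(v));
}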

View File

@ -16,12 +16,19 @@ use crate::actor::Activation;
use crate::actor::Handle;
use crate::actor::Cap;
use crate::schemas::dataspace_patterns as ds;
use crate::pattern::{self, ConstantPositions, PathStep, Path, Paths};
use crate::pattern::{self, PathStep, Path, Paths};
type Bag<A> = bag::BTreeBag<A>;
type Captures = AnyValue;
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
enum Guard {
Rec(AnyValue, usize),
Seq(usize),
Map,
}
/// Index of assertions and [`Observe`rs][crate::schemas::dataspace::Observe].
///
/// Generally speaking, you will not need to use this structure;
@ -37,13 +44,13 @@ pub struct Index {
#[derive(Debug)]
struct Node {
continuation: Continuation,
edges: Map<Selector, Map<ds::GroupType, Node>>,
edges: Map<Selector, Map<Guard, Node>>,
}
#[derive(Debug)]
struct Continuation {
cached_assertions: Set<AnyValue>,
leaf_map: Map<Arc<ConstantPositions>, Map<Captures, Leaf>>,
leaf_map: Map<Paths, Map<Captures, Leaf>>,
}
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
@ -198,7 +205,7 @@ impl Node {
}
fn extend(&mut self, pat: &ds::Pattern) -> &mut Continuation {
let (_pop_count, final_node) = self.extend_walk(&mut Vec::new(), 0, PathStep::new(0), pat);
let (_pop_count, final_node) = self.extend_walk(&mut Vec::new(), 0, PathStep::Index(0), pat);
&mut final_node.continuation
}
@ -209,13 +216,23 @@ impl Node {
step: PathStep,
pat: &ds::Pattern,
) -> (usize, &mut Node) {
let (guard, members): (ds::GroupType, Vec<(PathStep, &ds::Pattern)>) = match pat {
ds::Pattern::Group { type_, entries } =>
((&**type_).clone(),
entries.iter().map(|(k, p)| (k.clone(), p)).collect()),
ds::Pattern::Bind { pattern } =>
return self.extend_walk(path, pop_count, step, &**pattern),
ds::Pattern::Discard | ds::Pattern::Lit { .. } =>
let (guard, members): (Guard, Vec<(PathStep, &ds::Pattern)>) = match pat {
ds::Pattern::DCompound(b) => match &**b {
ds::DCompound::Arr { items } =>
(Guard::Seq(items.len()),
items.iter().enumerate().map(|(i, p)| (PathStep::Index(i), p)).collect()),
ds::DCompound::Rec { label, fields } =>
(Guard::Rec(label.clone(), fields.len()),
fields.iter().enumerate().map(|(i, p)| (PathStep::Index(i), p)).collect()),
ds::DCompound::Dict { entries, .. } =>
(Guard::Map,
entries.iter().map(|(k, p)| (PathStep::Key(k.clone()), p)).collect()),
}
ds::Pattern::DBind(b) => {
let ds::DBind { pattern, .. } = &**b;
return self.extend_walk(path, pop_count, step, pattern);
}
ds::Pattern::DDiscard(_) | ds::Pattern::DLit(_) =>
return (pop_count, self),
};
@ -319,46 +336,41 @@ where FCont: FnMut(&mut Continuation, &AnyValue) -> (),
fn continuation(&mut self, c: &mut Continuation) {
(self.m_cont)(c, self.outer_value);
let mut empty_const_positions = Vec::new();
for (const_positions, const_val_map) in &mut c.leaf_map {
if project_paths(self.outer_value, &const_positions.required_to_exist).is_none() {
continue;
}
let const_vals = match project_paths(self.outer_value, &const_positions.with_values) {
Some(vs) => vs,
None => continue,
};
let leaf_opt = if self.create_leaf_if_absent {
Some(const_val_map.entry(const_vals.clone()).or_insert_with(Leaf::new))
} else {
const_val_map.get_mut(&const_vals)
};
if let Some(leaf) = leaf_opt {
(self.m_leaf)(leaf, self.outer_value);
for (capture_paths, endpoints) in &mut leaf.endpoints_map {
if let Some(cs) = project_paths(self.outer_value, &capture_paths) {
(self.m_endpoints)(endpoints, cs);
let mut empty_const_paths = Vec::new();
for (const_paths, const_val_map) in &mut c.leaf_map {
if let Some(const_vals) = project_paths(self.outer_value, const_paths) {
let leaf_opt = if self.create_leaf_if_absent {
Some(const_val_map.entry(const_vals.clone()).or_insert_with(Leaf::new))
} else {
const_val_map.get_mut(&const_vals)
};
if let Some(leaf) = leaf_opt {
(self.m_leaf)(leaf, self.outer_value);
for (capture_paths, endpoints) in &mut leaf.endpoints_map {
if let Some(cs) = project_paths(self.outer_value, &capture_paths) {
(self.m_endpoints)(endpoints, cs);
}
}
}
if leaf.is_empty() {
const_val_map.remove(&const_vals);
if const_val_map.is_empty() {
empty_const_positions.push(const_positions.clone());
if leaf.is_empty() {
const_val_map.remove(&const_vals);
if const_val_map.is_empty() {
empty_const_paths.push(const_paths.clone());
}
}
}
}
}
for const_positions in empty_const_positions {
c.leaf_map.remove(&const_positions);
for const_paths in empty_const_paths {
c.leaf_map.remove(&const_paths);
}
}
}
fn class_of(v: &AnyValue) -> Option<ds::GroupType> {
fn class_of(v: &AnyValue) -> Option<Guard> {
match v.value() {
Value::Sequence(_) => Some(ds::GroupType::Arr),
Value::Record(r) => Some(ds::GroupType::Rec { label: r.label().clone() }),
Value::Dictionary(_) => Some(ds::GroupType::Dict),
Value::Sequence(vs) => Some(Guard::Seq(vs.len())),
Value::Record(r) => Some(Guard::Rec(r.label().clone(), r.arity())),
Value::Dictionary(_) => Some(Guard::Map),
_ => None,
}
}
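For intuition about the edge labels (`Guard` on one side of this diff, `ds::GroupType` on the other): only the outermost shape of an assertion is consulted when picking the next edge of the index trie. A standalone sketch mirroring the private `class_of` just above (the `Guard` flavour); it is illustrative only, not part of the module's API.

use preserves::value::{NestedValue, Value};
use syndicate::actor::AnyValue;

#[derive(Debug)]
enum Shape { Rec(AnyValue, usize), Seq(usize), Map }

// Sketch: classify a value by its outermost constructor, the way the index
// chooses which child node to descend into. Atoms and embedded values yield
// None and are handled by the continuation/leaf layers instead.
fn shape_of(v: &AnyValue) -> Option<Shape> {
    match v.value() {
        Value::Sequence(vs) => Some(Shape::Seq(vs.len())),
        Value::Record(r) => Some(Shape::Rec(r.label().clone(), r.arity())),
        Value::Dictionary(_) => Some(Shape::Map),
        _ => None,
    }
}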
@ -386,17 +398,15 @@ fn project_paths<'a>(v: &'a AnyValue, ps: &Paths) -> Option<Captures> {
}
fn step<'a>(v: &'a AnyValue, s: &PathStep) -> Option<&'a AnyValue> {
match v.value() {
Value::Sequence(vs) => {
let i = s.value().as_usize()?;
if i < vs.len() { Some(&vs[i]) } else { None }
}
Value::Record(r) => {
let i = s.value().as_usize()?;
if i < r.arity() { Some(&r.fields()[i]) } else { None }
}
Value::Dictionary(m) => m.get(s),
_ => None,
match (v.value(), s) {
(Value::Sequence(vs), PathStep::Index(i)) =>
if *i < vs.len() { Some(&vs[*i]) } else { None },
(Value::Record(r), PathStep::Index(i)) =>
if *i < r.arity() { Some(&r.fields()[*i]) } else { None },
(Value::Dictionary(m), PathStep::Key(k)) =>
m.get(k),
_ =>
None,
}
}
@ -413,14 +423,11 @@ impl Continuation {
) {
let cached_assertions = &self.cached_assertions;
let const_val_map =
self.leaf_map.entry(analysis.const_positions.clone()).or_insert_with({
self.leaf_map.entry(analysis.const_paths.clone()).or_insert_with({
|| {
let mut cvm = Map::new();
for a in cached_assertions {
if project_paths(a, &analysis.const_positions.required_to_exist).is_none() {
continue;
}
if let Some(key) = project_paths(a, &analysis.const_positions.with_values) {
if let Some(key) = project_paths(a, &analysis.const_paths) {
cvm.entry(key).or_insert_with(Leaf::new)
.cached_assertions.insert(a.clone());
}
@ -455,7 +462,7 @@ impl Continuation {
observer: &Arc<Cap>,
) {
if let Entry::Occupied(mut const_val_map_entry)
= self.leaf_map.entry(analysis.const_positions)
= self.leaf_map.entry(analysis.const_paths)
{
let const_val_map = const_val_map_entry.get_mut();
if let Entry::Occupied(mut leaf_entry)

View File

@ -1,8 +1,7 @@
use blake2::Blake2s256;
use getrandom::getrandom;
use hmac::{SimpleHmac, Mac};
use preserves::error::io_syntax_error;
use hmac::{Hmac, Mac, NewMac, crypto_mac::MacError};
use preserves::hex::HexParser;
use preserves::hex::HexFormatter;
use preserves::value::NestedValue;
@ -11,6 +10,8 @@ use preserves::value::packed::PackedWriter;
use preserves::value::packed::from_bytes;
use preserves_schema::Codec;
use sha2::Sha256;
use std::io;
use super::language;
@ -20,50 +21,33 @@ pub use super::schemas::sturdy::*;
#[derive(Debug)]
pub enum ValidationError {
SignatureError,
SignatureError(MacError),
AttenuationError(CaveatError),
BadCaveatsField,
}
impl std::fmt::Display for ValidationError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
match self {
ValidationError::SignatureError =>
ValidationError::SignatureError(_) =>
write!(f, "Invalid SturdyRef signature"),
ValidationError::AttenuationError(e) =>
write!(f, "Invalid SturdyRef attenuation: {:?}", e),
ValidationError::BadCaveatsField =>
write!(f, "Invalid caveats field in SturdyRef parameters"),
}
}
}
impl From<ValidationError> for io::Error {
fn from(v: ValidationError) -> Self {
io_syntax_error(&v.to_string())
}
}
impl std::error::Error for ValidationError {}
const KEY_LENGTH: usize = 16; // bytes; 128 bits
fn signature(key: &[u8], data: &[u8]) -> Vec<u8> {
let mut m = SimpleHmac::<Blake2s256>::new_from_slice(key).expect("valid key length");
let mut m = Hmac::<Sha256>::new_from_slice(key).expect("valid key length");
m.update(data);
let mut result = m.finalize().into_bytes().to_vec();
result.truncate(KEY_LENGTH);
result
}
fn chain_signature(key: &[u8], chain: &[Caveat]) -> Vec<u8> {
let mut key = key.to_vec();
for c in chain {
key = signature(&key, &encode(&language().unparse(c)));
}
key
}
pub fn new_key() -> Vec<u8> {
let mut buf = vec![0; KEY_LENGTH];
getrandom(&mut buf).expect("successful random number generation");
@ -81,21 +65,7 @@ pub fn decode<N: NestedValue>(bs: &[u8]) -> io::Result<N> {
impl SturdyRef {
pub fn mint(oid: _Any, key: &[u8]) -> Self {
let sig = signature(key, &encode(&oid));
SturdyRef::from_parts(oid, vec![], sig)
}
pub fn from_parts(oid: _Any, caveats: Vec<Caveat>, sig: Vec<u8>) -> Self {
SturdyRef {
parameters: Parameters {
oid,
sig,
caveats: if caveats.is_empty() {
CaveatsField::Absent
} else {
CaveatsField::Present { caveats }
}
}
}
SturdyRef { oid, caveat_chain: Vec::new(), sig }
}
pub fn from_hex(s: &str) -> Result<Self, Error> {
@ -107,44 +77,44 @@ impl SturdyRef {
HexFormatter::Packed.encode(&encode(&language().unparse(self)))
}
pub fn caveat_chain(&self) -> Result<&[Caveat], ValidationError> {
match &self.parameters.caveats {
CaveatsField::Absent => Ok(&[]),
CaveatsField::Invalid { .. } => Err(ValidationError::BadCaveatsField),
CaveatsField::Present { caveats } => Ok(caveats),
}
}
pub fn validate_and_attenuate(
&self,
key: &[u8],
unattenuated_target: &_Ptr,
) -> Result<_Ptr, ValidationError> {
self.validate(key).map_err(|_| ValidationError::SignatureError)?;
self.validate(key).map_err(ValidationError::SignatureError)?;
let mut attenuation = Vec::new();
// TODO: Make sure of the ordering here!!
for a in self.caveat_chain.iter().rev() {
attenuation.extend(a.0.iter().rev().cloned());
}
let target = unattenuated_target
.attenuate(self.caveat_chain()?)
.attenuate(&Attenuation(attenuation))
.map_err(ValidationError::AttenuationError)?;
Ok(target)
}
pub fn validate(&self, key: &[u8]) -> Result<(), ()> {
let SturdyRef { parameters: Parameters { oid, sig, .. } } = self;
let key = chain_signature(&signature(&key, &encode(oid)),
self.caveat_chain().map_err(|_| ())?);
pub fn validate(&self, key: &[u8]) -> Result<(), MacError> {
let SturdyRef { oid, caveat_chain, sig } = self;
let mut key = key.to_vec();
key = signature(&key, &encode(oid));
for c in caveat_chain {
key = signature(&key, &encode(&language().unparse(c)));
}
if &key == sig {
Ok(())
} else {
Err(())
Err(MacError)
}
}
pub fn attenuate(&self, attenuation: &[Caveat]) -> Result<Self, ValidationError> {
Caveat::validate_many(attenuation).map_err(ValidationError::AttenuationError)?;
let SturdyRef { parameters: Parameters { oid, sig, .. } } = self;
pub fn attenuate(&self, attenuation: &Attenuation) -> Result<Self, CaveatError> {
attenuation.validate()?;
let SturdyRef { oid, caveat_chain, sig } = self;
let oid = oid.clone();
let mut caveat_chain = self.caveat_chain()?.to_vec();
caveat_chain.extend(attenuation.iter().cloned());
let sig = chain_signature(&sig, attenuation);
Ok(SturdyRef::from_parts(oid, caveat_chain, sig))
let mut caveat_chain = caveat_chain.clone();
caveat_chain.push(attenuation.clone());
let sig = signature(&sig, &encode(&language().unparse(attenuation)));
Ok(SturdyRef { oid, caveat_chain, sig })
}
}
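A sketch of the minting/validation round trip, written to be independent of which error type `validate` returns on the two sides of this diff (hence the `.is_ok()` check); the oid value is an arbitrary placeholder.

use preserves::value::NestedValue;
use syndicate::actor::AnyValue;
use syndicate::sturdy::{new_key, SturdyRef};

// Sketch: mint a SturdyRef with a fresh 128-bit key and confirm the
// signature chain validates against that same key. Attenuation (not shown)
// would extend the chain with caveats and re-sign, as in `attenuate` above.
fn mint_and_validate() {
    let key = new_key();
    let r = SturdyRef::mint(AnyValue::new("an-oid"), &key);
    assert!(r.validate(&key).is_ok());
}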

View File

@ -1,19 +1,23 @@
//! Extremely simple single-actor supervision. Vastly simplified compared to the available
//! options in [Erlang/OTP](https://erlang.org/doc/man/supervisor.html).
use preserves::value::NestedValue;
use std::collections::VecDeque;
use std::sync::Arc;
use std::sync::Mutex;
use std::time::Duration;
use tokio::time::Instant;
use crate::actor::*;
use crate::enclose;
use crate::schemas::service::State;
pub type Boot = Arc<Mutex<Box<dyn Send + FnMut(&mut Activation) -> ActorResult>>>;
pub type Boot = Box<dyn Send + FnMut(&mut Activation) -> ActorResult>;
enum Protocol {
SuperviseeStarted, // assertion
BootFunction(Boot), // message
Retry, // message
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum RestartPolicy {
@ -30,19 +34,26 @@ pub struct SupervisorConfiguration {
pub restart_policy: RestartPolicy,
}
#[derive(Debug)]
struct StartNow;
pub struct Supervisor {
self_ref: Arc<Ref<StartNow>>,
child_name: Name,
self_ref: Arc<Ref<Protocol>>,
name: tracing::Span,
config: SupervisorConfiguration,
boot_fn: Boot,
boot_fn: Option<Boot>,
restarts: VecDeque<Instant>,
state: Arc<Field<State>>,
ac_ref: Option<ActorRef>,
}
impl std::fmt::Debug for Protocol {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Protocol::SuperviseeStarted => write!(f, "Protocol::SuperviseeStarted"),
Protocol::BootFunction(_) => write!(f, "Protocol::BootFunction(_)"),
Protocol::Retry => write!(f, "Protocol::Retry"),
}
}
}
impl Default for SupervisorConfiguration {
fn default() -> Self {
Self {
@ -64,23 +75,30 @@ impl SupervisorConfiguration {
}
}
impl Entity<StartNow> for Supervisor
impl Entity<Protocol> for Supervisor
{
fn message(&mut self, t: &mut Activation, _m: StartNow) -> ActorResult {
self.start_now(t)
fn assert(&mut self, t: &mut Activation, m: Protocol, _h: Handle) -> ActorResult {
match m {
Protocol::SuperviseeStarted => t.set(&self.state, State::Started),
_ => Err(format!("Unexpected assertion: {:?}", m).as_str())?,
}
Ok(())
}
fn stop(&mut self, t: &mut Activation) -> ActorResult {
let _entry = tracing::info_span!("supervisor", name = ?self.child_name).entered();
match self.ac_ref.take().expect("valid supervisee ActorRef").exit_status() {
None =>
tracing::debug!("Supervisor shut down; supervisee will exit soon"),
Some(Ok(())) if self.config.restart_policy == RestartPolicy::OnErrorOnly => {
fn retract(&mut self, t: &mut Activation, _h: Handle) -> ActorResult {
let _name = self.name.clone();
let _entry = _name.enter();
let exit_status =
self.ac_ref.take().expect("valid supervisee ActorRef")
.exit_status()
.expect("supervisee to have terminated");
tracing::debug!(?exit_status);
match exit_status {
Ok(()) if self.config.restart_policy == RestartPolicy::OnErrorOnly => {
tracing::trace!("Not restarting: normal exit, restart_policy is OnErrorOnly");
t.set(&self.state, State::Complete);
},
Some(exit_status) => {
tracing::debug!(?exit_status);
_ => {
tracing::trace!("Restarting: restart_policy is Always or exit was abnormal");
t.set(&self.state,
if exit_status.is_ok() { State::Complete } else { State::Failed });
@ -104,63 +122,105 @@ impl Entity<StartNow> for Supervisor
};
t.after(wait_time, move |t| {
tracing::trace!("Sending retry trigger");
t.message(&self_ref, StartNow);
t.message(&self_ref, Protocol::Retry);
Ok(())
});
},
}
Ok(())
}
fn message(&mut self, t: &mut Activation, m: Protocol) -> ActorResult {
match m {
Protocol::BootFunction(b) => {
self.boot_fn = Some(b);
Ok(())
}
Protocol::Retry => {
self.ensure_started(t)
}
_ => Ok(())
}
}
fn stop(&mut self, _t: &mut Activation) -> ActorResult {
let _entry = self.name.enter();
tracing::info!(self_ref = ?self.self_ref, "Supervisor terminating");
Ok(())
}
}
impl Supervisor {
pub fn start<C: 'static + Send + FnMut(&mut Activation, State) -> ActorResult,
B: 'static + Send + FnMut(&mut Activation) -> ActorResult>(
t: &mut Activation,
name: Name,
name: tracing::Span,
config: SupervisorConfiguration,
mut state_cb: C,
boot_fn: B,
) -> ActorResult {
let _entry = tracing::info_span!("supervisor", ?name).entered();
let _entry = name.enter();
tracing::trace!(?config);
let self_ref = t.create_inert();
let state_field = t.named_field("supervisee_state", State::Started);
let my_name = name.as_ref().map(
|n| preserves::rec![AnyValue::symbol("supervisor"), n.clone()]);
let mut supervisor = Supervisor {
self_ref: Arc::clone(&self_ref),
child_name: name,
name: name.clone(),
config,
boot_fn: Arc::new(Mutex::new(Box::new(boot_fn))),
boot_fn: Some(Box::new(boot_fn)),
restarts: VecDeque::new(),
state: Arc::clone(&state_field),
ac_ref: None,
};
tracing::info!(self_ref = ?supervisor.self_ref, "Supervisor starting");
supervisor.start_now(t)?;
t.dataflow(move |t| {
supervisor.ensure_started(t)?;
t.dataflow(enclose!((name) move |t| {
let state = t.get(&state_field).clone();
tracing::debug!(name = ?my_name, ?state);
{
let _entry = name.enter();
tracing::debug!(?state);
}
state_cb(t, state)
})?;
}))?;
self_ref.become_entity(supervisor);
t.on_stop_notify(&self_ref);
Ok(())
}
fn start_now(&mut self, t: &mut Activation) -> ActorResult {
let boot_cell = Arc::clone(&self.boot_fn);
t.facet(|t: &mut Activation| {
t.on_stop_notify(&self.self_ref);
self.ac_ref = Some(t.spawn_link(
self.child_name.clone(),
move |t| boot_cell.lock().expect("Unpoisoned boot_fn mutex")(t)));
tracing::debug!(self_ref = ?self.self_ref,
supervisee = ?self.ac_ref,
"Supervisee started");
Ok(())
})?;
t.set(&self.state, State::Started);
fn ensure_started(&mut self, t: &mut Activation) -> ActorResult {
match self.boot_fn.take() {
None => {
let _entry = self.name.enter();
t.set(&self.state, State::Failed);
tracing::error!("Cannot restart supervisee, because it panicked at startup")
}
Some(mut boot_fn) => {
let self_ref = Arc::clone(&self.self_ref);
t.facet(|t: &mut Activation| {
t.assert(&self.self_ref, Protocol::SuperviseeStarted);
self.ac_ref = Some(t.spawn_link(
crate::name!(parent: &self.name, "supervisee"),
move |t| {
match boot_fn(t) {
Ok(()) => {
t.message(&self_ref, Protocol::BootFunction(boot_fn));
Ok(())
}
Err(e) => {
t.clear();
t.message(&self_ref, Protocol::BootFunction(boot_fn));
t.deliver();
Err(e)
}
}
}));
tracing::debug!(self_ref = ?self.self_ref,
supervisee = ?self.ac_ref,
"Supervisee started");
Ok(())
})?;
}
}
Ok(())
}
}
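A usage sketch, assuming the variant of `Supervisor::start` whose `name` parameter is an actor `Name` (an optional `AnyValue`); the other side of this hunk takes a `tracing::Span` there instead. The worker body is a placeholder.

use syndicate::actor::*;
use syndicate::supervise::{Supervisor, SupervisorConfiguration};

// Sketch: keep a worker actor alive under the default restart policy,
// logging its supervised State as it changes.
fn supervise_worker(t: &mut Activation) -> ActorResult {
    Supervisor::start(
        t,
        Some(AnyValue::symbol("worker")),
        SupervisorConfiguration::default(),
        |_t, state| {
            tracing::info!(?state, "worker state changed");
            Ok(())
        },
        |_t| {
            // boot_fn: runs inside the supervised actor; re-run on restart.
            tracing::info!("worker booting");
            Ok(())
        })
}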

View File

@ -1,174 +0,0 @@
//! Records *describing* actions committed at the end of a turn and
//! events triggering the start of a turn. These are not the actions
//! or events themselves: they are reflective information on the
//! action of the system, enough to reconstruct interesting
//! projections of system activity.
pub use super::schemas::trace::*;
use preserves::value::NestedValue;
use preserves::value::Writer;
use preserves_schema::Codec;
use super::actor::{self, AnyValue, Ref, Cap};
use super::language;
use std::num::NonZeroU64;
use std::sync::Arc;
use std::time::SystemTime;
use tokio::select;
use tokio::sync::mpsc::{unbounded_channel, UnboundedSender};
#[derive(Debug, Clone)]
pub struct TraceCollector {
pub tx: UnboundedSender<TraceEntry>,
}
impl<M> From<&Ref<M>> for Target {
fn from(v: &Ref<M>) -> Target {
Target {
actor: v.mailbox.actor_id.into(),
facet: v.facet_id.into(),
oid: Oid(AnyValue::new(v.oid())),
}
}
}
impl<M: std::fmt::Debug> From<&M> for AssertionDescription {
default fn from(v: &M) -> Self {
Self::Opaque { description: AnyValue::new(format!("{:?}", v)) }
}
}
impl From<&AnyValue> for AssertionDescription {
fn from(v: &AnyValue) -> Self {
Self::Value { value: v.clone() }
}
}
impl TraceCollector {
pub fn record(&self, id: actor::ActorId, a: ActorActivation) {
let _ = self.tx.send(TraceEntry {
timestamp: SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)
.expect("Time after Unix epoch").as_secs_f64().into(),
actor: id.into(),
item: a,
});
}
}
impl TurnDescription {
pub fn new(activation_id: u64, cause: TurnCause) -> Self {
Self {
id: TurnId(AnyValue::new(activation_id)),
cause,
actions: Vec::new(),
}
}
pub fn record(&mut self, a: ActionDescription) {
self.actions.push(a)
}
pub fn take(&mut self) -> Self {
Self {
id: self.id.clone(),
cause: self.cause.clone(),
actions: std::mem::take(&mut self.actions),
}
}
}
impl TurnCause {
pub fn external(description: &str) -> Self {
Self::External { description: AnyValue::new(description) }
}
}
struct CapEncoder;
impl preserves::value::DomainEncode<Arc<Cap>> for CapEncoder {
fn encode_embedded<W: Writer>(
&mut self,
w: &mut W,
d: &Arc<Cap>,
) -> std::io::Result<()> {
w.write_string(&d.debug_str())
}
}
pub enum CollectorEvent {
Event(TraceEntry),
PeriodicFlush,
}
impl TraceCollector {
pub fn new<F: 'static + Send + FnMut(CollectorEvent)>(mut f: F) -> TraceCollector {
let (tx, mut rx) = unbounded_channel::<TraceEntry>();
tokio::spawn(async move {
let mut timer = tokio::time::interval(std::time::Duration::from_millis(100));
loop {
select! {
maybe_entry = rx.recv() => {
match maybe_entry {
None => break,
Some(entry) => {
tracing::trace!(?entry);
f(CollectorEvent::Event(entry));
}
}
},
_ = timer.tick() => f(CollectorEvent::PeriodicFlush),
}
}
});
TraceCollector { tx }
}
pub fn ascii<W: 'static + std::io::Write + Send>(w: W) -> TraceCollector {
let mut writer = preserves::value::TextWriter::new(w);
Self::new(move |event| match event {
CollectorEvent::Event(entry) => {
writer.write(&mut CapEncoder, &language().unparse(&entry))
.expect("failed to write TraceCollector entry");
writer.borrow_write().write_all(b"\n")
.expect("failed to write TraceCollector newline");
},
CollectorEvent::PeriodicFlush =>
writer.flush().expect("failed to flush TraceCollector output"),
})
}
pub fn packed<W: 'static + std::io::Write + Send>(w: W) -> TraceCollector {
let mut writer = preserves::value::PackedWriter::new(w);
Self::new(move |event| match event {
CollectorEvent::Event(entry) =>
writer.write(&mut CapEncoder, &language().unparse(&entry))
.expect("failed to write TraceCollector entry"),
CollectorEvent::PeriodicFlush =>
writer.flush().expect("failed to flush TraceCollector output"),
})
}
}
impl From<actor::Name> for Name {
fn from(v: actor::Name) -> Name {
match v {
None => Name::Anonymous,
Some(n) => Name::Named { name: n.clone() },
}
}
}
impl From<NonZeroU64> for ActorId {
fn from(v: NonZeroU64) -> Self {
ActorId(AnyValue::new(u64::from(v)))
}
}
impl From<NonZeroU64> for FacetId {
fn from(v: NonZeroU64) -> Self {
FacetId(AnyValue::new(u64::from(v)))
}
}
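
For orientation, a minimal sketch of how the pieces of this removed module fit together, assuming the crate exposes it as syndicate::trace and that a Tokio runtime is available (required because TraceCollector::new spawns a background flushing task); the entry point and argument values below are illustrative only:

use syndicate::trace::{TraceCollector, TurnCause, TurnDescription};

#[tokio::main]
async fn main() {
    // Emit one Preserves text value per TraceEntry on stderr; the collector's
    // background task additionally flushes the writer every 100ms.
    let collector: TraceCollector = TraceCollector::ascii(std::io::stderr());

    // Describe a turn attributed to a cause outside the actor system. The u64
    // argument is the activation id that becomes the TurnId.
    let mut turn = TurnDescription::new(1, TurnCause::external("sketch"));

    // take() snapshots the actions recorded so far and clears them, keeping the
    // same id and cause, so the TurnDescription value can be reused.
    let _snapshot: TurnDescription = turn.take();

    // Entries reach the collector via collector.record(actor_id, activation);
    // dropping the collector closes the channel and stops the background task.
    drop(collector);
}
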

66
syndicate/src/tracer.rs Normal file
View File

@ -0,0 +1,66 @@
use crate::actor::*;
use std::fmt::Debug;
use std::io;
use std::sync::Arc;
struct Tracer(tracing::Span);
fn set_name_oid<M>(t: &mut Tracer, r: &Arc<Ref<M>>) {
t.0.record("oid", &tracing::field::display(&r.oid()));
}
pub fn tracer<M: Debug>(t: &mut Activation, name: tracing::Span) -> Arc<Ref<M>> {
let mut e = Tracer(name);
let r = t.create_inert();
set_name_oid(&mut e, &r);
r.become_entity(e);
r
}
impl<M: Debug> Entity<M> for Tracer {
fn assert(&mut self, _t: &mut Activation, a: M, h: Handle) -> ActorResult {
let _guard = self.0.enter();
tracing::trace!(?a, ?h, "assert");
Ok(())
}
fn retract(&mut self, _t: &mut Activation, h: Handle) -> ActorResult {
let _guard = self.0.enter();
tracing::trace!(?h, "retract");
Ok(())
}
fn message(&mut self, _t: &mut Activation, m: M) -> ActorResult {
let _guard = self.0.enter();
tracing::trace!(?m, "message");
Ok(())
}
fn sync(&mut self, t: &mut Activation, peer: Arc<Ref<Synced>>) -> ActorResult {
let _guard = self.0.enter();
tracing::trace!(?peer, "sync");
t.message(&peer, Synced);
Ok(())
}
}
/// Sets up [`tracing`] logging in a reasonable way.
///
/// Useful at the top of `main` functions.
pub fn convenient_logging() -> Result<(), Box<dyn std::error::Error>> {
let filter = match std::env::var(tracing_subscriber::filter::EnvFilter::DEFAULT_ENV) {
Err(std::env::VarError::NotPresent) =>
tracing_subscriber::filter::EnvFilter::default()
.add_directive(tracing_subscriber::filter::LevelFilter::INFO.into()),
_ =>
tracing_subscriber::filter::EnvFilter::try_from_default_env()?,
};
let subscriber = tracing_subscriber::fmt()
.with_ansi(true)
.with_thread_ids(true)
.with_max_level(tracing::Level::TRACE)
.with_env_filter(filter)
.with_writer(io::stderr)
.finish();
tracing::subscriber::set_global_default(subscriber)
.expect("Could not set tracing global subscriber");
Ok(())
}
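
A minimal usage sketch, assuming the new module is exposed as syndicate::tracer: convenient_logging is intended to be called once, near the top of main, before any spans or events are emitted.

use syndicate::tracer::convenient_logging;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Honours RUST_LOG when it is set; otherwise defaults to INFO-level output.
    // Output goes to stderr, with ANSI colours and thread ids enabled.
    convenient_logging()?;
    tracing::info!("logging initialised");
    Ok(())
}

The tracer helper above serves a complementary purpose: it creates an inert ref, records the ref's oid on the supplied span, and installs an entity that logs every assert, retract, message, and sync delivered to that ref at TRACE level, which makes it useful for snooping on traffic aimed at a particular ref during debugging.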