Initial commit

This commit is contained in:
Tyler Cloutier
2023-08-01 23:10:55 +02:00
commit 44df6c6e7d
661 changed files with 75064 additions and 0 deletions
+3
View File
@@ -0,0 +1,3 @@
**/target
# we do our own version pinning in the Dockerfile
rust-toolchain.toml
+10
View File
@@ -0,0 +1,10 @@
# Description of Changes
# API
- [ ] This is a breaking change to the module API
- [ ] This is a breaking change to the Client API
*If the API is breaking, please state below what will break*
+93
View File
@@ -0,0 +1,93 @@
# Benchmarks workflow: runs criterion benchmarks on every push to master,
# or on demand (workflow_dispatch) against a specific pull request.
on:
  push:
    branches:
      - master
  workflow_dispatch:
    inputs:
      pr_number:
        description: 'Pull Request Number'
        required: false
        default: ''

name: Benchmarks

env:
  # Needed by `gh api` in the "Set up for PR context" step.
  GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

jobs:
  benchmark:
    name: run benchmarks
    runs-on: self-hosted
    steps:
      # NOTE(review): `github.event.inputs.ref` is referenced here and below,
      # but `workflow_dispatch` only declares a `pr_number` input — so this
      # condition is always falsy and this step can never run. Confirm whether
      # a `ref` input should be declared, or whether the ref should be derived
      # from `pr_number` (e.g. refs/pull/<n>/head).
      - name: Checkout sources for a PR
        if: ${{ github.event.inputs.ref }}
        uses: actions/checkout@v3
        with:
          ref: ${{ github.event.inputs.ref }}
          fetch-depth: 0
      - name: Checkout sources
        if: github.event.inputs.ref == ''
        uses: actions/checkout@v3
        with:
          fetch-depth: 10
      # Resolve the PR's base/head branch names so later steps can compare
      # benchmark results against the base branch.
      - name: Set up for PR context
        if: github.event.inputs.pr_number
        run: |
          echo "PR_NUMBER=${{ github.event.inputs.pr_number }}" >> $GITHUB_ENV
          PR_DATA=$(gh api repos/${{ github.repository }}/pulls/${{ github.event.inputs.pr_number }} --jq '{ baseRefName: .base.ref, headRefName: .head.ref }')
          echo "PR_BASE_REF=$(echo $PR_DATA | jq -r '.baseRefName')" >> $GITHUB_ENV
          echo "PR_HEAD_REF=$(echo $PR_DATA | jq -r '.headRefName')" >> $GITHUB_ENV
      - name: Install stable toolchain
        uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          target: wasm32-unknown-unknown
          override: true
      - name: ⚡ Cache
        uses: actions/cache@v3
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            target/
          key: ${{ runner.os }}-cargo-bench-${{ hashFiles('**/Cargo.lock') }}
      - name: Build
        working-directory: crates/bench/
        run: |
          cargo build --release
      # PR run: compare against the PR's base branch.
      - name: Criterion compare base branch
        if: ${{ env.PR_BASE_REF }}
        uses: clockworklabs/criterion-compare-action@main
        with:
          cwd: "crates/bench"
          branchName: ${{ env.PR_BASE_REF }}
          benchName: modules
      # Push-to-master run: compare against the previous commit.
      - name: Criterion compare previous commit
        if: env.PR_BASE_REF == ''
        uses: clockworklabs/criterion-compare-action@main
        with:
          cwd: "crates/bench"
          branchName: "HEAD~1"
          benchName: modules
      - name: Benchmark Vs Sqlite
        working-directory: crates/bench/
        run: |
          python3 hyper_cmp.py versus > out.report
          cat out.report >> $GITHUB_STEP_SUMMARY
      # Self-hosted runner: clear benchmark database state between runs.
      - name: Clean up
        if: always()
        run: |
          rm -fr /stdb/*
+69
View File
@@ -0,0 +1,69 @@
# CI workflow: smoke tests, test suite, lints, and wasm-binding builds,
# run on every pull request and on pushes to master.
on:
  pull_request:
  push:
    branches:
      - master

name: CI

jobs:
  docker_smoketests:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Start containers
        run: docker compose up -d
      # `-x` excludes tests that are redundant or unsupported in this job.
      - name: Run smoketests
        run: test/run-smoke-tests.sh -x bitcraftmini-pretest zz_docker-restart-repeating-reducer zz_docker-restart-module zz_docker-restart-sql
      - name: Stop containers
        if: always()
        run: docker compose down

  test:
    name: Test Suite
    runs-on: ubuntu-latest
    steps:
      - name: Checkout sources
        uses: actions/checkout@v3
      - uses: dsherret/rust-toolchain-file@v1
      # Tests expect a world-writable /stdb directory for database storage.
      - name: Create /stdb dir
        run: |
          sudo mkdir /stdb
          sudo chmod 777 /stdb
      - name: Run cargo test
        run: cargo test --all --features odb_rocksdb,odb_sled,tracelogging

  lints:
    name: Lints
    runs-on: ubuntu-latest
    steps:
      - name: Checkout sources
        uses: actions/checkout@v3
      - uses: dsherret/rust-toolchain-file@v1
      # Surface rustc/clippy diagnostics as GitHub annotations.
      - run: echo ::add-matcher::.github/workflows/rust_matcher.json
      - name: Run cargo fmt
        run: cargo fmt --all -- --check
      # Use the same feature set as the test job so lint coverage matches.
      - name: Run cargo clippy
        run: cargo clippy --all --features odb_rocksdb,odb_sled,tracelogging -- -D warnings

  wasm_bindings:
    name: Build and test wasm bindings
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: dsherret/rust-toolchain-file@v1
      - run: echo ::add-matcher::.github/workflows/rust_matcher.json
      - name: Build rust-wasm-test
        run: cargo run -p spacetimedb-cli -- build crates/modules/rust-wasm-test
      - name: Run bindgen tests
        run: cargo test -p spacetimedb-cli
+21
View File
@@ -0,0 +1,21 @@
{
"problemMatcher": [
{
"owner": "rust",
"pattern": [
{
"regexp": "^(warning|warn|error)(\\[(.*)\\])?: (.*)$",
"severity": 1,
"message": 4,
"code": 3
},
{
"regexp": "^([\\s->=]*(.*):(\\d*):(\\d*)|.*)$",
"file": 2,
"line": 3,
"column": 4
}
]
}
]
}
+206
View File
@@ -0,0 +1,206 @@
# Created by https://www.toptal.com/developers/gitignore/api/rust,node,visualstudiocode
# Edit at https://www.toptal.com/developers/gitignore?templates=rust,node,visualstudiocode
flamegraphs/*.svg
flamegraphs/flamegraph.folded
### Node ###
# Logs
packages/logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
.pnpm-debug.log*
# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
*.lcov
# nyc test coverage
.nyc_output
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt
# Bower dependency directory (https://bower.io/)
bower_components
# node-waf configuration
.lock-wscript
# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release
# Dependency directories
node_modules/
jspm_packages/
# Snowpack dependency directory (https://snowpack.dev/)
web_modules/
# TypeScript cache
*.tsbuildinfo
# Optional npm cache directory
.npm
# Optional eslint cache
.eslintcache
# Optional stylelint cache
.stylelintcache
# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/
# Optional REPL history
.node_repl_history
# Output of 'npm pack'
*.tgz
# Yarn Integrity file
.yarn-integrity
# dotenv environment variable files
.env
.env.development.local
.env.test.local
.env.production.local
.env.local
# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache
# Next.js build output
.next
out
# Nuxt.js build / generate output
.nuxt
dist
# Gatsby files
.cache/
# Comment in the public line in if your project uses Gatsby and not Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public
# vuepress build output
.vuepress/dist
# vuepress v2.x temp and cache directory
.temp
# Docusaurus cache and generated files
.docusaurus
# Serverless directories
.serverless/
# FuseBox cache
.fusebox/
# DynamoDB Local files
.dynamodb/
# TernJS port file
.tern-port
# Stores VSCode versions used for testing VSCode extensions
.vscode-test
# yarn v2
.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.pnp.*
### Node Patch ###
# Serverless Webpack directories
.webpack/
# Optional stylelint cache
# SvelteKit build / generate output
.svelte-kit
### Rust ###
# Generated by Cargo
# will have compiled files and executables
debug/
target/
# These are backup files generated by rustfmt
**/*.rs.bk
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb
### VisualStudioCode ###
.vscode/*
!.vscode/settings.json
!.vscode/tasks.json
!.vscode/launch.json
!.vscode/extensions.json
!.vscode/*.code-snippets
# Local History for Visual Studio Code
.history/
# Built Visual Studio Code Extensions
*.vsix
### VisualStudioCode Patch ###
# Ignore all local history of files
.history
.ionide
# Support for Project snippet scope
# End of https://www.toptal.com/developers/gitignore/api/rust,node,visualstudiocode
# Added by cargo
#
# already existing elements were commented out
/target
#Cargo.lock
## JetBrains
.idea/
/protobuf
cs-src/
crates/bench/spacetime.svg
crates/bench/sqlite.svg
# benchmark files
out.json
old.json
new.json
.history.txt
# Keys
*.pem
+1
View File
@@ -0,0 +1 @@
max_width = 120
Generated
+5727
View File
File diff suppressed because it is too large Load Diff
+48
View File
@@ -0,0 +1,48 @@
[workspace]
members = [
"crates/standalone",
"crates/lib",
"crates/core",
"crates/bindings-sys",
"crates/bindings",
"crates/bench",
"crates/bindings-macro",
"crates/cli",
"crates/sats",
"crates/testing",
"crates/vm",
"crates/replay",
"crates/client-api",
"crates/client-sdk",
"crates/client-api-messages",
"crates/sqltest",
"modules/rust-wasm-test",
"modules/benchmarks",
"modules/spacetimedb_quickstart",
]
default-members = ["crates/cli"]
[profile.release]
opt-level = 3
debug = true
debug-assertions = false
overflow-checks = false
lto = true
panic = 'unwind'
incremental = false
codegen-units = 16
rpath = false
[profile.dev]
opt-level = 0
debug = true
debug-assertions = true
overflow-checks = true
lto = false
panic = 'unwind'
incremental = true
codegen-units = 256
rpath = false
[profile.bench]
debug = true
+731
View File
@@ -0,0 +1,731 @@
SPACETIMEDB BUSINESS SOURCE LICENSE AGREEMENT
Business Source License 1.1
Parameters
Licensor: Clockwork Laboratories, Inc.
Licensed Work: SpacetimeDB 0.6.0
The Licensed Work is
(c) 2023 Clockwork Laboratories, Inc.
Additional Use Grant: You may make use of the Licensed Work provided your
application or service uses the Licensed Work with no
more than one SpacetimeDB instance in production and
provided that you do not use the Licensed Work for a
Database Service.
A “Database Service” is a commercial offering that
allows third parties (other than your employees and
contractors) to access the functionality of the
Licensed Work by creating tables whose schemas are
controlled by such third parties.
Change Date: 2028-08-03
Change License: GNU Affero General Public License v3.0 with a linking
exception
For information about alternative licensing arrangements for the Software,
please visit: https://spacetimedb.com
Notice
The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
-----------------------------------------------------------------------------
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
MariaDB hereby grants you permission to use this License's text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.
Covenants of Licensor
In consideration of the right to use this License's text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:
1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.
2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.
3. To specify a Change Date.
4. Not to modify this License in any other way.
-----------------------------------------------------------------------------
Copyright (C) 2023 Clockwork Laboratories, Inc.
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU Affero General Public License, version 3, as published
by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
details.
You should have received a copy of the GNU Affero General Public License
along with this program; if not, see <https://www.gnu.org/licenses>.
Additional permission under GNU GPL version 3 section 7
If you modify this Program, or any covered work, by linking or combining it
with SpacetimeDB (or a modified version of that library), containing parts
covered by the terms of the AGPL v3.0, the licensors of this Program grant
you additional permission to convey the resulting work.
Additional permission under GNU AGPL version 3 section 13
If you modify this Program, or any covered work, by linking or combining it
with SpacetimeDB (or a modified version of that library), containing parts
covered by the terms of the AGPL v3.0, the licensors of this Program grant
you additional permission that, notwithstanding any other provision of this
License, you need not prominently offer all users interacting with your
modified version remotely through a computer network an opportunity to
receive the Corresponding Source of your version from a network server at no
charge, if your version supports such interaction. This permission does not
waive or modify any other obligations or terms of the AGPL v3.0, except for
the specific requirement set forth in section 13.
A copy of the AGPL v3.0 license is reproduced below.
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright © 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies of this license
document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed to take
away your freedom to share and change the works. By contrast, our General
Public Licenses are intended to guarantee your freedom to share and change
all versions of a program--to make sure it remains free software for all its
users.
When we speak of free software, we are referring to freedom, not price. Our
General Public Licenses are designed to make sure that you have the freedom
to distribute copies of free software (and charge for them if you wish), that
you receive source code or can get it if you want it, that you can change the
software or use pieces of it in new free programs, and that you know you can
do these things.
Developers that use our General Public Licenses protect your rights with two
steps: (1) assert copyright on the software, and (2) offer you this License
which gives you legal permission to copy, distribute and/or modify the
software.
A secondary benefit of defending all users' freedom is that improvements made
in alternate versions of the program, if they receive widespread use, become
available for other developers to incorporate. Many developers of free
software are heartened and encouraged by the resulting cooperation. However,
in the case of software used on network servers, this result may fail to come
about. The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its source
code to the public.
The GNU Affero General Public License is designed specifically to ensure
that, in such cases, the modified source code becomes available to the
community. It requires the operator of a network server to provide the source
code of the modified version running there to the users of that server.
Therefore, public use of a modified version, on a publicly accessible server,
gives the public access to the source code of the modified version.
An older license, called the Affero General Public License and published by
Affero, was designed to accomplish similar goals. This is a different
license, not a version of the Affero GPL, but Affero has released a new
version of the Affero GPL which permits relicensing under this license.
The precise terms and conditions for copying, distribution and modification
follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this License.
Each licensee is addressed as "you". "Licensees" and "recipients" may be
individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work in a
fashion requiring copyright permission, other than the making of an exact
copy. The resulting work is called a "modified version" of the earlier work
or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based on the
Program.
To "propagate" a work means to do anything with it that, without permission,
would make you directly or secondarily liable for infringement under
applicable copyright law, except executing it on a computer or modifying a
private copy. Propagation includes copying, distribution (with or without
modification), making available to the public, and in some countries other
activities as well.
To "convey" a work means any kind of propagation that enables other parties
to make or receive copies. Mere interaction with a user through a computer
network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices" to the
extent that it includes a convenient and prominently visible feature that (1)
displays an appropriate copyright notice, and (2) tells the user that there
is no warranty for the work (except to the extent that warranties are
provided), that licensees may convey the work under this License, and how to
view a copy of this License. If the interface presents a list of user
commands or options, such as a menu, a prominent item in the list meets this
criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work for making
modifications to it. "Object code" means any non-source form of a work.
A "Standard Interface" means an interface that either is an official standard
defined by a recognized standards body, or, in the case of interfaces
specified for a particular programming language, one that is widely used
among developers working in that language.
The "System Libraries" of an executable work include anything, other than the
work as a whole, that (a) is included in the normal form of packaging a Major
Component, but which is not part of that Major Component, and (b) serves only
to enable use of the work with that Major Component, or to implement a
Standard Interface for which an implementation is available to the public in
source code form. A "Major Component", in this context, means a major
essential component (kernel, window system, and so on) of the specific
operating system (if any) on which the executable work runs, or a compiler
used to produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all the
source code needed to generate, install, and (for an executable work) run the
object code and to modify the work, including scripts to control those
activities. However, it does not include the work's System Libraries, or
general-purpose tools or generally available free programs which are used
unmodified in performing those activities but which are not part of the work.
For example, Corresponding Source includes interface definition files
associated with source files for the work, and the source code for shared
libraries and dynamically linked subprograms that the work is specifically
designed to require, such as by intimate data communication or control flow
between those subprograms and other parts of the work.
The Corresponding Source need not include anything that users can regenerate
automatically from other parts of the Corresponding Source.
The Corresponding Source for a work in source code form is that same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of copyright
on the Program, and are irrevocable provided the stated conditions are met.
This License explicitly affirms your unlimited permission to run the
unmodified Program. The output from running a covered work is covered by this
License only if the output, given its content, constitutes a covered work.
This License acknowledges your rights of fair use or other equivalent, as
provided by copyright law.
You may make, run and propagate covered works that you do not convey, without
conditions so long as your license otherwise remains in force. You may convey
covered works to others for the sole purpose of having them make
modifications exclusively for you, or provide you with facilities for running
those works, provided that you comply with the terms of this License in
conveying all material for which you do not control copyright. Those thus
making or running the covered works for you must do so exclusively on your
behalf, under your direction and control, on terms that prohibit them from
making any copies of your copyrighted material outside their relationship
with you.
Conveying under any other circumstances is permitted solely under the
conditions stated below. Sublicensing is not allowed; section 10 makes it
unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological measure
under any applicable law fulfilling obligations under article 11 of the WIPO
copyright treaty adopted on 20 December 1996, or similar laws prohibiting or
restricting circumvention of such measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention is
effected by exercising rights under this License with respect to the covered
work, and you disclaim any intention to limit operation or modification of
the work as a means of enforcing, against the work's users, your or third
parties' legal rights to forbid circumvention of technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you receive
it, in any medium, provided that you conspicuously and appropriately publish
on each copy an appropriate copyright notice; keep intact all notices stating
that this License and any non-permissive terms added in accord with section 7
apply to the code; keep intact all notices of the absence of any warranty;
and give all recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey, and you
may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to produce
it from the Program, in the form of source code under the terms of section 4,
provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified it, and
giving a relevant date.
b) The work must carry prominent notices stating that it is released under
this License and any conditions added under section 7. This requirement
modifies the requirement in section 4 to "keep intact all notices".
c) You must license the entire work, as a whole, under this License to anyone
who comes into possession of a copy. This License will therefore apply, along
with any applicable section 7 additional terms, to the whole of the work, and
all its parts, regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not invalidate
such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display Appropriate
Legal Notices; however, if the Program has interactive interfaces that do not
display Appropriate Legal Notices, your work need not make them do so.
A compilation of a covered work with other separate and independent works,
which are not by their nature extensions of the covered work, and which are
not combined with it such as to form a larger program, in or on a volume of a
storage or distribution medium, is called an "aggregate" if the compilation
and its resulting copyright are not used to limit the access or legal rights
of the compilation's users beyond what the individual works permit. Inclusion
of a covered work in an aggregate does not cause this License to apply to the
other parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms of sections
4 and 5, provided that you also convey the machine-readable Corresponding
Source under the terms of this License, in one of these ways:
a) Convey the object code in, or embodied in, a physical product (including a
physical distribution medium), accompanied by the Corresponding Source fixed
on a durable physical medium customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product (including a
physical distribution medium), accompanied by a written offer, valid for at
least three years and valid for as long as you offer spare parts or customer
support for that product model, to give anyone who possesses the object code
either (1) a copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical medium
customarily used for software interchange, for a price no more than your
reasonable cost of physically performing this conveying of source, or (2)
access to copy the Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the written
offer to provide the Corresponding Source. This alternative is allowed only
occasionally and noncommercially, and only if you received the object code
with such an offer, in accord with subsection 6b.
d) Convey the object code by offering access from a designated place (gratis
or for a charge), and offer equivalent access to the Corresponding Source in
the same way through the same place at no further charge. You need not
require recipients to copy the Corresponding Source along with the object
code. If the place to copy the object code is a network server, the
Corresponding Source may be on a different server (operated by you or a third
party) that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the Corresponding
Source, you remain obligated to ensure that it is available for as long as
needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided you
inform other peers where the object code and Corresponding Source of the work
are being offered to the general public at no charge under subsection 6d.
A separable portion of the object code, whose source code is excluded from
the Corresponding Source as a System Library, need not be included in
conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any tangible
personal property which is normally used for personal, family, or household
purposes, or (2) anything designed or sold for incorporation into a dwelling.
In determining whether a product is a consumer product, doubtful cases shall
be resolved in favor of coverage. For a particular product received by a
particular user, "normally used" refers to a typical or common use of that
class of product, regardless of the status of the particular user or of the
way in which the particular user actually uses, or expects or is expected to
use, the product. A product is a consumer product regardless of whether the
product has substantial commercial, industrial or non-consumer uses, unless
such uses represent the only significant mode of use of the product.
"Installation Information" for a User Product means any methods, procedures,
authorization keys, or other information required to install and execute
modified versions of a covered work in that User Product from a modified
version of its Corresponding Source. The information must suffice to ensure
that the continued functioning of the modified object code is in no case
prevented or interfered with solely because modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as part of
a transaction in which the right of possession and use of the User Product is
transferred to the recipient in perpetuity or for a fixed term (regardless of
how the transaction is characterized), the Corresponding Source conveyed
under this section must be accompanied by the Installation Information. But
this requirement does not apply if neither you nor any third party retains
the ability to install modified object code on the User Product (for example,
the work has been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates for
a work that has been modified or installed by the recipient, or for the User
Product in which it has been modified or installed. Access to a network may
be denied when the modification itself materially and adversely affects the
operation of the network or violates the rules and protocols for
communication across the network.
Corresponding Source conveyed, and Installation Information provided, in
accord with this section must be in a format that is publicly documented (and
with an implementation available to the public in source code form), and must
require no special password or key for unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this License
by making exceptions from one or more of its conditions. Additional
permissions that are applicable to the entire Program shall be treated as
though they were included in this License, to the extent that they are valid
under applicable law. If additional permissions apply only to part of the
Program, that part may be used separately under those permissions, but the
entire Program remains governed by this License without regard to the
additional permissions.
When you convey a copy of a covered work, you may at your option remove any
additional permissions from that copy, or from any part of it. (Additional
permissions may be written to require their own removal in certain cases when
you modify the work.) You may place additional permissions on material, added
by you to a covered work, for which you have or can give appropriate
copyright permission.
Notwithstanding any other provision of this License, for material you add to
a covered work, you may (if authorized by the copyright holders of that
material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the terms of
sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or author
attributions in that material or in the Appropriate Legal Notices displayed
by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or requiring
that modified versions of such material be marked in reasonable ways as
different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or authors
of the material; or
e) Declining to grant rights under trademark law for use of some trade names,
trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that material by
anyone who conveys the material (or modified versions of it) with contractual
assumptions of liability to the recipient, for any liability that these
contractual assumptions directly impose on those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is governed
by this License along with a term that is a further restriction, you may
remove that term. If a license document contains a further restriction but
permits relicensing or conveying under this License, you may add to a covered
work material governed by the terms of that license document, provided that
the further restriction does not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you must
place, in the relevant source files, a statement of the additional terms that
apply to those files, or a notice indicating where to find the applicable
terms.
Additional terms, permissive or non-permissive, may be stated in the form of
a separately written license, or stated as exceptions; the above requirements
apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly provided
under this License. Any attempt otherwise to propagate or modify it is void,
and will automatically terminate your rights under this License (including
any patent licenses granted under the third paragraph of section 11).
However, if you cease all violation of this License, then your license from a
particular copyright holder is reinstated (a) provisionally, unless and until
the copyright holder explicitly and finally terminates your license, and (b)
permanently, if the copyright holder fails to notify you of the violation by
some reasonable means prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is reinstated
permanently if the copyright holder notifies you of the violation by some
reasonable means, this is the first time you have received notice of
violation of this License (for any work) from that copyright holder, and you
cure the violation prior to 30 days after your receipt of the notice.
Termination of your rights under this section does not terminate the licenses
of parties who have received copies or rights from you under this License. If
your rights have been terminated and not permanently reinstated, you do not
qualify to receive new licenses for the same material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or run a copy
of the Program. Ancillary propagation of a covered work occurring solely as a
consequence of using peer-to-peer transmission to receive a copy likewise
does not require acceptance. However, nothing other than this License grants
you permission to propagate or modify any covered work. These actions
infringe copyright if you do not accept this License. Therefore, by modifying
or propagating a covered work, you indicate your acceptance of this License
to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically receives a
license from the original licensors, to run, modify and propagate that work,
subject to this License. You are not responsible for enforcing compliance by
third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered work
results from an entity transaction, each party to that transaction who
receives a copy of the work also receives whatever licenses to the work the
party's predecessor in interest had or could give under the previous
paragraph, plus a right to possession of the Corresponding Source of the work
from the predecessor in interest, if the predecessor has it or can get it
with reasonable efforts.
You may not impose any further restrictions on the exercise of the rights
granted or affirmed under this License. For example, you may not impose a
license fee, royalty, or other charge for exercise of rights granted under
this License, and you may not initiate litigation (including a cross-claim or
counterclaim in a lawsuit) alleging that any patent claim is infringed by
making, using, selling, offering for sale, or importing the Program or any
portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this License
of the Program or a work on which the Program is based. The work thus
licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims owned or
controlled by the contributor, whether already acquired or hereafter
acquired, that would be infringed by some manner, permitted by this License,
of making, using, or selling its contributor version, but do not include
claims that would be infringed only as a consequence of further modification
of the contributor version. For purposes of this definition, "control"
includes the right to grant patent sublicenses in a manner consistent with
the requirements of this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free patent
license under the contributor's essential patent claims, to make, use, sell,
offer for sale, import and otherwise run, modify and propagate the contents
of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent (such
as an express permission to practice a patent or covenant not to sue for
patent infringement). To "grant" such a patent license to a party means to
make such an agreement or commitment not to enforce a patent against the
party.
If you convey a covered work, knowingly relying on a patent license, and the
Corresponding Source of the work is not available for anyone to copy, free of
charge and under the terms of this License, through a publicly available
network server or other readily accessible means, then you must either (1)
cause the Corresponding Source to be so available, or (2) arrange to deprive
yourself of the benefit of the patent license for this particular work, or
(3) arrange, in a manner consistent with the requirements of this License, to
extend the patent license to downstream recipients. "Knowingly relying" means
you have actual knowledge that, but for the patent license, your conveying
the covered work in a country, or your recipient's use of the covered work in
a country, would infringe one or more identifiable patents in that country
that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or arrangement,
you convey, or propagate by procuring conveyance of, a covered work, and
grant a patent license to some of the parties receiving the covered work
authorizing them to use, propagate, modify or convey a specific copy of the
covered work, then the patent license you grant is automatically extended to
all recipients of the covered work and works based on it.
A patent license is "discriminatory" if it does not include within the scope
of its coverage, prohibits the exercise of, or is conditioned on the
non-exercise of one or more of the rights that are specifically granted under
this License. You may not convey a covered work if you are a party to an
arrangement with a third party that is in the business of distributing
software, under which you make payment to the third party based on the extent
of your activity of conveying the work, and under which the third party
grants, to any of the parties who would receive the covered work from you, a
discriminatory patent license (a) in connection with copies of the covered
work conveyed by you (or copies made from those copies), or (b) primarily for
and in connection with specific products or compilations that contain the
covered work, unless you entered into that arrangement, or that patent
license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting any
implied license or other defenses to infringement that may otherwise be
available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not excuse
you from the conditions of this License. If you cannot convey a covered work
so as to satisfy simultaneously your obligations under this License and any
other pertinent obligations, then as a consequence you may not convey it at
all. For example, if you agree to terms that obligate you to collect a
royalty for further conveying from those to whom you convey the Program, the
only way you could satisfy both those terms and this License would be to
refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users interacting
with it remotely through a computer network (if your version supports such
interaction) an opportunity to receive the Corresponding Source of your
version by providing access to the Corresponding Source from a network server
at no charge, through some standard or customary means of facilitating
copying of software. This Corresponding Source shall include the
Corresponding Source for any work covered by version 3 of the GNU General
Public License that is incorporated pursuant to the following paragraph.
Notwithstanding any other provision of this License, you have permission to
link or combine any covered work with a work licensed under version 3 of the
GNU General Public License into a single combined work, and to convey the
resulting work. The terms of this License will continue to apply to the part
which is the covered work, but the work with which it is combined will remain
governed by version 3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of the
GNU Affero General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies that a certain numbered version of the GNU Affero General Public
License "or any later version" applies to it, you have the option of
following the terms and conditions either of that numbered version or of any
later version published by the Free Software Foundation. If the Program does
not specify a version number of the GNU Affero General Public License, you
may choose any version ever published by the Free Software Foundation.
If the Program specifies that a proxy can decide which future versions of the
GNU Affero General Public License can be used, that proxy's public statement
of acceptance of a version permanently authorizes you to choose that version
for the Program.
Later license versions may give you additional or different permissions.
However, no additional obligations are imposed on any author or copyright
holder as a result of your choosing to follow a later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE
LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND,
EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.
SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY
SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL
ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE
PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE
OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR
DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR
A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH
HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided above
cannot be given local legal effect according to their terms, reviewing courts
shall apply local law that most closely approximates an absolute waiver of
all civil liability in connection with the Program, unless a warranty or
assumption of liability accompanies a copy of the Program in return for a
fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest possible
use to the public, the best way to achieve this is to make it free software
which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest to attach
them to the start of each source file to most effectively state the exclusion
of warranty; and each file should have at least the "copyright" line and a
pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer network,
you should also make sure that it provides a way for users to get its source.
For example, if your program is a web application, its interface could
display a "Source" link that leads users to an archive of the code. There are
many ways you could offer source, and different solutions will be better for
different programs; see section 13 for the specific requirements.
You should also get your employer (if you work as a programmer) or school, if
any, to sign a "copyright disclaimer" for the program, if necessary. For more
information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.
+34
View File
@@ -0,0 +1,34 @@
# Crate manifest for the SpacetimeDB benchmarking suite.
[package]
name = "spacetimedb-bench"
version = "0.4.1"
edition = "2021"
license-file = "LICENSE"
description = "Bench library/utility for SpacetimeDB"

# Criterion-based benches; `harness = false` because criterion supplies
# its own main() instead of the libtest bench harness.
[[bench]]
name = "db"
harness = false

[[bench]]
name = "modules"
harness = false

# Don't run libtest benchmarks against the library target itself.
[lib]
bench = false

[dependencies]
# Sibling workspace crates under benchmark.
spacetimedb-lib = { path = "../lib" }
spacetimedb-core = { path = "../core" }
spacetimedb-standalone = { path = "../standalone" }
spacetimedb-client-api = { path = "../client-api" }
spacetimedb-testing = { path = "../testing" }
# CLI argument parsing for the bench binary.
clap = { version = "4.2.4", features = ["derive"] }
# SQLite baseline engine to compare against (bundled so no system lib is needed).
rusqlite = {version = "0.29.0", features = ["bundled", "column_decltype"]}
# Benchmark framework; async features are needed for the tokio-driven module benches.
criterion = { version = "0.4.0", features = ["async", "async_tokio", "html_reports"] }
tempdir = "0.3.7"
rand = { version = "0.8.5", features = [] }
tokio = { version = "1.25", features = ["full"]}
serde_json = "1.0"
anyhow = "1.0"
# Used to express payload sizes like "64KB" in the module benches.
byte-unit = "4.0.18"
View File
+97
View File
@@ -0,0 +1,97 @@
# Benchmarking suite for SpacetimeDB
## Install tools
- [hyperfine](https://github.com/sharkdp/hyperfine)
Note: These are just some examples. Go to the repo page for full install instructions.
```bash
# Ubuntu
wget https://github.com/sharkdp/hyperfine/releases/download/v1.15.0/hyperfine_1.15.0_amd64.deb
sudo dpkg -i hyperfine_1.15.0_amd64.deb
# macOS
brew install hyperfine
# Windows
conda install -c conda-forge hyperfine
# Any
cargo install hyperfine
```
- [critcmp](https://github.com/BurntSushi/critcmp)
```bash
cargo install critcmp
```
### OSX Only
- [cargo-instrument](https://github.com/cmyr/cargo-instruments)
```bash
brew install cargo-instruments
```
## Run
List the available benchmarks:
```bash
# From root
# cd SpacetimeDB/crates/bench
cargo run -- --help
```
There are two engines to test: `spacetimedb` & `sqlite`.
## Benches with Criterion
Run normally with cargo:
```bash
#cargo bench -- NAME_OF_COMMAND
cargo bench -- insert
```
To compare results across benches, use `critcmp`:
```bash
# Get the list of baselines you can compare
critcmp --baselines
# Compare current with older
critcmp base new
```
## Hyperfine
You can run benchmarks using the `hyperfine.sh` script; it tests against both engines:
```bash
# ./hyperfine.sh NAME_OF_COMMAND
./hyperfine.sh insert
```
## Flamegraph
You can generate flamegraphs using the `flamegraph.sh` script; it generates one for each engine:
```bash
# ./flamegraph.sh NAME_OF_COMMAND
./flamegraph.sh insert
# Generated files
open spacetime.svg
open sqlite.svg
```
## Instruments
You can run benchmarks using the `instruments.sh` script. This checks against only one engine:
```bash
# ./instruments.sh TEMPLATE ENGINE NAME_OF_COMMAND
./instruments.sh time sqlite insert
```
Where `TEMPLATE` is one of the templates listed by
```bash
cargo instruments --list-templates
```
+65
View File
@@ -0,0 +1,65 @@
//! Benchmarks for evaluating how we fare against sqlite
use criterion::measurement::WallTime;
use criterion::{criterion_group, criterion_main, BenchmarkGroup, BenchmarkId, Criterion, SamplingMode, Throughput};
use spacetimedb_bench::prelude::*;
/// Creates a Criterion benchmark group pre-configured for the DB benches:
/// linear sampling, a sample size tied to the connection-pool size, and
/// throughput reported in elements per run.
fn build_group<'a>(c: &'a mut Criterion, named: &str, run: Runs) -> BenchmarkGroup<'a, WallTime> {
    let mut bench_group = c.benchmark_group(named);
    bench_group.sampling_mode(SamplingMode::Linear);
    bench_group.sample_size(DB_POOL as usize);
    bench_group.throughput(Throughput::Elements(run as u64));
    bench_group
}
/// Benchmarks inserting rows with one transaction per row, for both engines.
fn bench_insert_tx_per_row(c: &mut Criterion) {
    let runs = Runs::Tiny;
    let mut g = build_group(c, "insert_row", runs);

    // SQLite baseline.
    g.bench_function(BenchmarkId::new(SQLITE, 1), |b| {
        let mut pool = Pool::new(false).unwrap();
        b.iter(|| sqlite::insert_tx_per_row(&mut pool, runs).unwrap())
    });
    // SpacetimeDB under the same workload.
    g.bench_function(BenchmarkId::new(SPACETIME, 1), |b| {
        let mut pool = Pool::new(false).unwrap();
        b.iter(|| spacetime::insert_tx_per_row(&mut pool, runs).unwrap())
    });

    g.finish();
}
/// Benchmarks bulk row insertion inside a single transaction, for both engines.
fn bench_insert_tx(c: &mut Criterion) {
    let runs = Runs::Small;
    let mut g = build_group(c, "insert_bulk_rows", runs);

    // SQLite baseline.
    g.bench_function(BenchmarkId::new(SQLITE, 2), |b| {
        let mut pool = Pool::new(true).unwrap();
        b.iter(|| sqlite::insert_tx(&mut pool, runs))
    });
    // SpacetimeDB under the same workload.
    g.bench_function(BenchmarkId::new(SPACETIME, 2), |b| {
        let mut pool = Pool::new(true).unwrap();
        b.iter(|| spacetime::insert_tx(&mut pool, runs))
    });

    g.finish();
}
/// Benchmarks a SELECT over a column without an index, for both engines.
fn bench_select_no_index(c: &mut Criterion) {
    let runs = Runs::Tiny;
    let mut g = build_group(c, "select_index_no", runs);

    // SQLite baseline.
    g.bench_function(BenchmarkId::new(SQLITE, 3), |b| {
        let mut pool = Pool::new(true).unwrap();
        b.iter(|| sqlite::select_no_index(&mut pool, runs).unwrap())
    });
    // SpacetimeDB under the same workload.
    g.bench_function(BenchmarkId::new(SPACETIME, 3), |b| {
        let mut pool = Pool::new(true).unwrap();
        b.iter(|| spacetime::select_no_index(&mut pool, runs).unwrap())
    });

    g.finish();
}
// NOTE: keep this list of benchmarks mirrored in `main.rs`.
criterion_group!(benches, bench_insert_tx_per_row, bench_insert_tx, bench_select_no_index);
criterion_main!(benches);
+245
View File
@@ -0,0 +1,245 @@
use std::sync::Arc;
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use spacetimedb_testing::modules::{compile, with_module};
use tokio::sync::Mutex;
/// Criterion entry point: benchmarks reducer calls against the compiled
/// `benchmarks` wasm module via the `spacetimedb_testing` harness.
///
/// Each `with_module` call appears to set up a fresh module/runtime pair,
/// so the groups don't share database state — except where rows are
/// deliberately pre-seeded inside a single `with_module` scope (see the
/// "with existing records" group). TODO confirm against `with_module`'s
/// implementation.
fn criterion_benchmark(c: &mut Criterion) {
    // Compile the wasm module once up front; the `with_module` calls below
    // reuse the compiled artifact.
    compile("benchmarks");

    with_module("benchmarks", |runtime, module| {
        // Baseline: pure reducer-dispatch overhead — no args, no table work.
        c.bench_function("empty reducer", |b| {
            b.to_async(runtime).iter(|| async move {
                module.call_reducer("empty", "[]".into()).await.unwrap();
            });
        });
    });

    with_module("benchmarks", |runtime, module| {
        // Shared counter so every iteration inserts a row with a distinct name.
        let count = &Arc::new(Mutex::new(0usize));
        c.bench_function("single insert", |b| {
            b.to_async(runtime).iter(|| async move {
                let count_clone = count.clone();
                // Lock is held across the reducer call, serializing iterations.
                let mut count_locked = count_clone.lock().await;
                let args = format!(r#"["name {}"]"#, *count_locked);
                module.call_reducer("single_insert", args).await.unwrap();
                *count_locked += 1;
            });
        });
    });

    with_module("benchmarks", |runtime, module| {
        // Bulk inserts of increasing batch sizes.
        let mut group = c.benchmark_group("multi insert");
        let offset = &Arc::new(Mutex::new(0usize));
        for size in [10, 50, 100, 500, 1000].iter() {
            group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| {
                b.to_async(runtime).iter(|| async move {
                    let offset_clone = offset.clone();
                    let offset_locked = offset_clone.lock().await;
                    let args = format!(r#"[{}, {}]"#, size, *offset_locked);
                    // Release the lock before the await so iterations overlap.
                    // NOTE(review): `offset` is read but never advanced here,
                    // so every batch uses offset 0 — confirm whether that is
                    // intentional.
                    drop(offset_locked);
                    module.call_reducer("multi_insert", args).await.unwrap();
                });
            });
        }
        group.finish();
    });

    with_module("benchmarks", |runtime, module| {
        // Measures single-insert latency as the table grows: seed 100k rows
        // per step (outside the timed region), then bench an insert.
        let mut group = c.benchmark_group("with existing records");
        let mut total = 0;
        let record_id = &Arc::new(Mutex::new(0usize));
        for i in 0..10 {
            let count = 100_000;
            let offset = i * count;
            // Seeding happens via block_on, outside the bench closure, so it
            // is not included in the measured time.
            runtime.block_on(async {
                let args = format!(r#"[{}, {}]"#, count, offset);
                module.call_reducer("multi_insert", args).await.unwrap();
            });
            total += count;
            group.bench_with_input(BenchmarkId::from_parameter(total), &total, |b, _| {
                b.to_async(runtime).iter(|| async {
                    let record_id_clone = record_id.clone();
                    let mut record_id_locked = record_id_clone.lock().await;
                    let args = format!(r#"["name {}"]"#, *record_id_locked);
                    *record_id_locked += 1;
                    // Drop before awaiting the reducer call.
                    drop(record_id_locked);
                    module.call_reducer("single_insert", args).await.unwrap();
                });
            });
        }
        group.finish();
        // As we now have a lot of records in the DB, we can check iterator
        // performance over the full table.
        c.bench_function("iterator/1_000_000 rows", |b| {
            b.to_async(runtime).iter(|| async move {
                module.call_reducer("person_iterator", "[]".to_string()).await.unwrap();
            });
        });
    });

    with_module("benchmarks", |runtime, module| {
        // TODO: when bigger params are merged this should be changed
        // maybe even a group with different sizes
        let size = byte_unit::Byte::from_str("64KB").unwrap().get_bytes() as usize;
        let record_id = &Arc::new(Mutex::new(0usize));
        // 4 bytes are reserved — presumably for JSON quoting/suffix overhead;
        // TODO confirm.
        let name = "0".repeat(size - 4);
        c.bench_function("large input", |b| {
            b.to_async(runtime).iter(|| async {
                let record_id_clone = record_id.clone();
                let mut record_id_locked = record_id_clone.lock().await;
                // Unique suffix per iteration so each insert is distinct.
                let args = format!(r#"["{}{}"]"#, &name, record_id_locked);
                *record_id_locked += 1;
                drop(record_id_locked);
                module.call_reducer("single_insert", args).await.unwrap();
            });
        });
    });

    with_module("benchmarks", |runtime, module| {
        // TODO: when bigger params are merged this should be changed
        // maybe even a group with different sizes
        let size = byte_unit::Byte::from_str("64KB").unwrap().get_bytes() as usize;
        let name = &"0".repeat(size - 4);
        let record_id = &Arc::new(Mutex::new(0usize));
        c.bench_function("multiple large arguments", |b| {
            b.to_async(runtime).iter(|| async {
                // TODO: I'm not sue how expensive this might be. I plan to add
                // a helper that deals with preparing the data before hand, but
                // for now this should be OK
                let record_id_clone = record_id.clone();
                let mut record_id_locked = record_id_clone.lock().await;
                // Build a JSON array of 32 copies of the large string, each
                // with a unique counter suffix.
                let args: String = vec![name; 32]
                    .iter()
                    .map(|s| format!("\"{}{}\"", s, *record_id_locked))
                    .collect::<Vec<String>>()
                    .join(", ");
                let args = format!("[{args}]");
                *record_id_locked += 1;
                drop(record_id_locked);
                module.call_reducer("a_lot_of_args", args).await.unwrap();
            });
        });
    });

    with_module("benchmarks", |runtime, module| {
        // Filter by a unique column over randomly-ordered rows.
        let mut group = c.benchmark_group("filter unique random");
        let sizes: [u64; 3] = [100, 1_000, 10_000];
        const SEED: u64 = 23;
        for size in sizes.iter() {
            // Set up the table outside the bench function.
            let args = format!("[{SEED}, {size}]");
            runtime.block_on(async {
                module
                    .call_reducer("create_random_unique_locations", args)
                    .await
                    .unwrap();
            });
            group.bench_function(BenchmarkId::from_parameter(size), |b| {
                b.to_async(runtime).iter(|| async {
                    let args = format!("[{SEED}]");
                    module.call_reducer("find_unique_location", args).await.unwrap();
                });
            });
        }
        group.finish();
    });

    with_module("benchmarks", |runtime, module| {
        // Filter by a unique column over sequentially-inserted rows; each size
        // uses a disjoint id range so the tables don't collide.
        let mut group = c.benchmark_group("filter unique sequential");
        let sizes: [u64; 4] = [100, 1_000, 10_000, 100_000];
        for (i, size) in sizes.iter().enumerate() {
            // Set up the table outside the bench function.
            let start: u64 = 1_000_000_000 * (i as u64);
            const SEED: u64 = 23;
            let args = format!("[{SEED}, {start}, {size}]");
            runtime.block_on(async {
                module
                    .call_reducer("create_sequential_unique_locations", args)
                    .await
                    .unwrap();
            });
            group.bench_function(BenchmarkId::from_parameter(size), |b| {
                b.to_async(runtime).iter(|| async {
                    // Look up the last id inserted for this size.
                    let last = start + size - 1;
                    let args = format!("[{last}]");
                    module.call_reducer("find_unique_location", args).await.unwrap();
                });
            });
        }
        group.finish();
    });

    with_module("benchmarks", |runtime, module| {
        // Same as "filter unique random" but on a non-unique column.
        let mut group = c.benchmark_group("filter nonunique random");
        let sizes: [u64; 3] = [100, 1_000, 10_000];
        const SEED: u64 = 23;
        for size in sizes.iter() {
            // Set up the table outside the bench function.
            let args = format!("[{SEED}, {size}]");
            runtime.block_on(async {
                module
                    .call_reducer("create_random_nonunique_locations", args)
                    .await
                    .unwrap();
            });
            group.bench_function(BenchmarkId::from_parameter(size), |b| {
                b.to_async(runtime).iter(|| async {
                    let args = format!("[{SEED}]");
                    module.call_reducer("find_nonunique_location", args).await.unwrap();
                });
            });
        }
        group.finish();
    });

    with_module("benchmarks", |runtime, module| {
        // Same as "filter unique sequential" but on a non-unique column.
        let mut group = c.benchmark_group("filter nonunique sequential");
        let sizes: [u64; 4] = [100, 1_000, 10_000, 100_000];
        for (i, size) in sizes.iter().enumerate() {
            // Set up the table outside the bench function.
            let start: u64 = 1_000_000_000 * (i as u64);
            const SEED: u64 = 23;
            let args = format!("[{SEED}, {start}, {size}]");
            runtime.block_on(async {
                module
                    .call_reducer("create_sequential_nonunique_locations", args)
                    .await
                    .unwrap();
            });
            group.bench_function(BenchmarkId::from_parameter(size), |b| {
                b.to_async(runtime).iter(|| async {
                    let last = start + size - 1;
                    let args = format!("[{last}]");
                    module.call_reducer("find_nonunique_location", args).await.unwrap();
                });
            });
        }
        group.finish();
    });
}
// Register the benchmark entry point and let criterion generate `main`.
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
+15
View File
@@ -0,0 +1,15 @@
#!/bin/bash
set -euo pipefail

# Generate flamegraphs for one benchmark command, run against both engines.
# Outputs sqlite.svg and spacetime.svg next to this script.
#
# Usage: ./flamegraph.sh <test-name>

if [ "$#" -lt "1" ] ; then
    echo "Usage: $0 <test-name>"
    exit 1
fi

cd "$(dirname "$0")"

# Build once in release mode so the profile reflects optimized code.
cargo build --release

# sqlite vs spacetime: same command, one flamegraph per engine.
# "$1" is quoted so test names containing spaces or glob characters survive.
cargo flamegraph --deterministic --notes "sqlite ${1}" -o sqlite.svg -- --db sqlite "${1}"
cargo flamegraph --deterministic --notes "spacetime ${1}" -o spacetime.svg -- --db spacetime "${1}"
+174
View File
@@ -0,0 +1,174 @@
# Mini-tool for comparing benchmark between PR or locally
import os
import json
import argparse
import subprocess
import shutil
def statDecoder(statDict):
    """Convert a plain dict into an immutable record with attribute access.

    Keys become namedtuple field names, so ``statDecoder({"mean": 1.0}).mean``
    works. Keys must therefore be valid Python identifiers.
    """
    # Bug fix: the module's import block never imported `namedtuple`, so the
    # original raised NameError on first call. Import it locally here.
    from collections import namedtuple
    return namedtuple('Stat', statDict.keys())(*statDict.values())
def clean(stat):
    """Return a copy of `stat` with noisy keys dropped and values normalized.

    Drops "stddev", "command" and "exit_codes"; collapses "times" to its
    first entry; rounds every float value to two decimal places.
    """
    dropped = ("stddev", "command", "exit_codes")
    result = {}
    for key, value in stat.items():
        if key in dropped:
            continue
        if key == "times":
            value = value[0]
        result[key] = round(value, 2) if isinstance(value, float) else value
    return result
def larger(row: list):
    """Length of the string rendering of each cell in `row`."""
    return list(map(lambda cell: len(str(cell)), row))
def bar_chart(data: list):
    """Print a horizontal Unicode bar chart, one row per (label, count) entry.

    Fix: the Unicode block-element literals had been lost (empty strings), so
    bars rendered empty and ``chr(ord(''))`` raised ``TypeError`` whenever a
    fractional chunk was needed. Restore U+2588 FULL BLOCK and U+258F LEFT
    ONE EIGHTH BLOCK.
    """
    max_value = max(count for _, count in data.items())
    # Scale so the longest bar is 25 full blocks wide.
    increment = max_value / 25

    longest_label_length = max(len(label) for label, _ in data.items())

    for label, count in data.items():
        # The ASCII block elements come in chunks of 8, so we work out how
        # many fractions of 8 we need.
        # https://en.wikipedia.org/wiki/Block_Elements
        bar_chunks, remainder = divmod(int(count * 8 / increment), 8)

        # First draw the full width chunks
        bar = '█' * bar_chunks

        # Then add the fractional part. The Unicode code points for
        # block elements are (8/8), (7/8), (6/8), ... , so we need to
        # work backwards.
        if remainder > 0:
            bar += chr(ord('█') + (8 - remainder))

        # If the bar is empty, add a left one-eighth block
        bar = bar or '▏'

        print(f'{label.rjust(longest_label_length)} ▏ {count:#4f} {bar}')
class Report:
    """A rendered benchmark comparison: title, table, and bar-chart data.

    ``larger`` holds the uniform column width: the widest cell across the
    header and all rows, plus two characters of padding.
    """

    def __init__(self, title: str, header: list, bar: dict, rows: dict):
        self.title = title
        self.header = header
        self.bar = bar
        self.rows = rows
        widths = list(larger(header))
        for row in rows:
            widths.extend(larger(row))
        self.larger = max(widths) + 2
class Stat:
    """Cleaned pair of hyperfine results: SpacetimeDB first, SQLite second."""

    def __init__(self, results):
        spacetime_raw, sqlite_raw = results[0], results[1]
        self.spacetime = clean(spacetime_raw)
        self.sqlite = clean(sqlite_raw)
def load_file(named: str):
    """Parse a hyperfine JSON export and wrap its results in a Stat.

    Fix: the file handle was opened without ever being closed; use a
    context manager so it is released promptly.
    """
    with open(named) as fh:
        data = json.load(fh)
    return Stat(data['results'])
def print_cell(cell: str, size: int, is_last: bool):
    """Render one markdown table cell padded with spaces to ``size`` chars.

    The final cell of a row also gets a closing pipe.
    """
    padded = cell + " " * (size - len(cell))
    return f"| {padded} |" if is_last else f"| {padded} "
def print_row(row: list, size: int):
    """Print one markdown table row, each cell padded to ``size`` chars."""
    rendered = [
        print_cell(str(cell), size, idx == len(row) - 1)
        for idx, cell in enumerate(row)
    ]
    print("".join(rendered))
def print_mkdown(report: Report):
    """Emit the report as GitHub-flavored markdown: title, bar chart, table."""
    print("###", report.title)
    print("\n```bash")
    bar_chart(report.bar)
    print("```\n")
    print("*Smaller is better.*")
    width = report.larger
    print_row(report.header, width)
    # Markdown header separator: one dashed cell per column.
    print_row(["-" * width] * len(report.header), width)
    for row in report.rows:
        print_row(row, width)
def pick_winner(a: dict, b: dict, label_a: str, label_b: str):
    """Compare two cleaned stats by mean runtime.

    Returns ``(winner_label, loser_times)``; a smaller mean wins, and equal
    means are reported as ``"TIE"``.
    """
    if a["mean"] > b["mean"]:
        return label_b, a["times"]
    if a["mean"] == b["mean"]:
        return "TIE", a["times"]
    return label_a, b["times"]
# Check Sqlite VS Spacetime
def cmp_bench(stat: Stat):
    """Build the SQLite-vs-SpacetimeDB comparison report from one Stat."""
    winner, _delta = pick_winner(stat.spacetime, stat.sqlite, "Spacetime", "Sqlite")
    header = ["Stat", "Sqlite", "Spacetime", "Delta"]
    rows = [
        [key, sqlite_v, stat.spacetime[key], round(stat.spacetime[key] - sqlite_v, 2)]
        for key, sqlite_v in stat.sqlite.items()
    ]
    bar = dict(SpaceTimeDb=stat.spacetime["mean"], Sqlite=stat.sqlite["mean"])
    title = "Comparing Sqlite VS Spacetime Winner: **%s**" % winner
    return Report(title, header, bar, rows)
# Check the progress of Spacetime between branches / PR
def improvement_bench(old: Stat, new: Stat):
    """Build the old-vs-new SpacetimeDB progress report."""
    winner, _delta = pick_winner(old.spacetime, new.spacetime, "Old", "New")
    header = ["Stat", "OLD", "NEW", "Delta"]
    rows = [
        [key, old_v, new.spacetime[key], round(old_v - new.spacetime[key], 2)]
        for key, old_v in old.spacetime.items()
    ]
    bar = dict(Old=old.spacetime["mean"], New=new.spacetime["mean"])
    return Report("Improvement of Spacetime. Winner: **%s**" % winner, header, bar, rows)
if __name__ == '__main__':
    # CLI entry point: "versus" compares SQLite vs SpacetimeDB locally;
    # "pr" compares the current checkout's SpacetimeDB against master.
    parser = argparse.ArgumentParser()
    parser.add_argument("bench", choices=['versus', 'pr'], help="Select bench")
    args = vars(parser.parse_args())
    # args = {"bench": "pr"}
    # The hyperfine run whose JSON output (out.json) we analyze below.
    cmd = "./hyperfine.sh insert"
    if args["bench"] == "pr":
        # Run the same command against master via pr_copy.sh, which leaves
        # its results here as old.json...
        subprocess.check_call('./pr_copy.sh "%s"' % cmd, shell=True)
        # ...then run it on the current checkout and keep that as new.json.
        subprocess.check_call(cmd, shell=True, timeout=60 * 5)
        shutil.copyfile("out.json", "new.json")
        old = load_file("old.json")
        new = load_file("new.json")
        report = improvement_bench(old, new)
    else:
        subprocess.check_call(cmd, shell=True, timeout=60 * 5)
        stat = load_file("out.json")
        report = cmp_bench(stat)
    print_mkdown(report)
+15
View File
@@ -0,0 +1,15 @@
#!/bin/bash
set -euo pipefail

# Benchmark one test with hyperfine, SpacetimeDB vs SQLite, exporting the
# results to out.json for the Python comparison tool.
# Usage: ./hyperfine.sh <test-name>
if [ "$#" -lt "1" ] ; then
    echo "Usage: $0 <test-name>"
    exit 1
fi

cd "$(dirname "$0")"

# sqlite vs spacetime
cargo build --release

bench="../../target/release/spacetimedb-bench"

# Add --show-output to see errors...
hyperfine --shell=none --export-json out.json --warmup 1 --runs 1 \
    "${bench} --db spacetime ${1}" \
    "${bench} --db sqlite ${1}"
+10
View File
@@ -0,0 +1,10 @@
#!/bin/bash
set -euo pipefail

# Profile a single benchmark run with a chosen Instruments template.
# Usage: ./instruments.sh <engine> <template> <test-name>
if [ "$#" -lt "3" ] ; then
    echo "Usage: $0 <engine> <template> <test-name>"
    exit 1
fi

# Only OSX: Run the benchmark in instruments.app
cargo instruments -t "${2}" -- --db "${1}" "${3}"
+29
View File
@@ -0,0 +1,29 @@
#!/bin/bash
set -euo pipefail

# Run a benchmark command against the master branch in a scratch checkout,
# then copy its JSON results back here as old.json for comparison.
# Usage: ./pr_copy.sh <cmd>
if [ "$#" -lt "1" ] ; then
    echo "Usage: $0 <cmd>"
    exit 1
fi

cd "$(dirname "$0")"

TEMPD=$(mktemp -d)

# Exit if the temp directory wasn't created successfully.
if [ ! -e "$TEMPD" ]; then
    >&2 echo "Failed to create temp directory"
    exit 1
fi
# Fix: the scratch checkout was never removed; clean it up on any exit.
trap 'rm -rf "$TEMPD"' EXIT

CURRENT=$(pwd)

git clone ../../ "$TEMPD"
cd "$TEMPD"
git switch master

# Overlay the current bench crate onto the master checkout so the same
# harness runs against the old engine code.
cp -r "$CURRENT" "$TEMPD/crates/"
cd "$TEMPD/crates/bench"

# Intentionally unquoted: $1 carries a command plus its arguments and
# must word-split to execute.
$1

echo "Copy old.json..."
cp "$TEMPD/crates/bench/old.json" "$CURRENT"
echo "Done"
+7
View File
@@ -0,0 +1,7 @@
#!/bin/bash
set -euo pipefail

# sqlite vs spacetime: run the full comparison across all benchmark tests.
for test_name in insert insert-bulk select-no-index; do
    ./hyperfine.sh "$test_name"
done
+78
View File
@@ -0,0 +1,78 @@
use crate::utils::{encode, ResultBench, START_B};
use clap::ValueEnum;
use std::marker::PhantomData;
use std::ops::Range;
/// Factory for database handles used by the benchmark [`Pool`].
pub trait BuildDb {
    /// Build a fresh database instance; `prefill` requests that it be
    /// preloaded with sample rows before being handed out.
    fn build(prefill: bool) -> ResultBench<Self>
    where
        Self: Sized;
}
/// Hands out fresh database instances for benchmark iterations.
pub struct Pool<T> {
    // Counts instances handed out so far.
    // NOTE(review): write-only — nothing reads it; confirm it is still needed.
    instance: u8,
    // Forwarded to `T::build` on every `next()` call.
    pub(crate) prefill: bool,
    _x: PhantomData<T>,
}

impl<T: BuildDb> Pool<T> {
    /// Create a pool whose instances will (or won't) be prefilled.
    pub fn new(prefill: bool) -> ResultBench<Self> {
        Ok(Self {
            instance: 0,
            prefill,
            _x: Default::default(),
        })
    }

    /// Build and return the next database instance.
    #[allow(clippy::should_implement_trait)]
    pub fn next(&mut self) -> ResultBench<T> {
        self.instance += 1;
        T::build(self.prefill)
    }
}
/// One synthetic benchmark row: `b` is `a` shifted by `START_B`, and `c`
/// is the English spelling of `b` (via `utils::encode`).
#[derive(Debug)]
pub struct Data {
    pub(crate) a: i32,
    pub(crate) b: u64,
    pub(crate) c: String,
}

impl Data {
    /// Derive a full row from its primary value `a`.
    pub fn new(a: i32) -> Self {
        let b = a as u64 + START_B;
        let c = encode(b);
        Data { a, b, c }
    }
}
/// Database engine to use
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, ValueEnum)]
pub enum DbEngine {
    // SQLite backend (see `sqlite.rs`, via rusqlite).
    Sqlite,
    // SpacetimeDB backend (see `spacetime.rs`).
    Spacetime,
}
/// # of Rows to use in the benchmark
// The discriminant of each variant IS the row count.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, ValueEnum)]
pub enum Runs {
    /// Tiny = 100
    Tiny = 100,
    /// Small = 1000
    Small = 1000,
    /// Medium = 5000
    Medium = 5000,
    /// Large = 25000
    Large = 25000,
}

impl Runs {
    /// `0..N` where `N` is this variant's row count.
    pub fn range(self) -> Range<u16> {
        let x = self as u16;
        0..x
    }

    /// Generate one `Data` row per index in `range()`.
    pub fn data(self) -> impl Iterator<Item = Data> {
        let x = self as u16;
        (0..x).map(|x| Data::new(x as i32))
    }
}
+13
View File
@@ -0,0 +1,13 @@
// Benchmark harness crate: one module per engine plus shared data/helpers.
pub mod data;
pub mod spacetime;
pub mod sqlite;
pub(crate) mod utils;

/// One-stop imports for the benchmark binary and `benches/`.
pub mod prelude {
    pub use crate::data::*;
    pub use crate::utils::{ResultBench, DB_POOL, SPACETIME, SQLITE, START_B};
    pub(crate) use tempdir::TempDir;

    pub use crate::spacetime;
    pub use crate::sqlite;
}
+68
View File
@@ -0,0 +1,68 @@
use clap::{Parser, Subcommand};
use spacetimedb_bench::prelude::*;
/// Bencher for SpacetimeDB
#[derive(Debug, Parser)]
#[command(name = "bench")]
struct Cli {
#[arg(long)]
db: DbEngine,
#[command(subcommand)]
command: Commands,
}
// Note: Mirror these same benchmarks in `benches/db.rs`
// The `///` comments double as clap help text — keep them user-facing.
#[derive(Debug, Subcommand)]
enum Commands {
    /// Generate insert, each generate a transaction
    Insert {
        /// How many rows
        #[arg(value_enum)]
        rows: Option<Runs>,
    },
    /// Generate insert in bulk enclosed in a single transaction
    InsertBulk {
        /// How many rows
        #[arg(value_enum)]
        rows: Option<Runs>,
    },
    /// Run queries without a index
    SelectNoIndex {
        /// How many rows
        #[arg(value_enum)]
        rows: Option<Runs>,
    },
}
/// Dispatch benchmark `$fun` to the engine selected by `$cli.db`, first
/// building a fresh `Pool` with the given `$prefill` flag.
macro_rules! bench_fn {
    ($cli:ident, $fun:ident, $run:expr, $prefill:literal) => {{
        let run = $run;
        match $cli.db {
            DbEngine::Sqlite => {
                let mut pool = Pool::new($prefill)?;
                sqlite::$fun(&mut pool, run)
            }
            DbEngine::Spacetime => {
                let mut pool = Pool::new($prefill)?;
                spacetime::$fun(&mut pool, run)
            }
        }
    }};
}
/// Parse CLI args and run the selected benchmark against the selected engine.
fn main() -> ResultBench<()> {
    let cli = Cli::parse();
    match cli.command {
        // Per-row transactions start from an empty table (no prefill).
        Commands::Insert { rows } => {
            bench_fn!(cli, insert_tx_per_row, rows.unwrap_or(Runs::Tiny), false)
        }
        // Bulk insert: the prefill itself is the measured work.
        Commands::InsertBulk { rows } => {
            bench_fn!(cli, insert_tx, rows.unwrap_or(Runs::Small), true)
        }
        // Unindexed SELECTs need data to scan, hence prefill = true.
        Commands::SelectNoIndex { rows } => {
            bench_fn!(cli, select_no_index, rows.unwrap_or(Runs::Tiny), true)
        }
    }
}
+119
View File
@@ -0,0 +1,119 @@
use crate::prelude::*;
use spacetimedb::db::datastore::locking_tx_datastore::MutTxId;
use spacetimedb::db::datastore::traits::TableDef;
use spacetimedb::db::relational_db::{open_db, RelationalDB};
use spacetimedb_lib::sats::product;
use spacetimedb_lib::{AlgebraicType, AlgebraicValue, ProductType};
/// A benchmark database handle: the open DB plus the id of its single table.
type DbResult = (RelationalDB, u32);

/// Create a temp directory holding a fresh DB with one table
/// `(a: I32, b: U64, c: String)`; returns the directory and the table id.
fn init_db() -> ResultBench<(TempDir, u32)> {
    let tmp_dir = TempDir::new("stdb_test")?;
    let stdb = open_db(tmp_dir.path())?;
    let mut tx = stdb.begin_tx();
    let table_id = stdb.create_table(
        &mut tx,
        TableDef::from(ProductType::from_iter([
            ("a", AlgebraicType::I32),
            ("b", AlgebraicType::U64),
            ("c", AlgebraicType::String),
        ])),
    )?;
    stdb.commit_tx(tx)?;
    Ok((tmp_dir, table_id))
}
/// Open a second handle on a freshly initialized DB and return it.
// NOTE(review): `tmp_dir` is dropped when this function returns, which deletes
// the directory backing the still-open `stdb` — confirm this is intentional
// (relying on unlink-while-open semantics / in-memory state) and not a bug.
fn build_db() -> ResultBench<DbResult> {
    let (tmp_dir, table_id) = init_db()?;
    let stdb = open_db(&tmp_dir)?;
    Ok((stdb, table_id))
}
/// Insert every row produced by `run.data()` into `table_id`, using the
/// caller-provided transaction (the caller decides when to commit).
fn insert(db: &RelationalDB, tx: &mut MutTxId, table_id: u32, run: Runs) -> ResultBench<()> {
    for row in run.data() {
        db.insert(
            tx,
            table_id,
            product![
                AlgebraicValue::I32(row.a),
                AlgebraicValue::U64(row.b),
                AlgebraicValue::String(row.c),
            ],
        )?;
    }
    Ok(())
}
impl BuildDb for DbResult {
    /// Build a fresh DB; when `prefill` is set, load `Runs::Small` rows in
    /// one committed transaction before handing it out.
    fn build(prefill: bool) -> ResultBench<Self>
    where
        Self: Sized,
    {
        let db = build_db()?;
        if prefill {
            prefill_data(&db, Runs::Small)?;
        }
        Ok(db)
    }
}
/// Insert `run` rows into the DB inside a single committed transaction.
pub fn prefill_data(db: &DbResult, run: Runs) -> ResultBench<()> {
    let (conn, table_id) = db;
    let mut tx = conn.begin_tx();
    insert(conn, &mut tx, *table_id, run)?;
    conn.commit_tx(tx)?;
    Ok(())
}
/// Benchmark: insert `run` rows, beginning and committing a separate
/// transaction for every single row (worst-case transaction overhead).
pub fn insert_tx_per_row(pool: &mut Pool<DbResult>, run: Runs) -> ResultBench<()> {
    let (conn, table_id) = pool.next()?;
    //let mut log = log.lock().unwrap();
    for row in run.data() {
        let mut tx = conn.begin_tx();
        conn.insert(
            &mut tx,
            table_id,
            product![
                AlgebraicValue::I32(row.a),
                AlgebraicValue::U64(row.b),
                AlgebraicValue::String(row.c),
            ],
        )?;
        conn.commit_tx(tx)?;
    }
    Ok(())
}
/// Benchmark: bulk insert enclosed in a single transaction.
///
/// Forces `prefill` on so `Pool::next` → `BuildDb::build` performs the
/// prefill (one big transaction) as the measured work. `_run` is unused:
/// the prefill size is fixed at `Runs::Small` inside `build`.
pub fn insert_tx(pool: &mut Pool<DbResult>, _run: Runs) -> ResultBench<()> {
    pool.prefill = true;
    pool.next()?;
    Ok(())
}
/// Benchmark: for each window index `i`, full-scan the table and filter rows
/// whose `b` lies in `[i * START_B, START_B + i * START_B)` without an index.
pub fn select_no_index(pool: &mut Pool<DbResult>, run: Runs) -> ResultBench<()> {
    let (conn, table_id) = pool.next()?;
    // Read-only transaction: dropped at the end rather than committed.
    let tx = conn.begin_tx();
    // Skip i == 0: its window [0, START_B) contains none of the generated
    // rows (`Data::new` starts `b` at START_B), so the assert below would fail.
    for i in run.range().skip(1) {
        let i = i as u64;
        let _r = conn
            .scan(&tx, table_id)?
            .map(|r| Data {
                a: *r.view().elements[0].as_i32().unwrap(),
                b: *r.view().elements[1].as_u64().unwrap(),
                c: r.view().elements[2].as_string().unwrap().clone(),
            })
            .filter(|x| x.b >= i * START_B && x.b < (START_B + (i * START_B)))
            .collect::<Vec<_>>();
        // Each window should contain exactly START_B rows.
        assert_eq!(_r.len() as u64, START_B);
        //dbg!(_r.len());
    }
    Ok(())
}
+94
View File
@@ -0,0 +1,94 @@
use crate::prelude::*;
use rusqlite::{Connection, Transaction};
impl BuildDb for Connection {
    /// Open a SQLite DB in a fresh temp directory, create the `data` table,
    /// and optionally prefill it with `Runs::Small` rows.
    // NOTE(review): `tmp_dir` is dropped when this function returns, deleting
    // the directory behind the still-open connection — on Unix the open file
    // keeps working, but WAL side files live in that directory too; confirm
    // this is intended.
    fn build(prefill: bool) -> ResultBench<Self>
    where
        Self: Sized,
    {
        let tmp_dir = TempDir::new("sqlite_test")?;
        let mut db = Connection::open(tmp_dir.path().join("test.db"))?;
        //let mut db = Connection::open("test.db")?;
        // WAL journaling + normal sync: the usual fast-but-safe bench pragmas.
        db.execute_batch(
            "PRAGMA journal_mode = WAL;
            PRAGMA synchronous = normal;",
        )?;
        db.execute_batch(
            "CREATE TABLE data (
            a INTEGER PRIMARY KEY,
            b BIGINT NOT NULL,
            c TEXT);",
        )?;
        if prefill {
            prefill_data(&mut db, Runs::Small)?;
        }
        Ok(db)
    }
}
/// Insert every generated row within the caller's transaction.
// NOTE: values are string-interpolated rather than bound; acceptable here
// only because `Data` is generated internally — don't copy this pattern
// for untrusted input.
fn insert(db: &Transaction, run: Runs) -> ResultBench<()> {
    for row in run.data() {
        db.execute(
            &format!("INSERT INTO data (a, b, c) VALUES({} ,{}, '{}');", row.a, row.b, row.c),
            (),
        )?;
    }
    Ok(())
}
/// Insert `run` rows inside a single committed transaction.
pub fn prefill_data(db: &mut Connection, run: Runs) -> ResultBench<()> {
    let tx = db.transaction()?;
    insert(&tx, run)?;
    tx.commit()?;
    Ok(())
}
/// Benchmark: insert `run` rows one statement at a time with no explicit
/// transaction — each `execute` presumably runs in SQLite's autocommit mode,
/// i.e. one implicit transaction per row, matching the function name.
// NOTE: omits the column list, relying on the `data` table's column order.
pub fn insert_tx_per_row(pool: &mut Pool<Connection>, run: Runs) -> ResultBench<()> {
    let db = pool.next()?;
    for row in run.data() {
        db.execute(
            &format!("INSERT INTO data VALUES({} ,{}, '{}');", row.a, row.b, row.c),
            (),
        )?;
    }
    Ok(())
}
/// Benchmark: bulk insert enclosed in a single transaction.
///
/// The measured work happens inside `Connection::build` (the prefill), so we
/// force `prefill` on — mirroring `spacetime::insert_tx` — instead of relying
/// on the caller having constructed the pool with `prefill = true`.
/// `_run` is unused: the prefill size is fixed at `Runs::Small` in `build`.
pub fn insert_tx(pool: &mut Pool<Connection>, _run: Runs) -> ResultBench<()> {
    pool.prefill = true;
    pool.next()?;
    Ok(())
}
/// Benchmark: for each window index `i`, SELECT rows whose `b` lies in
/// `[i * START_B, START_B + i * START_B)` with no index on `b`.
// NOTE: the statement is re-prepared every iteration — part of what is
// being measured, to stay comparable with the spacetime variant.
pub fn select_no_index(pool: &mut Pool<Connection>, run: Runs) -> ResultBench<()> {
    let db = pool.next()?;
    // Skip i == 0: its window [0, START_B) holds none of the generated rows
    // (`b` starts at START_B), so the size assertion would fail.
    for i in run.range().skip(1) {
        let i = i as u64;
        let sql = &format!(
            "SELECT * FROM data WHERE b >= {} AND b < {}",
            i * START_B,
            START_B + (i * START_B)
        );
        //dbg!(sql);
        let mut stmt = db.prepare(sql)?;
        // Collect the mapped rows; only the count is checked below.
        let _r = stmt
            .query_map([], |row| {
                Ok(Data {
                    a: row.get(0)?,
                    b: row.get(1)?,
                    c: row.get(2)?,
                })
            })?
            .collect::<Vec<_>>();
        assert_eq!(_r.len() as u64, START_B);
        //dbg!(_r.len());
    }
    Ok(())
}
+95
View File
@@ -0,0 +1,95 @@
use std::fmt;
use std::iter::successors;
/// Number of database instances a pool is expected to supply.
// NOTE(review): not referenced by `Pool` in this file — confirm still needed.
pub const DB_POOL: u8 = 10;
/// Display label for the SQLite engine.
pub const SQLITE: &str = "Sqlite";
/// Display label for the SpacetimeDB engine.
pub const SPACETIME: &str = "SpacetimeDB";
//TODO: This should be 100 and in the prefill steps run with Large, but that take too much time with spacetimedb
/// Offset added to every generated row's `b` column; also the expected row
/// count of each `select_no_index` window.
pub const START_B: u64 = 10;
/// Error wrapper for tests and benchmarks so failures render nicely.
pub struct TestError {
    pub error: Box<dyn std::error::Error>,
}

impl<E: std::error::Error + 'static> From<E> for TestError {
    /// Box any concrete error into the wrapper.
    fn from(e: E) -> Self {
        TestError { error: Box::new(e) }
    }
}

impl fmt::Debug for TestError {
    /// Print the wrapped error in yellow (ANSI 93) so it stands out.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "\x1b[93m{}\x1b[0m", self.error)
    }
}

/// Result alias used throughout the benchmarks; failures display nicely.
pub type ResultBench<T> = Result<T, TestError>;
/// English names for 0..=19.
const ONES: [&str; 20] = [
    "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten",
    "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen",
    "nineteen",
];
/// English names for the tens digits (indices 0 and 1 are never reached).
const TENS: [&str; 10] = [
    "zero", "ten", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety",
];
/// Scale words for successive powers of 1000; enough to cover `u64::MAX`.
const ORDERS: [&str; 7] = [
    "zero", "thousand", "million", "billion", "trillion", "quadrillion", "quintillion",
];

/// Spell `num` as `<upper> <order> [<lower>]`, splitting at `div`.
fn format_num(num: u64, div: u64, order: &str) -> String {
    let upper = num / div;
    let lower = num % div;
    if lower == 0 {
        format!("{} {}", encode(upper), order)
    } else {
        format!("{} {} {}", encode(upper), order, encode(lower))
    }
}

/// Spell `num` out in English words, e.g. 1234 →
/// "one thousand two hundred thirty-four".
pub fn encode(num: u64) -> String {
    if num < 20 {
        return ONES[num as usize].to_string();
    }
    if num < 100 {
        let tens = TENS[(num / 10) as usize];
        return match num % 10 {
            0 => tens.to_string(),
            ones => format!("{}-{}", tens, encode(ones)),
        };
    }
    if num < 1000 {
        return format_num(num, 100, "hundred");
    }
    // Find the largest power of 1000 not exceeding `num`, paired with its
    // scale word; `checked_mul` stops the sequence before overflowing u64.
    let (div, order) = successors(Some(1u64), |v| v.checked_mul(1000))
        .zip(ORDERS.iter())
        .find(|&(e, _)| e > num / 1000)
        .unwrap();
    format_num(num, div, order)
}
+19
View File
@@ -0,0 +1,19 @@
# Proc-macro crate implementing the SpacetimeDB derive/attribute macros.
[package]
name = "spacetimedb-bindings-macro"
version = "0.4.1"
edition = "2021"
license-file = "LICENSE"
description = "Easy support for interacting between SpacetimeDB and Rust."

[lib]
proc-macro = true
# Benching off, because of https://bheisler.github.io/criterion.rs/book/faq.html#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options
bench = false

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
proc-macro2 = "1.0"
syn = { version = "2", features = ["full", "extra-traits"] }
quote = "1.0.8"
humantime = "2.1.0"
View File
File diff suppressed because it is too large Load Diff
+73
View File
@@ -0,0 +1,73 @@
use std::marker::PhantomData;
use syn::parse::{Lookahead1, Parse, ParseStream};
use syn::token::Token;
/// `match`-like dispatch over the next token(s) of a `ParseStream`.
///
/// Each arm names a token type (or a `(T1, T2)` pair, handled by the
/// reference impl of `PeekParse`); the first arm whose token peeks
/// successfully is parsed and its body runs, with the parsed value bound to
/// the optional `binding @` pattern. If no arm matches, the accumulated
/// `Lookahead1` error is returned.
macro_rules! match_tok {
    (match $input:ident { $($matches:tt)* }) => {{
        use $crate::macros::PeekParse;
        let input: syn::parse::ParseStream = $input;
        let lookahead = input.lookahead1();
        match_tok!(@match lookahead, input { $($matches)* })
    }};
    // Arm variants: with/without a binding, block body or expression body.
    (@match $lookahead:ident, $input:ident { $binding:tt @ $tok:ty => $body:block $($rest:tt)* }) => {
        match_tok!(@case $lookahead, $input, $binding, $tok, $body, { $($rest)* })
    };
    (@match $lookahead:ident, $input:ident { $tok:ty => $body:block $($rest:tt)* }) => {
        match_tok!(@case $lookahead, $input, _, $tok, $body, { $($rest)* })
    };
    (@match $lookahead:ident, $input:ident { $binding:tt @ $tok:ty => $body:expr, $($rest:tt)* }) => {
        match_tok!(@case $lookahead, $input, $binding, $tok, $body, { $($rest)* })
    };
    (@match $lookahead:ident, $input:ident { $tok:ty => $body:expr, $($rest:tt)* }) => {
        match_tok!(@case $lookahead, $input, _, $tok, $body, { $($rest)* })
    };
    // No arms left: report the lookahead's combined expectation error.
    (@match $lookahead:ident, $input:ident {}) => {
        return Err($lookahead.error())
    };
    // Peek, then parse-and-run on success; otherwise try the next arm.
    (@case $lookahead:ident, $input:ident, $binding:tt, $tok:ty, $body:expr, { $($rest:tt)* }) => {
        if $crate::macros::peekparser::<$tok>().peekparse_peek(&$lookahead, $input) {
            let $binding = $crate::macros::peekparser::<$tok>().peekparse_parse($input)?;
            $body
        } else {
            match_tok!(@match $lookahead, $input { $($rest)* })
        }
    };
}
/// Turn a token type into a `&'static PhantomData<T>` value so `match_tok!`
/// can dispatch through the `PeekParse` impls via method-call syntax.
pub fn peekparser<T>() -> &'static PhantomData<T> {
    &PhantomData
}
/// Peek-then-parse abstraction used by `match_tok!`.
pub trait PeekParse {
    type Output;
    /// Does the upcoming input start with this token (pattern)?
    fn peekparse_peek(&self, lookahead1: &Lookahead1, input: ParseStream) -> bool;
    /// Consume and parse the token(s).
    fn peekparse_parse(&self, input: ParseStream) -> syn::Result<Self::Output>;
}
// Single-token case.
impl<T: Token + Parse> PeekParse for PhantomData<T> {
    type Output = T;

    fn peekparse_peek(&self, lookahead1: &Lookahead1, _input: ParseStream) -> bool {
        // `peek` only needs the closure at the type level; the uninhabited
        // body `match x {}` supplies `T` without ever running.
        lookahead1.peek(|x| -> T { match x {} })
    }

    fn peekparse_parse(&self, input: ParseStream) -> syn::Result<Self::Output> {
        input.parse()
    }
}
// Two-token case, implemented on `&PhantomData<(T1, T2)>` rather than
// `PhantomData<(T1, T2)>` — presumably so autoref method resolution on the
// `&'static PhantomData` returned by `peekparser` selects this impl for
// tuples (a tuple is not itself `Token + Parse`); confirm if modifying.
impl<T1, T2> PeekParse for &PhantomData<(T1, T2)>
where
    T1: Token + Parse,
    T2: Token + Parse,
{
    type Output = (T1, T2);

    fn peekparse_peek(&self, lookahead1: &Lookahead1, input: ParseStream) -> bool {
        // First token via lookahead (collects expected-token errors),
        // second via a plain peek2.
        lookahead1.peek(|x| -> T1 { match x {} }) && input.peek2(|x| -> T2 { match x {} })
    }

    fn peekparse_parse(&self, input: ParseStream) -> syn::Result<Self::Output> {
        Ok((input.parse()?, input.parse()?))
    }
}
+437
View File
@@ -0,0 +1,437 @@
extern crate core;
extern crate proc_macro;
use proc_macro2::{Span, TokenStream};
use quote::{quote, quote_spanned, ToTokens};
use syn::punctuated::Pair;
use syn::spanned::Spanned;
use syn::{LitStr, Token};
use crate::{check_duplicate_meta, sym};
/// Intermediate model of a type deriving the SATS traits, extracted from a
/// `syn::DeriveInput` so the derive functions below can share it.
pub(crate) struct SatsType<'a> {
    pub ident: &'a syn::Ident,
    pub generics: &'a syn::Generics,
    // SATS type name; the ident unless overridden via `#[sats(name = "...")]`.
    pub name: String,
    // Path to the crate providing the SATS traits
    // (`#[sats(crate = ...)]` or the per-derive fallback).
    pub krate: TokenStream,
    pub original_attrs: &'a [syn::Attribute],
    pub data: SatsTypeData<'a>,
}
/// Struct fields (product type) or enum variants (sum type).
pub(crate) enum SatsTypeData<'a> {
    Product(Vec<SatsField<'a>>),
    Sum(Vec<SatsVariant<'a>>),
}
/// One field of a product type; `ident`/`name` are `None` for tuple fields.
pub(crate) struct SatsField<'a> {
    pub ident: Option<&'a syn::Ident>,
    pub vis: &'a syn::Visibility,
    pub name: Option<String>,
    pub ty: &'a syn::Type,
    pub original_attrs: &'a [syn::Attribute],
    pub span: Span,
}
/// One variant of a sum type; unit variants carry no `ty`/`member`.
pub(crate) struct SatsVariant<'a> {
    pub ident: &'a syn::Ident,
    pub name: String,
    pub ty: Option<&'a syn::Type>,
    // How to reach the payload: the named field, or index 0 for newtypes.
    pub member: Option<syn::Member>,
    #[allow(unused)]
    pub original_attrs: &'a [syn::Attribute],
}
/// Parse a `#[derive]` input into our intermediate `SatsType`.
///
/// Structs become `SatsTypeData::Product`, enums become `SatsTypeData::Sum`;
/// unions are rejected with an error.
pub(crate) fn sats_type_from_derive(
    input: &syn::DeriveInput,
    crate_fallback: TokenStream,
) -> syn::Result<SatsType<'_>> {
    let data = match &input.data {
        syn::Data::Struct(struc) => {
            let fields = struc.fields.iter().map(|field| SatsField {
                ident: field.ident.as_ref(),
                vis: &field.vis,
                name: field.ident.as_ref().map(syn::Ident::to_string),
                ty: &field.ty,
                original_attrs: &field.attrs,
                span: field.span(),
            });
            SatsTypeData::Product(fields.collect())
        }
        syn::Data::Enum(enu) => {
            let variants = enu.variants.iter().map(|var| {
                // Each variant contributes an optional payload type plus the
                // member (named or positional) used to access it.
                let (member, ty) = variant_data(var)?.unzip();
                Ok(SatsVariant {
                    ident: &var.ident,
                    name: var.ident.to_string(),
                    ty,
                    member,
                    original_attrs: &var.attrs,
                })
            });
            SatsTypeData::Sum(variants.collect::<syn::Result<Vec<_>>>()?)
        }
        syn::Data::Union(u) => return Err(syn::Error::new(u.union_token.span, "unions not supported")),
    };
    extract_sats_type(&input.ident, &input.generics, &input.attrs, data, crate_fallback)
}
/// Read `#[sats(...)]` attributes (`crate = path`, `name = "..."`) and finish
/// assembling a `SatsType`, defaulting `krate` to `crate_fallback` and
/// `name` to the type's ident.
pub(crate) fn extract_sats_type<'a>(
    ident: &'a syn::Ident,
    generics: &'a syn::Generics,
    attrs: &'a [syn::Attribute],
    data: SatsTypeData<'a>,
    crate_fallback: TokenStream,
) -> syn::Result<SatsType<'a>> {
    let mut name = None;
    let mut krate = None;
    for attr in attrs {
        // Only `#[sats(...)]` attributes are ours; ignore everything else.
        if attr.path() != sym::SATS {
            continue;
        }
        attr.parse_nested_meta(|meta| {
            if meta.path == sym::CRATE {
                check_duplicate_meta(&krate, &meta)?;
                let value = meta.value()?;
                let v = value.call(syn::Path::parse_mod_style)?;
                krate = Some(v.into_token_stream());
            } else if meta.path == sym::NAME {
                check_duplicate_meta(&name, &meta)?;
                let value = meta.value()?;
                let v = value.parse::<LitStr>()?;
                name = Some(v.value());
            } else {
                return Err(meta.error("unknown sats attribute"));
            }
            Ok(())
        })?;
    }
    let krate = krate.unwrap_or(crate_fallback);
    let name = name.unwrap_or_else(|| ident.to_string());
    Ok(SatsType {
        ident,
        generics,
        name,
        krate,
        original_attrs: attrs,
        data,
    })
}
/// Generate the `spacetimedb::SpacetimeType` impl for the type.
///
/// `make_type` registers the type in the typespace as a product (structs) or
/// sum (enums) algebraic type; `gen_type_alias` decides whether the SATS name
/// is attached or the type is registered anonymously.
pub(crate) fn derive_satstype(ty: &SatsType<'_>, gen_type_alias: bool) -> TokenStream {
    let ty_name = &ty.name;
    let name = &ty.ident;
    let typ = match &ty.data {
        SatsTypeData::Product(fields) => {
            let fields = fields.iter().map(|field| {
                let field_name = match &field.name {
                    Some(name) => quote!(Some(#name.to_owned())),
                    None => quote!(None),
                };
                let ty = field.ty;
                quote!(spacetimedb::sats::ProductTypeElement {
                    name: #field_name,
                    algebraic_type: <#ty as spacetimedb::SpacetimeType>::make_type(__typespace),
                })
            });
            quote!(spacetimedb::sats::AlgebraicType::Product(
                spacetimedb::sats::ProductType {
                    elements: vec![#(#fields),*],
                }
            ))
        }
        SatsTypeData::Sum(variants) => {
            // Unit variants are modeled as carrying the unit type `()`.
            let unit = syn::Type::Tuple(syn::TypeTuple {
                paren_token: Default::default(),
                elems: Default::default(),
            });
            let variants = variants.iter().map(|var| {
                let variant_name = &var.name;
                let ty = var.ty.unwrap_or(&unit);
                quote!(spacetimedb::sats::SumTypeVariant {
                    name: Some(#variant_name.to_owned()),
                    algebraic_type: <#ty as spacetimedb::SpacetimeType>::make_type(__typespace),
                })
            });
            quote!(spacetimedb::sats::AlgebraicType::Sum(spacetimedb::sats::SumType {
                variants: vec![#(#variants),*],
            }))
            // todo!()
        } // syn::Data::Union(u) => return Err(syn::Error::new(u.union_token.span, "unions not supported")),
    };
    let (impl_generics, ty_generics, where_clause) = ty.generics.split_for_impl();
    let ty_name = if gen_type_alias {
        quote!(Some(#ty_name))
    } else {
        quote!(None)
    };
    quote! {
        #[allow(clippy::all)]
        const _: () = {
            impl #impl_generics spacetimedb::SpacetimeType for #name #ty_generics #where_clause {
                fn make_type<S: spacetimedb::sats::typespace::TypespaceBuilder>(__typespace: &mut S) -> spacetimedb::sats::AlgebraicType {
                    spacetimedb::sats::typespace::TypespaceBuilder::add(
                        __typespace,
                        // is this correct? ignoring generics and stuff?
                        { struct __Marker; core::any::TypeId::of::<__Marker>() },
                        #ty_name,
                        |__typespace| #typ,
                    )
                }
            }
        };
    }
}
/// Generate the `Deserialize<'de>` impl.
///
/// Products get a `__ProductVisitor` handling both positional
/// (`visit_seq_product`) and named (`visit_named_product`) encodings; sums
/// get a `__SumVisitor` that resolves the variant by tag or by name.
pub(crate) fn derive_deserialize(ty: &SatsType<'_>) -> TokenStream {
    let (name, tuple_name) = (&ty.ident, &ty.name);
    let spacetimedb_lib = &ty.krate;
    let (impl_generics, ty_generics, where_clause) = ty.generics.split_for_impl();

    // Build a second generics list with a fresh `'de` lifetime prepended:
    // every existing lifetime must outlive `'de`, and `'de` is bounded by
    // every existing lifetime, as serde-style derives require.
    let mut de_generics = ty.generics.clone();
    let de_lifetime = syn::Lifetime::new("'de", Span::call_site());
    for lp in de_generics.lifetimes_mut() {
        lp.bounds.push(de_lifetime.clone());
    }
    let mut de_lt_param = syn::LifetimeParam::new(de_lifetime);
    de_lt_param.bounds = de_generics
        .lifetimes()
        .map(|lp| Pair::Punctuated(lp.lifetime.clone(), Token![+](Span::call_site())))
        .collect();
    de_generics.params.insert(0, de_lt_param.into());
    let (de_impl_generics, _, _) = de_generics.split_for_impl();
    // Independent 0.. counters: each is consumed by a separate interpolation.
    let (iter_n, iter_n2, iter_n3) = (0usize.., 0usize.., 0usize..);
    match &ty.data {
        SatsTypeData::Product(fields) => {
            let n_fields = fields.len();
            let field_names = fields.iter().map(|f| f.ident.unwrap()).collect::<Vec<_>>();
            let field_strings = fields.iter().map(|f| f.name.as_deref().unwrap()).collect::<Vec<_>>();
            let field_types = fields.iter().map(|f| &f.ty);
            quote! {
                #[allow(non_camel_case_types)]
                #[allow(clippy::all)]
                const _: () = {
                    impl #de_impl_generics #spacetimedb_lib::de::Deserialize<'de> for #name #ty_generics #where_clause {
                        fn deserialize<D: #spacetimedb_lib::de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
                            deserializer.deserialize_product(__ProductVisitor {
                                _marker: std::marker::PhantomData::<fn() -> #name #ty_generics>,
                            })
                        }
                    }
                    struct __ProductVisitor #impl_generics #where_clause {
                        _marker: std::marker::PhantomData<fn() -> #name #ty_generics>,
                    }
                    impl #de_impl_generics #spacetimedb_lib::de::ProductVisitor<'de> for __ProductVisitor #ty_generics #where_clause {
                        type Output = #name #ty_generics;
                        fn product_name(&self) -> Option<&str> {
                            Some(#tuple_name)
                        }
                        fn product_len(&self) -> usize {
                            #n_fields
                        }
                        fn visit_seq_product<A: #spacetimedb_lib::de::SeqProductAccess<'de>>(self, mut tup: A) -> Result<Self::Output, A::Error> {
                            Ok(#name {
                                #(#field_names:
                                    tup.next_element::<#field_types>()?
                                        .ok_or_else(|| #spacetimedb_lib::de::Error::invalid_product_length(#iter_n, &self))?,)*
                            })
                        }
                        fn visit_named_product<A: #spacetimedb_lib::de::NamedProductAccess<'de>>(self, mut __prod: A) -> Result<Self::Output, A::Error> {
                            #(let mut #field_names = None;)*
                            while let Some(__field) = #spacetimedb_lib::de::NamedProductAccess::get_field_ident(&mut __prod, Self {
                                _marker: std::marker::PhantomData,
                            })? {
                                match __field {
                                    #(__ProductFieldIdent::#field_names => {
                                        if #field_names.is_some() {
                                            return Err(#spacetimedb_lib::de::Error::duplicate_field(#iter_n2, Some(#field_strings), &self))
                                        }
                                        #field_names = Some(#spacetimedb_lib::de::NamedProductAccess::get_field_value(&mut __prod)?)
                                    })*
                                }
                            }
                            Ok(#name {
                                #(#field_names:
                                    #field_names.ok_or_else(|| #spacetimedb_lib::de::Error::missing_field(#iter_n3, Some(#field_strings), &self))?,)*
                            })
                        }
                    }
                    impl #de_impl_generics #spacetimedb_lib::de::FieldNameVisitor<'de> for __ProductVisitor #ty_generics #where_clause {
                        type Output = __ProductFieldIdent;
                        fn field_names(&self, names: &mut dyn #spacetimedb_lib::de::ValidNames) {
                            names.extend::<&[&str]>(&[#(#field_strings),*])
                        }
                        fn visit<__E: #spacetimedb_lib::de::Error>(self, name: &str) -> Result<Self::Output, __E> {
                            match name {
                                #(#field_strings => Ok(__ProductFieldIdent::#field_names),)*
                                _ => Err(#spacetimedb_lib::de::Error::unknown_field_name(name, &self)),
                            }
                        }
                    }
                    enum __ProductFieldIdent {
                        #(#field_names,)*
                    }
                };
            }
        }
        SatsTypeData::Sum(variants) => {
            let variant_names = variants.iter().map(|var| &*var.name).collect::<Vec<_>>();
            let variant_idents = variants.iter().map(|var| var.ident).collect::<Vec<_>>();
            // Variant tags are the declaration order, as u8.
            let tags = 0u8..;
            let arms = variants.iter().map(|var| {
                let ident = var.ident;
                if let (Some(member), Some(ty)) = (&var.member, var.ty) {
                    quote! {
                        __Variant::#ident => Ok(#name::#ident { #member: #spacetimedb_lib::de::VariantAccess::deserialize::<#ty>(__access)? }),
                    }
                } else {
                    quote! {
                        __Variant::#ident => {
                            let () = #spacetimedb_lib::de::VariantAccess::deserialize(__access)?;
                            Ok(#name::#ident)
                        }
                    }
                }
            });
            quote! {
                #[allow(clippy::all)]
                const _: () = {
                    impl #de_impl_generics #spacetimedb_lib::de::Deserialize<'de> for #name #ty_generics #where_clause {
                        fn deserialize<D: #spacetimedb_lib::de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
                            deserializer.deserialize_sum(__SumVisitor {
                                _marker: std::marker::PhantomData::<fn() -> #name #ty_generics>,
                            })
                        }
                    }
                    struct __SumVisitor #impl_generics #where_clause {
                        _marker: std::marker::PhantomData<fn() -> #name #ty_generics>,
                    }
                    impl #de_impl_generics #spacetimedb_lib::de::SumVisitor<'de> for __SumVisitor #ty_generics #where_clause {
                        type Output = #name #ty_generics;
                        fn sum_name(&self) -> Option<&str> {
                            Some(#tuple_name)
                        }
                        fn visit_sum<A: #spacetimedb_lib::de::SumAccess<'de>>(self, __data: A) -> Result<Self::Output, A::Error> {
                            let (__variant, __access) = __data.variant(self)?;
                            match __variant {
                                #(#arms)*
                            }
                        }
                    }
                    enum __Variant {
                        #(#variant_idents,)*
                    }
                    impl #impl_generics #spacetimedb_lib::de::VariantVisitor for __SumVisitor #ty_generics #where_clause {
                        type Output = __Variant;
                        fn variant_names(&self, names: &mut dyn #spacetimedb_lib::de::ValidNames) {
                            names.extend([#(#variant_names,)*])
                        }
                        fn visit_tag<E: #spacetimedb_lib::de::Error>(self, __tag: u8) -> Result<Self::Output, E> {
                            match __tag {
                                #(#tags => Ok(__Variant::#variant_idents),)*
                                _ => Err(#spacetimedb_lib::de::Error::unknown_variant_tag(__tag, &self)),
                            }
                        }
                        fn visit_name<E: #spacetimedb_lib::de::Error>(self, __name: &str) -> Result<Self::Output, E> {
                            match __name {
                                #(#variant_names => Ok(__Variant::#variant_idents),)*
                                _ => Err(#spacetimedb_lib::de::Error::unknown_variant_name(__name, &self)),
                            }
                        }
                    }
                };
            }
        }
    }
}
/// Generate the `Serialize` impl: products serialize as named products,
/// sums as a tagged variant (tag = declaration index).
pub(crate) fn derive_serialize(ty: &SatsType) -> TokenStream {
    let spacetimedb_lib = &ty.krate;
    let name = &ty.ident;
    let (impl_generics, ty_generics, where_clause) = ty.generics.split_for_impl();
    let body = match &ty.data {
        SatsTypeData::Product(fields) => {
            let fieldnames = fields.iter().map(|field| field.ident.as_ref().unwrap());
            let tys = fields.iter().map(|f| &f.ty);
            let fieldnamestrings = fields.iter().map(|field| field.name.as_ref().unwrap());
            let nfields = fields.len();
            quote! {
                let mut __prod = __serializer.serialize_named_product(#nfields)?;
                #(#spacetimedb_lib::ser::SerializeNamedProduct::serialize_element::<#tys>(&mut __prod, Some(#fieldnamestrings), &self.#fieldnames)?;)*
                #spacetimedb_lib::ser::SerializeNamedProduct::end(__prod)
            }
        }
        SatsTypeData::Sum(variants) => {
            let arms = variants.iter().enumerate().map(|(i, var)| {
                let (name, name_str) = (var.ident, &var.name);
                let tag = i as u8;
                if let (Some(member), Some(ty)) = (&var.member, var.ty) {
                    // Span the arm at the payload type so type errors point there.
                    quote_spanned! {ty.span()=>
                        Self::#name { #member: __variant } => __serializer.serialize_variant::<#ty>(#tag, Some(#name_str), __variant),
                    }
                } else {
                    quote! {
                        Self::#name => __serializer.serialize_variant(#tag, Some(#name_str), &()),
                    }
                }
            });
            quote!(match self { #(#arms)* })
        }
    };
    quote! {
        impl #impl_generics #spacetimedb_lib::ser::Serialize for #name #ty_generics #where_clause {
            fn serialize<S: #spacetimedb_lib::ser::Serializer>(&self, __serializer: S) -> Result<S::Ok, S::Error> {
                #body
            }
        }
    }
}
/// Extract an enum variant's payload: `Ok(None)` for unit variants, otherwise
/// the accessor member (field name, or index 0 for newtypes) and its type.
/// Variants with more than one field are rejected.
fn variant_data(variant: &syn::Variant) -> syn::Result<Option<(syn::Member, &syn::Type)>> {
    let field = match &variant.fields {
        syn::Fields::Named(f) if f.named.len() == 1 => &f.named[0],
        syn::Fields::Named(_) => {
            return Err(syn::Error::new_spanned(
                &variant.fields,
                "must be a unit variant or a newtype variant",
            ))
        }
        syn::Fields::Unnamed(f) if f.unnamed.len() != 1 => {
            return Err(syn::Error::new_spanned(
                &variant.fields,
                "must be a unit variant or a newtype variant",
            ))
        }
        syn::Fields::Unnamed(f) => &f.unnamed[0],
        syn::Fields::Unit => return Ok(None),
    };
    let member = field
        .ident
        .clone()
        .map(Into::into)
        .unwrap_or_else(|| syn::Member::from(0));
    Ok(Some((member, &field.ty)))
}
+13
View File
@@ -0,0 +1,13 @@
# Low-level sys-call bindings between WASM modules and SpacetimeDB.
[package]
name = "spacetimedb-bindings-sys"
version = "0.4.1"
edition = "2021"
license-file = "LICENSE"
description = "Easy support for interacting between SpacetimeDB and Rust."

[lib]
# Benching off, because of https://bheisler.github.io/criterion.rs/book/faq.html#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options
bench = false

[dependencies]
getrandom = { version = "0.2.7", features = ["custom"], optional = true }
View File
+13
View File
@@ -0,0 +1,13 @@
/// Error code: no such table.
pub const NOTAB: u16 = 1;
/// Error code: value or range not found in the table.
pub const LOOKUP: u16 = 2;
/// Error code: value with the given unique identifier already exists.
pub const EXISTS: u16 = 3;

/// Invoke `$mac!` with the full errno → message table, so related code
/// (e.g. message lookup) is generated from a single source of truth.
macro_rules! errnos {
    ($mac:ident) => {
        $mac! {
            NOTAB => "No such table",
            LOOKUP => "Value or range provided not found in table",
            EXISTS => "Value with given unique identifier already exists",
        }
    };
}
+673
View File
@@ -0,0 +1,673 @@
//! Defines sys calls to interact with SpacetimeDB.
//! This forms an ABI of sorts that modules written in Rust can use.
extern crate alloc;
#[macro_use]
mod errno;
use core::fmt;
use core::mem::MaybeUninit;
use core::num::NonZeroU16;
use std::ptr;
use alloc::boxed::Box;
/// The current version of the ABI.
///
/// Exported as `SPACETIME_ABI_VERSION`, a `u32` WASM global.
/// If this global contains an address into linear memory at which the version is stored,
/// then a WASM global named `SPACETIME_ABI_VERSION_IS_ADDR` is also exported.
///
/// In rust this looks like:
/// ```rust,ignore
/// #[no_mangle]
/// static SPACETIME_ABI_VERSION: u32 = _; // right now, rust `static`s always export as an address.
/// #[no_mangle]
/// static SPACETIME_ABI_VERSION_IS_ADDR: () = ();
/// ```
///
/// The (big-endian) first 2 bytes constitute the major version (`A`) of the ABI,
/// and the last 2 bytes constitute the minor version (`B`).
///
/// The semantics of a version number `A.B` is that a host implementing version `A.B`
/// can run a module declaring `X.Y` if and only if `X == A && Y <= B`.
/// So, the minor version is intended for backwards-compatible changes, e.g. adding a new function,
/// and the major version is for fully breaking changes.
pub const ABI_VERSION: u32 = 0x0002_0000;
/// Provides a raw set of sys calls which abstractions can be built atop of.
pub mod raw {
    use core::mem::ManuallyDrop;

    #[link(wasm_import_module = "spacetime")]
    extern "C" {
        /*
        /// Create a table with `name`, a UTF-8 slice in WASM memory lasting `name_len` bytes,
        /// and with the table's `schema` in a slice in WASM memory lasting `schema_len` bytes.
        ///
        /// Writes the table id of the new table into the WASM pointer `out`.
        pub fn _create_table(
            name: *const u8,
            name_len: usize,
            schema: *const u8,
            schema_len: usize,
            out: *mut u32,
        ) -> u16;
        */

        /// Queries the `table_id` associated with the given (table) `name`
        /// where `name` points to a UTF-8 slice in WASM memory of `name_len` bytes.
        ///
        /// The table id is written into the `out` pointer.
        ///
        /// Returns an error if the table does not exist.
        pub fn _get_table_id(name: *const u8, name_len: usize, out: *mut u32) -> u16;

        /// Creates an index with the name `index_name` and type `index_type`,
        /// on a product of the given columns in `col_ids`
        /// in the table identified by `table_id`.
        ///
        /// Here `index_name` points to a UTF-8 slice in WASM memory
        /// and `col_ids` points to a byte slice in WASM memory with each element being a column.
        ///
        /// Currently only single-column-indices are supported
        /// and they may only be of the btree index type.
        /// In the former case, the function will panic,
        /// and in latter, an error is returned.
        ///
        /// Returns an error when a table with the provided `table_id` doesn't exist.
        pub fn _create_index(
            index_name: *const u8,
            index_name_len: usize,
            table_id: u32,
            index_type: u8,
            col_ids: *const u8,
            col_len: usize,
        ) -> u16;

        /// Finds all rows in the table identified by `table_id`,
        /// where the row has a column, identified by `col_id`,
        /// with data matching the byte string, in WASM memory, pointed to at by `value`.
        ///
        /// The rows found are bsatn encoded and then concatenated.
        /// The resulting byte string from the concatenation is written
        /// to a fresh buffer with the buffer's identifier written to the WASM pointer `out`.
        pub fn _seek_eq(table_id: u32, col_id: u32, value: *const u8, value_len: usize, out: *mut Buffer) -> u16;

        /// Insert a row into the table identified by `table_id`,
        /// where the row is read from the byte slice `row` in WASM memory,
        /// lasting `row_len` bytes.
        pub fn _insert(table_id: u32, row: *mut u8, row_len: usize) -> u16;

        /// Deletes all rows in the table identified by `table_id`
        /// where the column identified by `col_id` equates to the byte string,
        /// in WASM memory, pointed to at by `value`.
        ///
        /// The number of rows deleted is written to the WASM pointer `out`.
        ///
        /// Returns an error if no columns were deleted or if the column wasn't found.
        pub fn _delete_eq(table_id: u32, col_id: u32, value: *const u8, value_len: usize, out: *mut u32) -> u16;

        /*
        /// Deletes the primary key pointed to at by `pk` in the table identified by `table_id`.
        pub fn _delete_pk(table_id: u32, pk: *const u8, pk_len: usize) -> u16;
        pub fn _delete_value(table_id: u32, row: *const u8, row_len: usize) -> u16;
        pub fn _delete_range(
            table_id: u32,
            col_id: u32,
            range_start: *const u8,
            range_start_len: usize,
            range_end: *const u8,
            range_end_len: usize,
            out: *mut u32,
        ) -> u16;
        */

        /// Start iteration on each row, as bytes, of a table identified by `table_id`.
        ///
        /// The iterator is registered in the host environment
        /// under an assigned index which is written to the `out` pointer provided.
        pub fn _iter_start(table_id: u32, out: *mut BufferIter) -> u16;

        /// Like [`_iter_start`], start iteration on each row,
        /// as bytes, of a table identified by `table_id`.
        ///
        /// The rows are filtered through `filter`, which is read from WASM memory
        /// and is encoded in the embedded language defined by `spacetimedb_lib::filter::Expr`.
        ///
        /// The iterator is registered in the host environment
        /// under an assigned index which is written to the `out` pointer provided.
        pub fn _iter_start_filtered(table_id: u32, filter: *const u8, filter_len: usize, out: *mut BufferIter) -> u16;

        /// Advances the registered iterator handle `iter`.
        ///
        /// On success, the next element (the row as bytes) is written to a buffer.
        /// The buffer's index is returned and written to the `out` pointer.
        /// If there are no elements left, an invalid buffer index is written to `out`.
        /// On failure however, the error is returned.
        pub fn _iter_next(iter: ManuallyDrop<BufferIter>, out: *mut Buffer) -> u16;

        /// Drops the entire registered iterator identified by the handle `iter`.
        /// The iterator is effectively de-registered.
        ///
        /// Returns an error if the iterator does not exist.
        pub fn _iter_drop(iter: ManuallyDrop<BufferIter>) -> u16;

        /// Log at `level` a `text` message occurring in `filename:line_number`
        /// with [`target`] being the module path at the `log!` invocation site.
        ///
        /// These various pointers are interpreted lossily as UTF-8 strings with a corresponding `_len`.
        ///
        /// [`target`]: https://docs.rs/log/latest/log/struct.Record.html#method.target
        pub fn _console_log(
            level: u8,
            target: *const u8,
            target_len: usize,
            filename: *const u8,
            filename_len: usize,
            line_number: u32,
            text: *const u8,
            text_len: usize,
        );

        /// Schedule a reducer to be called asynchronously at `time`.
        ///
        /// The reducer is named as the UTF-8 slice `(name, name_len)`,
        /// and is passed the slice `(args, args_len)` as its argument.
        ///
        /// A generated schedule id is assigned to the reducer.
        /// This id is written to the pointer `out`.
        pub fn _schedule_reducer(
            name: *const u8,
            name_len: usize,
            args: *const u8,
            args_len: usize,
            time: u64,
            out: *mut u64,
        );

        /// Unschedule a reducer using the same `id` generated as when it was scheduled.
        ///
        /// This assumes that the reducer hasn't already been executed.
        pub fn _cancel_reducer(id: u64);

        /// Returns the length of buffer `bufh` without consuming the buffer handle.
        ///
        /// Returns an error if the buffer does not exist.
        /// NOTE(review): the signature returns `usize`, not a `u16` status code —
        /// confirm how the error case is actually signalled.
        pub fn _buffer_len(bufh: ManuallyDrop<Buffer>) -> usize;

        /// Consumes the buffer `bufh`, moving its contents to the slice `(into, len)`.
        ///
        /// Returns an error if the buffer does not exist.
        /// NOTE(review): the signature returns `()` — presumably the host traps on
        /// a missing buffer or length mismatch; confirm.
        pub fn _buffer_consume(bufh: Buffer, into: *mut u8, len: usize);

        /// Creates a buffer of size `data_len` in the host environment.
        /// The buffer is initialized with the contents at the `data` WASM pointer.
        pub fn _buffer_alloc(data: *const u8, data_len: usize) -> Buffer;
    }

    /// What strategy does the database index use?
    ///
    /// See also: https://www.postgresql.org/docs/current/sql-createindex.html
    #[repr(u8)]
    #[non_exhaustive]
    pub enum IndexType {
        /// Indexing works by putting the index key into a b-tree.
        BTree = 0,
        /// Indexing works by hashing the index key.
        Hash = 1,
    }

    /// The error log level. See [`_console_log`].
    pub const LOG_LEVEL_ERROR: u8 = 0;
    /// The warn log level. See [`_console_log`].
    pub const LOG_LEVEL_WARN: u8 = 1;
    /// The info log level. See [`_console_log`].
    pub const LOG_LEVEL_INFO: u8 = 2;
    /// The debug log level. See [`_console_log`].
    pub const LOG_LEVEL_DEBUG: u8 = 3;
    /// The trace log level. See [`_console_log`].
    pub const LOG_LEVEL_TRACE: u8 = 4;
    /// The panic log level. See [`_console_log`].
    ///
    /// A panic level is emitted just before a fatal error causes the WASM module to trap.
    pub const LOG_LEVEL_PANIC: u8 = 101;

    /// A handle into a buffer of bytes in the host environment.
    ///
    /// Used for transporting bytes host <-> WASM linear memory.
    #[repr(transparent)]
    pub struct Buffer {
        /// The actual handle. A key into a `ResourceSlab`.
        raw: u32,
    }

    impl Buffer {
        /// Returns a "handle" that can be passed across the FFI boundary
        /// as if it was the Buffer itself, but without consuming it.
        pub const fn handle(&self) -> ManuallyDrop<Self> {
            ManuallyDrop::new(Self { raw: self.raw })
        }

        /// An invalid buffer handle.
        ///
        /// Could happen if too many buffers exist, making the key overflow a `u32`.
        /// `INVALID` is also used for parts of the protocol
        /// that are "morally" sending a `None`s in `Option<Box<[u8]>>`s.
        pub const INVALID: Self = Self { raw: u32::MAX };

        /// Is the buffer handle invalid?
        pub const fn is_invalid(&self) -> bool {
            self.raw == Self::INVALID.raw
        }
    }

    /// Represents table iterators, with a similar API to [`Buffer`].
    #[repr(transparent)]
    pub struct BufferIter {
        // The actual handle, assigned by the host on `_iter_start`.
        raw: u32,
    }

    impl BufferIter {
        /// Returns a handle usable for non-consuming operations.
        pub const fn handle(&self) -> ManuallyDrop<Self> {
            ManuallyDrop::new(Self { raw: self.raw })
        }
    }

    // Never compiled (`cfg(any())` is always false); this block documents the
    // function exports a module is expected to provide to the host.
    #[cfg(any())]
    mod module_exports {
        type Encoded<T> = Buffer;
        type Identity = Encoded<[u8; 32]>;
        /// microseconds since the unix epoch
        type Timestamp = u64;
        /// Buffer::INVALID => Ok(()); else errmsg => Err(errmsg)
        type Result = Buffer;
        extern "C" {
            /// All functions prefixed with `__preinit__` are run first in alphabetical order.
            /// For those it's recommended to use /etc/xxxx.d conventions of like `__preinit__20_do_thing`:
            /// <https://man7.org/linux/man-pages/man5/sysctl.d.5.html#CONFIGURATION_DIRECTORIES_AND_PRECEDENCE>
            fn __preinit__XX_XXXX();
            /// Optional. Run after `__preinit__`; can return an error. Intended for dynamic languages; this
            /// would be where you would initialize the interpreter and load the user module into it.
            fn __setup__() -> Result;
            /// Required. Runs after `__setup__`; returns all the exports for the module.
            fn __describe_module__() -> Encoded<ModuleDef>;
            /// Required. id is an index into the `ModuleDef.reducers` returned from `__describe_module__`.
            /// args is a bsatn-encoded product value defined by the schema at `reducers[id]`.
            fn __call_reducer__(id: usize, sender: Identity, timestamp: Timestamp, args: Buffer) -> Result;
            /// Optional. Called when a client connects to the database.
            fn __identity_connected__(sender: Identity, timestamp: Timestamp) -> Result;
            /// Optional. Called when a client disconnects from the database.
            fn __identity_disconnected__(sender: Identity, timestamp: Timestamp) -> Result;
            /// Currently unused?
            fn __migrate_database__XXXX(sender: Identity, timestamp: Timestamp, something: Buffer) -> Result;
        }
    }
}
/// Error values used in the safe bindings API.
///
/// Wraps a non-zero status code returned by the raw sys calls
/// (`0` is reserved for success, hence `NonZeroU16`).
#[derive(Copy, Clone, PartialEq, Eq)]
#[repr(transparent)]
pub struct Errno(NonZeroU16);

// once Error gets exposed from core this crate can be no_std again
impl std::error::Error for Errno {}

/// Given the `NAME => "description",` list from `errnos!`, generates:
/// - an associated `Errno` constant per error code, and
/// - `strerror`, mapping an `Errno` back to its description.
macro_rules! def_errno {
    ($($name:ident => $desc:literal,)*) => {
        impl Errno {
            // SAFETY: We've checked that `errnos!` contains no `0` values.
            $(#[doc = $desc] pub const $name: Errno = Errno(unsafe { NonZeroU16::new_unchecked(errno::$name) });)*
        }
        /// Returns a string representation of the error.
        const fn strerror(err: Errno) -> Option<&'static str> {
            match err {
                $(Errno::$name => Some($desc),)*
                _ => None,
            }
        }
    };
}
// Expand `def_errno` over the canonical error list in the `errno` module.
errnos!(def_errno);
impl Errno {
    /// Returns a description of the errno value, if any.
    pub const fn message(self) -> Option<&'static str> {
        strerror(self)
    }

    /// Converts the given `code` to an error number in `Errno`'s representation.
    ///
    /// Returns `None` for `0`, which denotes success rather than an error.
    #[inline]
    pub const fn from_code(code: u16) -> Option<Self> {
        // `match` rather than `Option::map` because this is a `const fn`.
        match NonZeroU16::new(code) {
            Some(code) => Some(Errno(code)),
            None => None,
        }
    }

    /// Converts this `errno` into a primitive error code.
    #[inline]
    pub const fn code(self) -> u16 {
        self.0.get()
    }
}
impl fmt::Debug for Errno {
    /// Formats as `Errno { code: .., message: .. }`, including `message`
    /// only when the code has a known description.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut builder = f.debug_struct("Errno");
        builder.field("code", &self.code());
        match self.message() {
            Some(desc) => builder.field("message", &desc).finish(),
            None => builder.finish(),
        }
    }
}
impl fmt::Display for Errno {
    /// Formats as `"<description> (error <code>)"`,
    /// falling back to `"Unknown error"` for unrecognized codes.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let desc = match self.message() {
            Some(msg) => msg,
            None => "Unknown error",
        };
        write!(f, "{} (error {})", desc, self.code())
    }
}
/// Convert the status value `x` into a result.
/// When `x = 0`, we have a success status; any other code maps to its `Errno`.
fn cvt(x: u16) -> Result<(), Errno> {
    Errno::from_code(x).map_or(Ok(()), Err)
}
/// Runs the given function `f` provided with an uninitialized `out` pointer.
///
/// Assuming the call to `f` succeeds (`Ok(_)`), the `out` pointer's value is returned.
///
/// # Safety
///
/// This function is safe to call, if and only if,
/// - The function `f` writes a safe and valid `T` to the `out` pointer.
///   It's not required to write to `out` when `f(out)` returns an error code.
/// - The function `f` never reads a safe and valid `T` from the `out` pointer
///   before writing a safe and valid `T` to it.
/// - If running `Drop` on `T` is required for safety,
///   `f` must never panic nor return an error once `out` has been written to.
#[inline]
unsafe fn call<T>(f: impl FnOnce(*mut T) -> u16) -> Result<T, Errno> {
    let mut out = MaybeUninit::uninit();
    // TODO: If we have a panic here after writing a safe `T` to `out`,
    // we may have a memory leak if `T` requires running `Drop` for cleanup.
    let f_code = f(out.as_mut_ptr());
    // TODO: A memory leak may also result due to an error code from `f(out)`
    // if `out` has been written to.
    cvt(f_code)?;
    // SAFETY (relies on caller contract): `f` returned success, so it wrote a valid `T`.
    Ok(out.assume_init())
}
/*
/// Create a table with `name`, a UTF-8 slice in WASM memory lasting `name_len` bytes,
/// and with the table's `schema` in a slice in WASM memory lasting `schema_len` bytes.
///
/// Returns the table id of the new table.
#[inline]
pub fn create_table(name: &str, schema: &[u8]) -> Result<u32, Errno> {
unsafe { call(|out| raw::_create_table(name.as_ptr(), name.len(), schema.as_ptr(), schema.len(), out)) }
}
*/
/// Queries and returns the `table_id` associated with the given (table) `name`.
///
/// Returns an error if the table does not exist.
#[inline]
pub fn get_table_id(name: &str) -> Result<u32, Errno> {
    // SAFETY: per the `_get_table_id` contract, the host writes the id to `out` on success.
    unsafe { call(|out| raw::_get_table_id(name.as_ptr(), name.len(), out)) }
}
/// Creates an index with the name `index_name` and type `index_type`,
/// on a product of the given columns ids in `col_ids`,
/// identifying columns in the table identified by `table_id`.
///
/// Currently only single-column-indices are supported
/// and they may only be of the btree index type.
/// In the former case, the function will panic,
/// and in latter, an error is returned.
///
/// Returns an error when a table with the provided `table_id` doesn't exist.
#[inline]
pub fn create_index(index_name: &str, table_id: u32, index_type: u8, col_ids: &[u8]) -> Result<(), Errno> {
    cvt(unsafe {
        raw::_create_index(
            index_name.as_ptr(),
            index_name.len(),
            table_id,
            index_type,
            col_ids.as_ptr(),
            col_ids.len(),
        )
    })
}
/// Finds all rows in the table identified by `table_id`,
/// where the row has a column, identified by `col_id`,
/// with data matching `val`.
///
/// The rows found are bsatn encoded and then concatenated.
/// The resulting byte string from the concatenation is written
/// to a fresh buffer with a handle to it returned as a `Buffer`.
#[inline]
pub fn seek_eq(table_id: u32, col_id: u32, val: &[u8]) -> Result<Buffer, Errno> {
    // SAFETY: `_seek_eq` writes a fresh buffer handle to `out` on success.
    unsafe { call(|out| raw::_seek_eq(table_id, col_id, val.as_ptr(), val.len(), out)) }
}
/// Insert `row`, provided as a byte slice, into the table identified by `table_id`.
///
/// NOTE(review): `row` is `&mut` because `_insert` takes `*mut u8` —
/// presumably the host may write generated column values back; confirm.
#[inline]
pub fn insert(table_id: u32, row: &mut [u8]) -> Result<(), Errno> {
    cvt(unsafe { raw::_insert(table_id, row.as_mut_ptr(), row.len()) })
}
/// Deletes all rows in the table identified by `table_id`
/// where the column identified by `col_id` equates to `value`.
///
/// Returns the number of rows deleted
/// or an error if no columns were deleted or if the column wasn't found.
#[inline]
pub fn delete_eq(table_id: u32, col_id: u32, value: &[u8]) -> Result<u32, Errno> {
    // SAFETY: `_delete_eq` writes the deleted-row count to `out` on success.
    unsafe { call(|out| raw::_delete_eq(table_id, col_id, value.as_ptr(), value.len(), out)) }
}
/*
#[inline]
pub fn delete_pk(table_id: u32, pk: &[u8]) -> Result<(), Errno> {
cvt(unsafe { raw::_delete_pk(table_id, pk.as_ptr(), pk.len()) })
}
#[inline]
pub fn delete_value(table_id: u32, row: &[u8]) -> Result<(), Errno> {
cvt(unsafe { raw::_delete_value(table_id, row.as_ptr(), row.len()) })
}
#[inline]
pub fn delete_range(table_id: u32, col_id: u32, range_start: &[u8], range_end: &[u8]) -> Result<u32, Errno> {
unsafe {
call(|out| {
raw::_delete_range(
table_id,
col_id,
range_start.as_ptr(),
range_start.len(),
range_end.as_ptr(),
range_end.len(),
out,
)
})
}
}
*/
/// Returns an iterator for each row, as bytes, of a table identified by `table_id`.
/// The rows can be put through an optional `filter`,
/// which is encoded in the embedded language defined by `spacetimedb_lib::filter::Expr`.
///
/// The actual return value is a handle to an iterator registered with the host environment,
/// but [`BufferIter`] can be used directly as an `Iterator`.
#[inline]
pub fn iter(table_id: u32, filter: Option<&[u8]>) -> Result<BufferIter, Errno> {
    // SAFETY: both `_iter_start*` variants write the iterator handle to `out` on success.
    unsafe {
        call(|out| match filter {
            None => raw::_iter_start(table_id, out),
            Some(filter) => raw::_iter_start_filtered(table_id, filter.as_ptr(), filter.len(), out),
        })
    }
}
/// A log level that can be used in `console_log`.
/// The variants are convertible into a raw `u8` log level
/// (the discriminants are exactly the `raw::LOG_LEVEL_*` constants).
#[repr(u8)]
pub enum LogLevel {
    /// The error log level. See [`console_log`].
    Error = raw::LOG_LEVEL_ERROR,
    /// The warn log level. See [`console_log`].
    Warn = raw::LOG_LEVEL_WARN,
    /// The info log level. See [`console_log`].
    Info = raw::LOG_LEVEL_INFO,
    /// The debug log level. See [`console_log`].
    Debug = raw::LOG_LEVEL_DEBUG,
    /// The trace log level. See [`console_log`].
    Trace = raw::LOG_LEVEL_TRACE,
    /// The panic log level. See [`console_log`].
    ///
    /// A panic level is emitted just before a fatal error causes the WASM module to trap.
    Panic = raw::LOG_LEVEL_PANIC,
}
/// Log at `level` a `text` message occurring in `filename:line_number`
/// with [`target`] being the module path at the `log!` invocation site.
///
/// `None` for `target`/`filename` is passed as a null pointer with length 0;
/// `None` for `line_number` is passed as `u32::MAX`.
///
/// [`target`]: https://docs.rs/log/latest/log/struct.Record.html#method.target
#[inline]
pub fn console_log(
    level: LogLevel,
    target: Option<&str>,
    filename: Option<&str>,
    line_number: Option<u32>,
    text: &str,
) {
    /// Lowers an optional string to the `(ptr, len)` pair the raw ABI expects.
    fn as_raw_parts(s: Option<&str>) -> (*const u8, usize) {
        s.map_or((ptr::null(), 0), |s| (s.as_ptr(), s.len()))
    }
    let (target_ptr, target_len) = as_raw_parts(target);
    let (filename_ptr, filename_len) = as_raw_parts(filename);
    unsafe {
        raw::_console_log(
            level as u8,
            target_ptr,
            target_len,
            filename_ptr,
            filename_len,
            line_number.unwrap_or(u32::MAX),
            text.as_ptr(),
            text.len(),
        )
    }
}
/// Schedule a reducer to be called asynchronously at `time`.
///
/// The reducer is assigned `name` and is provided `args` as its argument.
///
/// A generated schedule id is assigned to the reducer which is returned.
///
/// TODO: not fully implemented yet
/// TODO(Centril): Unsure what is unimplemented; perhaps it refers to a new
/// implementation with a special system table rather than a special sys call.
#[inline]
pub fn schedule(name: &str, args: &[u8], time: u64) -> u64 {
    let mut out = 0;
    // `_schedule_reducer` writes the generated schedule id to `out`.
    unsafe { raw::_schedule_reducer(name.as_ptr(), name.len(), args.as_ptr(), args.len(), time, &mut out) }
    out
}
/// Unschedule a reducer using the same `id` generated as when it was scheduled.
///
/// This assumes that the reducer hasn't already been executed.
pub fn cancel_reducer(id: u64) {
    unsafe { raw::_cancel_reducer(id) }
}
pub use raw::{Buffer, BufferIter};
impl Buffer {
    /// Returns the number of bytes of the data stored in the buffer.
    pub fn data_len(&self) -> usize {
        // `handle()` passes the buffer by reference-like handle without consuming it.
        unsafe { raw::_buffer_len(self.handle()) }
    }

    /// Read the contents of the buffer into a boxed byte slice,
    /// consuming the buffer handle.
    pub fn read(self) -> Box<[u8]> {
        let len = self.data_len();
        let mut buf = alloc::vec::Vec::with_capacity(len);
        // Fill the spare capacity directly; `read_uninit` consumes `self`.
        self.read_uninit(buf.spare_capacity_mut());
        // SAFETY: We just wrote `len` bytes to `buf`.
        unsafe { buf.set_len(len) };
        buf.into_boxed_slice()
    }

    /// Read the contents of the buffer into an array of fixed size `N`.
    ///
    /// If the length is wrong, the module will crash.
    pub fn read_array<const N: usize>(self) -> [u8; N] {
        // use MaybeUninit::uninit_array once stable
        // SAFETY: an array of `MaybeUninit` is always "initialized" as a whole.
        let mut arr = unsafe { MaybeUninit::<[MaybeUninit<u8>; N]>::uninit().assume_init() };
        self.read_uninit(&mut arr);
        // use MaybeUninit::array_assume_init once stable
        // SAFETY: `read_uninit` filled all `N` bytes (or crashed the module).
        unsafe { (&arr as *const [_; N]).cast::<[u8; N]>().read() }
    }

    /// Reads the buffer into an uninitialized byte string `buf`,
    /// consuming the buffer handle.
    ///
    /// The module will crash if `buf`'s length doesn't match the buffer.
    pub fn read_uninit(self, buf: &mut [MaybeUninit<u8>]) {
        unsafe { raw::_buffer_consume(self, buf.as_mut_ptr().cast(), buf.len()) }
    }

    /// Allocates a host-side buffer with the contents of `data`.
    pub fn alloc(data: &[u8]) -> Self {
        unsafe { raw::_buffer_alloc(data.as_ptr(), data.len()) }
    }
}
impl Iterator for BufferIter {
    /// Each item is one host-provided buffer of bytes, or the errno on failure.
    type Item = Result<Box<[u8]>, Errno>;

    fn next(&mut self) -> Option<Self::Item> {
        // SAFETY: `_iter_next` writes a buffer handle to `out` on success.
        let buf = unsafe { call(|out| raw::_iter_next(self.handle(), out)) };
        match buf {
            // An invalid buffer handle signals end of iteration (see `Buffer::INVALID`).
            Ok(buf) if buf.is_invalid() => None,
            Ok(buf) => Some(Ok(buf.read())),
            Err(e) => Some(Err(e)),
        }
    }
}
impl Drop for BufferIter {
    /// De-registers the iterator in the host environment.
    /// Panics (via `unwrap`) if the host reports the iterator doesn't exist.
    fn drop(&mut self) {
        cvt(unsafe { raw::_iter_drop(self.handle()) }).unwrap();
    }
}
// TODO: eventually there should be a way to set a consistent random seed for a module
/// Deterministic stand-in for `getrandom`: fills `buf` with a fixed 4-byte
/// cycle XORed with the byte index. Always returns `Ok(())`.
#[cfg(feature = "getrandom")]
fn fake_random(buf: &mut [u8]) -> Result<(), getrandom::Error> {
    // Iterator form replaces the index loop (and the clippy allow it needed).
    for (i, byte) in buf.iter_mut().enumerate() {
        let start = match i % 4 {
            0 => 0x64,
            1 => 0xe9,
            2 => 0x48,
            _ => 0xb5,
        };
        *byte = (start ^ i) as u8;
    }
    Ok(())
}
#[cfg(feature = "getrandom")]
getrandom::register_custom_getrandom!(fake_random);
+28
View File
@@ -0,0 +1,28 @@
[package]
name = "spacetimedb"
version = "0.4.1"
edition = "2021"
license-file = "LICENSE"
description = "Easy support for interacting between SpacetimeDB and Rust."
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
name = "spacetimedb" # The name of the target.
path = "src/lib.rs" # The source file of the target.
# Benching off, because of https://bheisler.github.io/criterion.rs/book/faq.html#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options
bench = false
[features]
getrandom = ["spacetimedb-bindings-sys/getrandom"]
[dependencies]
spacetimedb-bindings-sys = { path = "../bindings-sys", version = "0.4.1" }
spacetimedb-lib = { path = "../lib", default-features = false, version = "0.4.1"}
spacetimedb-bindings-macro = { path = "../bindings-macro", version = "0.4.1"}
once_cell = "1.15.0"
scoped-tls = "1.0.1"
log = "0.4.17"
[dev-dependencies]
rand = "0.8.5"
bytes = "1.2.1"
View File
+56
View File
@@ -0,0 +1,56 @@
use spacetimedb_lib::{DataKey, Hash, Identity};
use super::PrimaryKey;
use crate::{FilterableValue, UniqueValue};
// Implements `FilterableValue` and `UniqueValue` for each listed primitive `$t`.
// NOTE(review): the `$x` identifiers are not used by the expansion —
// presumably they name value variants; confirm whether they're vestigial.
macro_rules! impl_primitives {
    (uniq { $($t:ty => $x:ident,)*}) => {
        $(
            impl FilterableValue for $t {}
            impl UniqueValue for $t {
                fn into_primarykey(self) -> PrimaryKey {
                    todo!() // idk what this is
                }
            }
        )*
    };
}
// `u8` is implemented outside the `impl_primitives!` call below, although the
// expansion is identical. NOTE(review): could likely be folded into the macro list.
impl FilterableValue for u8 {}
impl UniqueValue for u8 {
    fn into_primarykey(self) -> PrimaryKey {
        todo!() // idk what this is
    }
}
// Blanket `FilterableValue`/`UniqueValue` impls for the remaining primitives.
impl_primitives! {
    uniq {
        i8 => I8,
        u16 => U16,
        i16 => I16,
        u32 => U32,
        i32 => I32,
        u64 => U64,
        i64 => I64,
        u128 => U128,
        i128 => I128,
        bool => Bool,
        String => String,
    }
}
// `Hash` is the only type here with a working `into_primarykey`:
// it wraps itself in `DataKey::Hash`.
impl FilterableValue for Hash {}
impl UniqueValue for Hash {
    fn into_primarykey(self) -> PrimaryKey {
        PrimaryKey {
            data_key: DataKey::Hash(self),
        }
    }
}

impl FilterableValue for Identity {}
impl UniqueValue for Identity {
    fn into_primarykey(self) -> PrimaryKey {
        todo!() // idk what this is
    }
}
+48
View File
@@ -0,0 +1,48 @@
// Shadows `std::println!`, forwarding to `$crate::log::info!`.
#[doc(hidden)]
#[macro_export]
macro_rules! println {
    ($($arg:tt)*) => ($crate::log::info!($($arg)*))
}

// Shadows `std::print!`, forwarding to `$crate::log::info!`.
#[doc(hidden)]
#[macro_export]
macro_rules! print {
    ($($arg:tt)*) => ($crate::log::info!($($arg)*))
}

// Shadows `std::eprintln!`, forwarding to `$crate::log::error!`.
#[doc(hidden)]
#[macro_export]
macro_rules! eprintln {
    ($($arg:tt)*) => ($crate::log::error!($($arg)*))
}

// Shadows `std::eprint!`, forwarding to `$crate::log::error!`.
#[doc(hidden)]
#[macro_export]
macro_rules! eprint {
    ($($arg:tt)*) => ($crate::log::error!($($arg)*))
}
/// Replacement for `std::dbg!` that reports via `$crate::log::debug!`,
/// preserving `dbg!`'s pass-through-value semantics.
#[macro_export]
macro_rules! dbg {
    // NOTE: We cannot use `concat!` to make a static string as a format argument
    // of `eprintln!` because `file!` could contain a `{` or
    // `$val` expression could be a block (`{ .. }`), in which case the `eprintln!`
    // will be malformed.
    () => {
        $crate::log::debug!("[{}:{}]", file!(), line!())
    };
    ($val:expr $(,)?) => {
        // Use of `match` here is intentional because it affects the lifetimes
        // of temporaries - https://stackoverflow.com/a/48732525/1063961
        match $val {
            tmp => {
                $crate::log::debug!("{} = {:#?}",
                    stringify!($val), &tmp);
                tmp
            }
        }
    };
    ($($val:expr),+ $(,)?) => {
        ($($crate::dbg!($val)),+,)
    };
}
+616
View File
@@ -0,0 +1,616 @@
//! Provides safe abstractions around `bindings-sys`
//! and re-exports `#[spacetimedb]` and `#[duration]`.
#[macro_use]
mod io;
mod impls;
mod logger;
#[doc(hidden)]
pub mod rt;
mod types;
use spacetimedb_lib::buffer::{BufReader, BufWriter, Cursor, DecodeError};
pub use spacetimedb_lib::de::{Deserialize, DeserializeOwned};
pub use spacetimedb_lib::ser::Serialize;
use spacetimedb_lib::{bsatn, ColumnIndexAttribute, IndexType, PrimaryKey, ProductType, ProductValue};
use std::cell::RefCell;
use std::marker::PhantomData;
use std::{fmt, panic};
pub use spacetimedb_bindings_macro::{duration, query, spacetimedb, TableType};
pub use sats::SpacetimeType;
pub use spacetimedb_lib;
pub use spacetimedb_lib::sats;
pub use spacetimedb_lib::AlgebraicValue;
pub use spacetimedb_lib::Identity;
pub use types::Timestamp;
pub use spacetimedb_bindings_sys as sys;
pub use sys::Errno;
use sys::{Buffer, BufferIter};
pub use log;
pub type Result<T = (), E = Errno> = core::result::Result<T, E>;
/// The ABI version this module was built against, exported for the host.
///
/// The const-eval `assert!` guarantees at compile time that
/// `spacetimedb_lib`'s declared version matches the sys crate's.
#[no_mangle]
static SPACETIME_ABI_VERSION: u32 = {
    assert!(spacetimedb_lib::MODULE_ABI_VERSION.to_u32() == sys::ABI_VERSION);
    sys::ABI_VERSION
};
/// Marker export: rust `static`s export as an address, so this tells the host
/// to dereference `SPACETIME_ABI_VERSION` rather than read it as a value
/// (see the docs on `sys::ABI_VERSION`).
#[no_mangle]
static SPACETIME_ABI_VERSION_IS_ADDR: () = ();
/// Contextual information passed to every reducer invocation:
/// who called it and when.
#[non_exhaustive]
#[derive(Copy, Clone)]
pub struct ReducerContext {
    /// The identity of the caller that invoked the reducer.
    pub sender: Identity,
    /// The time at which the reducer was invoked.
    pub timestamp: Timestamp,
}

impl ReducerContext {
    /// Builds a placeholder context (zeroed identity, epoch timestamp).
    /// For internal/macro use only.
    #[doc(hidden)]
    pub fn __dummy() -> Self {
        Self {
            sender: Identity { data: [0; 32] },
            timestamp: Timestamp::UNIX_EPOCH,
        }
    }
}
// #[cfg(target_arch = "wasm32")]
// #[global_allocator]
// static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
// this gets optimized away to a normal global since wasm32 doesn't have threads by default
thread_local! {
    /// Reusable scratch space for encoding/decoding rows, so each call
    /// doesn't pay for a fresh allocation.
    static ROW_BUF: RefCell<Vec<u8>> = RefCell::new(Vec::with_capacity(8 * 1024));
}

/// Runs `f` with exclusive access to the cleared thread-local row buffer,
/// returning whatever `f` produces.
fn with_row_buf<R>(f: impl FnOnce(&mut Vec<u8>) -> R) -> R {
    ROW_BUF.with(|cell| {
        let mut scratch = cell.borrow_mut();
        // Contents from the previous use are stale; always start empty.
        scratch.clear();
        f(&mut scratch)
    })
}
/// Encodes `row` (bsatn) into the writer `bytes`.
pub fn encode_row(row: ProductValue, bytes: &mut impl BufWriter) {
    row.encode(bytes);
}

/// Decodes one row from `bytes`, type-directed by `schema`.
pub fn decode_row<'a>(schema: &ProductType, bytes: &mut impl BufReader<'a>) -> Result<ProductValue, DecodeError> {
    ProductValue::decode(schema, bytes)
}

/// Encodes the table `schema` into the writer `bytes`.
pub fn encode_schema(schema: ProductType, bytes: &mut impl BufWriter) {
    schema.encode(bytes);
}

/// Decodes a table schema (`ProductType`) from `bytes`.
pub fn decode_schema<'a>(bytes: &mut impl BufReader<'a>) -> Result<ProductType, DecodeError> {
    ProductType::decode(bytes)
}
/*
pub fn create_table(table_name: &str, schema: ProductType) -> Result<u32> {
with_row_buf(|bytes| {
schema.encode(bytes);
sys::create_table(table_name, bytes)
})
}
*/
/// Queries the id of the table named `table_name`.
///
/// Panics if no table with that name exists.
pub fn get_table_id(table_name: &str) -> u32 {
    match sys::get_table_id(table_name) {
        Ok(id) => id,
        Err(_) => panic!("Failed to get table with name: {}", table_name),
    }
}
/// Inserts `row` into the table identified by `table_id`,
/// returning the table type's `InsertResult`.
pub fn insert<T: TableType>(table_id: u32, row: T) -> T::InsertResult {
    // Local helper trait whose associated const is evaluated at compile time:
    // does any column of `T` carry the autoinc attribute?
    trait HasAutoinc: TableType {
        const HAS_AUTOINC: bool;
    }
    impl<T: TableType> HasAutoinc for T {
        const HAS_AUTOINC: bool = {
            // `while` loop because `for`/iterators aren't usable in const eval.
            let mut i = 0;
            let mut x = false;
            while i < T::COLUMN_ATTRS.len() {
                if T::COLUMN_ATTRS[i].is_autoinc() {
                    x = true;
                    break;
                }
                i += 1;
            }
            x
        };
    }
    with_row_buf(|bytes| {
        // Encode the row into the scratch buffer and hand it to the host.
        bsatn::to_writer(bytes, &row).unwrap();
        let res = sys::insert(table_id, bytes).map(|()| {
            if <T as HasAutoinc>::HAS_AUTOINC {
                // The host wrote the generated autoinc values back into `bytes`;
                // re-decode so the caller sees them.
                bsatn::from_slice(bytes).expect("decode error")
            } else {
                row
            }
        });
        sealed::InsertResult::from_res(res)
    })
}
/// Finds all rows in the table identified by `table_id`
/// where the column identified by `col_id` matches `val`,
/// returned as a host-side `Buffer` of concatenated bsatn-encoded rows.
pub fn seek_eq(table_id: u32, col_id: u8, val: &impl Serialize) -> Result<Buffer> {
    with_row_buf(|bytes| {
        // bsatn-encode the probe value into the scratch buffer.
        bsatn::to_writer(bytes, val).unwrap();
        // `.into()` (lossless u8 -> u32) for consistency with `delete_eq`,
        // instead of an `as` cast.
        sys::seek_eq(table_id, col_id.into(), bytes)
    })
}
/// Deletes all rows in the table identified by `table_id`
/// where the column identified by `col_id` equals `eq_value`,
/// returning the number of rows deleted.
pub fn delete_eq(table_id: u32, col_id: u8, eq_value: &impl Serialize) -> Result<u32> {
    with_row_buf(|bytes| {
        // bsatn-encode the comparison value into the scratch buffer.
        bsatn::to_writer(bytes, eq_value).unwrap();
        sys::delete_eq(table_id, col_id.into(), bytes)
    })
}
/*
pub fn delete_pk(table_id: u32, primary_key: &PrimaryKey) -> Result<()> {
with_row_buf(|bytes| {
primary_key.encode(bytes);
sys::delete_pk(table_id, bytes)
})
}
pub fn delete_filter<F: Fn(&ProductValue) -> bool>(table_id: u32, f: F) -> Result<usize> {
with_row_buf(|bytes| {
let mut count = 0;
for tuple_value in pv_table_iter(table_id, None)? {
if f(&tuple_value) {
count += 1;
bytes.clear();
tuple_value.encode(bytes);
sys::delete_value(table_id, bytes)?;
}
}
Ok(count)
})
}
pub fn delete_range(table_id: u32, col_id: u8, range: Range<AlgebraicValue>) -> Result<u32> {
with_row_buf(|bytes| {
range.start.encode(bytes);
let mid = bytes.len();
range.end.encode(bytes);
let (range_start, range_end) = bytes.split_at(mid);
sys::delete_range(table_id, col_id.into(), range_start, range_end)
})
}
*/
// TODO: going to have to somehow ensure AlgebraicValue is equatable
// pub fn filter_eq(_table_id: u32, _col_id: u8, _eq_value: AlgebraicValue) -> Option<ProductValue> {
// return None;
// }
//
// fn page_table(table_id : u32, pager_token : u32, read_entries : u32) {
//
// }
/// Gets the buffer iterator for the table `table_id`, optionally filtered by
/// `filter`, and returns it together with the table's decoded `ProductType`
/// schema (sent by the host as the iterator's first item).
fn buffer_table_iter(
    table_id: u32,
    filter: Option<spacetimedb_lib::filter::Expr>,
) -> Result<(BufferIter, ProductType)> {
    // Serialize the filter, if any, to bsatn for the host.
    // Fix: this step *encodes* the filter; the old expect message said "decode".
    let filter = filter
        .as_ref()
        .map(bsatn::to_vec)
        .transpose()
        .expect("Couldn't encode the filter query");
    let mut iter = sys::iter(table_id, filter.as_deref())?;
    // First item is an encoded schema.
    let schema_raw = iter.next().expect("Missing schema").expect("Failed to get schema");
    let schema = decode_schema(&mut &schema_raw[..]).expect("Could not decode schema");
    Ok((iter, schema))
}
/// A table iterator which yields `ProductValue`s.
// type ProductValueTableIter = RawTableIter<ProductValue, ProductValueBufferDeserialize>;
// fn pv_table_iter(table_id: u32, filter: Option<spacetimedb_lib::filter::Expr>) -> Result<ProductValueTableIter> {
// let (iter, schema) = buffer_table_iter(table_id, filter)?;
// let deserializer = ProductValueBufferDeserialize::new(schema);
// Ok(RawTableIter::new(iter, deserializer))
// }
/// A table iterator which yields values of the `TableType` corresponding to the table.
type TableTypeTableIter<T> = RawTableIter<T, TableTypeBufferDeserialize<T>>;

/// Returns a typed iterator over the rows of `table_id`,
/// optionally filtered by `filter`.
fn table_iter<T: TableType>(table_id: u32, filter: Option<spacetimedb_lib::filter::Expr>) -> Result<TableIter<T>> {
    // The TableType deserializer doesn't need the schema, as we have type-directed
    // dispatch to deserialize any given `TableType`.
    let (iter, _schema) = buffer_table_iter(table_id, filter)?;
    let deserializer = TableTypeBufferDeserialize::new();
    let iter = RawTableIter::new(iter, deserializer);
    Ok(TableIter::new(iter))
}
/// A trait for deserializing multiple items out of a single `BufReader`.
///
/// Each `BufReader` holds a number of concatenated serialized objects.
trait BufferDeserialize {
    /// The type of the items being deserialized.
    type Item;

    /// Deserialize one entry from the `reader`, which must not be empty.
    fn deserialize<'de>(&mut self, reader: impl BufReader<'de>) -> Self::Item;
}
/// Deserialize `ProductValue`s from `Buffer`s.
// struct ProductValueBufferDeserialize {
// /// The schema to deserialize with.
// schema: ProductType,
// }
// impl ProductValueBufferDeserialize {
// fn new(schema: ProductType) -> Self {
// Self { schema }
// }
// }
// impl BufferDeserialize for ProductValueBufferDeserialize {
// type Item = ProductValue;
// fn deserialize<'de>(&mut self, mut reader: impl BufReader<'de>) -> Self::Item {
// decode_row(&self.schema, &mut reader).expect("Failed to decode row!")
// }
// }
/// Deserialize bsatn values to a particular `TableType`.
///
/// Stateless: the target type `T` alone directs deserialization,
/// so only a `PhantomData` marker is stored.
struct TableTypeBufferDeserialize<T> {
    _marker: PhantomData<T>,
}

impl<T> TableTypeBufferDeserialize<T> {
    /// Creates the (stateless) deserializer.
    fn new() -> Self {
        Self { _marker: PhantomData }
    }
}

impl<T: TableType> BufferDeserialize for TableTypeBufferDeserialize<T> {
    type Item = T;

    /// Decodes one bsatn-encoded `T` from the reader; panics on malformed rows.
    fn deserialize<'de>(&mut self, mut reader: impl BufReader<'de>) -> Self::Item {
        bsatn::from_reader(&mut reader).expect("Failed to decode row!")
    }
}
/// Iterate over a sequence of `Buffer`s and deserialize a number of `T`s
/// out of each.
struct RawTableIter<T, De: BufferDeserialize<Item = T>> {
    /// The underlying source of our `Buffer`s.
    inner: BufferIter,
    /// The current position in the current buffer, from which
    /// `deserializer` can read. A value of `None` indicates that
    /// we need to pull another `Buffer` from `inner`.
    reader: Option<Cursor<Box<[u8]>>>,
    /// Decodes individual `T`s out of the current buffer.
    deserializer: De,
}
impl<T, De: BufferDeserialize<Item = T>> RawTableIter<T, De> {
    /// Create an iterator over `iter`'s buffers; decoding starts lazily
    /// on the first call to `next`.
    fn new(iter: BufferIter, deserializer: De) -> Self {
        RawTableIter {
            inner: iter,
            reader: None,
            deserializer,
        }
    }
}
impl<T, De: BufferDeserialize<Item = T>> Iterator for RawTableIter<T, De> {
    type Item = T;
    /// Yield the next decoded row, pulling a fresh `Buffer` from `inner`
    /// whenever the current one is exhausted. Returns `None` only when
    /// `inner` itself is exhausted.
    fn next(&mut self) -> Option<Self::Item> {
        loop {
            // If we currently have some bytes in the buffer to still decode,
            // do that. Otherwise, try to fetch the next buffer first.
            match &self.reader {
                Some(reader) => {
                    if reader.remaining() == 0 {
                        // Current buffer fully consumed; drop it and fetch another.
                        self.reader = None;
                        continue;
                    }
                    break;
                }
                None => {
                    // If we receive None here, iteration is complete.
                    let buffer = self.inner.next()?;
                    // Panics on a host-side error while fetching the buffer.
                    let buffer = buffer.expect("RawTableIter::next: Failed to get buffer!");
                    self.reader = Some(Cursor::new(buffer));
                    break;
                }
            }
        }
        // Invariant: the loop above only breaks with `reader == Some(_)`
        // holding a non-empty cursor, so this unwrap cannot fail.
        let reader = self.reader.as_ref().unwrap();
        // NOTE(review): passing `&Cursor` here implies `BufReader` is
        // implemented for shared references to `Cursor` — confirm in sats.
        let row = self.deserializer.deserialize(reader);
        Some(row)
    }
}
/// A declarative description of an index over a table's columns.
#[derive(Clone, Copy)]
pub struct IndexDef<'a> {
    /// The index's name.
    pub name: &'a str,
    /// The kind of index (e.g. btree/hash, per `IndexType`).
    pub ty: IndexType,
    /// The column ids (positions) this index covers.
    pub col_ids: &'a [u8],
}
/// An iterator over all rows of a table, yielding typed rows `T`.
pub struct TableIter<T: TableType> {
    iter: TableTypeTableIter<T>,
}
impl<T: TableType> TableIter<T> {
    /// Wrap a raw, typed table iterator.
    fn new(iter: TableTypeTableIter<T>) -> Self {
        Self { iter }
    }
}
impl<T: TableType> Iterator for TableIter<T> {
    type Item = T;
    // Plain delegation to the underlying raw iterator.
    fn next(&mut self) -> Option<Self::Item> {
        self.iter.next()
    }
}
/// A type that corresponds to a database table: implementors can be
/// inserted into and iterated out of their table.
pub trait TableType: SpacetimeType + DeserializeOwned + Serialize {
    /// The name of the table in the database.
    const TABLE_NAME: &'static str;
    /// Per-column attributes (e.g. uniqueness), in column order.
    const COLUMN_ATTRS: &'static [ColumnIndexAttribute];
    /// Indexes declared on this table.
    const INDEXES: &'static [IndexDef<'static>];
    /// The result type of `insert`: either `Self` (panic on error) or a
    /// `Result` carrying a `UniqueConstraintViolation`. Sealed.
    type InsertResult: sealed::InsertResult<T = Self>;
    /// The runtime id of this table.
    fn table_id() -> u32;
    /// Insert `ins` as a new row of the table.
    fn insert(ins: Self) -> Self::InsertResult {
        insert(Self::table_id(), ins)
    }
    /// Iterate over every row of the table. Panics on a host-side error.
    fn iter() -> TableIter<Self> {
        table_iter(Self::table_id(), None).unwrap()
    }
    #[doc(hidden)]
    fn iter_filtered(filter: spacetimedb_lib::filter::Expr) -> TableIter<Self> {
        table_iter(Self::table_id(), Some(filter)).unwrap()
    }
}
/// Private module sealing `InsertResult` so it cannot be implemented
/// outside this crate.
mod sealed {
    use super::*;
    /// Conversion from the raw insert result to the user-facing one.
    pub trait InsertResult {
        type T: TableType;
        fn from_res(res: Result<Self::T>) -> Self;
    }
}
/// Error returned when an insert would duplicate a unique column of table `T`.
pub struct UniqueConstraintViolation<T: TableType>(PhantomData<T>);
impl<T: TableType> fmt::Debug for UniqueConstraintViolation<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "UniqueConstraintViolation({})", T::TABLE_NAME)
    }
}
impl<T: TableType> fmt::Display for UniqueConstraintViolation<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "not able to insert into table {}; duplicate unique column",
            T::TABLE_NAME
        )
    }
}
impl<T: TableType> std::error::Error for UniqueConstraintViolation<T> {}
// Insert result flavor that surfaces unique-constraint violations to the
// caller and panics on any other errno.
impl<T: TableType> sealed::InsertResult for Result<T, UniqueConstraintViolation<T>> {
    type T = T;
    fn from_res(res: Result<Self::T>) -> Self {
        res.map_err(|e| match e {
            Errno::EXISTS => UniqueConstraintViolation(PhantomData),
            _ => panic!("unexpected error from insert(): {e}"),
        })
    }
}
// Insert result flavor that is the row type itself: any insertion
// failure becomes a panic.
impl<T: TableType> sealed::InsertResult for T {
    type T = T;
    fn from_res(res: Result<Self::T>) -> Self {
        match res {
            Ok(row) => row,
            Err(e) => panic!("unexpected error from insert(): {e}"),
        }
    }
}
/// Decode exactly zero or one `T` out of `r`.
///
/// An empty reader yields `Ok(None)`; otherwise exactly one value must be
/// present — trailing bytes after it cause a panic.
fn bsatn_from_reader<'de, R: BufReader<'de>, T: spacetimedb_lib::de::Deserialize<'de>>(
    r: &mut R,
) -> Result<Option<T>, DecodeError> {
    if r.remaining() == 0 {
        Ok(None)
    } else {
        let value = bsatn::from_reader(r)?;
        // The buffer must hold exactly one encoded value; leftovers
        // indicate a protocol violation.
        assert_eq!(r.remaining(), 0);
        Ok(Some(value))
    }
}
/// A column value that can be used in equality filters.
pub trait FilterableValue: Serialize + Eq {}
/// A filterable value that additionally identifies at most one row
/// (i.e. belongs to a unique column).
pub trait UniqueValue: FilterableValue {
    /// Convert this value into a `PrimaryKey` handle.
    fn into_primarykey(self) -> PrimaryKey;
}
#[doc(hidden)]
pub mod query {
    use super::*;
    /// Typed access to the field at column position `N` of a row.
    pub trait FieldAccess<const N: u8> {
        type Field;
        fn get_field(&self) -> &Self::Field;
    }
    /// Find the unique row (if any) whose column `COL_IDX` equals `val`.
    /// Performs a host-side indexed seek.
    #[doc(hidden)]
    pub fn filter_by_unique_field<Table: TableType, T: UniqueValue, const COL_IDX: u8>(val: &T) -> Option<Table>
    where
        Table: FieldAccess<COL_IDX, Field = T>,
    {
        let buffer = seek_eq(Table::table_id(), COL_IDX, val).unwrap();
        let bytes = buffer.read();
        let mut slice: &[u8] = &bytes;
        // We will always find either 0 or 1 rows here.
        bsatn_from_reader(&mut slice).unwrap()
    }
    /// Iterate all rows whose column `COL_IDX` equals `val`.
    /// Currently a full client-side scan with a predicate.
    #[doc(hidden)]
    pub fn filter_by_field<'a, Table: TableType, T: FilterableValue, const COL_IDX: u8>(
        val: &'a T,
    ) -> FilterByIter<'a, Table, COL_IDX, T>
    where
        // NOTE(review): `'a: 'a` looks like the early-bound-lifetime trick
        // (allows naming `'a` in turbofish) — confirm before removing.
        'a: 'a,
    {
        // In the future, this should instead call seek_eq.
        FilterByIter {
            inner: Table::iter(),
            val,
        }
    }
    /// Delete the row whose unique column `COL_IDX` equals `val`,
    /// returning whether a row was deleted.
    #[doc(hidden)]
    pub fn delete_by_field<Table: TableType, T: UniqueValue, const COL_IDX: u8>(val: &T) -> bool {
        let result = delete_eq(Table::table_id(), COL_IDX, val);
        match result {
            Err(_) => {
                //TODO: Returning here was supposed to signify an error, but it can also return `Err(_)` when there is nothing to delete.
                //spacetimedb::println!("Internal server error on equatable type: {}", #primary_key_tuple_type_str);
                false
            }
            Ok(count) => count > 0,
        }
    }
    /// Replace the row keyed by unique column `COL_IDX == val` with
    /// `new_value` (delete-then-insert; not atomic at this layer).
    #[doc(hidden)]
    pub fn update_by_field<Table: TableType, T: UniqueValue, const COL_IDX: u8>(val: &T, new_value: Table) -> bool {
        delete_by_field::<Table, T, COL_IDX>(val);
        Table::insert(new_value);
        // For now this is always successful
        true
    }
    /// Iterator adapter that scans a whole table and keeps only rows
    /// whose column `COL_IDX` equals `val`.
    #[doc(hidden)]
    pub struct FilterByIter<'a, Table: TableType, const COL_IDX: u8, T: FilterableValue> {
        inner: TableIter<Table>,
        val: &'a T,
    }
    impl<'a, Table: TableType, const COL_IDX: u8, T: FilterableValue> Iterator for FilterByIter<'a, Table, COL_IDX, T>
    where
        Table: FieldAccess<COL_IDX, Field = T>,
    {
        type Item = Table;
        fn next(&mut self) -> Option<Self::Item> {
            // `find_map` advances `inner` past non-matching rows and
            // resumes from there on the next call.
            self.inner.find_map(|row| (row.get_field() == self.val).then_some(row))
        }
    }
}
/// Schedule a reducer to run after a duration, e.g.
/// `schedule!("100ms", my_reducer(_, args))`.
#[macro_export]
macro_rules! schedule {
    // this errors on literals with time unit suffixes, e.g. 100ms
    // I swear I saw a rustc tracking issue to allow :literal to match even an invalid suffix but I can't seem to find it
    ($dur:literal, $($args:tt)*) => {
        $crate::schedule!($crate::duration!($dur), $($args)*)
    };
    ($dur:expr, $($args:tt)*) => {
        $crate::__schedule_impl!($crate::rt::schedule_in($dur), [] [$($args)*])
    };
}
/// Schedule a reducer to run at an absolute `Timestamp`.
#[macro_export]
macro_rules! schedule_at {
    ($time:expr, $($args:tt)*) => {
        $crate::__schedule_impl!($time, [] [$($args)*])
    };
}
/// Implementation detail of `schedule!`/`schedule_at!`: token-munches the
/// reducer path, then its argument list, then emits the `schedule` call.
#[doc(hidden)]
#[macro_export]
macro_rules! __schedule_impl {
    // Path fully accumulated; the next token tree is the argument list.
    ($time:expr, [$repeater:path] [($($args:tt)*)]) => {
        $crate::__schedule_impl!(@process_args $time, $repeater, ($($args)*))
    };
    // Still accumulating path segments into `[$cur]`.
    ($time:expr, [$($cur:tt)*] [$next:tt $($rest:tt)*]) => {
        $crate::__schedule_impl!($time, [$($cur)* $next] [$($rest)*])
    };
    // Leading `_` argument: substitute a dummy `ReducerContext`.
    (@process_args $time:expr, $repeater:path, (_$(, $args:expr)* $(,)?)) => {
        $crate::__schedule_impl!(@call $time, $repeater, $crate::ReducerContext::__dummy(), ($($args),*))
    };
    // No context argument provided.
    (@process_args $time:expr, $repeater:path, ($($args:expr),* $(,)?)) => {
        $crate::__schedule_impl!(@call $time, $repeater, , ($($args),*))
    };
    (@call $time:expr, $repeater:path, $($ctx:expr)?, ($($args:expr),*)) => {
        <$repeater>::schedule($time, $($ctx,)? $($args),*);
    };
}
/// A handle to a scheduled invocation of reducer `R`, usable to cancel it.
/// Serialized as a bare `u64` id.
pub struct ScheduleToken<R = AnyReducer> {
    /// Host-assigned id of the scheduled call.
    id: u64,
    // Ties the token to the reducer type without storing it.
    _marker: PhantomData<R>,
}
// Manual impls: derive would needlessly require `R: Clone`/`R: Copy`.
impl<R> Clone for ScheduleToken<R> {
    fn clone(&self) -> Self {
        *self
    }
}
impl<R> Copy for ScheduleToken<R> {}
// Serialized transparently as the wrapped `u64`.
impl<R> Serialize for ScheduleToken<R> {
    fn serialize<S: spacetimedb_lib::ser::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        self.id.serialize(serializer)
    }
}
impl<'de, R> Deserialize<'de> for ScheduleToken<R> {
    fn deserialize<D: spacetimedb_lib::de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        u64::deserialize(deserializer).map(Self::new)
    }
}
// In the SATS type system the token is just a `u64`.
impl<R> SpacetimeType for ScheduleToken<R> {
    fn make_type<S: spacetimedb_lib::sats::typespace::TypespaceBuilder>(_ts: &mut S) -> spacetimedb_lib::AlgebraicType {
        spacetimedb_lib::AlgebraicType::U64
    }
}
impl<R> ScheduleToken<R> {
    /// Wrap a raw schedule id in a typed token.
    #[inline]
    fn new(id: u64) -> Self {
        Self {
            id,
            _marker: PhantomData,
        }
    }
    /// Erase the reducer type from this token.
    #[inline]
    pub fn erase(self) -> ScheduleToken {
        ScheduleToken::new(self.id)
    }
    /// Cancel this scheduled reducer. This method is idempotent.
    #[inline]
    pub fn cancel(self) {
        sys::cancel_reducer(self.id)
    }
}
/// Marker type meaning "some unspecified reducer"; the default type
/// parameter of a type-erased `ScheduleToken`.
pub struct AnyReducer {
    // `Infallible` is uninhabited, so this type can never be constructed.
    _never: std::convert::Infallible,
}
+69
View File
@@ -0,0 +1,69 @@
use crate::sys;
use std::sync::Mutex;
use std::{fmt, panic};
// Installed automatically before module init (see `__preinit__` naming
// convention); routes panics to the host's console log.
#[no_mangle]
extern "C" fn __preinit__00_panic_hook() {
    panic::set_hook(Box::new(panic_hook));
}
/// Forward a panic's message and location to the host at `Panic` level.
fn panic_hook(info: &panic::PanicInfo) {
    // Panic payloads are `&'static str` for `panic!("literal")` and
    // `String` for formatted panics; anything else is opaque.
    let msg = match info.payload().downcast_ref::<&'static str>() {
        Some(s) => *s,
        None => match info.payload().downcast_ref::<String>() {
            Some(s) => &s[..],
            None => "Box<dyn Any>",
        },
    };
    let location = info.location();
    sys::console_log(
        sys::LogLevel::Panic,
        None,
        location.map(|l| l.file()),
        location.map(|l| l.line()),
        msg,
    )
}
/// A `log::Log` implementation that forwards records to the host console,
/// formatting into a single reusable buffer.
struct Logger {
    // Mutex is fine here because wasm is single-threaded;
    // this is actually basically a RefCell
    buf: Mutex<String>,
}
// Cap the retained capacity of the format buffer between log calls.
const MAX_BUF_SIZE: usize = 0x4000; // 16 KiB
impl log::Log for Logger {
    // All levels enabled; filtering is done via `log::set_max_level`.
    fn enabled(&self, _metadata: &log::Metadata) -> bool {
        true
    }
    fn log(&self, record: &log::Record) {
        // Map `log` levels onto the host's log levels.
        let level = match record.metadata().level() {
            log::Level::Error => sys::LogLevel::Error,
            log::Level::Warn => sys::LogLevel::Warn,
            log::Level::Info => sys::LogLevel::Info,
            log::Level::Debug => sys::LogLevel::Debug,
            log::Level::Trace => sys::LogLevel::Trace,
        };
        let buf = &mut *self.buf.lock().unwrap();
        buf.clear();
        // Format into the shared buffer to avoid a fresh allocation per record.
        fmt::write(buf, *record.args()).unwrap();
        sys::console_log(level, Some(record.target()), record.file(), record.line(), buf);
        // Don't let one huge message pin a huge buffer forever.
        buf.shrink_to(MAX_BUF_SIZE);
    }
    fn flush(&self) {}
}
/// The single global logger instance registered at module init.
static LOGGER: Logger = Logger {
    buf: Mutex::new(String::new()),
};
// Registers `LOGGER` as the global `log` backend during module pre-init.
#[no_mangle]
extern "C" fn __preinit__15_init_log() {
    // if the user wants to set their own logger, that's fine
    if log::set_logger(&LOGGER).is_ok() {
        log::set_max_level(log::LevelFilter::Trace);
    }
}
+386
View File
@@ -0,0 +1,386 @@
#![deny(unsafe_op_in_unsafe_fn)]
use std::any::TypeId;
use std::collections::{btree_map, BTreeMap};
use std::fmt;
use std::marker::PhantomData;
use std::sync::Mutex;
use std::time::Duration;
use crate::{sys, ReducerContext, ScheduleToken, SpacetimeType, TableType, Timestamp};
use spacetimedb_lib::de::{self, Deserialize, SeqProductAccess};
use spacetimedb_lib::sats::typespace::TypespaceBuilder;
use spacetimedb_lib::sats::{AlgebraicType, AlgebraicTypeRef, ProductTypeElement};
use spacetimedb_lib::ser::{self, Serialize, SerializeSeqProduct};
use spacetimedb_lib::{bsatn, Identity, MiscModuleExport, ModuleDef, ReducerDef, TableDef, TypeAlias};
use sys::Buffer;
pub use once_cell::sync::{Lazy, OnceCell};
// Scoped thread-local holding the timestamp of the reducer call currently
// executing; set for the duration of each invocation.
scoped_tls::scoped_thread_local! {
    pub(crate) static CURRENT_TIMESTAMP: Timestamp
}
/// Drive a reducer call: decode `args`, run `reducer` with the call's
/// context installed, hand the outcome to `epilogue`, and encode the
/// result for the host (`Buffer::INVALID` on success, error text otherwise).
pub fn invoke_reducer<'a, A: Args<'a>, T>(
    reducer: impl Reducer<'a, A, T>,
    sender: Buffer,
    timestamp: u64,
    args: &'a [u8],
    epilogue: impl FnOnce(Result<(), &str>),
) -> Buffer {
    let ctx = assemble_context(sender, timestamp);
    // Args arrive bsatn-encoded as a product value.
    let SerDeArgs(args) = bsatn::from_slice(args).expect("unable to decode args");
    let res = CURRENT_TIMESTAMP.set(&{ ctx.timestamp }, || {
        let res: Result<(), Box<str>> = reducer.invoke(ctx, args);
        // Epilogue observes the result by reference before it is consumed.
        epilogue(res.as_ref().map(|()| ()).map_err(|e| &**e));
        res
    });
    cvt_result(res)
}
/// Ask the host to create an index; returns `Buffer::INVALID` on success
/// or an error-message buffer on failure.
pub fn create_index(index_name: &str, table_id: u32, index_type: sys::raw::IndexType, col_ids: Vec<u8>) -> Buffer {
    let result = sys::create_index(index_name, table_id, index_type as u8, &col_ids);
    cvt_result(result.map_err(cvt_errno))
}
/// Drive a connect/disconnect callback the same way as a reducer:
/// install the call context, run `f`, and encode its result for the host.
pub fn invoke_connection_func<R: ReducerResult>(
    f: impl Fn(ReducerContext) -> R,
    sender: Buffer,
    timestamp: u64,
) -> Buffer {
    let ctx = assemble_context(sender, timestamp);
    let res = CURRENT_TIMESTAMP.set(&{ ctx.timestamp }, || f(ctx).into_result());
    cvt_result(res)
}
/// Build a `ReducerContext` from the raw host-call parameters:
/// a 32-byte sender identity and a microseconds-since-epoch timestamp.
fn assemble_context(sender: Buffer, timestamp: u64) -> ReducerContext {
    let sender = sender.read_array::<32>();
    let sender = Identity { data: sender };
    let timestamp = Timestamp::UNIX_EPOCH + Duration::from_micros(timestamp);
    ReducerContext { sender, timestamp }
}
/// Render a host errno as a boxed error message.
fn cvt_errno(errno: sys::Errno) -> Box<str> {
    errno.to_string().into_boxed_str()
}
/// Encode a reducer outcome for the host: `Buffer::INVALID` signals
/// success, otherwise a buffer holding the error message bytes.
fn cvt_result(res: Result<(), Box<str>>) -> Buffer {
    if let Err(errmsg) = res {
        Buffer::alloc(errmsg.as_bytes())
    } else {
        Buffer::INVALID
    }
}
/// A callable reducer taking argument tuple `A`; `T` selects between the
/// with-context and without-context calling conventions.
pub trait Reducer<'de, A: Args<'de>, T> {
    fn invoke(&self, ctx: ReducerContext, args: A) -> Result<(), Box<str>>;
}
/// Static metadata describing a registered reducer.
pub trait ReducerInfo {
    /// The reducer's registered name.
    const NAME: &'static str;
    /// Argument names, parallel to the argument tuple (None if unnamed).
    const ARG_NAMES: &'static [Option<&'static str>];
    /// The type-erased entry point used by `__call_reducer__`.
    const INVOKE: ReducerFn;
}
/// Extra metadata for reducers that re-schedule themselves periodically.
pub trait RepeaterInfo: ReducerInfo {
    const REPEAT_INTERVAL: Duration;
}
/// A tuple of reducer arguments: knows how to (de)serialize itself as a
/// product value and how to describe its schema.
pub trait Args<'de>: Sized {
    /// Number of arguments in the tuple.
    const LEN: usize;
    fn visit_seq_product<A: SeqProductAccess<'de>>(prod: A) -> Result<Self, A::Error>;
    fn serialize_seq_product<S: SerializeSeqProduct>(&self, prod: &mut S) -> Result<(), S::Error>;
    fn schema<I: ReducerInfo>(typespace: &mut impl TypespaceBuilder) -> ReducerDef;
}
/// Arguments acceptable to `schedule`: either a plain `Args` tuple, or one
/// with a leading `ReducerContext` that gets stripped off.
pub trait ScheduleArgs<'de>: Sized {
    type Args: Args<'de>;
    fn into_args(self) -> Self::Args;
}
// A bare args tuple schedules as itself.
impl<'de, T: Args<'de>> ScheduleArgs<'de> for T {
    type Args = Self;
    fn into_args(self) -> Self::Args {
        self
    }
}
/// A value a reducer may return: unit or a `Result<(), E>`.
pub trait ReducerResult {
    /// Normalize into the error-message form the runtime uses.
    fn into_result(self) -> Result<(), Box<str>>;
}
impl ReducerResult for () {
    #[inline]
    fn into_result(self) -> Result<(), Box<str>> {
        Ok(self)
    }
}
// Errors are rendered via `Debug` into the message string.
impl<E: fmt::Debug> ReducerResult for Result<(), E> {
    #[inline]
    fn into_result(self) -> Result<(), Box<str>> {
        self.map_err(|e| format!("{e:?}").into())
    }
}
/// A type usable as a reducer parameter: any deserializable value, or the
/// special `ReducerContext` first parameter.
pub trait ReducerArg<'de> {}
impl<'de, T: Deserialize<'de>> ReducerArg<'de> for T {}
impl<'de> ReducerArg<'de> for ReducerContext {}
// Compile-time assertion helpers used by the proc macros.
pub fn assert_reducerarg<'de, T: ReducerArg<'de>>() {}
pub fn assert_reducerret<T: ReducerResult>() {}
pub const fn assert_table<T: TableType>() {}
/// Marker: the reducer's first parameter is a `ReducerContext`.
pub struct ContextArg;
/// Marker: the reducer takes no `ReducerContext` parameter.
pub struct NoContextArg;
/// SATS product visitor that deserializes a reducer argument tuple `A`.
struct ArgsVisitor<A> {
    _marker: PhantomData<A>,
}
impl<'de, A: Args<'de>> de::ProductVisitor<'de> for ArgsVisitor<A> {
    type Output = A;
    // Reducer args are an anonymous product.
    fn product_name(&self) -> Option<&str> {
        None
    }
    fn product_len(&self) -> usize {
        A::LEN
    }
    fn product_kind(&self) -> de::ProductKind {
        de::ProductKind::ReducerArgs
    }
    fn visit_seq_product<Acc: SeqProductAccess<'de>>(self, prod: Acc) -> Result<Self::Output, Acc::Error> {
        A::visit_seq_product(prod)
    }
    // Only positional encoding is supported for reducer arguments.
    fn visit_named_product<Acc: de::NamedProductAccess<'de>>(self, _prod: Acc) -> Result<Self::Output, Acc::Error> {
        Err(de::Error::custom("named products not supported"))
    }
}
/// Implement `Args`, `ScheduleArgs`, and the two `Reducer` calling
/// conventions for argument tuples of every arity up to the listed one.
/// The first arm peels one type parameter per recursion step.
macro_rules! impl_reducer {
    ($($T1:ident $(, $T:ident)*)?) => {
        impl_reducer!(@impl $($T1 $(, $T)*)?);
        $(impl_reducer!($($T),*);)?
    };
    (@impl $($T:ident),*) => {
        impl<'de, $($T: SpacetimeType + Deserialize<'de> + Serialize),*> Args<'de> for ($($T,)*) {
            const LEN: usize = impl_reducer!(@count $($T)*);
            #[allow(non_snake_case)]
            #[allow(unused)]
            fn visit_seq_product<Acc: SeqProductAccess<'de>>(mut prod: Acc) -> Result<Self, Acc::Error> {
                let vis = ArgsVisitor { _marker: PhantomData::<Self> };
                // `i` tracks the element index for error reporting.
                let i = 0;
                $(let $T = prod.next_element::<$T>()?.ok_or_else(|| de::Error::missing_field(i, None, &vis))?;
                let i = i + 1;)*
                Ok(($($T,)*))
            }
            fn serialize_seq_product<Ser: SerializeSeqProduct>(&self, _prod: &mut Ser) -> Result<(), Ser::Error> {
                #[allow(non_snake_case)]
                let ($($T,)*) = self;
                $(_prod.serialize_element($T)?;)*
                Ok(())
            }
            #[inline]
            fn schema<Info: ReducerInfo>(_typespace: &mut impl TypespaceBuilder) -> ReducerDef {
                // Bind the trailing ARG_NAMES entries to the tuple's types.
                #[allow(non_snake_case, irrefutable_let_patterns)]
                let [.., $($T),*] = Info::ARG_NAMES else { panic!() };
                ReducerDef {
                    name: Info::NAME.into(),
                    args: vec![
                        $(ProductTypeElement {
                            name: $T.map(str::to_owned),
                            algebraic_type: <$T>::make_type(_typespace),
                        }),*
                    ],
                }
            }
        }
        // A tuple with a leading `ReducerContext` schedules by dropping it.
        impl<'de, $($T: SpacetimeType + Deserialize<'de> + Serialize),*> ScheduleArgs<'de> for (ReducerContext, $($T,)*) {
            type Args = ($($T,)*);
            #[allow(clippy::unused_unit)]
            fn into_args(self) -> Self::Args {
                #[allow(non_snake_case)]
                let (_ctx, $($T,)*) = self;
                ($($T,)*)
            }
        }
        // Calling convention: reducer fn takes `ReducerContext` first.
        impl<'de, Func, Ret, $($T: SpacetimeType + Deserialize<'de> + Serialize),*> Reducer<'de, ($($T,)*), ContextArg> for Func
        where
            Func: Fn(ReducerContext, $($T),*) -> Ret,
            Ret: ReducerResult
        {
            fn invoke(&self, ctx: ReducerContext, args: ($($T,)*)) -> Result<(), Box<str>> {
                #[allow(non_snake_case)]
                let ($($T,)*) = args;
                self(ctx, $($T),*).into_result()
            }
        }
        // Calling convention: reducer fn takes no context.
        impl<'de, Func, Ret, $($T: SpacetimeType + Deserialize<'de> + Serialize),*> Reducer<'de, ($($T,)*), NoContextArg> for Func
        where
            Func: Fn($($T),*) -> Ret,
            Ret: ReducerResult
        {
            fn invoke(&self, _ctx: ReducerContext, args: ($($T,)*)) -> Result<(), Box<str>> {
                #[allow(non_snake_case)]
                let ($($T,)*) = args;
                self($($T),*).into_result()
            }
        }
    };
    // Count the number of identifiers: 0 + 1 per ident.
    (@count $($T:ident)*) => {
        0 $(+ impl_reducer!(@drop $T 1))*
    };
    (@drop $a:tt $b:tt) => { $b };
}
// Generate impls for arities 0 through 32.
impl_reducer!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z, AA, AB, AC, AD, AE, AF);
/// Adapter giving a reducer argument tuple `A` plain SATS
/// `Serialize`/`Deserialize` impls (as an anonymous product).
struct SerDeArgs<A>(A);
impl<'de, A: Args<'de>> Deserialize<'de> for SerDeArgs<A> {
    fn deserialize<D: de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        deserializer
            .deserialize_product(ArgsVisitor { _marker: PhantomData })
            .map(Self)
    }
}
impl<'de, A: Args<'de>> Serialize for SerDeArgs<A> {
    fn serialize<S: ser::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        let mut prod = serializer.serialize_seq_product(A::LEN)?;
        self.0.serialize_seq_product(&mut prod)?;
        prod.end()
    }
}
/// The timestamp `dur` from now; panics (at the caller's location) if it
/// overflows the timestamp range.
#[track_caller]
pub fn schedule_in(dur: Duration) -> Timestamp {
    Timestamp::now()
        .checked_add(dur)
        .unwrap_or_else(|| panic!("{dur:?} is too far into the future to schedule"))
}
/// Schedule reducer `R` to run at `time` with `args`; returns a token
/// that can later cancel the call.
pub fn schedule<'de, R: ReducerInfo>(time: Timestamp, args: impl ScheduleArgs<'de>) -> ScheduleToken<R> {
    let arg_bytes = bsatn::to_vec(&SerDeArgs(args.into_args())).unwrap();
    let id = sys::schedule(R::NAME, &arg_bytes, time.micros_since_epoch);
    ScheduleToken::new(id)
}
/// Schedule the first run of a repeating reducer; its token is discarded
/// since repeaters are not cancelled through this path.
pub fn schedule_repeater<A: RepeaterArgs, T, I: RepeaterInfo>(_reducer: impl for<'de> Reducer<'de, A, T>) {
    let time = schedule_in(I::REPEAT_INTERVAL);
    let args = bsatn::to_vec(&SerDeArgs(A::get_now())).unwrap();
    sys::schedule(I::NAME, &args, time.micros_since_epoch);
}
/// Argument tuples valid for repeating reducers: either empty or the
/// current timestamp.
pub trait RepeaterArgs: for<'de> Args<'de> {
    /// Produce the argument values for a run happening "now".
    fn get_now() -> Self;
}
impl RepeaterArgs for () {
    fn get_now() -> Self {}
}
impl RepeaterArgs for (Timestamp,) {
    fn get_now() -> Self {
        (Timestamp::now(),)
    }
}
/// Queue a callback that contributes to the module description emitted by
/// `__describe_module__`.
fn register_describer(f: fn(&mut ModuleBuilder)) {
    DESCRIBERS.lock().unwrap().push(f)
}
/// Register a plain type so it appears in the module's typespace.
pub fn register_reftype<T: SpacetimeType>() {
    register_describer(|module| {
        T::make_type(module);
    })
}
/// Register table `T`: records its row type, column attributes, and
/// indexes in the module definition.
pub fn register_table<T: TableType>() {
    register_describer(|module| {
        // NOTE(review): `as_ref().unwrap()` implies `make_type` returns an
        // `AlgebraicType::Ref` here — confirm against SpacetimeType impls.
        let data = *T::make_type(module).as_ref().unwrap();
        let schema = TableDef {
            name: T::TABLE_NAME.into(),
            data,
            column_attrs: T::COLUMN_ATTRS.to_owned(),
            indexes: T::INDEXES.iter().copied().map(Into::into).collect(),
        };
        module.module.tables.push(schema)
    })
}
impl From<crate::IndexDef<'_>> for spacetimedb_lib::IndexDef {
fn from(index: crate::IndexDef<'_>) -> spacetimedb_lib::IndexDef {
spacetimedb_lib::IndexDef {
name: index.name.to_owned(),
ty: index.ty,
col_ids: index.col_ids.to_owned(),
}
}
}
/// Register reducer `I`: records its schema in the module definition and
/// its entry point in the reducer dispatch table.
pub fn register_reducer<'a, A: Args<'a>, T, I: ReducerInfo>(_: impl Reducer<'a, A, T>) {
    register_describer(|module| {
        let schema = A::schema::<I>(module);
        module.module.reducers.push(schema);
        // Dispatch index must match the position in `module.reducers`.
        module.reducers.push(I::INVOKE);
    })
}
/// Accumulates the module definition and reducer dispatch table while the
/// registered describers run.
#[derive(Default)]
struct ModuleBuilder {
    /// The serializable module description sent to the host.
    module: ModuleDef,
    /// Reducer entry points, indexed by registration order.
    reducers: Vec<ReducerFn>,
    /// Dedup map from Rust `TypeId` to its slot in the typespace.
    type_map: BTreeMap<TypeId, AlgebraicTypeRef>,
}
impl TypespaceBuilder for ModuleBuilder {
    /// Intern the type identified by `typeid`, building it with `make_ty`
    /// on first sight; returns a `Ref` into the typespace either way.
    fn add(
        &mut self,
        typeid: TypeId,
        name: Option<&'static str>,
        make_ty: impl FnOnce(&mut Self) -> AlgebraicType,
    ) -> AlgebraicType {
        let r = match self.type_map.entry(typeid) {
            btree_map::Entry::Occupied(o) => *o.get(),
            btree_map::Entry::Vacant(v) => {
                // Reserve a placeholder slot *before* building the type so
                // that recursive types can refer back to their own slot.
                let slot_ref = self.module.typespace.add(AlgebraicType::UNIT_TYPE);
                v.insert(slot_ref);
                if let Some(name) = name {
                    self.module.misc_exports.push(MiscModuleExport::TypeAlias(TypeAlias {
                        name: name.to_owned(),
                        ty: slot_ref,
                    }));
                }
                let ty = make_ty(self);
                // Replace the placeholder with the real definition.
                self.module.typespace[slot_ref] = ty;
                slot_ref
            }
        };
        AlgebraicType::Ref(r)
    }
}
// not actually a mutex; because wasm is single-threaded this basically just turns into a refcell
// Describer callbacks queued by the `register_*` functions at module init.
static DESCRIBERS: Mutex<Vec<fn(&mut ModuleBuilder)>> = Mutex::new(Vec::new());
/// Signature of a type-erased reducer entry point: (sender, timestamp,
/// encoded args) -> result buffer.
pub type ReducerFn = fn(Buffer, u64, &[u8]) -> Buffer;
// Dispatch table populated once by `__describe_module__`.
static REDUCERS: OnceCell<Vec<ReducerFn>> = OnceCell::new();
// Host entry point: run all describers and return the serialized module
// definition; also freezes the reducer dispatch table.
#[no_mangle]
extern "C" fn __describe_module__() -> Buffer {
    let mut module = ModuleBuilder::default();
    for describer in &*DESCRIBERS.lock().unwrap() {
        describer(&mut module)
    }
    let bytes = bsatn::to_vec(&module.module).expect("unable to serialize typespace");
    // Panics if called twice; the host calls this exactly once.
    REDUCERS.set(module.reducers).ok().unwrap();
    Buffer::alloc(&bytes)
}
// Host entry point: dispatch reducer `id` (index into the table built by
// `__describe_module__`) with the given sender/timestamp/args.
#[no_mangle]
extern "C" fn __call_reducer__(id: usize, sender: Buffer, timestamp: u64, args: Buffer) -> Buffer {
    let reducers = REDUCERS.get().unwrap();
    let args = args.read();
    reducers[id](sender, timestamp, &args)
}
+85
View File
@@ -0,0 +1,85 @@
use std::ops::{Add, Sub};
use std::time::Duration;
use spacetimedb_lib::de::Deserialize;
use spacetimedb_lib::ser::Serialize;
use crate::rt::CURRENT_TIMESTAMP;
/// A point in time, stored as microseconds since the Unix epoch.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Timestamp {
    pub(crate) micros_since_epoch: u64,
}
impl Timestamp {
    /// The Unix epoch: zero microseconds.
    pub const UNIX_EPOCH: Self = Timestamp { micros_since_epoch: 0 };
    /// The current time as seen by the executing reducer (constant for the
    /// duration of a single reducer call).
    ///
    /// Panics if not in the context of a reducer
    pub fn now() -> Timestamp {
        assert!(CURRENT_TIMESTAMP.is_set(), "there is no current time in this context");
        CURRENT_TIMESTAMP.with(|x| *x)
    }
    /// Time elapsed between `self` and the reducer's current timestamp.
    ///
    /// Panics if `self` is later than the current timestamp.
    pub fn elapsed(&self) -> Duration {
        Self::now()
            .duration_since(*self)
            .expect("timestamp for elapsed() is after current time")
    }
    /// Returns `Ok(self - earlier)` if `earlier` is not later than `self`,
    /// otherwise `Err(earlier - self)`.
    pub fn duration_since(&self, earlier: Timestamp) -> Result<Duration, Duration> {
        let dur = Duration::from_micros(self.micros_since_epoch.abs_diff(earlier.micros_since_epoch));
        // Use `<=` so equal timestamps yield `Ok(Duration::ZERO)`, matching
        // `SystemTime::duration_since`. With the previous strict `<`, the
        // equal case returned `Err`, which made `elapsed()` panic on any
        // timestamp taken via `now()` earlier in the same reducer call
        // (where `now()` never advances).
        if earlier <= *self {
            Ok(dur)
        } else {
            Err(dur)
        }
    }
    /// `self + duration`, or `None` if the sum overflows the `u64`
    /// microsecond range (or `duration` itself exceeds `u64` micros).
    pub fn checked_add(&self, duration: Duration) -> Option<Self> {
        let micros = duration.as_micros().try_into().ok()?;
        let micros_since_epoch = self.micros_since_epoch.checked_add(micros)?;
        Some(Self { micros_since_epoch })
    }
    /// `self - duration`, or `None` if the result would precede the epoch.
    pub fn checked_sub(&self, duration: Duration) -> Option<Self> {
        let micros = duration.as_micros().try_into().ok()?;
        let micros_since_epoch = self.micros_since_epoch.checked_sub(micros)?;
        Some(Self { micros_since_epoch })
    }
}
// Panicking operator forms of `checked_add`/`checked_sub`.
impl Add<Duration> for Timestamp {
    type Output = Timestamp;
    fn add(self, rhs: Duration) -> Self::Output {
        self.checked_add(rhs)
            .expect("overflow when adding duration to timestamp")
    }
}
impl Sub<Duration> for Timestamp {
    type Output = Timestamp;
    fn sub(self, rhs: Duration) -> Self::Output {
        self.checked_sub(rhs)
            .expect("underflow when subtracting duration from timestamp")
    }
}
// In the SATS type system a timestamp is just its `u64` microsecond count.
impl crate::SpacetimeType for Timestamp {
    fn make_type<S: spacetimedb_lib::sats::typespace::TypespaceBuilder>(_ts: &mut S) -> spacetimedb_lib::AlgebraicType {
        spacetimedb_lib::AlgebraicType::U64
    }
}
impl<'de> Deserialize<'de> for Timestamp {
    fn deserialize<D: spacetimedb_lib::de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        u64::deserialize(deserializer).map(|micros_since_epoch| Self { micros_since_epoch })
    }
}
impl Serialize for Timestamp {
    fn serialize<S: spacetimedb_lib::ser::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        self.micros_since_epoch.serialize(serializer)
    }
}
+51
View File
@@ -0,0 +1,51 @@
[package]
name = "spacetimedb-cli"
version = "0.4.1"
edition = "2021"
license-file = "LICENSE"
description = "A command line interface for SpacetimeDB"
[lib]
bench = false
[[bin]]
name = "spacetime"
path = "src/main.rs"
# Benching off, because of https://bheisler.github.io/criterion.rs/book/faq.html#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options
bench = false
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
clap = { version = "4.2.4", features = ["derive", "env"] }
reqwest = { version = "0.11.10", features = ["stream"] }
tokio = { version = "1", features = ["full"] }
anyhow = { version = "1.0.57", features = ["backtrace"] }
serde = { version = "1.0.136" , features = ["derive"] }
serde_json = { version = "1.0", features = ["raw_value"] }
toml = "0.5"
dirs = "4.0"
tabled = "0.8.0"
spacetimedb-lib = { path = "../lib", version = "0.4.1" }
convert_case = "0.6.0"
wasmtime = { version = "7", default-features = false, features = ["cranelift"] }
colored = "2.0.0"
duct = "0.13.5"
base64 = "0.13.1"
slab = "0.4.7"
cargo_metadata = "0.15.2"
email_address = "0.2.4"
termcolor = "1.2.0"
is-terminal = "0.4"
futures = "0.3"
tempfile = "3.3"
rustyline = { version = "11.0.0", features = [] }
syntect = { version = "5.0.0", default-features = false, features = ["default-fancy"]}
wasmbin = "0.6"
itertools = "0.10"
[dev-dependencies]
insta = { version = "1.21.0", features = ["toml"] }
[features]
tracelogging = []
View File
+76
View File
@@ -0,0 +1,76 @@
use reqwest::header::IntoHeaderName;
use reqwest::{header, Client, RequestBuilder};
use serde::Deserialize;
use serde_json::value::RawValue;
use spacetimedb_lib::sats::ProductType;
// `User-Agent` sent with every request, e.g. `spacetimedb-cli/0.4.1`.
static APP_USER_AGENT: &str = concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION"),);
/// Connection parameters for talking to a SpacetimeDB database over HTTP.
#[derive(Debug, Clone)]
pub struct Connection {
    /// Base URL of the host, including scheme.
    pub(crate) host: String,
    /// The database's address.
    pub(crate) address: String,
    /// The database's name.
    pub(crate) database: String,
    /// Optional pre-formatted `Authorization` header value.
    pub(crate) auth_header: Option<String>,
}
/// Build a `HeaderMap` from name/value pairs.
/// Panics if a value contains characters invalid in an HTTP header.
pub fn build_headers<'a, K, I>(iter: I) -> header::HeaderMap
where
    K: IntoHeaderName,
    I: IntoIterator<Item = (K, &'a str)>,
{
    let mut headers = header::HeaderMap::new();
    for (k, v) in iter.into_iter() {
        headers.insert(k, header::HeaderValue::from_str(v).unwrap());
    }
    headers
}
/// Build an HTTP client for `con`, attaching the CLI user agent and, if
/// configured, a default `Authorization` header.
pub fn build_client(con: &Connection) -> Client {
    let mut builder = Client::builder().user_agent(APP_USER_AGENT);
    if let Some(auth_header) = &con.auth_header {
        let headers = build_headers([("Authorization", auth_header.as_str())]);
        builder = builder.default_headers(headers);
    }
    builder.build().unwrap()
}
/// A connection plus a configured HTTP client for the database's API.
pub struct ClientApi {
    con: Connection,
    client: Client,
}
impl ClientApi {
    /// Build a client for `con` (including any auth header it carries).
    pub fn new(con: Connection) -> Self {
        let client = build_client(&con);
        Self { con, client }
    }
    /// Start a POST request to the database's SQL endpoint; the caller
    /// supplies the query body and sends the request.
    pub fn sql(&self) -> RequestBuilder {
        self.client
            .post(format!("{}/database/sql/{}", self.con.host, self.con.address))
    }
}
/// One statement's result from the SQL endpoint: the row schema plus the
/// rows as raw (not-yet-decoded) JSON values borrowed from the response.
#[derive(Debug, Clone, Deserialize)]
pub struct StmtResultJson<'a> {
    pub schema: ProductType,
    #[serde(borrow)]
    pub rows: Vec<&'a RawValue>,
}
/// Deserialize one value from `s` using the stateful `seed`, rejecting
/// any trailing non-whitespace input after the value.
pub fn from_json_seed<'de, T: serde::de::DeserializeSeed<'de>>(
    s: &'de str,
    seed: T,
) -> Result<T::Value, serde_json::Error> {
    let mut deserializer = serde_json::Deserializer::from_str(s);
    let value = seed.deserialize(&mut deserializer)?;
    // `end` errors if the input contains more than the single value.
    deserializer.end()?;
    Ok(value)
}
+274
View File
@@ -0,0 +1,274 @@
use serde::{Deserialize, Serialize};
use std::{
fs,
io::{Read, Write},
path::{Path, PathBuf},
};
/// One saved identity: an optional human-readable nickname, the identity
/// hex string, and its auth token.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct IdentityConfig {
    pub nickname: Option<String>,
    pub identity: String,
    pub token: String,
}
/// The on-disk TOML shape of a config file; all fields optional.
#[derive(Deserialize, Serialize, Debug, Clone)]
pub struct RawConfig {
    host: Option<String>,
    protocol: Option<String>,
    default_identity: Option<String>,
    default_address: Option<String>,
    identity_configs: Option<Vec<IdentityConfig>>,
}
/// Merged CLI configuration: per-project settings take precedence over
/// the user's home config; only the home config is ever saved.
pub struct Config {
    proj: RawConfig,
    home: RawConfig,
}
// Config directory under $HOME and the accepted config file names.
const HOME_CONFIG_DIR: &str = ".spacetime";
const CONFIG_FILENAME: &str = "config.toml";
const SPACETIME_FILENAME: &str = "spacetime.toml";
const DOT_SPACETIME_FILENAME: &str = ".spacetime.toml";
// Defaults used when neither env vars nor config files specify a value.
const DEFAULT_HOST: &str = "spacetimedb.com/spacetimedb";
const DEFAULT_PROTOCOL: &str = "https";
impl Config {
    /// The configured host, resolved in priority order:
    /// `SPACETIMEDB_HOST` env var > project config > home config > default.
    pub fn host(&self) -> String {
        if let Ok(host) = std::env::var("SPACETIMEDB_HOST") {
            host
        } else {
            self.proj
                .host
                .as_ref()
                .or(self.home.host.as_ref())
                .map(|s| s.as_str())
                .unwrap_or(DEFAULT_HOST)
                .to_owned()
        }
    }
    /// Set the host in the (saveable) home config.
    pub fn set_host(&mut self, host: &str) {
        self.home.host = Some(host.to_string());
    }
    /// The configured protocol, resolved in priority order:
    /// `SPACETIMEDB_PROTOCOL` env var > project config > home config > default.
    pub fn protocol(&self) -> String {
        if let Ok(protocol) = std::env::var("SPACETIMEDB_PROTOCOL") {
            protocol
        } else {
            self.proj
                .protocol
                .as_ref()
                .or(self.home.protocol.as_ref())
                .map(|s| s.as_str())
                .unwrap_or(DEFAULT_PROTOCOL)
                .to_owned()
        }
    }
    /// Set the protocol in the (saveable) home config.
    pub fn set_protocol(&mut self, protocol: &str) {
        self.home.protocol = Some(protocol.to_string());
    }
    /// The default identity, preferring the project config over home.
    pub fn default_identity(&self) -> Option<&str> {
        self.proj
            .default_identity
            .as_ref()
            .or(self.home.default_identity.as_ref())
            .map(|s| s.as_str())
    }
    /// Set the default identity in the (saveable) home config.
    pub fn set_default_identity(&mut self, default_identity: String) {
        self.home.default_identity = Some(default_identity);
    }
    /// The default database address, preferring the project config over home.
    pub fn default_address(&self) -> Option<&str> {
        // Fixed: this previously read `self.proj.default_identity` (a
        // copy-paste from `default_identity()`), so a project-level default
        // *identity* was returned as the default *address*.
        self.proj
            .default_address
            .as_ref()
            .or(self.home.default_address.as_ref())
            .map(|s| s.as_str())
    }
    /// All saved identities. `load()` guarantees the list exists.
    pub fn identity_configs(&self) -> &Vec<IdentityConfig> {
        self.home.identity_configs.as_ref().unwrap()
    }
    /// Mutable access to the saved identities, creating the list if needed.
    pub fn identity_configs_mut(&mut self) -> &mut Vec<IdentityConfig> {
        self.home.identity_configs.get_or_insert(vec![])
    }
    /// Find which of the accepted config file names exists in `config_dir`,
    /// checking `.spacetime.toml`, then `spacetime.toml`, then `config.toml`.
    fn find_config_filename(config_dir: &PathBuf) -> Option<&'static str> {
        let read_dir = fs::read_dir(config_dir).unwrap();
        let filenames = [DOT_SPACETIME_FILENAME, SPACETIME_FILENAME, CONFIG_FILENAME];
        let mut config_filename = None;
        'outer: for path in read_dir {
            for name in filenames {
                if name == path.as_ref().unwrap().file_name().to_str().unwrap() {
                    config_filename = Some(name);
                    break 'outer;
                }
            }
        }
        config_filename
    }
    /// Load a raw config from `config_dir`, honoring the
    /// `SPACETIME_CONFIG_FILE` env var override. Creates the directory
    /// (but not a file) if missing.
    fn load_raw(config_dir: PathBuf) -> RawConfig {
        if let Some(config_path) = std::env::var_os("SPACETIME_CONFIG_FILE") {
            return Self::load_from_file(config_path.as_ref());
        }
        if !config_dir.exists() {
            fs::create_dir_all(&config_dir).unwrap();
        }
        let config_filename = Self::find_config_filename(&config_dir);
        let Some(config_filename) = config_filename else {
            // Return an empty raw config without creating a file.
            return toml::from_str("").unwrap();
        };
        let config_path = config_dir.join(config_filename);
        Self::load_from_file(&config_path)
    }
    /// Parse `config_path` as TOML, creating the file if it doesn't exist.
    fn load_from_file(config_path: &Path) -> RawConfig {
        let mut file = fs::OpenOptions::new()
            .create(true)
            .write(true)
            .read(true)
            .open(config_path)
            .unwrap();
        let mut text = String::new();
        file.read_to_string(&mut text).unwrap();
        toml::from_str(&text).unwrap()
    }
    /// Load the merged configuration: home config from `~/.spacetime` plus
    /// a project config from the current directory.
    pub fn load() -> Self {
        let home_dir = dirs::home_dir().unwrap();
        let mut home_config = Self::load_raw(home_dir.join(HOME_CONFIG_DIR));
        // Ensure there is always an identity config. Simplifies other code.
        home_config.identity_configs.get_or_insert(vec![]);
        // TODO(cloutiertyler): For now we're checking for a spacetime.toml file
        // in the current directory. Eventually this should really be that we
        // search parent directories above the current directory to find
        // spacetime.toml files like a .gitignore file
        let cur_dir = std::env::current_dir().expect("No current working directory!");
        let cur_config = Self::load_raw(cur_dir);
        Self {
            home: home_config,
            proj: cur_config,
        }
    }
    /// Persist the home config (project config is never written back).
    pub fn save(&self) {
        let home_dir = dirs::home_dir().unwrap();
        let config_dir = home_dir.join(HOME_CONFIG_DIR);
        if !config_dir.exists() {
            fs::create_dir_all(&config_dir).unwrap();
        }
        let config_filename = Self::find_config_filename(&config_dir).unwrap_or(CONFIG_FILENAME);
        let config_path = config_dir.join(config_filename);
        let mut file = fs::OpenOptions::new()
            .create(true)
            .write(true)
            .open(config_path)
            .unwrap();
        let str = toml::to_string_pretty(&self.home).unwrap();
        // Truncate before writing so a shorter config doesn't leave stale
        // bytes from the previous contents.
        file.set_len(0).unwrap();
        file.write_all(str.as_bytes()).unwrap();
        file.sync_all().unwrap();
    }
    /// The identity config matching the default identity, if one is set.
    /// Panics if the default identity names a missing config (see
    /// `update_default_identity` which repairs that state).
    pub fn get_default_identity_config(&self) -> Option<&IdentityConfig> {
        if let Some(identity) = &self.default_identity() {
            let config = self
                .identity_configs()
                .iter()
                .find(|c| &c.identity == identity)
                .unwrap();
            Some(config)
        } else {
            None
        }
    }
    /// Whether any saved identity uses `nickname`.
    pub fn name_exists(&self, nickname: &str) -> bool {
        for name in self.identity_configs().iter().map(|c| &c.nickname) {
            if name.as_ref() == Some(&nickname.to_string()) {
                return true;
            }
        }
        false
    }
    /// Look up a saved identity by nickname.
    pub fn get_identity_config_by_name(&self, name: &str) -> Option<&IdentityConfig> {
        self.identity_configs()
            .iter()
            .find(|c| c.nickname.as_ref() == Some(&name.to_string()))
    }
    /// Look up a saved identity by its identity string.
    pub fn get_identity_config_by_identity(&self, identity: &str) -> Option<&IdentityConfig> {
        self.identity_configs().iter().find(|c| c.identity == identity)
    }
    /// Mutable lookup of a saved identity by its identity string.
    pub fn get_identity_config_by_identity_mut(&mut self, identity: &str) -> Option<&mut IdentityConfig> {
        self.identity_configs_mut().iter_mut().find(|c| c.identity == identity)
    }
    /// Remove and return the saved identity with the given nickname, if any.
    pub fn delete_identity_config_by_name(&mut self, name: &str) -> Option<IdentityConfig> {
        let index = self
            .home
            .identity_configs
            .as_ref()
            .unwrap()
            .iter()
            .position(|c| c.nickname.as_deref() == Some(name));
        if let Some(index) = index {
            Some(self.home.identity_configs.as_mut().unwrap().remove(index))
        } else {
            None
        }
    }
    /// Remove and return the saved identity with the given identity string.
    pub fn delete_identity_config_by_identity(&mut self, identity: &str) -> Option<IdentityConfig> {
        let index = self
            .home
            .identity_configs
            .as_ref()
            .unwrap()
            .iter()
            .position(|c| c.identity == identity);
        if let Some(index) = index {
            Some(self.home.identity_configs.as_mut().unwrap().remove(index))
        } else {
            None
        }
    }
    /// Repair the default identity: if it no longer names a saved config
    /// (e.g. after a deletion), fall back to the first saved identity.
    pub fn update_default_identity(&mut self) {
        if let Some(default_identity) = &self.home.default_identity {
            if self
                .identity_configs()
                .iter()
                .map(|c| &c.identity)
                .any(|i| i == default_identity)
            {
                return;
            }
        }
        self.home.default_identity = self.identity_configs().first().map(|c| c.identity.clone())
    }
    /// Full base URL for the host, e.g. `https://spacetimedb.com/spacetimedb`.
    pub fn get_host_url(&self) -> String {
        format!("{}://{}", self.protocol(), self.host())
    }
}
+57
View File
@@ -0,0 +1,57 @@
pub mod api;
mod config;
mod subcommands;
mod tasks;
pub mod util;
use clap::{ArgMatches, Command};
pub use config::Config;
pub use subcommands::*;
/// Returns every `spacetime` CLI subcommand, in the order they should appear
/// in help output.
pub fn get_subcommands() -> Vec<Command> {
    let mut commands = vec![
        version::cli(),
        publish::cli(),
        delete::cli(),
        logs::cli(),
        call::cli(),
        describe::cli(),
        identity::cli(),
        energy::cli(),
        sql::cli(),
        dns::cli(),
        generate::cli(),
        list::cli(),
        init::cli(),
        build::cli(),
    ];
    // Trace logging is an optional feature; only expose it when compiled in.
    #[cfg(feature = "tracelogging")]
    commands.push(tracelog::cli());
    commands.push(server::cli());
    commands.push(repl::cli());
    commands
}
/// Dispatches a parsed subcommand name to its handler.
///
/// Arm order mirrors `get_subcommands` for easier cross-checking; any name
/// not listed here produces an "Invalid subcommand" error.
pub async fn exec_subcommand(config: Config, cmd: &str, args: &ArgMatches) -> Result<(), anyhow::Error> {
    match cmd {
        "version" => version::exec(config, args).await,
        "publish" => publish::exec(config, args).await,
        "delete" => delete::exec(config, args).await,
        "logs" => logs::exec(config, args).await,
        "call" => call::exec(config, args).await,
        "describe" => describe::exec(config, args).await,
        "identity" => identity::exec(config, args).await,
        "energy" => energy::exec(config, args).await,
        "sql" => sql::exec(config, args).await,
        "dns" => dns::exec(config, args).await,
        // `generate` is the one synchronous handler; it takes no config.
        "generate" => generate::exec(args),
        "list" => list::exec(config, args).await,
        "init" => init::exec(config, args).await,
        "build" => build::exec(config, args).await,
        #[cfg(feature = "tracelogging")]
        "tracelog" => tracelog::exec(config, args).await,
        "server" => server::exec(config, args).await,
        "repl" => repl::exec(config, args).await,
        unknown => Err(anyhow::anyhow!("Invalid subcommand: {}", unknown)),
    }
}
+41
View File
@@ -0,0 +1,41 @@
use clap::Command;
use spacetimedb_cli::*;
/// CLI entry point: load the config, persist it (so a default file exists on
/// first run), then dispatch the chosen subcommand.
#[tokio::main]
async fn main() -> Result<(), anyhow::Error> {
    let config = Config::load();
    // Save a default version to disk
    config.save();
    let (cmd, subcommand_args) = util::match_subcommand_or_exit(get_command());
    exec_subcommand(config, &cmd, &subcommand_args).await
}
/// Builds the top-level `spacetime` clap command: all subcommands attached,
/// subcommand required, and a custom help template.
fn get_command() -> Command {
    Command::new("spacetime")
        .args_conflicts_with_subcommands(true)
        .subcommand_required(true)
        .subcommands(get_subcommands())
        .help_expected(true)
        // NOTE(review): the stray "│" after the Discord URL in the template
        // below looks like leftover box-drawing decoration — confirm whether
        // it is intentional before removing.
        .help_template(
            "\
SpacetimeDB Command Line Tool
Easily interact with a SpacetimeDB cluster
Give us feedback in our Discord server:
https://discord.gg/w2DVqNZXdN │
Usage:
{usage}
Options:
{options}
Commands:
{subcommands}
",
        )
}
+57
View File
@@ -0,0 +1,57 @@
use crate::Config;
use clap::ArgAction::SetTrue;
use clap::{Arg, ArgMatches};
use std::path::PathBuf;
/// Defines the `spacetime build` subcommand and its arguments.
pub fn cli() -> clap::Command {
    let project_path = Arg::new("project-path")
        .default_value(".")
        .value_parser(clap::value_parser!(PathBuf))
        .help("The path of the project that you would like to build.");
    let skip_clippy = Arg::new("skip_clippy")
        .long("skip_clippy")
        .short('s')
        .action(SetTrue)
        .env("SPACETIME_SKIP_CLIPPY")
        .value_parser(clap::builder::FalseyValueParser::new())
        .help("Skips running clippy on the module before building (intended to speed up local iteration, not recommended for CI)");
    let debug = Arg::new("debug")
        .long("debug")
        .short('d')
        .action(SetTrue)
        .help("Builds the module using debug instead of release (intended to speed up local iteration, not recommended for CI)");
    clap::Command::new("build")
        .about("Builds a spacetime module.")
        .arg(project_path)
        .arg(skip_clippy)
        .arg(debug)
}
/// Executes `spacetime build`: validates the project path and invokes the
/// build task. The `Config` parameter is unused but kept for a uniform
/// subcommand signature.
pub async fn exec(_: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let project_path = args.get_one::<PathBuf>("project-path").unwrap();
    let skip_clippy = args.get_flag("skip_clippy");
    let build_debug = args.get_flag("debug");
    // Validate that the project path exists and is a directory before building.
    if !project_path.exists() {
        return Err(anyhow::anyhow!(
            "Fatal Error: path {} does not exist.",
            project_path.display()
        ));
    }
    if !project_path.is_dir() {
        return Err(anyhow::anyhow!(
            "Fatal Error: path {} exists but is not a directory.",
            project_path.display()
        ));
    }
    crate::tasks::build(project_path, skip_clippy, build_debug)?;
    println!("Build finished successfully.");
    Ok(())
}
+81
View File
@@ -0,0 +1,81 @@
use crate::config::Config;
use crate::util::get_auth_header;
use crate::util::spacetime_dns;
use anyhow::Error;
use clap::Arg;
use clap::ArgAction;
use clap::ArgMatches;
use spacetimedb_lib::name::{is_address, DnsLookupResponse};
/// Defines the `spacetime call` subcommand: invoke a reducer in a database.
pub fn cli() -> clap::Command {
    clap::Command::new("call")
        .about("Invokes a reducer function in a database")
        .arg(
            Arg::new("database")
                .required(true)
                .help("The database domain or address to use to invoke the call"),
        )
        .arg(
            Arg::new("reducer_name")
                .required(true)
                .help("The name of the reducer to call"),
        )
        // Reducer arguments default to an empty JSON array.
        .arg(
            Arg::new("arguments")
                .help("arguments as a JSON array")
                .default_value("[]"),
        )
        // `--as-identity` and `--anon-identity` are mutually exclusive.
        .arg(
            Arg::new("as_identity")
                .long("as-identity")
                .short('i')
                .conflicts_with("anon_identity")
                .help("The identity to use for the call"),
        )
        .arg(
            Arg::new("anon_identity")
                .long("anon-identity")
                .short('a')
                .conflicts_with("as_identity")
                .action(ArgAction::SetTrue)
                .help("If this flag is present, the call will be executed with no identity provided"),
        )
        .after_help("Run `spacetime help call` for more detailed information.\n")
}
/// Executes `spacetime call`: resolve the database address, then POST the
/// reducer arguments (a JSON array) to the host.
pub async fn exec(mut config: Config, args: &ArgMatches) -> Result<(), Error> {
    let database = args.get_one::<String>("database").unwrap();
    let reducer_name = args.get_one::<String>("reducer_name").unwrap();
    let arg_json = args.get_one::<String>("arguments").unwrap();
    let as_identity = args.get_one::<String>("as_identity");
    let anon_identity = args.get_flag("anon_identity");
    // A literal address is used as-is; anything else goes through DNS.
    let address = if is_address(database.as_str()) {
        database.clone()
    } else {
        match spacetime_dns(&config, database).await? {
            DnsLookupResponse::Success { address, .. } => address,
            DnsLookupResponse::Failure { domain } => {
                return Err(anyhow::anyhow!("The dns resolution of {} failed.", domain));
            }
        }
    };
    let url = format!(
        "{}/database/call/{}/{}",
        config.get_host_url(),
        address,
        reducer_name
    );
    let mut builder = reqwest::Client::new().post(url);
    // Anonymous calls omit the Authorization header entirely.
    if let Some((auth_header, _)) = get_auth_header(&mut config, anon_identity, as_identity.map(|x| x.as_str())).await {
        builder = builder.header("Authorization", auth_header);
    }
    builder
        .body(arg_json.to_owned())
        .send()
        .await?
        .error_for_status()?;
    Ok(())
}
+56
View File
@@ -0,0 +1,56 @@
use crate::config::Config;
use crate::util::get_auth_header;
use crate::util::spacetime_dns;
use clap::Arg;
use clap::ArgMatches;
use spacetimedb_lib::name::{is_address, DnsLookupResponse};
/// Defines the `spacetime delete` subcommand: delete a database.
pub fn cli() -> clap::Command {
    clap::Command::new("delete")
        .about("Deletes a SpacetimeDB database")
        .arg(
            Arg::new("database")
                .required(true)
                .help("The domain or address of the database to delete"),
        )
        .arg(
            Arg::new("identity")
                .long("identity")
                .short('i')
                .help("The identity to use for deleting this database")
                .long_help("The identity to use for deleting this database. If no identity is provided, the default one will be used."),
        )
        .after_help("Run `spacetime help delete` for more detailed information.\n")
}
/// Executes `spacetime delete`: resolve the database address and POST a
/// delete request, authorized with the chosen (or default) identity.
pub async fn exec(mut config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let database = args.get_one::<String>("database").unwrap();
    let identity = args.get_one::<String>("identity");
    let auth_header = get_auth_header(&mut config, false, identity.map(|x| x.as_str()))
        .await
        .map(|x| x.0);
    // A literal address is used as-is; anything else goes through DNS.
    let address = if is_address(database.as_str()) {
        database.clone()
    } else {
        match spacetime_dns(&config, database).await? {
            DnsLookupResponse::Success { address, .. } => address,
            DnsLookupResponse::Failure { domain } => {
                return Err(anyhow::anyhow!("The dns resolution of {} failed.", domain));
            }
        }
    };
    let url = format!("{}/database/delete/{}", config.get_host_url(), address);
    let mut builder = reqwest::Client::new().post(url);
    if let Some(auth_header) = auth_header {
        builder = builder.header("Authorization", auth_header);
    }
    builder.send().await?.error_for_status()?;
    Ok(())
}
+103
View File
@@ -0,0 +1,103 @@
use crate::config::Config;
use crate::util::{get_auth_header, spacetime_dns};
use clap::Arg;
use clap::ArgAction::SetTrue;
use clap::ArgMatches;
use spacetimedb_lib::name::{is_address, DnsLookupResponse};
/// Defines the `spacetime describe` subcommand: inspect a database schema or
/// a single reducer/table within it.
pub fn cli() -> clap::Command {
    clap::Command::new("describe")
        .about("Describe the structure of a database or entities within it")
        .arg(
            Arg::new("database")
                .required(true)
                .help("The domain or address of the database to describe"),
        )
        .arg(
            Arg::new("entity_type")
                .value_parser(["reducer", "table"])
                .help("Whether to describe a reducer or table"),
        )
        // Naming an entity requires saying what kind of entity it is.
        .arg(
            Arg::new("entity_name")
                .requires("entity_type")
                .help("The name of the entity to describe"),
        )
        .arg(Arg::new("brief").long("brief").short('b').action(SetTrue)
            .help("If this flag is present, a brief description shall be returned"))
        // `--as-identity` and `--anon-identity` are mutually exclusive.
        .arg(
            Arg::new("as_identity")
                .long("as-identity")
                .short('i')
                .conflicts_with("anon_identity")
                .help("The identity to use to describe the entity")
                .long_help("The identity to use to describe the entity. If no identity is provided, the default one will be used."),
        )
        .arg(
            Arg::new("anon_identity")
                .long("anon-identity")
                .short('a')
                .conflicts_with("as_identity")
                .action(SetTrue)
                .help("If this flag is present, no identity will be provided when describing the database"),
        )
        .after_help("Run `spacetime help describe` for more detailed information.\n")
}
/// Executes `spacetime describe`: fetch the schema of a whole database, or
/// of a single reducer/table within it, and print the JSON response.
///
/// The request construction is shared between both cases; only the URL
/// differs (the original duplicated the client/auth/query code per arm).
pub async fn exec(mut config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let database = args.get_one::<String>("database").unwrap();
    let expand = !args.get_flag("brief");
    let entity_name = args.get_one::<String>("entity_name");
    let entity_type = args.get_one::<String>("entity_type");
    let as_identity = args.get_one::<String>("as_identity");
    let anon_identity = args.get_flag("anon_identity");
    let auth_header = get_auth_header(&mut config, anon_identity, as_identity.map(|x| x.as_str()))
        .await
        .map(|x| x.0);
    // A literal address is used as-is; anything else goes through DNS.
    let address = if is_address(database.as_str()) {
        database.clone()
    } else {
        match spacetime_dns(&config, database).await? {
            DnsLookupResponse::Success { domain: _, address } => address,
            DnsLookupResponse::Failure { domain } => {
                return Err(anyhow::anyhow!("The dns resolution of {} failed.", domain));
            }
        }
    };
    // No entity name => describe the whole database; otherwise describe one
    // entity. clap's `requires("entity_type")` guarantees entity_type is
    // present whenever entity_name is, so the unwrap below cannot fail.
    let url = match entity_name {
        None => format!("{}/database/schema/{}", config.get_host_url(), address),
        Some(entity_name) => {
            // The server routes use the plural form ("reducers"/"tables").
            let entity_type = format!("{}s", entity_type.unwrap());
            format!(
                "{}/database/schema/{}/{}/{}",
                config.get_host_url(),
                address,
                entity_type,
                entity_name
            )
        }
    };
    let client = reqwest::Client::new();
    let mut builder = client.get(url);
    if let Some(auth_header) = auth_header {
        builder = builder.header("Authorization", auth_header);
    }
    let res = builder.query(&[("expand", expand)]).send().await?;
    let res = res.error_for_status()?;
    let body = res.bytes().await?;
    let str = String::from_utf8(body.to_vec())?;
    println!("{}", str);
    Ok(())
}
+190
View File
@@ -0,0 +1,190 @@
use crate::config::Config;
use crate::util::{get_auth_header, spacetime_dns, spacetime_register_tld, spacetime_reverse_dns};
use clap::ArgMatches;
use clap::{Arg, Command};
use reqwest::Url;
use spacetimedb_lib::name::{DnsLookupResponse, InsertDomainResult, RegisterTldResult};
/// Defines the `spacetime dns` command group; the actual work lives in the
/// subcommands returned by `get_subcommands`.
pub fn cli() -> Command {
    Command::new("dns")
        .about("Create, manage and query domains")
        .args_conflicts_with_subcommands(true)
        .subcommand_required(true)
        .subcommands(get_subcommands())
}
/// Entry point for `spacetime dns`: forwards to the selected subcommand.
/// clap enforces `subcommand_required`, so the `expect` cannot trip in practice.
pub async fn exec(config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let (cmd, subcommand_args) = args
        .subcommand()
        .expect("Subcommand required");
    exec_subcommand(config, cmd, subcommand_args).await
}
/// Builds the `spacetime dns` subcommands: register-tld, lookup,
/// reverse-lookup, and set-name.
fn get_subcommands() -> Vec<Command> {
    vec![
        Command::new("register-tld")
            .about("Registers a new top level domain")
            .arg(
                Arg::new("tld")
                    .required(true)
                    .help("The top level domain that you would like to register"),
            )
            .arg(Arg::new("identity").long("identity").short('i').help(
                "The identity that should own this tld. If no identity is specified, then the default identity is used",
            ))
            .after_help("Run `spacetime dns register-tld --help` for more detailed information.\n"),
        Command::new("lookup")
            .about("Resolves a domain to a database address")
            .arg(Arg::new("domain").required(true).help("The name of the domain to lookup"))
            .after_help("Run `spacetime dns lookup --help` for more detailed information"),
        Command::new("reverse-lookup")
            .about("Returns the domains for the provided database address")
            .arg(Arg::new("address").required(true).help("The address you would like to find all of the known domains for"))
            .after_help("Run `spacetime dns reverse-lookup --help` for more detailed information.\n"),
        Command::new("set-name")
            .about("Sets the domain of the database")
            .arg(Arg::new("domain").required(true).help("The domain you would like to assign or create"))
            .arg(Arg::new("address").required(true).help("The database address to assign to the domain"))
            .arg(Arg::new("identity").long("identity").short('i').long_help(
                "The identity that owns the tld for this domain. If no identity is specified, the default identity is used.",
            ).help("The identity that owns the tld for this domain"))
            .after_help("Run `spacetime dns set-name --help` for more detailed information.\n"),
    ]
}
/// Dispatches a `spacetime dns` subcommand name to its handler.
async fn exec_subcommand(config: Config, cmd: &str, args: &ArgMatches) -> Result<(), anyhow::Error> {
    match cmd {
        "lookup" => exec_dns_lookup(config, args).await,
        "reverse-lookup" => exec_reverse_dns(config, args).await,
        "register-tld" => exec_register_tld(config, args).await,
        "set-name" => exec_set_name(config, args).await,
        unknown => Err(anyhow::anyhow!("Invalid subcommand: {}", unknown)),
    }
}
/// Handles `spacetime dns register-tld`: registers `tld` to the given (or
/// default) identity and reports the outcome. The config is saved afterwards
/// except when registration fails with Unauthorized.
async fn exec_register_tld(mut config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let tld = args.get_one::<String>("tld").unwrap().clone();
    let identity = args.get_one::<String>("identity");
    let result = spacetime_register_tld(&mut config, &tld, identity).await?;
    match result {
        RegisterTldResult::Success { domain } => {
            println!("Registered domain: {}", domain);
        }
        RegisterTldResult::AlreadyRegistered { domain } => {
            println!("Domain is already registered by the identity you provided: {}", domain);
        }
        RegisterTldResult::Unauthorized { domain } => {
            return Err(anyhow::anyhow!("Domain is already registered by another: {}", domain));
        }
    }
    config.save();
    Ok(())
}
/// Handles `spacetime dns lookup`: prints the address a domain resolves to,
/// or a "No such database" message when resolution fails.
pub async fn exec_dns_lookup(config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let domain = args.get_one::<String>("domain").unwrap();
    match spacetime_dns(&config, domain).await? {
        DnsLookupResponse::Success { domain: _, address } => println!("{}", address),
        DnsLookupResponse::Failure { domain } => println!("No such database: {}", domain),
    }
    Ok(())
}
/// Handles `spacetime dns reverse-lookup`: prints every domain known for an
/// address, or errors when none exist.
pub async fn exec_reverse_dns(config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let addr = args.get_one::<String>("address").unwrap();
    let response = spacetime_reverse_dns(&config, addr).await?;
    // Guard clause: no names at all is reported as an error.
    if response.names.is_empty() {
        return Err(anyhow::anyhow!("Could not find a name for the address: {}", addr));
    }
    for name in response.names {
        println!("{}", name);
    }
    Ok(())
}
/// Handles `spacetime dns set-name`: associates `domain` with `address`,
/// registering the TLD on the fly (`register_tld=true`) when permitted.
///
/// Body-read and JSON-decode failures are now propagated with `?` instead of
/// panicking via `unwrap`.
pub async fn exec_set_name(mut config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let domain = args.get_one::<String>("domain").unwrap();
    let address = args.get_one::<String>("address").unwrap();
    let identity = args.get_one::<String>("identity");
    // NOTE(review): this still panics when no auth header can be produced;
    // consider surfacing an error instead.
    let (auth_header, _) = get_auth_header(&mut config, false, identity.map(|x| x.as_str()))
        .await
        .unwrap();
    let query_params = vec![
        ("domain", domain.clone()),
        ("address", address.clone()),
        ("register_tld", "true".to_string()),
    ];
    let builder = reqwest::Client::new()
        .get(Url::parse_with_params(
            format!("{}/database/set_name", config.get_host_url()).as_str(),
            query_params,
        )?)
        .header("Authorization", auth_header);
    let res = builder.send().await?.error_for_status()?;
    let bytes = res.bytes().await?;
    let result: InsertDomainResult = serde_json::from_slice(&bytes)?;
    match result {
        InsertDomainResult::Success { domain, address } => {
            println!("Domain set to {} for address {}.", domain, address);
        }
        InsertDomainResult::TldNotRegistered { domain } => {
            return Err(anyhow::anyhow!(
                "The top level domain that you provided is not registered.\n\
                This tld is not yet registered to any identity. You can register this domain with the following command:\n\
                \n\
                \tspacetime dns register-tld {}\n",
                domain.tld()
            ));
        }
        InsertDomainResult::PermissionDenied { domain } => {
            return match identity {
                Some(identity) => {
                    //TODO(jdetter): Have a nice name generator here, instead of using some abstract characters
                    // we should perhaps generate fun names like 'green-fire-dragon' instead
                    let suggested_tld: String = identity.chars().take(12).collect();
                    if let Some(sub_domain) = domain.sub_domain() {
                        Err(anyhow::anyhow!(
                            "The top level domain {} is not registered to the identity you provided.\n\
                            We suggest you register a new tld:\n\
                            \tspacetime dns register-tld {}\n\
                            \n\
                            And then push to the domain that uses that tld:\n\
                            \tspacetime publish {}/{}\n",
                            domain.tld(),
                            suggested_tld,
                            suggested_tld,
                            sub_domain
                        ))
                    } else {
                        Err(anyhow::anyhow!(
                            "The top level domain {} is not registered to the identity you provided.\n\
                            We suggest you register a new tld:\n\
                            \tspacetime dns register-tld {}\n\
                            \n\
                            And then push to the domain that uses that tld:\n\
                            \tspacetime publish {}\n",
                            domain.tld(),
                            suggested_tld,
                            suggested_tld
                        ))
                    }
                }
                None => Err(anyhow::anyhow!(
                    "The domain {} is not registered to the identity you provided.",
                    domain
                )),
            };
        }
    }
    Ok(())
}
+124
View File
@@ -0,0 +1,124 @@
// use clap::Arg;
use clap::{value_parser, Arg, ArgMatches};
use crate::config::Config;
/// Defines the `spacetime energy` command group.
pub fn cli() -> clap::Command {
    clap::Command::new("energy")
        .about("Invokes commands related to database budgets")
        .subcommand_required(true)
        .args_conflicts_with_subcommands(true)
        .subcommands(get_energy_subcommands())
}
/// Builds the `spacetime energy` subcommands: status and set-balance.
fn get_energy_subcommands() -> Vec<clap::Command> {
    vec![
        clap::Command::new("status")
            .about("Show current energy balance for an identity")
            .arg(
                Arg::new("identity")
                    .help("The identity to check the balance for")
                    .long_help(
                        "The identity to check the balance for. If no identity is provided, the default one will be used.",
                    ),
            ),
        clap::Command::new("set-balance")
            .about("Update the current budget balance for a database")
            .arg(
                Arg::new("balance")
                    .required(true)
                    .value_parser(value_parser!(u64))
                    .help("The balance value to set"),
            )
            .arg(
                Arg::new("identity")
                    .help("The identity to set a balance for")
                    .long_help(
                        "The identity to set a balance for. If no identity is provided, the default one will be used.",
                    ),
            )
            // --quiet suppresses echoing the server response.
            .arg(
                Arg::new("quiet")
                    .long("quiet")
                    .short('q')
                    .action(clap::ArgAction::SetTrue)
                    .help("Runs command in silent mode"),
            ),
    ]
}
/// Dispatches a `spacetime energy` subcommand name to its handler.
async fn exec_subcommand(config: Config, cmd: &str, args: &ArgMatches) -> Result<(), anyhow::Error> {
    match cmd {
        "set-balance" => exec_update_balance(config, args).await,
        "status" => exec_status(config, args).await,
        unknown => Err(anyhow::anyhow!("Invalid subcommand: {}", unknown)),
    }
}
/// Entry point for `spacetime energy`: forwards to the selected subcommand.
/// clap enforces `subcommand_required`, so the `expect` cannot trip in practice.
pub async fn exec(config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let (cmd, subcommand_args) = args
        .subcommand()
        .expect("Subcommand required");
    exec_subcommand(config, cmd, subcommand_args).await
}
/// Handles `spacetime energy set-balance`: sets the energy balance for the
/// given (or default) identity and, unless `--quiet`, echoes the response.
///
/// When neither an explicit identity nor a default identity config exists,
/// this now returns an error instead of panicking.
async fn exec_update_balance(config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let hex_identity = args.get_one::<String>("identity");
    let balance = *args.get_one::<u64>("balance").unwrap();
    let quiet = args.get_flag("quiet");
    let hex_identity = match hex_identity {
        Some(hex_identity) => hex_identity.as_str(),
        None => config
            .get_default_identity_config()
            .ok_or_else(|| anyhow::anyhow!("No identity provided and no default identity is configured"))?
            .identity
            .as_str(),
    };
    let client = reqwest::Client::new();
    let res = set_balance(&client, &config, hex_identity, balance).await?;
    if !quiet {
        let body = res.bytes().await?;
        let str = String::from_utf8(body.to_vec())?;
        println!("{}", str);
    }
    Ok(())
}
/// Handles `spacetime energy status`: prints the energy balance for the
/// given (or default) identity.
///
/// When neither an explicit identity nor a default identity config exists,
/// this now returns an error instead of panicking.
async fn exec_status(config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let hex_identity = args.get_one::<String>("identity");
    let hex_identity = match hex_identity {
        Some(hex_identity) => hex_identity.as_str(),
        None => config
            .get_default_identity_config()
            .ok_or_else(|| anyhow::anyhow!("No identity provided and no default identity is configured"))?
            .identity
            .as_str(),
    };
    let client = reqwest::Client::new();
    let res = client
        .get(format!("{}/energy/{}", config.get_host_url(), hex_identity))
        .send()
        .await?;
    let res = res.error_for_status()?;
    let body = res.bytes().await?;
    let str = String::from_utf8(body.to_vec())?;
    println!("{}", str);
    Ok(())
}
/// POSTs a new balance for `hex_identity` and returns the (status-checked)
/// response so callers can read the body if they want.
pub(super) async fn set_balance(
    client: &reqwest::Client,
    config: &Config,
    hex_identity: &str,
    balance: u64,
) -> anyhow::Result<reqwest::Response> {
    // TODO: this really should be form data in POST body, not query string parameter, but gotham
    // does not support that on the server side without an extension.
    // see https://github.com/gotham-rs/gotham/issues/11
    let res = client
        .post(format!("{}/energy/{}", config.get_host_url(), hex_identity))
        .query(&[("balance", balance)])
        .send()
        .await?
        .error_for_status()?;
    Ok(res)
}
@@ -0,0 +1,100 @@
use std::fmt;
use std::ops::{Deref, DerefMut};
/// A `fmt::Write` wrapper that prefixes every non-empty line written through
/// it with the current indentation level.
pub struct CodeIndenter<W: fmt::Write> {
    writer: W,
    /// Current indent depth, in units of `super::INDENT`.
    level: u32,
    /// True when the next `write_str` call starts a fresh line and therefore
    /// must emit the indent prefix first.
    needs_indenting: bool,
}
impl<W: fmt::Write> CodeIndenter<W> {
    /// Wraps `writer` with indentation tracking, starting at depth 0 with
    /// the first line flagged as needing an indent.
    pub fn new(writer: W) -> Self {
        CodeIndenter {
            writer,
            level: 0,
            needs_indenting: true,
        }
    }
    // pub fn get_ref(&self) -> &W {
    //     &self.writer
    // }
    // pub fn get_mut(&mut self) -> &mut W {
    //     &mut self.writer
    // }
    /// Consumes the indenter, returning the underlying writer.
    pub fn into_inner(self) -> W {
        self.writer
    }
    /// Increases the indent depth by `n`, saturating at `u32::MAX`.
    pub fn indent(&mut self, n: u32) {
        self.level = self.level.saturating_add(n);
    }
    /// Decreases the indent depth by `n`, saturating at 0.
    pub fn dedent(&mut self, n: u32) {
        self.level = self.level.saturating_sub(n);
    }
    /// Raises the indent depth by `n` and returns a guard.
    ///
    /// NOTE(review): the guard's `Drop` only calls `dedent(1)`, so `n > 1`
    /// would leave the level permanently raised; all callers visible here
    /// use `n == 1` (via `with_indent`).
    pub fn indented(&mut self, n: u32) -> IndentScope<'_, W> {
        self.indent(n);
        IndentScope { fmt: self }
    }
    /// Writes the indent prefix: `super::INDENT` repeated `level` times.
    fn write_indent(&mut self) -> fmt::Result {
        for _ in 0..self.level {
            self.writer.write_str(super::INDENT)?;
        }
        Ok(())
    }
    /// Invoke `f` while indenting one level greater than `self` currently does.
    pub fn with_indent<Res>(&mut self, f: impl FnOnce(&mut Self) -> Res) -> Res {
        let mut indenter = self.indented(1);
        f(&mut indenter)
    }
    /// Writes a raw newline directly to the underlying writer.
    ///
    /// This bypasses `write_str`, so `needs_indenting` is not updated;
    /// callers wanting the next line indented must call `write_indent`
    /// themselves (as `delimited_block` does).
    pub fn newline(&mut self) {
        self.writer.write_char('\n').unwrap();
    }
    /// Print an indented block delimited by `before` and `after`, with body written by `f`.
    pub fn delimited_block<Res>(&mut self, before: &str, f: impl FnOnce(&mut Self) -> Res, after: &str) -> Res {
        self.writer.write_str(before).unwrap();
        let res = self.with_indent(|out| {
            out.newline();
            // Need an explicit `write_indent` call here because calling `out.newline`
            // will not cause the subsequent line to be indented, as `write_str` thinks
            // it's an empty line.
            out.write_indent().unwrap();
            f(out)
        });
        self.writer.write_str(after).unwrap();
        res
    }
}
impl<W: fmt::Write> fmt::Write for CodeIndenter<W> {
    /// Writes `s`, inserting the indent prefix at the start of each
    /// non-empty line. A trailing `'\n'` arms `needs_indenting` so the next
    /// `write_str` call indents its first line too.
    fn write_str(&mut self, s: &str) -> fmt::Result {
        for (i, line) in s.split_inclusive('\n').enumerate() {
            // The first segment is indented only if the previous write ended
            // with a newline; every later segment starts a new line.
            let write_indent = i != 0 || std::mem::take(&mut self.needs_indenting);
            // skip the indent if it's an empty line
            if write_indent && line != "\n" {
                self.write_indent()?;
            }
            self.writer.write_str(line)?;
        }
        self.needs_indenting = s.ends_with('\n');
        Ok(())
    }
}
/// Guard returned by `CodeIndenter::indented`; removes one indent level when
/// dropped and derefs to the wrapped `CodeIndenter`.
pub struct IndentScope<'a, W: fmt::Write> {
    fmt: &'a mut CodeIndenter<W>,
}
impl<W: fmt::Write> Drop for IndentScope<'_, W> {
    // Undo exactly one level of indentation when the scope ends.
    fn drop(&mut self) {
        self.fmt.dedent(1);
    }
}
impl<T: fmt::Write> Deref for IndentScope<'_, T> {
    type Target = CodeIndenter<T>;
    /// Lets the scope be used wherever a `&CodeIndenter` is expected.
    fn deref(&self) -> &Self::Target {
        self.fmt
    }
}
impl<T: fmt::Write> DerefMut for IndentScope<'_, T> {
    /// Lets the scope be written through as a `&mut CodeIndenter`.
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.fmt
    }
}
File diff suppressed because it is too large Load Diff
+424
View File
@@ -0,0 +1,424 @@
use std::fs;
use std::path::{Path, PathBuf};
use clap::Arg;
use clap::ArgAction::SetTrue;
use convert_case::{Case, Casing};
use spacetimedb_lib::sats::{AlgebraicType, Typespace};
use spacetimedb_lib::{bsatn, MiscModuleExport, ModuleDef, ReducerDef, TableDef, TypeAlias};
use wasmtime::{AsContext, Caller, ExternType};
mod code_indenter;
pub mod csharp;
pub mod python;
pub mod rust;
pub mod typescript;
const INDENT: &str = "\t";
/// Defines the `spacetime generate` subcommand and its arguments.
///
/// Fixes the after-help text, which pointed users at `spacetime help publish`
/// (an apparent copy-paste from the publish command) instead of `generate`.
pub fn cli() -> clap::Command {
    clap::Command::new("generate")
        .about("Generate client files for a spacetime module.")
        // Either point at an already-built wasm file, or at a project to build.
        .arg(
            Arg::new("wasm_file")
                .value_parser(clap::value_parser!(PathBuf))
                .long("wasm-file")
                .short('w')
                .conflicts_with("project_path")
                .help("The system path (absolute or relative) to the wasm file we should inspect"),
        )
        .arg(
            Arg::new("project_path")
                .value_parser(clap::value_parser!(PathBuf))
                .long("project-path")
                .short('p')
                .default_value(".")
                .conflicts_with("wasm_file")
                .help("The path to the wasm project"),
        )
        .arg(
            Arg::new("out_dir")
                .value_parser(clap::value_parser!(PathBuf))
                .required(true)
                .long("out-dir")
                .short('o')
                .help("The system path (absolute or relative) to the generate output directory"),
        )
        .arg(
            Arg::new("namespace")
                .default_value("SpacetimeDB")
                .long("namespace")
                .short('n')
                .help("The namespace that should be used (default is 'SpacetimeDB')"),
        )
        .arg(
            Arg::new("lang")
                .required(true)
                .long("lang")
                .short('l')
                .value_parser(clap::value_parser!(Language))
                .help("The language to generate"),
        )
        .arg(
            Arg::new("skip_clippy")
                .long("skip_clippy")
                .short('s')
                .action(SetTrue)
                .env("SPACETIME_SKIP_CLIPPY")
                .value_parser(clap::builder::FalseyValueParser::new())
                .help("Skips running clippy on the module before generating (intended to speed up local iteration, not recommended for CI)"),
        )
        .arg(
            Arg::new("debug")
                .long("debug")
                .short('d')
                .action(SetTrue)
                .help("Builds the module using debug instead of release (intended to speed up local iteration, not recommended for CI)"),
        )
        .after_help("Run `spacetime help generate` for more detailed information.")
}
/// Executes `spacetime generate`: build (or locate) the module wasm, then
/// write one generated file per item into the output directory.
///
/// Uses `Path::display()` for the error message so a non-UTF-8 output path
/// produces a readable error instead of panicking in `to_str().unwrap()`.
pub fn exec(args: &clap::ArgMatches) -> anyhow::Result<()> {
    let project_path = args.get_one::<PathBuf>("project_path").unwrap();
    let wasm_file = args.get_one::<PathBuf>("wasm_file").cloned();
    let out_dir = args.get_one::<PathBuf>("out_dir").unwrap();
    let lang = *args.get_one::<Language>("lang").unwrap();
    let namespace = args.get_one::<String>("namespace").unwrap();
    let skip_clippy = args.get_flag("skip_clippy");
    let build_debug = args.get_flag("debug");
    // Build the module ourselves unless the caller pointed us at a wasm file.
    let wasm_file = match wasm_file {
        Some(path) => path,
        None => crate::tasks::build(project_path, skip_clippy, build_debug)?,
    };
    if !out_dir.exists() {
        return Err(anyhow::anyhow!(
            "Output directory '{}' does not exist. Please create the directory and rerun this command.",
            out_dir.display()
        ));
    }
    for (fname, code) in generate(&wasm_file, lang, namespace.as_str())?.into_iter() {
        fs::write(out_dir.join(fname), code)?;
    }
    println!("Generate finished successfully.");
    Ok(())
}
/// Target language for generated client bindings.
#[derive(Clone, Copy, PartialEq)]
pub enum Language {
    Csharp,
    TypeScript,
    Python,
    Rust,
}
impl clap::ValueEnum for Language {
    /// All supported languages, in the order they appear in `--help`.
    fn value_variants<'a>() -> &'a [Self] {
        &[Self::Csharp, Self::TypeScript, Self::Python, Self::Rust]
    }
    /// Maps each language to its canonical CLI value plus accepted aliases.
    fn to_possible_value(&self) -> Option<clap::builder::PossibleValue> {
        use clap::builder::PossibleValue;
        let value = match self {
            Self::Csharp => PossibleValue::new("csharp").aliases(["c#", "cs"]),
            Self::TypeScript => PossibleValue::new("typescript").aliases(["ts", "TS"]),
            Self::Python => PossibleValue::new("python").aliases(["py", "PY"]),
            Self::Rust => PossibleValue::new("rust").aliases(["rs", "RS"]),
        };
        Some(value)
    }
}
/// Shared context for code generation: the module's typespace plus an
/// optional human-readable name per typespace entry (indexed by type ref).
pub struct GenCtx {
    typespace: Typespace,
    /// `names[i]` is the table/alias name for typespace entry `i`, if any.
    names: Vec<Option<String>>,
}
/// Extracts definitions from a compiled module and renders one
/// `(filename, contents)` pair per generated file for `lang`, followed by
/// the language's global files.
pub fn generate<'a>(wasm_file: &'a Path, lang: Language, namespace: &'a str) -> anyhow::Result<Vec<(String, String)>> {
    let module = extract_descriptions(wasm_file)?;
    let (ctx, items) = extract_from_moduledef(module);
    let items: Vec<GenItem> = items.collect();
    let mut files = Vec::new();
    for item in &items {
        // Items like `__init__` reducers produce no file and are skipped.
        if let Some(file) = item.generate(&ctx, lang, namespace) {
            files.push(file);
        }
    }
    files.extend(generate_globals(&ctx, lang, namespace, &items));
    Ok(files)
}
/// Produces the per-language "global" files that accompany the per-item files.
///
/// NOTE(review): `&Vec<GenItem>` would conventionally be `&[GenItem]` (clippy
/// `ptr_arg`), but the downstream autogen signatures are not visible here —
/// confirm they accept slices before changing.
fn generate_globals(ctx: &GenCtx, lang: Language, namespace: &str, items: &Vec<GenItem>) -> Vec<(String, String)> {
    match lang {
        Language::Csharp => csharp::autogen_csharp_globals(items, namespace),
        Language::TypeScript => typescript::autogen_typescript_globals(ctx, items),
        Language::Python => python::autogen_python_globals(ctx, items),
        Language::Rust => rust::autogen_rust_globals(ctx, items),
    }
}
/// Splits a `ModuleDef` into a generation context (typespace + type names)
/// and an iterator over the items to generate code for.
pub fn extract_from_moduledef(module: ModuleDef) -> (GenCtx, impl Iterator<Item = GenItem>) {
    let ModuleDef {
        typespace,
        tables,
        reducers,
        misc_exports,
    } = module;
    // One name slot per typespace entry; entries not referenced by a table
    // or type alias stay unnamed (`None`).
    let mut names = vec![None; typespace.types.len()];
    let name_info = itertools::chain!(
        tables.iter().map(|t| (t.data, &t.name)),
        misc_exports
            .iter()
            .map(|MiscModuleExport::TypeAlias(a)| (a.ty, &a.name)),
    );
    for (typeref, name) in name_info {
        names[typeref.idx()] = Some(name.clone())
    }
    let ctx = GenCtx { typespace, names };
    // Emission order: type aliases first, then tables, then reducers.
    let iter = itertools::chain!(
        misc_exports.into_iter().map(GenItem::from_misc_export),
        tables.into_iter().map(GenItem::Table),
        reducers.into_iter().map(GenItem::Reducer),
    );
    (ctx, iter)
}
/// A single item extracted from a module definition for which client code
/// can be generated.
pub enum GenItem {
    Table(TableDef),
    TypeAlias(TypeAlias),
    Reducer(ReducerDef),
}
impl GenItem {
fn from_misc_export(exp: MiscModuleExport) -> Self {
match exp {
MiscModuleExport::TypeAlias(a) => Self::TypeAlias(a),
}
}
fn generate(&self, ctx: &GenCtx, lang: Language, namespace: &str) -> Option<(String, String)> {
match lang {
Language::Csharp => self.generate_csharp(ctx, namespace),
Language::TypeScript => self.generate_typescript(ctx),
Language::Python => self.generate_python(ctx),
Language::Rust => self.generate_rust(ctx),
}
}
fn generate_rust(&self, ctx: &GenCtx) -> Option<(String, String)> {
match self {
GenItem::Table(table) => {
let code = rust::autogen_rust_table(ctx, table);
let name = table.name.to_case(Case::Snake);
Some((name + ".rs", code))
}
GenItem::TypeAlias(TypeAlias { name, ty }) => {
let filename = name.replace('.', "").to_case(Case::Snake);
let filename = filename + ".rs";
let code = match &ctx.typespace[*ty] {
AlgebraicType::Sum(sum) => rust::autogen_rust_sum(ctx, name, sum),
AlgebraicType::Product(prod) => rust::autogen_rust_tuple(ctx, name, prod),
_ => todo!(),
};
Some((filename, code))
}
GenItem::Reducer(reducer) if reducer.name == "__init__" => None,
GenItem::Reducer(reducer) => {
let code = rust::autogen_rust_reducer(ctx, reducer);
let name = reducer.name.to_case(Case::Snake);
Some((name + "_reducer.rs", code))
}
}
}
    /// Emit the Python source for this item as a `(file_name, contents)` pair.
    /// Returns `None` for items that produce no file (the `__init__` reducer).
    fn generate_python(&self, ctx: &GenCtx) -> Option<(String, String)> {
        match self {
            GenItem::Table(table) => {
                let code = python::autogen_python_table(ctx, table);
                let name = table.name.to_case(Case::Snake);
                Some((name + ".py", code))
            }
            GenItem::TypeAlias(TypeAlias { name, ty }) => match &ctx.typespace[*ty] {
                AlgebraicType::Sum(sum) => {
                    // Sum-type filenames strip dots before snake-casing.
                    let filename = name.replace('.', "").to_case(Case::Snake);
                    let code = python::autogen_python_sum(ctx, name, sum);
                    Some((filename + ".py", code))
                }
                AlgebraicType::Product(prod) => {
                    let code = python::autogen_python_tuple(ctx, name, prod);
                    let name = name.to_case(Case::Snake);
                    Some((name + ".py", code))
                }
                AlgebraicType::Builtin(_) => todo!(),
                AlgebraicType::Ref(_) => todo!(),
            },
            // No binding is generated for the special `__init__` reducer.
            // NOTE(review): the original comment here referenced C#'s init_database;
            // confirm when `__init__` actually runs for Python clients.
            GenItem::Reducer(reducer) if reducer.name == "__init__" => None,
            GenItem::Reducer(reducer) => {
                let code = python::autogen_python_reducer(ctx, reducer);
                let name = reducer.name.to_case(Case::Snake);
                Some((name + "_reducer.py", code))
            }
        }
    }
fn generate_typescript(&self, ctx: &GenCtx) -> Option<(String, String)> {
match self {
GenItem::Table(table) => {
let code = typescript::autogen_typescript_table(ctx, table);
let name = table.name.to_case(Case::Snake);
Some((name + ".ts", code))
}
GenItem::TypeAlias(TypeAlias { name, ty }) => match &ctx.typespace[*ty] {
AlgebraicType::Sum(sum) => {
let filename = name.replace('.', "").to_case(Case::Snake);
let code = typescript::autogen_typescript_sum(ctx, name, sum);
Some((filename + ".ts", code))
}
AlgebraicType::Product(prod) => {
let code = typescript::autogen_typescript_tuple(ctx, name, prod);
let name = name.to_case(Case::Snake);
Some((name + ".ts", code))
}
AlgebraicType::Builtin(_) => todo!(),
AlgebraicType::Ref(_) => todo!(),
},
// I'm not sure exactly how this should work; when does init_database get called with csharp?
GenItem::Reducer(reducer) if reducer.name == "__init__" => None,
GenItem::Reducer(reducer) => {
let code = typescript::autogen_typescript_reducer(ctx, reducer);
let name = reducer.name.to_case(Case::Snake);
Some((name + "_reducer.ts", code))
}
}
}
    /// Emit the C# source for this item as a `(file_name, contents)` pair.
    /// Unlike the other targets, C# filenames keep the original names rather
    /// than snake-casing them; reducer files are Pascal-cased.
    fn generate_csharp(&self, ctx: &GenCtx, namespace: &str) -> Option<(String, String)> {
        match self {
            GenItem::Table(table) => {
                let code = csharp::autogen_csharp_table(ctx, table, namespace);
                Some((table.name.clone() + ".cs", code))
            }
            GenItem::TypeAlias(TypeAlias { name, ty }) => match &ctx.typespace[*ty] {
                AlgebraicType::Sum(sum) => {
                    // Sum-type filenames only strip dots.
                    let filename = name.replace('.', "");
                    let code = csharp::autogen_csharp_sum(ctx, name, sum, namespace);
                    Some((filename + ".cs", code))
                }
                AlgebraicType::Product(prod) => {
                    let code = csharp::autogen_csharp_tuple(ctx, name, prod, namespace);
                    Some((name.clone() + ".cs", code))
                }
                AlgebraicType::Builtin(_) => todo!(),
                AlgebraicType::Ref(_) => todo!(),
            },
            // I'm not sure exactly how this should work; when does init_database get called with csharp?
            GenItem::Reducer(reducer) if reducer.name == "__init__" => None,
            GenItem::Reducer(reducer) => {
                let code = csharp::autogen_csharp_reducer(ctx, reducer, namespace);
                let pascalcase = reducer.name.to_case(Case::Pascal);
                Some((pascalcase + "Reducer.cs", code))
            }
        }
    }
}
/// Instantiate the module at `wasm_file` in a throwaway wasmtime host and ask
/// it to describe itself, returning the decoded `ModuleDef`.
///
/// Every function import is stubbed with a trap, except `_console_log`
/// (forwarded to stdout) and `_buffer_alloc` (backed by a host-side slab) —
/// extraction only needs the describe path, not a real runtime.
fn extract_descriptions(wasm_file: &Path) -> anyhow::Result<ModuleDef> {
    let engine = wasmtime::Engine::default();
    let t = std::time::Instant::now();
    let module = wasmtime::Module::from_file(&engine, wasm_file)?;
    println!("compilation took {:?}", t.elapsed());
    let ctx = WasmCtx {
        mem: None,
        buffers: slab::Slab::new(),
    };
    let mut store = wasmtime::Store::new(&engine, ctx);
    let mut linker = wasmtime::Linker::new(&engine);
    // Shadowing lets the real hooks below replace the trap stubs registered first.
    linker.allow_shadowing(true);
    // Stub every function import so instantiation succeeds; calling one traps.
    for imp in module.imports() {
        if let ExternType::Func(func_type) = imp.ty() {
            linker
                .func_new(imp.module(), imp.name(), func_type, |_, _, _| {
                    anyhow::bail!("don't call me!!")
                })
                .unwrap();
        }
    }
    // Forward module logging to stdout so describe-time prints are visible.
    linker.func_wrap(
        "spacetime",
        "_console_log",
        |caller: Caller<'_, WasmCtx>,
         _level: u32,
         _target: u32,
         _target_len: u32,
         _filename: u32,
         _filename_len: u32,
         _line_number: u32,
         message: u32,
         message_len: u32| {
            let mem = caller.data().mem.unwrap();
            let slice = mem.deref_slice(&caller, message, message_len);
            if let Some(slice) = slice {
                println!("from wasm: {}", String::from_utf8_lossy(slice));
            } else {
                println!("tried to print from wasm but out of bounds")
            }
        },
    )?;
    linker.func_wrap("spacetime", "_buffer_alloc", WasmCtx::buffer_alloc)?;
    let instance = linker.instantiate(&mut store, &module)?;
    let memory = Memory {
        mem: instance.get_memory(&mut store, "memory").unwrap(),
    };
    store.data_mut().mem = Some(memory);
    // Run every `__preinit__*` export, sorted lexicographically by suffix,
    // before asking the module to describe itself.
    let mut preinits = instance
        .exports(&mut store)
        .filter_map(|exp| Some((exp.name().strip_prefix("__preinit__")?.to_owned(), exp.into_func()?)))
        .collect::<Vec<_>>();
    preinits.sort_by(|(a, _), (b, _)| a.cmp(b));
    for (_, func) in preinits {
        func.typed(&store)?.call(&mut store, ())?
    }
    // `__describe_module__` returns a slab key for a BSATN-encoded ModuleDef;
    // a module without the export yields an empty default.
    let module = match instance.get_func(&mut store, "__describe_module__") {
        Some(f) => {
            let buf: u32 = f.typed(&store)?.call(&mut store, ()).unwrap();
            let slice = store.data_mut().buffers.remove(buf as usize);
            bsatn::from_slice(&slice)?
        }
        None => ModuleDef::default(),
    };
    Ok(module)
}
/// Host-side state shared with the wasm instance during description extraction.
struct WasmCtx {
    // Set once the instance's exported memory is resolved; `None` before then.
    mem: Option<Memory>,
    // Host-owned byte buffers handed to/from the module by slab key
    // (filled by `buffer_alloc`, drained when `__describe_module__` returns).
    buffers: slab::Slab<Vec<u8>>,
}
impl WasmCtx {
    /// The instance's linear memory; panics if it has not been resolved yet.
    fn mem(&self) -> Memory {
        self.mem.unwrap()
    }
    /// Host import `_buffer_alloc`: copy `data_len` bytes at wasm offset `data`
    /// into a fresh host buffer and return its slab key.
    fn buffer_alloc(mut caller: Caller<'_, Self>, data: u32, data_len: u32) -> u32 {
        let mem = caller.data().mem();
        let bytes = mem.deref_slice(&caller, data, data_len).unwrap().to_vec();
        let key = caller.data_mut().buffers.insert(bytes);
        key as u32
    }
}
/// Cheap, copyable handle to the wasm instance's exported linear memory.
#[derive(Copy, Clone)]
struct Memory {
    mem: wasmtime::Memory,
}
impl Memory {
    /// Bounds-checked view of `len` bytes starting at `offset` in the
    /// instance's memory; `None` if the range is out of bounds.
    fn deref_slice<'a>(&self, store: &'a impl AsContext, offset: u32, len: u32) -> Option<&'a [u8]> {
        let data = self.mem.data(store.as_context());
        let tail = data.get(offset as usize..)?;
        tail.get(..len as usize)
    }
}
@@ -0,0 +1,663 @@
use convert_case::{Case, Casing};
use spacetimedb_lib::{
sats::{AlgebraicType::Builtin, AlgebraicTypeRef, ArrayType, BuiltinType, MapType},
AlgebraicType, ColumnIndexAttribute, ProductType, ProductTypeElement, ReducerDef, SumType, TableDef,
};
use std::fmt::{self, Write};
use super::{code_indenter::CodeIndenter, csharp::is_enum, GenCtx, GenItem};
/// A builtin type resolved to either the Python primitive type name it maps
/// to, or the aggregate (array / map) that needs recursive translation.
enum MaybePrimitive<'a> {
    // Python type name, e.g. "int" or "str".
    Primitive(&'static str),
    Array(&'a ArrayType),
    Map(&'a MapType),
}
/// Classify `b`: the Python primitive type name it maps to, or the aggregate
/// (array / map) that needs recursive handling.
fn maybe_primitive(b: &BuiltinType) -> MaybePrimitive {
    let name = match b {
        BuiltinType::Array(ty) => return MaybePrimitive::Array(ty),
        BuiltinType::Map(m) => return MaybePrimitive::Map(m),
        BuiltinType::Bool => "bool",
        // All integer widths map to Python's arbitrary-precision `int`.
        BuiltinType::I8
        | BuiltinType::U8
        | BuiltinType::I16
        | BuiltinType::U16
        | BuiltinType::I32
        | BuiltinType::U32
        | BuiltinType::I64
        | BuiltinType::U64
        | BuiltinType::I128
        | BuiltinType::U128 => "int",
        BuiltinType::String => "str",
        BuiltinType::F32 | BuiltinType::F64 => "float",
    };
    MaybePrimitive::Primitive(name)
}
/// Format a Python expression that decodes a wire `value` of builtin type `b`
/// into its Python representation. `vecnest` tracks array nesting depth.
fn convert_builtintype<'a>(
    ctx: &'a GenCtx,
    vecnest: usize,
    b: &'a BuiltinType,
    value: impl fmt::Display + 'a,
    ref_prefix: &'a str,
) -> impl fmt::Display + 'a {
    fmt_fn(move |f| match maybe_primitive(b) {
        MaybePrimitive::Primitive(p) => {
            // Coerce through the Python type constructor, e.g. `int(x)`.
            write!(f, "{p}({value})")
        }
        MaybePrimitive::Array(ArrayType { elem_ty }) if **elem_ty == AlgebraicType::U8 => {
            // Byte arrays travel as hex strings (see `.hex()` on the encode side).
            write!(f, "bytes.fromhex({value})")
        }
        MaybePrimitive::Array(ArrayType { elem_ty }) => {
            // Decode element-wise with a list comprehension.
            let convert_type = convert_type(ctx, vecnest + 1, elem_ty, "item", ref_prefix);
            write!(f, "[{convert_type} for item in {value}]")
        }
        MaybePrimitive::Map(_) => unimplemented!(),
    })
}
/// Format a Python expression that decodes a wire `value` of type `ty` into
/// its Python representation. `ref_prefix` is prepended to referenced
/// generated type names; anonymous products are unreachable here.
fn convert_type<'a>(
    ctx: &'a GenCtx,
    vecnest: usize,
    ty: &'a AlgebraicType,
    value: impl fmt::Display + 'a,
    ref_prefix: &'a str,
) -> impl fmt::Display + 'a {
    fmt_fn(move |f| match ty {
        AlgebraicType::Product(_) => unreachable!(),
        AlgebraicType::Sum(sum_type) if is_option_type(sum_type) => {
            // Options decode from `{'0': inner}` (Some) / `{}` (None).
            write!(
                f,
                "{} if '0' in {value} else None",
                convert_type(
                    ctx,
                    vecnest,
                    &sum_type.variants[0].algebraic_type,
                    format!("{value}['0']"),
                    ref_prefix
                )
            )
        }
        AlgebraicType::Sum(_sum_type) => unimplemented!(),
        AlgebraicType::Builtin(b) => fmt::Display::fmt(&convert_builtintype(ctx, vecnest, b, &value, ref_prefix), f),
        AlgebraicType::Ref(r) => {
            let name = python_typename(ctx, *r);
            let algebraic_type = &ctx.typespace.types[r.idx()];
            match algebraic_type {
                // for enums in json this comes over as a dictionary where the key is actually the enum index
                AlgebraicType::Sum(sum_type) if is_enum(sum_type) => write!(f, "{name}(int(next(iter({value})))+1)"),
                _ => {
                    // Other references decode through the generated class constructor.
                    write!(f, "{name}({value})")
                }
            }
        }
    })
}
// can maybe do something fancy with this in the future
/// Generated Python class name for `typeref`; panics if the typespace entry
/// was never assigned a name.
fn python_typename(ctx: &GenCtx, typeref: AlgebraicTypeRef) -> &str {
    let name = ctx.names[typeref.idx()].as_deref();
    name.expect("tuples should have names")
}
/// Format `ty` as a Python type annotation (for `typing` hints).
/// `ref_prefix` is prepended to referenced generated type names.
/// Anonymous sum and product types cannot be expressed and panic.
fn ty_fmt<'a>(ctx: &'a GenCtx, ty: &'a AlgebraicType, ref_prefix: &'a str) -> impl fmt::Display + 'a {
    fmt_fn(move |f| match ty {
        AlgebraicType::Sum(_sum_type) => {
            unimplemented!()
        }
        AlgebraicType::Product(_) => unimplemented!(),
        AlgebraicType::Builtin(b) => match maybe_primitive(b) {
            MaybePrimitive::Primitive(p) => f.write_str(p),
            // `Vec<u8>` surfaces as Python `bytes`.
            MaybePrimitive::Array(ArrayType { elem_ty }) if **elem_ty == AlgebraicType::U8 => f.write_str("bytes"),
            MaybePrimitive::Array(ArrayType { elem_ty }) => {
                write!(f, "List[{}]", ty_fmt(ctx, elem_ty, ref_prefix))
            }
            MaybePrimitive::Map(ty) => {
                // `Dict[key, value]`: the key type must come first. (Previously
                // the value and key types were interpolated in swapped order.)
                write!(
                    f,
                    "Dict[{}, {}]",
                    ty_fmt(ctx, &ty.key_ty, ref_prefix),
                    ty_fmt(ctx, &ty.ty, ref_prefix)
                )
            }
        },
        AlgebraicType::Ref(r) => write!(f, "{}{}", ref_prefix, python_typename(ctx, *r)),
    })
}
/// Shadow `$x` with a writer indented one level deeper; indentation reverts
/// when the shadowing binding goes out of scope (hence the block scopes below).
macro_rules! indent_scope {
    ($x:ident) => {
        let mut $x = $x.indented(1);
    };
}
/// Snake-case Python module (file) name for the generated type `typeref`;
/// panics if the typespace entry was never assigned a name.
fn python_filename(ctx: &GenCtx, typeref: AlgebraicTypeRef) -> String {
    let name = ctx.names[typeref.idx()].as_deref();
    name.expect("tuples should have names").to_case(Case::Snake)
}
/// Adapt a formatting closure into an anonymous `Display` value, so format
/// fragments can be composed lazily without intermediate `String`s.
fn fmt_fn(f: impl Fn(&mut fmt::Formatter) -> fmt::Result) -> impl fmt::Display {
    struct Closure<F>(F);
    impl<F> fmt::Display for Closure<F>
    where
        F: Fn(&mut fmt::Formatter) -> fmt::Result,
    {
        fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
            (self.0)(fmt)
        }
    }
    Closure(f)
}
/// Recognize the special `Option` encoding: a two-variant sum whose variants
/// are named `some` and `none`, with `none` holding the unit (empty) product.
/// Panics if a variant is unnamed.
fn is_option_type(ty: &SumType) -> bool {
    if ty.variants.len() != 2 {
        return false;
    }
    // Compare borrowed `&str`s instead of cloning the name `String`s.
    let variant_name = |i: usize| {
        ty.variants[i]
            .name
            .as_deref()
            .expect("Variants should have names!")
    };
    if variant_name(0) != "some" || variant_name(1) != "none" {
        return false;
    }
    matches!(&ty.variants[1].algebraic_type, AlgebraicType::Product(none_type) if none_type.elements.is_empty())
}
/// Generate the Python module for `table`, including table-only accessors
/// (row-update registration, `iter`, `filter_by_*`).
/// Panics if the table's row type is not a product in the typespace.
pub fn autogen_python_table(ctx: &GenCtx, table: &TableDef) -> String {
    let tuple = ctx.typespace[table.data].as_product().unwrap();
    autogen_python_product_table_common(ctx, &table.name, tuple, Some(&table.column_attrs))
}
/// Collect the `from .module import Class` lines needed by the types of
/// `elements`, appending them to `imports`.
///
/// Takes a slice rather than `&Vec<_>` (callers' `&Vec` coerces transparently).
fn generate_imports(ctx: &GenCtx, elements: &[ProductTypeElement], imports: &mut Vec<String>) {
    for field in elements {
        _generate_imports(ctx, &field.algebraic_type, imports);
    }
}
/// Recursively walk `ty`, pushing an import line for every generated type it
/// references (through arrays, maps, and the `Option` sum encoding).
fn _generate_imports(ctx: &GenCtx, ty: &AlgebraicType, imports: &mut Vec<String>) {
    match ty {
        Builtin(b) => match b {
            BuiltinType::Array(ArrayType { elem_ty }) => _generate_imports(ctx, elem_ty, imports),
            BuiltinType::Map(map_type) => {
                _generate_imports(ctx, &map_type.key_ty, imports);
                _generate_imports(ctx, &map_type.ty, imports);
            }
            _ => (),
        },
        AlgebraicType::Sum(sum_type) if is_option_type(sum_type) => {
            _generate_imports(ctx, &sum_type.variants[0].algebraic_type, imports);
        }
        AlgebraicType::Ref(r) => {
            let class_name = python_typename(ctx, *r).to_string();
            // Import the class from its own generated module. (Previously the
            // computed `filename` was unused and a bogus module path was emitted.)
            let filename = python_filename(ctx, *r);
            let import = format!("from .{filename} import {class_name}");
            imports.push(import);
        }
        _ => (),
    }
}
/// Shared generator for the Python class backing either a table row or a plain
/// product type. `column_attrs` is `Some` for tables (enabling row-update
/// registration, `iter`, and `filter_by_*` classmethods) and `None` for plain
/// tuples. Returns the complete module source.
fn autogen_python_product_table_common(
    ctx: &GenCtx,
    name: &str,
    product_type: &ProductType,
    column_attrs: Option<&[ColumnIndexAttribute]>,
) -> String {
    let is_table = column_attrs.is_some();
    let mut output = CodeIndenter::new(String::new());
    writeln!(
        output,
        "# THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE",
    )
    .unwrap();
    writeln!(output, "# WILL NOT BE SAVED. MODIFY TABLES IN RUST INSTEAD.").unwrap();
    writeln!(output).unwrap();
    // Tables need the client runtime; plain tuples only need typing helpers.
    if is_table {
        writeln!(output, "from __future__ import annotations").unwrap();
        writeln!(output, "from typing import List, Iterator, Callable").unwrap();
        writeln!(output).unwrap();
        writeln!(
            output,
            "from spacetimedb_python_sdk.spacetimedb_client import SpacetimeDBClient"
        )
        .unwrap();
    } else {
        writeln!(output, "from typing import List").unwrap();
    }
    // Imports for any generated types referenced by this type's fields.
    let mut imports = Vec::new();
    generate_imports(ctx, &product_type.elements, &mut imports);
    for import in imports {
        writeln!(output, "{import}").unwrap();
    }
    writeln!(output).unwrap();
    writeln!(output, "class {name}:").unwrap();
    {
        indent_scope!(output);
        // if this is a table, mark it as such
        let is_table_str = match column_attrs {
            Some(_) => "True",
            None => "False",
        };
        writeln!(output, "is_table_class = {is_table_str}").unwrap();
        writeln!(output).unwrap();
        if is_table {
            // Row-update callback registration hook.
            writeln!(output, "@classmethod").unwrap();
            writeln!(
                output,
                "def register_row_update(cls, callback: Callable[[str,{name},{name}], None]):"
            )
            .unwrap();
            {
                indent_scope!(output);
                writeln!(
                    output,
                    "SpacetimeDBClient.instance._register_row_update(\"{name}\",callback)"
                )
                .unwrap()
            }
            writeln!(output).unwrap();
            // Iterate the client's cached rows for this table.
            writeln!(output, "@classmethod").unwrap();
            writeln!(output, "def iter(cls) -> Iterator[{name}]:").unwrap();
            {
                indent_scope!(output);
                writeln!(
                    output,
                    "return SpacetimeDBClient.instance._get_table_cache(\"{name}\").values()"
                )
                .unwrap();
            }
            writeln!(output).unwrap();
            // One `filter_by_<field>` classmethod per filterable column;
            // tuple, ref, non-option enum, non-byte array, and map columns are skipped.
            for (idx, field) in product_type.elements.iter().enumerate() {
                let attr = column_attrs.unwrap()[idx];
                let field_type = &field.algebraic_type;
                match field_type {
                    AlgebraicType::Product(_) | AlgebraicType::Ref(_) => {
                        // TODO: We don't allow filtering on tuples right now, its possible we may consider it for the future.
                        continue;
                    }
                    AlgebraicType::Sum(ty) => {
                        if !is_option_type(ty) {
                            // TODO: We don't allow filtering on enums right now, its possible we may consider it for the future.
                            continue;
                        }
                    }
                    AlgebraicType::Builtin(b) => match maybe_primitive(b) {
                        MaybePrimitive::Array(ArrayType { elem_ty }) if **elem_ty != AlgebraicType::U8 => {
                            // TODO: We don't allow filtering based on an array type, but we might want other functionality here in the future.
                            continue;
                        }
                        MaybePrimitive::Map(_) => {
                            // TODO: It would be nice to be able to say, give me all entries where this vec contains this value, which we can do.
                            continue;
                        }
                        _ => (),
                    },
                };
                let field_name = field
                    .name
                    .as_ref()
                    .expect("autogen'd tuples should have field names")
                    .replace("r#", "");
                writeln!(output, "@classmethod").unwrap();
                // Unique columns return a single (optional) row; others a list.
                if attr.is_unique() {
                    writeln!(output, "def filter_by_{field_name}(cls, {field_name}) -> {name}:").unwrap();
                } else {
                    writeln!(output, "def filter_by_{field_name}(cls, {field_name}) -> List[{name}]:").unwrap();
                }
                {
                    indent_scope!(output);
                    if attr.is_unique() {
                        writeln!(output, "return next(iter([column_value for column_value in SpacetimeDBClient.instance._get_table_cache(\"{name}\").values() if column_value.{field_name} == {field_name}]), None)").unwrap();
                    } else {
                        writeln!(output, "return [column_value for column_value in SpacetimeDBClient.instance._get_table_cache(\"{name}\").values() if column_value.{field_name} == {field_name}]").unwrap();
                    }
                }
                writeln!(output).unwrap();
            }
        }
        // __init__: decode a positional wire row into the instance's data dict.
        writeln!(output, "def __init__(self, data: List[object]):").unwrap();
        {
            indent_scope!(output);
            writeln!(output, "self.data = {{}}").unwrap();
            for (idx, field) in product_type.elements.iter().enumerate() {
                let field_name = field
                    .name
                    .as_ref()
                    .expect("autogen'd tuples should have field names")
                    .replace("r#", "");
                let field_type = &field.algebraic_type;
                let python_field_name = field_name.to_string().replace("r#", "");
                writeln!(
                    output,
                    "self.data[\"{python_field_name}\"] = {}",
                    convert_type(ctx, 0, field_type, format_args!("data[{idx}]"), "")
                )
                .unwrap()
            }
        }
        writeln!(output).unwrap();
        // encode: re-encode the instance back into the positional wire format.
        writeln!(output, "def encode(self) -> List[object]:").unwrap();
        {
            indent_scope!(output);
            let mut reducer_args = Vec::new();
            for field in product_type.elements.iter() {
                let field_name = field
                    .name
                    .as_deref()
                    .unwrap_or_else(|| panic!("autogen'd tuples should have field names"));
                let python_field_name = field_name.to_string().replace("r#", "");
                match &field.algebraic_type {
                    AlgebraicType::Sum(sum_type) if is_option_type(sum_type) => {
                        reducer_args.push(format!("{{'0': [self.{}]}}", python_field_name))
                    }
                    AlgebraicType::Sum(_) => unimplemented!(),
                    AlgebraicType::Product(_) => {
                        reducer_args.push(format!("self.{python_field_name}"));
                    }
                    Builtin(_) => {
                        reducer_args.push(format!("self.{python_field_name}"));
                    }
                    AlgebraicType::Ref(type_ref) => {
                        let ref_type = &ctx.typespace.types[type_ref.idx()];
                        if let AlgebraicType::Sum(sum_type) = ref_type {
                            if is_enum(sum_type) {
                                // NOTE(review): this emits `{str(<field>.value): []}`
                                // with a bare field name — unlike the other arms,
                                // which prefix `self.`. Confirm whether the missing
                                // `self.` is intentional.
                                reducer_args.push(format!("{{str({}.value): []}}", python_field_name))
                            } else {
                                unimplemented!()
                            }
                        } else {
                            reducer_args.push(format!("self.{python_field_name}.encode()"));
                        }
                    }
                }
            }
            let reducer_args_str = reducer_args.join(", ");
            writeln!(output, "return [{}]", reducer_args_str).unwrap();
        }
        writeln!(output).unwrap();
        // __getattr__: expose decoded fields as attributes backed by the data dict.
        writeln!(output, "def __getattr__(self, name: str):").unwrap();
        {
            indent_scope!(output);
            writeln!(output, "return self.data.get(name)").unwrap();
        }
    }
    output.into_inner()
}
/// Generate Python source for a named sum type.
/// Only C-like enums are currently supported; anything else panics.
pub fn autogen_python_sum(ctx: &GenCtx, name: &str, sum_type: &SumType) -> String {
    if !is_enum(sum_type) {
        unimplemented!()
    }
    autogen_python_enum(ctx, name, sum_type)
}
/// Generate a Python `Enum` subclass for a C-like sum type.
/// Member values are 1-based, matching the decode side (`convert_type` adds 1).
pub fn autogen_python_enum(_ctx: &GenCtx, name: &str, sum_type: &SumType) -> String {
    let mut output = CodeIndenter::new(String::new());
    writeln!(
        output,
        "# THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE",
    )
    .unwrap();
    writeln!(output, "# WILL NOT BE SAVED. MODIFY TABLES IN RUST INSTEAD.").unwrap();
    writeln!(output).unwrap();
    writeln!(output, "from enum import Enum").unwrap();
    writeln!(output).unwrap();
    writeln!(output, "class {name}(Enum):").unwrap();
    {
        indent_scope!(output);
        for (idx, variant) in sum_type.variants.iter().enumerate() {
            let member = variant
                .name
                .as_deref()
                .expect("All sum variants should have names!")
                .replace("r#", "");
            writeln!(output, "{} = {}", member, idx + 1).unwrap();
        }
    }
    output.into_inner()
}
/// Generate the Python module for a plain (non-table) product type.
pub fn autogen_python_tuple(ctx: &GenCtx, name: &str, tuple: &ProductType) -> String {
    autogen_python_product_table_common(ctx, name, tuple, None)
}
/// Format a Python expression that encodes `value` (of builtin type `b`) into
/// its wire representation. Mirrors `convert_builtintype` on the decode side.
fn encode_builtintype<'a>(
    ctx: &'a GenCtx,
    vecnest: usize,
    b: &'a BuiltinType,
    value: impl fmt::Display + 'a,
    ref_prefix: &'a str,
) -> impl fmt::Display + 'a {
    fmt_fn(move |f| match maybe_primitive(b) {
        MaybePrimitive::Primitive(_) => {
            // Primitives pass through unchanged.
            write!(f, "{value}")
        }
        MaybePrimitive::Array(ArrayType { elem_ty }) if **elem_ty == AlgebraicType::U8 => {
            // Byte arrays travel as hex strings.
            write!(f, "{value}.hex()")
        }
        MaybePrimitive::Array(ArrayType { elem_ty }) => {
            // Encode element-wise with a list comprehension.
            let convert_type = encode_type(ctx, vecnest + 1, elem_ty, "item", ref_prefix);
            write!(f, "[{convert_type} for item in {value}]")
        }
        MaybePrimitive::Map(_) => unimplemented!(),
    })
}
/// Format a Python expression that encodes `value` (a Python value of type
/// `ty`) into its wire representation. Mirrors `convert_type` on decode.
pub fn encode_type<'a>(
    ctx: &'a GenCtx,
    vecnest: usize,
    ty: &'a AlgebraicType,
    value: impl fmt::Display + 'a,
    ref_prefix: &'a str,
) -> impl fmt::Display + 'a {
    fmt_fn(move |f| match ty {
        AlgebraicType::Product(_) => unreachable!(),
        AlgebraicType::Sum(sum_type) if is_option_type(sum_type) => {
            // Options encode as `{'0': inner}` for Some and `{}` for None.
            // Test the actual expression being encoded; the previous version
            // emitted a reference to an undefined Python name `value`.
            write!(
                f,
                "{{'0': {}}} if {value} is not None else {{}}",
                encode_type(
                    ctx,
                    vecnest,
                    &sum_type.variants[0].algebraic_type,
                    format!("{value}"),
                    ref_prefix
                )
            )
        }
        AlgebraicType::Sum(_sum_type) => unimplemented!(),
        AlgebraicType::Builtin(b) => fmt::Display::fmt(&encode_builtintype(ctx, vecnest, b, &value, ref_prefix), f),
        AlgebraicType::Ref(r) => {
            let algebraic_type = &ctx.typespace.types[r.idx()];
            match algebraic_type {
                // Enums encode as `{str(index): []}` with a 0-based wire index
                // (generated Python enum members are 1-based, hence `-1`).
                AlgebraicType::Sum(sum_type) if is_enum(sum_type) => write!(f, "{{str({value}.value-1): []}}"),
                _ => {
                    // Other references encode through the generated class.
                    write!(f, "{value}.encode()")
                }
            }
        }
    })
}
/// Generate the Python module for one reducer: a call wrapper named after the
/// reducer, a `register_on_<name>` callback registrar, and private
/// `_decode_args` / `_check_callback_signature` helpers.
pub fn autogen_python_reducer(ctx: &GenCtx, reducer: &ReducerDef) -> String {
    let mut output = CodeIndenter::new(String::new());
    writeln!(
        output,
        "# THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE",
    )
    .unwrap();
    writeln!(output, "# WILL NOT BE SAVED. MODIFY TABLES IN RUST INSTEAD.").unwrap();
    writeln!(output).unwrap();
    writeln!(output, "from typing import List, Callable").unwrap();
    writeln!(output).unwrap();
    writeln!(
        output,
        "from spacetimedb_python_sdk.spacetimedb_client import SpacetimeDBClient"
    )
    .unwrap();
    writeln!(output).unwrap();
    // Imports for generated types referenced by the reducer's arguments.
    let mut imports = Vec::new();
    generate_imports(
        ctx,
        &reducer.args.clone().into_iter().collect::<Vec<ProductTypeElement>>(),
        &mut imports,
    );
    for import in imports {
        writeln!(output, "{import}").unwrap();
    }
    writeln!(output).unwrap();
    // Build the arg list three ways: bare names (call site), `name: type`
    // (def site), and type names alone (callback signature).
    let mut func_call = Vec::new();
    let mut func_arguments = Vec::new();
    let mut func_types = Vec::new();
    for arg in reducer.args.iter() {
        let arg_name = arg
            .name
            .as_deref()
            .unwrap_or_else(|| panic!("reducer args should have names: {}", reducer.name));
        let arg_type = ty_fmt(ctx, &arg.algebraic_type, "");
        func_call.push(arg_name);
        func_arguments.push(format!("{arg_name}: {arg_type}"));
        func_types.push(arg_type.to_string());
    }
    let func_arguments_str = func_arguments.join(", ");
    let func_types_str = func_types.join(", ");
    let mut func_call_str = func_call.join(", ");
    if !func_call.is_empty() {
        func_call_str = format!(", {func_call_str}");
    }
    // Callbacks are checked against [bytes, str, str, <arg types...>] below;
    // NOTE(review): presumably (identity, status, message) — confirm in the SDK.
    let callback_sig_str = if !func_types.is_empty() {
        format!(", {func_types_str}")
    } else {
        func_types_str
    };
    // def <reducer>(args...): encode each arg, then invoke the client.
    writeln!(output, "def {}({}):", reducer.name, func_arguments_str).unwrap();
    {
        indent_scope!(output);
        for arg in reducer.args.iter() {
            let field_name = arg
                .name
                .as_deref()
                .unwrap_or_else(|| panic!("reducer args should have names: {}", reducer.name));
            let field_type = &arg.algebraic_type;
            let python_field_name = field_name.to_string().replace("r#", "");
            writeln!(
                output,
                "{python_field_name} = {}",
                encode_type(ctx, 0, field_type, format_args!("{python_field_name}"), "")
            )
            .unwrap();
        }
        writeln!(
            output,
            "SpacetimeDBClient.instance._reducer_call(\"{}\"{})",
            reducer.name, func_call_str
        )
        .unwrap();
    }
    writeln!(output).unwrap();
    // register_on_<reducer>: validate and register a completion callback.
    writeln!(
        output,
        "def register_on_{}(callback: Callable[[bytes, str, str{}], None]):",
        reducer.name, callback_sig_str
    )
    .unwrap();
    {
        indent_scope!(output);
        writeln!(output, "if not _check_callback_signature(callback):").unwrap();
        {
            indent_scope!(output);
            writeln!(
                output,
                "raise ValueError(\"Callback signature does not match expected arguments\")"
            )
            .unwrap();
        }
        writeln!(output).unwrap();
        writeln!(
            output,
            "SpacetimeDBClient.instance._register_reducer(\"{}\", callback)",
            reducer.name
        )
        .unwrap();
    }
    writeln!(output).unwrap();
    // _decode_args: positional wire data -> decoded Python values.
    writeln!(output, "def _decode_args(data):").unwrap();
    {
        indent_scope!(output);
        let mut decode_strs = Vec::new();
        for (idx, arg) in reducer.args.iter().enumerate() {
            let field_type = &arg.algebraic_type;
            decode_strs.push(format!(
                "{}",
                convert_type(ctx, 0, field_type, format_args!("data[{idx}]"), "")
            ));
        }
        writeln!(output, "return [{}]", decode_strs.join(", ")).unwrap();
    }
    writeln!(output).unwrap();
    // _check_callback_signature: compare the callback's annotations against
    // the expected [bytes, str, str, <arg types...>] list.
    writeln!(output, "def _check_callback_signature(callback: Callable) -> bool:").unwrap();
    {
        indent_scope!(output);
        writeln!(output, "expected_arguments = [bytes, str, str{}]", callback_sig_str).unwrap();
        writeln!(output, "callback_arguments = callback.__annotations__.values()").unwrap();
        writeln!(output).unwrap();
        writeln!(output, "return list(callback_arguments) == expected_arguments").unwrap();
    }
    output.into_inner()
}
/// Generate module-level (globals) files for Python.
/// Currently produces nothing; kept so all targets share the same interface.
pub fn autogen_python_globals(_ctx: &GenCtx, _items: &[GenItem]) -> Vec<(String, String)> {
    // TODO: emit a globals module once the Python SDK needs one.
    Vec::new()
}
+826
View File
@@ -0,0 +1,826 @@
use super::code_indenter::CodeIndenter;
use super::{GenCtx, GenItem};
use convert_case::{Case, Casing};
use spacetimedb_lib::sats::{
AlgebraicType, AlgebraicTypeRef, ArrayType, BuiltinType, MapType, ProductType, ProductTypeElement, SumType,
SumTypeVariant,
};
use spacetimedb_lib::{ColumnIndexAttribute, ReducerDef, TableDef};
use std::collections::HashSet;
use std::fmt::Write;
/// Indented string writer used for all emitted Rust source.
type Indenter = CodeIndenter<String>;
/// Pairs of (module_name, TypeName).
type Imports = HashSet<(String, String)>;
/// A builtin type resolved to either the Rust primitive type name it maps to,
/// or the aggregate (array / map) that needs recursive translation.
enum MaybePrimitive<'a> {
    // Rust type name, e.g. "u32" or "String".
    Primitive(&'a str),
    Array(&'a ArrayType),
    Map(&'a MapType),
}
/// Classify `b`: the Rust primitive type name it maps to, or the aggregate
/// (array / map) that needs recursive handling.
fn maybe_primitive(b: &BuiltinType) -> MaybePrimitive {
    let name = match b {
        BuiltinType::Array(ty) => return MaybePrimitive::Array(ty),
        BuiltinType::Map(m) => return MaybePrimitive::Map(m),
        BuiltinType::Bool => "bool",
        BuiltinType::I8 => "i8",
        BuiltinType::U8 => "u8",
        BuiltinType::I16 => "i16",
        BuiltinType::U16 => "u16",
        BuiltinType::I32 => "i32",
        BuiltinType::U32 => "u32",
        BuiltinType::I64 => "i64",
        BuiltinType::U64 => "u64",
        BuiltinType::I128 => "i128",
        BuiltinType::U128 => "u128",
        BuiltinType::F32 => "f32",
        BuiltinType::F64 => "f64",
        BuiltinType::String => "String",
    };
    MaybePrimitive::Primitive(name)
}
/// True iff `ty` is a product type with no elements (the unit type).
fn is_empty_product(ty: &AlgebraicType) -> bool {
    matches!(ty, AlgebraicType::Product(none_type) if none_type.elements.is_empty())
}
// This function is duplicated in [typescript.rs] and [csharp.rs], and should maybe be
// lifted into a module, or be a part of SATS itself.
/// Recognize the `Option` encoding: exactly the variants `some` and `none`,
/// where `none` holds the unit product. Panics on unnamed variants.
fn is_option_type(ty: &SumType) -> bool {
    let named = |variant: &SumTypeVariant, expected: &str| {
        variant.name.as_ref().expect("Variants should have names!") == expected
    };
    match &ty.variants[..] {
        [some, none] => named(some, "some") && named(none, "none") && is_empty_product(&none.algebraic_type),
        _ => false,
    }
}
/// Write the Rust type expression for `ty` into `out`.
/// Anonymous sums other than the `Option` encoding, and anonymous products,
/// cannot currently be expressed and panic.
fn write_type(ctx: &GenCtx, out: &mut Indenter, ty: &AlgebraicType) {
    match ty {
        AlgebraicType::Sum(sum_type) => {
            if is_option_type(sum_type) {
                // `some`'s payload is the `T` in `Option<T>`.
                write!(out, "Option::<").unwrap();
                write_type(ctx, out, &sum_type.variants[0].algebraic_type);
                write!(out, ">").unwrap();
            } else {
                unimplemented!("No way to emit an anonymous sum type other than an Option to Rust currently")
            }
        }
        AlgebraicType::Product(_) => unimplemented!("No way to emit an anonymous product type to Rust currently"),
        AlgebraicType::Builtin(b) => match maybe_primitive(b) {
            MaybePrimitive::Primitive(p) => write!(out, "{}", p).unwrap(),
            MaybePrimitive::Array(ArrayType { elem_ty }) => {
                write!(out, "Vec::<").unwrap();
                write_type(ctx, out, elem_ty);
                write!(out, ">").unwrap();
            }
            MaybePrimitive::Map(ty) => {
                // TODO: Should `BuiltinType::Map` translate to `HashMap`? This requires
                // that any map-key type implement `Hash`. We'll have to derive hash
                // on generated types, and notably, `HashMap` is not itself `Hash`,
                // so any type that holds a `Map` cannot derive `Hash` and cannot
                // key a `Map`.
                write!(out, "HashMap::<").unwrap();
                write_type(ctx, out, &ty.key_ty);
                write!(out, ", ").unwrap();
                write_type(ctx, out, &ty.ty);
                write!(out, ">").unwrap();
            }
        },
        AlgebraicType::Ref(r) => {
            // Named generated types are referenced by their Pascal-case name.
            let name = type_name(ctx, *r);
            write!(out, "{}", name).unwrap();
        }
    }
}
// This is (effectively) duplicated in [typescript.rs] as `typescript_typename` and in
// [csharp.rs] as `csharp_typename`, and should probably be lifted to a shared utils
// module.
/// Pascal-case Rust type name for the generated type `typeref`;
/// panics if the typespace entry was never assigned a name.
fn type_name(ctx: &GenCtx, typeref: AlgebraicTypeRef) -> String {
    let name = ctx.names[typeref.idx()].as_deref();
    name.expect("TypeRefs should have names").to_case(Case::Pascal)
}
/// Write each of `lines` to `output` on its own line.
fn print_lines(output: &mut Indenter, lines: &[&str]) {
    lines.iter().for_each(|line| writeln!(output, "{}", line).unwrap());
}
// This is (effectively) duplicated in both [typescript.rs] and [csharp.rs], and should
// probably be lifted to a shared module.
/// Banner stamped at the top of every generated file (trailing "" adds a blank line).
const AUTO_GENERATED_FILE_COMMENT: &[&str] = &[
    "// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE",
    "// WILL NOT BE SAVED. MODIFY TABLES IN RUST INSTEAD.",
    "",
];
/// Write the standard auto-generated banner into `output`.
fn print_auto_generated_file_comment(output: &mut Indenter) {
    print_lines(output, AUTO_GENERATED_FILE_COMMENT);
}
// Suppresses unused-import warnings on the blanket SDK import block below.
const ALLOW_UNUSED: &str = "#[allow(unused)]";
/// The client-SDK `use` block emitted near the top of every generated file.
const SPACETIMEDB_IMPORTS: &[&str] = &[
    ALLOW_UNUSED,
    "use spacetimedb_client_sdk::{",
    "\tglobal_connection::with_connection,",
    "\tsats::{ser::Serialize, de::Deserialize},",
    "\ttable::{TableType, TableIter, TableWithPrimaryKey},",
    "\treducer::{Reducer},",
    // The `Serialize` and `Deserialize` macros depend on `spacetimedb_lib` existing in
    // the root namespace.
    "\tspacetimedb_lib,",
    "\tanyhow::{Result, anyhow},",
    "};",
];
/// Write the standard SDK import block into `output`.
fn print_spacetimedb_imports(output: &mut Indenter) {
    print_lines(output, SPACETIMEDB_IMPORTS);
}
/// Write the auto-generated banner followed by the standard SDK imports.
fn print_file_header(output: &mut Indenter) {
    print_auto_generated_file_comment(output);
    print_spacetimedb_imports(output);
}
// TODO: figure out if/when sum types should derive:
// - Clone
// - Debug
// - Copy
// - PartialEq, Eq
// - Hash
//   - Complicated because `HashMap` is not `Hash`.
// - others?
/// Derive attribute(s) stamped on every generated `enum`.
const ENUM_DERIVES: &[&str] = &["#[derive(Serialize, Deserialize, Clone, PartialEq)]"];
/// Write the enum derive attribute line(s) into `output`.
fn print_enum_derives(output: &mut Indenter) {
    print_lines(output, ENUM_DERIVES);
}
/// Generate a file which defines an `enum` corresponding to the `sum_type`.
pub fn autogen_rust_sum(ctx: &GenCtx, name: &str, sum_type: &SumType) -> String {
    let mut output = CodeIndenter::new(String::new());
    let out = &mut output;
    // Raw-identifier prefixes are stripped before Pascal-casing the type name.
    let sum_type_name = name.replace("r#", "").to_case(Case::Pascal);
    print_file_header(out);
    // For some reason, deref coercion doesn't work on `&sum_type.variants` here - rustc
    // wants to pass it as `&Vec<_>`, not `&[_]`. The slicing index `[..]` forces passing
    // as a slice.
    gen_and_print_imports(ctx, out, &sum_type.variants[..], generate_imports_variants);
    out.newline();
    print_enum_derives(out);
    write!(out, "pub enum {} ", sum_type_name).unwrap();
    out.delimited_block(
        "{",
        |out| {
            for variant in &sum_type.variants {
                write_enum_variant(ctx, out, variant);
                out.newline();
            }
        },
        "}\n",
    );
    output.into_inner()
}
/// Write a single enum variant: unit-like, struct-like (named fields), or
/// tuple-like (single payload), depending on the variant's payload type.
/// Panics if the variant is unnamed.
fn write_enum_variant(ctx: &GenCtx, out: &mut Indenter, variant: &SumTypeVariant) {
    let Some(name) = &variant.name else {
        panic!("Sum type variant has no name: {:?}", variant);
    };
    let name = name.to_case(Case::Pascal);
    write!(out, "{}", name).unwrap();
    match &variant.algebraic_type {
        AlgebraicType::Product(ProductType { elements }) if elements.is_empty() => {
            // If the contained type is the unit type, i.e. this variant has no members,
            // write it without parens or braces, like
            // ```
            // Foo,
            // ```
            writeln!(out, ",").unwrap();
        }
        AlgebraicType::Product(ProductType { elements }) => {
            // If the contained type is a non-empty product, i.e. this variant is
            // struct-like, write it with braces and named fields.
            write_struct_type_fields_in_braces(
                ctx, out, elements,
                // Do not `pub`-qualify fields because enum fields are always public, and
                // rustc errors on the redundant `pub`.
                false,
            );
        }
        otherwise => {
            // If the contained type is not a product, i.e. this variant has a single
            // member, write it tuple-style, with parens.
            write!(out, "(").unwrap();
            write_type(ctx, out, otherwise);
            write!(out, "),").unwrap();
        }
    }
}
/// Write `elements` as a brace-delimited, indented `name: Type,` field list.
fn write_struct_type_fields_in_braces(
    ctx: &GenCtx,
    out: &mut Indenter,
    elements: &[ProductTypeElement],
    // Whether to print a `pub` qualifier on the fields. Necessary for `struct` defns,
    // disallowed for `enum` defns.
    pub_qualifier: bool,
) {
    out.delimited_block(
        "{",
        |out| write_arglist_no_delimiters(ctx, out, elements, pub_qualifier.then_some("pub")),
        "}",
    );
}
/// Write `elements` as `name: Type,` lines (snake-cased names), one per line,
/// with no surrounding delimiters. Panics on unnamed elements.
fn write_arglist_no_delimiters(
    ctx: &GenCtx,
    out: &mut Indenter,
    elements: &[ProductTypeElement],
    // Written before each line. Useful for `pub`.
    prefix: Option<&str>,
) {
    for elt in elements {
        if let Some(prefix) = prefix {
            write!(out, "{} ", prefix).unwrap();
        }
        let Some(name) = &elt.name else {
            panic!("Product type element has no name: {:?}", elt);
        };
        let name = name.to_case(Case::Snake);
        write!(out, "{}: ", name).unwrap();
        write_type(ctx, out, &elt.algebraic_type);
        writeln!(out, ",").unwrap();
    }
}
/// Generate a file which defines a `struct` corresponding to the `product` type.
pub fn autogen_rust_tuple(ctx: &GenCtx, name: &str, product: &ProductType) -> String {
    let mut out = CodeIndenter::new(String::new());
    let struct_name = name.to_case(Case::Pascal);
    begin_rust_struct_def_shared(ctx, &mut out, &struct_name, &product.elements);
    out.into_inner()
}
/// Resolve `ty` in the typespace; panics if the entry is not a product type.
fn find_product_type(ctx: &GenCtx, ty: AlgebraicTypeRef) -> &ProductType {
    ctx.typespace[ty].as_product().unwrap()
}
/// Generate a file which defines a `struct` corresponding to the `table`'s `ProductType`,
/// and implements `spacetimedb_client_sdk::table::TableType` for it.
pub fn autogen_rust_table(ctx: &GenCtx, table: &TableDef) -> String {
    let mut output = CodeIndenter::new(String::new());
    let out = &mut output;
    let type_name = table.name.to_case(Case::Pascal);
    begin_rust_struct_def_shared(ctx, out, &type_name, &find_product_type(ctx, table.data).elements);
    out.newline();
    print_impl_tabletype(ctx, out, table);
    output.into_inner()
}
// TODO: figure out if/when product types should derive:
// - Clone
// - Debug
// - Copy
// - PartialEq, Eq
// - Hash
//   - Complicated because `HashMap` is not `Hash`.
// - others?
/// Derive attribute(s) stamped on every generated `struct`.
const STRUCT_DERIVES: &[&str] = &["#[derive(Serialize, Deserialize, Clone, PartialEq)]"];
/// Write the struct derive attribute line(s) into `output`.
fn print_struct_derives(output: &mut Indenter) {
    print_lines(output, STRUCT_DERIVES);
}
/// Write the shared preamble for a generated struct: banner, SDK imports,
/// cross-module imports, derives, and `pub struct {name}` with `pub` fields
/// for `elements`.
fn begin_rust_struct_def_shared(ctx: &GenCtx, out: &mut Indenter, name: &str, elements: &[ProductTypeElement]) {
    print_auto_generated_file_comment(out);
    print_spacetimedb_imports(out);
    gen_and_print_imports(ctx, out, elements, generate_imports_elements);
    out.newline();
    print_struct_derives(out);
    write!(out, "pub struct {} ", name).unwrap();
    // TODO: if elements is empty, define a unit struct with no brace-delimited list of fields.
    write_struct_type_fields_in_braces(
        ctx, out, elements, // `pub`-qualify fields.
        true,
    );
    out.newline();
}
/// Return the column index of `table`'s primary key, or `None` when it has none.
///
/// Collects every primary column rather than stopping at the first match, so that
/// a table erroneously declaring several primaries is reported with a panic.
fn find_primary_key_column_index(ctx: &GenCtx, table: &TableDef) -> Option<usize> {
    let primaries: Vec<usize> = table
        .column_attrs
        .iter()
        .enumerate()
        .filter_map(|(i, attr)| attr.is_primary().then_some(i))
        .collect();
    match primaries.as_slice() {
        [] => None,
        [only] => Some(*only),
        multiple => {
            let names = multiple
                .iter()
                .map(|&i| &find_product_type(ctx, table.data).elements[i])
                .collect::<Vec<_>>();
            panic!(
                "Multiple primary columns defined for table {:?}: {:?}",
                table.name, names
            );
        }
    }
}
/// Write the `impl TableType for <T>` block, an `impl TableWithPrimaryKey`
/// block when the table has a primary key column, and the per-column filter
/// methods for the table struct named after `table`.
fn print_impl_tabletype(ctx: &GenCtx, out: &mut Indenter, table: &TableDef) {
    let type_name = table.name.to_case(Case::Pascal);

    // impl TableType: just the associated TABLE_NAME constant.
    write!(out, "impl TableType for {} ", type_name).unwrap();
    out.delimited_block(
        "{",
        |out| writeln!(out, "const TABLE_NAME: &'static str = {:?};", table.name).unwrap(),
        "}\n",
    );
    out.newline();

    if let Some(primary_column_index) = find_primary_key_column_index(ctx, table) {
        let pk_field = &find_product_type(ctx, table.data).elements[primary_column_index];
        let pk_field_name = pk_field
            .name
            .as_ref()
            .expect("Fields designated as primary key should have names!")
            .to_case(Case::Snake);
        // TODO: ensure that primary key types are always `Eq`, `Hash`, `Clone`.
        write!(out, "impl TableWithPrimaryKey for {} ", type_name).unwrap();
        out.delimited_block(
            "{",
            |out| {
                // `type PrimaryKey = <column type>;`
                write!(out, "type PrimaryKey = ").unwrap();
                write_type(ctx, out, &pk_field.algebraic_type);
                writeln!(out, ";").unwrap();
                out.delimited_block(
                    "fn primary_key(&self) -> &Self::PrimaryKey {",
                    |out| writeln!(out, "&self.{}", pk_field_name).unwrap(),
                    "}\n",
                )
            },
            "}\n",
        );
    }
    out.newline();

    print_table_filter_methods(
        ctx,
        out,
        &type_name,
        &find_product_type(ctx, table.data).elements,
        &table.column_attrs,
    );
}
/// For each column of the table, write a `filter_by_<column>` method on the
/// table struct. A unique column produces a method returning `Option<Self>`
/// (backed by `find`); every other column returns `TableIter<Self>` (backed
/// by `filter`).
fn print_table_filter_methods(
    ctx: &GenCtx,
    out: &mut Indenter,
    table_type_name: &str,
    elements: &[ProductTypeElement],
    attrs: &[ColumnIndexAttribute],
) {
    write!(out, "impl {} ", table_type_name).unwrap();
    out.delimited_block(
        "{",
        |out| {
            // `elements` and `attrs` are parallel: one attribute per column.
            for (elt, attr) in elements.iter().zip(attrs) {
                let field_name = elt
                    .name
                    .as_ref()
                    .expect("Table columns should have names!")
                    .to_case(Case::Snake);
                // TODO: ensure that fields are PartialEq
                writeln!(out, "{}", ALLOW_UNUSED).unwrap();
                write!(out, "pub fn filter_by_{}({}: ", field_name, field_name).unwrap();
                // TODO: the filter methods should take the target value by
                // reference. String fields should take &str, and array/vector
                // fields should take &[T]. Determine if integer types should be by
                // value. Is there a trait for this?
                // Look at `Borrow` or Deref or AsRef?
                write_type(ctx, out, &elt.algebraic_type);
                write!(out, ") -> ").unwrap();
                if attr.is_unique() {
                    write!(out, "Option<Self>").unwrap();
                } else {
                    write!(out, "TableIter<Self>").unwrap();
                }
                out.delimited_block(
                    " {",
                    |out| {
                        writeln!(
                            out,
                            "Self::{}(|row| row.{} == {})",
                            // TODO: for primary keys, we should be able to do better than
                            // `find` or `filter`. We should be able to look up
                            // directly in the `TableCache`.
                            if attr.is_unique() { "find" } else { "filter" },
                            field_name,
                            field_name,
                        )
                        .unwrap()
                    },
                    "}\n",
                );
            }
        },
        "}\n",
    )
}
/// Generate a file which defines a struct corresponding to the `reducer`'s arguments,
/// implements `spacetimedb_client_sdk::table::Reducer` for it, and defines a helper
/// function which invokes the reducer.
pub fn autogen_rust_reducer(ctx: &GenCtx, reducer: &ReducerDef) -> String {
    let func_name = reducer.name.to_case(Case::Snake);
    let type_name = reducer.name.to_case(Case::Pascal);
    let mut output = CodeIndenter::new(String::new());
    let out = &mut output;

    // The args struct has the same shape as any generated product-type struct.
    begin_rust_struct_def_shared(ctx, out, &type_name, &reducer.args);
    out.newline();

    // impl Reducer: just the associated REDUCER_NAME constant.
    write!(out, "impl Reducer for {} ", type_name).unwrap();
    out.delimited_block(
        "{",
        |out| writeln!(out, "const REDUCER_NAME: &'static str = {:?};", &reducer.name).unwrap(),
        "}\n",
    );
    out.newline();

    // Function definition for the convenient caller, which takes normal args, constructs
    // an instance of the struct, and calls `invoke` on it.
    write!(out, "{}", ALLOW_UNUSED).unwrap();
    write!(out, "pub fn {}", func_name).unwrap();
    // arglist
    // TODO: if reducer.args is empty, just write "()" with no newlines
    out.delimited_block(
        "(",
        |out| write_arglist_no_delimiters(ctx, out, &reducer.args, None),
        ") ",
    );
    // body
    out.delimited_block(
        "{",
        |out| {
            // This is a struct literal.
            write!(out, "{} ", type_name).unwrap();
            // TODO: if reducer.args is empty, write a unit struct.
            out.delimited_block(
                "{",
                |out| {
                    // Field-init shorthand: each parameter name doubles as a field name.
                    for arg in &reducer.args {
                        let Some(name) = &arg.name else {
                            panic!("Reducer {} arg has no name: {:?}", reducer.name, arg);
                        };
                        let name = name.to_case(Case::Snake);
                        writeln!(out, "{},", name).unwrap();
                    }
                },
                "}.invoke();\n",
            );
        },
        "}\n",
    );
    // TODO: generate `pub fn on_{REDUCER_NAME}` function which calls
    // `Reducer::on_reducer` to register a callback. Like the relationship between
    // `pub fn {REDUCER_NAME}` and `Reducer::invoke`, the callback passed to
    // `on_{REDUCER_NAME}` should take an arglist, not an instance of the reducer
    // struct. The fn should wrap the passed callback in a closure of the
    // appropriate type for `Reducer::on_reducer` and unpacks the instance.
    output.into_inner()
}
/// `use` statements required by the dispatch functions emitted into the
/// generated `mod.rs`.
const DISPATCH_IMPORTS: &[&str] = &[
    "use spacetimedb_client_sdk::client_api_messages::{TableUpdate, Event};",
    "use spacetimedb_client_sdk::client_cache::{ClientCache};",
    "use spacetimedb_client_sdk::background_connection::BackgroundDbConnection;",
    "use spacetimedb_client_sdk::identity::Credentials;",
    "use spacetimedb_client_sdk::callbacks::{DbCallbacks, ReducerCallbacks};",
];
/// Write the `use` statements needed by the generated `mod.rs` dispatch functions.
fn print_dispatch_imports(output: &mut Indenter) {
    print_lines(output, DISPATCH_IMPORTS);
}
/// True when `reducer` is the special `__init__` lifecycle reducer, which gets
/// no generated module, caller function, or event dispatch arm of its own.
fn is_init(reducer: &ReducerDef) -> bool {
    "__init__" == reducer.name
}
/// Doc-comment lines emitted above the generated `connect` function.
const CONNECT_DOCSTRING: &[&str] = &[
    "/// Connect to a database named `db_name` accessible over the internet at the URI `host`.",
    "///",
    "/// If `credentials` are supplied, they will be passed to the new connection to",
    "/// identify and authenticate the user. Otherwise, a set of `Credentials` will be",
    "/// generated by the server.",
];
/// Write the doc comment attached to the generated `connect` function.
fn print_connect_docstring(output: &mut Indenter) {
    print_lines(output, CONNECT_DOCSTRING);
}
/// Generate a `mod.rs` as the entry point into the autogenerated code.
///
/// The `mod.rs` contains 5 things:
///
/// 1. `pub mod` declarations for all the other files generated. Without these, either the
///    other files wouldn't get compiled, or users would have to `mod`-declare each file
///    manually.
///
/// 2. `handle_table_update`, which dispatches on table name to find the appropriate type
///    to parse the rows and insert or remove them into/from the
///    `spacetimedb_client_sdk::client_cache::ClientCache`. The other SDKs avoid needing
///    such a dispatch function by dynamically discovering the set of table types,
///    e.g. using C#'s `AppDomain`. Rust's type system prevents this.
///
/// 3. `handle_resubscribe`, which serves the same role as `handle_table_update`, but for
///    re-subscriptions in a `SubscriptionUpdate` following an outgoing `Subscribe`.
///
/// 4. `handle_event`, which serves the same role as `handle_table_update`, but for
///    reducers.
///
/// 5. `connect`, which invokes
///    `spacetimedb_client_sdk::background_connection::BackgroundDbConnection::connect` to
///    connect to a remote database, and passes the `handle_row_update` and
///    `handle_event` functions so the `BackgroundDbConnection` can spawn workers
///    which use those functions to dispatch on the content of messages.
pub fn autogen_rust_globals(ctx: &GenCtx, items: &[GenItem]) -> Vec<(String, String)> {
    let mut output = CodeIndenter::new(String::new());
    let out = &mut output;

    // (banner + imports)
    print_auto_generated_file_comment(out);
    print_spacetimedb_imports(out);
    print_dispatch_imports(out);
    out.newline();

    // (1) `pub mod` declarations.
    print_module_decls(out, items);
    out.newline();

    // (2) handle_table_update.
    // Muffle unused warning for handle_row_update, which is not supposed to be visible to
    // users. It will be used if and only if `connect` is used, so that unused warning is
    // sufficient, and not as confusing.
    writeln!(out, "{}", ALLOW_UNUSED).unwrap();
    out.delimited_block(
        "fn handle_table_update(table_update: TableUpdate, client_cache: &mut ClientCache, callbacks: &mut DbCallbacks) {",
        |out| {
            writeln!(out, "let table_name = &table_update.table_name[..];").unwrap();
            out.delimited_block(
                "match table_name {",
                |out| {
                    // One arm per table; primary-key tables get the PK-aware handler.
                    for item in items {
                        if let GenItem::Table(table) = item {
                            writeln!(
                                out,
                                "{:?} => client_cache.{}::<{}::{}>(callbacks, table_update),",
                                table.name,
                                if find_primary_key_column_index(ctx, table).is_some() {
                                    "handle_table_update_with_primary_key"
                                } else {
                                    "handle_table_update_no_primary_key"
                                },
                                table.name.to_case(Case::Snake),
                                table.name.to_case(Case::Pascal),
                            ).unwrap();
                        }
                    }
                    writeln!(
                        out,
                        "_ => spacetimedb_client_sdk::log::error!(\"TableRowOperation on unknown table {{:?}}\", table_name),",
                    ).unwrap();
                },
                "}\n",
            )
        },
        "}\n",
    );
    out.newline();

    // (3) handle_resubscribe.
    writeln!(out, "{}", ALLOW_UNUSED).unwrap();
    out.delimited_block(
        "fn handle_resubscribe(new_subs: TableUpdate, client_cache: &mut ClientCache, callbacks: &mut DbCallbacks) {",
        |out| {
            writeln!(out, "let table_name = &new_subs.table_name[..];").unwrap();
            out.delimited_block(
                "match table_name {",
                |out| {
                    for item in items {
                        if let GenItem::Table(table) = item {
                            writeln!(
                                out,
                                "{:?} => client_cache.handle_resubscribe_for_type::<{}::{}>(callbacks, new_subs),",
                                table.name,
                                table.name.to_case(Case::Snake),
                                table.name.to_case(Case::Pascal),
                            ).unwrap();
                        }
                    }
                    writeln!(
                        out,
                        "_ => spacetimedb_client_sdk::log::error!(\"TableRowOperation on unknown table {{:?}}\", table_name),",
                    ).unwrap();
                },
                "}\n",
            );
        },
        "}\n"
    );
    out.newline();

    // (4) handle_event.
    // Like `handle_row_update`, muffle unused warning for `handle_event`.
    writeln!(out, "{}", ALLOW_UNUSED).unwrap();
    out.delimited_block(
        "fn handle_event(event: Event, reducer_callbacks: &mut ReducerCallbacks) {",
        |out| {
            out.delimited_block(
                "let Some(function_call) = &event.function_call else {",
                |out| writeln!(out, "spacetimedb_client_sdk::log::warn!(\"Received Event with None function_call\"); return;")
                    .unwrap(),
                "};\n",
            );
            out.delimited_block(
                "match &function_call.reducer[..] {",
                |out| {
                    // One arm per reducer, skipping the `__init__` lifecycle reducer.
                    for item in items {
                        if let GenItem::Reducer(reducer) = item {
                            if !is_init(reducer) {
                                writeln!(
                                    out,
                                    "{:?} => reducer_callbacks.handle_event_of_type::<{}_reducer::{}>(event),",
                                    reducer.name,
                                    reducer.name.to_case(Case::Snake),
                                    reducer.name.to_case(Case::Pascal),
                                ).unwrap();
                            }
                        }
                    }
                    writeln!(
                        out,
                        "unknown => spacetimedb_client_sdk::log::error!(\"Event on an unknown reducer: {{:?}}\", unknown),",
                    ).unwrap();
                },
                "}\n",
            );
        },
        "}\n",
    );
    out.newline();

    // (5) connect. Note: the string below is a multi-line literal; its lines are
    // emitted verbatim into the generated file.
    print_connect_docstring(out);
    out.delimited_block(
        "pub fn connect<Host>(host: Host, db_name: &str, credentials: Option<Credentials>) -> Result<()>
where
\tHost: TryInto<spacetimedb_client_sdk::http::Uri>,
\t<Host as TryInto<spacetimedb_client_sdk::http::Uri>>::Error: std::error::Error + Send + Sync + 'static,
{",
        |out| out.delimited_block(
            "with_connection(|connection| {",
            |out| {
                writeln!(
                    out,
                    "*connection = Some(BackgroundDbConnection::connect(host, db_name, credentials, handle_table_update, handle_resubscribe, handle_event)?);"
                ).unwrap();
                writeln!(out, "Ok(())").unwrap();
            },
            "})\n",
        ),
        "}\n",
    );
    vec![("mod.rs".to_string(), output.into_inner())]
}
/// Write a `pub mod` declaration for every generated module: one per table,
/// type alias, and non-`__init__` reducer (reducer modules get a `_reducer` suffix).
fn print_module_decls(out: &mut Indenter, items: &[GenItem]) {
    for item in items {
        let (name, suffix) = match item {
            GenItem::Table(table) => (&table.name, ""),
            GenItem::TypeAlias(ty) => (&ty.name, ""),
            // `__init__` gets no generated module of its own.
            GenItem::Reducer(reducer) if is_init(reducer) => continue,
            GenItem::Reducer(reducer) => (&reducer.name, "_reducer"),
        };
        writeln!(out, "pub mod {}{};", name.to_case(Case::Snake), suffix).unwrap();
    }
}
/// Collect into `imports` the types referenced by each of `variants`.
fn generate_imports_variants(ctx: &GenCtx, imports: &mut Imports, variants: &[SumTypeVariant]) {
    variants
        .iter()
        .for_each(|variant| generate_imports(ctx, imports, &variant.algebraic_type));
}
/// Collect into `imports` the types referenced by each of `elements`.
fn generate_imports_elements(ctx: &GenCtx, imports: &mut Imports, elements: &[ProductTypeElement]) {
    elements
        .iter()
        .for_each(|element| generate_imports(ctx, imports, &element.algebraic_type));
}
/// Convert a generated type's name into the snake_case name of the module
/// (file) that defines it.
fn module_name(name: &str) -> String {
    name.to_case(Case::Snake)
}
/// Recursively collect into `imports` the `(module, type)` pairs referenced by `ty`.
///
/// Recurses through array element types, map key/value types, and the variants of
/// anonymous sum types; a `Ref` is resolved to its generated type and module name
/// and recorded.
fn generate_imports(ctx: &GenCtx, imports: &mut Imports, ty: &AlgebraicType) {
    match ty {
        AlgebraicType::Builtin(BuiltinType::Array(ArrayType { elem_ty })) => generate_imports(ctx, imports, elem_ty),
        AlgebraicType::Builtin(BuiltinType::Map(map_type)) => {
            generate_imports(ctx, imports, &map_type.key_ty);
            generate_imports(ctx, imports, &map_type.ty);
        }
        // Other builtins need no imports.
        AlgebraicType::Builtin(_) => (),
        AlgebraicType::Ref(r) => {
            let type_name = type_name(ctx, *r);
            let module_name = module_name(&type_name);
            imports.insert((module_name, type_name));
        }
        // Recurse into variants of anonymous sum types, e.g. for `Option<T>`, import `T`.
        AlgebraicType::Sum(s) => generate_imports_variants(ctx, imports, &s.variants),
        // Do we need to generate imports for fields of anonymous product types?
        _ => (),
    }
}
/// Write a `use super::<module>::<Type>;` line for every collected import.
fn print_imports(out: &mut Indenter, imports: Imports) {
    for (module, ty) in imports {
        writeln!(out, "use super::{}::{};", module, ty).unwrap();
    }
}
/// Collect the imports required by `roots` (using `search_fn` to walk them)
/// and write the resulting `use` lines to `out`.
fn gen_and_print_imports<Roots, SearchFn>(ctx: &GenCtx, out: &mut Indenter, roots: Roots, search_fn: SearchFn)
where
    SearchFn: FnOnce(&GenCtx, &mut Imports, Roots),
{
    let mut imports = HashSet::new();
    search_fn(ctx, &mut imports, roots);
    print_imports(out, imports);
}
File diff suppressed because it is too large Load Diff
+546
View File
@@ -0,0 +1,546 @@
use crate::{
config::{Config, IdentityConfig},
util::{init_default, IdentityTokenJson, InitDefaultResultType},
};
use std::io::Write;
use anyhow::Context;
use clap::{Arg, ArgAction, ArgMatches, Command};
use email_address::EmailAddress;
use reqwest::{StatusCode, Url};
use serde::Deserialize;
use spacetimedb_lib::recovery::RecoveryCodeResponse;
use tabled::{object::Columns, Alignment, Modify, Style, Table, Tabled};
/// Top-level clap command for `spacetime identity`.
pub fn cli() -> Command {
    Command::new("identity")
        .about("Manage identities stored by the command line tool")
        .args_conflicts_with_subcommands(true)
        .subcommand_required(true)
        .subcommands(get_subcommands())
}
// TODO(jdetter): identity name and the identity itself should be ubiquitous. You should be able to pass
// an identity or the alias into the command instead of this --name/--identity business
/// Build the clap subcommands of `spacetime identity`:
/// `list`, `set-default`, `set-email`, `init-default`, `new`, `remove`,
/// `import`, `find`, and `recover`.
fn get_subcommands() -> Vec<Command> {
    vec![
        // list: no arguments.
        Command::new("list").about("List saved identities"),
        // set-default: identified by --identity XOR --name.
        Command::new("set-default")
            // TODO(jdetter): Unify providing an identity an a name
            .about("Set the default identity")
            .arg(
                Arg::new("identity")
                    .long("identity")
                    .short('i')
                    .help("The identity that should become the new default identity")
                    .conflicts_with("name"),
            )
            .arg(
                Arg::new("name")
                    .long("name")
                    .short('n')
                    .help("The name of the identity that should become the new default identity")
                    .conflicts_with("identity"),
            ),
        // set-email: positional email plus optional --identity/--name.
        Command::new("set-email")
            .about("Associates an email address with an identity")
            .arg(
                Arg::new("identity")
                    .long("identity")
                    .short('i')
                    .help("The identity that should become the new default identity"),
            )
            .arg(
                Arg::new("name")
                    .long("name")
                    .short('n')
                    .help("The name of the identity that should become the new default identity"),
            )
            .arg(
                Arg::new("email")
                    .help("The email that should be assigned to the provided identity")
                    .required(true),
            ),
        // init-default: create a default identity if none exists.
        Command::new("init-default")
            .about("Initialize a new default identity if it is missing from the global config")
            .arg(
                Arg::new("name")
                    .long("name")
                    .short('n')
                    .help("The name of the identity that should become the new default identity"),
            )
            .arg(
                Arg::new("quiet")
                    .long("quiet")
                    .short('q')
                    .action(ArgAction::SetTrue)
                    .help("Runs command in silent mode"),
            ),
        // new: create an identity; requires either --email or an explicit --no-email.
        Command::new("new")
            .about("Creates a new identity")
            .arg(
                Arg::new("no-save")
                    .help("Don't save save to local config, just create a new identity")
                    .long("no-save")
                    .action(ArgAction::SetTrue),
            )
            .arg(
                Arg::new("name")
                    .long("name")
                    .short('n')
                    .help("Nickname for this identity")
                    .conflicts_with("no-save"),
            )
            .arg(
                Arg::new("email")
                    .long("email")
                    .short('e')
                    .help("Recovery email for this identity")
                    .conflicts_with("no-email"),
            )
            .arg(
                Arg::new("no-email")
                    .long("no-email")
                    .help("Creates an identity without a recovery email")
                    .conflicts_with("email")
                    .action(ArgAction::SetTrue),
            ),
        // remove: identified by --identity XOR --name.
        Command::new("remove")
            .about("Removes a saved identity from your spacetime config")
            // TODO(jdetter): Unify identity + name parameters
            .arg(
                Arg::new("identity")
                    .long("identity")
                    .short('i')
                    .help("The identity to delete")
                    .conflicts_with("name"),
            )
            .arg(
                Arg::new("name")
                    .long("name")
                    .short('n')
                    .help("The name of the identity to delete")
                    .conflicts_with("identity"),
            ),
        // import: positional identity + token, optional --name.
        Command::new("import")
            .about("Imports an existing identity into your spacetime config")
            .arg(
                Arg::new("identity")
                    .required(true)
                    .help("The identity that is associated with the provided token"),
            )
            .arg(
                Arg::new("token")
                    .required(true)
                    .help("The identity token to import. This is used for authenticating with SpacetimeDB"),
            )
            .arg(
                Arg::new("name")
                    .long("name")
                    .short('n')
                    .help("A name for the newly imported identity"),
            ),
        // find: look up identities registered to an email.
        Command::new("find").about("Find an identity for an email").arg(
            Arg::new("email")
                .required(true)
                .help("The email associated with the identity that you would like to find"),
        ),
        // recover: email-based recovery of an identity's token.
        Command::new("recover")
            .about("Recover an existing identity and import it into your local config")
            .arg(
                Arg::new("email")
                    .required(true)
                    .help("The email associated with the identity that you would like to recover."),
            )
            // TODO(jdetter): Unify identity and name here
            .arg(Arg::new("identity").required(true).help(
                "The identity you would like to recover. This identity must be associated with the email provided.",
            )),
    ]
}
/// Entry point for `spacetime identity`: unpack the parsed subcommand and dispatch.
pub async fn exec(config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let (name, sub_args) = args.subcommand().expect("Subcommand required");
    exec_subcommand(config, name, sub_args).await
}
/// Route an `identity` subcommand by name to its handler.
///
/// `cmd` comes from clap's parsed subcommand, so the fallthrough arm guards
/// against a subcommand being registered without a handler here.
async fn exec_subcommand(config: Config, cmd: &str, args: &ArgMatches) -> Result<(), anyhow::Error> {
    match cmd {
        "list" => exec_list(config, args).await,
        "set-default" => exec_set_default(config, args).await,
        "init-default" => exec_init_default(config, args).await,
        "new" => exec_new(config, args).await,
        "remove" => exec_remove(config, args).await,
        // TODO(jdetter): Rename to import
        "import" => exec_import(config, args).await,
        "set-email" => exec_set_email(config, args).await,
        "find" => exec_find(config, args).await,
        "recover" => exec_recover(config, args).await,
        // TODO(jdetter): Command for logging in via email recovery
        unknown => Err(anyhow::anyhow!("Invalid subcommand: {}", unknown)),
    }
}
/// `spacetime identity set-default`: mark the identity selected by `--name`
/// or `--identity` as the default, then persist the config.
async fn exec_set_default(mut config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    if let Some(name) = args.get_one::<String>("name") {
        let identity = config
            .get_identity_config_by_name(name)
            .ok_or_else(|| anyhow::anyhow!("No such identity by that name."))?
            .identity
            .clone();
        config.set_default_identity(identity);
        config.save();
        return Ok(());
    }
    if let Some(requested) = args.get_one::<String>("identity") {
        let identity = config
            .get_identity_config_by_identity(requested)
            .ok_or_else(|| anyhow::anyhow!("No such identity."))?
            .identity
            .clone();
        config.set_default_identity(identity);
        config.save();
        return Ok(());
    }
    Err(anyhow::anyhow!(
        "Either an identity or the name of an identity must be provided."
    ))
}
// TODO(cloutiertyler): Realistically this should just be run before every
// single command, but I'm separating it out into its own command for now for
// simplicity.
/// `spacetime identity init-default`: ensure the config has a default identity,
/// creating and saving one when it is missing. `--quiet` suppresses output.
async fn exec_init_default(mut config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let nickname = args.get_one::<String>("name").map(|s| s.to_owned());
    let quiet = args.get_flag("quiet");
    // `init_default` either returns the existing default or creates a new one.
    let init_default_result = init_default(&mut config, nickname).await?;
    let identity_config = init_default_result.identity_config;
    let result_type = init_default_result.result_type;
    if !quiet {
        match result_type {
            InitDefaultResultType::Existing => {
                println!(" Existing default identity");
                // TODO(jdetter): This should be standardized output
                println!(" IDENTITY {}", identity_config.identity);
                println!(" NAME {}", identity_config.nickname.unwrap_or_default());
                return Ok(());
            }
            InitDefaultResultType::SavedNew => {
                println!(" Saved new identity");
                // TODO(jdetter): This should be standardized output
                println!(" IDENTITY {}", identity_config.identity);
                println!(" NAME {}", identity_config.nickname.unwrap_or_default());
            }
        }
    }
    Ok(())
}
/// `spacetime identity remove`: delete a saved identity selected by `--name`
/// or `--identity`, recompute the default identity, and persist the config.
async fn exec_remove(mut config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let name = args.get_one::<String>("name");
    if let Some(name) = name {
        let ic = config.delete_identity_config_by_name(name);
        if let Some(ic) = ic {
            // The deleted identity may have been the default; recompute it.
            config.update_default_identity();
            config.save();
            println!(" Removed identity");
            // TODO(jdetter): Standardize this identity output
            println!(" IDENTITY {}", ic.identity);
            println!(" NAME {}", ic.nickname.unwrap_or_default());
            return Ok(());
        } else {
            // NOTE(review): the printed message and the returned error text differ
            // ("by that name" vs. plain "No such identity.") — confirm intent.
            println!("No such identity by that name.");
            return Err(anyhow::anyhow!("No such identity."));
        }
    }
    let identity = args
        .get_one::<String>("identity")
        .context("You either need to supply a name or identity to delete.")?;
    let ic = config
        .delete_identity_config_by_identity(identity)
        .context("No such identity")?;
    config.update_default_identity();
    config.save();
    println!(" Removed identity");
    // TODO(jdetter): This should be standardized output
    println!(" IDENTITY {}", ic.identity);
    println!(" NAME {}", ic.nickname.unwrap_or_default());
    Ok(())
}
/// `spacetime identity new`: request a fresh identity from the server's
/// `POST /identity` endpoint and, unless `--no-save` was passed, store it in
/// the local config (becoming the default when none exists yet).
async fn exec_new(mut config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let save = !args.get_flag("no-save");
    let alias = args.get_one::<String>("name");
    if let Some(alias) = alias {
        if config.name_exists(alias) {
            return Err(anyhow::anyhow!("An identity with that name already exists."));
        }
    }
    // Require the user to be explicit: give an email, or opt out with --no-email.
    let email = args.get_one::<String>("email");
    let no_email = args.get_flag("no-email");
    if email.is_none() && !no_email {
        return Err(anyhow::anyhow!(
            "You must either supply an email with --email <email>, or pass the --no-email flag."
        ));
    }
    let mut query_params = Vec::<(&str, &str)>::new();
    if let Some(email) = email {
        // Validate locally before sending the request.
        if !EmailAddress::is_valid(email.as_str()) {
            return Err(anyhow::anyhow!("The email you provided is malformed: {}", email));
        }
        query_params.push(("email", email.as_str()))
    }
    let client = reqwest::Client::new();
    let mut builder = client.post(Url::parse_with_params(
        format!("{}/identity", config.get_host_url()).as_str(),
        query_params,
    )?);
    // Authenticate as the current default identity when one exists.
    if let Some(identity_token) = config.get_default_identity_config() {
        builder = builder.basic_auth("token", Some(identity_token.token.clone()));
    }
    let res = builder.send().await?;
    let res = res.error_for_status()?;
    let body = res.bytes().await?;
    let body = String::from_utf8(body.to_vec())?;
    let identity_token: IdentityTokenJson = serde_json::from_str(&body)?;
    let identity = identity_token.identity.clone();
    if save {
        config.identity_configs_mut().push(IdentityConfig {
            identity: identity_token.identity,
            token: identity_token.token,
            nickname: alias.map(|s| s.to_string()),
        });
        // The first identity ever saved becomes the default automatically.
        if config.default_identity().is_none() {
            config.set_default_identity(identity.clone());
        }
        config.save();
    }
    println!(" IDENTITY {}", identity);
    println!(" NAME {}", alias.unwrap_or(&String::new()));
    println!(" EMAIL {}", email.unwrap_or(&String::new()));
    Ok(())
}
/// `spacetime identity import`: store an existing identity and its auth token
/// in the local config, optionally under a nickname.
async fn exec_import(mut config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    // Both positional args are declared `required` by the CLI.
    let identity = args.get_one::<String>("identity").unwrap().to_owned();
    let token = args.get_one::<String>("token").unwrap().to_owned();
    // The nickname is optional.
    let nickname = args.get_one::<String>("name").cloned();

    config.identity_configs_mut().push(IdentityConfig {
        identity,
        token,
        nickname: nickname.clone(),
    });
    config.save();

    println!(" New Identity Imported");
    println!(" NAME {}", nickname.unwrap_or_default());
    // TODO(jdetter): For consistency lets query the database for the user's email and maybe any domain names
    // associated with this identity.
    Ok(())
}
/// One row of the `spacetime identity list` output table.
#[derive(Tabled)]
#[tabled(rename_all = "UPPERCASE")]
struct LsRow {
    default: String, // "***" when this row is the default identity, otherwise ""
    identity: String,
    name: String,
    // email: String,
}
/// `spacetime identity list`: print a table of every identity stored in the
/// config, with the current default marked `***` in the leftmost column.
async fn exec_list(config: Config, _args: &ArgMatches) -> Result<(), anyhow::Error> {
    // Fetch the default once instead of calling `default_identity()` twice per
    // row with an `is_some()` + `unwrap()` pair.
    let default_identity = config.default_identity();
    let mut rows: Vec<LsRow> = Vec::new();
    for identity_token in config.identity_configs() {
        let is_default = default_identity.as_ref() == Some(&identity_token.identity);
        rows.push(LsRow {
            default: if is_default { "***" } else { "" }.to_string(),
            // Clone only the field we need, not the whole config entry.
            identity: identity_token.identity.clone(),
            name: identity_token.nickname.clone().unwrap_or_default(),
            // TODO(jdetter): We'll have to look this up via a query
            // email: identity_token.email.unwrap_or_default(),
        });
    }
    let table = Table::new(&rows)
        .with(Style::empty())
        .with(Modify::new(Columns::first()).with(Alignment::right()));
    println!("{}", table);
    Ok(())
}
/// Shape of the server's `GET /identity?email=...` response body.
#[derive(Debug, Clone, Deserialize)]
struct GetIdentityResponse {
    identities: Vec<GetIdentityResponseEntry>,
}

/// A single identity/email pair within a `GetIdentityResponse`.
#[derive(Debug, Clone, Deserialize)]
struct GetIdentityResponseEntry {
    identity: String,
    email: String,
}
async fn exec_find(config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
let email = args.get_one::<String>("email").unwrap().clone();
let client = reqwest::Client::new();
let builder = client.get(format!("{}/identity?email={}", config.get_host_url(), email));
let res = builder.send().await?;
if res.status() == StatusCode::OK {
let response: GetIdentityResponse =
serde_json::from_str(String::from_utf8(res.bytes().await?.to_vec())?.as_str())?;
if response.identities.is_empty() {
return Err(anyhow::anyhow!("Could not find identity for: {}", email));
}
for identity in response.identities {
println!("Identity");
println!(" IDENTITY {}", identity.identity);
println!(" EMAIL {}", identity.email);
}
Ok(())
} else if res.status() == StatusCode::NOT_FOUND {
Err(anyhow::anyhow!("Could not find identity for: {}", email))
} else {
Err(anyhow::anyhow!("Error occurred in lookup: {}", res.status()))
}
}
/// `spacetime identity set-email`: associate an email address with an identity
/// via the server's `set-email` endpoint, authenticating with the identity's
/// stored token.
async fn exec_set_email(config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let email = args.get_one::<String>("email").unwrap().clone();
    // NOTE(review): `--identity` is optional in the CLI definition (and `--name`
    // is never read here), so this unwrap can panic when only a name is given —
    // confirm intended behavior.
    let identity = args.get_one::<String>("identity").unwrap().clone();
    let client = reqwest::Client::new();
    let mut builder = client.post(format!(
        "{}/identity/{}/set-email?email={}",
        config.get_host_url(),
        identity,
        email
    ));
    if let Some(identity_token) = config.get_identity_config_by_identity(&identity) {
        builder = builder.basic_auth("token", Some(identity_token.token.clone()));
    } else {
        // NOTE(review): exits with status 0 even though this is a failure path.
        println!("Missing identity credentials for identity.");
        std::process::exit(0);
    }
    let res = builder.send().await?;
    res.error_for_status()?;

    println!(" Associated email with identity");
    // TODO(jdetter): standardize this output
    println!(" IDENTITY {}", identity);
    println!(" EMAIL {}", email);
    Ok(())
}
/// `spacetime identity recover`: ask the server to email a recovery code for
/// an identity, then (with up to five attempts at entering the code) confirm
/// it and import the recovered identity + token into the local config.
async fn exec_recover(mut config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let email = args.get_one::<String>("email").unwrap();
    // Identities are compared case-insensitively below; normalize up front.
    let identity = args.get_one::<String>("identity").unwrap().to_lowercase();
    let query_params = vec![
        ("email", email.as_str()),
        ("identity", identity.as_str()),
        ("link", "false"),
    ];

    // Bail out early if the identity is already stored locally.
    if config
        .identity_configs()
        .iter()
        .any(|a| a.identity.to_lowercase() == identity.to_lowercase())
    {
        return Err(anyhow::anyhow!("No need to recover this identity, it is already stored in your config. Use `spacetime identity list` to list identities."));
    }

    let client = reqwest::Client::new();
    let builder = client.get(Url::parse_with_params(
        format!("{}/database/request_recovery_code", config.get_host_url()).as_str(),
        query_params,
    )?);
    let res = builder.send().await?;
    res.error_for_status()?;

    println!(
        "We have successfully sent a recovery code to {}. Enter the code now.",
        email
    );
    // Give the user up to five attempts at entering the emailed code.
    for _ in 0..5 {
        print!("Recovery Code: ");
        std::io::stdout().flush()?;
        let mut line = String::new();
        std::io::stdin().read_line(&mut line).unwrap();
        let code = match line.trim().parse::<u32>() {
            Ok(value) => value,
            Err(_) => {
                println!("Malformed code. Please try again.");
                continue;
            }
        };

        let client = reqwest::Client::new();
        let builder = client.get(Url::parse_with_params(
            format!("{}/database/confirm_recovery_code", config.get_host_url()).as_str(),
            vec![
                ("code", code.to_string().as_str()),
                ("email", email.as_str()),
                ("identity", identity.as_str()),
            ],
        )?);
        let res = builder.send().await?;
        match res.error_for_status() {
            Ok(res) => {
                let buf = res.bytes().await?.to_vec();
                let utf8 = String::from_utf8(buf)?;
                let response: RecoveryCodeResponse = serde_json::from_str(utf8.as_str())?;
                let identity_config = IdentityConfig {
                    nickname: None,
                    identity: response.identity.clone(),
                    token: response.token,
                };
                config.identity_configs_mut().push(identity_config);
                // The recovered identity may become the default if none was set.
                config.update_default_identity();
                config.save();
                println!("Success. Identity imported.");
                // TODO(jdetter): standardize this output
                println!(" IDENTITY {}", response.identity);
                println!(" EMAIL {}", email);
                return Ok(());
            }
            Err(_) => {
                println!("Invalid recovery code, please try again.");
            }
        }
    }

    Err(anyhow::anyhow!(
        "Maximum amount of attempts reached. Please start the process over."
    ))
}
+163
View File
@@ -0,0 +1,163 @@
use crate::Config;
use anyhow::Context;
use clap::{Arg, ArgMatches};
use colored::Colorize;
use std::path::{Path, PathBuf};
/// Clap command definition for `spacetime init`.
///
/// The args are added in the same order as before: `project-path` is
/// positional (defaulting to the current directory) and `--lang` is required.
pub fn cli() -> clap::Command {
    let project_path = Arg::new("project-path")
        .value_parser(clap::value_parser!(PathBuf))
        .default_value(".")
        .help("The path where we will create the spacetime project");
    let lang = Arg::new("lang")
        .required(true)
        .short('l')
        .long("lang")
        .help("The spacetime module language.")
        .value_parser(clap::value_parser!(ProjectLang));
    clap::Command::new("init")
        .about("Initializes a new spacetime project")
        .arg(project_path)
        .arg(lang)
}
/// Module languages `spacetime init` can scaffold; parsed from `--lang`.
#[derive(clap::ValueEnum, Clone, Copy)]
enum ProjectLang {
    Rust,
}
/// Check whether `cargo` is on the `PATH`, printing an OS-appropriate install
/// hint when it is missing.
///
/// Returns `true` when cargo was found; on an unrecognized OS a warning is
/// printed and `false` is returned without searching.
fn check_for_cargo() -> bool {
    match std::env::consts::OS {
        "linux" | "freebsd" | "netbsd" | "openbsd" | "solaris" | "macos" => {
            if find_executable("cargo").is_some() {
                return true;
            }
            println!("{}", "Warning: You have created a rust project, but you are missing cargo. You should install cargo with the following command:\n\n\tcurl https://sh.rustup.rs -sSf | sh\n".yellow());
        }
        "windows" => {
            if find_executable("cargo.exe").is_some() {
                return true;
            }
            // The original message repeated its own first sentence where a link
            // belonged; point the user at the official install page instead.
            println!("{}", "Warning: You have created a rust project, but you are missing cargo. Visit the rust-lang official website for the latest instructions on installing cargo on Windows:\n\n\thttps://www.rust-lang.org/tools/install\n".yellow());
        }
        unsupported_os => {
            println!("{}", format!("This OS may be unsupported: {}", unsupported_os).yellow());
        }
    }
    false
}
/// Check whether `git` is on the `PATH`, printing an OS-appropriate install
/// hint when it is missing. Returns `true` when git was found; on an
/// unrecognized OS a warning is printed and `false` is returned without searching.
fn check_for_git() -> bool {
    match std::env::consts::OS {
        "linux" | "freebsd" | "netbsd" | "openbsd" | "solaris" => {
            if find_executable("git").is_some() {
                return true;
            }
            println!(
                "{}",
                "Warning: Git is not installed. You should install git using your package manager.\n".yellow()
            );
        }
        "macos" => {
            if find_executable("git").is_some() {
                return true;
            }
            // NOTE(review): running `git --version` on macOS presumably triggers
            // the command line tools install prompt — confirm that's the intent
            // of this hint.
            println!(
                "{}",
                "Warning: Git is not installed. You can install git by invoking:\n\n\tgit --version\n".yellow()
            );
        }
        "windows" => {
            if find_executable("git.exe").is_some() {
                return true;
            }
            println!("{}", "Warning: You are missing git. You should install git from here:\n\n\thttps://git-scm.com/download/win\n".yellow());
        }
        unsupported_os => {
            println!("{}", format!("This OS may be unsupported: {}", unsupported_os).yellow());
        }
    }
    false
}
/// Entry point for `spacetime init`: ensure the target directory exists and is
/// empty (creating it when absent), then delegate to the language-specific
/// project generator.
pub async fn exec(_: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let project_path = args.get_one::<PathBuf>("project-path").unwrap();
    let project_lang = *args.get_one::<ProjectLang>("lang").unwrap();

    if !project_path.exists() {
        create_directory(project_path)?;
    } else if !project_path.is_dir() {
        return Err(anyhow::anyhow!(
            "Path {} exists but is not a directory. A new SpacetimeDB project must be initialized in an empty directory.",
            project_path.display()
        ));
    } else if std::fs::read_dir(project_path).unwrap().count() > 0 {
        return Err(anyhow::anyhow!(
            "Cannot create new SpacetimeDB project in non-empty directory: {}",
            project_path.display()
        ));
    }

    match project_lang {
        ProjectLang::Rust => exec_init_rust(args).await,
    }
}
/// Scaffolds the Rust module template into the project directory, then checks
/// that the toolchain prerequisites (cargo, git) are present.
pub async fn exec_init_rust(args: &ArgMatches) -> Result<(), anyhow::Error> {
    let project_path = args.get_one::<PathBuf>("project-path").unwrap();
    // Template files baked into the binary, paired with their destination
    // paths relative to the project root.
    let export_files = [
        (include_str!("project/Cargo._toml"), "Cargo.toml"),
        (include_str!("project/lib._rs"), "src/lib.rs"),
        (include_str!("project/rust_gitignore"), ".gitignore"),
    ];
    for (contents, rel_path) in export_files {
        let dest = project_path.join(rel_path);
        create_directory(dest.parent().unwrap())?;
        std::fs::write(dest, contents)?;
    }
    // Check all dependencies
    check_for_cargo();
    check_for_git();
    println!(
        "{}",
        format!("Project successfully created at path: {}", project_path.display()).green()
    );
    Ok(())
}
fn create_directory(path: &Path) -> Result<(), anyhow::Error> {
std::fs::create_dir_all(path).context("Failed to create directory")
}
/// Searches the directories listed in the `PATH` environment variable for a
/// file named `exe_name`, returning the first match.
///
/// Only checks that the entry exists and is a regular file; it does not test
/// the executable permission bit.
fn find_executable<P>(exe_name: P) -> Option<std::path::PathBuf>
where
    P: AsRef<Path>,
{
    let paths = std::env::var_os("PATH")?;
    std::env::split_paths(&paths).find_map(|dir| {
        let candidate = dir.join(exe_name.as_ref());
        if candidate.is_file() {
            Some(candidate)
        } else {
            None
        }
    })
}
+75
View File
@@ -0,0 +1,75 @@
use crate::Config;
use clap::{Arg, ArgMatches, Command};
use reqwest::StatusCode;
use serde::Deserialize;
use serde_json;
use tabled::object::Columns;
use tabled::{Alignment, Modify, Style, Table, Tabled};
/// Builds the `list` subcommand: shows the databases owned by an identity.
pub fn cli() -> Command {
    let identity_arg = Arg::new("identity")
        .required(true)
        .help("The identity to list databases for");
    Command::new("list")
        .about("Lists the databases attached to an identity")
        .arg(identity_arg)
}
/// JSON payload returned by `GET /identity/{identity}/databases`.
#[derive(Deserialize)]
struct DatabasesResult {
    // Addresses of every database attached to the queried identity.
    pub addresses: Vec<String>,
}
/// One row of the rendered output table.
#[derive(Tabled)]
struct AddressRow {
    pub db_address: String,
}
/// Entry point for `spacetime list`: fetches and prints the database
/// addresses attached to the given identity.
///
/// Requires stored credentials for the identity; the request is authenticated
/// with its token via HTTP basic auth.
pub async fn exec(config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let identity = match args.get_one::<String>("identity") {
        Some(value) => value.to_string(),
        // NOTE(review): the "identity" arg is declared `.required(true)` in
        // `cli()`, so this fallback appears unreachable today; kept for safety.
        None => match config.default_identity() {
            Some(default_ident) => default_ident.to_string(),
            None => {
                return Err(anyhow::anyhow!("No default identity, and no identity provided!"));
            }
        },
    };
    let client = reqwest::Client::new();
    let mut builder = client.get(format!("{}/identity/{}/databases", config.get_host_url(), identity));
    if let Some(identity_token) = config.get_identity_config_by_identity(&identity) {
        builder = builder.basic_auth("token", Some(identity_token.token.clone()));
    } else {
        return Err(anyhow::anyhow!("Missing identity credentials for identity."));
    }
    let res = builder.send().await?;
    if res.status() != StatusCode::OK {
        return Err(anyhow::anyhow!(format!(
            "Unable to retrieve databases for identity: {}",
            res.status()
        )));
    }
    let res_text = res.text().await?;
    let result: DatabasesResult = serde_json::from_str(res_text.as_str())?;
    // Build the table rows with an iterator instead of a manual push loop.
    let result_list: Vec<AddressRow> = result
        .addresses
        .into_iter()
        .map(|db_address| AddressRow { db_address })
        .collect();
    if !result_list.is_empty() {
        let table = Table::new(result_list)
            .with(Style::psql())
            .with(Modify::new(Columns::first()).with(Alignment::left()));
        println!("Associated database addresses for {}:\n", identity);
        println!("{}", table);
    } else {
        println!("No databases found for {}.", identity);
    }
    Ok(())
}
+196
View File
@@ -0,0 +1,196 @@
use std::borrow::Cow;
use std::io::{self, Write};
use crate::config::Config;
use crate::util::get_auth_header;
use crate::util::spacetime_dns;
use clap::ArgMatches;
use clap::{Arg, ArgAction};
use futures::{AsyncBufReadExt, TryStreamExt};
use is_terminal::IsTerminal;
use spacetimedb_lib::name::{is_address, DnsLookupResponse};
use termcolor::{Color, ColorSpec, WriteColor};
/// Builds the `logs` subcommand: prints (and optionally follows) the log
/// stream of a SpacetimeDB database.
pub fn cli() -> clap::Command {
    clap::Command::new("logs")
        .about("Prints logs from a SpacetimeDB database")
        .arg(
            Arg::new("database")
                .required(true)
                .help("The domain or address of the database to print logs from"),
        )
        .arg(
            // TODO(jdetter): unify this with identity + name
            Arg::new("identity")
                .long("identity")
                .short('i')
                .help("The identity to use for printing logs from this database"),
        )
        .arg(
            // Positional: how many lines to print; omitted means "all lines".
            Arg::new("num_lines")
                .value_parser(clap::value_parser!(u32))
                .help("The number of lines to print from the start of the log of this database")
                .long_help("The number of lines to print from the start of the log of this database. If no num lines is provided, all lines will be returned."),
        )
        .arg(
            Arg::new("follow")
                .long("follow")
                .short('f')
                .required(false)
                .action(ArgAction::SetTrue)
                .help("A flag indicating whether or not to follow the logs")
                .long_help("A flag that causes logs to not stop when end of the log file is reached, but rather to wait for additional data to be appended to the input."),
        )
        .after_help("Run `spacetime help logs` for more detailed information.\n")
}
/// Log severity levels carried by module log records.
#[derive(serde::Deserialize)]
pub enum LogLevel {
    Error,
    Warn,
    Info,
    Debug,
    Trace,
    Panic,
}
/// One JSON log record as streamed from the `/database/logs/{address}`
/// endpoint. String fields borrow from the line buffer where possible.
#[derive(serde::Deserialize)]
struct Record<'a> {
    level: LogLevel,
    #[serde(borrow)]
    #[allow(unused)] // TODO: format this somehow
    target: Option<Cow<'a, str>>,
    #[serde(borrow)]
    filename: Option<Cow<'a, str>>,
    line_number: Option<u32>,
    #[serde(borrow)]
    message: Cow<'a, str>,
    // Present only for records carrying a module backtrace.
    trace: Option<Vec<BacktraceFrame<'a>>>,
}
/// A single frame of a module backtrace attached to a log record.
#[derive(serde::Deserialize)]
pub struct BacktraceFrame<'a> {
    #[serde(borrow)]
    pub module_name: Option<Cow<'a, str>>,
    #[serde(borrow)]
    pub func_name: Option<Cow<'a, str>>,
}
/// Query-string parameters accepted by the logs endpoint.
#[derive(serde::Serialize)]
struct LogsParams {
    num_lines: Option<u32>,
    follow: bool,
}
/// Entry point for `spacetime logs`: streams newline-delimited JSON log
/// records for a database and renders them with per-level colors to stderr.
///
/// The database may be given as a literal address or as a DNS name (which is
/// resolved first).
pub async fn exec(mut config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let num_lines = args.get_one::<u32>("num_lines").copied();
    let database = args.get_one::<String>("database").unwrap();
    let follow = args.get_flag("follow");
    let identity = args.get_one::<String>("identity");
    let auth_header = get_auth_header(&mut config, false, identity.map(|x| x.as_str()))
        .await
        .map(|x| x.0);
    let address = if is_address(database.as_str()) {
        database.clone()
    } else {
        match spacetime_dns(&config, database).await? {
            DnsLookupResponse::Success { domain: _, address } => address,
            DnsLookupResponse::Failure { domain } => {
                return Err(anyhow::anyhow!("The dns resolution of {} failed.", domain));
            }
        }
    };
    // TODO: num_lines should default to like 10 if follow is specified?
    let query_parms = LogsParams { num_lines, follow };
    let client = reqwest::Client::new();
    let mut builder = client.get(format!("{}/database/logs/{}", config.get_host_url(), address));
    if let Some(auth_header) = auth_header {
        builder = builder.header("Authorization", auth_header);
    }
    let res = builder.query(&query_parms).send().await?;
    let res = res.error_for_status()?;
    // Only colorize when stderr is a real terminal.
    let term_color = if std::io::stderr().is_terminal() {
        termcolor::ColorChoice::Auto
    } else {
        termcolor::ColorChoice::Never
    };
    let out = termcolor::StandardStream::stderr(term_color);
    let mut out = out.lock();
    // Adapt the response byte stream into an async reader so we can pull one
    // JSON record per line.
    let mut rdr = res
        .bytes_stream()
        .map_err(|e| io::Error::new(io::ErrorKind::Other, e))
        .into_async_read();
    let mut line = String::new();
    while rdr.read_line(&mut line).await? != 0 {
        let record = serde_json::from_str::<Record<'_>>(&line)?;
        let mut color = ColorSpec::new();
        let level = match record.level {
            LogLevel::Error => {
                color.set_fg(Some(Color::Red));
                "ERROR"
            }
            LogLevel::Warn => {
                color.set_fg(Some(Color::Yellow));
                "WARN"
            }
            LogLevel::Info => {
                color.set_fg(Some(Color::Blue));
                "INFO"
            }
            LogLevel::Debug => {
                color.set_dimmed(true).set_bold(true);
                "DEBUG"
            }
            LogLevel::Trace => {
                color.set_dimmed(true);
                "TRACE"
            }
            LogLevel::Panic => {
                color.set_fg(Some(Color::Red)).set_bold(true).set_intense(true);
                "PANIC"
            }
        };
        out.set_color(&color)?;
        write!(out, "{level:>5}: ")?;
        out.reset()?;
        let dimmed = ColorSpec::new().set_dimmed(true).clone();
        if let Some(filename) = record.filename {
            out.set_color(&dimmed)?;
            // Print the record's source file; previously this wrote the
            // literal placeholder "(unknown)" and ignored `filename`.
            write!(out, "{filename}")?;
            if let Some(line) = record.line_number {
                write!(out, ":{line}")?;
            }
            out.reset()?;
        }
        writeln!(out, ": {}", record.message)?;
        if let Some(trace) = &record.trace {
            for frame in trace {
                write!(out, " in ")?;
                if let Some(module) = &frame.module_name {
                    out.set_color(&dimmed)?;
                    write!(out, "{module}")?;
                    out.reset()?;
                    write!(out, " :: ")?;
                }
                if let Some(function) = &frame.func_name {
                    out.set_color(&dimmed)?;
                    writeln!(out, "{function}")?;
                    out.reset()?;
                }
            }
        }
        line.clear();
    }
    Ok(())
}
+19
View File
@@ -0,0 +1,19 @@
// Subcommand modules of the `spacetime` CLI; each exposes a `cli()` command
// definition and an `exec()` entry point.
pub mod build;
pub mod call;
pub mod delete;
pub mod describe;
pub mod dns;
pub mod energy;
pub mod generate;
pub mod identity;
pub mod init;
pub mod list;
pub mod logs;
pub mod publish;
pub mod repl;
pub mod server;
pub mod sql;
pub mod version;
// Only compiled when the "tracelogging" feature is enabled.
#[cfg(feature = "tracelogging")]
pub mod tracelog;
@@ -0,0 +1,3 @@
host = ''
identity = ''
address = ''
@@ -0,0 +1,13 @@
[package]
name = "spacetime-module"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
crate-type = ["cdylib"]
[dependencies]
spacetimedb = "0.4.1"
log = "0.4"
@@ -0,0 +1,35 @@
use spacetimedb::{spacetimedb, ReducerContext};
use log;
// Example table; each row stores one person's name.
#[spacetimedb(table)]
pub struct Person {
    name: String
}
#[spacetimedb(init)]
pub fn init() {
    // Called when the module is initially published
}
#[spacetimedb(connect)]
pub fn identity_connected(_ctx: ReducerContext) {
    // Called every time a new client connects
}
#[spacetimedb(disconnect)]
pub fn identity_disconnected(_ctx: ReducerContext) {
    // Called every time a client disconnects
}
// Reducer: inserts a new Person row with the given name.
#[spacetimedb(reducer)]
pub fn add(name: String) {
    Person::insert(Person { name });
}
// Reducer: logs a greeting for every stored person, then a generic one.
#[spacetimedb(reducer)]
pub fn say_hello() {
    for person in Person::iter() {
        log::info!("Hello, {}!", person.name);
    }
    log::info!("Hello, World!");
}
@@ -0,0 +1,9 @@
#!@duckscript
# Make sure that we have the wasm target installed (ok to run if its already installed)
exec --fail-on-error rustup target add wasm32-unknown-unknown
exec --fail-on-error cargo --config net.git-fetch-with-cli=true build --target wasm32-unknown-unknown --release
# Update the running module
exec --fail-on-error spacetime identity init-default --quiet
exec --fail-on-error spacetime energy set-balance 5000000000000000 --quiet
@@ -0,0 +1,17 @@
# Generated by Cargo
# will have compiled files and executables
debug/
target/
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb
# Spacetime ignore
/.spacetime
+227
View File
@@ -0,0 +1,227 @@
use anyhow::bail;
use clap::Arg;
use clap::ArgAction::SetTrue;
use clap::ArgMatches;
use reqwest::Url;
use spacetimedb_lib::name::PublishOp;
use spacetimedb_lib::name::{is_address, parse_domain_name, PublishResult};
use std::fs;
use std::path::PathBuf;
use crate::config::Config;
use crate::util::get_auth_header;
use crate::util::init_default;
/// Builds the `publish` subcommand: compiles a module project and uploads it
/// to create or update a database.
pub fn cli() -> clap::Command {
    clap::Command::new("publish")
        .about("Create and update a SpacetimeDB database")
        .arg(
            Arg::new("host_type")
                .long("host-type")
                .short('t')
                .value_parser(["wasmer"])
                .default_value("wasmer")
                .help("The type of host that should be for hosting this module"),
        )
        .arg(
            // TODO(jdetter): Rename this to --delete-tables (clear doesn't really imply that the tables are being dropped)
            Arg::new("clear_database")
                .long("clear-database")
                .short('c')
                .action(SetTrue)
                .help("When publishing a new module to an existing address, also delete all tables associated with the database"),
        )
        .arg(
            Arg::new("path_to_project")
                .value_parser(clap::value_parser!(PathBuf))
                .default_value(".")
                .long("project-path")
                .short('p')
                .help("The system path (absolute or relative) to the module project")
        )
        .arg(
            Arg::new("trace_log")
                .long("trace_log")
                .help("Turn on diagnostic/performance tracing for this project")
                .action(SetTrue),
        )
        // TODO(tyler): We should be able to pass in either an identity or an alias here
        // TODO(jdetter): Unify identity + identity alias
        .arg(
            Arg::new("identity")
                .long("identity")
                .short('I')
                .help("The identity that should own the database")
                .long_help("The identity that should own the database. If no identity is provided, your default identity will be used."))
        // TODO(jdetter): add this back in when we actually support this
        // .arg(
        //     Arg::new("as_identity")
        //         .long("as-identity")
        //         .short('i')
        //         .required(false)
        //         .conflicts_with("anon_identity"),
        // )
        .arg(
            Arg::new("anon_identity")
                .long("anon-identity")
                .short('a')
                .action(SetTrue)
                .help("Instruct SpacetimeDB to allocate a new identity to own this database"),
        )
        .arg(
            // Can also be enabled via the SPACETIME_SKIP_CLIPPY env var; the
            // falsey parser treats "0"/"false"/empty as disabled.
            Arg::new("skip_clippy")
                .long("skip_clippy")
                .short('s')
                .action(SetTrue)
                .env("SPACETIME_SKIP_CLIPPY")
                .value_parser(clap::builder::FalseyValueParser::new())
                .help("Skips running clippy on the module before publishing (intended to speed up local iteration, not recommended for CI)"),
        )
        .arg(
            Arg::new("debug")
                .long("debug")
                .short('d')
                .action(SetTrue)
                .help("Builds the module using debug instead of release (intended to speed up local iteration, not recommended for CI)"),
        )
        .arg(
            Arg::new("name|address")
                .help("A valid domain or address for this database"),
        )
        .after_help("Run `spacetime help publish` for more detailed information.")
}
/// Entry point for `spacetime publish`: builds the module WASM, uploads it to
/// the `/database/publish` endpoint, and reports the resulting address (or a
/// helpful error when the requested domain is unavailable).
pub async fn exec(mut config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let identity = args.get_one::<String>("identity");
    let name_or_address = args.get_one::<String>("name|address");
    let path_to_project = args.get_one::<PathBuf>("path_to_project").unwrap();
    let host_type = args.get_one::<String>("host_type").unwrap();
    let clear_database = args.get_flag("clear_database");
    let trace_log = args.get_flag("trace_log");
    let anon_identity = args.get_flag("anon_identity");
    let skip_clippy = args.get_flag("skip_clippy");
    let build_debug = args.get_flag("debug");
    let mut query_params = Vec::<(&str, &str)>::new();
    query_params.push(("host_type", host_type.as_str()));
    query_params.push(("register_tld", "true"));
    // If a domain or address was provided, we should locally make sure it looks correct and
    // append it as a query parameter
    if let Some(name_or_address) = name_or_address {
        if !is_address(name_or_address) {
            parse_domain_name(name_or_address)?;
        }
        query_params.push(("name_or_address", name_or_address.as_str()));
    }
    if !path_to_project.exists() {
        return Err(anyhow::anyhow!(
            "Project path does not exist: {}",
            path_to_project.display()
        ));
    }
    if clear_database {
        query_params.push(("clear", "true"));
    }
    if trace_log {
        query_params.push(("trace_log", "true"));
    }
    let client = reqwest::Client::new();
    let path_to_wasm = crate::tasks::build(path_to_project, skip_clippy, build_debug)?;
    let program_bytes = fs::read(path_to_wasm)?;
    let mut builder = client.post(Url::parse_with_params(
        format!("{}/database/publish", config.get_host_url()).as_str(),
        query_params,
    )?);
    // If the user didn't specify an identity and we didn't specify an anonymous identity, then
    // we want to use the default identity
    // TODO(jdetter): We should maybe have some sort of user prompt here for them to be able to
    // easily create a new identity with an email
    let identity = if !anon_identity {
        if identity.is_none() && config.default_identity().is_none() {
            init_default(&mut config, None).await?;
        }
        if let Some((auth_header, chosen_identity)) =
            get_auth_header(&mut config, anon_identity, identity.map(|x| x.as_str())).await
        {
            builder = builder.header("Authorization", auth_header);
            Some(chosen_identity)
        } else {
            None
        }
    } else {
        None
    };
    let res = builder.body(program_bytes).send().await?;
    if res.status().is_client_error() || res.status().is_server_error() {
        let err = res.text().await?;
        bail!(err)
    }
    // Propagate body-read and JSON-parse failures instead of panicking on
    // `unwrap()` — the server response is not under our control.
    let bytes = res.bytes().await?;
    let response: PublishResult = serde_json::from_slice(&bytes[..])?;
    match response {
        PublishResult::Success { domain, address, op } => {
            let op = match op {
                PublishOp::Created => "Created new",
                PublishOp::Updated => "Updated",
            };
            if let Some(domain) = domain {
                println!("{} database with domain: {}, address: {}", op, domain, address);
            } else {
                println!("{} database with address: {}", op, address);
            }
        }
        PublishResult::TldNotRegistered { domain } => {
            return Err(anyhow::anyhow!(
                "The top level domain that you provided is not registered.\n\
                This tld is not yet registered to any identity. You can register this domain with the following command:\n\
                \n\
                \tspacetime dns register-tld {}\n",
                domain.tld()
            ));
        }
        PublishResult::PermissionDenied { domain } => {
            return match identity {
                Some(identity) => {
                    //TODO(jdetter): Have a nice name generator here, instead of using some abstract characters
                    // we should perhaps generate fun names like 'green-fire-dragon' instead
                    let suggested_tld: String = identity.to_hex().chars().take(12).collect();
                    if let Some(sub_domain) = domain.sub_domain() {
                        Err(anyhow::anyhow!(
                            "The top level domain {} is not registered to the identity you provided.\n\
                            We suggest you publish to a domain that starts with a TLD owned by you, or publish to a new domain like:\n\
                            \tspacetime publish {}/{}\n",
                            domain.tld(),
                            suggested_tld,
                            sub_domain
                        ))
                    } else {
                        Err(anyhow::anyhow!(
                            "The top level domain {} is not registered to the identity you provided.\n\
                            We suggest you push to either a domain owned by you, or a new domain like:\n\
                            \tspacetime publish {}\n",
                            domain.tld(),
                            suggested_tld
                        ))
                    }
                }
                None => Err(anyhow::anyhow!(
                    "The domain {} is not registered to the identity you provided.",
                    domain
                )),
            };
        }
    }
    Ok(())
}
+247
View File
@@ -0,0 +1,247 @@
use crate::api::ClientApi;
use crate::sql::{parse_req, run_sql};
use crate::Config;
use clap::{Arg, ArgAction, ArgMatches};
use colored::*;
use std::io::Write;
use rustyline::completion::Completer;
use rustyline::error::ReadlineError;
use rustyline::highlight::Highlighter;
use rustyline::hint::Hinter;
use rustyline::history::DefaultHistory;
use rustyline::validate::{MatchingBracketValidator, Validator};
use rustyline::{Editor, Helper};
use syntect::easy::HighlightLines;
use syntect::highlighting::{Theme, ThemeSet};
use syntect::parsing::{SyntaxDefinition, SyntaxSet, SyntaxSetBuilder};
use syntect::util::LinesWithEndings;
// Bundled sublime-syntax definition used to highlight REPL input.
static SQL_SYNTAX: &str = include_str!("../../tools/sublime/SpaceTimeDbSQL.sublime-syntax");
static SYNTAX_NAME: &str = "SQL (SpaceTimeDb)";
/// Newline-separated word list offered by tab-completion and hinting in the
/// REPL: SQL keywords plus the REPL's dot-commands.
///
/// The `update`/`delete`/`create` entries previously carried stray trailing
/// commas, so completing them inserted e.g. `update,` into the buffer.
static AUTO_COMPLETE: &str = "\
true
false
select
from
insert
into
values
update
delete
create
where
join
sort by
.exit
.clear
";
/// Builds the `repl` subcommand: an interactive SQL prompt for one database.
pub fn cli() -> clap::Command {
    clap::Command::new("repl").about("Runs an interactive command prompt.")
        .arg(
            Arg::new("database")
                .required(true)
                .help("The domain or address of the database you would like to query"),
        )
        .arg(
            Arg::new("as_identity")
                .long("as-identity")
                .short('i')
                .conflicts_with("anon_identity")
                .help("The identity to use for querying the database")
                .long_help("The identity to use for querying the database. If no identity is provided, the default one will be used."),
        )
        .arg(
            Arg::new("anon_identity")
                .long("anon-identity")
                .short('a')
                .conflicts_with("as_identity")
                .action(ArgAction::SetTrue)
                .help("If this flag is present, no identity will be provided when querying the database")
        )
}
/// Entry point for `spacetime repl`: an interactive SQL prompt against a
/// single database, with history, highlighting, hinting and completion.
///
/// History is persisted to `.history.txt` in the current working directory.
pub async fn exec(config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let con = parse_req(config, args).await?;
    let database = con.database.clone();
    let mut rl = Editor::<ReplHelper, DefaultHistory>::new().unwrap();
    if rl.load_history(".history.txt").is_err() {
        eprintln!("No previous history.");
    }
    rl.set_helper(Some(ReplHelper::new().unwrap()));
    // Removed a stray box-drawing character ("│") that was left dangling at
    // the end of the Discord line of this banner.
    println!(
        "\
.exit: Exit the REPL
.clear: Clear the Screen
Give us feedback in our Discord server:
https://discord.gg/w2DVqNZXdN
",
    );
    let api = ClientApi::new(con);
    loop {
        let readline = rl.readline(&format!("🪐{}>", &database).green());
        match readline {
            Ok(line) => match line.as_str() {
                ".exit" => break,
                ".clear" => {
                    //todo: this could not work on windows
                    print!("{esc}[2J{esc}[1;1H", esc = 27 as char);
                    std::io::stdout().flush().ok();
                }
                sql => match run_sql(api.sql(), sql).await {
                    Ok(()) => {
                        rl.add_history_entry(line).ok();
                    }
                    Err(err) => {
                        eprintln!("{}", err.to_string().red())
                    }
                },
            },
            Err(ReadlineError::Interrupted) | Err(ReadlineError::Eof) => {
                println!("\n{}", "Aborted!".red());
                break;
            }
            x => {
                eprintln!("\nUnexpected: {x:?}");
                break;
            }
        }
    }
    rl.save_history(".history.txt").ok();
    Ok(())
}
/// rustyline helper wiring together completion, hinting, highlighting and
/// bracket validation for the REPL.
pub(crate) struct ReplHelper {
    // Compiled syntax set containing only the SpaceTimeDb SQL definition.
    syntaxes: SyntaxSet,
    // Color theme used when rendering highlighted input.
    theme: Theme,
    // Validator that keeps reading input until brackets are balanced.
    brackets: MatchingBracketValidator,
}
impl ReplHelper {
    /// Compiles the bundled SpaceTimeDb SQL sublime-syntax definition and
    /// loads the default color theme used for REPL highlighting.
    pub fn new() -> Result<Self, ()> {
        let syntax_def = SyntaxDefinition::load_from_str(SQL_SYNTAX, false, Some(SYNTAX_NAME)).unwrap();
        let mut builder = SyntaxSetBuilder::new();
        builder.add(syntax_def);
        let syntaxes = builder.build();
        // Dropped the dead `let _ps = SyntaxSet::load_defaults_newlines();`
        // that was here: its result was never used, but it loaded the entire
        // default syntax set on every REPL start.
        let ts = ThemeSet::load_defaults();
        let theme = ts.themes["base16-ocean.dark"].clone();
        Ok(ReplHelper {
            syntaxes,
            theme,
            brackets: MatchingBracketValidator::new(),
        })
    }
}
impl Helper for ReplHelper {}
/// Tab-completion: matches the word fragment left of the cursor against the
/// static `AUTO_COMPLETE` keyword list.
impl Completer for ReplHelper {
    type Candidate = String;
    fn complete(
        &self,
        line: &str,
        pos: usize,
        _: &rustyline::Context<'_>,
    ) -> rustyline::Result<(usize, Vec<Self::Candidate>)> {
        // Walk backwards from the cursor collecting identifier-ish characters
        // (alphanumerics plus '_' and '.') to find the word being completed.
        // NOTE(review): `chars().nth()` treats `pos` as a character index;
        // if rustyline positions are byte offsets this may misbehave on
        // multi-byte input — confirm against rustyline's docs.
        let mut name = String::new();
        let mut name_pos = pos;
        while let Some(char) = line.chars().nth(name_pos.wrapping_sub(1)) {
            if !char.is_ascii_alphanumeric() && !['_', '.'].contains(&char) {
                break;
            }
            name.push(char);
            name_pos -= 1;
        }
        if name.is_empty() {
            return Ok((0, vec![]));
        }
        // Characters were collected right-to-left; restore original order.
        name = name.chars().rev().collect();
        let mut completions: Vec<_> = AUTO_COMPLETE.split('\n').map(str::to_string).collect();
        completions = completions
            .iter()
            .filter_map(|it| if it.starts_with(&name) { Some(it.clone()) } else { None })
            .collect();
        Ok((name_pos, completions))
    }
}
/// Inline hinting: shows the not-yet-typed remainder of the first completion
/// candidate after the cursor (rendered dimmed by the highlighter).
impl Hinter for ReplHelper {
    type Hint = String;
    fn hint(&self, line: &str, pos: usize, ctx: &rustyline::Context<'_>) -> Option<Self::Hint> {
        // Only hint when the cursor sits at the end of the line.
        if line.len() > pos {
            return None;
        }
        if let Ok((mut completion_pos, completions)) = self.complete(line, pos, ctx) {
            if completions.is_empty() {
                return None;
            }
            // Strip the already-typed prefix from the candidate so only the
            // remaining suffix is displayed as the hint.
            let mut hint = completions[0].clone();
            while completion_pos < pos {
                if hint.is_empty() {
                    return None;
                }
                hint.remove(0);
                completion_pos += 1;
            }
            Some(hint)
        } else {
            None
        }
    }
}
/// Syntax highlighting for the input line, prompt, hints and completion
/// candidates, backed by the bundled sublime-syntax definition.
impl Highlighter for ReplHelper {
    fn highlight<'l>(&self, line: &'l str, _: usize) -> std::borrow::Cow<'l, str> {
        let mut h = HighlightLines::new(self.syntaxes.find_syntax_by_name(SYNTAX_NAME).unwrap(), &self.theme);
        let mut out = String::new();
        // Highlight line-by-line and emit 24-bit ANSI color escapes.
        for line in LinesWithEndings::from(line) {
            let ranges = h.highlight_line(line, &self.syntaxes).unwrap();
            let escaped = syntect::util::as_24_bit_terminal_escaped(&ranges[..], false);
            out += &escaped;
        }
        std::borrow::Cow::Owned(out)
    }
    fn highlight_prompt<'b, 's: 'b, 'p: 'b>(&'s self, prompt: &'p str, _: bool) -> std::borrow::Cow<'b, str> {
        std::borrow::Cow::Owned(prompt.green().to_string())
    }
    fn highlight_hint<'h>(&self, hint: &'h str) -> std::borrow::Cow<'h, str> {
        std::borrow::Cow::Owned(hint.bright_black().to_string())
    }
    fn highlight_candidate<'c>(&self, candidate: &'c str, _: rustyline::CompletionType) -> std::borrow::Cow<'c, str> {
        std::borrow::Cow::Owned(candidate.bright_cyan().to_string())
    }
    // Always re-highlight on every keystroke.
    fn highlight_char(&self, _: &str, _: usize) -> bool {
        true
    }
}
/// Input validation: delegates to rustyline's bracket matcher so the REPL
/// keeps reading input until brackets are balanced.
impl Validator for ReplHelper {
    fn validate(
        &self,
        ctx: &mut rustyline::validate::ValidationContext,
    ) -> rustyline::Result<rustyline::validate::ValidationResult> {
        self.brackets.validate(ctx)
    }
}
+40
View File
@@ -0,0 +1,40 @@
use crate::{util::VALID_PROTOCOLS, Config};
use clap::{Arg, ArgMatches};
/// Builds the `server` subcommand: points the CLI at a different
/// SpacetimeDB host.
pub fn cli() -> clap::Command {
    let url_arg = Arg::new("url")
        .help("The URL of the SpacetimeDB server to connect to. Example: https://spacetimedb.com")
        .required(true);
    clap::Command::new("server")
        .about("Changes the host and protocol values for future interactions with spacetimedb")
        .arg(url_arg)
}
/// Entry point for `spacetime server`: updates the host and protocol used for
/// future requests and persists them to the CLI config.
///
/// Accepts a URL of the form `<protocol>://<host>`; the protocol must be one
/// of `VALID_PROTOCOLS`.
pub async fn exec(mut config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let url = args.get_one::<String>("url").unwrap();
    // Split on the first "://" only. The previous implementation derived the
    // host with `split("://").last()`, which mis-parsed a URL containing a
    // second "://" later in the string (the host became only the final part).
    let (protocol, host) = match url.split_once("://") {
        Some(parts) => parts,
        None => return Err(anyhow::anyhow!("Invalid url: {}", url)),
    };
    if !VALID_PROTOCOLS.contains(&protocol) {
        return Err(anyhow::anyhow!("Invalid protocol: {}", protocol));
    }
    config.set_host(host);
    config.set_protocol(protocol);
    println!("Host: {}", host);
    println!("Protocol: {}", protocol);
    config.save();
    Ok(())
}
+129
View File
@@ -0,0 +1,129 @@
use crate::api::{from_json_seed, ClientApi, Connection, StmtResultJson};
use anyhow::Context;
use clap::Arg;
use clap::ArgAction;
use clap::ArgMatches;
use reqwest::RequestBuilder;
use spacetimedb_lib::de::serde::SeedWrapper;
use spacetimedb_lib::name::{is_address, DnsLookupResponse};
use spacetimedb_lib::sats::satn;
use spacetimedb_lib::sats::Typespace;
use tabled::builder::Builder;
use tabled::Style;
use crate::config::Config;
use crate::util::get_auth_header;
use crate::util::spacetime_dns;
/// Builds the `sql` subcommand: run one SQL query against a database.
pub fn cli() -> clap::Command {
    clap::Command::new("sql")
        .about("Runs a SQL query on the database.")
        .arg(
            Arg::new("database")
                .required(true)
                .help("The domain or address of the database you would like to query"),
        )
        .arg(
            Arg::new("query")
                .required(true)
                .help("The SQL query to execute"),
        )
        .arg(
            Arg::new("as_identity")
                .long("as-identity")
                .short('i')
                .conflicts_with("anon_identity")
                .help("The identity to use for querying the database")
                .long_help("The identity to use for querying the database. If no identity is provided, the default one will be used."),
        )
        .arg(
            Arg::new("anon_identity")
                .long("anon-identity")
                .short('a')
                .conflicts_with("as_identity")
                .action(ArgAction::SetTrue)
                .help("If this flag is present, no identity will be provided when querying the database")
        )
}
/// Resolves CLI arguments into a `Connection` for the target database.
///
/// If `database` is not already a literal address it is resolved through the
/// SpacetimeDB DNS endpoint. Unless `--anon-identity` is set, an auth header
/// for the chosen identity is attached.
pub(crate) async fn parse_req(mut config: Config, args: &ArgMatches) -> Result<Connection, anyhow::Error> {
    let database = args.get_one::<String>("database").unwrap();
    let as_identity = args.get_one::<String>("as_identity");
    let anon_identity = args.get_flag("anon_identity");
    let auth_header = get_auth_header(&mut config, anon_identity, as_identity.map(|x| x.as_str()))
        .await
        .map(|x| x.0);
    // Literal addresses pass through untouched; names go through DNS.
    let address = if is_address(database) {
        database.clone()
    } else {
        match spacetime_dns(&config, database).await? {
            DnsLookupResponse::Success { address, .. } => address,
            DnsLookupResponse::Failure { domain } => {
                anyhow::bail!("The dns resolution of {} failed.", domain);
            }
        }
    };
    Ok(Connection {
        host: config.get_host_url(),
        address,
        database: database.to_string(),
        auth_header,
    })
}
/// Executes `sql` against the prepared request `builder` and pretty-prints
/// the first statement result as a psql-style table.
pub(crate) async fn run_sql(builder: RequestBuilder, sql: &str) -> Result<(), anyhow::Error> {
    let res = builder.body(sql.to_owned()).send().await?;
    let res = res.error_for_status()?;
    // Propagate body-read and JSON-parse failures instead of panicking on
    // `unwrap()` — the server response is not under our control.
    let body = res.bytes().await?;
    let stmt_result_json: Vec<StmtResultJson> = serde_json::from_slice(&body)?;
    let stmt_result = stmt_result_json.first().context("Invalid sql query.")?;
    let StmtResultJson { schema, rows } = &stmt_result;
    let mut builder = Builder::default();
    // Column headers: the schema element's name, or "column N" as fallback.
    builder.set_columns(
        schema
            .elements
            .iter()
            .enumerate()
            .map(|(i, e)| e.name.clone().unwrap_or_else(|| format!("column {i}"))),
    );
    let typespace = Typespace::default();
    let ty = typespace.with_type(schema);
    for row in rows {
        let row = from_json_seed(row.get(), SeedWrapper(ty))?;
        builder.add_record(
            row.elements
                .iter()
                .zip(&schema.elements)
                .map(|(v, e)| satn::PsqlWrapper(ty.with(&e.algebraic_type).with_value(v))),
        );
    }
    let table = builder.build().with(Style::psql());
    println!("{}", table);
    Ok(())
}
/// Entry point for `spacetime sql`: runs a single query and prints the
/// resulting table.
pub async fn exec(config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let con = parse_req(config, args).await?;
    let query = args.get_one::<String>("query").unwrap();
    let api = ClientApi::new(con);
    run_sql(api.sql(), query).await?;
    Ok(())
}
+143
View File
@@ -0,0 +1,143 @@
use crate::config::Config;
use crate::util::spacetime_dns;
use clap::Arg;
use clap::ArgMatches;
use reqwest::StatusCode;
use spacetimedb_lib::name::{is_address, DnsLookupResponse};
use std::path::Path;
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
/// Builds the `tracelog` command tree (`get`, `stop`, `replay`).
pub fn cli() -> clap::Command {
    let base = clap::Command::new("tracelog")
        .about("Invokes commands related to tracelogs.")
        .args_conflicts_with_subcommands(true)
        .subcommand_required(true);
    base.subcommands(get_energy_subcommands())
}
/// Builds the `get`, `stop` and `replay` subcommands of `tracelog`.
// NOTE(review): the name says "energy" but these are tracelog subcommands —
// likely a copy-paste from the energy command module; renaming would also
// require updating the caller in `cli()` above.
fn get_energy_subcommands() -> Vec<clap::Command> {
    vec![
        clap::Command::new("get")
            .about("Retrieve a copy of the trace log for a database, if tracing is turned on")
            .arg(Arg::new("database").required(true))
            .arg(Arg::new("outputfile").required(true).help("path to write tracelog to")),
        clap::Command::new("stop")
            .about("Stop tracing on a given database")
            .arg(Arg::new("database").required(true)),
        clap::Command::new("replay")
            .about("Replay a tracelog on a temporary fresh DB instance on the server")
            .arg(Arg::new("tracefile").required(true).help("path to read tracelog from")),
    ]
}
/// Dispatches a `tracelog` subcommand by name to its handler.
async fn exec_subcommand(config: Config, cmd: &str, args: &ArgMatches) -> Result<(), anyhow::Error> {
    match cmd {
        "get" => exec_get(config, args).await,
        "stop" => exec_stop(config, args).await,
        "replay" => exec_replay(config, args).await,
        unknown => Err(anyhow::anyhow!("Invalid subcommand: {}", unknown)),
    }
}
/// Entry point for `spacetime tracelog`. A subcommand is guaranteed by
/// `subcommand_required(true)` in `cli()`, hence the `expect`.
pub async fn exec(config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let (cmd, subcommand_args) = args.subcommand().expect("Subcommand required");
    exec_subcommand(config, cmd, subcommand_args).await
}
/// Entry point for `tracelog replay`: uploads a local tracelog file to the
/// server, which replays it on a fresh temporary database and returns the
/// result as JSON.
///
/// A missing or unreadable tracefile is reported on stdout (best-effort, not
/// an error); HTTP transport failures propagate as errors.
pub async fn exec_replay(config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let tracefile = args.get_one::<String>("tracefile").unwrap();
    let body = match std::fs::read(tracefile) {
        Ok(bytes) => bytes,
        Err(e) => {
            println!("Could not read tracefile: {}", e);
            return Ok(());
        }
    };
    let res = reqwest::Client::new()
        .post(format!("{}/tracelog/replay", config.get_host_url()))
        .body(body)
        .send()
        .await?;
    if res.status() != StatusCode::OK {
        println!("Unable to replay log: {}", res.status())
    } else {
        let bytes = res.bytes().await?;
        let json = String::from_utf8(bytes.to_vec()).unwrap();
        println!("{}", json);
    }
    Ok(())
}
/// Entry point for `tracelog stop`: asks the server to stop tracing the given
/// database (specified by address or DNS name).
pub async fn exec_stop(config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let database = args.get_one::<String>("database").unwrap();
    let address = if is_address(database.as_str()) {
        database.clone()
    } else {
        match spacetime_dns(&config, database).await? {
            DnsLookupResponse::Success { domain: _, address } => address,
            DnsLookupResponse::Failure { domain } => {
                return Err(anyhow::anyhow!("The dns resolution of {} failed.", domain));
            }
        }
    };
    let client = reqwest::Client::new();
    let res = client
        .post(format!("{}/tracelog/database/{}/stop", config.get_host_url(), address))
        .send()
        .await?;
    // Inspect the status directly. The previous `error_for_status()?` call
    // returned early on any 4xx/5xx, which made the NOT_FOUND and non-OK
    // branches below unreachable dead code.
    if res.status() == StatusCode::NOT_FOUND {
        println!("Could not find database {}", address);
        return Ok(());
    }
    if res.status() != StatusCode::OK {
        println!("Error while stopping tracelog for database {}", address);
        return Ok(());
    }
    println!("Stopped tracing on: {}", address);
    Ok(())
}
/// Entry point for `tracelog get`: downloads the trace log of a database and
/// writes it to `outputfile`.
pub async fn exec_get(config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let database = args.get_one::<String>("database").unwrap();
    let address = if is_address(database.as_str()) {
        database.clone()
    } else {
        match spacetime_dns(&config, database).await? {
            DnsLookupResponse::Success { domain: _, address } => address,
            DnsLookupResponse::Failure { domain } => {
                return Err(anyhow::anyhow!("The dns resolution of {} failed.", domain));
            }
        }
    };
    let client = reqwest::Client::new();
    let res = client
        .get(format!("{}/tracelog/database/{}", config.get_host_url(), address))
        .send()
        .await?;
    // Inspect the status directly. The previous `error_for_status()?` call
    // returned early on any 4xx/5xx, which made the NOT_FOUND and non-OK
    // branches below unreachable dead code.
    if res.status() == StatusCode::NOT_FOUND {
        println!("Could not find tracelog for database {}", address);
        return Ok(());
    }
    if res.status() != StatusCode::OK {
        println!("Error while retrieving tracelog for database {}", address);
        return Ok(());
    }
    let output_filename = args.get_one::<String>("outputfile").unwrap();
    let content = res.bytes().await?;
    {
        let mut output_file = File::create(Path::new(output_filename)).await?;
        // `Bytes` derefs to `[u8]`; avoid the intermediate `to_vec()` copy.
        output_file.write_all(&content).await?;
        output_file.flush().await?;
    }
    println!("Wrote {} bytes to {}", content.len(), output_filename);
    Ok(())
}
+32
View File
@@ -0,0 +1,32 @@
use clap::{Arg, ArgAction::SetTrue, ArgMatches};
const CLI_VERSION: &str = env!("CARGO_PKG_VERSION");
use crate::config::Config;
/// Builds the `spacetime version` subcommand definition.
pub fn cli() -> clap::Command {
    // `--cli`/`-c` restricts output to just the CLI's own version string.
    let cli_only = Arg::new("cli")
        .short('c')
        .long("cli")
        .action(SetTrue)
        .help("Prints only the CLI version");
    clap::Command::new("version")
        .about("Print the version of the command line tool")
        .after_help("Run `spacetime help version` for more detailed information.\n")
        .arg(cli_only)
}
/// Handles `spacetime version`: prints only the CLI version when `--cli` is
/// set, otherwise prints both the tool and spacetimedb-lib versions.
pub async fn exec(_config: Config, args: &ArgMatches) -> Result<(), anyhow::Error> {
    let cli_only = args.get_flag("cli");
    if cli_only {
        println!("{}", CLI_VERSION);
    } else {
        println!(
            "spacetimedb tool version {}; spacetimedb-lib version {};",
            CLI_VERSION,
            spacetimedb_lib::version::spacetimedb_lib_version()
        );
    }
    Ok(())
}
+128
View File
@@ -0,0 +1,128 @@
use std::path::{Path, PathBuf};
use std::{fs, io};
use anyhow::Context;
use cargo_metadata::Message;
use duct::cmd;
pub(crate) fn build(project_path: &Path, skip_clippy: bool, build_debug: bool) -> anyhow::Result<PathBuf> {
// Make sure that we have the wasm target installed (ok to run if its already installed)
cmd!("rustup", "target", "add", "wasm32-unknown-unknown").run()?;
let reader = if build_debug {
cmd!(
"cargo",
"--config=net.git-fetch-with-cli=true",
"build",
"--target=wasm32-unknown-unknown",
"--message-format=json-render-diagnostics"
)
} else {
cmd!(
"cargo",
"--config=net.git-fetch-with-cli=true",
"build",
"--target=wasm32-unknown-unknown",
"--release",
"--message-format=json-render-diagnostics"
)
}
.dir(project_path)
.reader()?;
let mut artifact = None;
for message in Message::parse_stream(io::BufReader::new(reader)) {
if let Ok(Message::CompilerArtifact(art)) = message {
artifact = Some(art);
} else if let Err(error) = message {
return Err(anyhow::anyhow!(error));
}
}
let artifact = artifact.context("no artifact found?")?;
let artifact = artifact.filenames.into_iter().next().context("no wasm?")?;
if !skip_clippy {
let clippy_conf_dir = tempfile::tempdir()?;
fs::write(clippy_conf_dir.path().join("clippy.toml"), CLIPPY_TOML)?;
println!("checking crate with spacetimedb's clippy configuration");
// TODO: should we pass --no-deps here? leaving it out could be valuable if a module is split
// into multiple crates, but without it it lints on proc-macro crates too
let out = cmd!(
"cargo",
"--config=net.git-fetch-with-cli=true",
"clippy",
"--target=wasm32-unknown-unknown",
// TODO: pass -q? otherwise it might be too busy
// "-q",
"--",
"--no-deps",
"-Aclippy::all",
"-Dclippy::disallowed-macros"
)
.dir(project_path)
.env("CLIPPY_DISABLE_DOCS_LINKS", "1")
.env("CLIPPY_CONF_DIR", clippy_conf_dir.path())
.unchecked()
.run()?;
anyhow::ensure!(out.status.success(), "clippy found a lint error");
}
check_for_wasm_bindgen(artifact.as_ref())?;
Ok(artifact.into())
}
// Clippy configuration written into a temporary `clippy.toml` when linting a
// module build: disallows the std printing/debug macros, which have no effect
// inside a spacetimedb wasm module.
const CLIPPY_TOML: &str = r#"
disallowed-macros = [
    { path = "std::print", reason = "print!() has no effect inside a spacetimedb module; use log::info!() instead" },
    { path = "std::println", reason = "println!() has no effect inside a spacetimedb module; use log::info!() instead" },
    { path = "std::eprint", reason = "eprint!() has no effect inside a spacetimedb module; use log::warn!() instead" },
    { path = "std::eprintln", reason = "eprintln!() has no effect inside a spacetimedb module; use log::warn!() instead" },
    { path = "std::dbg", reason = "std::dbg!() has no effect inside a spacetimedb module; import spacetime's dbg!() macro instead" },
]
"#;
/// Errors if the compiled wasm artifact appears to link against wasm-bindgen,
/// which does not work inside SpacetimeDB.
///
/// Unreadable or undecodable files are silently accepted — if something is
/// wrong with the artifact, it will fail elsewhere with a better error.
fn check_for_wasm_bindgen(artifact: &Path) -> anyhow::Result<()> {
    let file = match fs::File::open(artifact) {
        Ok(file) => file,
        Err(_) => return Ok(()),
    };
    let module = match wasmbin::Module::decode_from(&mut io::BufReader::new(file)) {
        Ok(module) => module,
        Err(_) => return Ok(()),
    };
    if !has_wasm_bindgen(&module) {
        return Ok(());
    }
    anyhow::bail!(
        "wasm-bindgen detected.\n\
        \n\
        It seems like either you or a crate in your dependency tree is depending on\n\
        wasm-bindgen. wasm-bindgen is only for webassembly modules that target the web\n\
        platform, and will not work in the context of SpacetimeDB.\n\
        \n\
        To find the offending dependency, run `cargo tree -i wasm-bindgen`. Try checking\n\
        its cargo features for 'js' or 'web' or 'wasm-bindgen' to see if there's a way\n\
        to disable it."
    )
}
// Symbol-name prefix used to recognize wasm-bindgen imports/exports.
const WBINDGEN_PREFIX: &str = "__wbindgen";
/// Returns `true` if the module's import or export section contains any
/// symbol whose module or name starts with the wasm-bindgen prefix.
///
/// Sections that are absent or whose contents cannot be read are treated as
/// containing no wasm-bindgen symbols.
fn has_wasm_bindgen(module: &wasmbin::Module) -> bool {
    let import_is_wbindgen = |import: &wasmbin::sections::Import| {
        import.path.module.starts_with(WBINDGEN_PREFIX) || import.path.name.starts_with(WBINDGEN_PREFIX)
    };
    let export_is_wbindgen = |export: &wasmbin::sections::Export| export.name.starts_with(WBINDGEN_PREFIX);
    let imports_hit = module
        .find_std_section::<wasmbin::sections::payload::Import>()
        .and_then(|section| section.try_contents().ok())
        .map_or(false, |imports| imports.iter().any(import_is_wbindgen));
    let exports_hit = module
        .find_std_section::<wasmbin::sections::payload::Export>()
        .and_then(|section| section.try_contents().ok())
        .map_or(false, |exports| exports.iter().any(export_is_wbindgen));
    imports_hit || exports_hit
}
+208
View File
@@ -0,0 +1,208 @@
use std::process::exit;
use clap::{
error::{ContextKind, ContextValue},
ArgMatches, Command,
};
use serde::Deserialize;
use spacetimedb_lib::name::{DnsLookupResponse, RegisterTldResult, ReverseDNSResponse};
use spacetimedb_lib::Identity;
use crate::config::{Config, IdentityConfig};
/// Parses `command`'s arguments and returns the chosen subcommand's name and
/// its matches.
///
/// Does not return on parse failure: a missing subcommand prints help for the
/// deepest subcommand the user reached (or the whole command) and exits with
/// status 0; any other error exits via clap's own error handling.
pub fn match_subcommand_or_exit(command: Command) -> (String, ArgMatches) {
    // Keep a clone so we can still print help after `try_get_matches`
    // consumes `command`.
    let mut command_clone = command.clone();
    let result = command.try_get_matches();
    let args = match result {
        Ok(args) => args,
        Err(e) => match e.kind() {
            clap::error::ErrorKind::MissingSubcommand => {
                // Pull the partially-entered command path out of the error
                // context; the last whitespace-separated token is the deepest
                // subcommand the user typed.
                let cmd = e
                    .context()
                    .find_map(|c| match c {
                        (ContextKind::InvalidSubcommand, ContextValue::String(cmd)) => {
                            Some(cmd.split_ascii_whitespace().last().unwrap())
                        }
                        _ => None,
                    })
                    .expect("The InvalidArg to be present in the context of UnknownArgument.");
                // Prefer the specific subcommand's help; fall back to the
                // top-level help if it isn't a known subcommand.
                match command_clone.find_subcommand_mut(cmd) {
                    Some(subcmd) => subcmd.print_help().unwrap(),
                    None => command_clone.print_help().unwrap(),
                }
                exit(0);
            }
            _ => {
                // Let clap print its usual diagnostic and exit non-zero.
                e.exit();
            }
        },
    };
    // A subcommand must be present here, or parsing would have errored above.
    let (cmd, subcommand_args) = args.subcommand().unwrap();
    (cmd.to_string(), subcommand_args.clone())
}
/// Converts a name to a database address.
///
/// # Errors
/// Fails if the request cannot be sent, the server responds with a non-success
/// status, the body cannot be read, or the body is not valid JSON.
pub async fn spacetime_dns(config: &Config, domain: &str) -> Result<DnsLookupResponse, anyhow::Error> {
    let client = reqwest::Client::new();
    let url = format!("{}/database/dns/{}", config.get_host_url(), domain);
    let res = client.get(url).send().await?.error_for_status()?;
    // Propagate body/parse failures as errors instead of panicking.
    let bytes = res.bytes().await?;
    Ok(serde_json::from_slice(&bytes[..])?)
}
/// Registers the given top level domain to the given identity. If None is passed in as identity, the default
/// identity will be looked up in the config and it will be used instead. Returns Ok() if the
/// domain is successfully registered, returns Err otherwise.
///
/// # Errors
/// Fails if the request cannot be sent, the server responds with a non-success
/// status, or the response body is not valid JSON.
pub async fn spacetime_register_tld(
    config: &mut Config,
    tld: &str,
    identity: Option<&String>,
) -> Result<RegisterTldResult, anyhow::Error> {
    // `anon_identity` is false, so `get_auth_header` either yields credentials
    // or terminates the process; `None` is unreachable here.
    let (auth_header, _) = get_auth_header(config, false, identity.map(|x| x.as_str()))
        .await
        .expect("get_auth_header returns Some when anon_identity is false");
    // TODO(jdetter): Fix URL encoding on specifying this domain
    let builder = reqwest::Client::new()
        .get(format!("{}/database/register_tld?tld={}", config.get_host_url(), tld).as_str())
        .header("Authorization", auth_header);
    let res = builder.send().await?.error_for_status()?;
    // Propagate body/parse failures as errors instead of panicking.
    let bytes = res.bytes().await?;
    Ok(serde_json::from_slice(&bytes[..])?)
}
/// Returns all known names for the given address.
///
/// # Errors
/// Fails if the request cannot be sent, the server responds with a non-success
/// status, the body cannot be read, or the body is not valid JSON.
pub async fn spacetime_reverse_dns(config: &Config, address: &str) -> Result<ReverseDNSResponse, anyhow::Error> {
    let client = reqwest::Client::new();
    let url = format!("{}/database/reverse_dns/{}", config.get_host_url(), address);
    let res = client.get(url).send().await?.error_for_status()?;
    // Propagate body/parse failures as errors instead of panicking.
    let bytes = res.bytes().await?;
    Ok(serde_json::from_slice(&bytes[..])?)
}
/// JSON payload returned by the host's `/identity` endpoint.
#[derive(Deserialize)]
pub struct IdentityTokenJson {
    /// Hex-encoded identity (parsed later via `Identity::from_hex`).
    pub identity: String,
    /// Token presented as `Basic base64("token:<token>")` in auth headers.
    pub token: String,
}
/// How `init_default` obtained the default identity.
pub enum InitDefaultResultType {
    /// An existing default identity config was reused.
    Existing,
    /// A new identity was created, added to the config, and saved.
    SavedNew,
}
/// Result of `init_default`: the identity config in effect plus whether it
/// was pre-existing or freshly created.
pub struct InitDefaultResult {
    pub identity_config: IdentityConfig,
    pub result_type: InitDefaultResultType,
}
/// Returns the default identity config, creating (and saving) a new identity
/// via the host's `/identity` endpoint when no default exists yet.
///
/// # Errors
/// Fails if an identity named `nickname` already exists, or if the HTTP
/// request or response parsing fails.
pub async fn init_default(config: &mut Config, nickname: Option<String>) -> Result<InitDefaultResult, anyhow::Error> {
    if config.name_exists(nickname.as_ref().unwrap_or(&String::new())) {
        return Err(anyhow::anyhow!("An identity with that name already exists."));
    }
    // Reuse the existing default identity when there is one — no need to
    // construct an HTTP client or hit the server in that case.
    if let Some(identity_config) = config.get_default_identity_config() {
        return Ok(InitDefaultResult {
            identity_config: identity_config.clone(),
            result_type: InitDefaultResultType::Existing,
        });
    }
    let client = reqwest::Client::new();
    let builder = client.post(format!("{}/identity", config.get_host_url()));
    let res = builder.send().await?;
    let res = res.error_for_status()?;
    let body = res.bytes().await?;
    let body = String::from_utf8(body.to_vec())?;
    let identity_token: IdentityTokenJson = serde_json::from_str(&body)?;
    let identity = identity_token.identity.clone();
    let identity_config = IdentityConfig {
        identity: identity_token.identity,
        token: identity_token.token,
        nickname: nickname.clone(),
    };
    config.identity_configs_mut().push(identity_config.clone());
    // Only promote the new identity to default if none was set meanwhile.
    if config.default_identity().is_none() {
        config.set_default_identity(identity);
    }
    config.save();
    Ok(InitDefaultResult {
        identity_config,
        result_type: InitDefaultResultType::SavedNew,
    })
}
/// Selects an `identity_config` from the config file.
///
/// When `identity` is given, returns that identity's config or an error if it
/// is unknown. When `identity` is `None`, returns the default identity's
/// config, creating and saving a new default identity if none exists.
pub async fn select_identity_config(
    config: &mut Config,
    identity: Option<&str>,
) -> Result<IdentityConfig, anyhow::Error> {
    match identity {
        Some(identity) => config
            .get_identity_config_by_identity(identity)
            .cloned()
            .ok_or_else(|| anyhow::anyhow!("Missing identity credentials for identity: {}", identity)),
        None => Ok(init_default(config, None).await?.identity_config),
    }
}
/// Gets the `auth_header` for a request to the server depending on how you want
/// to identify yourself. If you specify `anon_identity = true` then no
/// `auth_header` is returned. If you specify an identity this function will try
/// to find the identity in the config file. If no identity can be found, the
/// program will `exit(1)`. If you do not specify an identity this function will
/// either get the default identity if one exists or create and save a new
/// default identity returning the one that was just created.
///
/// # Arguments
/// * `config` - The config file reference
/// * `anon_identity` - Whether or not to just use an anonymous identity (no identity)
/// * `identity` - The identity to try to lookup, which is typically provided from the command line
pub async fn get_auth_header(
    config: &mut Config,
    anon_identity: bool,
    identity: Option<&str>,
) -> Option<(String, Identity)> {
    // Anonymous requests carry no Authorization header at all.
    if anon_identity {
        return None;
    }
    let identity_config = match select_identity_config(config, identity).await {
        Ok(ic) => ic,
        Err(err) => {
            println!("{}", err);
            exit(1);
        }
    };
    // The current form is: Authorization: Basic base64("token:<token>")
    let auth_header = format!("Basic {}", base64::encode(format!("token:{}", identity_config.token)));
    match Identity::from_hex(identity_config.identity.clone()) {
        Ok(identity) => Some((auth_header, identity)),
        Err(_) => {
            println!(
                "Local config contains invalid malformed identity: {}",
                identity_config.identity
            );
            exit(1)
        }
    }
}
/// Accepted URL schemes (presumably validated against host URLs — confirm at call sites).
pub const VALID_PROTOCOLS: [&str; 2] = ["http", "https"];
+44
View File
@@ -0,0 +1,44 @@
use std::collections::HashMap;
use std::path::Path;
/// Snapshot-tests the C# client codegen against the prebuilt `rust_wasm_test`
/// wasm module; skips silently when the artifact has not been built.
#[test]
fn test_codegen_output() {
    use spacetimedb_cli::generate;

    let path = Path::new(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../target/wasm32-unknown-unknown/release/rust_wasm_test.wasm"
    ));
    if !path.exists() {
        eprintln!("rust_wasm_test isn't built, skipping");
        return;
    }
    println!("{}", path.to_str().unwrap());

    let generated = generate::generate(path, generate::Language::Csharp, "SpacetimeDB").unwrap();
    let outfiles: HashMap<_, _> = generated.into_iter().collect();
    insta::with_settings!({ sort_maps => true }, {
        insta::assert_toml_snapshot!(outfiles);
    });
}
/// Snapshot-tests the TypeScript client codegen against the prebuilt
/// `rust_wasm_test` wasm module; skips silently when the artifact is absent.
#[test]
fn test_typescript_codegen_output() {
    use spacetimedb_cli::generate;

    let path = Path::new(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../target/wasm32-unknown-unknown/release/rust_wasm_test.wasm"
    ));
    if !path.exists() {
        eprintln!("rust_wasm_test isn't built, skipping");
        return;
    }
    println!("{}", path.to_str().unwrap());

    let generated = generate::generate(path, generate::Language::TypeScript, "SpacetimeDB").unwrap();
    let outfiles: HashMap<_, _> = generated.into_iter().collect();
    insta::with_settings!({ sort_maps => true }, {
        insta::assert_toml_snapshot!(outfiles);
    });
}
@@ -0,0 +1,832 @@
---
source: crates/cli/tests/codegen.rs
expression: outfiles
---
"AddPlayerReducer.cs" = '''
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN RUST INSTEAD.
using System;
using ClientApi;
using Newtonsoft.Json.Linq;
namespace SpacetimeDB
{
public static partial class Reducer
{
public static event Action<ClientApi.Event.Types.Status, Identity, string> OnAddPlayerEvent;
public static void AddPlayer(string name)
{
var _argArray = new object[] {name};
var _message = new NetworkManager.ReducerCallRequest {
fn = "add_player",
args = _argArray,
};
Newtonsoft.Json.JsonSerializerSettings _settings = new Newtonsoft.Json.JsonSerializerSettings
{
Converters = { new SpacetimeDB.SomeWrapperConverter(), new SpacetimeDB.EnumWrapperConverter() },
ContractResolver = new SpacetimeDB.JsonContractResolver(),
};
NetworkManager.instance.InternalCallReducer(Newtonsoft.Json.JsonConvert.SerializeObject(_message, _settings));
}
[ReducerCallback(FunctionName = "add_player")]
public static void OnAddPlayer(ClientApi.Event dbEvent)
{
if(OnAddPlayerEvent != null)
{
var args = dbEvent.FunctionCall.CallInfo.AddPlayerArgs;
OnAddPlayerEvent(dbEvent.Status, Identity.From(dbEvent.CallerIdentity.ToByteArray())
,(string)args.Name
);
}
}
[DeserializeEvent(FunctionName = "add_player")]
public static void AddPlayerDeserializeEventArgs(ClientApi.Event dbEvent)
{
var args = new AddPlayerArgsStruct();
var bsatnBytes = dbEvent.FunctionCall.ArgBytes;
using var ms = new System.IO.MemoryStream();
ms.SetLength(bsatnBytes.Length);
bsatnBytes.CopyTo(ms.GetBuffer(), 0);
ms.Position = 0;
using var reader = new System.IO.BinaryReader(ms);
var args_0_value = SpacetimeDB.SATS.AlgebraicValue.Deserialize(SpacetimeDB.SATS.AlgebraicType.CreatePrimitiveType(SpacetimeDB.SATS.BuiltinType.Type.String), reader);
args.Name = args_0_value.AsString();
var argsGeneric = new ReducerArgs();
argsGeneric.AddPlayerArgs = args;
dbEvent.FunctionCall.CallInfo = new ReducerEvent(ReducerType.AddPlayer, dbEvent.Message, dbEvent.Status, argsGeneric);
}
}
public struct AddPlayerArgsStruct
{
public string Name;
}
public partial struct ReducerArgs
{
[System.Runtime.InteropServices.FieldOffset(0)]
public AddPlayerArgsStruct AddPlayerArgs;
}
public partial class ReducerEvent
{
public AddPlayerArgsStruct AddPlayerArgs
{
get
{
if (Reducer != ReducerType.AddPlayer) throw new SpacetimeDB.ReducerMismatchException(Reducer.ToString(), "AddPlayer");
return Args.AddPlayerArgs;
}
}
}
}
'''
"NamespaceTestC.cs" = '''
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN RUST INSTEAD.
using System;
namespace SpacetimeDB
{
public partial class Namespace
{
public partial class Types
{
public enum TestC
{
Foo,
Bar,
}
}
public static SpacetimeDB.SATS.AlgebraicType GetAlgebraicTypeForTestC()
{
return SpacetimeDB.SATS.AlgebraicType.CreateSumType(new System.Collections.Generic.List<SpacetimeDB.SATS.SumTypeVariant>
{
new SpacetimeDB.SATS.SumTypeVariant("Foo", SpacetimeDB.SATS.AlgebraicType.CreateProductType(new SpacetimeDB.SATS.ProductTypeElement[]
{
})),
new SpacetimeDB.SATS.SumTypeVariant("Bar", SpacetimeDB.SATS.AlgebraicType.CreateProductType(new SpacetimeDB.SATS.ProductTypeElement[]
{
})),
});
}
public static Namespace.Types.TestC IntoTestC(SpacetimeDB.SATS.AlgebraicValue value)
{
var sumValue = value.AsSumValue();
switch(sumValue.tag)
{
case 0:
return Namespace.Types.TestC.Foo;
case 1:
return Namespace.Types.TestC.Bar;
}
return default;
}
}
}
'''
"ReducerEvent.cs" = '''
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN RUST INSTEAD.
using System;
using ClientApi;
using Newtonsoft.Json.Linq;
namespace SpacetimeDB
{
public enum ReducerType
{
Update,
AddPlayer,
RepeatingTest,
Test,
}
public partial class ReducerEvent
{
public ReducerType Reducer { get; private set; }
public string ErrMessage { get; private set; }
public ClientApi.Event.Types.Status Status { get; private set; }
private ReducerArgs Args;
public ReducerEvent(ReducerType reducer, string errMessage, ClientApi.Event.Types.Status status, ReducerArgs args)
{
Reducer = reducer;
ErrMessage = errMessage;
Status = status;
Args = args;
}
public object[] GetArgsAsObjectArray()
{
switch (Reducer)
{
case ReducerType.Update:
{
var args = UpdateArgs;
return new object[] {
};
}
case ReducerType.AddPlayer:
{
var args = AddPlayerArgs;
return new object[] {
args.Name,
};
}
case ReducerType.RepeatingTest:
{
var args = RepeatingTestArgs;
return new object[] {
args.PrevTime,
};
}
case ReducerType.Test:
{
var args = TestArgs;
return new object[] {
args.Arg,
args.Arg2,
args.Arg3,
};
}
default: throw new System.Exception($"Unhandled reducer case: {Reducer}. Please run SpacetimeDB code generator");
}
}
}
[System.Runtime.InteropServices.StructLayout(System.Runtime.InteropServices.LayoutKind.Explicit)]
public partial struct ReducerArgs { }
}
'''
"RepeatingTestReducer.cs" = '''
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN RUST INSTEAD.
using System;
using ClientApi;
using Newtonsoft.Json.Linq;
namespace SpacetimeDB
{
public static partial class Reducer
{
public static event Action<ClientApi.Event.Types.Status, Identity, ulong> OnRepeatingTestEvent;
public static void RepeatingTest(ulong prevTime)
{
var _argArray = new object[] {prevTime};
var _message = new NetworkManager.ReducerCallRequest {
fn = "repeating_test",
args = _argArray,
};
Newtonsoft.Json.JsonSerializerSettings _settings = new Newtonsoft.Json.JsonSerializerSettings
{
Converters = { new SpacetimeDB.SomeWrapperConverter(), new SpacetimeDB.EnumWrapperConverter() },
ContractResolver = new SpacetimeDB.JsonContractResolver(),
};
NetworkManager.instance.InternalCallReducer(Newtonsoft.Json.JsonConvert.SerializeObject(_message, _settings));
}
[ReducerCallback(FunctionName = "repeating_test")]
public static void OnRepeatingTest(ClientApi.Event dbEvent)
{
if(OnRepeatingTestEvent != null)
{
var args = dbEvent.FunctionCall.CallInfo.RepeatingTestArgs;
OnRepeatingTestEvent(dbEvent.Status, Identity.From(dbEvent.CallerIdentity.ToByteArray())
,(ulong)args.PrevTime
);
}
}
[DeserializeEvent(FunctionName = "repeating_test")]
public static void RepeatingTestDeserializeEventArgs(ClientApi.Event dbEvent)
{
var args = new RepeatingTestArgsStruct();
var bsatnBytes = dbEvent.FunctionCall.ArgBytes;
using var ms = new System.IO.MemoryStream();
ms.SetLength(bsatnBytes.Length);
bsatnBytes.CopyTo(ms.GetBuffer(), 0);
ms.Position = 0;
using var reader = new System.IO.BinaryReader(ms);
var args_0_value = SpacetimeDB.SATS.AlgebraicValue.Deserialize(SpacetimeDB.SATS.AlgebraicType.CreatePrimitiveType(SpacetimeDB.SATS.BuiltinType.Type.U64), reader);
args.PrevTime = args_0_value.AsU64();
var argsGeneric = new ReducerArgs();
argsGeneric.RepeatingTestArgs = args;
dbEvent.FunctionCall.CallInfo = new ReducerEvent(ReducerType.RepeatingTest, dbEvent.Message, dbEvent.Status, argsGeneric);
}
}
public struct RepeatingTestArgsStruct
{
public ulong PrevTime;
}
public partial struct ReducerArgs
{
[System.Runtime.InteropServices.FieldOffset(0)]
public RepeatingTestArgsStruct RepeatingTestArgs;
}
public partial class ReducerEvent
{
public RepeatingTestArgsStruct RepeatingTestArgs
{
get
{
if (Reducer != ReducerType.RepeatingTest) throw new SpacetimeDB.ReducerMismatchException(Reducer.ToString(), "RepeatingTest");
return Args.RepeatingTestArgs;
}
}
}
}
'''
"TestA.cs" = '''
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN RUST INSTEAD.
using System;
namespace SpacetimeDB
{
public partial class TestA : IDatabaseTable
{
[Newtonsoft.Json.JsonProperty("x")]
public uint X;
[Newtonsoft.Json.JsonProperty("y")]
public uint Y;
[Newtonsoft.Json.JsonProperty("z")]
public string Z;
public static SpacetimeDB.SATS.AlgebraicType GetAlgebraicType()
{
return SpacetimeDB.SATS.AlgebraicType.CreateProductType(new SpacetimeDB.SATS.ProductTypeElement[]
{
new SpacetimeDB.SATS.ProductTypeElement("x", SpacetimeDB.SATS.AlgebraicType.CreatePrimitiveType(SpacetimeDB.SATS.BuiltinType.Type.U32)),
new SpacetimeDB.SATS.ProductTypeElement("y", SpacetimeDB.SATS.AlgebraicType.CreatePrimitiveType(SpacetimeDB.SATS.BuiltinType.Type.U32)),
new SpacetimeDB.SATS.ProductTypeElement("z", SpacetimeDB.SATS.AlgebraicType.CreatePrimitiveType(SpacetimeDB.SATS.BuiltinType.Type.String)),
});
}
public static explicit operator TestA(SpacetimeDB.SATS.AlgebraicValue value)
{
if (value == null) {
return null;
}
var productValue = value.AsProductValue();
return new TestA
{
X = productValue.elements[0].AsU32(),
Y = productValue.elements[1].AsU32(),
Z = productValue.elements[2].AsString(),
};
}
public static System.Collections.Generic.IEnumerable<TestA> Iter()
{
foreach(var entry in NetworkManager.clientDB.GetEntries("TestA"))
{
yield return (TestA)entry.Item2;
}
}
public static int Count()
{
return NetworkManager.clientDB.Count("TestA");
}
public static System.Collections.Generic.IEnumerable<TestA> FilterByX(uint value)
{
foreach(var entry in NetworkManager.clientDB.GetEntries("TestA"))
{
var productValue = entry.Item1.AsProductValue();
var compareValue = (uint)productValue.elements[0].AsU32();
if (compareValue == value) {
yield return (TestA)entry.Item2;
}
}
}
public static System.Collections.Generic.IEnumerable<TestA> FilterByY(uint value)
{
foreach(var entry in NetworkManager.clientDB.GetEntries("TestA"))
{
var productValue = entry.Item1.AsProductValue();
var compareValue = (uint)productValue.elements[1].AsU32();
if (compareValue == value) {
yield return (TestA)entry.Item2;
}
}
}
public static System.Collections.Generic.IEnumerable<TestA> FilterByZ(string value)
{
foreach(var entry in NetworkManager.clientDB.GetEntries("TestA"))
{
var productValue = entry.Item1.AsProductValue();
var compareValue = (string)productValue.elements[2].AsString();
if (compareValue == value) {
yield return (TestA)entry.Item2;
}
}
}
public static bool ComparePrimaryKey(SpacetimeDB.SATS.AlgebraicType t, SpacetimeDB.SATS.AlgebraicValue _v1, SpacetimeDB.SATS.AlgebraicValue _v2)
{
return false;
}
public delegate void InsertEventHandler(TestA insertedValue, SpacetimeDB.ReducerEvent dbEvent);
public delegate void UpdateEventHandler(TestA oldValue, TestA newValue, SpacetimeDB.ReducerEvent dbEvent);
public delegate void DeleteEventHandler(TestA deletedValue, SpacetimeDB.ReducerEvent dbEvent);
public delegate void RowUpdateEventHandler(NetworkManager.TableOp op, TestA oldValue, TestA newValue, SpacetimeDB.ReducerEvent dbEvent);
public static event InsertEventHandler OnInsert;
public static event UpdateEventHandler OnUpdate;
public static event DeleteEventHandler OnBeforeDelete;
public static event DeleteEventHandler OnDelete;
public static event RowUpdateEventHandler OnRowUpdate;
public static void OnInsertEvent(object newValue, ClientApi.Event dbEvent)
{
OnInsert?.Invoke((TestA)newValue,dbEvent?.FunctionCall.CallInfo);
}
public static void OnUpdateEvent(object oldValue, object newValue, ClientApi.Event dbEvent)
{
OnUpdate?.Invoke((TestA)oldValue,(TestA)newValue,dbEvent?.FunctionCall.CallInfo);
}
public static void OnBeforeDeleteEvent(object oldValue, ClientApi.Event dbEvent)
{
OnBeforeDelete?.Invoke((TestA)oldValue,dbEvent?.FunctionCall.CallInfo);
}
public static void OnDeleteEvent(object oldValue, ClientApi.Event dbEvent)
{
OnDelete?.Invoke((TestA)oldValue,dbEvent?.FunctionCall.CallInfo);
}
public static void OnRowUpdateEvent(NetworkManager.TableOp op, object oldValue, object newValue, ClientApi.Event dbEvent)
{
OnRowUpdate?.Invoke(op, (TestA)oldValue,(TestA)newValue,dbEvent?.FunctionCall.CallInfo);
}
}
}
'''
"TestB.cs" = '''
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN RUST INSTEAD.
using System;
namespace SpacetimeDB
{
public partial class TestB : IDatabaseTable
{
[Newtonsoft.Json.JsonProperty("foo")]
public string Foo;
public static SpacetimeDB.SATS.AlgebraicType GetAlgebraicType()
{
return SpacetimeDB.SATS.AlgebraicType.CreateProductType(new SpacetimeDB.SATS.ProductTypeElement[]
{
new SpacetimeDB.SATS.ProductTypeElement("foo", SpacetimeDB.SATS.AlgebraicType.CreatePrimitiveType(SpacetimeDB.SATS.BuiltinType.Type.String)),
});
}
public static explicit operator TestB(SpacetimeDB.SATS.AlgebraicValue value)
{
if (value == null) {
return null;
}
var productValue = value.AsProductValue();
return new TestB
{
Foo = productValue.elements[0].AsString(),
};
}
}
}
'''
"TestD.cs" = '''
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN RUST INSTEAD.
using System;
namespace SpacetimeDB
{
public partial class TestD : IDatabaseTable
{
[Newtonsoft.Json.JsonProperty("test_c")]
[SpacetimeDB.Some]
public SpacetimeDB.Namespace.Types.TestC TestC;
public static SpacetimeDB.SATS.AlgebraicType GetAlgebraicType()
{
return SpacetimeDB.SATS.AlgebraicType.CreateProductType(new SpacetimeDB.SATS.ProductTypeElement[]
{
new SpacetimeDB.SATS.ProductTypeElement("test_c", SpacetimeDB.SATS.AlgebraicType.CreateSumType(new System.Collections.Generic.List<SpacetimeDB.SATS.SumTypeVariant>
{
new SpacetimeDB.SATS.SumTypeVariant("some", SpacetimeDB.Namespace.GetAlgebraicTypeForTestC()),
new SpacetimeDB.SATS.SumTypeVariant("none", SpacetimeDB.SATS.AlgebraicType.CreateProductType(new SpacetimeDB.SATS.ProductTypeElement[]
{
})),
})),
});
}
public static explicit operator TestD(SpacetimeDB.SATS.AlgebraicValue value)
{
if (value == null) {
return null;
}
var productValue = value.AsProductValue();
return new TestD
{
TestC = SpacetimeDB.Namespace.IntoTestC(productValue.elements[0].AsSumValue().tag == 1 ? null : productValue.elements[0].AsSumValue().value),
};
}
public static System.Collections.Generic.IEnumerable<TestD> Iter()
{
foreach(var entry in NetworkManager.clientDB.GetEntries("TestD"))
{
yield return (TestD)entry.Item2;
}
}
public static int Count()
{
return NetworkManager.clientDB.Count("TestD");
}
public static bool ComparePrimaryKey(SpacetimeDB.SATS.AlgebraicType t, SpacetimeDB.SATS.AlgebraicValue _v1, SpacetimeDB.SATS.AlgebraicValue _v2)
{
return false;
}
public delegate void InsertEventHandler(TestD insertedValue, SpacetimeDB.ReducerEvent dbEvent);
public delegate void UpdateEventHandler(TestD oldValue, TestD newValue, SpacetimeDB.ReducerEvent dbEvent);
public delegate void DeleteEventHandler(TestD deletedValue, SpacetimeDB.ReducerEvent dbEvent);
public delegate void RowUpdateEventHandler(NetworkManager.TableOp op, TestD oldValue, TestD newValue, SpacetimeDB.ReducerEvent dbEvent);
public static event InsertEventHandler OnInsert;
public static event UpdateEventHandler OnUpdate;
public static event DeleteEventHandler OnBeforeDelete;
public static event DeleteEventHandler OnDelete;
public static event RowUpdateEventHandler OnRowUpdate;
public static void OnInsertEvent(object newValue, ClientApi.Event dbEvent)
{
OnInsert?.Invoke((TestD)newValue,dbEvent?.FunctionCall.CallInfo);
}
public static void OnUpdateEvent(object oldValue, object newValue, ClientApi.Event dbEvent)
{
OnUpdate?.Invoke((TestD)oldValue,(TestD)newValue,dbEvent?.FunctionCall.CallInfo);
}
public static void OnBeforeDeleteEvent(object oldValue, ClientApi.Event dbEvent)
{
OnBeforeDelete?.Invoke((TestD)oldValue,dbEvent?.FunctionCall.CallInfo);
}
public static void OnDeleteEvent(object oldValue, ClientApi.Event dbEvent)
{
OnDelete?.Invoke((TestD)oldValue,dbEvent?.FunctionCall.CallInfo);
}
public static void OnRowUpdateEvent(NetworkManager.TableOp op, object oldValue, object newValue, ClientApi.Event dbEvent)
{
OnRowUpdate?.Invoke(op, (TestD)oldValue,(TestD)newValue,dbEvent?.FunctionCall.CallInfo);
}
}
}
'''
"TestE.cs" = '''
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN RUST INSTEAD.
using System;
namespace SpacetimeDB
{
public partial class TestE : IDatabaseTable
{
[Newtonsoft.Json.JsonProperty("id")]
public ulong Id;
[Newtonsoft.Json.JsonProperty("name")]
public string Name;
public static SpacetimeDB.SATS.AlgebraicType GetAlgebraicType()
{
return SpacetimeDB.SATS.AlgebraicType.CreateProductType(new SpacetimeDB.SATS.ProductTypeElement[]
{
new SpacetimeDB.SATS.ProductTypeElement("id", SpacetimeDB.SATS.AlgebraicType.CreatePrimitiveType(SpacetimeDB.SATS.BuiltinType.Type.U64)),
new SpacetimeDB.SATS.ProductTypeElement("name", SpacetimeDB.SATS.AlgebraicType.CreatePrimitiveType(SpacetimeDB.SATS.BuiltinType.Type.String)),
});
}
public static explicit operator TestE(SpacetimeDB.SATS.AlgebraicValue value)
{
if (value == null) {
return null;
}
var productValue = value.AsProductValue();
return new TestE
{
Id = productValue.elements[0].AsU64(),
Name = productValue.elements[1].AsString(),
};
}
public static System.Collections.Generic.IEnumerable<TestE> Iter()
{
foreach(var entry in NetworkManager.clientDB.GetEntries("TestE"))
{
yield return (TestE)entry.Item2;
}
}
public static int Count()
{
return NetworkManager.clientDB.Count("TestE");
}
public static TestE FilterById(ulong value)
{
foreach(var entry in NetworkManager.clientDB.GetEntries("TestE"))
{
var productValue = entry.Item1.AsProductValue();
var compareValue = (ulong)productValue.elements[0].AsU64();
if (compareValue == value) {
return (TestE)entry.Item2;
}
}
return null;
}
public static System.Collections.Generic.IEnumerable<TestE> FilterByName(string value)
{
foreach(var entry in NetworkManager.clientDB.GetEntries("TestE"))
{
var productValue = entry.Item1.AsProductValue();
var compareValue = (string)productValue.elements[1].AsString();
if (compareValue == value) {
yield return (TestE)entry.Item2;
}
}
}
public static bool ComparePrimaryKey(SpacetimeDB.SATS.AlgebraicType t, SpacetimeDB.SATS.AlgebraicValue v1, SpacetimeDB.SATS.AlgebraicValue v2)
{
var primaryColumnValue1 = v1.AsProductValue().elements[0];
var primaryColumnValue2 = v2.AsProductValue().elements[0];
return SpacetimeDB.SATS.AlgebraicValue.Compare(t.product.elements[0].algebraicType, primaryColumnValue1, primaryColumnValue2);
}
public delegate void InsertEventHandler(TestE insertedValue, SpacetimeDB.ReducerEvent dbEvent);
public delegate void UpdateEventHandler(TestE oldValue, TestE newValue, SpacetimeDB.ReducerEvent dbEvent);
public delegate void DeleteEventHandler(TestE deletedValue, SpacetimeDB.ReducerEvent dbEvent);
public delegate void RowUpdateEventHandler(NetworkManager.TableOp op, TestE oldValue, TestE newValue, SpacetimeDB.ReducerEvent dbEvent);
public static event InsertEventHandler OnInsert;
public static event UpdateEventHandler OnUpdate;
public static event DeleteEventHandler OnBeforeDelete;
public static event DeleteEventHandler OnDelete;
public static event RowUpdateEventHandler OnRowUpdate;
public static void OnInsertEvent(object newValue, ClientApi.Event dbEvent)
{
OnInsert?.Invoke((TestE)newValue,dbEvent?.FunctionCall.CallInfo);
}
public static void OnUpdateEvent(object oldValue, object newValue, ClientApi.Event dbEvent)
{
OnUpdate?.Invoke((TestE)oldValue,(TestE)newValue,dbEvent?.FunctionCall.CallInfo);
}
public static void OnBeforeDeleteEvent(object oldValue, ClientApi.Event dbEvent)
{
OnBeforeDelete?.Invoke((TestE)oldValue,dbEvent?.FunctionCall.CallInfo);
}
public static void OnDeleteEvent(object oldValue, ClientApi.Event dbEvent)
{
OnDelete?.Invoke((TestE)oldValue,dbEvent?.FunctionCall.CallInfo);
}
public static void OnRowUpdateEvent(NetworkManager.TableOp op, object oldValue, object newValue, ClientApi.Event dbEvent)
{
OnRowUpdate?.Invoke(op, (TestE)oldValue,(TestE)newValue,dbEvent?.FunctionCall.CallInfo);
}
}
}
'''
"TestReducer.cs" = '''
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN RUST INSTEAD.
using System;
using ClientApi;
using Newtonsoft.Json.Linq;
namespace SpacetimeDB
{
public static partial class Reducer
{
public static event Action<ClientApi.Event.Types.Status, Identity, SpacetimeDB.TestA, SpacetimeDB.TestB, SpacetimeDB.Namespace.Types.TestC> OnTestEvent;
public static void Test(SpacetimeDB.TestA arg, SpacetimeDB.TestB arg2, SpacetimeDB.Namespace.Types.TestC arg3)
{
var _argArray = new object[] {arg, arg2, new EnumWrapper<SpacetimeDB.Namespace.Types.TestC>(arg3)};
var _message = new NetworkManager.ReducerCallRequest {
fn = "test",
args = _argArray,
};
Newtonsoft.Json.JsonSerializerSettings _settings = new Newtonsoft.Json.JsonSerializerSettings
{
Converters = { new SpacetimeDB.SomeWrapperConverter(), new SpacetimeDB.EnumWrapperConverter() },
ContractResolver = new SpacetimeDB.JsonContractResolver(),
};
NetworkManager.instance.InternalCallReducer(Newtonsoft.Json.JsonConvert.SerializeObject(_message, _settings));
}
[ReducerCallback(FunctionName = "test")]
public static void OnTest(ClientApi.Event dbEvent)
{
if(OnTestEvent != null)
{
var args = dbEvent.FunctionCall.CallInfo.TestArgs;
OnTestEvent(dbEvent.Status, Identity.From(dbEvent.CallerIdentity.ToByteArray())
,(SpacetimeDB.TestA)args.Arg
,(SpacetimeDB.TestB)args.Arg2
,(SpacetimeDB.Namespace.Types.TestC)args.Arg3
);
}
}
[DeserializeEvent(FunctionName = "test")]
public static void TestDeserializeEventArgs(ClientApi.Event dbEvent)
{
var args = new TestArgsStruct();
var bsatnBytes = dbEvent.FunctionCall.ArgBytes;
using var ms = new System.IO.MemoryStream();
ms.SetLength(bsatnBytes.Length);
bsatnBytes.CopyTo(ms.GetBuffer(), 0);
ms.Position = 0;
using var reader = new System.IO.BinaryReader(ms);
var args_0_value = SpacetimeDB.SATS.AlgebraicValue.Deserialize(SpacetimeDB.TestA.GetAlgebraicType(), reader);
args.Arg = (SpacetimeDB.TestA)(args_0_value);
var args_1_value = SpacetimeDB.SATS.AlgebraicValue.Deserialize(SpacetimeDB.TestB.GetAlgebraicType(), reader);
args.Arg2 = (SpacetimeDB.TestB)(args_1_value);
var args_2_value = SpacetimeDB.SATS.AlgebraicValue.Deserialize(SpacetimeDB.Namespace.GetAlgebraicTypeForTestC(), reader);
args.Arg3 = SpacetimeDB.Namespace.IntoTestC(args_2_value);
var argsGeneric = new ReducerArgs();
argsGeneric.TestArgs = args;
dbEvent.FunctionCall.CallInfo = new ReducerEvent(ReducerType.Test, dbEvent.Message, dbEvent.Status, argsGeneric);
}
}
public struct TestArgsStruct
{
public SpacetimeDB.TestA Arg;
public SpacetimeDB.TestB Arg2;
public SpacetimeDB.Namespace.Types.TestC Arg3;
}
public partial struct ReducerArgs
{
[System.Runtime.InteropServices.FieldOffset(0)]
public TestArgsStruct TestArgs;
}
public partial class ReducerEvent
{
public TestArgsStruct TestArgs
{
get
{
if (Reducer != ReducerType.Test) throw new SpacetimeDB.ReducerMismatchException(Reducer.ToString(), "Test");
return Args.TestArgs;
}
}
}
}
'''
"UpdateReducer.cs" = '''
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN RUST INSTEAD.
using System;
using ClientApi;
using Newtonsoft.Json.Linq;
namespace SpacetimeDB
{
public static partial class Reducer
{
public static event Action<ClientApi.Event.Types.Status, Identity> OnUpdateEvent;
public static void Update()
{
var _argArray = new object[] {};
var _message = new NetworkManager.ReducerCallRequest {
fn = "__update__",
args = _argArray,
};
Newtonsoft.Json.JsonSerializerSettings _settings = new Newtonsoft.Json.JsonSerializerSettings
{
Converters = { new SpacetimeDB.SomeWrapperConverter(), new SpacetimeDB.EnumWrapperConverter() },
ContractResolver = new SpacetimeDB.JsonContractResolver(),
};
NetworkManager.instance.InternalCallReducer(Newtonsoft.Json.JsonConvert.SerializeObject(_message, _settings));
}
[ReducerCallback(FunctionName = "__update__")]
public static void OnUpdate(ClientApi.Event dbEvent)
{
if(OnUpdateEvent != null)
{
var args = dbEvent.FunctionCall.CallInfo.UpdateArgs;
OnUpdateEvent(dbEvent.Status, Identity.From(dbEvent.CallerIdentity.ToByteArray())
);
}
}
[DeserializeEvent(FunctionName = "__update__")]
public static void UpdateDeserializeEventArgs(ClientApi.Event dbEvent)
{
var args = new UpdateArgsStruct();
var bsatnBytes = dbEvent.FunctionCall.ArgBytes;
using var ms = new System.IO.MemoryStream();
ms.SetLength(bsatnBytes.Length);
bsatnBytes.CopyTo(ms.GetBuffer(), 0);
ms.Position = 0;
using var reader = new System.IO.BinaryReader(ms);
var argsGeneric = new ReducerArgs();
argsGeneric.UpdateArgs = args;
dbEvent.FunctionCall.CallInfo = new ReducerEvent(ReducerType.Update, dbEvent.Message, dbEvent.Status, argsGeneric);
}
}
public struct UpdateArgsStruct
{
}
public partial struct ReducerArgs
{
[System.Runtime.InteropServices.FieldOffset(0)]
public UpdateArgsStruct UpdateArgs;
}
public partial class ReducerEvent
{
public UpdateArgsStruct UpdateArgs
{
get
{
if (Reducer != ReducerType.Update) throw new SpacetimeDB.ReducerMismatchException(Reducer.ToString(), "Update");
return Args.UpdateArgs;
}
}
}
}
'''
@@ -0,0 +1,584 @@
---
source: crates/cli/tests/codegen.rs
expression: outfiles
---
"add_player_reducer.ts" = '''
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN RUST INSTEAD.
// @ts-ignore
import { __SPACETIMEDB__, AlgebraicType, ProductType, BuiltinType, ProductTypeElement, IDatabaseTable, AlgebraicValue } from "@clockworklabs/spacetimedb-sdk";
export class AddPlayerReducer
{
public static call(name: string)
{
if (__SPACETIMEDB__.spacetimeDBClient) {
__SPACETIMEDB__.spacetimeDBClient.call("add_player", [name]);
}
}
public static deserializeArgs(rawArgs: any[]): any[] {
let nameType = AlgebraicType.createPrimitiveType(BuiltinType.Type.String);
let nameValue = AlgebraicValue.deserialize(nameType, rawArgs[0])
let name = nameValue.asString();
return [name];
}
public static on(callback: (status: string, identity: string, reducerArgs: any[]) => void)
{
if (__SPACETIMEDB__.spacetimeDBClient) {
__SPACETIMEDB__.spacetimeDBClient.on("reducer:AddPlayer", callback);
}
}
}
__SPACETIMEDB__.reducers.set("AddPlayer", AddPlayerReducer);
if (__SPACETIMEDB__.spacetimeDBClient) {
__SPACETIMEDB__.spacetimeDBClient.registerReducer("AddPlayer", AddPlayerReducer);
}
export default AddPlayerReducer
'''
"namespace_test_c.ts" = '''
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN RUST INSTEAD.
// @ts-ignore
import { __SPACETIMEDB__, AlgebraicType, SumTypeVariant, BuiltinType, AlgebraicValue } from "@clockworklabs/spacetimedb-sdk";
export namespace Namespace.testC {
export function getAlgebraicType(): AlgebraicType {
return AlgebraicType.createSumType([
new SumTypeVariant("Foo", AlgebraicType.createProductType([
])),
new SumTypeVariant("Bar", AlgebraicType.createProductType([
])),
]);
}
export function serialize(value: Namespace.testC): object {
const result: {[key: string]: any} = {};
result[value.tag] = [];
return result;
}
export type Foo = { tag: "Foo"; value: undefined };
export type Bar = { tag: "Bar"; value: undefined };
export function fromValue(value: AlgebraicValue): Namespace.testC {
let sumValue = value.asSumValue();
let tag = sumValue.tag;
let variant = Namespace.testC.getAlgebraicType().sum.variants[tag];
return { tag: variant.name, value: undefined } as Namespace.testC;
}
}
export type Namespace.testC = Namespace.testC.Foo | Namespace.testC.Bar;
export default Namespace.testC;
'''
"repeating_test_reducer.ts" = '''
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN RUST INSTEAD.
// @ts-ignore
import { __SPACETIMEDB__, AlgebraicType, ProductType, BuiltinType, ProductTypeElement, IDatabaseTable, AlgebraicValue } from "@clockworklabs/spacetimedb-sdk";
export class RepeatingTestReducer
{
public static call(prevTime: number)
{
if (__SPACETIMEDB__.spacetimeDBClient) {
__SPACETIMEDB__.spacetimeDBClient.call("repeating_test", [prevTime]);
}
}
public static deserializeArgs(rawArgs: any[]): any[] {
let prevTimeType = AlgebraicType.createPrimitiveType(BuiltinType.Type.U64);
let prevTimeValue = AlgebraicValue.deserialize(prevTimeType, rawArgs[0])
let prevTime = prevTimeValue.asNumber();
return [prevTime];
}
public static on(callback: (status: string, identity: string, reducerArgs: any[]) => void)
{
if (__SPACETIMEDB__.spacetimeDBClient) {
__SPACETIMEDB__.spacetimeDBClient.on("reducer:RepeatingTest", callback);
}
}
}
__SPACETIMEDB__.reducers.set("RepeatingTest", RepeatingTestReducer);
if (__SPACETIMEDB__.spacetimeDBClient) {
__SPACETIMEDB__.spacetimeDBClient.registerReducer("RepeatingTest", RepeatingTestReducer);
}
export default RepeatingTestReducer
'''
"test_a.ts" = '''
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN RUST INSTEAD.
// @ts-ignore
import { __SPACETIMEDB__, AlgebraicType, ProductType, BuiltinType, ProductTypeElement, SumType, SumTypeVariant, IDatabaseTable, AlgebraicValue } from "@clockworklabs/spacetimedb-sdk";
export class TestA extends IDatabaseTable
{
public static tableName = "TestA";
public x: number;
public y: number;
public z: string;
constructor(x: number, y: number, z: string) {
super();
this.x = x;
this.y = y;
this.z = z;
}
public static serialize(value: TestA): object {
return [
value.x, value.y, value.z
];
}
public static getAlgebraicType(): AlgebraicType
{
return AlgebraicType.createProductType([
new ProductTypeElement("x", AlgebraicType.createPrimitiveType(BuiltinType.Type.U32)),
new ProductTypeElement("y", AlgebraicType.createPrimitiveType(BuiltinType.Type.U32)),
new ProductTypeElement("z", AlgebraicType.createPrimitiveType(BuiltinType.Type.String)),
]);
}
public static fromValue(value: AlgebraicValue): TestA
{
let productValue = value.asProductValue();
let __x = productValue.elements[0].asNumber();
let __y = productValue.elements[1].asNumber();
let __z = productValue.elements[2].asString();
return new this(__x, __y, __z);
}
public static count(): number
{
return __SPACETIMEDB__.clientDB.getTable("TestA").count();
}
public static all(): TestA[]
{
return __SPACETIMEDB__.clientDB.getTable("TestA").getInstances() as unknown as TestA[];
}
public static filterByX(value: number): TestA[] | null
{
let result: TestA[] = [];
for(let entry of __SPACETIMEDB__.clientDB.getTable("TestA").getEntries())
{
var productValue = entry.asProductValue();
let compareValue = productValue.elements[0].asNumber() as number;
if (compareValue == value) {
result.push(TestA.fromValue(entry));
}
}
return result;
}
public static filterByY(value: number): TestA[] | null
{
let result: TestA[] = [];
for(let entry of __SPACETIMEDB__.clientDB.getTable("TestA").getEntries())
{
var productValue = entry.asProductValue();
let compareValue = productValue.elements[1].asNumber() as number;
if (compareValue == value) {
result.push(TestA.fromValue(entry));
}
}
return result;
}
public static filterByZ(value: string): TestA[] | null
{
let result: TestA[] = [];
for(let entry of __SPACETIMEDB__.clientDB.getTable("TestA").getEntries())
{
var productValue = entry.asProductValue();
let compareValue = productValue.elements[2].asString() as string;
if (compareValue == value) {
result.push(TestA.fromValue(entry));
}
}
return result;
}
public static onInsert(callback: (value: TestA) => void)
{
__SPACETIMEDB__.clientDB.getTable("TestA").onInsert(callback);
}
public static onUpdate(callback: (oldValue: TestA, newValue: TestA) => void)
{
__SPACETIMEDB__.clientDB.getTable("TestA").onUpdate(callback);
}
public static onDelete(callback: (value: TestA) => void)
{
__SPACETIMEDB__.clientDB.getTable("TestA").onDelete(callback);
}
public static removeOnInsert(callback: (value: TestA) => void)
{
__SPACETIMEDB__.clientDB.getTable("TestA").removeOnInsert(callback);
}
public static removeOnUpdate(callback: (oldValue: TestA, newValue: TestA) => void)
{
__SPACETIMEDB__.clientDB.getTable("TestA").removeOnUpdate(callback);
}
public static removeOnDelete(callback: (value: TestA) => void)
{
__SPACETIMEDB__.clientDB.getTable("TestA").removeOnDelete(callback);
}
}
export default TestA;
__SPACETIMEDB__.registerComponent("TestA", TestA);
'''
"test_b.ts" = '''
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN RUST INSTEAD.
// @ts-ignore
import { __SPACETIMEDB__, AlgebraicType, ProductType, BuiltinType, ProductTypeElement, SumType, SumTypeVariant, IDatabaseTable, AlgebraicValue } from "@clockworklabs/spacetimedb-sdk";
export class TestB extends IDatabaseTable
{
public static tableName = "TestB";
public foo: string;
constructor(foo: string) {
super();
this.foo = foo;
}
public static serialize(value: TestB): object {
return [
value.foo
];
}
public static getAlgebraicType(): AlgebraicType
{
return AlgebraicType.createProductType([
new ProductTypeElement("foo", AlgebraicType.createPrimitiveType(BuiltinType.Type.String)),
]);
}
public static fromValue(value: AlgebraicValue): TestB
{
let productValue = value.asProductValue();
let __foo = productValue.elements[0].asString();
return new this(__foo);
}
}
export default TestB;
__SPACETIMEDB__.registerComponent("TestB", TestB);
'''
"test_d.ts" = '''
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN RUST INSTEAD.
// @ts-ignore
import { __SPACETIMEDB__, AlgebraicType, ProductType, BuiltinType, ProductTypeElement, SumType, SumTypeVariant, IDatabaseTable, AlgebraicValue } from "@clockworklabs/spacetimedb-sdk";
// @ts-ignore
import { Namespace.TestC } from "./namespace.test_c";
export class TestD extends IDatabaseTable
{
public static tableName = "TestD";
public testC: Namespace.TestC | null;
constructor(testC: Namespace.TestC | null) {
super();
this.testC = testC;
}
public static serialize(value: TestD): object {
return [
value.testC ? { "some": Namespace.TestC.serialize(value.testC) } : { "none": [] }
];
}
public static getAlgebraicType(): AlgebraicType
{
return AlgebraicType.createProductType([
new ProductTypeElement("test_c", AlgebraicType.createSumType([
new SumTypeVariant("some", Namespace.TestC.getAlgebraicType()),
new SumTypeVariant("none", AlgebraicType.createProductType([
])),
])),
]);
}
public static fromValue(value: AlgebraicValue): TestD
{
let productValue = value.asProductValue();
let __test_c = function() { const value = productValue.elements[0].asSumValue().tag == 1 ? null : productValue.elements[0].asSumValue().value; return value ? Namespace.TestC.fromValue(value) : null; }();
return new this(__test_c);
}
public static count(): number
{
return __SPACETIMEDB__.clientDB.getTable("TestD").count();
}
public static all(): TestD[]
{
return __SPACETIMEDB__.clientDB.getTable("TestD").getInstances() as unknown as TestD[];
}
public static onInsert(callback: (value: TestD) => void)
{
__SPACETIMEDB__.clientDB.getTable("TestD").onInsert(callback);
}
public static onUpdate(callback: (oldValue: TestD, newValue: TestD) => void)
{
__SPACETIMEDB__.clientDB.getTable("TestD").onUpdate(callback);
}
public static onDelete(callback: (value: TestD) => void)
{
__SPACETIMEDB__.clientDB.getTable("TestD").onDelete(callback);
}
public static removeOnInsert(callback: (value: TestD) => void)
{
__SPACETIMEDB__.clientDB.getTable("TestD").removeOnInsert(callback);
}
public static removeOnUpdate(callback: (oldValue: TestD, newValue: TestD) => void)
{
__SPACETIMEDB__.clientDB.getTable("TestD").removeOnUpdate(callback);
}
public static removeOnDelete(callback: (value: TestD) => void)
{
__SPACETIMEDB__.clientDB.getTable("TestD").removeOnDelete(callback);
}
}
export default TestD;
__SPACETIMEDB__.registerComponent("TestD", TestD);
'''
"test_e.ts" = '''
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN RUST INSTEAD.
// @ts-ignore
import { __SPACETIMEDB__, AlgebraicType, ProductType, BuiltinType, ProductTypeElement, SumType, SumTypeVariant, IDatabaseTable, AlgebraicValue } from "@clockworklabs/spacetimedb-sdk";
export class TestE extends IDatabaseTable
{
public static tableName = "TestE";
public id: number;
public name: string;
constructor(id: number, name: string) {
super();
this.id = id;
this.name = name;
}
public static serialize(value: TestE): object {
return [
value.id, value.name
];
}
public static getAlgebraicType(): AlgebraicType
{
return AlgebraicType.createProductType([
new ProductTypeElement("id", AlgebraicType.createPrimitiveType(BuiltinType.Type.U64)),
new ProductTypeElement("name", AlgebraicType.createPrimitiveType(BuiltinType.Type.String)),
]);
}
public static fromValue(value: AlgebraicValue): TestE
{
let productValue = value.asProductValue();
let __id = productValue.elements[0].asNumber();
let __name = productValue.elements[1].asString();
return new this(__id, __name);
}
public static count(): number
{
return __SPACETIMEDB__.clientDB.getTable("TestE").count();
}
public static all(): TestE[]
{
return __SPACETIMEDB__.clientDB.getTable("TestE").getInstances() as unknown as TestE[];
}
public static filterById(value: number): TestE | null
{
for(let entry of __SPACETIMEDB__.clientDB.getTable("TestE").getEntries())
{
var productValue = entry.asProductValue();
let compareValue = productValue.elements[0].asNumber() as number;
if (compareValue == value) {
return TestE.fromValue(entry);
}
}
return null;
}
public static filterByName(value: string): TestE[] | null
{
let result: TestE[] = [];
for(let entry of __SPACETIMEDB__.clientDB.getTable("TestE").getEntries())
{
var productValue = entry.asProductValue();
let compareValue = productValue.elements[1].asString() as string;
if (compareValue == value) {
result.push(TestE.fromValue(entry));
}
}
return result;
}
public static onInsert(callback: (value: TestE) => void)
{
__SPACETIMEDB__.clientDB.getTable("TestE").onInsert(callback);
}
public static onUpdate(callback: (oldValue: TestE, newValue: TestE) => void)
{
__SPACETIMEDB__.clientDB.getTable("TestE").onUpdate(callback);
}
public static onDelete(callback: (value: TestE) => void)
{
__SPACETIMEDB__.clientDB.getTable("TestE").onDelete(callback);
}
public static removeOnInsert(callback: (value: TestE) => void)
{
__SPACETIMEDB__.clientDB.getTable("TestE").removeOnInsert(callback);
}
public static removeOnUpdate(callback: (oldValue: TestE, newValue: TestE) => void)
{
__SPACETIMEDB__.clientDB.getTable("TestE").removeOnUpdate(callback);
}
public static removeOnDelete(callback: (value: TestE) => void)
{
__SPACETIMEDB__.clientDB.getTable("TestE").removeOnDelete(callback);
}
}
export default TestE;
__SPACETIMEDB__.registerComponent("TestE", TestE);
'''
"test_reducer.ts" = '''
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN RUST INSTEAD.
// @ts-ignore
import { __SPACETIMEDB__, AlgebraicType, ProductType, BuiltinType, ProductTypeElement, IDatabaseTable, AlgebraicValue } from "@clockworklabs/spacetimedb-sdk";
// @ts-ignore
import { TestA } from "./test_a";
// @ts-ignore
import { TestB } from "./test_b";
// @ts-ignore
import { Namespace.TestC } from "./namespace.test_c";
export class TestReducer
{
public static call(arg: TestA, arg2: TestB, arg3: Namespace.TestC)
{
if (__SPACETIMEDB__.spacetimeDBClient) {
__SPACETIMEDB__.spacetimeDBClient.call("test", [TestA.serialize(arg), TestB.serialize(arg2), Namespace.TestC.serialize(arg3)]);
}
}
public static deserializeArgs(rawArgs: any[]): any[] {
let argType = TestA.getAlgebraicType();
let argValue = AlgebraicValue.deserialize(argType, rawArgs[0])
let arg = TestA.fromValue(argValue);
let arg2Type = TestB.getAlgebraicType();
let arg2Value = AlgebraicValue.deserialize(arg2Type, rawArgs[1])
let arg2 = TestB.fromValue(arg2Value);
let arg3Type = Namespace.TestC.getAlgebraicType();
let arg3Value = AlgebraicValue.deserialize(arg3Type, rawArgs[2])
let arg3 = Namespace.TestC.fromValue(arg3Value);
return [arg, arg2, arg3];
}
public static on(callback: (status: string, identity: string, reducerArgs: any[]) => void)
{
if (__SPACETIMEDB__.spacetimeDBClient) {
__SPACETIMEDB__.spacetimeDBClient.on("reducer:Test", callback);
}
}
}
__SPACETIMEDB__.reducers.set("Test", TestReducer);
if (__SPACETIMEDB__.spacetimeDBClient) {
__SPACETIMEDB__.spacetimeDBClient.registerReducer("Test", TestReducer);
}
export default TestReducer
'''
"update_reducer.ts" = '''
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN RUST INSTEAD.
// @ts-ignore
import { __SPACETIMEDB__, AlgebraicType, ProductType, BuiltinType, ProductTypeElement, IDatabaseTable, AlgebraicValue } from "@clockworklabs/spacetimedb-sdk";
export class UpdateReducer
{
public static call()
{
if (__SPACETIMEDB__.spacetimeDBClient) {
__SPACETIMEDB__.spacetimeDBClient.call("__update__", []);
}
}
public static deserializeArgs(): any[] {
return [];
}
public static on(callback: (status: string, identity: string, reducerArgs: any[]) => void)
{
if (__SPACETIMEDB__.spacetimeDBClient) {
__SPACETIMEDB__.spacetimeDBClient.on("reducer:Update", callback);
}
}
}
__SPACETIMEDB__.reducers.set("Update", UpdateReducer);
if (__SPACETIMEDB__.spacetimeDBClient) {
__SPACETIMEDB__.spacetimeDBClient.registerReducer("Update", UpdateReducer);
}
export default UpdateReducer
'''
@@ -0,0 +1,177 @@
%YAML 1.2
---
name: SQL (SpaceTimeDb)
file_extensions:
- sql
- stsql
scope: source.sql
variables:
ws: '[ \t]*'
wsnl: '([ \t\n])*'
int_literal: '[0-9](?:[0-9_])*'
# 00-23
time_hour: '[0-2][0-9]'
# 00-59
time_minute: '[0-5][0-9]'
# 00-58, 00-59, 00-60 (leap second rules)
time_second: '[0-6][0-9]'
# ( "+" / "-" ) time-hour ":" time-minute
time_numoffset: '[+-] {{time_hour}} : {{time_minute}}'
# time-hour ":" time-minute ":" time-second
partial_time: '{{time_hour}} : {{time_minute}} : {{time_second}}'
# partial-time time-offset
full_time: '{{partial_time}} {{time_numoffset}}'
# 2000
date_fullyear: '[0-9]{4}'
# 01-12
date_month: '[0-1][0-9]'
# 01-28, 01-29, 01-30, 01-31 based on month/year
date_mday: '[0-3][0-9]'
# date-fullyear "-" date-month "-" date-mday
full_date: '{{date_fullyear}} - {{date_month}} - {{date_mday}}'
# full-date T|%20 full-time
offset_date_time: '{{full_date}} [T ] {{full_time}}'
# full-date T|%20 partial-time
local_date_time: '{{full_date}} [T ] {{partial_time}}'
date_time: '{{offset_date_time}} | {{local_date_time}} | {{full_date}} | {{partial_time}}'
contexts:
# The prototype context is prepended to all contexts but those setting
# meta_include_prototype: false.
prototype:
- include: comments
main:
# The main context is the initial starting point of our syntax.
# Include other contexts from here (or specify them directly).
- include: keywords
- include: parens
- include: booleans
- include: numbers
- include: date-time
- include: strings
- include: ident
- match: '{{ws}}$'
# Don't show an incomplete line as invalid to avoid frequent red
# highlighting while typing.
pop: true
- match: '\w+|.'
scope: invalid.illegal.value.sql
pop: true
ident:
- name: variable.parameter.sql
match: \b([a-zA-Z0-9_]+)\b
keywords:
- name: keyword.operator.point.pgsql
match: \.
- name: keyword.operator.comma.pgsql
match: \,
- name: keyword.operator.semicolon.pgsql
match: \;
- name: keyword.operator.star.pgsql
match: \*
- match: '(?i)\b(select|from|insert|into|join|values|update|delete|create|where|order by)\b'
scope: keyword.control.sql
- match: '[!<>]?=|<>|<|>'
scope: keyword.operator.comparison.sql
- match: \+|\-|\*|/|\^
scope: keyword.operator.arithmetic.sql
- match: \b(and|in|not|or)\b
comment: keyword operators that evaluate to true or false
scope: keyword.operator.logical.sql
booleans:
- match: (?i)\b(true|false|null)\b
scope: constant.language.source.sql
numbers:
# Binary Float
- match: '\b({{int_literal}}(?:\.{{int_literal}})?)f\b'
scope: constant.numeric.source.sql
- match: '\b({{int_literal}}(?:\.{{int_literal}})?)\b'
scope: constant.numeric.source.sql
strings:
# Strings begin and end with quotes, and use backslashes as an escape
# character
- match: '"'
scope: punctuation.definition.string.begin.source.sql
push: double_quoted_string
- match: "'"
scope: punctuation.definition.string.begin.source.sql
push: single_quoted_string
double_quoted_string:
- meta_scope: string.quoted.double.source.sql
- match: '\\.'
scope: constant.character.escape.source.sql
- match: '"'
scope: punctuation.definition.string.end.source.sql
pop: true
single_quoted_string:
- meta_scope: string.quoted.double.source.sql
- match: '\\.'
scope: constant.character.escape.source.sql
- match: "'"
scope: punctuation.definition.string.end.source.sql
pop: true
date-time:
- match: "(d|t|dt)'"
scope: constant.other.datetime.begin.source.sql
push: single_quoted_date
- match: '(d|t|dt)"'
scope: constant.other.datetime.begin.source.sql
push: double_quoted_date
double_quoted_date:
- meta_scope: string.quoted.double.source.sql
- match: '(?x) {{date_time}}'
scope: constant.character.escape.source.sql
- match: '"'
scope: constant.other.datetime.end.source.sql
pop: true
single_quoted_date:
- meta_scope: string.quoted.double.source.sql
- match: '(?x) {{date_time}}'
scope: constant.character.escape.source.sql
- match: "'"
scope: constant.other.datetime.end.source.sql
pop: true
parens:
- match: \(
push: brackets
- match: \)
scope: invalid.illegal.stray-bracket-end
brackets:
- match: \)
pop: true
- include: parens
comments:
# Comments begin with a '--' and finish at the end of the line.
- match: '--'
scope: punctuation.definition.comment.source.sql
push:
# This is an anonymous context push for brevity.
- meta_scope: comment.line.double-slash.source.sql
- match: $\n?
pop: true
+11
View File
@@ -0,0 +1,11 @@
[package]
name = "spacetimedb-client-api-messages"
version = "0.1.0"
edition = "2021"
[dependencies]
strum = { version = "0.24.1", features = ["derive"] }
prost = "0.10"
[build-dependencies]
prost-build = { version = "0.10" }
+23
View File
@@ -0,0 +1,23 @@
use std::fs;
/// Build script: compiles every `.proto` file found in the `protobuf/`
/// directory into Rust sources via `prost-build`, and tells Cargo to re-run
/// whenever anything in that directory changes.
fn main() {
    let proto_dir = "protobuf";
    // Invalidate the build whenever the proto directory's contents change.
    println!("cargo:rerun-if-changed={proto_dir}");

    // Collect the paths of all files in the directory ending in `.proto`.
    let mut protos = Vec::new();
    for entry in fs::read_dir(proto_dir).unwrap() {
        let path = entry.unwrap().path();
        if path.extension() == Some("proto".as_ref()) {
            protos.push(path);
        }
    }

    prost_build::Config::new()
        // Generate `BTreeMap` (ordered) instead of `HashMap` for all map fields.
        .btree_map(["."])
        // Emit a single `protobuf.rs` that includes every generated module.
        .include_file("protobuf.rs")
        // Let `HostType` round-trip through lowercase strings via strum.
        .type_attribute(
            ".control_db.HostType",
            r#"#[derive(strum::EnumString, strum::AsRefStr)] #[strum(serialize_all = "lowercase")]"#,
        )
        .compile_protos(&protos, &[proto_dir])
        .unwrap();
}
@@ -0,0 +1,204 @@
syntax = "proto3";
package client_api;
////// Generic Message //////
// TODO: Theoretically this format could be replaced by AlgebraicValue/AlgebraicType
// but I don't think we want to do that yet.
// TODO: Split this up into ServerBound and ClientBound if there's no overlap
message Message {
oneof type {
// client -> database, request a reducer run.
FunctionCall functionCall = 1;
// database -> client, contained in `TransactionUpdate`, informs of changes to
// subscribed rows.
SubscriptionUpdate subscriptionUpdate = 2;
// database -> client, contained in `TransactionUpdate`, describes a reducer run.
Event event = 3;
// database -> client, upon reducer run.
TransactionUpdate transactionUpdate = 4;
// database -> client, after connecting, to inform client of its identity.
IdentityToken identityToken = 5;
// client -> database, register SQL queries on which to receive updates.
Subscribe subscribe = 6;
}
}
/// Received by client from database to inform the client of its identity and token.
///
/// Do you receive this if you provide a token when connecting, or only if you connect
/// anonymously? Find out and document - pgoldman 2023-06-06.
message IdentityToken {
bytes identity = 1;
string token = 2;
}
// TODO: Evaluate if it makes sense for this to also include the
// address of the database this is calling
/// Sent by client to database to request a reducer run.
///
/// `reducer` is the string name of a reducer to run.
///
/// `argBytes` is the arguments to the reducer, encoded as BSATN. (Possibly as SATN if
/// you're in the text API? Find out and document - pgoldman 2023-06-05)
///
/// SpacetimeDB models reducers as taking a single `AlgebraicValue` as an argument, which
/// generally will be a `ProductValue` containing all of the args (except the
/// `ReducerContext`, which is injected by the host, not provided in this API).
///
/// I don't think clients will ever receive a `FunctionCall` from the database, except
/// wrapped in an `Event` - pgoldman 2023-06-05.
message FunctionCall {
// TODO: Maybe this should be replaced with an int identifier for performance?
string reducer = 1;
bytes argBytes = 2;
}
/// Sent by client to database to register a set of queries, about which the client will
/// receive `TransactionUpdate`s.
///
/// `query_strings` is a sequence of strings, each of which is a SQL query.
///
/// After issuing a `Subscribe` message, the client will receive a single
/// `SubscriptionUpdate` message containing every current row of every table which matches
/// the subscribed queries. Then, after each reducer run which updates one or more
/// subscribed rows, the client will receive a `TransactionUpdate` containing the updates.
///
/// A `Subscribe` message sets or replaces the entire set of queries to which the client
/// is subscribed. If the client is previously subscribed to some set of queries `A`, and
/// then sends a `Subscribe` message to subscribe to a set `B`, afterwards, the client
/// will be subscribed to `B` but not `A`. In this case, the client will receive a
/// `SubscriptionUpdate` containing every existing row that matches `B`, even if some were
/// already in `A`.
///
/// I don't think clients will ever receive a `Subscribe` from the database - pgoldman
/// 2023-06-05.
message Subscribe {
repeated string query_strings = 1;
}
/// Part of a `TransactionUpdate` received by client from database upon a reducer run.
///
/// `timestamp` is the time when the reducer ran (started? finished? Find out and document
/// - pgoldman 2023-06-05), as microseconds since the Unix epoch.
///
/// `callerIdentity` is the identity token of the user who requested the reducer
/// run. (What if it's run by the database without a client request? Is
/// `callerIdentity` empty? Find out and document - pgoldman 2023-06-05).
///
/// `functionCall` contains the name of the reducer which ran and the arguments it
/// received.
///
/// `status` of `committed` means that the reducer ran successfully and its changes were
/// committed to the database. The rows altered in the database
/// will be recorded in the parent `TransactionUpdate`'s
/// `SubscriptionUpdate`.
///
/// `status` of `failed` means that the reducer panicked, and any changes it attempted to
/// make were rolled back.
///
/// `status` of `out_of_energy` means that the reducer was interrupted due to insufficient
/// energy/funds, and any changes it attempted to make were rolled
/// back. (Verify this - pgoldman 2023-06-05).
///
/// `message` what does it do? Find out and document - pgoldman 2023-06-05.
///
/// `energy_quanta_used` and `host_execution_duration_micros` seem self-explanatory; they
/// describe the amount of energy credits consumed by running the reducer, and how long it
/// took to run.
///
/// Do clients receive `TransactionUpdate`s / `Event`s for reducer runs which don't touch
/// any of the client's subscribed rows? Find out and document - pgoldman 2023-06-05.
///
/// Will a client ever receive an `Event` not wrapped in a `TransactionUpdate`? Possibly
/// when `status = failed` or `status = out_of_energy`? Find out and document - pgoldman
/// 2023-06-05.
message Event {
enum Status {
committed = 0;
failed = 1;
out_of_energy = 2;
}
uint64 timestamp = 1;
bytes callerIdentity = 2;
FunctionCall functionCall = 3;
// TODO: arguably these should go inside an EventStatus message
// since success doesn't have a message
Status status = 4;
string message = 5;
int64 energy_quanta_used = 6;
uint64 host_execution_duration_micros = 7;
}
// TODO: Maybe call this StateUpdate if it's implied to be a subscription update
/// Part of a `TransactionUpdate` received by client from database when subscribed rows in
/// a table are altered, or received alone after a `Subscribe` to initialize the
/// client's mirror of the database.
///
/// A single `SubscriptionUpdate` may contain `TableUpdate` messages for multiple
/// tables.
message SubscriptionUpdate {
    /// One entry per table with altered subscribed rows.
    repeated TableUpdate tableUpdates = 1;
}
/// Part of a `SubscriptionUpdate` received by client from database for alterations to a
/// single table.
///
/// `tableId` and `tableName` identify the table. Clients should use the `tableName`, as
/// it is a stable part of a module's API, whereas `tableId` may
/// or may not change between runs.
///
/// `tableRowOperations` are actual modified rows.
///
/// Can a client send `TableUpdate`s to the database to alter the database? I don't think
/// so, but would be good to know for sure - pgoldman 2023-06-05.
message TableUpdate {
    /// Numeric table id; not stable across runs — prefer `tableName`.
    uint32 tableId = 1;
    /// Stable table name from the module's API.
    string tableName = 2;
    /// The inserted/deleted rows; see `TableRowOperation`.
    repeated TableRowOperation tableRowOperations = 3;
}
/// Part of a `TableUpdate` received by client from database for alteration to a single
/// row of a table.
///
/// The table being altered is identified by the parent `TableUpdate`.
///
/// `op` of `DELETE` means that the row in question has been removed and is no longer
/// resident in the table.
///
/// `op` of `INSERT` means that the row in question has been either newly inserted or
/// updated, and is resident in the table.
///
/// `row_pk` is a hash of the row computed by the database. As of 2023-06-13, even for
/// tables with a `#[primarykey]` annotation on one column, the `row_pk` is not
/// that primary key.
///
/// `row` is the row itself, encoded as BSATN (or possibly SATN for the text api? Find out
/// and document - pgoldman 2023-06-05).
message TableRowOperation {
    enum OperationType {
        /// The row has been removed and is no longer resident in the table.
        DELETE = 0;
        /// The row was newly inserted or updated, and is resident in the table.
        INSERT = 1;
    }
    OperationType op = 1;
    bytes row_pk = 2;
    bytes row = 3;
}
/// Received by client from database upon a reducer run.
///
/// Do clients receive `TransactionUpdate`s for reducer runs which do not alter any of the
/// client's subscribed rows? Find out and document - pgoldman 2023-06-05.
///
/// `event` contains information about the reducer.
///
/// `subscriptionUpdate` contains changes to subscribed rows.
message TransactionUpdate {
    /// The reducer run that produced this update; see `Event`.
    Event event = 1;
    /// Alterations to the client's subscribed rows caused by `event`.
    SubscriptionUpdate subscriptionUpdate = 2;
}
+1
View File
@@ -0,0 +1 @@
// Splice in the protobuf bindings generated at build time into Cargo's $OUT_DIR
// (presumably by this crate's build script — TODO confirm in build.rs).
include!(concat!(env!("OUT_DIR"), "/protobuf.rs"));
+34
View File
@@ -0,0 +1,34 @@
# Manifest for the SpacetimeDB client API crate: the HTTP/WebSocket-facing layer.
[package]
name = "spacetimedb-client-api"
version = "0.4.1"
edition = "2021"
# Internal crate; never published to crates.io.
publish = false
[features]
# Forwards the `tracelogging` feature to spacetimedb-core.
tracelogging = ["spacetimedb-core/tracelogging"]
[dependencies]
# Sibling workspace crates.
spacetimedb-core = { path = "../core" }
tokio = { version = "1.2", features = ["full"] }
lazy_static = "1.4.0"
spacetimedb-lib = { path = "../lib" }
log = "0.4.4"
serde = "1.0.136"
serde_json = { version = "1.0", features = ["raw_value"] }
anyhow = { version = "1.0.57", features = ["backtrace"] }
regex = "1"
prometheus = "0.13.0"
email_address = "0.2.3"
tempdir = "0.3.7"
async-trait = "0.1.60"
chrono = { version = "0.4.23", features = ["serde"]}
rand = "0.8.5"
# HTTP stack: axum on hyper, with typed headers.
axum = { version = "0.6.16", features = ["headers", "tracing"] }
hyper = "0.14"
http = "0.2"
mime = "0.3.17"
tokio-stream = { version = "0.1.12", features = ["sync"] }
futures = "0.3"
bytes = "1"
bytestring = "1"
tokio-tungstenite = "0.18.0"
+189
View File
@@ -0,0 +1,189 @@
use std::time::Duration;
use axum::extract::rejection::TypedHeaderRejectionReason;
use axum::headers::{self, authorization};
use axum::response::IntoResponse;
use axum::TypedHeader;
use http::{request, HeaderValue, StatusCode};
use spacetimedb::auth::identity::{
decode_token, encode_token, DecodingKey, EncodingKey, JwtError, SpacetimeIdentityClaims,
};
use spacetimedb::host::EnergyDiff;
use spacetimedb::identity::Identity;
use crate::{log_and_500, ControlNodeDelegate};
// Yes, this is using basic auth. See the below issues.
// The current form is: Authorization: Basic base64("token:<token>")
// If/when the browser WebSocket API gains header support, this should be changed from
// basic auth, to a `Authorization: Bearer <token>` header
// https://github.com/whatwg/websockets/issues/16
// https://github.com/sta/websocket-sharp/pull/22
/// Client credentials: a token carried in the password slot of an HTTP Basic
/// `Authorization` header whose username is always the literal `"token"`.
pub struct SpacetimeCreds(authorization::Basic);
/// The fixed username half of the basic-auth pair; only the password (the token) varies.
const TOKEN_USERNAME: &str = "token";
impl authorization::Credentials for SpacetimeCreds {
    const SCHEME: &'static str = authorization::Basic::SCHEME;
    /// Parse the header value as HTTP Basic credentials, accepting it only when the
    /// username half is the literal `"token"`.
    fn decode(value: &HeaderValue) -> Option<Self> {
        authorization::Basic::decode(value)
            .filter(|basic| basic.username() == TOKEN_USERNAME)
            .map(Self)
    }
    /// Re-encode as a standard basic-auth header value.
    fn encode(&self) -> HeaderValue {
        self.0.encode()
    }
}
impl SpacetimeCreds {
    /// The raw token string (the password half of the basic-auth pair).
    pub fn token(&self) -> &str {
        self.0.password()
    }
    /// Verify this token against `public_key` and return its decoded claims.
    pub fn decode_token(&self, public_key: &DecodingKey) -> Result<SpacetimeIdentityClaims, JwtError> {
        decode_token(public_key, self.token()).map(|x| x.claims)
    }
    /// Mint a fresh token for `identity` signed with `private_key`, and wrap it in
    /// basic-auth credentials under the fixed `"token"` username.
    pub fn encode_token(private_key: &EncodingKey, identity: Identity) -> Result<Self, JwtError> {
        let token = encode_token(private_key, identity)?;
        let headers::Authorization(basic) = headers::Authorization::basic(TOKEN_USERNAME, &token);
        Ok(Self(basic))
    }
}
/// A verified identity together with the credentials that established it.
pub struct SpacetimeAuth {
    pub creds: SpacetimeCreds,
    pub identity: Identity,
}
/// Extractor for an *optional* `Authorization` header: `auth` is `None` when the
/// request carried no credentials at all.
pub struct SpacetimeAuthHeader {
    auth: Option<SpacetimeAuth>,
}
#[async_trait::async_trait]
impl<S: ControlNodeDelegate + Send + Sync> axum::extract::FromRequestParts<S> for SpacetimeAuthHeader {
    type Rejection = AuthorizationRejection;
    /// Extract and verify credentials from the `Authorization` header.
    ///
    /// A missing header is not an error (yields `auth: None`); a header that is present
    /// but malformed, has the wrong username, or carries a token that fails verification
    /// is rejected with [`AuthorizationRejection`].
    async fn from_request_parts(parts: &mut request::Parts, state: &S) -> Result<Self, Self::Rejection> {
        match axum::TypedHeader::from_request_parts(parts, state).await {
            Ok(axum::TypedHeader(headers::Authorization(creds @ SpacetimeCreds { .. }))) => {
                // Verify the token with the node's public key and pull out its claims.
                let claims = creds
                    .decode_token(state.public_key())
                    .map_err(|_| AuthorizationRejection)?;
                let auth = SpacetimeAuth {
                    creds,
                    identity: claims.hex_identity,
                };
                Ok(Self { auth: Some(auth) })
            }
            Err(e) => match e.reason() {
                // No Authorization header at all: treat as anonymous, not as a failure.
                TypedHeaderRejectionReason::Missing => Ok(Self { auth: None }),
                _ => Err(AuthorizationRejection),
            },
        }
    }
}
/// Rejection returned when an `Authorization` header is present but cannot be parsed
/// or its token fails verification.
pub struct AuthorizationRejection;
impl IntoResponse for AuthorizationRejection {
    fn into_response(self) -> axum::response::Response {
        let body = "Authorization is invalid - malformed token.";
        (StatusCode::BAD_REQUEST, body).into_response()
    }
}
impl SpacetimeAuth {
    /// Allocate a brand-new identity on the control node and mint a signed token for it.
    pub async fn alloc(ctx: &(impl ControlNodeDelegate + ?Sized)) -> axum::response::Result<Self> {
        let identity = ctx.alloc_spacetime_identity().await.map_err(log_and_500)?;
        let creds = SpacetimeCreds::encode_token(ctx.private_key(), identity).map_err(log_and_500)?;
        Ok(Self { creds, identity })
    }
    /// Convert into the pair of response headers that echo the caller's identity and
    /// token back to them.
    pub fn into_headers(self) -> (TypedHeader<SpacetimeIdentity>, TypedHeader<SpacetimeIdentityToken>) {
        let Self { creds, identity } = self;
        (
            TypedHeader(SpacetimeIdentity(identity)),
            TypedHeader(SpacetimeIdentityToken(creds)),
        )
    }
}
impl SpacetimeAuthHeader {
    /// The verified credentials, if the request supplied any.
    pub fn get(self) -> Option<SpacetimeAuth> {
        self.auth
    }
    /// Given an authorization header we will try to get the identity and token from the auth header (as JWT).
    /// If there is no JWT in the auth header we will create a new identity and token and return it.
    pub async fn get_or_create(
        self,
        ctx: &(impl ControlNodeDelegate + ?Sized),
    ) -> axum::response::Result<SpacetimeAuth> {
        if let Some(auth) = self.get() {
            return Ok(auth);
        }
        SpacetimeAuth::alloc(ctx).await
    }
}
/// Response header `spacetime-identity`: the caller's identity, hex-encoded.
pub struct SpacetimeIdentity(pub Identity);
impl headers::Header for SpacetimeIdentity {
    fn name() -> &'static http::HeaderName {
        static NAME: http::HeaderName = http::HeaderName::from_static("spacetime-identity");
        &NAME
    }
    /// Server-emitted header only: parsing incoming values is intentionally unsupported.
    fn decode<'i, I: Iterator<Item = &'i HeaderValue>>(_values: &mut I) -> Result<Self, headers::Error> {
        unimplemented!()
    }
    fn encode<E: Extend<HeaderValue>>(&self, values: &mut E) {
        // Hex output is plain ASCII, so this conversion should never fail.
        values.extend([self.0.to_hex().try_into().unwrap()])
    }
}
/// Response header `spacetime-identity-token`: the caller's signed token.
pub struct SpacetimeIdentityToken(pub SpacetimeCreds);
impl headers::Header for SpacetimeIdentityToken {
    fn name() -> &'static http::HeaderName {
        static NAME: http::HeaderName = http::HeaderName::from_static("spacetime-identity-token");
        &NAME
    }
    /// Server-emitted header only: parsing incoming values is intentionally unsupported.
    fn decode<'i, I: Iterator<Item = &'i HeaderValue>>(_values: &mut I) -> Result<Self, headers::Error> {
        unimplemented!()
    }
    fn encode<E: Extend<HeaderValue>>(&self, values: &mut E) {
        // Tokens are ASCII (JWT-style), so this conversion should never fail —
        // NOTE(review): confirm token alphabet if the token format ever changes.
        values.extend([self.0.token().try_into().unwrap()])
    }
}
/// Response header `spacetime-energy-used`: energy consumed by the call.
pub struct SpacetimeEnergyUsed(pub EnergyDiff);
impl headers::Header for SpacetimeEnergyUsed {
    fn name() -> &'static http::HeaderName {
        static NAME: http::HeaderName = http::HeaderName::from_static("spacetime-energy-used");
        &NAME
    }
    /// Server-emitted header only: parsing incoming values is intentionally unsupported.
    fn decode<'i, I: Iterator<Item = &'i HeaderValue>>(_values: &mut I) -> Result<Self, headers::Error> {
        unimplemented!()
    }
    fn encode<E: Extend<HeaderValue>>(&self, values: &mut E) {
        // `self.0 .0` reaches through the `EnergyDiff` newtype to its inner numeric value.
        values.extend([self.0 .0.into()])
    }
}
/// Response header `spacetime-execution-duration-micros`: wall-clock duration of the
/// call, truncated to whole microseconds.
pub struct SpacetimeExecutionDurationMicros(pub Duration);
impl headers::Header for SpacetimeExecutionDurationMicros {
    fn name() -> &'static http::HeaderName {
        static NAME: http::HeaderName = http::HeaderName::from_static("spacetime-execution-duration-micros");
        &NAME
    }
    /// Server-emitted header only: parsing incoming values is intentionally unsupported.
    fn decode<'i, I: Iterator<Item = &'i HeaderValue>>(_values: &mut I) -> Result<Self, headers::Error> {
        unimplemented!()
    }
    fn encode<E: Extend<HeaderValue>>(&self, values: &mut E) {
        // u128 -> u64 cast: safe for any realistic duration (u64 µs ≈ 584,000 years).
        values.extend([(self.0.as_micros() as u64).into()])
    }
}
+155
View File
@@ -0,0 +1,155 @@
use async_trait::async_trait;
use axum::extract::FromRef;
use http::StatusCode;
use spacetimedb::address::Address;
use spacetimedb::auth::identity::{DecodingKey, EncodingKey};
use spacetimedb::client::ClientActorIndex;
use spacetimedb::control_db::ControlDb;
use spacetimedb::database_instance_context_controller::DatabaseInstanceContextController;
use spacetimedb::hash::Hash;
use spacetimedb::host::HostController;
use spacetimedb::host::UpdateDatabaseResult;
use spacetimedb::identity::Identity;
use spacetimedb::messages::control_db::{Database, DatabaseInstance, HostType, Node};
use spacetimedb::messages::worker_db::DatabaseInstanceState;
use spacetimedb::module_host_context::ModuleHostContext;
use spacetimedb::object_db::ObjectDb;
use spacetimedb::sendgrid_controller::SendGridController;
use spacetimedb_lib::name::DomainName;
mod auth;
pub mod routes;
pub mod util;
use std::sync::Arc;
/// Everything a worker node's request handlers need: control-plane lookups plus
/// access to the database instances hosted on this node.
#[async_trait]
pub trait WorkerCtx: ControlNodeDelegate + ControlStateDelegate + Send + Sync {
    /// Snapshot this node's Prometheus metric families.
    fn gather_metrics(&self) -> Vec<prometheus::proto::MetricFamily>;
    fn database_instance_context_controller(&self) -> &DatabaseInstanceContextController;
    /// Assemble the context needed to spawn the module host for `instance_id` of `db`.
    async fn load_module_host_context(&self, db: Database, instance_id: u64) -> anyhow::Result<ModuleHostContext>;
    fn host_controller(&self) -> &Arc<HostController>;
    fn client_actor_index(&self) -> &ClientActorIndex;
}
/// Read-only view of the control plane's state: nodes, databases, and the
/// database instances that host them.
#[async_trait]
pub trait ControlStateDelegate: Send + Sync {
    /// This node's id within the control plane, if one has been assigned.
    async fn get_node_id(&self) -> Result<Option<u64>, anyhow::Error>;
    async fn get_node_by_id(&self, node_id: u64) -> spacetimedb::control_db::Result<Option<Node>>;
    async fn get_nodes(&self) -> spacetimedb::control_db::Result<Vec<Node>>;
    async fn get_database_instance_state(
        &self,
        database_instance_id: u64,
    ) -> Result<Option<DatabaseInstanceState>, anyhow::Error>;
    async fn get_database_by_id(&self, id: u64) -> spacetimedb::control_db::Result<Option<Database>>;
    async fn get_database_by_address(&self, address: &Address) -> spacetimedb::control_db::Result<Option<Database>>;
    async fn get_databases(&self) -> spacetimedb::control_db::Result<Vec<Database>>;
    async fn get_database_instance_by_id(&self, id: u64) -> spacetimedb::control_db::Result<Option<DatabaseInstance>>;
    async fn get_database_instances(&self) -> spacetimedb::control_db::Result<Vec<DatabaseInstance>>;
    /// The leader instance for `database_id`, if one is currently scheduled.
    async fn get_leader_database_instance_by_database(&self, database_id: u64) -> Option<DatabaseInstance>;
}
/// Mutating control-plane operations — creating, updating and deleting databases —
/// plus access to the backing stores.
#[async_trait]
pub trait ControlCtx: ControlNodeDelegate + Send + Sync {
    /// Register a new database at `address`, owned by `identity`, whose module
    /// program is the object stored at `program_bytes_address`.
    #[allow(clippy::too_many_arguments)]
    async fn insert_database(
        &self,
        address: &Address,
        identity: &Identity,
        program_bytes_address: &Hash,
        host_type: HostType,
        num_replicas: u32,
        force: bool,
        trace_log: bool,
    ) -> Result<(), anyhow::Error>;
    /// Switch the database at `address` over to the module program stored at
    /// `program_bytes_address`.
    async fn update_database(
        &self,
        address: &Address,
        program_bytes_address: &Hash,
        num_replicas: u32,
    ) -> Result<Option<UpdateDatabaseResult>, anyhow::Error>;
    async fn delete_database(&self, address: &Address) -> Result<(), anyhow::Error>;
    /// Store for module program bytes, addressed by `Hash`.
    fn object_db(&self) -> &ObjectDb;
    fn control_db(&self) -> &ControlDb;
    /// `None` when SendGrid email integration is disabled.
    fn sendgrid_controller(&self) -> Option<&SendGridController>;
}
/// The minimal control-node surface needed for authentication and name resolution:
/// DNS lookup, identity allocation, and the token keypair.
#[async_trait]
pub trait ControlNodeDelegate: Send + Sync {
    /// Resolve a registered domain name to its database address, if any.
    async fn spacetime_dns(&self, domain: &DomainName) -> spacetimedb::control_db::Result<Option<Address>>;
    /// Allocate a brand-new identity.
    async fn alloc_spacetime_identity(&self) -> spacetimedb::control_db::Result<Identity>;
    /// Key used to verify identity tokens.
    fn public_key(&self) -> &DecodingKey;
    /// Key used to sign newly minted identity tokens.
    fn private_key(&self) -> &EncodingKey;
}
/// A cloneable, shared handle to an environment value, used as router state.
///
/// `Clone` is written by hand (instead of `#[derive(Clone)]`) so that cloning never
/// requires `T: Clone` — only the `Arc` refcount is bumped, and `T` may be unsized.
pub struct ArcEnv<T: ?Sized>(pub Arc<T>);
impl<T: ?Sized> Clone for ArcEnv<T> {
    fn clone(&self) -> Self {
        ArcEnv(Arc::clone(&self.0))
    }
}
// Let axum handlers taking `State<Arc<dyn ControlCtx>>` extract it from `ArcEnv<T>` state.
impl<T: ControlCtx + 'static> FromRef<ArcEnv<T>> for Arc<dyn ControlCtx> {
    fn from_ref(env: &ArcEnv<T>) -> Self {
        env.0.clone()
    }
}
// Let axum handlers taking `State<Arc<dyn WorkerCtx>>` extract it from `ArcEnv<T>` state.
impl<T: WorkerCtx + 'static> FromRef<ArcEnv<T>> for Arc<dyn WorkerCtx> {
    fn from_ref(env: &ArcEnv<T>) -> Self {
        env.0.clone()
    }
}
// Forward `ControlNodeDelegate` through the `ArcEnv` wrapper to the wrapped context.
#[async_trait]
impl<T: ControlNodeDelegate + ?Sized> ControlNodeDelegate for ArcEnv<T> {
    async fn spacetime_dns(&self, domain: &DomainName) -> spacetimedb::control_db::Result<Option<Address>> {
        self.0.spacetime_dns(domain).await
    }
    async fn alloc_spacetime_identity(&self) -> spacetimedb::control_db::Result<Identity> {
        self.0.alloc_spacetime_identity().await
    }
    fn public_key(&self) -> &DecodingKey {
        self.0.public_key()
    }
    fn private_key(&self) -> &EncodingKey {
        self.0.private_key()
    }
}
// Forward `ControlNodeDelegate` through a plain `Arc` to the wrapped context.
#[async_trait]
impl<T: ControlNodeDelegate + ?Sized> ControlNodeDelegate for Arc<T> {
    async fn spacetime_dns(&self, domain: &DomainName) -> spacetimedb::control_db::Result<Option<Address>> {
        (**self).spacetime_dns(domain).await
    }
    async fn alloc_spacetime_identity(&self) -> spacetimedb::control_db::Result<Identity> {
        (**self).alloc_spacetime_identity().await
    }
    fn public_key(&self) -> &DecodingKey {
        (**self).public_key()
    }
    fn private_key(&self) -> &EncodingKey {
        (**self).private_key()
    }
}
/// Log an internal error (with its full context chain, via `{:#}`) and map it to a
/// bare 500 response, so error details are never leaked to the client.
pub fn log_and_500(e: impl std::fmt::Display) -> StatusCode {
    log::error!("internal error: {e:#}");
    StatusCode::INTERNAL_SERVER_ERROR
}
+967
View File
@@ -0,0 +1,967 @@
use std::collections::HashMap;
use std::sync::Arc;
use axum::body::Bytes;
use axum::extract::{DefaultBodyLimit, FromRef, Path, Query, State};
use axum::response::{ErrorResponse, IntoResponse};
use axum::{headers, TypedHeader};
use futures::StreamExt;
use http::StatusCode;
use serde::Deserialize;
use serde_json::{json, Value};
use spacetimedb::host::EntityDef;
use spacetimedb::host::ReducerArgs;
use spacetimedb::host::ReducerCallError;
use spacetimedb::host::ReducerOutcome;
use spacetimedb::host::UpdateDatabaseSuccess;
use spacetimedb_lib::name;
use spacetimedb_lib::name::DomainName;
use spacetimedb_lib::name::DomainParsingError;
use spacetimedb_lib::name::PublishOp;
use spacetimedb_lib::sats::TypeInSpace;
use crate::auth::{
SpacetimeAuth, SpacetimeAuthHeader, SpacetimeEnergyUsed, SpacetimeExecutionDurationMicros, SpacetimeIdentity,
SpacetimeIdentityToken,
};
use spacetimedb::address::Address;
use spacetimedb::database_logger::DatabaseLogger;
use spacetimedb::host::DescribedEntityType;
use spacetimedb::identity::Identity;
use spacetimedb::json::client_api::StmtResultJson;
use spacetimedb::messages::control_db::{DatabaseInstance, HostType};
use crate::util::{ByteStringBody, NameOrAddress};
use crate::{log_and_500, ControlCtx, ControlNodeDelegate, WorkerCtx};
/// Rejection for request paths/params that fail to parse as a spacetime domain name;
/// always rendered as a 400.
pub(crate) struct DomainParsingRejection(pub(crate) DomainParsingError);
impl From<DomainParsingError> for DomainParsingRejection {
    fn from(e: DomainParsingError) -> Self {
        Self(e)
    }
}
impl IntoResponse for DomainParsingRejection {
    fn into_response(self) -> axum::response::Response {
        let reply = (StatusCode::BAD_REQUEST, "Unable to parse domain name");
        reply.into_response()
    }
}
/// Path parameters for the reducer-call endpoint.
#[derive(Deserialize)]
pub struct CallParams {
    // Target database, as a registered name or an address.
    name_or_address: NameOrAddress,
    // Name of the reducer to invoke.
    reducer: String,
}
/// Handler: invoke a reducer on a database by name, with the JSON request body as its
/// arguments.
///
/// Authenticates the caller (minting fresh credentials for anonymous callers), resolves
/// the target database and its leader instance on this node, spawns the module host on
/// demand, then calls the reducer. The caller's identity/token and the energy/duration
/// accounting are echoed back as response headers alongside the outcome status/body.
pub async fn call(
    State(worker_ctx): State<Arc<dyn WorkerCtx>>,
    auth: SpacetimeAuthHeader,
    Path(CallParams {
        name_or_address,
        reducer,
    }): Path<CallParams>,
    ByteStringBody(body): ByteStringBody,
) -> axum::response::Result<impl IntoResponse> {
    // Anonymous callers get a brand-new identity + token allocated on the fly.
    let SpacetimeAuth {
        identity: caller_identity,
        creds: caller_identity_token,
    } = auth.get_or_create(&*worker_ctx).await?;
    let args = ReducerArgs::Json(body);
    let address = name_or_address.resolve(&*worker_ctx).await?;
    let database = worker_ctx
        .get_database_by_address(&address)
        .await
        .map_err(log_and_500)?
        .ok_or_else(|| {
            log::error!("Could not find database: {}", address.to_hex());
            (StatusCode::NOT_FOUND, "No such database.")
        })?;
    let identity = database.identity;
    // Only the leader instance scheduled on this node can serve the call.
    let database_instance = worker_ctx
        .get_leader_database_instance_by_database(database.id)
        .await
        .ok_or((
            StatusCode::NOT_FOUND,
            "Database instance not scheduled to this node yet.",
        ))?;
    let instance_id = database_instance.id;
    let host = worker_ctx.host_controller();
    // Lazily spawn the module host if this instance isn't running yet.
    let module = match host.get_module_host(instance_id) {
        Ok(m) => m,
        Err(_) => {
            let dbic = worker_ctx
                .load_module_host_context(database, instance_id)
                .await
                .map_err(log_and_500)?;
            host.spawn_module_host(dbic).await.map_err(log_and_500)?
        }
    };
    let result = match module.call_reducer(caller_identity, None, &reducer, args).await {
        Ok(rcr) => rcr,
        Err(e) => {
            // Distinguish caller mistakes by status: bad args -> 400, unknown
            // module/reducer -> 404.
            let status_code = match e {
                ReducerCallError::Args(_) => {
                    log::debug!("Attempt to call reducer with invalid arguments");
                    StatusCode::BAD_REQUEST
                }
                ReducerCallError::NoSuchModule(_) => StatusCode::NOT_FOUND,
                ReducerCallError::NoSuchReducer => {
                    log::debug!("Attempt to call non-existent reducer {}", reducer);
                    StatusCode::NOT_FOUND
                }
            };
            log::debug!("Error while invoking reducer {:#}", e);
            return Err((status_code, format!("{:#}", anyhow::anyhow!(e))).into());
        }
    };
    let (status, body) = reducer_outcome_response(&identity, &reducer, result.outcome);
    Ok((
        status,
        TypedHeader(SpacetimeIdentity(caller_identity)),
        TypedHeader(SpacetimeIdentityToken(caller_identity_token)),
        TypedHeader(SpacetimeEnergyUsed(result.energy_used)),
        TypedHeader(SpacetimeExecutionDurationMicros(result.execution_duration)),
        body,
    ))
}
/// Map a reducer outcome to the HTTP status code and response body for the caller.
fn reducer_outcome_response(identity: &Identity, reducer: &str, outcome: ReducerOutcome) -> (StatusCode, String) {
    match outcome {
        // Success: 200 with an empty body.
        ReducerOutcome::Committed => (StatusCode::OK, String::new()),
        ReducerOutcome::Failed(errmsg) => {
            // TODO: different status code? this is what cloudflare uses, sorta
            let status = StatusCode::from_u16(530).unwrap();
            (status, errmsg)
        }
        ReducerOutcome::BudgetExceeded => {
            log::warn!(
                "Node's energy budget exceeded for identity: {} while executing {}",
                identity,
                reducer
            );
            let body = "Module energy budget exhausted.".to_owned();
            (StatusCode::PAYMENT_REQUIRED, body)
        }
    }
}
/// Classified failure modes for database-scoped API calls.
#[derive(Debug)]
pub enum DBCallErr {
    // A handler already produced a concrete HTTP error response.
    HandlerError(ErrorResponse),
    // No database was found for the requested name/address.
    NoSuchDatabase,
    // The database exists but no instance is scheduled to this node.
    InstanceNotScheduled,
}
use chrono::Utc;
use rand::Rng;
use spacetimedb::auth::identity::encode_token;
use spacetimedb::sql::execute::execute;
use spacetimedb_lib::name::{DnsLookupResponse, InsertDomainResult, PublishResult};
use spacetimedb_lib::recovery::{RecoveryCode, RecoveryCodeResponse};
use std::convert::From;
impl From<ErrorResponse> for DBCallErr {
fn from(error: ErrorResponse) -> Self {
DBCallErr::HandlerError(error)
}
}
/// Per-request data resolved by `extract_db_call_info`.
pub struct DatabaseInformation {
    // The leader instance of the target database scheduled on this node.
    database_instance: DatabaseInstance,
    // The caller's (possibly freshly created) credentials and identity.
    auth: SpacetimeAuth,
}
/// Extract some common parameters that most API call invocations to the database will use.
/// TODO(tyler): Ryan originally intended for extract call info to be used for any call that is specific to a
/// database. However, there are some functions that should be callable from anyone, possibly even if they
/// don't provide any credentials at all. The problem is that this function doesn't make sense in all places
/// where credentials are required (e.g. publish), so for now we're just going to keep this as is, but we're
/// going to generate a new set of credentials if you don't provide them.
async fn extract_db_call_info(
    ctx: &dyn WorkerCtx,
    auth: SpacetimeAuthHeader,
    address: &Address,
) -> Result<DatabaseInformation, ErrorResponse> {
    // Anonymous callers are given freshly minted credentials (see TODO above).
    let auth = auth.get_or_create(ctx).await?;
    let database = ctx
        .get_database_by_address(address)
        .await
        .map_err(log_and_500)?
        .ok_or_else(|| {
            log::error!("Could not find database: {}", address.to_hex());
            (StatusCode::NOT_FOUND, "No such database.")
        })?;
    // Only the leader instance for this database on this node may serve the call.
    let database_instance = ctx.get_leader_database_instance_by_database(database.id).await.ok_or((
        StatusCode::NOT_FOUND,
        "Database instance not scheduled to this node yet.",
    ))?;
    Ok(DatabaseInformation {
        database_instance,
        auth,
    })
}
/// Render a catalog entity (table or reducer) as a JSON description.
///
/// Returns `None` if type resolution fails (e.g. a table's row type is not a product
/// type or contains unresolvable references). With `expand` the full schema is
/// included; otherwise only the entity kind and its arity.
fn entity_description_json(description: TypeInSpace<EntityDef>, expand: bool) -> Option<Value> {
    let typ = DescribedEntityType::from_entitydef(description.ty()).as_str();
    // Arity: column count for tables, argument count for reducers.
    let len = match description.ty() {
        EntityDef::Table(t) => description.resolve(t.data).ty().as_product()?.elements.len(),
        EntityDef::Reducer(r) => r.args.len(),
    };
    if expand {
        // TODO(noa): make this less hacky; needs coordination w/ spacetime-web
        let schema = match description.ty() {
            EntityDef::Table(table) => {
                json!(description.with(&table.data).resolve_refs()?.as_product()?)
            }
            EntityDef::Reducer(r) => json!({
                "name": r.name,
                "elements": r.args,
            }),
        };
        Some(json!({
            "type": typ,
            "arity": len,
            "schema": schema
        }))
    } else {
        Some(json!({
            "type": typ,
            "arity": len,
        }))
    }
}
/// Path parameters for the `describe` endpoint.
#[derive(Deserialize)]
pub struct DescribeParams {
    name_or_address: NameOrAddress,
    // Entity kind, parsed into `DescribedEntityType` in the handler — presumably
    // "table"/"reducer"; confirm the accepted strings against that type's `FromStr`.
    entity_type: String,
    // Name of the entity to describe.
    entity: String,
}
/// Query parameters shared by the `describe` and `catalog` endpoints.
#[derive(Deserialize)]
pub struct DescribeQueryParams {
    // Whether to include the full schema; each endpoint picks its own default.
    expand: Option<bool>,
}
/// Handler: describe a single entity (table or reducer) of a database as JSON.
///
/// `expand` defaults to `true` here (full schema included).
/// NOTE(review): the database is looked up both here and inside
/// `extract_db_call_info`; the second lookup is redundant work.
pub async fn describe(
    State(worker_ctx): State<Arc<dyn WorkerCtx>>,
    Path(DescribeParams {
        name_or_address,
        entity_type,
        entity,
    }): Path<DescribeParams>,
    Query(DescribeQueryParams { expand }): Query<DescribeQueryParams>,
    auth: SpacetimeAuthHeader,
) -> axum::response::Result<impl IntoResponse> {
    let address = name_or_address.resolve(&*worker_ctx).await?;
    let database = worker_ctx
        .get_database_by_address(&address)
        .await
        .map_err(log_and_500)?
        .ok_or((StatusCode::NOT_FOUND, "No such database."))?;
    let call_info = extract_db_call_info(&*worker_ctx, auth, &address).await?;
    let instance_id = call_info.database_instance.id;
    let host = worker_ctx.host_controller();
    // Lazily spawn the module host if this instance isn't running yet.
    let module = match host.get_module_host(instance_id) {
        Ok(m) => m,
        Err(_) => {
            let dbic = worker_ctx
                .load_module_host_context(database, instance_id)
                .await
                .map_err(log_and_500)?;
            host.spawn_module_host(dbic).await.map_err(log_and_500)?
        }
    };
    let entity_type = entity_type.as_str().parse().map_err(|()| {
        log::debug!("Request to describe unhandled entity type: {}", entity_type);
        (
            StatusCode::NOT_FOUND,
            format!("Invalid entity type for description: {}", entity_type),
        )
    })?;
    // The entity must both exist and be of the requested kind.
    let catalog = module.catalog();
    let description = catalog
        .get(&entity)
        .filter(|desc| DescribedEntityType::from_entitydef(desc.ty()) == entity_type)
        .ok_or_else(|| (StatusCode::NOT_FOUND, format!("{entity_type} {entity:?} not found")))?;
    let expand = expand.unwrap_or(true);
    let response_json = json!({ entity: entity_description_json(description, expand) });
    Ok((
        StatusCode::OK,
        TypedHeader(SpacetimeIdentity(call_info.auth.identity)),
        TypedHeader(SpacetimeIdentityToken(call_info.auth.creds)),
        axum::Json(response_json),
    ))
}
/// Path parameters for the `catalog` endpoint.
#[derive(Deserialize)]
pub struct CatalogParams {
    name_or_address: NameOrAddress,
}
/// Handler: describe every entity in a database's catalog as a JSON map keyed by
/// entity name.
///
/// Unlike `describe`, `expand` defaults to `false` here (kind and arity only).
/// NOTE(review): the database is looked up both here and inside
/// `extract_db_call_info`; the second lookup is redundant work.
pub async fn catalog(
    State(worker_ctx): State<Arc<dyn WorkerCtx>>,
    Path(CatalogParams { name_or_address }): Path<CatalogParams>,
    Query(DescribeQueryParams { expand }): Query<DescribeQueryParams>,
    auth: SpacetimeAuthHeader,
) -> axum::response::Result<impl IntoResponse> {
    let address = name_or_address.resolve(&*worker_ctx).await?;
    let database = worker_ctx
        .get_database_by_address(&address)
        .await
        .map_err(log_and_500)?
        .ok_or((StatusCode::NOT_FOUND, "No such database."))?;
    let call_info = extract_db_call_info(&*worker_ctx, auth, &address).await?;
    let instance_id = call_info.database_instance.id;
    let host = worker_ctx.host_controller();
    // Lazily spawn the module host if this instance isn't running yet.
    let module = match host.get_module_host(instance_id) {
        Ok(m) => m,
        Err(_) => {
            let dbic = worker_ctx
                .load_module_host_context(database, instance_id)
                .await
                .map_err(log_and_500)?;
            host.spawn_module_host(dbic).await.map_err(log_and_500)?
        }
    };
    let catalog = module.catalog();
    let expand = expand.unwrap_or(false);
    let response_catalog: HashMap<_, _> = catalog
        .iter()
        .map(|(name, entity)| (name, entity_description_json(entity, expand)))
        .collect();
    let response_json = json!(response_catalog);
    Ok((
        StatusCode::OK,
        TypedHeader(SpacetimeIdentity(call_info.auth.identity)),
        TypedHeader(SpacetimeIdentityToken(call_info.auth.creds)),
        axum::Json(response_json),
    ))
}
/// Path parameters for the `logs` endpoint.
#[derive(Deserialize)]
pub struct LogsParams {
    name_or_address: NameOrAddress,
}
/// Query parameters for the `logs` endpoint.
#[derive(Deserialize)]
pub struct LogsQuery {
    // Number of trailing lines to return; `None` defers to
    // `DatabaseLogger::read_latest`'s default behavior.
    num_lines: Option<u32>,
    // When true, keep the connection open and stream new lines as they arrive.
    #[serde(default)]
    follow: bool,
}
/// Handler: fetch (and optionally follow) a database's log.
///
/// Requires credentials, and the caller's identity must own the database. With
/// `follow=true` the response is an unbounded ndjson stream: the latest `num_lines`
/// first, then live lines as the module emits them.
pub async fn logs(
    State(worker_ctx): State<Arc<dyn WorkerCtx>>,
    Path(LogsParams { name_or_address }): Path<LogsParams>,
    Query(LogsQuery { num_lines, follow }): Query<LogsQuery>,
    auth: SpacetimeAuthHeader,
) -> axum::response::Result<impl IntoResponse> {
    // You should not be able to read the logs from a database that you do not own
    // so, unless you are the owner, this will fail, hence using get() and not get_or_create
    let auth = auth.get().ok_or((StatusCode::UNAUTHORIZED, "Invalid credentials."))?;
    let address = name_or_address.resolve(&*worker_ctx).await?;
    let database = worker_ctx
        .get_database_by_address(&address)
        .await
        .map_err(log_and_500)?
        .ok_or((StatusCode::NOT_FOUND, "No such database."))?;
    // Ownership check: only the owning identity may read logs.
    if database.identity != auth.identity {
        return Err((
            StatusCode::BAD_REQUEST,
            format!(
                "Identity does not own database, expected: {} got: {}",
                database.identity.to_hex(),
                auth.identity.to_hex()
            ),
        )
        .into());
    }
    let database_instance = worker_ctx
        .get_leader_database_instance_by_database(database.id)
        .await
        .ok_or((
            StatusCode::NOT_FOUND,
            "Database instance not scheduled to this node yet.",
        ))?;
    let instance_id = database_instance.id;
    let filepath = DatabaseLogger::filepath(&address, instance_id);
    let lines = DatabaseLogger::read_latest(&filepath, num_lines).await;
    let body = if follow {
        // Ensure the module host is running so we can subscribe to its live log feed.
        let host = worker_ctx.host_controller();
        let module = match host.get_module_host(instance_id) {
            Ok(m) => m,
            Err(_) => {
                let dbic = worker_ctx
                    .load_module_host_context(database, instance_id)
                    .await
                    .map_err(log_and_500)?;
                host.spawn_module_host(dbic).await.map_err(log_and_500)?
            }
        };
        let log_rx = module.subscribe_to_logs().map_err(log_and_500)?;
        // If the broadcast channel lags, drop the missed lines but note how many.
        let stream = tokio_stream::wrappers::BroadcastStream::new(log_rx).filter_map(move |x| {
            std::future::ready(match x {
                Ok(log) => Some(log),
                Err(tokio_stream::wrappers::errors::BroadcastStreamRecvError::Lagged(skipped)) => {
                    log::trace!("Skipped {} lines in log for module {}", skipped, address.to_hex());
                    None
                }
            })
        });
        // Emit the historical tail first, then the live stream.
        let stream = futures::stream::once(std::future::ready(lines.into()))
            .chain(stream)
            .map(Ok::<_, std::convert::Infallible>);
        axum::body::boxed(axum::body::StreamBody::new(stream))
    } else {
        axum::body::boxed(axum::body::Full::from(lines))
    };
    Ok((
        StatusCode::OK,
        TypedHeader(headers::CacheControl::new().with_no_cache()),
        TypedHeader(headers::ContentType::from(mime_ndjson())),
        body,
    ))
}
/// The `application/x-ndjson` MIME type used for streamed log lines.
fn mime_ndjson() -> mime::Mime {
    // Parsing a known-good literal; the unwrap cannot fire.
    "application/x-ndjson".parse().unwrap()
}
/// Path parameters for the `sql` endpoint.
#[derive(Deserialize)]
pub struct SqlParams {
    name_or_address: NameOrAddress,
}
/// Query parameters for the `sql` endpoint (currently none).
#[derive(Deserialize)]
pub struct SqlQueryParams {}
/// Handler: execute ad-hoc SQL (the request body) against a database, returning one
/// `StmtResultJson` (schema + rows) per executed statement.
///
/// Requires existing credentials, and the caller's identity must own the database.
pub async fn sql(
    State(worker_ctx): State<Arc<dyn WorkerCtx>>,
    Path(SqlParams { name_or_address }): Path<SqlParams>,
    Query(SqlQueryParams {}): Query<SqlQueryParams>,
    auth: SpacetimeAuthHeader,
    body: String,
) -> axum::response::Result<impl IntoResponse> {
    // You should not be able to query a database that you do not own
    // so, unless you are the owner, this will fail, hence not using get_or_create.
    // Missing credentials are an authentication failure: respond 401 UNAUTHORIZED,
    // consistent with the `logs` handler, rather than 400.
    let auth = auth.get().ok_or((StatusCode::UNAUTHORIZED, "Invalid credentials."))?;
    let address = name_or_address.resolve(&*worker_ctx).await?;
    let database = worker_ctx
        .get_database_by_address(&address)
        .await
        .map_err(log_and_500)?
        .ok_or((StatusCode::NOT_FOUND, "No such database."))?;
    // Ownership check: only the owning identity may run ad-hoc SQL.
    if database.identity != auth.identity {
        return Err((StatusCode::BAD_REQUEST, "Identity does not own database.").into());
    }
    let database_instance = worker_ctx
        .get_leader_database_instance_by_database(database.id)
        .await
        .ok_or((
            StatusCode::NOT_FOUND,
            "Database instance not scheduled to this node yet.",
        ))?;
    let instance_id = database_instance.id;
    let host = worker_ctx.host_controller();
    // Ensure the module host is running before executing against its instance.
    match host.get_module_host(instance_id) {
        Ok(_) => {}
        Err(_) => {
            let dbic = worker_ctx
                .load_module_host_context(database, instance_id)
                .await
                .map_err(log_and_500)?;
            host.spawn_module_host(dbic).await.map_err(log_and_500)?;
        }
    };
    let results = match execute(worker_ctx.database_instance_context_controller(), instance_id, body) {
        Ok(results) => results,
        Err(err) => {
            // SQL errors are the caller's fault; log at warn, return a bare 400.
            log::warn!("{}", err);
            return Err(StatusCode::BAD_REQUEST.into());
        }
    };
    let json = results
        .into_iter()
        .map(|result| StmtResultJson {
            schema: result.head.ty(),
            rows: result.data.into_iter().map(|x| x.elements).collect::<Vec<_>>(),
        })
        .collect::<Vec<_>>();
    Ok((StatusCode::OK, axum::Json(json)))
}
/// Path parameters for forward DNS lookup (name -> address).
#[derive(Deserialize)]
pub struct DNSParams {
    // Registered database name to resolve.
    database_name: String,
}
/// Path parameters for reverse DNS lookup (address -> names).
#[derive(Deserialize)]
pub struct ReverseDNSParams {
    // Database address whose registered names are wanted.
    database_address: Address,
}
/// Query parameters for forward DNS lookup (currently none).
#[derive(Deserialize)]
pub struct DNSQueryParams {}
/// Handler: resolve a registered database name to its address.
///
/// Responds 200 with either a success or a failure payload; an unparseable name is a
/// 400 via `DomainParsingRejection`.
pub async fn dns(
    State(ctx): State<Arc<dyn ControlCtx>>,
    Path(DNSParams { database_name }): Path<DNSParams>,
    Query(DNSQueryParams {}): Query<DNSQueryParams>,
) -> axum::response::Result<impl IntoResponse> {
    let domain = database_name.parse().map_err(DomainParsingRejection)?;
    let address = ctx.control_db().spacetime_dns(&domain).await.map_err(log_and_500)?;
    // Lookup misses are reported in-band, not as an HTTP error.
    let response = if let Some(address) = address {
        DnsLookupResponse::Success {
            domain,
            address: address.to_hex(),
        }
    } else {
        DnsLookupResponse::Failure { domain }
    };
    Ok(axum::Json(response))
}
/// Handler: list every name registered for the given database address.
pub async fn reverse_dns(
    State(ctx): State<Arc<dyn ControlCtx>>,
    Path(ReverseDNSParams { database_address }): Path<ReverseDNSParams>,
) -> axum::response::Result<impl IntoResponse> {
    let control_db = ctx.control_db();
    let names = control_db
        .spacetime_reverse_dns(&database_address)
        .await
        .map_err(log_and_500)?;
    let response = name::ReverseDNSResponse { names };
    Ok(axum::Json(response))
}
/// Query parameters for TLD registration.
#[derive(Deserialize)]
pub struct RegisterTldParams {
    // The top-level domain to claim.
    tld: String,
}
/// Handler: register a top-level domain to the caller's identity.
///
/// Requires existing credentials; the registration result is returned as JSON.
pub async fn register_tld(
    State(ctx): State<Arc<dyn ControlCtx>>,
    Query(RegisterTldParams { tld }): Query<RegisterTldParams>,
    auth: SpacetimeAuthHeader,
) -> axum::response::Result<impl IntoResponse> {
    // You should not be able to publish to a database that you do not own
    // so, unless you are the owner, this will fail, hence not using get_or_create.
    // Missing credentials are an authentication failure: respond 401 UNAUTHORIZED,
    // consistent with the `logs` handler, rather than 400.
    let auth = auth.get().ok_or((StatusCode::UNAUTHORIZED, "Invalid credentials."))?;
    let tld = tld.parse::<DomainName>().map_err(DomainParsingRejection)?.into_tld();
    let result = ctx
        .control_db()
        .spacetime_register_tld(tld, auth.identity)
        .await
        .map_err(log_and_500)?;
    Ok(axum::Json(result))
}
/// Query parameters for requesting an account-recovery code by email.
#[derive(Deserialize)]
pub struct RequestRecoveryCodeParams {
    /// Whether or not the client is requesting a login link for a web-login. This is false for CLI logins.
    #[serde(default)]
    link: bool,
    // Email on file for the identity being recovered.
    email: String,
    // The identity to recover.
    identity: Identity,
}
/// Handler: email a six-digit recovery code to `email` for recovering access to
/// `identity`.
///
/// Fails with 500 if SendGrid is not configured, and 400 if `email` is not
/// registered to `identity`.
pub async fn request_recovery_code(
    State(ctx): State<Arc<dyn ControlCtx>>,
    Query(RequestRecoveryCodeParams { link, email, identity }): Query<RequestRecoveryCodeParams>,
) -> axum::response::Result<impl IntoResponse> {
    let Some(sendgrid) = ctx.sendgrid_controller() else {
        log::error!("A recovery code was requested, but SendGrid is disabled.");
        return Err((StatusCode::INTERNAL_SERVER_ERROR, "SendGrid is disabled.").into());
    };
    // The email must actually be associated with the identity being recovered.
    if !ctx
        .control_db()
        .get_identities_for_email(email.as_str())
        .map_err(log_and_500)?
        .iter()
        .any(|a| a.identity == identity)
    {
        return Err((
            StatusCode::BAD_REQUEST,
            "Email is not associated with the provided identity.",
        )
        .into());
    }
    // Zero-padded six-digit code in 000000..=999999.
    let code = rand::thread_rng().gen_range(0..=999999);
    let code = format!("{code:06}");
    let recovery_code = RecoveryCode {
        code: code.clone(),
        generation_time: Utc::now(),
        identity: identity.to_hex(),
    };
    // Persist before sending, so the emailed code can later be validated.
    ctx.control_db()
        .spacetime_insert_recovery_code(email.as_str(), recovery_code)
        .await
        .map_err(log_and_500)?;
    sendgrid
        .send_recovery_email(email.as_str(), code.as_str(), &identity.to_hex(), link)
        .await
        .map_err(log_and_500)?;
    Ok(())
}
/// Query parameters for `/confirm_recovery_code`.
#[derive(Deserialize)]
pub struct ConfirmRecoveryCodeParams {
    /// Email address the code was sent to.
    pub email: String,
    /// Identity the caller claims to be recovering.
    pub identity: Identity,
    /// The 6-digit code received by email.
    pub code: String,
}
/// Note: We should be slightly more security conscious about this function because
/// we are providing a login token to the user initiating the request. We want to make
/// sure there aren't any logical issues in here that would allow a user to request a token
/// for an identity that they don't have authority over.
///
/// Checks performed, in order: the (email, code) pair exists; the code is at
/// most 10 minutes old; the code was issued for the claimed identity; and the
/// email is *still* associated with that identity. Only then is a fresh token
/// minted and returned.
pub async fn confirm_recovery_code(
    State(ctx): State<Arc<dyn ControlCtx>>,
    Query(ConfirmRecoveryCodeParams { email, identity, code }): Query<ConfirmRecoveryCodeParams>,
) -> axum::response::Result<impl IntoResponse> {
    let recovery_code = ctx
        .control_db()
        .spacetime_get_recovery_code(email.as_str(), code.as_str())
        .await
        .map_err(log_and_500)?
        .ok_or((StatusCode::BAD_REQUEST, "Recovery code not found."))?;
    // Codes expire 10 minutes after generation.
    let duration = Utc::now() - recovery_code.generation_time;
    if duration.num_seconds() > 60 * 10 {
        return Err((StatusCode::BAD_REQUEST, "Recovery code expired.").into());
    }
    // Make sure the identity provided by the request matches the recovery code registration
    if recovery_code.identity != identity.to_hex() {
        return Err((
            StatusCode::BAD_REQUEST,
            "Recovery code doesn't match the provided identity.",
        )
        .into());
    }
    if !ctx
        .control_db()
        .get_identities_for_email(email.as_str())
        .map_err(log_and_500)?
        .iter()
        .any(|a| a.identity == identity)
    {
        // This can happen if someone changes their associated email during a recovery request.
        return Err((StatusCode::BAD_REQUEST, "No identity associated with that email.").into());
    }
    // Recovery code is verified, return the identity and token to the user
    let token = encode_token(ctx.private_key(), identity).map_err(log_and_500)?;
    let result = RecoveryCodeResponse {
        identity: identity.to_hex(),
        token,
    };
    Ok(axum::Json(result))
}
/// Path parameters for `/publish` — currently empty, kept for routing symmetry.
#[derive(Deserialize)]
pub struct PublishDatabaseParams {}
/// Query parameters accepted by the `/publish` endpoint.
#[derive(Deserialize)]
pub struct PublishDatabaseQueryParams {
    /// Module host type; `publish` defaults this to Wasmer when absent.
    host_type: Option<String>,
    /// When true, wipe and re-create an existing database instead of updating it.
    #[serde(default)]
    clear: bool,
    /// Target database: an existing address or name, or a new name to claim.
    name_or_address: Option<NameOrAddress>,
    /// Request tracelogging; only honored when built with the `tracelogging`
    /// feature (see `should_trace`).
    trace_log: Option<bool>,
    /// Forwarded to `spacetime_insert_domain` when a new name is claimed —
    /// presumably registers the TLD to the caller as well; see that API.
    #[serde(default)]
    register_tld: bool,
}
/// Tracelogging is compiled out in this build, so tracing can never be
/// enabled regardless of what the client requested.
#[cfg(not(feature = "tracelogging"))]
fn should_trace(_trace_log: Option<bool>) -> bool {
    false
}
/// With tracelogging compiled in, tracing is strictly opt-in per request:
/// only an explicit `trace_log=true` enables it; absent means disabled.
#[cfg(feature = "tracelogging")]
fn should_trace(trace_log: Option<bool>) -> bool {
    matches!(trace_log, Some(true))
}
/// Publish (create or update) a database module.
///
/// Resolution rules:
/// * name that resolves → publish at that address (owner check applies);
/// * name that doesn't resolve → allocate an address and claim the name;
/// * explicit address → must already exist, otherwise 404;
/// * nothing specified → allocate a fresh address.
///
/// Returns a `PublishResult` distinguishing created vs. updated, plus the
/// TLD-not-registered / permission-denied outcomes of claiming a new name.
pub async fn publish(
    State(ctx): State<Arc<dyn ControlCtx>>,
    Path(PublishDatabaseParams {}): Path<PublishDatabaseParams>,
    Query(query_params): Query<PublishDatabaseQueryParams>,
    auth: SpacetimeAuthHeader,
    body: Bytes,
) -> axum::response::Result<axum::Json<PublishResult>> {
    let PublishDatabaseQueryParams {
        name_or_address,
        host_type,
        clear,
        trace_log,
        register_tld,
    } = query_params;
    // You should not be able to publish to a database that you do not own
    // so, unless you are the owner, this will fail, hence not using get_or_create
    let auth = auth.get().ok_or((StatusCode::BAD_REQUEST, "Invalid credentials."))?;
    // Remember whether the caller pinned an explicit address: if so, a missing
    // database is a 404 below rather than a fresh creation.
    let specified_address = matches!(name_or_address, Some(NameOrAddress::Address(_)));
    // Parse the address or convert the name to a usable address
    let db_address = if let Some(name_or_address) = name_or_address.clone() {
        match name_or_address.try_resolve(&*ctx).await? {
            Ok(address) => address,
            Err(name) => {
                let domain = name.parse().map_err(DomainParsingRejection)?;
                // Client specified a name which doesn't yet exist
                // Create a new DNS record and a new address to assign to it
                let address = ctx.control_db().alloc_spacetime_address().await.map_err(log_and_500)?;
                let result = ctx
                    .control_db()
                    .spacetime_insert_domain(&address, domain, auth.identity, register_tld)
                    .await
                    .map_err(log_and_500)?;
                match result {
                    InsertDomainResult::Success { .. } => {}
                    InsertDomainResult::TldNotRegistered { domain } => {
                        return Ok(axum::Json(PublishResult::TldNotRegistered { domain }))
                    }
                    InsertDomainResult::PermissionDenied { domain } => {
                        return Ok(axum::Json(PublishResult::PermissionDenied { domain }))
                    }
                }
                address
            }
        }
    } else {
        // No domain or address was specified, create a new one
        ctx.control_db().alloc_spacetime_address().await.map_err(log_and_500)?
    };
    let host_type = match host_type {
        None => HostType::Wasmer,
        Some(ht) => ht
            .parse()
            .map_err(|_| (StatusCode::BAD_REQUEST, format!("unknown host type {ht}")))?,
    };
    // The request body is the compiled module; store it in the object DB.
    // NOTE(review): this unwrap panics the handler on object-store failure —
    // consider map_err(log_and_500) like the surrounding calls.
    let program_bytes_addr = ctx.object_db().insert_object(body.into()).unwrap();
    let num_replicas = 1;
    let trace_log = should_trace(trace_log);
    let op = match ctx
        .control_db()
        .get_database_by_address(&db_address)
        .await
        .map_err(log_and_500)?
    {
        Some(db) => {
            if Identity::from_slice(db.identity.as_slice()) != auth.identity {
                return Err((StatusCode::BAD_REQUEST, "Identity does not own this database.").into());
            }
            if clear {
                // `clear` re-inserts over the existing database, wiping state.
                ctx.insert_database(
                    &db_address,
                    &auth.identity,
                    &program_bytes_addr,
                    host_type,
                    num_replicas,
                    clear,
                    trace_log,
                )
                .await
                .map_err(log_and_500)?;
                PublishOp::Created
            } else {
                let res = ctx
                    .update_database(&db_address, &program_bytes_addr, num_replicas)
                    .await
                    .map_err(log_and_500)?;
                if let Some(res) = res {
                    let success = match res {
                        Ok(success) => success,
                        Err(e) => {
                            return Err((StatusCode::BAD_REQUEST, format!("Database update rejected: {e}")).into());
                        }
                    };
                    // If the module ran an `update` reducer, surface any
                    // non-OK reducer outcome to the caller.
                    if let UpdateDatabaseSuccess {
                        update_result: Some(update_result),
                        migrate_results: _,
                    } = success
                    {
                        match reducer_outcome_response(&auth.identity, "update", update_result.outcome) {
                            (StatusCode::OK, _) => {}
                            (status, body) => return Err((status, body).into()),
                        }
                    }
                }
                log::debug!("Updated database {}", db_address.to_hex());
                PublishOp::Updated
            }
        }
        None if specified_address => {
            return Err((
                StatusCode::NOT_FOUND,
                format!("Failed to find database at address: {}", db_address.to_hex()),
            )
            .into())
        }
        None => {
            ctx.insert_database(
                &db_address,
                &auth.identity,
                &program_bytes_addr,
                host_type,
                num_replicas,
                false,
                trace_log,
            )
            .await
            .map_err(log_and_500)?;
            PublishOp::Created
        }
    };
    let response = PublishResult::Success {
        // Only echo back a domain when the caller addressed us by name.
        domain: name_or_address.and_then(|noa| match noa {
            NameOrAddress::Address(_) => None,
            NameOrAddress::Name(name) => Some(name),
        }),
        address: db_address.to_hex(),
        op,
    };
    //TODO(tyler): Eventually we want it to be possible to publish a database
    // which no one has the credentials to. In that case we wouldn't want to
    // return a token.
    Ok(axum::Json(response))
}
/// Path parameters for `/delete/:address`.
#[derive(Deserialize)]
pub struct DeleteDatabaseParams {
    /// Address of the database to delete.
    address: Address,
}
pub async fn delete_database(
State(ctx): State<Arc<dyn ControlCtx>>,
Path(DeleteDatabaseParams { address }): Path<DeleteDatabaseParams>,
) -> axum::response::Result<impl IntoResponse> {
// TODO(cloutiertyler): Validate that the creator has credentials for the identity of this database
ctx.delete_database(&address).await.map_err(log_and_500)?;
Ok(())
}
/// Query parameters for `/set_name`.
#[derive(Deserialize)]
pub struct SetNameQueryParams {
    /// Domain name to point at `address`.
    domain: String,
    /// Address of the (already-existing) database being named.
    address: Address,
    /// Forwarded to `spacetime_insert_domain`; presumably also registers the
    /// TLD to the caller — verify against that API.
    #[serde(default)]
    register_tld: bool,
}
/// Point `domain` at the database living at `address`.
///
/// Only the database's owning identity may assign names; the raw result of
/// the domain insertion is returned to the caller as JSON.
pub async fn set_name(
    State(ctx): State<Arc<dyn ControlCtx>>,
    Query(SetNameQueryParams {
        domain,
        address,
        register_tld,
    }): Query<SetNameQueryParams>,
    auth: SpacetimeAuthHeader,
) -> axum::response::Result<impl IntoResponse> {
    let auth = auth.get().ok_or((StatusCode::BAD_REQUEST, "Invalid credentials."))?;
    // Ownership check: the target database must exist and belong to the caller.
    let database = ctx
        .control_db()
        .get_database_by_address(&address)
        .await
        .map_err(log_and_500)?
        .ok_or((StatusCode::NOT_FOUND, "No such database."))?;
    if database.identity != auth.identity {
        return Err((StatusCode::BAD_REQUEST, "Identity does not own database.").into());
    }
    let parsed_domain = domain.parse().map_err(DomainParsingRejection)?;
    let insert_result = ctx
        .control_db()
        .spacetime_insert_domain(&address, parsed_domain, auth.identity, register_tld)
        .await
        .map_err(log_and_500)?;
    Ok(axum::Json(insert_result))
}
/// Routes served by control nodes: naming/DNS, account recovery, and
/// database lifecycle (publish/delete).
pub fn control_routes<S>() -> axum::Router<S>
where
    S: ControlNodeDelegate + Clone + 'static,
    Arc<dyn ControlCtx>: FromRef<S>,
{
    use axum::routing::{get, post};
    axum::Router::new()
        // Name → address and address → names resolution.
        .route("/dns/:database_name", get(dns))
        .route("/reverse_dns/:database_address", get(reverse_dns))
        .route("/set_name", get(set_name))
        .route("/register_tld", get(register_tld))
        // Email-based identity recovery.
        .route("/request_recovery_code", get(request_recovery_code))
        .route("/confirm_recovery_code", get(confirm_recovery_code))
        // Module uploads can be large, so the default body limit is lifted.
        .route("/publish", post(publish).layer(DefaultBodyLimit::disable()))
        .route("/delete/:address", post(delete_database))
}
/// Routes served by worker nodes: live subscriptions, reducer calls,
/// schema/catalog introspection, logs and SQL.
pub fn worker_routes<S>() -> axum::Router<S>
where
    S: ControlNodeDelegate + Clone + 'static,
    Arc<dyn WorkerCtx>: FromRef<S>,
{
    use axum::routing::{get, post};
    axum::Router::new()
        // WebSocket subscription endpoint.
        .route("/subscribe/:name_or_address", get(super::subscribe::handle_websocket))
        .route("/call/:name_or_address/:reducer", post(call))
        .route("/schema/:name_or_address/:entity_type/:entity", get(describe))
        .route("/schema/:name_or_address", get(catalog))
        .route("/logs/:name_or_address", get(logs))
        .route("/sql/:name_or_address", post(sql))
}
+89
View File
@@ -0,0 +1,89 @@
use std::sync::Arc;
use axum::extract::{FromRef, Path, Query, State};
use axum::response::IntoResponse;
use http::StatusCode;
use serde::Deserialize;
use serde_json::json;
use spacetimedb::messages::control_db::EnergyBalance;
use spacetimedb_lib::Identity;
use crate::{log_and_500, ControlCtx, ControlNodeDelegate};
/// Path parameters for the energy endpoints (`/:identity`).
#[derive(Deserialize)]
pub struct IdentityParams {
    /// Identity whose energy budget is being read or written.
    identity: Identity,
}
/// Fetch the energy balance for `identity` as JSON `{ "balance": … }`.
///
/// Responds 404 when no budget exists for the identity.
pub async fn get_budget(
    State(ctx): State<Arc<dyn ControlCtx>>,
    Path(IdentityParams { identity }): Path<IdentityParams>,
) -> axum::response::Result<impl IntoResponse> {
    // TODO: we need to do authorization here. For now, just short-circuit.
    // Note: Consult the write-through cache on control_budget, not the control_db directly.
    let maybe_budget = ctx
        .control_db()
        .get_energy_balance(&identity)
        .await
        .map_err(log_and_500)?;
    let budget = maybe_budget.ok_or((StatusCode::NOT_FOUND, "No budget for identity"))?;
    Ok(axum::Json(json!({ "balance": budget.balance_quanta })))
}
/// Query parameters for setting an energy budget; absent fields are left unchanged.
#[derive(Deserialize)]
pub struct SetEnergyBalanceQueryParams {
    /// New balance in quanta, if supplied.
    balance: Option<i64>,
}
/// Overwrite part of the energy budget for `identity` (currently just the
/// balance) and echo the resulting budget back as JSON.
pub async fn set_energy_balance(
    State(ctx): State<Arc<dyn ControlCtx>>,
    Path(IdentityParams { identity }): Path<IdentityParams>,
    Query(SetEnergyBalanceQueryParams { balance }): Query<SetEnergyBalanceQueryParams>,
) -> axum::response::Result<impl IntoResponse> {
    // TODO: we need to do authorization here. For now, just short-circuit. GOD MODE.
    // Read-modify-write: load the current budget (or start from a zeroed
    // default if none exists), then overlay only the supplied fields.
    let existing = ctx
        .control_db()
        .get_energy_balance(&identity)
        .await
        .map_err(log_and_500)?;
    let mut budget = existing.unwrap_or(EnergyBalance {
        identity,
        balance_quanta: 0,
    });
    if let Some(new_balance) = balance {
        budget.balance_quanta = new_balance;
    }
    ctx.control_db()
        .set_energy_balance(&identity, &budget)
        .map_err(log_and_500)?;
    // Return the modified budget.
    Ok(axum::Json(json!({ "balance": budget.balance_quanta })))
}
/// Energy-budget routes: GET reads a balance, POST overwrites it (same path).
pub fn router<S>() -> axum::Router<S>
where
    S: ControlNodeDelegate + Clone + 'static,
    Arc<dyn ControlCtx>: FromRef<S>,
{
    use axum::routing::{get, post};
    axum::Router::new()
        .route("/:identity", get(get_budget))
        .route("/:identity", post(set_energy_balance))
}
+142
View File
@@ -0,0 +1,142 @@
use std::sync::Arc;
use axum::extract::{FromRef, Path, Query, State};
use axum::response::IntoResponse;
use http::StatusCode;
use serde::{Deserialize, Serialize};
use spacetimedb_lib::Identity;
use crate::auth::{SpacetimeAuth, SpacetimeAuthHeader};
use crate::{log_and_500, ControlCtx, ControlNodeDelegate};
/// JSON body returned when a new identity is minted: the identity (hex) and
/// its signed login token.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CreateIdentityResponse {
    identity: String,
    token: String,
}
/// Mint a brand-new identity together with its signed token.
pub async fn create_identity(State(ctx): State<Arc<dyn ControlCtx>>) -> axum::response::Result<impl IntoResponse> {
    let auth = SpacetimeAuth::alloc(&*ctx).await?;
    Ok(axum::Json(CreateIdentityResponse {
        identity: auth.identity.to_hex(),
        token: auth.creds.token().to_owned(),
    }))
}
/// JSON body for identity lookups: all identities matching the filter.
#[derive(Debug, Clone, Serialize)]
pub struct GetIdentityResponse {
    identities: Vec<GetIdentityResponseEntry>,
}
/// One (identity, email) pair in a [`GetIdentityResponse`].
#[derive(Debug, Clone, Serialize)]
pub struct GetIdentityResponseEntry {
    identity: String,
    email: String,
}
/// Query parameters for identity lookup; only an email filter is supported.
#[derive(Deserialize)]
pub struct GetIdentityQueryParams {
    email: Option<String>,
}
/// List all identities associated with the `email` query parameter.
///
/// A missing email filter, or an email with no identities, yields 404 —
/// matching the original behavior.
pub async fn get_identity(
    State(ctx): State<Arc<dyn ControlCtx>>,
    Query(GetIdentityQueryParams { email }): Query<GetIdentityQueryParams>,
) -> axum::response::Result<impl IntoResponse> {
    let email = email.ok_or(StatusCode::NOT_FOUND)?;
    let identities = ctx
        .control_db()
        .get_identities_for_email(email.as_str())
        .map_err(log_and_500)?;
    if identities.is_empty() {
        return Err(StatusCode::NOT_FOUND.into());
    }
    let entries = identities
        .into_iter()
        .map(|identity_email| GetIdentityResponseEntry {
            identity: identity_email.identity.to_hex(),
            email: identity_email.email,
        })
        .collect();
    Ok(axum::Json(GetIdentityResponse { identities: entries }))
}
/// Path parameters for `/:identity/set-email`.
#[derive(Deserialize)]
pub struct SetEmailParams {
    /// Identity whose email association is being set.
    identity: Identity,
}
/// Query parameters for `/:identity/set-email`.
#[derive(Deserialize)]
pub struct SetEmailQueryParams {
    /// Validated email address to associate with the identity.
    email: email_address::EmailAddress,
}
pub async fn set_email(
State(ctx): State<Arc<dyn ControlCtx>>,
Path(SetEmailParams { identity }): Path<SetEmailParams>,
Query(SetEmailQueryParams { email }): Query<SetEmailQueryParams>,
auth: SpacetimeAuthHeader,
) -> axum::response::Result<impl IntoResponse> {
let auth = auth.get().ok_or(StatusCode::BAD_REQUEST)?;
if auth.identity != identity {
return Err(StatusCode::UNAUTHORIZED.into());
}
ctx.control_db()
.associate_email_spacetime_identity(identity, email.as_str())
.await
.unwrap();
Ok(())
}
/// Path parameters for `/:identity/databases`.
#[derive(Deserialize)]
pub struct GetDatabasesParams {
    /// Identity whose databases are listed.
    identity: Identity,
}
/// JSON body: hex addresses of every database owned by the identity.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GetDatabasesResponse {
    addresses: Vec<String>,
}
/// List the hex addresses of all databases owned by `identity`.
pub async fn get_databases(
    State(ctx): State<Arc<dyn ControlCtx>>,
    Path(GetDatabasesParams { identity }): Path<GetDatabasesParams>,
) -> axum::response::Result<impl IntoResponse> {
    // Linear scan for all databases that have this identity, and return their addresses
    let all_dbs = ctx.control_db().get_databases().await.map_err(|e| {
        log::error!("Failure when retrieving databases for search: {}", e);
        StatusCode::INTERNAL_SERVER_ERROR
    })?;
    let addresses = all_dbs
        .into_iter()
        .filter(|db| db.identity == identity)
        .map(|db| db.address.to_hex())
        .collect();
    Ok(axum::Json(GetDatabasesResponse { addresses }))
}
/// Identity routes: lookup/creation at the root, plus per-identity
/// email association and database listing.
pub fn router<S>() -> axum::Router<S>
where
    S: ControlNodeDelegate + Clone + 'static,
    Arc<dyn ControlCtx>: FromRef<S>,
{
    use axum::routing::{get, post};
    axum::Router::new()
        .route("/", get(get_identity).post(create_identity))
        .route("/:identity/set-email", post(set_email))
        .route("/:identity/databases", get(get_databases))
}
+43
View File
@@ -0,0 +1,43 @@
use axum::extract::{FromRef, State};
use axum::response::IntoResponse;
use std::sync::Arc;
use crate::{ControlNodeDelegate, WorkerCtx};
// #[derive(Clone, NewMiddleware)]
// pub struct MetricsAuthMiddleware;
// impl Middleware for MetricsAuthMiddleware {
// fn call<Chain>(self, state: State, chain: Chain) -> Pin<Box<HandlerFuture>>
// where
// Chain: FnOnce(State) -> Pin<Box<HandlerFuture>>,
// {
// chain(state)
// }
// }
/// Render worker metrics plus the global prometheus registry in the
/// Prometheus text exposition format. Encoding failures are logged and
/// skipped rather than failing the whole scrape.
pub async fn metrics(State(ctx): State<Arc<dyn WorkerCtx>>) -> axum::response::Result<impl IntoResponse> {
    let mut buf = String::new();
    // Worker-local metric families first, then the process-global registry —
    // same order as before.
    for families in [ctx.gather_metrics(), prometheus::gather()] {
        if let Err(e) = prometheus::TextEncoder.encode_utf8(&families, &mut buf) {
            log::error!("could not encode custom metrics: {}", e);
        }
    }
    Ok(buf)
}
/// Metrics scrape route; currently unauthenticated (see TODO below).
pub fn router<S>() -> axum::Router<S>
where
    S: ControlNodeDelegate + Clone + 'static,
    Arc<dyn WorkerCtx>: FromRef<S>,
{
    use axum::routing::get;
    axum::Router::new().route("/", get(metrics))
    // TODO:
    // .layer(MetricsAuthMiddleware)
}
+9
View File
@@ -0,0 +1,9 @@
/// Database lifecycle, naming/DNS and recovery routes.
pub mod database;
/// Energy-budget read/write routes.
pub mod energy;
/// Identity creation, lookup and email-association routes.
pub mod identity;
/// Prometheus metrics scrape endpoint.
pub mod metrics;
/// Prometheus service-discovery config endpoint.
pub mod prometheus;
/// WebSocket subscription endpoint and client actor.
pub mod subscribe;
#[cfg(feature = "tracelogging")]
pub mod tracelog;
@@ -0,0 +1,38 @@
use axum::extract::{FromRef, State};
use axum::response::IntoResponse;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
use crate::{log_and_500, ControlCtx, ControlNodeDelegate};
/// One entry of a Prometheus HTTP service-discovery response:
/// a list of scrape targets plus labels applied to all of them.
#[derive(Serialize, Deserialize)]
struct SDConfig {
    targets: Vec<String>,
    labels: HashMap<String, String>,
}
/// Emit a Prometheus HTTP service-discovery document listing every known
/// node's advertise address as a scrape target (no labels).
pub async fn get_sd_config(State(ctx): State<Arc<dyn ControlCtx>>) -> axum::response::Result<impl IntoResponse> {
    // TODO(cloutiertyler): security
    let nodes = ctx.control_db().get_nodes().await.map_err(log_and_500)?;
    let targets = nodes.into_iter().map(|node| node.advertise_addr).collect();
    let sd_config = SDConfig {
        targets,
        labels: HashMap::new(),
    };
    Ok(axum::Json(vec![sd_config]))
}
/// Prometheus service-discovery route.
pub fn router<S>() -> axum::Router<S>
where
    S: ControlNodeDelegate + Clone + 'static,
    Arc<dyn ControlCtx>: FromRef<S>,
{
    use axum::routing::get;
    axum::Router::new().route("/sd_config", get(get_sd_config))
}
+276
View File
@@ -0,0 +1,276 @@
use std::mem;
use std::pin::pin;
use std::sync::Arc;
use std::time::Duration;
use axum::extract::{Path, State};
use axum::response::IntoResponse;
use axum::TypedHeader;
use futures::{SinkExt, StreamExt};
use http::{HeaderValue, StatusCode};
use serde::Deserialize;
use spacetimedb::client::messages::{IdentityTokenMessage, ServerMessage};
use spacetimedb::client::{ClientActorId, ClientClosed, ClientConnection, DataMessage, MessageHandleError, Protocol};
use spacetimedb::host::NoSuchModule;
use spacetimedb::util::future_queue;
use tokio::sync::mpsc;
use crate::auth::{SpacetimeAuthHeader, SpacetimeIdentity, SpacetimeIdentityToken};
use crate::util::websocket::{
CloseCode, CloseFrame, Message as WsMessage, WebSocketConfig, WebSocketStream, WebSocketUpgrade,
};
use crate::util::{NameOrAddress, XForwardedFor};
use crate::{log_and_500, WorkerCtx};
/// WebSocket subprotocol token offered for the text protocol.
#[allow(clippy::declare_interior_mutable_const)]
pub const TEXT_PROTOCOL: HeaderValue = HeaderValue::from_static("v1.text.spacetimedb");
/// WebSocket subprotocol token offered for the binary protocol.
#[allow(clippy::declare_interior_mutable_const)]
pub const BIN_PROTOCOL: HeaderValue = HeaderValue::from_static("v1.bin.spacetimedb");
/// Path parameters for `/subscribe/:name_or_address`.
#[derive(Deserialize)]
pub struct SubscribeParams {
    /// Target database, by registered name or hex address.
    pub name_or_address: NameOrAddress,
}
/// Upgrade an HTTP request to a WebSocket subscription on a database.
///
/// Flow: authenticate (creating credentials if none were supplied), resolve
/// the database, negotiate a subprotocol (binary or text), ensure the module
/// host is running, then spawn a task that completes the upgrade and hands
/// the socket to [`ws_client_actor`]. The HTTP response carries the identity
/// and token headers; the same token is also sent as the first WS message.
pub async fn handle_websocket(
    State(worker_ctx): State<Arc<dyn WorkerCtx>>,
    Path(SubscribeParams { name_or_address }): Path<SubscribeParams>,
    forwarded_for: Option<TypedHeader<XForwardedFor>>,
    auth: SpacetimeAuthHeader,
    ws: WebSocketUpgrade,
) -> axum::response::Result<impl IntoResponse> {
    // Anonymous callers get fresh credentials here.
    let auth = auth.get_or_create(&*worker_ctx).await?;
    let address = name_or_address.resolve(&*worker_ctx).await?;
    // Binary is offered first, so it wins when the client accepts both.
    let (res, ws_upgrade, protocol) =
        ws.select_protocol([(BIN_PROTOCOL, Protocol::Binary), (TEXT_PROTOCOL, Protocol::Text)]);
    let protocol = protocol.ok_or((StatusCode::BAD_REQUEST, "no valid protocol selected"))?;
    // TODO: Should also maybe refactor the code and the protocol to allow a single websocket
    // to connect to multiple modules
    // NOTE(review): this unwrap panics on a DB error (as opposed to a missing
    // database, which is the 400 below) — consider map_err(log_and_500).
    let database = worker_ctx
        .get_database_by_address(&address)
        .await
        .unwrap()
        .ok_or(StatusCode::BAD_REQUEST)?;
    let database_instance = worker_ctx
        .get_leader_database_instance_by_database(database.id)
        .await
        .ok_or(StatusCode::BAD_REQUEST)?;
    let instance_id = database_instance.id;
    let identity_token = auth.creds.token().to_owned();
    // Lazily boot the module host if it isn't running yet.
    let host = worker_ctx.host_controller();
    let module = match host.get_module_host(instance_id) {
        Ok(m) => m,
        Err(_) => {
            let dbic = worker_ctx
                .load_module_host_context(database, instance_id)
                .await
                .map_err(log_and_500)?;
            host.spawn_module_host(dbic).await.map_err(log_and_500)?
        }
    };
    let client_id = ClientActorId {
        identity: auth.identity,
        name: worker_ctx.client_actor_index().next_client_name(),
    };
    // 32 MiB max message size; other limits left at defaults.
    let ws_config = WebSocketConfig {
        max_send_queue: None,
        max_message_size: Some(0x2000000),
        max_frame_size: None,
        accept_unmasked_frames: false,
    };
    // The upgrade itself completes after this handler returns, so the rest
    // happens on a spawned task.
    tokio::spawn(async move {
        let ws = match ws_upgrade.upgrade(ws_config).await {
            Ok(ws) => ws,
            Err(err) => {
                log::error!("WebSocket init error: {}", err);
                return;
            }
        };
        match forwarded_for {
            Some(TypedHeader(XForwardedFor(ip))) => log::debug!("New client connected from ip {}", ip),
            None => log::debug!("New client connected from unknown ip"),
        }
        let actor = |client, sendrx| ws_client_actor(client, ws, sendrx);
        let client = match ClientConnection::spawn(client_id, protocol, instance_id, module, actor).await {
            Ok(s) => s,
            Err(NoSuchModule) => {
                // debug here should be fine because we *just* found a module, so this should be really rare
                log::warn!("ModuleHost died while we were connecting");
                return;
            }
        };
        // Send the client their identity token message as the first message
        // NOTE: We're adding this to the protocol because some client libraries are
        // unable to access the http response headers.
        // Clients that receive the token from the response headers should ignore this
        // message.
        let message = IdentityTokenMessage {
            identity: auth.identity,
            identity_token,
        };
        if let Err(ClientClosed) = client.send_message(message).await {
            log::warn!("client closed before identity token was sent")
        }
    });
    Ok((
        TypedHeader(SpacetimeIdentity(auth.identity)),
        TypedHeader(SpacetimeIdentityToken(auth.creds)),
        res,
    ))
}
/// Interval between keepalive pings; a client that hasn't answered by the
/// next tick is considered dead and dropped (see `ws_client_actor`).
const LIVELINESS_TIMEOUT: Duration = Duration::from_secs(60);
/// Per-connection event loop: multiplexes inbound frames, outbound messages,
/// message-handling results, module exit, and liveness pings over a single
/// `select!`. Runs until the socket closes or the client times out.
async fn ws_client_actor(client: ClientConnection, mut ws: WebSocketStream, mut sendrx: mpsc::Receiver<DataMessage>) {
    let mut liveness_check_interval = tokio::time::interval(LIVELINESS_TIMEOUT);
    // Starts true so the first tick sends a ping instead of disconnecting.
    let mut got_pong = true;
    // TODO: do we want this to have a fixed capacity? or should it be unbounded
    let mut handle_queue = pin!(future_queue(|message| client.handle_message(message)));
    // Set once the peer sends a Close frame; outbound sends are dropped after.
    let mut closed = false;
    loop {
        enum Item {
            Message(ClientMessage),
            HandleResult(Result<(), MessageHandleError>),
        }
        let message = tokio::select! {
            // NOTE: all of the futures for these branches **must** be cancel safe. do not
            // change this if you don't know what that means.
            Some(res) = handle_queue.next() => Item::HandleResult(res),
            message = ws.next() => match message {
                Some(Ok(m)) => Item::Message(ClientMessage::from_message(m)),
                Some(Err(error)) => {
                    log::warn!("Websocket receive error: {}", error);
                    continue;
                }
                // the client sent us a close frame
                None => break,
            },
            Some(message) = sendrx.recv() => {
                if closed {
                    // TODO: this isn't great. when we receive a close request from the peer,
                    // tungstenite doesn't let us send any new messages on the socket,
                    // even though the websocket RFC allows it. should we fork tungstenite?
                    log::info!("dropping message due to ws already being closed: {message:?}");
                } else {
                    // TODO: I think we can be smarter about feeding messages here?
                    if let Err(error) = ws.send(datamsg_to_wsmsg(message)).await {
                        log::warn!("Websocket send error: {error}")
                    }
                }
                continue;
            }
            // The module host shut down; tell the client we're going away.
            () = client.module.exited() => {
                if let Err(e) = ws.close(Some(CloseFrame { code: CloseCode::Away, reason: "module exited".into() })).await {
                    log::warn!("error closing: {e:#}")
                }
                continue;
            }
            _ = liveness_check_interval.tick() => {
                // take() resets got_pong to false; a Pong must arrive before
                // the next tick or the client is dropped.
                if mem::take(&mut got_pong) {
                    if let Err(e) = ws.send(WsMessage::Ping(Vec::new())).await {
                        log::warn!("error sending ping: {e:#}");
                    }
                    continue;
                } else {
                    // the client never responded to our ping; drop them without trying to send them a Close
                    log::warn!("client {} timed out", client.id);
                    break;
                }
            }
        };
        match message {
            Item::Message(ClientMessage::Message(message)) => handle_queue.as_mut().push(message),
            Item::HandleResult(res) => {
                if let Err(e) = res {
                    // Execution errors are reported to the client in-band;
                    // any other handling error closes the connection.
                    if let MessageHandleError::Execution(err) = e {
                        log::error!("{err:#}");
                        let msg = err.serialize(client.protocol);
                        if let Err(error) = ws.send(datamsg_to_wsmsg(msg)).await {
                            log::warn!("Websocket send error: {error}")
                        }
                        continue;
                    }
                    log::debug!("Client caused error on text message: {}", e);
                    if let Err(e) = ws
                        .close(Some(CloseFrame {
                            code: CloseCode::Error,
                            reason: format!("{e:#}").into(),
                        }))
                        .await
                    {
                        log::warn!("error closing websocket: {e:#}")
                    };
                }
            }
            Item::Message(ClientMessage::Ping(_message)) => {
                // tungstenite replies to pings automatically; nothing to do.
                log::trace!("Received ping from client {}", client.id);
            }
            Item::Message(ClientMessage::Pong(_message)) => {
                log::trace!("Received heartbeat from client {}", client.id);
                got_pong = true;
            }
            Item::Message(ClientMessage::Close(close_frame)) => {
                // This happens in 2 cases:
                // a) We sent a Close frame and this is the ack.
                // b) This is the client telling us they want to close.
                // in either case, after the remaining messages in the queue flush,
                // ws.next() will return None and we'll exit the loop.
                // NOTE: No need to send a close frame, it's is queued
                // automatically by tungstenite.
                // if this is the closed-by-them case, let the ClientConnectionSenders know now.
                sendrx.close();
                closed = true;
                log::trace!("Close frame {:?}", close_frame);
            }
        }
    }
    log::debug!("Client connection ended");
    sendrx.close();
    // ignore NoSuchModule; if the module's already closed, that's fine
    let _ = client.module.subscription().remove_subscriber(client.id);
    // Fire the module's disconnect reducer; result intentionally ignored.
    let _ = client
        .module
        .call_identity_connected_disconnected(client.id.identity, false)
        .await;
}
/// An inbound websocket frame, pre-sorted into the cases the client actor
/// loop distinguishes.
enum ClientMessage {
    /// Text or binary frame carrying protocol payload.
    Message(DataMessage),
    Ping(Vec<u8>),
    Pong(Vec<u8>),
    Close(Option<CloseFrame<'static>>),
}
impl ClientMessage {
    /// Classify a raw tungstenite frame into a [`ClientMessage`].
    fn from_message(msg: WsMessage) -> Self {
        match msg {
            WsMessage::Text(s) => Self::Message(DataMessage::Text(s)),
            WsMessage::Binary(b) => Self::Message(DataMessage::Binary(b)),
            WsMessage::Ping(b) => Self::Ping(b),
            WsMessage::Pong(b) => Self::Pong(b),
            WsMessage::Close(frame) => Self::Close(frame),
            // WebSocket::read_message() never returns a raw Message::Frame
            WsMessage::Frame(_) => unreachable!(),
        }
    }
}
/// Wrap an outbound protocol payload in the matching websocket frame type.
fn datamsg_to_wsmsg(msg: DataMessage) -> WsMessage {
    match msg {
        DataMessage::Binary(bytes) => WsMessage::Binary(bytes),
        DataMessage::Text(text) => WsMessage::Text(text),
    }
}
+103
View File
@@ -0,0 +1,103 @@
use std::sync::Arc;
use axum::body::Bytes;
use axum::extract::{FromRef, Path, State};
use axum::response::IntoResponse;
use http::StatusCode;
use serde::Deserialize;
use spacetimedb_lib::Identity;
use tempdir::TempDir;
use spacetimedb::address::Address;
use spacetimedb::database_instance_context::DatabaseInstanceContext;
use spacetimedb::hash::hash_bytes;
use spacetimedb::host::instance_env::InstanceEnv;
use spacetimedb::host::scheduler::Scheduler;
use spacetimedb::host::tracelog::replay::replay_report;
use crate::{log_and_500, ControlNodeDelegate, WorkerCtx};
/// Path parameters for `/database/:address` (tracelog retrieval).
#[derive(Deserialize)]
pub struct GetTraceParams {
    /// Address of the database whose trace is requested.
    address: Address,
}
/// Fetch the current trace log for the database at `address`.
///
/// 404 if the database doesn't exist, has no leader instance, or has no
/// trace; 503 if the instance isn't ready.
pub async fn get_tracelog(
    State(ctx): State<Arc<dyn WorkerCtx>>,
    Path(GetTraceParams { address }): Path<GetTraceParams>,
) -> axum::response::Result<impl IntoResponse> {
    let database = ctx
        .get_database_by_address(&address)
        .await
        .map_err(log_and_500)?
        .ok_or((StatusCode::NOT_FOUND, "No such database."))?;
    // Fix: this used to `unwrap()` the Option, panicking the handler when the
    // database had no scheduled leader instance; report it to the client.
    let database_instance = ctx
        .get_leader_database_instance_by_database(database.id)
        .await
        .ok_or((StatusCode::NOT_FOUND, "No leader instance for database."))?;
    let instance_id = database_instance.id;
    let host = ctx.host_controller();
    let trace = host.get_trace(instance_id).await.map_err(|e| {
        log::error!("Unable to retrieve tracelog {}", e);
        (StatusCode::SERVICE_UNAVAILABLE, "Database instance not ready.")
    })?;
    let trace = trace.ok_or(StatusCode::NOT_FOUND)?;
    Ok(trace)
}
/// Path parameters for `/database/:address/stop` (stop tracelogging).
#[derive(Deserialize)]
pub struct StopTraceParams {
    /// Address of the database whose tracing should stop.
    address: Address,
}
pub async fn stop_tracelog(
State(ctx): State<Arc<dyn WorkerCtx>>,
Path(StopTraceParams { address }): Path<StopTraceParams>,
) -> axum::response::Result<impl IntoResponse> {
let database = ctx
.get_database_by_address(&address)
.await
.map_err(log_and_500)?
.ok_or((StatusCode::NOT_FOUND, "No such database."))?;
let database_instance = ctx.get_leader_database_instance_by_database(database.id).await;
let instance_id = database_instance.unwrap().id;
let host = ctx.host_controller();
host.stop_trace(instance_id).await.map_err(|e| {
log::error!("Unable to retrieve tracelog {}", e);
(StatusCode::SERVICE_UNAVAILABLE, "Database instance not ready.")
})?;
Ok(())
}
/// Replay an uploaded trace log against a throwaway database and return the
/// replay report as JSON.
///
/// Builds a temporary on-disk database with a fabricated identity/address,
/// then feeds the raw request body to the trace replayer.
pub async fn perform_tracelog_replay(body: Bytes) -> axum::response::Result<impl IntoResponse> {
    // Build out a temporary database
    let tmp_dir = TempDir::new("stdb_test").expect("establish tmpdir");
    let db_path = tmp_dir.path();
    let logger_path = tmp_dir.path();
    // Deterministic fake identity; the address is derived from its first 16 bytes.
    let identity = Identity {
        data: hash_bytes(b"This is a fake identity.").data,
    };
    let address = Address::from_slice(&identity.as_slice()[0..16]);
    let dbic = DatabaseInstanceContext::new(0, 0, false, identity, address, db_path.to_path_buf(), logger_path);
    let iv = InstanceEnv::new(dbic, Scheduler::dummy(&tmp_dir.path().join("scheduler")), None);
    // Run the replay inside a transaction scope on the scratch database.
    let tx = iv.dbic.relational_db.begin_tx();
    let (_, resp_body) = iv.tx.set(tx, || replay_report(&iv, &mut &body[..]));
    let resp_body = resp_body.map_err(log_and_500)?;
    Ok(axum::Json(resp_body))
}
/// Tracelog routes: fetch, stop, and offline replay.
pub fn router<S>() -> axum::Router<S>
where
    S: ControlNodeDelegate + Clone + 'static,
    Arc<dyn WorkerCtx>: FromRef<S>,
{
    use axum::routing::{get, post};
    axum::Router::new()
        .route("/database/:address", get(get_tracelog))
        .route("/database/:address/stop", post(stop_tracelog))
        .route("/replay", post(perform_tracelog_replay))
}
+108
View File
@@ -0,0 +1,108 @@
mod flat_csv;
pub mod websocket;
use std::net::IpAddr;
use axum::body::{Bytes, HttpBody};
use axum::extract::FromRequest;
use axum::headers;
use axum::response::IntoResponse;
use bytestring::ByteString;
use http::{HeaderName, HeaderValue, Request, StatusCode};
use spacetimedb::address::Address;
use crate::routes::database::DomainParsingRejection;
use crate::{log_and_500, ControlNodeDelegate};
/// Extractor wrapping the request body as a UTF-8-validated `ByteString`.
pub struct ByteStringBody(pub ByteString);
#[async_trait::async_trait]
impl<S, B> FromRequest<S, B> for ByteStringBody
where
    B: HttpBody + Send + 'static,
    B::Data: Send,
    B::Error: Into<axum::BoxError>,
    S: Send + Sync,
{
    type Rejection = axum::response::Response;
    /// Collect the whole body as bytes, then validate it as UTF-8.
    /// Invalid UTF-8 is rejected with a 400.
    async fn from_request(req: Request<B>, state: &S) -> Result<Self, Self::Rejection> {
        let bytes = Bytes::from_request(req, state)
            .await
            .map_err(IntoResponse::into_response)?;
        // Bytes -> ByteString conversion performs the UTF-8 check.
        let string = bytes
            .try_into()
            .map_err(|_| (StatusCode::BAD_REQUEST, "Request body didn't contain valid UTF-8").into_response())?;
        Ok(ByteStringBody(string))
    }
}
/// Typed `X-Forwarded-For` header carrying the originating client IP.
pub struct XForwardedFor(pub IpAddr);
impl headers::Header for XForwardedFor {
fn name() -> &'static HeaderName {
static NAME: HeaderName = HeaderName::from_static("x-forwarded-for");
&NAME
}
fn decode<'i, I: Iterator<Item = &'i HeaderValue>>(values: &mut I) -> Result<Self, headers::Error> {
let val = values.next().ok_or_else(headers::Error::invalid)?;
let val = val.to_str().map_err(|_| headers::Error::invalid())?;
let (first, _) = val.split_once(',').ok_or_else(headers::Error::invalid)?;
let ip = first.trim().parse().map_err(|_| headers::Error::invalid())?;
Ok(XForwardedFor(ip))
}
fn encode<E: Extend<HeaderValue>>(&self, values: &mut E) {
values.extend([self.0.to_string().try_into().unwrap()])
}
}
/// A database reference supplied by a client: either a concrete hex address
/// or a registered name that must be resolved through DNS.
#[derive(Clone)]
pub enum NameOrAddress {
    Address(Address),
    Name(String),
}
impl NameOrAddress {
    /// Render as a string: hex for addresses, the raw name otherwise.
    pub fn into_string(self) -> String {
        match self {
            NameOrAddress::Address(addr) => addr.to_hex(),
            NameOrAddress::Name(name) => name,
        }
    }
    /// Resolve to an address, distinguishing "name not registered" from
    /// hard failures: the outer `Result` is a server/parse error, the inner
    /// `Err(name)` hands back the unresolved name for the caller to act on
    /// (e.g. `publish` claims it as a new domain).
    pub async fn try_resolve(
        &self,
        ctx: &(impl ControlNodeDelegate + ?Sized),
    ) -> axum::response::Result<Result<Address, &str>> {
        Ok(match self {
            NameOrAddress::Address(addr) => Ok(*addr),
            NameOrAddress::Name(name) => {
                let domain = name.parse().map_err(DomainParsingRejection)?;
                ctx.spacetime_dns(&domain).await.map_err(log_and_500)?.ok_or(name)
            }
        })
    }
    /// Like [`Self::try_resolve`], but an unregistered name becomes a 400.
    pub async fn resolve(&self, ctx: &(impl ControlNodeDelegate + ?Sized)) -> axum::response::Result<Address> {
        self.try_resolve(ctx).await?.map_err(|_| StatusCode::BAD_REQUEST.into())
    }
}
impl<'de> serde::Deserialize<'de> for NameOrAddress {
    /// Deserialize from a plain string: anything that parses as a hex
    /// address is an `Address`; everything else is treated as a `Name`.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let s = String::deserialize(deserializer)?;
        Ok(match Address::from_hex(&s) {
            Ok(addr) => NameOrAddress::Address(addr),
            Err(_) => NameOrAddress::Name(s),
        })
    }
}

Some files were not shown because too many files have changed in this diff Show More