Compare commits

47 Commits

Author SHA1 Message Date
7f8c247d97 Pending a correct test of request cancellation 2026-04-22 15:58:34 +00:00
60c96964d9 bit of cleanup, few steps towards commands demo 2026-04-18 15:13:27 +00:00
6eed6b9831 Made some progress towards effectful programs 2026-04-18 13:25:03 +00:00
286040c3ec Fixed macro system by reintroducing MacTok::Resolved 2026-04-16 20:12:28 +00:00
23180b66e3 Deleted Gitea workflow 2026-04-11 11:07:04 +00:00
b44f3c1832 Updated macro system to process and return mactree and then lower it separately to gexpr
Some checks failed
Rust / build (push) Has been cancelled
2026-04-11 11:00:22 +00:00
9b4c7fa7d7 partway through fixes, macro system needs redesign
Some checks failed
Rust / build (push) Has been cancelled
2026-04-08 18:02:20 +02:00
0909524dee Compiles again after command subsystem
Some checks failed
Rust / build (push) Failing after 3m52s
terrified to start testing
2026-03-27 23:50:58 +01:00
09cfcb1839 partway towards commands
I got very confused and started mucking about with "spawn" when in fact all I needed was the "inline" extension type in orcx that allows the interpreter to expose custom constants.
2026-03-13 16:48:42 +01:00
cdcca694c5 Method refactor now compiles
Some checks failed
Rust / build (push) Has been cancelled
2026-01-29 16:28:57 +01:00
534f08b45c Significantly extended stdlib
Some checks failed
Rust / build (push) Has been cancelled
2026-01-27 20:53:45 +01:00
66e5a71032 Added subscript lexer
Some checks failed
Rust / build (push) Has been cancelled
2026-01-25 18:52:57 +01:00
c461f82de1 Custom lexers can now terminate operators
Some checks failed
Rust / build (push) Has been cancelled
New constraint: custom lexer output is dropped whenever it is used to terminate an operator nested inside another custom lexer, because the recursive call has to return exactly one lexeme
2026-01-25 17:52:18 +01:00
b9f1bb74d7 Fixed a very nasty deadlock
All checks were successful
Rust / build (push) Successful in 3m34s
2026-01-22 20:56:02 +01:00
f38193edcc Protocols and operators mostly
All checks were successful
Rust / build (push) Successful in 4m8s
2026-01-21 22:22:58 +01:00
75b05a2965 Enabled workflow
All checks were successful
Rust / build (push) Successful in 4m6s
2026-01-20 15:30:19 +01:00
9a02c1b3ff Fixed workflow
All checks were successful
Rust / build (push) Has been skipped
2026-01-20 15:29:33 +01:00
4cce216e4e Added workflow 2026-01-20 15:28:38 +01:00
237b40ed2e Fixed a hang when the cleanup code for an extension is too slow 2026-01-20 15:24:34 +01:00
cb111a8d7b Removed superfluous logs 2026-01-19 03:51:17 +01:00
48942b3b2c Unit tests pass
Fixed a nasty deadlock in reqnot
2026-01-19 00:56:03 +01:00
6a3c1d5917 Introduced dylib extension format, cleared up shutdown sequence 2026-01-17 00:23:35 +01:00
1a7230ce9b Traditional route appears to work
Beginnings of dylib extensions, entirely untested
2026-01-12 01:38:10 +01:00
32d6237dc5 task_local context over context objects
- interner impls logically separate from API in orchid-base (default host interner still in base for testing)
- error reporting, logging, and a variety of other features passed down via context in extension, not yet in host to maintain library-ish profile, should consider options
- no global spawn mechanic, the host has a spawn function but extensions only get a stash for enqueuing async work in sync callbacks which is then explicitly, manually, and with strict order popped and awaited
- still deadlocks nondeterministically for some ungodly reason
2026-01-01 14:54:29 +00:00
06debb3636 Tests pass for reqnot 2025-12-16 00:02:45 +01:00
0b2b05d44e Orchid-base uses task-local context.
Everything else is broken at the moment.
2025-12-14 17:17:43 +01:00
8753d4c751 Added docs to unsync-pipe 2025-12-14 01:32:24 +01:00
224c4ecca2 Added unsync-pipe with some tests 2025-12-13 02:28:10 +01:00
0f89cde246 added binary-safe (hopefully) pipe for upcoming dylib extension support 2025-12-12 17:32:01 +01:00
85d45cf0ef Unboxed whatever I could 2025-12-11 16:33:49 +01:00
d211f3127d Added untested comm impl 2025-12-11 16:25:46 +01:00
4e4dc381ea Fixed match, and enabled macro keywords to share names with constants 2025-11-30 02:30:42 +01:00
ecf151158d Pattern matching works now 2025-11-27 22:47:02 +01:00
4f989271c5 Clarifications and waiver 2025-11-21 18:56:30 +01:00
219033be0d Updated readme 2025-11-21 15:13:41 +01:00
663ff612ba License fixes 2025-11-21 15:06:26 +01:00
603efef28e New macro system and stdlib additions 2025-11-21 14:25:03 +01:00
b77653f841 Added support for defining macros in Rust within the macro system
Also fixed a lot of bugs
2025-09-30 21:23:16 +02:00
7971a2b4eb Correctly halts 2025-09-16 22:54:22 +02:00
ee45dbd28e Hide todos in the legacy folder 2025-09-09 16:37:37 +02:00
ce08021e79 exec working up to halt
clean shutdown doesn't for some reason
2025-09-09 16:30:49 +02:00
e339350505 Phased out async-stream in pursuit of compile performance 2025-09-04 15:01:53 +02:00
088cb6a247 updated all deps
migrated away from paste and async-std
2025-09-03 18:42:54 +02:00
7031f3a7d8 Macro system done in theory
too afraid to begin debugging, resting for a moment
2025-09-03 16:05:26 +02:00
051b5e666f First steps for the macro system 2025-08-01 18:32:55 +02:00
f87185ef88 RA leaks memory in code-server, switching back to desktop 2025-07-31 14:31:26 +00:00
769c6cfc9f Various progress, doesn't compile
Added prelude, made lambdas a single-token prefix like NS, made progress on implementations, removed const line type
2025-07-31 00:30:41 +02:00
362 changed files with 15401 additions and 21929 deletions

View File

@@ -1,11 +1,24 @@
[alias]
xtask = "run --quiet --package xtask --"
orcx = "xtask orcx"
orcxdb = "xtask orcxdb"
orcx = "xtask orcx --"
orcxdb = "xtask orcxdb --"
[env]
CARGO_WORKSPACE_DIR = { value = "", relative = true }
ORCHID_EXTENSIONS = "target/debug/orchid-std"
ORCHID_DEFAULT_SYSTEMS = "orchid::std"
ORCHID_EXTENSIONS = "target/debug/orchid_std"
#ORCHID_EXTENSIONS = "target/debug/orchid-std-piped"
ORCHID_DEFAULT_SYSTEMS = "orchid::std;orchid::macros"
ORCHID_LOG_BUFFERS = "true"
RUSTBACKTRACE = "1"
RUST_BACKTRACE = "1"
[build]
# rustflags = ["-Znext-solver"]
[profile.dev]
opt-level = 0
debug = 2
strip = 'none'
debug-assertions = true
overflow-checks = true
lto = false
panic = 'abort'

13
.github/FUNDING.yml vendored
View File

@@ -1,13 +0,0 @@
# These are supported funding model platforms
github: lbfalvy
patreon: lbfalvy
open_collective: # Replace with a single Open Collective username
ko_fi: lbfalvy
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']

View File

@@ -1,29 +0,0 @@
name: Rust
on:
push:
branches: [ "master" ]
pull_request:
branches: [ "master" ]
env:
CARGO_TERM_COLOR: always
jobs:
build:
if: ${{ false }} # <- This makes sure the workflow is skipped without any alert
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Install rust toolchain
run: curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain nightly
- name: Build
run: $HOME/.cargo/bin/cargo build --verbose
- name: Run tests
run: $HOME/.cargo/bin/cargo test --verbose
- name: Clippy
run: cargo clippy
- name: Formatting
run: cargo fmt +nightly --check

1830
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -2,13 +2,17 @@
resolver = "2"
members = [
"orcx",
"orchid-std",
"orchid-host",
"orchid-extension",
"orchid-base",
"orchid-api",
"orchid-api-derive",
"orchid-api-traits",
"stdio-perftest", "xtask", "orchid-macros",
"orcx",
"orchid-std",
"orchid-host",
"orchid-extension",
"orchid-base",
"orchid-api",
"orchid-api-derive",
"orchid-api-traits",
"stdio-perftest",
"xtask",
"async-fn-stream",
"unsync-pipe",
"orchid-async-utils",
]

11
LICENCE Normal file
View File

@@ -0,0 +1,11 @@
THIS SOFTWARE IS PROVIDED WITHOUT WARRANTY
The code in this repository is free for noncommercial use, including derivative works and inclusion in other software if those are also free for noncommercial use. Commercial use, or inclusion in any derivative works licensed for commercial use is forbidden under this general licence.
Identifying marks stored in the repository are restricted for use with an unmodified copy of this software. If you distribute modified versions of this software, you must either replace these identifying marks or modify them in a way that clearly indicates that what you are distributing is a derivative work and not this official version. You must also replace any contact information in such a way that your derivative work does not suggest that we may be contacted about issues. Your derivative work may use the original identifying marks and contact information to identify this project as its basis, while emphasizing that the authors of the original project are neither in control of, nor liable for the derivative work.
Identifying marks include the Orchid logo, the ribbon image in the readme, and the names "Orchid", "Orchidlang" unless they are part of a technical interface.
Contact information includes email addresses, links to the source code and issue tracker.
Words listed as identifying marks are explicitly not considered as such when they appear in technical interfaces or APIs. For example, shell commands, identifiers within Orchid or Rust code, and names in package registries are not considered identifying marks.

674
LICENSE
View File

@@ -1,674 +0,0 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.

View File

@@ -7,7 +7,7 @@ An experimental lazy, pure functional programming language designed to be embedd
## Usage
The standalone interpreter can be built as the binary target from this package. The language tutorial and standard library documentation is at [www.lbfalvy.com/orchid-reference](https://lbfalvy.github.io/orchid-reference/). Embedder guide and Rust API documentation are coming soon.
Updated language tutorial, standard library documentation, embedder guide and Rust API documentation coming soon.
## Design
@@ -19,10 +19,10 @@ Namespaces are inspired by Rust modules and ES6. Every file and directory is imp
## Try it out
The project uses the nightly rust toolchain. Go to one of the folders within `examples` and run
The project uses both the stable and nightly rust toolchain. Run the examples with
```sh
cargo run --release
cargo orcx --release exec --proj ./examples/hello-world "src::main::main"
```
you can try modifying the examples, but error reporting for the time being is pretty terrible.
@@ -35,12 +35,14 @@ Orchids and mangrove trees form complex ecosystems; The flowers persuade the tre
All contributions are welcome. For the time being, use the issue tracker to discuss ideas.
## Forks
Unless we agree on different terms, by contributing to this software you declare that you have created or otherwise have the right to license your contribution, agree to license it publicly under the general noncommercial licence included in this repository, and grant me (the owner of the project) a permanent, unrestricted license to use, modify, distribute and relicense your contribution. You retain ownership of your intellectual property to ensure that the copyleft protections cementing the noncommercial availability of the code are preserved.
The code in this repository is available under the GNU GPLv3, but identifying marks stored in the repository are restricted for use with an unmodified copy of this software. If you distribute modified versions of this software, you must either replace these identifying marks or modify them in a way that clearly indicates that what you are distributing is a derivative work and not this official version. You must also replace any contact information in such a way that your derivative work does not suggest that we may be contacted about issues. Your derivative work may use the original identifying marks and contact information to identify this project as its basis, while emphasizing that the authors of the original project are neither in control of, nor liable for the derivative work.
## About the license
Identifying marks include the Orchid logo, the ribbon image above, and the names "Orchid", "Orchidlang" unless they are part of a technical interface.
This software is free for noncommercial use. If you would like to use it for commercial purposes, or distribute your derivative work under a license that permits commercial use, contact me for a separate license. These licences are provided on a case-by-case basis with any limitations and compensation we agree on.
Contact information includes email addresses, links to the source code and issue tracker.
I generally appreciate the ethos of free software, and particularly the patterns used in copyleft to cement the guarantees of the licence. However, I don't think commercial entities fit that ethos, and I think they should be addressed separately rather than attempting to ignore the inherent unfairness towards contributors.
Words listed as identifying marks are explicitly not considered as such when they appear in technical interfaces or APIs. For example, shell commands, identifiers within Orchid or Rust code, and names in package registries are not considered as identifying marks.
My intent with the custom license included in this project is to enable the strong guarantees of copyleft towards noncommercial users, while leaving commercial users to engage with this project and its possible future ecosystem in a commercial way; if you intend to profit off my work, the barest cash flow should justify shooting me an email and agreeing on a simple temporary profit sharing deal until you figure out your business model, and the cash flow of a full scale business should more than justify dedicated attention to the software you rely on.
The clause about identifying marks is intended to prevent another pitfall of open-source, wherein Linux distros borrow entire codebases, break them, and then distribute the result under the original author's name. If you would like to package Orchid, I'd be delighted if you would talk to me about making it official, but if you would rather operate independently, you should present your project as the rogue derivative work that it is rather than borrowing the original project's identity for something its owner has no control over.

View File

@@ -1,6 +1,8 @@
Since the macro AST is built as a custom tokenizer inside the system, it needs access to the import set. On the other hand, import sets aren't available until after parsing. Need a way to place this order in a lexer without restricting the expression value of the lexer.
Decide whether we need patterns at runtime. Maybe macros aren't obligated to return MacTree so destructuring can be done in a safer and easier way?
The daft option of accepting import resolution queries at runtime is available but consider better options.
Double-check type and templating logic in the note, it's a bit fishy.
Consider whether all macros need to be loaded or the const references could be used to pre-filter for a given let line.
## alternate extension mechanism

View File

@@ -0,0 +1,10 @@
[package]
name = "async-fn-stream"
version = "0.1.0"
edition = "2024"
[dependencies]
futures = { version = "0.3.31", features = ["std"], default-features = false }
[dev-dependencies]
test_executors = "0.4.1"

137
async-fn-stream/src/lib.rs Normal file
View File

@@ -0,0 +1,137 @@
use std::marker::PhantomData;
use futures::channel::mpsc;
use futures::stream::{PollNext, select_with_strategy};
use futures::{FutureExt, SinkExt, Stream, StreamExt};
/// Handle through which the driving closure pushes values into the stream.
/// Dropping it ends the stream; the closure will not be polled afterwards.
pub struct StreamCtx<'a, T>(mpsc::Sender<T>, PhantomData<&'a ()>);
impl<T> StreamCtx<'_, T> {
  /// Send one value to the consumer, suspending until there is room
  /// in the channel.
  ///
  /// # Panics
  /// Panics if the receiving stream was dropped while the driving
  /// closure is still alive.
  pub async fn emit(&mut self, item: T) {
    let outcome = self.0.send(item).await;
    outcome.expect("Dropped a stream receiver without dropping the driving closure");
  }
}
/// Polling strategy that always favours the left-hand stream, so the
/// driving future is polled before buffered items are drained.
fn left_strat(_state: &mut ()) -> PollNext {
  PollNext::Left
}
/// Create a stream from an async function acting as a coroutine
///
/// The closure receives a [`StreamCtx`] and yields items with
/// [`StreamCtx::emit`]; the returned stream ends once the closure
/// returns and the channel is drained.
pub fn stream<'a, T: 'a>(
  f: impl for<'b> AsyncFnOnce(StreamCtx<'b, T>) + 'a,
) -> impl Stream<Item = T> + 'a {
  let (tx, rx) = mpsc::channel::<T>(1);
  let driver = async move { f(StreamCtx(tx, PhantomData)).await };
  // Options mark driver completion vs. real items, guaranteeing the
  // closure is driven to exhaustion before the stream terminates.
  let completion = driver.into_stream().map(|()| None);
  let items = rx.map(Some);
  select_with_strategy(completion, items, left_strat).filter_map(async |slot| slot)
}
/// Create a stream of result from a fallible function.
///
/// On success the closure hands back its [`StreamCtx`], which is simply
/// dropped; on failure the error is surfaced as the stream's final item.
pub fn try_stream<'a, T: 'a, E: 'a>(
  f: impl for<'b> AsyncFnOnce(StreamCtx<'b, T>) -> Result<StreamCtx<'b, T>, E> + 'a,
) -> impl Stream<Item = Result<T, E>> + 'a {
  let (tx, rx) = mpsc::channel::<T>(1);
  let driver = async move { f(StreamCtx(tx, PhantomData)).await };
  // An Err outcome becomes the last item; Ok just closes the channel.
  let outcome = driver.into_stream().map(|res| match res {
    Ok(_ctx) => None,
    Err(e) => Some(Err(e)),
  });
  let successes = rx.map(|value| Some(Ok(value)));
  select_with_strategy(outcome, successes, left_strat).filter_map(async |slot| slot)
}
#[cfg(test)]
mod test {
  use std::task::Poll;
  use std::{future, pin};
  use futures::channel::mpsc::channel;
  use futures::{Stream, StreamExt, TryStreamExt};
  use test_executors::spin_on;
  use crate::{stream, try_stream};
  /// A closure that only suspends on `emit` yields all values in order.
  #[test]
  fn sync() {
    spin_on(async {
      let v = stream(async |mut cx| {
        for i in 0..5 {
          cx.emit(i).await
        }
      })
      .collect::<Vec<_>>()
      .await;
      assert_eq!(v, [0, 1, 2, 3, 4])
    })
  }
  #[test]
  /// The exact behaviour of the poll function under blocked use
  fn with_delay() {
    spin_on(async {
      // Zero-capacity channel used to park the driving closure midway.
      let (mut send, mut recv) = channel(0);
      let mut s = pin::pin!(stream(async |mut cx| {
        for i in 0..2 {
          cx.emit(i).await
        }
        // Suspend here until the test unblocks us through `send`.
        recv.next().await;
        for i in 2..5 {
          cx.emit(i).await
        }
      }));
      let mut log = String::new();
      // Drive the stream by hand, recording the outcome of every poll.
      let log = future::poll_fn(|cx| {
        match s.as_mut().poll_next(cx) {
          Poll::Ready(Some(r)) => log += &format!("Found {r}\n"),
          Poll::Ready(None) => return Poll::Ready(format!("{log}Ended")),
          // Pending means the closure is parked on `recv`; release it.
          // try_send failing here would indicate an unexpected second block.
          Poll::Pending => match send.try_send(()) {
            Ok(()) => log += "Unblocked\n",
            Err(err) => return Poll::Ready(format!("{log}Unblock err: {err}")),
          },
        }
        Poll::Pending
      })
      .await;
      // Exactly one Pending (the Unblocked line) is expected between item 1 and 2.
      const EXPECTED: &str = "\
        Found 0\n\
        Found 1\n\
        Unblocked\n\
        Found 2\n\
        Found 3\n\
        Found 4\n\
        Ended";
      assert_eq!(log, EXPECTED)
    })
  }
  /// A fallible closure that returns Ok produces a fully Ok stream.
  #[test]
  fn sync_try_all_ok() {
    spin_on(async {
      let v = try_stream::<_, ()>(async |mut cx| {
        for i in 0..5 {
          cx.emit(i).await
        }
        Ok(cx)
      })
      .try_collect::<Vec<_>>()
      .await;
      assert_eq!(v, Ok(vec![0, 1, 2, 3, 4]))
    })
  }
  /// A fallible closure that returns Err surfaces the error through
  /// try_collect, discarding the previously emitted values.
  #[test]
  fn sync_try_err() {
    spin_on(async {
      let v = try_stream::<_, ()>(async |mut cx| {
        for i in 0..5 {
          cx.emit(i).await
        }
        Err(())
      })
      .try_collect::<Vec<_>>()
      .await;
      assert_eq!(v, Err(()))
    })
  }
}

View File

@@ -1,2 +1,15 @@
const user = "dave"
const main = println "Hello $user!" exit_status::success
let user = r[ "foo" 1, "bar" t[3, 4] ]
let _main = user.bar.1
let main = "foo" + string::slice "hello" 1 3 + "bar"
let io_main = (
stdio::get_stdout \stdout
std::stream::write_str stdout "Hello, World!"
(std::stream::flush
(std::stream::close
orchid::cmd::exit
\e e)
\e e)
\e e
)

21
notes/commands.md Normal file
View File

@@ -0,0 +1,21 @@
# Foreword
Commands exist to allow programs to choose a course of action rather than merely respond to external queries with data. In a sense, commands are data that describes the totality of what a program will do.
Commands are not equivalent to the instructions of an imperative programming language. If that were the case, they would describe a transition from a previous state to a new state in which additional instructions may be attempted, but commands do not have an "outcome". To the extent that they allow the continuation to be selected, they must encode that within themselves.
## Are commands unique
Yes. Since they own the entire program, and since all expressions are younger than all their subexpressions, they cannot be exactly identical to any other subexpression.
## Are commands exclusive
Not really. What control flow primitives exist between commands is up to the environment / interpreter. It may make sense to introduce a "parallel" primitive depending on the nature of the commands. In the abstract, we cannot talk about "the current command".
# Extensions
The orchid embedder and extension API mean something different by command than any particular programmer or embedder does, and something different still from what Orcx and systems programmers do. The Orchid extension API should not assume any capability that may make an embedder's job unduly difficult.
## Continuation
Since commands are expected to be composed into arbitrarily deep TC structures, to avoid a memory leak, commands should not remain passively present in the system; they must be able to express certain outcomes as plain data and return. The most obvious of such outcomes is the single continuation wherein a subexpression evaluating to another command from the same set will eventually run, and it can emulate more complex patterns by continuing with a call to an environment constant which expresses the more complex outcome in terms of its parameters.

View File

@@ -6,13 +6,13 @@ Reference loops are resource leaks. There are two primary ways to avoid referenc
- Constants reference their constituent Expressions
- Expressions reference Atoms
- During evaluation, Constants replace their unbound names with Constants
- There is a reference cycle here, but it always goes through a Constant.
> **todo** A potential fix may be to update all Constants to point to a dummy value before freeing Trees
- There is a reference cycle here, but it always goes through a Constant.
> **todo** A potential fix may be to update all Constants to point to a dummy value before freeing Trees
- Atoms reference the Systems that implement them
- Atoms may reference Expressions that are not younger than them
- This link is managed by the System but tied to Atom and not System lifecycle
- Atoms can technically be applied to themselves, but it's a copying apply so it probably isn't a risk factor
- This link is managed by the System but tied to Atom and not System lifecycle
- Atoms can technically be applied to themselves, but it's a copying apply so it probably isn't a risk factor
- Systems reference the Extension that contains them
- Extensions reference the Port that connects them
- The Extension signals the remote peer to disconnect on drop
- The port is also referenced in a loose receiver thread, which always eventually tries to find the Extension or polls for ingress so it always eventually exits after the Extension's drop handler is called
- The Extension signals the remote peer to disconnect on drop
- The port is also referenced in a loose receiver thread, which always eventually tries to find the Extension or polls for ingress so it always eventually exits after the Extension's drop handler is called

View File

@@ -17,11 +17,11 @@ Priority numbers are written in hexadecimal normal form to avoid precision bugs,
- **32-39**: Binary operators, in inverse priority order
- **80-87**: Expression-like structures such as if/then/else
- **128-135**: Anything that creates lambdas
Programs triggered by a lower priority pattern than this can assume that all names are correctly bound
Programs triggered by a lower priority pattern than this can assume that all names are correctly bound
- **200**: Aliases extracted for readability
The user-accessible entry points of all macro programs must be lower priority than this, so any arbitrary syntax can be extracted into an alias with no side effects
The user-accessible entry points of all macro programs must be lower priority than this, so any arbitrary syntax can be extracted into an alias with no side effects
- **224-231**: Integration; documented hooks exposed by a macro package to allow third party packages to extend its functionality
The `statement` pattern produced by `do{}` blocks and matched by `let` and `cps` is a good example of this. When any of these are triggered, all macro programs are in a documented state.
The `statement` pattern produced by `do{}` blocks and matched by `let` and `cps` is a good example of this. When any of these are triggered, all macro programs are in a documented state.
- **248-255**: Transitional states within macro programs get the highest priority
The numbers are arbitrary and up for debate. These are just the ones I came up with when writing the examples.

View File

@@ -42,9 +42,9 @@ Prioritised macro patterns must start and end with a vectorial placeholder. They
Macros are checked from the outermost block inwards.
1. For every name token, test all named macros starting with that name
1. If the tail is implicit, continue iterating
1. If the tail is implicit, continue iterating
2. Test all prioritized macros
1. Take the first rule that matches in the highest prioritized block
1. Take the first rule that matches in the highest prioritized block
Test all in a set of macros
1. Take the first rule that matches in each block
@@ -75,26 +75,26 @@ Recursion has to happen through the interpreter itself, so the macro system is d
- line parser `macro` parses a macro with the existing logic
- atom `MacRecurState` holds the recursion state
- function `resolve_recur` finds all matches on a MacTree
- type: `MacRecurState -> MacTree -> MacTree`
- use all relevant macros to find all matches in the tree
- since macros must contain a locally defined token, it can be assumed that at the point that a constant is evaluated and all imports in the parent module have been resolved, necessarily all relevant macro rules must have been loaded
- for each match
- check for recursion violations
- wrap the body in iife-s corresponding to the named values in the match state
- emit a recursive call to process and run the body, and pass the same recursive call as argument for the macro to use
- type: `MacRecurState -> MacTree -> MacTree`
- use all relevant macros to find all matches in the tree
- since macros must contain a locally defined token, it can be assumed that at the point that a constant is evaluated and all imports in the parent module have been resolved, necessarily all relevant macro rules must have been loaded
- for each match
- check for recursion violations
- wrap the body in iife-s corresponding to the named values in the match state
- emit a recursive call to process and run the body, and pass the same recursive call as argument for the macro to use
```
(\recur. lower (recur $body) recur)
(resolve_recur $mac_recur_state)
```
- emit a single call to `instantiate_tpl` which receives all of these
- emit a single call to `instantiate_tpl` which receives all of these
- function `instantiate_tpl` inserts `MacTree` values into a `MacTree(tpl)`
- type: `MacTree(tpl) [-> MacTree] -> MacTree`
- type: `MacTree(tpl) [-> MacTree] -> MacTree`
_this function deduces the number of arguments from the first argument. This combines poorly with autocurry, but it's an easy way to avoid representing standalone tree lists_
- walks the tree to find max template slot number, reads and type checks as many template values
- returns the populated tree
- walks the tree to find max template slot number, reads and type checks as many template values
- returns the populated tree
- function `resolve` is the main entry point of the code
- type: `MacTree -> MacTree`
- invokes `resolve_recur` with an empty `MacRecurState`
- type: `MacTree -> MacTree`
- invokes `resolve_recur` with an empty `MacRecurState`
- function `lower` is the main exit point of the code
- type: `MacTree -> any`
- Lowers `MacTree` into the equivalent `Expr`.
- type: `MacTree -> any`
- Lowers `MacTree` into the equivalent `Expr`.

View File

@@ -0,0 +1,2 @@
[profile.dev]
panic = 'unwind'

View File

@@ -9,9 +9,8 @@ proc-macro = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
quote = "1.0.38"
syn = { version = "2.0.95" }
quote = "1.0.42"
syn = { version = "2.0.112" }
orchid-api-traits = { version = "0.1.0", path = "../orchid-api-traits" }
proc-macro2 = "1.0.92"
darling = "0.20.10"
proc-macro2 = "1.0.104"
itertools = "0.14.0"

View File

@@ -12,10 +12,10 @@ pub fn derive(input: TokenStream) -> TokenStream {
let decode = decode_body(&input.data);
let expanded = quote! {
impl #impl_generics orchid_api_traits::Decode for #name #ty_generics #where_clause {
async fn decode<R: orchid_api_traits::async_std::io::Read + ?Sized>(
async fn decode<R: orchid_api_traits::AsyncRead + ?Sized>(
mut read: std::pin::Pin<&mut R>
) -> Self {
#decode
) -> std::io::Result<Self> {
Ok(#decode)
}
}
};
@@ -30,7 +30,7 @@ fn decode_fields(fields: &syn::Fields) -> pm2::TokenStream {
let syn::Field { ty, ident, .. } = &f;
quote! {
#ident : (Box::pin(< #ty as orchid_api_traits::Decode>::decode(read.as_mut()))
as std::pin::Pin<Box<dyn std::future::Future<Output = _>>>).await
as std::pin::Pin<Box<dyn std::future::Future<Output = std::io::Result<_>>>>).await?
}
});
quote! { { #( #exprs, )* } }
@@ -40,7 +40,7 @@ fn decode_fields(fields: &syn::Fields) -> pm2::TokenStream {
let ty = &field.ty;
quote! {
(Box::pin(< #ty as orchid_api_traits::Decode>::decode(read.as_mut()))
as std::pin::Pin<Box<dyn std::future::Future<Output = _>>>).await,
as std::pin::Pin<Box<dyn std::future::Future<Output = std::io::Result<_>>>>).await?,
}
});
quote! { ( #( #exprs )* ) }
@@ -62,7 +62,7 @@ fn decode_body(data: &syn::Data) -> proc_macro2::TokenStream {
quote! { #id => Self::#ident #fields, }
});
quote! {
match <u8 as orchid_api_traits::Decode>::decode(read.as_mut()).await {
match <u8 as orchid_api_traits::Decode>::decode(read.as_mut()).await? {
#(#opts)*
x => panic!("Unrecognized enum kind {x}")
}

View File

@@ -14,11 +14,12 @@ pub fn derive(input: TokenStream) -> TokenStream {
let encode = encode_body(&input.data);
let expanded = quote! {
impl #e_impl_generics orchid_api_traits::Encode for #name #e_ty_generics #e_where_clause {
async fn encode<W: orchid_api_traits::async_std::io::Write + ?Sized>(
async fn encode<W: orchid_api_traits::AsyncWrite + ?Sized>(
&self,
mut write: std::pin::Pin<&mut W>
) {
#encode
) -> std::io::Result<()> {
#encode;
Ok(())
}
}
};
@@ -43,7 +44,7 @@ fn encode_body(data: &syn::Data) -> Option<pm2::TokenStream> {
quote! {
Self::#ident #dest => {
(Box::pin((#i as u8).encode(write.as_mut()))
as std::pin::Pin<Box<dyn std::future::Future<Output = _>>>).await;
as std::pin::Pin<Box<dyn std::future::Future<Output = std::io::Result<()>>>>).await?;
#body
}
}
@@ -61,7 +62,7 @@ fn encode_body(data: &syn::Data) -> Option<pm2::TokenStream> {
fn encode_names<T: ToTokens>(names: impl Iterator<Item = T>) -> pm2::TokenStream {
quote! { #(
(Box::pin(#names .encode(write.as_mut()))
as std::pin::Pin<Box<dyn std::future::Future<Output = _>>>).await;
as std::pin::Pin<Box<dyn std::future::Future<Output = std::io::Result<()>>>>).await?;
)* }
}

View File

@@ -120,6 +120,3 @@ fn get_ancestry(input: &DeriveInput) -> Option<Vec<pm2::TokenStream>> {
fn is_extendable(input: &DeriveInput) -> bool {
input.attrs.iter().any(|a| a.path().get_ident().is_some_and(|i| *i == "extendable"))
}
#[test]
fn test_wtf() { eprintln!("{}", gen_casts(&[quote!(ExtHostReq)], &quote!(BogusReq))) }

View File

@@ -6,9 +6,8 @@ edition = "2024"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
async-std = "1.13.0"
async-stream = "0.3.6"
futures = "0.3.31"
chrono = "0.4.43"
futures = { version = "0.3.31", features = ["std"], default-features = false }
itertools = "0.14.0"
never = "0.1.0"
ordered-float = "5.0.0"
ordered-float = "5.1.0"

View File

@@ -1,34 +1,45 @@
use std::collections::HashMap;
use std::future::Future;
use std::hash::Hash;
use std::io;
use std::num::NonZero;
use std::ops::{Range, RangeInclusive};
use std::pin::Pin;
use std::rc::Rc;
use std::sync::Arc;
use std::time::Duration;
use async_std::io::{Read, ReadExt, Write, WriteExt};
use async_stream::stream;
use futures::StreamExt;
use futures::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use never::Never;
use ordered_float::NotNan;
use crate::encode_enum;
use crate::{decode_err, decode_err_for, encode_enum, spin_on};
pub trait Decode: 'static {
pub trait Decode: 'static + Sized {
/// Decode an instance from the beginning of the buffer. Return the decoded
/// data and the remaining buffer.
fn decode<R: Read + ?Sized>(read: Pin<&mut R>) -> impl Future<Output = Self> + '_;
fn decode<R: AsyncRead + ?Sized>(
read: Pin<&mut R>,
) -> impl Future<Output = io::Result<Self>> + '_;
fn decode_slice(slc: &mut &[u8]) -> Self {
spin_on(Self::decode(Pin::new(slc) as Pin<&mut _>)).expect("Decode from slice cannot fail")
}
}
pub trait Encode {
/// Append an instance of the struct to the buffer
fn encode<W: Write + ?Sized>(&self, write: Pin<&mut W>) -> impl Future<Output = ()>;
fn encode<W: AsyncWrite + ?Sized>(
&self,
write: Pin<&mut W>,
) -> impl Future<Output = io::Result<()>>;
fn encode_vec(&self, vec: &mut Vec<u8>) {
spin_on(self.encode(Pin::new(vec) as Pin<&mut _>)).expect("Encode to vector cannot fail")
}
}
pub trait Coding: Encode + Decode + Clone {
fn get_decoder<T: 'static, F: Future<Output = T> + 'static>(
map: impl Fn(Self) -> F + Clone + 'static,
) -> impl AsyncFn(Pin<&mut dyn Read>) -> T {
async move |r| map(Self::decode(r).await).await
fn get_decoder<T: 'static>(
map: impl AsyncFn(Self) -> T + Clone + 'static,
) -> impl AsyncFn(Pin<&mut dyn AsyncRead>) -> io::Result<T> {
async move |r| Ok(map(Self::decode(r).await?).await)
}
}
impl<T: Encode + Decode + Clone> Coding for T {}
@@ -36,15 +47,15 @@ impl<T: Encode + Decode + Clone> Coding for T {}
macro_rules! num_impl {
($number:ty) => {
impl Decode for $number {
async fn decode<R: Read + ?Sized>(mut read: Pin<&mut R>) -> Self {
async fn decode<R: AsyncRead + ?Sized>(mut read: Pin<&mut R>) -> io::Result<Self> {
let mut bytes = [0u8; (<$number>::BITS / 8) as usize];
read.read_exact(&mut bytes).await.unwrap();
<$number>::from_be_bytes(bytes)
read.read_exact(&mut bytes).await?;
Ok(<$number>::from_be_bytes(bytes))
}
}
impl Encode for $number {
async fn encode<W: Write + ?Sized>(&self, mut write: Pin<&mut W>) {
write.write_all(&self.to_be_bytes()).await.expect("Could not write number")
async fn encode<W: AsyncWrite + ?Sized>(&self, mut write: Pin<&mut W>) -> io::Result<()> {
write.write_all(&self.to_be_bytes()).await
}
}
};
@@ -63,12 +74,12 @@ num_impl!(i8);
macro_rules! nonzero_impl {
($name:ty) => {
impl Decode for NonZero<$name> {
async fn decode<R: Read + ?Sized>(read: Pin<&mut R>) -> Self {
Self::new(<$name as Decode>::decode(read).await).unwrap()
async fn decode<R: AsyncRead + ?Sized>(read: Pin<&mut R>) -> io::Result<Self> {
Self::new(<$name as Decode>::decode(read).await?).ok_or_else(decode_err)
}
}
impl Encode for NonZero<$name> {
async fn encode<W: Write + ?Sized>(&self, write: Pin<&mut W>) {
async fn encode<W: AsyncWrite + ?Sized>(&self, write: Pin<&mut W>) -> io::Result<()> {
self.get().encode(write).await
}
}
@@ -87,20 +98,22 @@ nonzero_impl!(i64);
nonzero_impl!(i128);
impl<T: Encode + ?Sized> Encode for &T {
async fn encode<W: Write + ?Sized>(&self, write: Pin<&mut W>) { (**self).encode(write).await }
async fn encode<W: AsyncWrite + ?Sized>(&self, write: Pin<&mut W>) -> io::Result<()> {
(**self).encode(write).await
}
}
macro_rules! float_impl {
($t:ty, $size:expr) => {
impl Decode for NotNan<$t> {
async fn decode<R: Read + ?Sized>(mut read: Pin<&mut R>) -> Self {
async fn decode<R: AsyncRead + ?Sized>(mut read: Pin<&mut R>) -> io::Result<Self> {
let mut bytes = [0u8; $size];
read.read_exact(&mut bytes).await.unwrap();
NotNan::new(<$t>::from_be_bytes(bytes)).expect("Float was NaN")
read.read_exact(&mut bytes).await?;
NotNan::new(<$t>::from_be_bytes(bytes)).map_err(|_| decode_err())
}
}
impl Encode for NotNan<$t> {
async fn encode<W: Write + ?Sized>(&self, mut write: Pin<&mut W>) {
write.write_all(&self.as_ref().to_be_bytes()).await.expect("Could not write number")
async fn encode<W: AsyncWrite + ?Sized>(&self, mut write: Pin<&mut W>) -> io::Result<()> {
write.write_all(&self.as_ref().to_be_bytes()).await
}
}
};
@@ -110,72 +123,77 @@ float_impl!(f64, 8);
float_impl!(f32, 4);
impl Decode for String {
async fn decode<R: Read + ?Sized>(mut read: Pin<&mut R>) -> Self {
let len = u64::decode(read.as_mut()).await.try_into().unwrap();
async fn decode<R: AsyncRead + ?Sized>(mut read: Pin<&mut R>) -> io::Result<Self> {
let len: usize = u64::decode(read.as_mut()).await?.try_into().map_err(decode_err_for)?;
let mut data = vec![0u8; len];
read.read_exact(&mut data).await.unwrap();
std::str::from_utf8(&data).expect("String invalid UTF-8").to_owned()
read.read_exact(&mut data).await?;
Ok(std::str::from_utf8(&data).map_err(decode_err_for)?.to_owned())
}
}
impl Encode for String {
async fn encode<W: Write + ?Sized>(&self, mut write: Pin<&mut W>) {
u64::try_from(self.len()).unwrap().encode(write.as_mut()).await;
write.write_all(self.as_bytes()).await.unwrap()
async fn encode<W: AsyncWrite + ?Sized>(&self, mut write: Pin<&mut W>) -> io::Result<()> {
u64::try_from(self.len()).map_err(decode_err_for)?.encode(write.as_mut()).await?;
write.write_all(self.as_bytes()).await
}
}
impl Encode for str {
async fn encode<W: Write + ?Sized>(&self, mut write: Pin<&mut W>) {
u64::try_from(self.len()).unwrap().encode(write.as_mut()).await;
write.write_all(self.as_bytes()).await.unwrap()
async fn encode<W: AsyncWrite + ?Sized>(&self, mut write: Pin<&mut W>) -> io::Result<()> {
u64::try_from(self.len()).map_err(decode_err_for)?.encode(write.as_mut()).await?;
write.write_all(self.as_bytes()).await
}
}
impl<T: Decode> Decode for Vec<T> {
async fn decode<R: Read + ?Sized>(mut read: Pin<&mut R>) -> Self {
let len = u64::decode(read.as_mut()).await.try_into().unwrap();
stream! { loop { yield T::decode(read.as_mut()).await } }.take(len).collect().await
async fn decode<R: AsyncRead + ?Sized>(mut read: Pin<&mut R>) -> io::Result<Self> {
let len = u64::decode(read.as_mut()).await?;
let mut values = Vec::with_capacity(len.try_into().map_err(decode_err_for)?);
for _ in 0..len {
values.push(T::decode(read.as_mut()).await?);
}
Ok(values)
}
}
impl<T: Encode> Encode for Vec<T> {
async fn encode<W: Write + ?Sized>(&self, write: Pin<&mut W>) {
async fn encode<W: AsyncWrite + ?Sized>(&self, write: Pin<&mut W>) -> io::Result<()> {
self.as_slice().encode(write).await
}
}
impl<T: Encode> Encode for [T] {
async fn encode<W: Write + ?Sized>(&self, mut write: Pin<&mut W>) {
u64::try_from(self.len()).unwrap().encode(write.as_mut()).await;
async fn encode<W: AsyncWrite + ?Sized>(&self, mut write: Pin<&mut W>) -> io::Result<()> {
u64::try_from(self.len()).unwrap().encode(write.as_mut()).await?;
for t in self.iter() {
t.encode(write.as_mut()).await
t.encode(write.as_mut()).await?
}
Ok(())
}
}
impl<T: Decode> Decode for Option<T> {
async fn decode<R: Read + ?Sized>(mut read: Pin<&mut R>) -> Self {
match u8::decode(read.as_mut()).await {
0 => None,
1 => Some(T::decode(read).await),
x => panic!("{x} is not a valid option value"),
}
async fn decode<R: AsyncRead + ?Sized>(mut read: Pin<&mut R>) -> io::Result<Self> {
Ok(match bool::decode(read.as_mut()).await? {
false => None,
true => Some(T::decode(read).await?),
})
}
}
impl<T: Encode> Encode for Option<T> {
async fn encode<W: Write + ?Sized>(&self, mut write: Pin<&mut W>) {
let t = if let Some(t) = self { t } else { return 0u8.encode(write.as_mut()).await };
1u8.encode(write.as_mut()).await;
t.encode(write).await;
async fn encode<W: AsyncWrite + ?Sized>(&self, mut write: Pin<&mut W>) -> io::Result<()> {
self.is_some().encode(write.as_mut()).await?;
if let Some(t) = self {
t.encode(write).await?
}
Ok(())
}
}
impl<T: Decode, E: Decode> Decode for Result<T, E> {
async fn decode<R: Read + ?Sized>(mut read: Pin<&mut R>) -> Self {
match u8::decode(read.as_mut()).await {
0 => Self::Ok(T::decode(read).await),
1 => Self::Err(E::decode(read).await),
x => panic!("Invalid Result tag {x}"),
}
async fn decode<R: AsyncRead + ?Sized>(mut read: Pin<&mut R>) -> io::Result<Self> {
Ok(match bool::decode(read.as_mut()).await? {
false => Self::Ok(T::decode(read).await?),
true => Self::Err(E::decode(read).await?),
})
}
}
impl<T: Encode, E: Encode> Encode for Result<T, E> {
async fn encode<W: Write + ?Sized>(&self, write: Pin<&mut W>) {
async fn encode<W: AsyncWrite + ?Sized>(&self, write: Pin<&mut W>) -> io::Result<()> {
match self {
Ok(t) => encode_enum(write, 0, |w| t.encode(w)).await,
Err(e) => encode_enum(write, 1, |w| e.encode(w)).await,
@@ -183,30 +201,37 @@ impl<T: Encode, E: Encode> Encode for Result<T, E> {
}
}
impl<K: Decode + Eq + Hash, V: Decode> Decode for HashMap<K, V> {
async fn decode<R: Read + ?Sized>(mut read: Pin<&mut R>) -> Self {
let len = u64::decode(read.as_mut()).await.try_into().unwrap();
stream! { loop { yield <(K, V)>::decode(read.as_mut()).await } }.take(len).collect().await
async fn decode<R: AsyncRead + ?Sized>(mut read: Pin<&mut R>) -> io::Result<Self> {
let len = u64::decode(read.as_mut()).await?;
let mut map = HashMap::with_capacity(len.try_into().map_err(decode_err_for)?);
for _ in 0..len {
map.insert(K::decode(read.as_mut()).await?, V::decode(read.as_mut()).await?);
}
Ok(map)
}
}
impl<K: Encode + Eq + Hash, V: Encode> Encode for HashMap<K, V> {
async fn encode<W: Write + ?Sized>(&self, mut write: Pin<&mut W>) {
u64::try_from(self.len()).unwrap().encode(write.as_mut()).await;
for pair in self.iter() {
pair.encode(write.as_mut()).await
async fn encode<W: AsyncWrite + ?Sized>(&self, mut write: Pin<&mut W>) -> io::Result<()> {
u64::try_from(self.len()).unwrap().encode(write.as_mut()).await?;
for (key, value) in self.iter() {
key.encode(write.as_mut()).await?;
value.encode(write.as_mut()).await?;
}
Ok(())
}
}
macro_rules! tuple {
(($($t:ident)*) ($($T:ident)*)) => {
impl<$($T: Decode),*> Decode for ($($T,)*) {
async fn decode<R: Read + ?Sized>(mut read: Pin<&mut R>) -> Self {
($($T::decode(read.as_mut()).await,)*)
async fn decode<R: AsyncRead + ?Sized>(mut read: Pin<&mut R>) -> io::Result<Self> {
Ok(($($T::decode(read.as_mut()).await?,)*))
}
}
impl<$($T: Encode),*> Encode for ($($T,)*) {
async fn encode<W: Write + ?Sized>(&self, mut write: Pin<&mut W>) {
async fn encode<W: AsyncWrite + ?Sized>(&self, mut write: Pin<&mut W>) -> io::Result<()> {
let ($($t,)*) = self;
$( $t.encode(write.as_mut()).await; )*
$( $t.encode(write.as_mut()).await?; )*
Ok(())
}
}
};
@@ -230,59 +255,67 @@ tuple!((t u v x y z a b c d e f g h i) (T U V X Y Z A B C D E F G H I));
tuple!((t u v x y z a b c d e f g h i j) (T U V X Y Z A B C D E F G H I J)); // 16
impl Decode for () {
async fn decode<R: Read + ?Sized>(_: Pin<&mut R>) -> Self {}
async fn decode<R: AsyncRead + ?Sized>(_: Pin<&mut R>) -> io::Result<Self> { Ok(()) }
}
impl Encode for () {
async fn encode<W: Write + ?Sized>(&self, _: Pin<&mut W>) {}
async fn encode<W: AsyncWrite + ?Sized>(&self, _: Pin<&mut W>) -> io::Result<()> { Ok(()) }
}
impl Decode for Never {
async fn decode<R: Read + ?Sized>(_: Pin<&mut R>) -> Self {
async fn decode<R: AsyncRead + ?Sized>(_: Pin<&mut R>) -> io::Result<Self> {
unreachable!("A value of Never cannot exist so it can't have been serialized");
}
}
impl Encode for Never {
async fn encode<W: Write + ?Sized>(&self, _: Pin<&mut W>) { match *self {} }
async fn encode<W: AsyncWrite + ?Sized>(&self, _: Pin<&mut W>) -> io::Result<()> {
match *self {}
}
}
impl Decode for bool {
async fn decode<R: Read + ?Sized>(mut read: Pin<&mut R>) -> Self {
async fn decode<R: AsyncRead + ?Sized>(mut read: Pin<&mut R>) -> io::Result<Self> {
let mut buf = [0];
read.read_exact(&mut buf).await.unwrap();
buf[0] != 0
read.read_exact(&mut buf).await?;
Ok(buf[0] != 0)
}
}
impl Encode for bool {
async fn encode<W: Write + ?Sized>(&self, mut write: Pin<&mut W>) {
write.write_all(&[if *self { 0xffu8 } else { 0u8 }]).await.unwrap()
async fn encode<W: AsyncWrite + ?Sized>(&self, mut write: Pin<&mut W>) -> io::Result<()> {
write.write_all(&[if *self { 0xffu8 } else { 0u8 }]).await
}
}
impl<T: Decode, const N: usize> Decode for [T; N] {
async fn decode<R: Read + ?Sized>(mut read: Pin<&mut R>) -> Self {
// TODO: figure out how to do this in safe rust on the stack
let v =
stream! { loop { yield T::decode(read.as_mut()).await } }.take(N).collect::<Vec<_>>().await;
v.try_into().unwrap_or_else(|_| unreachable!("The length of this stream is statically known"))
async fn decode<R: AsyncRead + ?Sized>(mut read: Pin<&mut R>) -> io::Result<Self> {
let mut v = Vec::with_capacity(N);
for _ in 0..N {
v.push(T::decode(read.as_mut()).await?);
}
match v.try_into() {
Err(_) => unreachable!("The length of this stream is statically known"),
Ok(arr) => Ok(arr),
}
}
}
impl<T: Encode, const N: usize> Encode for [T; N] {
async fn encode<W: Write + ?Sized>(&self, mut write: Pin<&mut W>) {
async fn encode<W: AsyncWrite + ?Sized>(&self, mut write: Pin<&mut W>) -> io::Result<()> {
for t in self.iter() {
t.encode(write.as_mut()).await
t.encode(write.as_mut()).await?
}
Ok(())
}
}
macro_rules! two_end_range {
($this:ident, $name:tt, $op:tt, $start:expr, $end:expr) => {
impl<T: Decode> Decode for $name<T> {
async fn decode<R: Read + ?Sized>(mut read: Pin<&mut R>) -> Self {
T::decode(read.as_mut()).await $op T::decode(read).await
async fn decode<R: AsyncRead + ?Sized>(mut read: Pin<&mut R>) -> io::Result<Self> {
Ok(T::decode(read.as_mut()).await? $op T::decode(read).await?)
}
}
impl<T: Encode> Encode for $name<T> {
async fn encode<W: Write + ?Sized>(&self, mut write: Pin<&mut W>) {
async fn encode<W: AsyncWrite + ?Sized>(&self, mut write: Pin<&mut W>) -> io::Result<()> {
let $this = self;
($start).encode(write.as_mut()).await;
($end).encode(write).await;
($start).encode(write.as_mut()).await?;
($end).encode(write).await?;
Ok(())
}
}
}
@@ -294,12 +327,14 @@ two_end_range!(x, RangeInclusive, ..=, x.start(), x.end());
macro_rules! smart_ptr {
($name:tt) => {
impl<T: Decode> Decode for $name<T> {
async fn decode<R: Read + ?Sized>(read: Pin<&mut R>) -> Self {
$name::new(T::decode(read).await)
async fn decode<R: AsyncRead + ?Sized>(read: Pin<&mut R>) -> io::Result<Self> {
Ok($name::new(T::decode(read).await?))
}
}
impl<T: Encode> Encode for $name<T> {
async fn encode<W: Write + ?Sized>(&self, write: Pin<&mut W>) { (**self).encode(write).await }
async fn encode<W: AsyncWrite + ?Sized>(&self, write: Pin<&mut W>) -> io::Result<()> {
(**self).encode(write).await
}
}
};
}
@@ -309,12 +344,45 @@ smart_ptr!(Rc);
smart_ptr!(Box);
impl Decode for char {
async fn decode<R: Read + ?Sized>(read: Pin<&mut R>) -> Self {
char::from_u32(u32::decode(read).await).unwrap()
async fn decode<R: AsyncRead + ?Sized>(read: Pin<&mut R>) -> io::Result<Self> {
char::from_u32(u32::decode(read).await?).ok_or_else(decode_err)
}
}
impl Encode for char {
async fn encode<W: Write + ?Sized>(&self, write: Pin<&mut W>) {
async fn encode<W: AsyncWrite + ?Sized>(&self, write: Pin<&mut W>) -> io::Result<()> {
(*self as u32).encode(write).await
}
}
impl Decode for Duration {
async fn decode<R: AsyncRead + ?Sized>(mut read: Pin<&mut R>) -> io::Result<Self> {
Ok(Self::new(u64::decode(read.as_mut()).await?, u32::decode(read).await?))
}
}
impl Encode for Duration {
async fn encode<W: AsyncWrite + ?Sized>(&self, mut write: Pin<&mut W>) -> io::Result<()> {
self.as_secs().encode(write.as_mut()).await?;
self.subsec_nanos().encode(write).await
}
}
impl Decode for chrono::TimeDelta {
async fn decode<R: AsyncRead + ?Sized>(mut read: Pin<&mut R>) -> io::Result<Self> {
Ok(Self::new(i64::decode(read.as_mut()).await?, u32::decode(read).await?).unwrap())
}
}
impl Encode for chrono::TimeDelta {
async fn encode<W: AsyncWrite + ?Sized>(&self, mut write: Pin<&mut W>) -> io::Result<()> {
self.num_seconds().encode(write.as_mut()).await?;
self.subsec_nanos().encode(write).await
}
}
impl Decode for chrono::DateTime<chrono::Utc> {
async fn decode<R: AsyncRead + ?Sized>(read: Pin<&mut R>) -> io::Result<Self> {
Ok(Self::from_timestamp_micros(i64::decode(read).await?).unwrap())
}
}
impl Encode for chrono::DateTime<chrono::Utc> {
async fn encode<W: AsyncWrite + ?Sized>(&self, write: Pin<&mut W>) -> io::Result<()> {
self.timestamp_micros().encode(write).await
}
}

View File

@@ -1,24 +1,24 @@
use std::future::Future;
use std::pin::Pin;
use std::error::Error;
use std::io;
use std::pin::{Pin, pin};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::task::{Context, Poll, Wake};
use async_std::io::{Read, ReadExt, Write, WriteExt};
use futures::{AsyncRead, AsyncReadExt, AsyncWrite};
use itertools::{Chunk, Itertools};
use crate::Encode;
pub async fn encode_enum<'a, W: Write + ?Sized, F: Future<Output = ()>>(
pub async fn encode_enum<'a, W: AsyncWrite + ?Sized>(
mut write: Pin<&'a mut W>,
id: u8,
f: impl FnOnce(Pin<&'a mut W>) -> F,
) {
id.encode(write.as_mut()).await;
f: impl AsyncFnOnce(Pin<&'a mut W>) -> io::Result<()>,
) -> io::Result<()> {
id.encode(write.as_mut()).await?;
f(write).await
}
pub async fn write_exact<W: Write + ?Sized>(mut write: Pin<&mut W>, bytes: &'static [u8]) {
write.write_all(bytes).await.expect("Failed to write exact bytes")
}
pub fn print_bytes(b: &[u8]) -> String {
(b.iter().map(|b| format!("{b:02x}")))
.chunks(4)
@@ -27,16 +27,52 @@ pub fn print_bytes(b: &[u8]) -> String {
.join(" ")
}
pub async fn read_exact<R: Read + ?Sized>(mut read: Pin<&mut R>, bytes: &'static [u8]) {
pub async fn read_exact<R: AsyncRead + ?Sized>(
mut read: Pin<&mut R>,
bytes: &'static [u8],
) -> io::Result<()> {
let mut data = vec![0u8; bytes.len()];
read.read_exact(&mut data).await.expect("Failed to read bytes");
if data != bytes {
panic!("Wrong bytes!\nExpected: {}\nFound: {}", print_bytes(bytes), print_bytes(&data));
read.read_exact(&mut data).await?;
if data == bytes {
Ok(())
} else {
let msg =
format!("Wrong bytes!\nExpected: {}\nFound: {}", print_bytes(bytes), print_bytes(&data));
Err(io::Error::new(io::ErrorKind::InvalidData, msg))
}
}
pub async fn enc_vec(enc: &impl Encode) -> Vec<u8> {
pub fn enc_vec(enc: &impl Encode) -> Vec<u8> {
let mut vec = Vec::new();
enc.encode(Pin::new(&mut vec)).await;
enc.encode_vec(&mut vec);
vec
}
/// Raises a bool flag when called
struct FlagWaker(AtomicBool);
impl Wake for FlagWaker {
fn wake(self: Arc<Self>) { self.0.store(true, Ordering::Relaxed) }
}
pub fn spin_on<F: Future>(fut: F) -> F::Output {
let flag = AtomicBool::new(false);
let flag_waker = Arc::new(FlagWaker(flag));
let mut future = pin!(fut);
loop {
let waker = flag_waker.clone().into();
let mut ctx = Context::from_waker(&waker);
match future.as_mut().poll(&mut ctx) {
// ideally the future should return synchronously
Poll::Ready(res) => break res,
// poorly written futures may yield and immediately wake
Poll::Pending if flag_waker.0.load(Ordering::Relaxed) => (),
// there is no external event to wait for, this has to be a deadlock
Poll::Pending => panic!("Future inside spin_on cannot block"),
};
}
}
pub fn decode_err() -> io::Error { io::Error::new(io::ErrorKind::InvalidData, "Unexpected zero") }
pub fn decode_err_for(e: impl Error) -> io::Error {
io::Error::new(io::ErrorKind::InvalidData, e.to_string())
}

View File

@@ -8,8 +8,8 @@ impl TLBool for TLTrue {}
pub struct TLFalse;
impl TLBool for TLFalse {}
/// A type that implements [Hierarchy]. Used to select implementations of traits
/// on the hierarchy
/// A type that implements [Extends] or a root. Used to select implementations
/// of traits on the hierarchy
pub trait InHierarchy: Clone {
/// Indicates that this hierarchy element is a leaf. Leaves can never have
/// children
@@ -29,25 +29,21 @@ pub trait Extends: InHierarchy<IsRoot = TLFalse> + Into<Self::Parent> {
pub trait UnderRootImpl<IsRoot: TLBool>: Sized {
type __Root: UnderRoot<IsRoot = TLTrue, Root = Self::__Root>;
fn __into_root(self) -> Self::__Root;
fn __try_from_root(root: Self::__Root) -> Result<Self, Self::__Root>;
}
pub trait UnderRoot: InHierarchy {
type Root: UnderRoot<IsRoot = TLTrue, Root = Self::Root>;
fn into_root(self) -> Self::Root;
fn try_from_root(root: Self::Root) -> Result<Self, Self::Root>;
}
impl<T: InHierarchy + UnderRootImpl<T::IsRoot>> UnderRoot for T {
type Root = <Self as UnderRootImpl<<Self as InHierarchy>::IsRoot>>::__Root;
fn into_root(self) -> Self::Root { self.__into_root() }
fn try_from_root(root: Self::Root) -> Result<Self, Self::Root> { Self::__try_from_root(root) }
}
impl<T: InHierarchy<IsRoot = TLTrue>> UnderRootImpl<TLTrue> for T {
type __Root = Self;
fn __into_root(self) -> Self::__Root { self }
fn __try_from_root(root: Self::__Root) -> Result<Self, Self::__Root> { Ok(root) }
}
impl<T: InHierarchy<IsRoot = TLFalse> + Extends> UnderRootImpl<TLFalse> for T {
@@ -57,8 +53,4 @@ impl<T: InHierarchy<IsRoot = TLFalse> + Extends> UnderRootImpl<TLFalse> for T {
fn __into_root(self) -> Self::__Root {
<Self as Into<<Self as Extends>::Parent>>::into(self).into_root()
}
fn __try_from_root(root: Self::__Root) -> Result<Self, Self::__Root> {
let parent = <Self as Extends>::Parent::try_from_root(root)?;
parent.clone().try_into().map_err(|_| parent.into_root())
}
}

View File

@@ -3,8 +3,8 @@ mod helpers;
mod hierarchy;
mod relations;
pub use async_std;
pub use coding::*;
pub use futures::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
pub use helpers::*;
pub use hierarchy::*;
pub use relations::*;

View File

@@ -1,27 +1,30 @@
use core::fmt;
use std::future::Future;
use never::Never;
use super::coding::Coding;
use crate::helpers::enc_vec;
pub trait Request: fmt::Debug + Coding + Sized + 'static {
pub trait Request: fmt::Debug + Sized + 'static {
type Response: fmt::Debug + Coding + 'static;
}
pub async fn respond<R: Request>(_: &R, rep: R::Response) -> Vec<u8> { enc_vec(&rep).await }
pub async fn respond_with<R: Request, F: Future<Output = R::Response>>(
r: &R,
f: impl FnOnce(&R) -> F,
) -> Vec<u8> {
respond(r, f(r).await).await
}
pub fn respond<R: Request>(_: &R, rep: R::Response) -> Vec<u8> { enc_vec(&rep) }
pub trait Channel: 'static {
type Req: Coding + Sized + 'static;
type Notif: Coding + Sized + 'static;
}
impl Channel for Never {
type Notif = Never;
type Req = Never;
}
pub trait MsgSet: Sync + 'static {
type In: Channel;
type Out: Channel;
}
impl MsgSet for Never {
type In = Never;
type Out = Never;
}

View File

@@ -6,10 +6,12 @@ edition = "2024"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
ordered-float = "5.0.0"
ordered-float = "5.1.0"
orchid-api-traits = { version = "0.1.0", path = "../orchid-api-traits" }
orchid-api-derive = { version = "0.1.0", path = "../orchid-api-derive" }
async-std = "1.13.0"
futures = { version = "0.3.31", features = ["std"], default-features = false }
itertools = "0.14.0"
unsync-pipe = { version = "0.2.0", path = "../unsync-pipe" }
[dev-dependencies]
test_executors = "0.3.2"
test_executors = "0.4.1"

View File

@@ -1,14 +1,26 @@
use std::fmt;
use std::num::NonZeroU64;
use itertools::Itertools;
use orchid_api_derive::{Coding, Hierarchy};
use orchid_api_traits::Request;
use crate::{
ExprTicket, Expression, ExtHostReq, FormattingUnit, HostExtNotif, HostExtReq, OrcResult, SysId,
TStrv,
};
use crate::{ExprTicket, Expression, ExtHostReq, FormattingUnit, HostExtReq, SysId, TStrv};
pub type AtomData = Vec<u8>;
#[derive(Clone, Coding)]
pub struct AtomData(pub Vec<u8>);
impl fmt::Debug for AtomData {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut byte_strings = self.0.iter().map(|b| format!("{b:02x}"));
if self.0.len() < 32 {
write!(f, "AtomData({})", byte_strings.join(" "))
} else {
let data_table =
byte_strings.chunks(32).into_iter().map(|mut chunk| chunk.join(" ")).join("\n");
write!(f, "AtomData(\n{}\n)", data_table)
}
}
}
/// Unique ID associated with atoms that have an identity
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Coding)]
@@ -16,7 +28,7 @@ pub struct AtomId(pub NonZeroU64);
/// An atom owned by an implied system. Usually used in responses from a system.
/// This has the same semantics as [Atom] except in that the owner is implied.
#[derive(Clone, Debug, Hash, PartialEq, Eq, Coding)]
#[derive(Clone, Debug, Coding)]
pub struct LocalAtom {
pub drop: Option<AtomId>,
pub data: AtomData,
@@ -27,7 +39,7 @@ impl LocalAtom {
/// An atom representation that can be serialized and sent around. Atoms
/// represent the smallest increment of work.
#[derive(Clone, Debug, Hash, PartialEq, Eq, Coding)]
#[derive(Clone, Debug, Coding)]
pub struct Atom {
/// Instance ID of the system that created the atom
pub owner: SysId,
@@ -35,21 +47,21 @@ pub struct Atom {
/// Construction is always explicit and atoms are never cloned.
///
/// Atoms with `drop == None` are also known as trivial, they can be
/// duplicated and stored with no regard to expression lifetimes. NOTICE
/// duplicated and stored with no regard to expression lifetimes. Note
/// that this only applies to the atom. If it's referenced with an
/// [ExprTicket], the ticket itself can still expire.
///
/// Notice also that the atoms still expire when the system is dropped, and
/// Note also that the atoms still expire when the system is dropped, and
/// are not portable across instances of the same system, so this doesn't
/// imply that the atom is serializable.
pub drop: Option<AtomId>,
/// Data stored in the atom. This could be a key into a map, or the raw data
/// of the atom if it isn't too big.
/// Data stored in the atom. This could be a key into a map, the raw data
/// of the atom if it isn't too big, or even a pointer.
pub data: AtomData,
}
/// Attempt to apply an atom as a function to an expression
#[derive(Clone, Debug, Hash, PartialEq, Eq, Coding, Hierarchy)]
#[derive(Clone, Debug, Coding, Hierarchy)]
#[extends(AtomReq, HostExtReq)]
pub struct CallRef(pub Atom, pub ExprTicket);
impl Request for CallRef {
@@ -59,14 +71,14 @@ impl Request for CallRef {
/// Attempt to apply an atom as a function, consuming the atom and enabling the
/// library to reuse its data structures rather than duplicating them. This is an
/// optimization over [CallRef] followed by [AtomDrop].
#[derive(Clone, Debug, Hash, PartialEq, Eq, Coding, Hierarchy)]
#[derive(Clone, Debug, Coding, Hierarchy)]
#[extends(AtomReq, HostExtReq)]
pub struct FinalCall(pub Atom, pub ExprTicket);
impl Request for FinalCall {
type Response = Expression;
}
#[derive(Clone, Debug, Hash, PartialEq, Eq, Coding, Hierarchy)]
#[derive(Clone, Debug, Coding, Hierarchy)]
#[extends(AtomReq, HostExtReq)]
pub struct SerializeAtom(pub Atom);
impl Request for SerializeAtom {
@@ -77,51 +89,46 @@ impl Request for SerializeAtom {
#[extends(HostExtReq)]
pub struct DeserAtom(pub SysId, pub Vec<u8>, pub Vec<ExprTicket>);
impl Request for DeserAtom {
type Response = Atom;
type Response = LocalAtom;
}
/// A request blindly routed to the system that provides an atom.
#[derive(Clone, Debug, Hash, PartialEq, Eq, Coding, Hierarchy)]
#[derive(Clone, Debug, Coding, Hierarchy)]
#[extends(AtomReq, HostExtReq)]
pub struct Fwded(pub Atom, pub TStrv, pub Vec<u8>);
impl Request for Fwded {
type Response = Option<Vec<u8>>;
}
#[derive(Clone, Debug, Hash, PartialEq, Eq, Coding, Hierarchy)]
#[derive(Clone, Debug, Coding, Hierarchy)]
#[extends(ExtHostReq)]
pub struct Fwd(pub Atom, pub TStrv, pub Vec<u8>);
pub struct Fwd {
pub target: Atom,
pub method: TStrv,
pub body: Vec<u8>,
}
impl Request for Fwd {
type Response = Option<Vec<u8>>;
}
#[derive(Clone, Debug, Coding)]
pub enum NextStep {
Continue(Expression),
Halt,
}
#[derive(Clone, Debug, Hash, PartialEq, Eq, Coding, Hierarchy)]
#[extends(AtomReq, HostExtReq)]
pub struct Command(pub Atom);
impl Request for Command {
type Response = OrcResult<NextStep>;
}
/// Notification that an atom is being dropped because its associated expression
/// isn't referenced anywhere. This should have no effect if the atom's `drop`
/// flag is false.
#[derive(Clone, Debug, Hash, PartialEq, Eq, Coding, Hierarchy)]
#[extends(HostExtNotif)]
#[extends(HostExtReq)]
pub struct AtomDrop(pub SysId, pub AtomId);
impl Request for AtomDrop {
type Response = ();
}
#[derive(Clone, Debug, Hash, PartialEq, Eq, Coding, Hierarchy)]
#[derive(Clone, Debug, Coding, Hierarchy)]
#[extends(AtomReq, HostExtReq)]
pub struct AtomPrint(pub Atom);
impl Request for AtomPrint {
type Response = FormattingUnit;
}
#[derive(Clone, Debug, Hash, PartialEq, Eq, Coding, Hierarchy)]
#[derive(Clone, Debug, Coding, Hierarchy)]
#[extends(ExtHostReq)]
pub struct ExtAtomPrint(pub Atom);
impl Request for ExtAtomPrint {
@@ -129,14 +136,13 @@ impl Request for ExtAtomPrint {
}
/// Requests that apply to an existing atom instance
#[derive(Clone, Debug, Hash, PartialEq, Eq, Coding, Hierarchy)]
#[derive(Clone, Debug, Coding, Hierarchy)]
#[extends(HostExtReq)]
#[extendable]
pub enum AtomReq {
CallRef(CallRef),
FinalCall(FinalCall),
Fwded(Fwded),
Command(Command),
AtomPrint(AtomPrint),
SerializeAtom(SerializeAtom),
}
@@ -146,7 +152,6 @@ impl AtomReq {
pub fn get_atom(&self) -> &Atom {
match self {
Self::CallRef(CallRef(a, ..))
| Self::Command(Command(a))
| Self::FinalCall(FinalCall(a, ..))
| Self::Fwded(Fwded(a, ..))
| Self::AtomPrint(AtomPrint(a))

91
orchid-api/src/binary.rs Normal file
View File

@@ -0,0 +1,91 @@
//! # Binary extension definition
//!
//! A binary extension is a DLL / shared object / dylib with a symbol called
//! `orchid_extension_main` which accepts a single argument of type
[ExtensionContext]. Once that is received, communication continues through
//! the channel with the same protocol outlined in [crate::proto]
use unsync_pipe::{Reader, Writer};
/// !Send !Sync owned waker
///
/// This object is [Clone] for convenience but it has `drop` and no `clone` so
/// interactions must reflect a single logical owner
#[derive(Clone, Copy)]
#[repr(C)]
pub struct OwnedWakerBin {
  /// Opaque state pointer, passed to each function below as its first argument
  pub data: *const (),
  /// `self`; releases the waker's state without waking
  pub drop: extern "C" fn(*const ()),
  /// `self`; wakes the task and consumes the waker
  pub wake: extern "C" fn(*const ()),
  /// `&self`; wakes the task while leaving the waker usable
  pub wake_ref: extern "C" fn(*const ()),
}
/// !Send !Sync, equivalent to `&mut Context<'a>`, hence no `drop`.
/// When received in [FutureBin::poll], it must not outlive the call.
///
/// You cannot directly wake using this waker, because such a trampoline would
/// pass through the binary interface twice for no reason. An efficient
/// implementation should implement that trampoline action internally, whereas
/// an inefficient but compliant implementation can clone a fresh waker and use
/// it up.
#[derive(Clone, Copy)]
#[repr(C)]
pub struct FutureContextBin {
  /// Opaque state pointer, passed to `waker` as its first argument
  pub data: *const (),
  /// `&self`; obtains an [OwnedWakerBin] tied to the same task
  pub waker: extern "C" fn(*const ()) -> OwnedWakerBin,
}
/// ABI-stable `Poll<()>`
#[derive(Clone, Copy)]
#[repr(C)]
pub enum UnitPoll {
  /// Corresponds to `Poll::Pending`
  Pending,
  /// Corresponds to `Poll::Ready(())`
  Ready,
}
/// ABI-stable `Pin<Box<dyn Future<Output = ()>>>`
///
/// This object is [Clone] for convenience, but it has `drop` and no `clone` so
/// interactions must reflect a single logical owner
#[derive(Clone, Copy)]
#[repr(C)]
pub struct FutureBin {
  /// Opaque state pointer, passed to each function below as its first argument
  pub data: *const (),
  /// `self`; releases the future without completing it
  pub drop: extern "C" fn(*const ()),
  /// `&mut self` Equivalent to [Future::poll]
  pub poll: extern "C" fn(*const (), FutureContextBin) -> UnitPoll,
}
/// Handle for a runtime that allows its holder to spawn futures across dynamic
/// library boundaries
#[derive(Clone, Copy)]
#[repr(C)]
pub struct SpawnerBin {
  /// Opaque state pointer, passed to each function below as its first argument
  pub data: *const (),
  /// `self`; releases the handle
  pub drop: extern "C" fn(*const ()),
  /// `&self` Add a future to this extension's task after a configurable delay
  /// measured in milliseconds. By itself, a pending timer never prevents
  /// extension shutdown.
  pub spawn: extern "C" fn(*const (), u64, FutureBin),
}
/// Extension context.
///
/// Passed by value to the extension's `orchid_extension_main` entry point.
///
/// This struct is a plain old value, all of the contained values have a
/// distinct `drop` member
#[repr(C)]
pub struct ExtensionContext {
  /// Spawns tasks associated with this extension
  pub spawner: SpawnerBin,
  /// serialized [crate::HostExtChannel]
  pub input: Reader,
  /// serialized [crate::ExtHostChannel]
  pub output: Writer,
  /// UTF-8 log stream directly to log service.
  pub log: Writer,
}

View File

@@ -1,4 +1,5 @@
use std::num::NonZeroU16;
use std::rc::Rc;
use std::sync::Arc;
use orchid_api_derive::Coding;
@@ -28,7 +29,7 @@ pub struct OrcError {
pub description: TStr,
/// Specific information about the exact error, preferably containing concrete
/// values.
pub message: Arc<String>,
pub message: Rc<String>,
/// Specific code fragments that have contributed to the emergence of the
/// error.
pub locations: Vec<ErrLocation>,

View File

@@ -1,17 +1,25 @@
use std::fmt;
use std::num::NonZeroU64;
use orchid_api_derive::{Coding, Hierarchy};
use orchid_api_traits::Request;
use crate::{Atom, ExtHostNotif, ExtHostReq, Location, OrcError, SysId, TStrv};
use crate::{
Atom, ExtHostNotif, ExtHostReq, FormattingUnit, LocalAtom, Location, OrcError, SysId, TStrv,
};
/// An arbitrary ID associated with an expression on the host side. Incoming
/// tickets always come with some lifetime guarantee, which can be extended with
/// [Acquire].
///
/// The ID is globally unique within its lifetime, but may be reused.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Coding)]
#[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Coding)]
pub struct ExprTicket(pub NonZeroU64);
impl fmt::Debug for ExprTicket {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "ExprTicket({:x})", self.0.get())
}
}
/// Acquire a strong reference to an expression. This keeps it alive until a
/// corresponding [Release] is emitted. The number of times a system has
@@ -37,17 +45,6 @@ pub struct Acquire(pub SysId, pub ExprTicket);
#[extends(ExprNotif, ExtHostNotif)]
pub struct Release(pub SysId, pub ExprTicket);
/// Decrement the reference count for one system and increment it for another,
/// to indicate passing an owned reference. Equivalent to [Acquire] followed by
/// [Release].
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Coding, Hierarchy)]
#[extends(ExprNotif, ExtHostNotif)]
pub struct Move {
pub dec: SysId,
pub inc: SysId,
pub expr: ExprTicket,
}
/// A description of a new expression. It is used as the return value of
/// [crate::atom::Call] or [crate::atom::CallRef], or a constant in the
/// [crate::tree::Tree].
@@ -61,7 +58,8 @@ pub enum ExpressionKind {
/// template
Arg(u64),
/// Insert the specified host-expression in the template here. When the clause
/// is used in the const tree, this variant is forbidden.
/// is used in the const tree, this variant is forbidden. The ticket held
/// within is always owning. To avoid a leak, it must be deserialized.
Slot(ExprTicket),
/// The lhs must be fully processed before the rhs can be processed.
/// Equivalent to Haskell's function of the same name
@@ -69,8 +67,7 @@ pub enum ExpressionKind {
/// Insert a new atom in the tree. When the clause is used in the const tree,
/// the atom must be trivial. This is always a newly constructed atom, if you
/// want to reference an existing atom, use the corresponding [ExprTicket].
/// Because the atom is newly constructed, it also must belong to this system.
NewAtom(Atom),
NewAtom(LocalAtom),
/// A reference to a constant
Const(TStrv),
/// A static runtime error.
@@ -110,10 +107,21 @@ impl Request for Inspect {
}
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Coding, Hierarchy)]
#[extends(ExprReq, ExtHostReq)]
pub struct ExprPrint {
pub target: ExprTicket,
}
impl Request for ExprPrint {
type Response = FormattingUnit;
}
#[derive(Clone, Debug, Coding, Hierarchy)]
#[extends(ExtHostReq)]
#[extendable]
pub enum ExprReq {
Inspect(Inspect),
ExprPrint(ExprPrint),
Create(Create),
}
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Coding, Hierarchy)]
@@ -122,5 +130,11 @@ pub enum ExprReq {
pub enum ExprNotif {
Acquire(Acquire),
Release(Release),
Move(Move),
}
#[derive(Clone, Debug, Coding, Hierarchy)]
#[extends(ExprReq, ExtHostReq)]
pub struct Create(pub SysId, pub Expression);
impl Request for Create {
type Response = ExprTicket;
}

View File

@@ -3,7 +3,7 @@ use std::num::NonZeroU64;
use orchid_api_derive::{Coding, Hierarchy};
use orchid_api_traits::Request;
use crate::{ExtHostReq, HostExtReq};
use crate::{ExtHostNotif, ExtHostReq, HostExtReq};
/// Intern requests sent by the replica to the master. These requests are
/// repeatable.
@@ -71,18 +71,21 @@ pub struct TStr(pub NonZeroU64);
pub struct TStrv(pub NonZeroU64);
/// A request to sweep the replica. The master will not be sweeped until all
/// replicas respond, as it must retain everything the replicas retained
/// replicas respond. For efficiency, replicas should make sure to send the
/// [Sweeped] notif before returning.
#[derive(Clone, Copy, Debug, Coding, Hierarchy)]
#[extends(HostExtReq)]
pub struct Sweep;
impl Request for Sweep {
type Response = Retained;
type Response = ();
}
/// List of keys in this replica that couldn't be sweeped because local
/// datastructures reference their value.
#[derive(Clone, Debug, Coding)]
pub struct Retained {
/// List of keys in this replica that were removed during a sweep. This may have
/// been initiated via a [Sweep] request, but can also be triggered by the
/// replica autonomously.
#[derive(Clone, Debug, Coding, Hierarchy)]
#[extends(ExtHostNotif)]
pub struct Sweeped {
pub strings: Vec<TStr>,
pub vecs: Vec<TStrv>,
}

View File

@@ -28,7 +28,7 @@ impl Request for LexExpr {
#[derive(Clone, Debug, Coding)]
pub struct LexedExpr {
pub pos: u32,
pub expr: TokenTree,
pub expr: Vec<TokenTree>,
}
#[derive(Clone, Debug, Coding, Hierarchy)]

View File

@@ -1,3 +1,4 @@
pub mod binary;
mod lexer;
pub use lexer::*;
mod format;

View File

@@ -17,6 +17,8 @@ pub enum Location {
Gen(CodeGenInfo),
/// Range and file
SourceRange(SourceRange),
/// Multiple locations
Multi(Vec<Location>),
}
#[derive(Clone, Debug, Coding)]

View File

@@ -1,14 +1,30 @@
use std::collections::HashMap;
use orchid_api_derive::{Coding, Hierarchy};
use crate::ExtHostNotif;
use crate::{ExtHostNotif, TStr};
/// Describes what to do with a log stream.
/// Log streams are unstructured utf8 text unless otherwise stated.
#[derive(Clone, Debug, Coding, PartialEq, Eq, Hash)]
pub enum LogStrategy {
StdErr,
File(String),
/// Context-dependent default stream, often stderr
Default,
/// A file on the local filesystem
File { path: String, append: bool },
/// Discard any log output
Discard,
}
#[derive(Clone, Debug, Coding)]
pub struct Logger {
pub routing: HashMap<String, LogStrategy>,
pub default: Option<LogStrategy>,
}
#[derive(Clone, Debug, Coding, Hierarchy)]
#[extends(ExtHostNotif)]
pub struct Log(pub String);
pub struct Log {
pub category: TStr,
pub message: String,
}

View File

@@ -30,6 +30,7 @@ pub struct ParseLine {
pub src: TStrv,
pub comments: Vec<Comment>,
pub exported: bool,
pub idx: u16,
pub line: Vec<TokenTree>,
}
impl Request for ParseLine {
@@ -59,7 +60,7 @@ pub struct ParsedMember {
#[derive(Clone, Debug, Coding)]
pub enum ParsedMemberKind {
Constant(ParsedConstId),
Module(Vec<ParsedLine>),
Module { lines: Vec<ParsedLine>, use_prelude: bool },
}
/// Obtain the value of a parsed constant. This is guaranteed to be called after
@@ -67,10 +68,7 @@ pub enum ParsedMemberKind {
/// the macro engine could run here.
#[derive(Clone, Debug, Coding, Hierarchy)]
#[extends(HostExtReq)]
pub struct FetchParsedConst {
pub sys: SysId,
pub id: ParsedConstId,
}
pub struct FetchParsedConst(pub SysId, pub ParsedConstId);
impl Request for FetchParsedConst {
type Response = Expression;
}
@@ -85,8 +83,8 @@ pub struct Comment {
/// called during a [FetchParsedConst] call, but it can be called for a
/// different [ParsedConstId] from the one in [FetchParsedConst].
///
/// Each name is either resolved to an alias or existing constant `Some(TStrv)`
/// or not resolved `None`. An error is never raised, as names may have a
/// Each name is either resolved to a valid name or a potential error.
/// The error is not raised by the interpreter itself, as names may have a
/// primary meaning such as a local binding which can be overridden by specific
/// true names such as those triggering macro keywords. It is not recommended to
/// define syntax that can break by defining arbitrary constants, as line
@@ -100,5 +98,5 @@ pub struct ResolveNames {
}
impl Request for ResolveNames {
type Response = Vec<Option<TStrv>>;
type Response = Vec<OrcResult<TStrv>>;
}

View File

@@ -22,51 +22,49 @@
//! be preserved. Toolkits must ensure that the client code is able to observe
//! the ordering of messages.
use std::io;
use std::pin::Pin;
use async_std::io::{Read, Write};
use futures::{AsyncRead, AsyncWrite, AsyncWriteExt};
use orchid_api_derive::{Coding, Hierarchy};
use orchid_api_traits::{Channel, Decode, Encode, MsgSet, Request, read_exact, write_exact};
use orchid_api_traits::{Channel, Decode, Encode, MsgSet, Request, read_exact};
use crate::{atom, expr, interner, lexer, logging, parser, system, tree};
static HOST_INTRO: &[u8] = b"Orchid host, binary API v0\n";
#[derive(Clone, Debug)]
pub struct HostHeader {
pub log_strategy: logging::LogStrategy,
pub msg_logs: logging::LogStrategy,
pub logger: logging::Logger,
}
impl Decode for HostHeader {
async fn decode<R: Read + ?Sized>(mut read: Pin<&mut R>) -> Self {
read_exact(read.as_mut(), HOST_INTRO).await;
Self {
log_strategy: logging::LogStrategy::decode(read.as_mut()).await,
msg_logs: logging::LogStrategy::decode(read.as_mut()).await,
}
async fn decode<R: AsyncRead + ?Sized>(mut read: Pin<&mut R>) -> io::Result<Self> {
read_exact(read.as_mut(), HOST_INTRO).await?;
Ok(Self { logger: logging::Logger::decode(read).await? })
}
}
impl Encode for HostHeader {
async fn encode<W: Write + ?Sized>(&self, mut write: Pin<&mut W>) {
write_exact(write.as_mut(), HOST_INTRO).await;
self.log_strategy.encode(write.as_mut()).await;
self.msg_logs.encode(write.as_mut()).await
async fn encode<W: AsyncWrite + ?Sized>(&self, mut write: Pin<&mut W>) -> io::Result<()> {
write.write_all(HOST_INTRO).await?;
self.logger.encode(write.as_mut()).await
}
}
static EXT_INTRO: &[u8] = b"Orchid extension, binary API v0\n";
#[derive(Clone, Debug)]
pub struct ExtensionHeader {
pub name: String,
pub systems: Vec<system::SystemDecl>,
}
impl Decode for ExtensionHeader {
async fn decode<R: Read + ?Sized>(mut read: Pin<&mut R>) -> Self {
read_exact(read.as_mut(), EXT_INTRO).await;
Self { name: String::decode(read.as_mut()).await, systems: Vec::decode(read).await }
async fn decode<R: AsyncRead + ?Sized>(mut read: Pin<&mut R>) -> io::Result<Self> {
read_exact(read.as_mut(), EXT_INTRO).await?;
Ok(Self { name: String::decode(read.as_mut()).await?, systems: Vec::decode(read).await? })
}
}
impl Encode for ExtensionHeader {
async fn encode<W: Write + ?Sized>(&self, mut write: Pin<&mut W>) {
write_exact(write.as_mut(), EXT_INTRO).await;
self.name.encode(write.as_mut()).await;
async fn encode<W: AsyncWrite + ?Sized>(&self, mut write: Pin<&mut W>) -> io::Result<()> {
write.write_all(EXT_INTRO).await?;
self.name.encode(write.as_mut()).await?;
self.systems.encode(write).await
}
}
@@ -99,6 +97,7 @@ pub enum ExtHostReq {
pub enum ExtHostNotif {
ExprNotif(expr::ExprNotif),
Log(logging::Log),
Sweeped(interner::Sweeped),
}
pub struct ExtHostChannel;
@@ -120,14 +119,14 @@ pub enum HostExtReq {
ParseLine(parser::ParseLine),
FetchParsedConst(parser::FetchParsedConst),
GetMember(tree::GetMember),
SystemDrop(system::SystemDrop),
AtomDrop(atom::AtomDrop),
}
/// Notifications sent from the host to the extension
#[derive(Clone, Debug, Coding, Hierarchy)]
#[extendable]
pub enum HostExtNotif {
SystemDrop(system::SystemDrop),
AtomDrop(atom::AtomDrop),
/// The host can assume that after this notif is sent, a correctly written
/// extension will eventually exit.
Exit,
@@ -155,22 +154,22 @@ impl MsgSet for HostMsgSet {
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use orchid_api_traits::enc_vec;
use ordered_float::NotNan;
use test_executors::spin_on;
use super::*;
use crate::Logger;
#[test]
fn host_header_enc() {
spin_on(async {
let hh = HostHeader {
log_strategy: logging::LogStrategy::File("SomeFile".to_string()),
msg_logs: logging::LogStrategy::File("SomeFile".to_string()),
};
let mut enc = &enc_vec(&hh).await[..];
let hh = HostHeader { logger: Logger { routing: HashMap::new(), default: None } };
let mut enc = &enc_vec(&hh)[..];
eprintln!("Encoded to {enc:?}");
HostHeader::decode(Pin::new(&mut enc)).await;
HostHeader::decode(Pin::new(&mut enc)).await.unwrap();
assert_eq!(enc, []);
})
}
@@ -187,9 +186,9 @@ mod tests {
priority: NotNan::new(1f64).unwrap(),
}],
};
let mut enc = &enc_vec(&eh).await[..];
let mut enc = &enc_vec(&eh)[..];
eprintln!("Encoded to {enc:?}");
ExtensionHeader::decode(Pin::new(&mut enc)).await;
ExtensionHeader::decode(Pin::new(&mut enc)).await.unwrap();
assert_eq!(enc, [])
})
}

View File

@@ -5,7 +5,7 @@ use orchid_api_derive::{Coding, Hierarchy};
use orchid_api_traits::Request;
use ordered_float::NotNan;
use crate::{CharFilter, ExtHostReq, HostExtNotif, HostExtReq, MemberKind, TStr};
use crate::{CharFilter, ExtHostReq, HostExtReq, MemberKind, TStr, TStrv};
/// ID of a system type
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Coding)]
@@ -63,11 +63,15 @@ pub struct NewSystemResponse {
pub lex_filter: CharFilter,
pub line_types: Vec<TStr>,
pub const_root: HashMap<TStr, MemberKind>,
pub prelude: Vec<TStrv>,
}
#[derive(Clone, Debug, Coding, Hierarchy)]
#[extends(HostExtNotif)]
#[extends(HostExtReq)]
pub struct SystemDrop(pub SysId);
impl Request for SystemDrop {
type Response = ();
}
#[derive(Clone, Debug, Coding, Hierarchy)]
#[extends(SysReq, HostExtReq)]

View File

@@ -1,7 +1,7 @@
use std::collections::HashMap;
use std::fmt;
use std::num::NonZeroU64;
use std::ops::Range;
use std::rc::Rc;
use orchid_api_derive::{Coding, Hierarchy};
use orchid_api_traits::Request;
@@ -27,7 +27,7 @@ pub struct TokenTree {
#[derive(Clone, Debug, Coding)]
pub enum Token {
/// Lambda function head, from the opening \ until the beginning of the body.
LambdaHead(Vec<TokenTree>),
LambdaHead(Box<TokenTree>),
/// A name segment or an operator.
Name(TStr),
/// A newly generated expression. The last place this is supposed to happen is
@@ -47,7 +47,7 @@ pub enum Token {
/// NewExpr(Bottom) because it fails in dead branches too.
Bottom(Vec<OrcError>),
/// A comment
Comment(Rc<String>),
Comment(TStr),
}
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, Coding)]
@@ -56,6 +56,15 @@ pub enum Paren {
Square,
Curly,
}
impl fmt::Display for Paren {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", match self {
Self::Round => "()",
Self::Curly => "{}",
Self::Square => "[]",
})
}
}
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Coding)]
pub struct TreeId(pub NonZeroU64);

View File

@@ -0,0 +1,12 @@
[package]
name = "orchid-async-utils"
version = "0.1.0"
edition = "2024"
[dependencies]
futures = { version = "0.3.31", default-features = false, features = [
"std",
"async-await",
] }
itertools = "0.14.0"
task-local = "0.1.0"

View File

@@ -0,0 +1,87 @@
use std::pin::Pin;
use std::task::{Context, Poll};
/// Future returned by [cancel_cleanup]
///
/// Drives the inner future to completion; if dropped while the inner future is
/// still pending, the cleanup callback receives the unfinished future.
pub struct CancelCleanup<Fut: Future + Unpin, Fun: FnOnce(Fut)> {
  /// Set to None when Ready
  fut: Option<Fut>,
  /// Only set to None in Drop
  on_drop: Option<Fun>,
}
impl<Fut: Future + Unpin, Fun: FnOnce(Fut)> Future for CancelCleanup<Fut, Fun> {
  type Output = Fut::Output;
  fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
    // SAFETY: no field is moved out; `fut` is only polled (and `Fut: Unpin`
    // anyway), `on_drop` is untouched.
    let Self { fut, .. } = unsafe { self.get_unchecked_mut() };
    if let Some(future) = fut {
      // SAFETY: `Fut: Unpin`, so re-pinning this reference is trivially sound.
      let future = unsafe { Pin::new_unchecked(future) };
      let poll = future.poll(cx);
      if poll.is_ready() {
        // Clearing `fut` records completion so Drop won't fire the callback.
        *fut = None;
      }
      poll
    } else {
      // Polled again after completion; report pending rather than panic.
      Poll::Pending
    }
  }
}
impl<Fut: Future + Unpin, Fun: FnOnce(Fut)> Drop for CancelCleanup<Fut, Fun> {
  fn drop(&mut self) {
    // `fut` is still Some exactly when the inner future never completed, i.e.
    // this drop is a cancellation; only then does the callback run.
    if let Some(fut) = self.fut.take() {
      (self.on_drop.take().unwrap())(fut)
    }
  }
}
/// Wrap a future so that its cancellation can be observed. If the returned
/// wrapper is dropped before the wrapped future completes, `on_drop` receives
/// the still-pending future as its argument; after completion the callback is
/// never invoked.
pub fn cancel_cleanup<Fut: Future + Unpin, Fun: FnOnce(Fut)>(
  fut: Fut,
  on_drop: Fun,
) -> CancelCleanup<Fut, Fun> {
  let (fut, on_drop) = (Some(fut), Some(on_drop));
  CancelCleanup { fut, on_drop }
}
#[cfg(test)]
mod test {
  use std::pin::pin;
  use futures::channel::mpsc;
  use futures::future::join;
  use futures::{SinkExt, StreamExt};
  use super::*;
  use crate::debug::spin_on;
  /// Dropping the wrapper around a future that never completed must fire the
  /// cleanup callback.
  #[test]
  fn called_on_drop() {
    let mut called = false;
    cancel_cleanup(pin!(async {}), |_| called = true);
    assert!(called, "cleanup was called when the future was dropped");
  }
  /// Once the wrapped future has resolved, dropping the wrapper must NOT fire
  /// the callback.
  #[test]
  fn not_called_if_finished() {
    spin_on(false, async {
      let (mut req_in, mut req_out) = mpsc::channel(0);
      let (mut rep_in, mut rep_out) = mpsc::channel(0);
      join(
        // Echo side: wait for the request, then answer it.
        async {
          req_out.next().await.unwrap();
          rep_in.send(()).await.unwrap();
        },
        // The wrapped round-trip runs to completion, so the panicking
        // callback must never be invoked.
        async {
          cancel_cleanup(
            pin!(async {
              req_in.send(()).await.unwrap();
              rep_out.next().await.unwrap();
            }),
            |_| panic!("Callback called on drop even though the future was finished"),
          )
          .await
        },
      )
      .await
    });
  }
}

View File

@@ -0,0 +1,193 @@
//! Note that these utilities are safe and simple in order to facilitate
//! debugging without adding more points of failure, but they're not efficient;
//! they may perform heap allocations, I/O and other expensive operations, or
//! even block the thread altogether waiting for input whenever they receive
//! control
use std::cell::RefCell;
use std::fmt::Display;
use std::pin::pin;
use std::rc::Rc;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::task::{Context, Poll, Wake, Waker};
use std::thread::panicking;
use futures::Stream;
use itertools::Itertools;
use task_local::task_local;
/// Waker wrapper that runs an observer callback on every wake, then forwards
/// the wake to the wrapped waker.
struct OnPollWaker<F: Fn() + 'static>(Waker, F);
impl<F: Fn() + 'static> Wake for OnPollWaker<F> {
  fn wake(self: Arc<Self>) {
    // Fire the observer first, then actually wake the underlying task.
    (self.1)();
    self.0.wake_by_ref()
  }
}
/// Attach a callback to the [Future] protocol for testing and debugging.
///
/// `wake` runs every time the future's waker is woken, before the wake-up is
/// forwarded to the real waker. A fresh wrapper waker is built on every poll.
pub async fn on_wake<F: Future>(
  f: F,
  wake: impl Fn() + Clone + Send + Sync + 'static,
) -> F::Output {
  let mut f = pin!(f);
  futures::future::poll_fn(|cx| {
    // Substitute the caller's waker with an observing wrapper.
    let waker = Arc::new(OnPollWaker(cx.waker().clone(), wake.clone())).into();
    f.as_mut().poll(&mut Context::from_waker(&waker))
  })
  .await
}
/// Respond to [Future::poll] with a callback. For maximum flexibility and state
/// control, your callback receives the actual poll job as a callback function.
/// Failure to call this function will result in an immediate panic.
///
/// The job closure returns whether the poll produced `Ready`.
pub async fn wrap_poll<Fut: Future>(
  f: Fut,
  mut cb: impl FnMut(Box<dyn FnOnce() -> bool + '_>),
) -> Fut::Output {
  let mut f = pin!(f);
  futures::future::poll_fn(|cx| {
    // The job stores the real poll result here; None means it was never run.
    let poll = RefCell::new(None);
    cb(Box::new(|| {
      let poll1 = f.as_mut().poll(cx);
      let ret = poll1.is_ready();
      *poll.borrow_mut() = Some(poll1);
      ret
    }));
    // Fixed stale message that referred to a nonexistent `on_poll` function.
    poll.into_inner().expect("Callback to wrap_poll failed to call its argument")
  })
  .await
}
/// Respond to [Stream::poll_next] with a callback. The semantics of the
/// callback are identical to that in [wrap_poll]
pub fn wrap_poll_next<'a, S: Stream + 'a>(
  s: S,
  mut cb: impl FnMut(Box<dyn FnOnce() -> bool + '_>) + 'a,
) -> impl Stream<Item = S::Item> + 'a {
  let mut s = Box::pin(s);
  futures::stream::poll_fn(move |cx| {
    // The job stores the real poll result here; None means it was never run.
    let poll = RefCell::new(None);
    cb(Box::new(|| {
      let poll1 = s.as_mut().poll_next(cx);
      let ret = poll1.is_ready();
      *poll.borrow_mut() = Some(poll1);
      ret
    }));
    // Fixed stale message that referred to a nonexistent `on_poll` function.
    poll.into_inner().expect("Callback to wrap_poll_next failed to call its argument")
  })
}
/// Attach a callback to the [Stream] protocol for testing and debugging.
///
/// Stream counterpart of [on_wake]: `wake` runs on every wake-up before it is
/// forwarded to the real waker.
pub fn on_stream_wake<'a, S: Stream + 'a>(
  s: S,
  wake: impl Fn() + Clone + Send + Sync + 'static,
) -> impl Stream<Item = S::Item> {
  let mut s = Box::pin(s);
  futures::stream::poll_fn(move |cx| {
    let waker = Arc::new(OnPollWaker(cx.waker().clone(), wake.clone())).into();
    s.as_mut().poll_next(&mut Context::from_waker(&waker))
  })
}
// Task-local label stack maintained by [with_label] and read by [label].
task_local! {
  static LABEL_STATE: Vec<Rc<String>>
}
/// Add a label to the "label stack" for the duration of a future that helps you
/// efficiently visualize important aspects of the call stack during logging
pub async fn with_label<Fut: Future>(label: &str, f: Fut) -> Fut::Output {
  // Copy the current stack (empty if no scope is active), push the new entry,
  // then scope the extended stack over `f`.
  let mut new_lbl = LABEL_STATE.try_with(|lbl| lbl.clone()).unwrap_or_default();
  new_lbl.push(Rc::new(label.to_string()));
  LABEL_STATE.scope(new_lbl, f).await
}
/// Render the current label stack as a `/`-separated string.
///
/// Yields an empty string when no label scope is active.
pub fn label() -> impl Display + Clone + Send + Sync + 'static {
  // `unwrap_or_default` avoids eagerly allocating the fallback String the way
  // `unwrap_or("".to_string())` did.
  LABEL_STATE.try_with(|lbl| lbl.iter().join("/")).unwrap_or_default()
}
/// Displays the label stack when printed
///
/// Zero-sized convenience for embedding the current label stack in format
/// strings without calling [label] explicitly.
pub struct Label;
impl Display for Label {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", label()) }
}
/// Attaches generic eprintln handlers to a future
///
/// Poll messages render the label stack current at poll time (`{Label}`),
/// while the wake message reuses the stack captured here at setup time —
/// presumably because a wake can fire outside the labelled scope.
pub async fn eprint_events<Fut: Future>(note: &str, f: Fut) -> Fut::Output {
  let label = label();
  let note1 = note.to_string();
  on_wake(
    wrap_poll(f, |cb| {
      eprintln!("{Label} polling {note}");
      // `cb()` performs the actual poll and reports readiness.
      eprintln!("{Label} polled {note} (ready? {})", cb())
    }),
    move || eprintln!("{label} woke {note1}"),
  )
  .await
}
/// Attaches generic eprintln handlers to a stream
///
/// Stream counterpart of [eprint_events]: poll messages use the live label
/// stack, the wake message uses the stack captured at creation time.
pub fn eprint_stream_events<'a, S: Stream + 'a>(
  note: &'a str,
  s: S,
) -> impl Stream<Item = S::Item> + 'a {
  let label = label();
  let note1 = note.to_string();
  on_stream_wake(
    wrap_poll_next(s, move |cb| {
      eprintln!("{Label} polling {note}");
      eprintln!("{Label} polled {note} (ready? {})", cb())
    }),
    move || eprintln!("{label} woke {note1}"),
  )
}
/// Waker used by [spin_on]: records that a wake happened so the spin loop
/// knows to poll again instead of giving up.
struct SpinWaker {
  /// Set on wake, cleared by the spin loop before each re-poll.
  repeat: AtomicBool,
  /// When true, every wake is also logged to stderr.
  loud: bool,
}
impl Wake for SpinWaker {
  fn wake(self: Arc<Self>) {
    self.repeat.store(true, Ordering::SeqCst);
    if self.loud {
      eprintln!("Triggered repeat for spin_on")
    }
  }
}
/// A dumb executor that keeps synchronously re-running the future as long as it
/// keeps synchronously waking itself. This is useful for deterministic tests
/// that don't contain side effects or threading.
///
/// # Panics
///
/// If the future doesn't wake itself and doesn't settle.
pub fn spin_on<Fut: Future>(loud: bool, f: Fut) -> Fut::Output {
let spin_waker = Arc::new(SpinWaker { repeat: AtomicBool::new(false), loud });
let mut f = pin!(f);
let waker = spin_waker.clone().into();
let mut cx = Context::from_waker(&waker);
loop {
match f.as_mut().poll(&mut cx) {
Poll::Ready(t) => break t,
Poll::Pending if spin_waker.repeat.swap(false, Ordering::SeqCst) => (),
Poll::Pending => panic!("The future did not exit and did not call its waker."),
}
}
}
/// Create a guard that panics if it is ever dropped while armed. Call
/// [PanicOnDrop::defuse] once the constraint that forbade dropping has passed.
pub fn assert_no_drop(msg: &'static str) -> PanicOnDrop { PanicOnDrop(true, msg) }
/// Guard that panics when dropped while armed. [Self::defuse] disarms it so
/// it can be dropped silently.
pub struct PanicOnDrop(bool, &'static str);
impl PanicOnDrop {
  /// Disarm the guard; dropping it afterwards is silent.
  pub fn defuse(mut self) {
    // Clearing the armed flag makes the Drop impl below a no-op.
    self.0 = false;
  }
}
impl Drop for PanicOnDrop {
  fn drop(&mut self) {
    // Stay quiet during an ongoing unwind so we never panic-within-panic.
    if self.0 && !panicking() {
      panic!("{}", self.1)
    }
  }
}

View File

@@ -0,0 +1,7 @@
pub mod debug;
mod cancel_cleanup;
pub use cancel_cleanup::*;
mod localset;
pub use localset::*;
mod task_future;
pub use task_future::*;

View File

@@ -0,0 +1,61 @@
use std::pin::Pin;
use std::task::Poll;
use futures::channel::mpsc::{SendError, UnboundedReceiver, UnboundedSender, unbounded};
use futures::future::LocalBoxFuture;
use futures::stream::FuturesUnordered;
use futures::{SinkExt, StreamExt};
/// Sending half of a [local_set]: spawns new futures onto the running set.
pub struct LocalSetController<'a, E> {
  sender: UnboundedSender<LocalBoxFuture<'a, Result<(), E>>>,
}
impl<'a, E> LocalSetController<'a, E> {
  /// Queue a future to be driven by the associated set future.
  ///
  /// Returns [SendError] when the set future no longer exists.
  pub async fn spawn<F: Future<Output = Result<(), E>> + 'a>(
    &mut self,
    fut: F,
  ) -> Result<(), SendError> {
    self.sender.send(Box::pin(fut)).await
  }
}
/// Create a cooperatively driven set of local futures.
///
/// Returns a controller for spawning `Result<(), E>` futures and a driver
/// future that resolves to `Ok(())` once the controller is dropped and every
/// spawned future finished, or to the first `Err` any of them produced.
pub fn local_set<'a, E: 'a>()
-> (LocalSetController<'a, E>, impl Future<Output = Result<(), E>> + 'a) {
  let (sender, receiver) = unbounded();
  let controller = LocalSetController { sender };
  let set = LocalSet { receiver, pending: FuturesUnordered::new() };
  (controller, set)
}
/// Driver future behind [local_set].
struct LocalSet<'a, E> {
  /// Newly spawned futures arrive here from the controller.
  receiver: UnboundedReceiver<LocalBoxFuture<'a, Result<(), E>>>,
  /// Accepted futures that haven't completed yet.
  pending: FuturesUnordered<LocalBoxFuture<'a, Result<(), E>>>,
}
impl<E> Future for LocalSet<'_, E> {
  type Output = Result<(), E>;
  fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Self::Output> {
    let this = self.get_mut();
    let mut any_pending = false;
    // Phase 1: move every queued future into `pending`. `Pending` here means
    // the channel is still open, so the set must stay alive even while no
    // task is runnable.
    loop {
      match this.receiver.poll_next_unpin(cx) {
        Poll::Ready(Some(fut)) => this.pending.push(fut),
        Poll::Ready(None) => break,
        Poll::Pending => {
          any_pending = true;
          break;
        },
      }
    }
    // Phase 2: drive the accepted futures; the first error aborts the whole
    // set immediately.
    loop {
      match this.pending.poll_next_unpin(cx) {
        Poll::Ready(Some(Err(e))) => return Poll::Ready(Err(e)),
        Poll::Ready(Some(Ok(()))) => continue,
        Poll::Ready(None) => break,
        Poll::Pending => {
          any_pending = true;
          break;
        },
      }
    }
    // Done only when the controller hung up AND every spawned future ran to
    // completion without error.
    if any_pending { Poll::Pending } else { Poll::Ready(Ok(())) }
  }
}
}

View File

@@ -0,0 +1,99 @@
use std::any::Any;
use std::cell::RefCell;
use std::pin::{Pin, pin};
use std::rc::Rc;
use std::task::{Context, Poll, Waker};
use futures::FutureExt;
use futures::channel::oneshot::{self, Canceled};
use futures::future::{FusedFuture, LocalBoxFuture};
struct State {
  /// The driven future; cleared to None once it has resolved.
  work: Option<LocalBoxFuture<'static, Box<dyn Any>>>,
  /// The future's output, recorded when `work` resolves.
  result: Option<Box<dyn Any>>,
  /// Latest waker seen by [Pollable::poll]; presumably used elsewhere to
  /// resume the task — TODO confirm against the constructing code.
  waker: Waker,
}
/// A fused future that can be passed to a non-polymorphic executor that doesn't
/// process results and doesn't return handles
pub struct Pollable(Rc<RefCell<State>>);
impl FusedFuture for Pollable {
  fn is_terminated(&self) -> bool {
    // Terminated when nothing is left to poll or an output is already stored.
    let g = self.0.borrow();
    g.work.is_none() || g.result.is_some()
  }
}
impl Future for Pollable {
  type Output = ();
  fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
    let mut g = self.0.borrow_mut();
    match &mut *g {
      // Already finished, or the output is waiting to be collected.
      State { result: Some(_), .. } | State { work: None, .. } => Poll::Ready(()),
      State { work: Some(work), waker, result } => match work.as_mut().poll(cx) {
        Poll::Pending => {
          // Remember the most recent waker so the task can be resumed.
          waker.clone_from(cx.waker());
          Poll::Pending
        },
        Poll::Ready(val) => {
          // Stash the output and clear the work slot so this future reports
          // terminated from now on.
          *result = Some(val);
          g.work = None;
          Poll::Ready(())
        },
      },
    }
  }
}
/// Error returned by [Handle::join] when the task's output can no longer be
/// produced.
pub struct JoinError;
/// An object that can be used to inspect the state of the task
pub struct Handle<T: 'static> {
  /// Consumed by [Self::abort] to signal cancellation.
  send_abort: RefCell<Option<oneshot::Sender<()>>>,
  /// Set to true by the task just before it publishes its output.
  ready: Rc<RefCell<bool>>,
  /// Receives the task's output exactly once.
  recv_output: RefCell<oneshot::Receiver<T>>,
}
impl<T: 'static> Handle<T> {
  /// Immediately stop working on this task, and return the result if it has
  /// already finished
  pub fn abort(&self) -> Option<T> {
    // Fire the abort signal at most once, then salvage any output the task
    // may already have produced.
    if let Some(abort) = self.send_abort.take() {
      let _ = abort.send(());
    }
    self.recv_output.borrow_mut().try_recv().ok().flatten()
  }
  /// Determine if there's any more work to do on this task
  pub fn is_finished(&self) -> bool { *self.ready.borrow() }
  /// "finish" the freestanding task, and return the future instead
  pub async fn join(self) -> Result<T, JoinError> {
    self.recv_output.into_inner().await.map_err(|Canceled| JoinError)
  }
}
/// Split a future into an object that can be polled and one that returns
/// information on its progress and its result. The first one can be passed to
/// an executor or localset, the second can be used to manage it
pub fn to_task<'a, F: Future<Output: 'a> + 'a>(
  f: F,
) -> (impl Future<Output = ()> + 'a, Handle<F::Output>) {
  let (send_abort, mut on_abort) = oneshot::channel();
  let (send_output, on_output) = oneshot::channel();
  let ready = Rc::new(RefCell::new(false));
  let ready2 = ready.clone();
  let fut = async move {
    let mut fut = pin!(f.fuse());
    // Race the payload against the abort signal; select_biased checks the
    // abort channel first so a pending abort wins a tie.
    let output = futures::select_biased! {
      res = on_abort => match res {
        // Explicit abort: stop without producing an output.
        Ok(()) => return,
        // Handle dropped without aborting: keep driving the payload.
        Err(_) => fut.await,
      },
      output = fut => output,
    };
    // Flip the ready flag before sending, so that whenever the output is
    // observable, is_finished already reports true.
    ready2.replace(true);
    // The receiver may be gone (handle dropped); ignore that.
    let _: Result<_, _> = send_output.send(output);
  };
  (fut, Handle {
    ready,
    recv_output: RefCell::new(on_output),
    send_abort: RefCell::new(Some(send_abort)),
  })
}

View File

@@ -6,13 +6,18 @@ edition = "2024"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
unsync-pipe = { version = "0.2.0", path = "../unsync-pipe" }
async-fn-stream = { version = "0.1.0", path = "../async-fn-stream" }
orchid-async-utils = { version = "0.1.0", path = "../orchid-async-utils" }
async-once-cell = "0.5.4"
async-std = "1.13.0"
async-stream = "0.3.6"
bound = "0.6.0"
derive_destructure = "1.0.0"
dyn-clone = "1.0.17"
futures = "0.3.31"
hashbrown = "0.15.2"
dyn-clone = "1.0.20"
futures = { version = "0.3.31", default-features = false, features = [
"std",
"async-await",
] }
hashbrown = "0.16.1"
itertools = "0.14.0"
lazy_static = "1.5.0"
never = "0.1.0"
@@ -20,10 +25,14 @@ num-traits = "0.2.19"
orchid-api = { version = "0.1.0", path = "../orchid-api" }
orchid-api-derive = { version = "0.1.0", path = "../orchid-api-derive" }
orchid-api-traits = { version = "0.1.0", path = "../orchid-api-traits" }
ordered-float = "5.0.0"
regex = "1.11.1"
rust-embed = "8.5.0"
some_executor = "0.4.0"
ordered-float = "5.1.0"
regex = "1.12.2"
rust-embed = "8.9.0"
substack = "1.1.1"
test_executors = "0.3.2"
trait-set = "0.3.0"
task-local = "0.1.0"
[dev-dependencies]
futures = "0.3.31"
rand = "0.10.0"
rand_chacha = "0.10.0"

122
orchid-base/src/binary.rs Normal file
View File

@@ -0,0 +1,122 @@
use std::mem;
use std::pin::Pin;
use std::rc::Rc;
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
use crate::api;
// Boxed trait-object future, double-boxed below so the outer pointer is thin.
type WideBox = Box<dyn Future<Output = ()>>;
/// Adapts an extension-provided [api::binary::OwnedWakerBin] to the std
/// [RawWaker] protocol. The `data` pointer is a leaked `Rc<OwnedWakerBin>`.
static OWNED_VTABLE: RawWakerVTable = RawWakerVTable::new(
  |data| {
    // SAFETY: `data` is a leaked Rc produced by this vtable's clone or by
    // BORROWED_VTABLE's clone, so it may be temporarily un-leaked here.
    let data = unsafe { Rc::<api::binary::OwnedWakerBin>::from_raw(data as *const _) };
    let val = RawWaker::new(Rc::into_raw(data.clone()) as *const (), &OWNED_VTABLE);
    // Clone must create a duplicate of the Rc, so it has to be un-leaked, cloned,
    // then leaked again.
    let _ = Rc::into_raw(data);
    val
  },
  |data| {
    // Wake must awaken the task and then clean up the state, so the waker must be
    // un-leaked. The FFI `wake` consumes its state, so only the Rc is dropped.
    let data = unsafe { Rc::<api::binary::OwnedWakerBin>::from_raw(data as *const _) };
    (data.wake)(data.data);
    mem::drop(data);
  },
  |data| {
    // Wake-by-ref must awaken the task while preserving the future, so the Rc is
    // untouched
    let data = unsafe { (data as *const api::binary::OwnedWakerBin).as_ref() }.unwrap();
    (data.wake_ref)(data.data);
  },
  |data| {
    // Drop must clean up the state, so the waker must be un-leaked
    let data = unsafe { Rc::<api::binary::OwnedWakerBin>::from_raw(data as *const _) };
    (data.drop)(data.data);
    mem::drop(data);
  },
);
/// State shared between one vtable `poll` call and the stack-borrowed waker it
/// hands to the future; lives only for the duration of that call.
struct BorrowedWakerData<'a> {
  // Set to true by BORROWED_VTABLE's wake/wake_by_ref so the poll loop in
  // `future_to_vt` runs another iteration immediately.
  go_around: &'a mut bool,
  // Caller-supplied context, used to mint an owned waker when `clone` is called.
  cx: api::binary::FutureContextBin,
}
// Vtable for the short-lived waker used while synchronously polling; its data
// pointer is a `*mut BorrowedWakerData` pointing into the caller's stack frame.
static BORROWED_VTABLE: RawWakerVTable = RawWakerVTable::new(
  // clone: escalate to an owned waker built through the caller's constructor,
  // switching the new RawWaker over to OWNED_VTABLE.
  |data| {
    let data = unsafe { (data as *mut BorrowedWakerData).as_mut() }.unwrap();
    let owned_data = Rc::<api::binary::OwnedWakerBin>::new((data.cx.waker)(data.cx.data));
    RawWaker::new(Rc::into_raw(owned_data) as *const (), &OWNED_VTABLE)
  },
  // wake and wake_by_ref: flag that the future should be polled again right away.
  |data| *unsafe { (data as *mut BorrowedWakerData).as_mut() }.unwrap().go_around = true,
  |data| *unsafe { (data as *mut BorrowedWakerData).as_mut() }.unwrap().go_around = true,
  // drop: the state is borrowed from the stack, there is nothing to release.
  |_data| {},
);
/// Convert a future to a binary-compatible format that can be sent across
/// dynamic library boundaries
///
/// The returned vtable's `poll` polls the boxed future with a stack-borrowed
/// waker; a synchronous wake is interpreted as "poll again immediately".
#[must_use]
pub fn future_to_vt<Fut: Future<Output = ()> + 'static>(fut: Fut) -> api::binary::FutureBin {
  let wide_box = Box::new(fut) as WideBox;
  let data = Box::into_raw(Box::new(wide_box));
  extern "C" fn drop(raw: *const ()) {
    // SAFETY: `raw` is the pointer produced by `Box::into_raw` above.
    mem::drop(unsafe { Box::<WideBox>::from_raw(raw as *mut _) })
  }
  extern "C" fn poll(raw: *const (), cx: api::binary::FutureContextBin) -> api::binary::UnitPoll {
    // SAFETY: `raw` is the leaked `Box<WideBox>`; the inner future is heap
    // allocated and never moved, so pinning it here is sound.
    let mut this = unsafe { Pin::new_unchecked(&mut **(raw as *mut WideBox).as_mut().unwrap()) };
    loop {
      let mut go_around = false;
      // BUGFIX: the waker state must live in a named local. Previously it was a
      // temporary created inside the `let borrowed_waker = ...;` statement, so
      // it was dropped at the end of that statement and the raw pointer stored
      // in the waker dangled for the whole `poll` call below.
      let mut waker_data = BorrowedWakerData { go_around: &mut go_around, cx };
      // SAFETY: `waker_data` outlives `borrowed_waker`, which is dropped
      // explicitly before `waker_data` goes out of scope.
      let borrowed_waker = unsafe {
        Waker::from_raw(RawWaker::new(
          &mut waker_data as *mut _ as *const (),
          &BORROWED_VTABLE,
        ))
      };
      let mut ctx = Context::from_waker(&borrowed_waker);
      let result = this.as_mut().poll(&mut ctx);
      // Release the context and the waker (and with it the raw pointer into
      // `waker_data`) before inspecting `go_around`.
      mem::drop(ctx);
      mem::drop(borrowed_waker);
      if matches!(result, Poll::Ready(())) {
        break api::binary::UnitPoll::Ready;
      }
      // A wake through BORROWED_VTABLE means "poll again now" rather than a
      // real registration, so loop instead of returning Pending.
      if !go_around {
        break api::binary::UnitPoll::Pending;
      }
    }
  }
  api::binary::FutureBin { data: data as *const _, drop, poll }
}
/// Owned wrapper around a vtable-based future received from another binary;
/// polling and dropping delegate to the function pointers in `vt`.
struct VirtualFuture {
  vt: api::binary::FutureBin,
}
// The foreign state is only reached through the raw `vt.data` pointer, so
// moving this wrapper never moves the future itself.
impl Unpin for VirtualFuture {}
impl Future for VirtualFuture {
  type Output = ();
  /// Poll the foreign future through its vtable, handing it a
  /// binary-compatible context that can lazily clone the current task's
  /// [Waker] into an owned, vtable-based waker.
  fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
    // Builds an owned binary waker by cloning the waker of the Context that
    // `raw` points to; only called synchronously during the vtable poll below,
    // while the `&mut Context` borrow is live.
    extern "C" fn waker(raw: *const ()) -> api::binary::OwnedWakerBin {
      let waker = unsafe { (raw as *mut Context).as_mut() }.unwrap().waker().clone();
      let data = Box::into_raw(Box::<Waker>::new(waker)) as *const ();
      return api::binary::OwnedWakerBin { data, drop, wake, wake_ref };
      extern "C" fn drop(raw: *const ()) {
        // SAFETY: `data` above is a leaked `Box<Waker>`.
        mem::drop(unsafe { Box::<Waker>::from_raw(raw as *mut Waker) })
      }
      extern "C" fn wake(raw: *const ()) {
        // Wake consumes the waker, so the box is reclaimed here.
        unsafe { Box::<Waker>::from_raw(raw as *mut Waker) }.wake();
      }
      extern "C" fn wake_ref(raw: *const ()) {
        // BUGFIX: wake-by-ref only needs `&Waker`; the previous `as_mut` here
        // created an `&mut` that could alias other references to the same
        // boxed waker held by the foreign side.
        unsafe { (raw as *const Waker).as_ref() }.unwrap().wake_by_ref();
      }
    }
    let cx = api::binary::FutureContextBin { data: cx as *mut Context as *const (), waker };
    let result = (self.vt.poll)(self.vt.data, cx);
    match result {
      api::binary::UnitPoll::Pending => Poll::Pending,
      api::binary::UnitPoll::Ready => Poll::Ready(()),
    }
  }
}
impl Drop for VirtualFuture {
  // Release the foreign future through its vtable exactly once.
  fn drop(&mut self) { (self.vt.drop)(self.vt.data) }
}
/// Receive a future sent across dynamic library boundaries and convert it into
/// an owned object
pub fn vt_to_future(vt: api::binary::FutureBin) -> impl Future<Output = ()> { VirtualFuture { vt } }

View File

@@ -1,29 +0,0 @@
use std::borrow::Borrow;
use std::ops::Deref;
use std::sync::Arc;
/// A clone-on-write pointer similar to [std::borrow::Cow], except the owned
/// form is reference-counted, so cloning never copies the owned data.
pub enum ArcCow<'a, T: ?Sized + ToOwned> {
  /// A plain borrow of the underlying data
  Borrowed(&'a T),
  /// Shared ownership over an owned form of the data
  Owned(Arc<T::Owned>),
}
impl<T: ?Sized + ToOwned> ArcCow<'_, T> {
  /// Wrap an owned value in a fresh reference-counted allocation
  pub fn owned(value: T::Owned) -> Self { Self::Owned(Arc::new(value)) }
}
impl<T: ?Sized + ToOwned> Deref for ArcCow<'_, T> {
  type Target = T;
  fn deref(&self) -> &Self::Target {
    match self {
      Self::Borrowed(short) => short,
      Self::Owned(arc) => arc.as_ref().borrow(),
    }
  }
}
impl<T: ?Sized + ToOwned> Clone for ArcCow<'_, T> {
  // Cloning either copies the borrow or bumps the refcount; never reallocates.
  fn clone(&self) -> Self {
    match self {
      Self::Borrowed(short) => Self::Borrowed(short),
      Self::Owned(arc) => Self::Owned(Arc::clone(arc)),
    }
  }
}

View File

@@ -3,20 +3,22 @@
use std::iter;
/// Owned trait object over [Iterator], for variables that may be initialized
/// from iterators of multiple or unknown concrete types.
pub type BoxedIter<'a, T> = Box<dyn Iterator<Item = T> + 'a>;
/// Wrap a single value into a one-element [BoxedIter]
#[must_use]
pub fn box_once<'a, T: 'a>(t: T) -> BoxedIter<'a, T> {
  let single = iter::once(t);
  Box::new(single)
}
/// Produce a [BoxedIter] that yields nothing
#[must_use]
pub fn box_empty<'a, T: 'a>() -> BoxedIter<'a, T> {
  Box::new(iter::empty())
}
/// Chain various iterators into a [BoxedIter]
// NOTE(review): the diff dump retained both the pre-change
// (`$crate::boxed_iter::BoxedIter`) and post-change (`$crate::BoxedIter`)
// expansion line in each arm, which is not valid macro syntax; this is the
// post-change form.
#[macro_export]
macro_rules! box_chain {
  ($curr:expr) => {
    Box::new($curr) as $crate::BoxedIter<_>
  };
  ($curr:expr, $($rest:expr),*) => {
    Box::new($curr$(.chain($rest))*) as $crate::BoxedIter<_>
  };
}

View File

@@ -1,34 +0,0 @@
use std::ops::Deref;
use std::rc::Rc;
use futures::future::LocalBoxFuture;
use crate::api;
/// Callback used to schedule a future onto the host's single-threaded executor
pub type Spawner = Rc<dyn Fn(LocalBoxFuture<'static, ()>)>;
/// The 3 primary contact points with an extension are
/// - send a message
/// - wait for a message to arrive
/// - wait for the extension to stop after exit (this is the implicit Drop)
///
/// There are no ordering guarantees about these
pub trait ExtPort {
  /// Deliver one complete message to the extension
  #[must_use]
  fn send<'a>(&'a self, msg: &'a [u8]) -> LocalBoxFuture<'a, ()>;
  /// Wait for the next message from the extension.
  // NOTE(review): `None` presumably signals that the channel closed — confirm
  // against implementors.
  #[must_use]
  fn recv(&self) -> LocalBoxFuture<'_, Option<Vec<u8>>>;
}
/// An extension after startup: the header it announced plus the port used for
/// all further communication
pub struct ExtInit {
  // header received from the extension
  pub header: api::ExtensionHeader,
  // transport for subsequent messages
  pub port: Box<dyn ExtPort>,
}
impl ExtInit {
  /// Forward one message to the extension via the port
  pub async fn send(&self, msg: &[u8]) { self.port.send(msg).await }
  /// Wait for the next message from the extension via the port
  pub async fn recv(&self) -> Option<Vec<u8>> { self.port.recv().await }
}
// Convenience: expose the header's fields directly on `ExtInit`.
impl Deref for ExtInit {
  type Target = api::ExtensionHeader;
  fn deref(&self) -> &Self::Target { &self.header }
}

View File

@@ -5,9 +5,9 @@ use itertools::Itertools;
use crate::api;
pub type CRange = RangeInclusive<char>;
/// A fast character filter to avoid superfluous extension calls in the lexer.
pub trait ICFilter: fmt::Debug {
  /// Returns an ordered set of character ranges
  // NOTE(review): consumers appear to binary-search these ranges, so they are
  // presumably sorted and non-overlapping — confirm with implementors.
  fn ranges(&self) -> &[RangeInclusive<char>];
}
impl ICFilter for [RangeInclusive<char>] {
@@ -17,7 +17,10 @@ impl ICFilter for api::CharFilter {
fn ranges(&self) -> &[RangeInclusive<char>] { &self.0 }
}
/// Merge two ordered, inclusive character ranges into one if they overlap or
/// are adjacent; otherwise return both unchanged.
// NOTE(review): the diff dump retained both the old one-line signature and the
// new expanded one plus a stray hunk header; this is the post-change form.
fn try_merge_char_ranges(
  left: RangeInclusive<char>,
  right: RangeInclusive<char>,
) -> Result<RangeInclusive<char>, (RangeInclusive<char>, RangeInclusive<char>)> {
  // A gap exists exactly when even the code point directly after `left`'s end
  // is still below `right`'s start.
  match *left.end() as u32 + 1 < *right.start() as u32 {
    true => Err((left, right)),
    false => Ok(*left.start()..=*right.end()),
  }
}
/// Process the character ranges to make them adhere to the structural
/// requirements of [CharFilter]
pub fn mk_char_filter(items: impl IntoIterator<Item = CRange>) -> api::CharFilter {
/// requirements of [api::CharFilter]
#[must_use]
pub fn mk_char_filter(items: impl IntoIterator<Item = RangeInclusive<char>>) -> api::CharFilter {
api::CharFilter(
(items.into_iter())
.filter(|r| *r.start() as u32 <= *r.end() as u32)
@@ -37,6 +41,7 @@ pub fn mk_char_filter(items: impl IntoIterator<Item = CRange>) -> api::CharFilte
}
/// Decide whether a char filter matches a character via binary search
#[must_use]
pub fn char_filter_match(cf: &(impl ICFilter + ?Sized), c: char) -> bool {
match cf.ranges().binary_search_by_key(&c, |l| *l.end()) {
Ok(_) => true, // c is the end of a range
@@ -48,6 +53,7 @@ pub fn char_filter_match(cf: &(impl ICFilter + ?Sized), c: char) -> bool {
/// Merge two char filters into a filter that matches if either of the
/// constituents would match.
#[must_use]
pub fn char_filter_union(
l: &(impl ICFilter + ?Sized),
r: &(impl ICFilter + ?Sized),

View File

@@ -1,12 +1,12 @@
/// Shadow the named variables with clones (optionally wrapping `$body` in a
/// block), so that closures and futures can capture the copies by move. A name
/// may be followed by `mut` to make its clone mutable.
// NOTE(review): the diff dump interleaved the old arms (without the optional
// `mut`) with the new ones; this is the post-change form.
#[macro_export]
macro_rules! clone {
  ($($n:ident $($mut:ident)?),+; $body:expr) => (
    {
      $( let $($mut)? $n = $n.clone(); )+
      $body
    }
  );
  ($($n:ident $($mut:ident)?),+) => {
    $( let $($mut)? $n = $n.clone(); )+
  }
}

View File

@@ -1,24 +0,0 @@
//! The concept of a fallible merger
use never::Never;
/// Fallible, type-preserving variant of [std::ops::Add] implemented by a
/// variety of types for different purposes. Very broadly, if the operation
/// succeeds, the result should represent _both_ inputs.
pub trait Combine: Sized {
  /// Information about the failure
  type Error;
  /// Merge two values into a value that represents both, if this is possible.
  fn combine(self, other: Self) -> Result<Self, Self::Error>;
}
// The uninhabited type combines vacuously: there are no values to merge.
impl Combine for Never {
  type Error = Never;
  fn combine(self, _: Self) -> Result<Self, Self::Error> { match self {} }
}
// Unit carries no information, so any two units merge into unit.
impl Combine for () {
  type Error = Never;
  fn combine(self, (): Self) -> Result<Self, Self::Error> { Ok(()) }
}

832
orchid-base/src/comm.rs Normal file
View File

@@ -0,0 +1,832 @@
use std::cell::{BorrowMutError, RefCell};
use std::marker::PhantomData;
use std::pin::{Pin, pin};
use std::rc::Rc;
use std::{io, mem};
use async_fn_stream::try_stream;
use bound::Bound;
use derive_destructure::destructure;
use futures::channel::mpsc::{self, Receiver, Sender, channel};
use futures::channel::oneshot;
use futures::future::LocalBoxFuture;
use futures::lock::{Mutex, MutexGuard};
use futures::{
AsyncRead, AsyncWrite, AsyncWriteExt, FutureExt, SinkExt, Stream, StreamExt, stream_select,
};
use hashbrown::HashMap;
use orchid_api_traits::{Decode, Encode, Request, UnderRoot};
use orchid_async_utils::debug::{PanicOnDrop, assert_no_drop};
use orchid_async_utils::{cancel_cleanup, local_set, to_task};
use crate::{clone, finish_or_stash, stash, with_stash};
// TODO: revise error handling; error recovery is never partial, it always
// requires dropping the server, client, and all requests
/// A token indicating that a reply to a request has been sent. Returned from
/// [RepWriter::finish] which is the raw reply channel, or [ReqHandleExt::reply]
/// or [ReqReaderExt::reply] which are type checked
#[must_use = "Receipts indicate that a required action has been performed within a function. \
  Most likely this should be returned somewhere."]
// Zero-sized: a Receipt carries no data, only proof of control flow.
pub struct Receipt;
impl Receipt {
  /// Only ever call this function from a custom implementation of
  /// [RepWriter::finish]
  pub fn _new() -> Self { Self }
}
/// Return data while waiting for the response to a request. [Self::future] must
/// be awaited in order to ensure that progress is being made
// Both fields refer to the same in-flight request.
pub struct ReqWait {
  /// Future representing waiting for a request. This must be steadily polled.
  pub future: LocalBoxFuture<'static, io::Result<Box<dyn RepReader>>>,
  /// Since the [Self::future] must be awaited which exclusively borrows it,
  /// this separate handle can be used for cancellation.
  pub canceller: Box<dyn CancelNotifier>,
}
/// Write guard to outbound for the purpose of serializing a request. Only one
/// can exist at a time. Dropping this object should panic.
pub trait ReqWriter {
  /// Access to the underlying channel. This may be buffered.
  fn writer(&mut self) -> Pin<&mut dyn AsyncWrite>;
  /// Finalize the request, release the outbound channel, then queue for the
  /// reply on the inbound channel.
  /// The returned [ReqWait] must be driven to completion; see its docs.
  fn send(self: Box<Self>) -> ReqWait;
}
/// Write guard to inbound for the purpose of deserializing a reply. While held,
/// no inbound requests or other replies can be processed.
///
/// # Cancellation
///
/// If the request has been cancelled and the server has accepted the
/// cancellation instead of writing a reply (which is never guaranteed), then
/// this object is inert and should be dropped.
///
/// Dropping this object if [Self::reader] returns [Some] should panic even if
/// [RepReader::finish] returns synchronously, because the API isn't
/// cancellation safe in general so it is a programmer error to drop an object
/// related to it without proper cleanup.
pub trait RepReader {
  /// Access to the underlying channel. The length of the message is inferred
  /// from the number of bytes read so this must not be buffered and a full
  /// reply must always be read from it if available
  ///
  /// This returns None if the request has successfully been cancelled, in which
  /// case this object can be dropped without calling [Self::finish]
  fn reader(&mut self) -> Option<Pin<&mut dyn AsyncRead>>;
  /// Finish reading the reply and release the inbound channel
  fn finish(self: Box<Self>) -> LocalBoxFuture<'static, ()>;
}
/// A handle for cancelling in-flight requests without a reference to
/// the wait future (which would be mutably borrowed by an await at this point)
pub trait CancelNotifier {
  /// Upon cancellation the future may resolve to a stub version of [RepReader]
  /// with no reader access, but since the cancellation is not synchronized
  /// with the server, a full reply may still be received, and if it is, the
  /// original reply must still be read from it.
  // Consumes the notifier: a request can only be cancelled once.
  fn cancel(self: Box<Self>) -> LocalBoxFuture<'static, ()>;
}
/// Write guard to outbound for the purpose of serializing a notification.
///
/// Dropping this object should panic for the same reason [RepReader] panics
pub trait MsgWriter {
  /// Access to the underlying channel. This may be buffered.
  fn writer(&mut self) -> Pin<&mut dyn AsyncWrite>;
  /// Send the notification, flushing the channel and releasing it
  fn finish(self: Box<Self>) -> LocalBoxFuture<'static, io::Result<()>>;
}
/// For initiating outbound requests and notifications
pub trait Client {
  /// Begin an outbound request: acquire the channel and write its prefix
  fn start_request(&self) -> LocalBoxFuture<'static, io::Result<Box<dyn ReqWriter>>>;
  /// Begin an outbound notification: acquire the channel and write its prefix
  fn start_notif(&self) -> LocalBoxFuture<'static, io::Result<Box<dyn MsgWriter>>>;
}
impl<T: Client + ?Sized> ClientExt for T {}
/// Extension trait with convenience methods that handle outbound request and
/// notif lifecycle and typing
#[allow(async_fn_in_trait)]
pub trait ClientExt: Client {
  /// Send `t` and decode its typed response.
  ///
  /// Cancellation: if the returned future is dropped, a cancellation notice is
  /// sent and the pending exchange is driven to completion on the stash.
  #[allow(
    clippy::await_holding_refcell_ref,
    reason = "Must bypass a future return point by sharing the common path"
  )]
  async fn request<T: Request + UnderRoot<Root: Encode>>(&self, t: T) -> io::Result<T::Response> {
    let start_req = self.start_request();
    // This section must finish if it has started, and the returned writer's `send`
    // must be called as well.
    let common = Rc::new(RefCell::new(Some(Box::pin(async move {
      let mut writer = start_req.await?;
      t.into_root().encode(writer.writer().as_mut()).await?;
      io::Result::Ok(writer)
    }))));
    // Initialized in the cancelable section if common returns here. If set, the
    // future inside must be finished on stash after the notification is sent
    // to ensure that the server acknowledges the cancellation, or to decode the
    // result if the cancellation was in fact too late.
    let req_wait_rc = Rc::new(RefCell::new(None));
    // If both this and common are None, that means the cancelable section is
    // already past its last interruptible point, and must be finished on stash
    cancel_cleanup(
      clone!(req_wait_rc, common; Box::pin(async move {
        let req_wait;
        {
          let mut common_g = common.try_borrow_mut().expect("cancel will drop us before locking");
          let common = (common_g.as_mut())
            .expect("Only unset by us below or by cancel after dropping us");
          // cancel handler may take over here
          req_wait = common.await?.send();
          common_g.take();
        }
        let mut rep;
        {
          let mut req_wait_g = (req_wait_rc.try_borrow_mut())
            .expect("We are the first ones to access this");
          *req_wait_g = Some(req_wait);
          let req_wait = req_wait_g.as_mut().expect("Initialized right above");
          // cancel handler may take over here
          rep = req_wait.future.as_mut().await?;
          req_wait_g.take();
        };
        // cancel handler will not interrupt if we've gotten this far
        let reader = rep.reader().expect("Not been cancelled thus far");
        let result = T::Response::decode(reader).await;
        rep.finish().await;
        result
      })),
      // Cleanup callback: invoked with the (partially run) future `fut` if the
      // request is cancelled. The RefCell borrow states encode how far `fut`
      // had progressed before it was interrupted.
      |fut| {
        stash(async move {
          // TODO: strategy for IO errors on stash
          let req_wait = if common.try_borrow_mut().is_ok_and(|r| r.is_none()) {
            // fut was already past common
            match req_wait_rc.try_borrow_mut() {
              Ok(mut opt) => {
                let Some(req_wait) = opt.take() else {
                  // fut was already reading, finish that read and exit
                  fut.await.expect("IO error on stash");
                  return;
                };
                req_wait
              },
              Err(BorrowMutError { .. }) => {
                // fut was in waiting, take over and do our own thing
                std::mem::drop(fut);
                req_wait_rc.take().expect("If it was borrowed then it was still set")
              },
            }
          } else {
            // fut was still in common, take over and finish common
            std::mem::drop(fut);
            let common =
              (common.take()).expect("If it was still borrowed in fut, it was not yet unset");
            common.await.expect("IO error on stash").send()
          };
          req_wait.canceller.cancel().await;
          let mut rep = req_wait.future.await.expect("IO error on stash");
          let Some(reader) = rep.reader() else { return };
          T::Response::decode(reader).await.expect("IO error on stash");
          rep.finish().await;
        })
      },
    )
    .await
  }
  /// Encode and send `t` as a single-fire notification
  async fn notify<T: UnderRoot<Root: Encode> + 'static>(&self, t: T) -> io::Result<()> {
    let start_notif = self.start_notif();
    // If this future is cancelled mid-send, the write is finished on the stash
    // so the channel is never left with a half-written message.
    finish_or_stash(Box::pin(async {
      let mut notif = start_notif.await?;
      t.into_root().encode(notif.writer().as_mut()).await?;
      notif.finish().await?;
      Ok(())
    }))
    .await
  }
}
/// Read guard to inbound for the purpose of deserializing an incoming request
pub trait ReqReader {
  /// Access to the underlying channel. A full request must be read from it.
  fn reader(&mut self) -> Pin<&mut dyn AsyncRead>;
  /// Finish reading the request body; the returned handle is used for the reply
  fn finish(self: Box<Self>) -> LocalBoxFuture<'static, Box<dyn ReqHandle>>;
}
impl<T: ReqReader + ?Sized> ReqReaderExt for T {}
/// Extension trait with typed convenience methods over [ReqReader]
#[allow(async_fn_in_trait)]
pub trait ReqReaderExt: ReqReader {
  /// Decode the request body as `R`
  async fn read_req<R: Decode>(&mut self) -> io::Result<R> { R::decode(self.reader()).await }
  /// Finish reading, then send `rep` as the typed response to `req`
  async fn reply<R: Request>(
    self: Box<Self>,
    req: impl Evidence<R>,
    rep: R::Response,
  ) -> io::Result<Receipt> {
    self.finish().await.reply(req, rep).await
  }
  /// Finish reading and obtain the raw reply channel
  async fn start_reply(self: Box<Self>) -> io::Result<Box<dyn RepWriter>> {
    self.finish().await.start_reply().await
  }
}
/// A fully-read incoming request that can still be replied to
pub trait ReqHandle {
  /// Acquire the outbound channel and begin writing the reply
  fn start_reply(self: Box<Self>) -> LocalBoxFuture<'static, io::Result<Box<dyn RepWriter>>>;
}
impl<T: ReqHandle + ?Sized> ReqHandleExt for T {}
/// Extension trait with a typed convenience method over [ReqHandle]
#[allow(async_fn_in_trait)]
pub trait ReqHandleExt: ReqHandle {
  /// Encode `rep` as the response to the request witnessed by the first
  /// argument
  async fn reply<Req: Request>(
    self: Box<Self>,
    _: impl Evidence<Req>,
    rep: Req::Response,
  ) -> io::Result<Receipt> {
    let start_reply = self.start_reply();
    // If this future is cancelled mid-reply, the write is finished on the
    // stash so the channel is never left with a half-written reply.
    finish_or_stash(Box::pin(async move {
      let mut reply = start_reply.await?;
      rep.encode(reply.writer()).await?;
      reply.finish().await
    }))
    .await
  }
}
/// Write guard to outbound for the purpose of serializing a reply
pub trait RepWriter {
  /// Access to the underlying channel. This may be buffered.
  fn writer(&mut self) -> Pin<&mut dyn AsyncWrite>;
  /// Flush the reply and release the outbound channel
  fn finish(self: Box<Self>) -> LocalBoxFuture<'static, io::Result<Receipt>>;
}
/// Read guard to inbound for the purpose of deserializing a notification
pub trait MsgReader<'a> {
  /// Access to the underlying channel. A full message must be read from it.
  fn reader(&mut self) -> Pin<&mut dyn AsyncRead>;
  /// Finish reading the notification and release the inbound channel
  fn finish(self: Box<Self>) -> LocalBoxFuture<'a, ()>;
}
impl<'a, T: ?Sized + MsgReader<'a>> MsgReaderExt<'a> for T {}
/// Extension trait with a typed convenience method over [MsgReader]
#[allow(async_fn_in_trait)]
pub trait MsgReaderExt<'a>: MsgReader<'a> {
  /// Decode the whole notification as `N`, then finish the read
  async fn read<N: Decode>(mut self: Box<Self>) -> io::Result<N> {
    let n = N::decode(self.reader()).await;
    self.finish().await;
    n
  }
}
/// A form of [Evidence] that doesn't require the value to be kept around
pub struct Witness<T>(PhantomData<T>);
impl<T> Witness<T> {
  /// Record the type of the referenced value without retaining it
  pub fn of(_: &T) -> Self { Self(PhantomData) }
}
// A witness is stateless, so it is trivially copyable.
impl<T> Copy for Witness<T> {}
impl<T> Clone for Witness<T> {
  fn clone(&self) -> Self { Self(PhantomData) }
}
/// A proxy for the type of a value either previously saved into a [Witness] or
/// still available.
pub trait Evidence<T> {}
impl<T> Evidence<T> for &'_ T {}
impl<T> Evidence<T> for Witness<T> {}
// Pinned, heap-allocated half of the duplex channel.
type IoRef<T> = Pin<Box<T>>;
// Shared, lockable channel half; cloned into every reader/writer object.
type IoLock<T> = Rc<Mutex<Pin<Box<T>>>>;
// A held lock on a channel half that also keeps the channel itself alive.
type IoGuard<T> = Bound<MutexGuard<'static, Pin<Box<T>>>, IoLock<T>>;
/// An incoming request. This holds a lock on the ingress channel.
pub struct IoReqReader {
  // prefix to write before the reply body (request id with the reply tag set)
  prefix: u64,
  // locked inbound channel, positioned at the request body
  read: IoGuard<dyn AsyncRead>,
  // outbound channel, only locked once the reply is written
  o: Rc<Mutex<IoRef<dyn AsyncWrite>>>,
}
impl ReqReader for IoReqReader {
  fn reader(&mut self) -> Pin<&mut dyn AsyncRead> { self.read.as_mut() }
  // Dropping `self.read` here releases the inbound lock; the handle keeps only
  // what the reply needs.
  fn finish(self: Box<Self>) -> LocalBoxFuture<'static, Box<dyn ReqHandle>> {
    Box::pin(async {
      Box::new(IoReqHandle { prefix: self.prefix, write: self.o }) as Box<dyn ReqHandle>
    })
  }
}
/// A fully-read incoming request awaiting its reply
pub struct IoReqHandle {
  // prefix to write before the reply body
  prefix: u64,
  write: IoLock<dyn AsyncWrite>,
}
impl ReqHandle for IoReqHandle {
  fn start_reply(self: Box<Self>) -> LocalBoxFuture<'static, io::Result<Box<dyn RepWriter>>> {
    let write = self.write.clone();
    Box::pin(async move {
      // lock the outbound channel for the whole duration of the reply
      let mut write = Bound::async_new(write, |l| l.lock()).await;
      self.prefix.encode(write.as_mut()).await?;
      Ok(Box::new(IoRepWriter { write }) as Box<dyn RepWriter>)
    })
  }
}
/// Write guard for an outgoing reply; holds the outbound lock until finished
pub struct IoRepWriter {
  write: IoGuard<dyn AsyncWrite>,
}
impl RepWriter for IoRepWriter {
  fn writer(&mut self) -> Pin<&mut dyn AsyncWrite> { self.write.as_mut() }
  fn finish(mut self: Box<Self>) -> LocalBoxFuture<'static, io::Result<Receipt>> {
    Box::pin(async move {
      // flush before the guard is dropped so replies are never interleaved
      self.writer().flush().await?;
      Ok(Receipt)
    })
  }
}
/// Read guard for an incoming notification; holds the inbound lock
pub struct IoMsgReader<'a> {
  // ties the reader's lifetime to the borrow taken by the listen loop
  _pd: PhantomData<&'a mut ()>,
  read: IoGuard<dyn AsyncRead>,
}
impl<'a> MsgReader<'a> for IoMsgReader<'a> {
  fn reader(&mut self) -> Pin<&mut dyn AsyncRead> { self.read.as_mut() }
  // Dropping `self` releases the inbound lock; nothing else to clean up.
  fn finish(self: Box<Self>) -> LocalBoxFuture<'static, ()> { Box::pin(async {}) }
}
/// Outcome of waiting for a reply on the wire
pub enum ReplyRecord {
  /// The server acknowledged the cancellation; no reply body follows
  Cancelled,
  /// A reply body is ready to be read from the locked inbound channel
  Ready(IoGuard<dyn AsyncRead>),
}
/// Registration of an in-flight request with the listen loop
#[derive(Debug)]
struct ReplySub {
  // request id to match replies against
  id: u64,
  // signalled once the subscription is recorded, before the request is written
  ack: oneshot::Sender<()>,
  // receives the reply body or the cancellation acknowledgement
  cb: oneshot::Sender<ReplyRecord>,
}
/// [Client] implementation over a raw duplex byte channel
struct IoClient {
  output: IoLock<dyn AsyncWrite>,
  // monotonically increasing request id; 0 is reserved for notifications
  id: Rc<RefCell<u64>>,
  // channel into the listen loop for registering reply subscriptions
  subscribe: Rc<Sender<ReplySub>>,
}
impl IoClient {
  // Returns the subscription receiver (consumed by the listen loop) and the
  // client itself.
  fn new(output: IoLock<dyn AsyncWrite>) -> (Receiver<ReplySub>, Self) {
    let (req, rep) = mpsc::channel(0);
    (rep, Self { output, id: Rc::new(RefCell::new(0)), subscribe: Rc::new(req) })
  }
}
impl Client for IoClient {
  fn start_notif(&self) -> LocalBoxFuture<'static, io::Result<Box<dyn MsgWriter>>> {
    let output = self.output.clone();
    Box::pin(async {
      let drop_g = assert_no_drop("Notif future dropped");
      let mut o = Bound::async_new(output, |o| o.lock()).await;
      // notifications are introduced by the reserved prefix 0
      0u64.encode(o.as_mut()).await?;
      drop_g.defuse();
      Ok(Box::new(IoNotifWriter { o, drop_g: assert_no_drop("Notif writer dropped") })
        as Box<dyn MsgWriter>)
    })
  }
  fn start_request(&self) -> LocalBoxFuture<'static, io::Result<Box<dyn ReqWriter>>> {
    let output = self.output.clone();
    // allocate the next request id eagerly, outside the returned future
    let id = {
      let mut id_g = self.id.borrow_mut();
      *id_g += 1;
      *id_g
    };
    let (cb, reply) = oneshot::channel();
    let (ack, got_ack) = oneshot::channel();
    let mut subscribe = self.subscribe.as_ref().clone();
    let start_req_drop_g = assert_no_drop("Request future dropped");
    Box::pin(async move {
      // register with the listen loop before writing so the reply cannot race
      // the subscription
      subscribe.send(ReplySub { id, ack, cb }).await.unwrap();
      got_ack.await.unwrap();
      // the top byte of the prefix is the message tag: 0x00 marks a request
      let mut xfer_bytes = id.to_be_bytes();
      xfer_bytes[0] = 0x00;
      let req_prefix = u64::from_be_bytes(xfer_bytes);
      let mut w = Bound::async_new(output.clone(), |o| o.lock()).await;
      req_prefix.encode(w.as_mut()).await?;
      start_req_drop_g.defuse();
      Ok(Box::new(IoReqWriter {
        id,
        output,
        reply,
        w,
        drop_g: assert_no_drop("Request reader dropped without reply"),
      }) as Box<dyn ReqWriter>)
    })
  }
}
/// [CancelNotifier] that writes a cancellation message for one request id
struct IoReqCanceller {
  id: u64,
  output: IoLock<dyn AsyncWrite>,
}
impl CancelNotifier for IoReqCanceller {
  fn cancel(self: Box<Self>) -> LocalBoxFuture<'static, ()> {
    // tag 0x02 in the top byte marks a cancellation for `id`
    let mut xfer_bytes = self.id.to_be_bytes();
    xfer_bytes[0] = 0x02;
    let cancel_id = u64::from_be_bytes(xfer_bytes);
    let cancel_signal_drop_g = assert_no_drop("Cannot cancel the sending of a cancellation");
    let o = self.output.clone();
    Box::pin(async move {
      let mut o = o.lock().await;
      // write errors are swallowed: cancellation is best-effort by design
      let _ = cancel_id.encode(o.as_mut()).await;
      cancel_signal_drop_g.defuse();
    })
  }
}
/// In-progress outbound request; holds the outbound lock while the body is
/// written
struct IoReqWriter {
  id: u64,
  // resolved by the listen loop when the reply or cancellation ack arrives
  reply: oneshot::Receiver<ReplyRecord>,
  output: IoLock<dyn AsyncWrite>,
  w: IoGuard<dyn AsyncWrite>,
  // panics if the writer is dropped without a proper send
  drop_g: PanicOnDrop,
}
impl ReqWriter for IoReqWriter {
  fn writer(&mut self) -> Pin<&mut dyn AsyncWrite> { self.w.as_mut() }
  fn send(self: Box<Self>) -> ReqWait {
    let Self { id, output, reply, mut w, drop_g } = *self;
    // the canceller keeps only what it needs to write the cancel message
    let canceller = IoReqCanceller { id, output };
    let future = async {
      w.flush().await?;
      // release the outbound lock before waiting for the reply
      mem::drop(w);
      let reply_record = reply.await.expect("Client dropped before reply received");
      drop_g.defuse();
      Ok(Box::new(IoRepReader {
        reply_record,
        drop_g: assert_no_drop("Reply reader dropped without finishing"),
      }) as Box<dyn RepReader>)
    };
    ReqWait { future: Box::pin(future), canceller: Box::new(canceller) }
  }
}
/// Reader over a received reply, or the stub left by a successful cancellation
struct IoRepReader {
  reply_record: ReplyRecord,
  // panics if the reader is dropped without calling finish
  drop_g: PanicOnDrop,
}
impl RepReader for IoRepReader {
  // None exactly when the request was successfully cancelled; see
  // [RepReader::reader] for the contract.
  fn reader(&mut self) -> Option<Pin<&mut dyn AsyncRead>> {
    match &mut self.reply_record {
      ReplyRecord::Cancelled => None,
      ReplyRecord::Ready(guard) => Some(guard.as_mut()),
    }
  }
  fn finish(self: Box<Self>) -> LocalBoxFuture<'static, ()> {
    Box::pin(async { self.drop_g.defuse() })
  }
}
/// In-progress outbound notification; holds the outbound lock
#[derive(destructure)]
struct IoNotifWriter {
  o: IoGuard<dyn AsyncWrite>,
  // panics if the writer is dropped without calling finish
  drop_g: PanicOnDrop,
}
impl MsgWriter for IoNotifWriter {
  fn writer(&mut self) -> Pin<&mut dyn AsyncWrite> { self.o.as_mut() }
  fn finish(mut self: Box<Self>) -> LocalBoxFuture<'static, io::Result<()>> {
    Box::pin(async move {
      // flush before the guard is dropped so messages are never interleaved
      let ret = self.o.flush().await;
      self.drop_g.defuse();
      ret
    })
  }
}
/// Handle for shutting down the paired [IoCommServer]
pub struct CommCtx {
  exit: Sender<()>,
}
impl CommCtx {
  /// Ask the listen loop to exit; it then flushes and closes the channel
  pub async fn exit(self) -> io::Result<()> {
    self.exit.clone().send(()).await.expect("quit channel dropped");
    Ok(())
  }
}
/// Establish bidirectional request-notification communication over a duplex
/// channel. The returned [IoClient] can be used for notifications immediately,
/// but requests can only be received while the future is running. The future
/// will only resolve when [CommCtx::exit] is called.
pub fn io_comm(
  o: Pin<Box<dyn AsyncWrite>>,
  i: Pin<Box<dyn AsyncRead>>,
) -> (impl Client + 'static, CommCtx, IoCommServer) {
  let i = Rc::new(Mutex::new(i));
  let o = Rc::new(Mutex::new(o));
  // the client registers reply subscriptions with the server via `onsub`
  let (onsub, client) = IoClient::new(o.clone());
  let (exit, onexit) = channel(1);
  (client, CommCtx { exit }, IoCommServer { o, i, onsub, onexit })
}
/// The listening half produced by [io_comm]; drive it with [Self::listen]
pub struct IoCommServer {
  o: Rc<Mutex<Pin<Box<dyn AsyncWrite>>>>,
  i: Rc<Mutex<Pin<Box<dyn AsyncRead>>>>,
  // reply subscriptions registered by the paired client
  onsub: Receiver<ReplySub>,
  // exit requests coming from [CommCtx]
  onexit: Receiver<()>,
}
impl IoCommServer {
  /// Run the receive loop: dispatch notifications to `notif` and requests to
  /// `req` until [CommCtx::exit] is called or the peer closes the channel,
  /// then flush and close the outbound channel.
  ///
  /// Wire format (as handled below): every message starts with a big-endian
  /// u64. The whole value 0 marks a notification; otherwise the top byte is a
  /// tag (0x00 request, 0x01 response, 0x02 cancellation, 0x03 stub reply for
  /// a cancelled request) and the remaining bytes are the request id.
  pub async fn listen(
    self,
    notif: impl AsyncFn(Box<dyn MsgReader>) -> io::Result<()>,
    req: impl AsyncFn(Box<dyn ReqReader>) -> io::Result<Receipt>,
  ) -> io::Result<()> {
    let Self { o, i, onexit, onsub } = self;
    // Unified event type for the merged stream below.
    enum Event {
      Input(u64, IoGuard<dyn AsyncRead>),
      Sub(ReplySub),
      Exit,
    }
    // Reads message prefixes off the wire; the guard keeps the inbound channel
    // locked until the message body has been consumed by a handler.
    let input_stream = try_stream(async |mut h| {
      loop {
        let mut g = Bound::async_new(i.clone(), async |i| i.lock().await).await;
        match u64::decode(g.as_mut()).await {
          Ok(id) => h.emit(Event::Input(id, g)).await,
          Err(e) => match e.kind() {
            // a closed peer is a normal exit, not an error
            io::ErrorKind::BrokenPipe
            | io::ErrorKind::ConnectionAborted
            | io::ErrorKind::UnexpectedEof => h.emit(Event::Exit).await,
            _ => return Err(e),
          },
        }
      }
    });
    // abort handles of requests currently being served, by request id
    let running_requests = RefCell::new(HashMap::new());
    let (mut task_pool, fork_future) = local_set();
    let mut fork_stream = pin!(fork_future.into_stream());
    // reply callbacks of outbound requests awaiting a response, by request id
    let mut pending_replies = HashMap::new();
    'body: {
      let mut shared = stream_select! {
        pin!(input_stream) as Pin<&mut dyn Stream<Item = io::Result<Event>>>,
        onsub.map(|sub| Ok(Event::Sub(sub))),
        fork_stream.as_mut().map(|res| {
          res.map(|()| panic!("this substream cannot exit while the loop is running") as Event)
        }),
        onexit.map(|()| Ok(Event::Exit)),
      };
      while let Some(next) = shared.next().await {
        match next {
          Err(e) => break 'body Err(e),
          Ok(Event::Exit) => break,
          Ok(Event::Sub(ReplySub { id, ack, cb })) => {
            pending_replies.insert(id, cb);
            // this is detected and logged on client
            let _ = ack.send(());
          },
          // ID 0 is reserved for single-fire notifications
          Ok(Event::Input(0, read)) => {
            let notif = &notif;
            task_pool.spawn(notif(Box::new(IoMsgReader { _pd: PhantomData, read }))).await.unwrap();
          },
          // non-zero IDs are associated with requests
          Ok(Event::Input(id, read)) => {
            // the MSb decides what kind of message this is
            let mut id_bytes = id.to_be_bytes();
            let discr = std::mem::replace(&mut id_bytes[0], 0x00);
            let id = u64::from_be_bytes(id_bytes);
            match discr {
              // request
              0x00 => {
                let (o, req, reqs) = (o.clone(), &req, &running_requests);
                task_pool
                  .spawn(async move {
                    id_bytes[0] = 0x01;
                    let prefix = u64::from_be_bytes(id_bytes);
                    let reader = Box::new(IoReqReader { prefix, read, o });
                    let (fut, handle) = to_task(async { req(reader).await.map(|Receipt| ()) });
                    reqs.borrow_mut().insert(id, handle);
                    with_stash(fut).await;
                    // during this await the read guard is released and thus we may receive a
                    // cancel notification from below
                    Ok(())
                  })
                  .await
                  .unwrap();
              },
              // response
              0x01 => {
                let cb = pending_replies.remove(&id).expect("Reply to unrecognized request");
                cb.send(ReplyRecord::Ready(read))
                  .unwrap_or_else(|_| panic!("Failed to send reply"));
              },
              // cancellation
              0x02 => {
                match running_requests.borrow().get(&id) {
                  Some(handle) => handle.abort(),
                  // assuming that the client is correct, if there is no record
                  // then the reply was already sent
                  None => continue,
                };
                // if the request starts writing back before our abort arrives, we only
                // get this mutex once it's done
                let mut write = o.lock().await;
                // if the request is still in the store, the write didn't begin
                let Some(_) = running_requests.borrow_mut().remove(&id) else { continue };
                id_bytes[0] = 0x03;
                let cancel_code = u64::from_be_bytes(id_bytes);
                cancel_code.encode(write.as_mut()).await?;
              },
              // stub reply for cancelled request
              0x03 => {
                let cb = pending_replies.remove(&id).expect("Cancelling unrecognized request");
                cb.send(ReplyRecord::Cancelled)
                  .unwrap_or_else(|_| panic!("Failed to send reply cancellation"))
              },
              n => panic!("Unrecognized message type code {n}"),
            }
          },
        }
      }
      Ok(())
    }?;
    // drain the task pool so in-flight handlers finish before closing
    mem::drop(task_pool);
    while let Some(next) = fork_stream.next().await {
      next?
    }
    let mut out = o.lock().await;
    out.as_mut().flush().await?;
    out.as_mut().close().await?;
    Ok(())
  }
}
#[cfg(test)]
mod test {
use std::cell::RefCell;
use futures::channel::mpsc;
use futures::{FutureExt, SinkExt, StreamExt, join, select};
use never::Never;
use orchid_api_derive::{Coding, Hierarchy};
use orchid_api_traits::Request;
use orchid_async_utils::debug::spin_on;
use unsync_pipe::pipe;
use crate::comm::{ClientExt, MsgReaderExt, ReqReaderExt, io_comm};
use crate::with_stash;
#[derive(Clone, Debug, PartialEq, Coding, Hierarchy)]
#[extendable]
struct TestNotif(u64);
#[test]
fn notification() {
spin_on(false, async {
let (in1, out2) = pipe(1024);
let (in2, out1) = pipe(1024);
let (received, mut on_receive) = mpsc::channel(2);
let (_, recv_ctx, recv_srv) = io_comm(Box::pin(in2), Box::pin(out2));
let (sender, ..) = io_comm(Box::pin(in1), Box::pin(out1));
join!(
async {
recv_srv
.listen(
async |notif| {
received.clone().send(notif.read::<TestNotif>().await?).await.unwrap();
Ok(())
},
async |_| panic!("Should receive notif, not request"),
)
.await
.unwrap()
},
async {
sender.notify(TestNotif(3)).await.unwrap();
assert_eq!(on_receive.next().await, Some(TestNotif(3)));
sender.notify(TestNotif(4)).await.unwrap();
assert_eq!(on_receive.next().await, Some(TestNotif(4)));
recv_ctx.exit().await.unwrap();
}
);
})
}
#[derive(Clone, Debug, Coding, Hierarchy)]
#[extendable]
struct DummyRequest(u64);
impl Request for DummyRequest {
type Response = u64;
}
#[test]
fn request() {
spin_on(false, async {
let (in1, out2) = pipe(1024);
let (in2, out1) = pipe(1024);
let (_, srv_ctx, srv) = io_comm(Box::pin(in2), Box::pin(out2));
let (client, client_ctx, client_srv) = io_comm(Box::pin(in1), Box::pin(out1));
join!(
async {
srv
.listen(
async |_| panic!("No notifs expected"),
async |mut req| {
let val = req.read_req::<DummyRequest>().await?;
req.reply(&val, val.0 + 1).await
},
)
.await
.unwrap()
},
async {
client_srv
.listen(
async |_| panic!("Not expecting ingress notif"),
async |_| panic!("Not expecting ingress req"),
)
.await
.unwrap()
},
async {
let response = client.request(DummyRequest(5)).await.unwrap();
assert_eq!(response, 6);
srv_ctx.exit().await.unwrap();
client_ctx.exit().await.unwrap();
}
);
})
}
#[test]
fn exit() {
  spin_on(false, async {
    // Cross-wired pipes: the `reply_*` peer answers traffic, the `req_*`
    // peer issues it.
    let (input1, output1) = pipe(1024);
    let (input2, output2) = pipe(1024);
    let (reply_client, reply_context, reply_server) =
      io_comm(Box::pin(input1), Box::pin(output2));
    let (req_client, req_context, req_server) = io_comm(Box::pin(input2), Box::pin(output1));
    // Stored in a RefCell so the notif handler below can move the context
    // out with `take` when it fires.
    let reply_context = RefCell::new(Some(reply_context));
    // Signals the requesting task once the reply server has shut down.
    let (exit, onexit) = futures::channel::oneshot::channel::<()>();
    join!(
      async move {
        reply_server
          .listen(
            // A notification triggers the exit; `listen` should then return.
            async |hand| {
              let _notif = hand.read::<TestNotif>().await.unwrap();
              let context = reply_context.borrow_mut().take().unwrap();
              context.exit().await?;
              Ok(())
            },
            // Requests are still answered normally before the exit.
            async |mut hand| {
              let req = hand.read_req::<DummyRequest>().await?;
              hand.reply(&req, req.0 + 1).await
            },
          )
          .await
          .unwrap();
        exit.send(()).unwrap();
        // Keep the client handle alive until `listen` has returned.
        let _client = reply_client;
      },
      async move {
        // This server only drives its own connection; it should never
        // receive traffic.
        req_server
          .listen(
            async |_| panic!("Only the other server expected notifs"),
            async |_| panic!("Only the other server expected requests"),
          )
          .await
          .unwrap();
        let _ctx = req_context;
      },
      async move {
        // One answered request, then the notification that triggers the
        // exit, then wait for confirmation of the shutdown.
        req_client.request(DummyRequest(0)).await.unwrap();
        req_client.notify(TestNotif(0)).await.unwrap();
        onexit.await.unwrap();
      }
    )
  });
}
#[test]
fn timely_cancel() {
  spin_on(false, async {
    let (in1, out2) = pipe(1024);
    let (in2, out1) = pipe(1024);
    // Signal channel: the server tells the test body when it has read the
    // request, so the `select!` below can resolve on the non-request arm.
    let (wait_in, mut wait_out) = mpsc::channel(0);
    let (_, srv_ctx, srv) = io_comm(Box::pin(in2), Box::pin(out2));
    let (client, client_ctx, client_srv) = io_comm(Box::pin(in1), Box::pin(out1));
    join!(
      async {
        srv
          .listen(
            async |_| panic!("No notifs expected"),
            // Deliberately never replies: signal the test body, then park
            // forever so the client-side request can only end by being
            // cancelled.
            async |mut req| {
              let _ = req.read_req::<DummyRequest>().await?;
              wait_in.clone().send(()).await.unwrap();
              // TODO: verify cancellation
              futures::future::pending::<Never>().await;
              unreachable!("request should be cancelled before resume is triggered")
            },
          )
          .await
          .unwrap()
      },
      // The client also runs a listener to drive its connection; nothing
      // should arrive on it.
      async {
        client_srv
          .listen(
            async |_| panic!("Not expecting ingress notif"),
            async |_| panic!("Not expecting ingress req"),
          )
          .await
          .unwrap()
      },
      // NOTE(review): `with_stash`'s exact role isn't visible from here —
      // confirm against its definition. The request future is dropped when
      // the signal arm wins the race, which should cancel it server-side.
      with_stash(async {
        select! {
          _ = client.request(DummyRequest(5)).fuse() => panic!("This one should not run"),
          rep = wait_out.next() => rep.expect("something?"),
        };
        srv_ctx.exit().await.unwrap();
        client_ctx.exit().await.unwrap();
      })
    );
  })
}
}

View File

@@ -1,15 +1,17 @@
use std::cell::RefCell;
use std::ffi::OsStr;
use std::fmt;
use std::num::{NonZero, NonZeroUsize};
use std::ops::Add;
use std::rc::Rc;
use std::sync::Arc;
use futures::FutureExt;
use futures::future::join_all;
use itertools::Itertools;
use task_local::task_local;
use crate::api;
use crate::interner::{Interner, Tok};
use crate::location::Pos;
use crate::{IStr, Pos, api, es, is};
/// A point of interest in resolving the error, such as the point where
/// processing got stuck, a command that is likely to be incorrect
@@ -21,13 +23,16 @@ pub struct ErrPos {
pub message: Option<Arc<String>>,
}
impl ErrPos {
/// Create from a position with a position-specific message. If there's no
/// message, use `Pos::into`
#[must_use]
pub fn new(msg: &str, position: Pos) -> Self {
Self { message: Some(Arc::new(msg.to_string())), position }
}
async fn from_api(api: &api::ErrLocation, i: &Interner) -> Self {
async fn from_api(api: api::ErrLocation) -> Self {
Self {
message: Some(api.message.clone()).filter(|s| !s.is_empty()),
position: Pos::from_api(&api.location, i).await,
message: Some(api.message).filter(|s| !s.is_empty()),
position: Pos::from_api(&api.location).await,
}
}
fn to_api(&self) -> api::ErrLocation {
@@ -49,10 +54,16 @@ impl fmt::Display for ErrPos {
}
}
/// An error that occurred in Orchid code, whether at startup or runtime
#[derive(Clone, Debug)]
pub struct OrcErr {
pub description: Tok<String>,
pub message: Arc<String>,
/// A generic error message used in categorizing errors
/// You can also equality-compare atoms with these message tokens
pub description: IStr,
/// A specific error message that may include values relevant in resolving the
/// error
pub message: Rc<String>,
/// Various locations in code that may be useful in resolving the error
pub positions: Vec<ErrPos>,
}
impl OrcErr {
@@ -63,17 +74,16 @@ impl OrcErr {
locations: self.positions.iter().map(ErrPos::to_api).collect(),
}
}
async fn from_api(api: &api::OrcError, i: &Interner) -> Self {
async fn from_api(api: api::OrcError) -> Self {
Self {
description: Tok::from_api(api.description, i).await,
message: api.message.clone(),
positions: join_all(api.locations.iter().map(|e| ErrPos::from_api(e, i))).await,
description: es(api.description).await,
message: api.message,
positions: join_all(api.locations.into_iter().map(ErrPos::from_api)).await,
}
}
}
impl Eq for OrcErr {}
impl PartialEq for OrcErr {
fn eq(&self, other: &Self) -> bool { self.description == other.description }
impl PartialEq<IStr> for OrcErr {
fn eq(&self, other: &IStr) -> bool { self.description == *other }
}
impl From<OrcErr> for Vec<OrcErr> {
fn from(value: OrcErr) -> Self { vec![value] }
@@ -85,6 +95,8 @@ impl fmt::Display for OrcErr {
}
}
/// Rust error produced when an Orchid error condition arises but no
/// specific errors are listed. This is always a Rust programmer error.
#[derive(Clone, Debug)]
pub struct EmptyErrv;
impl fmt::Display for EmptyErrv {
@@ -93,42 +105,58 @@ impl fmt::Display for EmptyErrv {
}
}
/// A container for one or more errors. Code that supports error recovery should
/// use these instead of plain [OrcErr] objects.
#[derive(Clone, Debug)]
pub struct OrcErrv(Vec<OrcErr>);
impl OrcErrv {
/// Create from individual errors. If you have exactly one initial error, see
/// [mk_errv]
pub fn new(errors: impl IntoIterator<Item = OrcErr>) -> Result<Self, EmptyErrv> {
let v = errors.into_iter().collect_vec();
if v.is_empty() { Err(EmptyErrv) } else { Ok(Self(v)) }
}
/// Add additional errors to this container. Since `OrcErrv` also implements
/// [IntoIterator], this can take `(Self, Self)`
#[must_use]
pub fn extended<T>(mut self, errors: impl IntoIterator<Item = T>) -> Self
where Self: Extend<T> {
self.extend(errors);
self
}
/// Determine how many distinct errors there are in the container
#[must_use]
pub fn len(&self) -> usize { self.0.len() }
#[must_use]
pub fn is_empty(&self) -> bool { self.len() == 0 }
pub fn len(&self) -> NonZeroUsize { NonZero::new(self.0.len()).expect("OrcErrv cannot be empty") }
/// See if any errors match a particular filter criteria. This is useful for
/// sentinel errors which are produced by user code to trigger unique
/// behaviours such as a lexer mismatch
#[must_use]
pub fn any(&self, f: impl FnMut(&OrcErr) -> bool) -> bool { self.0.iter().any(f) }
/// Remove all errors that don't match a filter criterion. If no errors match,
/// nothing is returned
#[must_use]
pub fn keep_only(self, f: impl FnMut(&OrcErr) -> bool) -> Option<Self> {
let v = self.0.into_iter().filter(f).collect_vec();
if v.is_empty() { None } else { Some(Self(v)) }
}
/// If there is exactly one error, return it. Mostly used for simplified
/// printing
#[must_use]
pub fn one(&self) -> Option<&OrcErr> { (self.0.len() == 1).then(|| &self.0[9]) }
pub fn one(&self) -> Option<&OrcErr> { self.0.iter().exactly_one().ok() }
/// Iterate over all positions of all errors
pub fn pos_iter(&self) -> impl Iterator<Item = ErrPos> + '_ {
self.0.iter().flat_map(|e| e.positions.iter().cloned())
}
/// Serialize for transmission
#[must_use]
pub fn to_api(&self) -> Vec<api::OrcError> { self.0.iter().map(OrcErr::to_api).collect() }
pub async fn from_api<'a>(
api: impl IntoIterator<Item = &'a api::OrcError>,
i: &Interner,
) -> Self {
Self(join_all(api.into_iter().map(|e| OrcErr::from_api(e, i))).await)
/// Deserialize from transmission
#[must_use]
pub async fn from_api(api: impl IntoIterator<Item = api::OrcError>) -> Self {
Self(join_all(api.into_iter().map(OrcErr::from_api)).await)
}
/// Iterate over the errors without consuming the collection
pub fn iter(&self) -> impl Iterator<Item = OrcErr> + '_ { self.0.iter().cloned() }
}
impl From<OrcErr> for OrcErrv {
fn from(value: OrcErr) -> Self { Self(vec![value]) }
@@ -156,69 +184,143 @@ impl fmt::Display for OrcErrv {
}
}
/// A result from a function that may return multiple errors.
pub type OrcRes<T> = Result<T, OrcErrv>;
pub fn mk_err(
description: Tok<String>,
message: impl AsRef<str>,
posv: impl IntoIterator<Item = ErrPos>,
) -> OrcErr {
OrcErr {
description,
message: Arc::new(message.as_ref().to_string()),
positions: posv.into_iter().collect(),
/// If two fallible values both succeed return both values, otherwise return
/// all errors.
pub fn join_ok<T, U>(left: OrcRes<T>, right: OrcRes<U>) -> OrcRes<(T, U)> {
match (left, right) {
(Ok(t), Ok(u)) => Ok((t, u)),
(Err(e), Ok(_)) | (Ok(_), Err(e)) => Err(e),
(Err(e1), Err(e2)) => Err(e1 + e2),
}
}
#[macro_export]
macro_rules! join_ok {
($($names:ident $(: $tys:ty)? = $vals:expr;)*) => {
let $crate::join_ok!(@NAMES $($names $(: $tys)? = $vals;)*)
:
$crate::join_ok!(@TYPES $($names $(: $tys)? = $vals;)*)
=
$crate::join_ok!(@VALUES $($names $(: $tys)? = $vals;)*)?;
};
(@NAMES $name:ident $(: $ty:ty)? = $val:expr ; $($names:ident $(: $tys:ty)? = $vals:expr;)*) => {
($name, $crate::join_ok!(@NAMES $($names $(: $tys)? = $vals;)*))
};
(@NAMES) => { _ };
(@TYPES $name:ident : $ty:ty = $val:expr ; $($names:ident $(: $tys:ty)? = $vals:expr;)*) => {
($ty, $crate::join_ok!(@TYPES $($names $(: $tys)? = $vals;)*))
};
(@TYPES $name:ident = $val:expr ; $($names:ident $(: $tys:ty)? = $vals:expr;)*) => {
(_, $crate::join_ok!(@TYPES $($names $(: $tys)? = $vals;)*))
};
(@TYPES) => { () };
(@VALUES $name:ident $(: $ty:ty)? = $val:expr ; $($names:ident $(: $tys:ty)? = $vals:expr;)*) => {
$crate::join_ok($val, $crate::join_ok!(@VALUES $($names $(: $tys)? = $vals;)*))
};
(@VALUES) => { Ok(()) };
}
/// Create an errv without an associated position, as opposed to [mk_errv].
/// While this is technically legal and sometimes needed in library code, all
/// errors that are technically possible to associate with at least one position
/// should be.
#[must_use]
pub fn mk_errv_floating(description: IStr, message: impl AsRef<str>) -> OrcErrv {
mk_errv::<Pos>(description, message, [])
}
/// Create an errv. The third argument can be an iterable of [ErrPos] or [Pos].
#[must_use]
pub fn mk_errv<I: Into<ErrPos>>(
description: Tok<String>,
description: IStr,
message: impl AsRef<str>,
posv: impl IntoIterator<Item = I>,
) -> OrcErrv {
mk_err(description, message, posv.into_iter().map_into()).into()
OrcErr {
description,
message: Rc::new(message.as_ref().to_string()),
positions: posv.into_iter().map_into().collect(),
}
.into()
}
/// Convert a standard IO error into an Orchid error
#[must_use]
pub async fn async_io_err<I: Into<ErrPos>>(
err: async_std::io::Error,
i: &Interner,
err: std::io::Error,
posv: impl IntoIterator<Item = I>,
) -> OrcErrv {
mk_errv(i.i(&err.kind().to_string()).await, err.to_string(), posv)
mk_errv(is(&err.kind().to_string()).await, err.to_string(), posv)
}
pub async fn os_str_to_string<'a, I: Into<ErrPos>>(
str: &'a OsStr,
i: &Interner,
/// Decode an Unicode string, or produce a common error related to Unicode
/// decoding
pub async fn os_str_to_string<I: Into<ErrPos>>(
str: &OsStr,
posv: impl IntoIterator<Item = I>,
) -> OrcRes<&'a str> {
) -> OrcRes<&str> {
match str.to_str() {
Some(str) => Ok(str),
None => Err(mk_errv(
i.i("Non-unicode string").await,
is("Non-unicode string").await,
format!("{str:?} is not representable as unicode"),
posv,
)),
}
}
pub struct Reporter {
errors: RefCell<Vec<OrcErr>>,
#[derive(Clone, Default)]
struct Reporter {
errors: Rc<RefCell<Vec<OrcErr>>>,
}
impl Reporter {
pub fn report(&self, e: impl Into<OrcErrv>) { self.errors.borrow_mut().extend(e.into()) }
pub fn new() -> Self { Self { errors: RefCell::new(vec![]) } }
pub fn errv(self) -> Option<OrcErrv> { OrcErrv::new(self.errors.into_inner()).ok() }
pub fn merge<T>(self, res: OrcRes<T>) -> OrcRes<T> {
match (res, self.errv()) {
(res, None) => res,
(Ok(_), Some(errv)) => Err(errv),
(Err(e), Some(errv)) => Err(e + errv),
}
task_local! {
static REPORTER: Reporter;
}
/// Run the future with a new reporter, and return all errors reported within.
///
/// If your future returns [OrcRes], see [try_with_reporter]
pub async fn with_reporter<T>(fut: impl Future<Output = T>) -> OrcRes<T> {
try_with_reporter(fut.map(Ok)).await
}
/// Run the future with a new reporter, and return all errors either returned or
/// reported by it
///
/// If your future may report errors but always returns an approximate value,
/// see [with_reporter]
pub async fn try_with_reporter<T>(fut: impl Future<Output = OrcRes<T>>) -> OrcRes<T> {
let rep = Reporter::default();
let res = REPORTER.scope(rep.clone(), fut).await;
let errors = rep.errors.take();
match (res, &errors[..]) {
(Ok(t), []) => Ok(t),
(Ok(_), [_, ..]) => Err(OrcErrv::new(errors).unwrap()),
(Err(e), _) => Err(e.extended(errors)),
}
pub fn is_empty(&self) -> bool { self.errors.borrow().is_empty() }
}
impl Default for Reporter {
fn default() -> Self { Self::new() }
/// Determine if there are pending errors or if this overarching procedure has a
/// chance to succeed
#[must_use]
pub async fn is_erroring() -> bool {
(REPORTER.try_with(|r| !r.errors.borrow().is_empty()))
.expect("Sidechannel errors must be caught by a reporter")
}
/// Report an error that is fatal and prevents a correct output, but
/// still allows the current task to continue and produce an approximate output.
/// This can be used for best-effort passes that keep going and collect every
/// error they encounter.
pub fn report(e: impl Into<OrcErrv>) {
let errv = e.into();
REPORTER.try_with(|r| r.errors.borrow_mut().extend(errv.clone())).unwrap_or_else(|_| {
panic!(
"Unhandled error! Sidechannel errors must be caught by an enclosing call to with_reporter.\n\
Error: {errv}"
)
})
}

View File

@@ -1,67 +0,0 @@
//! Multiple-listener-single-delivery event system.
use std::mem;
use std::sync::Mutex;
use std::sync::mpsc::{self, sync_channel};
/// A listener's answer to an offered event.
struct Reply<T, U> {
  // Whether the listener wants to stay subscribed after this answer.
  resub: bool,
  // `Ok(output)` consumes the event; `Err(event)` hands it back untouched
  // so it can be offered to the next listener.
  outcome: Result<U, T>,
}
/// The dispatcher-side handles of one subscribed listener.
struct Listener<T, E> {
  // Events are offered to the listener through here.
  sink: mpsc::SyncSender<T>,
  // The listener's verdict comes back through here.
  source: mpsc::Receiver<Reply<T, E>>,
}
/// Multiple-listener, single-delivery event bus: each dispatched value is
/// consumed by at most one listener.
pub struct Event<T, U> {
  listeners: Mutex<Vec<Listener<T, U>>>,
}
impl<T, U> Event<T, U> {
  /// Create an event with no listeners. `const` so it can back a `static`.
  pub const fn new() -> Self { Self { listeners: Mutex::new(Vec::new()) } }
  /// Offer `ev` to each registered listener in turn until one consumes it.
  /// Returns the consuming listener's output, or `None` if every listener
  /// declined. Listeners that answer with `resub: false` are dropped.
  ///
  /// NOTE(review): the listener-table lock is held across the rendezvous
  /// with each listener thread, so a listener callback must not call back
  /// into this `Event` or it will deadlock — confirm against callers.
  pub fn dispatch(&self, mut ev: T) -> Option<U> {
    let mut listeners = self.listeners.lock().unwrap();
    // Take ownership of the current listener list; survivors are pushed
    // back into `listeners` as we go.
    let mut alt_list = Vec::with_capacity(listeners.len());
    mem::swap(&mut *listeners, &mut alt_list);
    let mut items = alt_list.into_iter();
    while let Some(l) = items.next() {
      // Hand the value to the listener thread...
      l.sink.send(ev).unwrap();
      // ...and block until it decides whether to consume it.
      let Reply { resub, outcome } = l.source.recv().unwrap();
      if resub {
        listeners.push(l);
      }
      match outcome {
        Ok(res) => {
          // Consumed: re-register the listeners we never reached.
          listeners.extend(items);
          return Some(res);
        },
        Err(next) => {
          // Declined: the listener handed the value back for the next one.
          ev = next;
        },
      }
    }
    None
  }
  /// Block until an event matching `filter` is dispatched, consume it with
  /// `f`, return `f`'s first output to the dispatcher and its second output
  /// to this caller. The subscription is one-shot (`resub: false` on match).
  pub fn get_one<V>(&self, mut filter: impl FnMut(&T) -> bool, f: impl FnOnce(T) -> (U, V)) -> V {
    let mut listeners = self.listeners.lock().unwrap();
    // Zero-capacity (rendezvous) channels pair up with the send/recv calls
    // in `dispatch` above.
    let (sink, request) = sync_channel(0);
    let (response, source) = sync_channel(0);
    listeners.push(Listener { sink, source });
    // Release the table lock before blocking, otherwise no dispatch could
    // ever run.
    mem::drop(listeners);
    loop {
      let t = request.recv().unwrap();
      if filter(&t) {
        let (u, v) = f(t);
        response.send(Reply { resub: false, outcome: Ok(u) }).unwrap();
        return v;
      }
      // Not interested: hand the value back and stay subscribed.
      response.send(Reply { resub: true, outcome: Err(t) }).unwrap();
    }
  }
}
impl<T, U> Default for Event<T, U> {
  /// Equivalent to [`Event::new`].
  fn default() -> Self { Self::new() }
}

View File

@@ -1,32 +1,40 @@
use std::borrow::Borrow;
use std::cmp::Ordering;
use std::convert::Infallible;
use std::future::Future;
use std::iter;
use std::marker::PhantomData;
use std::rc::Rc;
use std::str::FromStr;
use itertools::Itertools;
use futures::future::join_all;
use itertools::{Itertools, chain};
use never::Never;
use regex::Regex;
use crate::interner::Interner;
use crate::{api, match_mapping};
/// A unit of formattable text where the formatter must make a single choice
/// Converting from various types via [Into::into] keeps strings intact, but
/// [str::parse] resolves escape sequences
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
#[must_use]
pub struct FmtUnit {
/// Sub-units
pub subs: Vec<FmtUnit>,
/// Parsed text templates for how to render this text
pub variants: Rc<Variants>,
}
impl FmtUnit {
pub fn new(variants: Rc<Variants>, subs: impl IntoIterator<Item = FmtUnit>) -> Self {
Self { subs: subs.into_iter().collect(), variants }
}
/// Deserialize from message
pub fn from_api(api: &api::FormattingUnit) -> Self {
Self {
subs: api.subs.iter().map(Self::from_api).collect(),
variants: Rc::new(Variants(
(api.variants.iter().map(|var| Variant {
(api.variants.iter().map(|var| FmtVariant {
bounded: var.bounded,
elements: var.elements.iter().map(FmtElement::from_api).collect(),
}))
@@ -34,6 +42,8 @@ impl FmtUnit {
)),
}
}
/// Serialize into message. String interner IDs used in the structure must
/// remain valid.
pub fn to_api(&self) -> api::FormattingUnit {
api::FormattingUnit {
subs: self.subs.iter().map(Self::to_api).collect(),
@@ -44,13 +54,17 @@ impl FmtUnit {
.collect(),
}
}
/// Shorthand for a variable-length list that can be formatted in exactly one
/// way
pub fn sequence(
head: &str,
delim: &str,
seq_bnd: Option<bool>,
tail: &str,
seq_bnd: bool,
seq: impl IntoIterator<Item = FmtUnit>,
) -> Self {
let items = seq.into_iter().collect_vec();
FmtUnit::new(Variants::sequence(items.len(), delim, seq_bnd), items)
Variants::default().sequence(items.len(), head, delim, tail, seq_bnd).units_own(items)
}
}
impl<T> From<T> for FmtUnit
@@ -65,22 +79,45 @@ impl FromStr for FmtUnit {
}
}
/// A single element of a format string. Composes into [FmtVariant]
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub enum FmtElement {
Sub { slot: u32, bounded: Option<bool> },
/// a reference to an interpolable subunit in the enclosing [FmtUnit]
Sub {
/// Index into [FmtUnit::subs]
slot: u32,
/// Whether the subunit can use an unbounded (`Some(false)`) [FmtVariant],
/// it is restricted to bounded (`Some(true)`) [FmtVariant], or it should
/// inherit this information from the enclosing unit, meaning that the slot
/// is at the very end of the format string
bounded: Option<bool>,
},
/// a string snippet
String(Rc<String>),
/// an indented block
Indent(Vec<FmtElement>),
}
impl FmtElement {
/// Create a plain string snippet
pub fn str(s: &'_ str) -> Self { Self::String(Rc::new(s.to_string())) }
/// Create a slot for a subunit
pub fn sub(slot: u32, bounded: Option<bool>) -> Self { Self::Sub { slot, bounded } }
/// Create a slot for a subunit's bounded representation
pub fn bounded(i: u32) -> Self { Self::sub(i, Some(true)) }
/// Create a slot for any representation of a subunit
pub fn unbounded(i: u32) -> Self { Self::sub(i, Some(false)) }
/// Create an end slot bounded by the enclosing unit if that is bounded
pub fn last(i: u32) -> Self { Self::sub(i, None) }
pub fn sequence(len: usize, bounded: Option<bool>) -> impl Iterator<Item = Self> {
let len32: u32 = len.try_into().unwrap();
(0..len32 - 1).map(FmtElement::unbounded).chain([FmtElement::sub(len32 - 1, bounded)])
/// Create a sequence of `len` unbounded slots capped by a slot of the
/// specified boundedness
pub fn sequence(len: usize, bounded: Option<bool>) -> Vec<Self> {
match len.try_into().unwrap() {
0u32 => vec![],
1u32 => vec![FmtElement::sub(0, bounded)],
n => (0..n - 1).map(FmtElement::unbounded).chain([FmtElement::sub(n - 1, bounded)]).collect(),
}
}
/// Decode from a message
pub fn from_api(api: &api::FormattingElement) -> Self {
match_mapping!(api, api::FormattingElement => FmtElement {
Indent(v => v.iter().map(FmtElement::from_api).collect()),
@@ -88,6 +125,7 @@ impl FmtElement {
Sub{ *slot, *bounded },
})
}
/// Encode to message
pub fn to_api(&self) -> api::FormattingElement {
match_mapping!(self, FmtElement => api::FormattingElement {
Indent(v => v.iter().map(FmtElement::to_api).collect()),
@@ -97,20 +135,25 @@ impl FmtElement {
}
}
/// A particular way in which a value may be formatted in text.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub struct Variant {
pub struct FmtVariant {
/// Whether this representation has an intrinsic end marker or it needs the
/// parent to provide one
pub bounded: bool,
/// Template string syntax elements
pub elements: Vec<FmtElement>,
}
#[test]
fn variants_parse_test() {
let vars = Variants::default().bounded("({0})");
println!("final: {vars:?}")
}
/// Represents a collection of formatting strings for the same set of parameters
/// from which the formatter can choose within their associated constraints.
///
/// - {0b} can be replaced by any variant of the parameter.
/// - {0} can only be replaced by a bounded variant of the parameter
/// - {0l} causes the current end restriction to be applied to the parameter.
/// This is to be used if the parameter is at the very end of the variant.
#[derive(Clone, Debug, Hash, PartialEq, Eq, Default)]
pub struct Variants(pub Vec<Variant>);
pub struct Variants(pub Vec<FmtVariant>);
impl Variants {
fn parse_phs(s: &'_ str) -> Vec<FmtElement> {
let re = Regex::new(r"(?<tpl>\{\d+?[bl]?\})|(\{\{)|(\}\})").unwrap();
@@ -181,30 +224,57 @@ impl Variants {
}
}
fn add(&mut self, bounded: bool, s: &'_ str) {
self.0.push(Variant { bounded, elements: Self::parse(s) })
self.0.push(FmtVariant { bounded, elements: Self::parse(s) })
}
// This option is available in all positions
/// This option is available in all positions.
/// See [Variants] for a description of the format strings
pub fn bounded(mut self, s: &'_ str) -> Self {
self.add(true, s);
self
}
// This option is only available in positions immediately preceding the end of
// the sequence or a parenthesized subsequence.
/// This option is only available in positions immediately preceding the end
/// of the sequence or a parenthesized subsequence.
/// See [Variants] for a description of the format strings
pub fn unbounded(mut self, s: &'_ str) -> Self {
self.add(false, s);
self
}
pub fn sequence(len: usize, delim: &str, seq_bnd: Option<bool>) -> Rc<Self> {
let seq = Itertools::intersperse(FmtElement::sequence(len, seq_bnd), FmtElement::str(delim));
Rc::new(Variants(vec![Variant { bounded: true, elements: seq.collect_vec() }]))
/// Produces formatting options for `len` parameters separated by `delim`.
/// `seq_bnd` indicates whether `delim` and `tail` can unambiguously indicate
/// the end of a subsequence. For consistency, the stricter of the two is
/// expected to be used
pub fn sequence(
mut self,
len: usize,
head: &str,
delim: &str,
tail: &str,
seq_bnd: bool,
) -> Self {
let seq = chain!(
[FmtElement::str(head)],
Itertools::intersperse(
FmtElement::sequence(len, Some(seq_bnd)).into_iter(),
FmtElement::str(delim),
),
[FmtElement::str(tail)],
);
self.0.push(FmtVariant { bounded: true, elements: seq.collect_vec() });
self
}
/// Pair the slots with subunits to produce a [FmtUnit]
pub fn units_own(self, subs: impl IntoIterator<Item = FmtUnit>) -> FmtUnit {
FmtUnit::new(Rc::new(self), subs)
}
/// Pair the slots with subunits to produce a [FmtUnit] by reference. These
/// objects should preferably be thread-locally cached whenever possible.
pub fn units(self: &Rc<Self>, subs: impl IntoIterator<Item = FmtUnit>) -> FmtUnit {
FmtUnit::new(self.clone(), subs)
}
}
impl From<Rc<String>> for Variants {
fn from(value: Rc<String>) -> Self {
Self(vec![Variant { elements: vec![FmtElement::String(value)], bounded: true }])
Self(vec![FmtVariant { elements: vec![FmtElement::String(value)], bounded: true }])
}
}
impl From<String> for Variants {
@@ -245,29 +315,22 @@ pub fn take_first(unit: &FmtUnit, bounded: bool) -> String {
fill_slots(&first.elements, &unit.subs, 0, bounded)
}
pub async fn take_first_fmt(v: &(impl Format + ?Sized), i: &Interner) -> String {
take_first(&v.print(&FmtCtxImpl { i }).await, false)
pub async fn take_first_fmt(v: &(impl Format + ?Sized)) -> String {
take_first(&v.print(&FmtCtxImpl { _foo: PhantomData }).await, false)
}
/// [Default] this if you need one
#[derive(Default)]
pub struct FmtCtxImpl<'a> {
pub i: &'a Interner,
_foo: PhantomData<&'a ()>,
}
pub trait FmtCtx {
fn i(&self) -> &Interner;
// fn print_as(&self, p: &(impl Format + ?Sized)) -> impl Future<Output =
// String> where Self: Sized {
// async {
// // for now, always take the first option which is probably the one-line
// form let variants = p.print(self).await;
// take_first(&variants, true)
// }
// }
}
impl FmtCtx for FmtCtxImpl<'_> {
fn i(&self) -> &Interner { self.i }
}
/// Additional settings to the formatter. Implemented by [FmtCtxImpl]. Currently
/// not in use
pub trait FmtCtx {}
impl FmtCtx for FmtCtxImpl<'_> {}
/// A value that can be formatted into a string with multiple possible forms
pub trait Format {
#[must_use]
fn print<'a>(&'a self, c: &'a (impl FmtCtx + ?Sized + 'a)) -> impl Future<Output = FmtUnit> + 'a;
@@ -277,4 +340,44 @@ impl Format for Never {
}
/// Format with default strategy. Currently equal to [take_first_fmt]
pub async fn fmt(v: &(impl Format + ?Sized), i: &Interner) -> String { take_first_fmt(v, i).await }
pub async fn fmt(v: &(impl Format + ?Sized)) -> String { take_first_fmt(v).await }
/// Format a sequence with default strategy. Currently equal to [take_first_fmt]
pub async fn fmt_v<F: Format + ?Sized>(
v: impl IntoIterator<Item: Borrow<F>>,
) -> impl Iterator<Item = String> {
join_all(v.into_iter().map(|f| async move { take_first_fmt(f.borrow()).await })).await.into_iter()
}
#[cfg(test)]
mod test {
use std::rc::Rc;
use crate::format::{FmtElement, FmtUnit, FmtVariant, Variants, take_first};
#[test]
fn variants_parse_test() {
let vars = Rc::new(Variants::default().bounded("({{{0}}})"));
let expected_vars = Rc::new(Variants(vec![FmtVariant {
bounded: true,
elements: vec![
FmtElement::String(Rc::new("({".to_string())),
FmtElement::Sub { bounded: Some(false), slot: 0 },
FmtElement::String(Rc::new("})".to_string())),
],
}]));
assert_eq!(vars.as_ref(), expected_vars.as_ref());
let unit = vars.units(["1".into()]);
assert_eq!(unit, FmtUnit {
subs: vec![FmtUnit {
subs: vec![],
variants: Rc::new(Variants(vec![FmtVariant {
bounded: true,
elements: vec![FmtElement::String(Rc::new("1".to_string()))]
}]))
}],
variants: expected_vars
});
let str = take_first(&unit, true);
assert_eq!(str, "({1})");
}
}

View File

@@ -1,116 +0,0 @@
//! Impure functions that can be triggered by Orchid code when a command
//! evaluates to an atom representing a command
use std::any::{Any, TypeId};
use std::cell::RefCell;
use hashbrown::HashMap;
use trait_set::trait_set;
use super::nort::Expr;
use crate::foreign::atom::Atomic;
use crate::foreign::error::RTResult;
use crate::foreign::to_clause::ToClause;
use crate::location::CodeLocation;
// A command handler turns a type-erased command atom plus its code location
// into the continuation expression.
trait_set! {
  trait Handler = for<'a> Fn(&'a dyn Any, CodeLocation) -> Expr;
}
/// A handler-table slot: either an owned handler or a borrow of one owned by
/// a linked table (see `HandlerTable::link`).
enum HTEntry<'a> {
  Handler(Box<dyn Handler + 'a>),
  Forward(&'a (dyn Handler + 'a)),
}
impl<'a> AsRef<dyn Handler + 'a> for HTEntry<'a> {
  // Uniform access to the handler regardless of ownership.
  fn as_ref(&self) -> &(dyn Handler + 'a) {
    match self {
      HTEntry::Handler(h) => &**h,
      HTEntry::Forward(h) => *h,
    }
  }
}
/// A table of impure command handlers exposed to Orchid
///
/// Keyed by the [TypeId] of the command atom each handler interprets.
#[derive(Default)]
pub struct HandlerTable<'a> {
  handlers: HashMap<TypeId, HTEntry<'a>>,
}
impl<'a> HandlerTable<'a> {
  /// Create a new [HandlerTable]
  #[must_use]
  pub fn new() -> Self { Self { handlers: HashMap::new() } }
  /// Add a handler function to interpret a command and select the continuation.
  /// See [HandlerTable#with] for a declarative option.
  ///
  /// Panics if a handler for `T` is already registered.
  pub fn register<T: 'static, R: ToClause>(&mut self, f: impl for<'b> FnMut(&'b T) -> R + 'a) {
    // The RefCell lets the FnMut be invoked through the Fn-shaped `Handler`
    // trait alias.
    let cell = RefCell::new(f);
    let cb = move |a: &dyn Any, loc: CodeLocation| {
      cell.borrow_mut()(a.downcast_ref().expect("found by TypeId")).to_expr(loc)
    };
    let prev = self.handlers.insert(TypeId::of::<T>(), HTEntry::Handler(Box::new(cb)));
    assert!(prev.is_none(), "A handler for this type is already registered");
  }
  /// Add a handler function to interpret a command and select the continuation.
  /// See [HandlerTable#register] for a procedural option.
  pub fn with<T: 'static>(mut self, f: impl FnMut(&T) -> RTResult<Expr> + 'a) -> Self {
    self.register(f);
    self
  }
  /// Find and execute the corresponding handler for this type
  ///
  /// Returns `None` if no handler is registered for the atom's concrete type.
  pub fn dispatch(&self, arg: &dyn Atomic, loc: CodeLocation) -> Option<Expr> {
    (self.handlers.get(&arg.as_any_ref().type_id())).map(|ent| ent.as_ref()(arg.as_any_ref(), loc))
  }
  /// Combine two non-overlapping handler sets
  ///
  /// Panics if the two sets overlap.
  #[must_use]
  pub fn combine(mut self, other: Self) -> Self {
    for (key, value) in other.handlers {
      let prev = self.handlers.insert(key, value);
      assert!(prev.is_none(), "Duplicate handlers")
    }
    self
  }
  /// Add entries that forward requests to a borrowed non-overlapping handler
  /// set
  ///
  /// Panics if the two sets overlap.
  pub fn link<'b: 'a>(mut self, other: &'b HandlerTable<'b>) -> Self {
    for (key, value) in other.handlers.iter() {
      let prev = self.handlers.insert(*key, HTEntry::Forward(value.as_ref()));
      assert!(prev.is_none(), "Duplicate handlers")
    }
    self
  }
}
#[cfg(test)]
#[allow(unconditional_recursion)]
#[allow(clippy::ptr_arg)]
mod test {
  use std::marker::PhantomData;
  use super::HandlerTable;
  /// Ensure that the method I use to verify covariance actually passes with
  /// covariant and fails with invariant
  ///
  /// The failing case:
  /// ```
  /// struct Cov2<'a>(PhantomData<&'a mut &'a ()>);
  /// fn fail<'a>(_c: &Cov2<'a>, _s: &'a String) { fail(_c, &String::new()) }
  /// ```
  #[allow(unused)]
  fn covariant_control() {
    struct Cov<'a>(PhantomData<&'a ()>);
    fn pass<'a>(_c: &Cov<'a>, _s: &'a String) { pass(_c, &String::new()) }
  }
  /// The &mut ensures that 'a in the two functions must be disjoint, and that
  /// ht must outlive both. For this to compile, Rust has to cast ht to the
  /// shorter lifetimes, ensuring covariance
  ///
  /// These are compile-time-only checks; neither function is ever invoked.
  #[allow(unused)]
  fn assert_covariant() {
    fn pass<'a>(_ht: HandlerTable<'a>, _s: &'a String) { pass(_ht, &String::new()) }
  }
}

View File

@@ -1,50 +1,120 @@
use std::num::NonZeroU64;
use std::ops::{Deref, DerefMut};
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Mutex, MutexGuard, OnceLock};
use std::ops::{Index, IndexMut};
use hashbrown::HashMap;
use itertools::Itertools;
enum Rec<T> {
Val(T),
Next(usize),
}
/// A simple and very fast store that assigns small stable integer IDs to
/// objects. It uses a free-list for O(1) insertion, deletion and retrieval.
pub struct IdStore<T> {
table: OnceLock<Mutex<HashMap<NonZeroU64, T>>>,
id: AtomicU64,
first: usize,
values: Vec<Rec<T>>,
}
impl<T> IdStore<T> {
pub const fn new() -> Self { Self { table: OnceLock::new(), id: AtomicU64::new(1) } }
pub fn add(&self, t: T) -> IdRecord<'_, T> {
let tbl = self.table.get_or_init(Mutex::default);
let mut tbl_g = tbl.lock().unwrap();
let id: NonZeroU64 = self.id.fetch_add(1, Ordering::Relaxed).try_into().unwrap();
assert!(tbl_g.insert(id, t).is_none(), "atom ID wraparound");
IdRecord(id, tbl_g)
pub fn new() -> Self { IdStore { first: 0, values: Vec::new() } }
pub fn add(&mut self, value: T) -> usize {
if self.first == 0 && self.values.is_empty() {
self.first = 1;
self.values.push(Rec::Val(value));
return 0;
}
if self.first == self.values.len() {
let len = self.values.len();
self.values.extend((len..len * 2).map(|i| Rec::Next(i + 1)));
}
let Some(rec) = self.values.get_mut(self.first) else {
panic!("Bounds check and growth above")
};
let Rec::Next(next) = rec else {
panic!("first should always point to an empty space or one past the length")
};
let id = std::mem::replace(&mut self.first, *next);
*rec = Rec::Val(value);
id
}
pub fn get(&self, id: impl Into<NonZeroU64>) -> Option<IdRecord<'_, T>> {
let tbl = self.table.get_or_init(Mutex::default);
let tbl_g = tbl.lock().unwrap();
let id64 = id.into();
if tbl_g.contains_key(&id64) { Some(IdRecord(id64, tbl_g)) } else { None }
pub fn add_with(&mut self, cb: impl FnOnce(usize) -> T) -> usize { self.add(cb(self.first)) }
pub fn remove(&mut self, id: usize) -> T {
let Some(rec) = self.values.get_mut(id) else { panic!("Index out of bounds") };
let Rec::Val(val) = std::mem::replace(rec, Rec::Next(self.first)) else {
panic!("Index vacated")
};
self.first = id;
val
}
pub fn iter(&self) -> impl Iterator<Item = (usize, &T)> {
(self.values.iter().enumerate())
.filter_map(|(i, rec)| if let Rec::Val(val) = rec { Some((i, val)) } else { None })
}
pub fn iter_mut(&mut self) -> impl Iterator<Item = (usize, &mut T)> {
(self.values.iter_mut().enumerate())
.filter_map(|(i, rec)| if let Rec::Val(val) = rec { Some((i, val)) } else { None })
}
}
#[allow(clippy::type_complexity, reason = "This is verbose enough as it is")]
pub struct IntoIter<T>(
std::iter::FilterMap<
std::iter::Enumerate<std::vec::IntoIter<Rec<T>>>,
fn((usize, Rec<T>)) -> Option<(usize, T)>,
>,
);
impl<T> Iterator for IntoIter<T> {
type Item = (usize, T);
fn next(&mut self) -> Option<Self::Item> { self.0.next() }
fn size_hint(&self) -> (usize, Option<usize>) { self.0.size_hint() }
}
impl<T> IntoIterator for IdStore<T> {
  type Item = (usize, T);
  type IntoIter = IntoIter<T>;
  /// Consume the store, yielding `(index, value)` for every occupied slot.
  fn into_iter(self) -> Self::IntoIter {
    // The non-capturing closure coerces to the fn pointer in `IntoIter`.
    IntoIter(self.values.into_iter().enumerate().filter_map(|(i, rec)| match rec {
      Rec::Val(val) => Some((i, val)),
      Rec::Next(_) => None,
    }))
  }
}
impl<T> Index<usize> for IdStore<T> {
  type Output = T;
  /// # Panics
  ///
  /// Panics if the slot is out of range or vacated.
  fn index(&self, index: usize) -> &Self::Output {
    if let Some(Rec::Val(val)) = self.values.get(index) {
      val
    } else {
      panic!("Invalid or stale index")
    }
  }
}
impl<T> IndexMut<usize> for IdStore<T> {
  // Mutable counterpart of `Index`; panics on vacated or out-of-range slots.
  fn index_mut(&mut self, index: usize) -> &mut Self::Output {
    match self.values.get_mut(index) {
      Some(Rec::Val(val)) => val,
      _ => panic!("Invalid or stale index"),
    }
  }
  // NOTE(review): the two methods below reference `self.table`, which is not
  // a field of `IdStore`, and `pub fn` is not valid inside a trait impl. They
  // appear to have been spliced in from a different type by the diff
  // rendering — confirm against the original file.
  pub fn is_empty(&self) -> bool { self.len() == 0 }
  pub fn len(&self) -> usize { self.table.get().map(|t| t.lock().unwrap().len()).unwrap_or(0) }
}
impl<T> Default for IdStore<T> {
fn default() -> Self { Self::new() }
}
impl<A> FromIterator<A> for IdStore<A> {
  /// Build a fully-occupied store; `first` points one past the end, matching
  /// the "free list exhausted" state that `add` knows how to grow from.
  fn from_iter<T: IntoIterator<Item = A>>(iter: T) -> Self {
    let values: Vec<_> = iter.into_iter().map(Rec::Val).collect();
    let first = values.len();
    Self { first, values }
  }
}
/// View of a single table entry: its key plus the guard that keeps the whole
/// table locked for as long as this record is alive.
pub struct IdRecord<'a, T>(NonZeroU64, MutexGuard<'a, HashMap<NonZeroU64, T>>);
impl<T> IdRecord<'_, T> {
  /// Key of this entry.
  pub fn id(&self) -> NonZeroU64 { self.0 }
  /// Delete the entry, returning its value.
  pub fn remove(mut self) -> T { self.1.remove(&self.0).unwrap() }
}
impl<T> Deref for IdRecord<'_, T> {
  type Target = T;
  fn deref(&self) -> &Self::Target {
    // The entry was verified to exist when the record was created, and the
    // guard held in field 1 prevents concurrent removal.
    self.1.get(&self.0).expect("Existence checked on construction")
  }
}
impl<T> DerefMut for IdRecord<'_, T> {
  fn deref_mut(&mut self) -> &mut Self::Target {
    // Same invariant as `Deref`: existence was checked at construction and
    // the held guard blocks concurrent removal.
    self.1.get_mut(&self.0).expect("Existence checked on construction")
    // NOTE(review): the closing braces of this method and impl are missing
    // at this point — the diff rendering appears to have truncated them.
#[cfg(test)]
mod test {
  use super::*;
  /// Round-trip: values come back out by index, removal frees the slot, and
  /// iteration only sees surviving entries.
  #[test]
  fn add_and_retrieve() {
    let mut ids = IdStore::new();
    let a = ids.add(14);
    let b = ids.add(34);
    assert_eq!(ids[a], 14);
    assert_eq!(ids[b], 34);
    assert_eq!(ids.remove(a), 14);
    assert_eq!(ids.iter().collect_vec(), vec![(b, &34)]);
  }
}

View File

@@ -1,310 +1,424 @@
use std::borrow::Borrow;
use std::fmt::{Debug, Display};
use std::future::Future;
use std::hash::BuildHasher as _;
use std::num::NonZeroU64;
use std::hash::Hash;
use std::ops::Deref;
use std::rc::Rc;
use std::sync::atomic;
use std::{fmt, hash};
use async_std::sync::Mutex;
use hashbrown::{HashMap, HashSet};
use itertools::Itertools as _;
use orchid_api_traits::Request;
use futures::future::LocalBoxFuture;
use task_local::task_local;
use crate::api;
use crate::reqnot::{DynRequester, Requester};
/// Clippy crashes while verifying `Tok: Sized` without this and I cba to create
/// a minimal example
/// Implementation-specific backing object for an interned string.
pub trait IStrHandle: AsRef<str> {
  /// Shared owned copy of the backing string
  fn rc(&self) -> Rc<String>;
}
/// Implementation-specific backing object for an interned sequence of interned
/// strings.
pub trait IStrvHandle: AsRef<[IStr]> {
  /// Shared owned copy of the backing vector
  fn rc(&self) -> Rc<Vec<IStr>>;
}
/// Interned string created with [is] or [es]
#[derive(Clone)]
struct ForceSized<T>(T);
pub struct IStr(pub api::TStr, pub Rc<dyn IStrHandle>);
impl IStr {
/// Obtain a unique ID for this interned data
///
/// NOTICE: the ID is guaranteed to be the same for any interned instance of
/// the same value only as long as at least one instance exists. If a value is
/// no longer interned, the interner is free to forget about it
pub fn to_api(&self) -> api::TStr { self.0 }
/// Owned reference to a shared instance of the interned string
pub fn rc(&self) -> Rc<String> { self.1.rc() }
}
impl Deref for IStr {
type Target = str;
fn deref(&self) -> &Self::Target { self.1.as_ref().as_ref() }
}
impl Eq for IStr {}
impl PartialEq for IStr {
  // Token equality stands in for value equality: equal values share a token
  // while both are interned.
  fn eq(&self, other: &Self) -> bool { self.to_api() == other.to_api() }
}
impl Hash for IStr {
  // Hash the token, consistently with `PartialEq` above.
  fn hash<H: hash::Hasher>(&self, state: &mut H) { self.to_api().hash(state) }
}
impl Display for IStr {
  /// Displays as the bare string contents.
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { Display::fmt(&**self, f) }
}
impl Debug for IStr {
  /// Displays as `IStr(<contents>)`.
  // Fix: the format string was missing its closing parenthesis, producing
  // unbalanced output like `IStr(foo` — cf. the `IStrv({self})` counterpart.
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "IStr({self})") }
}
/// Interned string sequence
#[derive(Clone)]
pub struct Tok<T: Interned> {
data: Rc<T>,
marker: ForceSized<T::Marker>,
/// Interned sequence of interned strings: its token and the backing handle.
// NOTE(review): a `#[derive(Clone)]` for this type may have been displaced by
// the diff rendering (IStr carries one) — confirm against the original file.
pub struct IStrv(pub api::TStrv, pub Rc<dyn IStrvHandle>);
impl IStrv {
  /// Obtain a unique ID for this interned data
  ///
  /// NOTICE: the ID is guaranteed to be the same for any interned instance of
  /// the same value only as long as at least one instance exists. If a value is
  /// no longer interned, the interner is free to forget about it
  pub fn to_api(&self) -> api::TStrv { self.0 }
  /// Owned reference to a shared instance of the interned sequence
  pub fn rc(&self) -> Rc<Vec<IStr>> { self.1.rc() }
}
impl<T: Interned> Tok<T> {
pub fn new(data: Rc<T>, marker: T::Marker) -> Self { Self { data, marker: ForceSized(marker) } }
pub fn to_api(&self) -> T::Marker { self.marker.0 }
pub async fn from_api<M>(marker: M, i: &Interner) -> Self
where M: InternMarker<Interned = T> {
i.ex(marker).await
}
pub fn rc(&self) -> Rc<T> { self.data.clone() }
impl Deref for IStrv {
type Target = [IStr];
fn deref(&self) -> &Self::Target { self.1.as_ref().as_ref() }
}
impl<T: Interned> Deref for Tok<T> {
type Target = T;
fn deref(&self) -> &Self::Target { self.data.as_ref() }
impl Eq for IStrv {}
impl PartialEq for IStrv {
fn eq(&self, other: &Self) -> bool { self.0 == other.0 }
}
impl<T: Interned> Ord for Tok<T> {
fn cmp(&self, other: &Self) -> std::cmp::Ordering { self.to_api().cmp(&other.to_api()) }
impl Hash for IStrv {
fn hash<H: hash::Hasher>(&self, state: &mut H) { self.0.0.hash(state) }
}
impl<T: Interned> PartialOrd for Tok<T> {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { Some(self.cmp(other)) }
}
impl<T: Interned> Eq for Tok<T> {}
impl<T: Interned> PartialEq for Tok<T> {
fn eq(&self, other: &Self) -> bool { self.cmp(other).is_eq() }
}
impl<T: Interned> hash::Hash for Tok<T> {
fn hash<H: hash::Hasher>(&self, state: &mut H) { self.to_api().hash(state) }
}
impl<T: Interned + fmt::Display> fmt::Display for Tok<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", &*self.data)
}
}
impl<T: Interned + fmt::Debug> fmt::Debug for Tok<T> {
impl Display for IStrv {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Token({} -> {:?})", self.to_api().get_id(), self.data.as_ref())
}
}
pub trait Interned: Eq + hash::Hash + Clone + fmt::Debug + Internable<Interned = Self> {
type Marker: InternMarker<Interned = Self> + Sized;
fn intern(
self: Rc<Self>,
req: &(impl DynRequester<Transfer = api::IntReq> + ?Sized),
) -> impl Future<Output = Self::Marker>;
fn bimap(interner: &mut TypedInterners) -> &mut Bimap<Self>;
}
pub trait Internable: fmt::Debug {
type Interned: Interned;
fn get_owned(&self) -> Rc<Self::Interned>;
}
pub trait InternMarker: Copy + PartialEq + Eq + PartialOrd + Ord + hash::Hash + Sized {
type Interned: Interned<Marker = Self>;
/// Only called on replicas
fn resolve(self, i: &Interner) -> impl Future<Output = Tok<Self::Interned>>;
fn get_id(self) -> NonZeroU64;
fn from_id(id: NonZeroU64) -> Self;
}
impl Interned for String {
  type Marker = api::TStr;
  /// Ask the upstream interner for this string's marker.
  async fn intern(
    self: Rc<Self>,
    req: &(impl DynRequester<Transfer = api::IntReq> + ?Sized),
  ) -> Self::Marker {
    let owned: String = (*self).clone();
    req.request(api::InternStr(owned)).await
  }
  fn bimap(interners: &mut TypedInterners) -> &mut Bimap<Self> { &mut interners.strings }
}
impl InternMarker for api::TStr {
type Interned = String;
async fn resolve(self, i: &Interner) -> Tok<Self::Interned> {
Tok::new(Rc::new(i.0.master.as_ref().unwrap().request(api::ExternStr(self)).await), self)
}
fn get_id(self) -> NonZeroU64 { self.0 }
fn from_id(id: NonZeroU64) -> Self { Self(id) }
}
impl Internable for str {
  type Interned = String;
  /// Copy the slice into a shared owned string.
  fn get_owned(&self) -> Rc<Self::Interned> { Rc::new(String::from(self)) }
}
impl Internable for String {
  type Interned = String;
  /// Copy this string into a shared owned instance.
  fn get_owned(&self) -> Rc<Self::Interned> { Rc::new(self.clone()) }
}
impl Interned for Vec<Tok<String>> {
type Marker = api::TStrv;
async fn intern(
self: Rc<Self>,
req: &(impl DynRequester<Transfer = api::IntReq> + ?Sized),
) -> Self::Marker {
req.request(api::InternStrv(self.iter().map(|t| t.to_api()).collect())).await
}
fn bimap(interners: &mut TypedInterners) -> &mut Bimap<Self> { &mut interners.vecs }
}
impl InternMarker for api::TStrv {
type Interned = Vec<Tok<String>>;
async fn resolve(self, i: &Interner) -> Tok<Self::Interned> {
let rep = i.0.master.as_ref().unwrap().request(api::ExternStrv(self)).await;
let data = futures::future::join_all(rep.into_iter().map(|m| i.ex(m))).await;
Tok::new(Rc::new(data), self)
}
fn get_id(self) -> NonZeroU64 { self.0 }
fn from_id(id: NonZeroU64) -> Self { Self(id) }
}
impl Internable for [Tok<String>] {
  type Interned = Vec<Tok<String>>;
  /// Copy the slice into a shared owned vector.
  fn get_owned(&self) -> Rc<Self::Interned> { Rc::new(Vec::from(self)) }
}
impl<const N: usize> Internable for [Tok<String>; N] {
  type Interned = Vec<Tok<String>>;
  /// Copy the array into a shared owned vector.
  fn get_owned(&self) -> Rc<Self::Interned> { Rc::new(self.as_slice().to_vec()) }
}
impl Internable for Vec<Tok<String>> {
  type Interned = Vec<Tok<String>>;
  /// Copy this vector into a shared owned instance.
  fn get_owned(&self) -> Rc<Self::Interned> { Rc::new(self.clone()) }
}
// impl Internable for Vec<api::TStr> {
// type Interned = Vec<Tok<String>>;
// fn get_owned(&self) -> Arc<Self::Interned> {
// Arc::new(self.iter().map(|ts| deintern(*ts)).collect())
// }
// }
// impl Internable for [api::TStr] {
// type Interned = Vec<Tok<String>>;
// fn get_owned(&self) -> Arc<Self::Interned> {
// Arc::new(self.iter().map(|ts| deintern(*ts)).collect())
// }
// }
/// The number of references held to any token by the interner.
///
/// `Bimap::insert` stores three `Rc`s to the same data: the `intern` map key,
/// the `Tok` stored as its value, and the `Tok` stored in `by_id`.
const BASE_RC: usize = 3;
/// Guards against [Bimap] gaining or losing an internal reference, which
/// would silently break the strong-count-based sweep heuristics.
#[test]
fn base_rc_correct() {
  let tok = Tok::new(Rc::new("foo".to_string()), api::TStr(1.try_into().unwrap()));
  let mut bimap = Bimap::default();
  bimap.insert(tok.clone());
  assert_eq!(Rc::strong_count(&tok.data), BASE_RC + 1, "the bimap plus the current instance");
}
/// Two-way index of interned values for one concrete [Interned] type:
/// value -> token and marker -> token.
pub struct Bimap<T: Interned> {
  // The `Rc` key is shared with the `Tok`s stored below, so its strong count
  // reveals whether references exist outside the interner (see BASE_RC).
  intern: HashMap<Rc<T>, Tok<T>>,
  by_id: HashMap<T::Marker, Tok<T>>,
}
impl<T: Interned> Bimap<T> {
  /// Record a token in both indices.
  pub fn insert(&mut self, token: Tok<T>) {
    self.intern.insert(token.data.clone(), token.clone());
    self.by_id.insert(token.to_api(), token);
  }
  /// Find a token by its API marker.
  pub fn by_marker(&self, marker: T::Marker) -> Option<Tok<T>> { self.by_id.get(&marker).cloned() }
  /// Find a token by a borrowed form of its value. Uses the raw entry API so
  /// the borrowed query can be hashed without building an owned key.
  pub fn by_value<Q: Eq + hash::Hash>(&self, q: &Q) -> Option<Tok<T>>
  where T: Borrow<Q> {
    (self.intern.raw_entry())
      .from_hash(self.intern.hasher().hash_one(q), |k| k.as_ref().borrow() == q)
      .map(|p| p.1.clone())
  }
  /// Drop every entry nothing outside the interner references (strong count
  /// equal to BASE_RC) and return the freed markers for reporting upstream.
  pub fn sweep_replica(&mut self) -> Vec<T::Marker> {
    (self.intern)
      .extract_if(|k, _| Rc::strong_count(k) == BASE_RC)
      .map(|(_, v)| {
        self.by_id.remove(&v.to_api());
        v.to_api()
      })
      .collect()
  }
  /// Drop entries that are neither referenced locally nor listed as retained
  /// by a replica.
  // NOTE(review): unlike `sweep_replica`, this removes entries only from
  // `intern`; matching `by_id` entries remain and keep the `Tok` alive.
  // Confirm whether that is intentional.
  pub fn sweep_master(&mut self, retained: HashSet<T::Marker>) {
    self.intern.retain(|k, v| BASE_RC < Rc::strong_count(k) || retained.contains(&v.to_api()))
  }
}
impl<T: Interned> Default for Bimap<T> {
  /// Start with both indices empty.
  fn default() -> Self {
    Self { intern: HashMap::new(), by_id: HashMap::new() }
  }
}
pub trait UpComm {
fn up<R: Request>(&self, req: R) -> R::Response;
}
#[derive(Default)]
pub struct TypedInterners {
strings: Bimap<String>,
vecs: Bimap<Vec<Tok<String>>>,
}
#[derive(Default)]
pub struct InternerData {
interners: Mutex<TypedInterners>,
master: Option<Box<dyn DynRequester<Transfer = api::IntReq>>>,
}
#[derive(Clone, Default)]
pub struct Interner(Rc<InternerData>);
impl Interner {
pub fn new_master() -> Self { Self::default() }
pub fn new_replica(req: impl DynRequester<Transfer = api::IntReq> + 'static) -> Self {
Self(Rc::new(InternerData { master: Some(Box::new(req)), interners: Mutex::default() }))
}
/// Intern some data; query its identifier if not known locally
pub async fn i<T: Interned>(&self, t: &(impl Internable<Interned = T> + ?Sized)) -> Tok<T> {
let data = t.get_owned();
let mut g = self.0.interners.lock().await;
let typed = T::bimap(&mut g);
if let Some(tok) = typed.by_value(&data) {
return tok;
let mut iter = self.deref().iter();
match iter.next() {
None => return Ok(()),
Some(s) => write!(f, "{s}")?,
}
let marker = match &self.0.master {
Some(c) => data.clone().intern(&**c).await,
None =>
T::Marker::from_id(NonZeroU64::new(ID.fetch_add(1, atomic::Ordering::Relaxed)).unwrap()),
};
let tok = Tok::new(data, marker);
T::bimap(&mut g).insert(tok.clone());
tok
}
/// Extern an identifier; query the data it represents if not known locally
pub async fn ex<M: InternMarker>(&self, marker: M) -> Tok<M::Interned> {
if let Some(tok) = M::Interned::bimap(&mut *self.0.interners.lock().await).by_marker(marker) {
return tok;
for s in iter {
write!(f, "::{s}")?
}
assert!(self.0.master.is_some(), "ID not in local interner and this is master");
let token = marker.resolve(self).await;
M::Interned::bimap(&mut *self.0.interners.lock().await).insert(token.clone());
token
}
pub async fn sweep_replica(&self) -> api::Retained {
assert!(self.0.master.is_some(), "Not a replica");
let mut g = self.0.interners.lock().await;
api::Retained { strings: g.strings.sweep_replica(), vecs: g.vecs.sweep_replica() }
}
pub async fn sweep_master(&self, retained: api::Retained) {
assert!(self.0.master.is_none(), "Not master");
let mut g = self.0.interners.lock().await;
g.strings.sweep_master(retained.strings.into_iter().collect());
g.vecs.sweep_master(retained.vecs.into_iter().collect());
Ok(())
}
}
impl fmt::Debug for Interner {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Interner{{ replica: {} }}", self.0.master.is_none())
}
impl Debug for IStrv {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "IStrv({self})") }
}
static ID: atomic::AtomicU64 = atomic::AtomicU64::new(1);
pub fn merge_retained(into: &mut api::Retained, from: &api::Retained) {
into.strings = into.strings.iter().chain(&from.strings).copied().unique().collect();
into.vecs = into.vecs.iter().chain(&from.vecs).copied().unique().collect();
/// Injectable interner interface
///
/// [Self::is] and [Self::iv] return an existing ID if any [IStrHandle] or
/// [IStrvHandle] for the same value is still live, and any ID currently not
/// used with the same type otherwise
///
/// [Self::es] and [Self::ev] find an existing value by its key if any
/// [IStrHandle] or [IStrvHandle] for the same ID is still live. If all objects
/// are gone the functions may work or panic.
pub trait InternerSrv {
/// Intern a string
fn is<'a>(&'a self, v: &'a str) -> LocalBoxFuture<'a, IStr>;
/// Find an existing string by its key
fn es(&self, t: api::TStr) -> LocalBoxFuture<'_, IStr>;
/// Intern a str vector
fn iv<'a>(&'a self, v: &'a [IStr]) -> LocalBoxFuture<'a, IStrv>;
/// Find an existing str vector by its key
fn ev(&self, t: api::TStrv) -> LocalBoxFuture<'_, IStrv>;
}
#[cfg(test)]
mod test {
use std::num::NonZero;
use std::pin::Pin;
task_local! {
static INTERNER: Rc<dyn InternerSrv>;
}
use orchid_api_traits::{Decode, enc_vec};
use test_executors::spin_on;
/// Install a global interner. Within this future, the global [is], [iv], [es]
/// and [ev] functions call the provided [InternerSrv]
pub async fn with_interner<F: Future>(val: Rc<dyn InternerSrv>, fut: F) -> F::Output {
INTERNER.scope(val, fut).await
}
use super::*;
fn get_interner() -> Rc<dyn InternerSrv> {
INTERNER.try_with(|i| i.clone()).expect("Interner not initialized")
}
/// Intern a `String` (find its ID or assign it a new one)
pub async fn is(v: &str) -> IStr { get_interner().is(v).await }
/// Intern a `Vec<IStr>` (find its ID or assign it a new one)
pub async fn iv(v: &[IStr]) -> IStrv { get_interner().iv(v).await }
/// Find a live [IStr] by its ID
///
/// # Panics
///
/// This function may panic if there are no other references to the [IStr] we're
/// searching for, as the interner is free to forget about unreferenced values
pub async fn es(v: api::TStr) -> IStr { get_interner().es(v).await }
/// Find a live [IStrv] by its ID
///
/// # Panics
///
/// This function may panic if there are no other references to the [IStrv]
/// we're searching for, as the interner is free to forget about unreferenced
/// values
pub async fn ev(v: api::TStrv) -> IStrv { get_interner().ev(v).await }
/// Basic engine for an interner that supports recovering if a token is not
/// found locally.
pub mod local_interner {
use std::borrow::Borrow;
use std::cell::RefCell;
use std::fmt::Debug;
use std::future;
use std::hash::{BuildHasher, Hash};
use std::num::NonZeroU64;
use std::rc::{Rc, Weak};
use futures::future::LocalBoxFuture;
use hashbrown::hash_table::{Entry, OccupiedEntry, VacantEntry};
use hashbrown::{DefaultHashBuilder, HashTable};
use orchid_api_traits::Coding;
use super::{IStr, IStrHandle, IStrv, IStrvHandle, InternerSrv};
use crate::api;
#[test]
fn test_i() {
let i = Interner::new_master();
let _: Tok<String> = spin_on(i.i("foo"));
let _: Tok<Vec<Tok<String>>> = spin_on(i.i(&[spin_on(i.i("bar")), spin_on(i.i("baz"))]));
/// Associated types and methods for parallel concepts between scalar and
/// vector interning
pub trait InternableCard: 'static + Sized + Default + Debug {
/// API representation of an interner key
type Token: Clone + Copy + Debug + Hash + Eq + PartialOrd + Ord + Coding + 'static;
/// Owned version of interned value physically held by `'static` interner
/// and token
type Data: 'static + Borrow<Self::Borrow> + Eq + Hash + Debug;
/// Borrowed version of interned value placed in intern queries to avoid a
/// copy
type Borrow: ToOwned<Owned = Self::Data> + ?Sized + Eq + Hash + Debug;
/// Smart object handed out by the interner for storage and comparison in
/// third party code. [IStr] or [IStrv]
type Interned: Clone + Debug;
/// Create smart object from token for fast comparison and a handle for
/// everything else incl. virtual drop
fn new_interned(token: Self::Token, handle: Rc<Handle<Self>>) -> Self::Interned;
}
#[test]
fn test_coding() {
spin_on(async {
let coded = api::TStr(NonZero::new(3u64).unwrap());
let mut enc = &enc_vec(&coded).await[..];
api::TStr::decode(Pin::new(&mut enc)).await;
assert_eq!(enc, [], "Did not consume all of {enc:?}")
})
/// String-specific values for [InternableCard]
#[derive(Default, Debug)]
pub struct StrBranch;
impl InternableCard for StrBranch {
type Data = String;
type Token = api::TStr;
type Borrow = str;
type Interned = IStr;
fn new_interned(t: Self::Token, h: Rc<Handle<Self>>) -> Self::Interned { IStr(t, h) }
}
/// Vector-specific values for [InternableCard]
#[derive(Default, Debug)]
pub struct StrvBranch;
impl InternableCard for StrvBranch {
type Data = Vec<IStr>;
type Token = api::TStrv;
type Borrow = [IStr];
type Interned = IStrv;
fn new_interned(t: Self::Token, h: Rc<Handle<Self>>) -> Self::Interned { IStrv(t, h) }
}
/// Pairs interned data with its internment key
#[derive(Debug)]
struct Data<B: InternableCard> {
token: B::Token,
data: Rc<B::Data>,
}
impl<B: InternableCard> Clone for Data<B> {
fn clone(&self) -> Self { Self { token: self.token, data: self.data.clone() } }
}
/// Implementor for the trait objects held by [IStr] and [IStrv]
pub struct Handle<B: InternableCard> {
data: Data<B>,
parent: Weak<RefCell<IntData<B>>>,
}
impl IStrHandle for Handle<StrBranch> {
  /// Shared owned copy of the backing string.
  fn rc(&self) -> Rc<String> { Rc::clone(&self.data.data) }
}
impl AsRef<str> for Handle<StrBranch> {
  fn as_ref(&self) -> &str { &self.data.data }
}
impl IStrvHandle for Handle<StrvBranch> {
  /// Shared owned copy of the backing vector.
  fn rc(&self) -> Rc<Vec<IStr>> { Rc::clone(&self.data.data) }
}
impl AsRef<[IStr]> for Handle<StrvBranch> {
  fn as_ref(&self) -> &[IStr] { &self.data.data }
}
impl<B: InternableCard> Drop for Handle<B> {
  // Last-handle cleanup: remove this entry from both interner tables so the
  // token can be forgotten once no references remain.
  fn drop(&mut self) {
    // The interner itself may already be gone; nothing to clean up then.
    let Some(parent) = self.parent.upgrade() else { return };
    if let Entry::Occupied(ent) =
      parent.borrow_mut().entry_by_data(self.data.data.as_ref().borrow())
    {
      ent.remove();
    }
    // Two separate `borrow_mut` calls so each `Entry` borrow ends before the
    // next one starts.
    if let Entry::Occupied(ent) = parent.borrow_mut().entry_by_tok(self.data.token) {
      ent.remove();
    }
  }
}
/// Information retained about an interned token indexed both by key and
/// value.
struct Rec<B: InternableCard> {
/// This reference is weak, but the [Drop] handler of [Handle] removes the
/// [Rec] from the interner so it is guaranteed to be live.
handle: Weak<Handle<B>>,
/// Keys for indexing from either table
data: Data<B>,
}
/// Read data from an occupied entry in an interner. The equivalent insert
/// command is [insert]
fn read<B: InternableCard>(entry: OccupiedEntry<'_, Rec<B>>) -> B::Interned {
let hand = entry.get().handle.upgrade().expect("Found entry but handle already dropped");
B::new_interned(entry.get().data.token, hand)
}
/// Insert some data into an entry borrowed from this same interner.
/// The equivalent read command is [read]
fn insert<B: InternableCard>(entry: VacantEntry<'_, Rec<B>>, handle: Rc<Handle<B>>) {
entry.insert(Rec { data: handle.data.clone(), handle: Rc::downgrade(&handle) });
}
#[derive(Default)]
struct IntData<B: InternableCard> {
by_tok: HashTable<Rec<B>>,
by_data: HashTable<Rec<B>>,
hasher: DefaultHashBuilder,
}
impl<B: InternableCard> IntData<B> {
fn entry_by_data(&mut self, query: &B::Borrow) -> Entry<'_, Rec<B>> {
self.by_data.entry(
self.hasher.hash_one(query),
|rec| rec.data.data.as_ref().borrow() == query,
|rec| self.hasher.hash_one(rec.data.data.as_ref().borrow()),
)
}
fn entry_by_tok(&mut self, token: B::Token) -> Entry<'_, Rec<B>> {
self.by_tok.entry(
self.hasher.hash_one(token),
|rec| rec.data.token == token,
|rec| self.hasher.hash_one(rec.data.token),
)
}
}
/// Failing intern command that can be recovered if the value is found
/// elsewhere
pub struct InternError<'a, B: InternableCard> {
int: &'a Int<B>,
query: &'a B::Borrow,
}
impl<B: InternableCard> InternError<'_, B> {
/// If a racing write populates the entry, the continuation returns that
/// value and discards its argument
pub fn set_if_empty(self, token: B::Token) -> B::Interned {
let mut int_data = self.int.0.borrow_mut();
match int_data.entry_by_data(self.query) {
Entry::Occupied(ent) => read(ent),
Entry::Vacant(ent) => {
let hand = self.int.mk_handle(Data { token, data: Rc::new(self.query.to_owned()) });
insert(ent, hand.clone());
let Entry::Vacant(other_ent) = int_data.entry_by_tok(token) else {
panic!("Data and key tables out of sync")
};
insert(other_ent, hand.clone());
B::new_interned(token, hand)
},
}
}
}
impl<B: InternableCard> Debug for InternError<'_, B> {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    // Fix: the tuple name now matches the actual type; "InternEntry" looked
    // like a leftover from an earlier name of this struct.
    f.debug_tuple("InternError").field(&self.query).finish()
  }
}
/// Failing extern command that can be recovered if the value is found
/// elsewhere
pub struct ExternError<'a, B: InternableCard> {
int: &'a Int<B>,
token: B::Token,
}
impl<B: InternableCard> ExternError<'_, B> {
/// If a racing write populates the entry, the continuation returns that
/// value and discards its argument
pub fn set_if_empty(&self, data: Rc<B::Data>) -> B::Interned {
let mut int_data = self.int.0.borrow_mut();
match int_data.entry_by_tok(self.token) {
Entry::Occupied(ent) => read(ent),
Entry::Vacant(ent) => {
let hand = self.int.mk_handle(Data { token: self.token, data: data.clone() });
insert(ent, hand.clone());
let Entry::Vacant(other_ent) = int_data.entry_by_data(data.as_ref().borrow()) else {
panic!("Data and key tables out of sync")
};
insert(other_ent, hand.clone());
B::new_interned(self.token, hand)
},
}
}
}
impl<B: InternableCard> Debug for ExternError<'_, B> {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    // Fix: the tuple name now matches the actual type; "ExternEntry" looked
    // like a leftover from an earlier name of this struct.
    f.debug_tuple("ExternError").field(&self.token).finish()
  }
}
#[derive(Default)]
pub struct Int<B: InternableCard>(Rc<RefCell<IntData<B>>>);
impl<B: InternableCard> Int<B> {
  /// Wrap freshly interned data in a handle that unregisters itself from this
  /// interner when the last reference to it is dropped.
  fn mk_handle(&self, data: Data<B>) -> Rc<Handle<B>> {
    // Fix: dropped two redundant clones — `data` was cloned and discarded,
    // and `self.0` was cloned just to be downgraded (clippy::redundant_clone).
    Rc::new(Handle { data, parent: Rc::downgrade(&self.0) })
  }
  /// Look up by value, or yield to figure out its ID from elsewhere
  pub fn i<'a>(&'a self, query: &'a B::Borrow) -> Result<B::Interned, InternError<'a, B>> {
    if let Entry::Occupied(val) = self.0.borrow_mut().entry_by_data(query) {
      return Ok(read(val));
    }
    Err(InternError { int: self, query })
  }
  /// Look up by key or yield to figure out its value from elsewhere
  pub fn e(&self, token: B::Token) -> Result<B::Interned, ExternError<'_, B>> {
    if let Entry::Occupied(ent) = self.0.borrow_mut().entry_by_tok(token) {
      return Ok(read(ent));
    }
    Err(ExternError { int: self, token })
  }
}
thread_local! {
static NEXT_ID: RefCell<u64> = 0.into();
}
/// Allocate the next thread-local ID and pass it to `fun`. IDs start at 1 so
/// the `NonZeroU64` conversion cannot fail.
fn with_new_id<T>(fun: impl FnOnce(NonZeroU64) -> T) -> T {
  let next = NEXT_ID.with_borrow_mut(|id| {
    *id += 1;
    *id
  });
  fun(NonZeroU64::new(next).unwrap())
}
#[derive(Default)]
struct LocalInterner {
str: Int<StrBranch>,
strv: Int<StrvBranch>,
}
impl InternerSrv for LocalInterner {
  /// Intern a string: reuse the live entry or mint a fresh thread-local ID.
  fn is<'a>(&'a self, v: &'a str) -> LocalBoxFuture<'a, IStr> {
    match self.str.i(v) {
      Ok(int) => Box::pin(future::ready(int)),
      Err(e) => with_new_id(|id| Box::pin(future::ready(e.set_if_empty(api::TStr(id))))),
    }
  }
  /// Find a live string by token; a purely local interner has nowhere else
  /// to recover a forgotten token from.
  fn es(&self, t: api::TStr) -> LocalBoxFuture<'_, IStr> {
    Box::pin(future::ready(self.str.e(t).expect("Unrecognized token cannot be externed")))
  }
  /// Intern a str vector: reuse the live entry or mint a fresh ID.
  fn iv<'a>(&'a self, v: &'a [IStr]) -> LocalBoxFuture<'a, IStrv> {
    match self.strv.i(v) {
      Ok(int) => Box::pin(future::ready(int)),
      Err(e) => with_new_id(|id| Box::pin(future::ready(e.set_if_empty(api::TStrv(id))))),
    }
  }
  /// Find a live str vector by token.
  // Consistency fix: refer to the token type as `api::TStrv` like the rest of
  // this file, instead of spelling out `orchid_api::TStrv` (same type — the
  // crate root aliases `use orchid_api as api`).
  fn ev(&self, t: api::TStrv) -> LocalBoxFuture<'_, IStrv> {
    Box::pin(future::ready(self.strv.e(t).expect("Unrecognized token cannot be externed")))
  }
}
/// Creates a basic thread-local interner for testing and root role.
pub fn local_interner() -> Rc<dyn InternerSrv> { Rc::<LocalInterner>::default() }
}

View File

@@ -0,0 +1,26 @@
use std::fmt;
use itertools::{Itertools, Position};
pub struct PrintList<'a, I: Iterator<Item = E> + Clone, E: fmt::Display>(pub I, pub &'a str);
impl<'a, I: Iterator<Item = E> + Clone, E: fmt::Display> fmt::Display for PrintList<'a, I, E> {
  /// Render as `a, b, <op> c` — comma-separated with the operator word from
  /// field 1 before the final item. The iterator is cloned so the list can be
  /// displayed repeatedly.
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    self.0.clone().with_position().try_for_each(|(pos, item)| match pos {
      Position::First | Position::Only => write!(f, "{item}"),
      Position::Middle => write!(f, ", {item}"),
      Position::Last => write!(f, ", {} {item}", self.1),
    })
  }
}
/// Extension trait adding [IteratorPrint::display] to every cloneable
/// iterator over displayable items.
pub trait IteratorPrint: Iterator<Item: fmt::Display> + Clone {
  /// Pretty-print a list with a comma-separated enumeration and an operator
  /// word (such as "and" or "or") inserted before the last
  fn display<'a>(self, operator: &'a str) -> PrintList<'a, Self, Self::Item> {
    PrintList(self, operator)
  }
}
impl<T: Iterator<Item: fmt::Display> + Clone> IteratorPrint for T {}

View File

@@ -1,29 +1,44 @@
pub use async_once_cell;
use orchid_api as api;
pub mod box_cow;
pub mod boxed_iter;
pub mod builtin;
pub mod char_filter;
mod on_drop;
pub use on_drop::*;
mod binary;
pub use binary::*;
mod id_store;
pub use id_store::*;
mod boxed_iter;
pub use boxed_iter::*;
mod char_filter;
pub use char_filter::*;
pub mod clone;
pub mod combine;
pub mod error;
pub mod event;
pub mod format;
pub mod id_store;
pub mod interner;
pub mod join;
pub mod location;
pub mod logging;
mod error;
pub use error::*;
mod format;
pub use format::*;
mod interner;
pub use interner::*;
mod iter_print;
pub use iter_print::*;
mod join;
pub use join::*;
mod location;
pub use location::*;
mod logging;
pub use logging::*;
mod match_mapping;
pub mod msg;
pub mod name;
pub mod number;
pub mod parse;
pub mod pure_seq;
pub mod reqnot;
pub mod sequence;
pub mod side;
mod name;
pub use name::*;
mod number;
pub use number::*;
mod parse;
pub use parse::*;
mod comm;
pub use comm::*;
mod side;
pub use side::*;
mod stash;
pub use stash::*;
mod tl_cache;
pub mod tokens;
pub mod tree;
mod tree;
pub use tree::*;

View File

@@ -2,51 +2,65 @@
use std::fmt;
use std::hash::Hash;
use std::ops::Range;
use std::ops::{Add, AddAssign, Range};
use futures::future::join_all;
use trait_set::trait_set;
use crate::error::ErrPos;
use crate::interner::{Interner, Tok};
use crate::name::Sym;
use crate::{api, match_mapping, sym};
use crate::{ErrPos, IStr, IteratorPrint, Sym, api, es, is, match_mapping, sym};
trait_set! {
pub trait GetSrc = FnMut(&Sym) -> Tok<String>;
pub trait GetSrc = FnMut(&Sym) -> IStr;
}
#[derive(Debug, Clone)]
/// One or more positions in code that are associated with an event, value, or
/// other consequence of that code
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Pos {
/// Location not known, for example because the expression was decoded from a
/// source that doesn't have a meaningful location attached, from a format
/// that does not encode location data
None,
/// If the expression in question is a slot, it receives the substituted
/// expression's position. If the expression is being placed into a slot, this
/// is discarded. In all other cases, it is a conflict and an error
SlotTarget,
/// Used in functions to denote the generated code that carries on the
/// location of the call. Not allowed in the const tree.
/// location of the call. Not allowed in the const tree
Inherit,
/// ID and parameters of a generator (such as an extension)
Gen(CodeGenInfo),
/// Range and file
SrcRange(SrcRange),
/// More than one positions. This vec should not contain another [Pos::Multi]
/// and should be `>=2` long. To ensure this, use `+` and `+=` to combine
/// positions and do not construct this directly.
Multi(Vec<Pos>),
}
impl Pos {
pub fn pretty_print(&self, get_src: &mut impl GetSrc) -> String {
match self {
Self::Gen(g) => g.to_string(),
Self::SrcRange(sr) => sr.pretty_print(&get_src(&sr.path)),
Self::Multi(posv) => posv.iter().display("and").to_string(),
// Can't pretty print partial and meta-location
other => format!("{other:?}"),
}
}
pub async fn from_api(api: &api::Location, i: &Interner) -> Self {
pub async fn from_api(api: &api::Location) -> Self {
match_mapping!(api, api::Location => Pos {
None, Inherit, SlotTarget,
Gen(cgi => CodeGenInfo::from_api(cgi, i).await),
Gen(cgi => CodeGenInfo::from_api(cgi).await),
Multi(v => join_all(v.iter().map(Pos::from_api)).await)
} {
api::Location::SourceRange(sr) => Self::SrcRange(SrcRange::from_api(sr, i).await)
api::Location::SourceRange(sr) => Self::SrcRange(SrcRange::from_api(sr).await)
})
}
pub fn to_api(&self) -> api::Location {
match_mapping!(self, Pos => api::Location {
None, Inherit, SlotTarget,
Gen(cgi.to_api()),
Multi(v => v.iter().map(|pos| pos.to_api()).collect()),
} {
Self::SrcRange(sr) => api::Location::SourceRange(sr.to_api()),
})
@@ -60,9 +74,36 @@ impl fmt::Display for Pos {
Pos::None => f.write_str("N/A"),
Pos::Gen(g) => write!(f, "{g}"),
Pos::SrcRange(sr) => write!(f, "{sr}"),
Pos::Multi(posv) => {
write!(f, "{}", posv[0])?;
for pos in posv {
write!(f, "+{}", pos)?;
}
Ok(())
},
}
}
}
impl Add for Pos {
  type Output = Pos;
  /// Merge two position sets, flattening nested [Pos::Multi] and dropping
  /// [Pos::None] so the documented `Multi` invariants hold.
  fn add(self, rhs: Self) -> Self::Output {
    match (self, rhs) {
      (Pos::Multi(mut l), Pos::Multi(r)) => {
        l.extend(r);
        Pos::Multi(l)
      },
      (Pos::None, other) | (other, Pos::None) => other,
      (Pos::Multi(mut v), single) => {
        v.push(single);
        Pos::Multi(v)
      },
      (single, Pos::Multi(v)) => {
        let mut all = vec![single];
        all.extend(v);
        Pos::Multi(all)
      },
      (l, r) => Pos::Multi(vec![l, r]),
    }
  }
}
impl AddAssign for Pos {
  /// In-place variant of `+`: temporarily park [Pos::None] in `self` while
  /// the combined value is computed.
  fn add_assign(&mut self, rhs: Self) {
    let prev = std::mem::replace(self, Pos::None);
    *self = prev + rhs;
  }
}
/// Exact source code location. Includes where the code was loaded from, what
/// the original source code was, and a byte range.
@@ -75,9 +116,9 @@ impl SrcRange {
pub fn new(range: Range<u32>, path: &Sym) -> Self {
Self { range: range.clone(), path: path.clone() }
}
/// Create a dud [SourceRange] for testing. Its value is unspecified and
/// Create a dud [SrcRange] for testing. Its value is unspecified and
/// volatile.
pub async fn mock(i: &Interner) -> Self { Self { range: 0..1, path: sym!(test; i).await } }
pub async fn mock() -> Self { Self { range: 0..1, path: sym!(test) } }
/// Path the source text was loaded from
pub fn path(&self) -> Sym { self.path.clone() }
/// Byte range
@@ -92,6 +133,10 @@ impl SrcRange {
pub fn map_range(&self, map: impl FnOnce(Range<u32>) -> Range<u32>) -> Self {
Self { range: map(self.range()), path: self.path() }
}
/// Format the range in a way that VSCode can convert to a link and is
/// human-readable. For this operation we need the source code text, but
/// holding it in the position object is a bit heavy so instead we take it as
/// an argument
pub fn pretty_print(&self, src: &str) -> String {
let (sl, sc) = pos2lc(src, self.range.start);
let (el, ec) = pos2lc(src, self.range.end);
@@ -101,13 +146,25 @@ impl SrcRange {
(false, _) => format!("{sl}:{sc}..{el}:{ec}"),
}
}
/// Zero-width position at a certain offset
pub fn zw(path: Sym, pos: u32) -> Self { Self { path, range: pos..pos } }
pub async fn from_api(api: &api::SourceRange, i: &Interner) -> Self {
Self { path: Sym::from_api(api.path, i).await, range: api.range.clone() }
/// Deserialize from a message
pub async fn from_api(api: &api::SourceRange) -> Self {
Self { path: Sym::from_api(api.path).await, range: api.range.clone() }
}
/// Serialize to a message
pub fn to_api(&self) -> api::SourceRange {
api::SourceRange { path: self.path.to_api(), range: self.range.clone() }
}
/// Connect two ranges into one
///
/// # Panics
///
/// if the ranges are not from the same file
pub fn to(&self, rhs: &Self) -> Self {
assert_eq!(self.path, rhs.path, "Range continues across files");
Self { path: self.path(), range: self.start().min(rhs.start())..self.end().max(rhs.end()) }
}
}
impl From<SrcRange> for ErrPos {
fn from(val: SrcRange) -> Self { val.pos().into() }
@@ -127,25 +184,22 @@ pub struct CodeGenInfo {
/// formatted like a Rust namespace
pub generator: Sym,
/// Unformatted user message with relevant circumstances and parameters
pub details: Tok<String>,
pub details: IStr,
}
impl CodeGenInfo {
/// A codegen marker with no user message and parameters
pub async fn new_short(generator: Sym, i: &Interner) -> Self {
Self { generator, details: i.i("").await }
}
pub async fn new_short(generator: Sym) -> Self { Self { generator, details: is("").await } }
/// A codegen marker with a user message or parameters
pub async fn new_details(generator: Sym, details: impl AsRef<str>, i: &Interner) -> Self {
Self { generator, details: i.i(details.as_ref()).await }
pub async fn new_details(generator: Sym, details: impl AsRef<str>) -> Self {
Self { generator, details: is(details.as_ref()).await }
}
/// Syntactic location
pub fn pos(&self) -> Pos { Pos::Gen(self.clone()) }
pub async fn from_api(api: &api::CodeGenInfo, i: &Interner) -> Self {
Self {
generator: Sym::from_api(api.generator, i).await,
details: Tok::from_api(api.details, i).await,
}
/// Deserialize from a message
pub async fn from_api(api: &api::CodeGenInfo) -> Self {
Self { generator: Sym::from_api(api.generator).await, details: es(api.details).await }
}
/// Serialize to a message
pub fn to_api(&self) -> api::CodeGenInfo {
api::CodeGenInfo { generator: self.generator.to_api(), details: self.details.to_api() }
}

View File

@@ -1,35 +1,76 @@
use std::any::Any;
use std::fmt::Arguments;
use std::fs::File;
use std::io::{Write, stderr};
use std::rc::Rc;
pub use api::LogStrategy;
use itertools::Itertools;
use futures::future::LocalBoxFuture;
use task_local::task_local;
use crate::api;
use crate::{api, clone};
#[derive(Clone)]
pub struct Logger(api::LogStrategy);
impl Logger {
pub fn new(strat: api::LogStrategy) -> Self { Self(strat) }
pub fn log(&self, msg: impl AsRef<str>) { writeln!(self, "{}", msg.as_ref()) }
pub fn strat(&self) -> api::LogStrategy { self.0.clone() }
pub fn log_buf(&self, event: impl AsRef<str>, buf: &[u8]) {
if std::env::var("ORCHID_LOG_BUFFERS").is_ok_and(|v| !v.is_empty()) {
writeln!(self, "{}: [{}]", event.as_ref(), buf.iter().map(|b| format!("{b:02x}")).join(" "))
}
}
pub fn write_fmt(&self, fmt: Arguments) {
match &self.0 {
api::LogStrategy::Discard => (),
api::LogStrategy::StdErr => {
stderr().write_fmt(fmt).expect("Could not write to stderr!");
stderr().flush().expect("Could not flush stderr")
},
api::LogStrategy::File(f) => {
let mut file = (File::options().write(true).create(true).truncate(true).open(f))
.expect("Could not open logfile");
file.write_fmt(fmt).expect("Could not write to logfile");
},
}
/// A first argument to [write!] and [writeln!] that causes them to return a
/// future.
pub trait LogWriter {
  /// Format the arguments and process the resulting message asynchronously.
  /// Returning a boxed future is what lets `write!(...)` be awaited.
  fn write_fmt<'a>(&'a self, fmt: Arguments<'a>) -> LocalBoxFuture<'a, ()>;
}
/// Injectable logging service passed to [with_logger]
pub trait Logger: Any {
  /// Obtain a writer that processes the message according to the given category
  fn writer(&self, category: &str) -> Rc<dyn LogWriter>;
  /// Obtain a serializable limited description of what will eventually happen
  /// to the message
  fn strat(&self, category: &str) -> api::LogStrategy;
  /// Determine whether a certain category would get processed at all. This is a
  /// shortcut over comparing [Logger::strat] to [api::LogStrategy::Discard],
  /// letting callers skip formatting messages that would be dropped anyway.
  fn is_active(&self, category: &str) -> bool {
    !matches!(self.strat(category), api::LogStrategy::Discard)
  }
}
task_local! {
static LOGGER: Rc<dyn Logger>;
}
/// Within the future passed to this function the freestanding [log] and
/// [get_logger] functions can be used
pub async fn with_logger<F: Future>(logger: impl Logger + 'static, fut: F) -> F::Output {
  // Pin the logger behind an Rc so the task-local slot can hand out clones
  let shared: Rc<dyn Logger> = Rc::new(logger);
  LOGGER.scope(shared, fut).await
}
/// Obtain an async log writer
///
/// ```
/// use orchid_base::log;
/// async {
///   let user = "lorentz";
///   writeln!(log("info"), "Hello from {user}").await
/// };
/// ```
pub fn log(category: &str) -> Rc<dyn LogWriter> {
  // Grab the task-local logger first, then ask it for the category's writer
  let logger = LOGGER.try_with(|l| l.clone()).expect("Logger not set!");
  logger.writer(category)
}
/// Obtain a reference to the current [Logger]. This is mostly useful for
/// [Logger::is_active]-based optimizations
pub fn get_logger() -> Rc<dyn Logger> {
  LOGGER.try_with(Rc::clone).expect("Logger not set!")
}
#[derive(Clone)]
pub struct TestLogger(Rc<dyn Fn(String) -> LocalBoxFuture<'static, ()>>);
impl LogWriter for TestLogger {
  /// Render the message eagerly and hand the owned string to the callback
  fn write_fmt<'a>(&'a self, fmt: Arguments<'a>) -> LocalBoxFuture<'a, ()> {
    let rendered = fmt.to_string();
    (self.0)(rendered)
  }
}
impl Logger for TestLogger {
  /// The test logger has no configurable strategy; always report the default
  fn strat(&self, _category: &str) -> orchid_api::LogStrategy { orchid_api::LogStrategy::Default }
  /// Every category shares the same writer: the wrapped callback itself
  fn writer(&self, _category: &str) -> Rc<dyn LogWriter> { Rc::new(self.clone()) }
}
impl TestLogger {
  /// Wrap an async callback; every formatted log message is passed to it as an
  /// owned [String]
  pub fn new(f: impl AsyncFn(String) + 'static) -> Self {
    let f = Rc::new(f);
    // clone! presumably copies the Rc into the closure so the boxed future
    // owns its own handle to the callback -- NOTE(review): confirm against
    // crate::clone's definition
    Self(Rc::new(move |s| clone!(f; Box::pin(async move { f(s).await }))))
  }
}
impl Default for TestLogger {
fn default() -> Self { TestLogger::new(async |s| eprint!("{s}")) }
}

View File

@@ -1,4 +1,4 @@
/// A shorthand for mapping over enums with identical structure. Used for
/// A shorthand for mapping over enums with similar structure. Used for
/// converting between owned enums and the corresponding API enums that only
/// differ in the type of their fields.
///
@@ -7,7 +7,11 @@
/// match_mapping!(self, ThisType => OtherType {
/// EmptyVariant,
/// TupleVariant(foo => intern(foo), bar.clone()),
/// StructVariant{ a.to_api(), b }
/// StructVariant{ a.to_api(), b },
/// DedicatedConverter(value () convert)
/// } {
/// ThisType::DimorphicVariant(c) => OtherType::CorrespondingVariant(c.left(), c.right()),
/// ThisType::UnexpectedVariant => panic!(),
/// })
/// ```
#[macro_export]

View File

@@ -1,22 +0,0 @@
use std::io;
use std::pin::Pin;
use async_std::io::{Read, ReadExt, Write, WriteExt};
use orchid_api_traits::{Decode, Encode};
/// Write a length-prefixed message to the stream and flush it.
///
/// The message is preceded by its length encoded as a [u32], matching the
/// framing that [recv_msg] expects.
///
/// # Panics
///
/// if the message is longer than [u32::MAX] bytes
pub async fn send_msg(mut write: Pin<&mut impl Write>, msg: &[u8]) -> io::Result<()> {
  // Encode the 4-byte length prefix into a preallocated buffer
  let mut len_buf = Vec::with_capacity((u32::BITS / 8) as usize);
  let len = u32::try_from(msg.len()).expect("Message length exceeds u32::MAX");
  len.encode(Pin::new(&mut len_buf)).await;
  write.write_all(&len_buf).await?;
  write.write_all(msg).await?;
  write.flush().await
}
/// Read one length-prefixed message as produced by [send_msg]: a [u32]
/// length followed by that many payload bytes.
pub async fn recv_msg(mut read: Pin<&mut impl Read>) -> io::Result<Vec<u8>> {
  const LEN_BYTES: usize = (u32::BITS / 8) as usize;
  // Read the fixed-size length prefix first
  let mut len_buf = [0u8; LEN_BYTES];
  read.read_exact(&mut len_buf).await?;
  let len = u32::decode(Pin::new(&mut &len_buf[..])).await;
  // Then read exactly that many payload bytes
  let mut payload = vec![0u8; len as usize];
  read.read_exact(&mut payload).await?;
  Ok(payload)
}

View File

@@ -11,67 +11,60 @@ use futures::future::{OptionFuture, join_all};
use itertools::Itertools;
use trait_set::trait_set;
use crate::api;
use crate::interner::{InternMarker, Interner, Tok};
use crate::{IStr, IStrv, api, es, ev, is, iv};
trait_set! {
/// Traits that all name iterators should implement
pub trait NameIter = Iterator<Item = Tok<String>> + DoubleEndedIterator + ExactSizeIterator;
pub trait NameIter = Iterator<Item = IStr> + DoubleEndedIterator + ExactSizeIterator;
}
/// A token path which may be empty. [VName] is the non-empty,
/// [PathSlice] is the borrowed version
/// A token path which may be empty. [VName] is the non-empty version
#[derive(Clone, Default, Hash, PartialEq, Eq)]
pub struct VPath(pub Vec<Tok<String>>);
pub struct VPath(Vec<IStr>);
impl VPath {
/// Collect segments into a vector
pub fn new(items: impl IntoIterator<Item = Tok<String>>) -> Self {
Self(items.into_iter().collect())
}
pub fn new(items: impl IntoIterator<Item = IStr>) -> Self { Self(items.into_iter().collect()) }
/// Number of path segments
pub fn len(&self) -> usize { self.0.len() }
/// Whether there are any path segments. In other words, whether this is a
/// valid name
pub fn is_empty(&self) -> bool { self.len() == 0 }
/// Prepend some tokens to the path
pub fn prefix(self, items: impl IntoIterator<Item = Tok<String>>) -> Self {
pub fn prefix(self, items: impl IntoIterator<Item = IStr>) -> Self {
Self(items.into_iter().chain(self.0).collect())
}
/// Append some tokens to the path
pub fn suffix(self, items: impl IntoIterator<Item = Tok<String>>) -> Self {
pub fn suffix(self, items: impl IntoIterator<Item = IStr>) -> Self {
Self(self.0.into_iter().chain(items).collect())
}
/// Partition the string by `::` namespace separators
pub async fn parse(s: &str, i: &Interner) -> Self {
Self(if s.is_empty() { vec![] } else { join_all(s.split("::").map(|s| i.i(s))).await })
pub async fn parse(s: &str) -> Self {
Self(if s.is_empty() { vec![] } else { join_all(s.split("::").map(is)).await })
}
/// Walk over the segments
pub fn str_iter(&self) -> impl Iterator<Item = &'_ str> {
Box::new(self.0.iter().map(|s| s.as_str()))
}
pub fn str_iter(&self) -> impl Iterator<Item = &'_ str> { Box::new(self.0.iter().map(|s| &**s)) }
/// Try to convert into non-empty version
pub fn into_name(self) -> Result<VName, EmptyNameError> { VName::new(self.0) }
/// Add a token to the path. Since now we know that it can't be empty, turn it
/// into a name.
pub fn name_with_suffix(self, name: Tok<String>) -> VName {
pub fn name_with_suffix(self, name: IStr) -> VName {
VName(self.into_iter().chain([name]).collect())
}
/// Add a token to the beginning of the. Since now we know that it can't be
/// empty, turn it into a name.
pub fn name_with_prefix(self, name: Tok<String>) -> VName {
pub fn name_with_prefix(self, name: IStr) -> VName {
VName([name].into_iter().chain(self).collect())
}
/// Convert a fs path to a vpath
pub async fn from_path(path: &Path, ext: &str, i: &Interner) -> Option<(Self, bool)> {
async fn to_vpath(p: &Path, i: &Interner) -> Option<VPath> {
let tok_opt_v =
join_all(p.iter().map(|c| OptionFuture::from(c.to_str().map(|s| i.i(s))))).await;
pub async fn from_path(path: &Path, ext: &str) -> Option<(Self, bool)> {
async fn to_vpath(p: &Path) -> Option<VPath> {
let tok_opt_v = join_all(p.iter().map(|c| OptionFuture::from(c.to_str().map(is)))).await;
tok_opt_v.into_iter().collect::<Option<_>>().map(VPath)
}
match path.extension().map(|s| s.to_str()) {
Some(Some(s)) if s == ext => Some((to_vpath(&path.with_extension(""), i).await?, true)),
None => Some((to_vpath(path, i).await?, false)),
Some(Some(s)) if s == ext => Some((to_vpath(&path.with_extension("")).await?, true)),
None => Some((to_vpath(path).await?, false)),
Some(_) => None,
}
}
@@ -84,30 +77,28 @@ impl fmt::Display for VPath {
write!(f, "{}", self.str_iter().join("::"))
}
}
impl FromIterator<Tok<String>> for VPath {
fn from_iter<T: IntoIterator<Item = Tok<String>>>(iter: T) -> Self {
Self(iter.into_iter().collect())
}
impl FromIterator<IStr> for VPath {
fn from_iter<T: IntoIterator<Item = IStr>>(iter: T) -> Self { Self(iter.into_iter().collect()) }
}
impl IntoIterator for VPath {
type Item = Tok<String>;
type Item = IStr;
type IntoIter = vec::IntoIter<Self::Item>;
fn into_iter(self) -> Self::IntoIter { self.0.into_iter() }
}
impl Borrow<[Tok<String>]> for VPath {
fn borrow(&self) -> &[Tok<String>] { &self.0[..] }
impl Borrow<[IStr]> for VPath {
fn borrow(&self) -> &[IStr] { &self.0[..] }
}
impl Deref for VPath {
type Target = [Tok<String>];
type Target = [IStr];
fn deref(&self) -> &Self::Target { self.borrow() }
}
impl<T> Index<T> for VPath
where [Tok<String>]: Index<T>
where [IStr]: Index<T>
{
type Output = <[Tok<String>] as Index<T>>::Output;
type Output = <[IStr] as Index<T>>::Output;
fn index(&self, index: T) -> &Self::Output { &Borrow::<[Tok<String>]>::borrow(self)[index] }
fn index(&self, index: T) -> &Self::Output { &Borrow::<[IStr]>::borrow(self)[index] }
}
/// A mutable representation of a namespaced identifier of at least one segment.
@@ -117,50 +108,42 @@ where [Tok<String>]: Index<T>
/// See also [Sym] for the immutable representation, and [VPath] for possibly
/// empty values
#[derive(Clone, Hash, PartialEq, Eq)]
pub struct VName(Vec<Tok<String>>);
pub struct VName(Vec<IStr>);
impl VName {
/// Assert that the sequence isn't empty and wrap it in [VName] to represent
/// this invariant
pub fn new(items: impl IntoIterator<Item = Tok<String>>) -> Result<Self, EmptyNameError> {
pub fn new(items: impl IntoIterator<Item = IStr>) -> Result<Self, EmptyNameError> {
let data: Vec<_> = items.into_iter().collect();
if data.is_empty() { Err(EmptyNameError) } else { Ok(Self(data)) }
}
pub async fn deintern(
name: impl IntoIterator<Item = api::TStr>,
i: &Interner,
) -> Result<Self, EmptyNameError> {
Self::new(join_all(name.into_iter().map(|m| Tok::from_api(m, i))).await)
pub async fn deintern(name: impl IntoIterator<Item = api::TStr>) -> Result<Self, EmptyNameError> {
Self::new(join_all(name.into_iter().map(es)).await)
}
/// Unwrap the enclosed vector
pub fn into_vec(self) -> Vec<Tok<String>> { self.0 }
pub fn into_vec(self) -> Vec<IStr> { self.0 }
/// Get a reference to the enclosed vector
pub fn vec(&self) -> &Vec<Tok<String>> { &self.0 }
pub fn vec(&self) -> &Vec<IStr> { &self.0 }
/// Mutable access to the underlying vector. To ensure correct results, this
/// must never be empty.
pub fn vec_mut(&mut self) -> &mut Vec<Tok<String>> { &mut self.0 }
pub fn vec_mut(&mut self) -> &mut Vec<IStr> { &mut self.0 }
/// Intern the name and return a [Sym]
pub async fn to_sym(&self, i: &Interner) -> Sym { Sym(i.i(&self.0[..]).await) }
pub async fn to_sym(&self) -> Sym { Sym(iv(&self.0[..]).await) }
/// If this name has only one segment, return it
pub fn as_root(&self) -> Option<Tok<String>> { self.0.iter().exactly_one().ok().cloned() }
pub fn as_root(&self) -> Option<IStr> { self.0.iter().exactly_one().ok().cloned() }
/// Prepend the segments to this name
#[must_use = "This is a pure function"]
pub fn prefix(self, items: impl IntoIterator<Item = Tok<String>>) -> Self {
pub fn prefix(self, items: impl IntoIterator<Item = IStr>) -> Self {
Self(items.into_iter().chain(self.0).collect())
}
/// Append the segments to this name
#[must_use = "This is a pure function"]
pub fn suffix(self, items: impl IntoIterator<Item = Tok<String>>) -> Self {
pub fn suffix(self, items: impl IntoIterator<Item = IStr>) -> Self {
Self(self.0.into_iter().chain(items).collect())
}
/// Read a `::` separated namespaced name
pub async fn parse(s: &str, i: &Interner) -> Result<Self, EmptyNameError> {
Self::new(VPath::parse(s, i).await)
}
pub async fn literal(s: &'static str, i: &Interner) -> Self {
Self::parse(s, i).await.expect("empty literal !?")
}
pub async fn parse(s: &str) -> Result<Self, EmptyNameError> { Self::new(VPath::parse(s).await) }
/// Obtain an iterator over the segments of the name
pub fn iter(&self) -> impl Iterator<Item = Tok<String>> + '_ { self.0.iter().cloned() }
pub fn iter(&self) -> impl Iterator<Item = IStr> + '_ { self.0.iter().cloned() }
}
impl fmt::Debug for VName {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "VName({self})") }
@@ -171,22 +154,22 @@ impl fmt::Display for VName {
}
}
impl IntoIterator for VName {
type Item = Tok<String>;
type Item = IStr;
type IntoIter = vec::IntoIter<Self::Item>;
fn into_iter(self) -> Self::IntoIter { self.0.into_iter() }
}
impl<T> Index<T> for VName
where [Tok<String>]: Index<T>
where [IStr]: Index<T>
{
type Output = <[Tok<String>] as Index<T>>::Output;
type Output = <[IStr] as Index<T>>::Output;
fn index(&self, index: T) -> &Self::Output { &self.deref()[index] }
}
impl Borrow<[Tok<String>]> for VName {
fn borrow(&self) -> &[Tok<String>] { self.0.borrow() }
impl Borrow<[IStr]> for VName {
fn borrow(&self) -> &[IStr] { self.0.borrow() }
}
impl Deref for VName {
type Target = [Tok<String>];
type Target = [IStr];
fn deref(&self) -> &Self::Target { self.borrow() }
}
@@ -194,11 +177,9 @@ impl Deref for VName {
/// empty sequence
#[derive(Debug, Copy, Clone, Default, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct EmptyNameError;
impl TryFrom<&[Tok<String>]> for VName {
impl TryFrom<&[IStr]> for VName {
type Error = EmptyNameError;
fn try_from(value: &[Tok<String>]) -> Result<Self, Self::Error> {
Self::new(value.iter().cloned())
}
fn try_from(value: &[IStr]) -> Result<Self, Self::Error> { Self::new(value.iter().cloned()) }
}
/// An interned representation of a namespaced identifier.
@@ -207,37 +188,38 @@ impl TryFrom<&[Tok<String>]> for VName {
///
/// See also [VName]
#[derive(Clone, Hash, PartialEq, Eq)]
pub struct Sym(Tok<Vec<Tok<String>>>);
pub struct Sym(IStrv);
impl Sym {
/// Assert that the sequence isn't empty, intern it and wrap it in a [Sym] to
/// represent this invariant
pub async fn new(
v: impl IntoIterator<Item = Tok<String>>,
i: &Interner,
) -> Result<Self, EmptyNameError> {
pub async fn new(v: impl IntoIterator<Item = IStr>) -> Result<Self, EmptyNameError> {
let items = v.into_iter().collect_vec();
Self::from_tok(i.i(&items).await)
Self::from_tok(iv(&items).await)
}
/// Read a `::` separated namespaced name.
pub async fn parse(s: &str, i: &Interner) -> Result<Self, EmptyNameError> {
Ok(Sym(i.i(&VName::parse(s, i).await?.into_vec()).await))
/// Read a `::` separated namespaced name. Do not use this for statically
/// known names, use the [sym] macro instead which is cached.
pub async fn parse(s: &str) -> Result<Self, EmptyNameError> {
Ok(Sym(iv(&VName::parse(s).await?.into_vec()).await))
}
/// Assert that a token isn't empty, and wrap it in a [Sym]
pub fn from_tok(t: Tok<Vec<Tok<String>>>) -> Result<Self, EmptyNameError> {
pub fn from_tok(t: IStrv) -> Result<Self, EmptyNameError> {
if t.is_empty() { Err(EmptyNameError) } else { Ok(Self(t)) }
}
/// Grab the interner token
pub fn tok(&self) -> Tok<Vec<Tok<String>>> { self.0.clone() }
pub fn tok(&self) -> IStrv { self.0.clone() }
/// Get a number unique to this name suitable for arbitrary ordering.
pub fn id(&self) -> NonZeroU64 { self.0.to_api().get_id() }
pub fn id(&self) -> NonZeroU64 { self.0.to_api().0 }
/// Extern the sym for editing
pub fn to_vname(&self) -> VName { VName(self[..].to_vec()) }
pub async fn from_api(marker: api::TStrv, i: &Interner) -> Sym {
Self::from_tok(Tok::from_api(marker, i).await).expect("Empty sequence found for serialized Sym")
/// Decode from a message
pub async fn from_api(marker: api::TStrv) -> Sym {
Self::from_tok(ev(marker).await).expect("Empty sequence found for serialized Sym")
}
/// Encode into a message
pub fn to_api(&self) -> api::TStrv { self.tok().to_api() }
pub async fn push(&self, tok: Tok<String>, i: &Interner) -> Sym {
Self::new(self.0.iter().cloned().chain([tok]), i).await.unwrap()
/// Copy the symbol and extend it with a suffix
pub async fn suffix(&self, tokv: impl IntoIterator<Item = IStr>) -> Sym {
Self::new(self.0.iter().cloned().chain(tokv)).await.unwrap()
}
}
impl fmt::Debug for Sym {
@@ -249,57 +231,55 @@ impl fmt::Display for Sym {
}
}
impl<T> Index<T> for Sym
where [Tok<String>]: Index<T>
where [IStr]: Index<T>
{
type Output = <[Tok<String>] as Index<T>>::Output;
type Output = <[IStr] as Index<T>>::Output;
fn index(&self, index: T) -> &Self::Output { &self.deref()[index] }
}
impl Borrow<[Tok<String>]> for Sym {
fn borrow(&self) -> &[Tok<String>] { &self.0[..] }
impl Borrow<[IStr]> for Sym {
fn borrow(&self) -> &[IStr] { &self.0[..] }
}
impl Deref for Sym {
type Target = [Tok<String>];
type Target = [IStr];
fn deref(&self) -> &Self::Target { self.borrow() }
}
/// An abstraction over tokenized vs non-tokenized names so that they can be
/// handled together in datastructures. The names can never be empty
#[allow(clippy::len_without_is_empty)] // never empty
#[allow(clippy::len_without_is_empty, reason = "never empty")]
pub trait NameLike:
'static + Clone + Eq + Hash + fmt::Debug + fmt::Display + Borrow<[Tok<String>]>
'static + Clone + Eq + Hash + fmt::Debug + fmt::Display + Borrow<[IStr]>
{
/// Convert into held slice
fn as_slice(&self) -> &[Tok<String>] { Borrow::<[Tok<String>]>::borrow(self) }
fn as_slice(&self) -> &[IStr] { Borrow::<[IStr]>::borrow(self) }
/// Get iterator over tokens
fn iter(&self) -> impl NameIter + '_ { self.as_slice().iter().cloned() }
fn segs(&self) -> impl NameIter + '_ { self.as_slice().iter().cloned() }
/// Get iterator over string segments
fn str_iter(&self) -> impl Iterator<Item = &'_ str> + '_ {
self.as_slice().iter().map(|t| t.as_str())
}
fn str_iter(&self) -> impl Iterator<Item = &'_ str> + '_ { self.as_slice().iter().map(|t| &**t) }
/// Fully resolve the name for printing
#[must_use]
fn to_strv(&self) -> Vec<String> { self.iter().map(|s| s.to_string()).collect() }
fn to_strv(&self) -> Vec<String> { self.segs().map(|s| s.to_string()).collect() }
/// Format the name as an approximate filename
fn as_src_path(&self) -> String { format!("{}.orc", self.iter().join("/")) }
fn as_src_path(&self) -> String { format!("{}.orc", self.segs().join("/")) }
/// Return the number of segments in the name
fn len(&self) -> NonZeroUsize {
NonZeroUsize::try_from(self.iter().count()).expect("NameLike never empty")
fn len_nz(&self) -> NonZeroUsize {
NonZeroUsize::try_from(self.segs().count()).expect("NameLike never empty")
}
/// Like slice's `split_first` except we know that it always returns Some
fn split_first(&self) -> (Tok<String>, &[Tok<String>]) {
fn split_first_seg(&self) -> (IStr, &[IStr]) {
let (foot, torso) = self.as_slice().split_last().expect("NameLike never empty");
(foot.clone(), torso)
}
/// Like slice's `split_last` except we know that it always returns Some
fn split_last(&self) -> (Tok<String>, &[Tok<String>]) {
fn split_last_seg(&self) -> (IStr, &[IStr]) {
let (foot, torso) = self.as_slice().split_last().expect("NameLike never empty");
(foot.clone(), torso)
}
/// Get the first element
fn first(&self) -> Tok<String> { self.split_first().0 }
fn first_seg(&self) -> IStr { self.split_first_seg().0 }
/// Get the last element
fn last(&self) -> Tok<String> { self.split_last().0 }
fn last_seg(&self) -> IStr { self.split_last_seg().0 }
}
impl NameLike for Sym {}
@@ -307,22 +287,28 @@ impl NameLike for VName {}
/// Create a [Sym] literal.
///
/// Both the name and its components will be cached in a thread-local static so
/// The name and its components will be cached in a thread-local static so
/// that subsequent executions of the expression only incur an Arc-clone for
/// cloning the token.
#[macro_export]
macro_rules! sym {
($seg1:tt $( :: $seg:tt)* ; $i:expr) => { async {
$crate::name::Sym::from_tok(
$i.i(&[
$i.i(stringify!($seg1)).await
$( , $i.i(stringify!($seg)).await )*
])
.await
).unwrap()
}
($seg1:tt $( :: $seg:tt)*) => {
$crate::tl_cache!(async $crate::Sym : {
$crate::Sym::from_tok(
$crate::iv(&[
$crate::is($crate::sym!(@SEG $seg1)).await
$( , $crate::is($crate::sym!(@SEG $seg)).await )*
])
.await
).unwrap()
})
};
(@NAME $seg:tt) => {}
(@SEG [ $($data:tt)* ]) => {
stringify!($($data)*)
};
(@SEG $data:tt) => {
stringify!($data)
};
}
/// Create a [VName] literal.
@@ -330,12 +316,14 @@ macro_rules! sym {
/// The components are interned much like in [sym].
#[macro_export]
macro_rules! vname {
($seg1:tt $( :: $seg:tt)* ; $i:expr) => { async {
$crate::name::VName::new([
$i.i(stringify!($seg1)).await
$( , $i.i(stringify!($seg)).await )*
]).unwrap()
} };
($seg1:tt $( :: $seg:tt)*) => {
$crate::tl_cache!(async $crate::VName : {
$crate::VName::new([
$crate::is(stringify!($seg1)).await
$( , $crate::is(stringify!($seg)).await )*
]).unwrap()
})
};
}
/// Create a [VPath] literal.
@@ -343,14 +331,16 @@ macro_rules! vname {
/// The components are interned much like in [sym].
#[macro_export]
macro_rules! vpath {
($seg1:tt $( :: $seg:tt)+ ; $i:expr) => { async {
$crate::name::VPath(vec![
$i.i(stringify!($seg1)).await
$( , $i.i(stringify!($seg)).await )+
])
} };
($seg1:tt $( :: $seg:tt)*) => {
$crate::tl_cache!(async $crate::VPath : {
$crate::VPath::new(vec![
$crate::is(stringify!($seg1)).await
$( , $crate::is(stringify!($seg)).await )*
])
})
};
() => {
$crate::name::VPath(vec![])
$crate::VPath::new(vec![])
}
}
@@ -358,39 +348,41 @@ macro_rules! vpath {
mod test {
use std::borrow::Borrow;
use test_executors::spin_on;
use orchid_api_traits::spin_on;
use super::{NameLike, Sym, VName};
use crate::interner::{Interner, Tok};
use crate::name::VPath;
use crate::local_interner::local_interner;
use crate::{IStr, NameLike, Sym, VName, VPath, is, with_interner};
#[test]
fn recur() {
spin_on(async {
let i = Interner::new_master();
let myname = vname!(foo::bar; i).await;
let _borrowed_slice: &[Tok<String>] = myname.borrow();
let _deref_pathslice: &[Tok<String>] = &myname;
let _as_slice_out: &[Tok<String>] = myname.as_slice();
})
pub fn recur() {
spin_on(with_interner(local_interner(), async {
let myname = vname!(foo::bar);
let _borrowed_slice: &[IStr] = myname.borrow();
let _deref_pathslice: &[IStr] = &myname;
let _as_slice_out: &[IStr] = myname.as_slice();
}))
}
/// Tests that literals are correctly interned as equal
#[test]
fn literals() {
spin_on(async {
let i = Interner::new_master();
pub fn literals() {
spin_on(with_interner(local_interner(), async {
assert_eq!(
sym!(foo::bar::baz; i).await,
Sym::new([i.i("foo").await, i.i("bar").await, i.i("baz").await], &i).await.unwrap()
sym!(foo::bar::baz),
Sym::new([is("foo").await, is("bar").await, is("baz").await]).await.unwrap()
);
assert_eq!(
vname!(foo::bar::baz; i).await,
VName::new([i.i("foo").await, i.i("bar").await, i.i("baz").await]).unwrap()
sym!(foo::bar::[|>]),
Sym::new([is("foo").await, is("bar").await, is("|>").await]).await.unwrap()
);
assert_eq!(
vpath!(foo::bar::baz; i).await,
VPath::new([i.i("foo").await, i.i("bar").await, i.i("baz").await])
vname!(foo::bar::baz),
VName::new([is("foo").await, is("bar").await, is("baz").await]).unwrap()
);
})
assert_eq!(
{ vpath!(foo::bar::baz) },
VPath::new([is("foo").await, is("bar").await, is("baz").await])
);
}))
}
}

View File

@@ -1,283 +0,0 @@
//! The NORT (Normal Order Referencing Tree) is the interpreter's runtime
//! representation of Orchid programs.
//!
//! It uses a locator tree to find bound variables in lambda functions, which
//! necessitates a normal reduction order because modifying the body by reducing
//! expressions would invalidate any locators in enclosing lambdas.
//!
//! Clauses are held in a mutable `Arc<Mutex<_>>`, so that after substitution
//! the instances of the argument remain linked and a reduction step applied to
//! any instance transforms all of them.
//!
//! To improve locality and make the tree less deep and locators shorter,
//! function calls store multiple arguments in a deque.
use std::collections::VecDeque;
use std::fmt;
use std::ops::DerefMut;
use std::sync::{Arc, Mutex, MutexGuard, TryLockError};
use itertools::Itertools;
use super::path_set::PathSet;
use crate::foreign::atom::Atom;
#[allow(unused)] // for doc
use crate::foreign::atom::Atomic;
use crate::foreign::error::{RTErrorObj, RTResult};
use crate::foreign::try_from_expr::TryFromExpr;
use crate::location::CodeLocation;
use crate::name::Sym;
#[allow(unused)] // for doc
use crate::parse::parsed;
use crate::utils::ddispatch::request;
/// Kinda like [AsMut] except it supports a guard
pub(crate) trait AsDerefMut<T> {
  /// Get mutable access to the wrapped value, possibly through a lock guard
  /// or other smart pointer rather than a plain `&mut T`
  fn as_deref_mut(&mut self) -> impl DerefMut<Target = T> + '_;
}
/// An expression with metadata
#[derive(Clone)]
pub struct Expr {
/// The actual value
pub clause: ClauseInst,
/// Information about the code that produced this value
pub location: CodeLocation,
}
impl Expr {
  /// Constructor
  pub fn new(clause: ClauseInst, location: CodeLocation) -> Self { Self { clause, location } }
  /// Obtain the location of the expression
  pub fn location(&self) -> CodeLocation { self.location.clone() }
  /// Convert into any type that implements [TryFromExpr]. Calls to this
  /// function are generated wherever a conversion is elided in an extern
  /// function.
  pub fn downcast<T: TryFromExpr>(self) -> RTResult<T> {
    let Expr { mut clause, location } = self;
    // Walk the Identity chain down to the underlying clause. The guard is
    // explicitly dropped before `clause` is reassigned so that the Mutex of
    // one link is never held while the next link is inspected.
    loop {
      let cls_deref = clause.cls_mut();
      match &*cls_deref {
        Clause::Identity(alt) => {
          let temp = alt.clone();
          drop(cls_deref);
          clause = temp;
        },
        _ => {
          drop(cls_deref);
          return T::from_expr(Expr { clause, location });
        },
      };
    }
  }
  /// Visit all expressions in the tree. The search can be exited early by
  /// returning [Some]
  ///
  /// See also [parsed::Expr::search_all]
  pub fn search_all<T>(&self, predicate: &mut impl FnMut(&Self) -> Option<T>) -> Option<T> {
    if let Some(t) = predicate(self) {
      return Some(t);
    }
    self.clause.inspect(|c| match c {
      // `inspect` resolves Identity links before invoking the callback
      Clause::Identity(_alt) => unreachable!("Handled by inspect"),
      // Search the function first, then the arguments left to right
      Clause::Apply { f, x } =>
        (f.search_all(predicate)).or_else(|| x.iter().find_map(|x| x.search_all(predicate))),
      Clause::Lambda { body, .. } => body.search_all(predicate),
      // Leaves: nothing further to visit
      Clause::Constant(_) | Clause::LambdaArg | Clause::Atom(_) | Clause::Bottom(_) => None,
    })
  }
  /// Clone the refcounted [ClauseInst] out of the expression
  #[must_use]
  pub fn clsi(&self) -> ClauseInst { self.clause.clone() }
  /// Read-Write access to the [Clause]
  ///
  /// Delegates to [ClauseInst::cls_mut], which blocks until the [Mutex] is
  /// released rather than panicking on contention.
  ///
  /// # Panics
  ///
  /// if the [Mutex] is poisoned
  pub fn cls_mut(&self) -> MutexGuard<'_, Clause> { self.clause.cls_mut() }
}
impl fmt::Debug for Expr {
  /// Renders as `clause@location`
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    let Self { clause, location } = self;
    write!(f, "{clause:?}@{location}")
  }
}
impl fmt::Display for Expr {
  /// Delegates to the clause; the location is only shown by Debug
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(&self.clause, f) }
}
impl AsDerefMut<Clause> for Expr {
fn as_deref_mut(&mut self) -> impl DerefMut<Target = Clause> + '_ { self.clause.cls_mut() }
}
/// A wrapper around expressions to handle their multiple occurences in
/// the tree together
#[derive(Clone)]
pub struct ClauseInst(pub Arc<Mutex<Clause>>);
impl ClauseInst {
  /// Wrap a [Clause] in a shared container so that normalization steps are
  /// applied to all references
  #[must_use]
  pub fn new(cls: Clause) -> Self { Self(Arc::new(Mutex::new(cls))) }
  /// Take the [Clause] out of this container if it's the last reference to it,
  /// or return self.
  pub fn try_unwrap(self) -> Result<Clause, ClauseInst> {
    Arc::try_unwrap(self.0).map(|c| c.into_inner().unwrap()).map_err(Self)
  }
  /// Read-Write access to the shared clause instance
  ///
  /// if the clause is already borrowed, this will block until it is released.
  pub fn cls_mut(&self) -> MutexGuard<'_, Clause> { self.0.lock().unwrap() }
  /// Call a predicate on the clause, returning whatever the
  /// predicate returns. This is a convenience function for reaching
  /// through the [Mutex]. The clause will never be [Clause::Identity].
  #[must_use]
  pub fn inspect<T>(&self, predicate: impl FnOnce(&Clause) -> T) -> T {
    match &*self.cls_mut() {
      // NOTE(review): recurses into the target while this clause's lock is
      // held, so a cyclic Identity chain would deadlock here. Presumably
      // construction guarantees acyclic chains -- confirm.
      Clause::Identity(sub) => sub.inspect(predicate),
      x => predicate(x),
    }
  }
  /// If this expression is an [Atomic], request an object of the given type.
  /// If it's not an atomic, fail the request automatically.
  #[must_use = "your request might not have succeeded"]
  pub fn request<T: 'static>(&self) -> Option<T> {
    match &*self.cls_mut() {
      Clause::Atom(a) => request(&*a.0),
      // Identity links are transparent: forward the request to the target
      Clause::Identity(alt) => alt.request(),
      _ => None,
    }
  }
  /// Associate a location with this clause
  // NOTE(review): `self` is consumed, so the `self.clone()` below is a
  // redundant Arc refcount bump; `clause: self` would suffice.
  pub fn into_expr(self, location: CodeLocation) -> Expr {
    Expr { clause: self.clone(), location: location.clone() }
  }
  /// Check ahead-of-time if this clause contains an atom. Calls
  /// [ClauseInst#cls] for read access.
  ///
  /// Since atoms cannot become normalizable, if this is true and previous
  /// normalization failed, the atom is known to be in normal form.
  pub fn is_atom(&self) -> bool { matches!(&*self.cls_mut(), Clause::Atom(_)) }
  /// Tries to unwrap the [Arc]. If that fails, clones it field by field.
  /// If it's a [Clause::Atom] which cannot be cloned, wraps it in a
  /// [Clause::Identity].
  ///
  /// Implementation of [crate::foreign::to_clause::ToClause::to_clause]. The
  /// trait is more general so it requires a location which this one doesn't.
  pub fn into_cls(self) -> Clause {
    self.try_unwrap().unwrap_or_else(|clsi| match &*clsi.cls_mut() {
      Clause::Apply { f, x } => Clause::Apply { f: f.clone(), x: x.clone() },
      // Atoms cannot be cloned, so keep a shared link to the original instead
      Clause::Atom(_) => Clause::Identity(clsi.clone()),
      Clause::Bottom(e) => Clause::Bottom(e.clone()),
      Clause::Constant(c) => Clause::Constant(c.clone()),
      Clause::Identity(sub) => Clause::Identity(sub.clone()),
      Clause::Lambda { args, body } => Clause::Lambda { args: args.clone(), body: body.clone() },
      Clause::LambdaArg => Clause::LambdaArg,
    })
  }
  /// Decides if this clause is the exact same instance as another. Most useful
  /// to detect potential deadlocks.
  pub fn is_same(&self, other: &Self) -> bool { Arc::ptr_eq(&self.0, &other.0) }
}
impl fmt::Debug for ClauseInst {
  /// Non-blocking debug formatting: reports a placeholder instead of
  /// waiting when the clause is locked or its lock is poisoned.
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    let placeholder = match self.0.try_lock() {
      Ok(cls) => return write!(f, "{cls:?}"),
      Err(TryLockError::Poisoned(_)) => "<poisoned>",
      Err(TryLockError::WouldBlock) => "<locked>",
    };
    f.write_str(placeholder)
  }
}
impl fmt::Display for ClauseInst {
  /// Non-blocking display formatting: reports a placeholder instead of
  /// waiting when the clause is locked or its lock is poisoned.
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    let placeholder = match self.0.try_lock() {
      Ok(cls) => return write!(f, "{cls}"),
      Err(TryLockError::Poisoned(_)) => "<poisoned>",
      Err(TryLockError::WouldBlock) => "<locked>",
    };
    f.write_str(placeholder)
  }
}
// Gives uniform mutable access to the wrapped [Clause]; blocks until the
// shared lock is available.
impl AsDerefMut<Clause> for ClauseInst {
fn as_deref_mut(&mut self) -> impl DerefMut<Target = Clause> + '_ { self.cls_mut() }
}
/// Distinct types of expressions recognized by the interpreter
#[derive(Debug)]
pub enum Clause {
/// An expression that causes an error
Bottom(RTErrorObj),
/// Indicates that this [ClauseInst] has the same value as the other
/// [ClauseInst]. This has two benefits:
///
/// - [Clause] and therefore [Atomic] doesn't have to be [Clone] which saves
/// many synchronization primitives and reference counters in usercode
/// - it enforces on the type level that all copies are normalized together,
/// so accidental inefficiency in the interpreter is rarer.
///
/// That being said, it's still arbitrary many indirections, so when possible
/// APIs should be usable with a [ClauseInst] directly.
Identity(ClauseInst),
/// An opaque non-callable value, eg. a file handle
Atom(Atom),
/// A function application
Apply {
/// Function to be applied
f: Expr,
/// Arguments to be substituted in the function, leftmost first
x: VecDeque<Expr>,
},
/// A name to be looked up in the interpreter's symbol table
Constant(Sym),
/// A function
Lambda {
/// A collection of (zero or more) paths to placeholders belonging to this
/// function; `None` means the argument is discarded
args: Option<PathSet>,
/// The tree produced by this function, with placeholders where the
/// argument will go
body: Expr,
},
/// A placeholder within a function that will be replaced upon application
LambdaArg,
}
impl Clause {
  /// Move this clause into a refcounted shared lock ([ClauseInst]).
  pub fn into_inst(self) -> ClauseInst { ClauseInst::new(self) }
  /// Move this clause into a located expression node.
  pub fn into_expr(self, location: CodeLocation) -> Expr {
    let inst = self.into_inst();
    inst.into_expr(location)
  }
}
impl fmt::Display for Clause {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Clause::Atom(a) => write!(f, "{a:?}"),
Clause::Bottom(err) => write!(f, "bottom({err})"),
Clause::LambdaArg => write!(f, "arg"),
Clause::Apply { f: fun, x } => write!(f, "({fun} {})", x.iter().join(" ")),
Clause::Lambda { args, body } => match args {
Some(path) => write!(f, "[\\{path}.{body}]"),
None => write!(f, "[\\_.{body}]"),
},
Clause::Constant(t) => write!(f, "{t}"),
Clause::Identity(other) => write!(f, "{{{other}}}"),
}
}
}
// Trivial case of the uniform mutable-access trait: a bare [Clause] is
// already exclusively borrowed, so it simply returns itself.
impl AsDerefMut<Clause> for Clause {
fn as_deref_mut(&mut self) -> impl DerefMut<Target = Clause> + '_ { self }
}

View File

@@ -3,10 +3,7 @@ use std::ops::Range;
use ordered_float::NotNan;
use crate::error::{OrcErr, mk_err};
use crate::interner::Interner;
use crate::location::SrcRange;
use crate::name::Sym;
use crate::{OrcErrv, SrcRange, Sym, is, mk_errv};
/// A number, either floating point or unsigned int, parsed by Orchid.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
@@ -55,20 +52,15 @@ pub struct NumError {
pub kind: NumErrorKind,
}
pub async fn num_to_err(
NumError { kind, range }: NumError,
offset: u32,
source: &Sym,
i: &Interner,
) -> OrcErr {
mk_err(
i.i("Failed to parse number").await,
pub async fn num_to_errv(NumError { kind, range }: NumError, offset: u32, source: &Sym) -> OrcErrv {
mk_errv(
is("Failed to parse number").await,
match kind {
NumErrorKind::NaN => "NaN emerged during parsing",
NumErrorKind::InvalidDigit => "non-digit character encountered",
NumErrorKind::Overflow => "The number being described is too large or too accurate",
},
[SrcRange::new(offset + range.start as u32..offset + range.end as u32, source).pos().into()],
[SrcRange::new(offset + range.start as u32..offset + range.end as u32, source)],
)
}
@@ -79,7 +71,6 @@ pub fn parse_num(string: &str) -> Result<Numeric, NumError> {
.or_else(|| string.strip_prefix("0b").map(|s| (2u8, s, 2)))
.or_else(|| string.strip_prefix("0o").map(|s| (8u8, s, 2)))
.unwrap_or((10u8, string, 0));
eprintln!("({radix}, {noprefix}, {pos})");
// identity
let (base_s, exponent) = match noprefix.split_once('p') {
Some((b, e)) => {
@@ -88,15 +79,14 @@ pub fn parse_num(string: &str) -> Result<Numeric, NumError> {
},
None => (noprefix, 0),
};
eprintln!("({base_s},{exponent})");
match base_s.split_once('.') {
None => {
let base = int_parse(base_s, radix, pos)?;
if let Ok(pos_exp) = u32::try_from(exponent) {
if let Some(radical) = u64::from(radix).checked_pow(pos_exp) {
let num = base.checked_mul(radical).and_then(|m| m.try_into().ok()).ok_or(overflow_e)?;
return Ok(Numeric::Int(num));
}
if let Ok(pos_exp) = u32::try_from(exponent)
&& let Some(radical) = u64::from(radix).checked_pow(pos_exp)
{
let num = base.checked_mul(radical).and_then(|m| m.try_into().ok()).ok_or(overflow_e)?;
return Ok(Numeric::Int(num));
}
let f = (base as f64) * (radix as f64).powi(exponent);
let err = NumError { range: 0..string.len(), kind: NumErrorKind::NaN };

View File

@@ -1,261 +0,0 @@
//! Adaptor trait to embed Rust values in Orchid expressions
use std::any::Any;
use std::fmt;
use std::sync::{Arc, Mutex};
use never::Never;
use super::error::{RTError, RTResult};
use crate::interpreter::context::{RunEnv, RunParams};
use crate::interpreter::nort;
use crate::location::{CodeLocation, SourceRange};
use crate::name::NameLike;
use crate::parse::lexer::Lexeme;
use crate::parse::parsed;
use crate::utils::ddispatch::{request, Request, Responder};
/// Information returned by [Atomic::run].
pub enum AtomicReturn {
/// No work was done. If the atom takes an argument, it can be provided now
Inert(Atom),
/// Work was done, returns new clause and consumed gas. 1 gas is already
/// consumed by the virtual call, so nonzero values indicate expensive
/// operations.
Change(usize, nort::Clause),
}
impl AtomicReturn {
/// Report indicating that the value is inert. The result here is always [Ok],
/// it's meant to match the return type of [Atomic::run]
#[allow(clippy::unnecessary_wraps)]
pub fn inert<T: Atomic, E>(this: T) -> Result<Self, E> { Ok(Self::Inert(Atom::new(this))) }
}
/// Returned by [Atomic::run]
pub type AtomicResult = RTResult<AtomicReturn>;
/// General error produced when a non-function [Atom] is applied to something as
/// a function.
#[derive(Clone)]
pub struct NotAFunction(pub nort::Expr);
impl RTError for NotAFunction {}
impl fmt::Display for NotAFunction {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?} is not a function", self.0)
}
}
/// Information about a function call presented to an external function
pub struct CallData<'a, 'b> {
/// Location of the function expression
pub location: CodeLocation,
/// The argument the function was called on. Functions are curried
pub arg: nort::Expr,
/// Globally available information such as the list of all constants
pub env: &'a RunEnv<'b>,
/// Resource limits and other details specific to this interpreter run
pub params: &'a mut RunParams,
}
/// Information about a normalization run presented to an atom
pub struct RunData<'a, 'b> {
/// Location of the atom
pub location: CodeLocation,
/// Globally available information such as the list of all constants
pub env: &'a RunEnv<'b>,
/// Resource limits and other details specific to this interpreter run
pub params: &'a mut RunParams,
}
/// Functionality the interpreter needs to handle a value
///
/// # Lifecycle methods
///
/// Atomics expose the methods [Atomic::redirect], [Atomic::run],
/// [Atomic::apply] and [Atomic::apply_mut] to interact with the interpreter.
/// The interpreter first tries to call `redirect` to find a subexpression to
/// normalize. If it returns `None` or the subexpression is inert, `run` is
/// called. `run` takes ownership of the value and returns a new one.
///
/// If `run` indicated in its return value that the result is inert and the atom
/// is in the position of a function, `apply` or `apply_mut` is called depending
/// upon whether the atom is referenced elsewhere. `apply` falls back to
/// `apply_mut` so implementing it is considered an optimization to avoid
/// excessive copying.
///
/// Atoms don't generally have to be copyable because clauses are refcounted in
/// the interpreter, but Orchid code is always free to duplicate the references
/// and apply them as functions to multiple different arguments so atoms that
/// represent functions have to support application by-ref without consuming the
/// function itself.
pub trait Atomic: Any + fmt::Debug + Responder + Send
where Self: 'static
{
/// Casts this value to [Any] so that its original value can be salvaged
/// during introspection by other external code.
///
/// This function should be implemented in exactly one way:
///
/// ```ignore
/// fn as_any(self: Box<Self>) -> Box<dyn Any> { self }
/// ```
#[must_use]
fn as_any(self: Box<Self>) -> Box<dyn Any>;
/// See [Atomic::as_any], exactly the same but for references
#[must_use]
fn as_any_ref(&self) -> &dyn Any;
/// Print the atom's type name. Should only ever be implemented as
///
/// ```ignore
/// fn type_name(&self) -> &'static str { std::any::type_name::<Self>() }
/// ```
fn type_name(&self) -> &'static str;
/// Returns a reference to a possible expression held inside the atom which
/// can be reduced. For an overview of the lifecycle see [Atomic]
fn redirect(&mut self) -> Option<&mut nort::Expr>;
/// Attempt to normalize this value. If it wraps a value, this should report
/// inert. If it wraps a computation, it should execute one logical step of
/// the computation and return a structure representing the next.
///
/// For an overview of the lifecycle see [Atomic]
fn run(self: Box<Self>, run: RunData) -> AtomicResult;
/// Combine the function with an argument to produce a new clause. Falls back
/// to [Atomic::apply_mut] by default.
///
/// For an overview of the lifecycle see [Atomic]
fn apply(mut self: Box<Self>, call: CallData) -> RTResult<nort::Clause> { self.apply_mut(call) }
/// Combine the function with an argument to produce a new clause
///
/// For an overview of the lifecycle see [Atomic]
fn apply_mut(&mut self, call: CallData) -> RTResult<nort::Clause>;
/// Must return true for atoms parsed from identical source.
/// If the atom cannot be parsed from source, it can safely be ignored
#[allow(unused_variables)]
fn parser_eq(&self, other: &dyn Atomic) -> bool { false }
/// Wrap the atom in a clause to be placed in an [AtomicResult].
#[must_use]
fn atom_cls(self) -> nort::Clause
where Self: Sized {
nort::Clause::Atom(Atom(Box::new(self)))
}
/// Shorthand for `self.atom_cls().to_inst()`
fn atom_clsi(self) -> nort::ClauseInst
where Self: Sized {
self.atom_cls().into_inst()
}
/// Wrap the atom in a new expression instance to be placed in a tree
#[must_use]
fn atom_expr(self, location: CodeLocation) -> nort::Expr
where Self: Sized {
self.atom_clsi().into_expr(location)
}
/// Wrap the atom in a clause to be placed in a
/// [crate::parse::parsed::SourceLine].
#[must_use]
fn ast_cls(self) -> parsed::Clause
where Self: Sized + Clone {
parsed::Clause::Atom(AtomGenerator::cloner(self))
}
/// Wrap the atom in an expression to be placed in a
/// [crate::parse::parsed::SourceLine].
#[must_use]
fn ast_exp<N: NameLike>(self, range: SourceRange) -> parsed::Expr
where Self: Sized + Clone {
self.ast_cls().into_expr(range)
}
/// Wrap this atomic value in a lexeme. This means that the atom will
/// participate in macro reproject, so it must implement [Atomic::parser_eq].
fn lexeme(self) -> Lexeme
where Self: Sized + Clone {
Lexeme::Atom(AtomGenerator::cloner(self))
}
}
/// A struct for generating any number of [Atom]s. Since atoms aren't Clone,
/// this represents the ability to create any number of instances of an atom
#[derive(Clone)]
pub struct AtomGenerator(Arc<dyn Fn() -> Atom + Send + Sync>);
impl AtomGenerator {
/// Use a factory function to create any number of atoms
pub fn new(f: impl Fn() -> Atom + Send + Sync + 'static) -> Self { Self(Arc::new(f)) }
/// Clone a representative atom when called
pub fn cloner(atom: impl Atomic + Clone) -> Self {
let lock = Mutex::new(atom);
Self::new(move || Atom::new(lock.lock().unwrap().clone()))
}
/// Generate an atom
pub fn run(&self) -> Atom { self.0() }
}
impl fmt::Debug for AtomGenerator {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:?}", self.run()) }
}
impl PartialEq for AtomGenerator {
fn eq(&self, other: &Self) -> bool { self.run().0.parser_eq(&*other.run().0) }
}
/// Represents a black box unit of data with its own normalization steps.
/// Typically Rust functions integrated with [super::fn_bridge::xfn] will
/// produce and consume [Atom]s to represent both raw data, pending
/// computational tasks, and curried partial calls awaiting their next argument.
pub struct Atom(pub Box<dyn Atomic>);
impl Atom {
/// Wrap an [Atomic] in a type-erased box
#[must_use]
pub fn new<T: 'static + Atomic>(data: T) -> Self { Self(Box::new(data) as Box<dyn Atomic>) }
/// Get the contained data
#[must_use]
pub fn data(&self) -> &dyn Atomic { self.0.as_ref() as &dyn Atomic }
/// Test the type of the contained data without downcasting
#[must_use]
pub fn is<T: Atomic>(&self) -> bool { self.data().as_any_ref().is::<T>() }
/// Downcast contained data, panic if it isn't the specified type
#[must_use]
pub fn downcast<T: Atomic>(self) -> T {
*self.0.as_any().downcast().expect("Type mismatch on Atom::cast")
}
/// Normalize the contained data
pub fn run(self, run: RunData<'_, '_>) -> AtomicResult { self.0.run(run) }
/// Request a delegate from the encapsulated data
pub fn request<T: 'static>(&self) -> Option<T> { request(self.0.as_ref()) }
/// Downcast the atom to a concrete atomic type, or return the original atom
/// if it is not the specified type
pub fn try_downcast<T: Atomic>(self) -> Result<T, Self> {
match self.0.as_any_ref().is::<T>() {
true => Ok(*self.0.as_any().downcast().expect("checked just above")),
false => Err(self),
}
}
/// Downcast an atom by reference
pub fn downcast_ref<T: Atomic>(&self) -> Option<&T> { self.0.as_any_ref().downcast_ref() }
/// Combine the function with an argument to produce a new clause
pub fn apply(self, call: CallData) -> RTResult<nort::Clause> { self.0.apply(call) }
/// Combine the function with an argument to produce a new clause
pub fn apply_mut(&mut self, call: CallData) -> RTResult<nort::Clause> { self.0.apply_mut(call) }
}
impl fmt::Debug for Atom {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:?}", self.data()) }
}
impl Responder for Never {
fn respond(&self, _request: Request) { match *self {} }
}
impl Atomic for Never {
fn as_any(self: Box<Self>) -> Box<dyn Any> { match *self {} }
fn as_any_ref(&self) -> &dyn Any { match *self {} }
fn type_name(&self) -> &'static str { match *self {} }
fn redirect(&mut self) -> Option<&mut nort::Expr> { match *self {} }
fn run(self: Box<Self>, _: RunData) -> AtomicResult { match *self {} }
fn apply_mut(&mut self, _: CallData) -> RTResult<nort::Clause> { match *self {} }
}

View File

@@ -0,0 +1,5 @@
/// Scope guard that runs the wrapped closure exactly once, when dropped.
///
/// Construct via [on_drop]; keep the returned guard alive for as long as the
/// callback should be deferred.
#[must_use = "dropping the guard immediately runs the callback"]
pub struct OnDrop<F: FnOnce()>(Option<F>);
impl<F: FnOnce()> Drop for OnDrop<F> {
  fn drop(&mut self) {
    // The Option is Some from construction until drop; `take` moves the
    // FnOnce out so it can be called through `&mut self`.
    (self.0.take().unwrap())()
  }
}
/// Create a guard that invokes `f` when it goes out of scope.
pub fn on_drop<F: FnOnce()>(f: F) -> OnDrop<F> { OnDrop(Some(f)) }

View File

@@ -6,32 +6,19 @@ use futures::FutureExt;
use futures::future::join_all;
use itertools::Itertools;
use crate::api;
use crate::error::{OrcRes, Reporter, mk_err, mk_errv};
use crate::format::fmt;
use crate::interner::{Interner, Tok};
use crate::location::SrcRange;
use crate::name::{Sym, VName, VPath};
use crate::tree::{ExprRepr, ExtraTok, Paren, TokTree, Token, ttv_range};
pub trait ParseCtx {
#[must_use]
fn i(&self) -> &Interner;
#[must_use]
fn reporter(&self) -> &Reporter;
}
pub struct ParseCtxImpl<'a> {
pub i: &'a Interner,
pub r: &'a Reporter,
}
impl ParseCtx for ParseCtxImpl<'_> {
fn i(&self) -> &Interner { self.i }
fn reporter(&self) -> &Reporter { self.r }
}
use crate::{
ExprRepr, ExtraTok, FmtCtx, FmtUnit, Format, IStr, OrcErrv, OrcRes, Paren, SrcRange, Sym,
TokTree, Token, VName, VPath, api, es, fmt, is, mk_errv, report, ttv_fmt, ttv_range,
};
/// A character that can appear at the start of a name; `[a-zA-Z_]`
pub fn name_start(c: char) -> bool { c.is_alphabetic() || c == '_' }
/// A character that can appear inside a name after the start `[a-zA-Z0-9_]`
pub fn name_char(c: char) -> bool { name_start(c) || c.is_numeric() }
/// A character that can appear in an operator. Anything except
/// `a-zA-Z0-9_()[]{}\` or whitespace
pub fn op_char(c: char) -> bool { !name_char(c) && !c.is_whitespace() && !"()[]{}\\".contains(c) }
/// Any whitespace except a line break
pub fn unrep_space(c: char) -> bool { c.is_whitespace() && !"\r\n".contains(c) }
/// A cheaply copiable subsection of a document that holds onto context data and
@@ -46,7 +33,10 @@ where
A: ExprRepr,
X: ExtraTok,
{
/// Create a snippet from a fallback token for position tracking and a range
/// of tokens
pub fn new(prev: &'a TokTree<A, X>, cur: &'a [TokTree<A, X>]) -> Self { Self { prev, cur } }
/// Split snippet at index
pub fn split_at(self, pos: u32) -> (Self, Self) {
let Self { prev, cur } = self;
let fst = Self { prev, cur: &cur[..pos as usize] };
@@ -54,23 +44,35 @@ where
let snd = Self { prev: new_prev, cur: &self.cur[pos as usize..] };
(fst, snd)
}
/// Find the first index that matches a condition
pub fn find_idx(self, mut f: impl FnMut(&Token<A, X>) -> bool) -> Option<u32> {
self.cur.iter().position(|t| f(&t.tok)).map(|t| t as u32)
}
/// Get the n-th token
pub fn get(self, idx: u32) -> Option<&'a TokTree<A, X>> { self.cur.get(idx as usize) }
/// Count how many tokens long the current sequence is. Parenthesized
/// subsequences count as 1
pub fn len(self) -> u32 { self.cur.len() as u32 }
/// The fallback token that can be used for error reporting if this snippet is
/// unexpectedly empty
pub fn prev(self) -> &'a TokTree<A, X> { self.prev }
/// Create a position that describes all tokens in this snippet
pub fn sr(self) -> SrcRange { ttv_range(self.cur).unwrap_or_else(|| self.prev.sr.clone()) }
pub fn pop_front(self) -> Option<(&'a TokTree<A, X>, Self)> {
/// Split the first token
pub fn split_first(self) -> Option<(&'a TokTree<A, X>, Self)> {
self.cur.first().map(|r| (r, self.split_at(1).1))
}
pub fn pop_back(self) -> Option<(Self, &'a TokTree<A, X>)> {
/// Split the last token
pub fn split_last(self) -> Option<(Self, &'a TokTree<A, X>)> {
self.cur.last().map(|r| (self.split_at(self.len() - 1).0, r))
}
/// Split the snippet at the first token that matches the predicate
pub fn split_once(self, f: impl FnMut(&Token<A, X>) -> bool) -> Option<(Self, Self)> {
let idx = self.find_idx(f)?;
Some((self.split_at(idx).0, self.split_at(idx + 1).1))
}
/// Split the snippet at each occurrence of a delimiter matched by the
/// predicate
pub fn split(mut self, mut f: impl FnMut(&Token<A, X>) -> bool) -> impl Iterator<Item = Self> {
iter::from_fn(move || {
if self.is_empty() {
@@ -81,7 +83,10 @@ where
Some(ret)
})
}
pub fn is_empty(self) -> bool { self.len() == 0 }
/// Returns true if the snippet contains no tokens. Note that thanks to the
/// fallback, an empty snippet still has a queriable position
pub fn is_empty(self) -> bool { self.cur.is_empty() }
/// Skip tokens that are not meant to be significant inside expressions
pub fn skip_fluff(self) -> Self {
let non_fluff_start = self.find_idx(|t| !matches!(t, Token::BR | Token::Comment(_)));
self.split_at(non_fluff_start.unwrap_or(self.len())).1
@@ -95,39 +100,28 @@ impl<A: ExprRepr, X: ExtraTok> Deref for Snippet<'_, A, X> {
type Target = [TokTree<A, X>];
fn deref(&self) -> &Self::Target { self.cur }
}
/// Remove tokens that aren't meaningful in expression context, such as comments
/// or line breaks
pub fn strip_fluff<A: ExprRepr, X: ExtraTok>(tt: &TokTree<A, X>) -> Option<TokTree<A, X>> {
let tok = match &tt.tok {
Token::BR => return None,
Token::Comment(_) => return None,
Token::LambdaHead(arg) => Token::LambdaHead(arg.iter().filter_map(strip_fluff).collect()),
Token::S(p, b) => Token::S(*p, b.iter().filter_map(strip_fluff).collect()),
t => t.clone(),
};
Some(TokTree { tok, sr: tt.sr.clone() })
impl<A: ExprRepr, X: ExtraTok> Format for Snippet<'_, A, X> {
async fn print<'a>(&'a self, c: &'a (impl FmtCtx + ?Sized + 'a)) -> FmtUnit {
ttv_fmt(&**self, c).await
}
}
/// A comment as parsed from code
#[derive(Clone, Debug)]
pub struct Comment {
pub text: Tok<String>,
pub text: IStr,
pub sr: SrcRange,
}
impl Comment {
// XXX: which of these four are actually used?
pub async fn from_api(c: &api::Comment, src: Sym, i: &Interner) -> Self {
Self { text: i.ex(c.text).await, sr: SrcRange::new(c.range.clone(), &src) }
pub async fn from_api(c: &api::Comment, src: Sym) -> Self {
Self { text: es(c.text).await, sr: SrcRange::new(c.range.clone(), &src) }
}
pub async fn from_tk(tk: &TokTree<impl ExprRepr, impl ExtraTok>, i: &Interner) -> Option<Self> {
pub async fn from_tk(tk: &TokTree<impl ExprRepr, impl ExtraTok>) -> Option<Self> {
match &tk.tok {
Token::Comment(text) => Some(Self { text: i.i(&**text).await, sr: tk.sr.clone() }),
Token::Comment(text) => Some(Self { text: text.clone(), sr: tk.sr.clone() }),
_ => None,
}
}
pub fn to_tk<R: ExprRepr, X: ExtraTok>(&self) -> TokTree<R, X> {
TokTree { tok: Token::Comment(self.text.rc().clone()), sr: self.sr.clone() }
}
pub fn to_api(&self) -> api::Comment {
api::Comment { range: self.sr.range(), text: self.text.to_api() }
}
@@ -137,8 +131,16 @@ impl fmt::Display for Comment {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "--[{}]--", self.text) }
}
/// Split a snippet into items by line breaks outside parentheses, unwrap lines
/// that are entirely wrapped in a single set of round parens for multiline
/// items, and associate all comments with the next non-comment line
///
/// The parse result's [Parsed::output] is the comments, the line's contents are
/// held in [Parsed::tail] so semantically your line parser "continues" for each
/// line separately
///
/// This is the procedure for module parsing
pub async fn line_items<'a, A: ExprRepr, X: ExtraTok>(
ctx: &impl ParseCtx,
snip: Snippet<'a, A, X>,
) -> Vec<Parsed<'a, Vec<Comment>, A, X>> {
let mut items = Vec::new();
@@ -153,9 +155,10 @@ pub async fn line_items<'a, A: ExprRepr, X: ExtraTok>(
None => comments.extend(line.cur),
Some(i) => {
let (cmts, tail) = line.split_at(i);
let comments = join_all(comments.drain(..).chain(cmts.cur).map(|t| async {
Comment::from_tk(t, ctx.i()).await.expect("All are comments checked above")
}))
let comments = join_all(
(comments.drain(..).chain(cmts.cur))
.map(|t| async { Comment::from_tk(t).await.expect("All are comments checked above") }),
)
.await;
items.push(Parsed { output: comments, tail });
},
@@ -164,27 +167,24 @@ pub async fn line_items<'a, A: ExprRepr, X: ExtraTok>(
items
}
/// Pop the next token that isn't a comment or line break
pub async fn try_pop_no_fluff<'a, A: ExprRepr, X: ExtraTok>(
ctx: &impl ParseCtx,
snip: Snippet<'a, A, X>,
) -> ParseRes<'a, &'a TokTree<A, X>, A, X> {
match snip.skip_fluff().pop_front() {
match snip.skip_fluff().split_first() {
Some((output, tail)) => Ok(Parsed { output, tail }),
None => Err(mk_errv(
ctx.i().i("Unexpected end").await,
"Line ends abruptly; more tokens were expected",
[snip.sr()],
)),
None =>
Err(mk_errv(is("Unexpected end").await, "Line ends abruptly; more tokens were expected", [
snip.sr(),
])),
}
}
pub async fn expect_end(
ctx: &impl ParseCtx,
snip: Snippet<'_, impl ExprRepr, impl ExtraTok>,
) -> OrcRes<()> {
/// Fail if the snippet isn't empty
pub async fn expect_end(snip: Snippet<'_, impl ExprRepr, impl ExtraTok>) -> OrcRes<()> {
match snip.skip_fluff().get(0) {
Some(surplus) => Err(mk_errv(
ctx.i().i("Extra code after end of line").await,
is("Extra code after end of line").await,
"Code found after the end of the line",
[surplus.sr.pos()],
)),
@@ -192,57 +192,65 @@ pub async fn expect_end(
}
}
/// Read a token and ensure that it matches the specified keyword
pub async fn expect_tok<'a, A: ExprRepr, X: ExtraTok>(
ctx: &impl ParseCtx,
snip: Snippet<'a, A, X>,
tok: Tok<String>,
tok: IStr,
) -> ParseRes<'a, (), A, X> {
let Parsed { output: head, tail } = try_pop_no_fluff(ctx, snip).await?;
let Parsed { output: head, tail } = try_pop_no_fluff(snip).await?;
match &head.tok {
Token::Name(n) if *n == tok => Ok(Parsed { output: (), tail }),
t => Err(mk_errv(
ctx.i().i("Expected specific keyword").await,
format!("Expected {tok} but found {:?}", fmt(t, ctx.i()).await),
is("Expected specific keyword").await,
format!("Expected {tok} but found {:?}", fmt(t).await),
[head.sr()],
)),
}
}
/// Report an error related to a token that can conveniently use the token's
/// text representation in the long message
pub async fn token_errv<A: ExprRepr, X: ExtraTok>(
tok: &TokTree<A, X>,
description: &'static str,
message: impl FnOnce(&str) -> String,
) -> OrcErrv {
mk_errv(is(description).await, message(&fmt(tok).await), [tok.sr.pos()])
}
/// Success output of parsers
pub struct Parsed<'a, T, H: ExprRepr, X: ExtraTok> {
/// Information obtained from consumed tokens
pub output: T,
/// Input to next parser
pub tail: Snippet<'a, H, X>,
}
pub type ParseRes<'a, T, H, X> = OrcRes<Parsed<'a, T, H, X>>;
/// Parse a `namespaced::name` or a `namespaced::(multi name)`
pub async fn parse_multiname<'a, A: ExprRepr, X: ExtraTok>(
ctx: &impl ParseCtx,
tail: Snippet<'a, A, X>,
) -> ParseRes<'a, Vec<Import>, A, X> {
let Some((tt, tail)) = tail.skip_fluff().pop_front() else {
let Some((tt, tail)) = tail.skip_fluff().split_first() else {
return Err(mk_errv(
ctx.i().i("Expected token").await,
is("Expected token").await,
"Expected a name, a parenthesized list of names, or a globstar.",
[tail.sr().pos()],
));
};
let ret = rec(tt, ctx).await;
let ret = rec(tt).await;
#[allow(clippy::type_complexity)] // it's an internal function
pub async fn rec<A: ExprRepr, X: ExtraTok>(
tt: &TokTree<A, X>,
ctx: &impl ParseCtx,
) -> OrcRes<Vec<(Vec<Tok<String>>, Option<Tok<String>>, SrcRange)>> {
) -> OrcRes<Vec<(Vec<IStr>, Option<IStr>, SrcRange)>> {
let ttpos = tt.sr.pos();
match &tt.tok {
Token::NS(ns, body) => {
if !ns.starts_with(name_start) {
ctx.reporter().report(mk_err(
ctx.i().i("Unexpected name prefix").await,
"Only names can precede ::",
[ttpos.into()],
))
report(mk_errv(is("Unexpected name prefix").await, "Only names can precede ::", [ttpos]))
};
let out = Box::pin(rec(body, ctx)).await?;
let out = Box::pin(rec(body)).await?;
Ok(out.into_iter().update(|i| i.0.push(ns.clone())).collect_vec())
},
Token::Name(ntok) => {
@@ -253,22 +261,20 @@ pub async fn parse_multiname<'a, A: ExprRepr, X: ExtraTok>(
Token::S(Paren::Round, b) => {
let mut o = Vec::new();
let mut body = Snippet::new(tt, b);
while let Some((output, tail)) = body.pop_front() {
match rec(output, ctx).boxed_local().await {
while let Some((output, tail)) = body.split_first() {
match rec(output).boxed_local().await {
Ok(names) => o.extend(names),
Err(e) => ctx.reporter().report(e),
Err(e) => report(e),
}
body = tail;
}
Ok(o)
},
t => {
return Err(mk_errv(
ctx.i().i("Unrecognized name end").await,
format!("Names cannot end with {:?} tokens", fmt(t, ctx.i()).await),
[ttpos],
));
},
t => Err(mk_errv(
is("Unrecognized name end").await,
format!("Names cannot end with {:?} tokens", fmt(t).await),
[ttpos],
)),
}
}
ret.map(|output| {
@@ -284,7 +290,7 @@ pub async fn parse_multiname<'a, A: ExprRepr, X: ExtraTok>(
#[derive(Debug, Clone)]
pub struct Import {
pub path: VPath,
pub name: Option<Tok<String>>,
pub name: Option<IStr>,
pub sr: SrcRange,
}
impl Import {
@@ -295,10 +301,14 @@ impl Import {
None => self.path.into_name().expect("Import cannot be empty"),
}
}
pub fn new(sr: SrcRange, path: VPath, name: IStr) -> Self {
Import { path, name: Some(name), sr }
}
pub fn new_glob(sr: SrcRange, path: VPath) -> Self { Import { path, name: None, sr } }
}
impl Display for Import {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}::{}", self.path.iter().join("::"), self.name.as_ref().map_or("*", |t| t.as_str()))
write!(f, "{}::{}", self.path.iter().join("::"), self.name.as_ref().map_or("*", |t| &**t))
}
}

View File

@@ -1,35 +0,0 @@
//! Methods to operate on Rust vectors in a declarative manner
use std::iter;
/// Pure version of [Vec::push]
///
/// Create a new vector consisting of the provided vector with the
/// element appended. See [pushed_ref] to use it with a slice
pub fn pushed<I: IntoIterator, C: FromIterator<I::Item>>(vec: I, t: I::Item) -> C {
vec.into_iter().chain(iter::once(t)).collect()
}
/// Pure version of [Vec::push]
///
/// Create a new vector consisting of the provided slice with the
/// element appended. See [pushed] for the owned version
pub fn pushed_ref<'a, T: Clone + 'a, C: FromIterator<T>>(
vec: impl IntoIterator<Item = &'a T>,
t: T,
) -> C {
vec.into_iter().cloned().chain(iter::once(t)).collect()
}
/// Push an element on the adhoc stack, pass it to the callback, then pop the
/// element out again.
pub fn with_pushed<T, U>(
vec: &mut Vec<T>,
item: T,
cb: impl for<'a> FnOnce(&'a mut Vec<T>) -> U,
) -> (T, U) {
vec.push(item);
let out = cb(vec);
let item = vec.pop().expect("top element stolen by callback");
(item, out)
}

View File

@@ -1,340 +0,0 @@
use std::any::Any;
use std::cell::RefCell;
use std::future::Future;
use std::marker::PhantomData;
use std::mem;
use std::ops::{BitAnd, Deref};
use std::pin::Pin;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use async_std::channel;
use async_std::sync::Mutex;
use derive_destructure::destructure;
use dyn_clone::{DynClone, clone_box};
use futures::future::LocalBoxFuture;
use hashbrown::HashMap;
use orchid_api_traits::{Channel, Coding, Decode, Encode, MsgSet, Request};
use trait_set::trait_set;
use crate::clone;
use crate::logging::Logger;
pub struct Receipt<'a>(PhantomData<&'a mut ()>);
trait_set! {
pub trait SendFn<T: MsgSet> =
for<'a> FnMut(&'a [u8], ReqNot<T>) -> LocalBoxFuture<'a, ()>
+ DynClone + 'static;
pub trait ReqFn<T: MsgSet> =
for<'a> FnMut(RequestHandle<'a, T>, <T::In as Channel>::Req)
-> LocalBoxFuture<'a, Receipt<'a>>
+ DynClone + 'static;
pub trait NotifFn<T: MsgSet> =
FnMut(<T::In as Channel>::Notif, ReqNot<T>) -> LocalBoxFuture<'static, ()>
+ DynClone + 'static;
}
fn get_id(message: &[u8]) -> (u64, &[u8]) {
(u64::from_be_bytes(message[..8].to_vec().try_into().unwrap()), &message[8..])
}
pub trait ReqHandlish {
fn defer_drop(&self, val: impl Any + 'static)
where Self: Sized {
self.defer_drop_objsafe(Box::new(val));
}
fn defer_drop_objsafe(&self, val: Box<dyn Any>);
}
// Forward through a trait-object reference so `&dyn ReqHandlish` is usable
// wherever an owned handle is.
impl ReqHandlish for &'_ dyn ReqHandlish {
  fn defer_drop_objsafe(&self, val: Box<dyn Any>) { (**self).defer_drop_objsafe(val) }
}
/// Handle for a single in-flight incoming request. Exactly one response must
/// be sent through it before it is dropped.
#[derive(destructure)]
pub struct RequestHandle<'a, MS: MsgSet> {
  // Values whose destruction is postponed until the handle itself is dropped
  defer_drop: RefCell<Vec<Box<dyn Any>>>,
  // Flipped to true by the first (and only permitted) response
  fulfilled: AtomicBool,
  // Id of the message this handle answers; the reply is sent under `!id`
  id: u64,
  // Pins the handle to the lifetime of the request being served
  _reqlt: PhantomData<&'a mut ()>,
  parent: ReqNot<MS>,
}
impl<'a, MS: MsgSet + 'static> RequestHandle<'a, MS> {
  /// Create a handle for incoming request `id`, initially unanswered.
  fn new(parent: ReqNot<MS>, id: u64) -> Self {
    Self {
      defer_drop: RefCell::default(),
      fulfilled: false.into(),
      _reqlt: PhantomData,
      parent,
      id,
    }
  }
  /// Clone out the owning messaging endpoint.
  pub fn reqnot(&self) -> ReqNot<MS> { self.parent.clone() }
  /// Respond to a request; the request value is only used to infer `U`.
  pub async fn handle<U: Request>(&self, _: &U, rep: &U::Response) -> Receipt<'a> {
    self.respond(rep).await
  }
  /// Capture the request type in a token so a response of the matching type
  /// can be produced later without holding the request itself.
  pub fn will_handle_as<U: Request>(&self, _: &U) -> ReqTypToken<U> { ReqTypToken(PhantomData) }
  /// Respond with the type recorded by [Self::will_handle_as].
  pub async fn handle_as<U: Request>(&self, _: ReqTypToken<U>, rep: &U::Response) -> Receipt<'a> {
    self.respond(rep).await
  }
  /// Encode and send the response.
  ///
  /// # Panics
  ///
  /// Panics if this handle was already responded to.
  pub async fn respond(&self, response: &impl Encode) -> Receipt<'a> {
    assert!(!self.fulfilled.swap(true, Ordering::Relaxed), "Already responded to {}", self.id);
    // Replies are tagged with the bitwise negation of the request id; the
    // receiving side detects replies by the high bit being set.
    let mut buf = (!self.id).to_be_bytes().to_vec();
    response.encode(Pin::new(&mut buf)).await;
    let mut send = clone_box(&*self.reqnot().0.lock().await.send);
    (send)(&buf, self.parent.clone()).await;
    Receipt(PhantomData)
  }
}
impl<MS: MsgSet> ReqHandlish for RequestHandle<'_, MS> {
  // Deferred values are dropped together with the handle, after the response
  fn defer_drop_objsafe(&self, val: Box<dyn Any>) { self.defer_drop.borrow_mut().push(val); }
}
impl<MS: MsgSet> Drop for RequestHandle<'_, MS> {
  fn drop(&mut self) {
    // Debug builds flag requests that were dropped without ever responding;
    // in release this is silently tolerated.
    let done = self.fulfilled.load(Ordering::Relaxed);
    debug_assert!(done, "Request {} dropped without response", self.id)
  }
}
/// Marker capturing a request type, produced by [RequestHandle::will_handle_as].
pub struct ReqTypToken<T>(PhantomData<T>);
/// Shared mutable state behind a [ReqNot] endpoint.
pub struct ReqNotData<T: MsgSet> {
  // Next outgoing request id; starts at 1 because 0 marks notifications
  id: u64,
  send: Box<dyn SendFn<T>>,
  notif: Box<dyn NotifFn<T>>,
  req: Box<dyn ReqFn<T>>,
  // Reply channels keyed by request id, completed when the response arrives
  responses: HashMap<u64, channel::Sender<Vec<u8>>>,
}
/// Wraps a raw message buffer to save on copying.
/// Dereferences to the tail of the message buffer, cutting off the ID
#[derive(Debug, Clone)]
pub struct RawReply(Vec<u8>);
impl Deref for RawReply {
  type Target = [u8];
  // Skip the 8-byte id prefix; consumers only decode the payload
  fn deref(&self) -> &Self::Target { get_id(&self.0[..]).1 }
}
/// A cheaply clonable request/notification messaging endpoint; clones share
/// the same underlying state.
pub struct ReqNot<T: MsgSet>(Arc<Mutex<ReqNotData<T>>>, Logger);
impl<T: MsgSet> ReqNot<T> {
  /// Build an endpoint from the three transport callbacks.
  pub fn new(
    logger: Logger,
    send: impl SendFn<T>,
    notif: impl NotifFn<T>,
    req: impl ReqFn<T>,
  ) -> Self {
    Self(
      Arc::new(Mutex::new(ReqNotData {
        // ids start at 1; 0 is reserved for notifications
        id: 1,
        send: Box::new(send),
        notif: Box::new(notif),
        req: Box::new(req),
        responses: HashMap::new(),
      })),
      logger,
    )
  }
  /// Can be called from a polling thread or dispatched in any other way
  pub async fn receive(&self, message: &[u8]) {
    let mut g = self.0.lock().await;
    let (id, payload) = get_id(message);
    if id == 0 {
      // id 0 marks a notification; no reply is expected
      let mut notif_cb = clone_box(&*g.notif);
      // drop the state lock before awaiting user code so the callback can
      // safely re-enter this endpoint
      mem::drop(g);
      let notif_val = <T::In as Channel>::Notif::decode(Pin::new(&mut &payload[..])).await;
      notif_cb(notif_val, self.clone()).await
    } else if 0 < id.bitand(1 << 63) {
      // high bit set: this is a reply to one of our requests, tagged with the
      // bitwise negation of the original request id
      let sender = g.responses.remove(&!id).expect("Received response for invalid message");
      sender.send(message.to_vec()).await.unwrap()
    } else {
      // otherwise this is an incoming request for the req callback
      let message = <T::In as Channel>::Req::decode(Pin::new(&mut &payload[..])).await;
      let mut req_cb = clone_box(&*g.req);
      mem::drop(g);
      let rn = self.clone();
      req_cb(RequestHandle::new(rn, id), message).await;
    }
  }
  /// Encode and send a notification; fire-and-forget, no acknowledgement.
  pub async fn notify<N: Coding + Into<<T::Out as Channel>::Notif>>(&self, notif: N) {
    let mut send = clone_box(&*self.0.lock().await.send);
    // 8 zero bytes encode notification id 0
    let mut buf = vec![0; 8];
    let msg: <T::Out as Channel>::Notif = notif.into();
    msg.encode(Pin::new(&mut buf)).await;
    send(&buf, self.clone()).await
  }
}
/// Object-safe requester interface yielding raw, undecoded reply buffers.
pub trait DynRequester {
  /// Payload type this requester accepts.
  type Transfer;
  /// Logger used to trace request/response pairs.
  fn logger(&self) -> &Logger;
  /// Encode and send a request, then receive the response buffer.
  fn raw_request(&self, data: Self::Transfer) -> LocalBoxFuture<'_, RawReply>;
}
/// A requester that accepts `T` by first converting it into the payload type
/// of an underlying requester. Created via [Requester::map].
pub struct MappedRequester<'a, T: 'a>(Box<dyn Fn(T) -> LocalBoxFuture<'a, RawReply> + 'a>, Logger);
impl<'a, T> MappedRequester<'a, T> {
  /// Wrap `req` so that incoming `T` values are first mapped with `cb`.
  fn new<U: DynRequester + 'a, F: Fn(T) -> U::Transfer + 'a>(
    req: U,
    cb: F,
    logger: Logger,
  ) -> Self {
    // Both captures are shared with every future the closure produces,
    // hence the Arcs cloned per invocation
    let req_arc = Arc::new(req);
    let cb_arc = Arc::new(cb);
    MappedRequester(
      Box::new(move |t| {
        Box::pin(clone!(req_arc, cb_arc; async move { req_arc.raw_request(cb_arc(t)).await}))
      }),
      logger,
    )
  }
}
impl<T> DynRequester for MappedRequester<'_, T> {
  type Transfer = T;
  fn logger(&self) -> &Logger { &self.1 }
  // Delegates to the mapping closure built in MappedRequester::new
  fn raw_request(&self, data: Self::Transfer) -> LocalBoxFuture<'_, RawReply> { self.0(data) }
}
impl<T: MsgSet> DynRequester for ReqNot<T> {
  type Transfer = <T::Out as Channel>::Req;
  fn logger(&self) -> &Logger { &self.1 }
  fn raw_request(&self, req: Self::Transfer) -> LocalBoxFuture<'_, RawReply> {
    Box::pin(async move {
      let mut g = self.0.lock().await;
      // Allocate a fresh id and register a bounded(1) reply channel under it
      let id = g.id;
      g.id += 1;
      let mut buf = id.to_be_bytes().to_vec();
      req.encode(Pin::new(&mut buf)).await;
      let (send, recv) = channel::bounded(1);
      g.responses.insert(id, send);
      let mut send = clone_box(&*g.send);
      // Release the state lock before sending and awaiting the reply, so
      // incoming messages can be processed in the meantime
      mem::drop(g);
      let rn = self.clone();
      send(&buf, rn).await;
      // Completed by `receive` when the matching reply (id `!id`) arrives
      let items = recv.recv().await;
      RawReply(items.unwrap())
    })
  }
}
/// Typed convenience layer over [DynRequester].
pub trait Requester: DynRequester {
  #[must_use = "These types are subject to change with protocol versions. \
  If you don't want to use the return value, At a minimum, force the type."]
  /// Send `data` and decode the reply into the request's response type.
  fn request<R: Request + Into<Self::Transfer>>(
    &self,
    data: R,
  ) -> impl Future<Output = R::Response>;
  /// Adapt this requester to accept a different payload type via `cb`.
  fn map<'a, U>(self, cb: impl Fn(U) -> Self::Transfer + 'a) -> MappedRequester<'a, U>
  where Self: Sized + 'a {
    let logger = self.logger().clone();
    MappedRequester::new(self, cb, logger)
  }
}
// Blanket impl: every DynRequester gets the typed `request` for free.
impl<This: DynRequester + ?Sized> Requester for This {
  async fn request<R: Request + Into<Self::Transfer>>(&self, data: R) -> R::Response {
    // Render the request for logging before `data` is consumed below
    let req = format!("{data:?}");
    let rep = R::Response::decode(Pin::new(&mut &self.raw_request(data.into()).await[..])).await;
    writeln!(self.logger(), "Request {req} got response {rep:?}");
    rep
  }
}
// Clones share the same underlying state through the Arc
impl<T: MsgSet> Clone for ReqNot<T> {
  fn clone(&self) -> Self { Self(self.0.clone(), self.1.clone()) }
}
#[cfg(test)]
mod test {
  use std::rc::Rc;
  use std::sync::Arc;
  use async_std::sync::Mutex;
  use futures::FutureExt;
  use orchid_api::LogStrategy;
  use orchid_api_derive::Coding;
  use orchid_api_traits::{Channel, Request};
  use test_executors::spin_on;
  use super::{MsgSet, ReqNot};
  use crate::clone;
  use crate::logging::Logger;
  use crate::reqnot::Requester as _;
  /// Minimal request type: carries a byte, expects a byte in response.
  #[derive(Clone, Debug, Coding, PartialEq)]
  pub struct TestReq(u8);
  impl Request for TestReq {
    type Response = u8;
  }
  /// Channel where both notifications and requests are byte-sized.
  pub struct TestChan;
  impl Channel for TestChan {
    type Notif = u8;
    type Req = TestReq;
  }
  /// Symmetric message set: the same channel in both directions.
  pub struct TestMsgSet;
  impl MsgSet for TestMsgSet {
    type In = TestChan;
    type Out = TestChan;
  }
  /// Two endpoints wired directly together; notifications sent by `sender`
  /// must land in `received` on the receiver side.
  #[test]
  fn notification() {
    spin_on(async {
      let logger = Logger::new(LogStrategy::StdErr);
      let received = Arc::new(Mutex::new(None));
      let receiver = ReqNot::<TestMsgSet>::new(
        logger.clone(),
        |_, _| panic!("Should not send anything"),
        clone!(received; move |notif, _| clone!(received; async move {
          *received.lock().await = Some(notif);
        }.boxed_local())),
        |_, _| panic!("Not receiving a request"),
      );
      let sender = ReqNot::<TestMsgSet>::new(
        logger,
        // sender's transport feeds bytes straight into the receiver
        clone!(receiver; move |d, _| clone!(receiver; Box::pin(async move {
          receiver.receive(d).await
        }))),
        |_, _| panic!("Should not receive notif"),
        |_, _| panic!("Should not receive request"),
      );
      sender.notify(3).await;
      assert_eq!(*received.lock().await, Some(3));
      sender.notify(4).await;
      assert_eq!(*received.lock().await, Some(4));
    })
  }
  /// Full round trip: the receiver answers TestReq(5) with 6, and the typed
  /// `request` call on the sender decodes that response.
  #[test]
  fn request() {
    spin_on(async {
      let logger = Logger::new(LogStrategy::StdErr);
      // receiver is created second, so the sender looks it up through a cell
      let receiver = Rc::new(Mutex::<Option<ReqNot<TestMsgSet>>>::new(None));
      let sender = Rc::new(ReqNot::<TestMsgSet>::new(
        logger.clone(),
        clone!(receiver; move |d, _| clone!(receiver; Box::pin(async move {
          receiver.lock().await.as_ref().unwrap().receive(d).await
        }))),
        |_, _| panic!("Should not receive notif"),
        |_, _| panic!("Should not receive request"),
      ));
      *receiver.lock().await = Some(ReqNot::new(
        logger,
        clone!(sender; move |d, _| clone!(sender; Box::pin(async move {
          sender.receive(d).await
        }))),
        |_, _| panic!("Not receiving notifs"),
        |hand, req| {
          Box::pin(async move {
            assert_eq!(req, TestReq(5));
            hand.respond(&6u8).await
          })
        },
      ));
      let response = sender.request(TestReq(5)).await;
      assert_eq!(response, 6);
    })
  }
}

View File

@@ -1,27 +0,0 @@
//! An alternative to `Iterable` in many languages, a [Fn] that returns an
//! iterator.
use std::rc::Rc;
use trait_set::trait_set;
use super::boxed_iter::BoxedIter;
trait_set! {
  /// Thunk that produces a fresh boxed iterator on every call.
  trait Payload<'a, T> = Fn() -> BoxedIter<'a, T> + 'a;
}
/// Dynamic iterator building callback. Given how many trait objects this
/// involves, it may actually be slower than C#.
///
/// Cheap to clone; clones share the underlying callback.
pub struct Sequence<'a, T: 'a>(Rc<dyn Payload<'a, T>>);
impl<'a, T: 'a> Sequence<'a, T> {
  /// Construct from a concrete function returning a concrete iterator
  pub fn new<I: IntoIterator<Item = T> + 'a>(f: impl Fn() -> I + 'a) -> Self {
    // Adapt the concrete-iterable factory into a boxed-iterator thunk
    let thunk = move || -> BoxedIter<'a, T> { Box::new(f().into_iter()) };
    Self(Rc::new(thunk))
  }
  /// Get an iterator from the function
  pub fn iter(&self) -> BoxedIter<'_, T> {
    let produce = &*self.0;
    produce()
  }
}
impl<'a, T: 'a> Clone for Sequence<'a, T> {
fn clone(&self) -> Self { Self(self.0.clone()) }
}

View File

@@ -4,7 +4,7 @@
use std::fmt;
use std::ops::Not;
use crate::boxed_iter::BoxedIter;
use itertools::Either;
/// A primitive for encoding the two sides Left and Right. While booleans
/// are technically usable for this purpose, they're very easy to confuse
@@ -67,10 +67,13 @@ impl Side {
}
/// Walk a double ended iterator (assumed to be left-to-right) in this
/// direction
pub fn walk<'a, I: DoubleEndedIterator + 'a>(&self, iter: I) -> BoxedIter<'a, I::Item> {
pub fn walk<'a, I: DoubleEndedIterator + 'a>(
&self,
iter: I,
) -> impl Iterator<Item = I::Item> + 'a {
match self {
Side::Right => Box::new(iter) as BoxedIter<I::Item>,
Side::Left => Box::new(iter.rev()),
Side::Right => Either::Right(iter),
Side::Left => Either::Left(iter.rev()),
}
}
}

188
orchid-base/src/stash.rs Normal file
View File

@@ -0,0 +1,188 @@
//! A pattern for running async code from sync destructors and other
//! unfortunately sync callbacks, and for ensuring that these futures finish in
//! a timely fashion
//!
//! We create a task_local vecdeque which is moved into a thread_local whenever
//! the task is being polled. A call to [stash] pushes the future onto this
//! deque. Before [with_stash] returns, it awaits everything stashed up to that
//! point or inside the stashed futures.
use std::cell::RefCell;
use std::pin::Pin;
use std::task::{Context, Poll};
use futures::StreamExt;
use futures::future::LocalBoxFuture;
use futures::stream::FuturesUnordered;
use orchid_async_utils::cancel_cleanup;
thread_local! {
  /// # Invariant
  ///
  /// Any function that changes the value of this thread_local must restore it before returning
  ///
  /// `None` means no [with_stash] scope is currently polling on this thread.
  static CURRENT_STASH: RefCell<Option<Vec<LocalBoxFuture<'static, ()>>>> = RefCell::default();
}
/// Complete the argument future, and any futures spawned from it via [stash].
/// This is useful mostly to guarantee that messaging destructors have run.
///
/// # Cancellation
///
/// To ensure that stashed futures run, the returned future re-stashes them a
/// layer above when dropped. Therefore cancelling `with_stash` is only safe
/// within an enclosing `with_stash` outside of a panic.
pub fn with_stash<F: Future>(fut: F) -> impl Future<Output = F::Output> {
  // Starts in the Main phase; stashed futures accumulate in `stash`
  WithStash { stash: FuturesUnordered::new(), state: WithStashState::Main(fut) }
}
/// Schedule a future to be run before the next [with_stash] guard ends. This is
/// most useful for sending messages from destructors.
///
/// # Panics
///
/// If no enclosing stash is found, this function panics, unless we are already
/// panicking. The assumption is that a panic is a vis-major where proper
/// cleanup is secondary to avoiding an abort.
pub fn stash<F: Future<Output = ()> + 'static>(fut: F) {
  CURRENT_STASH.with(|stash| {
    let mut g = stash.borrow_mut();
    let Some(stash) = g.as_mut() else {
      // Never panic during unwinding: a double panic aborts the process
      if !std::thread::panicking() {
        panic!("No stash! Timely completion cannot be guaranteed");
      }
      return;
    };
    stash.push(Box::pin(fut))
  })
}
/// Drive `fut` to completion, or — if the returned future is dropped before
/// finishing — hand the unfinished future to [stash] so it still runs before
/// the enclosing [with_stash] ends (its output is discarded in that case).
/// The cancellation handoff is implemented by `cancel_cleanup`.
pub fn finish_or_stash<F: Future + Unpin + 'static>(
  fut: F,
) -> impl Future<Output = F::Output> + Unpin + 'static {
  cancel_cleanup(fut, |fut| {
    stash(async {
      fut.await;
    })
  })
}
/// Phases of [WithStash]: first drive the main future, then drain the stash.
enum WithStashState<F: Future> {
  Main(F),
  Stash {
    /// Optional to simplify state management, but only ever null on a very
    /// short stretch
    output: Option<F::Output>,
  },
}
/// Future returned by [with_stash].
struct WithStash<F: Future> {
  // Futures registered via `stash` while `state` was being polled
  stash: FuturesUnordered<LocalBoxFuture<'static, ()>>,
  state: WithStashState<F>,
}
impl<F: Future> Future for WithStash<F> {
  type Output = F::Output;
  fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
    // SAFETY: the only non-Unpin item is Main#main, and it's pinned right back
    let Self { state, stash } = unsafe { Pin::get_unchecked_mut(self) };
    if let WithStashState::Main(main) = state {
      // SAFETY: this comes from the pin we break on the line above
      let main = unsafe { Pin::new_unchecked(main) };
      // Install a fresh stash for the duration of this poll, saving any outer one
      let prev = CURRENT_STASH.with_borrow_mut(|key| key.replace(Vec::new()));
      let poll = main.poll(cx);
      // Restore the outer stash and collect whatever this poll stashed
      let stash_init = CURRENT_STASH
        .with_borrow_mut(|key| std::mem::replace(key, prev))
        .expect("We put a Some() in here and CURRENT_STASH demands restoration");
      stash.extend(stash_init);
      if let Poll::Ready(o) = poll {
        // skip this branch from this point onwards
        *state = WithStashState::Stash { output: Some(o) };
      }
    }
    match state {
      // Either the main future is still pending, or (transiently) the output
      // slot was already emptied
      WithStashState::Main(_) | WithStashState::Stash { output: None, .. } => Poll::Pending,
      WithStashState::Stash { output: output @ Some(_) } => loop {
        // if the queue has new elements, poll_next_unpin has to be called in the next
        // loop to ensure that wake-ups are triggered for them too, and if
        // poll_next_unpin is called, the queue may get yet more elements synchronously,
        // hence the loop
        let prev = CURRENT_STASH.with_borrow_mut(|key| key.replace(Vec::new()));
        let poll = stash.poll_next_unpin(cx);
        let stash_new = CURRENT_STASH
          .with_borrow_mut(|key| std::mem::replace(key, prev))
          .expect("We put a Some() in here and CURRENT_STASH demands restoration");
        stash.extend(stash_new);
        match poll {
          Poll::Ready(None) if stash.is_empty() => {
            // Queue fully drained and nothing new was stashed: we're done
            let output = output.take().expect("Checked in branching");
            break Poll::Ready(output);
          },
          Poll::Pending => {
            break Poll::Pending;
          },
          Poll::Ready(_) => continue,
        }
      },
    }
  }
}
impl<F: Future> Drop for WithStash<F> {
  fn drop(&mut self) {
    if std::thread::panicking() {
      eprintln!("Panicking through with_stash may silently drop stashed cleanup work")
    }
    // Re-stash any leftover futures into the enclosing with_stash; during a
    // panic with no enclosing stash, `stash` drops them silently (see above)
    for future in std::mem::take(&mut self.stash) {
      stash(future);
    }
  }
}
#[cfg(test)]
mod test {
  use futures::SinkExt;
  use futures::channel::mpsc;
  use futures::future::join;
  use orchid_async_utils::debug::spin_on;
  use super::*;
  /// Futures stashed at the top level, and futures stashed from within other
  /// stashed futures, must all complete before `with_stash` returns.
  /// Channel capacity 0 forces every send to suspend, exercising re-polling.
  #[test]
  fn run_stashed_future() {
    let (mut send, recv) = mpsc::channel(0);
    spin_on(
      false,
      join(
        with_stash(async {
          let mut send1 = send.clone();
          stash(async move {
            send1.send(1).await.unwrap();
          });
          let mut send1 = send.clone();
          stash(async move {
            // nested stash: registered while a stashed future is running
            let mut send2 = send1.clone();
            stash(async move {
              send2.send(2).await.unwrap();
            });
            send1.send(3).await.unwrap();
            stash(async move {
              send1.send(4).await.unwrap();
            })
          });
          let mut send1 = send.clone();
          stash(async move {
            send1.send(5).await.unwrap();
          });
          send.send(6).await.unwrap();
        }),
        async {
          let mut results = recv.take(6).collect::<Vec<_>>().await;
          results.sort();
          assert_eq!(
            &results,
            &[1, 2, 3, 4, 5, 6],
            "all variations completed in unspecified order"
          );
        },
      ),
    );
  }
}

View File

@@ -1,3 +1,18 @@
/// Cache a value in a [thread_local!]. Supports synchronous and asynchronous
/// initializers
///
/// ```
/// #[macro_use]
/// use orchid_base::tl_cache;
///
/// // simple synchronous case
/// let foo = tl_cache!(Rc<Vec<usize>>: vec![0; 1024]);
/// async {
/// async fn complex_operation(x: usize) -> usize { x + 1 }
/// // async case
/// let bar = tl_cache!(async usize: complex_operation(0).await)
/// }
/// ```
#[macro_export]
macro_rules! tl_cache {
($ty:ty : $expr:expr) => {{
@@ -6,4 +21,18 @@ macro_rules! tl_cache {
}
V.with(|v| v.clone())
}};
(async $ty:ty : $expr:expr) => {{
type CellType = std::cell::OnceCell<$ty>;
thread_local! {
static V: CellType = std::cell::OnceCell::default();
}
match V.with(|cell: &CellType| cell.get().cloned()) {
Some(val) => val as $ty,
None => {
let val = $expr;
let _ = V.with(|cell: &CellType| cell.set(val.clone()));
val as $ty
},
}
}};
}

View File

@@ -1,6 +0,0 @@
pub use api::Paren;
use crate::api;
/// Helper table with the parenthesis pairs recognized by the language:
/// opening char, closing char, variant name.
pub const PARENS: &[(char, char, Paren)] =
  &[('(', ')', Paren::Round), ('[', ']', Paren::Square), ('{', '}', Paren::Curly)];

View File

@@ -1,10 +1,9 @@
use std::borrow::Borrow;
use std::fmt::{self, Debug, Display};
use std::future::Future;
use std::marker::PhantomData;
use std::rc::Rc;
use async_stream::stream;
use async_fn_stream::stream;
use futures::future::join_all;
use futures::{FutureExt, StreamExt};
use itertools::Itertools;
@@ -12,64 +11,67 @@ use never::Never;
use orchid_api_traits::Coding;
use trait_set::trait_set;
use crate::error::OrcErrv;
use crate::format::{FmtCtx, FmtUnit, Format, Variants};
use crate::interner::{Interner, Tok};
use crate::location::{Pos, SrcRange};
use crate::name::Sym;
use crate::parse::Snippet;
use crate::{api, match_mapping, tl_cache};
use crate::{
FmtCtx, FmtUnit, Format, IStr, OrcErrv, Pos, Snippet, SrcRange, Sym, VName, VPath, Variants, api,
es, match_mapping, tl_cache,
};
/// The 3 types of parentheses Orchid's lexer recognizes as intrinsic groups in
/// the S-tree
pub type Paren = api::Paren;
/// Helper table with different kinds of parentheses recognized by the language.
/// opening, closing, variant name
pub const PARENS: &[(char, char, Paren)] =
&[('(', ')', Paren::Round), ('[', ']', Paren::Square), ('{', '}', Paren::Curly)];
/// Extension interface for embedded expressions and expression construction
/// commands inside token trees
pub trait TokenVariant<ApiEquiv: Clone + Debug + Coding>: Format + Clone + fmt::Debug {
/// Additional arguments to the deserializer. If deserialization of a token
/// type is impossible, set this to a sentinel unit type that describes why.
/// If you set this to [Never], your token tree type can never be
/// deserialized.
type FromApiCtx<'a>;
/// Additional arguments to the serializer. If serialization of a token type
/// is forbidden, set this to a sentinel unit type that describes how to avoid
/// it.
/// If you set this to [Never], your token tree type can never be serialized.
type ToApiCtx<'a>;
/// Deserializer
#[must_use]
fn from_api(
api: &ApiEquiv,
api: ApiEquiv,
ctx: &mut Self::FromApiCtx<'_>,
pos: SrcRange,
i: &Interner,
) -> impl Future<Output = Self>;
/// Serializer
#[must_use]
fn into_api(self, ctx: &mut Self::ToApiCtx<'_>) -> impl Future<Output = ApiEquiv>;
}
impl<T: Clone + Debug + Coding> TokenVariant<T> for Never {
type FromApiCtx<'a> = ();
type ToApiCtx<'a> = ();
async fn from_api(_: &T, _: &mut Self::FromApiCtx<'_>, _: SrcRange, _: &Interner) -> Self {
async fn from_api(_: T, _: &mut Self::FromApiCtx<'_>, _: SrcRange) -> Self {
panic!("Cannot deserialize Never")
}
async fn into_api(self, _: &mut Self::ToApiCtx<'_>) -> T { match self {} }
}
trait_set! {
// TokenHandle
/// [api::Token::Handle] variant
pub trait ExprRepr = TokenVariant<api::ExprTicket>;
// TokenExpr
/// [api::Token::NewExpr] variant
pub trait ExtraTok = TokenVariant<api::Expression>;
}
trait_set! {
/// Callback to callback to [recur].
pub trait RecurCB<H: ExprRepr, X: ExtraTok> = Fn(TokTree<H, X>) -> TokTree<H, X>;
}
pub fn recur<H: ExprRepr, X: ExtraTok>(
tt: TokTree<H, X>,
f: &impl Fn(TokTree<H, X>, &dyn RecurCB<H, X>) -> TokTree<H, X>,
) -> TokTree<H, X> {
f(tt, &|TokTree { sr: range, tok }| {
let tok = match tok {
tok @ (Token::BR | Token::Bottom(_) | Token::Comment(_) | Token::Name(_)) => tok,
tok @ (Token::Handle(_) | Token::NewExpr(_)) => tok,
Token::NS(n, b) => Token::NS(n, Box::new(recur(*b, f))),
Token::LambdaHead(arg) =>
Token::LambdaHead(arg.into_iter().map(|tt| recur(tt, f)).collect_vec()),
Token::S(p, b) => Token::S(p, b.into_iter().map(|tt| recur(tt, f)).collect_vec()),
};
TokTree { sr: range, tok }
})
}
/// An atom that can be passed through the API boundary as part of an
/// expression. In particular, atoms created by extensions use this form.
pub trait AtomRepr: Clone + Format {
type Ctx: ?Sized;
#[must_use]
@@ -95,6 +97,7 @@ impl Display for TokHandle<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Handle({})", self.0.0) }
}
/// Lexer output
#[derive(Clone, Debug)]
pub struct TokTree<H: ExprRepr, X: ExtraTok> {
pub tok: Token<H, X>,
@@ -104,25 +107,39 @@ pub struct TokTree<H: ExprRepr, X: ExtraTok> {
pub sr: SrcRange,
}
impl<H: ExprRepr, X: ExtraTok> TokTree<H, X> {
/// Visit all tokens, modify them at will, and optionally recurse into them by
/// calling the callback passed to your callback
pub fn recur(self, f: &impl Fn(Self, &dyn RecurCB<H, X>) -> Self) -> Self {
f(self, &|TokTree { sr: range, tok }| {
let tok = match tok {
tok @ (Token::BR | Token::Bottom(_) | Token::Comment(_) | Token::Name(_)) => tok,
tok @ (Token::Handle(_) | Token::NewExpr(_)) => tok,
Token::NS(n, b) => Token::NS(n, Box::new(b.recur(f))),
Token::LambdaHead(arg) => Token::LambdaHead(Box::new(arg.recur(f))),
Token::S(p, b) => Token::S(p, b.into_iter().map(|tt| tt.recur(f)).collect_vec()),
};
TokTree { sr: range, tok }
})
}
pub async fn from_api(
tt: &api::TokenTree,
tt: api::TokenTree,
hctx: &mut H::FromApiCtx<'_>,
xctx: &mut X::FromApiCtx<'_>,
src: &Sym,
i: &Interner,
) -> Self {
let pos = SrcRange::new(tt.range.clone(), src);
let tok = match_mapping!(&tt.token, api::Token => Token::<H, X> {
let pos = SrcRange::new(tt.range, src);
let tok = match_mapping!(tt.token, api::Token => Token::<H, X> {
BR,
NS(n => Tok::from_api(*n, i).await,
b => Box::new(Self::from_api(b, hctx, xctx, src, i).boxed_local().await)),
Bottom(e => OrcErrv::from_api(e, i).await),
LambdaHead(arg => ttv_from_api(arg, hctx, xctx, src, i).await),
Name(n => Tok::from_api(*n, i).await),
S(*par, b => ttv_from_api(b, hctx, xctx, src, i).await),
Comment(c.clone()),
NewExpr(expr => X::from_api(expr, xctx, pos.clone(), i).await),
Handle(tk => H::from_api(tk, hctx, pos.clone(), i).await)
NS(n => es(n).await,
b => Box::new(Self::from_api(*b, hctx, xctx, src).boxed_local().await)),
Bottom(e => OrcErrv::from_api(e).await),
LambdaHead(arg => Box::new(Self::from_api(*arg, hctx, xctx, src).boxed_local().await)),
Name(n => es(n).await),
S(par, b => ttv_from_api(b, hctx, xctx, src).await),
Comment(c => es(c).await),
NewExpr(expr => X::from_api(expr, xctx, pos.clone()).await),
Handle(tk => H::from_api(tk, hctx, pos.clone()).await)
});
Self { sr: pos, tok }
}
@@ -136,8 +153,8 @@ impl<H: ExprRepr, X: ExtraTok> TokTree<H, X> {
BR,
NS(n.to_api(), b => Box::new(b.into_api(hctx, xctx).boxed_local().await)),
Bottom(e.to_api()),
Comment(c.clone()),
LambdaHead(arg => ttv_into_api(arg, hctx, xctx).boxed_local().await),
Comment(c.to_api()),
LambdaHead(arg => Box::new(arg.into_api(hctx, xctx).boxed_local().await)),
Name(nn.to_api()),
S(p, b => ttv_into_api(b, hctx, xctx).boxed_local().await),
Handle(hand.into_api(hctx).await),
@@ -146,25 +163,39 @@ impl<H: ExprRepr, X: ExtraTok> TokTree<H, X> {
api::TokenTree { range: self.sr.range.clone(), token }
}
pub fn is_kw(&self, tk: Tok<String>) -> bool { self.tok.is_kw(tk) }
pub fn as_name(&self) -> Option<Tok<String>> {
pub fn is_kw(&self, tk: IStr) -> bool { self.tok.is_kw(tk) }
pub fn as_name(&self) -> Option<IStr> {
if let Token::Name(n) = &self.tok { Some(n.clone()) } else { None }
}
pub fn as_multiname(&self) -> Result<VName, &TokTree<H, X>> {
let mut segs = VPath::new([]);
let mut cur = self;
loop {
match &cur.tok {
Token::Name(last) => return Ok(segs.name_with_suffix(last.clone())),
Token::NS(seg, inner) => {
segs = segs.suffix([seg.clone()]);
cur = inner;
},
_ => return Err(cur),
}
}
}
pub fn as_s(&self, par: Paren) -> Option<Snippet<'_, H, X>> {
self.tok.as_s(par).map(|slc| Snippet::new(self, slc))
}
pub fn as_lambda(&self) -> Option<Snippet<'_, H, X>> {
pub fn as_lambda(&self) -> Option<&Self> {
match &self.tok {
Token::LambdaHead(arg) => Some(Snippet::new(self, arg)),
Token::LambdaHead(arg) => Some(&**arg),
_ => None,
}
}
pub fn is_fluff(&self) -> bool { matches!(self.tok, Token::Comment(_) | Token::BR) }
pub fn lambda(arg: Vec<Self>, mut body: Vec<Self>) -> Self {
let arg_range = ttv_range(&arg).expect("Lambda with empty arg!");
pub fn lambda(arg: Self, mut body: Vec<Self>) -> Self {
let arg_range = arg.sr();
let mut s_range = arg_range.clone();
s_range.range.end = body.last().expect("Lambda with empty body!").sr.range.end;
body.insert(0, Token::LambdaHead(arg).at(arg_range));
body.insert(0, Token::LambdaHead(Box::new(arg)).at(arg_range));
Token::S(Paren::Round, body).at(s_range)
}
pub fn sr(&self) -> SrcRange { self.sr.clone() }
@@ -175,36 +206,37 @@ impl<H: ExprRepr, X: ExtraTok> Format for TokTree<H, X> {
}
}
/// Receive a token sequence from API
pub async fn ttv_from_api<H: ExprRepr, X: ExtraTok>(
tokv: impl IntoIterator<Item: Borrow<api::TokenTree>>,
tokv: impl IntoIterator<Item = api::TokenTree>,
hctx: &mut H::FromApiCtx<'_>,
xctx: &mut X::FromApiCtx<'_>,
src: &Sym,
i: &Interner,
) -> Vec<TokTree<H, X>> {
stream! {
stream(async |mut cx| {
for tok in tokv {
yield TokTree::<H, X>::from_api(tok.borrow(), hctx, xctx, src, i).boxed_local().await
cx.emit(TokTree::<H, X>::from_api(tok, hctx, xctx, src).boxed_local().await).await
}
}
})
.collect()
.await
}
/// Encode a token sequence for sending
pub async fn ttv_into_api<H: ExprRepr, X: ExtraTok>(
tokv: impl IntoIterator<Item = TokTree<H, X>>,
hctx: &mut H::ToApiCtx<'_>,
xctx: &mut X::ToApiCtx<'_>,
) -> Vec<api::TokenTree> {
stream! {
stream(async |mut cx| {
for tok in tokv {
yield tok.into_api(hctx, xctx).await
cx.emit(tok.into_api(hctx, xctx).await).await
}
}
})
.collect()
.await
}
/// Enclose the tokens in `()` if there is more than one
pub fn wrap_tokv<H: ExprRepr, X: ExtraTok>(
items: impl IntoIterator<Item = TokTree<H, X>>,
) -> TokTree<H, X> {
@@ -219,22 +251,20 @@ pub fn wrap_tokv<H: ExprRepr, X: ExtraTok>(
}
}
pub use api::Paren;
/// Lexer output variant
#[derive(Clone, Debug)]
pub enum Token<H: ExprRepr, X: ExtraTok> {
/// Information about the code addressed to the human reader or dev tooling
/// It has no effect on the behaviour of the program unless it's explicitly
/// read via reflection
Comment(Rc<String>),
Comment(IStr),
/// The part of a lambda between `\` and `.` enclosing the argument. The body
/// stretches to the end of the enclosing parens or the end of the const line
LambdaHead(Vec<TokTree<H, X>>),
LambdaHead(Box<TokTree<H, X>>),
/// A binding, operator, or a segment of a namespaced::name
Name(Tok<String>),
Name(IStr),
/// A namespace prefix, like `my_ns::` followed by a token
NS(Tok<String>, Box<TokTree<H, X>>),
NS(IStr, Box<TokTree<H, X>>),
/// A line break
BR,
/// `()`, `[]`, or `{}`
@@ -250,7 +280,7 @@ pub enum Token<H: ExprRepr, X: ExtraTok> {
}
impl<H: ExprRepr, X: ExtraTok> Token<H, X> {
pub fn at(self, sr: SrcRange) -> TokTree<H, X> { TokTree { sr, tok: self } }
pub fn is_kw(&self, tk: Tok<String>) -> bool { matches!(self, Token::Name(n) if *n == tk) }
pub fn is_kw(&self, tk: IStr) -> bool { matches!(self, Token::Name(n) if *n == tk) }
pub fn as_s(&self, par: Paren) -> Option<&[TokTree<H, X>]> {
match self {
Self::S(p, b) if *p == par => Some(b),
@@ -262,12 +292,14 @@ impl<H: ExprRepr, X: ExtraTok> Format for Token<H, X> {
async fn print<'a>(&'a self, c: &'a (impl FmtCtx + ?Sized + 'a)) -> FmtUnit {
match self {
Self::BR => "\n".to_string().into(),
Self::Bottom(err) if err.len() == 1 => format!("Bottom({}) ", err.one().unwrap()).into(),
Self::Bottom(err) => format!("Botttom(\n{}) ", indent(&err.to_string())).into(),
Self::Bottom(err) => match err.one() {
Some(err) => format!("Bottom({err}) ").into(),
None => format!("Botttom(\n{}) ", indent(&err.to_string())).into(),
},
Self::Comment(c) => format!("--[{c}]--").into(),
Self::LambdaHead(arg) =>
tl_cache!(Rc<Variants>: Rc::new(Variants::default().bounded("\\{0b}.")))
.units([ttv_fmt(arg, c).await]),
.units([arg.print(c).boxed_local().await]),
Self::NS(n, b) => tl_cache!(Rc<Variants>: Rc::new(Variants::default().bounded("{0}::{1l}")))
.units([n.to_string().into(), b.print(c).boxed_local().await]),
Self::Name(n) => format!("{n}").into(),
@@ -285,16 +317,27 @@ impl<H: ExprRepr, X: ExtraTok> Format for Token<H, X> {
}
}
/// Find the location that best describes a sequence of tokens if the sequence
/// isn't empty
pub fn ttv_range<'a>(ttv: &[TokTree<impl ExprRepr + 'a, impl ExtraTok + 'a>]) -> Option<SrcRange> {
let range = ttv.first()?.sr.range.start..ttv.last().unwrap().sr.range.end;
Some(SrcRange { path: ttv.first().unwrap().sr.path(), range })
}
/// Pretty-print a token sequence
pub async fn ttv_fmt<'a: 'b, 'b>(
ttv: impl IntoIterator<Item = &'b TokTree<impl ExprRepr + 'a, impl ExtraTok + 'a>>,
c: &(impl FmtCtx + ?Sized),
) -> FmtUnit {
FmtUnit::sequence(" ", None, join_all(ttv.into_iter().map(|t| t.print(c))).await)
FmtUnit::sequence("", " ", "", true, join_all(ttv.into_iter().map(|t| t.print(c))).await)
}
pub struct FmtTTV<'a, H: ExprRepr, X: ExtraTok>(pub &'a [TokTree<H, X>]);
impl<'b, H: ExprRepr, X: ExtraTok> Format for FmtTTV<'b, H, X> {
async fn print<'a>(&'a self, c: &'a (impl FmtCtx + ?Sized + 'a)) -> FmtUnit {
ttv_fmt(self.0, c).await
}
}
/// Indent a string by two spaces
pub fn indent(s: &str) -> String { s.replace("\n", "\n ") }

View File

@@ -9,19 +9,19 @@ use crate::proj_error::{ErrorSansOrigin, ErrorSansOriginObj};
/// as the file system. Cheap to clone.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Loaded {
/// Conceptually equivalent to a sourcefile
Code(Arc<String>),
/// Conceptually equivalent to the list of *.orc files in a folder, without
/// the extension
Collection(Arc<Vec<Tok<String>>>),
/// Conceptually equivalent to a sourcefile
Code(Arc<String>),
/// Conceptually equivalent to the list of *.orc files in a folder, without
/// the extension
Collection(Arc<Vec<IStr>>),
}
impl Loaded {
/// Is the loaded item source code (not a collection)?
pub fn is_code(&self) -> bool { matches!(self, Loaded::Code(_)) }
/// Collect the elements into a collection
pub fn collection(items: impl IntoIterator<Item = Tok<String>>) -> Self {
Self::Collection(Arc::new(items.into_iter().collect()))
}
/// Is the loaded item source code (not a collection)?
pub fn is_code(&self) -> bool { matches!(self, Loaded::Code(_)) }
/// Collect the elements into a collection
pub fn collection(items: impl IntoIterator<Item = IStr>) -> Self {
Self::Collection(Arc::new(items.into_iter().collect()))
}
}
/// Returned by any source loading callback
@@ -30,66 +30,62 @@ pub type FSResult = Result<Loaded, ErrorSansOriginObj>;
/// Type that indicates the type of an entry without reading the contents
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub enum FSKind {
/// Invalid path or read error
None,
/// Source code
Code,
/// Internal tree node
Collection,
/// Invalid path or read error
None,
/// Source code
Code,
/// Internal tree node
Collection,
}
/// Distinguished error for missing code
#[derive(Clone, PartialEq, Eq)]
pub struct CodeNotFound(pub VPath);
impl CodeNotFound {
/// Instantiate error
pub fn new(path: VPath) -> Self { Self(path) }
/// Instantiate error
pub fn new(path: VPath) -> Self { Self(path) }
}
impl ErrorSansOrigin for CodeNotFound {
const DESCRIPTION: &'static str = "No source code for path";
fn message(&self) -> String { format!("{} not found", self.0) }
const DESCRIPTION: &'static str = "No source code for path";
fn message(&self) -> String { format!("{} not found", self.0) }
}
/// A simplified view of a file system for the purposes of source code loading.
/// This includes the real FS and source code, but also various in-memory
/// formats and other sources for libraries and dependencies.
pub trait VirtFS {
/// Implementation of [VirtFS::read]
fn get(&self, path: &[Tok<String>], full_path: &PathSlice) -> FSResult;
/// Discover information about a path without reading it.
///
/// Implement this if your vfs backend can do expensive operations
fn kind(&self, path: &PathSlice) -> FSKind {
match self.read(path) {
Err(_) => FSKind::None,
Ok(Loaded::Code(_)) => FSKind::Code,
Ok(Loaded::Collection(_)) => FSKind::Collection,
}
}
/// Convert a path into a human-readable string that is meaningful in the
/// target context.
fn display(&self, path: &[Tok<String>]) -> Option<String>;
/// Convert the FS handler into a type-erased version of itself for packing in
/// a tree.
fn rc(self) -> Rc<dyn VirtFS>
where Self: Sized + 'static {
Rc::new(self)
}
/// Read a path, returning either a text file, a directory listing or an
/// error. Wrapper for [VirtFS::get]
fn read(&self, path: &PathSlice) -> FSResult { self.get(path, path) }
/// Implementation of [VirtFS::read]
fn get(&self, path: &[IStr], full_path: &PathSlice) -> FSResult;
/// Discover information about a path without reading it.
///
/// Implement this if your vfs backend can do expensive operations
fn kind(&self, path: &PathSlice) -> FSKind {
match self.read(path) {
Err(_) => FSKind::None,
Ok(Loaded::Code(_)) => FSKind::Code,
Ok(Loaded::Collection(_)) => FSKind::Collection,
}
}
/// Convert a path into a human-readable string that is meaningful in the
/// target context.
fn display(&self, path: &[IStr]) -> Option<String>;
/// Convert the FS handler into a type-erased version of itself for packing in
/// a tree.
fn rc(self) -> Rc<dyn VirtFS>
where Self: Sized + 'static {
Rc::new(self)
}
/// Read a path, returning either a text file, a directory listing or an
/// error. Wrapper for [VirtFS::get]
fn read(&self, path: &PathSlice) -> FSResult { self.get(path, path) }
}
impl VirtFS for &dyn VirtFS {
fn get(&self, path: &[Tok<String>], full_path: &PathSlice) -> FSResult {
(*self).get(path, full_path)
}
fn display(&self, path: &[Tok<String>]) -> Option<String> { (*self).display(path) }
fn get(&self, path: &[IStr], full_path: &PathSlice) -> FSResult { (*self).get(path, full_path) }
fn display(&self, path: &[IStr]) -> Option<String> { (*self).display(path) }
}
impl<T: VirtFS + ?Sized> VirtFS for Rc<T> {
fn get(&self, path: &[Tok<String>], full_path: &PathSlice) -> FSResult {
(**self).get(path, full_path)
}
fn display(&self, path: &[Tok<String>]) -> Option<String> { (**self).display(path) }
fn get(&self, path: &[IStr], full_path: &PathSlice) -> FSResult { (**self).get(path, full_path) }
fn display(&self, path: &[IStr]) -> Option<String> { (**self).display(path) }
}

View File

@@ -32,7 +32,7 @@ impl<'a> Combine for &'a dyn VirtFS {
pub type DeclTree = ModEntry<Rc<dyn VirtFS>, (), ()>;
impl VirtFS for DeclTree {
fn get(&self, path: &[Tok<String>], full_path: &PathSlice) -> FSResult {
fn get(&self, path: &[IStr], full_path: &PathSlice) -> FSResult {
match &self.member {
ModMember::Item(it) => it.get(path, full_path),
ModMember::Sub(module) => match path.split_first() {
@@ -44,7 +44,7 @@ impl VirtFS for DeclTree {
}
}
fn display(&self, path: &[Tok<String>]) -> Option<String> {
fn display(&self, path: &[IStr]) -> Option<String> {
let (head, tail) = path.split_first()?;
match &self.member {
ModMember::Item(it) => it.display(path),
@@ -54,16 +54,16 @@ impl VirtFS for DeclTree {
}
impl VirtFS for String {
fn display(&self, _: &[Tok<String>]) -> Option<String> { None }
fn get(&self, path: &[Tok<String>], full_path: &PathSlice) -> FSResult {
fn display(&self, _: &[IStr]) -> Option<String> { None }
fn get(&self, path: &[IStr], full_path: &PathSlice) -> FSResult {
(path.is_empty().then(|| Loaded::Code(Arc::new(self.as_str().to_string()))))
.ok_or_else(|| CodeNotFound::new(full_path.to_vpath()).pack())
}
}
impl<'a> VirtFS for &'a str {
fn display(&self, _: &[Tok<String>]) -> Option<String> { None }
fn get(&self, path: &[Tok<String>], full_path: &PathSlice) -> FSResult {
fn display(&self, _: &[IStr]) -> Option<String> { None }
fn get(&self, path: &[IStr], full_path: &PathSlice) -> FSResult {
(path.is_empty().then(|| Loaded::Code(Arc::new(self.to_string()))))
.ok_or_else(|| CodeNotFound::new(full_path.to_vpath()).pack())
}

View File

@@ -99,14 +99,14 @@ impl DirNode {
}
}
fn mk_pathbuf(&self, path: &[Tok<String>]) -> PathBuf {
fn mk_pathbuf(&self, path: &[IStr]) -> PathBuf {
let mut fpath = self.root.clone();
path.iter().for_each(|seg| fpath.push(seg.as_str()));
fpath
}
}
impl VirtFS for DirNode {
fn get(&self, path: &[Tok<String>], full_path: &PathSlice) -> FSResult {
fn get(&self, path: &[IStr], full_path: &PathSlice) -> FSResult {
let fpath = self.mk_pathbuf(path);
let mut binding = self.cached.borrow_mut();
let (_, res) = (binding.raw_entry_mut().from_key(&fpath))
@@ -114,7 +114,7 @@ impl VirtFS for DirNode {
res.clone()
}
fn display(&self, path: &[Tok<String>]) -> Option<String> {
fn display(&self, path: &[IStr]) -> Option<String> {
let pathbuf = self.mk_pathbuf(path).with_extension(self.ext());
Some(pathbuf.to_string_lossy().to_string())
}

View File

@@ -56,7 +56,7 @@ impl EmbeddedFS {
}
impl VirtFS for EmbeddedFS {
fn get(&self, path: &[Tok<String>], full_path: &PathSlice) -> FSResult {
fn get(&self, path: &[IStr], full_path: &PathSlice) -> FSResult {
if path.is_empty() {
return Ok(Loaded::collection(self.tree.keys(|_| true)));
}
@@ -67,7 +67,7 @@ impl VirtFS for EmbeddedFS {
ModMember::Sub(sub) => Loaded::collection(sub.keys(|_| true)),
})
}
fn display(&self, path: &[Tok<String>]) -> Option<String> {
fn display(&self, path: &[IStr]) -> Option<String> {
let Self { gen, suffix, .. } = self;
Some(format!("{}{suffix} in {gen}", path.iter().join("/")))
}

View File

@@ -21,18 +21,18 @@ impl<'a> PrefixFS<'a> {
add: VPath::parse(add.as_ref()),
}
}
fn proc_path(&self, path: &[Tok<String>]) -> Option<Vec<Tok<String>>> {
fn proc_path(&self, path: &[IStr]) -> Option<Vec<IStr>> {
let path = path.strip_prefix(self.remove.as_slice())?;
Some(self.add.0.iter().chain(path).cloned().collect_vec())
}
}
impl<'a> VirtFS for PrefixFS<'a> {
fn get(&self, path: &[Tok<String>], full_path: &PathSlice) -> super::FSResult {
fn get(&self, path: &[IStr], full_path: &PathSlice) -> super::FSResult {
let path =
self.proc_path(path).ok_or_else(|| CodeNotFound::new(full_path.to_vpath()).pack())?;
self.wrapped.get(&path, full_path)
}
fn display(&self, path: &[Tok<String>]) -> Option<String> {
fn display(&self, path: &[IStr]) -> Option<String> {
self.wrapped.display(&self.proc_path(path)?)
}
}

View File

@@ -6,28 +6,40 @@ edition = "2024"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
ahash = "0.8.11"
async-event = "0.2.1"
async-fn-stream = { version = "0.1.0", path = "../async-fn-stream" }
async-once-cell = "0.5.4"
async-std = "1.13.0"
async-stream = "0.3.6"
chrono = "0.4.44"
derive_destructure = "1.0.0"
dyn-clone = "1.0.17"
futures = "0.3.31"
hashbrown = "0.15.2"
dyn-clone = "1.0.20"
futures = { version = "0.3.31", default-features = false, features = [
"std",
"async-await",
] }
futures-locks = "0.7.1"
hashbrown = "0.16.1"
include_dir = { version = "0.7.4", optional = true }
itertools = "0.14.0"
konst = "0.3.16"
konst = "0.4.3"
lazy_static = "1.5.0"
memo-map = "0.3.3"
never = "0.1.0"
once_cell = "1.20.2"
once_cell = "1.21.3"
orchid-api = { version = "0.1.0", path = "../orchid-api" }
orchid-api-derive = { version = "0.1.0", path = "../orchid-api-derive" }
orchid-api-traits = { version = "0.1.0", path = "../orchid-api-traits" }
orchid-async-utils = { version = "0.1.0", path = "../orchid-async-utils" }
orchid-base = { version = "0.1.0", path = "../orchid-base" }
ordered-float = "5.0.0"
pastey = "0.1.0"
some_executor = "0.5.1"
ordered-float = "5.1.0"
pastey = "0.2.1"
substack = "1.1.1"
tokio = { version = "1.46.1", optional = true }
task-local = "0.1.0"
tokio = { version = "1.49.0", optional = true, features = [] }
tokio-util = { version = "0.7.17", optional = true, features = ["compat"] }
trait-set = "0.3.0"
unsync-pipe = { version = "0.2.0", path = "../unsync-pipe" }
[features]
tokio = ["dep:tokio", "dep:tokio-util"]
default = ["tokio"]

View File

@@ -1,89 +1,81 @@
use std::any::{Any, TypeId, type_name};
use std::fmt;
use std::collections::HashMap;
use std::fmt::{self, Debug};
use std::future::Future;
use std::io;
use std::marker::PhantomData;
use std::num::NonZeroU32;
use std::ops::Deref;
use std::pin::Pin;
use std::rc::Rc;
use ahash::HashMap;
use async_std::io::{Read, Write};
use async_std::stream;
use dyn_clone::{DynClone, clone_box};
use futures::future::LocalBoxFuture;
use futures::{FutureExt, StreamExt};
use futures::{AsyncWrite, FutureExt, StreamExt, stream};
use orchid_api_derive::Coding;
use orchid_api_traits::{Coding, Decode, Encode, Request, enc_vec};
use orchid_base::clone;
use orchid_base::error::{OrcErr, OrcRes, mk_err};
use orchid_base::format::{FmtCtx, FmtUnit, Format};
use orchid_base::interner::Interner;
use orchid_base::location::Pos;
use orchid_base::name::Sym;
use orchid_base::reqnot::Requester;
use orchid_api_traits::{Coding, Decode, InHierarchy, Request, UnderRoot, enc_vec};
use orchid_base::{
FmtCtx, FmtUnit, Format, IStr, OrcErrv, Pos, Receipt, ReqHandle, ReqReader, ReqReaderExt, Sym,
fmt, is, mk_errv, mk_errv_floating, take_first,
};
use trait_set::trait_set;
use crate::api;
// use crate::error::{ProjectError, ProjectResult};
use crate::expr::{Expr, ExprData, ExprHandle, ExprKind};
use crate::gen_expr::GExpr;
use crate::system::{DynSystemCard, SysCtx, atom_info_for, downcast_atom};
use crate::{
DynSystemCardExt, Expr, ExprData, ExprHandle, ExprKind, OwnedAtom, ToExpr, api, dyn_cted,
get_obj_store, request, sys_id,
};
#[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Coding)]
/// Every atom managed via this system starts with an ID into the type table
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Coding)]
pub struct AtomTypeId(pub NonZeroU32);
pub trait AtomCard: 'static + Sized {
type Data: Clone + Coding + Sized;
}
pub trait AtomicVariant {}
/// A value managed by Orchid. The type should also be registered in the
/// [crate::SystemCard] through [AtomicFeatures::ops] which is provided
/// indirectly by either [crate::OwnedAtom] or [crate::ThinAtom]
pub trait Atomic: 'static + Sized {
/// Either [crate::OwnedVariant] or [crate::ThinVariant] depending on whether
/// the value implements [crate::OwnedAtom] or [crate::ThinAtom]
type Variant: AtomicVariant;
/// Serializable data that gets sent inside the atom to other systems that
/// depend on this system. Methods on this value are directly accessible
/// through [TAtom], and this data can also be used for optimized public
/// functions. The serialized form should have a reasonable length to avoid
/// overburdening the protocol.
type Data: Clone + Coding + Sized + 'static;
/// Register handlers for IPC calls. If this atom implements [Supports], you
/// should register your implementations here. If this atom doesn't
/// participate in IPC at all, the default implementation is fine
fn reg_reqs() -> MethodSetBuilder<Self> { MethodSetBuilder::new() }
}
impl<A: Atomic> AtomCard for A {
type Data = <Self as Atomic>::Data;
fn reg_methods() -> MethodSetBuilder<Self> { MethodSetBuilder::new() }
}
/// Shared interface of all atom types created in this library for use by the
/// library that defines them. This is provided by [Atomic] and either
/// [crate::OwnedAtom] or [crate::ThinAtom]
pub trait AtomicFeatures: Atomic {
/// Convert a value of this atom inside the defining system into a function
/// that will perform registrations and serialization
#[allow(private_interfaces)]
fn factory(self) -> AtomFactory;
type Info: AtomDynfo;
fn info() -> Self::Info;
fn dynfo() -> Box<dyn AtomDynfo>;
/// Expose all operations that can be performed on an instance of this type in
/// an instanceless vtable. This vtable must be registered by the
/// [crate::System].
fn ops() -> Box<dyn AtomOps>;
}
pub trait ToAtom {
fn to_atom_factory(self) -> AtomFactory;
}
impl<A: AtomicFeatures> ToAtom for A {
fn to_atom_factory(self) -> AtomFactory { self.factory() }
}
impl ToAtom for AtomFactory {
fn to_atom_factory(self) -> AtomFactory { self }
}
pub trait AtomicFeaturesImpl<Variant: AtomicVariant> {
pub(crate) trait AtomicFeaturesImpl<Variant: AtomicVariant> {
fn _factory(self) -> AtomFactory;
type _Info: AtomDynfo;
type _Info: AtomOps;
fn _info() -> Self::_Info;
}
impl<A: Atomic + AtomicFeaturesImpl<A::Variant>> AtomicFeatures for A {
#[allow(private_interfaces)]
fn factory(self) -> AtomFactory { self._factory() }
type Info = <Self as AtomicFeaturesImpl<A::Variant>>::_Info;
fn info() -> Self::Info { Self::_info() }
fn dynfo() -> Box<dyn AtomDynfo> { Box::new(Self::info()) }
}
pub fn get_info<A: AtomCard>(
sys: &(impl DynSystemCard + ?Sized),
) -> (AtomTypeId, Box<dyn AtomDynfo>) {
atom_info_for(sys, TypeId::of::<A>()).unwrap_or_else(|| {
panic!("Atom {} not associated with system {}", type_name::<A>(), sys.name())
})
fn ops() -> Box<dyn AtomOps> { Box::new(Self::_info()) }
}
/// A reference to a value of some [Atomic] type. This owns an [Expr]
#[derive(Clone)]
pub struct ForeignAtom {
pub(crate) expr: Rc<ExprHandle>,
@@ -91,24 +83,51 @@ pub struct ForeignAtom {
pub(crate) pos: Pos,
}
impl ForeignAtom {
/// Obtain the position in code of the expression
pub fn pos(&self) -> Pos { self.pos.clone() }
pub fn ctx(&self) -> SysCtx { self.expr.ctx.clone() }
/// Obtain the [Expr]
pub fn ex(self) -> Expr {
let (handle, pos) = (self.expr.clone(), self.pos.clone());
let data = ExprData { pos, kind: ExprKind::Atom(ForeignAtom { ..self }) };
Expr::new(handle, data)
Expr::from_data(handle, data)
}
pub(crate) fn new(handle: Rc<ExprHandle>, atom: api::Atom, pos: Pos) -> Self {
ForeignAtom { atom, expr: handle, pos }
}
pub async fn request<M: AtomMethod>(&self, m: M) -> Option<M::Response> {
let rep = (self.ctx().reqnot().request(api::Fwd(
self.atom.clone(),
Sym::parse(M::NAME, self.ctx().i()).await.unwrap().tok().to_api(),
enc_vec(&m).await,
)))
/// Call an IPC method. If the type does not support the given method type,
/// this function returns [None]
pub async fn call<R: Request + UnderRoot<Root: AtomMethod>>(&self, r: R) -> Option<R::Response> {
let rep = (request(api::Fwd {
target: self.atom.clone(),
method: Sym::parse(<R as UnderRoot>::Root::NAME).await.unwrap().tok().to_api(),
body: enc_vec(&r.into_root()),
}))
.await?;
Some(M::Response::decode(Pin::new(&mut &rep[..])).await)
Some(R::Response::decode_slice(&mut &rep[..]))
}
/// Attempt to downcast this value to a concrete atom type
pub fn downcast<A: Atomic>(self) -> Result<TAtom<A>, NotTypAtom> {
let mut data = &self.atom.data.0[..];
let value = AtomTypeId::decode_slice(&mut data);
let cted = dyn_cted();
let own_inst = cted.inst();
let owner_id = self.atom.owner;
let typ = type_name::<A>();
let owner = if sys_id() == owner_id {
own_inst.card()
} else {
(cted.deps().find(|s| s.id() == self.atom.owner))
.ok_or_else(|| NotTypAtom { expr: self.clone().ex(), pos: self.pos(), typ })?
.get_card()
};
let Some(ops) = owner.ops_by_atid(value) else {
panic!("{value:?} does not refer to an atom in {owner_id:?} when downcasting {typ}");
};
if ops.tid() != TypeId::of::<A>() {
return Err(NotTypAtom { pos: self.pos.clone(), expr: self.ex(), typ });
}
let value = A::Data::decode_slice(&mut data);
Ok(TAtom { value, untyped: self })
}
}
impl fmt::Display for ForeignAtom {
@@ -119,208 +138,277 @@ impl fmt::Debug for ForeignAtom {
}
impl Format for ForeignAtom {
async fn print<'a>(&'a self, _c: &'a (impl FmtCtx + ?Sized + 'a)) -> FmtUnit {
FmtUnit::from_api(&self.ctx().reqnot().request(api::ExtAtomPrint(self.atom.clone())).await)
FmtUnit::from_api(&request(api::ExtAtomPrint(self.atom.clone())).await)
}
}
impl ToExpr for ForeignAtom {
async fn to_expr(self) -> Expr
where Self: Sized {
self.ex()
}
async fn to_gen(self) -> GExpr { self.ex().to_gen().await }
}
pub struct NotTypAtom {
pub pos: Pos,
pub expr: Expr,
pub typ: Box<dyn AtomDynfo>,
pub ctx: SysCtx,
pub typ: &'static str,
}
impl NotTypAtom {
pub async fn mk_err(&self) -> OrcErr {
mk_err(
self.ctx.i().i("Not the expected type").await,
format!("This expression is not a {}", self.typ.name()),
[self.pos.clone().into()],
/// Convert to a generic Orchid error
pub async fn mk_err(&self) -> OrcErrv {
mk_errv(
is("Not the expected type").await,
format!("The expression {} is not a {}", fmt(&self.expr).await, self.typ),
[self.pos.clone()],
)
}
}
impl Debug for NotTypAtom {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("NotTypAtom")
.field("pos", &self.pos)
.field("expr", &self.expr)
.field("typ", &self.typ)
.finish_non_exhaustive()
}
}
pub trait AtomMethod: Request {
/// An IPC request associated with an atom. This type should either implement
/// [Request] or be the root of a [orchid_api_derive::Hierarchy] the leaves of
/// which implement [Request].
pub trait AtomMethod: Coding + InHierarchy {
const NAME: &str;
}
pub trait Supports<M: AtomMethod>: AtomCard {
fn handle(&self, ctx: SysCtx, req: M) -> impl Future<Output = <M as Request>::Response>;
/// A handler for an [AtomMethod] on an [Atomic]. The [AtomMethod] must also be
/// registered in [Atomic::reg_methods]
pub trait Supports<M: AtomMethod>: Atomic {
fn handle(&self, hand: Box<dyn ReqHandle>, req: M) -> impl Future<Output = io::Result<Receipt>>;
}
trait_set! {
trait AtomReqCb<A> = for<'a> Fn(
&'a A,
SysCtx,
Pin<&'a mut dyn Read>,
Pin<&'a mut dyn Write>,
) -> LocalBoxFuture<'a, ()>
trait HandleAtomMethod<A> {
fn handle<'a>(&'a self, atom: &'a A, reader: Box<dyn ReqReader>) -> LocalBoxFuture<'a, ()>;
}
struct AtomMethodHandler<M, A>(PhantomData<M>, PhantomData<A>);
impl<M: AtomMethod, A: Supports<M>> HandleAtomMethod<A> for AtomMethodHandler<M, A> {
fn handle<'a>(&'a self, atom: &'a A, mut reader: Box<dyn ReqReader>) -> LocalBoxFuture<'a, ()> {
Box::pin(async {
let req = reader.read_req::<M>().await.unwrap();
let _ = Supports::<M>::handle(atom, reader.finish().await, req).await.unwrap();
})
}
}
pub struct MethodSetBuilder<A: AtomCard> {
handlers: Vec<(&'static str, Rc<dyn AtomReqCb<A>>)>,
/// A collection of [Supports] impls for an [Atomic]. If a [Supports]
/// impl is not added to the method set, it will not be recognized. Note that
/// the [Supports] implementors must be registered, which are not necessarily
/// the same as the [Request] implementors
pub struct MethodSetBuilder<A: Atomic> {
handlers: Vec<(&'static str, Rc<dyn HandleAtomMethod<A>>)>,
}
impl<A: AtomCard> MethodSetBuilder<A> {
impl<A: Atomic> MethodSetBuilder<A> {
pub fn new() -> Self { Self { handlers: vec![] } }
/// Add an [AtomMethod]
pub fn handle<M: AtomMethod>(mut self) -> Self
where A: Supports<M> {
assert!(!M::NAME.is_empty(), "AtomMethod::NAME cannoot be empty");
self.handlers.push((
M::NAME,
Rc::new(move |a: &A, ctx: SysCtx, req: Pin<&mut dyn Read>, rep: Pin<&mut dyn Write>| {
async { Supports::<M>::handle(a, ctx, M::decode(req).await).await.encode(rep).await }
.boxed_local()
}),
));
self.handlers.push((M::NAME, Rc::new(AtomMethodHandler::<M, A>(PhantomData, PhantomData))));
self
}
pub async fn pack(&self, ctx: SysCtx) -> MethodSet<A> {
pub(crate) async fn pack(&self) -> MethodSet<A> {
MethodSet {
handlers: stream::from_iter(self.handlers.iter())
.then(|(k, v)| {
clone!(ctx; async move {
(Sym::parse(k, ctx.i()).await.unwrap(), v.clone())
})
})
handlers: stream::iter(self.handlers.iter())
.then(async |(k, v)| (Sym::parse(k).await.unwrap(), v.clone()))
.collect()
.await,
}
}
}
pub struct MethodSet<A: AtomCard> {
handlers: HashMap<Sym, Rc<dyn AtomReqCb<A>>>,
pub(crate) struct MethodSet<A: Atomic> {
handlers: HashMap<Sym, Rc<dyn HandleAtomMethod<A>>>,
}
impl<A: AtomCard> MethodSet<A> {
pub(crate) async fn dispatch<'a>(
&'a self,
atom: &'a A,
ctx: SysCtx,
key: Sym,
req: Pin<&'a mut dyn Read>,
rep: Pin<&'a mut dyn Write>,
) -> bool {
impl<A: Atomic> MethodSet<A> {
pub(crate) async fn dispatch(&self, atom: &A, key: Sym, req: Box<dyn ReqReader>) -> bool {
match self.handlers.get(&key) {
None => false,
Some(handler) => {
handler(atom, ctx, req, rep).await;
handler.handle(atom, req).await;
true
},
}
}
}
impl<A: AtomCard> Default for MethodSetBuilder<A> {
impl<A: Atomic> Default for MethodSetBuilder<A> {
fn default() -> Self { Self::new() }
}
/// A handle to a value defined by this or another system. This owns an [Expr]
#[derive(Clone)]
pub struct TypAtom<A: AtomicFeatures> {
pub data: ForeignAtom,
pub struct TAtom<A: Atomic> {
pub untyped: ForeignAtom,
pub value: A::Data,
}
impl<A: AtomicFeatures> TypAtom<A> {
impl<A: Atomic> TAtom<A> {
/// Obtain the underlying [Expr]
pub fn ex(&self) -> Expr { self.untyped.clone().ex() }
/// Obtain the position in code associated with the atom
pub fn pos(&self) -> Pos { self.untyped.pos() }
/// Produce from an [ExprHandle] directly
pub async fn downcast(expr: Rc<ExprHandle>) -> Result<Self, NotTypAtom> {
match Expr::from_handle(expr).atom().await {
Err(expr) => Err(NotTypAtom {
ctx: expr.handle().get_ctx(),
pos: expr.data().await.pos.clone(),
expr,
typ: Box::new(A::info()),
}),
Ok(atm) => match downcast_atom::<A>(atm).await {
Ok(tatom) => Ok(tatom),
Err(fa) => Err(NotTypAtom {
pos: fa.pos.clone(),
ctx: fa.ctx().clone(),
expr: fa.ex(),
typ: Box::new(A::info()),
}),
},
Err(expr) =>
Err(NotTypAtom { pos: expr.data().await.pos.clone(), expr, typ: type_name::<A>() }),
Ok(atm) => atm.downcast(),
}
}
pub async fn request<M: AtomMethod>(&self, req: M) -> M::Response
where A: Supports<M> {
M::Response::decode(Pin::new(
&mut &(self.data.ctx().reqnot().request(api::Fwd(
self.data.atom.clone(),
Sym::parse(M::NAME, self.data.ctx().i()).await.unwrap().tok().to_api(),
enc_vec(&req).await,
)))
/// Find the instance associated with a [TAtom] that we own
///
/// # Panics
///
/// if we don't actually own this atom
pub async fn own(&self) -> A
where A: OwnedAtom {
let g = get_obj_store().objects.read().await;
let atom_id = self.untyped.atom.drop.expect("Owned atoms always have a drop ID");
let dyn_atom =
g.get(&atom_id).expect("Atom ID invalid; atom type probably not owned by this crate");
dyn_atom.as_any_ref().downcast_ref().cloned().expect("The ID should imply a type as well")
}
/// Call an IPC method on the value. Since we know the type, unlike
/// [ForeignAtom::call], we can ensure that the callee recognizes this method
pub async fn call<R: Request + UnderRoot<Root: AtomMethod>>(&self, req: R) -> R::Response
where A: Supports<<R as UnderRoot>::Root> {
R::Response::decode_slice(
&mut &(request(api::Fwd {
target: self.untyped.atom.clone(),
method: Sym::parse(<R as UnderRoot>::Root::NAME).await.unwrap().tok().to_api(),
body: enc_vec(&req.into_root()),
}))
.await
.unwrap()[..],
))
.await
)
}
}
impl<A: AtomicFeatures> Deref for TypAtom<A> {
impl<A: AtomicFeatures> Deref for TAtom<A> {
type Target = A::Data;
fn deref(&self) -> &Self::Target { &self.value }
}
pub struct AtomCtx<'a>(pub &'a [u8], pub Option<api::AtomId>, pub SysCtx);
impl FmtCtx for AtomCtx<'_> {
fn i(&self) -> &Interner { self.2.i() }
impl<A: AtomicFeatures> ToExpr for TAtom<A> {
async fn to_gen(self) -> GExpr { self.untyped.to_gen().await }
}
impl<A: AtomicFeatures> Format for TAtom<A> {
async fn print<'a>(&'a self, c: &'a (impl FmtCtx + ?Sized + 'a)) -> FmtUnit {
self.untyped.print(c).await
}
}
pub trait AtomDynfo: 'static {
pub(crate) struct AtomCtx<'a>(pub &'a [u8], pub Option<api::AtomId>);
/// A vtable-like type that collects operations defined by an [Atomic] without
/// associating with an instance of that type. This must be registered in
/// [crate::SystemCard]
#[allow(private_interfaces)]
pub trait AtomOps: 'static {
fn tid(&self) -> TypeId;
fn name(&self) -> &'static str;
fn decode<'a>(&'a self, ctx: AtomCtx<'a>) -> LocalBoxFuture<'a, Box<dyn Any>>;
fn call<'a>(&'a self, ctx: AtomCtx<'a>, arg: Expr) -> LocalBoxFuture<'a, GExpr>;
fn call_ref<'a>(&'a self, ctx: AtomCtx<'a>, arg: Expr) -> LocalBoxFuture<'a, GExpr>;
fn print<'a>(&'a self, ctx: AtomCtx<'a>) -> LocalBoxFuture<'a, FmtUnit>;
fn handle_req<'a, 'b: 'a, 'c: 'a>(
fn handle_req_ref<'a>(
&'a self,
ctx: AtomCtx<'a>,
key: Sym,
req: Pin<&'b mut dyn Read>,
rep: Pin<&'c mut dyn Write>,
req: Box<dyn ReqReader>,
) -> LocalBoxFuture<'a, bool>;
fn command<'a>(&'a self, ctx: AtomCtx<'a>) -> LocalBoxFuture<'a, OrcRes<Option<GExpr>>>;
fn serialize<'a, 'b: 'a>(
&'a self,
ctx: AtomCtx<'a>,
write: Pin<&'b mut dyn Write>,
write: Pin<&'b mut dyn AsyncWrite>,
) -> LocalBoxFuture<'a, Option<Vec<Expr>>>;
fn deserialize<'a>(
&'a self,
ctx: SysCtx,
data: &'a [u8],
refs: &'a [Expr],
) -> LocalBoxFuture<'a, api::Atom>;
) -> LocalBoxFuture<'a, api::LocalAtom>;
fn drop<'a>(&'a self, ctx: AtomCtx<'a>) -> LocalBoxFuture<'a, ()>;
}
trait_set! {
pub trait AtomFactoryFn = FnOnce(SysCtx) -> LocalBoxFuture<'static, api::Atom> + DynClone;
pub trait AtomFactoryFn = FnOnce() -> LocalBoxFuture<'static, api::LocalAtom> + DynClone;
}
pub struct AtomFactory(Box<dyn AtomFactoryFn>);
pub(crate) struct AtomFactory(Box<dyn AtomFactoryFn>, String);
impl AtomFactory {
pub fn new(f: impl AsyncFnOnce(SysCtx) -> api::Atom + Clone + 'static) -> Self {
Self(Box::new(|ctx| f(ctx).boxed_local()))
pub fn new(name: String, f: impl AsyncFnOnce() -> api::LocalAtom + Clone + 'static) -> Self {
Self(Box::new(|| f().boxed_local()), name)
}
pub async fn build(self, ctx: SysCtx) -> api::Atom { (self.0)(ctx).await }
pub async fn build(self) -> api::LocalAtom { (self.0)().await }
}
impl Clone for AtomFactory {
fn clone(&self) -> Self { AtomFactory(clone_box(&*self.0)) }
fn clone(&self) -> Self { AtomFactory(clone_box(&*self.0), self.1.clone()) }
}
impl fmt::Debug for AtomFactory {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "AtomFactory") }
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "AtomFactory<{}>", self.1) }
}
impl fmt::Display for AtomFactory {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "AtomFactory") }
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{self:?}") }
}
impl Format for AtomFactory {
async fn print<'a>(&'a self, _c: &'a (impl FmtCtx + ?Sized + 'a)) -> FmtUnit {
"AtomFactory".to_string().into()
self.to_string().into()
}
}
pub async fn err_not_callable(i: &Interner) -> OrcErr {
mk_err(i.i("This atom is not callable").await, "Attempted to apply value as function", [])
/// Error produced when an atom can not be applied to a value as a function
pub async fn err_not_callable(unit: &FmtUnit) -> OrcErrv {
mk_errv_floating(
is("This atom is not callable").await,
format!("Attempted to apply {} as function", take_first(unit, false)),
)
}
pub async fn err_not_command(i: &Interner) -> OrcErr {
mk_err(i.i("This atom is not a command").await, "Settled on an inactionable value", [])
/// Error produced when an atom can not be the final value of the program
pub async fn err_not_command(unit: &FmtUnit) -> OrcErrv {
mk_errv_floating(
is("This atom is not a command").await,
format!("Settled on {} which is an inactionable value", take_first(unit, false)),
)
}
pub(crate) async fn err_exit_success_msg() -> IStr { is("Early successful exit").await }
pub(crate) async fn err_exit_failure_msg() -> IStr { is("Early failure exit").await }
/// Sentinel error returnable from [crate::OwnedAtom::command] or
/// [crate::ThinAtom::command] to indicate that the program should exit with a
/// success
pub async fn err_exit_success() -> OrcErrv {
mk_errv_floating(
err_exit_success_msg().await,
"Sentinel error indicating that the program should exit with a success.",
)
}
/// Sentinel error returnable from [crate::OwnedAtom::command] or
/// [crate::ThinAtom::command] to indicate that the program should exit with a
/// failure
pub async fn err_exit_failure() -> OrcErrv {
mk_errv_floating(
err_exit_failure_msg().await,
"Sentinel error indicating that the program should exit with a failure \
but without raising an error.",
)
}
/// Read the type ID prefix from an atom, return type information and the rest
/// of the data
pub(crate) fn resolve_atom_type(atom: &api::Atom) -> (Box<dyn AtomOps>, AtomTypeId, &[u8]) {
let mut data = &atom.data.0[..];
let atid = AtomTypeId::decode_slice(&mut data);
let atom_record = dyn_cted().inst().card().ops_by_atid(atid).expect("Unrecognized atom type ID");
(atom_record, atid, data)
}

View File

@@ -1,66 +1,67 @@
use std::any::{Any, TypeId, type_name};
use std::borrow::Cow;
use std::cell::RefCell;
use std::future::Future;
use std::marker::PhantomData;
use std::num::NonZero;
use std::ops::Deref;
use std::pin::Pin;
use std::sync::atomic::AtomicU64;
use std::rc::Rc;
use async_once_cell::OnceCell;
use async_std::io::{Read, Write};
use async_std::sync::{RwLock, RwLockReadGuard};
use futures::FutureExt;
use dyn_clone::{DynClone, clone_box};
use futures::future::{LocalBoxFuture, ready};
use futures::{AsyncWrite, FutureExt};
use futures_locks::{RwLock, RwLockReadGuard};
use itertools::Itertools;
use memo_map::MemoMap;
use never::Never;
use orchid_api::AtomId;
use orchid_api_traits::{Decode, Encode, enc_vec};
use orchid_base::error::OrcRes;
use orchid_base::format::{FmtCtx, FmtCtxImpl, FmtUnit};
use orchid_base::name::Sym;
use orchid_base::{FmtCtx, FmtCtxImpl, FmtUnit, Format, Sym, log, take_first};
use task_local::task_local;
use crate::api;
use crate::atom::{
AtomCard, AtomCtx, AtomDynfo, AtomFactory, Atomic, AtomicFeaturesImpl, AtomicVariant, MethodSet,
MethodSetBuilder, err_not_callable, err_not_command, get_info,
};
use crate::expr::Expr;
use crate::gen_expr::{GExpr, bot};
use crate::system::{SysCtx, SysCtxEntry};
use crate::system_ctor::CtedObj;
use crate::{
AtomCtx, AtomFactory, AtomOps, Atomic, AtomicFeaturesImpl, AtomicVariant, DynSystemCardExt, Expr,
MethodSet, MethodSetBuilder, ToExpr, api, dyn_cted, err_not_callable,
};
/// Value of [Atomic::Variant] for a type that implements [OwnedAtom]
pub struct OwnedVariant;
impl AtomicVariant for OwnedVariant {}
impl<A: OwnedAtom + Atomic<Variant = OwnedVariant>> AtomicFeaturesImpl<OwnedVariant> for A {
fn _factory(self) -> AtomFactory {
AtomFactory::new(async move |ctx| {
let serial =
ctx.get_or_default::<ObjStore>().next_id.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
let atom_id = api::AtomId(NonZero::new(serial + 1).unwrap());
let (typ_id, _) = get_info::<A>(ctx.get::<CtedObj>().inst().card());
let mut data = enc_vec(&typ_id).await;
AtomFactory::new(type_name::<A>().to_string(), async move || {
let obj_store = get_obj_store();
let atom_id = {
let mut id = obj_store.next_id.borrow_mut();
*id += 1;
api::AtomId(NonZero::new(*id + 1).unwrap())
};
let (typ_id, _) = dyn_cted().inst().card().ops::<A>();
let mut data = enc_vec(&typ_id);
self.encode(Pin::<&mut Vec<u8>>::new(&mut data)).await;
ctx.get_or_default::<ObjStore>().objects.read().await.insert(atom_id, Box::new(self));
eprintln!("Created atom {:?} of type {}", atom_id, type_name::<A>());
api::Atom { drop: Some(atom_id), data, owner: ctx.sys_id() }
obj_store.objects.read().await.insert(atom_id, Box::new(self));
api::LocalAtom { drop: Some(atom_id), data: api::AtomData(data) }
})
}
fn _info() -> Self::_Info { OwnedAtomDynfo { msbuild: A::reg_reqs(), ms: OnceCell::new() } }
type _Info = OwnedAtomDynfo<A>;
fn _info() -> Self::_Info { OwnedAtomOps { msbuild: A::reg_methods(), ms: OnceCell::new() } }
type _Info = OwnedAtomOps<A>;
}
/// While an atom read guard is held, no atom can be removed.
pub(crate) struct AtomReadGuard<'a> {
id: api::AtomId,
guard: RwLockReadGuard<'a, MemoMap<AtomId, Box<dyn DynOwnedAtom>>>,
_lock: PhantomData<&'a ()>,
guard: RwLockReadGuard<MemoMap<api::AtomId, Box<dyn DynOwnedAtom>>>,
}
impl<'a> AtomReadGuard<'a> {
async fn new(id: api::AtomId, ctx: &'a SysCtx) -> Self {
let guard = ctx.get_or_default::<ObjStore>().objects.read().await;
let valid = guard.iter().map(|i| i.0).collect_vec();
assert!(guard.get(&id).is_some(), "Received invalid atom ID: {id:?} not in {valid:?}");
Self { id, guard }
async fn new(id: api::AtomId) -> Self {
let guard = get_obj_store().objects.read().await;
if guard.get(&id).is_none() {
panic!("Received invalid atom ID: {id:?}");
}
Self { id, guard, _lock: PhantomData }
}
}
impl Deref for AtomReadGuard<'_> {
@@ -68,89 +69,106 @@ impl Deref for AtomReadGuard<'_> {
fn deref(&self) -> &Self::Target { &**self.guard.get(&self.id).unwrap() }
}
pub(crate) async fn take_atom(id: api::AtomId, ctx: &SysCtx) -> Box<dyn DynOwnedAtom> {
let mut g = ctx.get_or_default::<ObjStore>().objects.write().await;
g.remove(&id).unwrap_or_else(|| panic!("Received invalid atom ID: {}", id.0))
/// Remove an atom from the store
pub(crate) async fn take_atom(id: api::AtomId) -> Box<dyn DynOwnedAtom> {
let mut g = get_obj_store().objects.write().await;
g.remove(&id).unwrap_or_else(|| {
let name = dyn_cted().inst().card().name();
panic!("{name} received invalid atom ID: {}", id.0)
})
}
pub struct OwnedAtomDynfo<T: OwnedAtom> {
pub(crate) struct OwnedAtomOps<T: OwnedAtom> {
msbuild: MethodSetBuilder<T>,
ms: OnceCell<MethodSet<T>>,
}
impl<T: OwnedAtom> AtomDynfo for OwnedAtomDynfo<T> {
fn tid(&self) -> TypeId { TypeId::of::<T>() }
fn name(&self) -> &'static str { type_name::<T>() }
impl<A: OwnedAtom> AtomOps for OwnedAtomOps<A> {
fn tid(&self) -> TypeId { TypeId::of::<A>() }
fn name(&self) -> &'static str { type_name::<A>() }
fn decode<'a>(&'a self, AtomCtx(data, ..): AtomCtx<'a>) -> LocalBoxFuture<'a, Box<dyn Any>> {
Box::pin(async {
Box::new(<T as AtomCard>::Data::decode(Pin::new(&mut &data[..])).await) as Box<dyn Any>
Box::pin(async { Box::new(<A as Atomic>::Data::decode_slice(&mut &data[..])) as Box<dyn Any> })
}
fn call(&self, AtomCtx(_, id): AtomCtx, arg: Expr) -> LocalBoxFuture<'_, GExpr> {
Box::pin(async move {
writeln!(
log("msg"),
"owned call {} {}",
take_first(&AtomReadGuard::new(id.unwrap()).await.dyn_print().await, false),
take_first(&arg.print(&FmtCtxImpl::default()).await, true),
)
.await;
take_atom(id.unwrap()).await.dyn_call(arg).await
})
}
fn call(&self, AtomCtx(_, id, ctx): AtomCtx, arg: Expr) -> LocalBoxFuture<'_, GExpr> {
Box::pin(async move { take_atom(id.unwrap(), &ctx).await.dyn_call(arg).await })
fn call_ref<'a>(&'a self, AtomCtx(_, id): AtomCtx<'a>, arg: Expr) -> LocalBoxFuture<'a, GExpr> {
Box::pin(async move {
writeln!(
log("msg"),
"owned call_ref {} {}",
take_first(&AtomReadGuard::new(id.unwrap()).await.dyn_print().await, false),
take_first(&arg.print(&FmtCtxImpl::default()).await, true),
)
.await;
AtomReadGuard::new(id.unwrap()).await.dyn_call_ref(arg).await
})
}
fn call_ref<'a>(
fn print(&self, AtomCtx(_, id): AtomCtx<'_>) -> LocalBoxFuture<'_, FmtUnit> {
Box::pin(async move { AtomReadGuard::new(id.unwrap()).await.dyn_print().await })
}
fn handle_req_ref<'a>(
&'a self,
AtomCtx(_, id, ctx): AtomCtx<'a>,
arg: Expr,
) -> LocalBoxFuture<'a, GExpr> {
Box::pin(async move { AtomReadGuard::new(id.unwrap(), &ctx).await.dyn_call_ref(arg).await })
}
fn print(&self, AtomCtx(_, id, ctx): AtomCtx<'_>) -> LocalBoxFuture<'_, FmtUnit> {
Box::pin(
async move { AtomReadGuard::new(id.unwrap(), &ctx).await.dyn_print(ctx.clone()).await },
)
}
fn handle_req<'a, 'b: 'a, 'c: 'a>(
&'a self,
AtomCtx(_, id, ctx): AtomCtx,
AtomCtx(_, id): AtomCtx<'a>,
key: Sym,
req: Pin<&'b mut dyn Read>,
rep: Pin<&'c mut dyn Write>,
req: Box<dyn orchid_base::ReqReader>,
) -> LocalBoxFuture<'a, bool> {
Box::pin(async move {
let a = AtomReadGuard::new(id.unwrap(), &ctx).await;
let ms = self.ms.get_or_init(self.msbuild.pack(ctx.clone())).await;
ms.dispatch(a.as_any_ref().downcast_ref().unwrap(), ctx.clone(), key, req, rep).await
let a = AtomReadGuard::new(id.unwrap()).await;
let ms = self.ms.get_or_init(self.msbuild.pack()).await;
ms.dispatch(a.as_any_ref().downcast_ref().unwrap(), key, req).await
})
}
fn command<'a>(
&'a self,
AtomCtx(_, id, ctx): AtomCtx<'a>,
) -> LocalBoxFuture<'a, OrcRes<Option<GExpr>>> {
Box::pin(async move { take_atom(id.unwrap(), &ctx).await.dyn_command(ctx.clone()).await })
}
fn drop(&self, AtomCtx(_, id, ctx): AtomCtx) -> LocalBoxFuture<'_, ()> {
Box::pin(async move { take_atom(id.unwrap(), &ctx).await.dyn_free(ctx.clone()).await })
fn drop(&self, AtomCtx(_, id): AtomCtx) -> LocalBoxFuture<'_, ()> {
Box::pin(async move { take_atom(id.unwrap()).await.dyn_free().await })
}
fn serialize<'a, 'b: 'a>(
&'a self,
AtomCtx(_, id, ctx): AtomCtx<'a>,
mut write: Pin<&'b mut dyn Write>,
AtomCtx(_, id): AtomCtx<'a>,
mut write: Pin<&'b mut dyn AsyncWrite>,
) -> LocalBoxFuture<'a, Option<Vec<Expr>>> {
Box::pin(async move {
let id = id.unwrap();
id.encode(write.as_mut()).await;
AtomReadGuard::new(id, &ctx).await.dyn_serialize(ctx.clone(), write).await
id.encode(write.as_mut()).await.unwrap();
AtomReadGuard::new(id).await.dyn_serialize(write).await
})
}
fn deserialize<'a>(
&'a self,
ctx: SysCtx,
data: &'a [u8],
refs: &'a [Expr],
) -> LocalBoxFuture<'a, api::Atom> {
) -> LocalBoxFuture<'a, api::LocalAtom> {
Box::pin(async move {
let refs = T::Refs::from_iter(refs.iter().cloned());
let obj = T::deserialize(DeserCtxImpl(data, &ctx), refs).await;
obj._factory().build(ctx).await
let refs = A::Refs::from_iter(refs.iter().cloned());
let obj = A::deserialize(DeserCtxImpl(data), refs).await;
obj._factory().build().await
})
}
}
/// Read from the buffer populated by a previous call to [OwnedAtom::serialize]
pub trait DeserializeCtx: Sized {
/// Read a value from the head of the buffer
fn read<T: Decode>(&mut self) -> impl Future<Output = T>;
/// Check if the buffer is empty
fn is_empty(&self) -> bool;
/// # Panics
///
/// if the buffer isn't empty
fn assert_empty(&self) { assert!(self.is_empty(), "Bytes found after decoding") }
/// Decode the only value in the buffer
///
/// # Panics
///
/// if the buffer has more data after the value was read
fn decode<T: Decode>(&mut self) -> impl Future<Output = T> {
async {
let t = self.read().await;
@@ -158,16 +176,16 @@ pub trait DeserializeCtx: Sized {
t
}
}
fn sys(&self) -> SysCtx;
}
struct DeserCtxImpl<'a>(&'a [u8], &'a SysCtx);
struct DeserCtxImpl<'a>(&'a [u8]);
impl DeserializeCtx for DeserCtxImpl<'_> {
async fn read<T: Decode>(&mut self) -> T { T::decode(Pin::new(&mut self.0)).await }
async fn read<T: Decode>(&mut self) -> T { T::decode(Pin::new(&mut self.0)).await.unwrap() }
fn is_empty(&self) -> bool { self.0.is_empty() }
fn sys(&self) -> SysCtx { self.1.clone() }
}
/// Various collections of expr's that distinguish how many references the type
/// holds. See [OwnedAtom::Refs] for the list of permitted values
pub trait RefSet {
fn from_iter<I: Iterator<Item = Expr> + ExactSizeIterator>(refs: I) -> Self;
fn to_vec(self) -> Vec<Expr>;
@@ -200,10 +218,12 @@ impl<const N: usize> RefSet for [Expr; N] {
}
}
/// Atoms that have a [Drop]
/// Atoms that have a [Drop]. Internal mutability is allowed for optimization
/// purposes, but new references to [Expr] should not be added to avoid
/// reference loops
pub trait OwnedAtom: Atomic<Variant = OwnedVariant> + Any + Clone + 'static {
/// If serializable, the collection that best stores subexpression references
/// for this atom.
/// for this type.
///
/// - `()` for no subexppressions,
/// - `[Expr; N]` for a static number of subexpressions
@@ -211,42 +231,46 @@ pub trait OwnedAtom: Atomic<Variant = OwnedVariant> + Any + Clone + 'static {
/// - `Never` if not serializable
///
/// If this isn't `Never`, you must override the default, panicking
/// `serialize` and `deserialize` implementation
/// [Self::serialize] and [Self::deserialize] implementation
type Refs: RefSet;
/// Obtain serializable representation
fn val(&self) -> impl Future<Output = Cow<'_, Self::Data>>;
/// Apply as a function while a different reference to the value exists.
#[allow(unused_variables)]
fn call_ref(&self, arg: Expr) -> impl Future<Output = GExpr> {
async move { bot([err_not_callable(arg.ctx().i()).await]) }
fn call_ref(&self, arg: Expr) -> impl Future<Output: ToExpr> {
async move { bot(err_not_callable(&self.dyn_print().await).await) }
}
fn call(self, arg: Expr) -> impl Future<Output = GExpr> {
/// Apply as a function and consume
fn call(self, arg: Expr) -> impl Future<Output: ToExpr> {
async {
let ctx = arg.ctx();
let gcl = self.call_ref(arg).await;
self.free(ctx).await;
let gcl = self.call_ref(arg).await.to_gen().await;
self.free().await;
gcl
}
}
/// Drop and perform any cleanup. Unlike Rust's [Drop::drop], this is
/// guaranteed to be called
fn free(self) -> impl Future<Output = ()> { async {} }
/// Debug-print. This is the final fallback for Orchid's
/// `std::string::to_str`.
#[allow(unused_variables)]
fn command(self, ctx: SysCtx) -> impl Future<Output = OrcRes<Option<GExpr>>> {
async move { Err(err_not_command(ctx.i()).await.into()) }
}
#[allow(unused_variables)]
fn free(self, ctx: SysCtx) -> impl Future<Output = ()> { async {} }
#[allow(unused_variables)]
fn print<'a>(&'a self, c: &'a (impl FmtCtx + ?Sized + 'a)) -> impl Future<Output = FmtUnit> {
fn print_atom<'a>(&'a self, c: &'a (impl FmtCtx + ?Sized + 'a)) -> impl Future<Output = FmtUnit> {
async { format!("OwnedAtom({})", type_name::<Self>()).into() }
}
/// Serialize this object. If the object is serializable you must override
/// this function, otherwise set [Self::Refs] to [Never].
#[allow(unused_variables)]
fn serialize(
&self,
ctx: SysCtx,
write: Pin<&mut (impl Write + ?Sized)>,
write: Pin<&mut (impl AsyncWrite + ?Sized)>,
) -> impl Future<Output = Self::Refs> {
assert_serializable::<Self>();
async { panic!("Either implement serialize or set Refs to Never for {}", type_name::<Self>()) }
}
/// Deserialize this object. If the object is serializable you must override
/// this function, otherwise set [Self::Refs] to [Never]
#[allow(unused_variables)]
fn deserialize(ctx: impl DeserializeCtx, refs: Self::Refs) -> impl Future<Output = Self> {
fn deserialize(dctx: impl DeserializeCtx, refs: Self::Refs) -> impl Future<Output = Self> {
assert_serializable::<Self>();
async {
panic!("Either implement deserialize or set Refs to Never for {}", type_name::<Self>())
@@ -254,62 +278,87 @@ pub trait OwnedAtom: Atomic<Variant = OwnedVariant> + Any + Clone + 'static {
}
}
/// Debug-assert that the object opted in to serialization
fn assert_serializable<T: OwnedAtom>() {
static MSG: &str = "The extension scaffold is broken, Never Refs should prevent serialization";
assert_ne!(TypeId::of::<T::Refs>(), TypeId::of::<Never>(), "{MSG}");
debug_assert_ne!(TypeId::of::<T::Refs>(), TypeId::of::<Never>(), "{MSG}");
}
pub trait DynOwnedAtom: 'static {
fn atom_tid(&self) -> TypeId;
pub(crate) trait DynOwnedAtom: DynClone + 'static {
fn as_any_ref(&self) -> &dyn Any;
fn encode<'a>(&'a self, buffer: Pin<&'a mut dyn Write>) -> LocalBoxFuture<'a, ()>;
fn encode<'a>(&'a self, buffer: Pin<&'a mut dyn AsyncWrite>) -> LocalBoxFuture<'a, ()>;
fn dyn_call_ref(&self, arg: Expr) -> LocalBoxFuture<'_, GExpr>;
fn dyn_call(self: Box<Self>, arg: Expr) -> LocalBoxFuture<'static, GExpr>;
fn dyn_command(self: Box<Self>, ctx: SysCtx) -> LocalBoxFuture<'static, OrcRes<Option<GExpr>>>;
fn dyn_free(self: Box<Self>, ctx: SysCtx) -> LocalBoxFuture<'static, ()>;
fn dyn_print(&self, ctx: SysCtx) -> LocalBoxFuture<'_, FmtUnit>;
fn dyn_free(self: Box<Self>) -> LocalBoxFuture<'static, ()>;
fn dyn_print(&self) -> LocalBoxFuture<'_, FmtUnit>;
fn dyn_serialize<'a>(
&'a self,
ctx: SysCtx,
sink: Pin<&'a mut dyn Write>,
sink: Pin<&'a mut dyn AsyncWrite>,
) -> LocalBoxFuture<'a, Option<Vec<Expr>>>;
}
impl<T: OwnedAtom> DynOwnedAtom for T {
fn atom_tid(&self) -> TypeId { TypeId::of::<T>() }
fn as_any_ref(&self) -> &dyn Any { self }
fn encode<'a>(&'a self, buffer: Pin<&'a mut dyn Write>) -> LocalBoxFuture<'a, ()> {
async { self.val().await.as_ref().encode(buffer).await }.boxed_local()
fn encode<'a>(&'a self, buffer: Pin<&'a mut dyn AsyncWrite>) -> LocalBoxFuture<'a, ()> {
async { self.val().await.as_ref().encode(buffer).await.unwrap() }.boxed_local()
}
fn dyn_call_ref(&self, arg: Expr) -> LocalBoxFuture<'_, GExpr> {
self.call_ref(arg).boxed_local()
async { self.call_ref(arg).await.to_gen().await }.boxed_local()
}
fn dyn_call(self: Box<Self>, arg: Expr) -> LocalBoxFuture<'static, GExpr> {
self.call(arg).boxed_local()
async { self.call(arg).await.to_gen().await }.boxed_local()
}
fn dyn_command(self: Box<Self>, ctx: SysCtx) -> LocalBoxFuture<'static, OrcRes<Option<GExpr>>> {
self.command(ctx).boxed_local()
}
fn dyn_free(self: Box<Self>, ctx: SysCtx) -> LocalBoxFuture<'static, ()> {
self.free(ctx).boxed_local()
}
fn dyn_print(&self, ctx: SysCtx) -> LocalBoxFuture<'_, FmtUnit> {
async move { self.print(&FmtCtxImpl { i: ctx.i() }).await }.boxed_local()
fn dyn_free(self: Box<Self>) -> LocalBoxFuture<'static, ()> { self.free().boxed_local() }
fn dyn_print(&self) -> LocalBoxFuture<'_, FmtUnit> {
async move { self.print_atom(&FmtCtxImpl::default()).await }.boxed_local()
}
fn dyn_serialize<'a>(
&'a self,
ctx: SysCtx,
sink: Pin<&'a mut dyn Write>,
sink: Pin<&'a mut dyn AsyncWrite>,
) -> LocalBoxFuture<'a, Option<Vec<Expr>>> {
match TypeId::of::<Never>() == TypeId::of::<<Self as OwnedAtom>::Refs>() {
true => ready(None).boxed_local(),
false => async { Some(self.serialize(ctx, sink).await.to_vec()) }.boxed_local(),
false => async { Some(self.serialize(sink).await.to_vec()) }.boxed_local(),
}
}
}
#[derive(Default)]
struct ObjStore {
next_id: AtomicU64,
objects: RwLock<MemoMap<api::AtomId, Box<dyn DynOwnedAtom>>>,
pub(crate) struct ObjStore {
pub(crate) next_id: RefCell<u64>,
pub(crate) objects: RwLock<MemoMap<api::AtomId, Box<dyn DynOwnedAtom>>>,
}
task_local! {
static OBJ_STORE: Rc<ObjStore>;
}
pub(crate) fn with_obj_store<'a>(fut: LocalBoxFuture<'a, ()>) -> LocalBoxFuture<'a, ()> {
Box::pin(OBJ_STORE.scope(Rc::new(ObjStore::default()), fut))
}
pub(crate) fn get_obj_store() -> Rc<ObjStore> {
OBJ_STORE.try_with(|store| store.clone()).expect("Owned atom store not initialized")
}
/// Debug-print the entire object store. Most useful if the interpreter refuses
/// to shut down due to apparent refloops
pub async fn debug_print_obj_store(show_atoms: bool) {
let store = get_obj_store();
let keys = store.objects.read().await.keys().cloned().collect_vec();
let mut message = "Atoms in store:".to_string();
if !show_atoms {
message += &keys.iter().map(|k| format!(" {:?}", k)).join("");
} else {
for k in keys {
let g = store.objects.read().await;
let Some(atom) = g.get(&k) else {
message += &format!("\n{k:?} has since been deleted");
continue;
};
let atom = clone_box(&**atom);
std::mem::drop(g);
message += &format!("\n{k:?} -> {}", take_first(&atom.dyn_print().await, true));
}
}
writeln!(log("debug"), "{message}").await
}
impl SysCtxEntry for ObjStore {}

View File

@@ -3,116 +3,97 @@ use std::future::Future;
use std::pin::Pin;
use async_once_cell::OnceCell;
use async_std::io::{Read, Write};
use futures::FutureExt;
use futures::AsyncWrite;
use futures::future::LocalBoxFuture;
use orchid_api_traits::{Coding, enc_vec};
use orchid_base::error::OrcRes;
use orchid_base::format::FmtUnit;
use orchid_base::name::Sym;
use orchid_base::{FmtUnit, Sym, log};
use crate::api;
use crate::atom::{
AtomCard, AtomCtx, AtomDynfo, AtomFactory, Atomic, AtomicFeaturesImpl, AtomicVariant, MethodSet,
MethodSetBuilder, err_not_callable, err_not_command, get_info,
};
use crate::expr::Expr;
use crate::gen_expr::{GExpr, bot};
use crate::system::SysCtx;
use crate::system_ctor::CtedObj;
use crate::{
AtomCtx, AtomFactory, AtomOps, Atomic, AtomicFeaturesImpl, AtomicVariant, DynSystemCardExt, Expr,
MethodSet, MethodSetBuilder, api, dyn_cted, err_not_callable,
};
/// Value of [Atomic::Variant] for a type that implements [ThinAtom]
pub struct ThinVariant;
impl AtomicVariant for ThinVariant {}
impl<A: ThinAtom + Atomic<Variant = ThinVariant>> AtomicFeaturesImpl<ThinVariant> for A {
fn _factory(self) -> AtomFactory {
AtomFactory::new(async move |ctx| {
let (id, _) = get_info::<A>(ctx.get::<CtedObj>().inst().card());
let mut buf = enc_vec(&id).await;
self.encode(Pin::new(&mut buf)).await;
api::Atom { drop: None, data: buf, owner: ctx.sys_id() }
AtomFactory::new(type_name::<A>().to_string(), async move || {
let (id, _) = dyn_cted().inst().card().ops::<A>();
let mut buf = enc_vec(&id);
self.encode_vec(&mut buf);
api::LocalAtom { drop: None, data: api::AtomData(buf) }
})
}
fn _info() -> Self::_Info { ThinAtomDynfo { msbuild: Self::reg_reqs(), ms: OnceCell::new() } }
type _Info = ThinAtomDynfo<Self>;
fn _info() -> Self::_Info { ThinAtomOps { msbuild: Self::reg_methods(), ms: OnceCell::new() } }
type _Info = ThinAtomOps<Self>;
}
pub struct ThinAtomDynfo<T: ThinAtom> {
pub(crate) struct ThinAtomOps<T: ThinAtom> {
msbuild: MethodSetBuilder<T>,
ms: OnceCell<MethodSet<T>>,
}
impl<T: ThinAtom> AtomDynfo for ThinAtomDynfo<T> {
fn print<'a>(&self, AtomCtx(buf, _, ctx): AtomCtx<'a>) -> LocalBoxFuture<'a, FmtUnit> {
Box::pin(async move { T::decode(Pin::new(&mut &buf[..])).await.print(ctx).await })
impl<T: ThinAtom> AtomOps for ThinAtomOps<T> {
fn print<'a>(&self, AtomCtx(buf, _): AtomCtx<'a>) -> LocalBoxFuture<'a, FmtUnit> {
Box::pin(async move { T::decode_slice(&mut &buf[..]).print().await })
}
fn tid(&self) -> TypeId { TypeId::of::<T>() }
fn name(&self) -> &'static str { type_name::<T>() }
fn decode<'a>(&'a self, AtomCtx(buf, ..): AtomCtx<'a>) -> LocalBoxFuture<'a, Box<dyn Any>> {
Box::pin(async { Box::new(T::decode(Pin::new(&mut &buf[..])).await) as Box<dyn Any> })
Box::pin(async { Box::new(T::decode_slice(&mut &buf[..])) as Box<dyn Any> })
}
fn call<'a>(&'a self, AtomCtx(buf, ..): AtomCtx<'a>, arg: Expr) -> LocalBoxFuture<'a, GExpr> {
Box::pin(async move { T::decode(Pin::new(&mut &buf[..])).await.call(arg).await })
Box::pin(async move { T::decode_slice(&mut &buf[..]).call(arg).await })
}
fn call_ref<'a>(&'a self, AtomCtx(buf, ..): AtomCtx<'a>, arg: Expr) -> LocalBoxFuture<'a, GExpr> {
Box::pin(async move { T::decode(Pin::new(&mut &buf[..])).await.call(arg).await })
Box::pin(async move { T::decode_slice(&mut &buf[..]).call(arg).await })
}
fn handle_req<'a, 'm1: 'a, 'm2: 'a>(
fn handle_req_ref<'a>(
&'a self,
AtomCtx(buf, _, sys): AtomCtx<'a>,
AtomCtx(buf, ..): AtomCtx<'a>,
key: Sym,
req: Pin<&'m1 mut dyn Read>,
rep: Pin<&'m2 mut dyn Write>,
req: Box<dyn orchid_base::ReqReader>,
) -> LocalBoxFuture<'a, bool> {
Box::pin(async move {
let ms = self.ms.get_or_init(self.msbuild.pack(sys.clone())).await;
ms.dispatch(&T::decode(Pin::new(&mut &buf[..])).await, sys, key, req, rep).await
let ms = self.ms.get_or_init(self.msbuild.pack()).await;
ms.dispatch(&T::decode_slice(&mut &buf[..]), key, req).await
})
}
fn command<'a>(
&'a self,
AtomCtx(buf, _, ctx): AtomCtx<'a>,
) -> LocalBoxFuture<'a, OrcRes<Option<GExpr>>> {
async move { T::decode(Pin::new(&mut &buf[..])).await.command(ctx).await }.boxed_local()
}
fn serialize<'a, 'b: 'a>(
&'a self,
ctx: AtomCtx<'a>,
write: Pin<&'b mut dyn Write>,
write: Pin<&'b mut dyn AsyncWrite>,
) -> LocalBoxFuture<'a, Option<Vec<Expr>>> {
Box::pin(async {
T::decode(Pin::new(&mut &ctx.0[..])).await.encode(write).await;
T::decode_slice(&mut &ctx.0[..]).encode(write).await.unwrap();
Some(Vec::new())
})
}
fn deserialize<'a>(
&'a self,
ctx: SysCtx,
data: &'a [u8],
refs: &'a [Expr],
) -> LocalBoxFuture<'a, api::Atom> {
) -> LocalBoxFuture<'a, api::LocalAtom> {
assert!(refs.is_empty(), "Refs found when deserializing thin atom");
Box::pin(async { T::decode(Pin::new(&mut &data[..])).await._factory().build(ctx).await })
Box::pin(async { T::decode_slice(&mut &data[..])._factory().build().await })
}
fn drop<'a>(&'a self, AtomCtx(buf, _, ctx): AtomCtx<'a>) -> LocalBoxFuture<'a, ()> {
fn drop<'a>(&'a self, AtomCtx(buf, _): AtomCtx<'a>) -> LocalBoxFuture<'a, ()> {
Box::pin(async move {
let string_self = T::decode(Pin::new(&mut &buf[..])).await.print(ctx.clone()).await;
writeln!(ctx.logger(), "Received drop signal for non-drop atom {string_self:?}");
let string_self = T::decode_slice(&mut &buf[..]).print().await;
writeln!(log("warn"), "Received drop signal for non-drop atom {string_self:?}").await;
})
}
}
pub trait ThinAtom:
AtomCard<Data = Self> + Atomic<Variant = ThinVariant> + Coding + Send + Sync + 'static
{
/// A simple value that is serializable and does not reference any other values
pub trait ThinAtom: Atomic<Data = Self> + Atomic<Variant = ThinVariant> + Coding + 'static {
#[allow(unused_variables)]
fn call(&self, arg: Expr) -> impl Future<Output = GExpr> {
async move { bot([err_not_callable(arg.ctx().i()).await]) }
async move { bot(err_not_callable(&self.print().await).await) }
}
#[allow(unused_variables)]
fn command(&self, ctx: SysCtx) -> impl Future<Output = OrcRes<Option<GExpr>>> {
async move { Err(err_not_command(ctx.i()).await.into()) }
}
#[allow(unused_variables)]
fn print(&self, ctx: SysCtx) -> impl Future<Output = FmtUnit> {
fn print(&self) -> impl Future<Output = FmtUnit> {
async { format!("ThinAtom({})", type_name::<Self>()).into() }
}
}

View File

@@ -0,0 +1,56 @@
use std::rc::Rc;
use std::time::Duration;
use futures::future::LocalBoxFuture;
use orchid_base::future_to_vt;
use crate::{ExtPort, ExtensionBuilder, api};
pub type ExtCx = api::binary::ExtensionContext;
struct Spawner(api::binary::SpawnerBin);
impl Drop for Spawner {
fn drop(&mut self) { (self.0.drop)(self.0.data) }
}
impl Spawner {
pub fn spawn(&self, delay: Duration, fut: LocalBoxFuture<'static, ()>) {
(self.0.spawn)(self.0.data, delay.as_millis().try_into().unwrap(), future_to_vt(fut))
}
}
pub fn orchid_extension_main_body(cx: ExtCx, builder: ExtensionBuilder) {
let spawner = Rc::new(Spawner(cx.spawner));
let spawner2 = spawner.clone();
spawner2.spawn(
Duration::ZERO,
Box::pin(builder.run(ExtPort {
input: Box::pin(cx.input),
output: Box::pin(cx.output),
log: Box::pin(cx.log),
spawn: Rc::new(move |delay, fut| spawner.spawn(delay, fut)),
})),
);
}
/// Generate entrypoint for the dylib extension loader
///
/// # Usage
///
/// ```
/// #[macro_use]
/// use orchid_extension::dylib_main;
/// use orchid_extension::entrypoint::ExtensionBuilder;
///
/// dylib_main! {
/// ExtensionBuilder::new("orchid-std::main")
/// }
/// ```
#[macro_export]
macro_rules! dylib_main {
($builder:expr) => {
#[unsafe(no_mangle)]
pub extern "C" fn orchid_extension_main(cx: ::orchid_api::binary::ExtensionContext) {
$crate::binary::orchid_extension_main_body(cx, $builder);
}
};
}

View File

@@ -0,0 +1,45 @@
use std::borrow::Cow;
use std::rc::Rc;
use futures::future::LocalBoxFuture;
use never::Never;
use orchid_base::{Receipt, ReqHandle, ReqHandleExt};
use crate::gen_expr::{GExpr, new_atom, serialize};
use crate::std_reqs::StartCommand;
use crate::{Atomic, MethodSetBuilder, OwnedAtom, OwnedVariant, Supports, ToExpr};
pub trait AsyncFnDyn {
fn call<'a>(&'a self) -> LocalBoxFuture<'a, Option<GExpr>>;
}
impl<T: AsyncFn() -> Option<GExpr>> AsyncFnDyn for T {
fn call<'a>(&'a self) -> LocalBoxFuture<'a, Option<GExpr>> { Box::pin(async { (self)().await }) }
}
#[derive(Clone)]
pub struct CmdAtom(Rc<dyn AsyncFnDyn>);
impl Atomic for CmdAtom {
type Data = ();
type Variant = OwnedVariant;
fn reg_methods() -> MethodSetBuilder<Self> { MethodSetBuilder::new().handle::<StartCommand>() }
}
impl Supports<StartCommand> for CmdAtom {
async fn handle(&self, hand: Box<dyn ReqHandle>, req: StartCommand) -> std::io::Result<Receipt> {
let reply = self.0.call().await;
match reply {
None => hand.reply(&req, None).await,
Some(next) => hand.reply(&req, Some(serialize(next).await)).await,
}
}
}
impl OwnedAtom for CmdAtom {
type Refs = Never;
async fn val(&self) -> Cow<'_, Self::Data> { Cow::Owned(()) }
}
pub fn cmd<R: ToExpr>(f: impl AsyncFn() -> Option<R> + Clone + 'static) -> GExpr {
new_atom(CmdAtom(Rc::new(async move || match f().await {
None => None,
Some(r) => Some(r.to_gen().await),
})))
}

View File

@@ -1,15 +1,21 @@
use std::future::Future;
use std::pin::Pin;
use orchid_base::error::{OrcErr, OrcRes, mk_err};
use orchid_base::interner::Interner;
use orchid_base::location::Pos;
use dyn_clone::DynClone;
use futures::future::FusedFuture;
use never::Never;
use orchid_base::{Format, OrcErrv, OrcRes, Pos, fmt, is, mk_errv};
use trait_set::trait_set;
use crate::atom::{AtomicFeatures, ToAtom, TypAtom};
use crate::expr::Expr;
use crate::gen_expr::{GExpr, atom, bot};
use crate::system::downcast_atom;
use crate::gen_expr::{GExpr, bot};
use crate::{AtomicFeatures, Expr, ExprKind, ForeignAtom, TAtom};
/// Values that may be converted from certain specific Orchid expressions
pub trait TryFromExpr: Sized {
/// Attempt to cast a generic Orchid expression reference to a concrete value.
/// Note that this cannot evaluate the expression, and if it is not already
/// evaluated, it will simply fail. Use [crate::ExecHandle::exec] inside
/// [crate::exec] to wait for an expression to be evaluated
fn try_from_expr(expr: Expr) -> impl Future<Output = OrcRes<Self>>;
}
@@ -23,46 +29,132 @@ impl<T: TryFromExpr, U: TryFromExpr> TryFromExpr for (T, U) {
}
}
async fn err_not_atom(pos: Pos, i: &Interner) -> OrcErr {
mk_err(i.i("Expected an atom").await, "This expression is not an atom", [pos.into()])
/// Error raised when a composite expression was assumed to be an
/// [crate::Atomic], or if the expression was not evaluated yet
async fn err_not_atom(pos: Pos, value: &impl Format) -> OrcErrv {
mk_errv(is("Expected an atom").await, format!("{} is not an atom", fmt(value).await), [pos])
}
async fn err_type(pos: Pos, i: &Interner) -> OrcErr {
mk_err(i.i("Type error").await, "The atom is a different type than expected", [pos.into()])
}
impl<A: AtomicFeatures> TryFromExpr for TypAtom<A> {
impl TryFromExpr for ForeignAtom {
async fn try_from_expr(expr: Expr) -> OrcRes<Self> {
match expr.atom().await {
Err(ex) => Err(err_not_atom(ex.data().await.pos.clone(), ex.ctx().i()).await.into()),
Ok(f) => match downcast_atom::<A>(f).await {
Ok(a) => Ok(a),
Err(f) => Err(err_type(f.pos(), f.ctx().i()).await.into()),
},
if let ExprKind::Bottom(err) = &expr.data().await.kind {
return Err(err.clone());
}
match expr.clone().atom().await {
Err(ex) => Err(err_not_atom(ex.data().await.pos.clone(), &expr).await),
Ok(f) => Ok(f),
}
}
}
impl<A: AtomicFeatures> TryFromExpr for TAtom<A> {
async fn try_from_expr(expr: Expr) -> OrcRes<Self> {
let f = ForeignAtom::try_from_expr(expr).await?;
match f.clone().downcast::<A>() {
Ok(a) => Ok(a),
Err(e) => Err(e.mk_err().await),
}
}
}
/// Values that are convertible to an Orchid expression. This could mean that
/// the value owns an [Expr] or it may involve more complex operations
///
/// [ToExpr] is also implemented for [orchid_base::Sym] where it converts to a
/// reference to the constant by that name
pub trait ToExpr {
fn to_expr(self) -> GExpr;
/// Inline the value in an expression returned from a function or included in
/// the const tree returned by [crate::System::env]
fn to_gen(self) -> impl Future<Output = GExpr>;
/// Convert the value into a freestanding expression
fn to_expr(self) -> impl Future<Output = Expr>
where Self: Sized {
async { self.to_gen().await.create().await }
}
fn boxed<'a>(self) -> Box<dyn ToExprDyn + 'a>
where Self: Sized + 'a {
Box::new(self)
}
fn clonable_boxed<'a>(self) -> Box<dyn ClonableToExprDyn + 'a>
where Self: Clone + Sized + 'a {
Box::new(self)
}
}
/// A wrapper for a future that implements [ToExpr]
#[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct ToExprFuture<F>(pub F);
impl<F: Future<Output: ToExpr>> ToExpr for ToExprFuture<F> {
async fn to_gen(self) -> GExpr { self.0.await.to_gen().await }
async fn to_expr(self) -> Expr
where Self: Sized {
self.0.await.to_expr().await
}
}
impl<F: FusedFuture> FusedFuture for ToExprFuture<F> {
fn is_terminated(&self) -> bool { self.0.is_terminated() }
}
impl<F: Future> Future for ToExprFuture<F> {
type Output = F::Output;
fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> std::task::Poll<Self::Output> {
unsafe { self.map_unchecked_mut(|this| &mut this.0) }.poll(cx)
}
}
/// Type-erased [ToExpr]
pub trait ToExprDyn {
fn to_gen_dyn<'a>(self: Box<Self>) -> Pin<Box<dyn Future<Output = GExpr> + 'a>>
where Self: 'a;
fn to_expr_dyn<'a>(self: Box<Self>) -> Pin<Box<dyn Future<Output = Expr> + 'a>>
where Self: 'a;
}
impl<T: ToExpr> ToExprDyn for T {
fn to_gen_dyn<'a>(self: Box<Self>) -> Pin<Box<dyn Future<Output = GExpr> + 'a>>
where Self: 'a {
Box::pin(self.to_gen())
}
fn to_expr_dyn<'a>(self: Box<Self>) -> Pin<Box<dyn Future<Output = Expr> + 'a>>
where Self: 'a {
Box::pin(self.to_expr())
}
}
trait_set! {
/// type-erased [ToExpr] and [Clone]. Needed for a value to be
/// included in [crate::System::env]
pub trait ClonableToExprDyn = ToExprDyn + DynClone;
}
impl ToExpr for Box<dyn ToExprDyn> {
async fn to_gen(self) -> GExpr { self.to_gen_dyn().await }
async fn to_expr(self) -> Expr { self.to_expr_dyn().await }
}
impl ToExpr for Box<dyn ClonableToExprDyn> {
async fn to_gen(self) -> GExpr { self.to_gen_dyn().await }
async fn to_expr(self) -> Expr { self.to_expr_dyn().await }
}
impl Clone for Box<dyn ClonableToExprDyn> {
fn clone(&self) -> Self { dyn_clone::clone_box(&**self) }
}
impl ToExpr for GExpr {
fn to_expr(self) -> GExpr { self }
async fn to_gen(self) -> GExpr { self }
async fn to_expr(self) -> Expr { self.create().await }
}
impl ToExpr for Expr {
fn to_expr(self) -> GExpr { self.slot() }
async fn to_gen(self) -> GExpr { self.slot() }
async fn to_expr(self) -> Expr { self }
}
impl<T: ToExpr> ToExpr for OrcRes<T> {
fn to_expr(self) -> GExpr {
async fn to_gen(self) -> GExpr {
match self {
Err(e) => bot(e),
Ok(t) => t.to_expr(),
Ok(t) => t.to_gen().await,
}
}
}
impl<A: ToAtom> ToExpr for A {
fn to_expr(self) -> GExpr { atom(self) }
impl ToExpr for Never {
async fn to_gen(self) -> GExpr { match self {} }
}

View File

@@ -0,0 +1,88 @@
use std::any::type_name;
use std::borrow::Cow;
use std::marker::PhantomData;
use std::rc::Rc;
use futures::channel::mpsc::{Sender, channel};
use futures::lock::Mutex;
use futures::stream::{self, LocalBoxStream};
use futures::{FutureExt, SinkExt, StreamExt};
use never::Never;
use orchid_base::{FmtCtx, FmtUnit, OrcRes};
use crate::gen_expr::{GExpr, call, lam, new_atom, seq};
use crate::{Atomic, Expr, OwnedAtom, OwnedVariant, ToExpr, TryFromExpr};
enum Command {
Execute(GExpr, Sender<Expr>),
Halt(GExpr),
}
struct BuilderCoroutineData {
name: &'static str,
receiver: Mutex<LocalBoxStream<'static, Command>>,
}
#[derive(Clone)]
struct BuilderCoroutine(Rc<BuilderCoroutineData>);
impl BuilderCoroutine {
pub async fn run(self) -> GExpr {
let cmd = self.0.receiver.lock().await.next().await;
match cmd {
None => panic!("Exec handle dropped and coroutine blocked instead of returning"),
Some(Command::Halt(expr)) => expr,
Some(Command::Execute(expr, reply)) =>
call(lam(async |x| seq(x, call(new_atom(Replier { reply, builder: self }), x)).await), expr)
.await,
}
}
}
#[derive(Clone)]
pub(crate) struct Replier {
reply: Sender<Expr>,
builder: BuilderCoroutine,
}
impl Atomic for Replier {
type Data = ();
type Variant = OwnedVariant;
}
impl OwnedAtom for Replier {
type Refs = Never;
async fn val(&self) -> Cow<'_, Self::Data> { Cow::Owned(()) }
async fn call(mut self, arg: Expr) -> impl ToExpr {
self.reply.send(arg).await.expect("Resolution request dropped after sending");
std::mem::drop(self.reply);
self.builder.run().await
}
async fn print_atom<'a>(&'a self, _c: &'a (impl FmtCtx + ?Sized + 'a)) -> FmtUnit {
format!("Replier<{}>", self.builder.0.name).into()
}
}
/// A long-lived async context that can yield to the executor. The expression
/// representing an in-progress exec block is not serializable.
pub async fn exec<R: ToExpr>(f: impl for<'a> AsyncFnOnce(ExecHandle<'a>) -> R + 'static) -> GExpr {
let (cmd_snd, cmd_recv) = channel(0);
let halt =
async { Command::Halt(f(ExecHandle(cmd_snd, PhantomData)).await.to_gen().await) }.into_stream();
let coro = BuilderCoroutine(Rc::new(BuilderCoroutineData {
name: type_name::<R>(),
receiver: Mutex::new(stream::select(halt, cmd_recv).boxed_local()),
}));
coro.run().await
}
static WEIRD_DROP_ERR: &str = "Coroutine dropped while we are being polled somehow";
/// The handle an [exec] callback uses to yield to the executor
pub struct ExecHandle<'a>(Sender<Command>, PhantomData<&'a ()>);
impl ExecHandle<'_> {
/// Yield to the executor by resolving to an expression that normalizes the
/// value and then calls the continuation of the body with the result.
pub async fn exec<T: TryFromExpr>(&mut self, val: impl ToExpr) -> OrcRes<T> {
let (reply_snd, mut reply_recv) = channel(1);
self.0.send(Command::Execute(val.to_gen().await, reply_snd)).await.expect(WEIRD_DROP_ERR);
T::try_from_expr(reply_recv.next().await.expect(WEIRD_DROP_ERR)).await
}
}

View File

@@ -1,334 +1,413 @@
use std::any::Any;
use std::cell::RefCell;
use std::future::Future;
use std::marker::PhantomData;
use std::mem;
use std::num::NonZero;
use std::pin::Pin;
use std::rc::Rc;
use std::time::Duration;
use async_std::channel::{self, Receiver, Sender};
use async_std::stream;
use async_std::sync::Mutex;
use futures::future::{LocalBoxFuture, join_all};
use futures::{FutureExt, StreamExt, stream_select};
use futures::future::{LocalBoxFuture, join_all, join3};
use futures::{AsyncReadExt, AsyncWriteExt, StreamExt, stream};
use hashbrown::HashMap;
use itertools::Itertools;
use orchid_api::{ExtMsgSet, IntReq};
use orchid_api_traits::{Decode, UnderRoot, enc_vec};
use orchid_base::builtin::{ExtInit, ExtPort, Spawner};
use orchid_base::char_filter::{char_filter_match, char_filter_union, mk_char_filter};
use orchid_base::clone;
use orchid_base::interner::{Interner, Tok};
use orchid_base::logging::Logger;
use orchid_base::name::Sym;
use orchid_base::parse::{Comment, Snippet};
use orchid_base::reqnot::{ReqNot, RequestHandle, Requester};
use orchid_base::tree::{TokenVariant, ttv_from_api};
use orchid_api_traits::{Decode, Encode, Request, UnderRoot, enc_vec};
use orchid_async_utils::{Handle, JoinError, to_task};
use orchid_base::{
Client, ClientExt, CommCtx, Comment, MsgReader, MsgReaderExt, ReqHandleExt, ReqReaderExt,
Snippet, Sym, TokenVariant, Witness, char_filter_match, char_filter_union, es, io_comm, is, log,
mk_char_filter, try_with_reporter, ttv_from_api, with_interner, with_logger, with_stash,
};
use substack::Substack;
use trait_set::trait_set;
use task_local::task_local;
use unsync_pipe::pipe;
use crate::api;
use crate::atom::{AtomCtx, AtomDynfo, AtomTypeId};
use crate::atom_owned::take_atom;
use crate::expr::{Expr, ExprHandle};
use crate::lexer::{LexContext, err_cascade, err_not_applicable};
use crate::parser::{ParsCtx, get_const, linev_into_api};
use crate::system::{SysCtx, atom_by_idx};
use crate::system_ctor::{CtedObj, DynSystemCtor};
use crate::tree::{GenTok, GenTokTree, LazyMemberFactory, TreeIntoApiCtxImpl};
use crate::gen_expr::serialize;
use crate::interner::new_interner;
use crate::logger::LoggerImpl;
use crate::tree::{TreeIntoApiCtxImpl, get_lazy, with_lazy_member_store};
use crate::trivial_req::TrivialReqCycle;
use crate::{
AtomCtx, AtomTypeId, BorrowedExprStore, CtedObj, DynSystemCardExt, DynSystemCtor, Expr,
ExprHandle, ExtPort, LexContext, PTokTree, ParsCtx, SysCtx, SystemCtor, api, dyn_cted,
ekey_cascade, ekey_not_applicable, get_const, linev_into_api, resolve_atom_type, take_atom,
with_funs_ctx, with_obj_store, with_parsed_const_ctx, with_refl_roots, with_sys,
};
pub type ExtReq<'a> = RequestHandle<'a, api::ExtMsgSet>;
pub type ExtReqNot = ReqNot<api::ExtMsgSet>;
task_local::task_local! {
static CLIENT: Rc<dyn Client>;
static CTX: Rc<RefCell<Option<CommCtx>>>;
}
pub struct ExtensionData {
fn get_client() -> Rc<dyn Client> { CLIENT.get() }
/// Do not expect any more requests or notifications, exit once all pending
/// requests settle
pub async fn exit() {
let cx = CTX.get().borrow_mut().take();
cx.unwrap().exit().await.unwrap()
}
/// Set the client used for global [request] and [notify] functions within the
/// runtime of this future
pub async fn with_comm<F: Future>(c: Rc<dyn Client>, ctx: CommCtx, fut: F) -> F::Output {
CLIENT.scope(c, CTX.scope(Rc::new(RefCell::new(Some(ctx))), fut)).await
}
task_local! {
static MUTE_REPLY: ();
}
/// Silence replies within this block even if the `msg` log channel is active to
/// prevent excessive log noise.
pub async fn mute_reply<F: Future>(f: F) -> F::Output { MUTE_REPLY.scope((), f).await }
/// Send a request through the global client's [ClientExt::request]
pub async fn request<T: Request + UnderRoot<Root = api::ExtHostReq>>(t: T) -> T::Response {
let req_str = if MUTE_REPLY.try_with(|b| *b).is_err() { format!("{t:?}") } else { String::new() };
let response = get_client().request(t).await.unwrap();
if MUTE_REPLY.try_with(|b| *b).is_err() {
let ext = dyn_cted().inst().card().name();
writeln!(log("msg"), "{ext} {req_str} got response {response:?}").await;
}
response
}
/// Send a notification through the global client's [ClientExt::notify]
pub async fn notify<T: UnderRoot<Root = api::ExtHostNotif> + 'static>(t: T) {
get_client().notify(t).await.unwrap()
}
struct SystemRecord {
cted: CtedObj,
}
type SystemTable = RefCell<HashMap<api::SysId, Rc<SystemRecord>>>;
task_local! {
static SYSTEM_TABLE: SystemTable;
}
async fn with_sys_record<F: Future>(id: api::SysId, fut: F) -> F::Output {
let cted = SYSTEM_TABLE.with(|tbl| tbl.borrow().get(&id).expect("Invalid sys ID").cted.clone());
with_sys(SysCtx(id, cted), fut).await
}
/// Context that can be attached to a [Future] using [task_local]
pub trait ContextModifier: 'static {
fn apply<'a>(self: Box<Self>, fut: LocalBoxFuture<'a, ()>) -> LocalBoxFuture<'a, ()>;
}
impl<F: AsyncFnOnce(LocalBoxFuture<'_, ()>) + 'static> ContextModifier for F {
fn apply<'a>(self: Box<Self>, fut: LocalBoxFuture<'a, ()>) -> LocalBoxFuture<'a, ()> {
Box::pin((self)(fut))
}
}
pub(crate) trait DynTaskHandle: 'static {
fn abort(self: Box<Self>);
fn join(self: Box<Self>) -> LocalBoxFuture<'static, Result<Box<dyn Any>, JoinError>>;
}
task_local! {
pub(crate) static SPAWN:
Rc<dyn Fn(Duration, LocalBoxFuture<'static, Box<dyn Any>>) -> Box<dyn DynTaskHandle> + 'static>
}
/// Handle for a task that is not associated with a particular pending request
/// or past notification
pub struct TaskHandle<T>(Box<dyn DynTaskHandle>, PhantomData<T>);
impl<T: 'static> TaskHandle<T> {
/// Immediately stop working on the task. Unlike in Tokio's abort, this is a
/// guarantee
pub fn abort(self) { self.0.abort(); }
/// Stop working on the task and return the nested future. The distinction
/// between this and waiting until the task is complete without reparenting it
/// is significant for the purpose of [task_local] context
pub async fn join(self) -> Result<T, JoinError> { Ok(*self.0.join().await?.downcast().unwrap()) }
}
/// Spawn a future that is not associated with a pending request or a past
/// notification. Pending tasks are cancelled and dropped when the extension
/// exits
pub fn spawn<F: Future<Output: 'static> + 'static>(delay: Duration, f: F) -> TaskHandle<F::Output> {
SPAWN.with(|spawn| {
TaskHandle(spawn(delay, Box::pin(async { Box::new(f.await) as Box<dyn Any> })), PhantomData)
})
}
impl DynTaskHandle for Handle<Box<dyn Any>> {
fn abort(self: Box<Self>) { Self::abort(&self); }
fn join(self: Box<Self>) -> LocalBoxFuture<'static, Result<Box<dyn Any>, JoinError>> {
Box::pin(Self::join(*self))
}
}
/// A new Orchid extension as specified in loaders. An extension is a unit of
/// distribution and its name serves for debugging purposes primarily. In
/// contrast, [SystemCtor] is a unit of features of which [ExtensionBuilder] may
/// contain multiple
pub struct ExtensionBuilder {
pub name: &'static str,
pub systems: &'static [&'static dyn DynSystemCtor],
pub systems: Vec<Box<dyn DynSystemCtor>>,
pub context: Vec<Box<dyn ContextModifier>>,
}
impl ExtensionData {
pub fn new(name: &'static str, systems: &'static [&'static dyn DynSystemCtor]) -> Self {
Self { name, systems }
impl ExtensionBuilder {
/// Create a new extension
pub fn new(name: &'static str) -> Self { Self { name, systems: Vec::new(), context: Vec::new() } }
/// Add a system to the extension
pub fn system(mut self, ctor: impl SystemCtor) -> Self {
self.systems.push(Box::new(ctor) as Box<_>);
self
}
}
pub enum MemberRecord {
Gen(Vec<Tok<String>>, LazyMemberFactory),
Res,
}
pub struct SystemRecord {
lazy_members: HashMap<api::TreeId, MemberRecord>,
ctx: SysCtx,
}
trait_set! {
pub trait WithAtomRecordCallback<'a, T> = AsyncFnOnce(
Box<dyn AtomDynfo>,
SysCtx,
AtomTypeId,
&'a [u8]
) -> T
}
pub async fn with_atom_record<'a, F: Future<Output = SysCtx>, T>(
get_sys_ctx: &impl Fn(api::SysId) -> F,
atom: &'a api::Atom,
cb: impl WithAtomRecordCallback<'a, T>,
) -> T {
let mut data = &atom.data[..];
let ctx = get_sys_ctx(atom.owner).await;
let inst = ctx.get::<CtedObj>().inst();
let id = AtomTypeId::decode(Pin::new(&mut data)).await;
let atom_record = atom_by_idx(inst.card(), id.clone()).expect("Atom ID reserved");
cb(atom_record, ctx, id, data).await
}
pub struct ExtensionOwner {
_interner_cell: Rc<RefCell<Option<Interner>>>,
_systems_lock: Rc<Mutex<HashMap<api::SysId, SystemRecord>>>,
out_recv: Receiver<Vec<u8>>,
out_send: Sender<Vec<u8>>,
}
impl ExtPort for ExtensionOwner {
fn send<'a>(&'a self, msg: &'a [u8]) -> LocalBoxFuture<'a, ()> {
Box::pin(async { self.out_send.send(msg.to_vec()).boxed_local().await.unwrap() })
/// Add some [task_local] state to the extension. Bear in mind that distinct
/// [crate::System] instances should not visibly affect each other
pub fn add_context(&mut self, fun: impl ContextModifier) {
self.context.push(Box::new(fun) as Box<_>);
}
fn recv(&self) -> LocalBoxFuture<'_, Option<Vec<u8>>> {
Box::pin(async { (self.out_recv.recv().await).ok() })
/// Builder form of [Self::add_context]
pub fn context(mut self, fun: impl ContextModifier) -> Self {
self.add_context(fun);
self
}
}
pub fn extension_init(
data: ExtensionData,
host_header: api::HostHeader,
spawner: Spawner,
) -> ExtInit {
let api::HostHeader { log_strategy, msg_logs } = host_header;
let decls = (data.systems.iter().enumerate())
.map(|(id, sys)| (u16::try_from(id).expect("more than u16max system ctors"), sys))
.map(|(id, sys)| sys.decl(api::SysDeclId(NonZero::new(id + 1).unwrap())))
.collect_vec();
let systems_lock = Rc::new(Mutex::new(HashMap::<api::SysId, SystemRecord>::new()));
let ext_header = api::ExtensionHeader { name: data.name.to_string(), systems: decls.clone() };
let (out_send, in_recv) = channel::bounded::<Vec<u8>>(1);
let (in_send, out_recv) = channel::bounded::<Vec<u8>>(1);
let (exit_send, exit_recv) = channel::bounded(1);
let logger = Logger::new(log_strategy);
let msg_logger = Logger::new(msg_logs);
let interner_cell = Rc::new(RefCell::new(None::<Interner>));
let interner_weak = Rc::downgrade(&interner_cell);
let systems_weak = Rc::downgrade(&systems_lock);
let get_ctx = clone!(systems_weak; move |id: api::SysId| clone!(systems_weak; async move {
let systems =
systems_weak.upgrade().expect("System table dropped before request processing done");
systems.lock().await.get(&id).expect("System not found").ctx.clone()
}));
let init_ctx = {
clone!(interner_weak, spawner, logger);
move |id: api::SysId, cted: CtedObj, reqnot: ReqNot<ExtMsgSet>| {
clone!(interner_weak, spawner, logger; async move {
let interner_rc =
interner_weak.upgrade().expect("System construction order while shutting down");
let i = interner_rc.borrow().clone().expect("mk_ctx called very early, no interner!");
SysCtx::new(id, i, reqnot, spawner, logger, cted)
})
}
};
let rn = ReqNot::<api::ExtMsgSet>::new(
msg_logger.clone(),
move |a, _| clone!(in_send; Box::pin(async move { in_send.send(a.to_vec()).await.unwrap() })),
clone!(systems_weak, exit_send, get_ctx; move |n, _| {
clone!(systems_weak, exit_send, get_ctx; async move {
match n {
api::HostExtNotif::Exit => exit_send.send(()).await.unwrap(),
api::HostExtNotif::SystemDrop(api::SystemDrop(sys_id)) =>
if let Some(rc) = systems_weak.upgrade() {
mem::drop(rc.lock().await.remove(&sys_id))
},
api::HostExtNotif::AtomDrop(api::AtomDrop(sys_id, atom)) => {
let ctx = get_ctx(sys_id).await;
take_atom(atom, &ctx).await.dyn_free(ctx.clone()).await
}
/// Start the extension on a message channel, blocking the task until the peer
/// on the other side drops the extension. Extension authors would typically
/// pass the prepared builder into some function that is responsible for
/// managing the [ExtPort]
pub async fn run(mut self, mut ctx: ExtPort) {
self.add_context(with_funs_ctx);
self.add_context(with_parsed_const_ctx);
self.add_context(with_obj_store);
self.add_context(with_lazy_member_store);
self.add_context(with_refl_roots);
let spawn = ctx.spawn.clone();
let host_header = api::HostHeader::decode(ctx.input.as_mut()).await.unwrap();
let decls = (self.systems.iter().enumerate())
.map(|(id, sys)| (u16::try_from(id).expect("more than u16max system ctors"), sys))
.map(|(id, sys)| sys.decl(api::SysDeclId(NonZero::new(id + 1).unwrap())))
.collect_vec();
api::ExtensionHeader { name: self.name.to_string(), systems: decls.clone() }
.encode(ctx.output.as_mut())
.await
.unwrap();
ctx.output.as_mut().flush().await.unwrap();
let logger1 = LoggerImpl::from_api(&host_header.logger);
let logger2 = logger1.clone();
let (client, comm_ctx, extension_srv) = io_comm(ctx.output, ctx.input);
// this future will be ready once the extension cleanly exits
let extension_fut = extension_srv.listen(
async |n: Box<dyn MsgReader<'_>>| {
let notif = n.read().await.unwrap();
match notif {
api::HostExtNotif::Exit => exit().await,
}
}.boxed_local())
}),
{
clone!(logger, get_ctx, init_ctx, systems_weak, interner_weak, decls, msg_logger);
move |hand, req| {
clone!(logger, get_ctx, init_ctx, systems_weak, interner_weak, decls, msg_logger);
async move {
let interner_cell = interner_weak.upgrade().expect("Interner dropped before request");
let i = interner_cell.borrow().clone().expect("Request arrived before interner set");
writeln!(msg_logger, "{} extension received request {req:?}", data.name);
Ok(())
},
async |mut reader| {
with_stash(async {
let req = reader.read_req().await.unwrap();
let handle = reader.finish().await;
// Atom printing is never reported because it generates too much
// noise
if !matches!(req, api::HostExtReq::AtomReq(api::AtomReq::AtomPrint(_))) {
writeln!(log("msg"), "{} extension received request {req:?}", self.name).await;
}
match req {
api::HostExtReq::Ping(ping @ api::Ping) => hand.handle(&ping, &()).await,
api::HostExtReq::Sweep(sweep @ api::Sweep) =>
hand.handle(&sweep, &i.sweep_replica().await).await,
api::HostExtReq::SystemDrop(sys_drop) => {
SYSTEM_TABLE.with(|l| l.borrow_mut().remove(&sys_drop.0));
handle.reply(&sys_drop, ()).await
},
api::HostExtReq::AtomDrop(atom_drop @ api::AtomDrop(sys_id, atom)) =>
with_sys_record(sys_id, async {
take_atom(atom).await.dyn_free().await;
handle.reply(&atom_drop, ()).await
})
.await,
api::HostExtReq::Ping(ping @ api::Ping) => handle.reply(&ping, ()).await,
api::HostExtReq::Sweep(api::Sweep) => todo!(),
api::HostExtReq::SysReq(api::SysReq::NewSystem(new_sys)) => {
let (sys_id, _) = (decls.iter().enumerate().find(|(_, s)| s.id == new_sys.system))
let (ctor_idx, _) = (decls.iter().enumerate().find(|(_, s)| s.id == new_sys.system))
.expect("NewSystem call received for invalid system");
let cted = data.systems[sys_id].new_system(&new_sys);
let lex_filter =
cted.inst().dyn_lexers().iter().fold(api::CharFilter(vec![]), |cf, lx| {
char_filter_union(&cf, &mk_char_filter(lx.char_filter().iter().cloned()))
});
let lazy_mems = Mutex::new(HashMap::new());
let ctx = init_ctx(new_sys.id, cted.clone(), hand.reqnot()).await;
let const_root = stream::from_iter(cted.inst().dyn_env())
.then(|mem| {
let (req, lazy_mems) = (&hand, &lazy_mems);
clone!(i, ctx; async move {
let cted = self.systems[ctor_idx].new_system(&new_sys);
let record = Rc::new(SystemRecord { cted: cted.clone() });
SYSTEM_TABLE.with(|tbl| {
let mut g = tbl.borrow_mut();
g.insert(new_sys.id, record);
});
with_sys_record(new_sys.id, async {
let lex_filter =
cted.inst().dyn_lexers().iter().fold(api::CharFilter(vec![]), |cf, lx| {
char_filter_union(&cf, &mk_char_filter(lx.char_filter().iter().cloned()))
});
let const_root = stream::iter(cted.inst().dyn_env().await)
.then(async |mem| {
let name = is(&mem.name).await;
let mut tia_ctx = TreeIntoApiCtxImpl {
lazy_members: &mut *lazy_mems.lock().await,
sys: ctx,
basepath: &[],
path: Substack::Bottom,
req
path: Substack::Bottom.push(name.clone()),
};
(i.i(&mem.name).await.to_api(), mem.kind.into_api(&mut tia_ctx).await)
(name.to_api(), mem.kind.into_api(&mut tia_ctx).await)
})
})
.collect()
.collect()
.await;
let prelude =
cted.inst().dyn_prelude().await.iter().map(|sym| sym.to_api()).collect();
let line_types = join_all(
(cted.inst().dyn_parsers().iter())
.map(async |p| is(p.line_head()).await.to_api()),
)
.await;
let record = SystemRecord { ctx, lazy_members: lazy_mems.into_inner() };
let systems = systems_weak.upgrade().expect("System constructed during shutdown");
systems.lock().await.insert(new_sys.id, record);
let response = api::NewSystemResponse { lex_filter, const_root, line_types: vec![] };
hand.handle(&new_sys, &response).await
},
api::HostExtReq::GetMember(get_tree @ api::GetMember(sys_id, tree_id)) => {
let sys_ctx = get_ctx(sys_id).await;
let systems = systems_weak.upgrade().expect("Member queried during shutdown");
let mut systems_g = systems.lock().await;
let SystemRecord { lazy_members, .. } =
systems_g.get_mut(&sys_id).expect("System not found");
let (path, cb) = match lazy_members.insert(tree_id, MemberRecord::Res) {
None => panic!("Tree for ID not found"),
Some(MemberRecord::Res) => panic!("This tree has already been transmitted"),
Some(MemberRecord::Gen(path, cb)) => (path, cb),
};
let tree = cb.build(Sym::new(path.clone(), &i).await.unwrap(), sys_ctx.clone()).await;
let mut tia_ctx = TreeIntoApiCtxImpl {
sys: sys_ctx,
path: Substack::Bottom,
basepath: &path,
lazy_members,
req: &hand,
};
hand.handle(&get_tree, &tree.into_api(&mut tia_ctx).await).await
let response =
api::NewSystemResponse { lex_filter, const_root, line_types, prelude };
handle.reply(&new_sys, response).await
})
.await
},
api::HostExtReq::GetMember(get_tree @ api::GetMember(sys_id, tree_id)) =>
with_sys_record(sys_id, async {
let (path, tree) = get_lazy(tree_id).await;
let mut tia_ctx =
TreeIntoApiCtxImpl { path: Substack::Bottom, basepath: &path[..] };
handle.reply(&get_tree, tree.into_api(&mut tia_ctx).await).await
})
.await,
api::HostExtReq::SysReq(api::SysReq::SysFwded(fwd)) => {
let fwd_tok = Witness::of(&fwd);
let api::SysFwded(sys_id, payload) = fwd;
let ctx = get_ctx(sys_id).await;
let sys = ctx.cted().inst();
sys.dyn_request(hand, payload).await
with_sys_record(sys_id, async {
let (mut req_in, req) = pipe(1024);
let (rep, mut rep_out) = pipe(1024);
let mut reply = Vec::new();
let (..) = join3(
async { req_in.write_all(&payload).await.expect("Ingress failed") },
async { rep_out.read_to_end(&mut reply).await.expect("Egress failed") },
dyn_cted().inst().dyn_request(Box::new(TrivialReqCycle { req, rep })),
)
.await;
handle.reply(fwd_tok, reply).await
})
.await
},
api::HostExtReq::LexExpr(lex @ api::LexExpr { sys, src, text, pos, id }) => {
let sys_ctx = get_ctx(sys).await;
let text = Tok::from_api(text, &i).await;
let src = Sym::from_api(src, sys_ctx.i()).await;
let ctx = LexContext { id, pos, text: &text, src, ctx: sys_ctx.clone() };
let trigger_char = text.chars().nth(pos as usize).unwrap();
let err_na = err_not_applicable(&i).await;
let err_cascade = err_cascade(&i).await;
let lexers = sys_ctx.cted().inst().dyn_lexers();
for lx in lexers.iter().filter(|l| char_filter_match(l.char_filter(), trigger_char)) {
match lx.lex(&text[pos as usize..], &ctx).await {
Err(e) if e.any(|e| *e == err_na) => continue,
Err(e) => {
let eopt = e.keep_only(|e| *e != err_cascade).map(|e| Err(e.to_api()));
return hand.handle(&lex, &eopt).await;
},
Ok((s, expr)) => {
let expr = expr.into_api(&mut (), &mut (sys_ctx, &hand)).await;
let pos = (text.len() - s.len()) as u32;
return hand.handle(&lex, &Some(Ok(api::LexedExpr { pos, expr }))).await;
},
api::HostExtReq::LexExpr(lex @ api::LexExpr { sys, src, text, pos, id }) =>
with_sys_record(sys, async {
let text = es(text).await;
let src = Sym::from_api(src).await;
let expr_store = BorrowedExprStore::new();
let tail = &text[pos as usize..];
let trigger_char = tail.chars().next().unwrap();
let ekey_na = ekey_not_applicable().await;
let ekey_cascade = ekey_cascade().await;
let lexers = dyn_cted().inst().dyn_lexers();
for lx in lexers.iter().filter(|l| char_filter_match(l.char_filter(), trigger_char))
{
let ctx = LexContext::new(&expr_store, &text, id, pos, src.clone());
match try_with_reporter(lx.lex(tail, &ctx)).await {
Err(e) if e.any(|e| *e == ekey_na) => continue,
Err(e) => {
let eopt = e.keep_only(|e| *e != ekey_cascade).map(|e| Err(e.to_api()));
expr_store.dispose().await;
return handle.reply(&lex, eopt).await;
},
Ok((s, expr)) => {
let expr = join_all(
(expr.into_iter())
.map(|tok| async { tok.into_api(&mut (), &mut ()).await }),
)
.await;
let pos = (text.len() - s.len()) as u32;
expr_store.dispose().await;
return handle.reply(&lex, Some(Ok(api::LexedExpr { pos, expr }))).await;
},
}
}
}
writeln!(logger, "Got notified about n/a character '{trigger_char}'");
hand.handle(&lex, &None).await
},
writeln!(log("warn"), "Got notified about n/a character '{trigger_char}'").await;
expr_store.dispose().await;
handle.reply(&lex, None).await
})
.await,
api::HostExtReq::ParseLine(pline) => {
let api::ParseLine { module, src, exported, comments, sys, line } = &pline;
let mut ctx = get_ctx(*sys).await;
let parsers = ctx.cted().inst().dyn_parsers();
let src = Sym::from_api(*src, ctx.i()).await;
let comments =
join_all(comments.iter().map(|c| Comment::from_api(c, src.clone(), &i))).await;
let line: Vec<GenTokTree> = ttv_from_api(line, &mut ctx, &mut (), &src, &i).await;
let snip = Snippet::new(line.first().expect("Empty line"), &line);
let (head, tail) = snip.pop_front().unwrap();
let name = if let GenTok::Name(n) = &head.tok { n } else { panic!("No line head") };
let parser =
parsers.iter().find(|p| p.line_head() == **name).expect("No parser candidate");
let module = Sym::from_api(*module, ctx.i()).await;
let pctx = ParsCtx::new(ctx.clone(), module);
let o_line = match parser.parse(pctx, *exported, comments, tail).await {
Err(e) => Err(e.to_api()),
Ok(t) => Ok(linev_into_api(t, ctx.clone(), &hand).await),
};
hand.handle(&pline, &o_line).await
},
api::HostExtReq::FetchParsedConst(ref fpc @ api::FetchParsedConst { id, sys }) => {
let ctx = get_ctx(sys).await;
let cnst = get_const(id, ctx.clone()).await;
hand.handle(fpc, &cnst.api_return(ctx, &hand).await).await
let req = Witness::of(&pline);
let api::ParseLine { module, src, exported, comments, sys, line, idx } = pline;
with_sys_record(sys, async {
let parsers = dyn_cted().inst().dyn_parsers();
let src = Sym::from_api(src).await;
let comments =
join_all(comments.iter().map(|c| Comment::from_api(c, src.clone()))).await;
let expr_store = BorrowedExprStore::new();
let line: Vec<PTokTree> = ttv_from_api(line, &mut &expr_store, &mut (), &src).await;
let snip = Snippet::new(line.first().expect("Empty line"), &line);
let parser = parsers[idx as usize];
let module = Sym::from_api(module).await;
let pctx = ParsCtx::new(module);
let o_line =
match try_with_reporter(parser.parse(pctx, exported, comments, snip)).await {
Err(e) => Err(e.to_api()),
Ok(t) => Ok(linev_into_api(t).await),
};
mem::drop(line);
expr_store.dispose().await;
handle.reply(req, o_line).await
})
.await
},
api::HostExtReq::FetchParsedConst(ref fpc @ api::FetchParsedConst(sys, id)) =>
with_sys_record(sys, async {
let cnst = get_const(id).await;
handle.reply(fpc, serialize(cnst).await).await
})
.await,
api::HostExtReq::AtomReq(atom_req) => {
let atom = atom_req.get_atom();
let atom_req = atom_req.clone();
with_atom_record(&get_ctx, atom, async move |nfo, ctx, id, buf| {
let actx = AtomCtx(buf, atom.drop, ctx.clone());
with_sys_record(atom.owner, async {
let (nfo, id, buf) = resolve_atom_type(atom);
let actx = AtomCtx(buf, atom.drop);
match &atom_req {
api::AtomReq::SerializeAtom(ser) => {
let mut buf = enc_vec(&id).await;
let mut buf = enc_vec(&id);
match nfo.serialize(actx, Pin::<&mut Vec<_>>::new(&mut buf)).await {
None => hand.handle(ser, &None).await,
None => handle.reply(ser, None).await,
Some(refs) => {
let refs =
join_all(refs.into_iter().map(|ex| async { ex.into_api(&mut ()).await }))
join_all(refs.into_iter().map(async |ex| ex.into_api(&mut ()).await))
.await;
hand.handle(ser, &Some((buf, refs))).await
handle.reply(ser, Some((buf, refs))).await
},
}
},
api::AtomReq::AtomPrint(print @ api::AtomPrint(_)) =>
hand.handle(print, &nfo.print(actx).await.to_api()).await,
handle.reply(print, nfo.print(actx).await.to_api()).await,
api::AtomReq::Fwded(fwded) => {
let api::Fwded(_, key, payload) = &fwded;
let (mut req_in, req) = pipe(1024);
let (rep, mut rep_out) = pipe(1024);
let mut reply = Vec::new();
let key = Sym::from_api(*key, &i).await;
let some = nfo
.handle_req(
actx,
key,
Pin::<&mut &[u8]>::new(&mut &payload[..]),
Pin::<&mut Vec<_>>::new(&mut reply),
)
.await;
hand.handle(fwded, &some.then_some(reply)).await
let key = Sym::from_api(*key).await;
let (.., some) = join3(
async { req_in.write_all(payload).await.expect("Ingress failed") },
async { rep_out.read_to_end(&mut reply).await.expect("Egress failed") },
nfo.handle_req_ref(actx, key, Box::new(TrivialReqCycle { req, rep })),
)
.await;
handle.reply(fwded, some.then_some(reply)).await
},
api::AtomReq::CallRef(call @ api::CallRef(_, arg)) => {
// SAFETY: function calls own their argument implicitly
let expr_handle = unsafe { ExprHandle::from_args(ctx.clone(), *arg) };
let ret = nfo.call_ref(actx, Expr::from_handle(Rc::new(expr_handle))).await;
hand.handle(call, &ret.api_return(ctx.clone(), &hand).await).await
let expr_store = BorrowedExprStore::new();
let expr_handle = ExprHandle::borrowed(*arg, &expr_store);
let ret = nfo.call_ref(actx, Expr::from_handle(expr_handle.clone())).await;
let api_expr = serialize(ret).await;
mem::drop(expr_handle);
expr_store.dispose().await;
handle.reply(call, api_expr).await
},
api::AtomReq::FinalCall(call @ api::FinalCall(_, arg)) => {
// SAFETY: function calls own their argument implicitly
let expr_handle = unsafe { ExprHandle::from_args(ctx.clone(), *arg) };
let ret = nfo.call(actx, Expr::from_handle(Rc::new(expr_handle))).await;
hand.handle(call, &ret.api_return(ctx.clone(), &hand).await).await
},
api::AtomReq::Command(cmd @ api::Command(_)) => match nfo.command(actx).await {
Err(e) => hand.handle(cmd, &Err(e.to_api())).await,
Ok(opt) => match opt {
None => hand.handle(cmd, &Ok(api::NextStep::Halt)).await,
Some(cont) => {
let cont = cont.api_return(ctx.clone(), &hand).await;
hand.handle(cmd, &Ok(api::NextStep::Continue(cont))).await
},
},
let expr_store = BorrowedExprStore::new();
let expr_handle = ExprHandle::borrowed(*arg, &expr_store);
let ret = nfo.call(actx, Expr::from_handle(expr_handle.clone())).await;
let api_expr = serialize(ret).await;
mem::drop(expr_handle);
expr_store.dispose().await;
handle.reply(call, api_expr).await
},
}
})
@@ -336,42 +415,51 @@ pub fn extension_init(
},
api::HostExtReq::DeserAtom(deser) => {
let api::DeserAtom(sys, buf, refs) = &deser;
let mut read = &mut &buf[..];
let ctx = get_ctx(*sys).await;
// SAFETY: deserialization implicitly grants ownership to previously owned exprs
let refs = (refs.iter())
.map(|tk| unsafe { ExprHandle::from_args(ctx.clone(), *tk) })
.map(|handle| Expr::from_handle(Rc::new(handle)))
.collect_vec();
let id = AtomTypeId::decode(Pin::new(&mut read)).await;
let inst = ctx.cted().inst();
let nfo = atom_by_idx(inst.card(), id).expect("Deserializing atom with invalid ID");
hand.handle(&deser, &nfo.deserialize(ctx.clone(), read, &refs).await).await
let read = &mut &buf[..];
with_sys_record(*sys, async {
// SAFETY: deserialization implicitly grants ownership to previously owned exprs
let refs = (refs.iter())
.map(|tk| Expr::from_handle(ExprHandle::deserialize(*tk)))
.collect_vec();
let id = AtomTypeId::decode_slice(read);
let nfo = (dyn_cted().inst().card().ops_by_atid(id))
.expect("Deserializing atom with invalid ID");
handle.reply(&deser, nfo.deserialize(read, &refs).await).await
})
.await
},
}
}
.boxed_local()
}
},
);
*interner_cell.borrow_mut() =
Some(Interner::new_replica(rn.clone().map(|ir: IntReq| ir.into_root())));
spawner(Box::pin(clone!(spawner; async move {
let mut streams = stream_select! { in_recv.map(Some), exit_recv.map(|_| None) };
while let Some(item) = streams.next().await {
match item {
Some(rcvd) => spawner(Box::pin(clone!(rn; async move { rn.receive(&rcvd[..]).await }))),
None => break,
}
}
})));
ExtInit {
header: ext_header,
port: Box::new(ExtensionOwner {
out_recv,
out_send,
_interner_cell: interner_cell,
_systems_lock: systems_lock,
}),
})
.await
},
);
// add essential services to the very tail, then fold all context into the run
// future
SYSTEM_TABLE
.scope(
RefCell::default(),
with_interner(
new_interner(),
with_logger(
logger2,
with_comm(
Rc::new(client),
comm_ctx,
SPAWN.scope(
Rc::new(move |delay, fut| {
let (poll, handle) = to_task(fut);
spawn(delay, Box::pin(poll));
Box::new(handle)
}),
(self.context.into_iter()).fold(
Box::pin(async { extension_fut.await.unwrap() }) as LocalBoxFuture<()>,
|fut, cx| cx.apply(fut),
),
),
),
),
),
)
.await;
}
}

View File

@@ -1,310 +0,0 @@
use std::any::Any;
use std::borrow::Cow;
use std::cell::RefCell;
use std::sync::{Arc, OnceLock};
use std::{fmt, iter};
use dyn_clone::{clone_box, DynClone};
use itertools::Itertools;
use orchid_base::boxed_iter::{box_once, BoxedIter};
use orchid_base::clone;
use orchid_base::error::{ErrPos, OrcError};
use orchid_base::interner::{deintern, intern};
use orchid_base::location::{GetSrc, Pos};
use orchid_base::reqnot::{ReqNot, Requester};
use crate::api;
/// Errors addressed to the developer which are to be resolved with
/// code changes
pub trait ProjectError: Sized + Send + Sync + 'static {
  /// A general description of this type of error
  const DESCRIPTION: &'static str;
  /// A formatted message that includes specific parameters
  // NOTE(review): `self.description()` resolves through the blanket
  // `impl<T: ProjectError> DynProjectError for T` below, which returns
  // `DESCRIPTION` — so the default message equals the description.
  #[must_use]
  fn message(&self) -> String { self.description().to_string() }
  /// Code positions relevant to this error. If you don't implement this, you
  /// must implement [ProjectError::one_position]
  #[must_use]
  fn positions(&self) -> impl IntoIterator<Item = ErrPos> + '_ {
    box_once(ErrPos { position: self.one_position(), message: None })
  }
  /// Short way to provide a single origin. If you don't implement this, you
  /// must implement [ProjectError::positions]
  // Panics by design when neither override is provided.
  #[must_use]
  fn one_position(&self) -> Pos {
    unimplemented!("Error type did not implement either positions or one_position")
  }
  /// Convert the error into an `Arc<dyn DynProjectError>` to be able to
  /// handle various errors together
  #[must_use]
  fn pack(self) -> ProjectErrorObj { Arc::new(self) }
}
/// Object-safe version of [ProjectError]. Implement that instead of this.
pub trait DynProjectError: Send + Sync + 'static {
  /// Access type information about this error
  #[must_use]
  fn as_any_ref(&self) -> &dyn Any;
  /// Pack the error into a trait object, or leave it as-is if it's already a
  /// trait object
  #[must_use]
  fn into_packed(self: Arc<Self>) -> ProjectErrorObj;
  /// A general description of this type of error
  #[must_use]
  fn description(&self) -> Cow<'_, str>;
  /// A formatted message that includes specific parameters
  // Default mirrors ProjectError::message: fall back to the description text.
  #[must_use]
  fn message(&self) -> String { self.description().to_string() }
  /// Code positions relevant to this error.
  #[must_use]
  fn positions(&self) -> BoxedIter<'_, ErrPos>;
}
// Blanket impl: every ProjectError is automatically usable as a
// DynProjectError trait object.
impl<T> DynProjectError for T
where T: ProjectError
{
  fn as_any_ref(&self) -> &dyn Any { self }
  fn into_packed(self: Arc<Self>) -> ProjectErrorObj { self }
  fn description(&self) -> Cow<'_, str> { Cow::Borrowed(T::DESCRIPTION) }
  // Qualified calls delegate to the ProjectError definitions, avoiding
  // recursion into the DynProjectError defaults.
  fn message(&self) -> String { ProjectError::message(self) }
  fn positions(&self) -> BoxedIter<ErrPos> { Box::new(ProjectError::positions(self).into_iter()) }
}
pub fn pretty_print(err: &dyn DynProjectError, get_src: &mut impl GetSrc) -> String {
let description = err.description();
let message = err.message();
let positions = err.positions().collect::<Vec<_>>();
let head = format!("Project error: {description}\n{message}");
if positions.is_empty() {
head + "No origins specified"
} else {
iter::once(head)
.chain(positions.iter().map(|ErrPos { position: origin, message }| match message {
None => format!("@{}", origin.pretty_print(get_src)),
Some(msg) => format!("@{}: {msg}", origin.pretty_print(get_src)),
}))
.join("\n")
}
}
// Forwarding impl so an `Arc<dyn DynProjectError>` can itself be used wherever
// a DynProjectError is expected.
impl DynProjectError for ProjectErrorObj {
  fn as_any_ref(&self) -> &dyn Any { (**self).as_any_ref() }
  fn description(&self) -> Cow<'_, str> { (**self).description() }
  // Already packed: peel the outer Arc by cloning the inner one.
  fn into_packed(self: Arc<Self>) -> ProjectErrorObj { (*self).clone() }
  fn message(&self) -> String { (**self).message() }
  fn positions(&self) -> BoxedIter<'_, ErrPos> { (**self).positions() }
}
/// Type-erased [ProjectError] implementor through the [DynProjectError]
/// object-trait
pub type ProjectErrorObj = Arc<dyn DynProjectError>;
/// Alias for a result with an error of [ProjectErrorObj]. This is the type of
/// result most commonly returned by pre-runtime operations.
pub type ProjectResult<T> = Result<T, ProjectErrorObj>;
/// A trait for error types that are only missing an origin. Do not depend on
/// this trait, refer to [DynErrorSansOrigin] instead.
pub trait ErrorSansOrigin: Clone + Sized + Send + Sync + 'static {
  /// General description of the error condition
  const DESCRIPTION: &'static str;
  /// Specific description of the error including code fragments or concrete
  /// data if possible. Defaults to [Self::DESCRIPTION].
  fn message(&self) -> String { Self::DESCRIPTION.to_string() }
  /// Convert the error to a type-erased structure for handling on shared
  /// channels
  fn pack(self) -> ErrorSansOriginObj { Box::new(self) }
  /// A shortcut to streamline switching code between [ErrorSansOriginObj] and
  /// concrete types; attaches `origin` to produce a full project error.
  fn bundle(self, origin: &Pos) -> ProjectErrorObj { self.pack().bundle(origin) }
}
/// Object-safe equivalent to [ErrorSansOrigin]. Implement that one instead of
/// this. Typically found as [ErrorSansOriginObj]
pub trait DynErrorSansOrigin: Any + Send + Sync + DynClone {
  /// Allow to downcast the base object to distinguish between various errors.
  /// The main intended purpose is to trigger a fallback when [CodeNotFound] is
  /// encountered, but the possibilities are not limited to that.
  fn as_any_ref(&self) -> &dyn Any;
  /// Regularize the type
  fn into_packed(self: Box<Self>) -> ErrorSansOriginObj;
  /// Generic description of the error condition
  fn description(&self) -> Cow<'_, str>;
  /// Specific description of this particular error
  fn message(&self) -> String;
  /// Add an origin, upgrading this to a full [ProjectErrorObj]
  fn bundle(self: Box<Self>, origin: &Pos) -> ProjectErrorObj;
}
/// Type-erased [ErrorSansOrigin] implementor through the object-trait
/// [DynErrorSansOrigin]. This can be turned into a [ProjectErrorObj] with
/// [ErrorSansOriginObj::bundle].
pub type ErrorSansOriginObj = Box<dyn DynErrorSansOrigin>;
/// A generic project result without origin
pub type ResultSansOrigin<T> = Result<T, ErrorSansOriginObj>;
// Blanket impl erasing any statically typed [ErrorSansOrigin].
impl<T: ErrorSansOrigin + 'static> DynErrorSansOrigin for T {
  fn description(&self) -> Cow<'_, str> { Cow::Borrowed(Self::DESCRIPTION) }
  fn message(&self) -> String { (*self).message() }
  fn as_any_ref(&self) -> &dyn Any { self }
  fn into_packed(self: Box<Self>) -> ErrorSansOriginObj { (*self).pack() }
  fn bundle(self: Box<Self>, origin: &Pos) -> ProjectErrorObj {
    // Pair the error with a clone of the position; [OriginBundle] implements
    // [DynProjectError].
    Arc::new(OriginBundle(origin.clone(), *self))
  }
}
// Boxed trait objects cannot derive [Clone]; delegate to dyn-clone's
// [clone_box], enabled by the [DynClone] supertrait bound.
impl Clone for ErrorSansOriginObj {
  fn clone(&self) -> Self { clone_box(&**self) }
}
// Forwarding impl so a packed object can be used like a concrete error;
// [Self::into_packed] is the identity here.
impl DynErrorSansOrigin for ErrorSansOriginObj {
  fn description(&self) -> Cow<'_, str> { (**self).description() }
  fn message(&self) -> String { (**self).message() }
  fn as_any_ref(&self) -> &dyn Any { (**self).as_any_ref() }
  fn into_packed(self: Box<Self>) -> ErrorSansOriginObj { *self }
  fn bundle(self: Box<Self>, origin: &Pos) -> ProjectErrorObj { (*self).bundle(origin) }
}
impl fmt::Display for ErrorSansOriginObj {
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    // NOTE(review): writeln! also appends a trailing newline after the
    // "Origin missing" notice — presumably intentional for log output.
    writeln!(f, "{}\nOrigin missing from error", self.message())
  }
}
// Debug output is identical to Display; the message is the most useful
// diagnostic either way.
impl fmt::Debug for ErrorSansOriginObj {
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{self}") }
}
/// Pairs an [ErrorSansOrigin] with the position supplied at bundling time,
/// upgrading it to a full [DynProjectError].
struct OriginBundle<T: ErrorSansOrigin>(Pos, T);
impl<T: ErrorSansOrigin> DynProjectError for OriginBundle<T> {
  // Expose the wrapped error, not the bundle, so downcasts still find the
  // original concrete type.
  fn as_any_ref(&self) -> &dyn Any { self.1.as_any_ref() }
  fn into_packed(self: Arc<Self>) -> ProjectErrorObj { self }
  fn description(&self) -> Cow<'_, str> { self.1.description() }
  fn message(&self) -> String { self.1.message() }
  fn positions(&self) -> BoxedIter<ErrPos> {
    // Exactly one position: the origin attached at bundle time.
    box_once(ErrPos { position: self.0.clone(), message: None })
  }
}
/// A collection for tracking fatal errors without halting. Participating
/// functions return [ProjectResult] even if they only ever construct [Ok]. When
/// they call other participating functions, instead of directly forwarding
/// errors with `?` they should prefer constructing a fallback value with
/// [Reporter::fallback]. If any error is added to a [Reporter] in a function,
/// the return value is valid but its meaning need not be related in any way to
/// the inputs.
///
/// Returning [Err] from a function that accepts `&mut Reporter` indicates not
/// that there was a fatal error but that it wasn't possible to construct a
/// fallback, so if it can, the caller should construct one.
// RefCell allows reporting through &self; the type is consequently !Sync.
pub struct Reporter(RefCell<Vec<ProjectErrorObj>>);
impl Reporter {
  /// Create a new error reporter
  pub fn new() -> Self { Self(RefCell::new(Vec::new())) }
  /// Returns true if any errors were recorded. If this ever returns true, it
  /// will always return true in the future.
  pub fn failing(&self) -> bool { !self.0.borrow().is_empty() }
  /// Report a fatal error
  pub fn report(&self, error: ProjectErrorObj) {
    // [MultiError]s are flattened recursively, so the reporter only ever
    // stores leaf errors.
    match error.as_any_ref().downcast_ref::<MultiError>() {
      None => self.0.borrow_mut().push(error),
      Some(me) =>
        for err in me.0.iter() {
          self.report(err.clone())
        },
    }
  }
  /// Catch a fatal error, report it, and substitute the value
  pub fn fallback<T>(&self, res: ProjectResult<T>, cb: impl FnOnce(ProjectErrorObj) -> T) -> T {
    res.inspect_err(|e| self.report(e.clone())).unwrap_or_else(cb)
  }
  /// Take the errors out of the reporter; [None] if nothing was reported
  #[must_use]
  pub fn into_errors(self) -> Option<Vec<ProjectErrorObj>> {
    let v = self.0.into_inner();
    if v.is_empty() { None } else { Some(v) }
  }
  /// Raise an error if the reporter contains any errors
  pub fn bind(self) -> ProjectResult<()> {
    match self.into_errors() {
      None => Ok(()),
      // A single error is returned as-is; several are wrapped in [MultiError]
      Some(v) if v.len() == 1 => Err(v.into_iter().next().unwrap()),
      Some(v) => Err(MultiError(v).pack()),
    }
  }
}
impl Default for Reporter {
fn default() -> Self { Self::new() }
}
/// Append `err` to `res`, recursively flattening any [MultiError] so that the
/// output only ever holds leaf errors.
fn unpack_into(err: impl DynProjectError, res: &mut Vec<ProjectErrorObj>) {
  if let Some(multi) = err.as_any_ref().downcast_ref::<MultiError>() {
    for sub in multi.0.iter() {
      unpack_into(sub.clone(), res)
    }
  } else {
    res.push(Arc::new(err).into_packed());
  }
}
/// Flatten a possibly aggregated error into the list of its leaf errors.
pub fn unpack_err(err: ProjectErrorObj) -> Vec<ProjectErrorObj> {
  let mut leaves = Vec::new();
  unpack_into(err, &mut leaves);
  leaves
}
/// Combine any number of errors into a single error object, flattening nested
/// [MultiError]s along the way. A lone leaf error is returned unwrapped.
pub fn pack_err<E: DynProjectError>(iter: impl IntoIterator<Item = E>) -> ProjectErrorObj {
  let mut errors = Vec::new();
  for err in iter {
    unpack_into(err, &mut errors);
  }
  match errors.len() {
    1 => errors.pop().expect("length checked above"),
    _ => MultiError(errors).pack(),
  }
}
/// Aggregate of several errors, produced by [Reporter::bind] and [pack_err].
/// [Reporter::report] and [unpack_into] flatten it back into leaf errors.
struct MultiError(Vec<ProjectErrorObj>);
impl ProjectError for MultiError {
  const DESCRIPTION: &'static str = "Multiple errors occurred";
  fn message(&self) -> String { format!("{} errors occurred", self.0.len()) }
  fn positions(&self) -> impl IntoIterator<Item = ErrPos> + '_ {
    // Concatenate the positions of every contained error, prefixing each
    // position message with the owning error's message so the origin of each
    // line stays identifiable in the combined report.
    self.0.iter().flat_map(|e| {
      e.positions().map(|pos| {
        let emsg = e.message();
        let msg = match pos.message {
          None => emsg,
          // An empty per-position message carries no information; drop it.
          Some(s) if s.is_empty() => emsg,
          Some(pmsg) => format!("{emsg}: {pmsg}"),
        };
        ErrPos { position: pos.position, message: Some(Arc::new(msg)) }
      })
    })
  }
}
/// Convert a project error into its wire representation for the extension
/// protocol.
fn err_to_api(err: ProjectErrorObj) -> api::OrcErr {
  let description = intern(&*err.description()).marker();
  let message = Arc::new(err.message());
  let locations = err.positions().map(|e| e.to_api()).collect_vec();
  api::OrcErr { description, message, locations }
}
/// An error owned by the remote side of the extension protocol; its details
/// are fetched lazily on first use.
struct RelayedError {
  // Protocol handle for the remote error; must be Some unless `details` is
  // pre-populated (see the expect in [RelayedError::details]).
  pub id: Option<api::ErrId>,
  pub reqnot: ReqNot<api::ExtMsgSet>,
  // Cache filled on first access by [RelayedError::details].
  pub details: OnceLock<OrcError>,
}
impl RelayedError {
  /// Return the error details, fetching and caching them on first call.
  ///
  /// The first invocation sends [api::GetErrorDetails] over `reqnot`; later
  /// invocations return the value cached in the [OnceLock].
  fn details(&self) -> &OrcError {
    let Self { id, reqnot, details: data } = self;
    data.get_or_init(clone!(reqnot; move || {
      // If `details` was not pre-populated, an id is mandatory.
      let id = id.expect("Either data or ID must be initialized");
      let projerr = reqnot.request(api::GetErrorDetails(id));
      OrcError {
        description: deintern(projerr.description),
        message: projerr.message,
        positions: projerr.locations.iter().map(ErrPos::from_api).collect_vec(),
      }
    }))
  }
}
// Every accessor goes through [RelayedError::details], so the first call to
// any of these may perform a protocol round trip.
impl DynProjectError for RelayedError {
  fn description(&self) -> Cow<'_, str> { Cow::Borrowed(self.details().description.as_str()) }
  fn message(&self) -> String { self.details().message.to_string() }
  fn as_any_ref(&self) -> &dyn std::any::Any { self }
  fn into_packed(self: Arc<Self>) -> ProjectErrorObj { self }
  fn positions(&self) -> BoxedIter<'_, ErrPos> {
    Box::new(self.details().positions.iter().cloned())
  }
}

View File

@@ -1,112 +1,199 @@
use std::cell::RefCell;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::rc::Rc;
use std::thread::panicking;
use async_once_cell::OnceCell;
use derive_destructure::destructure;
use orchid_api::ExtAtomPrint;
use orchid_base::error::OrcErrv;
use orchid_base::format::{FmtCtx, FmtUnit, Format};
use orchid_base::location::Pos;
use orchid_base::reqnot::Requester;
use futures::future::join_all;
use hashbrown::HashSet;
use orchid_base::{FmtCtx, FmtUnit, Format, OrcErrv, Pos, stash};
use crate::api;
use crate::atom::ForeignAtom;
use crate::gen_expr::{GExpr, GExprKind};
use crate::system::SysCtx;
use crate::gen_expr::{GExpr, slot};
use crate::{ForeignAtom, api, notify, request, sys_id};
#[derive(destructure)]
pub struct ExprHandle {
pub tk: api::ExprTicket,
pub ctx: SysCtx,
/// Handle for a lifetime associated with an [ExprHandle], such as a function
/// call. Can be passed into [ExprHandle::borrowed] as an optimization over
/// [ExprHandle::from_ticket]
///
/// # Panics
///
/// The [Drop] of this type panics by default unless the stack is already
/// unwinding. You need to make sure you dispose of it by calling
/// [Self::dispose].
pub struct BorrowedExprStore(RefCell<Option<HashSet<Rc<ExprHandle>>>>);
impl BorrowedExprStore {
pub(crate) fn new() -> Self { Self(RefCell::new(Some(HashSet::new()))) }
pub async fn dispose(self) {
let set = self.0.borrow_mut().take().expect("Double dispose of BorrowedExprStore!");
join_all(set.into_iter().map(ExprHandle::on_borrow_expire)).await;
}
}
impl Drop for BorrowedExprStore {
fn drop(&mut self) {
if self.0.borrow().is_some() && !panicking() {
panic!("This should always be explicitly disposed")
}
}
}
/// A RAII wrapper over an [api::ExprTicket]. Extension authors usually use
/// [Expr] for all purposes as this type does not deal with the details of the
/// expression associated with the ticket, it merely ensures that [api::Acquire]
/// and [api::Release] are sent at appropriate times.
#[derive(destructure, PartialEq, Eq, Hash)]
pub struct ExprHandle(api::ExprTicket);
impl ExprHandle {
/// Do not signal to take ownership of the expr. Instead, the
/// [BorrowedExprStore] signifies the lifetime of the borrow, and when it is
/// freed, it signals to take ownership of any exprs that ended up outliving
/// it. It is used to receive exprs sent via [ExprHandle::ticket] as an
/// optimization over [ExprHandle::from_ticket]
pub fn borrowed(tk: api::ExprTicket, store: &BorrowedExprStore) -> Rc<Self> {
let this = Rc::new(Self(tk));
store.0.borrow_mut().as_mut().unwrap().insert(this.clone());
this
}
/// This function takes over the loose reference pre-created via
/// [ExprHandle::serialize] in the sender. It must therefore pair up with a
/// corresponding call to that function.
pub fn deserialize(tk: api::ExprTicket) -> Rc<Self> { Rc::new(Self(tk)) }
/// This function takes ownership of a borrowed expr sent via
/// [ExprHandle::ticket] and signals immediately to record that ownership. It
/// is used in place of [ExprHandle::borrowed] when it's impractical to
/// determine how long the borrow will live.
///
/// # Safety
///
/// This function does not signal to take ownership of the expr. It must only
/// be called on tickets that are already implicitly owned.
pub unsafe fn from_args(ctx: SysCtx, tk: api::ExprTicket) -> Self { Self { ctx, tk } }
pub fn get_ctx(&self) -> SysCtx { self.ctx.clone() }
pub async fn clone(&self) -> Self {
self.ctx.reqnot().notify(api::Acquire(self.ctx.sys_id(), self.tk)).await;
Self { ctx: self.ctx.clone(), tk: self.tk }
/// You need to ensure that the [api::Acquire] sent by this function arrives
/// before the borrow expires, so you still need a borrow delimited by some
/// message you will send in the future.
pub async fn from_ticket(tk: api::ExprTicket) -> Rc<Self> {
let store = BorrowedExprStore::new();
let expr = Self::borrowed(tk, &store);
store.dispose().await;
expr
}
/// The raw ticket used in messages. If you want to transfer ownership via the
/// ticket, you should use [ExprHandle::serialize]. Only send this if you want
/// to lend the expr, and you expect the receiver to use
/// [ExprHandle::borrowed] or [ExprHandle::from_ticket]
pub fn ticket(&self) -> api::ExprTicket { self.0 }
async fn send_acq(&self) { notify(api::Acquire(sys_id(), self.0)).await }
/// If this is the last reference, do nothing, otherwise send an Acquire
pub async fn on_borrow_expire(self: Rc<Self>) { self.serialize().await; }
/// Drop the handle and get the ticket without a release notification.
/// Use this with messages that imply ownership transfer. This function is
/// safe because abusing it is a memory leak.
pub fn into_tk(self) -> api::ExprTicket { self.destructure().0 }
pub async fn serialize(self: Rc<Self>) -> api::ExprTicket {
match Rc::try_unwrap(self) {
Err(rc) => {
rc.send_acq().await;
rc.0
},
Ok(hand) => hand.destructure().0,
}
}
}
impl fmt::Debug for ExprHandle {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "ExprHandle({})", self.tk.0)
}
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "ExprHandle({})", self.0.0) }
}
impl Drop for ExprHandle {
fn drop(&mut self) {
let notif = api::Release(self.ctx.sys_id(), self.tk);
let reqnot = self.ctx.reqnot().clone();
self.ctx.spawner()(Box::pin(async move { reqnot.notify(notif).await }))
let notif = api::Release(sys_id(), self.0);
stash(async move { notify(notif).await })
}
}
/// A smart object that keeps an expression alive in the interpreter until all
/// references are dropped and provides information about that expression. These
/// can be stored in any pattern, but care must be taken as adding new ones into
/// a structure that is already visible to the interpreter can easily cause a
/// memory leak.
#[derive(Clone, Debug, destructure)]
pub struct Expr {
handle: Rc<ExprHandle>,
data: Rc<OnceCell<ExprData>>,
}
impl Expr {
/// Wrap a handle in order to retrieve details about it
pub fn from_handle(handle: Rc<ExprHandle>) -> Self { Self { handle, data: Rc::default() } }
pub fn new(handle: Rc<ExprHandle>, d: ExprData) -> Self {
/// Wrap a handle the details of which are already known
pub fn from_data(handle: Rc<ExprHandle>, d: ExprData) -> Self {
Self { handle, data: Rc::new(OnceCell::from(d)) }
}
/// Creates an instance without incrementing the reference count. This is
/// only safe to be called on a reference created with an [Expr::serialize]
/// call which created the loose reference it can take ownership of.
pub async fn deserialize(tk: api::ExprTicket) -> Self {
Self::from_handle(ExprHandle::deserialize(tk))
}
/// Fetch the details of this expression via [api::Inspect] if not already
/// known, and return them
pub async fn data(&self) -> &ExprData {
(self.data.get_or_init(async {
let details = self.handle.ctx.reqnot().request(api::Inspect { target: self.handle.tk }).await;
let pos = Pos::from_api(&details.location, self.handle.ctx.i()).await;
let details = request(api::Inspect { target: self.handle.ticket() }).await;
let pos = Pos::from_api(&details.location).await;
let kind = match details.kind {
api::InspectedKind::Atom(a) =>
ExprKind::Atom(ForeignAtom::new(self.handle.clone(), a, pos.clone())),
api::InspectedKind::Bottom(b) =>
ExprKind::Bottom(OrcErrv::from_api(&b, self.handle.ctx.i()).await),
api::InspectedKind::Bottom(b) => ExprKind::Bottom(OrcErrv::from_api(b).await),
api::InspectedKind::Opaque => ExprKind::Opaque,
};
ExprData { pos, kind }
}))
.await
}
/// Attempt to downcast this to a [ForeignAtom]
pub async fn atom(self) -> Result<ForeignAtom, Self> {
match self.data().await {
ExprData { kind: ExprKind::Atom(atom), .. } => Ok(atom.clone()),
_ => Err(self),
}
}
/// Obtain code location info associated with this expression for logging.
pub async fn pos(&self) -> Pos { self.data().await.pos.clone() }
/// Clone out the handle for this expression
pub fn handle(&self) -> Rc<ExprHandle> { self.handle.clone() }
pub fn ctx(&self) -> SysCtx { self.handle.ctx.clone() }
pub fn slot(&self) -> GExpr {
GExpr { pos: Pos::SlotTarget, kind: GExprKind::Slot(self.clone()) }
}
/// Wrap this expression in a [GExpr] synchronously as an escape hatch.
/// Otherwise identical to this type's [crate::ToExpr] impl
pub fn slot(&self) -> GExpr { slot(self.clone()) }
/// Increments the refcount to ensure that the ticket remains valid even if
/// the handle is freed. To avoid a leak, [Expr::deserialize] must eventually
/// be called.
pub async fn serialize(self) -> api::ExprTicket { self.handle.serialize().await }
}
impl Format for Expr {
async fn print<'a>(&'a self, _c: &'a (impl FmtCtx + ?Sized + 'a)) -> FmtUnit {
match &self.data().await.kind {
ExprKind::Opaque => "OPAQUE".to_string().into(),
ExprKind::Bottom(b) => format!("Bottom({b})").into(),
ExprKind::Atom(a) =>
FmtUnit::from_api(&self.handle.ctx.reqnot().request(ExtAtomPrint(a.atom.clone())).await),
}
FmtUnit::from_api(&request(api::ExprPrint { target: self.handle.0 }).await)
}
}
impl Eq for Expr {}
impl PartialEq for Expr {
fn eq(&self, other: &Self) -> bool { self.handle == other.handle }
}
impl Hash for Expr {
fn hash<H: Hasher>(&self, state: &mut H) { self.handle.hash(state); }
}
/// Information about an expression
#[derive(Clone, Debug)]
pub struct ExprData {
/// Source code location data associated with the expression for debug logging
pub pos: Pos,
/// Limited information on the value available to extensions
pub kind: ExprKind,
}
/// All that is visible about a runtime value to extensions. This
/// information is limited for the sake of extensibility
#[derive(Clone, Debug)]
pub enum ExprKind {
/// An atom, local or foreign
Atom(ForeignAtom),
/// A runtime error
Bottom(OrcErrv),
/// Some other value, possibly normalizes to one of the above
Opaque,
}

View File

@@ -0,0 +1,13 @@
use std::pin::Pin;
use std::rc::Rc;
use std::time::Duration;
use futures::future::LocalBoxFuture;
use futures::{AsyncRead, AsyncWrite};
pub struct ExtPort {
pub input: Pin<Box<dyn AsyncRead>>,
pub output: Pin<Box<dyn AsyncWrite>>,
pub log: Pin<Box<dyn AsyncWrite>>,
pub spawn: Rc<dyn Fn(Duration, LocalBoxFuture<'static, ()>)>,
}

View File

@@ -1,41 +1,102 @@
use std::any::TypeId;
use std::borrow::Cow;
use std::cell::RefCell;
use std::collections::HashMap;
use std::future::Future;
use std::pin::Pin;
use std::rc::Rc;
use async_std::io::Write;
use async_std::sync::Mutex;
use futures::FutureExt;
use futures::future::LocalBoxFuture;
use futures::future::{LocalBoxFuture, join_all};
use futures::{AsyncWrite, FutureExt};
use itertools::Itertools;
use never::Never;
use orchid_api_traits::Encode;
use orchid_base::clone;
use orchid_base::error::OrcRes;
use orchid_base::format::{FmtCtx, FmtUnit};
use orchid_base::name::Sym;
use orchid_base::{FmtCtx, FmtUnit, OrcRes, Pos, Sym, clone};
use task_local::task_local;
use trait_set::trait_set;
use crate::atom::Atomic;
use crate::atom_owned::{DeserializeCtx, OwnedAtom, OwnedVariant};
use crate::conv::ToExpr;
use crate::expr::Expr;
use crate::gen_expr::GExpr;
use crate::system::{SysCtx, SysCtxEntry};
use crate::gen_expr::{GExpr, new_atom};
use crate::{
Atomic, DeserializeCtx, ExecHandle, Expr, OwnedAtom, OwnedVariant, ToExpr, api, exec, sys_id,
};
trait_set! {
trait FunCB = Fn(Vec<Expr>) -> LocalBoxFuture<'static, OrcRes<GExpr>> + 'static;
}
pub trait ExprFunc<I, O>: Clone + 'static {
const ARITY: u8;
fn apply(&self, v: Vec<Expr>) -> impl Future<Output = OrcRes<GExpr>>;
task_local! {
static ARGV: Vec<Expr>;
}
#[derive(Default)]
struct FunsCtx(Mutex<HashMap<Sym, (u8, Rc<dyn FunCB>)>>);
impl SysCtxEntry for FunsCtx {}
/// Wihtin an [ExprFunc]'s body, access a raw argument by index
pub fn get_arg(idx: usize) -> Expr {
ARGV
.try_with(|argv| {
(argv.get(idx).cloned())
.unwrap_or_else(|| panic!("Cannot read argument ##{idx}, only have {}", argv.len()))
})
.expect("get_arg called outside ExprFunc")
}
/// Find the number of arguments accepted by this [ExprFunc]
pub fn get_argc() -> usize {
ARGV.try_with(|argv| argv.len()).expect("get_arg called outside ExprFunc")
}
/// Retrieve the code locations associated with specific arguments by index.
/// This is intended to be the last argument to [orchid_base::mk_errv]
pub async fn get_arg_posv(idxes: impl IntoIterator<Item = usize>) -> impl Iterator<Item = Pos> {
let args = (ARGV.try_with(|argv| idxes.into_iter().map(|i| &argv[i]).cloned().collect_vec()))
.expect("get_arg_posv called outside ExprFunc");
join_all(args.iter().map(|expr| expr.pos())).await.into_iter()
}
/// A Rust lambda that can be placed into an Orchid atom. This requires that
///
/// - the lambda is [Clone] and `'static`
/// - All of its arguments are [crate::TryFromExpr]
/// - Its return value is [crate::ToExpr]
/// - For the sake of compilation time, at present the trait is only implemented
/// for up to 6 arguments
pub trait ExprFunc<I, O>: Clone + 'static {
fn argtyps() -> &'static [TypeId];
fn apply<'a>(&self, hand: ExecHandle<'a>, v: Vec<Expr>) -> impl Future<Output = OrcRes<GExpr>>;
}
task_local! {
static FUNS_CTX: RefCell<HashMap<(api::SysId, Sym), FunRecord>>;
}
pub(crate) fn with_funs_ctx<'a>(fut: LocalBoxFuture<'a, ()>) -> LocalBoxFuture<'a, ()> {
Box::pin(FUNS_CTX.scope(RefCell::default(), fut))
}
#[derive(Clone)]
struct FunRecord {
argtyps: &'static [TypeId],
fun: Rc<dyn FunCB>,
}
fn process_args<I, O, F: ExprFunc<I, O>>(f: F) -> FunRecord {
let argtyps = F::argtyps();
let fun = Rc::new(move |v: Vec<Expr>| {
clone!(f, v mut);
exec(async move |mut hand| {
let mut norm_args = Vec::with_capacity(v.len());
for (expr, typ) in v.into_iter().zip(argtyps) {
if *typ == TypeId::of::<Expr>() {
norm_args.push(expr);
} else {
norm_args.push(hand.exec(expr).await?);
}
}
f.apply(hand, norm_args).await
})
.map(Ok)
.boxed_local()
});
FunRecord { argtyps, fun }
}
/// An Atom representing a partially applied named native function. These
/// partial calls are serialized into the name of the native function and the
@@ -46,23 +107,23 @@ impl SysCtxEntry for FunsCtx {}
pub(crate) struct Fun {
path: Sym,
args: Vec<Expr>,
arity: u8,
fun: Rc<dyn FunCB>,
record: FunRecord,
}
impl Fun {
pub async fn new<I, O, F: ExprFunc<I, O>>(path: Sym, ctx: SysCtx, f: F) -> Self {
let funs: &FunsCtx = ctx.get_or_default();
let mut fung = funs.0.lock().await;
let fun = if let Some(x) = fung.get(&path) {
x.1.clone()
} else {
let fun = Rc::new(move |v| clone!(f; async move { f.apply(v).await }.boxed_local()));
fung.insert(path.clone(), (F::ARITY, fun.clone()));
fun
};
Self { args: vec![], arity: F::ARITY, path, fun }
pub async fn new<I, O, F: ExprFunc<I, O>>(path: Sym, f: F) -> Self {
FUNS_CTX.with(|cx| {
let mut fung = cx.borrow_mut();
let record = if let Some(record) = fung.get(&(sys_id(), path.clone())) {
record.clone()
} else {
let record = process_args(f);
fung.insert((sys_id(), path.clone()), record.clone());
record
};
Self { args: vec![], path, record }
})
}
pub fn arity(&self) -> u8 { self.arity }
pub fn arity(&self) -> u8 { self.record.argtyps.len() as u8 }
}
impl Atomic for Fun {
type Data = ();
@@ -71,46 +132,43 @@ impl Atomic for Fun {
impl OwnedAtom for Fun {
type Refs = Vec<Expr>;
async fn val(&self) -> Cow<'_, Self::Data> { Cow::Owned(()) }
async fn call_ref(&self, arg: Expr) -> GExpr {
std::io::Write::flush(&mut std::io::stderr()).unwrap();
async fn call_ref(&self, arg: Expr) -> impl ToExpr {
let new_args = self.args.iter().cloned().chain([arg]).collect_vec();
if new_args.len() == self.arity.into() {
(self.fun)(new_args).await.to_expr()
if new_args.len() == self.record.argtyps.len() {
(self.record.fun)(new_args).await.to_gen().await
} else {
Self { args: new_args, arity: self.arity, fun: self.fun.clone(), path: self.path.clone() }
.to_expr()
new_atom(Self { args: new_args, record: self.record.clone(), path: self.path.clone() })
}
}
async fn call(self, arg: Expr) -> GExpr { self.call_ref(arg).await }
async fn serialize(&self, _: SysCtx, write: Pin<&mut (impl Write + ?Sized)>) -> Self::Refs {
self.path.to_api().encode(write).await;
async fn serialize(&self, write: Pin<&mut (impl AsyncWrite + ?Sized)>) -> Self::Refs {
self.path.to_api().encode(write).await.unwrap();
self.args.clone()
}
async fn deserialize(mut ctx: impl DeserializeCtx, args: Self::Refs) -> Self {
let sys = ctx.sys();
let path = Sym::from_api(ctx.decode().await, sys.i()).await;
let (arity, fun) = sys.get_or_default::<FunsCtx>().0.lock().await.get(&path).unwrap().clone();
Self { args, arity, path, fun }
async fn deserialize(mut ds_cx: impl DeserializeCtx, args: Self::Refs) -> Self {
let path = Sym::from_api(ds_cx.decode().await).await;
let record = (FUNS_CTX.with(|funs| funs.borrow().get(&(sys_id(), path.clone())).cloned()))
.expect("Function missing during deserialization")
.clone();
Self { args, path, record }
}
async fn print<'a>(&'a self, _: &'a (impl FmtCtx + ?Sized + 'a)) -> FmtUnit {
format!("{}:{}/{}", self.path, self.args.len(), self.arity).into()
async fn print_atom<'a>(&'a self, _: &'a (impl FmtCtx + ?Sized + 'a)) -> FmtUnit {
format!("{}:{}/{}", self.path, self.args.len(), self.arity()).into()
}
}
/// An Atom representing a partially applied native lambda. These are not
/// serializable.
///
/// See [Fun] for the serializable variant
/// See [crate::fun] for the serializable variant
#[derive(Clone)]
pub struct Lambda {
args: Vec<Expr>,
arity: u8,
fun: Rc<dyn FunCB>,
record: FunRecord,
}
impl Lambda {
/// Embed a lambda in an Orchid expression
pub fn new<I, O, F: ExprFunc<I, O>>(f: F) -> Self {
let fun = Rc::new(move |v| clone!(f; async move { f.apply(v).await }.boxed_local()));
Self { args: vec![], arity: F::ARITY, fun }
Self { args: vec![], record: process_args(f) }
}
}
impl Atomic for Lambda {
@@ -120,55 +178,61 @@ impl Atomic for Lambda {
impl OwnedAtom for Lambda {
type Refs = Never;
async fn val(&self) -> Cow<'_, Self::Data> { Cow::Owned(()) }
async fn call_ref(&self, arg: Expr) -> GExpr {
async fn call_ref(&self, arg: Expr) -> impl ToExpr {
let new_args = self.args.iter().cloned().chain([arg]).collect_vec();
if new_args.len() == self.arity.into() {
(self.fun)(new_args).await.to_expr()
if new_args.len() == self.record.argtyps.len() {
(self.record.fun)(new_args).await.to_gen().await
} else {
Self { args: new_args, arity: self.arity, fun: self.fun.clone() }.to_expr()
new_atom(Self { args: new_args, record: self.record.clone() })
}
}
async fn call(self, arg: Expr) -> GExpr { self.call_ref(arg).await }
}
mod expr_func_derives {
use orchid_base::error::OrcRes;
use std::any::TypeId;
use std::sync::OnceLock;
use super::ExprFunc;
use orchid_base::OrcRes;
use super::{ARGV, ExprFunc};
use crate::conv::{ToExpr, TryFromExpr};
use crate::func_atom::Expr;
use crate::func_atom::{ExecHandle, Expr};
use crate::gen_expr::GExpr;
macro_rules! expr_func_derive {
($arity: tt, $($t:ident),*) => {
($($t:ident),*) => {
pastey::paste!{
impl<
$($t: TryFromExpr, )*
$($t: TryFromExpr + 'static, )*
Out: ToExpr,
Func: AsyncFn($($t,)*) -> Out + Clone + Send + Sync + 'static
Func: AsyncFn($($t,)*) -> Out + Clone + 'static
> ExprFunc<($($t,)*), Out> for Func {
const ARITY: u8 = $arity;
async fn apply(&self, v: Vec<Expr>) -> OrcRes<GExpr> {
assert_eq!(v.len(), Self::ARITY.into(), "Arity mismatch");
fn argtyps() -> &'static [TypeId] {
static STORE: OnceLock<Vec<TypeId>> = OnceLock::new();
&*STORE.get_or_init(|| vec![$(TypeId::of::<$t>()),*])
}
async fn apply<'a>(&self, _: ExecHandle<'a>, v: Vec<Expr>) -> OrcRes<GExpr> {
assert_eq!(v.len(), Self::argtyps().len(), "Arity mismatch");
let argv = v.clone();
let [$([< $t:lower >],)*] = v.try_into().unwrap_or_else(|_| panic!("Checked above"));
Ok(self($($t::try_from_expr([< $t:lower >]).await?,)*).await.to_expr())
Ok(ARGV.scope(argv, self($($t::try_from_expr([< $t:lower >]).await?,)*)).await.to_gen().await)
}
}
}
};
}
expr_func_derive!(1, A);
expr_func_derive!(2, A, B);
expr_func_derive!(3, A, B, C);
expr_func_derive!(4, A, B, C, D);
expr_func_derive!(5, A, B, C, D, E);
expr_func_derive!(6, A, B, C, D, E, F);
expr_func_derive!(7, A, B, C, D, E, F, G);
expr_func_derive!(8, A, B, C, D, E, F, G, H);
expr_func_derive!(9, A, B, C, D, E, F, G, H, I);
expr_func_derive!(10, A, B, C, D, E, F, G, H, I, J);
expr_func_derive!(11, A, B, C, D, E, F, G, H, I, J, K);
expr_func_derive!(12, A, B, C, D, E, F, G, H, I, J, K, L);
expr_func_derive!(13, A, B, C, D, E, F, G, H, I, J, K, L, M);
expr_func_derive!(14, A, B, C, D, E, F, G, H, I, J, K, L, M, N);
expr_func_derive!(A);
expr_func_derive!(A, B);
expr_func_derive!(A, B, C);
expr_func_derive!(A, B, C, D);
expr_func_derive!(A, B, C, D, E);
expr_func_derive!(A, B, C, D, E, F);
// expr_func_derive!(A, B, C, D, E, F, G);
// expr_func_derive!(A, B, C, D, E, F, G, H);
// expr_func_derive!(A, B, C, D, E, F, G, H, I);
// expr_func_derive!(A, B, C, D, E, F, G, H, I, J);
// expr_func_derive!(A, B, C, D, E, F, G, H, I, J, K);
// expr_func_derive!(A, B, C, D, E, F, G, H, I, J, K, L);
// expr_func_derive!(A, B, C, D, E, F, G, H, I, J, K, L, M);
// expr_func_derive!(A, B, C, D, E, F, G, H, I, J, K, L, M, N);
}

Some files were not shown because too many files have changed in this diff Show More