salad commit
@@ -13,5 +13,5 @@ hashbrown = "0.12"
 mappable-rc = "0.1"
 ordered-float = "3.0"
 itertools = "0.10"
-smallvec = "1.10.0"
+smallvec = { version = "1.10.0", features = ['const_generics'] }
 lazy_static = "1.4.0"
README.md (68 lines changed)
@@ -122,36 +122,33 @@ types whose defaults have implementations based on your defaults.
 For a demonstration, here's a sample implementation of the Option monad.
 ```orchid
 --[[ The definition of Monad ]]--
-Bind := \M:Type -> Type. @T -> @U -> (T -> M U) -> M T -> M U
-Return := \M:Type -> Type. @T -> T -> M T
-Monad := \M:Type -> Type. (
-  @:Bind M.
-  @:Return M.
-  0 --[ Note that empty expressions are forbidden so those that exist
-    purely for their constraints should return a nondescript constant
-    that is likely to raise a type error when used by mistake, such as
-    zero ]--
+define Monad $M:(Type -> Type) as (Pair
+  (@T. @U. (T -> M U) -> M T -> M U) -- bind
+  (@T. T -> M T) -- return
 )
 
+bind := @M:Type -> Type. @monad:Monad M. fst monad
+return := @M:Type -> Type. @monad:Monad M. snd monad
 
 --[[ The definition of Option ]]--
-export Option := \T:Type. @U -> U -> (T -> U) -> U
+define Option $T as @U. U -> (T -> U) -> U
 --[ Constructors ]--
-export Some := @T. \data:T. ( \default. \map. map data ):(Option T)
-export None := @T. ( \default. \map. default ):(Option T)
+export Some := @T. \data:T. categorise @(Option T) ( \default. \map. map data )
+export None := @T. categorise @(Option T) ( \default. \map. default )
 --[ Implement Monad ]--
-default returnOption := Some:(Return Option)
-default bindOption := ( @T:Type. @U:Type.
-  \f:T -> U. \opt:Option T. opt None f
-):(Bind Option)
+impl Monad Option via (makePair
+  ( @T. @U. \f:T -> U. \opt:Option T. opt None \x. Some f ) -- bind
+  Some -- return
+)
 --[ Sample function that works on unknown monad to demonstrate HKTs.
    Turns (Option (M T)) into (M (Option T)), "raising" the unknown monad
    out of the Option ]--
-export raise := @M:Type -> Type. @T:Type. @:Monad M. \opt:Option (M T). (
+export raise := @M:Type -> Type. @T. @:Monad M. \opt:Option (M T). (
   opt (return None) (\m. bind m (\x. Some x))
 ):(M (Option T))
 ```
 
-Defaults may be defined in any module that also defines at least one of
+Typeclasses may be implemented in any module that also defines at least one of
 the types in the definition, which includes both the type of the
 expression and the types of its auto parameters. They always have a name,
 which can be used to override known defaults with which your definition
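For comparison, the pair encoding above is ordinary dictionary passing. A rough Rust sketch of the same idea (every name below is illustrative, not from this repo):

```rust
// A typeclass instance as a plain record of functions, mirroring the
// `(Pair bind return)` encoding that `fst`/`snd` project out of above.
struct MonadDict<T, U> {
    // bind : (T -> M U) -> M T -> M U, specialized to M = Option
    bind: fn(Option<T>, fn(T) -> Option<U>) -> Option<U>,
    // return : T -> M T
    ret: fn(T) -> Option<T>,
}

fn option_monad<T, U>() -> MonadDict<T, U> {
    MonadDict {
        bind: |opt, f| match opt {
            None => None,    // mirrors `opt None ...`
            Some(x) => f(x), // mirrors applying the mapping function
        },
        ret: Some, // `return` is just the Some constructor
    }
}

fn main() {
    let m = option_monad::<i32, i32>();
    assert_eq!((m.bind)(Some(21), |x| Some(x * 2)), Some(42));
    assert_eq!((m.ret)(5), Some(5));
}
```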
@@ -162,30 +159,34 @@ Add has three arguments, two are the types of the operands and one is
 the result:
 
 ```orchid
-default concatListAdd replacing elementwiseAdd := @T. (
+impl @T. Add (List T) (List T) (List T) by concatListAdd over elementwiseAdd via (
   ...
-):(Add (List T) (List T) (List T))
+)
 ```
 
 For completeness' sake, the original definition might look like this:
 
 ```orchid
-default elementwiseAdd := @C:Type -> Type. @T. @U. @V. @:(Applicative C). @:(Add T U V). (
+impl
+  @C:Type -> Type. @T. @U. @V. -- variables
+  @:(Applicative C). @:(Add T U V). -- conditions
+  Add (C T) (C U) (C V) -- target
+by elementwiseAdd via (
   ...
-):(Add (C T) (C U) (C V))
+)
 ```
 
 With the use of autos, here's what the recursive multiplication
 implementation looks like:
 
 ```orchid
-default iterativeMultiply := @T. @:(Add T T T). (
-\a:int.\b:T. loop \r. (\i.
+impl @T. @:(Add T T T). Multiply T int T by iterativeMultiply via (
+  \a:int. \b:T. loop \r. (\i.
     ifthenelse (ieq i 0)
       b
       (add b (r (isub i 1)) -- notice how iadd is now add
   ) a
-):(Multiply T int T)
+)
 ```
 
 This could then be applied to any type that's closed over addition
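The auto parameter `@:(Add T T T)` plays the same role as a trait bound. A loose Rust sketch of multiplication over any type closed under addition (a sketch under assumed names, not code from this repository):

```rust
use std::ops::Add;

// The `T: Add<Output = T>` bound stands in for the auto @:(Add T T T).
// Assumes a >= 1; like the Orchid snippet, the base case returns b itself.
fn iterative_multiply<T: Add<Output = T> + Clone>(a: u32, b: T) -> T {
    let mut acc = b.clone();
    for _ in 1..a {
        acc = acc + b.clone(); // `add` resolved by the bound, not hardwired to iadd
    }
    acc
}

fn main() {
    assert_eq!(iterative_multiply(4, 3i64), 12);
    assert_eq!(iterative_multiply(4, 2.5f64), 10.0);
}
```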
@@ -196,6 +197,8 @@ aroundTheWorldLyrics := (
 )
 ```
 
+For my notes on the declare/impl system, see [notes/type_system]
+
 ## Preprocessor
 
 The above code samples have one notable difference from the Examples
@@ -218,25 +221,24 @@ are searched back-to-front. If order is still a problem, you can always
 parenthesize subexpressions at the callsite.
 
 ```orchid
-(..$pre:2 if $1 then $2 else $3 ..$post:1) =2=> (
+(..$pre:2 if ...$cond then ...$true else ...$false) =10=> (
   ..$pre
-  (ifthenelse $1 $2 $3)
-  ...$post
+  (ifthenelse (...$cond) (...$true) (...$false))
 )
-$a + $b =10=> (add $a $b)
-$a = $b =5=> (eq $a $b)
-$a - $b =10=> (sub $a $b)
+...$a + ...$b =2=> (add (...$a) (...$b))
+...$a = ...$b =5=> (eq $a $b)
+...$a - ...$b =2=> (sub (...$a) (...$b))
 ```
 
 The recursive multiplication function now looks like this:
 
 ```orchid
-default iterativeMultiply := @T. @:(Add T T T). (
+impl @T. @:(Add T T T). Multiply T int T by iterativeMultiply via (
   \a:int.\b:T. loop \r. (\i.
     if (i = 0) then b
     else (b + (r (i - 1)))
   ) a
-):(Multiply T int T)
+)
 ```
 
 ### Traversal using carriages
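A toy model of the priority-ordered rewriting sketched by the `=N=>` rules above; real Orchid rules match token trees with placeholders like `...$cond`, while this illustration only substitutes fixed substrings and assumes that higher priority simply means tried first:

```rust
// Rules are tried highest priority first; rewriting repeats until no rule
// matches. Only the scheduling is modeled here, not Orchid's real matcher.
struct Rule {
    priority: u32,
    pattern: &'static str,
    replacement: &'static str,
}

fn rewrite(mut src: String, rules: &mut Vec<Rule>) -> String {
    rules.sort_by(|a, b| b.priority.cmp(&a.priority));
    loop {
        let Some(rule) = rules.iter().find(|r| src.contains(r.pattern)) else {
            return src; // normal form: no rule applies anymore
        };
        src = src.replacen(rule.pattern, rule.replacement, 1);
    }
}

fn main() {
    let mut rules = vec![
        Rule { priority: 5, pattern: "x = y", replacement: "(eq x y)" },
        Rule { priority: 2, pattern: "x + y", replacement: "(add x y)" },
    ];
    assert_eq!(rewrite("x + y".to_string(), &mut rules), "(add x y)");
}
```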
notes/papers/project_synopsis/Makefile (new file, 28 lines)
@@ -0,0 +1,28 @@
+# (c) 2010: Johann A. Briffa <j.briffa@ieee.org>
+# $Id: Makefile 1791 2010-09-28 17:00:10Z jabriffa $
+
+TARGETS := main.pdf
+DEPENDS := $(wildcard *.tex) $(wildcard *.cls) $(wildcard *.bib)
+
+PDFLATEX=pdflatex
+
+.force:
+
+all: $(TARGETS)
+
+archive: $(TARGETS)
+	rm -f archive.zip
+	zip -r archive.zip Figures/ Makefile *.cls *.tex *.bib $(TARGETS) -x "*.svn*"
+
+%.bbl: %.aux
+	bibtex $*
+
+%.aux: %.tex $(DEPENDS)
+	$(PDFLATEX) $*.tex
+
+%.pdf: %.aux %.bbl
+	$(PDFLATEX) $*.tex
+	$(PDFLATEX) $*.tex
+
+clean:
+	-/bin/rm -f $(TARGETS) *.aux *.log *.bbl *.blg *.out *.toc *.lof *.lot
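Usage note: with pdflatex and bibtex on the PATH, `make` (or `make all`) produces main.pdf through the aux/bbl pattern rules above, `make archive` zips the sources, and `make clean` removes the build products.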
notes/papers/project_synopsis/main.tex (new file, 136 lines)
@@ -0,0 +1,136 @@
+\documentclass{article}
+\usepackage{graphicx}
+\usepackage[margin=2cm]{geometry}
+\usepackage[hidelinks]{hyperref}
+
+\title{Orchid's Type System}
+\author{Lawrence Bethlenfalvy, 6621227}
+\date{12 November 2022}
+
+% Why would you toss all this in the template if it just doesn't compile!?
+%\urn{6621227}
+%\degree{Bachelor of Science in Computer Science}
+%\supervisor{Brijesh Dongol}
+
+\begin{document}
+\maketitle
+
+\section{Introduction}
+
+Originally my final year project was going to be an entire programming language, which I started to
+develop around February; at the start of this year, however, I decided to set a more reasonable goal.
+
+Orchid is a functional programming language inspired by $\lambda$-calculus, Haskell and Rust. The
+execution model is exactly $\lambda$-calculus, with opaque predicates and functions representing
+foreign data such as numbers, file descriptors and their respective operations. To accommodate
+side effects caused by foreign functions, reduction is carried out in normal order, just like in
+Haskell.
+
+There are two metaprogramming systems, one syntax-level and one type-level, similar to Rust.
+Syntax-level metaprogramming is based on generalized kerning; it is mostly defined, and a naive
+implementation is complete at the time of writing. Type-level metaprogramming resembles Prolog and
+is a major objective of this year's project.
+
+The project's home is this repository which, at the time of writing, contains fairly outdated code
+samples: \url{https://github.com/lbfalvy/orchid}
+
+\subsection{Aims}
+
+My goal for this year is to define a robust, usable type system and write a performant
+implementation.
+
+The next phase of development will be a compiler frontend for LLVM. If the type system reaches a
+satisfactory level of completion before the dissertation is complete, I may also write a bit about
+the compilation.
+
+If due to some unforeseen circumstances I'm unable to complete enough of the type system to fill
+the dissertation with its details, or it ends up too simple, I may also write about the macro
+system, which is already in a usable state and only needs some optimizations and minor adjustments
+due to shifts in responsibilities which occurred while I was defining the basic properties of the
+type system and experimenting with concrete code examples to get a clearer picture of the
+provisional feature set.
+
+\subsection{Objectives}
+
+A working type system should have the following parts, which I will implement in roughly this
+order:
+
+\begin{itemize}
+  \item \textbf{Type inference engine and type checker} This will be an extension of
+    the Hindley-Milner algorithm, which simultaneously unifies and completes partial type
+    annotations, and recognizes conflicts.
+  \item \textbf{Typeclass solver} At the moment this appears to be a relatively simple piece of
+    code, but I'm not entirely confident that complications won't arise as its responsibilities
+    become clearer, so I consider it a separate component.
+  \item \textbf{Executor} Orchid is a statically typed language, so it should eventually be compiled
+    with LLVM, but in order to demonstrate the usability of my type system I will have to write
+    an experimental interpreter. Since types are already essentially expressions of type type,
+    parts of the executor will coincide with parts of the type inference engine.
+\end{itemize}
+
+\section{Literature Review}
+
+The preprocessor can parse arbitrary syntax. Generalized kerning can use ``rockets''
+(called carriages in Orchid terminology) to parse token sequences statefully and assume
+the role of an arbitrary parser encoded as a rich Turing machine.\cite{suckerpinch}
+
+The type system supports higher-kinded types. I considered translating higher-kinded polymorphism
+into abstract types as demonstrated by Yallop\cite{yallop}, which can be translated into
+Prolog, and then building the breadth-first executor described by Tubella\cite{tubella}, but
+in the end I decided that since I'm already building my own unification system I might as well
+skip this step. Currently expressions are annotated with common Orchid expressions that evaluate to
+a type. This means that unification is uncomputable in some cases, but the most common cases,
+such as halting expressions and recursive types using fixed point combinators, can be unified
+fairly easily, and this leaves room for extension of the unification engine.
+
+\section{Technical Overview}
+
+\subsection{Type checker}
+
+Type expressions to be unified are collected into a group. For the purpose of unification, types
+are either opaque types with possible parameters, which are considered equal if both the type and
+its parameters are equal, or transparent lambda expressions applied to types. Before unification
+begins, the expressions that refer to equal types are collected in a group. A breadth-first search
+through the network of reduced forms is executed for all expressions in lockstep, and
+syntactic unification is attempted on each pair of reduced forms belonging to different expressions
+in the same group.
+
+At a minimum, the following must be valid reduction steps:
+
+\begin{itemize}
+  \item $\beta$-reduction
+  \item fixed point normalization, which simply means identifying that a subexpression has
+    reduced to an expression that contains the original. When a fixed point is detected, the
+    recursive expression is converted to a form that uses the Y-combinator. This operation
+    is ordered before $\beta$-reductions of the expression in the BFS tree but otherwise has
+    the same precedence.
+\end{itemize}
+
+\subsection{Typeclass solver}
+
+This will be relatively simple and strongly resemble Rust's Chalk trait solver, with the exception
+that I would prefer not to enforce the orphan rules on the language level, so as not to completely
+stall projects while a third-party developer accepts pull requests on what might be legacy code to
+add new impls.
+
+\subsection{Executor}
+
+A basic version of the executor can technically be produced by initializing the lazy BFS of
+reductions created for the type checker on runtime code, taking the first result, dropping the
+BFS iterator and repeating these two steps ad infinitum, but this will likely be very inefficient,
+so time permitting I would like to create a somewhat better version. This stands to show how
+developer effort can be reduced - along with the learning curve of the complex type system - by
+reusing the same language for both. A type system supporting HKTs would have to be uncomputable
+either way.
+
+\section{Workplan}
+
+TODO
+
+\appendix
+
+\bibliographystyle{IEEEtran}
+\bibliography{references}
+
+\end{document}
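The lockstep breadth-first search described in the Type checker subsection can be sketched as follows; `Term`, `reduce_once` and `syntactic_eq` are stand-ins invented for illustration, far simpler than the Clause/Expr machinery in this repo:

```rust
#[derive(Clone, PartialEq)]
enum Term {
    Opaque(&'static str, Vec<Term>), // lambdas, applications etc. omitted
}

// One reduction step: all distinct reducts (the "network of reduced forms").
fn reduce_once(t: &Term) -> Vec<Term> {
    let _ = t;
    vec![] // a real implementation would enumerate beta-reductions here
}

fn syntactic_eq(a: &Term, b: &Term) -> bool {
    a == b
}

// Expand both frontiers one step per round, in lockstep, and attempt
// pairwise syntactic unification across them. Bounded by max_rounds
// because unification over arbitrary expressions is undecidable.
fn unify_bfs(a: Term, b: Term, max_rounds: usize) -> bool {
    let (mut fa, mut fb) = (vec![a], vec![b]);
    for _ in 0..max_rounds {
        if fa.iter().any(|x| fb.iter().any(|y| syntactic_eq(x, y))) {
            return true;
        }
        fa = fa.iter().flat_map(reduce_once).collect();
        fb = fb.iter().flat_map(reduce_once).collect();
        if fa.is_empty() || fb.is_empty() {
            break;
        }
    }
    false
}

fn main() {
    let t = Term::Opaque("int", vec![]);
    assert!(unify_bfs(t.clone(), t, 4));
}
```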
notes/papers/project_synopsis/references.bib (new file, 20 lines)
@@ -0,0 +1,20 @@
+@online{suckerpinch,
+  title = {Generalized kerning is undecidable! But anagraphing is possible.},
+  author = {suckerpinch},
+  date = {2017-12},
+  organization = {YouTube},
+  url = {https://www.youtube.com/watch?v=8_npHZbe3qM}
+}
+
+@phdthesis{tubella,
+  author = {Jordi Tubella and Antonio González},
+  school = {Universitat Politècnica de Catalunya},
+  title = {A Partial Breadth-First Execution Model for Prolog},
+  year = {1994}
+}
+
+@misc{yallop,
+  author = {Jeremy Yallop and Leo White},
+  howpublished = {University of Cambridge},
+  title = {Lightweight higher-kinded polymorphism}
+}
@@ -3,5 +3,6 @@ mod foreign;
 mod partial_hash;
 mod reduction_tree;
 mod apply_lambda;
+mod syntax_eq;
 pub use foreign::ExternFn;
 pub use foreign::Atom;
@@ -9,7 +9,7 @@ use super::super::representations::typed::{Clause, Expr};
 
 /// Call the function with the first Expression that isn't an Auto,
 /// wrap all elements in the returned iterator back in the original sequence of Autos.
-fn skip_autos<'a,
+pub fn skip_autos<'a,
   F: 'a + FnOnce(Mrc<Expr>, usize) -> I,
   I: Iterator<Item = Mrc<Expr>> + 'static
 >(
@@ -1,41 +1,159 @@
-use std::hash::{Hasher, Hash};
-
-use super::super::representations::typed::{Clause, Expr};
-use super::super::utils::Stackframe;
-
-/// Hash the parts of an expression that are required to be equal for syntactic equality.
-pub fn syntax_eq_rec<H: Hasher>(
-  ex1: &Expr, ex1_stack: Stackframe<bool>,
-  ex2: &Expr, ex2_stack: Stackframe<bool>
-) -> bool {
-  match clause {
-    // Skip autos and explicits
-    Clause::Auto(_, body) => partial_hash_rec(body, state, is_auto.push(true)),
-    Clause::Explicit(f, _) => partial_hash_rec(f, state, is_auto),
-    // Annotate everything else with a prefix
-    // - Recurse into the tree of lambdas and calls - classic lambda calc
-    Clause::Lambda(_, body) => {
-      state.write_u8(0);
-      partial_hash_rec(body, state, is_auto.push(false))
-    }
-    Clause::Apply(f, x) => {
-      state.write_u8(1);
-      partial_hash_rec(f, state, is_auto);
-      partial_hash_rec(x, state, is_auto);
-    }
-    // - Only recognize the depth of an argument if it refers to a non-auto parameter
-    Clause::Argument(depth) => {
-      // If the argument references an auto, acknowledge its existence
-      if *is_auto.iter().nth(*depth).unwrap_or(&false) {
-        state.write_u8(2)
-      } else {
-        state.write_u8(3);
-        state.write_usize(*depth)
-      }
-    }
-    // - Hash leaves like normal
-    Clause::Literal(lit) => { state.write_u8(4); lit.hash(state) }
-    Clause::Atom(at) => { state.write_u8(5); at.hash(state) }
-    Clause::ExternFn(f) => { state.write_u8(6); f.hash(state) }
-  }
-}
+use std::collections::HashMap;
+use std::hash::{Hasher, Hash};
+
+use mappable_rc::Mrc;
+
+use crate::utils::ProtoMap;
+
+use super::super::representations::typed::{Clause, Expr};
+use super::super::utils::Stackframe;
+
+pub fn swap<T, U>((t, u): (T, U)) -> (U, T) { (u, t) }
+
+// All data to be forwarded during recursion about one half of a unification task
+#[derive(Clone)]
+struct UnifHalfTask<'a> {
+  /// The expression to be unified
+  expr: &'a Expr,
+  /// Auto parameters with their values from the opposite side
+  ctx: &'a ProtoMap<'a, usize, Mrc<Expr>>,
+  /// Stores whether a given relative upreference is auto or lambda
+  is_auto: Option<Stackframe<'a, bool>>,
+  /// Metastack of explicit arguments not yet resolved. An explicit will always exactly pair with
+  /// the first auto below it. Disjoint autos always bubble with a left-to-right precedence.
+  explicits: Option<Stackframe<'a, Mrc<Expr>>>
+}
+
+impl<'a> UnifHalfTask<'a> {
+  fn push_auto(&self, body: &Expr) -> (Self, bool) {
+    if let Some(Stackframe{ prev, .. }) = self.explicits {(
+      Self{
+        expr: body,
+        is_auto: Stackframe::opush(&self.is_auto, false),
+        explicits: prev.cloned(),
+        ..*self
+      },
+      true
+    )} else {(
+      Self{
+        expr: body,
+        is_auto: Stackframe::opush(&self.is_auto, true),
+        ..*self
+      },
+      false
+    )}
+  }
+
+  fn push_lambda(&self, body: &Expr) -> Self {Self{
+    expr: body,
+    is_auto: Stackframe::opush(&self.is_auto, false),
+    ..*self
+  }}
+
+  fn push_explicit(&self, subexpr: &Expr, arg: Mrc<Expr>) -> Self {Self{
+    expr: subexpr,
+    explicits: Stackframe::opush(&self.explicits, arg),
+    ..*self
+  }}
+
+  fn push_expr(&self, f: &Expr) -> Self {Self{
+    expr: f,
+    ..*self
+  }}
+}
+
+#[derive(Default)]
+struct UnifResult {
+  /// Collected identities for the given side
+  context: HashMap<usize, Mrc<Expr>>,
+  /// Number of explicits to be eliminated from task before forwarding to the next branch
+  usedExplicits: usize,
+}
+
+impl UnifResult {
+  fn useExplicit(self) -> Self{Self{
+    usedExplicits: self.usedExplicits + 1,
+    context: self.context.clone()
+  }}
+
+  fn dropUsedExplicits(&mut self, task: &mut UnifHalfTask) {
+    task.explicits = task.explicits.map(|s| {
+      s.pop(self.usedExplicits).expect("More explicits used than provided")
+    }).cloned();
+    self.usedExplicits = 0;
+  }
+}
+
+/// Ascertain syntactic equality. Syntactic equality means that
+/// - lambda elements are verbatim equal
+/// - auto constraints are pairwise syntactically equal after sorting
+///
+/// Context associates variables with subtrees resolved on the opposite side
+pub fn unify_syntax_rec( // the stacks store true for autos, false for lambdas
+  ltask@UnifHalfTask{ expr: lexpr@Expr(lclause, _), .. }: UnifHalfTask,
+  rtask@UnifHalfTask{ expr: rexpr@Expr(rclause, _), .. }: UnifHalfTask
+) -> Option<(UnifResult, UnifResult)> {
+  // Ensure that ex1 is a value-level construct
+  match lclause {
+    Clause::Auto(_, body) => {
+      let res = unify_syntax_rec(ltask.push_auto(body).0, rtask);
+      return if ltask.explicits.is_some() {
+        res.map(|(r1, r2)| (r1.useExplicit(), r2))
+      } else {res}
+    }
+    Clause::Explicit(subexpr, arg) => {
+      let new_ltask = ltask.push_explicit(subexpr, Mrc::clone(arg));
+      return unify_syntax_rec(new_ltask, rtask)
+    }
+    _ => ()
+  };
+  // Reduce ex2's auto handling to ex1's. In the optimizer we trust
+  if let Clause::Auto(..) | Clause::Explicit(..) = rclause {
+    return unify_syntax_rec(rtask, ltask).map(swap);
+  }
+  // Neither ex1 nor ex2 can be Auto or Explicit
+  match (lclause, rclause) {
+    // recurse into both
+    (Clause::Lambda(_, lbody), Clause::Lambda(_, rbody)) => unify_syntax_rec(
+      ltask.push_lambda(lbody),
+      rtask.push_lambda(rbody)
+    ),
+    (Clause::Apply(lf, lx), Clause::Apply(rf, rx)) => {
+      let (lpart, rpart) = unify_syntax_rec(
+        ltask.push_expr(lf),
+        rtask.push_expr(rf)
+      )?;
+      lpart.dropUsedExplicits(&mut ltask);
+      rpart.dropUsedExplicits(&mut rtask);
+      unify_syntax_rec(ltask.push_expr(lx), rtask.push_expr(rx))
+    }
+    (Clause::Atom(latom), Clause::Atom(ratom)) => {
+      if latom != ratom { None }
+      else { Some((UnifResult::default(), UnifResult::default())) }
+    }
+    (Clause::ExternFn(lf), Clause::ExternFn(rf)) => {
+      if lf != rf { None }
+      else { Some((UnifResult::default(), UnifResult::default())) }
+    }
+    (Clause::Literal(llit), Clause::Literal(rlit)) => {
+      if llit != rlit { None }
+      else { Some((UnifResult::default(), UnifResult::default())) }
+    }
+    // TODO Select a representative
+    (Clause::Argument(depth1), Clause::Argument(depth2)) => {
+      !*stack1.iter().nth(*depth1).unwrap_or(&false)
+        && !*stack2.iter().nth(*depth2).unwrap_or(&false)
+        && stack1.iter().count() - depth1 == stack2.iter().count() - depth2
+    }
+    // TODO Assign a substitute
+    (Clause::Argument(placeholder), _) => {
+
+    }
+  }
+}
+
+// Tricky unifications
+// @A. A A 1 ~ @B. 2 B B = fails if left-authoritative
+// @A. 1 A A ~ @B. B B 2
+// @A. A 1 A ~ @B. B B 2
+// @ 0 X 0 ~ @ 0 0 Y
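The `map(swap)` call in `unify_syntax_rec` is a symmetry trick: binder clauses are handled on the left only, and the mirrored case is reduced to it by swapping the tasks and un-swapping the result. The pattern in miniature (illustrative only, not repo code):

```rust
fn swap<T, U>((t, u): (T, U)) -> (U, T) { (u, t) }

// Pretend negative values play the role of Auto/Explicit clauses: only the
// left side knows how to strip them, so a negative right side is handled by
// recursing with the arguments flipped and flipping the result back.
fn normalize(l: i32, r: i32) -> Option<(i32, i32)> {
    if l < 0 { return normalize(-l, r); }          // strip the "binder" on the left
    if r < 0 { return normalize(r, l).map(swap); } // mirror, then un-mirror
    if l == r { Some((l, r)) } else { None }
}

fn main() {
    assert_eq!(normalize(-3, 3), Some((3, 3)));
    assert_eq!(normalize(2, -2), Some((2, 2)));
}
```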
@@ -1,5 +1,7 @@
 #![feature(specialization)]
 #![feature(core_intrinsics)]
+#![feature(adt_const_params)]
+#![feature(generic_const_exprs)]
 
 use std::env::current_dir;
 

@@ -46,21 +48,21 @@ fn initial_tree() -> Mrc<[Expr]> {
 
 #[allow(unused)]
 fn typed_notation_debug() {
-  let t = t::Clause::Auto(None,
+  let true_ex = t::Clause::Auto(None,
     t::Clause::Lambda(Some(Mrc::new(t::Clause::Argument(0))),
       t::Clause::Lambda(Some(Mrc::new(t::Clause::Argument(1))),
        t::Clause::Argument(1).wrap_t(t::Clause::Argument(2))
      ).wrap()
    ).wrap()
  ).wrap();
-  let f = t::Clause::Auto(None,
+  let false_ex = t::Clause::Auto(None,
    t::Clause::Lambda(Some(Mrc::new(t::Clause::Argument(0))),
      t::Clause::Lambda(Some(Mrc::new(t::Clause::Argument(1))),
        t::Clause::Argument(0).wrap_t(t::Clause::Argument(2))
      ).wrap()
    ).wrap()
  ).wrap();
-  println!("{:?}", t::Clause::Apply(t::Clause::Apply(Mrc::clone(&t), t).wrap(), f))
+  println!("{:?}", t::Clause::Apply(t::Clause::Apply(Mrc::clone(&true_ex), true_ex).wrap(), false_ex))
 }
 
 #[allow(unused)]
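A note on the renamed bindings: `true_ex` and `false_ex` appear to be Church booleans under an Auto binder. Reading the de Bruijn indices in `Clause::Argument`, `true_ex` encodes λt. λf. t and `false_ex` encodes λt. λf. f, so the printed `Apply(Apply(true_ex, true_ex), false_ex)` spells out `if true then true else false`.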
@@ -17,6 +17,7 @@ pub use side::Side;
 pub use merge_sorted::merge_sorted;
 pub use iter::BoxedIter;
 pub use string_from_charset::string_from_charset;
+pub use protomap::ProtoMap;
 
 pub fn mrc_derive<T: ?Sized, P, U: ?Sized>(m: &Mrc<T>, p: P) -> Mrc<U>
 where P: for<'a> FnOnce(&'a T) -> &'a U {
@@ -7,12 +7,17 @@ const INLINE_ENTRIES: usize = 2;
 /// Linked-array-list of key-value pairs.
 /// Lookup and modification is O(n + cachemiss * n / m)
 /// Can be extended by reference in O(m) < O(n)
-pub struct ProtoMap<'a, K, V> {
-  entries: SmallVec<[(K, Option<V>); INLINE_ENTRIES]>,
-  prototype: Option<&'a ProtoMap<'a, K, V>>
+///
+/// The number of elements stored inline in a stackframe is 2 by default, which is enough for most
+/// recursive algorithms. The cost of overruns is a heap allocation and subsequent heap indirections,
+/// plus wasted stack space which is likely wasted L1 as well. The cost of underruns is wasted stack
+/// space.
+pub struct ProtoMap<'a, K, V, const STACK_COUNT: usize = 2> {
+  entries: SmallVec<[(K, Option<V>); STACK_COUNT]>,
+  prototype: Option<&'a ProtoMap<'a, K, V, STACK_COUNT>>
 }
 
-impl<'a, K, V> ProtoMap<'a, K, V> {
+impl<'a, K, V, const STACK_COUNT: usize> ProtoMap<'a, K, V, STACK_COUNT> {
   pub fn new() -> Self {
     Self {
       entries: SmallVec::new(),

@@ -104,7 +109,8 @@ impl<'a, K, V> ProtoMap<'a, K, V> {
   }
 
   /// Update the prototype, and correspondingly the lifetime of the map
-  pub fn set_proto<'b>(self, proto: &'b ProtoMap<'b, K, V>) -> ProtoMap<'b, K, V> {
+  pub fn set_proto<'b>(self, proto: &'b ProtoMap<'b, K, V, STACK_COUNT>)
+  -> ProtoMap<'b, K, V, STACK_COUNT> {
     ProtoMap {
       entries: self.entries,
       prototype: Some(proto)
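The inline-capacity trade-off described in the new doc comment is standard smallvec behaviour; a freestanding sketch (requires the smallvec crate, not repo code):

```rust
use smallvec::SmallVec;

fn main() {
    // Two entries live inline on the stack, mirroring STACK_COUNT = 2.
    let mut v: SmallVec<[u32; 2]> = SmallVec::new();
    v.push(1);
    v.push(2);
    assert!(!v.spilled()); // still inline: no heap allocation yet
    v.push(3);             // a third entry overruns the inline buffer...
    assert!(v.spilled());  // ...and the contents move to the heap
}
```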
@@ -6,14 +6,16 @@ use std::fmt::Debug;
 #[derive(Clone, Copy)]
 pub struct Stackframe<'a, T> {
   pub item: T,
-  pub prev: Option<&'a Stackframe<'a, T>>
+  pub prev: Option<&'a Stackframe<'a, T>>,
+  pub len: usize
 }
 
 impl<'a, T: 'a> Stackframe<'a, T> {
   pub fn new(item: T) -> Self {
     Self {
       item,
-      prev: None
+      prev: None,
+      len: 1
     }
   }
   /// Get the item owned by this listlike, very fast O(1)

@@ -27,9 +29,22 @@ impl<'a, T: 'a> Stackframe<'a, T> {
   pub fn push(&self, item: T) -> Stackframe<'_, T> {
     Stackframe {
       item,
-      prev: Some(self)
+      prev: Some(self),
+      len: self.len + 1
     }
   }
+  pub fn opush(prev: &'a Option<Self>, item: T) -> Option<Self> {
+    Some(Self {
+      item,
+      prev: prev.as_ref(),
+      len: prev.as_ref().map_or(1, |s| s.len + 1)
+    })
+  }
+  pub fn len(&self) -> usize { self.len }
+  pub fn pop(&self, count: usize) -> Option<&Self> {
+    if count == 0 {Some(self)}
+    else {self.prev.and_then(|prev| prev.pop(count - 1))}
+  }
 }
 
 impl<'a, T> Debug for Stackframe<'a, T> where T: Debug {

@@ -52,4 +67,4 @@ impl<'a, T> Iterator for StackframeIterator<'a, T> {
     self.curr = prev;
     Some(item)
   }
 }
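A minimal usage sketch of the extended Stackframe API, assuming the type above is in scope; each push borrows the previous frame rather than copying the stack:

```rust
// Assumes `Stackframe` from the module above is in scope.
fn main() {
    let base = Stackframe::new("bottom"); // len 1
    let mid = base.push("middle");        // borrows base, len 2
    let top = mid.push("top");            // borrows mid, len 3
    assert_eq!(top.len(), 3);
    // pop(1) walks one prev-link back to the "middle" frame
    assert_eq!(top.pop(1).map(|f| f.item), Some("middle"));
}
```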