repo_name stringlengths 1 62 | dataset stringclasses 1 value | lang stringclasses 11 values | pr_id int64 1 20.1k | owner stringlengths 2 34 | reviewer stringlengths 2 39 | diff_hunk stringlengths 15 262k | code_review_comment stringlengths 1 99.6k |
|---|---|---|---|---|---|---|---|
Mooncake.jl | github_2023 | others | 220 | compintell | willtebbutt | @@ -123,7 +123,7 @@ Crucially, observe that we distinguish between the state of the arguments before
For our example, the exact form of ``f`` is
```math
-f((x, y, z)) = ((x, y, x \odot y), (2 x \odot y, \sum_{d=1}^D x \odot y))
+f((x, y, z, s)) = ((x, y, x \odot y, \text{Ref}(2 x \odot y)), (2 x \odot y, \sum_{d=1}^D x \odot y)) | Nearly -- you can drop the `Ref` (we've not given `Ref` a mathematical interpreteration):
```julia
f((x, y, z, s)) = ((x, y, x \odot y, 2 x \odot y), (2 x \odot y, \sum_{d=1}^D x \odot y))
``` |
Mooncake.jl | github_2023 | others | 231 | compintell | willtebbutt | @@ -31,9 +31,11 @@ end
Gradient using algorithmic/automatic differentiation via Tapir.
"""
-function ADgradient(::Val{:Tapir}, ℓ; safety_on::Bool=false)
- primal_sig = Tuple{typeof(logdensity), typeof(ℓ), Vector{Float64}}
- rule = Tapir.build_rrule(Tapir.TapirInterpreter(), primal_sig; safety_on)
+function ADgradient(::Val{:Tapir}, ℓ; safety_on::Bool=false, rule=nothing) | What kind of type instabilities are you seeing here? |
Mooncake.jl | github_2023 | others | 229 | compintell | yebai | @@ -368,7 +369,7 @@ function make_ad_stmts!(stmt::PiNode, line::ID, info::ADInfo)
# Assemble the above lines and construct reverse-pass.
return ad_stmt_info(
line,
- PiNode(stmt.val, fcodual_type(_type(stmt.typ))),
+ PiNode(__inc(stmt.val), fcodual_type(_type(stmt.typ))), | Very clear -- thanks! |
Mooncake.jl | github_2023 | others | 210 | compintell | willtebbutt | @@ -436,16 +440,89 @@ end
tangent_field_zeros_exprs = ntuple(fieldcount(P)) do n
if tangent_field_type(P, n) <: PossiblyUninitTangent
V = PossiblyUninitTangent{tangent_type(fieldtype(P, n))}
- return :(isdefined(x, $n) ? $V(zero_tangent(getfield(x, $n))) : $V())
+ return :(isdefined(x, $n) ? $V(zero_tangent_internal(getfield(x, $n))) : $V())
else
- return :(zero_tangent(getfield(x, $n)))
+ return :(zero_tangent_internal(getfield(x, $n)))
end
end
backing_data_expr = Expr(:call, :tuple, tangent_field_zeros_exprs...)
backing_expr = :($(backing_type(P))($backing_data_expr))
return :($(tangent_type(P))($backing_expr))
end
+@inline zero_tangent_internal(x::Union{Int8,Int16,Int32,Int64,Int128,IEEEFloat}, ::IdDict) = zero_tangent_internal(x)
+@inline function zero_tangent_internal(x::SimpleVector, stackdict::IdDict)
+ return map!(n -> zero_tangent_internal(x[n], stackdict), Vector{Any}(undef, length(x)), eachindex(x))
+end
+@inline function zero_tangent_internal(x::Array{P, N}, stackdict::IdDict) where {P, N}
+ if haskey(stackdict, x)
+ return stackdict[x]
+ end
+ if isbitstype(P)
+ return _map_if_assigned!(Base.Fix2(zero_tangent_internal, stackdict), Array{tangent_type(P), N}(undef, size(x)...), x)
+ else
+ stackdict[x] = Array{tangent_type(P), N}(undef, size(x)...)
+ for i in eachindex(x)
+ if isassigned(x, i)
+ stackdict[x][i] = zero_tangent_internal(x[i], stackdict)
+ end
+ end
+ return stackdict[x]
+ end
+end
+function zero_tangent_internal(x::P, stackdict::IdDict) where {P<:Union{Tuple, NamedTuple}}
+ if haskey(stackdict, x)
+ return stackdict[x] | Could you try adding a type assertion here? i.e.
```suggestion
return stackdict[x]::fdata_type(tangent_type(P))
```
ought to work nicely. |
Mooncake.jl | github_2023 | others | 210 | compintell | willtebbutt | @@ -410,18 +413,19 @@ It is an error for the zero element of the tangent space of `x` to be represente
anything other than that which this function returns.
"""
zero_tangent(x)
-@inline zero_tangent(::Union{Int8, Int16, Int32, Int64, Int128}) = NoTangent()
-@inline zero_tangent(x::IEEEFloat) = zero(x)
-@inline function zero_tangent(x::SimpleVector)
- return map!(n -> zero_tangent(x[n]), Vector{Any}(undef, length(x)), eachindex(x))
-end
-@inline function zero_tangent(x::Array{P, N}) where {P, N}
- return _map_if_assigned!(zero_tangent, Array{tangent_type(P), N}(undef, size(x)...), x)
+function zero_tangent(x::P) where {P}
+ if isbitstype(P)
+ return zero_tangent_internal(x)
+ end
+ return zero_tangent_internal(x, IdDict()) | This could be simplified to
```suggestion
return isbitstype(P) ? zero_tangent_internal(x) : zero_tangent_internal(x, IdDict())
``` |
Mooncake.jl | github_2023 | others | 210 | compintell | willtebbutt | @@ -436,16 +440,89 @@ end
tangent_field_zeros_exprs = ntuple(fieldcount(P)) do n
if tangent_field_type(P, n) <: PossiblyUninitTangent
V = PossiblyUninitTangent{tangent_type(fieldtype(P, n))}
- return :(isdefined(x, $n) ? $V(zero_tangent(getfield(x, $n))) : $V())
+ return :(isdefined(x, $n) ? $V(zero_tangent_internal(getfield(x, $n))) : $V())
else
- return :(zero_tangent(getfield(x, $n)))
+ return :(zero_tangent_internal(getfield(x, $n)))
end
end
backing_data_expr = Expr(:call, :tuple, tangent_field_zeros_exprs...)
backing_expr = :($(backing_type(P))($backing_data_expr))
return :($(tangent_type(P))($backing_expr))
end
+@inline zero_tangent_internal(x::Union{Int8,Int16,Int32,Int64,Int128,IEEEFloat}, ::IdDict) = zero_tangent_internal(x)
+@inline function zero_tangent_internal(x::SimpleVector, stackdict::IdDict)
+ return map!(n -> zero_tangent_internal(x[n], stackdict), Vector{Any}(undef, length(x)), eachindex(x))
+end
+@inline function zero_tangent_internal(x::Array{P, N}, stackdict::IdDict) where {P, N}
+ if haskey(stackdict, x)
+ return stackdict[x]
+ end
+ if isbitstype(P)
+ return _map_if_assigned!(Base.Fix2(zero_tangent_internal, stackdict), Array{tangent_type(P), N}(undef, size(x)...), x)
+ else
+ stackdict[x] = Array{tangent_type(P), N}(undef, size(x)...)
+ for i in eachindex(x)
+ if isassigned(x, i)
+ stackdict[x][i] = zero_tangent_internal(x[i], stackdict)
+ end
+ end
+ return stackdict[x]
+ end
+end
+function zero_tangent_internal(x::P, stackdict::IdDict) where {P<:Union{Tuple, NamedTuple}}
+ if haskey(stackdict, x)
+ return stackdict[x]
+ end
+ if tangent_type(P) == NoTangent
+ return NoTangent()
+ end
+ stackdict[x] = tuple_map(Base.Fix2(zero_tangent_internal, stackdict), x)
+ return stackdict[x]
+end
+function zero_tangent_internal(x::P, stackdict::IdDict) where {P}
+ if haskey(stackdict, x)
+ return stackdict[x]
+ end | Should this maybe live inside the `tangent_type(P) <: MutableTangent` if statement? It'll be correct either way, but where it currently is, I imagine it'll damage the performance if `tangent_type(P) == NoTangent`, or is a `struct`. |
Mooncake.jl | github_2023 | others | 210 | compintell | willtebbutt | @@ -436,16 +440,89 @@ end
tangent_field_zeros_exprs = ntuple(fieldcount(P)) do n
if tangent_field_type(P, n) <: PossiblyUninitTangent
V = PossiblyUninitTangent{tangent_type(fieldtype(P, n))}
- return :(isdefined(x, $n) ? $V(zero_tangent(getfield(x, $n))) : $V())
+ return :(isdefined(x, $n) ? $V(zero_tangent_internal(getfield(x, $n))) : $V())
else
- return :(zero_tangent(getfield(x, $n)))
+ return :(zero_tangent_internal(getfield(x, $n)))
end
end
backing_data_expr = Expr(:call, :tuple, tangent_field_zeros_exprs...)
backing_expr = :($(backing_type(P))($backing_data_expr))
return :($(tangent_type(P))($backing_expr))
end
+@inline zero_tangent_internal(x::Union{Int8,Int16,Int32,Int64,Int128,IEEEFloat}, ::IdDict) = zero_tangent_internal(x)
+@inline function zero_tangent_internal(x::SimpleVector, stackdict::IdDict)
+ return map!(n -> zero_tangent_internal(x[n], stackdict), Vector{Any}(undef, length(x)), eachindex(x))
+end
+@inline function zero_tangent_internal(x::Array{P, N}, stackdict::IdDict) where {P, N}
+ if haskey(stackdict, x)
+ return stackdict[x]
+ end
+ if isbitstype(P)
+ return _map_if_assigned!(Base.Fix2(zero_tangent_internal, stackdict), Array{tangent_type(P), N}(undef, size(x)...), x)
+ else
+ stackdict[x] = Array{tangent_type(P), N}(undef, size(x)...)
+ for i in eachindex(x)
+ if isassigned(x, i)
+ stackdict[x][i] = zero_tangent_internal(x[i], stackdict)
+ end
+ end
+ return stackdict[x]
+ end
+end
+function zero_tangent_internal(x::P, stackdict::IdDict) where {P<:Union{Tuple, NamedTuple}}
+ if haskey(stackdict, x)
+ return stackdict[x]
+ end
+ if tangent_type(P) == NoTangent
+ return NoTangent()
+ end
+ stackdict[x] = tuple_map(Base.Fix2(zero_tangent_internal, stackdict), x)
+ return stackdict[x]
+end
+function zero_tangent_internal(x::P, stackdict::IdDict) where {P}
+ if haskey(stackdict, x)
+ return stackdict[x]
+ end
+
+ tangent_type(P) == NoTangent && return NoTangent()
+
+ if tangent_type(P) <: MutableTangent
+ stackdict[x] = tangent_type(P)()
+ zt = ntuple(fieldcount(P)) do n
+ name = fieldname(P, n)
+ if tangent_field_type(P, n) <: PossiblyUninitTangent
+ V = PossiblyUninitTangent{tangent_type(fieldtype(P, n))}
+ if isdefined(x, n)
+ return (name => V(zero_tangent_internal(getfield(x, n), stackdict)))
+ else
+ return (name => V())
+ end
+ else
+ return (name => zero_tangent_internal(getfield(x, n), stackdict))
+ end
+ end
+
+ stackdict[x].fields = NamedTuple(zt)
+ return stackdict[x]
+ else
+ zt = ntuple(fieldcount(P)) do n | Should regular immutable `struct`s be put into the `IdDict`? I don't _think_ that they can ever alias one another / contain circular references, but would be happy to be shown wrong on this point! |
Mooncake.jl | github_2023 | others | 210 | compintell | willtebbutt | @@ -55,6 +55,13 @@ Base.:(==)(x::Tangent, y::Tangent) = x.fields == y.fields
mutable struct MutableTangent{Tfields<:NamedTuple}
fields::Tfields
+ MutableTangent{Tfields}(fields::Tfields) where {Tfields} = new{Tfields}(fields) | This method doesn't appear to be used. Could we remove it? |
Mooncake.jl | github_2023 | others | 210 | compintell | willtebbutt | @@ -55,6 +55,13 @@ Base.:(==)(x::Tangent, y::Tangent) = x.fields == y.fields
mutable struct MutableTangent{Tfields<:NamedTuple}
fields::Tfields
+ MutableTangent{Tfields}(fields::Tfields) where {Tfields} = new{Tfields}(fields)
+ function MutableTangent{Tfields}(fields::NamedTuple{names}) where {Tfields, names}
+ @assert names == fieldnames(Tfields) "field names mismatch"
+ new{Tfields}(fields)
+ end | Would you consider using diagonal dispatch here, rather than error checking? Something like
```julia
MutableTangent{Tfields}(fields::Tfields) where {Tfields} = new{Tfields}(fields)
``` |
Mooncake.jl | github_2023 | others | 210 | compintell | willtebbutt | @@ -436,16 +441,88 @@ end
tangent_field_zeros_exprs = ntuple(fieldcount(P)) do n
if tangent_field_type(P, n) <: PossiblyUninitTangent
V = PossiblyUninitTangent{tangent_type(fieldtype(P, n))}
- return :(isdefined(x, $n) ? $V(zero_tangent(getfield(x, $n))) : $V())
+ return :(isdefined(x, $n) ? $V(zero_tangent_internal(getfield(x, $n))) : $V())
else
- return :(zero_tangent(getfield(x, $n)))
+ return :(zero_tangent_internal(getfield(x, $n)))
end
end
backing_data_expr = Expr(:call, :tuple, tangent_field_zeros_exprs...)
backing_expr = :($(backing_type(P))($backing_data_expr))
return :($(tangent_type(P))($backing_expr))
end
+@inline zero_tangent_internal(x::Union{Int8,Int16,Int32,Int64,Int128,IEEEFloat}, ::IdDict) = zero_tangent_internal(x)
+@inline function zero_tangent_internal(x::SimpleVector, stackdict::IdDict)
+ return map!(n -> zero_tangent_internal(x[n], stackdict), Vector{Any}(undef, length(x)), eachindex(x))
+end
+@inline function zero_tangent_internal(x::Array{P, N}, stackdict::IdDict) where {P, N}
+ if haskey(stackdict, x)
+ return stackdict[x]::tangent_type(typeof(x))
+ end
+ if isbitstype(P)
+ zt = _map_if_assigned!(Base.Fix2(zero_tangent_internal, stackdict), Array{tangent_type(P), N}(undef, size(x)...), x)
+ stackdict[x] = zt
+ return zt
+ else
+ zt = Array{tangent_type(P), N}(undef, size(x)...)
+ stackdict[x] = zt
+ for i in eachindex(x) | Could `_map_if_assigned!` be used here, rather than writing out a for loop? |
Mooncake.jl | github_2023 | others | 210 | compintell | willtebbutt | @@ -436,16 +441,88 @@ end
tangent_field_zeros_exprs = ntuple(fieldcount(P)) do n
if tangent_field_type(P, n) <: PossiblyUninitTangent
V = PossiblyUninitTangent{tangent_type(fieldtype(P, n))}
- return :(isdefined(x, $n) ? $V(zero_tangent(getfield(x, $n))) : $V())
+ return :(isdefined(x, $n) ? $V(zero_tangent_internal(getfield(x, $n))) : $V())
else
- return :(zero_tangent(getfield(x, $n)))
+ return :(zero_tangent_internal(getfield(x, $n)))
end
end
backing_data_expr = Expr(:call, :tuple, tangent_field_zeros_exprs...)
backing_expr = :($(backing_type(P))($backing_data_expr))
return :($(tangent_type(P))($backing_expr))
end
+@inline zero_tangent_internal(x::Union{Int8,Int16,Int32,Int64,Int128,IEEEFloat}, ::IdDict) = zero_tangent_internal(x)
+@inline function zero_tangent_internal(x::SimpleVector, stackdict::IdDict)
+ return map!(n -> zero_tangent_internal(x[n], stackdict), Vector{Any}(undef, length(x)), eachindex(x))
+end
+@inline function zero_tangent_internal(x::Array{P, N}, stackdict::IdDict) where {P, N}
+ if haskey(stackdict, x)
+ return stackdict[x]::tangent_type(typeof(x))
+ end
+ if isbitstype(P)
+ zt = _map_if_assigned!(Base.Fix2(zero_tangent_internal, stackdict), Array{tangent_type(P), N}(undef, size(x)...), x)
+ stackdict[x] = zt
+ return zt
+ else
+ zt = Array{tangent_type(P), N}(undef, size(x)...)
+ stackdict[x] = zt
+ for i in eachindex(x)
+ if isassigned(x, i)
+ zt[i] = zero_tangent_internal(x[i], stackdict)
+ end
+ end
+ return zt
+ end
+end
+function zero_tangent_internal(x::P, stackdict::IdDict) where {P<:Union{Tuple, NamedTuple}}
+ if tangent_type(P) == NoTangent
+ return NoTangent()
+ end
+ if haskey(stackdict, x) | I think we can probably get away without doing this here. Since `Tuple`s and `NamedTuple`s are immutable, they don't have an address, so the same reasoning applies to them as does to `struct`s.
So I think you can avoid the check, and you don't ever need to put them in the `stackdict`, so the line just before the end of the function can be dropped as well. |
Mooncake.jl | github_2023 | others | 210 | compintell | willtebbutt | @@ -436,16 +441,75 @@ end
tangent_field_zeros_exprs = ntuple(fieldcount(P)) do n
if tangent_field_type(P, n) <: PossiblyUninitTangent
V = PossiblyUninitTangent{tangent_type(fieldtype(P, n))}
- return :(isdefined(x, $n) ? $V(zero_tangent(getfield(x, $n))) : $V())
+ return :(isdefined(x, $n) ? $V(zero_tangent_internal(getfield(x, $n))) : $V())
else
- return :(zero_tangent(getfield(x, $n)))
+ return :(zero_tangent_internal(getfield(x, $n)))
end
end
backing_data_expr = Expr(:call, :tuple, tangent_field_zeros_exprs...)
backing_expr = :($(backing_type(P))($backing_data_expr))
return :($(tangent_type(P))($backing_expr))
end
+@inline zero_tangent_internal(x::Union{Int8,Int16,Int32,Int64,Int128,IEEEFloat}, ::IdDict) = zero_tangent_internal(x)
+@inline function zero_tangent_internal(x::SimpleVector, stackdict::IdDict)
+ return map!(n -> zero_tangent_internal(x[n], stackdict), Vector{Any}(undef, length(x)), eachindex(x))
+end
+@inline function zero_tangent_internal(x::Array{P, N}, stackdict::IdDict) where {P, N}
+ if haskey(stackdict, x)
+ return stackdict[x]::tangent_type(typeof(x))
+ end
+ if isbitstype(P)
+ zt = _map_if_assigned!(Base.Fix2(zero_tangent_internal, stackdict), Array{tangent_type(P), N}(undef, size(x)...), x)
+ stackdict[x] = zt
+ return zt
+ else
+ zt = Array{tangent_type(P), N}(undef, size(x)...)
+ stackdict[x] = zt
+ return _map_if_assigned!(Base.Fix2(zero_tangent_internal, stackdict), zt, x)::Array{tangent_type(P), N}
+ end | Another thing: do we actually need these two separate paths here? If I'm not mistaken, the second case should cover everything that we need, and be just as performant. I might be wrong though... |
Mooncake.jl | github_2023 | others | 210 | compintell | willtebbutt | @@ -408,20 +411,24 @@ end
Returns the unique zero element of the tangent space of `x`.
It is an error for the zero element of the tangent space of `x` to be represented by
anything other than that which this function returns.
+
+Internally, `zero_tangent` calls `zero_tangent_internal`, which handles different types of inputs differently.
+`zero_tangent_internal` has two variants:
+1. For `isbitstype` types, `zero_tangent_internal` takes one argument.
+2. Otherwise, `zero_tangent_internal` takes another argument which is an `IdDict`, which solves the issue of circular references
+and also allows more efficient handling of aliased objects. | ```suggestion
2. Otherwise, `zero_tangent_internal` takes another argument which is an `IdDict`, which
handles both circular references and aliasing correctly.
``` |
Mooncake.jl | github_2023 | others | 217 | compintell | yebai | @@ -122,6 +122,6 @@ jobs:
uses: peter-evans/create-or-update-comment@v4
with:
issue-number: ${{ github.event.pull_request.number }}
- body: "Performance Ratio:\nWarning: results are very approximate!\n```\n${{ steps.read-file.outputs.table }}\n```"
+ body: "Performance Ratio:\nRatio of time to compute gradient and time to compute function.\nWarning: results are very approximate! See bench/README.md for more context.\n```\n${{ steps.read-file.outputs.table }}\n```" | Consider making it a clickable link? |
Mooncake.jl | github_2023 | others | 217 | compintell | yebai | @@ -43,6 +43,12 @@ plot_ratio_histogram!(df)
## Inter-framework Benchmarking
This comprises a small suite of functions that we AD using `Tapir.jl`, `Zygote.jl`, `ReverseDiff.jl`, and `Enzyme.jl`.
+The primary purpose of this suite of benchmarks is to ensure that we're regularly comparing the performance of a range of reverse-mode ADs on a set of problems which are known to stretch them in various ways. | Consider adding some comments on why Zygote is fast (~~due to more rules~~; you already added it below), and relevant discussions on other AD we had previously? |
Mooncake.jl | github_2023 | others | 217 | compintell | yebai | @@ -43,6 +43,12 @@ plot_ratio_histogram!(df)
## Inter-framework Benchmarking
This comprises a small suite of functions that we AD using `Tapir.jl`, `Zygote.jl`, `ReverseDiff.jl`, and `Enzyme.jl`.
+The primary purpose of this suite of benchmarks is to ensure that we're regularly comparing the performance of a range of reverse-mode ADs on a set of problems which are known to stretch them in various ways.
+For any given function in the suite, some frameworks might have rules for it, and some not.
+Consequently, they're not comparing the same thing in all cases.
+
+Please note that we have found that the results of the comparisons vary widely from machine to machine. | Not sure this is generally true; the sum example seems to have a big variance, but other numbers are relatively stable across machines. |
Mooncake.jl | github_2023 | others | 217 | compintell | yebai | @@ -122,6 +122,6 @@ jobs:
uses: peter-evans/create-or-update-comment@v4
with:
issue-number: ${{ github.event.pull_request.number }}
- body: "Performance Ratio:\nRatio of time to compute gradient and time to compute function.\nWarning: results are very approximate! See bench/README.md for more context.\n```\n${{ steps.read-file.outputs.table }}\n```"
+ body: "Performance Ratio:\nRatio of time to compute gradient and time to compute function.\nWarning: results are very approximate! See https://github.com/compintell/Tapir.jl/tree/main/bench for more context.\n```\n${{ steps.read-file.outputs.table }}\n```" | ```suggestion
body: "Performance Ratio:\nRatio of time to compute gradient and time to compute function.\nWarning: results are very approximate! See [here](https://github.com/compintell/Tapir.jl/tree/main/bench#inter-framework-benchmarking) for more context.\n```\n${{ steps.read-file.outputs.table }}\n```"
``` |
Mooncake.jl | github_2023 | others | 201 | compintell | sunxd3 | @@ -1,7 +1,7 @@
_to_rdata(::ChainRulesCore.NoTangent) = NoRData()
_to_rdata(dx::Float64) = dx
-"""
+@doc""" | What difference does this make? |
Mooncake.jl | github_2023 | others | 175 | compintell | mhauru | @@ -0,0 +1,451 @@
+# Algorithmic Differentiation
+
+This section introduces the mathematics behind AD.
+Even if you have worked with AD before, we recommend reading in order to acclimatise yourself to the perspective that Tapir.jl takes on the subject.
+
+# Derivatives
+
+
+A foundation on which all of AD is built the the derivate -- we require a fairly general definition of it, which we build up to here.
+
+_**Scalar-to-Scalar Functions**_
+
+Consider first ``f : \RR \to \RR``, which we require to be differentiable at ``x \in \RR``.
+Its derivative at ``x`` is usually thought of as the scalar ``\alpha \in \RR`` such that
+```math
+\text{d}f = \alpha \, \text{d}x .
+```
+Loosely speaking, by this notation we mean that for arbitrary small changes ``\text{d} x`` in the input to ``f``, the change in the output ``\text{d} f`` is ``\alpha \, \text{d}x``.
+We refer readers to the first few minutes of the first lecture mentioned above for a more careful explanation.
+
+_**Vector-to-Vector Functions**_
+
+The generalisation of this to Euclidean space should be familiar: if ``f : \RR^P \to \RR^Q`` is differentiable at a point ``x \in \RR^P``, then the derivative of ``f`` at ``x`` is given by the Jacobian matrix at ``x``, denoted ``J[x] \in \RR^{Q \times P}``, such that
+```math
+\text{d}f = J[x] \, \text{d}x .
+```
+
+It is possible to stop here, as all the functions we shall need to consider can in principle be written as functions on some subset ``\RR^P``.
+
+However, when we consider differentiating computer programmes, we will have to deal with complicated nested data structures, e.g. `struct`s inside `Tuple`s inside `Vector`s etc.
+While all of these data structures _can_ be mapped onto a flat vector in order to make sense of the Jacobian of a computer programme, this becomes very inconvenient very quickly.
+To see the problem, consider the Julia function whose input is of type `Tuple{Tuple{Float64, Vector{Float64}}, Vector{Float64}, Float64}` and whose output is of type `Tuple{Vector{Float64}, Float64}`.
+What kind of object might be use to represent the derivative of a function mapping between these two spaces?
+We certainly _can_ treat these as structured "view" into a "flat" `Vector{Float64}`s, and then define a Jacobian, but actually _finding_ this mapping is a tedious exercise even if it quite obviously exists.
+
+Similarly, while "vector-Jacobian" products are usually used to explain reverse-mode AD, a more general formulation of the derivative is used all the time -- the matrix calculus discussed by [giles2008extended](@cite) and [minka2000old](@cite) (to name a couple) make use of a generalised form of derivative in order to work with functions which map to and from matrices (despite slight differences in naming conventions from text to text).
+
+Consequently, it will be much easier to avoid these kinds of "flattening" operations wherever possible.
+In order to do so, we make use of a generalised notion of the derivative.
+
+_**Functions Between More General Spaces**_
+
+In order to avoid the difficulties described above, we consider we consider functions ``f : \mathcal{X} \to \mathcal{Y}``, where ``\mathcal{X}`` and ``\mathcal{Y}`` are _finite_ dimensional real Hilbert spaces (read: finite-dimensional vector space with an inner product, and real-valued scalars).
+This definition includes functions to / from ``\RR``, ``\RR^D``, but also real-valued matrices.
+Furthermore, we shall see later how we can model all sorts of structured representations of data directly as such spaces.
+
+For such spaces, the derivative of ``f`` at ``x \in \mathcal{X}`` is the linear operator (read: linear function) ``D f [x] : \mathcal{X} \to \mathcal{Y}`` satisfying | It took me a while to parse this correctly. I think two things threw me off a bit:
1) It wasn't obvious to me that I can consider dx to be an element of `\mathcal{X}`, considering it's infinitesimal and gradients I think are elements of the dual space.
2) There are multiple layers of functions here, where `D` takes the function `f` as an argument, `D f` takes the point `x` as an argument, and `D f [x]` takes the infinitesimal d x as an argument.
Because of the above two, for a moment I thought the `\mathcal{X} \to \mathcal{Y}` part was referring to the `[x]` argument of `D f`, and this got me confused. |
Mooncake.jl | github_2023 | others | 175 | compintell | mhauru | @@ -0,0 +1,451 @@
+# Algorithmic Differentiation
+
+This section introduces the mathematics behind AD.
+Even if you have worked with AD before, we recommend reading in order to acclimatise yourself to the perspective that Tapir.jl takes on the subject.
+
+# Derivatives
+
+
+A foundation on which all of AD is built the the derivate -- we require a fairly general definition of it, which we build up to here.
+
+_**Scalar-to-Scalar Functions**_
+
+Consider first ``f : \RR \to \RR``, which we require to be differentiable at ``x \in \RR``.
+Its derivative at ``x`` is usually thought of as the scalar ``\alpha \in \RR`` such that
+```math
+\text{d}f = \alpha \, \text{d}x .
+```
+Loosely speaking, by this notation we mean that for arbitrary small changes ``\text{d} x`` in the input to ``f``, the change in the output ``\text{d} f`` is ``\alpha \, \text{d}x``.
+We refer readers to the first few minutes of the first lecture mentioned above for a more careful explanation.
+
+_**Vector-to-Vector Functions**_
+
+The generalisation of this to Euclidean space should be familiar: if ``f : \RR^P \to \RR^Q`` is differentiable at a point ``x \in \RR^P``, then the derivative of ``f`` at ``x`` is given by the Jacobian matrix at ``x``, denoted ``J[x] \in \RR^{Q \times P}``, such that
+```math
+\text{d}f = J[x] \, \text{d}x .
+```
+
+It is possible to stop here, as all the functions we shall need to consider can in principle be written as functions on some subset ``\RR^P``.
+
+However, when we consider differentiating computer programmes, we will have to deal with complicated nested data structures, e.g. `struct`s inside `Tuple`s inside `Vector`s etc.
+While all of these data structures _can_ be mapped onto a flat vector in order to make sense of the Jacobian of a computer programme, this becomes very inconvenient very quickly.
+To see the problem, consider the Julia function whose input is of type `Tuple{Tuple{Float64, Vector{Float64}}, Vector{Float64}, Float64}` and whose output is of type `Tuple{Vector{Float64}, Float64}`.
+What kind of object might be use to represent the derivative of a function mapping between these two spaces?
+We certainly _can_ treat these as structured "view" into a "flat" `Vector{Float64}`s, and then define a Jacobian, but actually _finding_ this mapping is a tedious exercise even if it quite obviously exists.
+
+Similarly, while "vector-Jacobian" products are usually used to explain reverse-mode AD, a more general formulation of the derivative is used all the time -- the matrix calculus discussed by [giles2008extended](@cite) and [minka2000old](@cite) (to name a couple) make use of a generalised form of derivative in order to work with functions which map to and from matrices (despite slight differences in naming conventions from text to text).
+
+Consequently, it will be much easier to avoid these kinds of "flattening" operations wherever possible.
+In order to do so, we make use of a generalised notion of the derivative.
+
+_**Functions Between More General Spaces**_
+
+In order to avoid the difficulties described above, we consider we consider functions ``f : \mathcal{X} \to \mathcal{Y}``, where ``\mathcal{X}`` and ``\mathcal{Y}`` are _finite_ dimensional real Hilbert spaces (read: finite-dimensional vector space with an inner product, and real-valued scalars).
+This definition includes functions to / from ``\RR``, ``\RR^D``, but also real-valued matrices.
+Furthermore, we shall see later how we can model all sorts of structured representations of data directly as such spaces.
+
+For such spaces, the derivative of ``f`` at ``x \in \mathcal{X}`` is the linear operator (read: linear function) ``D f [x] : \mathcal{X} \to \mathcal{Y}`` satisfying
+```math
+\text{d}f = D f [x] \, \text{d} x
+```
+That is, instead of thinking of the derivative as a number or a matrix, we think about it as a _function_.
+We can express the previous notions of the derivative in this language.
+
+In the scalar case, rather than thinking of the derivative as _being_ ``\alpha``, we think of it is a the linear operator ``D f [x] (\dot{x}) := \alpha \dot{x}``.
+Put differently, rather than thinking of the derivative as the slope of the tangent to ``f`` at ``x``, think of it as the function decribing the tangent itself.
+
+Similarly, if ``\mathcal{X} = \RR^P`` and ``\mathcal{Y} = \RR^Q`` then this operator can be specified in terms of the Jacobian matrix: ``D f [x] (\dot{x}) := J[x] \dot{x}`` -- brackets are used to emphasise that ``D f [x]`` is a function, and is being applied to ``\dot{x}``.
+
+The difference from usual is a little bit subtle.
+We do not define the derivative to _be_ ``\alpha`` or ``J[x]``, rather we define it to be "multiply by ``\alpha``" or "multiply by ``J[x]``".
+For the rest of this document we shall use this definition of the derivative.
+So whenever you see the word "derivative", you should think "linear function".
+
+_**The Chain Rule**_
+
+The chain rule is _the_ result which makes AD work.
+Fortunately, it applies to this version of the derivative:
+```math
+f = g \circ h \implies D f [x] = (D g [h(x)]) \circ (D h [x])
+```
+By induction this extends to a collection of ``N`` functions ``f_1, \dots, f_N``:
+```math
+f := f_N \circ \dots \circ f_1 \implies D f [x] = (D f_N [x_N]) \circ \dots \circ (D f_1 [x_1]),
+```
+where ``x_{n+1} := f(x_n)``, and ``x_1 := x``.
+
+
+_**An aside: the definition of the Frechet Derivative**_
+
+This definition of the derivative has a name: the Frechet derivative.
+It is a generalisation of the Total Derivative.
+Formally, we say that a function ``f : \mathcal{X} \to \mathcal{Y}`` is differentiable at a point ``x \in \mathcal{X}`` if there exists a linear operator ``D f [x] : \mathcal{X} \to \mathcal{Y}`` (the derivative) satisfying
+```math
+\lim_{\text{d} h \to 0} \frac{\| f(x + \text{d} h) - f(x) + D f [x] (\text{d} h) \|_\mathcal{Y}}{\| \text{d}h \|_\mathcal{X}} = 0,
+```
+where ``\| \cdot \|_\mathcal{X}`` and ``\| \cdot \|_\mathcal{Y}`` are the norms associated to Hilbert spaces ``\mathcal{X}`` and ``\mathcal{Y}`` respectively.
+It is a good idea to consider what this looks like when ``\mathcal{X} = \mathcal{Y} = \RR`` and when ``\mathcal{X} = \mathcal{Y} = \RR^D``.
+It is sometimes helpful to refer to this definition to e.g. verify the correctness of the derivative of a function -- as with single-variable calculus, however, this is rare.
+
+
+
+_**Another aside: what does Forwards-Mode AD compute?**_
+
+At this point we have enough machinery to discuss forwards-mode AD.
+Expressed in the language of linear operators and Hilbert spaces, the goal of forwards-mode AD is the following:
+given a function ``f`` which is differentiable at a point ``x``, compute ``D f [x] (\dot{x})`` for a given vector ``\dot{x}``.
+If ``f : \RR^P \to \RR^Q``, this is equivalent to computing ``J[x] \dot{x}``, where ``J[x]`` is the Jacobian of ``f`` at ``x``.
+For the interested reader we provide a high-level explanation of _how_ forwards-mode AD does this in [_How_ does Forwards-Mode AD work?](@ref).
+
+
+
+# Reverse-Mode AD: _what_ does it do?
+
+In order to explain what reverse-mode AD does, we first consider the "vector-Jacobian product" definition in Euclidean space which will be familiar to many readers.
+We then generalise.
+
+_**Reverse-Mode AD: what does it do in Euclidean space?**_
+
+In this setting, the goal of reverse-mode AD is the following: given a function ``f : \RR^P \to \RR^Q`` which is differentiable at ``x \in \RR^P`` with Jacobian ``J[x]`` at ``x``, compute ``J[x]^\top \bar{y}`` for any ``\bar{y} \in \RR^Q``.
+This is useful because we can obtain the gradient from this when ``Q = 1`` by letting ``\bar{y} = 1``.
+
+_**Adjoint Operators**_
+
+In order to generalise this algorithm to work with linear operators, we must first generalise the idea of multiplying a vector by the transpose of the Jacobian.
+The relevant concept here is that of the _adjoint_ _operator_.
+Specifically, the adjoint ``A^\ast`` of linear operator ``A`` is the linear operator satisfying
+```math
+\langle A^\ast \bar{y}, \dot{x} \rangle = \langle \bar{y}, A \dot{x} \rangle.
+```
+The relationship between the adjoint and matrix transpose is this: if ``A (x) := J x`` for some matrix ``J``, then ``A^\ast (y) := J^\top y``.
+
+Moreover, just as ``(A B)^\top = B^\top A^\top`` when ``A`` and ``B`` are matrices, ``(A B)^\ast = B^\ast A^\ast`` when ``A`` and ``B`` are linear operators.
+This result follows in short order from the definition of the adjoint operator -- (and is a good exercise!)
+
+_**Reverse-Mode AD: what does it do in general?**_
+
+Equipped with adjoints, we can express reverse-mode AD only in terms of linear operators, dispensing with the need to express everything in terms of Jacobians.
+The goal of reverse-mode AD is as follows: given a differentiable function ``f : \mathcal{X} \to \mathcal{Y}``, compute ``D f [x]^\ast (\bar{y})`` for some ``\bar{y}``.
+
+We will explain _how_ reverse-mode AD goes about computing this after some worked examples.
+
+### Some Worked Examples
+
+We now present some worked examples in order to prime intuition, and to introduce the important classes of problems that will be encountered when doing AD in the Julia language.
+We will put all of these problems in a single general framework later on.
+
+#### An Example with Matrix Calculus
+
+We have introduced some mathematical abstraction in order to simplify the calculations involved in AD.
+To this end, we consider differentiating ``f(X) := X^\top X``.
+Results for this and similar operations are given by [giles2008extended](@cite).
+A similar operation, but which maps from matrices to ``\RR`` is discussed in Lecture 4 part 2 of the MIT course mentioned previously.
+Both [giles2008extended](@cite) and Lecture 4 part 2 provide approaches to obtaining the derivative of this function.
+
+Following either resource will yield the derivative:
+```math
+D f [X] (\dot{X}) = \dot{X}^\top X + X^\top \dot{X}
+```
+Observe that this is indeed a linear operator (i.e. it is linear in its argument, ``\dot{X}``).
+(You can always plug it in to the definition of the Frechet derivative to confirm that it is indeed the derivative.)
+
+In order to perform reverse-mode AD, we need to find the adjoint operator.
+Using the usual definition of the inner product between matrices,
+```math
+\langle X, Y \rangle := \textrm{tr} (X^\top Y)
+```
+we can rearrange the inner product as follows:
+```math
+\begin{align}
+ \langle \bar{Y}, D f [X] (\dot{X}) \rangle &= \langle \bar{Y}, \dot{X}^\top X + X^\top \dot{X} \rangle \nonumber \\
+ &= \textrm{tr} (\bar{Y}^\top \dot{X}^\top X) + \textrm{tr}(\bar{Y}^\top X^\top \dot{X}) \nonumber \\
+ &= \textrm{tr} ( [\bar{Y} X^\top]^\top \dot{X}) + \textrm{tr}( [X \bar{Y}]^\top \dot{X}) \nonumber \\
+ &= \langle \bar{Y} X^\top + X \bar{Y}, \dot{X} \rangle. \nonumber
+\end{align}
+```
+We can read off the adjoint operator from the first argument to the inner product:
+```math
+D f [X]^\ast (\bar{Y}) = \bar{Y} X^\top + X \bar{Y}.
+```
+
+#### AD of a Julia function: a trivial example
+
+We now turn to differentiating Julia `function`s.
+The way that Tapir.jl handles immutable data is very similar to how Zygote / ChainRules do.
+For example, consider the Julia function
+```julia
+f(x::Float64) = sin(x)
+```
+If you've previously worked with ChainRules / Zygote, without thinking too hard about the formalisms we introduced previously (perhaps by considering a variety of partial derivatives) you can probably arrive at the following adjoint for the derivative of `f`:
+```julia
+g -> g * cos(x)
+```
+
+Implicitly, you have performed three steps:
+1. model `f` as a differentiable function,
+2. compute its derivative, and
+3. compute the adjoint of the derivative.
+
+It is helpful to work through this simple example in detail, as the steps involved apply more generally.
+The goal is to spell out the steps involved in detail, as this detail becomes helpful in more complicated examples.
+If at any point this exercise feels pedantic, we ask you to stick with it.
+
+_**Step 1: Differentiable Mathematical Model**_
+
+Obviously, we model the Julia `function` `f` as the function ``f : \RR \to \RR`` where
+```math
+f(x) := \sin(x)
+```
+Observe that, we've made (at least) two modelling assumptions here:
+1. a `Float64` is modelled as a real number,
+2. the Julia `function` `sin` is modelled as the usual mathematical function ``\sin``.
+
+As promised we're being quite pedantic.
+While the first assumption is obvious and will remain true, we will shortly see examples where we have to work a bit harder to obtain a correspondence between a Julia `function` and a mathematical object.
+
+_**Step 2: Compute Derivative**_
+
+Now that we have a mathematical model, we can differentiate it:
+```math
+D f [x] (\dot{x}) = \cos(x) \dot{x}
+```
+
+_**Step 3: Compute Adjoint of Derivative**_
+
+Given the derivative, we can find its adjoint:
+```math
+\langle \bar{f}, D f [x](\dot{x}) \rangle = \langle \bar{f}, \cos(x) \dot{x} \rangle = \langle \cos(x) \bar{f}, \dot{x} \rangle.
+```
+From here the adjoint can be read off from the first argument to the inner product:
+```math
+D f [x]^\ast (\bar{f}) = \cos(x) \bar{f}.
+```
+
+
+#### AD of a Julia function: a slightly less trivial example
+
+Now consider the Julia function
+```julia
+f(x::Float64, y::Tuple{Float64, Float64}) = x + y[1] * y[2]
+```
+Its adjoint is going to be something along the lines of
+```julia
+g -> (g, (y[2] * g, y[1] * g))
+```
+
+As before, we work through in detail.
+
+
+
+_**Step 1: Differentiable Mathematical Model**_
+
+There are a couple of aspects of `f` which require thought:
+1. it has two arguments -- we've only handled single argument functions previously, and
+2. the second argument is a `Tuple` -- we've not yet decided how to model this.
+
+To this end, we define a mathematical notion of a tuple.
+A tuple is a collection of ``N`` elements, each of which is drawn from some set ``\mathcal{X}_n``.
+We denote by ``\mathcal{X} := \{ \mathcal{X}_1 \times \dots \times \mathcal{X}_N \}`` the set of all ``N``-tuples whose ``n``th element is drawn from ``\mathcal{X}_n``.
+Provided that each ``\mathcal{X}_n`` forms a finite Hilbert space, ``\mathcal{X}`` forms a Hilbert space with
+1. ``\alpha x := (\alpha x_1, \dots, \alpha x_N)``,
+2. ``x + y := (x_1 + y_1, \dots, x_N + y_N)``, and
+3. ``\langle x, y \rangle := \sum_{n=1}^N \langle x_n, y_n \rangle``.
+
+We can think of multi-argument functions as single-argument functions of a tuple, so a reasonable mathematical model for `f` might be a function ``f : \{ \RR \times \{ \RR \times \RR \} \} \to \RR``, where
+```math
+f(x, y) := x + y_1 y_2
+```
+Note that while the function is written with two arguments, you should treat them as a single tuple, where we've assigned the name ``x`` to the first element, and ``y`` to the second.
+
+_**Step 2: Compute Derivative**_
+
+Now that we have a mathematical object, we can differentiate it:
+```math
+D f [x, y](\dot{x}, \dot{y}) = \dot{x} + \dot{y}_1 y_2 + y_1 \dot{y}_2
+```
+
+_**Step 3: Compute Adjoint of Derivative**_
+
+``D f[x, y]`` maps ``\{ \RR \times \{ \RR \times \RR \}\}`` to ``\RR``, so ``D f [x, y]^\ast`` must map the other way.
+You should verify that the following follows quickly from the definition of the adjoint:
+```math
+D f [x, y]^\ast (\bar{f}) = (\bar{f}, (\bar{f} y_2, \bar{f} y_1))
+```
+
+
+#### AD with mutable data
+
+In the previous two examples there was an obvious mathematical model for the Julia function.
+Indeed this model was sufficiently obvious that it required little explanation.
+This is not always the case though, in particular, Julia functions which modify / mutate their inputs require a little more thought.
+
+Consider the following Julia `function`:
+```julia
+function f!(x::Vector{Float64})
+ x .*= x
+ return sum(x)
+end
+```
+This `function` squares each element of its input in-place, and returns the sum of the result.
+So what is an appropriate mathematical model for this `function`?
+
+_**Step 1: Differentiable Mathematical Model**_
+
+The trick is to distinguish between the state of `x` upon _entry_ to / _exit_ from `f!`.
+In particular, let ``\phi_{\text{f!}} : \RR^N \to \{ \RR^N \times \RR \}`` be given by
+```math
+\phi_{\text{f!}}(x) = (x \odot x, \sum_{n=1}^N x_n^2)
+```
+where ``\odot`` denotes the Hadamard / elementwise product.
+The point here is that the input to ``\phi_{\text{f!}}`` is the state of `x` upon entry to `f!`, and the value returned from ``\phi_{\text{f!}}`` is a tuple containing both the state of `x` upon exit from `f!` and the value returned by `f!`.
+
+The remaining steps are straightforward now that we have the model.
+
+
+_**Step 2: Compute Derivative**_
+
+The derivative of ``\phi_{\text{f!}}`` is
+```math
+D \phi_{\text{f!}} [x](\dot{x}) = (2 x \odot \dot{x}, 2 \sum_{n=1}^N x_n \dot{x}_n).
+```
+
+_**Step 3: Compute Adjoint of Derivative**_
+
+The argument to the adjoint of the derivative must be an element of ``\{\RR^N \times \RR \}`` -- that is, a 2-tuple whose first element lies in ``\RR^N`` and whose second element lies in ``\RR``.
+Denote such a tuple as ``(\bar{y}_1, \bar{y}_2)``.
+Plugging this into an inner product with the derivative and rearranging yields
+```math
+\begin{align}
+    \langle (\bar{y}_1, \bar{y}_2), D \phi_{\text{f!}} [x] (\dot{x}) \rangle &= \langle (\bar{y}_1, \bar{y}_2), (2 x \odot \dot{x}, 2 \textstyle\sum_{n=1}^N x_n \dot{x}_n) \rangle \nonumber \\
+        &= \langle \bar{y}_1, 2 x \odot \dot{x} \rangle + \langle \bar{y}_2, 2 \textstyle\sum_{n=1}^N x_n \dot{x}_n \rangle \nonumber \\
+        &= \langle 2 x \odot \bar{y}_1, \dot{x} \rangle + \langle 2 \bar{y}_2 x, \dot{x} \rangle \nonumber \\
+        &= \langle 2 (x \odot \bar{y}_1 + \bar{y}_2 x), \dot{x} \rangle, \nonumber
+\end{align}
+```
+where the second line follows from the definition of the inner product between tuples.
+So we can read off the adjoint to be
+```math
+```
+So we can read off the adjoint to be
+```math
+D \phi_{\text{f!}} [x]^\ast (\bar{y}) = 2 (x \odot \bar{y}_1 + \bar{y}_2 x).
+```
+
+# Reverse-Mode AD: _how_ does it do it?
+
+Now that we know _what_ it is that AD computes, we need a rough understanding of _how_ it computes it.
+
+In short: reverse-mode AD breaks down a "complicated" function ``f`` into the composition of a collection of "simple" functions ``f_1, \dots, f_N``, applies the chain rule, and takes the adjoint.
+
+Specifically, we assume that we can express any function ``f`` as ``f = f_N \circ \dots \circ f_1``, and that we can compute the adjoint of the derivative for each ``f_n``.
+From this, we can obtain the adjoint of ``f`` by applying the chain rule to the derivatives and taking the adjoint:
+```math
+\begin{align}
+D f [x]^\ast &= (D f_N [x_N] \circ \dots \circ D f_1 [x_1])^\ast \nonumber \\
+ &= D f_1 [x_1]^\ast \circ \dots \circ D f_N [x_N]^\ast \nonumber
+\end{align}
+```
+
+For example, suppose that ``f(X) := \sin(\cos(\text{tr}(X^\top X)))``.
+One option to compute its adjoint is to figure it out by hand directly (probably using the chain rule somewhere).
+Instead, we could notice that ``f = f_4 \circ f_3 \circ f_2 \circ f_1`` where ``f_4 := \sin``, ``f_3 := \cos``, ``f_2 := \text{tr}`` and ``f_1(X) = X^\top X``.
+We could derive the adjoint for each of these functions (a fairly straightforward task), and then compute
+```math
+D f [X]^\ast (1) = (D f_1 [x_1]^\ast \circ D f_2 [x_2]^\ast \circ D f_3 [x_3]^\ast \circ D f_4 [x_4]^\ast)(1)
+```
+in order to obtain the gradient of ``f``.
+Reverse-mode AD essentially just does this.
+Modern systems have hand-written adjoints for (hopefully!) all of the "simple" functions you may wish to build a function such as ``f`` from (often there are hundreds of these), and composes them to compute the adjoint of ``f``.
+A sketch of a more generic algorithm is as follows.
+
+Forwards-Pass:
+1. ``x_1 = x``, ``n = 1``
+2. construct ``D f_n [x_n]^\ast``
+3. let ``x_{n+1} = f_n (x_n)``
+4. let ``n = n + 1``
+5. if ``n < N + 1`` then go to 2
+
+Reverse-Pass:
+1. let ``\bar{x}_{N+1} = \bar{y}``
+2. let ``n = n - 1``
+3. let ``\bar{x}_{n} = D f_n [x_n]^\ast (\bar{x}_{n+1})``
+4. if ``n = 1`` return ``\bar{x}_1`` else go to 2.
+
+
+
+
+_**How does this relate to vector-Jacobian products?**_
+
+In Euclidean space, each derivative ``D f_n [x_n](\dot{x}_n) = J_n[x_n] \dot{x}_n``.
+Applying the chain rule to ``D f [x]`` and substituting this in yields
+```math
+J[x] = J_N[x_N] \dots J_1[x_1] .
+```
+Taking the transpose and multiplying from the left by ``\bar{y}`` yields
+```math
+J[x]^\top \bar{y} = J_1[x_1]^\top \dots J_N[x_N]^\top \bar{y} .
+```
+Comparing this with the expression in terms of adjoints and operators, we see that composition of adjoints of derivatives has been replaced with multiplying by transposed Jacobian matrices.
+This "vector-Jacobian product" expression is commonly used to explain AD, and is likely familiar to many readers.
+
+# Directional Derivatives and Gradients
+
+Now we turn to using reverse-mode AD to compute the gradient of a function.
+In short, given a function ``g : \mathcal{X} \to \RR`` with derivative ``D g [x]`` at ``x``, its gradient is equal to ``D g [x]^\ast (1)``.
+We explain why in this section.
+
+It's worth taking a few minutes to consider how the ideas discussed thus far relate to other similar ideas.
+
+The derivative discussed here can be used to compute directional derivatives.
+Consider a function ``f : \mathcal{X} \to \RR`` with Frechet derivative ``D f [x] : \mathcal{X} \to \RR`` at ``x \in \mathcal{X}``.
+Then ``D f[x](\dot{x})`` returns the directional derivative in direction ``\dot{x}``.
+
+Gradients are closely related to the adjoint of the derivative.
+Recall that the gradient of ``f`` at ``x`` is defined to be the vector ``\nabla f (x) \in \mathcal{X}`` such that ``\langle \nabla f (x), \dot{x} \rangle`` gives the directional derivative of ``f`` at ``x`` in direction ``\dot{x}``.
+Having noted that ``D f[x](\dot{x})`` is exactly this directional derivative, we can equivalently say that
+```math
+D f[x](\dot{x}) = \langle \nabla f (x), \dot{x} \rangle .
+```
+
+The role of the adjoint is revealed when we consider ``f := \mathcal{l} \circ g``, where ``g : \mathcal{X} \to \mathcal{Y}``, ``\mathcal{l}(y) := \langle \bar{y}, y \rangle``, and ``\bar{y} \in \mathcal{Y}`` is some fixed vector.
+Noting that ``D \mathcal{l} [y](\dot{y}) = \langle \bar{y}, \dot{y} \rangle``, we apply the chain rule to obtain
+```math
+\begin{align}
+D f [x] (\dot{x}) &= [(D \mathcal{l} [g(x)]) \circ (D g [x])](\dot{x}) \nonumber \\
+ &= \langle \bar{y}, D g [x] (\dot{x}) \rangle \nonumber \\
+ &= \langle D g [x]^\ast (\bar{y}), \dot{x} \rangle, \nonumber
+\end{align}
+```
+from which we conclude that ``D g [x]^\ast (\bar{y})`` is the gradient of the composition ``l \circ g`` at ``x``.
+
+The consequence is that we can always view the computation performed by reverse-mode AD as computing the gradient of the composition of the function in question and an inner product with the argument to the adjoint.
+
+The above shows that if ``Y = \RR`` and ``g`` is the function we wish to compute the gradient of, we can simply set ``\bar{y} = 1`` and compute ``D g [x]^\ast (\bar{y})`` to obtain the gradient of ``g`` at ``x``.
+
+
+
+
+# Summary
+
+This document explains the core mathematical foundations of AD.
+It explains separately _what_ it does, and _how_ it goes about it.
+Some basic examples are given which show how these mathematical foundations can be applied to differentiate functions of matrices, and Julia `function`s.
+
+Subsequent sections will build on these foundations, to provide a more general explanation of what AD looks like for a Julia programme.
+
+
+
+# Asides
+
+### _How_ does Forwards-Mode AD work?
+
+Forwards-mode AD achieves this by breaking down ``f`` into the composition ``f = f_N \circ \dots \circ f_1``, where each ``f_n`` is a simple function whose derivative (function) ``D f_n [x_n]`` we know for any given ``x_n``. By the chain rule, we have that
+```math
+D f [x] (\dot{x}) = D f_N [x_N] \circ \dots \circ D f_1 [x_1] (\dot{x})
+```
+which suggests the following algorithm:
+1. let ``x_1 = x``, ``\dot{x}_1 = \dot{x}``, and ``n = 1``
+2. let ``\dot{x}_{n+1} = D f_n [x_n] (\dot{x}_n)``
+3. let ``x_{n+1} = f_n(x_n)``
+4. let ``n = n + 1``
+5. if ``n = N+1`` then return ``\dot{x}_{N+1}``, otherwise go to 2.
5. if ``n = N+1`` then return ``\dot{x}_{N+1}``, otherwise go to 2.
``` |
Mooncake.jl | github_2023 | others | 175 | compintell | mhauru | @@ -0,0 +1,451 @@
+# Algorithmic Differentiation
+
+This section introduces the mathematics behind AD.
+Even if you have worked with AD before, we recommend reading in order to acclimatise yourself to the perspective that Tapir.jl takes on the subject.
+
+# Derivatives
+
+
+A foundation on which all of AD is built is the derivative -- we require a fairly general definition of it, which we build up to here.
+
+_**Scalar-to-Scalar Functions**_
+
+Consider first ``f : \RR \to \RR``, which we require to be differentiable at ``x \in \RR``.
+Its derivative at ``x`` is usually thought of as the scalar ``\alpha \in \RR`` such that
+```math
+\text{d}f = \alpha \, \text{d}x .
+```
+Loosely speaking, by this notation we mean that for arbitrary small changes ``\text{d} x`` in the input to ``f``, the change in the output ``\text{d} f`` is ``\alpha \, \text{d}x``.
+We refer readers to the first few minutes of the first lecture mentioned above for a more careful explanation.
+
+_**Vector-to-Vector Functions**_
+
+The generalisation of this to Euclidean space should be familiar: if ``f : \RR^P \to \RR^Q`` is differentiable at a point ``x \in \RR^P``, then the derivative of ``f`` at ``x`` is given by the Jacobian matrix at ``x``, denoted ``J[x] \in \RR^{Q \times P}``, such that
+```math
+\text{d}f = J[x] \, \text{d}x .
+```
+
+It is possible to stop here, as all the functions we shall need to consider can in principle be written as functions on some subset ``\RR^P``.
+
+However, when we consider differentiating computer programmes, we will have to deal with complicated nested data structures, e.g. `struct`s inside `Tuple`s inside `Vector`s etc.
+While all of these data structures _can_ be mapped onto a flat vector in order to make sense of the Jacobian of a computer programme, this becomes very inconvenient very quickly.
+To see the problem, consider the Julia function whose input is of type `Tuple{Tuple{Float64, Vector{Float64}}, Vector{Float64}, Float64}` and whose output is of type `Tuple{Vector{Float64}, Float64}`.
+What kind of object might be used to represent the derivative of a function mapping between these two spaces?
+We certainly _can_ treat these as structured "views" into a "flat" `Vector{Float64}`, and then define a Jacobian, but actually _finding_ this mapping is a tedious exercise even if it quite obviously exists.
+
+Similarly, while "vector-Jacobian" products are usually used to explain reverse-mode AD, a more general formulation of the derivative is used all the time -- the matrix calculus discussed by [giles2008extended](@cite) and [minka2000old](@cite) (to name a couple) make use of a generalised form of derivative in order to work with functions which map to and from matrices (despite slight differences in naming conventions from text to text).
+
+Consequently, it will be much easier to avoid these kinds of "flattening" operations wherever possible.
+In order to do so, we make use of a generalised notion of the derivative.
+
+_**Functions Between More General Spaces**_
+
+In order to avoid the difficulties described above, we consider functions ``f : \mathcal{X} \to \mathcal{Y}``, where ``\mathcal{X}`` and ``\mathcal{Y}`` are _finite_ dimensional real Hilbert spaces (read: finite-dimensional vector space with an inner product, and real-valued scalars).
+This definition includes functions to / from ``\RR``, ``\RR^D``, but also real-valued matrices.
+Furthermore, we shall see later how we can model all sorts of structured representations of data directly as such spaces.
+
+For such spaces, the derivative of ``f`` at ``x \in \mathcal{X}`` is the linear operator (read: linear function) ``D f [x] : \mathcal{X} \to \mathcal{Y}`` satisfying
+```math
+\text{d}f = D f [x] \, \text{d} x
+```
+That is, instead of thinking of the derivative as a number or a matrix, we think about it as a _function_.
+We can express the previous notions of the derivative in this language.
+
+In the scalar case, rather than thinking of the derivative as _being_ ``\alpha``, we think of it as the linear operator ``D f [x] (\dot{x}) := \alpha \dot{x}``.
+Put differently, rather than thinking of the derivative as the slope of the tangent to ``f`` at ``x``, think of it as the function describing the tangent itself.
+
+Similarly, if ``\mathcal{X} = \RR^P`` and ``\mathcal{Y} = \RR^Q`` then this operator can be specified in terms of the Jacobian matrix: ``D f [x] (\dot{x}) := J[x] \dot{x}`` -- brackets are used to emphasise that ``D f [x]`` is a function, and is being applied to ``\dot{x}``.
+
+The difference from usual is a little bit subtle.
+We do not define the derivative to _be_ ``\alpha`` or ``J[x]``, rather we define it to be "multiply by ``\alpha``" or "multiply by ``J[x]``".
+For the rest of this document we shall use this definition of the derivative.
+So whenever you see the word "derivative", you should think "linear function".
+
+_**The Chain Rule**_
+
+The chain rule is _the_ result which makes AD work.
+Fortunately, it applies to this version of the derivative:
+```math
+f = g \circ h \implies D f [x] = (D g [h(x)]) \circ (D h [x])
+```
+By induction this extends to a collection of ``N`` functions ``f_1, \dots, f_N``:
+```math
+f := f_N \circ \dots \circ f_1 \implies D f [x] = (D f_N [x_N]) \circ \dots \circ (D f_1 [x_1]),
+```
+where ``x_{n+1} := f_n(x_n)``, and ``x_1 := x``.
+
+
+_**An aside: the definition of the Frechet Derivative**_
+
+This definition of the derivative has a name: the Frechet derivative.
+It is a generalisation of the Total Derivative.
+Formally, we say that a function ``f : \mathcal{X} \to \mathcal{Y}`` is differentiable at a point ``x \in \mathcal{X}`` if there exists a linear operator ``D f [x] : \mathcal{X} \to \mathcal{Y}`` (the derivative) satisfying
+```math
+\lim_{\text{d} h \to 0} \frac{\| f(x + \text{d} h) - f(x) - D f [x] (\text{d} h) \|_\mathcal{Y}}{\| \text{d}h \|_\mathcal{X}} = 0,
+```
+where ``\| \cdot \|_\mathcal{X}`` and ``\| \cdot \|_\mathcal{Y}`` are the norms associated to Hilbert spaces ``\mathcal{X}`` and ``\mathcal{Y}`` respectively.
+It is a good idea to consider what this looks like when ``\mathcal{X} = \mathcal{Y} = \RR`` and when ``\mathcal{X} = \mathcal{Y} = \RR^D``.
+It is sometimes helpful to refer to this definition to e.g. verify the correctness of the derivative of a function -- as with single-variable calculus, however, this is rare.
+
+
+
+_**Another aside: what does Forwards-Mode AD compute?**_
+
+At this point we have enough machinery to discuss forwards-mode AD.
+Expressed in the language of linear operators and Hilbert spaces, the goal of forwards-mode AD is the following:
+given a function ``f`` which is differentiable at a point ``x``, compute ``D f [x] (\dot{x})`` for a given vector ``\dot{x}``.
+If ``f : \RR^P \to \RR^Q``, this is equivalent to computing ``J[x] \dot{x}``, where ``J[x]`` is the Jacobian of ``f`` at ``x``.
+For the interested reader we provide a high-level explanation of _how_ forwards-mode AD does this in [_How_ does Forwards-Mode AD work?](@ref).
+
+
+
+# Reverse-Mode AD: _what_ does it do?
+
+In order to explain what reverse-mode AD does, we first consider the "vector-Jacobian product" definition in Euclidean space which will be familiar to many readers.
+We then generalise.
+
+_**Reverse-Mode AD: what does it do in Euclidean space?**_
+
+In this setting, the goal of reverse-mode AD is the following: given a function ``f : \RR^P \to \RR^Q`` which is differentiable at ``x \in \RR^P`` with Jacobian ``J[x]`` at ``x``, compute ``J[x]^\top \bar{y}`` for any ``\bar{y} \in \RR^Q``.
+This is useful because we can obtain the gradient from this when ``Q = 1`` by letting ``\bar{y} = 1``.
+
+_**Adjoint Operators**_
+
+In order to generalise this algorithm to work with linear operators, we must first generalise the idea of multiplying a vector by the transpose of the Jacobian.
+The relevant concept here is that of the _adjoint_ _operator_.
+Specifically, the adjoint ``A^\ast`` of linear operator ``A`` is the linear operator satisfying
+```math
+\langle A^\ast \bar{y}, \dot{x} \rangle = \langle \bar{y}, A \dot{x} \rangle.
+```
+The relationship between the adjoint and matrix transpose is this: if ``A (x) := J x`` for some matrix ``J``, then ``A^\ast (y) := J^\top y``.
+
+Moreover, just as ``(A B)^\top = B^\top A^\top`` when ``A`` and ``B`` are matrices, ``(A B)^\ast = B^\ast A^\ast`` when ``A`` and ``B`` are linear operators.
+This result follows in short order from the definition of the adjoint operator -- (and is a good exercise!)
+
+_**Reverse-Mode AD: what does it do in general?**_
+
+Equipped with adjoints, we can express reverse-mode AD only in terms of linear operators, dispensing with the need to express everything in terms of Jacobians.
+The goal of reverse-mode AD is as follows: given a differentiable function ``f : \mathcal{X} \to \mathcal{Y}``, compute ``D f [x]^\ast (\bar{y})`` for some ``\bar{y}``.
+
+We will explain _how_ reverse-mode AD goes about computing this after some worked examples.
+
+### Some Worked Examples
+
+We now present some worked examples in order to prime intuition, and to introduce the important classes of problems that will be encountered when doing AD in the Julia language.
+We will put all of these problems in a single general framework later on.
+
+#### An Example with Matrix Calculus
+
+We have introduced some mathematical abstraction in order to simplify the calculations involved in AD.
+To this end, we consider differentiating ``f(X) := X^\top X``.
+Results for this and similar operations are given by [giles2008extended](@cite).
+A similar operation, but which maps from matrices to ``\RR`` is discussed in Lecture 4 part 2 of the MIT course mentioned previously.
+Both [giles2008extended](@cite) and Lecture 4 part 2 provide approaches to obtaining the derivative of this function.
+
+Following either resource will yield the derivative:
+```math
+D f [X] (\dot{X}) = \dot{X}^\top X + X^\top \dot{X}
+```
+Observe that this is indeed a linear operator (i.e. it is linear in its argument, ``\dot{X}``).
+(You can always plug it in to the definition of the Frechet derivative to confirm that it is indeed the derivative.)
+
+In order to perform reverse-mode AD, we need to find the adjoint operator.
+Using the usual definition of the inner product between matrices,
+```math
+\langle X, Y \rangle := \textrm{tr} (X^\top Y)
+```
+we can rearrange the inner product as follows:
+```math
+\begin{align}
+ \langle \bar{Y}, D f [X] (\dot{X}) \rangle &= \langle \bar{Y}, \dot{X}^\top X + X^\top \dot{X} \rangle \nonumber \\
+ &= \textrm{tr} (\bar{Y}^\top \dot{X}^\top X) + \textrm{tr}(\bar{Y}^\top X^\top \dot{X}) \nonumber \\
+ &= \textrm{tr} ( [\bar{Y} X^\top]^\top \dot{X}) + \textrm{tr}( [X \bar{Y}]^\top \dot{X}) \nonumber \\
+ &= \langle \bar{Y} X^\top + X \bar{Y}, \dot{X} \rangle. \nonumber
+\end{align}
+```
+We can read off the adjoint operator from the first argument to the inner product:
+```math
+D f [X]^\ast (\bar{Y}) = \bar{Y} X^\top + X \bar{Y}.
+```
+
+#### AD of a Julia function: a trivial example
+
+We now turn to differentiating Julia `function`s.
+The way that Tapir.jl handles immutable data is very similar to how Zygote / ChainRules do.
+For example, consider the Julia function
+```julia
+f(x::Float64) = sin(x)
+```
+If you've previously worked with ChainRules / Zygote, without thinking too hard about the formalisms we introduced previously (perhaps by considering a variety of partial derivatives) you can probably arrive at the following adjoint for the derivative of `f`:
+```julia
+g -> g * cos(x)
+```
+
+Implicitly, you have performed three steps:
+1. model `f` as a differentiable function,
+2. compute its derivative, and
+3. compute the adjoint of the derivative.
+
+It is helpful to work through this simple example in detail, as the steps involved apply more generally.
+The goal is to spell out the steps involved in detail, as this detail becomes helpful in more complicated examples.
+If at any point this exercise feels pedantic, we ask you to stick with it.
+
+_**Step 1: Differentiable Mathematical Model**_
+
+Obviously, we model the Julia `function` `f` as the function ``f : \RR \to \RR`` where
+```math
+f(x) := \sin(x)
+```
+Observe that, we've made (at least) two modelling assumptions here:
+1. a `Float64` is modelled as a real number,
+2. the Julia `function` `sin` is modelled as the usual mathematical function ``\sin``.
+
+As promised we're being quite pedantic.
+While the first assumption is obvious and will remain true, we will shortly see examples where we have to work a bit harder to obtain a correspondence between a Julia `function` and a mathematical object.
+
+_**Step 2: Compute Derivative**_
+
+Now that we have a mathematical model, we can differentiate it:
+```math
+D f [x] (\dot{x}) = \cos(x) \dot{x}
+```
+
+_**Step 3: Compute Adjoint of Derivative**_
+
+Given the derivative, we can find its adjoint:
+```math
+\langle \bar{f}, D f [x](\dot{x}) \rangle = \langle \bar{f}, \cos(x) \dot{x} \rangle = \langle \cos(x) \bar{f}, \dot{x} \rangle.
+```
+From here the adjoint can be read off from the first argument to the inner product:
+```math
+D f [x]^\ast (\bar{f}) = \cos(x) \bar{f}.
+```
+
+
+#### AD of a Julia function: a slightly less trivial example
+
+Now consider the Julia function
+```julia
+f(x::Float64, y::Tuple{Float64, Float64}) = x + y[1] * y[2]
+```
+Its adjoint is going to be something along the lines of
+```julia
+g -> (g, (y[2] * g, y[1] * g))
+```
+
+As before, we work through in detail.
+
+
+
+_**Step 1: Differentiable Mathematical Model**_
+
+There are a couple of aspects of `f` which require thought:
+1. it has two arguments -- we've only handled single argument functions previously, and
+2. the second argument is a `Tuple` -- we've not yet decided how to model this.
+
+To this end, we define a mathematical notion of a tuple.
+A tuple is a collection of ``N`` elements, each of which is drawn from some set ``\mathcal{X}_n``.
+We denote by ``\mathcal{X} := \{ \mathcal{X}_1 \times \dots \times \mathcal{X}_N \}`` the set of all ``N``-tuples whose ``n``th element is drawn from ``\mathcal{X}_n``.
+Provided that each ``\mathcal{X}_n`` forms a finite Hilbert space, ``\mathcal{X}`` forms a Hilbert space with
+1. ``\alpha x := (\alpha x_1, \dots, \alpha x_N)``,
+2. ``x + y := (x_1 + y_1, \dots, x_N + y_N)``, and
+3. ``\langle x, y \rangle := \sum_{n=1}^N \langle x_n, y_n \rangle``.
+
+We can think of multi-argument functions as single-argument functions of a tuple, so a reasonable mathematical model for `f` might be a function ``f : \{ \RR \times \{ \RR \times \RR \} \} \to \RR``, where
+```math
+f(x, y) := x + y_1 y_2
+```
+Note that while the function is written with two arguments, you should treat them as a single tuple, where we've assigned the name ``x`` to the first element, and ``y`` to the second.
+
+_**Step 2: Compute Derivative**_
+
+Now that we have a mathematical object, we can differentiate it:
+```math
+D f [x, y](\dot{x}, \dot{y}) = \dot{x} + \dot{y}_1 y_2 + y_1 \dot{y}_2
+```
+
+_**Step 3: Compute Adjoint of Derivative**_
+
+``D f[x, y]`` maps ``\{ \RR \times \{ \RR \times \RR \}\}`` to ``\RR``, so ``D f [x, y]^\ast`` must map the other way.
+You should verify that the following follows quickly from the definition of the adjoint:
+```math
+D f [x, y]^\ast (\bar{f}) = (\bar{f}, (\bar{f} y_2, \bar{f} y_1))
+```
+
+
+#### AD with mutable data
+
+In the previous two examples there was an obvious mathematical model for the Julia function.
+Indeed this model was sufficiently obvious that it required little explanation.
+This is not always the case though, in particular, Julia functions which modify / mutate their inputs require a little more thought.
+
+Consider the following Julia `function`:
+```julia
+function f!(x::Vector{Float64})
+ x .*= x
+ return sum(x)
+end
+```
+This `function` squares each element of its input in-place, and returns the sum of the result.
+So what is an appropriate mathematical model for this `function`?
+
+_**Step 1: Differentiable Mathematical Model**_
+
+The trick is to distingush between the state of `x` upon _entry_ to / _exit_ from `f!`.
+In particular, let ``\phi_{\text{f!}} : \RR^N \to \{ \RR^N \times \RR \}`` be given by
+```math
+\phi_{\text{f!}}(x) = (x \odot x, \sum_{n=1}^N x_n^2)
+```
+where ``\odot`` denotes the Hadamard / elementwise product.
+The point here is that the inputs to ``\phi_{\text{f!}}`` are the inputs to `x` upon entry to `f!`, and the value returned from ``\phi_{\text{f!}}`` is a tuple containing the both the inputs upon exit from `f!` and the value returned by `f!`.
+
+The remaining steps are straightforward now that we have the model.
+
+
+_**Step 2: Compute Derivative**_
+
+The derivative of ``\phi_{\text{f!}}`` is
+```math
+D \phi_{\text{f!}} [x](\dot{x}) = (2 x \odot x, 2 \sum_{n=1}^N x_n \dot{x}_n).
+```
+
+_**Step 3: Compute Adjoint of Derivative**_
+
+The argument to the adjoint of the derivative must be a 2-tuple whose elements are drawn from ``\{\RR^N \times \RR \}``.
+Denote such a tuple as ``(\bar{y}_1, \bar{y}_2)``.
+Plugging this into an inner product with the derivative and rearranging yields
+```math
+\begin{align}
+ \langle (\bar{y}_1, \bar{y}_2), D \phi_{\text{f!}} [x] (\dot{x}) \rangle &= \langle (\bar{y}_1, \bar{y}_2), (2 x \odot \dot{x}, 2 \sum_{n=1}^N x_n \dot{x}_n) \rangle \nonumber \\
+ &= \langle (2 x \odot \bar{y}_1, 2 \bar{y}_2 x), (\text{d} x, \text{d} x) \rangle \nonumber \\ | I find the middle step here confusing. Would it be clearer to use the definition of the inner product of tuples to spell this out as a sum over inner products?
Also, there's been a switch here from `\dot{x}` to `\text{d} x`. |
Mooncake.jl | github_2023 | others | 175 | compintell | yebai | @@ -0,0 +1,35 @@
+# Safe Mode | Maybe rename this to `debugg_mode.md` to avoid renaming it again soon? |
Mooncake.jl | github_2023 | others | 123 | compintell | yebai | @@ -41,6 +41,7 @@ include("front_matter.jl")
end
include("chain_rules_macro.jl")
elseif test_group == "integration_testing/misc"
+ include(joinpath("integration_test", "logdensityproblemsad_interop.jl")) | ```suggestion
include(joinpath("integration_testing", "logdensityproblemsad_interop.jl"))
``` |
Mooncake.jl | github_2023 | others | 104 | compintell | yebai | @@ -67,17 +67,14 @@ Additionally, the strategy of immediately incrementing (co)tangents resolves lon
### Written entirely in Julia
-`Phi.jl` is written entirely in Julia.
+`Tapir.jl` is written entirely in Julia.
This sits in contrast to `Enzyme.jl`, which targets LLVM and is primarily written in C++.
These two approaches entail different tradeoffs.
# Project Name
-This package is called `Phi.jl` because 1) this package supports automatic differentiation for Julia functions containing input-dependent control flows (aka `phi`-nodes in SSA-IR), 2) the developers spent many hours figuring out how to handle the various edge cases associated with phi nodes, which inspired this name.
-At the time of writing (25/03/2024), we continue to improve the performance of our implementation.
-Additionally, `Phi.jl` is reasonably memorable, and snappy.
-
-This package was initially called `Taped.jl`, but that name ceased to be helpful when we stopped using a classic "Wengert list"-style type to implement AD.
+Before an initial release, this package was called `Taped.jl`, but that name ceased to be helpful when we stopped using a classic "Wengert list"-style type to implement AD. | Maybe add a sentence that explains the reasoning for naming it `Tapir`? |
Mooncake.jl | github_2023 | others | 83 | compintell | yebai | @@ -60,7 +78,30 @@ jobs:
include-all-prereleases: true
- uses: julia-actions/cache@v1
- uses: julia-actions/julia-buildpkg@v1
+ - run: mkdir bench_results
- run: julia --project=bench --eval 'include("bench/run_benchmarks.jl"); main()'
env:
- PERF_GROUP: ${{ matrix.perf_group }}
+ PERF_GROUP: 'comparison'
+ GKSwstype: '100'
shell: bash
+ - uses: actions/upload-artifact@v4
+ with:
+ name: benchmarking-results
+ path: bench_results/
+ - name: Convert graph to Base64
+ run: |
+ base64 benchmarking_results.png > benchmarking_results_base64.txt | That's a clever trick -- I hope it can work! |
Mooncake.jl | github_2023 | others | 33 | compintell | willtebbutt | @@ -6,12 +6,20 @@
# Project Goals
-The goal of the `Taped.jl` project is to produce a reverse-mode AD package, written entirely in Julia, which improves over both ReverseDiff.jl and Zygote.jl in several ways:
+The goal of the `Taped.jl` project is to produce a reverse-mode AD package, written entirely in Julia, which improves over both `ReverseDiff.jl` and `Zygote.jl` in several ways:
1. performance,
1. correctness / scope of testing,
1. coverage of language features.
-The most notable feature that we improve on over ReverseDiff and Zygote / ChainRules is support for mutation (writing to arrays, modifying fields of `mutable struct`s, etc), which is arguably the core limitation of these two packages, and has been the elephant-in-the-room of reverse-mode AD in Julia for years.
+Some notable features that we try to improve on over ReverseDiff and Zygote / ChainRules are
+
+- optimise performance, usability and robustness while balancing generalizability
+- guaranteed safety against dynamic control flows when using cached and compiled tapes
+- compatibility with more array types, e.g. GPU arrays
+- builtin support for mutation (writing to arrays, modifying fields of `mutable struct`s, etc), which is a shared limitation of `ReverseDiff.jl` and `Zygote.jl`, and has been the elephant-in-the-room of reverse-mode AD in Julia for years
+
+`Taped.jl` inherits many features and characteristics from `ReverseDiff.jl` and aims to be a drop-in replacement. However, compared to `ReverseDiff.jl`'s operator-overloading approach to tracing, we adopt an IR-based tracking based on [Umlaut](https://github.com/dfdx/Umlaut.jl). This IR-based tracing mechanism allows us to avoid the need to rewrite functions with generic type signatures (similar to [`Cassette.jl`](https://github.com/JuliaLabs/Cassette.jl)).
+ | I'm not in favour of this re-phrasing.
The reason that I only went with mutation is that mutation is missing in _both_ ReverseDiff and Zygote. This is not true of the other points -- the dynamic control flow point is only relevant to ReverseDiff, as is the compatibility with more array types point (Zygote is fine with whatever array you like).
Additionally, my feeling is that the "optimise performance, usability..." point is too vague to be helpful. If 10 different people read it, you'll get 10 different impressions of what it means. My feeling is that the goals at the top of the readme better capture what this sentence states.
I would prefer it if you removed this proposed change. |
aerugo | github_2023 | others | 89 | n7space | Glamhoth | @@ -0,0 +1,204 @@
+//! Floating Point Unit HAL driver implementation.
+
+use cortex_m::asm::{dsb, isb};
+use cortex_m::register::fpscr::{read as read_fpscr, write as write_fpscr};
+use samv71q21_pac::{FPU, SCB};
+
+use self::registers::{
+ FPU_FPCAR_ADDRESS_MASK, FPU_FPCAR_ADDRESS_OFFSET, FPU_FPCCR_ASPEN_MASK, FPU_FPCCR_BFRDY_MASK,
+ FPU_FPCCR_HFRDY_MASK, FPU_FPCCR_LSPACT_MASK, FPU_FPCCR_LSPEN_MASK, FPU_FPCCR_MMRDY_MASK,
+ FPU_FPCCR_MONRDY_MASK, FPU_FPCCR_THREAD_MASK, FPU_FPCCR_USER_MASK, FPU_FPDSCR_AHP_MASK,
+ FPU_FPDSCR_AHP_OFFSET, FPU_FPDSCR_DN_MASK, FPU_FPDSCR_DN_OFFSET, FPU_FPDSCR_FZ_MASK,
+ FPU_FPDSCR_FZ_OFFSET, FPU_FPDSCR_RMODE_MASK, FPU_FPDSCR_RMODE_OFFSET,
+};
+
+mod registers;
+
+pub use registers::{
+ Config, ContextConfig, ContextStateFlags, FlushToZeroMode, HalfPrecisionMode, NaNMode,
+ RoundingMode, Status,
+};
+
+/// Structure representing Floating Point Unit
+pub struct Fpu {
+ /// PAC FPU driver instance.
+ fpu: FPU,
+}
+
+impl Fpu {
+ /// Creates new instance of FPU driver and consumes PAC FPU instance.
+ pub fn new(fpu: FPU) -> Self {
+ Fpu { fpu }
+ }
+
+ /// Enables the FPU and it's lazy stacking feature.
+ pub fn enable(&mut self, scb: &mut SCB) {
+ scb.enable_fpu();
+ dsb();
+ isb();
+ unsafe { self.fpu.fpccr.modify(|reg| reg | FPU_FPCCR_LSPEN_MASK) };
+ dsb();
+ isb();
+ }
+
+ /// Disable the FPU. In addition to | Missing something? |
aerugo | github_2023 | others | 86 | n7space | Glamhoth | @@ -45,19 +66,46 @@ use aerugo::{
},
},
interrupt,
- user_peripherals::{PIOD, PMC, UART4},
+ user_peripherals::{PIOD, PMC, SPI0, UART4},
},
logln,
time::RateExtU32,
Aerugo, EventId, EventStorage, InitApi, MessageQueueHandle, MessageQueueStorage,
SystemHardwareConfig, TaskletConfig, TaskletStorage,
};
+use lsm6dso::config::control::AccelerometerTestMode;
+use lsm6dso::config::control::GyroscopeTestMode;
+use lsm6dso::{
+ config::fifo::config::{
+ AccelerometerBatchingRate, DataRateChangeBatching, FifoConfig, FifoMode,
+ FifoWatermarkThreshold, GyroscopeBatchingRate, StopOnWatermarkThreshold,
+ },
+ LSM6DSO,
+}; | Put it in one `use` as done above with `aerugo` |
aerugo | github_2023 | others | 86 | n7space | Glamhoth | @@ -119,64 +166,196 @@ static EVENT_START_STORAGE: EventStorage = EventStorage::new();
static EVENT_STOP_STORAGE: EventStorage = EventStorage::new();
static EVENT_STATS_STORAGE: EventStorage = EventStorage::new();
+/// IMU will never be accessed from an interrupt, so it's safe to access from tasklets.
+/// This is an "unsafe" alternative to `Mutex<RefCell<Option<T>>>` that's 100% safe in this specific
+/// scenario. Wrapping IMU in a mutex would require doing all IMU operations in critical sections,
+/// which is very suboptimal, as we want to limit the amount of critical sections to minimum.
+///
+/// For mutex-based storage example, see [`IMU_DATA_RATE_CONFIG`]
+pub static mut IMU_STORAGE: Option<IMU> = None;
+
+/// See [`IMU_STORAGE`] for explanation why is this a `pub static mut`.
+pub static mut UART_WRITER_STORAGE: Option<Writer<UART4>> = None;
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum DemoTaskletName {
+ GetExecutionStats,
+ SetAccelerometerScale,
+ SetGyroscopeScale,
+ SetDataOutputRate,
+ StartMeasurements,
+ StopMeasurements,
+ TransmitImuData,
+ UartReader,
+}
+
+pub type TaskletMap = [(DemoTaskletName, TaskletId); 8];
+
+/// This is a mapping of app's tasklet IDs to Aerugo's tasklet IDs.
+/// Should be filled by the system @ init.
+pub static mut TASKLET_MAP: Option<TaskletMap> = None; | This should be in tasklet context |
aerugo | github_2023 | others | 86 | n7space | Glamhoth | @@ -119,64 +166,196 @@ static EVENT_START_STORAGE: EventStorage = EventStorage::new();
static EVENT_STOP_STORAGE: EventStorage = EventStorage::new();
static EVENT_STATS_STORAGE: EventStorage = EventStorage::new();
+/// IMU will never be accessed from an interrupt, so it's safe to access from tasklets.
+/// This is an "unsafe" alternative to `Mutex<RefCell<Option<T>>>` that's 100% safe in this specific
+/// scenario. Wrapping IMU in a mutex would require doing all IMU operations in critical sections,
+/// which is very suboptimal, as we want to limit the amount of critical sections to minimum.
+///
+/// For mutex-based storage example, see [`IMU_DATA_RATE_CONFIG`]
+pub static mut IMU_STORAGE: Option<IMU> = None;
+
+/// See [`IMU_STORAGE`] for explanation why is this a `pub static mut`.
+pub static mut UART_WRITER_STORAGE: Option<Writer<UART4>> = None;
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum DemoTaskletName {
+ GetExecutionStats,
+ SetAccelerometerScale,
+ SetGyroscopeScale,
+ SetDataOutputRate,
+ StartMeasurements,
+ StopMeasurements,
+ TransmitImuData,
+ UartReader,
+}
+
+pub type TaskletMap = [(DemoTaskletName, TaskletId); 8];
+
+/// This is a mapping of app's tasklet IDs to Aerugo's tasklet IDs.
+/// Should be filled by the system @ init.
+pub static mut TASKLET_MAP: Option<TaskletMap> = None;
+
#[entry]
fn main() -> ! {
let (aerugo, mut peripherals) = Aerugo::initialize(SystemHardwareConfig::default());
+ logln!("Hello world, starting the demo...");
+
+ logln!("Initializing clocks...");
let pmc = peripherals.pmc.unwrap();
init_clocks(pmc);
+ logln!("Clock initialized!");
+ logln!("Initializing PIO...");
let port = Port::new(peripherals.pio_d.take().unwrap());
init_pio(port);
+ logln!("PIO initialized!");
+
+ logln!("Initializing UART with {}bps baudrate...", UART_BAUD_RATE);
+ let mut uart = init_uart(Uart::new(peripherals.uart_4.take().unwrap())); | This was split on purpose, imo more readable, I am not a fan of calling functions as arguments to other functions |
aerugo | github_2023 | others | 86 | n7space | Glamhoth | @@ -119,64 +166,196 @@ static EVENT_START_STORAGE: EventStorage = EventStorage::new();
static EVENT_STOP_STORAGE: EventStorage = EventStorage::new();
static EVENT_STATS_STORAGE: EventStorage = EventStorage::new();
+/// IMU will never be accessed from an interrupt, so it's safe to access from tasklets.
+/// This is an "unsafe" alternative to `Mutex<RefCell<Option<T>>>` that's 100% safe in this specific
+/// scenario. Wrapping IMU in a mutex would require doing all IMU operations in critical sections,
+/// which is very suboptimal, as we want to limit the amount of critical sections to minimum.
+///
+/// For mutex-based storage example, see [`IMU_DATA_RATE_CONFIG`]
+pub static mut IMU_STORAGE: Option<IMU> = None;
+
+/// See [`IMU_STORAGE`] for explanation why is this a `pub static mut`.
+pub static mut UART_WRITER_STORAGE: Option<Writer<UART4>> = None;
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum DemoTaskletName {
+ GetExecutionStats,
+ SetAccelerometerScale,
+ SetGyroscopeScale,
+ SetDataOutputRate,
+ StartMeasurements,
+ StopMeasurements,
+ TransmitImuData,
+ UartReader,
+}
+
+pub type TaskletMap = [(DemoTaskletName, TaskletId); 8];
+
+/// This is a mapping of app's tasklet IDs to Aerugo's tasklet IDs.
+/// Should be filled by the system @ init.
+pub static mut TASKLET_MAP: Option<TaskletMap> = None;
+
#[entry]
fn main() -> ! {
let (aerugo, mut peripherals) = Aerugo::initialize(SystemHardwareConfig::default());
+ logln!("Hello world, starting the demo...");
+
+ logln!("Initializing clocks...");
let pmc = peripherals.pmc.unwrap();
init_clocks(pmc);
+ logln!("Clock initialized!");
+ logln!("Initializing PIO...");
let port = Port::new(peripherals.pio_d.take().unwrap());
init_pio(port);
+ logln!("PIO initialized!");
+
+ logln!("Initializing UART with {}bps baudrate...", UART_BAUD_RATE);
+ let mut uart = init_uart(Uart::new(peripherals.uart_4.take().unwrap()));
+ unsafe { UART_WRITER_STORAGE.replace(uart.take_writer().unwrap()) };
+ logln!("UART initialized!");
+
+ logln!("Initializing SPI...");
+ let spi = init_spi(Spi::new(peripherals.spi_0.take().unwrap())); | Same as with UART |
aerugo | github_2023 | others | 86 | n7space | Glamhoth | @@ -119,64 +166,196 @@ static EVENT_START_STORAGE: EventStorage = EventStorage::new();
static EVENT_STOP_STORAGE: EventStorage = EventStorage::new();
static EVENT_STATS_STORAGE: EventStorage = EventStorage::new();
+/// IMU will never be accessed from an interrupt, so it's safe to access from tasklets.
+/// This is an "unsafe" alternative to `Mutex<RefCell<Option<T>>>` that's 100% safe in this specific
+/// scenario. Wrapping IMU in a mutex would require doing all IMU operations in critical sections,
+/// which is very suboptimal, as we want to limit the amount of critical sections to minimum.
+///
+/// For mutex-based storage example, see [`IMU_DATA_RATE_CONFIG`]
+pub static mut IMU_STORAGE: Option<IMU> = None;
+
+/// See [`IMU_STORAGE`] for explanation why is this a `pub static mut`.
+pub static mut UART_WRITER_STORAGE: Option<Writer<UART4>> = None;
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum DemoTaskletName {
+ GetExecutionStats,
+ SetAccelerometerScale,
+ SetGyroscopeScale,
+ SetDataOutputRate,
+ StartMeasurements,
+ StopMeasurements,
+ TransmitImuData,
+ UartReader,
+}
+
+pub type TaskletMap = [(DemoTaskletName, TaskletId); 8];
+
+/// This is a mapping of app's tasklet IDs to Aerugo's tasklet IDs.
+/// Should be filled by the system @ init.
+pub static mut TASKLET_MAP: Option<TaskletMap> = None;
+
#[entry]
fn main() -> ! {
let (aerugo, mut peripherals) = Aerugo::initialize(SystemHardwareConfig::default());
+ logln!("Hello world, starting the demo...");
+
+ logln!("Initializing clocks...");
let pmc = peripherals.pmc.unwrap();
init_clocks(pmc);
+ logln!("Clock initialized!");
+ logln!("Initializing PIO...");
let port = Port::new(peripherals.pio_d.take().unwrap());
init_pio(port);
+ logln!("PIO initialized!");
+
+ logln!("Initializing UART with {}bps baudrate...", UART_BAUD_RATE);
+ let mut uart = init_uart(Uart::new(peripherals.uart_4.take().unwrap()));
+ unsafe { UART_WRITER_STORAGE.replace(uart.take_writer().unwrap()) };
+ logln!("UART initialized!");
+
+ logln!("Initializing SPI...");
+ let spi = init_spi(Spi::new(peripherals.spi_0.take().unwrap()));
+ logln!("SPI initialized!");
- let uart = Uart::new(peripherals.uart_4.take().unwrap());
- let mut uart = init_uart(uart);
+ logln!("Initializing IMU...");
+ let imu = init_imu(spi);
+ // This is safe, because IMU storage is never accessed from IRQ.
+ unsafe { IMU_STORAGE.replace(imu) };
+ logln!("IMU initialized!");
+ logln!("Initializing DMA...");
let xdmac = Xdmac::new(peripherals.xdmac.take().unwrap());
init_xdmac(xdmac, &mut uart);
+ logln!("DMA initialized!");
+ logln!("Initializing NVIC...");
let mut nvic = NVIC::new(peripherals.nvic.take().unwrap());
nvic.enable(Interrupt::XDMAC);
+ logln!("NVIC initialized!");
+ logln!("Initializing the system...");
init_system(aerugo);
+ logln!("System initialized!");
+ // This is safe, because this channel is currently idle.
unsafe {
XDMAC_RX_CHANNEL.as_mut().unwrap().enable();
}
+ logln!("System is starting!");
aerugo.start();
}
fn init_clocks(mut pmc: PMC) {
pmc.enable_peripheral_clock(PeripheralId::PIOD);
+ pmc.enable_peripheral_clock(PeripheralId::SPI0);
pmc.enable_peripheral_clock(PeripheralId::UART4);
pmc.enable_peripheral_clock(PeripheralId::XDMAC);
}
fn init_pio(port: Port<PIOD>) {
let mut pins = port.into_pins();
- pins[18]
+ let _uart_rx = pins[18]
.take()
.unwrap()
.into_peripheral_pin(PioPeripheral::C);
- pins[19]
+ let _uart_tx = pins[19]
+ .take()
+ .unwrap()
+ .into_peripheral_pin(PioPeripheral::C);
+ let _spi_miso = pins[20]
+ .take()
+ .unwrap()
+ .into_peripheral_pin(PioPeripheral::B);
+ let _spi_mosi = pins[21]
+ .take()
+ .unwrap()
+ .into_peripheral_pin(PioPeripheral::B);
+ let _spi_sck = pins[22]
+ .take()
+ .unwrap()
+ .into_peripheral_pin(PioPeripheral::B);
+ let _lsm6dso_cs = pins[25]
.take()
.unwrap()
- .into_peripheral_pin(PioPeripheral::D);
+ .into_peripheral_pin(PioPeripheral::B);
+ let _lsm6dso_int1 = pins[28].take().unwrap().into_input_pin();
}
-fn init_uart(uart: Uart<UART4, NotConfigured>) -> Uart<UART4, Bidirectional> {
- let uart_config = Config::new(9600, 12.MHz()).unwrap();
+fn init_uart(uart: Uart<UART4, UartNotConfigured>) -> Uart<UART4, Bidirectional> {
+ let uart_config = Config::new(UART_BAUD_RATE, 12.MHz()).unwrap();
let recv_config = ReceiverConfig {
rx_filter_enabled: true,
};
uart.into_bidirectional(uart_config, recv_config)
}
+fn init_spi(spi: Spi<SPI0, SpiNotConfigured>) -> Spi<SPI0, Master> {
+ let mut spi = spi.into_master(MasterConfig::new(IMU_CHIP));
+ spi.configure_chip(
+ IMU_CHIP,
+ ChipConfig {
+ clock_polarity: ClockPolarity::HighWhenInactive,
+ clock_phase: ClockPhase::DataChangedOnLeadingEdge,
+ chip_select_behavior: ChipSelectBehavior::DeactivateAfterLastTransfer,
+ bits_per_transfer: BitsPerTransfer::Bits8,
+ clock_divider: SerialClockDivider::new(IMU_SPI_CLOCK_DIVIDER).unwrap(),
+ delay_before_first_clock: 0,
+ delay_between_consecutive_transfers: 0,
+ },
+ );
+ spi.set_interrupts_state(SpiInterrupts {
+ rx_data_register_full: true,
+ tx_data_register_empty: true,
+ mode_fault_error: true,
+ overrun_error: true,
+ nss_rising: false,
+ tx_registers_empty: true,
+ underrun_error: false,
+ });
+
+ spi
+}
+
+fn init_imu(spi: Spi<SPI0, Master>) -> IMU {
+ let mut imu = match IMU::new(spi) {
+ Ok(imu) => imu,
+ Err(reason) => panic!("Could not initialize IMU: {reason:?}"),
+ };
+
+ logln!(
+ "Is IMU responsive: {}",
+ if imu.is_alive().unwrap() { "yes" } else { "no" } | Should we continue if not alive? |
aerugo | github_2023 | others | 86 | n7space | Glamhoth | @@ -17,7 +17,7 @@ mod tasklet_status;
mod tasklet_storage;
mod tasklet_vtable;
-pub(crate) use self::tasklet_id::TaskletId;
+pub use self::tasklet_id::TaskletId; | move down |
aerugo | github_2023 | others | 85 | n7space | Glamhoth | @@ -0,0 +1,44 @@
+/// Macro creating an bitfield enum with methods for converting it from/to register value.
+macro_rules! register_enum { | Shouldn't this be `registry_enum`? This sound like you register enum somewhere |
aerugo | github_2023 | others | 81 | n7space | SteelPh0enix | @@ -0,0 +1,423 @@
+#![no_std]
+#![no_main]
+
+extern crate cortex_m;
+extern crate cortex_m_rt as rt;
+extern crate panic_rtt_target;
+
+pub mod command;
+pub mod events;
+pub mod task_get_execution_stats;
+pub mod task_set_accelerometer_scale;
+pub mod task_set_data_output_rate;
+pub mod task_set_gyroscope_scale;
+pub mod task_start_measurements;
+pub mod task_stop_measurements;
+pub mod task_uart_reader;
+
+use crate::command::*;
+use crate::events::*;
+use crate::task_get_execution_stats::*;
+use crate::task_set_accelerometer_scale::*;
+use crate::task_set_data_output_rate::*;
+use crate::task_set_gyroscope_scale::*;
+use crate::task_start_measurements::*;
+use crate::task_stop_measurements::*;
+use crate::task_uart_reader::*;
+
+use aerugo::{
+ hal::{
+ drivers::{
+ nvic::{Interrupt, NVIC},
+ pio::{pin::Peripheral as PioPeripheral, Port},
+ pmc::config::PeripheralId,
+ uart::{Bidirectional, Config, NotConfigured, ReceiverConfig, Uart},
+ xdmac::{
+ channel::{Channel, Configured},
+ channel_status::ChannelStatusReader,
+ events::ChannelEvents,
+ status::StatusReader,
+ transfer::{
+ AddressingMode, DataWidth, MicroblockLength, Peripheral as XdmacPeripheral,
+ SystemBus, TransferBlock, TransferLocation, TransferType, TriggerSource,
+ },
+ Xdmac,
+ },
+ },
+ interrupt,
+ user_peripherals::{PIOD, PMC, UART4},
+ },
+ logln,
+ time::RateExtU32,
+ Aerugo, EventId, EventStorage, InitApi, MessageQueueHandle, MessageQueueStorage,
+ SystemHardwareConfig, TaskletConfig, TaskletStorage,
+};
+use rt::entry;
+
+const TRANSFER_LENGTH: usize = 7;
+type TransferArrayType = [u8; TRANSFER_LENGTH];
+
+static mut MESSAGE_BUFFER: TransferArrayType = [0; TRANSFER_LENGTH];
+
+/// This is used for passing XDMAC's status reader to IRQ.
+/// It must be initialized before starting an IRQ-synchronized XDMAC transaction, otherwise the
+/// program may panic.
+/// This can be safely accessed outside of XDMAC IRQ only when no XDMAC transactions are in progress.
+static mut XDMAC_STATUS_READER: Option<StatusReader> = None;
+/// This is used for passing XDMAC's channel status reader to IRQ.
+/// It must be initialized before starting an IRQ-synchronized XDMAC transaction, otherwise the
+/// program may panic.
+/// This can be safely accessed outside of XDMAC IRQ only when no XDMAC transactions are in progress.
+static mut XDMAC_CHANNEL_STATUS_READER: Option<ChannelStatusReader> = None;
+/// This is used for passing XDMAC's channel to IRQ.
+/// It must be initialized before starting an IRQ-synchronized XDMAC transaction, otherwise the
+/// program may panic.
+/// This can be safely accessed outside of XDMAC IRQ only when no XDMAC transactions are in progress.
+static mut XDMAC_RX_CHANNEL: Option<Channel<Configured>> = None;
+/// This is used for passing command queue handle to IRQ.
+/// It must be initialized before starting an IRQ-synchronized XDMAC transaction, otherwise the
+/// probram may panic. | ```suggestion
/// program may panic.
``` |
aerugo | github_2023 | others | 81 | n7space | SteelPh0enix | @@ -0,0 +1,45 @@
+use aerugo::{logln, RuntimeApi};
+
+#[derive(Copy, Clone, Debug)]
+pub enum OutputDataRate {
+ Invalid, | that doesn't make sense, as "invalid" value is not a valid value :)
Potentially invalid value should be represented as Result, not actual enum value. |
aerugo | github_2023 | others | 81 | n7space | SteelPh0enix | @@ -0,0 +1,35 @@
+use aerugo::{logln, RuntimeApi};
+
+#[derive(Copy, Clone, Debug)]
+pub enum GyroscopeScale {
+ Invalid, | don't, see earlier comment |
aerugo | github_2023 | others | 81 | n7space | SteelPh0enix | @@ -0,0 +1,45 @@
+use aerugo::{logln, RuntimeApi};
+
+#[derive(Copy, Clone, Debug)]
+pub enum OutputDataRate {
+ Invalid,
+ Odr12,
+ Odr26,
+ Odr52,
+ Odr104,
+ Odr208,
+ Odr416,
+ Odr833,
+ Odr1666,
+ Odr3332,
+ Odr6664,
+}
+
+impl From<u8> for OutputDataRate { | use TryFrom instead, see earlier comment |
aerugo | github_2023 | others | 81 | n7space | SteelPh0enix | @@ -0,0 +1,35 @@
+use aerugo::{logln, RuntimeApi};
+
+#[derive(Copy, Clone, Debug)]
+pub enum GyroscopeScale {
+ Invalid,
+ Gs120,
+ Gs250,
+ Gs500,
+ Gs1000,
+ Gs2000,
+}
+
+impl From<u8> for GyroscopeScale { | use TryFrom instead, see earlier comment |
aerugo | github_2023 | others | 78 | n7space | Glamhoth | @@ -0,0 +1,510 @@
+//! Implementation of XDMAC's channel.
+
+use core::marker::PhantomData;
+
+use samv71q21_pac::{
+ xdmac::{xdmac_chid::XDMAC_CHID as ChannelRegisters, RegisterBlock},
+ XDMAC,
+};
+
+pub use super::channel_status::ChannelStatusReader;
+pub use super::events::ChannelEvents;
+use super::transfer::{ErrataTransferBlockConfig, TransferBlock};
+
+/// Typestate trait representing generic XDMAC channel's state.
+pub trait State {}
+
+/// Typestate struct representing XDMAC channel in default, not configured state.
+pub struct NotConfigured;
+
+/// Typestate struct representing XDMAC channel in configured state.
+pub struct Configured;
+
+impl State for NotConfigured {}
+impl State for Configured {}
+
+/// XDMAC channel.
+///
+/// Channels can be created only via [`Xdmac`](super::Xdmac). After acquiring a channel, it can be
+/// used to configure an XDMAC transfer and manage it (start, stop, suspend, flush).
+///
+/// To check channel's status, you must use [`ChannelStatusReader`] instance that can be acquired
+/// from Channel via [`Channel::take_status_reader`]. It can be taken only once - but can be
+/// returned, and it must be present when giving the Channel back to [`Xdmac`](super::Xdmac), to
+/// make sure that there's no dangling Reader after returning ownership of a Channel.
+///
+/// This requirement may be ignored with `unsafe` variant of
+/// [`Xdmac::return_channel`](super::Xdmac::return_channel):
+/// [`Xdmac::mark_channel_as_free`](super::Xdmac::mark_channel_as_free). You can call this function
+/// safely if you can guarantee that the Reader won't exist when Channel's ownership is returned.
+///
+/// In order to configure an XDMAC transfer, you must create transfer block, and pass it to the
+/// Channel. Then, you can start the transfer by enabling the channel.
+///
+/// # Safety
+///
+/// Most channel-related functions are safe. However, few functions that require a read-modify-write
+/// operation on XDMAC global registers are unsafe, as using them without proper precautions (making
+/// sure that these registers will not be accessed from another thread/IRQ for the operation's
+/// duration) might cause data races.
+///
+/// As described in driver's module documentation, channels share global XDMAC registers, and proper
+/// precautions must be taken if they must be shared between main thread and IRQs. For handling the
+/// IRQs, you should use [`ChannelStatusReader`] along [`StatusReader`](super::status::StatusReader).
+pub struct Channel<CurrentState: State> {
+ /// Pointer to channel's registers.
+ channel_registers: *const ChannelRegisters,
+ /// Channel's numeric identifier.
+ id: usize,
+ /// Channel's status reader.
+ status_reader: Option<ChannelStatusReader>,
+ /// Channel's state metadata
+ _state: PhantomData<CurrentState>,
+}
+
+/// Implementation of Channel functions available in every state.
+impl<AnyState: State> Channel<AnyState> {
+ /// Takes the status reader out of Channel.
+ /// If the reader was already taken, and not put back, this function will return `None`.
+ /// To return the status reader to the channel, use [`Channel::return_status_reader`].
+ pub fn take_status_reader(&mut self) -> Option<ChannelStatusReader> {
+ self.status_reader.take()
+ }
+
+ /// Returns the status reader back to the Channel.
+ pub fn return_status_reader(&mut self, reader: ChannelStatusReader) {
+ self.status_reader.replace(reader);
+ }
+
+ /// Returns `true` if status reader is currently stored in Channel.
+ pub fn is_status_reader_available(&self) -> bool {
+ self.status_reader.is_some()
+ }
+
+ /// Returns `true` if source requests for this channel are suspended.
+ pub fn is_read_suspended(&self) -> bool {
+ self.is_channels_bit_set(self.xdmac_registers_ref().grs.read().bits())
+ }
+
+ /// Returns `true` if destination requests for this channel are suspended.
+ pub fn is_write_suspended(&self) -> bool {
+ self.is_channels_bit_set(self.xdmac_registers_ref().gws.read().bits())
+ }
+
+ /// Suspends source requests for the channel.
+ /// Source requests for this channel are no longer serviced by the system scheduler.
+ ///
+ /// # Safety
+ /// This is a read-modify-write operation that uses global XDMAC registers. Be very careful
+ /// with that if you share the Channels between threads/IRQs.
+ pub fn suspend_read(&mut self) {
+ self.xdmac_registers_ref()
+ .grs
+ // Safety: This is safe, because channel's ID must be valid for a Channel to exist.
+ // Also, channel bits are correctly masked with old value, as this is an R-M-W operation.
+ .modify(|r, w| unsafe { w.bits(r.bits() | self.channel_bitmask()) });
+ }
+
+ /// Suspends destination requests for the channel.
+ /// Destination requests for this channel are no longer routed to the scheduler.
+ ///
+ /// # Safety
+ /// This is a read-modify-write operation that uses global XDMAC registers. Be very careful
+ /// with that if you share the Channels between threads/IRQs.
+ pub fn suspend_write(&mut self) {
+ self.xdmac_registers_ref()
+ .gws
+ // Safety: This is safe, because channel's ID must be valid for a Channel to exist.
+ // Also, channel bits are correctly masked with old value, as this is an R-M-W operation.
+ .modify(|r, w| unsafe { w.bits(r.bits() | self.channel_bitmask()) });
+ }
+
+ /// Suspends read and write operations at the same time.
+ pub fn suspend_read_and_write(&mut self) {
+ self.xdmac_registers_ref()
+ .grws
+ // Safety: This is safe, because channel's ID must be valid for a Channel to exist.
+ .write(|w| unsafe { w.bits(self.channel_bitmask()) });
+ }
+
+ /// Resumes source requests for the channel.
+ ///
+ /// # Safety
+ /// This is a read-modify-write operation that uses global XDMAC registers. Be very careful
+ /// with that if you share the Channels between threads/IRQs.
+ pub fn resume_read(&mut self) {
+ self.xdmac_registers_ref()
+ .grs
+ // Safety: This is safe, because channel's ID must be valid for a Channel to exist.
+ // Also, channel bits are correctly masked with old value, as this is an R-M-W operation.
+ // Channel bitmask must be negated, as this operation is supposed to clear the channel bit.
+ .modify(|r, w| unsafe { w.bits(r.bits() & !self.channel_bitmask()) });
+ }
+
+ /// Resumes destination requests for the channel.
+ ///
+ /// # Safety
+ /// This is a read-modify-write operation that uses global XDMAC registers. Be very careful
+ /// with that if you share the Channels between threads/IRQs.
+ pub fn resume_write(&mut self) {
+ self.xdmac_registers_ref()
+ .gws
+ // Safety: This is safe, because channel's ID must be valid for a Channel to exist.
+ // Also, channel bits are correctly masked with old value, as this is an R-M-W operation.
+ // Channel bitmask must be negated, as this operation is supposed to clear the channel bit.
+ .modify(|r, w| unsafe { w.bits(r.bits() & !self.channel_bitmask()) });
+ }
+
+ /// Resumes read and write operations at the same time.
+ pub fn resume_read_and_write(&mut self) {
+ self.xdmac_registers_ref()
+ .grwr
+ // Safety: This is safe, because channel's ID must be valid for a Channel to exist.
+ .write(|w| unsafe { w.bits(self.channel_bitmask()) });
+ }
+
+ /// Returns channel's ID.
+ pub fn id(&self) -> usize {
+ self.id
+ }
+
+ /// Returns a reference to channel's registers.
+ #[inline(always)]
+ fn channel_registers_ref(&self) -> &ChannelRegisters {
+ // Safety: This is safe, as the address of the register is guaranteed to be valid by Xdmac.
+ unsafe { &*self.channel_registers }
+ }
+
+ /// Returns a reference to XDMAC's registers.
+ #[inline(always)]
+ fn xdmac_registers_ref(&self) -> &RegisterBlock {
+ // Safety: This is safe, as the address of XDMAC register is guaranteed to be valid by Xdmac.
+ unsafe { &*Self::XDMAC_REGISTERS }
+ }
+
+ /// Returns channel's bitmask (`1` shifted by `n` bits, where `n` is channel's ID)
+ /// This function will return valid value as long, as channel's ID is also valid.
+ #[inline(always)]
+ fn channel_bitmask(&self) -> u32 {
+ 1 << self.id
+ }
+
+ /// Returns `true` if channel's bit is set in specified value.
+ /// The value should usually be a register's content.
+ #[inline(always)]
+ fn is_channels_bit_set(&self, value: u32) -> bool {
+ value & self.channel_bitmask() != 0
+ }
+
+ /// Transforms Channel into a type with different state.
+ ///
+ /// This is a helper function that reduces state transition boilerplate.
+ ///
+ /// # Parameters
+ /// * `channel` - Channel instance to be consumed and transformed.
+ ///
+ /// # Returns
+ /// Transformed Channel instance.
+ const fn transform<NewState: State>(channel: Channel<NewState>) -> Self {
+ Self {
+ channel_registers: channel.channel_registers,
+ id: channel.id,
+ status_reader: channel.status_reader,
+ _state: PhantomData,
+ }
+ }
+
+ /// Pointer to XDMAC's registers.
+ const XDMAC_REGISTERS: *const RegisterBlock = XDMAC::PTR; | Move to the top maybe? |
aerugo | github_2023 | others | 78 | n7space | Glamhoth | @@ -0,0 +1,66 @@
+//! This module contains status reader implementation, a structure that allows you to read XDMAC's
+//! status register.
+//!
+//! This structure is a helper that's supposed to be given to XDMAC interrupt handler, or whatever
+//! piece of code you're using for checking the XDMAC status.
+
+use super::Xdmac;
+use samv71q21_pac::{xdmac::RegisterBlock, XDMAC};
+
+use bitvec::prelude::*;
+use heapless::Vec;
+
+/// Helper structure, use it to read XDMAC status.
+///
+/// After getting it's instance from [`Xdmac`], you can use it to check which channels have pending
+/// interrupts.
+///
+/// **Reading the status register clears the flags inside it, so you should always handle pending
+/// interrupts afterwards, as soon as the register is read.**
+pub struct StatusReader;
+
+impl StatusReader {
+ /// Returns an array of boolean values, where every n'th value indicates whether n'th channel
+ /// currently has pending interrupt.
+ ///
+ /// For example, to check if 5th, 7th and 10th XDMAC channels have an interrupt pending to be
+ /// serviced, do this:
+ ///
+ /// ```no_run
+ /// let interrupts_state = reader.get_pending_interrupts();
+ ///
+ /// if interrupts_state[5] {
+ /// // Channel 5 has pending IRQ...
+ /// }
+ ///
+ /// if interrupts_state[7] {
+ /// // Channel 7 has pending IRQ...
+ /// }
+ ///
+ /// if interrupts_state[10] {
+ /// // Channel 10 has pending IRQ...
+ /// }
+ /// ```
+ ///
+ /// **Reading the status register clears the flags inside it, so you should read it once, and
+ /// immediately handle all channels with pending IRQs.
+ ///
+ /// [`Channel`](super::Channel) provides similar structure for handling specific channel IRQs.
+ pub fn get_pending_channels(&mut self) -> Vec<bool, { Xdmac::SUPPORTED_CHANNELS }> {
+ let status = self.xdmac_registers_ref().gis.read();
+ let status_bits = status.bits();
+ status_bits.view_bits::<Lsb0>()[0..Xdmac::SUPPORTED_CHANNELS]
+ .iter()
+ .map(|channel_bit| channel_bit == true)
+ .collect()
+ }
+
+ /// Returns a reference to XDMAC's registers.
+ fn xdmac_registers_ref(&self) -> &RegisterBlock {
+ // Safety: This is safe, as the address of XDMAC register is guaranteed to be valid by Xdmac.
+ unsafe { &*Self::XDMAC_REGISTERS }
+ }
+
+ /// Pointer to XDMAC's registers.
+ const XDMAC_REGISTERS: *const RegisterBlock = XDMAC::PTR; | Move at the top? |
aerugo | github_2023 | others | 78 | n7space | Glamhoth | @@ -0,0 +1,503 @@
+//! Module with transfer-related items.
+
+use crate::utils::{BoundedU16, BoundedU32};
+use samv71q21_pac::xdmac::{
+ self,
+ xdmac_chid::cc::{
+ CSIZESELECT_A, DAMSELECT_A, DIFSELECT_A, DSYNCSELECT_A, DWIDTHSELECT_A, MBSIZESELECT_A,
+ SAMSELECT_A, SIFSELECT_A, SWREQSELECT_A, TYPESELECT_A,
+ },
+};
+
+/// A single block of XDMAC transfer.
+///
+/// # Safety
+///
+/// **XDMAC transfers are inherently unsafe, as they operate directly on raw memory.**
+///
+/// In order to make sure that an XDMAC transfer will be performed safely, **you** must guarantee
+/// that the transfer configuration is valid, and will be valid during XDMAC transfer.
+/// This mostly means that **you** must make sure the source and destination addresses point to
+/// valid, aligned locations/buffers in memory, and the transfer is configured appropriately for the
+/// buffer(s) size.
+///
+/// **XDMAC will not perform any safety checks. [`TransferBlock`]'s constructor and setters will
+/// perform basic safety checks wherever possible, but it's not possible to fully validate the
+/// transfer configuration that way. Breaking Rust/XDMAC safety invariants will lead to an
+/// undefined behavior. Drive responsibly.**
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+pub struct TransferBlock {
+ /// Transfer's source
+ source: TransferLocation,
+
+ /// Transfer's destination
+ destination: TransferLocation,
+
+ /// Amount of data units in the microblock.
+ ///
+ /// Data unit size is configured in `data_width` field.
+ microblock_length: MicroblockLength,
+
+ /// Amount of microblocks in a transfer block.
+ block_length: BlockLength,
+
+ /// Transfer's type (memory-to-memory, peripheral-to-memory, memory-to-peripheral), and
+ /// peripheral ID in case of peripheral transfer.
+ transfer_type: TransferType,
+
+ /// Memory burst size.
+ memory_burst_size: MemoryBurstSize,
+
+ /// Channel's chunk size, amount of data transferred in a single chunk.
+ chunk_size: ChunkSize,
+ | ```suggestion
/// Transfer's destination
destination: TransferLocation,
/// Amount of data units in the microblock.
///
/// Data unit size is configured in `data_width` field.
microblock_length: MicroblockLength,
/// Amount of microblocks in a transfer block.
block_length: BlockLength,
/// Transfer's type (memory-to-memory, peripheral-to-memory, memory-to-peripheral), and
/// peripheral ID in case of peripheral transfer.
transfer_type: TransferType,
/// Memory burst size.
memory_burst_size: MemoryBurstSize,
/// Channel's chunk size, amount of data transferred in a single chunk.
chunk_size: ChunkSize,
``` |
aerugo | github_2023 | others | 78 | n7space | Glamhoth | @@ -0,0 +1,503 @@
+//! Module with transfer-related items.
+
+use crate::utils::{BoundedU16, BoundedU32};
+use samv71q21_pac::xdmac::{
+ self,
+ xdmac_chid::cc::{
+ CSIZESELECT_A, DAMSELECT_A, DIFSELECT_A, DSYNCSELECT_A, DWIDTHSELECT_A, MBSIZESELECT_A,
+ SAMSELECT_A, SIFSELECT_A, SWREQSELECT_A, TYPESELECT_A,
+ },
+};
+
+/// A single block of XDMAC transfer.
+///
+/// # Safety
+///
+/// **XDMAC transfers are inherently unsafe, as they operate directly on raw memory.**
+///
+/// In order to make sure that an XDMAC transfer will be performed safely, **you** must guarantee
+/// that the transfer configuration is valid, and will be valid during XDMAC transfer.
+/// This mostly means that **you** must make sure the source and destination addresses point to
+/// valid, aligned locations/buffers in memory, and the transfer is configured appropriately for the
+/// buffer(s) size.
+///
+/// **XDMAC will not perform any safety checks. [`TransferBlock`]'s constructor and setters will
+/// perform basic safety checks wherever possible, but it's not possible to fully validate the
+/// transfer configuration that way. Breaking Rust/XDMAC safety invariants will lead to an
+/// undefined behavior. Drive responsibly.**
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+pub struct TransferBlock {
+ /// Transfer's source
+ source: TransferLocation,
+
+ /// Transfer's destination
+ destination: TransferLocation,
+
+ /// Amount of data units in the microblock.
+ ///
+ /// Data unit size is configured in `data_width` field.
+ microblock_length: MicroblockLength,
+
+ /// Amount of microblocks in a transfer block.
+ block_length: BlockLength,
+
+ /// Transfer's type (memory-to-memory, peripheral-to-memory, memory-to-peripheral), and
+ /// peripheral ID in case of peripheral transfer.
+ transfer_type: TransferType,
+
+ /// Memory burst size.
+ memory_burst_size: MemoryBurstSize,
+
+ /// Channel's chunk size, amount of data transferred in a single chunk.
+ chunk_size: ChunkSize,
+
+ /// Data width for a single transfer (smallest data unit).
+ data_width: DataWidth,
+}
+
+/// Transfer location (destination or source) configuration.
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+pub struct TransferLocation {
+ /// Transfer's address.
+ ///
+ /// **This address must point to a valid memory location, aligned to the transfer's data width.**
+ ///
+ /// If addressing mode is set to "incremented", address must point to a sufficiently large,
+ /// linear memory buffer.
+ ///
+ /// For peripheral transfers, this must point to the source/destination peripheral's data
+ /// register.
+ pub address: *const (),
+
+ /// Bus interface that allows the access to the memory pointed by `address`.
+ /// For details, check [`xdmac`](crate::xdmac#matrix-connections) module documentation.
+ pub interface: SystemBus,
+
+ /// Addressing mode - can either be fixed (address does not change during transfer), or
+ /// incrementing (address is incremented after reading a data unit).
+ pub addressing_mode: AddressingMode, | ```suggestion
pub address: *const (),
/// Bus interface that allows the access to the memory pointed by `address`.
/// For details, check [`xdmac`](crate::xdmac#matrix-connections) module documentation.
pub interface: SystemBus,
/// Addressing mode - can either be fixed (address does not change during transfer), or
/// incrementing (address is incremented after reading a data unit).
pub addressing_mode: AddressingMode,
``` |
aerugo | github_2023 | others | 77 | n7space | SteelPh0enix | @@ -34,29 +37,39 @@ static AERUGO: Aerugo = Aerugo::new();
/// System scheduler.
///
-/// Singleton instance of the scheduler. Used directly only by the [Aerugo]
-/// structure, which exposes some functionality via it's API.
+/// Singleton instance of the scheduler. Used directly only by the [Aerugo] structure.
static EXECUTOR: Executor = Executor::new(AERUGO.time_source());
/// Event manager.
///
-/// Singleton instance of the event manager. Used directly only by the [Aerugo]
-/// structure.
+/// Singleton instance of the event manager. Used directly only by the [Aerugo] structure.
static EVENT_MANAGER: EventManager = EventManager::new(AERUGO.time_source());
/// Time manager.
///
-/// Singleton instance of the time manager. Used directly only by the [Aerugo]
-/// structure.
+/// Singleton instance of the time manager. Used directly only by the [Aerugo] structure.
static CYCLIC_EXECUTION_MANAGER: CyclicExecutionManager =
CyclicExecutionManager::new(AERUGO.time_source());
+/// Execution monitor.
+///
+/// Singleton instance of the execution monitor. Used directly only by the [Aerugo] structure.
+static EXECUTION_MONITOR: ExecutionMonitor = ExecutionMonitor::new();
/// System structure.
///
/// This shouldn't be created by hand by the user or anywhere else in the code.
pub struct Aerugo {
+ /// Tasklets that are created in the system.
+ tasklets: InternalList<TaskletPtr, { Aerugo::TASKLET_COUNT }>,
+ /// IDs of tasklets that are created in the system.
+ tasklet_ids: InternalList<TaskletId, { Aerugo::TASKLET_COUNT }>,
/// Time source, responsible for creating timestamps.
time_source: TimeSource,
}
+/// This structure stores a list of tasklets that were created in a system. Adding new elements to
+/// that list is safe only during initialization (before scheduler is started) and this operation
+/// cannot be interrupted. | ```suggestion
/// must not be interrupted.
```
"cannot" suggest that it's not possible to do so. "must not" suggest that the user mustn't even try to do it.
Unless I'm wrong of course :D |
aerugo | github_2023 | others | 77 | n7space | SteelPh0enix | @@ -106,16 +121,49 @@ impl Aerugo {
/// its internal components and hardware.
fn run(&'static self) -> ! {
loop {
- EXECUTOR
+ let execution_data = EXECUTOR
.execute_next_tasklet()
.expect("Failure in tasklet execution");
+ if let Some(data) = execution_data {
+ // SAFETY: This is safe, as `EXECUTION_MONITOR` is not available from the IRQ context.
+ unsafe { EXECUTION_MONITOR.update(data) };
+ }
+
EVENT_MANAGER.activate_scheduled_events();
CYCLIC_EXECUTION_MANAGER.wake_tasklets();
Hal::feed_watchdog();
}
}
+
+ /// Adds new tasklet and new tasklet ID to the lists.
+ ///
+ /// # Parameters
+ /// * `tasklet` - Tasklet to add.
+ ///
+ /// # Result
+ /// `()` if successful, `SystemError` otherwise.
+ ///
+ /// # Safety
+ /// This is safe to call only during system initialization (before scheduler is started) and it
+ /// cannot be interrupted. | ```suggestion
/// must not be interrupted.
```
no critical section -> IRQ can happen |
aerugo | github_2023 | others | 77 | n7space | SteelPh0enix | @@ -212,12 +260,16 @@ impl InitApi for Aerugo {
step_fn: StepFn<T, C>,
storage: &'static TaskletStorage<T, C, COND_COUNT>,
) {
- // SAFETY: This is safe, as long as this function is called only during system initialization.
- unsafe {
- storage
+ // SAFETY: This is safe, as long as this function is called only during system initialization
+ // and can't be interrupted. | ```suggestion
```
that sounds weird, it can't be interrupted because it's in critical section - so it's always safe in that context. |
aerugo | github_2023 | others | 77 | n7space | SteelPh0enix | @@ -321,12 +373,16 @@ impl InitApi for Aerugo {
context: C,
storage: &'static TaskletStorage<T, C, COND_COUNT>,
) {
- // SAFETY: This is safe as long as this function is called only during system initialization.
- unsafe {
- storage
+ // SAFETY: This is safe as long as this function is called only during system initialization
+ // and can't be interrupted. | ```suggestion
```
that sounds weird, it can't be interrupted because it's in critical section - so it's always safe in that context. |
aerugo | github_2023 | others | 77 | n7space | SteelPh0enix | @@ -403,12 +459,13 @@ impl InitApi for Aerugo {
&'static self,
storage: &'static MessageQueueStorage<T, QUEUE_SIZE>,
) {
- // SAFETY: This is safe as long as this function is called only during system initialization.
- unsafe {
+ // SAFETY: This is safe as long as this function is called only during system initialization
+ // and can't be interrupted. | ```suggestion
```
that sounds weird, it can't be interrupted because it's in critical section - so it's always safe in that context. |
aerugo | github_2023 | others | 77 | n7space | SteelPh0enix | @@ -481,24 +538,25 @@ impl InitApi for Aerugo {
/// }
/// ```
fn create_event(&'static self, event_id: EventId, storage: &'static EventStorage) {
- // SAFETY: This is safe as long as this function is called only during system initialization.
- unsafe {
+ // SAFETY: This is safe as long as this function is called only during system initialization
+ // and can't be interrupted. | ```suggestion
```
that sounds weird, it can't be interrupted because it's in critical section - so it's always safe in that context. |
aerugo | github_2023 | others | 77 | n7space | SteelPh0enix | @@ -481,24 +538,25 @@ impl InitApi for Aerugo {
/// }
/// ```
fn create_event(&'static self, event_id: EventId, storage: &'static EventStorage) {
- // SAFETY: This is safe as long as this function is called only during system initialization.
- unsafe {
+ // SAFETY: This is safe as long as this function is called only during system initialization
+ // and can't be interrupted.
+ critical_section::with(|_| unsafe {
storage
.init(event_id)
.expect("Failed to initialize storage for event");
- }
+ });
let event = storage
.event()
.expect("Failed to get reference to the stored event");
- // SAFETY: This is safe as long as this function is called only during system
- // initialization.
- unsafe {
+ // SAFETY: This is safe as long as this function is called only during system initialization
+ // and can't be interrupted. | ```suggestion
```
that sounds weird, it can't be interrupted because it's in critical section - so it's always safe in that context. |
aerugo | github_2023 | others | 77 | n7space | SteelPh0enix | @@ -571,12 +629,13 @@ impl InitApi for Aerugo {
value: bool,
storage: &'static BooleanConditionStorage,
) {
- // SAFETY: This is safe as long as this function is called only during system initialization.
- unsafe {
+ // SAFETY: This is safe as long as this function is called only during system initialization
+ // and can't be interrupted. | ```suggestion
```
that sounds weird, it can't be interrupted because it's in critical section - so it's always safe in that context. |
aerugo | github_2023 | others | 77 | n7space | SteelPh0enix | @@ -645,16 +704,17 @@ impl InitApi for Aerugo {
let tasklet = tasklet_handle.tasklet();
let queue = queue_handle.queue();
- // SAFETY: This is safe as long as this function is called only during system initialization.
- unsafe {
+ // SAFETY: This is safe as long as this function is called only during system initialization
+ // and can't be interrupted. | ```suggestion
```
that sounds weird, it can't be interrupted because it's in critical section - so it's always safe in that context. |
aerugo | github_2023 | others | 77 | n7space | SteelPh0enix | @@ -727,31 +787,33 @@ impl InitApi for Aerugo {
) {
let tasklet = tasklet_handle.tasklet();
+ // SAFETY: This is safe as long as this function is called only during system initialization.
let event_set = unsafe {
EVENT_MANAGER
.create_event_set(tasklet.ptr())
.expect("Failed to create event set")
};
- for event_id in events {
- let event = EVENT_MANAGER
- .get_event(event_id)
- .unwrap_or_else(|| panic!("Failed to get event with ID '{}'", event_id));
+ // SAFETY: This is safe as long as this function is called only during system initialization
+ // and can't be interrupted. | ```suggestion
```
that sounds weird, it can't be interrupted because it's in critical section - so it's always safe in that context. |
aerugo | github_2023 | others | 77 | n7space | SteelPh0enix | @@ -810,16 +872,17 @@ impl InitApi for Aerugo {
let tasklet = tasklet_handle.tasklet();
let condition = condition_handle.condition();
- // SAFETY: This is safe as long as this function is called only during system initialization.
- unsafe {
+ // SAFETY: This is safe as long as this function is called only during system initialization
+ // and can't be interrupted. | ```suggestion
```
that sounds weird, it can't be interrupted because it's in critical section - so it's always safe in that context. |
aerugo | github_2023 | others | 77 | n7space | SteelPh0enix | @@ -874,16 +937,17 @@ impl InitApi for Aerugo {
) {
let tasklet = tasklet_handle.tasklet();
- // SAFETY: This is safe as long as this function is called only during system initialization.
- unsafe {
+ // SAFETY: This is safe as long as this function is called only during system initialization
+ // and can't be interrupted. | ```suggestion
```
that sounds weird, it can't be interrupted because it's in critical section - so it's always safe in that context. |
aerugo | github_2023 | others | 77 | n7space | SteelPh0enix | @@ -945,16 +1009,29 @@ impl InitApi for Aerugo {
) {
let tasklet = tasklet_handle.tasklet();
- // SAFETY: This is safe as long as this function is called only during system initialization.
- unsafe {
+ // SAFETY: This is safe as long as this function is called only during system initialization
+ // and can't be interrupted. | ```suggestion
```
that sounds weird, it can't be interrupted because it's in critical section - so it's always safe in that context. |
aerugo | github_2023 | others | 71 | n7space | Glamhoth | @@ -1,20 +1,30 @@
//! Module containing meta-traits and their implementations for HAL UART driver
-pub(super) use crate::pac::uart0::RegisterBlock;
+use crate::pac::uart0::RegisterBlock;
pub use crate::pac::{UART0, UART1, UART2, UART3, UART4};
/// Trait for PAC UART instances.
///
/// This trait erases the type of UART instance, so it can be used as
/// generic argument for [`UART`](super::UART) instead of concrete type.
-pub trait UartMetadata {
+pub trait UARTMetadata { | To change back as discussed |
aerugo | github_2023 | others | 71 | n7space | Glamhoth | @@ -0,0 +1,114 @@
+//! UART Reader implementation.
+//!
+//! Reader can be used to receive data via UART.
+
+use core::marker::PhantomData;
+
+use crate::utils::wait_until;
+
+use super::Error;
+use super::Status;
+use super::UARTMetadata;
+
+/// This structure can be used to receive data via UART.
+///
+/// Reader instance is created by [`UART`](super::UART) and can be taken from it using
+/// [`UART::take_reader`](super::UART::take_reader) method.
+/// Once taken, it can be put inside UART again using [`UART::put_reader`](super::UART::put_reader)
+/// for storage.
+///
+/// # Safety
+/// If Reader is used while UART receiver is disabled, it will always return [`Error::TimedOut`] on
+/// blocking operations.
+///
+/// Reader is thread-safe, as it doesn't share any (mutable) state with UART or Writer, and
+/// there can be only a single instance of Reader per UART.
+pub struct Reader<Instance: UARTMetadata> {
+ /// UART instance marker.
+ _uart: PhantomData<Instance>,
+}
+
+impl<Instance: UARTMetadata> Reader<Instance> {
+ /// Receives a single byte. Blocks until a byte is received, or timeout is hit.
+ ///
+ /// If you check "receiver ready" flag manually (for example, in IRQ handler), you could use
+ /// [`Reader::get_received_byte`] instead, as it doesn't perform the additional status check.
+ /// However, this function will also work fine in that context, it'll just double-check that.
+ ///
+ /// This function requires mutable access to Reader, as reading the character from RX holding
+ /// register while "receiver ready" flag is set will reset it's state and clear this flag.
+ ///
+ /// # Parameters
+ /// * `timeout` - Maximum amount of UART status checks before declaring timeout.
+ ///
+ /// # Returns
+ /// `Ok(u8)` if reception was successful, with the value of received byte.
+ /// `Err(())` on timeout.
+ pub fn receive_byte(&mut self, timeout: u32) -> Result<u8, Error> {
+ self.wait_for_byte_reception(timeout)
+ // This is safe, as we just verified that receiver is ready and RX holding register
+ // contains a received byte.
+ .map_or(Err(Error::TimedOut), |_| unsafe {
+ Ok(self.get_received_byte())
+ })
+ }
+
+ /// Returns the byte currently stored in received character register.
+ ///
+ /// This function is meant to be used primarily in interrupt handlers, as a slightly faster
+ /// version of [`Reader::receive_byte`] that avoids double-checking the status register.
+ ///
+ /// This function requires mutable access to Reader, as reading the character from RX holding
+ /// register while "receiver ready" flag is set will reset it's state and clear this flag.
+ ///
+ /// # Safety
+ /// This function doesn't wait for UART to indicate that there's data in RX register, and will
+ /// return `0` if there's no received data there, instead of an error.
+ /// Therefore, it's reasonable to use only if you manually checked if there's new data in UART
+ /// RX register (by checking "receiver ready" status flag). If you do that, then this function
+ /// becomes safe to use.
+ ///
+ /// # Returns
+ /// Received byte, if UART status flag indicates that there's one in RX register.
+ /// `0`` otherwise.
+ #[inline(always)]
+ pub unsafe fn get_received_byte(&mut self) -> u8 { | Shouldn't this maybe return `Option<u8>`? It looks like it's impossible to distinguish between `there is a byte, and the value is 0` and `there is no byte` |
aerugo | github_2023 | others | 71 | n7space | Glamhoth | @@ -0,0 +1,114 @@
+//! UART Reader implementation.
+//!
+//! Reader can be used to receive data via UART.
+
+use core::marker::PhantomData;
+
+use crate::utils::wait_until;
+
+use super::Error;
+use super::Status;
+use super::UARTMetadata;
+
+/// This structure can be used to receive data via UART.
+///
+/// Reader instance is created by [`UART`](super::UART) and can be taken from it using
+/// [`UART::take_reader`](super::UART::take_reader) method.
+/// Once taken, it can be put inside UART again using [`UART::put_reader`](super::UART::put_reader)
+/// for storage.
+///
+/// # Safety
+/// If Reader is used while UART receiver is disabled, it will always return [`Error::TimedOut`] on
+/// blocking operations.
+///
+/// Reader is thread-safe, as it doesn't share any (mutable) state with UART or Writer, and
+/// there can be only a single instance of Reader per UART.
+pub struct Reader<Instance: UARTMetadata> {
+ /// UART instance marker.
+ _uart: PhantomData<Instance>,
+}
+
+impl<Instance: UARTMetadata> Reader<Instance> {
+ /// Receives a single byte. Blocks until a byte is received, or timeout is hit.
+ ///
+ /// If you check "receiver ready" flag manually (for example, in IRQ handler), you could use
+ /// [`Reader::get_received_byte`] instead, as it doesn't perform the additional status check.
+ /// However, this function will also work fine in that context, it'll just double-check that.
+ ///
+ /// This function requires mutable access to Reader, as reading the character from RX holding
+ /// register while "receiver ready" flag is set will reset it's state and clear this flag.
+ ///
+ /// # Parameters
+ /// * `timeout` - Maximum amount of UART status checks before declaring timeout.
+ ///
+ /// # Returns
+ /// `Ok(u8)` if reception was successful, with the value of received byte.
+ /// `Err(())` on timeout.
+ pub fn receive_byte(&mut self, timeout: u32) -> Result<u8, Error> {
+ self.wait_for_byte_reception(timeout)
+ // This is safe, as we just verified that receiver is ready and RX holding register
+ // contains a received byte.
+ .map_or(Err(Error::TimedOut), |_| unsafe {
+ Ok(self.get_received_byte())
+ })
+ }
+
+ /// Returns the byte currently stored in received character register.
+ ///
+ /// This function is meant to be used primarily in interrupt handlers, as a slightly faster
+ /// version of [`Reader::receive_byte`] that avoids double-checking the status register.
+ ///
+ /// This function requires mutable access to Reader, as reading the character from RX holding
+ /// register while "receiver ready" flag is set will reset it's state and clear this flag.
+ ///
+ /// # Safety
+ /// This function doesn't wait for UART to indicate that there's data in RX register, and will
+ /// return `0` if there's no received data there, instead of an error.
+ /// Therefore, it's reasonable to use only if you manually checked if there's new data in UART
+ /// RX register (by checking "receiver ready" status flag). If you do that, then this function
+ /// becomes safe to use.
+ ///
+ /// # Returns
+ /// Received byte, if UART status flag indicates that there's one in RX register.
+ /// `0`` otherwise.
+ #[inline(always)]
+ pub unsafe fn get_received_byte(&mut self) -> u8 {
+ Instance::registers().rhr.read().rxchr().bits()
+ }
+
+ /// Returns current UART status.
+ ///
+ /// Error flags **must** be cleared manually by calling [`Reader::reset_status`].
+ pub fn status(&self) -> Status {
+ Instance::registers().sr.read().into()
+ }
+
+ /// Resets UART status by clearing status flags.
+ /// **This function should usually be called immediately after reading the status.** | ```suggestion
/// Resets UART status by clearing status flags.
///
/// **This function should usually be called immediately after reading the status.**
``` |
aerugo | github_2023 | others | 71 | n7space | Glamhoth | @@ -0,0 +1,162 @@
+//! UART Writer implementation.
+//!
+//! Writer can be used to transmit data via UART.
+
+use core::marker::PhantomData;
+
+use crate::utils::wait_until;
+
+use super::Error;
+use super::Status;
+use super::UARTMetadata;
+
+/// This structure can be used to transmit data via UART.
+///
+/// Writer instance is created by [`UART`](super::UART) and can be taken from it using
+/// [`UART::take_writer`](super::UART::take_writer) method.
+/// Once taken, it can be put inside UART again using [`UART::put_writer`](super::UART::put_writer)
+/// for storage.
+///
+/// # Safety
+/// If Writer is used while UART transmitter is disabled, it will always return [`Error::TimedOut`]
+/// on blocking operations.
+///
+/// Writer is thread-safe, as it doesn't share any (mutable) state with UART or Reader, and
+/// there can be only a single instance of Writer per UART.
+pub struct Writer<Instance: UARTMetadata> {
+ /// UART instance marker.
+ _uart: PhantomData<Instance>,
+}
+
+impl<Instance: UARTMetadata> Writer<Instance> {
+ /// Transmits a single byte.
+ /// Waits for UART TX register to be empty. | ```suggestion
/// Transmits a single byte.
///
/// Waits for UART TX register to be empty.
``` |
aerugo | github_2023 | others | 69 | n7space | Glamhoth | @@ -0,0 +1,425 @@
+//! Module with structures and enumerations representing UART configuration.
+
+use samv71q21_pac::uart0::mr::{BRSRCCKSELECT_A, CHMODESELECT_A, FILTERSELECT_A, PARSELECT_A};
+
+use super::{Frequency, OVERSAMPLING_RATIO};
+
+/// Structure representing UART configuration.
+///
+/// Public members of this structure can be changed directly, or
+/// via chained functions `with_X`, where `X` is the member name.. | Is this neccessary as there are no public members? |
aerugo | github_2023 | others | 69 | n7space | Glamhoth | @@ -0,0 +1,425 @@
+//! Module with structures and enumerations representing UART configuration.
+
+use samv71q21_pac::uart0::mr::{BRSRCCKSELECT_A, CHMODESELECT_A, FILTERSELECT_A, PARSELECT_A};
+
+use super::{Frequency, OVERSAMPLING_RATIO};
+
+/// Structure representing UART configuration.
+///
+/// Public members of this structure can be changed directly, or
+/// via chained functions `with_X`, where `X` is the member name..
+///
+/// Private members can be accessed and modified only via dedicated
+/// functions, as their values may depend on each other.
+///
+/// This structure makes sure that provided UART configuration is
+/// always correct. It should not be possible to create [Config]
+/// instance that contains invalid configuration. Linked fields are
+/// always updated when one of them is being changed (for example,
+/// clock divider is always updated when changing baudrate or source
+/// clock frequency).
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+pub struct Config {
+ /// Baudrate.
+ baudrate: u32,
+ /// clock source.
+ clock_source: ClockSource,
+ /// clock source frequency.
+ clock_source_frequency: Frequency,
+ /// Parity bit configuration.
+ parity_bit: ParityBit,
+ /// Baudrate clock divider.
+ clock_divider: u16,
+}
+
+/// Structure representing additional UART settings, applicable only
+/// for receiver or bidirectional mode.
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+pub struct ReceiverConfig {
+ /// If `true`, UART will filter the receive line using a three-sample
+ /// filter (16x-bit clock, 2 over 3 majority).
+ pub rx_filter_enabled: bool,
+}
+
+/// Enumeration representing configuration error.
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+pub enum ConfigurationError {
+ /// Specified baudrate is too low, and it would result in clock divider
+ /// larger than maximum possible value.
+ BaudrateTooLow,
+ /// Specified baudrate is too high, and it would cause the clock divider
+ /// to be zero, effectively disabling baudrate clock.
+ BaudrateTooHigh,
+}
+
+/// Enumeration representing UART clock source.
+///
+/// # Safety
+/// Consult UART module documentation, or SAMV71 datasheet (section 46.5.1 "Baud Rate Generator")
+/// if you intend to change processor's clocks frequency while UART is enabled.
+///
+/// The peripheral clock frequency must be at least three times higher than PCK frequency.
+#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)]
+pub enum ClockSource {
+ /// UART is driven using standard peripheral clock.
+ /// You must enable this clock via PMC before using UART.
+ #[default]
+ PeripheralClock,
+ /// UART is driven using programmable clock (PCK4).
+ /// You must enable and configure this clock via PMC before using UART.
+ ProgrammableClock,
+}
+
+/// Enumeration representing UART parity bit configuration.
+#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)]
+pub enum ParityBit {
+ /// Even parity
+ Even,
+ /// Odd parity
+ Odd,
+ /// Parity forced to 0
+ Space,
+ /// Parity forced to 1
+ Mark,
+ /// No parity bit
+ #[default]
+ None,
+}
+
+/// Enumeration representing available loopback modes.
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+pub enum LoopbackMode {
+ /// Loopback disabled, UART operates normally
+ None,
+ /// RX line is internally connected to TX line.
+ /// Transmitter is disconnected from TX line.
+ AutomaticEcho,
+ /// Transmitter is internally connected to receiver.
+ /// RX and TX lines are disconnected from receiver and transmitter.
+ /// TX line is pulled to Vdd.
+ LocalLoopback,
+ /// RX line is internally connected to TX line.
+ /// Transmitter and receiver are disconnected from TX and RX lines.
+ /// Receiver is pulled to Vdd.
+ RemoteLoopback,
+}
+
+impl Config {
+ /// Creates [`Config`] instance with provided baudrate and following defaults:
+ /// * Clock source is peripheral clock (must be enabled via PMC)
+ /// * No parity bit
+ ///
+ /// This function should be used to create new instance of [`Config`].
+ ///
+ /// To change other settings, use appropriate chained methods of [`Config`],
+ /// or change their fields directly.
+ ///
+ /// Provided baudrate will be internally re-calculated after calculating clock
+ /// divider for it, because clock divider might not be able to precisely
+ /// scale the clock for it. If you need to know the precise baudrate, you can
+ /// check it with [`Config::baudrate`] method.
+ ///
+ /// # Parameters
+ /// * `baudrate` - UART Baudrate in bits per second.
+ /// * `peripheral_clock_frequency` - Frequency of peripheral clock.
+ ///
+ /// # Returns
+ /// Ok([Config]), or Err([ConfigurationError]) if provided configuration is invalid.
+ pub fn new(
+ baudrate: u32,
+ peripheral_clock_frequency: Frequency,
+ ) -> Result<Self, ConfigurationError> {
+ let clock_divider = calculate_clock_divider(baudrate, peripheral_clock_frequency)?;
+ // Clock divider changed, baudrate must be recalculated
+ let baudrate = calculate_baudrate(clock_divider, peripheral_clock_frequency);
+
+ Ok(Self {
+ baudrate,
+ clock_source: ClockSource::PeripheralClock,
+ clock_source_frequency: peripheral_clock_frequency,
+ parity_bit: ParityBit::None,
+ clock_divider,
+ })
+ }
+
+ /// Returns configured baudrate.
+ pub fn baudrate(&self) -> u32 {
+ self.baudrate
+ }
+
+ /// Consumes config and returns a new instance with specified baudrate.
+ ///
+ /// Provided baudrate will be internally re-calculated after calculating clock
+ /// divider for it, because clock divider might not be able to precisely
+ /// scale the clock for it. If you need to know the precise baudrate, you can
+ /// check it with [`Config::baudrate`] method.
+ ///
+ /// Use this to chain-construct new config, or create modified instance of
+ /// existing one, for example:
+ /// ```
+ /// # let old_config = Config::new(9600, 10_000_000.to_Hz());
+ /// let config = old_config.with_baudrate(115200);
+ /// ```
+ ///
+ /// You can chain multiple configuration methods.
+ pub fn with_baudrate(self, baudrate: u32) -> Result<Self, ConfigurationError> {
+ let clock_divider = calculate_clock_divider(baudrate, self.clock_source_frequency)?;
+ // Clock divider changed, baudrate must be recalculated
+ let baudrate = calculate_baudrate(clock_divider, self.clock_source_frequency);
+
+ Ok(Self {
+ baudrate,
+ clock_divider,
+ ..self
+ })
+ }
+
+ /// Returns configured clock source.
+ pub fn clock_source(&self) -> ClockSource {
+ self.clock_source
+ }
+
+ /// Consumes config and returns a new instance with specified clock source.
+ ///
+ /// Use this to chain-construct new config, or create modified instance of
+ /// existing one, for example:
+ /// ```
+ /// let config = Config::with_baudrate(9600, 12_000_000.to_Hz())
+ /// .with_clock_source(ClockSource::ProgrammableClock);
+ /// ```
+ ///
+ /// You can chain multiple configuration methods.
+ pub fn with_clock_source(self, clock_source: ClockSource) -> Self {
+ Self {
+ clock_source,
+ ..self
+ }
+ }
+
+ /// Returns configured frequency of clock source driving the baudrate.
+ pub fn clock_source_frequency(&self) -> Frequency {
+ self.clock_source_frequency
+ }
+
+ /// Consumes config and returns a new instance with specified clock source frequency.
+ ///
+ /// Original baudrate will be internally re-calculated after calculating clock
+ /// divider for it, because clock divider might not be able to precisely
+ /// scale the clock for it. If you need to know the precise baudrate, you can
+ /// check it with [`Config::baudrate`] method.
+ ///
+ /// Use this to chain-construct new config, or create modified instance of
+ /// existing one, for example:
+ /// ```
+ /// # let old_config = Config::new(9600, 10_000_000.to_Hz());
+ /// let config = old_config.with_clock_source_frequency(12_000_000.to_Hz());
+ /// ```
+ ///
+ /// You can chain multiple configuration methods.
+ pub fn with_clock_source_frequency(
+ self,
+ clock_source_frequency: Frequency,
+ ) -> Result<Self, ConfigurationError> {
+ let clock_divider = calculate_clock_divider(self.baudrate, clock_source_frequency)?;
+ // Clock divider changed, baudrate must be recalculated
+ let baudrate = calculate_baudrate(clock_divider, clock_source_frequency);
+
+ Ok(Self {
+ baudrate,
+ clock_source_frequency,
+ clock_divider,
+ ..self
+ })
+ }
+
+ /// Returns configured parity bit
+ pub fn parity_bit(&self) -> ParityBit {
+ self.parity_bit
+ }
+
+ /// Consumes config and returns a new instance with specified parity bit.
+ ///
+ /// Use this to chain-construct new config, or create modified instance of
+ /// existing one, for example:
+ /// ```
+ /// let config = Config::with_baudrate(9600, 12_000_000.to_Hz())
+ /// .with_parity_bit(ParityBit::Odd);
+ /// ```
+ ///
+ /// You can chain multiple configuration methods.
+ pub fn with_parity_bit(self, parity_bit: ParityBit) -> Self {
+ Self { parity_bit, ..self }
+ }
+
+ /// Returns configured clock divider.
+ ///
+ /// Clock divider is automatically updated by [`Config::new`] and all the functions
+ /// that modify baudrate or clock source frequency.
+ pub fn clock_divider(&self) -> u16 {
+ self.clock_divider
+ }
+
+ /// Consumes config and returns a new instance with specified clock source divider.
+ ///
+ /// Setting the clock divider automatically updates the baudrate stored in config.
+ ///
+ /// Use this to chain-construct new config, or create modified instance of
+ /// existing one, for example:
+ /// ```
+ /// # let old_config = Config::new(9600, 10_000_000.to_Hz());
+ /// let config = old_config.with_clock_divider(100);
+ /// ```
+ ///
+ /// You can chain multiple configuration methods.
+ ///
+ /// # Safety
+ /// Setting clock divider to `0` disables baudrate clock, which makes it a potentially
+ /// unwanted side-effect.
+ /// Therefore, this function is `unsafe`. Use it at your own peril. If you want to
+ /// set or modify the baudrate, use baudrate-related functions which prevent UART from
+ /// having it's baudrate clock disabled in this way.
+ pub unsafe fn with_clock_divider(self, clock_divider: u16) -> Self {
+ let baudrate = calculate_baudrate(clock_divider, self.clock_source_frequency);
+
+ Self {
+ baudrate,
+ clock_divider,
+ ..self
+ }
+ }
+}
+
+/// Validates provided baudrate and calculates clock divider.
+///
+/// If you intend to configure the UART, you should use [`Config`] or one
+/// of the [`UART`](super::UART) methods instead, as they perform baudrate validation too.
+///
+/// This function should be used only if you want to validate UART baudrate
+/// manually, as there's plenty of methods for baudrate configuration
+/// implemented by UART driver that use this function underneath.
+///
+/// # Parameters
+/// * `baudrate` - Baudrate to be validated.
+/// * `baudrate_clock_frequency` - Frequency of the clock driving the baudrate.
+///
+/// # Returns
+/// `Ok(u16)` with clock divider if provided baudrate is valid for provided clock
+/// frequency, `Err(ConfigurationError)` otherwise.
+pub const fn calculate_clock_divider(
+ baudrate: u32,
+ baudrate_clock_frequency: Frequency,
+) -> Result<u16, ConfigurationError> {
+ let divider = baudrate_clock_frequency.to_Hz() / (OVERSAMPLING_RATIO * baudrate);
+
+ // If provided baudrate is larger than clock source frequency / 16, it will
+ // cause the divider to be truncated to 0, which will disable the baudrate clock.
+ // If you intend to disable the baudrate clock that way, set the divider to 0
+ // directly, using `UART::set_clock_divider`.
+ if divider == 0 {
+ return Err(ConfigurationError::BaudrateTooHigh);
+ }
+
+ // If provided baudrate is small enough, it will cause the divider to be
+ // larger than (2^16) - 1, which would cause an integer overflow.
+ if divider > (u16::MAX as u32) {
+ return Err(ConfigurationError::BaudrateTooLow);
+ }
+
+ // This cast is safe, because we validated that `divider` can be represented
+ // as 16-bit unsigned.
+ Ok(divider as u16)
+}
+
+/// Calculates and returns UART baudrate.
+///
+/// # Parameters
+/// * `clock_divider` - Baudrate clock divider. **Cannot be 0**, as it will cause this function to panic.
+/// * `baudrate_clock_frequency` - Frequency of the clock driving the baudrate.
+///
+/// # Returns
+/// Baudrate in bits per second.
+pub const fn calculate_baudrate(clock_divider: u16, baudrate_clock_frequency: Frequency) -> u32 {
+ baudrate_clock_frequency.to_Hz() / (OVERSAMPLING_RATIO * (clock_divider as u32))
+}
+
+/// Converts RX filter configuration from PAC to boolean value.
+#[inline(always)]
+pub(super) const fn rx_filter_config_to_bool(config: FILTERSELECT_A) -> bool {
+ match config {
+ FILTERSELECT_A::DISABLED => false,
+ FILTERSELECT_A::ENABLED => true,
+ }
+}
+
+/// Converts boolean value into RX filter configuration from PAC.
+#[inline(always)]
+pub(super) const fn bool_to_rx_filter_config(filter_enabled: bool) -> FILTERSELECT_A {
+ match filter_enabled {
+ true => FILTERSELECT_A::ENABLED,
+ false => FILTERSELECT_A::DISABLED,
+ }
+} | Maybe impl `Into`/`From` instead? |
aerugo | github_2023 | others | 69 | n7space | Glamhoth | @@ -0,0 +1,106 @@
+#![no_std]
+#![no_main]
+
+extern crate cortex_m;
+extern crate cortex_m_rt as rt;
+extern crate panic_rtt_target;
+
+use aerugo::hal::drivers::pio::{pin::Peripheral, Port};
+use aerugo::hal::drivers::pmc::config::PeripheralId;
+use aerugo::hal::drivers::uart::{Bidirectional, Config, NotConfigured, ReceiverConfig, UART};
+use aerugo::hal::user_peripherals::{PIOD, PMC, UART4};
+use aerugo::time::RateExtU32;
+use aerugo::{
+ logln, Aerugo, Duration, InitApi, RuntimeApi, SystemHardwareConfig, TaskletConfig,
+ TaskletStorage,
+};
+use rt::entry;
+
+fn uart_task(_: (), context: &mut UartTaskContext, _: &'static dyn RuntimeApi) {
+ let uart = &mut context.uart;
+ uart.transmit_byte(context.byte_to_transmit, 1_000_000)
+ .unwrap();
+ let received_byte = uart.receive_byte(1_000_000).unwrap();
+ logln!( | ```suggestion
let uart = &mut context.uart;
uart.transmit_byte(context.byte_to_transmit, 1_000_000)
.unwrap();
let received_byte = uart.receive_byte(1_000_000).unwrap();
logln!(
``` |
aerugo | github_2023 | others | 68 | n7space | SteelPh0enix | @@ -20,9 +20,25 @@ pub(crate) struct CyclicExecution {
impl CyclicExecution {
/// Creates new instance.
- pub(crate) fn new(tasklet: TaskletPtr, period: Option<Duration>) -> Self {
+ ///
+ /// # Parameters
+ /// * `tasklet` - Tasklet which should be executed cyclically.
+ /// * `period` - Period of execution, `None` if should be woke whenever possible. | ```suggestion
/// * `period` - Period of execution, `None` if should be awaken whenever possible.
``` |
aerugo | github_2023 | others | 68 | n7space | SteelPh0enix | @@ -20,9 +20,25 @@ pub(crate) struct CyclicExecution {
impl CyclicExecution {
/// Creates new instance.
- pub(crate) fn new(tasklet: TaskletPtr, period: Option<Duration>) -> Self {
+ ///
+ /// # Parameters
+ /// * `tasklet` - Tasklet which should be executed cyclically.
+ /// * `period` - Period of execution, `None` if should be woke whenever possible.
+ /// * `offset` - Offset of first execution after scheduled start, `None` if should be executed instantly.
+ /// executed instantly. | ```suggestion
``` |
aerugo | github_2023 | others | 68 | n7space | SteelPh0enix | @@ -44,6 +44,11 @@ impl CyclicExecutionManager {
/// * `tasklet`: Tasklet that will be executed
/// * `period`: Period for execution, `None` if tasklet shall be executed without waits
///
+ /// # Parameters
+ /// * `tasklet` - Tasklet which should be executed cyclically.
+ /// * `period` - Period of execution, `None` if should be woke whenever possible. | ```suggestion
/// * `period` - Period of execution, `None` if should be awaken whenever possible.
``` |
aerugo | github_2023 | others | 68 | n7space | SteelPh0enix | @@ -101,3 +101,5 @@ impl DataProvider<bool> for BooleanCondition {
false
}
}
+
+unsafe impl Sync for BooleanCondition {} | safety comment? |
aerugo | github_2023 | others | 68 | n7space | SteelPh0enix | @@ -79,3 +79,5 @@ impl PartialEq for Event {
self.id.eq(&other.id)
}
}
+
+unsafe impl Sync for Event {} | safety comment? |
aerugo | github_2023 | others | 68 | n7space | SteelPh0enix | @@ -120,3 +125,5 @@ mod tests {
assert_eq!(queue100u64_size, stub_size);
}
}
+
+unsafe impl<T, const N: usize> Sync for MessageQueue<T, N> {} | safety comment? |
aerugo | github_2023 | others | 68 | n7space | SteelPh0enix | @@ -0,0 +1,91 @@
+#![feature(prelude_import)] | wtf happened here |
aerugo | github_2023 | others | 67 | n7space | SteelPh0enix | @@ -20,12 +20,12 @@ fn dummy_task(_: (), context: &mut DummyTaskContext, api: &'static dyn RuntimeAp
context.acc = context.acc.wrapping_add(1);
if context.acc == 1 {
- let startup_time = api.get_startup_time();
- let startup_secs = startup_time.unwrap().to_secs();
- let startup_ms = startup_time.unwrap().to_millis() % 1000;
- let startup_us = startup_time.unwrap().to_micros() % (1000 * 1000);
+ let startup_time = api.get_startup_duration(); | `startup_time` -> `startup_duration`? |
aerugo | github_2023 | others | 67 | n7space | SteelPh0enix | @@ -21,6 +21,8 @@ use crate::time::{Duration, Instant};
/// Failing to adhere to this requirement will invalidate `Sync` trait implementation of this type,
/// unless it's explicitly guaranteed by design that mutations will not occur during interrupt's execution.
pub(crate) struct TimeSource {
+ /// Time system's scheduler started. | that syntax is weird |
aerugo | github_2023 | others | 63 | n7space | Glamhoth | @@ -0,0 +1,100 @@
+#![no_std]
+#![no_main]
+
+extern crate aerugo;
+extern crate calldwell;
+extern crate cortex_m;
+extern crate cortex_m_rt as rt;
+
+use aerugo::hal::user_peripherals::{CPUID, SCB};
+use aerugo::{Aerugo, InitApi, SystemHardwareConfig};
+use calldwell::write_str;
+use rt::entry;
+
+// Align for dcache line size
+#[repr(align(32))]
+#[derive(Debug)]
+struct DummyData {
+ // D-cache line size is 32B, data must be multiple of 32B
+ // 128B spans 4 d-cache lines.
+ pub data: [u8; 128],
+}
+
+impl Default for DummyData {
+ fn default() -> Self {
+ Self { data: [0u8; 128] }
+ }
+}
+
+fn perform_dcache_tests(scb: &mut SCB, cpuid: &mut CPUID) {
+ // Check cache enabling/disabling
+ scb.enable_dcache(cpuid);
+ assert!(
+ SCB::dcache_enabled(),
+ "D-Cache was enabled, yet it's reported as disabled"
+ );
+
+ // Clearing and invalidating cache has no observable effect outside of processor's core.
+ // Exact behavior was verified by analysis.
+ let mut dummy = DummyData::default();
+ // Safety: This is safe as long as `dummy` is aligned to 32 bytes,
+ // and it's data length is a multiple of 32.
+ assert!(core::mem::align_of::<DummyData>() % 32 == 0);
+ assert!(core::mem::size_of::<DummyData>() % 32 == 0);
+ unsafe {
+ scb.clean_dcache_by_slice(&dummy.data);
+ scb.invalidate_dcache_by_slice(&mut dummy.data);
+ }
+
+ scb.clean_invalidate_dcache(cpuid);
+
+ // TODO: Why does this crash/halt the test? | I think now we know why? |
aerugo | github_2023 | others | 63 | n7space | Glamhoth | @@ -0,0 +1,25 @@
+// Test scenario:
+// - Verify that icache can be disabled/enabled/cleared
+// - Verify that dcache can be disabled/enabled/cleared
+
+/// @SRS{ROS-FUN-BSP-SCB-020}
+/// @SRS{ROS-FUN-BSP-SCB-030} | It's not covered for now, right? |
aerugo | github_2023 | others | 60 | n7space | Glamhoth | @@ -0,0 +1,175 @@
+//! Module containing Parallel I/O (PIO) pin items for PIO-controlled I/O pin in output mode.
+
+use embedded_hal::digital::{OutputPin, PinState, StatefulOutputPin, ToggleableOutputPin};
+
+use super::{pin::OutputMode, Pin};
+
+/// Enumeration listing available pin driving modes.
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+pub enum DriveMode {
+ /// Pin is driven both to the high- and low-level.
+ PushPull,
+ /// Pin can be driven only to low-level, driving it to
+ /// high-level results in floating pin. Using pull resistors
+ /// (external or internal) is recommended with this mode.
+ OpenDrain,
+}
+
+/// Implementation of OutputPin trait from `embedded-hal` crate.
+///
+/// `Pin<_>::Error` type is [`core::convert::Infallible`]. These functions cannot fail.
+/// As long as pin is in output mode, it always can be set high or low.
+impl OutputPin for Pin<OutputMode> {
+ /// Drives the pin low.
+ ///
+ /// In both push-pull and open-drain mode, this will pull the pin to GND.
+ fn set_low(&mut self) -> Result<(), Self::Error> {
+ // Safety: See `Pin::pin_mask` description.
+ self.registers_ref()
+ .codr
+ .write(|w| unsafe { w.bits(self.pin_mask()) });
+ Ok(())
+ }
+
+ /// Drives the pin high.
+ ///
+ /// In push-pull mode, this will set the pin to high state by pulling it to VDD.
+ ///
+ /// In open-drain mode, this will keep the pin floating, and since it will usually
+ /// be pulled up, it should also indirectly result in pulling the pin to high state.
+ fn set_high(&mut self) -> Result<(), Self::Error> {
+ // Safety: See `Pin::pin_mask` description.
+ self.registers_ref()
+ .sodr
+ .write(|w| unsafe { w.bits(self.pin_mask()) });
+ Ok(())
+ }
+}
+
+/// Implementation of OutputPin trait from `embedded-hal` crate.
+///
+/// `Pin<_>::Error` type is [`core::convert::Infallible`]. These functions cannot fail.
+/// As long as pin is in output mode, it's set state can always be looked up.
+impl StatefulOutputPin for Pin<OutputMode> {
+ /// Returns true if the pin is currently driven "high".
+ ///
+ /// **It's important to notice that this function does not read pin's state,
+ /// it will only inform whether it's set to be driven high, or not.**
+ /// If you want to read current logic state of the pin, use [`Pin<_>::is_high`]/[`Pin<_>::is_low`]/[`Pin<_>::state`].
+ fn is_set_high(&self) -> Result<bool, Self::Error> {
+ Ok(self.is_pin_bit_set(self.registers_ref().odsr.read().bits()))
+ }
+
+ /// Returns true if the pin is currently driven "low".
+ ///
+ /// **It's important to notice that this function does not read pin's state,
+ /// it will only inform whether it's set to be driven high, or not.**
+ /// If you want to read current logic state of the pin, use [`Pin<_>::is_high`]/[`Pin<_>::is_low`]/[`Pin<_>::state`].
+ #[inline(always)]
+ fn is_set_low(&self) -> Result<bool, Self::Error> {
+ Ok(!self.is_set_high().unwrap())
+ }
+}
+
+/// Implementation of OutputPin trait from `embedded-hal` crate.
+///
+/// # Remarks
+/// SAMV71 PIO driver does not provide any quick way of toggling the pin's state,
+/// so this function uses the naive "check + invert state" approach.
+///
+/// `Pin<_>::Error` type is [`core::convert::Infallible`]. These functions cannot fail.
+/// As long as pin is in output mode, it can always be toggled.
+impl ToggleableOutputPin for Pin<OutputMode> {
+ /// Toggles pin output (from "high" to "low" or vice versa).
+ fn toggle(&mut self) -> Result<(), Self::Error> {
+ if self.is_set_high().unwrap() {
+ self.set_low().unwrap();
+ } else {
+ self.set_high().unwrap();
+ }
+ Ok(())
+ }
+}
+
+/// PIO-controlled pin's implementation for pin in output mode.
+///
+/// This mode allows the pin to:
+/// * Set it's output state
+/// * Configure it in open-drain or push-pull mode
+/// * Configure
+impl Pin<OutputMode> { | maybe base `impl` shoud be be before trait `impl`s? (readabilty) |
aerugo | github_2023 | others | 60 | n7space | Glamhoth | @@ -0,0 +1,234 @@
+#![no_std]
+#![no_main]
+
+extern crate aerugo;
+extern crate calldwell;
+extern crate cortex_m;
+extern crate cortex_m_rt as rt;
+
+use aerugo::{
+ hal::drivers::{
+ embedded_hal::digital::{OutputPin, StatefulOutputPin},
+ pio::{
+ pin::{DriveMode, OutputMode, PinMode, PinState, PullResistor},
+ IoPortMetadata, Pin, Port, SynchronousPort, PIOC,
+ },
+ pmc::config::PeripheralId,
+ },
+ Aerugo, InitApi, SystemHardwareConfig,
+};
+use calldwell::with_rtt_out;
+use rt::entry;
+
+fn validate_pin_is_driven_high(pin: &Pin<OutputMode>) {
+ if !pin.is_set_high().unwrap() {
+ panic!("Pin is driven high, yet `is_set_high` returns `false`");
+ } | Those could be replaced with
`assert!(cond, "message")` |
aerugo | github_2023 | others | 59 | n7space | SteelPh0enix | @@ -32,14 +31,16 @@ impl AerugoHal for Hal {
}
fn get_system_time() -> Instant {
- Instant::from_ticks(
+ let duration = Duration::nanos(
TIME_START
.elapsed()
.expect("{}")
.as_nanos()
.try_into()
.unwrap(),
- )
+ );
+
+ Instant::from_ticks(duration.ticks()) | why like that, readability, or is there some difference in code underneath? |
aerugo | github_2023 | others | 59 | n7space | SteelPh0enix | @@ -5,25 +5,48 @@
use crate::aerugo::Aerugo;
use crate::data_provider::DataProvider;
use crate::tasklet::TaskletPtr;
-use crate::time::MillisDurationU32;
+use crate::time::{Duration, Instant};
+use crate::Mutex;
/// Cyclic execution information.
pub(crate) struct CyclicExecution {
+ /// Next execution time.
+ next_execution_time: Mutex<Instant>,
+ /// Period of cyclic execution.
+ period: Option<Duration>,
/// Tasklet subscribed for cyclic execution.
tasklet: TaskletPtr,
- /// Period of cyclic execution.
- period: Option<MillisDurationU32>,
}
impl CyclicExecution {
/// Creates new instance.
- pub(crate) fn new(tasklet: TaskletPtr, period: Option<MillisDurationU32>) -> Self {
- CyclicExecution { tasklet, period }
+ pub(crate) fn new(tasklet: TaskletPtr, period: Option<Duration>) -> Self {
+ CyclicExecution {
+ next_execution_time: Instant::from_ticks(0).into(),
+ period,
+ tasklet,
+ }
}
- /// Wakes that stored tasklet.
- pub(crate) fn wake_tasklet(&self) {
- Aerugo::wake_tasklet(&self.tasklet);
+ /// Wakes that stored tasklet if the time has come. | ```suggestion
/// Wakes that stored tasklet if the time for it's execution has come.
```
"if the time has come" sounds pretty ominous |
aerugo | github_2023 | others | 59 | n7space | SteelPh0enix | @@ -5,25 +5,48 @@
use crate::aerugo::Aerugo;
use crate::data_provider::DataProvider;
use crate::tasklet::TaskletPtr;
-use crate::time::MillisDurationU32;
+use crate::time::{Duration, Instant};
+use crate::Mutex;
/// Cyclic execution information.
pub(crate) struct CyclicExecution {
+ /// Next execution time.
+ next_execution_time: Mutex<Instant>,
+ /// Period of cyclic execution.
+ period: Option<Duration>,
/// Tasklet subscribed for cyclic execution.
tasklet: TaskletPtr,
- /// Period of cyclic execution.
- period: Option<MillisDurationU32>,
}
impl CyclicExecution {
/// Creates new instance.
- pub(crate) fn new(tasklet: TaskletPtr, period: Option<MillisDurationU32>) -> Self {
- CyclicExecution { tasklet, period }
+ pub(crate) fn new(tasklet: TaskletPtr, period: Option<Duration>) -> Self {
+ CyclicExecution {
+ next_execution_time: Instant::from_ticks(0).into(),
+ period,
+ tasklet,
+ }
}
- /// Wakes that stored tasklet.
- pub(crate) fn wake_tasklet(&self) {
- Aerugo::wake_tasklet(&self.tasklet);
+ /// Wakes that stored tasklet if the time has come.
+ ///
+ /// # Parameters
+ /// * `current_time` - Current system time.
+ pub(crate) fn wake_if_time(&self, current_time: Instant) { | maybe `wake_if_should_execute`? |
aerugo | github_2023 | others | 49 | n7space | Glamhoth | @@ -23,6 +22,8 @@ use samv71_hal::watchdog::{Watchdog, WatchdogConfig};
/// Safety of this cell is managed by HAL instead, guaranteeing that undefined behavior will not occur.
static mut HAL_SYSTEM_PERIPHERALS: Option<SystemPeripherals> = None;
+/// Global "restore state" that's used to manage | Comment to nothing? |
aerugo | github_2023 | others | 49 | n7space | Glamhoth | @@ -11,10 +11,10 @@ pub use self::boolean_condition_storage::BooleanConditionStorage;
use crate::aerugo::{Aerugo, AERUGO};
use crate::api::{InitError, SystemApi};
-use crate::arch::Mutex;
use crate::data_provider::DataProvider;
use crate::internal_list::InternalList;
use crate::tasklet::TaskletPtr;
+use crate::Mutex; | ```suggestion
use crate::mutex::Mutex;
``` |
aerugo | github_2023 | others | 49 | n7space | Glamhoth | @@ -9,8 +9,8 @@ use heapless::binary_heap::{BinaryHeap, Max};
use crate::aerugo::{Aerugo, AERUGO};
use crate::api::{RuntimeApi, RuntimeError, SystemApi};
-use crate::arch::Mutex;
use crate::tasklet::{TaskletPtr, TaskletStatus};
+use crate::Mutex; | ```suggestion
use crate::mutex::Mutex;
``` |
aerugo | github_2023 | others | 49 | n7space | Glamhoth | @@ -10,8 +10,8 @@ use core::cell::OnceCell;
use heapless::Vec;
use crate::api::InitError;
-use crate::arch::Mutex;
use crate::message_queue::MessageQueueHandle;
+use crate::Mutex; | ```suggestion
use crate::mutex::Mutex;
``` |
aerugo | github_2023 | others | 49 | n7space | Glamhoth | @@ -28,10 +28,10 @@ use core::cell::{OnceCell, UnsafeCell};
use crate::aerugo::AERUGO;
use crate::api::{InitError, RuntimeApi};
-use crate::arch::Mutex;
use crate::boolean_condition::BooleanConditionSet;
use crate::data_provider::DataProvider;
use crate::Instant;
+use crate::Mutex; | ```suggestion
use crate::mutex::Mutex;
``` |
aerugo | github_2023 | others | 52 | n7space | SteelPh0enix | @@ -44,7 +44,7 @@ static EVENT_MANAGER: EventManager = EventManager::new();
///
/// Singleton instance of the time manager. Used directly only by the [Aerugo]
/// structure.
-static TIME_MANAGER: TimeManager = TimeManager::new();
+static TIME_MANAGER: CyclicExecutionManager = CyclicExecutionManager::new(); | why did `TIME_MANAGER` stay the same? |
aerugo | github_2023 | others | 47 | n7space | Glamhoth | @@ -44,26 +45,27 @@ keywords = ["rtos", "space"]
categories = ["aerospace", "embedded", "hardware-support", "no-std"]
[dependencies]
-heapless = "0.7"
-bare-metal = "0.2.4"
-aerugo-hal = { version = "0.1.0", path = "aerugo-hal" }
aerugo-cortex-m = { version = "0.1.0", path = "arch/cortex-m/aerugo-cortex-m", optional = true }
-samv71-hal = { version = "0.1.0", path = "arch/cortex-m/samv71-hal", optional = true }
+aerugo-hal = { version = "0.1.0", path = "aerugo-hal" }
+aerugo-samv71-hal = { version = "0.1.0", path = "arch/cortex-m/aerugo-samv71-hal", optional = true }
aerugo-x86 = { version = "0.1.0", path = "arch/x86/aerugo-x86", optional = true }
-x86-hal = { version = "0.1.0", path = "arch/x86/x86-hal", optional = true }
+aerugo-x86-hal = { version = "0.1.0", path = "arch/x86/aerugo-x86-hal", optional = true }
+bare-metal = "0.2.4"
env-parser = { version = "1.0.0", path = "utils/env-parser" }
+heapless = "0.7"
internal-cell = { version = "0.0.1", path = "utils/internal_cell" }
+samv71-hal = { version = "0.1.0", path = "arch/cortex-m/samv71-hal", optional = true } | Some sorting by 'category'? |
aerugo | github_2023 | others | 46 | n7space | Glamhoth | @@ -699,12 +711,31 @@ impl RuntimeApi for Aerugo {
EVENT_MANAGER.clear()
}
- fn get_system_time(&'static self) -> crate::time::TimerInstantU64<1_000_000> {
- Hal::get_system_time()
+ fn get_system_time(&'static self) -> Instant {
+ if let Some(system_time) = self.time_source.time_since_user_offset() {
+ return system_time;
+ }
+ TimeSource::time_since_init() | imo `match` would be more readable here |
aerugo | github_2023 | others | 46 | n7space | Glamhoth | @@ -0,0 +1,89 @@
+//! Module containing Aerugo's time source module, providing configurable timestamps for the system
+//! Should be used internally by the system.
+
+use aerugo_hal::system_hal::SystemHal;
+
+use crate::hal::Hal;
+use crate::internal_cell::InternalCell;
+use crate::{Duration, Instant};
+
+/// Time source, responsible for creating timestamps.
+///
+/// Allows time tracking/timestamp generation since three points in time:
+/// * Hardware initialization (call to [`Aerugo::initialize`](crate::Aerugo::initialize))
+/// * Start of Aerugo scheduler (call to [`Aerugo::start`](crate::Aerugo::start))
+/// * User-defined offset
+pub struct TimeSource {
+ /// Time since system's scheduler start.
+ system_start_offset: InternalCell<Option<Duration>>,
+ /// User-defined offset.
+ user_offset: InternalCell<Option<Duration>>,
+}
+
+impl TimeSource {
+ /// Creates new instance of TimeSource
+ pub const fn new() -> Self {
+ TimeSource {
+ system_start_offset: InternalCell::new(None),
+ user_offset: InternalCell::new(None),
+ }
+ }
+
+ /// Returns time since system initialization (call to [`Aerugo::initialize`](crate::Aerugo::initialize), start of the hardware timer)
+ pub fn time_since_init() -> Instant {
+ Hal::get_system_time()
+ }
+
+ /// Returns time since system's scheduler start (call to [`Aerugo::start`](crate::Aerugo::start)), or `None` if system hasn't started yet.
+ pub fn time_since_start(&self) -> Option<Instant> {
+ // SAFETY: This is safe as long as used in single-core context and `mark_system_start` is not being called.
+ if let Some(start_offset) = unsafe { *self.system_start_offset.as_ref() } { | imo `match` would be more readable here |
aerugo | github_2023 | others | 46 | n7space | Glamhoth | @@ -0,0 +1,89 @@
+//! Module containing Aerugo's time source module, providing configurable timestamps for the system
+//! Should be used internally by the system.
+
+use aerugo_hal::system_hal::SystemHal;
+
+use crate::hal::Hal;
+use crate::internal_cell::InternalCell;
+use crate::{Duration, Instant};
+
+/// Time source, responsible for creating timestamps.
+///
+/// Allows time tracking/timestamp generation since three points in time:
+/// * Hardware initialization (call to [`Aerugo::initialize`](crate::Aerugo::initialize))
+/// * Start of Aerugo scheduler (call to [`Aerugo::start`](crate::Aerugo::start))
+/// * User-defined offset
+pub struct TimeSource {
+ /// Time since system's scheduler start.
+ system_start_offset: InternalCell<Option<Duration>>,
+ /// User-defined offset.
+ user_offset: InternalCell<Option<Duration>>,
+}
+
+impl TimeSource {
+ /// Creates new instance of TimeSource
+ pub const fn new() -> Self {
+ TimeSource {
+ system_start_offset: InternalCell::new(None),
+ user_offset: InternalCell::new(None),
+ }
+ }
+
+ /// Returns time since system initialization (call to [`Aerugo::initialize`](crate::Aerugo::initialize), start of the hardware timer)
+ pub fn time_since_init() -> Instant {
+ Hal::get_system_time()
+ }
+
+ /// Returns time since system's scheduler start (call to [`Aerugo::start`](crate::Aerugo::start)), or `None` if system hasn't started yet.
+ pub fn time_since_start(&self) -> Option<Instant> {
+ // SAFETY: This is safe as long as used in single-core context and `mark_system_start` is not being called.
+ if let Some(start_offset) = unsafe { *self.system_start_offset.as_ref() } {
+ let current_time = TimeSource::time_since_init();
+ return current_time.checked_sub_duration(start_offset);
+ }
+ None
+ }
+
+ /// Returns time since user-defined offset, or `None` if offset is not defined, or cannot be subtracted from system time.
+ pub fn time_since_user_offset(&self) -> Option<Instant> {
+ // SAFETY: This is safe as long as used in single-core context and `set_user_offset` is not being called.
+ if let Some(user_offset) = unsafe { *self.user_offset.as_ref() } { | imo `match` would be more readable here |
aerugo | github_2023 | others | 45 | n7space | SteelPh0enix | @@ -63,63 +21,44 @@ type EventStateList = Vec<EventState, { EventManager::EVENT_COUNT }>;
pub(crate) struct EventSet {
/// Tasklet assigned to this set.
tasklet: TaskletPtr,
- /// List of event states.
- event_states: Mutex<EventStateList>,
+ /// Event queue.
+ event_queue: Mutex<EventQueue>,
}
impl EventSet {
/// Creates new event set.
pub(crate) fn new(tasklet: TaskletPtr) -> Self {
EventSet {
tasklet,
- event_states: EventStateList::new().into(),
+ event_queue: EventQueue::new().into(),
}
}
- /// Adds new event to the set.
- ///
- /// # Parameters
- /// * `event_id` - ID of the event that is to be added to this set.
- ///
- /// # Return
- /// `()` if successful, `InitError` otherwise.
- ///
- /// # Safety
- /// This is unsafe, because it mutably borrows the list of event states.
- /// This is safe to call before the system initialization.
- pub(crate) unsafe fn add_event(&self, event_id: EventId) -> Result<(), InitError> {
- let event_state = EventState::new(event_id);
-
- self.event_states
- .lock(|event_states| match event_states.push(event_state) {
- Ok(_) => Ok(()),
- Err(_) => Err(InitError::EventSetFull),
- })
- }
-
/// Activates event
///
/// # Parameters
/// * `event_id` - Event ID to activate.
///
/// # Return
/// `()` if successful, `RuntimeError` otherwise. | ```suggestion
/// `true` if successfully activated event, `false` if event was already on the event queue and is waiting for trigger, `RuntimeError` otherwise.
``` |
aerugo | github_2023 | others | 45 | n7space | SteelPh0enix | @@ -131,24 +70,48 @@ impl EventSet {
/// `()` if successful, `RuntimeError` otherwise.
#[allow(dead_code)]
pub(crate) fn deactivate_event(&self, event_id: EventId) -> Result<(), RuntimeError> {
- self.event_states.lock(|event_states| {
- match event_states
- .iter_mut()
- .find(|event_state| event_state.id() == event_id)
- {
- Some(event_state) => event_state.set_active(false),
- None => return Err(RuntimeError::EventNotFound(event_id)),
- };
-
- Ok(())
+ self.event_queue.lock(|event_queue| { | that whole lock looks like a giant https://doc.rust-lang.org/core/iter/trait.Iterator.html#method.filter
you have iterators here, so you should be able to use it to one-line this whole function |
aerugo | github_2023 | others | 44 | n7space | Glamhoth | @@ -44,4 +44,5 @@ pub(crate) use aerugo_x86 as arch;
#[cfg(feature = "use-aerugo-x86")]
pub use x86_hal as hal;
-pub use arch::log;
+#[cfg(feature = "log")]
+pub use arch::{log, logln}; | There should probably be a stub-empty implementation if the feature is disabled for the case we would have any `log!` in the rtos code |
aerugo | github_2023 | others | 44 | n7space | Glamhoth | @@ -2,15 +2,16 @@
set -euo pipefail
-if [ $# -eq 0 ]
-then
+if [ $# -eq 0 ]; then
for d in examples/*/; do
- pushd $d > /dev/null
+ pushd $d >/dev/null
+ echo "Building $d"
cargo build
- popd > /dev/null
+ popd >/dev/null
done
else
- pushd examples/$1/ > /dev/null
+ pushd examples/$1/ >/dev/null
+ echo "Building examples/$1/"
cargo build
- popd > /dev/null
+ popd >/dev/null | Some spaces are removed |
aerugo | github_2023 | others | 43 | n7space | Glamhoth | @@ -1,3 +1,10 @@
#!/bin/sh | ```suggestion
#!/bin/bash
set -euo pipefail
``` |
aerugo | github_2023 | others | 43 | n7space | Glamhoth | @@ -1,3 +1,5 @@
#!/bin/sh | ```suggestion
#!/bin/bash
set -euo pipefail
``` |
aerugo | github_2023 | others | 43 | n7space | Glamhoth | @@ -0,0 +1,10 @@
+export CALLDWELL_BOARD_LOGIN=$AERUGO_BOARD_LOGIN | ```suggestion
#!/bin/bash
set -euo pipefail
export CALLDWELL_BOARD_LOGIN=$AERUGO_BOARD_LOGIN
``` |
aerugo | github_2023 | others | 41 | n7space | Glamhoth | @@ -197,7 +197,13 @@ impl SystemHal for Hal {
peripherals.timer.trigger_all_channels();
Ok(())
- })
+ });
+
+ if config.disable_interrupts_during_setup { | What about disabling the interrupts before initializing HAL? This would simplify this a bit |
aerugo | github_2023 | others | 41 | n7space | Glamhoth | @@ -244,10 +250,15 @@ impl SystemHal for Hal {
peripherals.watchdog.feed();
}
+ /// Enters critical section by disabling global interrupts.
fn enter_critical() {
cortex_m::interrupt::disable();
}
+ /// Exits critical section by enabling global interrupts.
+ ///
+ /// # Safety:
+ /// This function should never be called in critical sections created with [`SystemHal::execute_critical`] | Maybe use this new `warning` tag that is now available in the Rust doc? |
aerugo | github_2023 | others | 41 | n7space | Glamhoth | @@ -244,10 +250,15 @@ impl SystemHal for Hal {
peripherals.watchdog.feed();
}
+ /// Enters critical section by disabling global interrupts.
fn enter_critical() {
cortex_m::interrupt::disable();
}
+ /// Exits critical section by enabling global interrupts.
+ ///
+ /// # Safety
+ /// <div class="warning">This function should never be called scope-bound critical sections (like the one created with <code>SystemHal::execute_critical</code>)</div> | ```suggestion
/// <div class="warning">This function should never be called from scope-bound critical sections (like the one created with <code>SystemHal::execute_critical</code>)</div>
``` |
aerugo | github_2023 | others | 39 | n7space | Glamhoth | @@ -22,19 +22,23 @@ pub trait SystemHal {
/// Type for system HAL error.
type Error;
+ /// Creates global HAL instance. Since there can only be a single instance of HAL, this function
+ /// should initialize it's global state and prepare the environment for hardware configuration.
+ fn create() -> Result<(), Self::Error>; | Maybe `initialize` then, if this initializes and not creates/returns anything? |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.