code
stringlengths 5
1.03M
| repo_name
stringlengths 5
90
| path
stringlengths 4
158
| license
stringclasses 15
values | size
int64 5
1.03M
| n_ast_errors
int64 0
53.9k
| ast_max_depth
int64 2
4.17k
| n_whitespaces
int64 0
365k
| n_ast_nodes
int64 3
317k
| n_ast_terminals
int64 1
171k
| n_ast_nonterminals
int64 1
146k
| loc
int64 -1
37.3k
| cycloplexity
int64 -1
1.31k
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|
{-
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-}
{-# LANGUAGE PackageImports #-}
{-# LANGUAGE NoImplicitPrelude #-}
-- | Shim module: re-exports the whole of @base@'s "GHC.Float" under the
-- same module name, so code compiled against this package still sees the
-- standard floating-point definitions.
module GHC.Float (module M) where
import "base" GHC.Float as M
|
xwysp/codeworld
|
codeworld-base/src/GHC/Float.hs
|
apache-2.0
| 733
| 0
| 4
| 136
| 23
| 17
| 6
| 4
| 0
|
{-# LANGUAGE CPP #-}
#ifndef MIN_VERSION_base
#define MIN_VERSION_base(x,y,z) 1
#endif
module Language.Lua.StringLiteral
( interpretStringLiteral
, constructStringLiteral
) where
import Data.Char (ord, chr, isNumber, isPrint, isAscii)
import Data.ByteString.Lazy (ByteString)
import qualified Data.ByteString.Lazy as B
import qualified Data.ByteString.Builder as B
import qualified Data.ByteString.Lazy.Char8 as B8
import Data.List (foldl')
import Data.Bits ((.&.),shiftR)
import Numeric (showHex)
#if !(MIN_VERSION_base(4,8,0))
import Data.Monoid (mempty, mappend, mconcat)
#endif
-- | Drop leading whitespace characters (space, newline, carriage return,
-- form feed, tab, vertical tab). Used to implement the @\z@ escape, which
-- skips whitespace in the source text of a string literal.
skipWS :: String -> String
skipWS (c : cs)
  | c `elem` " \n\r\f\t\v" = skipWS cs
skipWS other = other
-- | Value of a single hexadecimal digit. Both letter cases are accepted;
-- any character outside @a-f@/@A-F@ falls through to the decimal
-- interpretation (distance from @'0'@), exactly as 'decToNum' computes it.
hexToInt :: Char -> Int
hexToInt c
  | 'a' <= c && c <= 'f' = ord c - ord 'a' + 10
  | 'A' <= c && c <= 'F' = ord c - ord 'A' + 10
  | otherwise            = ord c - ord '0'
{-# INLINE decToNum #-}
-- | Value of a decimal digit: the character's distance from @'0'@.
-- No validation is performed; non-digit input yields a meaningless value.
decToNum :: Char -> Int
decToNum = subtract (fromEnum '0') . fromEnum
-- | Interpret the source text of a Lua string literal (delimiters
-- included) as the byte string it denotes. Single- and double-quoted
-- literals have their escapes decoded; @[=*[@-style long literals are
-- handled by 'removeLongQuotes'. Returns 'Nothing' when the text does
-- not begin like a string literal.
interpretStringLiteral :: String -> Maybe ByteString
interpretStringLiteral input =
  case input of
    opener : body
      | opener == '\'' || opener == '"' -> Just (decodeEscapes (dropLast 1 body))
      | opener == '['                   -> removeLongQuotes body
    _ -> Nothing
-- | Interpret the body of a long-quoted literal, @[==[ ... ]==]@.
-- Long-quoted string literals have no escapes.
-- A leading newline on a long quoted string literal is ignored.
--
-- The caller has already consumed the opening @[@, so @str@ begins at the
-- run of @=@ signs. The closing delimiter is @]@ followed by the same
-- number of @=@ signs and a final @]@ — hence @dropLast (2+length eqs)@.
removeLongQuotes :: String -> Maybe ByteString
removeLongQuotes str =
  case span (=='=') str of
    (eqs,'[':'\n':xs) -> go (dropLast (2+length eqs) xs)  -- drop the ignored leading newline
    (eqs,'[': xs) -> go (dropLast (2+length eqs) xs)
    _ -> Nothing
  where
    -- Re-encode the remaining characters as UTF-8 bytes.
    go = Just . B.toLazyByteString . mconcat . map encodeChar
-- | Remove the final @n@ elements of a (finite) list. With @n <= 0@ the
-- list is returned unchanged; with @n >= length xs@ the result is empty.
dropLast :: Int -> [a] -> [a]
dropLast n xs = take keepCount xs
  where keepCount = length xs - n
-- | Decode the body of a short (quoted) string literal: process backslash
-- escapes and UTF-8-encode plain characters into the output bytes.
--
-- Pattern order matters: the longer escape forms (@\xHH@, @\u{...}@,
-- three-digit decimal) must be tried before the shorter ones.
decodeEscapes :: String -> ByteString
decodeEscapes = B.toLazyByteString . aux
  where
    aux xxs =
      case xxs of
        [] -> mempty
        -- \xHH: exactly two hex digits, emitted as a single raw byte
        '\\' : 'x' : h1 : h2 : rest ->
          B.word8 (fromIntegral (hexToInt h1 * 16 + hexToInt h2)) `mappend` aux rest
        -- \u{...}: hex code point, UTF-8 encoded; out-of-range or
        -- unterminated input degrades to U+FFFD (replacement character)
        '\\' : 'u' : '{' : rest ->
          case break (=='}') rest of
            (ds,_:rest')
              | code <= 0x10ffff -> encodeChar (chr code) `mappend` aux rest'
              where code = foldl' (\acc d -> acc * 16 + hexToInt d) 0 ds
            _ -> encodeChar '\xFFFD' `mappend` aux (dropWhile (/='}') rest)
        -- \ddd: up to three decimal digits, emitted as a single raw byte
        '\\' : c1 : c2 : c3 : rest
          | isNumber c1 && isNumber c2 && isNumber c3 ->
            let code = decToNum c1 * 100 + decToNum c2 * 10 + decToNum c3
            in B.word8 (fromIntegral code) `mappend` aux rest
        '\\' : c1 : c2 : rest
          | isNumber c1 && isNumber c2 ->
            let code = decToNum c1 * 10 + decToNum c2
            in B.word8 (fromIntegral code) `mappend` aux rest
        '\\' : c1 : rest
          | isNumber c1 -> B.word8 (fromIntegral (decToNum c1)) `mappend` aux rest
        -- single-character escapes
        '\\' : 'a' : rest -> B.char8 '\a' `mappend` aux rest
        '\\' : 'b' : rest -> B.char8 '\b' `mappend` aux rest
        '\\' : 'f' : rest -> B.char8 '\f' `mappend` aux rest
        '\\' : 'n' : rest -> B.char8 '\n' `mappend` aux rest
        '\\' : '\n' : rest -> B.char8 '\n' `mappend` aux rest  -- escaped literal newline
        '\\' : 'r' : rest -> B.char8 '\r' `mappend` aux rest
        '\\' : 't' : rest -> B.char8 '\t' `mappend` aux rest
        '\\' : 'v' : rest -> B.char8 '\v' `mappend` aux rest
        '\\' : '\\' : rest -> B.char8 '\\' `mappend` aux rest
        '\\' : '"' : rest -> B.char8 '"' `mappend` aux rest
        '\\' : '\'' : rest -> B.char8 '\'' `mappend` aux rest
        -- \z skips following whitespace entirely
        '\\' : 'z' : rest -> aux (skipWS rest)
        -- ordinary character: UTF-8 encode it
        c : rest -> encodeChar c `mappend` aux rest
-- | Convert a string literal body to string literal syntax: wrap the
-- bytes in double quotes, using the named escapes for control characters
-- and quote/backslash, and a two-digit @\xHH@ escape for any byte that is
-- not printable ASCII.
constructStringLiteral :: ByteString -> String
constructStringLiteral bs = '"' : B8.foldr escape "\"" bs
  where
    escape c rest =
      case c of
        '\a' -> '\\' : 'a' : rest
        '\b' -> '\\' : 'b' : rest
        '\f' -> '\\' : 'f' : rest
        '\n' -> '\\' : 'n' : rest
        '\r' -> '\\' : 'r' : rest
        '\t' -> '\\' : 't' : rest
        '\v' -> '\\' : 'v' : rest
        '\\' -> '\\' : '\\' : rest
        '\"' -> '\\' : '"' : rest
        x | isPrint x && isAscii x -> x : rest
          -- pad values below 0x10 so the escape is always two hex digits
          | x <= '\x0f' -> '\\' : 'x' : '0' : showHex (ord x) rest
          | otherwise -> '\\' : 'x' : showHex (ord x) rest
-- | UTF-8 encode a single character as a byte 'B.Builder':
-- one byte up to U+007F, two up to U+07FF, three up to U+FFFF,
-- four otherwise.
encodeChar :: Char -> B.Builder
encodeChar ch
  | cp <= 0x7f   = byte cp
  | cp <= 0x7ff  = mconcat [ byte (0xc0 + (cp `shiftR` 6))
                           , tailByte cp ]
  | cp <= 0xffff = mconcat [ byte (0xe0 + (cp `shiftR` 12))
                           , tailByte (cp `shiftR` 6)
                           , tailByte cp ]
  | otherwise    = mconcat [ byte (0xf0 + (cp `shiftR` 18))
                           , tailByte (cp `shiftR` 12)
                           , tailByte (cp `shiftR` 6)
                           , tailByte cp ]
  where
    cp = ord ch
    byte = B.word8 . fromIntegral
    -- continuation byte: 10xxxxxx carrying the low six bits of its argument
    tailByte v = byte (0x80 + (v .&. 0x3f))
|
osa1/language-lua
|
src/Language/Lua/StringLiteral.hs
|
bsd-3-clause
| 5,289
| 0
| 19
| 1,587
| 2,206
| 1,132
| 1,074
| 123
| 20
|
{-
(c) The University of Glasgow, 1994-2006
Core pass to saturate constructors and PrimOps
-}
{-# LANGUAGE BangPatterns, CPP, MultiWayIf #-}
module CorePrep (
corePrepPgm, corePrepExpr, cvtLitInteger,
lookupMkIntegerName, lookupIntegerSDataConName
) where
#include "HsVersions.h"
import OccurAnal
import HscTypes
import PrelNames
import MkId ( realWorldPrimId )
import CoreUtils
import CoreArity
import CoreFVs
import CoreMonad ( CoreToDo(..) )
import CoreLint ( endPassIO )
import CoreSyn
import CoreSubst
import MkCore hiding( FloatBind(..) ) -- We use our own FloatBind here
import Type
import Literal
import Coercion
import TcEnv
import TyCon
import Demand
import Var
import VarSet
import VarEnv
import Id
import IdInfo
import TysWiredIn
import DataCon
import PrimOp
import BasicTypes
import Module
import UniqSupply
import Maybes
import OrdList
import ErrUtils
import DynFlags
import Util
import Pair
import Outputable
import Platform
import FastString
import Config
import Name ( NamedThing(..), nameSrcSpan )
import SrcLoc ( SrcSpan(..), realSrcLocSpan, mkRealSrcLoc )
import Data.Bits
import MonadUtils ( mapAccumLM )
import Data.List ( mapAccumL )
import Control.Monad
{-
-- ---------------------------------------------------------------------------
-- Overview
-- ---------------------------------------------------------------------------
The goal of this pass is to prepare for code generation.
1. Saturate constructor and primop applications.
2. Convert to A-normal form; that is, function arguments
are always variables.
* Use case for strict arguments:
f E ==> case E of x -> f x
(where f is strict)
* Use let for non-trivial lazy arguments
f E ==> let x = E in f x
(where f is lazy and x is non-trivial)
3. Similarly, convert any unboxed lets into cases.
[I'm experimenting with leaving 'ok-for-speculation'
rhss in let-form right up to this point.]
4. Ensure that *value* lambdas only occur as the RHS of a binding
(The code generator can't deal with anything else.)
Type lambdas are ok, however, because the code gen discards them.
5. [Not any more; nuked Jun 2002] Do the seq/par munging.
6. Clone all local Ids.
This means that all such Ids are unique, rather than the
weaker guarantee of no clashes which the simplifier provides.
And that is what the code generator needs.
We don't clone TyVars or CoVars. The code gen doesn't need that,
and doing so would be tiresome because then we'd need
to substitute in types and coercions.
7. Give each dynamic CCall occurrence a fresh unique; this is
rather like the cloning step above.
8. Inject bindings for the "implicit" Ids:
* Constructor wrappers
* Constructor workers
We want curried definitions for all of these in case they
aren't inlined by some caller.
9. Replace (lazy e) by e. See Note [lazyId magic] in MkId.hs
Also replace (noinline e) by e.
10. Convert (LitInteger i t) into the core representation
for the Integer i. Normally this uses mkInteger, but if
we are using the integer-gmp implementation then there is a
special case where we use the S# constructor for Integers that
are in the range of Int.
11. Uphold tick consistency while doing this: We move ticks out of
(non-type) applications where we can, and make sure that we
annotate according to scoping rules when floating.
This is all done modulo type applications and abstractions, so that
when type erasure is done for conversion to STG, we don't end up with
any trivial or useless bindings.
Note [CorePrep invariants]
~~~~~~~~~~~~~~~~~~~~~~~~~~
Here is the syntax of the Core produced by CorePrep:
Trivial expressions
arg ::= lit | var
| arg ty | /\a. arg
| truv co | /\c. arg | arg |> co
Applications
app ::= lit | var | app arg | app ty | app co | app |> co
Expressions
body ::= app
| let(rec) x = rhs in body -- Boxed only
| case body of pat -> body
| /\a. body | /\c. body
| body |> co
Right hand sides (only place where value lambdas can occur)
rhs ::= /\a.rhs | \x.rhs | body
We define a synonym for each of these non-terminals. Functions
with the corresponding name produce a result in that syntax.
-}
-- Synonyms for each non-terminal of the CorePrep output grammar (see
-- Note [CorePrep invariants]); a function named after a non-terminal
-- produces a result in that syntax.
type CpeArg = CoreExpr -- Non-terminal 'arg'
type CpeApp = CoreExpr -- Non-terminal 'app'
type CpeBody = CoreExpr -- Non-terminal 'body'
type CpeRhs = CoreExpr -- Non-terminal 'rhs'
{-
************************************************************************
* *
Top level stuff
* *
************************************************************************
-}
-- | Run the CorePrep pass over a whole module: inject the implicit
-- data-constructor worker bindings, prep both them and the user bindings
-- with a fresh unique supply, and hand the result to 'endPassIO' for
-- linting/dumping.
corePrepPgm :: HscEnv -> Module -> ModLocation -> CoreProgram -> [TyCon]
            -> IO CoreProgram
corePrepPgm hsc_env this_mod mod_loc binds data_tycons =
    withTiming (pure dflags)
               (text "CorePrep"<+>brackets (ppr this_mod))
               (const ()) $ do
    us <- mkSplitUniqSupply 's'
    initialCorePrepEnv <- mkInitialCorePrepEnv dflags hsc_env
    let implicit_binds = mkDataConWorkers dflags mod_loc data_tycons
        -- NB: we must feed mkImplicitBinds through corePrep too
        -- so that they are suitably cloned and eta-expanded
        binds_out = initUs_ us $ do
                      floats1 <- corePrepTopBinds initialCorePrepEnv binds
                      floats2 <- corePrepTopBinds initialCorePrepEnv implicit_binds
                      return (deFloatTop (floats1 `appendFloats` floats2))
    endPassIO hsc_env alwaysQualify CorePrep binds_out []
    return binds_out
  where
    dflags = hsc_dflags hsc_env
-- | Run the CorePrep pass over a single expression, dumping the result
-- when @-ddump-prep@ is enabled.
corePrepExpr :: DynFlags -> HscEnv -> CoreExpr -> IO CoreExpr
corePrepExpr dflags hsc_env expr =
    withTiming (pure dflags) (text "CorePrep [expr]") (const ()) $ do
      uniq_supply <- mkSplitUniqSupply 's'
      env0 <- mkInitialCorePrepEnv dflags hsc_env
      let prepped = initUs_ uniq_supply (cpeBodyNF env0 expr)
      dumpIfSet_dyn dflags Opt_D_dump_prep "CorePrep" (ppr prepped)
      return prepped
-- | Prep a list of top-level bindings, threading the environment
-- left-to-right and concatenating the floats each binding produces.
-- See Note [Floating out of top level bindings]
corePrepTopBinds :: CorePrepEnv -> [CoreBind] -> UniqSM Floats
corePrepTopBinds = go
  where
    go _ [] = return emptyFloats
    go env (b : bs) = do
      (env', floats_b) <- cpeBind TopLevel env b
      floats_rest <- go env' bs
      return (floats_b `appendFloats` floats_rest)
-- | Build the implicit, curried worker binding @$wC = \x y -> $wC x y@
-- for every data constructor of the given tycons.
-- See Note [Data constructor workers]
-- c.f. Note [Injecting implicit bindings] in TidyPgm
mkDataConWorkers :: DynFlags -> ModLocation -> [TyCon] -> [CoreBind]
mkDataConWorkers dflags mod_loc data_tycons
  = [ NonRec id (tick_it (getName data_con) (Var id))
    -- The ice is thin here, but it works
    | tycon <- data_tycons, -- CorePrep will eta-expand it
      data_con <- tyConDataCons tycon,
      let id = dataConWorkId data_con
    ]
  where
    -- If we want to generate debug info, we put a source note on the
    -- worker. This is useful, especially for heap profiling.
    -- Fallback order: the constructor's own source span, then the
    -- module's source file at line 1, then a "???" placeholder.
    tick_it name
      | debugLevel dflags == 0 = id
      | RealSrcSpan span <- nameSrcSpan name = tick span
      | Just file <- ml_hs_file mod_loc = tick (span1 file)
      | otherwise = tick (span1 "???")
      where tick span = Tick (SourceNote span $ showSDoc dflags (ppr name))
            span1 file = realSrcLocSpan $ mkRealSrcLoc (mkFastString file) 1 1
{-
Note [Floating out of top level bindings]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
NB: we do need to float out of top-level bindings
Consider x = length [True,False]
We want to get
s1 = False : []
s2 = True : s1
x = length s2
We return a *list* of bindings, because we may start with
x* = f (g y)
where x is demanded, in which case we want to finish with
a = g y
x* = f a
And then x will actually end up case-bound
Note [CafInfo and floating]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
What happens when we try to float bindings to the top level? At this
point all the CafInfo is supposed to be correct, and we must make certain
that is true of the new top-level bindings. There are two cases
to consider
a) The top-level binding is marked MayHaveCafRefs. In that case we are
basically fine. The floated bindings had better all be lazy lets,
so they can float to top level, but they'll all have HasCafRefs
(the default) which is safe.
b) The top-level binding is marked NoCafRefs. This really happens
Example. CoreTidy produces
$fApplicativeSTM [NoCafRefs] = D:Alternative retry# ...blah...
Now CorePrep has to eta-expand to
$fApplicativeSTM = let sat = \xy. retry x y
in D:Alternative sat ...blah...
So what we *want* is
sat [NoCafRefs] = \xy. retry x y
$fApplicativeSTM [NoCafRefs] = D:Alternative sat ...blah...
So, gruesomely, we must set the NoCafRefs flag on the sat bindings,
*and* substitute the modified 'sat' into the old RHS.
It should be the case that 'sat' is itself [NoCafRefs] (a value, no
cafs) else the original top-level binding would not itself have been
marked [NoCafRefs]. The DEBUG check in CoreToStg for
consistentCafInfo will find this.
This is all very gruesome and horrible. It would be better to figure
out CafInfo later, after CorePrep. We'll do that in due course.
Meanwhile this horrible hack works.
Note [Data constructor workers]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Create any necessary "implicit" bindings for data con workers. We
create the rather strange (non-recursive!) binding
$wC = \x y -> $wC x y
i.e. a curried constructor that allocates. This means that we can
treat the worker for a constructor like any other function in the rest
of the compiler. The point here is that CoreToStg will generate a
StgConApp for the RHS, rather than a call to the worker (which would
give a loop). As Lennart says: the ice is thin here, but it works.
Hmm. Should we create bindings for dictionary constructors? They are
always fully applied, and the bindings are just there to support
partial applications. But it's easier to let them through.
Note [Dead code in CorePrep]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Imagine that we got an input program like this (see Trac #4962):
f :: Show b => Int -> (Int, b -> Maybe Int -> Int)
f x = (g True (Just x) + g () (Just x), g)
where
g :: Show a => a -> Maybe Int -> Int
g _ Nothing = x
g y (Just z) = if z > 100 then g y (Just (z + length (show y))) else g y unknown
After specialisation and SpecConstr, we would get something like this:
f :: Show b => Int -> (Int, b -> Maybe Int -> Int)
f x = (g$Bool_True_Just x + g$Unit_Unit_Just x, g)
where
{-# RULES g $dBool = g$Bool
g $dUnit = g$Unit #-}
g = ...
{-# RULES forall x. g$Bool True (Just x) = g$Bool_True_Just x #-}
g$Bool = ...
{-# RULES forall x. g$Unit () (Just x) = g$Unit_Unit_Just x #-}
g$Unit = ...
g$Bool_True_Just = ...
g$Unit_Unit_Just = ...
Note that the g$Bool and g$Unit functions are actually dead code: they
are only kept alive by the occurrence analyser because they are
referred to by the rules of g, which is being kept alive by the fact
that it is used (unspecialised) in the returned pair.
However, at the CorePrep stage there is no way that the rules for g
will ever fire, and it really seems like a shame to produce an output
program that goes to the trouble of allocating a closure for the
unreachable g$Bool and g$Unit functions.
The way we fix this is to:
* In cloneBndr, drop all unfoldings/rules
* In deFloatTop, run a simple dead code analyser on each top-level
RHS to drop the dead local bindings. For that call to OccAnal, we
disable the binder swap, else the occurrence analyser sometimes
introduces new let bindings for cased binders, which lead to the bug
in #5433.
The reason we don't just OccAnal the whole output of CorePrep is that
the tidier ensures that all top-level binders are GlobalIds, so they
don't show up in the free variables any longer. So if you run the
occurrence analyser on the output of CoreTidy (or later) you e.g. turn
this program:
Rec {
f = ... f ...
}
Into this one:
f = ... f ...
(Since f is not considered to be free in its own RHS.)
************************************************************************
* *
The main code
* *
************************************************************************
-}
-- | Prep one binding group: clone the binder(s), prep each RHS with
-- 'cpePair', and return an extended environment together with the floats
-- (which include the binding itself).
cpeBind :: TopLevelFlag -> CorePrepEnv -> CoreBind
        -> UniqSM (CorePrepEnv, Floats)
cpeBind top_lvl env (NonRec bndr rhs)
  = do { (_, bndr1) <- cpCloneBndr env bndr
       ; let dmd = idDemandInfo bndr
             is_unlifted = isUnliftedType (idType bndr)
       ; (floats, bndr2, rhs2) <- cpePair top_lvl NonRecursive
                                          dmd
                                          is_unlifted
                                          env bndr1 rhs
       -- See Note [Inlining in CorePrep]
       -- A trivial nested RHS is not floated at all: we just record the
       -- expression in the environment so occurrences get substituted.
       ; if exprIsTrivial rhs2 && isNotTopLevel top_lvl
         then return (extendCorePrepEnvExpr env bndr rhs2, floats)
         else do {
       ; let new_float = mkFloat dmd is_unlifted bndr2 rhs2
       -- We want bndr'' in the envt, because it records
       -- the evaluated-ness of the binder
       ; return (extendCorePrepEnv env bndr bndr2,
                 addFloat floats new_float) }}
cpeBind top_lvl env (Rec pairs)
  = do { let (bndrs,rhss) = unzip pairs
       ; (env', bndrs1) <- cpCloneBndrs env (map fst pairs)
       ; stuff <- zipWithM (cpePair top_lvl Recursive topDmd False env') bndrs1 rhss
       ; let (floats_s, bndrs2, rhss2) = unzip3 stuff
             all_pairs = foldrOL add_float (bndrs2 `zip` rhss2)
                                           (concatFloats floats_s)
       ; return (extendCorePrepEnvList env (bndrs `zip` bndrs2),
                 unitFloat (FloatLet (Rec all_pairs))) }
  where
    -- Flatten all the floats, and the current
    -- group into a single giant Rec
    add_float (FloatLet (NonRec b r)) prs2 = (b,r) : prs2
    add_float (FloatLet (Rec prs1)) prs2 = prs1 ++ prs2
    add_float b _ = pprPanic "cpeBind" (ppr b)
---------------
-- | Prep a single binder/RHS pair: prep the RHS, decide whether its
-- floats may escape the binding, eta-expand to match the binder's
-- recorded arity, wrap floating ticks, and record evaluated-ness on the
-- binder's unfolding.
cpePair :: TopLevelFlag -> RecFlag -> Demand -> Bool
        -> CorePrepEnv -> Id -> CoreExpr
        -> UniqSM (Floats, Id, CpeRhs)
-- Used for all bindings
cpePair top_lvl is_rec dmd is_unlifted env bndr rhs
  = do { (floats1, rhs1) <- cpeRhsE env rhs
       -- See if we are allowed to float this stuff out of the RHS
       ; (floats2, rhs2) <- float_from_rhs floats1 rhs1
       -- Make the arity match up
       ; (floats3, rhs3)
           <- if manifestArity rhs1 <= arity
              then return (floats2, cpeEtaExpand arity rhs2)
              else WARN(True, text "CorePrep: silly extra arguments:" <+> ppr bndr)
                   -- Note [Silly extra arguments]
                   (do { v <- newVar (idType bndr)
                       ; let float = mkFloat topDmd False v rhs2
                       ; return ( addFloat floats2 float
                                , cpeEtaExpand arity (Var v)) })
       -- Wrap floating ticks
       ; let (floats4, rhs4) = wrapTicks floats3 rhs3
       -- Record if the binder is evaluated
       -- and otherwise trim off the unfolding altogether
       -- It's not used by the code generator; getting rid of it reduces
       -- heap usage and, since we may be changing uniques, we'd have
       -- to substitute to keep it right
       ; let bndr' | exprIsHNF rhs3 = bndr `setIdUnfolding` evaldUnfolding
                   | otherwise = bndr `setIdUnfolding` noUnfolding
       ; return (floats4, bndr', rhs4) }
  where
    platform = targetPlatform (cpe_dynFlags env)
    arity = idArity bndr -- We must match this arity
    ---------------------
    -- Dispatch on binding position: empty floats pass straight through.
    float_from_rhs floats rhs
      | isEmptyFloats floats = return (emptyFloats, rhs)
      | isTopLevel top_lvl = float_top floats rhs
      | otherwise = float_nested floats rhs
    ---------------------
    float_nested floats rhs
      | wantFloatNested is_rec dmd is_unlifted floats rhs
      = return (floats, rhs)
      | otherwise = dontFloat floats rhs
    ---------------------
    float_top floats rhs -- Urhgh! See Note [CafInfo and floating]
      | mayHaveCafRefs (idCafInfo bndr)
      , allLazyTop floats
      = return (floats, rhs)
      -- So the top-level binding is marked NoCafRefs
      | Just (floats', rhs') <- canFloatFromNoCaf platform floats rhs
      = return (floats', rhs')
      | otherwise
      = dontFloat floats rhs
-- | We have non-empty floats that must not escape the RHS: wrap them
-- around it instead. The RHS may contain lambdas, which cannot sit
-- inside a 'wrapBinds', so it is first converted to a body with
-- 'rhsToBody' (let-binding any value lambdas) and only then wrapped in
-- both groups of bindings. No floats propagate upward.
dontFloat :: Floats -> CpeRhs -> UniqSM (Floats, CpeBody)
dontFloat outer_floats rhs = do
  (inner_floats, body) <- rhsToBody rhs
  let wrapped = wrapBinds outer_floats (wrapBinds inner_floats body)
  return (emptyFloats, wrapped)
{- Note [Silly extra arguments]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose we had this
f{arity=1} = \x\y. e
We *must* match the arity on the Id, so we have to generate
f' = \x\y. e
f = \x. f' x
It's a bizarre case: why is the arity on the Id wrong? Reason
(in the days of __inline_me__):
f{arity=0} = __inline_me__ (let v = expensive in \xy. e)
When InlineMe notes go away this won't happen any more. But
it seems good for CorePrep to be robust.
-}
-- ---------------------------------------------------------------------------
-- CpeRhs: produces a result satisfying CpeRhs
-- ---------------------------------------------------------------------------
-- | Convert an expression into CpeRhs syntax, returning the bindings
-- that were floated out of it.
cpeRhsE :: CorePrepEnv -> CoreExpr -> UniqSM (Floats, CpeRhs)
-- If
--   e ===> (bs, e')
-- then
--   e = let bs in e' (semantically, that is!)
--
-- For example
--   f (g x) ===> ([v = g x], f v)
cpeRhsE _env expr@(Type {}) = return (emptyFloats, expr)
cpeRhsE _env expr@(Coercion {}) = return (emptyFloats, expr)
-- Integer literals are expanded to their low-level representation here
cpeRhsE env (Lit (LitInteger i _))
    = cpeRhsE env (cvtLitInteger (cpe_dynFlags env) (getMkIntegerId env)
                   (cpe_integerSDataCon env) i)
cpeRhsE _env expr@(Lit {}) = return (emptyFloats, expr)
cpeRhsE env expr@(Var {}) = cpeApp env expr
cpeRhsE env expr@(App {}) = cpeApp env expr
cpeRhsE env (Let bind expr)
  = do { (env', new_binds) <- cpeBind NotTopLevel env bind
       ; (floats, body) <- cpeRhsE env' expr
       ; return (new_binds `appendFloats` floats, body) }
cpeRhsE env (Tick tickish expr)
  -- Soft-scoped, non-lambda-placed ticks may float outward as FloatTicks
  | tickishPlace tickish == PlaceNonLam && tickish `tickishScopesLike` SoftScope
  = do { (floats, body) <- cpeRhsE env expr
       -- See [Floating Ticks in CorePrep]
       ; return (unitFloat (FloatTick tickish) `appendFloats` floats, body) }
  | otherwise
  = do { body <- cpeBodyNF env expr
       ; return (emptyFloats, mkTick tickish' body) }
  where
    -- Breakpoint free variables must be renamed through the env
    tickish' | Breakpoint n fvs <- tickish
             -- See also 'substTickish'
             = Breakpoint n (map (getIdFromTrivialExpr . lookupCorePrepEnv env) fvs)
             | otherwise
             = tickish
cpeRhsE env (Cast expr co)
   = do { (floats, expr') <- cpeRhsE env expr
        ; return (floats, Cast expr' co) }
cpeRhsE env expr@(Lam {})
   = do { let (bndrs,body) = collectBinders expr
        ; (env', bndrs') <- cpCloneBndrs env bndrs
        ; body' <- cpeBodyNF env' body
        ; return (emptyFloats, mkLams bndrs' body') }
cpeRhsE env (Case scrut bndr ty alts)
  = do { (floats, scrut') <- cpeBody env scrut
       ; let bndr1 = bndr `setIdUnfolding` evaldUnfolding
             -- Record that the case binder is evaluated in the alternatives
       ; (env', bndr2) <- cpCloneBndr env bndr1
       ; alts' <- mapM (sat_alt env') alts
       ; return (floats, Case scrut' bndr2 ty alts') }
  where
    -- Prep one alternative: clone its binders, prep its RHS as a body
    sat_alt env (con, bs, rhs)
       = do { (env2, bs') <- cpCloneBndrs env bs
            ; rhs' <- cpeBodyNF env2 rhs
            ; return (con, bs', rhs') }
cvtLitInteger :: DynFlags -> Id -> Maybe DataCon -> Integer -> CoreExpr
-- Here we convert a literal Integer to the low-level
-- representation. Exactly how we do this depends on the
-- library that implements Integer. If it's GMP we
-- use the S# data constructor for small literals.
-- See Note [Integer literals] in Literal
cvtLitInteger dflags _ (Just sdatacon) i
  | inIntRange dflags i -- Special case for small integers
    = mkConApp sdatacon [Lit (mkMachInt dflags i)]
-- General case: build a call  mk_integer <is-non-negative> <limbs>,
-- where the limbs are 31-bit chunks of (abs i), least significant first.
cvtLitInteger dflags mk_integer _ i
    = mkApps (Var mk_integer) [isNonNegative, ints]
  where isNonNegative = if i < 0 then mkConApp falseDataCon []
                                 else mkConApp trueDataCon []
        ints = mkListExpr intTy (f (abs i))
        -- NB: f 0 produces the empty list, so literal 0 becomes
        -- mk_integer True []
        f 0 = []
        f x = let low = x .&. mask
                  high = x `shiftR` bits
              in mkConApp intDataCon [Lit (mkMachInt dflags low)] : f high
        bits = 31
        mask = 2 ^ bits - 1
-- ---------------------------------------------------------------------------
-- CpeBody: produces a result satisfying CpeBody
-- ---------------------------------------------------------------------------
-- | Convert a 'CoreExpr' so it satisfies 'CpeBody', without
-- producing any floats (any generated floats are immediately
-- let-bound using 'wrapBinds'). Generally you want this, esp.
-- when you've reached a binding form (e.g., a lambda) and
-- floating any further would be incorrect.
-- | Convert a 'CoreExpr' so it satisfies 'CpeBody', without producing
-- any floats: whatever floats arise are immediately let-bound around the
-- result using 'wrapBinds'. Generally you want this, especially once a
-- binding form (e.g. a lambda) has been reached and floating any further
-- would be incorrect.
cpeBodyNF :: CorePrepEnv -> CoreExpr -> UniqSM CpeBody
cpeBodyNF env expr = fmap (uncurry wrapBinds) (cpeBody env expr)
-- | Convert a 'CoreExpr' so it satisfies 'CpeBody'; also produce
-- a list of 'Floats' which are being propagated upwards. In
-- fact, this function is used in only two cases: to
-- implement 'cpeBodyNF' (which is what you usually want),
-- and in the case when a let-binding is in a case scrutinee--here,
-- we can always float out:
--
-- case (let x = y in z) of ...
-- ==> let x = y in case z of ...
--
-- | Convert a 'CoreExpr' so it satisfies 'CpeBody', propagating any
-- floats upwards. Used to implement 'cpeBodyNF', and directly for case
-- scrutinees, where floating a let outward is always legal:
--
--   case (let x = y in z) of ... ==> let x = y in case z of ...
cpeBody :: CorePrepEnv -> CoreExpr -> UniqSM (Floats, CpeBody)
cpeBody env expr = do
  (rhs_floats, rhs) <- cpeRhsE env expr
  (body_floats, body) <- rhsToBody rhs
  return (appendFloats rhs_floats body_floats, body)
--------
-- | Turn a CpeRhs into a CpeBody by removing any top-level value
-- lambdas: either eta-reduce them away or let-bind the whole lambda to a
-- fresh variable and return that variable.
rhsToBody :: CpeRhs -> UniqSM (Floats, CpeBody)
-- Remove top level lambdas by let-binding
rhsToBody (Tick t expr)
  | tickishScoped t == NoScope -- only float out of non-scoped annotations
  = do { (floats, expr') <- rhsToBody expr
       ; return (floats, mkTick t expr') }
rhsToBody (Cast e co)
        -- You can get things like
        --      case e of { p -> coerce t (\s -> ...) }
  = do { (floats, e') <- rhsToBody e
       ; return (floats, Cast e' co) }
rhsToBody expr@(Lam {})
  | Just no_lam_result <- tryEtaReducePrep bndrs body
  = return (emptyFloats, no_lam_result)
  | all isTyVar bndrs -- Type lambdas are ok
  = return (emptyFloats, expr)
  | otherwise -- Some value lambdas
  -- Let-bind the lambda (eta-expanded to its manifest arity) and return
  -- the fresh variable in its place.
  = do { fn <- newVar (exprType expr)
       ; let rhs = cpeEtaExpand (exprArity expr) expr
             float = FloatLet (NonRec fn rhs)
       ; return (unitFloat float, Var fn) }
  where
    (bndrs,body) = collectBinders expr
rhsToBody expr = return (emptyFloats, expr)
-- ---------------------------------------------------------------------------
-- CpeApp: produces a result satisfying CpeApp
-- ---------------------------------------------------------------------------
-- | One element of a flattened application spine: a genuine argument, or
-- a cast or (floatable) tick found between the head and its arguments.
data ArgInfo = CpeApp CoreArg
             | CpeCast Coercion
             | CpeTick (Tickish Id)
{- Note [runRW arg]
~~~~~~~~~~~~~~~~~~~
If we got, say
runRW# (case bot of {})
which happened in Trac #11291, we do /not/ want to turn it into
(case bot of {}) realWorldPrimId#
because that gives a panic in CoreToStg.myCollectArgs, which expects
only variables in function position. But if we are sure to make
runRW# strict (which we do in MkId), this can't happen
-}
-- | Prep an application: flatten the spine, handle the magic Ids
-- (lazy, noinline, runRW#), ANF-ise the arguments, and saturate the
-- head if it is a constructor or primop.
cpeApp :: CorePrepEnv -> CoreExpr -> UniqSM (Floats, CpeRhs)
-- May return a CpeRhs because of saturating primops
cpeApp top_env expr
  = do { let (terminal, args, depth) = collect_args expr
       ; cpe_app top_env terminal args depth
       }
  where
    -- We have a nested data structure of the form
    -- e `App` a1 `App` a2 ... `App` an, convert it into
    -- (e, [CpeApp a1, CpeApp a2, ..., CpeApp an], depth)
    -- We use 'ArgInfo' because we may also need to
    -- record casts and ticks. Depth counts the number
    -- of arguments that would consume strictness information
    -- (so, no type or coercion arguments.)
    collect_args :: CoreExpr -> (CoreExpr, [ArgInfo], Int)
    collect_args e = go e [] 0
      where
        go (App fun arg) as depth
          = go fun (CpeApp arg : as)
               (if isTyCoArg arg then depth else depth + 1)
        go (Cast fun co) as depth
          = go fun (CpeCast co : as) depth
        -- only soft-scoped, non-lambda-placed ticks may be collected
        go (Tick tickish fun) as depth
          | tickishPlace tickish == PlaceNonLam
            && tickish `tickishScopesLike` SoftScope
          = go fun (CpeTick tickish : as) depth
        go terminal as depth = (terminal, as, depth)
    -- Dispatch on the head of the application; the equations are tried
    -- in order, so the magic-Id cases must come before the generic
    -- variable case.
    cpe_app :: CorePrepEnv
            -> CoreExpr
            -> [ArgInfo]
            -> Int
            -> UniqSM (Floats, CpeRhs)
    cpe_app env (Var f) (CpeApp Type{} : CpeApp arg : args) depth
        | f `hasKey` lazyIdKey -- Replace (lazy a) with a, and
          || f `hasKey` noinlineIdKey -- Replace (noinline a) with a
        -- Consider the code:
        --
        --      lazy (f x) y
        --
        -- We need to make sure that we need to recursively collect arguments on
        -- "f x", otherwise we'll float "f x" out (it's not a variable) and
        -- end up with this awful -ddump-prep:
        --
        --      case f x of f_x {
        --        __DEFAULT -> f_x y
        --      }
        --
        -- rather than the far superior "f x y". Test case is par01.
        = let (terminal, args', depth') = collect_args arg
          in cpe_app env terminal (args' ++ args) (depth + depth' - 1)
    cpe_app env (Var f) [CpeApp _runtimeRep@Type{}, CpeApp _type@Type{}, CpeApp arg] 1
        | f `hasKey` runRWKey
        -- Replace (runRW# f) by (f realWorld#), beta reducing if possible (this
        -- is why we return a CorePrepEnv as well)
        = case arg of
            Lam s body -> cpe_app (extendCorePrepEnv env s realWorldPrimId) body [] 0
            _ -> cpe_app env arg [CpeApp (Var realWorldPrimId)] 1
    cpe_app env (Var v) args depth
      = do { v1 <- fiddleCCall v
           ; let e2 = lookupCorePrepEnv env v1
                 hd = getIdFromTrivialExpr_maybe e2
           -- NB: depth from collect_args is right, because e2 is a trivial expression
           -- and thus its embedded Id *must* be at the same depth as any
           -- Apps it is under are type applications only (c.f.
           -- exprIsTrivial). But note that we need the type of the
           -- expression, not the id.
           ; (app, floats) <- rebuild_app args e2 (exprType e2) emptyFloats stricts
           ; mb_saturate hd app floats depth }
        where
          stricts = case idStrictness v of
                      StrictSig (DmdType _ demands _)
                        | listLengthCmp demands depth /= GT -> demands
                             -- length demands <= depth
                        | otherwise -> []
             -- If depth < length demands, then we have too few args to
             -- satisfy strictness info so we have to ignore all the
             -- strictness info, e.g. + (error "urk")
             -- Here, we can't evaluate the arg strictly, because this
             -- partial application might be seq'd
    -- We inlined into something that's not a var and has no args.
    -- Bounce it back up to cpeRhsE.
    cpe_app env fun [] _ = cpeRhsE env fun
    -- N-variable fun, better let-bind it
    cpe_app env fun args depth
      = do { (fun_floats, fun') <- cpeArg env evalDmd fun ty
                       -- The evalDmd says that it's sure to be evaluated,
                       -- so we'll end up case-binding it
           ; (app, floats) <- rebuild_app args fun' ty fun_floats []
           ; mb_saturate Nothing app floats depth }
        where
          ty = exprType fun
    -- Saturate if necessary
    mb_saturate head app floats depth =
       case head of
         Just fn_id -> do { sat_app <- maybeSaturate fn_id app depth
                          ; return (floats, sat_app) }
         _other -> return (floats, app)
    -- Deconstruct and rebuild the application, floating any non-atomic
    -- arguments to the outside. We collect the type of the expression,
    -- the head of the application, and the number of actual value arguments,
    -- all of which are used to possibly saturate this application if it
    -- has a constructor or primop at the head.
    rebuild_app
        :: [ArgInfo] -- The arguments (inner to outer)
        -> CpeApp
        -> Type
        -> Floats
        -> [Demand]
        -> UniqSM (CpeApp, Floats)
    rebuild_app [] app _ floats ss = do
      MASSERT(null ss) -- make sure we used all the strictness info
      return (app, floats)
    rebuild_app (a : as) fun' fun_ty floats ss = case a of
      -- Type and coercion arguments consume no strictness info
      CpeApp arg@(Type arg_ty) ->
        rebuild_app as (App fun' arg) (piResultTy fun_ty arg_ty) floats ss
      CpeApp arg@(Coercion {}) ->
        rebuild_app as (App fun' arg) (funResultTy fun_ty) floats ss
      CpeApp arg -> do
        let (ss1, ss_rest) -- See Note [lazyId magic] in MkId
               = case (ss, isLazyExpr arg) of
                   (_ : ss_rest, True) -> (topDmd, ss_rest)
                   (ss1 : ss_rest, False) -> (ss1, ss_rest)
                   ([], _) -> (topDmd, [])
            (arg_ty, res_ty) = expectJust "cpeBody:collect_args" $
                               splitFunTy_maybe fun_ty
        (fs, arg') <- cpeArg top_env ss1 arg arg_ty
        rebuild_app as (App fun' arg') res_ty (fs `appendFloats` floats) ss_rest
      CpeCast co ->
        let Pair _ty1 ty2 = coercionKind co
        in rebuild_app as (Cast fun' co) ty2 floats ss
      CpeTick tickish ->
        -- See [Floating Ticks in CorePrep]
        rebuild_app as fun' fun_ty (addFloat floats (FloatTick tickish)) ss
isLazyExpr :: CoreExpr -> Bool
-- See Note [lazyId magic] in MkId
-- True when the expression is 'lazy' applied to exactly two arguments,
-- looking through any casts and ticks on the way down.
isLazyExpr (Cast e _)              = isLazyExpr e
isLazyExpr (Tick _ e)              = isLazyExpr e
isLazyExpr (Var f `App` _ `App` _) = f `hasKey` lazyIdKey
isLazyExpr _                       = False
-- ---------------------------------------------------------------------------
-- CpeArg: produces a result satisfying CpeArg
-- ---------------------------------------------------------------------------
{-
Note [ANF-ising literal string arguments]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider a program like,
data Foo = Foo Addr#
foo = Foo "turtle"#
When we go to ANFise this we might think that we want to float the string
literal like we do any other non-trivial argument. This would look like,
foo = u\ [] case "turtle"# of s { __DEFAULT__ -> Foo s }
However, this 1) isn't necessary since strings are in a sense "trivial"; and 2)
wreaks havoc on the CAF annotations that we produce here, since the result
above is CAFfy because it is updatable. Ideally at some point in the future we
would like to just float the literal to the top level as suggested in #11312,
s = "turtle"#
foo = Foo s
However, until then we simply add a special case excluding literals from the
floating done by cpeArg.
-}
-- | Is an argument okay to CPE (i.e. worth let-binding to a fresh
-- variable in 'cpeArg')?
okCpeArg :: CoreExpr -> Bool
-- Don't float literals. See Note [ANF-ising literal string arguments].
okCpeArg (Lit _) = False
-- Do not eta expand a trivial argument
okCpeArg expr    = not (exprIsTrivial expr)
-- This is where we arrange that a non-trivial argument is let-bound
-- This is where we arrange that a non-trivial argument is let-bound:
-- the argument is prepared with 'cpeRhsE', and if it is still
-- non-trivial it is bound to a fresh "sat" variable which replaces it.
cpeArg :: CorePrepEnv -> Demand
       -> CoreArg -> Type -> UniqSM (Floats, CpeArg)
cpeArg env dmd arg arg_ty
  = do { (floats1, arg1) <- cpeRhsE env arg     -- arg1 can be a lambda
       ; (floats2, arg2) <- if want_float floats1 arg1
                            then return (floats1, arg1)
                            else dontFloat floats1 arg1
                -- Else case: arg1 might have lambdas, and we can't
                --            put them inside a wrapBinds

       -- Let-bind what is left, unless it is trivial or a literal
       -- (see 'okCpeArg' and Note [ANF-ising literal string arguments]).
       ; if okCpeArg arg2
         then do { v <- newVar arg_ty
                 -- Eta-expand the bound RHS to its arity first, so the
                 -- float satisfies the CorePrep invariants.
                 ; let arg3      = cpeEtaExpand (exprArity arg2) arg2
                       arg_float = mkFloat dmd is_unlifted v arg3
                 ; return (addFloat floats2 arg_float, varToCoreExpr v) }
         else return (floats2, arg2)
       }
  where
    is_unlifted = isUnliftedType arg_ty
    want_float  = wantFloatNested NonRecursive dmd is_unlifted
{-
Note [Floating unlifted arguments]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider C (let v* = expensive in v)
where the "*" indicates "will be demanded". Usually v will have been
inlined by now, but let's suppose it hasn't (see Trac #2756). Then we
do *not* want to get
let v* = expensive in C v
because that has different strictness. Hence the use of 'allLazy'.
(NB: the let v* turns into a FloatCase, in mkLocalNonRec.)
------------------------------------------------------------------------------
-- Building the saturated syntax
-- ---------------------------------------------------------------------------
maybeSaturate deals with saturating primops and constructors
The type is the type of the entire application
-}
-- | Saturate an application whose head has no binding (primops, data
-- constructor workers, ...) by eta-expanding up to the head's arity.
-- 'n_args' is the number of value arguments already supplied.
maybeSaturate :: Id -> CpeApp -> Int -> UniqSM CpeRhs
maybeSaturate fn expr n_args
  | Just DataToTagOp <- isPrimOpId_maybe fn  -- DataToTag must have an evaluated arg
  -- A gruesome special case
  = saturateDataToTag sat_expr

  | hasNoBinding fn        -- There's no binding
  = return sat_expr

  | otherwise
  = return expr
  where
    fn_arity     = idArity fn
    excess_arity = fn_arity - n_args
    sat_expr     = cpeEtaExpand excess_arity expr

-------------
saturateDataToTag :: CpeApp -> UniqSM CpeApp
-- See Note [dataToTag magic]
-- Wrap the (saturated) dataToTag# application so that its argument is
-- guaranteed evaluated, inserting a case if necessary.
saturateDataToTag sat_expr
  = do { let (eta_bndrs, eta_body) = collectBinders sat_expr
       ; eta_body' <- eval_data2tag_arg eta_body
       ; return (mkLams eta_bndrs eta_body') }
  where
    eval_data2tag_arg :: CpeApp -> UniqSM CpeBody
    eval_data2tag_arg app@(fun `App` arg)
      | exprIsHNF arg         -- Includes nullary constructors
      = return app            -- The arg is evaluated
      | otherwise             -- Arg not evaluated, so evaluate it
      = do { arg_id <- newVar (exprType arg)
           -- Mark the case binder as evaluated so downstream passes know.
           ; let arg_id1 = setIdUnfolding arg_id evaldUnfolding
           ; return (Case arg arg_id1 (exprType app)
                          [(DEFAULT, [], fun `App` Var arg_id1)]) }

    eval_data2tag_arg (Tick t app)    -- Scc notes can appear
      = do { app' <- eval_data2tag_arg app
           ; return (Tick t app') }

    eval_data2tag_arg other     -- Should not happen
      = pprPanic "eval_data2tag" (ppr other)
{-
Note [dataToTag magic]
~~~~~~~~~~~~~~~~~~~~~~
Horrid: we must ensure that the arg of data2TagOp is evaluated
(data2tag x) --> (case x of y -> data2tag y)
(yuk yuk) take into account the lambdas we've now introduced
How might it not be evaluated? Well, we might have floated it out
of the scope of a `seq`, or dropped the `seq` altogether.
************************************************************************
* *
Simple CoreSyn operations
* *
************************************************************************
-}
{-
-- -----------------------------------------------------------------------------
-- Eta reduction
-- -----------------------------------------------------------------------------
Note [Eta expansion]
~~~~~~~~~~~~~~~~~~~~~
Eta expand to match the arity claimed by the binder Remember,
CorePrep must not change arity
Eta expansion might not have happened already, because it is done by
the simplifier only when there is at least one lambda already.
NB1: we could refrain when the RHS is trivial (which can happen
for exported things). This would reduce the amount of code
generated (a little) and make things a little worse for
code compiled without -O. The case in point is data constructor
wrappers.
NB2: we have to be careful that the result of etaExpand doesn't
invalidate any of the assumptions that CorePrep is attempting
to establish. One possible cause is eta expanding inside of
an SCC note - we're now careful in etaExpand to make sure the
SCC is pushed inside any new lambdas that are generated.
Note [Eta expansion and the CorePrep invariants]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It turns out to be much much easier to do eta expansion
*after* the main CorePrep stuff. But that places constraints
on the eta expander: given a CpeRhs, it must return a CpeRhs.
For example here is what we do not want:
f = /\a -> g (h 3) -- h has arity 2
After ANFing we get
f = /\a -> let s = h 3 in g s
and now we do NOT want eta expansion to give
f = /\a -> \ y -> (let s = h 3 in g s) y
Instead CoreArity.etaExpand gives
f = /\a -> \y -> let s = h 3 in g s y
-}
-- | Eta-expand a prepared RHS to the given arity; a zero arity leaves
-- the expression untouched.  See Note [Eta expansion]: CorePrep must
-- not change arity, so we pad with explicit lambdas here.
cpeEtaExpand :: Arity -> CpeRhs -> CpeRhs
cpeEtaExpand 0     expr = expr
cpeEtaExpand arity expr = etaExpand arity expr
{-
-- -----------------------------------------------------------------------------
-- Eta reduction
-- -----------------------------------------------------------------------------
Why try eta reduction? Hasn't the simplifier already done eta?
But the simplifier only eta reduces if that leaves something
trivial (like f, or f Int). But for deLam it would be enough to
get to a partial application:
case x of { p -> \xs. map f xs }
==> case x of { p -> map f }
-}
-- | Try to eta-reduce @\\bndrs. expr@ to just the remaining
-- application; returns Nothing when the reduction is impossible or
-- would change semantics.
tryEtaReducePrep :: [CoreBndr] -> CoreExpr -> Maybe CoreExpr
tryEtaReducePrep bndrs expr@(App _ _)
  | ok_to_eta_reduce f
  , n_remaining >= 0
  , and (zipWith ok bndrs last_args)
  , not (any (`elemVarSet` fvs_remaining) bndrs)
  , exprIsHNF remaining_expr   -- Don't turn value into a non-value
                               -- else the behaviour with 'seq' changes
  = Just remaining_expr
  where
    (f, args) = collectArgs expr
    remaining_expr = mkApps f remaining_args
    fvs_remaining = exprFreeVars remaining_expr
    (remaining_args, last_args) = splitAt n_remaining args
    n_remaining = length args - length bndrs

    -- Each trailing argument must be exactly the corresponding binder.
    ok bndr (Var arg) = bndr == arg
    ok _ _ = False

    -- We can't eta reduce something which must be saturated.
    ok_to_eta_reduce (Var f) = not (hasNoBinding f)
    ok_to_eta_reduce _ = False -- Safe. ToDo: generalise

-- A non-recursive let whose RHS does not mention the binders can be
-- kept while eta-reducing the body underneath it.
tryEtaReducePrep bndrs (Let bind@(NonRec _ r) body)
  | not (any (`elemVarSet` fvs) bndrs)
  = case tryEtaReducePrep bndrs body of
      Just e -> Just (Let bind e)
      Nothing -> Nothing
  where
    fvs = exprFreeVars r

-- NB: do not attempt to eta-reduce across ticks
-- Otherwise we risk reducing
--       \x. (Tick (Breakpoint {x}) f x)
--   ==> Tick (breakpoint {x}) f
-- which is bogus (Trac #17228)
-- tryEtaReducePrep bndrs (Tick tickish e)
--   = fmap (mkTick tickish) $ tryEtaReducePrep bndrs e

tryEtaReducePrep _ _ = Nothing
{-
************************************************************************
* *
Floats
* *
************************************************************************
Note [Pin demand info on floats]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We pin demand info on floated lets so that we can see the one-shot thunks.
-}
-- | One binding (or tick) floated outwards during CorePrep.
data FloatingBind
  = FloatLet CoreBind    -- Rhs of bindings are CpeRhss
                         -- They are always of lifted type;
                         -- unlifted ones are done with FloatCase

  | FloatCase
      Id CpeBody
      Bool               -- The bool indicates "ok-for-speculation"

  -- | See Note [Floating Ticks in CorePrep]
  | FloatTick (Tickish Id)

-- | A sequence of floats together with a cached 'OkToSpec' summary of
-- whether the whole lot may be speculated.
data Floats = Floats OkToSpec (OrdList FloatingBind)

instance Outputable FloatingBind where
  ppr (FloatLet b) = ppr b
  ppr (FloatCase b r ok) = brackets (ppr ok) <+> ppr b <+> equals <+> ppr r
  ppr (FloatTick t) = ppr t

instance Outputable Floats where
  ppr (Floats flag fs) = text "Floats" <> brackets (ppr flag) <+>
                         braces (vcat (map ppr (fromOL fs)))

instance Outputable OkToSpec where
  ppr OkToSpec    = text "OkToSpec"
  ppr IfUnboxedOk = text "IfUnboxedOk"
  ppr NotOkToSpec = text "NotOkToSpec"

-- Can we float these binds out of the rhs of a let?  We cache this decision
-- to avoid having to recompute it in a non-linear way when there are
-- deeply nested lets.
data OkToSpec
   = OkToSpec           -- Lazy bindings of lifted type
   | IfUnboxedOk        -- A mixture of lazy lifted bindings and some
                        -- ok-to-speculate unlifted bindings
   | NotOkToSpec        -- Some not-ok-to-speculate unlifted bindings
-- | Package a prepared binding as a float: unlifted or demanded
-- non-value bindings become a FloatCase; everything else a FloatLet.
mkFloat :: Demand -> Bool -> Id -> CpeRhs -> FloatingBind
mkFloat dmd is_unlifted bndr rhs
  | use_case  = FloatCase bndr rhs (exprOkForSpeculation rhs)
  | is_hnf    = FloatLet (NonRec bndr rhs)
  | otherwise = FloatLet (NonRec (setIdDemandInfo bndr dmd) rhs)
                -- See Note [Pin demand info on floats]
  where
    is_hnf    = exprIsHNF rhs
    is_strict = isStrictDmd dmd
    use_case  = is_unlifted || is_strict && not is_hnf
                -- Don't make a case for a value binding,
                -- even if it's strict.  Otherwise we get
                --      case (\x -> e) of ...!
-- | No floats at all; trivially ok-to-speculate.
emptyFloats :: Floats
emptyFloats = Floats OkToSpec nilOL

-- | True when no bindings have been floated (the flag is then moot).
isEmptyFloats :: Floats -> Bool
isEmptyFloats (Floats _ bs) = isNilOL bs

-- | Wrap a body in all of the given floats: a let-float becomes a
-- 'Let', a case-float becomes a single-alternative strict 'Case', and
-- a tick-float is re-applied with 'mkTick'.
wrapBinds :: Floats -> CpeBody -> CpeBody
wrapBinds (Floats _ binds) body0 = foldrOL wrap_one body0 binds
  where
    wrap_one float body = case float of
      FloatCase bndr rhs _ -> Case rhs bndr (exprType body) [(DEFAULT, [], body)]
      FloatLet bind        -> Let bind body
      FloatTick tickish    -> mkTick tickish body
-- | Add one float at the inner end, refreshing the cached
-- ok-to-speculate flag.
addFloat :: Floats -> FloatingBind -> Floats
addFloat (Floats ok_to_spec floats) new_float
  = Floats (combine ok_to_spec (classify new_float)) (floats `snocOL` new_float)
  where
    -- The ok-for-speculation flag says that it's safe to
    -- float this Case out of a let, and thereby do it more eagerly
    -- We need the top-level flag because it's never ok to float
    -- an unboxed binding to the top level
    classify (FloatLet _)  = OkToSpec
    classify FloatTick{}   = OkToSpec
    classify (FloatCase _ _ ok_for_spec)
      | ok_for_spec = IfUnboxedOk
      | otherwise   = NotOkToSpec

-- | A single-element float collection.
unitFloat :: FloatingBind -> Floats
unitFloat = addFloat emptyFloats

-- | Concatenate two float collections, merging their flags.
appendFloats :: Floats -> Floats -> Floats
appendFloats (Floats spec1 fs1) (Floats spec2 fs2)
  = Floats (combine spec1 spec2) (fs1 `appOL` fs2)

-- | Concatenate just the bindings of many float collections.
concatFloats :: [Floats] -> OrdList FloatingBind
concatFloats = foldr (\(Floats _ bs) acc -> bs `appOL` acc) nilOL

-- | Merge two flags, keeping the more restrictive one
-- (NotOkToSpec beats IfUnboxedOk beats OkToSpec).
combine :: OkToSpec -> OkToSpec -> OkToSpec
combine a b = case (a, b) of
  (NotOkToSpec, _) -> NotOkToSpec
  (_, NotOkToSpec) -> NotOkToSpec
  (IfUnboxedOk, _) -> IfUnboxedOk
  (_, IfUnboxedOk) -> IfUnboxedOk
  _                -> OkToSpec
-- | Turn top-level floats back into ordinary bindings.
deFloatTop :: Floats -> [CoreBind]
-- For top level only; we don't expect any FloatCases
deFloatTop (Floats _ floats)
  = foldrOL get [] floats
  where
    get (FloatLet b) bs = occurAnalyseRHSs b : bs
    get b            _  = pprPanic "corePrepPgm" (ppr b)

    -- See Note [Dead code in CorePrep]
    -- Re-run occurrence analysis (without binder swap) on each RHS.
    occurAnalyseRHSs (NonRec x e) = NonRec x (occurAnalyseExpr_NoBinderSwap e)
    occurAnalyseRHSs (Rec xes)    = Rec [(x, occurAnalyseExpr_NoBinderSwap e) | (x, e) <- xes]
---------------------------------------------------------------------------
-- | Try to mark every floated binder as NoCafRefs so the floats can
-- leave a top-level binding that is itself marked NoCafRefs; returns
-- Nothing if any float disqualifies (a non-static RHS, a FloatCase,
-- or a not-OkToSpec flag).
canFloatFromNoCaf :: Platform -> Floats -> CpeRhs -> Maybe (Floats, CpeRhs)
-- Note [CafInfo and floating]
canFloatFromNoCaf platform (Floats ok_to_spec fs) rhs
  | OkToSpec <- ok_to_spec           -- Worth trying
  , Just (subst, fs') <- go (emptySubst, nilOL) (fromOL fs)
  = Just (Floats OkToSpec fs', subst_expr subst rhs)
  | otherwise
  = Nothing
  where
    subst_expr = substExpr (text "CorePrep")

    -- Walk the floats in order, relabelling each binder NoCafRefs and
    -- substituting the relabelled binders into every later RHS.
    go :: (Subst, OrdList FloatingBind) -> [FloatingBind]
       -> Maybe (Subst, OrdList FloatingBind)

    go (subst, fbs_out) [] = Just (subst, fbs_out)

    go (subst, fbs_out) (FloatLet (NonRec b r) : fbs_in)
      | rhs_ok r
      = go (subst', fbs_out `snocOL` new_fb) fbs_in
      where
        (subst', b') = set_nocaf_bndr subst b
        new_fb       = FloatLet (NonRec b' (subst_expr subst r))

    go (subst, fbs_out) (FloatLet (Rec prs) : fbs_in)
      | all rhs_ok rs
      = go (subst', fbs_out `snocOL` new_fb) fbs_in
      where
        (bs,rs)       = unzip prs
        (subst', bs') = mapAccumL set_nocaf_bndr subst bs
        rs'           = map (subst_expr subst') rs
        new_fb        = FloatLet (Rec (bs' `zip` rs'))

    -- Ticks pass through unchanged.
    go (subst, fbs_out) (ft@FloatTick{} : fbs_in)
      = go (subst, fbs_out `snocOL` ft) fbs_in

    go _ _ = Nothing      -- Encountered a caffy binding

    ------------
    -- Give the binder NoCafRefs and record the renaming.
    set_nocaf_bndr subst bndr
      = (extendIdSubst subst bndr (Var bndr'), bndr')
      where
        bndr' = bndr `setIdCafInfo` NoCafRefs

    ------------
    rhs_ok :: CoreExpr -> Bool
    -- We can only float to top level from a NoCaf thing if
    -- the new binding is static.  However it can't mention
    -- any non-static things or it would *already* be Caffy
    rhs_ok = rhsIsStatic platform (\_ -> False)
                         (\i -> pprPanic "rhsIsStatic" (integer i))
                         -- Integer literals should not show up
-- | Is it ok to float these binds past a nested binding (rather than
-- wrapping them around its RHS)?
wantFloatNested :: RecFlag -> Demand -> Bool -> Floats -> CpeRhs -> Bool
wantFloatNested is_rec dmd is_unlifted floats rhs =
  isEmptyFloats floats
    || isStrictDmd dmd
    || is_unlifted
    || (allLazyNested is_rec floats && exprIsHNF rhs)
        -- Why the test for allLazyNested?
        --      v = f (x `divInt#` y)
        -- we don't want to float the case, even if f has arity 2,
        -- because floating the case would make it evaluated too early

-- | All floats are lazy lifted bindings, so they may reach top level.
allLazyTop :: Floats -> Bool
allLazyTop (Floats flag _) = case flag of
  OkToSpec -> True
  _        -> False

-- | Like 'allLazyTop', but in a nested context ok-to-speculate
-- unlifted bindings are additionally fine provided the enclosing
-- binding is non-recursive.
allLazyNested :: RecFlag -> Floats -> Bool
allLazyNested is_rec (Floats flag _) = case flag of
  OkToSpec    -> True
  NotOkToSpec -> False
  IfUnboxedOk -> isNonRec is_rec
{-
************************************************************************
* *
Cloning
* *
************************************************************************
-}
-- ---------------------------------------------------------------------------
-- The environment
-- ---------------------------------------------------------------------------
-- Note [Inlining in CorePrep]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- There is a subtle but important invariant that must be upheld in the output
-- of CorePrep: there are no "trivial" updatable thunks. Thus, this Core
-- is impermissible:
--
-- let x :: ()
-- x = y
--
-- (where y is a reference to a GLOBAL variable). Thunks like this are silly:
-- they can always be profitably replaced by inlining x with y. Consequently,
-- the code generator/runtime does not bother implementing this properly
-- (specifically, there is no implementation of stg_ap_0_upd_info, which is the
-- stack frame that would be used to update this thunk. The "0" means it has
-- zero free variables.)
--
-- In general, the inliner is good at eliminating these let-bindings. However,
-- there is one case where these trivial updatable thunks can arise: when
-- we are optimizing away 'lazy' (see Note [lazyId magic], and also
-- 'cpeRhsE'.) Then, we could have started with:
--
-- let x :: ()
-- x = lazy @ () y
--
-- which is a perfectly fine, non-trivial thunk, but then CorePrep will
-- drop 'lazy', giving us 'x = y' which is trivial and impermissible.
-- The solution is CorePrep to have a miniature inlining pass which deals
-- with cases like this. We can then drop the let-binding altogether.
--
-- Why does the removal of 'lazy' have to occur in CorePrep?
-- The gory details are in Note [lazyId magic] in MkId, but the
-- main reason is that lazy must appear in unfoldings (optimizer
-- output) and it must prevent call-by-value for catch# (which
-- is implemented by CorePrep.)
--
-- An alternate strategy for solving this problem is to have the
-- inliner treat 'lazy e' as a trivial expression if 'e' is trivial.
-- We decided not to adopt this solution to keep the definition
-- of 'exprIsTrivial' simple.
--
-- There is ONE caveat however: for top-level bindings we have
-- to preserve the binding so that we float the (hacky) non-recursive
-- binding for data constructors; see Note [Data constructor workers].
--
-- Note [CorePrep inlines trivial CoreExpr not Id]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- Why does cpe_env need to be an IdEnv CoreExpr, as opposed to an
-- IdEnv Id? Naively, we might conjecture that trivial updatable thunks
-- as per Note [Inlining in CorePrep] always have the form
-- 'lazy @ SomeType gbl_id'. But this is not true: the following is
-- perfectly reasonable Core:
--
-- let x :: ()
-- x = lazy @ (forall a. a) y @ Bool
--
-- When we inline 'x' after eliminating 'lazy', we need to replace
-- occurrences of 'x' with 'y @ Bool', not just 'y'. Situations like
-- this can easily arise with higher-rank types; thus, cpe_env must
-- map to CoreExprs, not Ids.
data CorePrepEnv
  = CPE { cpe_dynFlags        :: DynFlags
        , cpe_env             :: IdEnv CoreExpr   -- Clone local Ids
        -- ^ This environment is used for three operations:
        --
        --      1. To support cloning of local Ids so that they are
        --      all unique (see item (6) of CorePrep overview).
        --
        --      2. To support beta-reduction of runRW, see
        --      Note [runRW magic] and Note [runRW arg].
        --
        --      3. To let us inline trivial RHSs of non top-level let-bindings,
        --      see Note [lazyId magic], Note [Inlining in CorePrep]
        --      and Note [CorePrep inlines trivial CoreExpr not Id] (#12076)
        , cpe_mkIntegerId     :: Id
        -- ^ The Id for mkInteger, fetched once per module by
        -- 'lookupMkIntegerName' and exposed via 'getMkIntegerId'
        -- (presumably consumed when lowering Integer literals —
        -- the use site is outside this chunk).
        , cpe_integerSDataCon :: Maybe DataCon
        -- ^ integer-gmp's small-Integer data constructor, when the
        -- Integer library in use provides one; Nothing for
        -- integer-simple (see 'lookupIntegerSDataConName').
        }
-- | Look up the Id of mkInteger (guarded: Integer must not be used
-- while compiling ghc-prim or the integer library itself).
lookupMkIntegerName :: DynFlags -> HscEnv -> IO Id
lookupMkIntegerName dflags hsc_env
  = guardIntegerUse dflags $ liftM tyThingId $
    lookupGlobal hsc_env mkIntegerName

-- | Look up integer-gmp's small-Integer constructor; integer-simple
-- provides none.
lookupIntegerSDataConName :: DynFlags -> HscEnv -> IO (Maybe DataCon)
lookupIntegerSDataConName dflags hsc_env = case cIntegerLibraryType of
  IntegerGMP -> guardIntegerUse dflags $ liftM (Just . tyThingDataCon) $
                lookupGlobal hsc_env integerSDataConName
  IntegerSimple -> return Nothing

-- | Helper for 'lookupMkIntegerName' and 'lookupIntegerSDataConName'
guardIntegerUse :: DynFlags -> IO a -> IO a
guardIntegerUse dflags act
  | thisPackage dflags == primUnitId
  = return $ panic "Can't use Integer in ghc-prim"
  | thisPackage dflags == integerUnitId
  = return $ panic "Can't use Integer in integer-*"
  | otherwise = act

-- | The starting environment: an empty cloning env plus the Integer
-- helpers, looked up once per module.
mkInitialCorePrepEnv :: DynFlags -> HscEnv -> IO CorePrepEnv
mkInitialCorePrepEnv dflags hsc_env
  = do mkIntegerId <- lookupMkIntegerName dflags hsc_env
       integerSDataCon <- lookupIntegerSDataConName dflags hsc_env
       return $ CPE {
                    cpe_dynFlags = dflags,
                    cpe_env = emptyVarEnv,
                    cpe_mkIntegerId = mkIntegerId,
                    cpe_integerSDataCon = integerSDataCon
                }
-- | Record that occurrences of 'id' should be replaced by 'id''.
extendCorePrepEnv :: CorePrepEnv -> Id -> Id -> CorePrepEnv
extendCorePrepEnv cpe id id'
    = cpe { cpe_env = extendVarEnv (cpe_env cpe) id (Var id') }

-- | Record that occurrences of 'id' should be replaced by an arbitrary
-- (trivial) expression; see
-- Note [CorePrep inlines trivial CoreExpr not Id].
extendCorePrepEnvExpr :: CorePrepEnv -> Id -> CoreExpr -> CorePrepEnv
extendCorePrepEnvExpr cpe id expr
    = cpe { cpe_env = extendVarEnv (cpe_env cpe) id expr }

-- | 'extendCorePrepEnv' for many Id pairs at once.
extendCorePrepEnvList :: CorePrepEnv -> [(Id,Id)] -> CorePrepEnv
extendCorePrepEnvList cpe prs
    = cpe { cpe_env = extendVarEnvList (cpe_env cpe)
                        (map (\(id, id') -> (id, Var id')) prs) }

-- | Look up the replacement for an Id; an unmapped Id stands for
-- itself.
lookupCorePrepEnv :: CorePrepEnv -> Id -> CoreExpr
lookupCorePrepEnv cpe id
  = case lookupVarEnv (cpe_env cpe) id of
      Nothing  -> Var id
      Just exp -> exp

getMkIntegerId :: CorePrepEnv -> Id
getMkIntegerId = cpe_mkIntegerId
------------------------------------------------------------------------------
-- Cloning binders
-- ---------------------------------------------------------------------------
cpCloneBndrs :: CorePrepEnv -> [Var] -> UniqSM (CorePrepEnv, [Var])
cpCloneBndrs env bs = mapAccumLM cpCloneBndr env bs

-- | Clone a local Id with a fresh unique, recording the renaming in
-- the environment.  Global Ids, type variables and coercion variables
-- are left alone.
cpCloneBndr :: CorePrepEnv -> Var -> UniqSM (CorePrepEnv, Var)
cpCloneBndr env bndr
  | isLocalId bndr, not (isCoVar bndr)
  = do bndr' <- setVarUnique bndr <$> getUniqueM

       -- We are going to OccAnal soon, so drop (now-useless) rules/unfoldings
       -- so that we can drop more stuff as dead code.
       -- See also Note [Dead code in CorePrep]
       let bndr'' = bndr' `setIdUnfolding` noUnfolding
                          `setIdSpecialisation` emptyRuleInfo
       return (extendCorePrepEnv env bndr bndr'', bndr'')

  | otherwise   -- Top level things, which we don't want
                -- to clone, have become GlobalIds by now
                -- And we don't clone tyvars, or coercion variables
  = return (env, bndr)
------------------------------------------------------------------------------
-- Cloning ccall Ids; each must have a unique name,
-- to give the code generator a handle to hang it on
-- ---------------------------------------------------------------------------
-- | Give an FCall Id a fresh unique; each ccall must have a unique
-- name to give the code generator a handle to hang it on.  Other Ids
-- come back unchanged.
fiddleCCall :: Id -> UniqSM Id
fiddleCCall id
  | isFCallId id = do { uniq <- getUniqueM
                      ; return (id `setVarUnique` uniq) }
  | otherwise    = return id

-- | Make a fresh local binder (named "sat") of the given type,
-- forcing the type first.
newVar :: Type -> UniqSM Id
newVar ty
  = seqType ty `seq`
    do { uniq <- getUniqueM
       ; return (mkSysLocalOrCoVar (fsLit "sat") uniq ty) }
------------------------------------------------------------------------------
-- Floating ticks
-- ---------------------------------------------------------------------------
--
-- Note [Floating Ticks in CorePrep]
--
-- It might seem counter-intuitive to float ticks by default, given
-- that we don't actually want to move them if we can help it. On the
-- other hand, nothing gets very far in CorePrep anyway, and we want
-- to preserve the order of let bindings and tick annotations in
-- relation to each other. For example, if we just wrapped let floats
-- when they pass through ticks, we might end up performing the
-- following transformation:
--
-- src<...> let foo = bar in baz
-- ==> let foo = src<...> bar in src<...> baz
--
-- Because the let-binding would float through the tick, and then
-- immediately materialize, achieving nothing but decreasing tick
-- accuracy. The only special case is the following scenario:
--
-- let foo = src<...> (let a = b in bar) in baz
-- ==> let foo = src<...> bar; a = src<...> b in baz
--
-- Here we would not want the source tick to end up covering "baz" and
-- therefore refrain from pushing ticks outside. Instead, we copy them
-- into the floating binds (here "a") in cpePair. Note that where "b"
-- or "bar" are (value) lambdas we have to push the annotations
-- further inside in order to uphold our rules.
--
-- All of this is implemented below in @wrapTicks@.
-- | Like wrapFloats, but only wraps tick floats
wrapTicks :: Floats -> CoreExpr -> (Floats, CoreExpr)
wrapTicks (Floats flag floats0) expr = (Floats flag floats1, expr')
  where -- Fold over the floats: each tick float is removed from the
        -- list, applied to the expression with 'mkTick', and also
        -- copied onto the floats accumulated so far (see
        -- Note [Floating Ticks in CorePrep]).
        (floats1, expr') = foldrOL go (nilOL, expr) floats0
        -- Only ticks placeable on non-lambda expressions may be moved.
        go (FloatTick t) (fs, e) = ASSERT(tickishPlace t == PlaceNonLam)
                                   (mapOL (wrap t) fs, mkTick t e)
        go other         (fs, e) = (other `consOL` fs, e)
        -- Copy a tick onto a float it used to scope over.
        wrap t (FloatLet bind)    = FloatLet (wrapBind t bind)
        wrap t (FloatCase b r ok) = FloatCase b (mkTick t r) ok
        wrap _ other = pprPanic "wrapTicks: unexpected float!"
                                (ppr other)
        wrapBind t (NonRec binder rhs) = NonRec binder (mkTick t rhs)
        wrapBind t (Rec pairs)         = Rec (mapSnd (mkTick t) pairs)
|
olsner/ghc
|
compiler/coreSyn/CorePrep.hs
|
bsd-3-clause
| 57,867
| 1
| 19
| 15,730
| 9,376
| 4,934
| 4,442
| 610
| 18
|
-- | Haskell mirrors of enumeration types from the .NET @System.Xml@
-- namespace.  Each derives 'Enum', so a constructor's position gives
-- its numeric tag.
module Dotnet.System.Xml where

-- | Mirror of .NET's @System.Xml.XmlNodeType@.
data XmlNodeType
 = Attribute
 | CDATA
 | Comment
 | Document
 | DocumentFragment
 | Element
 | EndElement
 | EndEntity
 | Entity
 | EntityReference
 | None
 | Notation
 | ProcessingInstruction
 | SignificantWhitespace
 | Text
 | Whitespace
 | XmlDeclaration
   deriving ( Eq, Enum )

-- | Mirror of .NET's @System.Xml.ReadState@.
data ReadState
 = Closed
 | EndOfFile
 | Error
 | Initial
 | Interactive
   deriving ( Eq, Enum )

-- | Mirror of .NET's @System.Xml.XmlSpace@.
data XmlSpace
 = DefaultSpace | NoSpace | PreserveSpace
   deriving ( Eq, Enum )
|
FranklinChen/Hugs
|
dotnet/lib/Dotnet/System/Xml.hs
|
bsd-3-clause
| 501
| 0
| 6
| 113
| 127
| 79
| 48
| 30
| 0
|
module M.Tests
  ( main
  , test
  ) where

import Test.Framework
import Test.Framework.Providers.QuickCheck2 (testProperty)

import M

-- | Run the whole test group.
main :: IO ()
main = defaultMain [test]

-- | The tests for module "M".
test :: Test
test =
  testGroup "M"
    [ testProperty "Right Identity" prop_right_identity
    ]

-- | Zero is a right identity for 'add'.
prop_right_identity :: Int -> Bool
prop_right_identity n = n `add` 0 == n
|
tonymorris/serialisation
|
test/src/M/Tests.hs
|
bsd-3-clause
| 383
| 0
| 7
| 103
| 105
| 60
| 45
| 22
| 1
|
<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE helpset PUBLIC "-//Sun Microsystems Inc.//DTD JavaHelp HelpSet Version 2.0//EN" "http://java.sun.com/products/javahelp/helpset_2_0.dtd">
<!-- JavaHelp helpset for the ZAP Plug-n-Hack extension (Japanese locale). -->
<helpset version="2.0" xml:lang="ja-JP">
  <title>Plug-n-Hack | ZAP Extension</title>
  <!-- Map of help IDs to pages; "top" is the landing page. -->
  <maps>
    <homeID>top</homeID>
    <mapref location="map.jhm"/>
  </maps>
  <!-- Table of contents view. -->
  <view>
    <name>TOC</name>
    <label>コンテンツ</label>
    <type>org.zaproxy.zap.extension.help.ZapTocView</type>
    <data>toc.xml</data>
  </view>
  <!-- Keyword index view. -->
  <view>
    <name>Index</name>
    <label>インデックス</label>
    <type>javax.help.IndexView</type>
    <data>index.xml</data>
  </view>
  <!-- Full-text search view backed by the JavaHelpSearch database. -->
  <view>
    <name>Search</name>
    <label>検索</label>
    <type>javax.help.SearchView</type>
    <data engine="com.sun.java.help.search.DefaultSearchEngine">
    JavaHelpSearch
    </data>
  </view>
  <!-- Bookmarks view. -->
  <view>
    <name>Favorites</name>
    <label>お気に入り</label>
    <type>javax.help.FavoritesView</type>
  </view>
</helpset>
|
thc202/zap-extensions
|
addOns/plugnhack/src/main/javahelp/org/zaproxy/zap/extension/plugnhack/resources/help_ja_JP/helpset_ja_JP.hs
|
apache-2.0
| 998
| 86
| 64
| 158
| 411
| 208
| 203
| -1
| -1
|
module StrategoPattern where

import Parentheses

-- | Literal values as they may appear inside patterns.
data Literal
  = HInt (P Integer)
  | HChar (P String)     -- character literals in Stratego?
  | HString (P String)   -- desugar into list of characters?
  | HFrac (P Rational)
    deriving (Show{-,Read-})

-- Smart constructors wrapping the payload in 'P'.
-- (Fix: 'hInt' previously read "HInt. P"; now spelled consistently
-- with its siblings as an explicit composition.)
hInt    = HInt . P
hChar   = HChar . P
hString = HString . P
hFrac   = HFrac . P

-- | Patterns of the Stratego-targeted AST.
data Pattern
  = NoPattern
  | NewPattern (P String)
  | WildCard
  | VarPat (P String)
  | ConstrPat (String, [Pattern])
  | AsPattern (String, Pattern)
  | TuplePat (P [Pattern])
  | LitPat (P Literal)  -- new
  {- old
  | LitPat (P Integer)
  | CharLitPat (P Char)
  | StringLitPat (P String)
  -}
  | TwiddlePat (P Pattern)
  | FunPat (String, [Pattern])
    deriving (Show{-,Read-})

-- Smart constructors wrapping the payload in 'P'.
varPat     = VarPat . P
tuplePat   = TuplePat . P
litPat     = LitPat . P
--charLitPat = CharLitPat . P
--stringLitPat = StringLitPat . P
twiddlePat = TwiddlePat . P

-- Build a list pattern out of cons/nil constructor patterns.
pcons x xs = ConstrPat (":", [x,xs])
pnil       = ConstrPat ("[]", [])
plist      = foldr pcons pnil
|
forste/haReFork
|
tools/hs2stratego/AST/StrategoPattern.hs
|
bsd-3-clause
| 953
| 0
| 9
| 203
| 307
| 178
| 129
| 31
| 1
|
module F where

-- Test for refactor of if to case

-- Adds 1 to odd arguments and 2 to even ones, via 'bob'.
foo x = if odd x then bob x 1 else bob x 2

-- Plain addition helper.
bob x y = x + y
|
RefactoringTools/HaRe
|
test/testdata/Case/F.hs
|
bsd-3-clause
| 163
| 0
| 8
| 80
| 57
| 29
| 28
| 7
| 2
|
import qualified Data.Vector as U
import Data.Bits
-- Micro-benchmark: fused maps over a 100-million-element vector.
-- Every element is ((5 `shiftL` 2) * 2) = 40, so this prints 40;
-- the point is exercising vector's stream fusion on map/minimum.
main = print . U.minimum . U.map (*2) . U.map (`shiftL` 2) $ U.replicate (100000000 :: Int) (5::Int)
|
dolio/vector
|
old-testsuite/microsuite/minimum.hs
|
bsd-3-clause
| 153
| 0
| 9
| 26
| 76
| 44
| 32
| 3
| 1
|
{-# LANGUAGE BangPatterns, NoMonoLocalBinds, NoMonoPatBinds #-}
module T4498 where

-- Regression test for #4498: with NoMonoLocalBinds the banged local
-- binding 'y' must still get the polymorphic type (forall a. a -> a),
-- so that it can be applied both at the type of 'x' and at Char.
f x = let !y = (\v -> v) :: a -> a
      in (y x, y 'T')
|
forked-upstream-packages-for-ghcjs/ghc
|
testsuite/tests/typecheck/should_compile/T4498.hs
|
bsd-3-clause
| 143
| 0
| 10
| 36
| 53
| 28
| 25
| 4
| 1
|
module Annfail04 where

import Annfail04_Help

-- Testing that we detect the use of instances defined in the same module
-- (each ANN below evaluates 'thing' at Int, whose instance is declared
-- right here, so every annotation should be rejected).
instance Thing Int where
  thing = 1

{-# ANN module (thing :: Int) #-}

{-# ANN type Foo (thing :: Int) #-}
data Foo = Bar

{-# ANN f (thing :: Int) #-}
f x = x
|
urbanslug/ghc
|
testsuite/tests/annotations/should_fail/annfail04.hs
|
bsd-3-clause
| 282
| 0
| 5
| 62
| 39
| 24
| 15
| 9
| 1
|
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE TypeFamilies #-}
-- | Perform a restricted form of loop tiling within kernel streams.
-- We only tile primitive types, to avoid excessive local memory use.
module Futhark.Optimise.TileLoops
( tileLoops )
where
import Control.Applicative
import Control.Monad.State
import Control.Monad.Reader
import qualified Data.Set as S
import qualified Data.Map.Strict as M
import Data.Monoid
import Data.List
import Prelude
import Futhark.MonadFreshNames
import Futhark.Representation.Kernels
import Futhark.Pass
import Futhark.Tools
import Futhark.Util (mapAccumLM)
-- | The pass itself: tile stream loops inside kernels, one function
-- at a time.
tileLoops :: Pass Kernels Kernels
tileLoops =
  Pass { passName = "tile loops"
       , passDescription = "Tile stream loops inside kernels"
       , passFunction = intraproceduralTransformation optimiseFunDef
       }

-- | Rewrite one function's body, running the tiling monad with the
-- scope of the function parameters and threading the name source.
optimiseFunDef :: MonadFreshNames m => FunDef Kernels -> m (FunDef Kernels)
optimiseFunDef fundec = do
  body' <- modifyNameSource $ runState $
           runReaderT m (scopeOfFParams (funDefParams fundec))
  return fundec { funDefBody = body' }
  where m = optimiseBody $ funDefBody fundec
-- | The tiling monad: a scope reader over a fresh-name state.
type TileM = ReaderT (Scope Kernels) (State VNameSource)

-- | Optimise each statement of a body; the result list is unchanged.
optimiseBody :: Body Kernels -> TileM (Body Kernels)
optimiseBody (Body () bnds res) =
  Body () <$> (concat <$> mapM optimiseStm bnds) <*> pure res

-- | Try to tile a kernel binding; any extra bindings produced by
-- tiling are emitted before the rewritten kernel.  Non-kernel
-- statements are merely traversed recursively.
optimiseStm :: Stm Kernels -> TileM [Stm Kernels]
optimiseStm (Let pat aux (Op old_kernel@(Kernel desc space ts body))) = do
  (extra_bnds, space', body') <- tileInKernelBody mempty initial_variance space body
  let new_kernel = Kernel desc space' ts body'
  -- XXX: we should not change the type of the kernel (such as by
  -- changing the number of groups being used for a kernel that
  -- returns a result-per-group).
  if kernelType old_kernel == kernelType new_kernel
    then return $ extra_bnds ++ [Let pat aux $ Op new_kernel]
    else return [Let pat aux $ Op old_kernel]
  where initial_variance = M.map mempty $ scopeOfKernelSpace space
optimiseStm (Let pat aux e) =
  pure <$> (Let pat aux <$> mapExpM optimise e)
  where optimise = identityMapper { mapOnBody = const optimiseBody }
-- | Tile inside a kernel body: delegates to 'tileInStms' and
-- reassembles the kernel body with the (possibly updated) space.
tileInKernelBody :: Names -> VarianceTable
                 -> KernelSpace -> KernelBody InKernel
                 -> TileM ([Stm Kernels], KernelSpace, KernelBody InKernel)
tileInKernelBody branch_variant initial_variance initial_kspace (KernelBody () kstms kres) = do
  (extra_bnds, kspace', kstms') <-
    tileInStms branch_variant initial_variance initial_kspace kstms
  return (extra_bnds, kspace', KernelBody () kstms' kres)

-- | As 'tileInKernelBody', but for an ordinary in-kernel 'Body'.
tileInBody :: Names -> VarianceTable
           -> KernelSpace -> Body InKernel
           -> TileM ([Stm Kernels], KernelSpace, Body InKernel)
tileInBody branch_variant initial_variance initial_kspace (Body () stms res) = do
  (extra_bnds, kspace', stms') <-
    tileInStms branch_variant initial_variance initial_kspace stms
  return (extra_bnds, kspace', Body () stms' res)
-- | Walk a list of in-kernel statements, rewriting 'GroupStream's
-- whose chunked array inputs can be tiled.  Returns prelude
-- statements (e.g. tile-size computations) to place before the
-- kernel, the possibly-updated kernel space, and the rewritten
-- statements.
tileInStms :: Names -> VarianceTable
           -> KernelSpace -> [Stm InKernel]
           -> TileM ([Stm Kernels], KernelSpace, [Stm InKernel])
tileInStms branch_variant initial_variance initial_kspace kstms = do
  ((kspace, extra_bndss), kstms') <-
    mapAccumLM tileInKernelStatement (initial_kspace,[]) kstms
  return (extra_bndss, kspace, kstms')
  where variance = varianceInStms initial_variance kstms
        -- Case 1: a full-width GroupStream where every array argument
        -- admits a 1D or 1.5D tiling (the (<|>) prefers 1D).
        tileInKernelStatement (kspace, extra_bnds)
          (Let pat attr (Op (GroupStream w max_chunk lam accs arrs)))
          | max_chunk == w,
            not $ null arrs,
            chunk_size <- Var $ groupStreamChunkSize lam,
            arr_chunk_params <- groupStreamArrParams lam,
            maybe_1d_tiles <-
              zipWith (is1dTileable branch_variant kspace variance chunk_size) arrs arr_chunk_params,
            maybe_1_5d_tiles <-
              zipWith (is1_5dTileable branch_variant kspace variance chunk_size) arrs arr_chunk_params,
            Just mk_tilings <-
              zipWithM (<|>) maybe_1d_tiles maybe_1_5d_tiles = do
          (kspaces, arr_chunk_params', tile_kstms) <- unzip3 <$> sequence mk_tilings
          -- Only the first tiling that proposes a new kernel space
          -- wins; 1D tilings propose none.
          let (kspace', kspace_bnds) =
                case kspaces of
                  [] -> (kspace, [])
                  new_kspace : _ -> new_kspace
              Body () lam_kstms lam_res = groupStreamLambdaBody lam
              -- Tile-loading statements are prepended to the lambda body.
              lam_kstms' = concat tile_kstms ++ lam_kstms
              group_size = spaceGroupSize kspace
              lam' = lam { groupStreamLambdaBody = Body () lam_kstms' lam_res
                         , groupStreamArrParams = arr_chunk_params'
                         }
          -- The chunk size becomes the group size: each group now
          -- streams group_size-sized chunks through local memory.
          return ((kspace', extra_bnds <> kspace_bnds),
                  Let pat attr $ Op $ GroupStream w group_size lam' accs arrs)
        -- Case 2: 2D tiling of a full-width GroupStream over a flat
        -- thread space; tiles the two innermost dimensions.
        tileInKernelStatement (kspace, extra_bnds)
          (Let pat attr (Op (GroupStream w max_chunk lam accs arrs)))
          | w == max_chunk,
            not $ null arrs,
            FlatThreadSpace gspace <- spaceStructure kspace,
            chunk_size <- Var $ groupStreamChunkSize lam,
            arr_chunk_params <- groupStreamArrParams lam,
            Just mk_tilings <-
              zipWithM (is2dTileable branch_variant kspace variance chunk_size)
              arrs arr_chunk_params = do
          -- Workgroups become tile_size*tile_size threads.
          ((tile_size, tiled_group_size), tile_size_bnds) <- runBinder $ do
            tile_size <- letSubExp "tile_size" $ Op TileSize
            tiled_group_size <- letSubExp "tiled_group_size" $
                                BasicOp $ BinOp (Mul Int32) tile_size tile_size
            return (tile_size, tiled_group_size)
          let (tiled_gspace,untiled_gspace) = splitAt 2 $ reverse gspace
          -- Play with reversion to ensure we get increasing IDs for
          -- ltids. This affects readability of generated code.
          untiled_gspace' <- fmap reverse $ forM (reverse untiled_gspace) $ \(gtid,gdim) -> do
            ltid <- newVName "ltid"
            return (gtid,gdim,
                    ltid, constant (1::Int32))
          tiled_gspace' <- fmap reverse $ forM (reverse tiled_gspace) $ \(gtid,gdim) -> do
            ltid <- newVName "ltid"
            return (gtid,gdim,
                    ltid, tile_size)
          let gspace' = reverse $ tiled_gspace' ++ untiled_gspace'
          -- We have to recalculate number of workgroups and
          -- number of threads to fit the new workgroup size.
          ((num_threads, num_groups), num_bnds) <-
            runBinder $ sufficientGroups gspace' tiled_group_size
          let kspace' = kspace { spaceStructure = NestedThreadSpace gspace'
                               , spaceGroupSize = tiled_group_size
                               , spaceNumThreads = num_threads
                               , spaceNumGroups = num_groups
                               }
              local_ids = map (\(_, _, ltid, _) -> ltid) gspace'
          (arr_chunk_params', tile_kstms) <-
            fmap unzip $ forM mk_tilings $ \mk_tiling ->
              mk_tiling tile_size local_ids
          let Body () lam_kstms lam_res = groupStreamLambdaBody lam
              lam_kstms' = concat tile_kstms ++ lam_kstms
              lam' = lam { groupStreamLambdaBody = Body () lam_kstms' lam_res
                         , groupStreamArrParams = arr_chunk_params'
                         }
          return ((kspace', extra_bnds ++ tile_size_bnds ++ num_bnds),
                  Let pat attr $ Op $ GroupStream w tile_size lam' accs arrs)
        -- Case 3: any other GroupStream — recurse into its lambda.
        tileInKernelStatement (kspace, extra_bnds)
          (Let pat attr (Op (GroupStream w maxchunk lam accs arrs))) = do
          (bnds, kspace', lam') <- tileInStreamLambda branch_variant variance kspace lam
          return ((kspace', extra_bnds ++ bnds),
                  Let pat attr $ Op $ GroupStream w maxchunk lam' accs arrs)
        -- All other statements are left unchanged.
        tileInKernelStatement acc stm =
          return (acc, stm)
-- | Recurse into the body of a 'GroupStream' lambda, tiling any
-- nested streams.  The variance table is extended with the variance
-- induced by the lambda body's own statements.
tileInStreamLambda :: Names -> VarianceTable -> KernelSpace -> GroupStreamLambda InKernel
                   -> TileM ([Stm Kernels], KernelSpace, GroupStreamLambda InKernel)
tileInStreamLambda branch_variant variance kspace lam = do
  let lam_body = groupStreamLambdaBody lam
      variance' = varianceInStms variance (bodyStms lam_body)
  (prelude_bnds, kspace', lam_body') <-
    tileInBody branch_variant variance' kspace lam_body
  return (prelude_bnds, kspace', lam { groupStreamLambdaBody = lam_body' })
-- | Can this stream input be tiled one-dimensionally?  Possible only
-- when the array is invariant to all thread indices, there is no
-- branch variance, and the element type is primitive.  The outer 'do'
-- runs in 'Maybe'; on success it yields an action that builds the
-- tiling (no kernel-space change is needed for 1D tiles, hence the
-- @(kspace, [])@).
is1dTileable :: MonadFreshNames m =>
                Names -> KernelSpace -> VarianceTable -> SubExp -> VName -> LParam InKernel
             -> Maybe (m ((KernelSpace, [Stm Kernels]),
                          LParam InKernel,
                          [Stm InKernel]))
is1dTileable branch_variant kspace variance block_size arr block_param = do
  guard $ S.null $ M.findWithDefault mempty arr variance
  guard $ S.null branch_variant
  guard $ primType $ rowType $ paramType block_param
  return $ do
    (outer_block_param, kstms) <- tile1d kspace block_size block_param
    return ((kspace, []), outer_block_param, kstms)
-- | \"1.5D\" tiling: the array is invariant to the innermost thread
-- dimension, so we restructure the thread space to make the innermost
-- dimension the group dimension and tile along it.  Fails ('Nothing')
-- for non-primitive elements, arrays variant to the innermost
-- dimension, or an already-nested thread space.
is1_5dTileable :: (MonadFreshNames m, HasScope Kernels m) =>
                  Names -> KernelSpace -> VarianceTable
               -> SubExp -> VName -> LParam InKernel
               -> Maybe (m ((KernelSpace, [Stm Kernels]),
                            LParam InKernel,
                            [Stm InKernel]))
is1_5dTileable branch_variant kspace variance block_size arr block_param = do
  guard $ primType $ rowType $ paramType block_param
  (inner_gtid, inner_gdim) <- invariantToInnermostDimension
  mk_structure <-
    case spaceStructure kspace of
      NestedThreadSpace{} -> Nothing
      FlatThreadSpace gtids_and_gdims ->
        return $ do
          -- Force a functioning group size. XXX: not pretty.
          let n_dims = length gtids_and_gdims
          outer <- forM (take (n_dims-1) gtids_and_gdims) $ \(gtid, gdim) -> do
            ltid <- newVName "ltid"
            return (gtid, gdim, ltid, gdim)
          inner_ltid <- newVName "inner_ltid"
          inner_ldim <- newVName "inner_ldim"
          -- Group size becomes min(old group size, inner dimension).
          let compute_tiled_group_size =
                mkLet' [] [Ident inner_ldim $ Prim int32] $
                BasicOp $ BinOp (SMin Int32) (spaceGroupSize kspace) inner_gdim
              structure = NestedThreadSpace $ outer ++ [(inner_gtid, inner_gdim,
                                                         inner_ltid, Var inner_ldim)]
          -- Recompute thread/group counts for the new group size.
          ((num_threads, num_groups), num_bnds) <- runBinder $ do
            threads_necessary <-
              letSubExp "threads_necessary" =<<
              foldBinOp (Mul Int32)
              (constant (1::Int32)) (map snd gtids_and_gdims)
            groups_necessary <-
              letSubExp "groups_necessary" =<<
              eDivRoundingUp Int32 (eSubExp threads_necessary) (eSubExp $ Var inner_ldim)
            num_threads <-
              letSubExp "num_threads" $
              BasicOp $ BinOp (Mul Int32) groups_necessary (Var inner_ldim)
            return (num_threads, groups_necessary)
          let kspace' = kspace { spaceGroupSize = Var inner_ldim
                               , spaceNumGroups = num_groups
                               , spaceNumThreads = num_threads
                               , spaceStructure = structure
                               }
          return (compute_tiled_group_size : num_bnds,
                  kspace')
  return $ do
    (outer_block_param, kstms) <- tile1d kspace block_size block_param
    (structure_bnds, kspace') <- mk_structure
    return ((kspace', structure_bnds), outer_block_param, kstms)
  -- The array must be invariant to the innermost (last) dimension of
  -- the thread space, and that dimension must not be branch-variant.
  where invariantToInnermostDimension :: Maybe (VName, SubExp)
        invariantToInnermostDimension =
          case reverse $ spaceDimensions kspace of
            (i,d) : _
              | not $ i `S.member` M.findWithDefault mempty arr variance,
                not $ i `S.member` branch_variant -> Just (i,d)
            _ -> Nothing
-- | Build the statements implementing a 1D tile: each thread reads
-- one element (indexed by its local thread id) and a 'Combine'
-- collects the elements of a group into a local-memory chunk bound to
-- the original chunk parameter's name.  Returns the renamed
-- ("_outer") parameter that now refers to the untiled chunk.
tile1d :: MonadFreshNames m =>
          KernelSpace
       -> SubExp
       -> LParam InKernel
       -> m (LParam InKernel, [Stm InKernel])
tile1d kspace block_size block_param = do
  -- The original chunk parameter is renamed; its old name is reused
  -- for the tiled chunk so the lambda body needs no rewriting.
  outer_block_param <- do
    name <- newVName $ baseString (paramName block_param) ++ "_outer"
    return block_param { paramName = name }
  let ltid = spaceLocalId kspace
  -- Per-thread read of one element from the outer chunk.
  read_elem_bnd <- do
    name <- newVName $ baseString (paramName outer_block_param) ++ "_elem"
    return $
      mkLet' [] [Ident name $ rowType $ paramType outer_block_param] $
      BasicOp $ Index (paramName outer_block_param) [DimFix $ Var ltid]
  let block_cspace = [(ltid,block_size)]
      block_pe =
        PatElem (paramName block_param) BindVar $ paramType outer_block_param
      write_block_stms =
        [ Let (Pattern [] [block_pe]) (defAux ()) $ Op $
          Combine block_cspace [patElemType pe] [] $
          Body () [read_elem_bnd] [Var $ patElemName pe]
        | pe <- patternElements $ stmPattern read_elem_bnd ]
  return (outer_block_param, write_block_stms)
-- | Can this stream input be tiled two-dimensionally?  Requires no
-- branch variance, a primitive element type, and the array being
-- variant to exactly one of the two innermost thread dimensions.  On
-- success, yields a builder that, given the tile size and the local
-- thread ids, produces the tiled parameter and the loading/indexing
-- statements.
is2dTileable :: MonadFreshNames m =>
                Names -> KernelSpace -> VarianceTable -> SubExp -> VName -> LParam InKernel
             -> Maybe (SubExp -> [VName] -> m (LParam InKernel, [Stm InKernel]))
is2dTileable branch_variant kspace variance block_size arr block_param = do
  guard $ S.null branch_variant
  guard $ primType $ rowType $ paramType block_param
  pt <- case rowType $ paramType block_param of
          Prim pt -> return pt
          _ -> Nothing
  -- inner_perm is [0,1] or [1,0]: which of the two innermost
  -- dimensions the array is variant to.
  inner_perm <- invariantToOneOfTwoInnerDims
  Just $ \tile_size local_is -> do
    let num_outer = length local_is - 2
        perm = [0..num_outer-1] ++ map (+num_outer) inner_perm
        invariant_i : variant_i : _ = reverse $ rearrangeShape perm local_is
        (global_i,global_d):_ = rearrangeShape inner_perm $ drop num_outer $ spaceDimensions kspace
    outer_block_param <- do
      name <- newVName $ baseString (paramName block_param) ++ "_outer"
      return block_param { paramName = name }
    elem_name <- newVName $ baseString (paramName outer_block_param) ++ "_elem"
    -- Each thread reads the element selected by the dimension the
    -- array is invariant to.
    let read_elem_bnd = mkLet' [] [Ident elem_name $ Prim pt] $
                        BasicOp $ Index (paramName outer_block_param) $
                        fullSlice (paramType outer_block_param) [DimFix $ Var invariant_i]
    let block_size_2d = Shape $ rearrangeShape inner_perm [tile_size, block_size]
        block_cspace = zip (drop num_outer local_is) $ rearrangeShape inner_perm [tile_size,block_size]
    block_name_2d <- newVName $ baseString (paramName block_param) ++ "_2d"
    -- Combine the per-thread reads into a 2D local-memory tile.
    let block_pe =
          PatElem block_name_2d BindVar $
          rowType (paramType outer_block_param) `arrayOfShape` block_size_2d
        write_block_stm =
          Let (Pattern [] [block_pe]) (defAux ()) $
          Op $ Combine block_cspace [Prim pt] [(global_i, global_d)] $
          Body () [read_elem_bnd] [Var elem_name]
    block_param_aux_name <- newVName $ baseString $ paramName block_param
    let block_param_aux = Ident block_param_aux_name $
                          rearrangeType inner_perm $ patElemType block_pe
    -- Rearrange the tile and index out the row this thread uses,
    -- rebinding the original chunk parameter name.
    let index_block_kstms =
          [mkLet' [] [block_param_aux] $
            BasicOp $ Rearrange inner_perm block_name_2d,
           mkLet' [] [paramIdent block_param] $
            BasicOp $ Index (identName block_param_aux) $
            fullSlice (identType block_param_aux) [DimFix $ Var variant_i]]
    return (outer_block_param, write_block_stm : index_block_kstms)
  where invariantToOneOfTwoInnerDims :: Maybe [Int]
        invariantToOneOfTwoInnerDims = do
          (j,_) : (i,_) : _ <- Just $ reverse $ spaceDimensions kspace
          let variant_to = M.findWithDefault mempty arr variance
          if i `S.member` variant_to && not (j `S.member` variant_to) then
            Just [0,1]
          else if j `S.member` variant_to && not (i `S.member` variant_to) then
            Just [1,0]
          else
            Nothing
-- | The variance table keeps a mapping from a variable name
-- (something produced by a 'Stm') to the kernel thread indices
-- that name depends on. If a variable is not present in this table,
-- that means it is bound outside the kernel (and so can be considered
-- invariant to all dimensions).
type VarianceTable = M.Map VName Names
-- | Extend the variance table with the variance induced by each
-- statement in the list, in order.  Uses the strict 'foldl'' (already
-- used by 'varianceInStm' below) so that long statement lists do not
-- accumulate a chain of thunked map updates.
varianceInStms :: VarianceTable -> [Stm InKernel] -> VarianceTable
varianceInStms = foldl' varianceInStm
-- | The variance of every name bound by a statement is the union of
-- the variances of the statement's free variables (each free variable
-- also counts as variant to itself).
varianceInStm :: VarianceTable -> Stm InKernel -> VarianceTable
varianceInStm variance bnd =
  foldl' add variance $ patternNames $ stmPattern bnd
  where add variance' v = M.insert v binding_variance variance'
        look variance' v = S.insert v $ M.findWithDefault mempty v variance'
        binding_variance = mconcat $ map (look variance) $ S.toList (freeInStm bnd)
-- | Given a nested thread space (gtid, gdim, ltid, ldim per
-- dimension) and a group size, emit bindings computing how many
-- workgroups and threads are needed, and return (num_threads,
-- num_groups).
sufficientGroups :: MonadBinder m =>
                    [(VName, SubExp, VName, SubExp)] -> SubExp
                 -> m (SubExp, SubExp)
sufficientGroups gspace group_size = do
  -- Per dimension: ceil(global_dim / local_dim) groups.
  per_dim_groups <- mapM groupsInDim gspace
  total_groups <- letSubExp "num_groups" =<<
    foldBinOp (Mul Int32) (constant (1::Int32)) per_dim_groups
  total_threads <- letSubExp "num_threads" $
    BasicOp $ BinOp (Mul Int32) total_groups group_size
  return (total_threads, total_groups)
  where groupsInDim (_, gd, _, ld) =
          letSubExp "groups_in_dim" =<< eDivRoundingUp Int32 (eSubExp gd) (eSubExp ld)
|
ihc/futhark
|
src/Futhark/Optimise/TileLoops.hs
|
isc
| 17,279
| 0
| 23
| 4,810
| 4,784
| 2,412
| 2,372
| 306
| 5
|
{-# Language TemplateHaskell, LambdaCase #-}
module Main where
import AsmProg
import Common
import Control.Lens
import Control.Monad.Trans.State.Strict
import Data.Foldable
import Data.Map (Map)
import qualified Data.Map as Map
import Data.Vector (Vector)
import qualified Data.Vector as Vector
import Text.Megaparsec hiding (State)
import Text.Megaparsec.Char
-- | One assembunny instruction, as parsed from the puzzle input.
data Inst
  = Copy Value !Register -- ^ @cpy x y@ — copy value @x@ into register @y@
  | Inc !Register        -- ^ @inc r@ — increment register @r@
  | Dec !Register        -- ^ @dec r@ — decrement register @r@
  | Jnz Value Value      -- ^ @jnz x y@ — jump @y@ if @x@ is non-zero
  | Tgl Value            -- ^ @tgl x@ — toggle the instruction at offset @x@
  deriving Show
-- | Full interpreter state: register file plus the (self-modifying,
-- via 'Tgl') program.
data Machine = Machine
  { _machRegisters :: !Registers      -- ^ current register values
  , _machProgram :: !(Vector Inst)    -- ^ current program text
  }
-- Template Haskell name quotes take the form @''Machine@ with no
-- space after the quotes; the space here was a lexical error.
makeLenses ''Machine
-- | Delegate register access to the machine's register file, so the
-- shared 'reg' lens works directly on 'Machine'.
instance HasRegisters Machine where
  reg r = machRegisters . reg r
  {-# INLINE reg #-}
-- | Parse day 23's input and run the program with register A
-- initialised to 7 (part 1) and 12 (part 2).
main :: IO ()
main =
  do program <- Vector.fromList . parseLines parseFile <$> readInputFile 23
     print (execute program 7)
     print (execute program 12)
-- | Parser for a single instruction line.  Each alternative consumes
-- the mnemonic (including its trailing space) before the operands.
parseFile :: Parser Inst
parseFile =
  Copy <$ wholestring "cpy " <*> pValue <* char ' ' <*> pReg <|>
  Jnz <$ wholestring "jnz " <*> pValue <* char ' ' <*> pValue <|>
  Tgl <$ wholestring "tgl " <*> pValue <|>
  Inc <$ wholestring "inc " <*> pReg <|>
  Dec <$ wholestring "dec " <*> pReg
-- | Run the program with register A initialised to @a@; the result is
-- the final value of register A (read when the program counter falls
-- off the end of the program).
execute :: Vector Inst -> Int -> Int
execute program0 a =
  evalState mainEntry (Machine zeroRegisters program0)
  where
    mainEntry =
      do reg A .= a
         goto 0
    -- Execute one instruction at program counter @pc@, then continue.
    -- NOTE(review): the @o@ bound in the Copy/Jnz cases shadows the
    -- outer @o@; harmless, but worth being aware of when reading.
    step pc o =
      case o of
        Copy i o -> (reg o <~ rval i) >> goto (pc+1)
        Inc r -> (reg r += 1) >> goto (pc+1)
        Dec r -> (reg r -= 1) >> goto (pc+1)
        Tgl r -> do v <- rval r
                    toggle (pc+v)
                    goto (pc+1)
        Jnz i o -> do v <- rval i
                      o' <- rval o
                      goto (if v == 0 then pc+1 else pc+o')
    -- Toggle the instruction at @pc@ per the day-23 rules; a toggle
    -- that lands outside the program is a no-op thanks to 'ix'.
    toggle :: Int -> State Machine ()
    toggle pc =
      machProgram . ix pc %= \oper ->
      case oper of
        Inc x -> Dec x
        Dec x -> Inc x
        Tgl (Reg x) -> Inc x
        Jnz x (Reg y) -> Copy x y
        Copy x y -> Jnz x (Reg y)
        _ -> error ("Nonsense toggle: " ++ show pc ++ " " ++ show oper)
    -- Fetch-and-dispatch loop; falling off the program returns reg A.
    goto pc = strictState $
      do program <- use machProgram
         case program Vector.!? pc of
           Just o -> step pc o
           Nothing -> use (reg A)
|
glguy/advent2016
|
Day23.hs
|
isc
| 2,378
| 0
| 23
| 850
| 863
| 424
| 439
| -1
| -1
|
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE TypeSynonymInstances #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE ScopedTypeVariables #-}
-- | Kernel extraction.
--
-- In the following, I will use the term "width" to denote the amount
-- of immediate parallelism in a map - that is, the outer size of the
-- array(s) being used as input.
--
-- = Basic Idea
--
-- If we have:
--
-- @
-- map
-- map(f)
-- bnds_a...
-- map(g)
-- @
--
-- Then we want to distribute to:
--
-- @
-- map
-- map(f)
-- map
-- bnds_a
-- map
-- map(g)
-- @
--
-- But for now only if
--
-- (0) it can be done without creating irregular arrays.
-- Specifically, the size of the arrays created by @map(f)@, by
-- @map(g)@ and whatever is created by @bnds_a@ that is also used
-- in @map(g)@, must be invariant to the outermost loop.
--
-- (1) the maps are _balanced_. That is, the functions @f@ and @g@
-- must do the same amount of work for every iteration.
--
-- The advantage is that the map-nests containing @map(f)@ and
-- @map(g)@ can now be trivially flattened at no cost, thus exposing
-- more parallelism. Note that the @bnds_a@ map constitutes array
-- expansion, which requires additional storage.
--
-- = Distributing Sequential Loops
--
-- As a starting point, sequential loops are treated like scalar
-- expressions. That is, not distributed. However, sometimes it can
-- be worthwhile to distribute if they contain a map:
--
-- @
-- map
-- loop
-- map
-- map
-- @
--
-- If we distribute the loop and interchange the outer map into the
-- loop, we get this:
--
-- @
-- loop
-- map
-- map
-- map
-- map
-- @
--
-- Now more parallelism may be available.
--
-- = Unbalanced Maps
--
-- Unbalanced maps will as a rule be sequentialised, but sometimes,
-- there is another way. Assume we find this:
--
-- @
-- map
-- map(f)
-- map(g)
-- map
-- @
--
-- Presume that @map(f)@ is unbalanced. By the simple rule above, we
-- would then fully sequentialise it, resulting in this:
--
-- @
-- map
-- loop
-- map
-- map
-- @
--
-- == Balancing by Loop Interchange
--
-- The above is not ideal, as we cannot flatten the @map-loop@ nest,
-- and we are thus limited in the amount of parallelism available.
--
-- But assume now that the width of @map(g)@ is invariant to the outer
-- loop. Then if possible, we can interchange @map(f)@ and @map(g)@,
-- sequentialise @map(f)@ and distribute, interchanging the outer
-- parallel loop into the sequential loop:
--
-- @
-- loop(f)
-- map
-- map(g)
-- map
-- map
-- @
--
-- After flattening the two nests we can obtain more parallelism.
--
-- When distributing a map, we also need to distribute everything that
-- the map depends on - possibly as its own map. When distributing a
-- set of scalar bindings, we will need to know which of the binding
-- results are used afterwards. Hence, we will need to compute usage
-- information.
--
-- = Redomap
--
-- Redomap can be handled much like map. Distributed loops are
-- distributed as maps, with the parameters corresponding to the
-- neutral elements added to their bodies. The remaining loop will
-- remain a redomap. Example:
--
-- @
-- redomap(op,
-- fn (acc,v) =>
-- map(f)
-- map(g),
-- e,a)
-- @
--
-- distributes to
--
-- @
-- let b = map(fn v =>
-- let acc = e
-- map(f),
-- a)
-- redomap(op,
-- fn (acc,v,dist) =>
-- map(g),
-- e,a,b)
-- @
--
-- Note that there may be further kernel extraction opportunities
-- inside the @map(f)@. The downside of this approach is that the
-- intermediate array (@b@ above) must be written to main memory. An
-- often better approach is to just turn the entire @redomap@ into a
-- single kernel.
--
module Futhark.Pass.ExtractKernels
(extractKernels)
where
import Control.Applicative
import Control.Monad.RWS.Strict
import Control.Monad.Reader
import Control.Monad.Trans.Maybe
import qualified Data.Map.Strict as M
import qualified Data.Set as S
import Data.Maybe
import Data.List
import Prelude
import Futhark.Representation.SOACS
import Futhark.Representation.SOACS.Simplify (simplifyStms, simpleSOACS)
import qualified Futhark.Representation.Kernels as Out
import Futhark.Representation.Kernels.Kernel
import Futhark.MonadFreshNames
import Futhark.Tools
import qualified Futhark.Transform.FirstOrderTransform as FOT
import qualified Futhark.Pass.ExtractKernels.Kernelise as Kernelise
import Futhark.Transform.Rename
import Futhark.Pass
import Futhark.Transform.CopyPropagate
import Futhark.Pass.ExtractKernels.Distribution
import Futhark.Pass.ExtractKernels.ISRWIM
import Futhark.Pass.ExtractKernels.BlockedKernel
import Futhark.Pass.ExtractKernels.Segmented
import Futhark.Pass.ExtractKernels.Interchange
import Futhark.Util
import Futhark.Util.Log
-- Shorthands for statements/lambdas in the two output representations.
type KernelsStm = Out.Stm Out.Kernels
type InKernelStm = Out.Stm Out.InKernel
type InKernelLambda = Out.Lambda Out.InKernel
-- | Transform a program using SOACs to a program using explicit
-- kernels, using the kernel extraction transformation.
extractKernels :: Pass SOACS Out.Kernels
extractKernels =
  Pass { passName = "extract kernels"
       , passDescription = "Perform kernel extraction"
       , passFunction = \prog ->
           runDistribM $ Prog <$> mapM transformFunDef (progFunctions prog)
       }
-- | The monad the top-level distribution pass runs in: a scope of
-- kernel-level bindings for reading, a 'Log' for writing, and a name
-- source for generating fresh names.
newtype DistribM a = DistribM (RWS (Scope Out.Kernels) Log VNameSource a)
  deriving (Functor, Applicative, Monad,
            HasScope Out.Kernels,
            LocalScope Out.Kernels,
            MonadFreshNames,
            MonadLogger)
-- | Run a 'DistribM' action in an empty scope, threading the outer
-- monad's name source through and forwarding the accumulated log.
runDistribM :: (MonadLogger m, MonadFreshNames m) =>
               DistribM a -> m a
runDistribM (DistribM m) = do
  (x, msgs) <- modifyNameSource $ positionNameSource . runRWS m M.empty
  addLog msgs
  return x
  -- Rearrange the RWS result so the name source is the second
  -- component, as modifyNameSource expects.
  where positionNameSource (x, src, msgs) = ((x, msgs), src)
-- | Transform one function definition, bringing its parameters into
-- scope while transforming the body.  Signature and attributes are
-- kept as-is.
transformFunDef :: FunDef -> DistribM (Out.FunDef Out.Kernels)
transformFunDef (FunDef entry name rettype params body) =
  FunDef entry name rettype params <$>
    localScope (scopeOfFParams params) (transformBody body)
-- | Transform a body by transforming its statements; the result list
-- is unchanged.
transformBody :: Body -> DistribM (Out.Body Out.Kernels)
transformBody body =
  flip mkBody (bodyResult body) <$> transformStms (bodyStms body)
transformStms :: [Stm] -> DistribM [KernelsStm]
transformStms [] =
return []
transformStms (bnd:bnds) =
sequentialisedUnbalancedStm bnd >>= \case
Nothing -> do
bnd' <- transformStm bnd
inScopeOf bnd' $
(bnd'++) <$> transformStms bnds
Just bnds' ->
transformStms $ bnds' <> bnds
-- | If the statement is a Map or Redomap whose lambda is unbalanced
-- yet still contains parallelism, sequentialise it with the
-- first-order transform and return the replacement statements;
-- otherwise 'Nothing'.
sequentialisedUnbalancedStm :: Stm -> DistribM (Maybe [Stm])
sequentialisedUnbalancedStm (Let pat _ (Op soac@(Map _ lam _)))
  | unbalancedLambda lam, lambdaContainsParallelism lam = do
      types <- asksScope scopeForSOACs
      Just . snd <$> runBinderT (FOT.transformSOAC pat soac) types
sequentialisedUnbalancedStm (Let pat _ (Op soac@(Redomap _ _ _ lam2 _ _)))
  | unbalancedLambda lam2, lambdaContainsParallelism lam2 = do
      types <- asksScope scopeForSOACs
      Just . snd <$> runBinderT (FOT.transformSOAC pat soac) types
sequentialisedUnbalancedStm _ =
  return Nothing
-- Coerce scopes between the two representations; both are just
-- 'castScope' at different types.
scopeForSOACs :: Scope Out.Kernels -> Scope SOACS
scopeForSOACs = castScope

scopeForKernels :: Scope SOACS -> Scope Out.Kernels
scopeForKernels = castScope
-- | Transform a single statement into kernel-representation
-- statements, dispatching on the SOAC (or control construct) it
-- contains.
transformStm :: Stm -> DistribM [KernelsStm]
-- If: transform both branches.
transformStm (Let pat aux (If c tb fb rt)) = do
  tb' <- transformBody tb
  fb' <- transformBody fb
  return [Let pat aux $ If c tb' fb' rt]
-- DoLoop: transform the body under the loop's scope.  Note that
-- form' merely reconstructs the loop form unchanged.
transformStm (Let pat aux (DoLoop ctx val form body)) =
  localScope (castScope (scopeOf form) <>
              scopeOfFParams mergeparams) $ do
    body' <- transformBody body
    return [Let pat aux $ DoLoop ctx val form' body']
  where mergeparams = map fst $ ctx ++ val
        form' = case form of
                  WhileLoop cond ->
                    WhileLoop cond
                  ForLoop i it bound ps ->
                    ForLoop i it bound ps
-- Map: distributed separately.
transformStm (Let pat (StmAux cs _) (Op (Map w lam arrs))) =
  distributeMap pat $ MapLoop cs w lam arrs
-- Scanomap: sequentialise both lambdas and emit a blocked scan.
transformStm (Let pat (StmAux cs _) (Op (Scanomap w lam1 lam2 nes arrs))) = do
  lam1_sequential <- Kernelise.transformLambda lam1
  lam2_sequential <- Kernelise.transformLambda lam2
  runBinder_ $ certifying cs $
    blockedScan pat w lam1_sequential lam2_sequential (intConst Int32 1) [] [] nes arrs
-- Redomap: currently always sequentialises the body into a blocked
-- reduction (the map+reduce split is dead code behind the flag).
transformStm (Let pat (StmAux cs _) (Op (Redomap w comm lam1 lam2 nes arrs))) =
  if sequentialiseRedomapBody then do
    lam1_sequential <- Kernelise.transformLambda lam1
    lam2_sequential <- Kernelise.transformLambda lam2
    map (certify cs) <$>
      blockedReduction pat w comm' lam1_sequential lam2_sequential nes arrs
  else do
    (mapbnd, redbnd) <- redomapToMapAndReduce pat (w, comm', lam1, lam2, nes, arrs)
    transformStms [certify cs mapbnd, certify cs redbnd]
  where sequentialiseRedomapBody = True
        comm' | commutativeLambda lam1 = Commutative
              | otherwise = comm
-- Reduce with non-primitive accumulators: try IRWIM interchange first.
transformStm (Let res_pat (StmAux cs _) (Op (Reduce w comm red_fun red_input)))
  | Just do_irwim <- irwim res_pat w comm' red_fun red_input = do
      types <- asksScope scopeForSOACs
      bnds <- fst <$> runBinderT (simplifyStms =<< collectStms_ (certifying cs do_irwim)) types
      transformStms bnds
  where comm' | commutativeLambda red_fun = Commutative
              | otherwise = comm
-- Reduce fallback: blocked reduction with sequentialised operator.
transformStm (Let pat (StmAux cs _) (Op (Reduce w comm red_fun red_input))) = do
  red_fun_sequential <- Kernelise.transformLambda red_fun
  red_fun_sequential' <- renameLambda red_fun_sequential
  map (certify cs) <$>
    blockedReduction pat w comm' red_fun_sequential' red_fun_sequential nes arrs
  where (nes, arrs) = unzip red_input
        comm' | commutativeLambda red_fun = Commutative
              | otherwise = comm
-- Scan: try ISWIM interchange first.
transformStm (Let res_pat (StmAux cs _) (Op (Scan w scan_fun scan_input)))
  | Just do_iswim <- iswim res_pat w scan_fun scan_input = do
      types <- asksScope scopeForSOACs
      transformStms =<< (snd <$> runBinderT (certifying cs do_iswim) types)
-- Scan fallback: blocked scan with sequentialised operator.
transformStm (Let pat (StmAux cs _) (Op (Scan w fun input))) = do
  fun_sequential <- Kernelise.transformLambda fun
  fun_sequential_renamed <- renameLambda fun_sequential
  runBinder_ $ certifying cs $
    blockedScan pat w fun_sequential fun_sequential_renamed (intConst Int32 1) [] [] nes arrs
  where (nes, arrs) = unzip input
-- Streams can be handled in two different ways - either we
-- sequentialise the body or we keep it parallel and distribute.
transformStm (Let pat (StmAux cs _) (Op (Stream w (Parallel _ _ _ []) map_fun arrs))) = do
  -- No reduction part. Remove the stream and leave the body
  -- parallel. It will be distributed.
  types <- asksScope scopeForSOACs
  transformStms =<<
    (snd <$> runBinderT (certifying cs $ sequentialStreamWholeArray pat w [] map_fun arrs) types)
transformStm (Let pat aux (Op (Stream w (Parallel _o comm red_fun nes) fold_fun arrs)))
  | any (not . primType) $ lambdaReturnType red_fun,
    Just fold_fun' <- extLambdaToLambda fold_fun = do
  -- Split into a chunked map and a reduction, with the latter
  -- distributed.
  fold_fun_sequential <- Kernelise.transformLambda fold_fun'
  let (red_pat_elems, concat_pat_elems) =
        splitAt (length nes) $ patternValueElements pat
      red_pat = Pattern [] red_pat_elems
      concat_pat = Pattern [] concat_pat_elems
  (map_bnd, map_misc_bnds) <- blockedMap concat_pat w InOrder fold_fun_sequential nes arrs
  let num_threads = arraysSize 0 $ patternTypes $ stmPattern map_bnd
      red_input = zip nes $ patternNames $ stmPattern map_bnd
  ((map_misc_bnds++[map_bnd])++) <$>
    inScopeOf (map_misc_bnds++[map_bnd])
    (transformStm $ Let red_pat aux $
     Op (Reduce num_threads comm' red_fun red_input))
    where comm' | commutativeLambda red_fun = Commutative
                | otherwise = comm
transformStm (Let pat _ (Op (Stream w
                             (Parallel o comm red_fun nes) fold_fun arrs)))
  | Just fold_fun' <- extLambdaToLambda fold_fun = do
  -- Generate a kernel immediately.
  red_fun_sequential <- Kernelise.transformLambda red_fun
  fold_fun_sequential <- Kernelise.transformLambda fold_fun'
  blockedReductionStream pat w comm' red_fun_sequential fold_fun_sequential nes arrs
  where comm' | commutativeLambda red_fun, o /= InOrder = Commutative
              | otherwise = comm
transformStm (Let pat _ (Op (Stream w (Sequential nes) fold_fun arrs))) = do
  -- Remove the stream and leave the body parallel. It will be
  -- distributed.
  types <- asksScope scopeForSOACs
  transformStms =<<
    (snd <$> runBinderT (sequentialStreamWholeArray pat w nes fold_fun arrs) types)
-- Scatter: build a write-kernel directly.
transformStm (Let pat (StmAux cs _) (Op (Scatter w lam ivs as))) = runBinder_ $ do
  lam' <- Kernelise.transformLambda lam
  write_i <- newVName "write_i"
  let (i_res, v_res) = splitAt (length as) $ bodyResult $ lambdaBody lam'
      kstms = bodyStms $ lambdaBody lam'
      krets = do (i, v, (a_w, a)) <- zip3 i_res v_res as
                 return $ WriteReturn [a_w] a [i] v
      body = KernelBody () kstms krets
      inputs = do (p, p_a) <- zip (lambdaParams lam') ivs
                  return $ KernelInput (paramName p) (paramType p) p_a [Var write_i]
  (bnds, kernel) <-
    mapKernel w (FlatThreadSpace [(write_i,w)]) inputs (map rowType $ patternTypes pat) body
  certifying cs $ do
    mapM_ addStm bnds
    letBind_ pat $ Op kernel
-- Everything else: first-order transform.
transformStm bnd =
  runBinder_ $ FOT.transformStmRecursively bnd
data MapLoop = MapLoop Certificates SubExp Lambda [VName] -- ^ A Map SOAC plus its certificates, kept apart for distribution.
-- | Reconstitute the original Map expression (dropping certificates).
mapLoopExp :: MapLoop -> Exp
mapLoopExp (MapLoop _ width fun inputs) = Op (Map width fun inputs)
-- | Distribute a top-level Map.  Always produces the distributed
-- ("par") version; when versioned code is enabled and the lambda
-- contains nested parallelism, additionally produces a fully
-- sequentialised version and an intra-group-parallel version, chosen
-- at runtime via 'kernelAlternatives'.
distributeMap :: (HasScope Out.Kernels m,
                  MonadFreshNames m, MonadLogger m) =>
                 Pattern -> MapLoop -> m [KernelsStm]
distributeMap pat (MapLoop cs w lam arrs) = do
  types <- askScope
  let loopnest = MapNesting pat cs w $ zip (lambdaParams lam) arrs
      env = KernelEnv { kernelNest =
                        singleNesting (Nesting mempty loopnest)
                      , kernelScope =
                        scopeForKernels (scopeOf lam) <> types
                      }
  let res = map Var $ patternNames pat
  par_stms <- fmap (postKernelsStms . snd) $ runKernelM env $
    distribute =<< distributeMapBodyStms acc (bodyStms $ lambdaBody lam)
  if not versionedCode || not (containsNestedParallelism lam)
    then return par_stms
    else do
      par_body <- renameBody $ mkBody par_stms res
      -- Fully sequentialised alternative.
      seq_stms <- do
        soactypes <- asksScope scopeForSOACs
        (seq_lam, _) <- runBinderT (Kernelise.transformLambda lam) soactypes
        fmap (postKernelsStms . snd) $ runKernelM env $ distribute $
          addStmsToKernel (bodyStms $ lambdaBody seq_lam) acc
      seq_body <- renameBody $ mkBody seq_stms res
      (outer_suff, outer_suff_stms) <- runBinder $
        letSubExp "outer_suff_par" $ Op $ SufficientParallelism w
      -- Intra-group-parallel alternative.
      intra_stms <- flip runReaderT types $ localScope (scopeOfLParams (lambdaParams lam)) $
        intraGroupParallelise (newKernel loopnest) $ lambdaBody lam
      group_par_body <- renameBody $ mkBody intra_stms res
      (intra_suff, intra_suff_stms) <- runBinder $ do
        group_size <- letSubExp "group_size" $ Op GroupSize
        group_available_par <-
          letSubExp "group_available_par" $ BasicOp $ BinOp (Mul Int32) w group_size
        -- Intra-group version is gated on an environment variable.
        if isJust $ lookup "FUTHARK_INTRA_GROUP_PARALLELISM" unixEnvironment then
          letSubExp "group_suff_par" $ Op $ SufficientParallelism group_available_par
        else return $ constant False
      ((outer_suff_stms++intra_suff_stms)++) <$>
        kernelAlternatives pat par_body [(outer_suff, seq_body),
                                         (intra_suff, group_par_body)]
  where acc = KernelAcc { kernelTargets = singleTarget (pat, bodyResult $ lambdaBody lam)
                        , kernelStms = mempty
                        }
-- | Reader environment of 'KernelM': current map nest and scope.
data KernelEnv = KernelEnv { kernelNest :: Nestings
                           , kernelScope :: Scope Out.Kernels
                           }
-- | Accumulator threaded through distribution: distribution targets
-- and the in-kernel statements collected so far.
data KernelAcc = KernelAcc { kernelTargets :: Targets
                           , kernelStms :: [InKernelStm]
                           }
-- | Writer output of 'KernelM': finished kernels plus a log.
data KernelRes = KernelRes { accPostKernels :: PostKernels
                           , accLog :: Log
                           }
-- | Component-wise combination of kernel lists and logs.
instance Monoid KernelRes where
  KernelRes ks1 log1 `mappend` KernelRes ks2 log2 =
    KernelRes (ks1 <> ks2) (log1 <> log2)
  mempty = KernelRes mempty mempty
-- | One finished kernel, as the statements that bind it.
newtype PostKernel = PostKernel { unPostKernel :: [KernelsStm] }
-- | Kernels are collected in reverse order (newest first); see the
-- flipped (++) in mappend, which postKernelsStms relies on.
newtype PostKernels = PostKernels [PostKernel]
instance Monoid PostKernels where
  mempty = PostKernels mempty
  PostKernels xs `mappend` PostKernels ys = PostKernels $ ys ++ xs
-- | Flatten the collected kernels into one statement list.
postKernelsStms :: PostKernels -> [KernelsStm]
postKernelsStms (PostKernels kernels) = foldMap unPostKernel kernels
-- | The scope induced by the pattern of the outermost target.
typeEnvFromKernelAcc :: KernelAcc -> Scope Out.Kernels
typeEnvFromKernelAcc = scopeOfPattern . fst . outerTarget . kernelTargets
-- | Prepend already-kernelised statements to the accumulator.
addStmsToKernel :: [InKernelStm] -> KernelAcc -> KernelAcc
addStmsToKernel new_stms acc =
  let combined = new_stms <> kernelStms acc
  in acc { kernelStms = combined }
-- | Kernelise a single SOACS statement and prepend the resulting
-- in-kernel statements to the accumulator.
addStmToKernel :: (LocalScope Out.Kernels m, MonadFreshNames m) =>
                  Stm -> KernelAcc -> m KernelAcc
addStmToKernel bnd acc = do
  stms <- runBinder_ $ Kernelise.transformStm bnd
  return acc { kernelStms = stms <> kernelStms acc }
-- | The monad in which distribution of a single map nest happens:
-- 'KernelEnv' for reading, 'KernelRes' for writing, fresh names.
newtype KernelM a = KernelM (RWS KernelEnv KernelRes VNameSource a)
  deriving (Functor, Applicative, Monad,
            MonadReader KernelEnv,
            MonadWriter KernelRes,
            MonadFreshNames)

instance HasScope Out.Kernels KernelM where
  askScope = asks kernelScope

instance LocalScope Out.Kernels KernelM where
  localScope types = local $ \env ->
    env { kernelScope = types <> kernelScope env }

instance MonadLogger KernelM where
  addLog msgs = tell mempty { accLog = msgs }
-- | Run a 'KernelM' action, threading the outer name source through,
-- forwarding its log, and returning the kernels it produced.
runKernelM :: (MonadFreshNames m, MonadLogger m) =>
              KernelEnv -> KernelM a -> m (a, PostKernels)
runKernelM env (KernelM m) = do
  (x, res) <- modifyNameSource $ getKernels . runRWS m env
  addLog $ accLog res
  return (x, accPostKernels res)
  where getKernels (x,s,a) = ((x, a), s)
-- | Run an action but intercept the kernels it produces, returning
-- them instead of emitting them (the log is still emitted).
collectKernels :: KernelM a -> KernelM (a, PostKernels)
collectKernels m = pass $ do
  (x, res) <- listen m
  return ((x, accPostKernels res),
          const res { accPostKernels = mempty })
-- | Emit already-packaged kernels.
addKernels :: PostKernels -> KernelM ()
addKernels ks = tell $ mempty { accPostKernels = ks }

-- | Emit the given statements as one finished kernel.
addKernel :: [KernelsStm] -> KernelM ()
addKernel bnds = addKernels $ PostKernels [PostKernel bnds]
-- | Run an action with the statement's bindings in scope and recorded
-- as let-bound in the innermost nesting.
withStm :: Stm -> KernelM a -> KernelM a
withStm bnd = local $ \env ->
  env { kernelScope =
          scopeForKernels (scopeOf [bnd]) <> kernelScope env
      , kernelNest =
          letBindInInnerNesting provided $
          kernelNest env
      }
  where provided = S.fromList $ patternNames $ stmPattern bnd
-- | Run an action inside one additional level of map nesting, with
-- the map lambda's parameters in scope.
mapNesting :: Pattern -> Certificates -> SubExp -> Lambda -> [VName]
           -> KernelM a
           -> KernelM a
mapNesting pat cs w lam arrs = local $ \env ->
  env { kernelNest = pushInnerNesting nest $ kernelNest env
      , kernelScope = scopeForKernels (scopeOf lam) <> kernelScope env
      }
  where nest = Nesting mempty $
               MapNesting pat cs w $
               zip (lambdaParams lam) arrs
-- | Run an action with its nesting replaced by the given kernel nest.
-- The innermost loop of the nest becomes the new outer nesting; the
-- remaining loops (including the old outer one) become inner.
inNesting :: KernelNest -> KernelM a -> KernelM a
inNesting (outer, nests) = local $ \env ->
  env { kernelNest = (inner, nests')
      , kernelScope = mconcat (map scopeOf $ outer : nests) <> kernelScope env
      }
  where (inner, nests') =
          case reverse nests of
            [] -> (asNesting outer, [])
            (inner' : ns) -> (asNesting inner', map asNesting $ outer : reverse ns)
        asNesting = Nesting mempty
-- | A lambda is unbalanced if it contains a parallel construct whose
-- width, or a sequential loop whose trip count, depends on a variable
-- bound inside the lambda — meaning different invocations may do
-- different amounts of work.
unbalancedLambda :: Lambda -> Bool
unbalancedLambda lam =
  unbalancedBody
  (S.fromList $ map paramName $ lambdaParams lam) $
  lambdaBody lam
  where subExpBound (Var i) bound = i `S.member` bound
        subExpBound (Constant _) _ = False

        unbalancedBody bound body =
          any (unbalancedStm (bound <> boundInBody body) . stmExp) $
          bodyStms body

        -- XXX - our notion of balancing is probably still too naive.
        unbalancedStm bound (Op (Map w _ _)) =
          w `subExpBound` bound
        unbalancedStm bound (Op (Reduce w _ _ _)) =
          w `subExpBound` bound
        unbalancedStm bound (Op (Scan w _ _)) =
          w `subExpBound` bound
        unbalancedStm bound (Op (Redomap w _ _ _ _ _)) =
          w `subExpBound` bound
        unbalancedStm bound (Op (Scanomap w _ _ _ _)) =
          w `subExpBound` bound
        unbalancedStm bound (Op (Stream w _ _ _)) =
          w `subExpBound` bound
        unbalancedStm _ (Op Scatter{}) =
          False
        unbalancedStm bound (DoLoop _ merge (ForLoop i _ iterations _) body) =
          iterations `subExpBound` bound ||
          unbalancedBody bound' body
          where bound' = foldr S.insert bound $
                         i : map (paramName . fst) merge
        -- While loops have statically unknowable trip counts, so we
        -- do not count them as unbalanced per se.
        unbalancedStm _ (DoLoop _ _ (WhileLoop _) _) =
          False
        unbalancedStm bound (If _ tbranch fbranch _) =
          unbalancedBody bound tbranch || unbalancedBody bound fbranch
        unbalancedStm _ (BasicOp _) =
          False
        -- Calls to non-builtin functions may hide arbitrary work.
        unbalancedStm _ (Apply fname _ _ _) =
          not $ isBuiltInFunction fname
-- | Does the body contain any SOAC statement?  Note that every 'Op'
-- counts as parallelism here, not just maps.
bodyContainsParallelism :: Body -> Bool
bodyContainsParallelism body = any (parallelStm . stmExp) (bodyStms body)
  where parallelStm Op{} = True
        parallelStm _ = False
-- | Does the lambda's body contain any SOAC statement?
lambdaContainsParallelism :: Lambda -> Bool
lambdaContainsParallelism lam = bodyContainsParallelism (lambdaBody lam)
-- | Returns the sizes of immediate nested parallelism.
nestedParallelism :: Body -> [SubExp]
nestedParallelism = concatMap (parallelism . stmExp) . bodyStms
  where parallelism (Op (Reduce w _ _ _)) = [w]
        parallelism (Op (Scan w _ _)) = [w]
        parallelism (Op (Scanomap w _ _ _ _)) = [w]
        parallelism (Op (Redomap w _ _ _ _ _)) = [w]
        parallelism (Op (Map w _ _)) = [w]
        -- For sequential streams, widths expressed in terms of the
        -- chunk-size parameter are rewritten to the full stream width.
        parallelism (Op (Stream w Sequential{} lam _))
          | chunk_size_param : _ <- extLambdaParams lam =
              let update (Var v) | v == paramName chunk_size_param = w
                  update se = se
              in map update $ nestedParallelism $ extLambdaBody lam
        parallelism _ = []
-- | True if the lambda body has immediate nested parallelism that is
-- not just a perfect map nest (a body consisting of exactly one map
-- statement).
containsNestedParallelism :: Lambda -> Bool
containsNestedParallelism lam =
  has_parallelism && not (isPerfectMapNest body_stms)
  where body_stms = bodyStms $ lambdaBody lam
        has_parallelism = not $ null $ nestedParallelism $ lambdaBody lam
        isPerfectMapNest stms =
          case stms of
            [Let _ _ (Op Map{})] -> True
            _ -> False
-- Enable if you want the cool new versioned code.  Beware: may be
-- slower in practice.  Caveat emptor (and you are the emptor).
-- True iff FUTHARK_VERSIONED_CODE is set in the environment.
versionedCode :: Bool
versionedCode =
  case lookup "FUTHARK_VERSIONED_CODE" unixEnvironment of
    Just _ -> True
    Nothing -> False
-- | Distribute a map nested inside the current kernel nesting.  An
-- unbalanced-but-parallel lambda is kept sequential inside the
-- kernel.  Otherwise, we either distribute normally, or (when
-- versioned code is enabled and the lambda contains non-trivial
-- nested parallelism) emit both a sequentialised and a distributed
-- version, selected at run time by a sufficient-parallelism check.
distributeInnerMap :: Pattern -> MapLoop -> KernelAcc
                   -> KernelM KernelAcc
distributeInnerMap pat maploop@(MapLoop cs w lam arrs) acc
  | unbalancedLambda lam, lambdaContainsParallelism lam =
      addStmToKernel (Let pat (StmAux cs ()) $ mapLoopExp maploop) acc
  | not versionedCode || not (containsNestedParallelism lam) =
      distributeNormally
  | otherwise =
      distributeSingleStm acc (Let pat (StmAux cs ()) $ mapLoopExp maploop) >>= \case
      Nothing ->
        distributeNormally
      Just (post_kernels, _, nest, acc') -> do
        addKernels post_kernels
        -- The kernel can be distributed by itself, so now we can
        -- decide whether to just sequentialise, or exploit inner
        -- parallelism.
        let map_nesting = MapNesting pat cs w $ zip (lambdaParams lam) arrs
            nest' = pushInnerKernelNesting (pat, lam_res) map_nesting nest
            par_acc = KernelAcc { kernelTargets = pushInnerTarget
                                  (pat, lam_res) $ kernelTargets acc
                                , kernelStms = mempty
                                }
            extra_scope = targetsScope $ kernelTargets acc'

        -- Version 1: distribute the lambda body, exploiting the
        -- inner parallelism.
        (_, distributed_kernels) <- collectKernels $
          localScope extra_scope $ inNesting nest' $
          distribute =<< leavingNesting maploop =<< distribute =<<
          distributeMapBodyStms par_acc lam_bnds

        -- Version 2: sequentialise the lambda body into a single
        -- kernel; 'parw' is the parallelism of that kernel.
        (parw_bnds, parw, sequentialised_kernel) <- localScope extra_scope $ do
          sequentialised_map_body <-
            localScope (scopeOfLParams (lambdaParams lam)) $ runBinder_ $
            Kernelise.transformStms lam_bnds
          let kbody = KernelBody () sequentialised_map_body $
                      map (ThreadsReturn ThreadsInSpace) lam_res
          constructKernel nest' kbody

        let outer_pat = loopNestingPattern $ fst nest
            res' = map Var $ patternNames outer_pat
        seq_body <- renameBody $ mkBody [sequentialised_kernel] res'
        par_body <- renameBody $ mkBody (postKernelsStms distributed_kernels) res'

        -- Guard: use the sequentialised kernel when its own
        -- parallelism is already sufficient.
        (sufficient_parallelism, sufficient_stms) <- runBinder $
          letSubExp "sufficient_parallelism" $ Op $ SufficientParallelism parw
        addKernel =<< kernelAlternatives outer_pat
          par_body [(sufficient_parallelism,seq_body)]
        addKernel $ parw_bnds ++ sufficient_stms
        return acc'
  where lam_bnds = bodyStms $ lambdaBody lam
        lam_res = bodyResult $ lambdaBody lam

        def_acc = KernelAcc { kernelTargets = pushInnerTarget
                              (pat, bodyResult $ lambdaBody lam) $
                              kernelTargets acc
                            , kernelStms = mempty
                            }

        -- The ordinary path: enter the map nesting, distribute its
        -- body, and leave again.
        distributeNormally =
          distribute =<<
          leavingNesting maploop =<<
          mapNesting pat cs w lam arrs
          (distribute =<< distributeMapBodyStms def_acc lam_bnds)
-- | Called when we are done distributing one map nesting level: pop
-- the innermost target, and if any statements remain in the
-- accumulator, wrap them back into a map over only those parameters
-- and arrays that the remnant body actually uses.
leavingNesting :: MapLoop -> KernelAcc -> KernelM KernelAcc
leavingNesting (MapLoop cs w lam arrs) acc =
  case popInnerTarget $ kernelTargets acc of
    Nothing ->
      fail "The kernel targets list is unexpectedly small"
    Just ((pat,res), newtargets) -> do
      let acc' = acc { kernelTargets = newtargets }
      case kernelStms acc' of
        [] -> return acc'
        remnant -> do
          -- Keep only the map parameters/arrays that are free in the
          -- remnant body.
          let kbody = Body () remnant res
              used_in_body = freeInBody kbody
              (used_params, used_arrs) =
                unzip $
                filter ((`S.member` used_in_body) . paramName . fst) $
                zip (lambdaParams lam) arrs
          stms <- runBinder_ $ Kernelise.mapIsh pat cs w used_params kbody used_arrs
          -- The remnant is replaced by the rebuilt map statements.
          return $ addStmsToKernel stms acc' { kernelStms = [] }
-- | Distribute the statements of a map body.  The list is processed
-- back-to-front, so each statement is distributed against the result
-- of distributing everything that follows it.
distributeMapBodyStms :: KernelAcc -> [Stm] -> KernelM KernelAcc

distributeMapBodyStms acc [] =
  return acc

-- A sequential stream is first expanded into equivalent statements
-- over whole arrays (with copy propagation applied and the stream's
-- certificates attached), and those statements distributed instead.
distributeMapBodyStms acc
  (Let pat (StmAux cs _) (Op (Stream w (Sequential accs) lam arrs)):bnds) = do
    types <- asksScope scopeForSOACs
    stream_bnds <-
      snd <$> runBinderT (sequentialStreamWholeArray pat w accs lam arrs) types
    stream_bnds' <-
      runReaderT (copyPropagateInStms simpleSOACS stream_bnds) types
    distributeMapBodyStms acc $ map (certify cs) stream_bnds' ++ bnds

distributeMapBodyStms acc (bnd:bnds) =
  -- It is important that bnd is in scope if 'maybeDistributeStm'
  -- wants to distribute, even if this causes the slightly silly
  -- situation that bnd is in scope of itself.
  withStm bnd $
  maybeDistributeStm bnd =<<
  distributeMapBodyStms acc bnds
-- | Try to distribute a single statement out of the surrounding map
-- nest, turning it into one or more kernels when possible.  When a
-- statement cannot be distributed, it is added to the kernel
-- accumulator (and will eventually be sequentialised inside a
-- kernel).  Equation order is significant: more specific forms are
-- tried before the catch-all at the bottom.
maybeDistributeStm :: Stm -> KernelAcc
                   -> KernelM KernelAcc

maybeDistributeStm bnd@(Let pat _ (Op (Map w lam arrs))) acc =
  -- Only distribute inside the map if we can distribute everything
  -- following the map.
  distributeIfPossible acc >>= \case
    Nothing -> addStmToKernel bnd acc
    Just acc' -> distribute =<< distributeInnerMap pat (MapLoop (stmCerts bnd) w lam arrs) acc'

-- A context-free for-loop whose body contains parallelism is
-- interchanged outwards and the result re-transformed, provided the
-- loop form uses nothing bound in the kernel nest.
maybeDistributeStm bnd@(Let pat _ (DoLoop [] val form@ForLoop{} body)) acc
  | null (patternContextElements pat), bodyContainsParallelism body =
    distributeSingleStm acc bnd >>= \case
      Just (kernels, res, nest, acc')
        | S.null $ freeIn form `S.intersection` boundInKernelNest nest,
          Just (perm, pat_unused) <- permutationAndMissing pat res ->
          -- We need to pretend pat_unused was used anyway, by adding
          -- it to the kernel nest.
          localScope (typeEnvFromKernelAcc acc') $ do
            nest' <- expandKernelNest pat_unused nest
            addKernels kernels
            types <- asksScope scopeForSOACs
            bnds <- runReaderT
                    (interchangeLoops nest' (SeqLoop perm pat val form body)) types
            -- runDistribM starts out with an empty scope, so we have to
            -- immediately insert the real one.
            scope <- askScope
            bnds' <- runDistribM $ localScope scope $ transformStms bnds
            addKernel bnds'
            return acc'
      _ ->
        addStmToKernel bnd acc

-- If the reduction can be interchanged with the maps via 'irwim',
-- distribute the resulting statements instead.
maybeDistributeStm (Let pat (StmAux cs _) (Op (Reduce w comm lam input))) acc
  | Just m <- irwim pat w comm lam input = do
      types <- asksScope scopeForSOACs
      (_, bnds) <- runBinderT (certifying cs m) types
      distributeMapBodyStms acc bnds

-- Parallelise segmented scatters.
maybeDistributeStm bnd@(Let pat (StmAux cs _) (Op (Scatter w lam ivs as))) acc =
  distributeSingleStm acc bnd >>= \case
    Just (kernels, res, nest, acc')
      | Just (perm, pat_unused) <- permutationAndMissing pat res ->
        localScope (typeEnvFromKernelAcc acc') $ do
          nest' <- expandKernelNest pat_unused nest
          lam' <- Kernelise.transformLambda lam
          addKernels kernels
          addKernel =<< segmentedScatterKernel nest' perm pat cs w lam' ivs as
          return acc'
    _ ->
      addStmToKernel bnd acc

-- If the scan can be distributed by itself, we will turn it into a
-- segmented scan.
--
-- If the scan cannot be distributed by itself, it will be
-- sequentialised in the default case for this function.
maybeDistributeStm bnd@(Let pat (StmAux cs _) (Op (Scanomap w lam fold_lam nes arrs))) acc =
  distributeSingleStm acc bnd >>= \case
    Just (kernels, res, nest, acc')
      | Just (perm, pat_unused) <- permutationAndMissing pat res ->
        -- We need to pretend pat_unused was used anyway, by adding
        -- it to the kernel nest.
        localScope (typeEnvFromKernelAcc acc') $ do
          nest' <- expandKernelNest pat_unused nest
          lam' <- Kernelise.transformLambda lam
          fold_lam' <- Kernelise.transformLambda fold_lam
          localScope (typeEnvFromKernelAcc acc') $
            segmentedScanomapKernel nest' perm w lam' fold_lam' nes arrs >>=
            kernelOrNot cs bnd acc kernels acc'
    _ ->
      addStmToKernel bnd acc

-- If the reduction can be distributed by itself, we will turn it into a
-- segmented reduce.
--
-- If the reduction cannot be distributed by itself, it will be
-- sequentialised in the default case for this function.
maybeDistributeStm bnd@(Let pat (StmAux cs _) (Op (Redomap w comm lam foldlam nes arrs))) acc | versionedCode =
  distributeSingleStm acc bnd >>= \case
    Just (kernels, res, nest, acc')
      | Just (perm, pat_unused) <- permutationAndMissing pat res ->
        -- We need to pretend pat_unused was used anyway, by adding
        -- it to the kernel nest.
        localScope (typeEnvFromKernelAcc acc') $ do
          nest' <- expandKernelNest pat_unused nest
          lam' <- Kernelise.transformLambda lam
          foldlam' <- Kernelise.transformLambda foldlam
          regularSegmentedRedomapKernel nest' perm w comm' lam' foldlam' nes arrs >>=
            kernelOrNot cs bnd acc kernels acc'
    _ ->
      addStmToKernel bnd acc
  -- A lambda shown commutative by analysis is treated as such,
  -- regardless of the original annotation.
  where comm' | commutativeLambda lam = Commutative
              | otherwise = comm

-- Redomap and Scanomap are general cases, so pretend nested
-- reductions and scans are Redomap and Scanomap.  Well, not for
-- Reduce, because of a hack...
maybeDistributeStm bnd@(Let pat (StmAux cs _) (Op (Reduce w comm lam input))) acc =
  distributeSingleStm acc bnd >>= \case
    Just (kernels, res, nest, acc')
      | Just (perm, pat_unused) <- permutationAndMissing pat res ->
        -- We need to pretend pat_unused was used anyway, by adding
        -- it to the kernel nest.
        localScope (typeEnvFromKernelAcc acc') $ do
          let (nes, arrs) = unzip input
          nest' <- expandKernelNest pat_unused nest
          lam' <- Kernelise.transformLambda lam
          -- The fold lambda is a renamed copy of the reduce lambda.
          foldlam' <- renameLambda lam'
          regularSegmentedRedomapKernel nest' perm w comm' lam' foldlam' nes arrs >>=
            kernelOrNot cs bnd acc kernels acc'
    _ ->
      addStmToKernel bnd acc
  where comm' | commutativeLambda lam = Commutative
              | otherwise = comm

-- A scan is rewritten as a scanomap whose fold function is a renamed
-- copy of the scan function, and re-dispatched.
maybeDistributeStm (Let pat aux (Op (Scan w lam input))) acc = do
  let (nes, arrs) = unzip input
  lam_renamed <- renameLambda lam
  let bnd = Let pat aux $ Op $ Scanomap w lam lam_renamed nes arrs
  maybeDistributeStm bnd acc

-- A multi-dimensional replicate is peeled one dimension at a time by
-- turning it into a map over the outermost dimension.
maybeDistributeStm (Let pat aux (BasicOp (Replicate (Shape (d:ds)) v))) acc
  | [t] <- patternTypes pat = do
      -- XXX: We need a temporary dummy binding to prevent an empty
      -- map body.  The kernel extractor does not like empty map
      -- bodies.
      tmp <- newVName "tmp"
      let rowt = rowType t
          newbnd = Let pat aux $ Op $ Map d lam []
          tmpbnd = Let (Pattern [] [PatElem tmp BindVar rowt]) aux $
                   BasicOp $ Replicate (Shape ds) v
          lam = Lambda { lambdaReturnType = [rowt]
                       , lambdaParams = []
                       , lambdaBody = mkBody [tmpbnd] [Var tmp]
                       }
      maybeDistributeStm newbnd acc

-- A copy of a single array becomes a copy of the full (outer) array.
maybeDistributeStm bnd@(Let _ aux (BasicOp Copy{})) acc =
  distributeSingleUnaryStm acc bnd $ \_ outerpat arr ->
    addKernel [Let outerpat aux $ BasicOp $ Copy arr]

-- Opaques are applied to the full array, because otherwise they can
-- drastically inhibit parallelisation in some cases.
maybeDistributeStm bnd@(Let (Pattern [] [pe]) aux (BasicOp Opaque{})) acc
  | not $ primType $ typeOf pe =
      distributeSingleUnaryStm acc bnd $ \_ outerpat arr ->
        addKernel [Let outerpat aux $ BasicOp $ Copy arr]

-- A rearrange is lifted to the full array by prefixing the identity
-- permutation on the dimensions contributed by the nest.
maybeDistributeStm bnd@(Let _ aux (BasicOp (Rearrange perm _))) acc =
  distributeSingleUnaryStm acc bnd $ \nest outerpat arr -> do
    let r = length (snd nest) + 1
        perm' = [0..r-1] ++ map (+r) perm
    addKernel [Let outerpat aux $ BasicOp $ Rearrange perm' arr]

-- A reshape is lifted to the full array by prefixing the widths of
-- the nest as new outer dimensions.
maybeDistributeStm bnd@(Let _ aux (BasicOp (Reshape reshape _))) acc =
  distributeSingleUnaryStm acc bnd $ \nest outerpat arr -> do
    let reshape' = map DimNew (kernelNestWidths nest) ++
                   map DimNew (newDims reshape)
    addKernel [Let outerpat aux $ BasicOp $ Reshape reshape' arr]

-- Fallback: keep the statement inside the kernel (sequentialised).
maybeDistributeStm bnd acc =
  addStmToKernel bnd acc
-- | Distribute a statement that is a unary operation on one array:
-- succeeds only when the statement can be distributed by itself, its
-- result is exactly its own pattern, and the outermost nesting maps
-- over exactly one array.  The continuation is then given the nest,
-- the outer pattern and that array, and must add the kernels.
distributeSingleUnaryStm :: KernelAcc
                         -> Stm
                         -> (KernelNest -> Pattern -> VName -> KernelM ())
                         -> KernelM KernelAcc
distributeSingleUnaryStm acc bnd f =
  distributeSingleStm acc bnd >>= \case
    Just (kernels, res, nest, acc')
      | res == map Var (patternNames $ stmPattern bnd),
        (outer, _) <- nest,
        [(_, arr)] <- loopNestingParamsAndArrs outer -> do
          addKernels kernels
          let outerpat = loopNestingPattern $ fst nest
          f nest outerpat arr
          return acc'
    _ -> addStmToKernel bnd acc
-- | Distribute the accumulated statements if possible; otherwise the
-- accumulator is returned unchanged.
distribute :: KernelAcc -> KernelM KernelAcc
distribute acc = do
  distributed <- distributeIfPossible acc
  case distributed of
    Just acc' -> return acc'
    Nothing -> return acc
-- | Attempt to distribute the accumulated statements as a kernel.
-- On success, the kernel is added and a fresh, empty accumulator
-- (with the updated targets) is returned; on failure, Nothing.
distributeIfPossible :: KernelAcc -> KernelM (Maybe KernelAcc)
distributeIfPossible acc = do
  nest <- asks kernelNest
  attempt <- tryDistribute nest (kernelTargets acc) (kernelStms acc)
  case attempt of
    Nothing -> return Nothing
    Just (targets, kernel) -> do
      addKernel kernel
      return $ Just KernelAcc { kernelTargets = targets
                              , kernelStms = []
                              }
-- | First try to distribute everything in the accumulator, then the
-- given statement on top of that.  On success, yields the kernels
-- built from the accumulated statements, the statement's result, the
-- extended kernel nest, and a fresh accumulator.
distributeSingleStm :: KernelAcc -> Stm
                    -> KernelM (Maybe (PostKernels, Result, KernelNest, KernelAcc))
distributeSingleStm acc bnd = do
  nest <- asks kernelNest
  acc_distributed <- tryDistribute nest (kernelTargets acc) (kernelStms acc)
  case acc_distributed of
    Nothing -> return Nothing
    Just (targets, distributed_bnds) -> do
      stm_distributed <- tryDistributeStm nest targets bnd
      case stm_distributed of
        Nothing -> return Nothing
        Just (res, targets', new_kernel_nest) ->
          return $ Just (PostKernels [PostKernel distributed_bnds],
                         res,
                         new_kernel_nest,
                         KernelAcc { kernelTargets = targets'
                                   , kernelStms = []
                                   })
-- | Turn a scatter nested inside a map nest into a single flat
-- scatter kernel, rearranging the outer pattern by the given
-- permutation.
segmentedScatterKernel :: KernelNest
                       -> [Int]
                       -> Pattern
                       -> Certificates
                       -> SubExp
                       -> InKernelLambda
                       -> [VName] -> [(SubExp,VName)]
                       -> KernelM [KernelsStm]
segmentedScatterKernel nest perm scatter_pat cs scatter_w lam ivs as = do
  -- We replicate some of the checking done by 'isSegmentedOp', but
  -- things are different because a scatter is not a reduction or
  -- scan.
  --
  -- First, pretend that the scatter is also part of the nesting.  The
  -- KernelNest we produce here is technically not sensible, but it's
  -- good enough for flatKernel to work.
  let nest' = pushInnerKernelNesting (scatter_pat, bodyResult $ lambdaBody lam)
              (MapNesting scatter_pat cs scatter_w $ zip (lambdaParams lam) ivs) nest
  (nest_bnds, w, ispace, kernel_inps, _rets) <- flatKernel nest'

  -- The input/output arrays ('as') _must_ correspond to some kernel
  -- input, or else the original nested scatter would have been
  -- ill-typed.  Find them.
  as_inps <- mapM (findInput kernel_inps . snd) as

  runBinder_ $ do
    mapM_ addStm nest_bnds

    -- The lambda returns indices first, then values; only the value
    -- types contribute kernel return types.
    let rts = drop (length as) $ lambdaReturnType lam
        (is,vs) = splitAt (length as) $ bodyResult $ lambdaBody lam
        k_body = KernelBody () (bodyStms $ lambdaBody lam) $
                 zipWith (inPlaceReturn ispace)
                 (map fst as) $ zip3 as_inps is vs

    (k_bnds, k) <-
      mapKernel w (FlatThreadSpace ispace) kernel_inps rts k_body

    mapM_ addStm k_bnds

    let pat = Pattern [] $ rearrangeShape perm $
              patternValueElements $ loopNestingPattern $ fst nest

    certifying cs $ letBind_ pat $ Op k
  where findInput kernel_inps a =
          maybe bad return $ find ((==a) . kernelInputName) kernel_inps
        bad = fail "Ill-typed nested scatter encountered."

        -- An in-place write: indexed by the outer segment indices
        -- followed by the index computed in the lambda.
        inPlaceReturn ispace aw (inp,i,v) =
          WriteReturn (init ws++[aw]) (kernelInputArray inp) (map Var (init gtids)++[i]) v
          where (gtids,ws) = unzip ispace
-- | Turn a scanomap nested in a map nest into a segmented scan over
-- flattened arrays, if 'isSegmentedOp' deems this possible.  The flat
-- results are reshaped back to the dimensions of the original
-- (nested) pattern.
segmentedScanomapKernel :: KernelNest
                        -> [Int]
                        -> SubExp
                        -> InKernelLambda -> InKernelLambda
                        -> [SubExp] -> [VName]
                        -> KernelM (Maybe [KernelsStm])
segmentedScanomapKernel nest perm segment_size lam fold_lam nes arrs =
  isSegmentedOp nest perm segment_size
  (lambdaReturnType fold_lam) (freeInLambda lam) (freeInLambda fold_lam) nes arrs $
  \pat flat_pat _num_segments total_num_elements ispace inps nes' arrs' -> do
    regularSegmentedScan segment_size flat_pat total_num_elements
      lam fold_lam ispace inps nes' arrs'

    -- Reshape each flat result array back to the shape expected by
    -- the original pattern element.
    forM_ (zip (patternValueElements pat) (patternNames flat_pat)) $
      \(dst_pat_elem, flat) -> do
        let ident = patElemIdent dst_pat_elem
            bindage = patElemBindage dst_pat_elem
            dims = arrayDims $ identType ident
        addStm $ mkLet [] [(ident, bindage)] $
          BasicOp $ Reshape (map DimNew dims) flat
-- | Turn a redomap nested in a map nest into a regular segmented
-- redomap over flattened arrays, if 'isSegmentedOp' deems this
-- possible.
regularSegmentedRedomapKernel :: KernelNest
                              -> [Int]
                              -> SubExp -> Commutativity
                              -> InKernelLambda -> InKernelLambda -> [SubExp] -> [VName]
                              -> KernelM (Maybe [KernelsStm])
regularSegmentedRedomapKernel nest perm segment_size comm lam fold_lam nes arrs =
  isSegmentedOp nest perm segment_size
  (lambdaReturnType fold_lam) (freeInLambda lam) (freeInLambda fold_lam) nes arrs $
  \pat flat_pat num_segments total_num_elements ispace inps nes' arrs' ->
    regularSegmentedRedomap
    segment_size num_segments (kernelNestWidths nest)
    flat_pat pat total_num_elements comm lam fold_lam ispace inps nes' arrs'
-- | Common machinery for turning an operation nested in a map nest
-- into a segmented operation over flattened arrays.  Yields Nothing
-- (via failure in MaybeT) when the operation's free names, neutral
-- elements, or array inputs are bound inside the nest in a way that
-- cannot be repaired by replication.  On success, the continuation
-- @m@ is run with the (permuted) original pattern, a fresh flat
-- pattern, the nesting size, the total element count, the index
-- space, the kernel inputs, and the prepared neutral elements and
-- flattened arrays.
isSegmentedOp :: KernelNest
              -> [Int]
              -> SubExp
              -> [Type]
              -> Names -> Names
              -> [SubExp] -> [VName]
              -> (Pattern
                  -> Pattern
                  -> SubExp
                  -> SubExp
                  -> [(VName, SubExp)]
                  -> [KernelInput]
                  -> [SubExp] -> [VName]
                  -> Binder Out.Kernels ())
              -> KernelM (Maybe [KernelsStm])
isSegmentedOp nest perm segment_size ret free_in_op _free_in_fold_op nes arrs m = runMaybeT $ do
  -- We must verify that array inputs to the operation are inputs to
  -- the outermost loop nesting or free in the loop nest.  Nothing
  -- free in the op may be bound by the nest.  Furthermore, the
  -- neutral elements must be free in the loop nest.
  --
  -- We must summarise any names from free_in_op that are bound in the
  -- nest, and describe how to obtain them given segment indices.

  let bound_by_nest = boundInKernelNest nest

  (pre_bnds, nesting_size, ispace, kernel_inps, _rets) <- flatKernel nest

  unless (S.null $ free_in_op `S.intersection` bound_by_nest) $
    fail "Non-fold lambda uses nest-bound parameters."

  let indices = map fst ispace

      -- Neutral elements must not be bound in the nest.
      prepareNe (Var v) | v `S.member` bound_by_nest =
                            fail "Neutral element bound in nest"
      prepareNe ne = return ne

      -- For each array input, produce a binder action that yields a
      -- suitable array: the kernel input itself, a repeated version
      -- of it, or a replicated free array.
      prepareArr arr =
        case find ((==arr) . kernelInputName) kernel_inps of
          Just inp
            | kernelInputIndices inp == map Var indices ->
                return $ return $ kernelInputArray inp
            | not (kernelInputArray inp `S.member` bound_by_nest) ->
                return $ replicateMissing ispace inp
          Nothing | not (arr `S.member` bound_by_nest) ->
                      -- This input is something that is free inside
                      -- the loop nesting. We will have to replicate
                      -- it.
                      return $
                      letExp (baseString arr ++ "_repd")
                      (BasicOp $ Replicate (Shape [nesting_size]) $ Var arr)
          _ ->
            fail "Input not free or outermost."

  nes' <- mapM prepareNe nes
  mk_arrs <- mapM prepareArr arrs

  lift $ runBinder_ $ do
    mapM_ addStm pre_bnds

    -- We must make sure all inputs are of size
    -- segment_size*nesting_size.
    total_num_elements <-
      letSubExp "total_num_elements" $ BasicOp $ BinOp (Mul Int32) segment_size nesting_size

    let flatten arr = do
          arr_shape <- arrayShape <$> lookupType arr
          -- CHECKME: is the length the right thing here?  We want to
          -- reproduce the parameter type.
          let reshape = reshapeOuter [DimNew total_num_elements]
                        (2+length (snd nest)) arr_shape
          letExp (baseString arr ++ "_flat") $
            BasicOp $ Reshape reshape arr

    nested_arrs <- sequence mk_arrs
    arrs' <- mapM flatten nested_arrs

    let pat = Pattern [] $ rearrangeShape perm $
              patternValueElements $ loopNestingPattern $ fst nest

        -- A flat pattern element: fresh name, one-dimensional type of
        -- total_num_elements rows.
        flatPatElem pat_elem t = do
          let t' = arrayOfRow t total_num_elements
          name <- newVName $ baseString (patElemName pat_elem) ++ "_flat"
          return $ PatElem name BindVar t'

    flat_pat <- Pattern [] <$>
                zipWithM flatPatElem
                (patternValueElements pat) ret

    m pat flat_pat nesting_size total_num_elements ispace kernel_inps nes' arrs'

  where -- Repeat a kernel input along the ispace dimensions missing
        -- from its own index list.
        replicateMissing ispace inp = do
          t <- lookupType $ kernelInputArray inp
          let inp_is = kernelInputIndices inp
              shapes = determineRepeats ispace inp_is
              (outer_shapes, inner_shape) = repeatShapes shapes t
          letExp "repeated" $ BasicOp $
            Repeat outer_shapes inner_shape $ kernelInputArray inp

        determineRepeats ispace (i:is)
          | (skipped_ispace, ispace') <- span ((/=i) . Var . fst) ispace =
              Shape (map snd skipped_ispace) : determineRepeats (drop 1 ispace') is
        determineRepeats ispace _ =
          [Shape $ map snd ispace]
-- | Check whether the pattern's value elements are a permutation of
-- the given result, after padding the result with those pattern
-- elements it does not mention.  On success, yields the permutation
-- together with the unmentioned pattern elements.
permutationAndMissing :: Pattern -> [SubExp] -> Maybe ([Int], [PatElem])
permutationAndMissing pat res = do
  let value_elems = patternValueElements pat
      mentioned pe = patElemName pe `S.member` freeIn res
      (_mentioned_elems, missing_elems) = partition mentioned value_elems
      res_padded = res ++ map (Var . patElemName) missing_elems
  perm <- map (Var . patElemName) value_elems `isPermutationOf` res_padded
  return (perm, missing_elems)
-- Add extra pattern elements to every kernel nesting level.  Each
-- level gets fresh names whose types are the originals expanded by
-- the widths of that level and everything below it.
expandKernelNest :: MonadFreshNames m =>
                    [PatElem] -> KernelNest -> m KernelNest
expandKernelNest pes (outer_nest, inner_nests) = do
  let outer_size = loopNestingWidth outer_nest :
                   map loopNestingWidth inner_nests
      inner_sizes = tails $ map loopNestingWidth inner_nests
  outer_nest' <- expandWith outer_nest outer_size
  inner_nests' <- zipWithM expandWith inner_nests inner_sizes
  return (outer_nest', inner_nests')
  where -- Append expanded versions of 'pes' to the nesting's pattern.
        expandWith nest dims = do
          pes' <- mapM (expandPatElemWith dims) pes
          return nest { loopNestingPattern =
                          Pattern [] $
                          patternElements (loopNestingPattern nest) <> pes'
                      }

        -- A fresh-named copy of the pattern element, with its type
        -- wrapped in an array of the given dimensions.
        expandPatElemWith dims pe = do
          name <- newVName $ baseString $ patElemName pe
          return pe { patElemName = name
                    , patElemAttr = patElemType pe `arrayOfShape` Shape dims
                    }
-- | Convert the statements inside a map nest to kernel statements,
-- attempting to parallelise any remaining (top-level) parallel
-- statements.  Anything that is not a map, scan or reduction will
-- simply be sequentialised.  This includes sequential loops that
-- contain maps, scans or reduction.  In the future, we could probably
-- do something more clever.  Make sure that the amount of parallelism
-- to be exploited does not exceed the group size.
intraGroupParallelise :: (MonadFreshNames m,
                          HasScope Out.Kernels m) =>
                         KernelNest -> Body
                      -> m [Out.Stm Out.Kernels]
intraGroupParallelise knest body = do
  (w_stms, w, ispace, inps, rts) <- flatKernel knest
  -- One workgroup per iteration of the flattened nest.
  let num_groups = w

  ((kspace, read_input_stms), prelude_stms) <- runBinder $ do
    -- Only kernel inputs actually free in the body are read.
    let inputIsUsed input = kernelInputName input `S.member` freeInBody body
        used_inps = filter inputIsUsed inps

    mapM_ addStm w_stms

    -- The group size is a run-time value.
    group_size_v <- newVName "group_size"
    letBindNames'_ [group_size_v] $ Op GroupSize

    num_threads <- letSubExp "num_threads" $
                   BasicOp $ BinOp (Mul Int32) num_groups (Var group_size_v)

    let ksize = (num_groups, Var group_size_v, num_threads)

    ltid <- newVName "ltid"

    -- The thread space is the nest's index space extended with the
    -- local thread id.
    kspace <- newKernelSpace ksize $ FlatThreadSpace $ ispace ++ [(ltid,Var group_size_v)]

    read_input_stms <- mapM readKernelInput used_inps

    return (kspace, read_input_stms)

  kbody <- intraGroupParalleliseBody kspace body

  let kbody' = kbody { kernelBodyStms = read_input_stms ++ kernelBodyStms kbody }
      kstm = Let (loopNestingPattern first_nest) (StmAux cs ()) $ Op $
             Kernel (KernelDebugHints "map_intra_group" []) kspace rts kbody'

  return $ prelude_stms ++ [kstm]
  where first_nest = fst knest
        cs = loopNestingCertificates first_nest
-- | Translate the statements of a body into in-kernel statements for
-- a single workgroup: maps become 'Combine's over the local thread
-- id, scanomaps and redomaps become 'GroupScan'/'GroupReduce',
-- sequential streams are expanded into whole-array statements, and
-- everything else is sequentialised via "Kernelise".
intraGroupParalleliseBody :: (MonadFreshNames m,
                              HasScope Out.Kernels m) =>
                             KernelSpace -> Body -> m (Out.KernelBody Out.InKernel)
intraGroupParalleliseBody kspace body = do
  let ltid = spaceLocalId kspace
  kstms <- runBinder_ $ do
    let processStms = mapM_ processStm

        -- Without this type signature, the GHC 8.0.1 type checker
        -- enters an infinite loop.
        processStm :: Stm -> Binder Out.InKernel ()
        processStm stm@(Let pat _ e) =
          case e of
            -- Each thread reads its own element of every input array
            -- and runs the map function on it; results are combined
            -- over the local thread id.
            Op (Map w fun arrs) -> do
              body_stms <- collectStms_ $ do
                forM_ (zip (lambdaParams fun) arrs) $ \(p, arr) -> do
                  arr_t <- lookupType arr
                  letBindNames' [paramName p] $ BasicOp $ Index arr $
                    fullSlice arr_t [DimFix $ Var ltid]
                Kernelise.transformStms $ bodyStms $ lambdaBody fun
              let comb_body = mkBody body_stms $ bodyResult $ lambdaBody fun
              letBind_ pat $ Op $
                Out.Combine [(ltid,w)] (lambdaReturnType fun) [] comb_body

            Op (Scanomap w scanfun foldfun nes arrs) -> do
              let (scan_pes, map_pes) =
                    splitAt (length nes) $ patternElements pat
              scan_input <- procInput (Pattern [] map_pes) w foldfun nes arrs

              scanfun' <- Kernelise.transformLambda scanfun

              -- A GroupScan lambda needs two more parameters.
              my_index <- newVName "my_index"
              other_index <- newVName "other_index"
              let my_index_param = Param my_index (Prim int32)
                  other_index_param = Param other_index (Prim int32)
                  scanfun'' = scanfun' { lambdaParams = my_index_param :
                                                        other_index_param :
                                                        lambdaParams scanfun'
                                       }
              letBind_ (Pattern [] scan_pes) $
                Op $ Out.GroupScan w scanfun'' $ zip nes scan_input

            Op (Redomap w _ redfun foldfun nes arrs) -> do
              let (red_pes, map_pes) =
                    splitAt (length nes) $ patternElements pat
              red_input <- procInput (Pattern [] map_pes) w foldfun nes arrs

              redfun' <- Kernelise.transformLambda redfun

              -- A GroupReduce lambda needs two more parameters.
              my_index <- newVName "my_index"
              other_index <- newVName "other_index"
              let my_index_param = Param my_index (Prim int32)
                  other_index_param = Param other_index (Prim int32)
                  redfun'' = redfun' { lambdaParams = my_index_param :
                                                      other_index_param :
                                                      lambdaParams redfun'
                                     }
              letBind_ (Pattern [] red_pes) $
                Op $ Out.GroupReduce w redfun'' $ zip nes red_input

            -- A sequential stream is expanded and its statements
            -- processed recursively.
            Op (Stream w (Sequential accs) lam arrs) -> do
              types <- asksScope castScope
              ((), stream_bnds) <-
                runBinderT (sequentialStreamWholeArray pat w accs lam arrs) types
              processStms stream_bnds

            _ ->
              Kernelise.transformStm stm

          where -- Run the fold function of a scanomap/redomap as a
                -- per-thread Combine, binding the map-part results
                -- and returning fresh names for the operator inputs.
                procInput :: Out.Pattern Out.InKernel
                          -> SubExp -> Lambda -> [SubExp] -> [VName]
                          -> Binder Out.InKernel [VName]
                procInput map_pat w foldfun nes arrs = do
                  fold_stms <- collectStms_ $ do
                    let (fold_acc_params, fold_arr_params) =
                          splitAt (length nes) $ lambdaParams foldfun

                    forM_ (zip fold_acc_params nes) $ \(p, ne) ->
                      letBindNames'_ [paramName p] $ BasicOp $ SubExp ne

                    forM_ (zip fold_arr_params arrs) $ \(p, arr) -> do
                      arr_t <- lookupType arr
                      letBindNames' [paramName p] $ BasicOp $ Index arr $
                        fullSlice arr_t [DimFix $ Var ltid]

                    Kernelise.transformStms $ bodyStms $ lambdaBody foldfun
                  let fold_body = mkBody fold_stms $ bodyResult $ lambdaBody foldfun

                  op_inps <- replicateM (length nes) (newVName "op_input")
                  letBindNames'_ (op_inps ++ patternNames map_pat) $ Op $
                    Out.Combine [(ltid,w)] (lambdaReturnType foldfun) [] fold_body
                  return op_inps

    processStms $ bodyStms body

  return $ KernelBody () kstms $ map (ThreadsReturn $ OneThreadPerGroup $ intConst Int32 0) $ bodyResult body
-- | Bind the pattern to a chain of alternatives: each
-- (condition, body) pair is tried in order as nested 'If's, falling
-- through to the default body when no condition holds.
kernelAlternatives :: (MonadFreshNames m, HasScope Out.Kernels m) =>
                      Out.Pattern Out.Kernels
                   -> Out.Body Out.Kernels
                   -> [(SubExp, Out.Body Out.Kernels)]
                   -> m [Out.Stm Out.Kernels]
-- No alternatives left: bind the default body's results directly.
kernelAlternatives pat default_body [] = runBinder_ $ do
  ses <- bodyBind default_body
  forM_ (zip (patternNames pat) ses) $ \(name, se) ->
    letBindNames'_ [name] $ BasicOp $ SubExp se
kernelAlternatives pat default_body ((cond,alt):alts) = runBinder_ $ do
  -- Fresh pattern for the remaining alternatives, so the outer 'If'
  -- can bind the original pattern.
  alts_pat <- fmap (Pattern []) $ forM (patternElements pat) $ \pe -> do
    name <- newVName $ baseString $ patElemName pe
    return pe { patElemName = name }

  alt_stms <- kernelAlternatives alts_pat default_body alts
  let alt_body = mkBody alt_stms $ map Var $ patternValueNames alts_pat

  letBind_ pat $ If cond alt alt_body $ ifCommon $ patternTypes pat
-- | Either register the produced kernel statements (with the given
-- certificates attached) and continue with the post-distribution
-- accumulator, or — when no kernel was produced — fall back to adding
-- the original statement to the pre-distribution accumulator.
kernelOrNot :: Certificates -> Stm -> KernelAcc
            -> PostKernels -> KernelAcc -> Maybe [KernelsStm]
            -> KernelM KernelAcc
kernelOrNot cs bnd acc kernels acc' maybe_bnds =
  case maybe_bnds of
    Nothing ->
      addStmToKernel (certify cs bnd) acc
    Just bnds -> do
      addKernels kernels
      addKernel $ map (certify cs) bnds
      return acc'
|
ihc/futhark
|
src/Futhark/Pass/ExtractKernels.hs
|
isc
| 54,461
| 0
| 32
| 15,386
| 14,630
| 7,295
| 7,335
| 924
| 13
|
{-# LANGUAGE OverloadedStrings, NamedFieldPuns #-}
module Lachmann.Artificial where
import Data.List (foldl')
import qualified Data.Text as T

import Lachmann.Core
-- | Specification of one copying event: a new manuscript with the
-- given identifier is produced from the listed parent manuscripts.
data MSCopySpec = MSCopySpec {
  ms_identifier :: T.Text  -- ^ Identifier of the manuscript to create.
  ,parents :: [Int]        -- ^ Indices into the tradition's manuscript
                           -- list naming the exemplars copied from.
  }
-- | Produce the text of a new copy from the given parent texts.  The
-- empty list yields the empty text; otherwise the first parent's text
-- is used with " garbage" appended to simulate a copying error.
-- NOTE(review): parents beyond the first are silently ignored —
-- confirm this is the intended contamination-free model.
copyTextWithErrors :: [T.Text] -> T.Text
copyTextWithErrors texts =
  case texts of
    [] -> ""
    (t : _) -> T.append t " garbage"
-- | Extend a tradition with one copying event: look up the parent
-- manuscripts by index, derive the new text from their texts, and
-- prepend the new manuscript together with one edge from each parent
-- to it.
--
-- NOTE(review): uses the partial '(!!)'; an out-of-range index in
-- 'parents' will raise a runtime exception.
extendTradition :: Tradition -> MSCopySpec -> Tradition
extendTradition Tradition{edgelist, mss} MSCopySpec{ms_identifier, parents} = trad where
  trad = Tradition (new_edges ++ edgelist) (new_ms:mss) where
    parent_mss = map (mss !!) parents
    texts = map text parent_mss
    t = copyTextWithErrors texts
    new_ms = Manuscript ms_identifier t
    -- One stemma edge from each parent to the new manuscript.
    new_edges = map (\x -> (ms_id x, ms_identifier)) parent_mss
-- | Build a tradition from an archetype manuscript and a list of copy
-- specifications, applied left to right.
--
-- Uses the strict 'foldl'' rather than the lazy 'foldl' so that long
-- specification lists do not build a chain of unevaluated traditions.
makeTradition :: Manuscript -> [MSCopySpec] -> Tradition
makeTradition archetype mss_specs = foldl' extendTradition start_trad mss_specs where
  -- The initial tradition: no edges, only the archetype itself.
  start_trad = Tradition [] [archetype]
|
christopheryoung/lachman
|
src/Lachmann/Artificial.hs
|
mit
| 987
| 0
| 11
| 158
| 298
| 164
| 134
| -1
| -1
|
module Data.Random.Distribution.Hypergeometric.Test where
import Data.Int
import Data.Random
import Data.Random.Sample (sampleFrom)
import Data.Random.Distribution.Hypergeometric
import System.Random.MWC
import Data.Vector (fromList)
import Test.QuickCheck
import Test.QuickCheck.Monadic
import Distribution.TestSuite.QuickCheck
-- | Seeds are generated from 258 arbitrary words turned into an MWC
-- seed vector.
instance Arbitrary Seed where
  arbitrary = fmap (toSeed . fromList) (vector 258)
-- | Run an IO predicate against a generator restored from an
-- arbitrary seed, asserting the resulting Bool as a monadic property.
testArbitraryRandom :: (GenIO -> IO Bool) -> Seed -> Property
testArbitraryRandom f s = monadicIO $ do
  ok <- run (restore s >>= f)
  assert ok
-- | Identity properties of the hypergeometric sampler, checked for
-- both small and large population sizes: drawing zero times yields
-- zero successes, and drawing the whole urn yields all successes.
tests :: IO [Test]
tests = return
  [ testGroup "identities"
    [ testGroup "small"
      [ testProperty "draw nil" $ \p -> testArbitraryRandom $ \g -> do
          let i = (getSmall . getPositive) p :: Int64
          v <- sampleFrom g $ hypergeometric i i 0
          return $ v == 0
      , testProperty "draw all" $ \p -> testArbitraryRandom $ \g -> do
          let i = (getSmall . getPositive) p :: Int64
          v <- sampleFrom g $ hypergeometric i i i
          return $ v == i
      ]
    , testGroup "large"
      [ testProperty "draw nil" $ \p -> testArbitraryRandom $ \g -> do
          let i = (getLarge . getPositive) p :: Int64
          v <- sampleFrom g $ hypergeometric i i 0
          return $ v == 0
      , testProperty "draw all" $ \p -> testArbitraryRandom $ \g -> do
          let i = (getLarge . getPositive) p :: Int64
          v <- sampleFrom g $ hypergeometric i i i
          return $ v == i
      ]
    ]
  ]
|
srijs/random-hypergeometric
|
src/Data/Random/Distribution/Hypergeometric/Test.hs
|
mit
| 1,487
| 0
| 22
| 390
| 507
| 261
| 246
| 35
| 1
|
-- | Spell out the numbers one through five; everything else gets a
-- fallback message.
sayMe :: (Integral a) => a -> String
sayMe n = case n of
  1 -> "One!"
  2 -> "Two!"
  3 -> "Three!"
  4 -> "Four!"
  5 -> "Five!"
  _ -> "Not between 1 and 5"
|
williamHuang5468/LearningHaskell
|
CH4 Synatax in Function/UseIfReplacePatternMatching.hs
|
mit
| 169
| 0
| 6
| 47
| 67
| 34
| 33
| 7
| 1
|
{-# LANGUAGE ScopedTypeVariables #-}
module Stackage.LoadDatabase where
import qualified Codec.Archive.Tar as Tar
import qualified Codec.Compression.GZip as GZip
import Control.Exception (IOException, handle)
import Control.Monad (guard, foldM)
import qualified Data.ByteString.Lazy as L
import qualified Data.ByteString.Lazy.Char8 as L8
import Data.List (stripPrefix)
import qualified Data.Map as Map
import Data.Maybe (catMaybes, listToMaybe,
mapMaybe, fromMaybe)
import Data.Monoid (Monoid (..))
import Data.Set (member)
import qualified Data.Set as Set
import Distribution.Compiler (CompilerFlavor (GHC))
import Distribution.Package (Dependency (Dependency))
import Distribution.PackageDescription (Condition (..),
ConfVar (..),
FlagName (FlagName),
RepoType (Git),
SourceRepo (..),
benchmarkBuildInfo,
buildInfo, buildTools,
condBenchmarks,
condExecutables,
condLibrary,
condTestSuites,
condTreeComponents,
condTreeConstraints,
condTreeData,
flagDefault, flagName,
genPackageFlags,
homepage, libBuildInfo,
packageDescription,
sourceRepos,
testBuildInfo)
import Distribution.PackageDescription.Parse (ParseResult (ParseOk),
parsePackageDescription)
import Distribution.System (buildArch, buildOS)
import Distribution.Text (simpleParse)
import Distribution.Version (Version (Version),
unionVersionRanges,
withinRange)
import Stackage.Config (convertGithubUser)
import Stackage.Types
import Stackage.Util
import System.Directory (doesFileExist, getDirectoryContents)
import System.FilePath ((<.>), (</>))
-- | Load the raw package database.
--
-- We want to put in some restrictions:
--
-- * Drop all core packages. We never want to install a new version of
-- those, nor include them in the package list.
--
-- * For packages with a specific version bound, find the maximum matching
-- version.
--
-- * For other packages, select the maximum version number.
loadPackageDB :: SelectSettings
              -> Map PackageName Version -- ^ core packages from HP file
              -> Set PackageName -- ^ all core packages, including extras
              -> Map PackageName (VersionRange, Maintainer) -- ^ additional deps
              -> Set PackageName -- ^ underlay packages to exclude
              -> IO PackageDB
loadPackageDB settings coreMap core deps underlay = do
    tarName <- getTarballName
    lbs <- L.readFile tarName
    pdb <- addEntries mempty $ Tar.read lbs
    -- Local .tar.gz tarballs override/extend the index; a missing
    -- directory is treated the same as an empty one.
    contents <- handle (\(_ :: IOException) -> return [])
              $ getDirectoryContents $ selectTarballDir settings
    pdb' <- foldM addTarball pdb $ mapMaybe stripTarGz contents
    return $ excludeUnderlay pdb'
  where
    -- Fold every entry of the index tarball into the database; a broken
    -- tar stream is fatal.
    addEntries _ (Tar.Fail e) = error $ show e
    addEntries db Tar.Done = return db
    addEntries db (Tar.Next e es) = addEntry db e >>= flip addEntries es
    -- "foo-1.2.tar.gz" -> Just "foo-1.2"; anything else -> Nothing.
    stripTarGz = fmap reverse . stripPrefix (reverse ".tar.gz") . reverse
    -- Synthetic full GHC version used when evaluating `impl(ghc ...)`
    -- conditionals; the trailing 2 picks a plausible patch level.
    ghcVersion' =
        let GhcMajorVersion x y = selectGhcVersion settings
        in Version [x, y, 2] []
    -- Add one index entry, skipping core packages and versions outside
    -- the requested ranges. A matching local tarball takes precedence
    -- over the cabal file embedded in the index.
    addEntry :: PackageDB -> Tar.Entry -> IO PackageDB
    addEntry pdb e =
        case getPackageVersion e of
            Nothing -> return pdb
            Just (p, v)
                | p `member` core -> return pdb
                | otherwise ->
                    case Map.lookup p deps of
                        Just (vrange, _maintainer)
                            | not $ withinRange v vrange -> return pdb
                        _ -> do
                            let pkgname = packageVersionString (p, v)
                                tarball = selectTarballDir settings </> pkgname <.> "tar.gz"
                            exists <- doesFileExist tarball
                            if exists
                                then do
                                    lbs <- L.readFile tarball
                                    findCabalAndAddPackage tarball p v pdb $ Tar.read $ GZip.decompress lbs
                                else
                                    case Tar.entryContent e of
                                        Tar.NormalFile bs _ -> addPackage p v bs pdb
                                        _ -> return pdb
    -- Add a local "<name>-<version>" tarball; the version is parsed
    -- from the file name.
    addTarball :: PackageDB -> FilePath -> IO PackageDB
    addTarball pdb tarball' = do
        lbs <- L.readFile tarball
        let (v', p') = break (== '-') $ reverse tarball'
            p = PackageName $ reverse $ drop 1 p'
        v <- maybe (error $ "Invalid tarball name: " ++ tarball) return
           $ simpleParse $ reverse v'
        findCabalAndAddPackage tarball p v pdb $ Tar.read $ GZip.decompress lbs
      where
        tarball = selectTarballDir settings </> tarball' <.> "tar.gz"
    -- Drop every package that the underlay already provides.
    excludeUnderlay :: PackageDB -> PackageDB
    excludeUnderlay (PackageDB pdb) =
        PackageDB $ Map.filterWithKey (\k _ -> Set.notMember k underlay) pdb
    skipTests p = p `Set.member` skippedTests settings
    -- Find the relevant cabal file in the given entries and add its contents
    -- to the package database
    findCabalAndAddPackage tarball p v pdb =
        loop
      where
        -- Normalize Windows-style path separators before comparing.
        fixPath '\\' = '/'
        fixPath c = c
        expectedPath = let PackageName p' = p in concat
            [ packageVersionString (p, v)
            , "/"
            , p'
            , ".cabal"
            ]
        loop Tar.Done = error $ concat
            [ "Missing cabal file "
            , show expectedPath
            , " in tarball: "
            , show tarball
            ]
        loop (Tar.Fail e) = error $ concat
            [ "Unable to read tarball "
            , show tarball
            , ": "
            , show e
            ]
        loop (Tar.Next entry rest)
            | map fixPath (Tar.entryPath entry) == expectedPath =
                case Tar.entryContent entry of
                    Tar.NormalFile bs _ -> addPackage p v bs pdb
                    _ -> error $ concat
                        [ "In tarball "
                        , show tarball
                        , " the cabal file "
                        , show expectedPath
                        , " was not a normal file"
                        ]
            | otherwise = loop rest
    -- Parse the cabal file contents and record the package's metadata.
    addPackage p v lbs pdb = do
        let (deps', hasTests, buildToolsExe', buildToolsOther', mgpd, execs, mgithub) = parseDeps p lbs
        return $ mappend pdb $ PackageDB $ Map.singleton p PackageInfo
            { piVersion = v
            , piDeps = deps'
            , piHasTests = hasTests
            , piBuildToolsExe = buildToolsExe'
            , piBuildToolsAll = buildToolsExe' `Set.union` buildToolsOther'
            , piGPD = mgpd
            , piExecs = execs
            , piGithubUser = fromMaybe [] mgithub
            }
    -- Extract (dependencies, has-tests, exe build tools, other build
    -- tools, parsed description, executables, github users) from the
    -- raw cabal file; unparsable files yield empty results.
    parseDeps p lbs =
        case parsePackageDescription $ L8.unpack lbs of
            ParseOk _ gpd -> (mconcat
                [ maybe mempty (go gpd) $ condLibrary gpd
                , mconcat $ map (go gpd . snd) $ condExecutables gpd
                , if skipTests p
                    then mempty
                    else mconcat $ map (go gpd . snd) $ condTestSuites gpd
                -- FIXME , mconcat $ map (go gpd . snd) $ condBenchmarks gpd
                ], not $ null $ condTestSuites gpd
                , Set.fromList $ map depName $ libExeBuildInfo gpd
                , Set.fromList $ map depName $ testBenchBuildInfo gpd
                , Just gpd
                , Set.fromList $ map (Executable . fst) $ condExecutables gpd
                , fmap convertGithubUser $ listToMaybe $ catMaybes
                    $ parseGithubUserHP (homepage $ packageDescription gpd)
                    : map parseGithubUserSR (sourceRepos $ packageDescription gpd)
                )
            _ -> (mempty, defaultHasTestSuites, Set.empty, Set.empty, Nothing, Set.empty, Nothing)
      where
        libExeBuildInfo gpd = concat
            [ maybe mempty (goBI libBuildInfo) $ condLibrary gpd
            , concat $ map (goBI buildInfo . snd) $ condExecutables gpd
            ]
        testBenchBuildInfo gpd = concat
            [ if skipTests p
                then []
                else concat $ map (goBI testBuildInfo . snd) $ condTestSuites gpd
            , concat $ map (goBI benchmarkBuildInfo . snd) $ condBenchmarks gpd
            ]
        goBI f x = buildTools $ f $ condTreeData x
        depName (Dependency (PackageName pn) _) = Executable pn
        -- Collect version constraints from a conditional tree, resolving
        -- conditionals with checkCond and unioning duplicate ranges.
        go gpd tree
            = Map.filterWithKey (\k _ -> not $ ignoredDep k)
            $ Map.unionsWith unionVersionRanges
            $ Map.fromList (map (\(Dependency pn vr) -> (pn, vr)) $ condTreeConstraints tree)
            : map (go gpd) (mapMaybe (checkCond gpd) $ condTreeComponents tree)
        -- Some specific overrides for cases where getting Stackage to be smart
        -- enough to handle things would be too difficult.
        ignoredDep :: PackageName -> Bool
        ignoredDep dep
            -- The flag logic used by text-stream-decode confuses Stackage.
            | dep == PackageName "text" && p == PackageName "text-stream-decode" = True
            | otherwise = False
        -- Evaluate a cabal conditional against the build platform,
        -- configured flags and the synthetic GHC version above.
        checkCond gpd (cond, tree, melse)
            | checkCond' cond = Just tree
            | otherwise = melse
          where
            checkCond' (Var (OS os)) = os == buildOS
            checkCond' (Var (Arch arch)) = arch == buildArch
            -- Sigh... the small_base flag on mersenne-random-pure64 is backwards
            checkCond' (Var (Flag (FlagName "small_base")))
                | p == PackageName "mersenne-random-pure64" = False
            checkCond' (Var (Flag flag@(FlagName flag'))) =
                flag' `Set.notMember` disabledFlags settings &&
                flag `elem` flags'
            checkCond' (Var (Impl compiler range)) =
                compiler == GHC && withinRange ghcVersion' range
            checkCond' (Lit b) = b
            checkCond' (CNot c) = not $ checkCond' c
            checkCond' (COr c1 c2) = checkCond' c1 || checkCond' c2
            checkCond' (CAnd c1 c2) = checkCond' c1 && checkCond' c2
            flags' = map flagName (filter flagDefault $ genPackageFlags gpd) ++
                     (map FlagName $ Set.toList $ Stackage.Types.flags settings coreMap)
-- | Attempt to grab the Github username from a homepage.
-- | Attempt to grab the Github username from a homepage URL. Only
-- http/https github.com URLs are recognized; the result is the first
-- path segment, and an empty segment yields Nothing.
parseGithubUserHP :: String -> Maybe String
parseGithubUserHP url =
    case mapMaybe (`stripPrefix` url) prefixes of
        (rest:_) | user@(_:_) <- takeWhile (/= '/') rest -> Just user
        _ -> Nothing
  where
    prefixes = ["http://github.com/", "https://github.com/"]
-- | Attempt to grab the Github username from a source repo.
-- | Attempt to grab the Github username from a source repo: only Git
-- repositories with a known location are considered.
parseGithubUserSR :: SourceRepo -> Maybe String
parseGithubUserSR sr
    | Just Git <- repoType sr
    , Just loc <- repoLocation sr = parseGithubUserHP loc
    | otherwise = Nothing
|
yogsototh/stackage
|
Stackage/LoadDatabase.hs
|
mit
| 12,655
| 0
| 21
| 5,473
| 2,778
| 1,437
| 1,341
| 212
| 22
|
{-# LANGUAGE OverloadedStrings #-}
module GhostLang.SerializationTests
( encodeDecodeIsEqual
, writeReadFile
) where
import Control.Exception (bracket)
import Data.Serialize (decode, encode)
import GhostLang.InterpreterGenerators (TestInstrSet (..))
import GhostLang.Serialize (fromFile, toFile)
import GhostLang.Types (Program (..), Pattern (..), Operation (..))
import System.Directory (getTemporaryDirectory, removeFile)
import System.FilePath
import System.Posix.Process (getProcessID)
import Test.HUnit
import Text.Parsec.Pos (initialPos)
-- | Test that encoding followed by a decode result in the original
-- value.
-- | Serializing a program and deserializing the result must give back
-- exactly the original value; a decode failure fails the property.
encodeDecodeIsEqual :: Program TestInstrSet -> Bool
encodeDecodeIsEqual p = either (const False) (p ==) (decode (encode p))
-- | Write a simple Program to file and read back again. Shall be
-- equal to the original value.
-- | Write a simple Program to a temporary file and read it back; the
-- round trip must reproduce the original value. The bracket removes
-- the file even if the assertion fails.
writeReadFile :: Assertion
writeReadFile =
    bracket getTempFilePath removeFile roundTrip
  where
    roundTrip file = do
        original `toFile` file
        restored <- fromFile file
        original @=? restored
    original =
        Program
            [ Pattern (initialPos "") "pattern" 1
                  [ Invoke Instr1
                  ]
            ]
-- | Produce a per-process scratch path ("<tmpdir>/<pid>.bin").
getTempFilePath :: IO FilePath
getTempFilePath = mkPath <$> getTemporaryDirectory <*> getProcessID
  where
    mkPath dir pid = dir </> show pid <.> "bin"
|
kosmoskatten/ghost-lang
|
ghost-lang/test/GhostLang/SerializationTests.hs
|
mit
| 1,511
| 0
| 17
| 444
| 326
| 181
| 145
| 35
| 2
|
{-# OPTIONS
-XMultiParamTypeClasses
-XFunctionalDependencies
#-}
module MathParser (parseFun, parseDispExpr, showFormula) where
import System.Environment
import Control.Monad
import Data.Tree
import Data.List as L
import Search
import Data.Map as M
import Text.ParserCombinators.Parsec
import Utilities
import Formulas
-- A zipper path into a Formula tree; the parser keeps its cursor here.
type Crumbs = TPath Formula Atom
-- | Parse a fully parenthesized prefix expression, e.g. "f(x, 1)",
-- by walking a tree zipper: '(' descends, ')' ascends, ',' and ' '
-- move to the next sibling, and leaf tokens overwrite the current node.
-- Integers become AInt, identifiers "n_<k>" become AVar k, and any
-- other identifier becomes AStr. At eof the current path is returned.
funTree :: Crumbs -> GenParser Char st Crumbs
funTree t =
  do {
    char '(';
    (funTree (down t))
  } <|>
  do {
    num <- many1 digit;
    funTree (changeMe (Node (AInt $ read num) []) t)
  } <|>
  do {
    str <- word;
    case str of
      'n':'_':num -> funTree (changeMe (Node (AVar $ read num) []) t)
      _ -> (funTree (changeMe (Node (AStr str) []) t))
  } <|>
  do {
    oneOf ", "; -- disallowed \n
    (funTree (next t))
  } <|>
  do {
    char ')';
    (funTree (up t))
  } <|>
  do {
    eof;
    return t
  }
-- | A bare identifier: one or more characters other than space,
-- parentheses, or comma.
word :: GenParser Char st String
word = many1 (noneOf [' ', '(', ')', ','])
-- | Parse a formula from a string, returning the tree at the parser's
-- final cursor position, or the parse error.
parseFun' :: String -> Either ParseError Formula
parseFun' input = fmap curTree (parse (funTree emptyPath) "(unknown)" input)
-- | Like 'parseFun'', but partial: errors on unparsable input.
parseFun :: String -> Formula
parseFun input = justRight (parseFun' input)
-- | One or more decimal digits.
num :: GenParser Char st String
num = many1 digit
-- | Expand a display template against the argument list, accumulating
-- into s: "?args" becomes all arguments joined by ", ", "?<k>"
-- becomes the k-th argument (1-based), and any other character is
-- copied verbatim.
dispExprTree :: String -> [String] -> GenParser Char st String
dispExprTree s args =
  do {
    try (string "?args");
    dispExprTree (s ++ (intercalate ", " args)) args
  } <|>
  do {
    char '?';
    d <- many1 (digit);
    -- NOTE(review): (!!) errors out when the template references more
    -- arguments than were supplied -- TODO confirm templates are trusted.
    dispExprTree (s ++ (args !! (((read d)::Int) - 1))) args
  } <|>
  do {
    t <- anyChar;
    dispExprTree (s ++ [t]) args
  } <|>
  do {
    eof;
    return s
  }
-- | Run the display-template expansion over the whole input string.
parseDispExpr :: String -> [String] -> Either ParseError String
parseDispExpr input args =
    parse expander "(unknown)" input
  where
    expander = dispExprTree "" args
-- | Render a formula using the display rules in the symbol library.
-- Falls back to "sym(?args)" (or just "sym" for leaves) when no rule
-- is registered; an unexpandable template renders as "error".
showFormula :: SymbolLib -> Formula -> String
showFormula slib f =
  let
    --get the symbol at the root
    rt =
      case root f of
        AStr str -> str
        AInt n -> (show n)
        AVar i -> "n_" ++ (show i)
    --look up the displayrule,
    --if you can't find it, use the default provided
    def =
      if (L.null (children f)) then rt else (rt ++ "(" ++ "?args" ++ ")")
    drule = tryWithDefault (\sym -> M.lookup sym slib) def rt
    -- Children are rendered recursively and substituted into the rule.
  in (case (parseDispExpr drule (fmap (showFormula slib) (children f))) of
      Right s -> s
      Left _ -> "error")
|
holdenlee/fluidity
|
SeekWhence/formulas/MathParser.hs
|
mit
| 2,409
| 0
| 21
| 660
| 934
| 481
| 453
| 88
| 5
|
module ASPico.Handler.Root where
import ASPico.Prelude
import Servant (ServerT, (:<|>)((:<|>)))
import ASPico.Config (Config)
import ASPico.Error (AppErr)
import ASPico.Handler.Root.Affiliate (ApiAffiliate, serverAffiliate)
import ASPico.Handler.Root.Conversion (ApiConversion, serverConversion)
import ASPico.Handler.Root.Track (ApiTrack, serverTrack)
import ASPico.Monad (MonadASPico)
-- The root API is the plain sum of the three sub-APIs.
type ApiRoot = ApiAffiliate :<|> ApiConversion :<|> ApiTrack
-- | Root handler: delegates to the affiliate, conversion and track
-- handlers in the same order the ApiRoot type lists them.
serverRoot
  :: (MonadError AppErr m, MonadASPico m, MonadReader Config m, MonadIO m)
  => ServerT ApiRoot m
serverRoot =
  serverAffiliate :<|> serverConversion :<|> serverTrack
|
arowM/ASPico
|
src/ASPico/Handler/Root.hs
|
mit
| 632
| 0
| 6
| 72
| 174
| 106
| 68
| -1
| -1
|
module RealFunctions where
import Data.Fixed -- mod'
-- one dimensional functions
-- | A sawtooth-like wave: tan applied to |sin x|.
sawtooth :: (RealFloat a) => a -> a
sawtooth x = tan (abs (sin x))
-- | Triangle wave of amplitude a and half-period p evaluated at x.
triangle :: (RealFloat a) => a -> a -> a -> a
triangle x a p = scale * (p - distance)
  where
    scale = a / p
    distance = abs (x `mod'` (2 * p) - p)
-- | Gamma correction with the conventional 2.2 display exponent.
gamma :: (Floating a) => a -> a
gamma z = z ** (1 / 2.2)
-- | Hermite smoothstep between @lo@ and @hi@ evaluated at @x@.
--
-- The bounds were previously named @min@/@max@, shadowing the Prelude
-- functions; renamed to @lo@/@hi@. Note that, like the original, the
-- normalized value is NOT clamped to [0, 1] outside the interval.
smoothstep :: (RealFrac a) => a -> a -> a -> a
smoothstep lo hi x = 3 * t ^ 2 - 2 * t ^ 3
  where
    -- Normalize x from [lo, hi] onto [0, 1]; this is exactly
    -- map' lo hi 0.0 1.0 x with the constant terms folded away.
    t = (x - lo) / (hi - lo)
-- | Hyperbolically decreasing function: 1 at x = 0, tending to 0.
decreasing :: (RealFloat a, Fractional a) => a -> a
decreasing x = recip (x + 1)
-- | Approaches 1 as |x| grows: exp(-1 / |x|).
towards1 :: (Floating a) => a -> a
towards1 x = exp (negate (recip (abs x)))
-- | Constant unit weight, ignoring the argument.
linear :: Num a => a -> a
linear = const 1
-- | One-dimensional Schwefel test function, shifted so the global
-- minimum sits near zero.
schwefel :: (Floating a) => a -> a
schwefel z = 2 * 418.9829 - z * sin (sqrt (abs z))
-- | Multiplicative inverse, 1/z.
reflection :: (Floating a, Fractional a) => a -> a
reflection = recip
-- | Linearly remap x from the interval [inMin, inMax] onto
-- [outMin, outMax]; values outside the input range extrapolate.
map' :: RealFrac a => a -> a -> a -> a -> a -> a
map' inMin inMax outMin outMax x = scaled + outMin
  where
    scaled = (x - inMin) * (outMax - outMin) / (inMax - inMin)
-- | Restrict @x@ to the closed interval [@lo@, @hi@].
--
-- The bounds were previously named @min@/@max@, shadowing the Prelude
-- functions of the same name; renamed to @lo@/@hi@.
clamp :: (Ord a) => a -> a -> a -> a
clamp lo hi x
    | x < lo = lo
    | x > hi = hi
    | otherwise = x
|
Vetii/Fractal
|
RealFunctions.hs
|
mit
| 1,067
| 0
| 13
| 289
| 607
| 319
| 288
| 29
| 1
|
{-# htermination return :: a -> [] a #-}
|
ComputationWithBoundedResources/ara-inference
|
doc/tpdb_trs/Haskell/full_haskell/Prelude_return_2.hs
|
mit
| 41
| 0
| 2
| 9
| 3
| 2
| 1
| 1
| 0
|
-- | Largest element of a non-empty list; errors on the empty list.
maximum' :: (Ord a) => [a] -> a
maximum' [] = error "maximum of empty list is not defined"
maximum' [x] = x
maximum' (x:xs) = max x (maximum' xs)
-- | Build a list of n copies of x; errors when n is negative.
replicate' :: (Ord i, Num i) => i -> a -> [a]
replicate' n x =
    if n < 0
        then error "Can't replicate a negative number of times"
        else build n
  where
    build m
        | m == 0 = []
        | otherwise = x : build (m - 1)
-- | First n elements of a list; n <= 0 or an exhausted list ends it.
take' :: (Num i, Ord i) => i -> [a] -> [a]
take' n xs
    | n <= 0 = []
    | otherwise = case xs of
        [] -> []
        (y:ys) -> y : take' (n - 1) ys
-- | Reverse a list.
--
-- Rewritten with an accumulator: the previous version appended to the
-- end of the result on every step (reverse' xs ++ [x]), which is
-- accidentally O(n^2); this version is O(n).
reverse' :: [a] -> [a]
reverse' = go []
  where
    -- Prepend each element onto the accumulator; once the input is
    -- exhausted, the accumulator holds the reversal.
    go acc [] = acc
    go acc (x:xs) = go (x : acc) xs
-- | Infinite list of a single repeated value, tied as a cycle so it
-- occupies constant space.
repeat' :: a -> [a]
repeat' x = xs
  where
    xs = x : xs
-- | Pair up elements from two lists, stopping at the shorter one.
--
-- Fixed: the recursive case previously called the Prelude's 'zip'
-- instead of 'zip'' itself, so this hand-rolled definition only did
-- one step of its own work before delegating.
zip' :: [a] -> [b] -> [(a,b)]
zip' _ [] = []
zip' [] _ = []
zip' (x:xs) (y:ys) = (x, y) : zip' xs ys
-- | Membership test: True when the value occurs in the list.
elem' :: (Eq a) => a -> [a] -> Bool
elem' _ [] = False
elem' y (x:xs) = y == x || elem' y xs
-- | Sort a list ascending, using the head as the pivot on each pass.
quicksort :: (Ord a) => [a] -> [a]
quicksort [] = []
quicksort (pivot:rest) =
    quicksort smaller ++ [pivot] ++ quicksort bigger
  where
    smaller = filter (<= pivot) rest
    bigger = filter (> pivot) rest
|
diminishedprime/.org
|
reading-list/learn_you_a_haskell/recursion.hs
|
mit
| 1,093
| 0
| 12
| 309
| 662
| 343
| 319
| 36
| 1
|
{-# LANGUAGE ScopedTypeVariables, FlexibleInstances, TypeSynonymInstances #-}
module ConsoleChess.Print where
import Data.Monoid
import Data.Ord (comparing, compare)
import Data.Function (on)
import Data.List (intersperse, intercalate, groupBy, sortBy)
import Data.List.Split (chunksOf)
import ConsoleChess.Board
-- | Represent the space around the "main piece" of a Square
-- Type-level tags for how much padding surrounds a square's element.
data A0
data A1
data A2
-- | Reify the space tag as its numeric padding amount.
class Space a where getSpace :: a -> Int
instance Space A0 where getSpace = const 0
instance Space A1 where getSpace = const 1
instance Space A2 where getSpace = const 2
-- | Board colouring of a square.
data SquareColor = BlackS | WhiteS deriving (Eq)
-- | A printable square: the main element plus extra info items that
-- fill the surrounding padding cells.
data Square s a = Square { element :: a
                         , infos :: [a]
                         , color :: SquareColor}
-- | One horizontal row of squares.
newtype Line s a = Line { getSquareLine :: [Square s a] }
-- | A whole board as a list of rows.
newtype Matrix s a = Matrix { getMatrix :: [Line s a] }
-- | Build a square of the given colour holding one element and no
-- extra info; the first argument only fixes the phantom space type.
createSquare :: Space s => s -> SquareColor -> a -> Square s a
createSquare _ c a = Square a [] c
-- appendSquare,addSquare :: (Space s) => Square s a -> Line s a -> Line s a
-- appendSquare squ l = Line $ getSquareLine l ++ [squ]
-- addSquare squ l = Line $ squ : (getSquareLine l)
--
-- appendLine,addLine :: Space s => Line s a -> Matrix s a -> Matrix s a
-- appendLine l m = Matrix $ getMatrix m ++ [l]
-- addLine l m = Matrix $ l : (getMatrix m)
-- | Pad or truncate a list to exactly the given length, filling any
-- missing positions at the end with the default value.
completeList :: Int -> a -> [a] -> [a]
completeList n filler = go n
  where
    go 0 _ = []
    go k [] = filler : go (k - 1) []
    go k (y:ys) = y : go (k - 1) ys
-- | Padding amount of a square, recovered from its phantom type. The
-- 'undefined' is never evaluated: it only steers type inference so
-- getSpace is dispatched at the square's space tag s.
spaceFromSquare :: Space s => Square s a -> Int
spaceFromSquare sq = getSpace $ (undefined :: Square s a -> s) $ sq
-- | Lay a square out as a (2s+1)x(2s+1) grid of cells: the element
-- sits in the centre and the info items fill around it, padded with
-- the default value d when there are too few.
toLst :: (Space s, Show a) => a -> Square s a -> [[a]]
toLst d sqr@(Square e inf _) = [ chunk | chunk <- chunksOf side finalLst ]
  where space = spaceFromSquare sqr
        side = space*2 + 1
        total = side*side - 1
        -- Pad/truncate infos to exactly the number of non-centre cells.
        infoLst = completeList total d inf
        -- Splice the element into the middle position of the grid.
        finalLst = (\(f, r) -> f ++ [e] ++ r) $ splitAt (total `div` 2) infoLst
-- Render a line by producing each square's cell rows and zipping the
-- rows of adjacent squares together horizontally.
instance (Space s) => Show (Line s String) where
  show (Line ss) = intercalate "\n" $ map (concat) $ foldr1 (zipWith (++)) $ map (toLst " ") ss
-- A matrix prints as its rows separated by newlines.
instance (Space s) => Show (Matrix s String) where
  show (Matrix ls) = intercalate "\n" . map show $ ls
-- Boards of (Char, Int) coordinates print with no padding (A0).
instance Show (Board8x8 (Char, Int)) where
  show = show . boardToMatrix (undefined :: A0)
-- TODO: add possibility to flip the change the orientation of the matrix
-- (black pieces at the bottom for instance)
-- | Convert a board into a printable matrix: coordinates are sorted by
-- descending rank then ascending file, grouped into rows, and each
-- cell shows the piece or a blank when empty. All squares are created
-- white here; colouring is not yet derived from position.
boardToMatrix :: (Ord x, Ord y, Board b, Space s) => s -> b (x,y) -> Matrix s String
boardToMatrix s b = Matrix
    $ map (Line . map (\p -> createSquare s WhiteS $ maybe " " show (b `get` p)))
    $ groupBy ((==) `on` snd)
    $ sortBy ((flip compare `on` snd) `mappend` comparing fst)
    $ coordinates b
|
RomainGehrig/ConsoleChess
|
src/ConsoleChess/Print.hs
|
mit
| 2,838
| 0
| 18
| 717
| 990
| 542
| 448
| -1
| -1
|
import Data.List (permutations)
-- | Project Euler 43 property: every 3-digit substring of the
-- pandigital string xs starting at indices 1..7 must be divisible by
-- the corresponding prime 2, 3, 5, 7, 11, 13, 17.
--
-- Rewritten with 'all', which short-circuits at the first failing
-- window; the original foldl over (&&) always built up every check.
subStringDivProp :: String -> Bool
subStringDivProp xs = all divides (zip [1 ..] primes)
  where
    primes = [2, 3, 5, 7, 11, 13, 17]
    -- Read the 3-character window starting at index i; like the
    -- original, this is partial on strings shorter than 10 characters.
    divides (i, d) = (read (map (xs !!) [i .. i + 2]) :: Int) `mod` d == 0
-- | Sum of all 0-9 pandigital numbers satisfying the substring
-- divisibility property.
subStringDivisibility :: Int
subStringDivisibility =
    sum . map read . filter subStringDivProp $ permutations ['0' .. '9']
|
samidarko/euler
|
problem043.hs
|
mit
| 371
| 0
| 15
| 84
| 176
| 97
| 79
| 7
| 1
|
{-# LANGUAGE BangPatterns, DataKinds, DeriveDataTypeable, FlexibleInstances, MultiParamTypeClasses #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
module Hadoop.Protos.DatanodeProtocolProtos.ReportBadBlocksResponseProto (ReportBadBlocksResponseProto(..)) where
import Prelude ((+), (/))
import qualified Prelude as Prelude'
import qualified Data.Typeable as Prelude'
import qualified Data.Data as Prelude'
import qualified Text.ProtocolBuffers.Header as P'
-- NOTE(review): this module looks like machine-generated protocol
-- buffer bindings (hprotoc-style) -- confirm before hand-editing.
-- | Empty response message: it carries no fields at all.
data ReportBadBlocksResponseProto = ReportBadBlocksResponseProto{}
                                  deriving (Prelude'.Show, Prelude'.Eq, Prelude'.Ord, Prelude'.Typeable, Prelude'.Data)
-- Merging two empty messages trivially yields the empty message.
instance P'.Mergeable ReportBadBlocksResponseProto where
  mergeAppend ReportBadBlocksResponseProto ReportBadBlocksResponseProto = ReportBadBlocksResponseProto
instance P'.Default ReportBadBlocksResponseProto where
  defaultValue = ReportBadBlocksResponseProto
-- Wire encoding of the empty message: size is always 0 and no fields
-- are ever written; unknown incoming fields are preserved via
-- P'.unknown. The 10/11 cases are the two message framings the
-- protocol-buffers library dispatches on (bare vs length-prefixed,
-- presumably -- see the library's Wire documentation).
instance P'.Wire ReportBadBlocksResponseProto where
  wireSize ft' self'@(ReportBadBlocksResponseProto)
   = case ft' of
       10 -> calc'Size
       11 -> P'.prependMessageSize calc'Size
       _ -> P'.wireSizeErr ft' self'
    where
        calc'Size = 0
  wirePut ft' self'@(ReportBadBlocksResponseProto)
   = case ft' of
       10 -> put'Fields
       11 -> do
               P'.putSize (P'.wireSize 10 self')
               put'Fields
       _ -> P'.wirePutErr ft' self'
    where
        put'Fields
         = do
             Prelude'.return ()
  wireGet ft'
   = case ft' of
       10 -> P'.getBareMessageWith update'Self
       11 -> P'.getMessageWith update'Self
       _ -> P'.wireGetErr ft'
    where
        update'Self wire'Tag old'Self
         = case wire'Tag of
             _ -> let (field'Number, wire'Type) = P'.splitWireTag wire'Tag in P'.unknown field'Number wire'Type old'Self
instance P'.MessageAPI msg' (msg' -> ReportBadBlocksResponseProto) ReportBadBlocksResponseProto where
  getVal m' f' = f' m'
instance P'.GPB ReportBadBlocksResponseProto
-- The descriptor is embedded as a Read-able string (generated form).
instance P'.ReflectDescriptor ReportBadBlocksResponseProto where
  getMessageInfo _ = P'.GetMessageInfo (P'.fromDistinctAscList []) (P'.fromDistinctAscList [])
  reflectDescriptorInfo _
   = Prelude'.read
      "DescriptorInfo {descName = ProtoName {protobufName = FIName \".hadoop.hdfs.datanode.ReportBadBlocksResponseProto\", haskellPrefix = [MName \"Hadoop\",MName \"Protos\"], parentModule = [MName \"DatanodeProtocolProtos\"], baseName = MName \"ReportBadBlocksResponseProto\"}, descFilePath = [\"Hadoop\",\"Protos\",\"DatanodeProtocolProtos\",\"ReportBadBlocksResponseProto.hs\"], isGroup = False, fields = fromList [], descOneofs = fromList [], keys = fromList [], extRanges = [], knownKeys = fromList [], storeUnknown = False, lazyFields = False, makeLenses = False}"
-- Text-format rendering: an empty message prints nothing and reads as
-- the default value.
instance P'.TextType ReportBadBlocksResponseProto where
  tellT = P'.tellSubMessage
  getT = P'.getSubMessage
instance P'.TextMsg ReportBadBlocksResponseProto where
  textPut msg = Prelude'.return ()
  textGet = Prelude'.return P'.defaultValue
|
alexbiehl/hoop
|
hadoop-protos/src/Hadoop/Protos/DatanodeProtocolProtos/ReportBadBlocksResponseProto.hs
|
mit
| 3,013
| 1
| 16
| 538
| 554
| 291
| 263
| 53
| 0
|
module Main where
import Data.List.Split
import Numeric
import System.Environment
import System.Exit
import System.IO
import Text.Printf
-- | Shift every SRT timestamp line ("HH:MM:SS,mmm --> HH:MM:SS,mmm")
-- by n seconds; all other lines pass through unchanged.
--
-- Fixed: the millisecond digits from floatToDigits were not padded to
-- their decimal position, so e.g. 0.5 s rendered as ",005" instead of
-- ",500" (and 0.05 s as ",005" instead of ",050").
shiftTime n = map maybeShiftLine
  where maybeShiftLine l = case words l of
          [t1, "-->", t2] -> printf "%s --> %s" (shift t1) (shift t2)
          _ -> l
        shift = toTime . (+n) . toSeconds :: String -> String
        -- Render a seconds count back into HH:MM:SS,mmm.
        toTime seconds = printf "%02d:%02d:%02d,%03d" h m s us
          where (digits, index) = floatToDigits 10 seconds
                wholeSeconds = listToInt $ take index digits
                -- Digits after the decimal point, aligned to their
                -- actual decimal position and zero-padded on the right
                -- so three digits always mean milliseconds.
                fracDigits
                  | index >= 0 = drop index digits
                  | otherwise = replicate (negate index) 0 ++ digits
                us = listToInt $ take 3 $ fracDigits ++ repeat 0
                (totalMinutes, s) = wholeSeconds `divMod` 60
                (h, m) = totalMinutes `divMod` 60
                listToInt [] = 0
                listToInt l = read $ concatMap show l :: Integer
        -- "HH:MM:SS,mmm" -> seconds, weighting each ':'/',' field.
        toSeconds = sum . zipWith ($) ops . map read . splitOneOf ":,"
          where ops = [(*3600), (*60), id, (/1000)]
-- | Entry point: shift the subtitle file named by the second argument
-- by the number of seconds given as the first, printing the result.
main = do
    args <- getArgs
    case args of
        [secondsArg, filename] | [(offset, _)] <- reads secondsArg -> do
            contents <- readFile filename
            mapM_ putStrLn (shiftTime offset (lines contents))
        _ -> usage
  where
    usage = do
        progName <- getProgName
        hPutStrLn stderr $ "usage: " ++ progName ++ " <seconds> <filename>"
        exitFailure
|
strange/subtitle-shifter
|
Main.hs
|
mit
| 1,244
| 0
| 15
| 364
| 442
| 232
| 210
| 32
| 2
|
import Text.Regex.PCRE
-- | Length of the fully (recursively) decompressed string for the
-- "(AxB)" marker format: A characters after the marker are repeated B
-- times, and repeated sections are themselves decompressed (part 2
-- semantics). A string with no marker contributes its plain length.
decompressLength s
    | null findFirstParenMatch = length s
    | otherwise = length pre
                + r * decompressLength repeatedString
                + decompressLength rest'
  where
    -- Capture groups: text before the marker, A, B, and the remainder.
    findFirstParenMatch = s =~ "([A-Z]*)\\((\\d+)x(\\d+)\\)(.+)" :: [[String]]
    -- Partial pattern: safe because it is only reached when a match
    -- exists (guarded above).
    [[_, pre, ls, rs, rest]] = findFirstParenMatch
    l = read ls
    r = read rs
    (repeatedString, rest') = splitAt l rest
-- | Read the compressed input from stdin and print its decompressed
-- length.
main = getContents >>= print . decompressLength
|
lzlarryli/advent_of_code_2016
|
day9/part2.hs
|
mit
| 531
| 0
| 9
| 169
| 157
| 80
| 77
| 14
| 1
|
{-# LANGUAGE ExistentialQuantification #-}
{-# LANGUAGE OverloadedStrings #-}
module Xcode.DWARF
( dwarfUUIDsFrom
, DwarfUUID(..)
, bcsymbolmapNameFrom
-- test only
, Arch(..)
, parseDwarfdumpUUID
)
where
import Control.Applicative ( (<|>) )
import Control.Monad.Except
import Data.Char ( toLower )
import qualified Data.Text as T
import qualified Text.Parsec as Parsec
import Text.Read
import qualified Text.Read.Lex as L
import qualified Turtle
-- Types
-- | CPU architectures that dwarfdump reports; anything unrecognized is
-- kept verbatim in 'Other'.
data Arch = ARMV7 | ARM64 | I386 | X86_64 | Other String deriving (Eq)
-- Shown exactly as dwarfdump spells them (lowercase).
instance Show Arch where
  show ARMV7 = "armv7"
  show ARM64 = "arm64"
  show I386 = "i386"
  show X86_64 = "x86_64"
  show (Other s) = s
-- Case-insensitive inverse of Show; unknown names become Other.
instance Read Arch where
  readPrec = parens $ do
    L.Ident s <- lexP
    case map toLower s of
      "armv7" -> return ARMV7
      "arm64" -> return ARM64
      "i386" -> return I386
      "x86_64" -> return X86_64
      o -> return $ Other o
-- | One line of dwarfdump output: a DWARF UUID plus its architecture.
data DwarfUUID = DwarfUUID { _uuid :: String
                           , _arch :: Arch
                           }
                           deriving (Show, Read, Eq)
-- Functions
-- | Attempts to get UUIDs of DWARFs form a .framework/<binary-name> or .dSYM
-- | by running `xcrun dwarfdump --uuid <path>`
dwarfUUIDsFrom
  :: MonadIO m
  => FilePath -- ^ Path to dSYM or .framework/<binary-name>
  -> ExceptT String m [DwarfUUID]
dwarfUUIDsFrom fPath = do
  -- Shell out to `xcrun dwarfdump --uuid <path>` with empty stdin.
  (exitCode, stdOutText, stdErrText) <- Turtle.procStrictWithErr "xcrun"
    ["dwarfdump", "--uuid", T.pack fPath]
    (return $ Turtle.unsafeTextToLine "")
  case exitCode of
    -- Parse every stdout line; the first parse failure aborts with a
    -- prefixed error message.
    Turtle.ExitSuccess -> either (throwError . (\e -> errorMessageHeader ++ show e)) return
      $ mapM (Parsec.parse parseDwarfdumpUUID "" . T.unpack) (T.lines stdOutText)
    -- A failing xcrun surfaces its stderr as the error.
    _ -> throwError $ errorMessageHeader ++ T.unpack stdErrText
  where errorMessageHeader = "Failed parsing DWARF UUID: "
-- | Parses a DwarfUUID from a string like
-- UUID: EDF2AE8A-2EB4-3CA0-986F-D3E49D8C675F (i386) Carthage/Build/iOS/Alamofire.framework/Alamofire
parseDwarfdumpUUID :: Parsec.Parsec String () DwarfUUID
parseDwarfdumpUUID = do
  -- UUID is hex digits and dashes following the "UUID:" label.
  uuid <- Parsec.string "UUID:" >> Parsec.spaces >> Parsec.manyTill (Parsec.hexDigit <|> Parsec.char '-') Parsec.space
  -- The architecture is the parenthesized token after the UUID; 'read'
  -- falls back to Other for unrecognized names (see Read Arch).
  archString <- paren $ Parsec.many1 (Parsec.noneOf [')', ' ', '\t', '\n', '\r'])
  return DwarfUUID { _uuid = uuid, _arch = read archString }
  where paren = Parsec.between (Parsec.char '(') (Parsec.char ')')
-- | File name of the .bcsymbolmap corresponding to a DWARF UUID.
bcsymbolmapNameFrom :: DwarfUUID -> String
bcsymbolmapNameFrom dwarf = _uuid dwarf ++ ".bcsymbolmap"
|
blender/Rome
|
src/Xcode/DWARF.hs
|
mit
| 2,860
| 0
| 17
| 862
| 684
| 369
| 315
| 56
| 2
|
module Main where
import Control.Concurrent.STM
import Control.Monad
import Control.Monad.Error
import System.IO
import System.Environment
import Type
import Parser
import Environment
import TypeChecker
import Evaluator
-- Relative path to the Camille standard library source.
stdlib :: String
stdlib = "../stdlib/stdlib.cam"
-- | Evaluate a library file into the environment for its side effects
-- on the bindings. The file is wrapped in an "Integer { ... }" block
-- so it parses as a program. The environment is returned unchanged on
-- parse or type errors (after printing the parse error).
loadLibrary :: String -> Environment -> IO (Environment)
loadLibrary lib env = do
    contents <- readFile lib
    let program = "Integer {\n" ++ contents ++ "}"
    case (readExpression program) of
        Left err -> do
            print err
            return env
        Right val -> do
            typesCheck <- checkThrowsIO . stmToIO . checkType env $ val
            if (typesCheck)
                then do
                    -- Evaluated only for its environment mutations;
                    -- the result value is deliberately discarded.
                    retVal <- runThrowsIO VoidExpression . eval env $ val
                    return env
                else do
                    return env
-- | Run a Camille source file on top of a fresh environment with the
-- stdlib preloaded, returning the program's final value, or
-- VoidExpression on parse or type errors.
runFile :: String -> IO (Expression)
runFile fileName = do
    contents <- readFile fileName
    -- Same "Integer { ... }" wrapping as loadLibrary.
    let program = "Integer {\n" ++ contents ++ "}"
    env <- newEnvironmentIO >>= loadLibrary stdlib
    case (readExpression program) of
        Left err -> do
            print err
            return VoidExpression
        Right val -> do
            typesCheck <- checkThrowsIO . stmToIO . checkType env $ val
            if (typesCheck)
                then do
                    retVal <- runThrowsIO VoidExpression . eval env $ val
                    return retVal
                else do
                    return VoidExpression
-- | Interactive read-eval-print loop over a shared environment. Each
-- line is parsed, type checked, evaluated, and its value printed
-- unless it is void; the loop then recurses with the same environment.
repl :: Environment -> IO ()
repl env = do
    putStr "Camille> "
    -- Flush so the prompt appears before blocking on input.
    hFlush stdout
    s <- getLine
    case (readExpression s) of
        Left err -> do
            print err
            repl env
        Right val -> do
            typesCheck <- checkThrowsIO . stmToIO . checkType env $ val
            when (typesCheck) $ do
                newVal <- runThrowsIO VoidExpression . eval env $ val
                when (newVal /= VoidExpression) $ do
                    print newVal
            repl env
-- | Entry point: with no arguments start a REPL on a stdlib-loaded
-- environment; otherwise run the file named by the first argument and
-- print its result when it is not void.
--
-- Rewritten to pattern-match the argument list instead of the partial
-- `args !! 0` guarded by `length args == 0`.
main :: IO ()
main = do
    args <- getArgs
    case args of
        [] -> newEnvironmentIO >>= loadLibrary stdlib >>= repl
        (file:_) -> do
            e <- runFile file
            when (e /= VoidExpression) $ do
                print e
|
jlubi333/Camille
|
camille/hs/Main.hs
|
mit
| 2,298
| 0
| 18
| 887
| 666
| 310
| 356
| 71
| 3
|
{- |
Module : $Header$
Description : The Omega Data Types
Copyright : (c) Ewaryst Schulz, DFKI 2008
License : GPLv2 or higher, see LICENSE.txt
Maintainer : Ewaryst.Schulz@dfki.de
Stability : provisional
Portability : portable
Datatypes for an intermediate Omega Representation.
-}
module Omega.DataTypes where
-- | Wrap a value in Just when the condition holds, Nothing otherwise.
justWhen :: Bool -> a -> Maybe a
justWhen True x = Just x
justWhen False _ = Nothing
-- | Top level element with libname and a list of theories
data Library = Library String [Theory] deriving (Show, Eq, Ord)
-- | Contains a theoryname a list of imports, signature elements and
-- sentences (axioms or theorems)
data Theory = Theory String [String] [TCElement]
              deriving (Show, Eq, Ord)
-- | Theory constitutive elements
data TCElement =
    -- | An axiom or theorem element (the Bool distinguishes the two;
    -- which value means which is not fixed here -- see the producers)
    TCAxiomOrTheorem Bool String Term
    -- | Symbol to represent sorts, constants, predicate symbols, etc.
  | TCSymbol String
    -- | A comment, only for development purposes
  | TCComment String
    deriving (Show, Eq, Ord)
-- | Term structure
data Term =
    -- | Symbol
    Symbol String
    -- | Simple variable
  | Var String
    -- | Application of a function to a list of arguments
  | App Term [Term]
    -- | Bindersymbol, bound vars, body
  | Bind String [Term] Term
    deriving (Show, Eq, Ord)
|
nevrenato/Hets_Fork
|
Omega/DataTypes.hs
|
gpl-2.0
| 1,331
| 0
| 7
| 308
| 208
| 121
| 87
| 17
| 2
|
{-# LANGUAGE DeriveAnyClass, TemplateHaskell, PostfixOperators, LambdaCase, OverloadedStrings #-}
{-# OPTIONS_GHC -fno-warn-missing-signatures -fno-warn-type-defaults #-}
{-# OPTIONS_GHC -O0 -fno-cse -fno-full-laziness #-} -- preserve "lexical" sharing for observed sharing
module Commands.Plugins.Spiros.Shell.Grammar where
import Commands.Plugins.Spiros.Shell.Types
import Commands.Plugins.Spiros.Phrase
import Commands.Plugins.Spiros.Extra
import Commands.Mixins.DNS13OSX9
import Prelude()
import Prelude.Spiros
-- | Grammar rule for spoken shell commands: every (spoken, written)
-- pair becomes an alternative that recognizes the spoken form followed
-- by an optional phrase argument; the safety tag travels with each
-- command. Blank placeholder entries are removed by filterBlanks.
shell = 'shell <=> foldMap go shellCommands
 where
 shellCommands = fmap (leftAppend Safe) (filterBlanks safeShellCommands)
              ++ fmap (leftAppend Unsafe) (filterBlanks unsafeShellCommands)
 go (safety,spoken,written) = Shell safety <$> (written <$ token spoken) <*> (phrase-?-"")
 leftAppend a (b,c) = (a,b,c)
-- | Read-only / non-destructive commands: (spoken, written) pairs.
-- `both` maps a word to itself; the empty entries are placeholders
-- kept for easy editing and are filtered out by filterBlanks upstream.
safeShellCommands =
 [ "list"-: "ls"
 , "make dear"-: "mkdir"
 , "get"-: "git"
 , "CD"-: "cd"
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , both "cabal"
 , both "git"
 , both "find"
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 -- Common stack subcommands, spoken literally.
 ] ++ concat
 [ fmap (("stack "++) > both)
   [ "build"
   , "exec"
   ]
 ]
-- | Destructive commands (delete/move/overwrite); tagged Unsafe by the
-- grammar above. Blank entries are placeholders filtered upstream.
unsafeShellCommands =
 [ "remove"-: "rm"
 , "recursively remove"-: "rm -r"
 , "remove dear"-: "rmdir"
 , "are sink"-: "rsync -av"
 , "move"-: "mv"
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , ""-: ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 , both ""
 ]
|
sboosali/commands-spiros
|
config/Commands/Plugins/Spiros/Shell/Grammar.hs
|
gpl-2.0
| 2,016
| 0
| 11
| 515
| 749
| 413
| 336
| 102
| 1
|
import Data.Array.Accelerate as Acc
import Data.Array.Accelerate.Math.Complex as Acc
import Data.Array.Accelerate.CUDA as CUDA
import Data.ByteString.Lazy as BL
import AlaSDR.Modem.Analog.QuadratureDetector as SDR
import AlaSDR.Src.ByteString
-- | Build the Accelerate computation: lift the host array of complex
-- samples onto the device and run the quadrature (FM) detector on it.
processAcc :: Array DIM1 (Complex Float) -> Acc (Array DIM1 Float)
processAcc zs
  = let
      zs' = use zs
    in SDR.quadratureDetector zs'
-- | Read big-endian complex float32 samples from stdin, demodulate the
-- first million with the CUDA backend, and print the result.
main :: IO ()
main = do
  input <- BL.getContents
  let zs = listComplexF32be input
  -- Fixed-size array: exactly 1,000,000 samples are consumed.
  let zs' = Acc.fromList (Z:.1000000) zs :: Array DIM1 (Complex Float)
  print (CUDA.run (processAcc zs'))
|
HounD/AlaSDR
|
examples/fmDemod.hs
|
gpl-3.0
| 572
| 4
| 12
| 90
| 206
| 111
| 95
| 17
| 1
|
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE PackageImports #-}
{- |
Module : Control.Monad.Fibre
Copyright : (c) Anupam Jain 2011
License : GNU GPL Version 3 (see the file LICENSE)
Maintainer : ajnsit@gmail.com
Stability : experimental
Portability : non-portable (uses ghc extensions)
This package defines Monadic functions which provide Choice and Parallelism - (<||&rt;) and (<&&&rt;) - that work on Monads that provide a (MonadBi m IO) instance.
Depends on the @monadbi@ library for extracting the IO actions from m. Also provides a good example of how to use the library.
-}
module Control.Monad.Fibre (
module Control.Monad.Bi,
(<||>), (<&&>),
) where
import Control.Concurrent (forkIO, newEmptyMVar, takeMVar, putMVar)
import Control.Monad.Bi (MonadBi(..))
--------------------------
-- FUNCTION DEFINITIONS --
--------------------------
-- Choice
-- | Choice: run both actions concurrently and return the result of
-- whichever finishes first. The loser's putMVar blocks harmlessly on
-- the already-full MVar and its result is discarded.
(<||>) :: (Monad m, MonadBi m IO) => m o -> m o -> m o
t1 <||> t2 = do
  -- lower extracts a runnable IO action from each computation.
  t1io <- lower t1
  t2io <- lower t2
  x <- raise newEmptyMVar
  raise $ do
    forkIO $ t1io >>= putMVar x
    forkIO $ t2io >>= putMVar x
  -- Blocks until the first of the two threads delivers a value.
  raise $ takeMVar x
-- Parallelism
-- | Parallelism: run both actions concurrently and wait for both,
-- returning the pair of results. Each thread writes into its own MVar,
-- so the waits cannot deadlock on each other.
(<&&>) :: (Monad m, MonadBi m IO) => m o1 -> m o2 -> m (o1,o2)
t1 <&&> t2 = do
  t1io <- lower t1
  t2io <- lower t2
  x1 <- raise newEmptyMVar
  x2 <- raise newEmptyMVar
  raise $ do
    forkIO $ t1io >>= putMVar x1
    forkIO $ t2io >>= putMVar x2
  xv1 <- raise $ takeMVar x1
  xv2 <- raise $ takeMVar x2
  return (xv1,xv2)
|
ajnsit/monadfibre
|
src/Control/Monad/Fibre.hs
|
gpl-3.0
| 1,472
| 0
| 11
| 313
| 369
| 186
| 183
| 28
| 1
|
{-# LANGUAGE UnicodeSyntax, NoImplicitPrelude #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE DeriveTraversable #-}
{-# LANGUAGE ScopedTypeVariables #-}
module WithBar
( WithBar(..)
, toString
, parseString
) where
import BasePrelude hiding (toList)
import Prelude.Unicode
import Util (HumanReadable, split, (>$>))
import qualified Util as HR (HumanReadable(..))
import Data.Aeson (ToJSON, FromJSON, toJSON, parseJSON, withText)
import Data.Aeson.Types (Value(String))
import Data.Foldable (toList)
import qualified Data.Text as T
-- | A non-empty sequence rendered and parsed as items joined by '|'.
newtype WithBar α = WithBar { getNonEmpty ∷ NonEmpty α }
    deriving (Eq, Ord, Show, Read, Functor, Foldable, Traversable)
-- | Render each element human-readably and join with '|'.
toString ∷ HumanReadable α ⇒ WithBar α → String
toString = intercalate "|" ∘ map HR.toString ∘ toList
-- | Split on '|' and parse each piece; fails in m when any piece does
-- (or, presumably, when the split yields no usable non-empty list --
-- the behavior on empty input is delegated to split/traverse).
parseString ∷ (HumanReadable α, MonadFail m) ⇒ String → m (WithBar α)
parseString = split (≡ '|') >>> traverse HR.parseString >$> WithBar
-- JSON round-trips through the bar-separated string form.
instance HumanReadable α ⇒ ToJSON (WithBar α) where
  toJSON = String ∘ T.pack ∘ toString
instance HumanReadable α ⇒ FromJSON (WithBar α) where
  parseJSON = withText "String" (parseString ∘ T.unpack)
|
39aldo39/klfc
|
src/WithBar.hs
|
gpl-3.0
| 1,177
| 0
| 9
| 187
| 345
| 198
| 147
| 26
| 1
|
module LispData where
import Data.Complex
import Data.Array
import Control.Monad.Error
import Text.ParserCombinators.Parsec.Error
import Control.Monad.Error
import Data.IORef
import System.IO
-- | A Scheme value: atoms, lists, numbers, strings, and the various
-- callable forms (primitive, user-defined, and IO-performing).
data LispVal = Atom String                                    -- ^ Symbol.
             | List [LispVal]                                 -- ^ Proper list.
             | DottedList [LispVal] LispVal                   -- ^ Improper list @(a b . c)@.
             | Number LispNum
             | String String
             | Character Char
             | Bool Bool
             | Vector (Array Integer LispVal)
             | PrimitiveFunc ([LispVal] -> ThrowsError LispVal)  -- ^ Pure builtin.
             | Func LispFunc                                     -- ^ User-defined lambda.
             | IOFunc ([LispVal] -> IOThrowsError LispVal)       -- ^ Builtin that performs IO.
             | Port Handle                                       -- ^ Open file handle.
-- | A user-defined function: parameter names, an optional catch-all
-- vararg name, the body expressions, and the captured environment.
data LispFunc =
  LFunc { params :: [String]        -- ^ Names of the fixed parameters.
        , vararg :: (Maybe String)  -- ^ Name binding any extra arguments.
        , body :: [LispVal]         -- ^ Body expressions, evaluated in order.
        , closure :: Env            -- ^ Environment captured at definition.
        }
-- | The numeric tower: real (Double), integer, rational, and complex.
data LispNum = LRea Double
             | LInt Integer
             | LRat Rational
             | LCom (Complex Double)
  deriving (Eq)
-- | Render a 'LispVal' in its external (read-back) representation.
-- Functions and ports render as opaque placeholders.
showVal :: LispVal -> String
showVal val = case val of
  String v        -> "\"" ++ v ++ "\""
  Atom v          -> v
  Number v        -> show v
  Character c     -> "#\\" ++ [c]
  Bool True       -> "#t"
  Bool False      -> "#f"
  List v          -> "(" ++ unwordsList v ++ ")"
  DottedList h t  -> "(" ++ unwordsList h ++ " . " ++ showVal t ++ ")"
  Vector a        -> "#(" ++ unwordsList (elems a) ++ ")"
  PrimitiveFunc _ -> "<primitive>"
  Func f          -> "(lambda (" ++ unwords (map show (params f)) ++
                     maybe "" (" . " ++) (vararg f) ++ ") ...)"
  Port _          -> "<IO Port>"
  IOFunc _        -> "<IO primitive>"
-- | Join the rendered forms of the values with single spaces.
unwordsList :: [LispVal] -> String
unwordsList vals = unwords (map showVal vals)
-- | Render a number by delegating to the 'Show' instance of the
-- wrapped representation.
showNum :: LispNum -> String
showNum n = case n of
  LRea x -> show x
  LInt x -> show x
  LRat x -> show x
  LCom x -> show x
-- | 'show' produces the external representation (see 'showVal').
instance Show LispVal where
  show = showVal
-- | 'show' delegates to 'showNum'.
instance Show LispNum where
  show = showNum
-- | All the ways evaluation can fail; rendered by 'showError'.
data LispError
  = NumArgs Integer [LispVal]      -- ^ Expected arity vs. actual arguments.
  | TypeMismatch String LispVal    -- ^ Expected type description vs. value.
  | Parser ParseError              -- ^ Wrapped Parsec error.
  | BadSpecialForm String LispVal
  | NotFunction String String
  | UnboundVar String String       -- ^ Message and the variable name.
  | Default String                 -- ^ Catch-all with a free-form message.
-- | Human-readable rendering of each error variant.
showError :: LispError -> String
showError err = case err of
  UnboundVar msg vname    -> msg ++ ": " ++ vname
  BadSpecialForm msg form -> msg ++ ": " ++ show form
  NotFunction msg func    -> msg ++ ": " ++ show func
  NumArgs expected found  -> "Expected " ++ show expected
                             ++ " arguments, but found " ++ unwordsList found
  TypeMismatch expected found -> "Invalid type: expected " ++ expected
                                 ++ ", found " ++ show found
  Parser perr             -> "Parse error at " ++ show perr
  Default s               -> "Error: " ++ s
-- | Errors display via 'showError'.
instance Show LispError where
  show = showError
-- NOTE(review): 'Error' (Control.Monad.Error) is deprecated in modern
-- mtl in favour of Control.Monad.Except; kept here for compatibility.
instance Error LispError where
  noMsg = Default "An error has occured"
  strMsg = Default
-- | Computations that may fail purely with a 'LispError'.
type ThrowsError = Either LispError
-- | Convert any thrown error into a 'Right' of its rendered text, so
-- the computation always yields a displayable 'String'.
-- (Inferred type: @(MonadError e m, Show e) => m String -> m String@.)
trapError action = catchError action (return . show)
-- | Unwrap a 'Right' result.
--
-- Partial by design: callers are expected to have run 'trapError'
-- first so only 'Right' values reach this point.  The explicit 'Left'
-- equation replaces the previous non-exhaustive-pattern crash with a
-- diagnosable error message.
extractValue :: ThrowsError a -> a
extractValue (Right val) = val
extractValue (Left err) = error ("extractValue: unexpected error: " ++ show err)
-- | Mutable variable environment: names mapped to mutable value cells.
type Env = IORef [(String, IORef LispVal)]
-- | Computations that perform IO and may fail with a 'LispError'.
type IOThrowsError = ErrorT LispError IO
-- | Build a user-defined 'Func' from its parts; the parameter names
-- are the rendered forms of the given parameter expressions.
makeFunc :: (Maybe String) -> Env -> [LispVal] -> [LispVal] -> IOThrowsError LispVal
makeFunc varargs env ps b =
  return (Func (LFunc { params = map showVal ps
                      , vararg = varargs
                      , body = b
                      , closure = env
                      }))
-- | A function with only fixed parameters (no vararg).
makeNormalFunc = makeFunc Nothing
-- | A function whose first argument (rendered) names the vararg slot.
makeVarArgs = makeFunc . Just . showVal
|
adamschoenemann/wysas
|
src/LispData.hs
|
gpl-3.0
| 3,380
| 0
| 11
| 912
| 1,138
| 596
| 542
| 91
| 2
|
{-# LANGUAGE CPP #-}
module Carbon.Data.Logic.Evaluation(
run
)where
{-
This module aims for concurrent evaluation of a discussion.
The entry point is the run function.
Evaluation is performed as followes:
1.: The Content is written into a temporary file
2.: Diamond is called on the file for each ResultType concurrently
3.: Each of the concurrent threads parses it's diamond output
4.: The concurrent outputs are gathered and returned.
-}
import Control.Concurrent
import Control.Concurrent.STM (TVar, STM)
import Control.Monad
import Data.Map (Map)
import System.Directory (removeFile)
import qualified Control.Concurrent.STM as STM
import qualified Data.Map as Map
import qualified System.Process as Process
import Carbon.Config
import Carbon.Data.Logic.Diamond
import qualified Carbon.Data.Logic as Logic
-- Compatibility shim: 'modifyTVar' was added to the stm package in
-- version 2.3; provide an equivalent local definition for older stm.
#if MIN_VERSION_stm(2, 3, 0)
modifyTVar = STM.modifyTVar
#else
modifyTVar :: TVar a -> (a -> a) -> STM ()
modifyTVar t f = STM.writeTVar t =<< liftM f (STM.readTVar t)
#endif
-- | Evaluate discussion content with diamond.
--
-- The content is written to @path@, one diamond process is started
-- concurrently per configured 'ResultType' (see 'runSingle'), and the
-- per-type results are merged into a single 'Results' value.  A
-- watchdog ('watchDog') aborts the evaluation after its timeout by
-- zeroing the thread counter, which unblocks 'awaitFinish'.
run :: Config -> FilePath -> String -> IO (Results String)
run conf path content = do
  let rTypes = diamondEval conf
  writeFile path content
  state <- mkState $ length rTypes
  watchDog state
  mapM_ (runSingle conf state path) rTypes
  results <- liftM merge $ awaitFinish state
  when (deleteAfterEval conf) $ removeFile path
  return results
  where
    -- Fold all partial result lists into one map keyed by ResultType.
    merge :: [Results String] -> Results String
    merge = Results . Map.toList . foldl go Map.empty
      where
        -- NOTE(review): Map.insertWith' is the strict insert from older
        -- containers; deprecated in favour of Data.Map.Strict.insertWith.
        go :: Map ResultType [DiamondResult String] -> Results String -> Map ResultType [DiamondResult String]
        go m (Results rs) = foldl (flip $ uncurry (Map.insertWith' (++))) m rs
-- | Timeout guard: after 10 seconds, force the evaluation to finish by
-- setting the running-thread counter to 0 (which unblocks
-- 'awaitFinish') and killing all still-registered worker threads.
watchDog :: State -> IO ()
watchDog state = void . forkIO $ do
  let second = 1000000  -- threadDelay takes microseconds
  threadDelay $ 10 * second
  -- Zero the counter and snapshot the worker list in one transaction.
  ts <- STM.atomically $ do
    STM.writeTVar (tCount state) 0
    STM.readTVar (threads state)
  mapM_ killThread ts
-- | Block (via STM retry) until the running-thread counter has dropped
-- to zero, then return every result gathered so far.
awaitFinish :: State -> IO [Results String]
awaitFinish state = STM.atomically $ do
  remaining <- STM.readTVar (tCount state)
  STM.check (remaining <= 0)
  STM.readTVar (results state)
-- | Fork one worker that runs diamond for a single 'ResultType' on the
-- given file and records either its parsed answers or a parse error.
-- The worker registers itself in the shared 'State' and unregisters
-- (and decrements the counter) on either outcome.
--
-- Fixes: removed the dead @proc = Process.proc call parm@ binding
-- (never used; output is obtained via 'Process.readProcess'), and
-- renamed the @onResult@ parameter that shadowed the outer @rType@.
runSingle :: Config -> State -> FilePath -> ResultType -> IO ()
runSingle conf state path rType = void . forkIO $ do
  tId <- myThreadId
  STM.atomically $ modifyTVar (threads state) (tId:)
  let call = diamondCall conf
      parm = diamondParams conf Map.! rType ++ [path]
  dOutput <- Process.readProcess call parm ""
  either (onError tId) (onResult tId rType) $ Logic.execParser Logic.answers path dOutput
  where
    -- Record a parse failure: unregister the thread and report it.
    onError :: ThreadId -> String -> IO ()
    onError tId e = do
      STM.atomically $ do
        modifyTVar (threads state) (filter (tId /=))
        modifyTVar (tCount state) $ subtract 1
      mapM_ putStrLn ["Could not parse diamond output for discussion \""++path++"\":",e]
    -- Record a successful result and unregister the thread.
    onResult :: ThreadId -> ResultType -> [Logic.Answer] -> IO ()
    onResult tId rt answers = STM.atomically $ do
      let r = Results [(rt, answers)]
      modifyTVar (threads state) $ filter (tId /=)
      modifyTVar (tCount state) $ subtract 1
      modifyTVar (results state) (r :)
-- | The shared state for all threads:
data State = State {
    threads :: TVar [ThreadId]         -- ^ Workers still running.
  , tCount :: TVar Int                 -- ^ Workers yet to finish (0 = done).
  , results :: TVar [Results String]   -- ^ Partial results gathered so far.
  }
-- | Initialise the shared state for @i@ pending worker threads:
-- no registered workers and no results yet.
mkState :: Int -> IO State
mkState i = STM.atomically $
  liftM3 State (STM.newTVar []) (STM.newTVar i) (STM.newTVar [])
|
runjak/carbon-adf
|
Carbon/Data/Logic/Evaluation.hs
|
gpl-3.0
| 3,705
| 0
| 16
| 837
| 1,101
| 553
| 548
| 77
| 1
|
module PrettyPrint where
import Data.List
import Collect
-- | Types that can be rendered back to (roughly) concrete syntax.
class PrettyPrint a where
  -- | Pretty-print one value.
  pp :: a -> String
-- | Statement rendering.  The repeated fold
-- @foldr (\x y -> "\t" ++ pp x ++ ",\n" ++ y) ""@ renders a statement
-- block as tab-indented, comma/newline-separated lines.
instance PrettyPrint Stmt where
  pp (Vars vars body) = "Var" ++ show vars ++ "[\n" ++ (foldr (\x y -> "\t" ++ pp x ++ ",\n" ++ y) "" body) ++ "\n]\n"
  pp (Assign e1 e2) = pp e1 ++ " := " ++ pp e2
  pp Skip = "Skip"
  pp (Pre e1) = "Assume " ++ pp e1
  pp (Post e1) = "Assert " ++ pp e1
  pp (If g s1 s2) = "If (" ++ pp g ++ ") {\n\t" ++
    (foldr (\x y -> "\t" ++ pp x ++ ",\n" ++ y) "" s1) ++ "}\n else {\n\t" ++
    (foldr (\x y -> "\t" ++ pp x ++ ",\n" ++ y) "" s2) ++ "}"
  pp (While g s) = "while (" ++ pp g ++ ")" ++ foldr (\x y -> "\t" ++ pp x ++ ",\n" ++ y) "" s
  pp (Inv e s) = "inv (" ++ pp e ++ "): " ++ pp s
  pp (Sim e1 e2) = intercalate "," (map pp e1) ++ " := " ++ intercalate "," (map pp e2)
  pp (Prog name inP outP body) = "Program " ++ name ++ " (" ++ intercalate "," (map pp inP) ++ ") (" ++
    intercalate "," (map pp outP) ++ " )" ++ "{\n" ++ (concat $ map pp body) ++ "\n}\n"
-- | Expression rendering; binary operators share one helper.
instance PrettyPrint Expr where
  pp expr = case expr of
      Lit i        -> show i
      Name s       -> s
      Exists s e   -> "Exists " ++ s ++ "(" ++ pp e ++ ")"
      ForAll s e   -> "ForAll " ++ s ++ "(" ++ pp e ++ ")"
      Minus e1 e2  -> binOp " - " e1 e2
      Plus e1 e2   -> binOp " + " e1 e2
      Equal e1 e2  -> binOp " == " e1 e2
      Lower e1 e2  -> binOp " < " e1 e2
      LowerE e1 e2 -> binOp " <= " e1 e2
      And e1 e2    -> binOp " && " e1 e2
      Or e1 e2     -> binOp " || " e1 e2
      Impl e1 e2   -> "((" ++ pp e1 ++ ")" ++ " --> " ++ "(" ++ pp e2 ++ "))"
      Repby e1 e2  -> pp e1 ++ "[" ++ pp e2 ++ "]"
      Not e1       -> "!(" ++ pp e1 ++ ")"
      True_        -> "True"
    where
      -- Render @e1 <op> e2@ with the operator text in between.
      binOp op e1 e2 = pp e1 ++ op ++ pp e2
-- | Variable-declaration rendering; quantifier-bound names render bare.
instance PrettyPrint Var where
  pp v = case v of
    Int i   -> "Int " ++ i
    Array s -> "Array " ++ s
    Univ x  -> x
    Exis x  -> x
|
Ferdinand-vW/Wlp-verification-engine
|
src/PrettyPrint.hs
|
gpl-3.0
| 1,884
| 0
| 17
| 605
| 1,048
| 510
| 538
| 40
| 0
|
-- grid is a game written in Haskell
-- Copyright (C) 2018 karamellpelle@hotmail.com
--
-- This file is part of grid.
--
-- grid is free software: you can redistribute it and/or modify
-- it under the terms of the GNU General Public License as published by
-- the Free Software Foundation, either version 3 of the License, or
-- (at your option) any later version.
--
-- grid is distributed in the hope that it will be useful,
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- GNU General Public License for more details.
--
-- You should have received a copy of the GNU General Public License
-- along with grid. If not, see <http://www.gnu.org/licenses/>.
--
module Game.Memory.MemoryData.Fancy.SoundMemory
(
SoundMemory (..),
loadSoundMemory,
unloadSoundMemory,
) where
import MyPrelude
import Game.Values
import File
import OpenAL
import OpenAL.Helpers
-- | OpenAL handles for the Memory mode's sound effects: one sample
-- buffer and one (non-positional) source to play it through.
data SoundMemory =
    SoundMemory
    {
        soundMemoryIterationFailureBuf :: !ALuint,  -- ^ Buffer holding the "iteration failure" sample.
        soundMemorySrc :: !ALuint                   -- ^ Source the sample is played on.
    }
-- | Create the OpenAL buffer and source for Memory-mode sounds.
-- Pair with 'unloadSoundMemory' to release the handles.
loadSoundMemory :: IO SoundMemory
loadSoundMemory = do
    -- buffer: load the failure sample from static data
    buf <- genBuf
    path <- fileStaticData "Memory/Output/iteration_failure.mp3"
    loadBuf buf path
    -- src
    src <- genSrc
    -- make source non-3D: listener-relative at the origin, no velocity
    alSourcei src al_SOURCE_RELATIVE $ fI al_TRUE
    alSource3f src al_POSITION 0.0 0.0 0.0
    alSource3f src al_VELOCITY 0.0 0.0 0.0
    -- set default buffer so the source is ready to play immediately
    alSourcei src al_BUFFER (fI buf)
    return SoundMemory
           {
              soundMemoryIterationFailureBuf = buf,
              soundMemorySrc = src
           }
-- | Release the OpenAL source and buffer owned by a 'SoundMemory'.
-- (Source first, then the buffer it references.  Original note:
-- should the source be stopped via alStopSource beforehand?)
unloadSoundMemory :: SoundMemory -> IO ()
unloadSoundMemory sound = do
    delSrc (soundMemorySrc sound)
    delBuf (soundMemoryIterationFailureBuf sound)
|
karamellpelle/grid
|
source/Game/Memory/MemoryData/Fancy/SoundMemory.hs
|
gpl-3.0
| 1,862
| 0
| 9
| 440
| 256
| 141
| 115
| 35
| 1
|
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# OPTIONS_GHC -fno-warn-duplicate-exports #-}
{-# OPTIONS_GHC -fno-warn-unused-binds #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- |
-- Module : Network.Google.Resource.DataTransfer.Transfers.Insert
-- Copyright : (c) 2015-2016 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
-- Inserts a data transfer request.
--
-- /See:/ <https://developers.google.com/admin-sdk/data-transfer/ Admin Data Transfer API Reference> for @datatransfer.transfers.insert@.
module Network.Google.Resource.DataTransfer.Transfers.Insert
(
-- * REST Resource
TransfersInsertResource
-- * Creating a Request
, transfersInsert
, TransfersInsert
-- * Request Lenses
, tiPayload
) where
import Network.Google.DataTransfer.Types
import Network.Google.Prelude
-- | A resource alias for @datatransfer.transfers.insert@ method which the
-- 'TransfersInsert' request conforms to.
-- (POST @admin/datatransfer/v1/transfers@ with a JSON 'DataTransfer' body.)
type TransfersInsertResource =
     "admin" :>
       "datatransfer" :>
         "v1" :>
           "transfers" :>
             QueryParam "alt" AltJSON :>
               ReqBody '[JSON] DataTransfer :>
                 Post '[JSON] DataTransfer
-- | Inserts a data transfer request.
--
-- /See:/ 'transfersInsert' smart constructor.
-- (Wraps only the request payload; all routing is in the resource type.)
newtype TransfersInsert = TransfersInsert'
    { _tiPayload :: DataTransfer
    } deriving (Eq,Show,Data,Typeable,Generic)
-- | Creates a value of 'TransfersInsert' with the minimum fields required to make a request.
--
-- Use one of the following lenses to modify other fields as desired:
--
-- * 'tiPayload'
transfersInsert
    :: DataTransfer -- ^ 'tiPayload'
    -> TransfersInsert
transfersInsert = TransfersInsert'
-- | Multipart request metadata.
tiPayload :: Lens' TransfersInsert DataTransfer
tiPayload = lens _tiPayload setPayload
  where
    setPayload s a = s {_tiPayload = a}
-- | Wire the request to the servant-style client: response type,
-- required OAuth scope, and the client builder for the resource.
instance GoogleRequest TransfersInsert where
        type Rs TransfersInsert = DataTransfer
        type Scopes TransfersInsert =
             '["https://www.googleapis.com/auth/admin.datatransfer"]
        requestClient TransfersInsert'{..}
          = go (Just AltJSON) _tiPayload dataTransferService
          where go
                  = buildClient
                      (Proxy :: Proxy TransfersInsertResource)
                      mempty
|
rueshyna/gogol
|
gogol-admin-datatransfer/gen/Network/Google/Resource/DataTransfer/Transfers/Insert.hs
|
mpl-2.0
| 2,806
| 0
| 13
| 643
| 308
| 189
| 119
| 50
| 1
|
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
{-# OPTIONS_GHC -fno-warn-unused-binds #-}
{-# OPTIONS_GHC -fno-warn-unused-matches #-}
-- Derived from AWS service descriptions, licensed under Apache 2.0.
-- |
-- Module : Network.AWS.CloudWatchLogs.CreateExportTask
-- Copyright : (c) 2013-2015 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
-- Creates an 'ExportTask' which allows you to efficiently export data from
-- a Log Group to your Amazon S3 bucket.
--
-- This is an asynchronous call. If all the required information is
-- provided, this API will initiate an export task and respond with the
-- task Id. Once started, 'DescribeExportTasks' can be used to get the
-- status of an export task.
--
-- /See:/ <http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateExportTask.html AWS API Reference> for CreateExportTask.
module Network.AWS.CloudWatchLogs.CreateExportTask
(
-- * Creating a Request
createExportTask
, CreateExportTask
-- * Request Lenses
, cetDestinationPrefix
, cetTaskName
, cetLogStreamNamePrefix
, cetLogGroupName
, cetFrom
, cetTo
, cetDestination
-- * Destructuring the Response
, createExportTaskResponse
, CreateExportTaskResponse
-- * Response Lenses
, cetrsTaskId
, cetrsResponseStatus
) where
import Network.AWS.CloudWatchLogs.Types
import Network.AWS.CloudWatchLogs.Types.Product
import Network.AWS.Prelude
import Network.AWS.Request
import Network.AWS.Response
-- | /See:/ 'createExportTask' smart constructor.
data CreateExportTask = CreateExportTask'
{ _cetDestinationPrefix :: !(Maybe Text)
, _cetTaskName :: !(Maybe Text)
, _cetLogStreamNamePrefix :: !(Maybe Text)
, _cetLogGroupName :: !Text
, _cetFrom :: !Nat
, _cetTo :: !Nat
, _cetDestination :: !Text
} deriving (Eq,Read,Show,Data,Typeable,Generic)
-- | Creates a value of 'CreateExportTask' with the minimum fields required to make a request.
--
-- Use one of the following lenses to modify other fields as desired:
--
-- * 'cetDestinationPrefix'
--
-- * 'cetTaskName'
--
-- * 'cetLogStreamNamePrefix'
--
-- * 'cetLogGroupName'
--
-- * 'cetFrom'
--
-- * 'cetTo'
--
-- * 'cetDestination'
createExportTask
    :: Text -- ^ 'cetLogGroupName'
    -> Natural -- ^ 'cetFrom'
    -> Natural -- ^ 'cetTo'
    -> Text -- ^ 'cetDestination'
    -> CreateExportTask
createExportTask pLogGroupName_ pFrom_ pTo_ pDestination_ =
    CreateExportTask'
    { _cetDestinationPrefix = Nothing
    , _cetTaskName = Nothing
    , _cetLogStreamNamePrefix = Nothing
    , _cetLogGroupName = pLogGroupName_
    -- '_Nat' converts the user-facing 'Natural' into the wire 'Nat'.
    , _cetFrom = _Nat # pFrom_
    , _cetTo = _Nat # pTo_
    , _cetDestination = pDestination_
    }
-- | Prefix that will be used as the start of Amazon S3 key for every object
-- exported. If not specified, this defaults to \'exportedlogs\'.
cetDestinationPrefix :: Lens' CreateExportTask (Maybe Text)
cetDestinationPrefix = lens _cetDestinationPrefix (\ s a -> s{_cetDestinationPrefix = a});
-- | The name of the export task.
cetTaskName :: Lens' CreateExportTask (Maybe Text)
cetTaskName = lens _cetTaskName (\ s a -> s{_cetTaskName = a});
-- | Will only export log streams that match the provided
-- logStreamNamePrefix. If you don\'t specify a value, no prefix filter is
-- applied.
cetLogStreamNamePrefix :: Lens' CreateExportTask (Maybe Text)
cetLogStreamNamePrefix = lens _cetLogStreamNamePrefix (\ s a -> s{_cetLogStreamNamePrefix = a});
-- | The name of the log group to export.
cetLogGroupName :: Lens' CreateExportTask Text
cetLogGroupName = lens _cetLogGroupName (\ s a -> s{_cetLogGroupName = a});
-- | A unix timestamp indicating the start time of the range for the request.
-- Events with a timestamp prior to this time will not be exported.
-- (Composition with '_Nat' exposes the field as 'Natural'.)
cetFrom :: Lens' CreateExportTask Natural
cetFrom = lens _cetFrom (\ s a -> s{_cetFrom = a}) . _Nat;
-- | A unix timestamp indicating the end time of the range for the request.
-- Events with a timestamp later than this time will not be exported.
-- (Composition with '_Nat' exposes the field as 'Natural'.)
cetTo :: Lens' CreateExportTask Natural
cetTo = lens _cetTo (\ s a -> s{_cetTo = a}) . _Nat;
-- | Name of Amazon S3 bucket to which the log data will be exported.
-- __NOTE: Only buckets in the same AWS region are supported__
cetDestination :: Lens' CreateExportTask Text
cetDestination = lens _cetDestination (\ s a -> s{_cetDestination = a});
-- | Wire the request: JSON POST against the CloudWatch Logs endpoint,
-- decoding the response's @taskId@ plus the HTTP status.
instance AWSRequest CreateExportTask where
        type Rs CreateExportTask = CreateExportTaskResponse
        request = postJSON cloudWatchLogs
        response
          = receiveJSON
              (\ s h x ->
                 CreateExportTaskResponse' <$>
                   (x .?> "taskId") <*> (pure (fromEnum s)))
-- | AWS JSON-RPC headers: operation target and payload content type.
instance ToHeaders CreateExportTask where
        toHeaders
          = const
              (mconcat
                 ["X-Amz-Target" =#
                    ("Logs_20140328.CreateExportTask" :: ByteString),
                  "Content-Type" =#
                    ("application/x-amz-json-1.1" :: ByteString)])
-- | Serialise the request body; optional fields are omitted when Nothing.
instance ToJSON CreateExportTask where
        toJSON CreateExportTask'{..}
          = object
              (catMaybes
                 [("destinationPrefix" .=) <$> _cetDestinationPrefix,
                  ("taskName" .=) <$> _cetTaskName,
                  ("logStreamNamePrefix" .=) <$>
                    _cetLogStreamNamePrefix,
                  Just ("logGroupName" .= _cetLogGroupName),
                  Just ("from" .= _cetFrom), Just ("to" .= _cetTo),
                  Just ("destination" .= _cetDestination)])
-- | JSON-RPC style: everything is posted to the service root.
instance ToPath CreateExportTask where
        toPath = const "/"
instance ToQuery CreateExportTask where
        toQuery = const mempty
-- | /See:/ 'createExportTaskResponse' smart constructor.
data CreateExportTaskResponse = CreateExportTaskResponse'
    { _cetrsTaskId :: !(Maybe Text)      -- ^ Absent if the service returned no taskId.
    , _cetrsResponseStatus :: !Int       -- ^ HTTP status of the response.
    } deriving (Eq,Read,Show,Data,Typeable,Generic)
-- | Creates a value of 'CreateExportTaskResponse' with the minimum fields required to make a request.
--
-- Use one of the following lenses to modify other fields as desired:
--
-- * 'cetrsTaskId'
--
-- * 'cetrsResponseStatus'
createExportTaskResponse
    :: Int -- ^ 'cetrsResponseStatus'
    -> CreateExportTaskResponse
createExportTaskResponse pResponseStatus_ =
    CreateExportTaskResponse'
    { _cetrsTaskId = Nothing
    , _cetrsResponseStatus = pResponseStatus_
    }
-- | Id of the export task that got created.
cetrsTaskId :: Lens' CreateExportTaskResponse (Maybe Text)
cetrsTaskId = lens _cetrsTaskId (\ s a -> s{_cetrsTaskId = a});
-- | The response status code.
cetrsResponseStatus :: Lens' CreateExportTaskResponse Int
cetrsResponseStatus = lens _cetrsResponseStatus (\ s a -> s{_cetrsResponseStatus = a});
|
olorin/amazonka
|
amazonka-cloudwatch-logs/gen/Network/AWS/CloudWatchLogs/CreateExportTask.hs
|
mpl-2.0
| 7,197
| 0
| 13
| 1,618
| 1,086
| 646
| 440
| 130
| 1
|
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# OPTIONS_GHC -fno-warn-duplicate-exports #-}
{-# OPTIONS_GHC -fno-warn-unused-binds #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- |
-- Module : Network.Google.Resource.Games.Applications.GetEndPoint
-- Copyright : (c) 2015-2016 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
-- Returns a URL for the requested end point type.
--
-- /See:/ <https://developers.google.com/games/ Google Play Game Services Reference> for @games.applications.getEndPoint@.
module Network.Google.Resource.Games.Applications.GetEndPoint
(
-- * REST Resource
ApplicationsGetEndPointResource
-- * Creating a Request
, applicationsGetEndPoint
, ApplicationsGetEndPoint
-- * Request Lenses
, agepXgafv
, agepUploadProtocol
, agepAccessToken
, agepUploadType
, agepEndPointType
, agepApplicationId
, agepCallback
) where
import Network.Google.Games.Types
import Network.Google.Prelude
-- | A resource alias for @games.applications.getEndPoint@ method which the
-- 'ApplicationsGetEndPoint' request conforms to.
-- (POST @games/v1/applications/getEndPoint@; all parameters travel as
-- query parameters.)
type ApplicationsGetEndPointResource =
     "games" :>
       "v1" :>
         "applications" :>
           "getEndPoint" :>
             QueryParam "$.xgafv" Xgafv :>
               QueryParam "upload_protocol" Text :>
                 QueryParam "access_token" Text :>
                   QueryParam "uploadType" Text :>
                     QueryParam "endPointType"
                       ApplicationsGetEndPointEndPointType
                       :>
                       QueryParam "applicationId" Text :>
                         QueryParam "callback" Text :>
                           QueryParam "alt" AltJSON :> Post '[JSON] EndPoint
-- | Returns a URL for the requested end point type.
--
-- /See:/ 'applicationsGetEndPoint' smart constructor.
-- (Every field is optional; each maps to one query parameter above.)
data ApplicationsGetEndPoint =
  ApplicationsGetEndPoint'
    { _agepXgafv :: !(Maybe Xgafv)
    , _agepUploadProtocol :: !(Maybe Text)
    , _agepAccessToken :: !(Maybe Text)
    , _agepUploadType :: !(Maybe Text)
    , _agepEndPointType :: !(Maybe ApplicationsGetEndPointEndPointType)
    , _agepApplicationId :: !(Maybe Text)
    , _agepCallback :: !(Maybe Text)
    }
  deriving (Eq, Show, Data, Typeable, Generic)
-- | Creates a value of 'ApplicationsGetEndPoint' with the minimum fields required to make a request.
--
-- Use one of the following lenses to modify other fields as desired:
--
-- * 'agepXgafv'
--
-- * 'agepUploadProtocol'
--
-- * 'agepAccessToken'
--
-- * 'agepUploadType'
--
-- * 'agepEndPointType'
--
-- * 'agepApplicationId'
--
-- * 'agepCallback'
applicationsGetEndPoint
    :: ApplicationsGetEndPoint
applicationsGetEndPoint =
  -- All parameters are optional, so the minimal request is all-Nothing.
  ApplicationsGetEndPoint'
    { _agepXgafv = Nothing
    , _agepUploadProtocol = Nothing
    , _agepAccessToken = Nothing
    , _agepUploadType = Nothing
    , _agepEndPointType = Nothing
    , _agepApplicationId = Nothing
    , _agepCallback = Nothing
    }
-- Generated field lenses: each is a plain getter/setter over one record field.
-- | V1 error format.
agepXgafv :: Lens' ApplicationsGetEndPoint (Maybe Xgafv)
agepXgafv
  = lens _agepXgafv (\ s a -> s{_agepXgafv = a})
-- | Upload protocol for media (e.g. \"raw\", \"multipart\").
agepUploadProtocol :: Lens' ApplicationsGetEndPoint (Maybe Text)
agepUploadProtocol
  = lens _agepUploadProtocol
      (\ s a -> s{_agepUploadProtocol = a})
-- | OAuth access token.
agepAccessToken :: Lens' ApplicationsGetEndPoint (Maybe Text)
agepAccessToken
  = lens _agepAccessToken
      (\ s a -> s{_agepAccessToken = a})
-- | Legacy upload protocol for media (e.g. \"media\", \"multipart\").
agepUploadType :: Lens' ApplicationsGetEndPoint (Maybe Text)
agepUploadType
  = lens _agepUploadType
      (\ s a -> s{_agepUploadType = a})
-- | Type of endpoint being requested.
agepEndPointType :: Lens' ApplicationsGetEndPoint (Maybe ApplicationsGetEndPointEndPointType)
agepEndPointType
  = lens _agepEndPointType
      (\ s a -> s{_agepEndPointType = a})
-- | The application ID from the Google Play developer console.
agepApplicationId :: Lens' ApplicationsGetEndPoint (Maybe Text)
agepApplicationId
  = lens _agepApplicationId
      (\ s a -> s{_agepApplicationId = a})
-- | JSONP
agepCallback :: Lens' ApplicationsGetEndPoint (Maybe Text)
agepCallback
  = lens _agepCallback (\ s a -> s{_agepCallback = a})
-- | Client wiring: response type, OAuth scope, and the argument order
-- matching the query parameters in 'ApplicationsGetEndPointResource'.
instance GoogleRequest ApplicationsGetEndPoint where
        type Rs ApplicationsGetEndPoint = EndPoint
        type Scopes ApplicationsGetEndPoint =
             '["https://www.googleapis.com/auth/games"]
        requestClient ApplicationsGetEndPoint'{..}
          = go _agepXgafv _agepUploadProtocol _agepAccessToken
              _agepUploadType
              _agepEndPointType
              _agepApplicationId
              _agepCallback
              (Just AltJSON)
              gamesService
          where go
                  = buildClient
                      (Proxy :: Proxy ApplicationsGetEndPointResource)
                      mempty
|
brendanhay/gogol
|
gogol-games/gen/Network/Google/Resource/Games/Applications/GetEndPoint.hs
|
mpl-2.0
| 5,393
| 0
| 19
| 1,263
| 790
| 458
| 332
| 118
| 1
|
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# OPTIONS_GHC -fno-warn-duplicate-exports #-}
{-# OPTIONS_GHC -fno-warn-unused-binds #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- |
-- Module : Network.Google.Resource.Analytics.Management.Filters.Delete
-- Copyright : (c) 2015-2016 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
-- Delete a filter.
--
-- /See:/ <https://developers.google.com/analytics/ Google Analytics API Reference> for @analytics.management.filters.delete@.
module Network.Google.Resource.Analytics.Management.Filters.Delete
(
-- * REST Resource
ManagementFiltersDeleteResource
-- * Creating a Request
, managementFiltersDelete
, ManagementFiltersDelete
-- * Request Lenses
, mfdFilterId
, mfdAccountId
) where
import Network.Google.Analytics.Types
import Network.Google.Prelude
-- | A resource alias for @analytics.management.filters.delete@ method which the
-- 'ManagementFiltersDelete' request conforms to.
-- (DELETE @analytics/v3/management/accounts/{accountId}/filters/{filterId}@.)
type ManagementFiltersDeleteResource =
     "analytics" :>
       "v3" :>
         "management" :>
           "accounts" :>
             Capture "accountId" Text :>
               "filters" :>
                 Capture "filterId" Text :>
                   QueryParam "alt" AltJSON :> Delete '[JSON] Filter
-- | Delete a filter.
--
-- /See:/ 'managementFiltersDelete' smart constructor.
-- (Both fields are required path captures.)
data ManagementFiltersDelete =
  ManagementFiltersDelete'
    { _mfdFilterId :: !Text
    , _mfdAccountId :: !Text
    }
  deriving (Eq, Show, Data, Typeable, Generic)
-- | Creates a value of 'ManagementFiltersDelete' with the minimum fields required to make a request.
--
-- Use one of the following lenses to modify other fields as desired:
--
-- * 'mfdFilterId'
--
-- * 'mfdAccountId'
managementFiltersDelete
    :: Text -- ^ 'mfdFilterId'
    -> Text -- ^ 'mfdAccountId'
    -> ManagementFiltersDelete
managementFiltersDelete filterId_ accountId_ =
  ManagementFiltersDelete'
    { _mfdFilterId = filterId_
    , _mfdAccountId = accountId_
    }
-- Generated field lenses over the two path captures.
-- | ID of the filter to be deleted.
mfdFilterId :: Lens' ManagementFiltersDelete Text
mfdFilterId
  = lens _mfdFilterId (\ s a -> s{_mfdFilterId = a})
-- | Account ID to delete the filter for.
mfdAccountId :: Lens' ManagementFiltersDelete Text
mfdAccountId
  = lens _mfdAccountId (\ s a -> s{_mfdAccountId = a})
-- | Client wiring: note the argument order (accountId first) matches
-- the capture order in 'ManagementFiltersDeleteResource'.
instance GoogleRequest ManagementFiltersDelete where
        type Rs ManagementFiltersDelete = Filter
        type Scopes ManagementFiltersDelete =
             '["https://www.googleapis.com/auth/analytics.edit"]
        requestClient ManagementFiltersDelete'{..}
          = go _mfdAccountId _mfdFilterId (Just AltJSON)
              analyticsService
          where go
                  = buildClient
                      (Proxy :: Proxy ManagementFiltersDeleteResource)
                      mempty
|
brendanhay/gogol
|
gogol-analytics/gen/Network/Google/Resource/Analytics/Management/Filters/Delete.hs
|
mpl-2.0
| 3,293
| 0
| 15
| 723
| 386
| 232
| 154
| 63
| 1
|
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# OPTIONS_GHC -fno-warn-duplicate-exports #-}
{-# OPTIONS_GHC -fno-warn-unused-binds #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- |
-- Module : Network.Google.Resource.Spanner.Projects.Instances.Backups.Operations.Delete
-- Copyright : (c) 2015-2016 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
-- Deletes a long-running operation. This method indicates that the client
-- is no longer interested in the operation result. It does not cancel the
-- operation. If the server doesn\'t support this method, it returns
-- \`google.rpc.Code.UNIMPLEMENTED\`.
--
-- /See:/ <https://cloud.google.com/spanner/ Cloud Spanner API Reference> for @spanner.projects.instances.backups.operations.delete@.
module Network.Google.Resource.Spanner.Projects.Instances.Backups.Operations.Delete
(
-- * REST Resource
ProjectsInstancesBackupsOperationsDeleteResource
-- * Creating a Request
, projectsInstancesBackupsOperationsDelete
, ProjectsInstancesBackupsOperationsDelete
-- * Request Lenses
, pibodXgafv
, pibodUploadProtocol
, pibodAccessToken
, pibodUploadType
, pibodName
, pibodCallback
) where
import Network.Google.Prelude
import Network.Google.Spanner.Types
-- | A resource alias for @spanner.projects.instances.backups.operations.delete@ method which the
-- 'ProjectsInstancesBackupsOperationsDelete' request conforms to.
-- (DELETE @v1/{name}@ where @name@ is the full operation resource name.)
type ProjectsInstancesBackupsOperationsDeleteResource
     =
     "v1" :>
       Capture "name" Text :>
         QueryParam "$.xgafv" Xgafv :>
           QueryParam "upload_protocol" Text :>
             QueryParam "access_token" Text :>
               QueryParam "uploadType" Text :>
                 QueryParam "callback" Text :>
                   QueryParam "alt" AltJSON :> Delete '[JSON] Empty
-- | Deletes a long-running operation. This method indicates that the client
-- is no longer interested in the operation result. It does not cancel the
-- operation. If the server doesn\'t support this method, it returns
-- \`google.rpc.Code.UNIMPLEMENTED\`.
--
-- /See:/ 'projectsInstancesBackupsOperationsDelete' smart constructor.
-- (Only '_pibodName' is required; the rest are optional query params.)
data ProjectsInstancesBackupsOperationsDelete =
  ProjectsInstancesBackupsOperationsDelete'
    { _pibodXgafv :: !(Maybe Xgafv)
    , _pibodUploadProtocol :: !(Maybe Text)
    , _pibodAccessToken :: !(Maybe Text)
    , _pibodUploadType :: !(Maybe Text)
    , _pibodName :: !Text
    , _pibodCallback :: !(Maybe Text)
    }
  deriving (Eq, Show, Data, Typeable, Generic)
-- | Creates a value of 'ProjectsInstancesBackupsOperationsDelete' with the minimum fields required to make a request.
--
-- Use one of the following lenses to modify other fields as desired:
--
-- * 'pibodXgafv'
--
-- * 'pibodUploadProtocol'
--
-- * 'pibodAccessToken'
--
-- * 'pibodUploadType'
--
-- * 'pibodName'
--
-- * 'pibodCallback'
projectsInstancesBackupsOperationsDelete
    :: Text -- ^ 'pibodName'
    -> ProjectsInstancesBackupsOperationsDelete
projectsInstancesBackupsOperationsDelete name_ =
  ProjectsInstancesBackupsOperationsDelete'
    { _pibodXgafv = Nothing
    , _pibodUploadProtocol = Nothing
    , _pibodAccessToken = Nothing
    , _pibodUploadType = Nothing
    , _pibodName = name_
    , _pibodCallback = Nothing
    }
-- Generated field lenses: plain getter/setter pairs over the record.
-- | V1 error format.
pibodXgafv :: Lens' ProjectsInstancesBackupsOperationsDelete (Maybe Xgafv)
pibodXgafv
  = lens _pibodXgafv (\ s a -> s{_pibodXgafv = a})
-- | Upload protocol for media (e.g. \"raw\", \"multipart\").
pibodUploadProtocol :: Lens' ProjectsInstancesBackupsOperationsDelete (Maybe Text)
pibodUploadProtocol
  = lens _pibodUploadProtocol
      (\ s a -> s{_pibodUploadProtocol = a})
-- | OAuth access token.
pibodAccessToken :: Lens' ProjectsInstancesBackupsOperationsDelete (Maybe Text)
pibodAccessToken
  = lens _pibodAccessToken
      (\ s a -> s{_pibodAccessToken = a})
-- | Legacy upload protocol for media (e.g. \"media\", \"multipart\").
pibodUploadType :: Lens' ProjectsInstancesBackupsOperationsDelete (Maybe Text)
pibodUploadType
  = lens _pibodUploadType
      (\ s a -> s{_pibodUploadType = a})
-- | The name of the operation resource to be deleted.
pibodName :: Lens' ProjectsInstancesBackupsOperationsDelete Text
pibodName
  = lens _pibodName (\ s a -> s{_pibodName = a})
-- | JSONP
pibodCallback :: Lens' ProjectsInstancesBackupsOperationsDelete (Maybe Text)
pibodCallback
  = lens _pibodCallback
      (\ s a -> s{_pibodCallback = a})
-- | Client wiring: response is 'Empty'; either of the two scopes
-- authorises the call.  Argument order matches the resource type.
instance GoogleRequest
           ProjectsInstancesBackupsOperationsDelete
         where
        type Rs ProjectsInstancesBackupsOperationsDelete =
             Empty
        type Scopes ProjectsInstancesBackupsOperationsDelete
             =
             '["https://www.googleapis.com/auth/cloud-platform",
               "https://www.googleapis.com/auth/spanner.admin"]
        requestClient
          ProjectsInstancesBackupsOperationsDelete'{..}
          = go _pibodName _pibodXgafv _pibodUploadProtocol
              _pibodAccessToken
              _pibodUploadType
              _pibodCallback
              (Just AltJSON)
              spannerService
          where go
                  = buildClient
                      (Proxy ::
                         Proxy
                           ProjectsInstancesBackupsOperationsDeleteResource)
                      mempty
|
brendanhay/gogol
|
gogol-spanner/gen/Network/Google/Resource/Spanner/Projects/Instances/Backups/Operations/Delete.hs
|
mpl-2.0
| 5,796
| 0
| 15
| 1,228
| 706
| 416
| 290
| 109
| 1
|
{-# OPTIONS_GHC -F -pgmF ./scripts/local-htfpp #-}
{-# LANGUAGE CPP #-}
module Foo.A where
import Test.Framework
#include "test.h"
-- Deliberately failing HTF test case (the _FAIL suffix marks an expected
-- failure).  'x' and 'y' are brought into scope by the CPP include
-- "test.h" above -- TODO confirm their definitions there.
test_a_FAIL =
    assertEqual x y
|
skogsbaer/HTF
|
tests/Foo/A.hs
|
lgpl-2.1
| 169
| 0
| 5
| 30
| 24
| 15
| 9
| 6
| 1
|
{- nbd - A Haskell library to implement NBD servers
-
- Copyright (C) 2012 Nicolas Trangez
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this library; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-}
{-# LANGUAGE FlexibleContexts #-}
module Data.Conduit.Network.Basic (
Application
, ApplicationBuilder
, ServerSettings(..)
, runServer
) where
import Control.Concurrent (forkIO)
import Control.Exception.Base (bracket, bracketOnError, finally)
import Control.Monad (forever, void)
import Control.Monad.IO.Class (MonadIO, liftIO)
import Control.Monad.Trans.Control (control)
import Control.Monad.Trans.Resource (MonadBaseControl)
import Network.Socket (Socket)
import qualified Network.Socket as NS
import Data.Conduit.Network hiding (Application, ServerSettings)
-- | Listening-endpoint configuration for 'runServer': either a TCP port
-- plus a 'HostPreference', or the filesystem path of a UNIX domain socket.
data ServerSettings = TCPServerSettings Int HostPreference
                    | UNIXServerSettings FilePath
  deriving (Show, Eq)
-- | A per-connection handler: the monadic action run for one client.
type Application m = m ()

-- | Builds an 'Application' from a freshly accepted socket and the peer's
-- address.
type ApplicationBuilder m = ((Socket, NS.SockAddr) -> m (Application m))
-- | Run a server: bind a listening socket according to the given
-- 'ServerSettings', then accept connections forever, spawning one thread
-- per connection.
--
-- The listening socket is released via 'bracket' when the accept loop is
-- killed, and each connection thread closes its own socket when the
-- application finishes (or throws).
--
-- Fix: this block previously mixed the deprecated 'NS.sClose' /
-- 'NS.bindSocket' with the modern 'NS.close' (already used below), so the
-- modern names are now used consistently.
runServer :: (MonadBaseControl IO m, MonadIO m) => ServerSettings
                                                -> ApplicationBuilder m
                                                -> m ()
runServer settings mkApp = control $ \run -> bracket
    (liftIO doBind)
    (liftIO . NS.close)
    (run . forever . serve)
  where
    -- Accept one connection, build its application, and run it in its own
    -- thread; 'finally' guarantees the accepted socket is closed when the
    -- application ends, normally or not.
    serve lsocket = do
        s@(socket, _) <- liftIO $ acceptSafe lsocket
        app <- mkApp s
        let app' run = void $ run app
            appClose run = app' run `finally` NS.close socket
        -- NOTE(review): if 'mkApp' itself throws, the accepted socket is
        -- not closed here; application builders should be exception-safe.
        control $ \run -> forkIO (appClose run) >> run (return ())

    -- Create and bind the listening socket for the configured endpoint.
    doBind = case settings of
        TCPServerSettings port host -> bindPort port host
        UNIXServerSettings path -> bindUNIX path

    -- Bind and listen on a UNIX domain socket, closing the socket if
    -- binding fails part-way ('bracketOnError').
    bindUNIX path = do
        sock <- bracketOnError
            (NS.socket NS.AF_UNIX NS.Stream 0)
            NS.close
            (\sock -> do
                NS.bind sock (NS.SockAddrUnix path)
                return sock
            )
        NS.listen sock (max 2048 NS.maxListenQueue)
        return sock
|
NicolasT/nbd
|
src/Data/Conduit/Network/Basic.hs
|
lgpl-2.1
| 2,722
| 0
| 18
| 715
| 555
| 298
| 257
| 45
| 2
|
module Network.Haskoin.Node.Units (tests) where
import Test.HUnit (Assertion, assertBool)
import Test.Framework (Test, testGroup)
import Test.Framework.Providers.HUnit (testCase)
import Data.Maybe (fromJust)
import Network.Haskoin.Crypto
import Network.Haskoin.Node.Bloom
import Network.Haskoin.Util
-- | Bloom-filter unit-test tree.  The vectors used by the individual
-- cases mirror bitcoind's reference tests (see comment below).
tests :: [Test]
tests =
    [
      -- Test cases come from bitcoind /src/test/bloom_tests.cpp
      testGroup "Bloom Filters"
        [ testCase "Bloom Filter Vector 1" bloomFilter1
        , testCase "Bloom Filter Vector 2" bloomFilter2
        , testCase "Bloom Filter Vector 3" bloomFilter3
        ]
    ]
-- Vector 1: three-element filter, 1% FP rate, tweak 0.
bloomFilter1 :: Assertion
bloomFilter1 = do
    assertBool "Bloom filter doesn't contain vector 1" $ bloomContains filt1 vec1
    assertBool "Bloom filter contains something it should not" $
        not $ bloomContains filt1 vec2
    assertBool "Bloom filter doesn't contain vector 3" $ bloomContains filt3 vec3
    assertBool "Bloom filter doesn't contain vector 4" $ bloomContains filt4 vec4
    assertBool "Bloom filter serialization is incorrect" $ encode' filt4 == expected
  where
    filt0 = bloomCreate 3 0.01 0 BloomUpdateAll
    filt1 = bloomInsert filt0 vec1
    filt3 = bloomInsert filt1 vec3
    filt4 = bloomInsert filt3 vec4
    vec1 = fromJust $ hexToBS "99108ad8ed9bb6274d3980bab5a85c048f0950c8"
    vec2 = fromJust $ hexToBS "19108ad8ed9bb6274d3980bab5a85c048f0950c8"
    vec3 = fromJust $ hexToBS "b5a2c786d9ef4658287ced5914b37a1b4aa32eee"
    vec4 = fromJust $ hexToBS "b9300670b4c5366e95b2699e8b18bc75e5f729c5"
    expected = fromJust $ hexToBS "03614e9b050000000000000001"
-- Vector 2: identical to vector 1 except for a non-zero tweak value,
-- which changes the hash seeds and therefore the serialized bits.
bloomFilter2 :: Assertion
bloomFilter2 = do
    assertBool "Bloom filter doesn't contain vector 1" $ bloomContains filt1 vec1
    assertBool "Bloom filter contains something it should not" $
        not $ bloomContains filt1 vec2
    assertBool "Bloom filter doesn't contain vector 3" $ bloomContains filt3 vec3
    assertBool "Bloom filter doesn't contain vector 4" $ bloomContains filt4 vec4
    assertBool "Bloom filter serialization is incorrect" $ encode' filt4 == expected
  where
    filt0 = bloomCreate 3 0.01 2147483649 BloomUpdateAll
    filt1 = bloomInsert filt0 vec1
    filt3 = bloomInsert filt1 vec3
    filt4 = bloomInsert filt3 vec4
    vec1 = fromJust $ hexToBS "99108ad8ed9bb6274d3980bab5a85c048f0950c8"
    vec2 = fromJust $ hexToBS "19108ad8ed9bb6274d3980bab5a85c048f0950c8"
    vec3 = fromJust $ hexToBS "b5a2c786d9ef4658287ced5914b37a1b4aa32eee"
    vec4 = fromJust $ hexToBS "b9300670b4c5366e95b2699e8b18bc75e5f729c5"
    expected = fromJust $ hexToBS "03ce4299050000000100008001"
-- Vector 3: filter built from a public key and its address hash.
bloomFilter3 :: Assertion
bloomFilter3 =
    assertBool "Bloom filter serialization is incorrect" $ encode' filt2 == expected
  where
    filt0 = bloomCreate 2 0.001 0 BloomUpdateAll
    filt1 = bloomInsert filt0 $ encode' pub
    filt2 = bloomInsert filt1 $ encode' $ getAddrHash $ pubKeyAddr pub
    key = fromJust $ fromWIF "5Kg1gnAjaLfKiwhhPpGS3QfRg2m6awQvaj98JCZBZQ5SuS2F15C"
    pub = derivePubKey key
    expected = fromJust $ hexToBS "038fc16b080000000000000001"
|
nuttycom/haskoin
|
tests/Network/Haskoin/Node/Units.hs
|
unlicense
| 2,911
| 0
| 10
| 571
| 642
| 322
| 320
| 58
| 1
|
module SSync.Chunk (
Chunk(..)
) where
import qualified Data.ByteString.Lazy as BSL
import Data.Word(Word32)
-- | One element of a delta stream: either a reference to an
-- already-known block by its index, or a run of literal bytes.
data Chunk = Block Word32          -- index of a known block
           | Data BSL.ByteString   -- raw bytes to copy through
  deriving (Show)
|
socrata-platform/ssync
|
src/main/haskell/SSync/Chunk.hs
|
apache-2.0
| 201
| 0
| 7
| 51
| 57
| 36
| 21
| 7
| 0
|
-- | Naive functional quicksort: the head is the pivot; elements less
-- than or equal to the pivot are sorted to its left, the rest to its
-- right.  Stable for equal elements only in the sense that duplicates of
-- the pivot end up on the left.
quicksort :: (Ord a) => [a] -> [a]
quicksort [] = []
quicksort (x:xs) = smallerSorted ++ [x] ++ biggerSorted
  where smallerSorted = quicksort (filter (<=x) xs)
        biggerSorted  = quicksort (filter (>x) xs)

-- | Fix: the original file applied 'print' at the top level, which is not
-- a valid Haskell declaration; wrapping it in 'main' makes the module
-- compile and preserves the intended output.
main :: IO ()
main = print (quicksort [10, 0, 0, -1, -2, 4, 4, 7, 4, 1, 100, -1000])
|
gixxi/comparareetpendere
|
test/resources/Quicksort.hs
|
bsd-2-clause
| 267
| 1
| 11
| 49
| 162
| 89
| 73
| -1
| -1
|
import Control.Monad
import Control.Monad.State
import qualified Data.Sequence as Seq
-- | A node carrying a payload and a link to its successor.  The successor
-- field is deliberately lazy so that the cyclic chains defined at the
-- bottom of this file can be constructed.  NOTE(review): the derived
-- 'Read' cannot terminate on a cyclic value -- confirm it is ever used.
data Node a = Node a (Node a) deriving (Read)
-- | Render a node as @node: \<payload\> -> \<successor payload\>@; only
-- one step of the chain is shown, so cyclic chains print fine.
instance Show a => Show (Node a) where
  show (Node here (Node there _)) =
    concat ["node: ", show here, " -> ", show there]
-- | Nodes compare by payload only; successor links are ignored, which is
-- what makes equality usable on cyclic chains.
instance Eq a => Eq (Node a) where
  (Node lhs _) == (Node rhs _) = lhs == rhs
-- | Follow a node's successor link.
next :: Node a -> Node a
next (Node _ successor) = successor
-- | Length of the cycle reachable from the given start node.
--
-- Walks the infinite chain @iterate next n0@, using 'breakM' with the
-- stateful predicate 'loopState' to stop at the first node @nn@ that has
-- been seen before.  @out@ is everything walked before that repeat; the
-- prefix of @out@ up to the first occurrence of @nn@ is the non-cyclic
-- "tail", so the difference of the two lengths is exactly one full loop.
-- Relies on node equality comparing payloads only, and diverges if the
-- chain has no cycle.
loopSize :: Eq a => Node a -> Int
loopSize n0 =
  let (out, nn:_) = evalState (breakM loopState (iterate next n0)) Seq.empty
      len1 = length (takeWhile (/=nn) out)
  in length out - len1
-- | Stateful visited-set predicate for cycle detection: answer whether
-- the node has been seen before, recording it when it has not.
loopState :: Eq a => Node a -> State (Seq.Seq (Node a)) Bool
loopState node = do
  seen <- get
  if node `elem` seen
    then return True
    else do
      put (node Seq.<| seen)
      return False
-- | Monadic analogue of 'break': split a list at the first element for
-- which the monadic predicate yields 'True'; that element starts the
-- second half.  The predicate is not run on elements after the split.
breakM :: Monad m => (a -> m Bool) -> [a] -> m ([a], [a])
breakM _ [] = return ([], [])
breakM p whole@(x:rest) = do
  hit <- p x
  if hit
    then return ([], whole)
    else do
      (before, after) <- breakM p rest
      return (x : before, after)
-- Hand-built test chain: node1..node3 form the non-cyclic "tail", and
-- node4..node14 form a cycle of 11 nodes (node14 links back to node4),
-- so 'loopSize' on any of these nodes should return 11.
node1 = Node 1 node2
node2 = Node 2 node3
node3 = Node 3 node4
node4 = Node 4 node5
node5 = Node 5 node6
node6 = Node 6 node7
node7 = Node 7 node8
node8 = Node 8 node9
node9 = Node 9 node10
node10 = Node 10 node11
node11 = Node 11 node12
node12 = Node 12 node13
node13 = Node 13 node14
node14 = Node 14 node4
-- Sample starting points (tail nodes plus the cycle entry).
ns = [node1, node2, node3, node4]
|
niexshao/Exercises
|
src/Loop.hs
|
bsd-3-clause
| 1,299
| 0
| 13
| 334
| 693
| 351
| 342
| 45
| 2
|
module Arhelk.Lexer.Token(
Token(..)
) where
import Data.Monoid
import Data.Text as T
import TextShow
-- | Kinds of tokens in the generic lexer.
-- Some tokens may be unused in particular languages; language-specific
-- tokens go through the 'ExtToken' escape hatch.
data Token a =
  -- | Sequence on non space and non punctuational symbols
    Word Text
  -- | End of sentence. Example '.'
  | EndSentence
  -- | Question sign, could also mark end of sentence. Example '?'
  | QuestionMark
  -- | Exclamation sign, could also mark end of sentence. Example '!'
  | ExclamationMark
  -- | Comma sign. Example ','
  | Comma
  -- | Semicolon sign. Example ';'
  | Semicolon
  -- | Citation sign. Example ':'
  | Citation
  -- | Dash sign. Example '—'
  | Dash
  -- | Quotation region (nested token stream). Example '‘’'
  | Quotation [Token a]
  -- | Tokens that are very specific for language
  | ExtToken a
  deriving (Eq)
-- | Debug rendering that mirrors the constructor names.
instance Show a => Show (Token a) where
  show (Word t) = "Word \"" <> T.unpack t <> "\""
  show EndSentence = "EndSentence"
  show QuestionMark = "QuestionMark"
  show ExclamationMark = "ExclamationMark"
  show Comma = "Comma"
  show Semicolon = "Semicolon"
  show Citation = "Citation"
  show Dash = "Dash"
  show (Quotation t) = "Quotation " <> show t
  show (ExtToken a) = show a
-- | Compact 'Builder' rendering (short tags, nested quotations expanded
-- line by line).
instance TextShow a => TextShow (Token a) where
  showb (Word t) = "W" <> showbSpace <> fromText t
  showb EndSentence = "ES"
  showb QuestionMark = "Q"
  showb ExclamationMark = "EX"
  showb Comma = "CO"
  showb Semicolon = "S"
  showb Citation = "CI"
  showb Dash = "DA"
  showb (Quotation t) = "QS\n" <> unlinesB (showb <$> t) <> "QE"
  showb (ExtToken a) = showb a
|
Teaspot-Studio/arhelk-lexer
|
src/Arhelk/Lexer/Token.hs
|
bsd-3-clause
| 1,595
| 0
| 13
| 402
| 356
| 189
| 167
| 41
| 0
|
{-
(c) The University of Glasgow, 2006
\section[HscTypes]{Types for the per-module compiler}
-}
{-# LANGUAGE CPP, ScopedTypeVariables #-}
-- | Types for the per-module compiler
module HscTypes (
-- * compilation state
HscEnv(..), hscEPS,
FinderCache, FindResult(..), InstalledFindResult(..),
Target(..), TargetId(..), pprTarget, pprTargetId,
ModuleGraph, emptyMG,
HscStatus(..),
#ifdef GHCI
IServ(..),
#endif
-- * Hsc monad
Hsc(..), runHsc, runInteractiveHsc,
-- * Information about modules
ModDetails(..), emptyModDetails,
ModGuts(..), CgGuts(..), ForeignStubs(..), appendStubC,
ImportedMods, ImportedModsVal(..),
ModSummary(..), ms_imps, ms_installed_mod, ms_mod_name, showModMsg, isBootSummary,
msHsFilePath, msHiFilePath, msObjFilePath,
SourceModified(..),
-- * Information about the module being compiled
-- (re-exported from DriverPhases)
HscSource(..), isHsBootOrSig, hscSourceString,
-- * State relating to modules in this package
HomePackageTable, HomeModInfo(..), emptyHomePackageTable,
lookupHpt, eltsHpt, filterHpt, allHpt, mapHpt, delFromHpt,
addToHpt, addListToHpt, lookupHptDirectly, listToHpt,
hptInstances, hptRules, hptVectInfo, pprHPT,
hptObjs,
-- * State relating to known packages
ExternalPackageState(..), EpsStats(..), addEpsInStats,
PackageTypeEnv, PackageIfaceTable, emptyPackageIfaceTable,
lookupIfaceByModule, emptyModIface, lookupHptByModule,
PackageInstEnv, PackageFamInstEnv, PackageRuleBase,
mkSOName, mkHsSOName, soExt,
-- * Metaprogramming
MetaRequest(..),
MetaResult, -- data constructors not exported to ensure correct response type
metaRequestE, metaRequestP, metaRequestT, metaRequestD, metaRequestAW,
MetaHook,
-- * Annotations
prepareAnnotations,
-- * Interactive context
InteractiveContext(..), emptyInteractiveContext,
icPrintUnqual, icInScopeTTs, icExtendGblRdrEnv,
extendInteractiveContext, extendInteractiveContextWithIds,
substInteractiveContext,
setInteractivePrintName, icInteractiveModule,
InteractiveImport(..), setInteractivePackage,
mkPrintUnqualified, pprModulePrefix,
mkQualPackage, mkQualModule, pkgQual,
-- * Interfaces
ModIface(..), mkIfaceWarnCache, mkIfaceHashCache, mkIfaceFixCache,
emptyIfaceWarnCache, mi_boot, mi_fix,
mi_semantic_module,
mi_free_holes,
renameFreeHoles,
-- * Fixity
FixityEnv, FixItem(..), lookupFixity, emptyFixityEnv,
-- * TyThings and type environments
TyThing(..), tyThingAvailInfo,
tyThingTyCon, tyThingDataCon,
tyThingId, tyThingCoAxiom, tyThingParent_maybe, tyThingsTyCoVars,
implicitTyThings, implicitTyConThings, implicitClassThings,
isImplicitTyThing,
TypeEnv, lookupType, lookupTypeHscEnv, mkTypeEnv, emptyTypeEnv,
typeEnvFromEntities, mkTypeEnvWithImplicits,
extendTypeEnv, extendTypeEnvList,
extendTypeEnvWithIds, plusTypeEnv,
lookupTypeEnv,
typeEnvElts, typeEnvTyCons, typeEnvIds, typeEnvPatSyns,
typeEnvDataCons, typeEnvCoAxioms, typeEnvClasses,
-- * MonadThings
MonadThings(..),
-- * Information on imports and exports
WhetherHasOrphans, IsBootInterface, Usage(..),
Dependencies(..), noDependencies,
updNameCacheIO,
IfaceExport,
-- * Warnings
Warnings(..), WarningTxt(..), plusWarns,
-- * Linker stuff
Linkable(..), isObjectLinkable, linkableObjs,
Unlinked(..), CompiledByteCode,
isObject, nameOfObject, isInterpretable, byteCodeOfObject,
-- * Program coverage
HpcInfo(..), emptyHpcInfo, isHpcUsed, AnyHpcUsage,
-- * Breakpoints
ModBreaks (..), emptyModBreaks,
-- * Vectorisation information
VectInfo(..), IfaceVectInfo(..), noVectInfo, plusVectInfo,
noIfaceVectInfo, isNoIfaceVectInfo,
-- * Safe Haskell information
IfaceTrustInfo, getSafeMode, setSafeMode, noIfaceTrustInfo,
trustInfoToNum, numToTrustInfo, IsSafeImport,
-- * result of the parser
HsParsedModule(..),
-- * Compilation errors and warnings
SourceError, GhcApiError, mkSrcErr, srcErrorMessages, mkApiErr,
throwOneError, handleSourceError,
handleFlagWarnings, printOrThrowWarnings,
) where
#include "HsVersions.h"
#ifdef GHCI
import ByteCodeTypes
import InteractiveEvalTypes ( Resume )
import GHCi.Message ( Pipe )
import GHCi.RemoteTypes
#endif
import UniqFM
import HsSyn
import RdrName
import Avail
import Module
import InstEnv ( InstEnv, ClsInst, identicalClsInstHead )
import FamInstEnv
import CoreSyn ( CoreProgram, RuleBase, CoreRule, CoreVect )
import Name
import NameEnv
import NameSet
import VarEnv
import VarSet
import Var
import Id
import IdInfo ( IdDetails(..), RecSelParent(..))
import Type
import ApiAnnotation ( ApiAnns )
import Annotations ( Annotation, AnnEnv, mkAnnEnv, plusAnnEnv )
import Class
import TyCon
import CoAxiom
import ConLike
import DataCon
import PatSyn
import PrelNames ( gHC_PRIM, ioTyConName, printName, mkInteractiveModule
, eqTyConName )
import TysWiredIn
import Packages hiding ( Version(..) )
import DynFlags
import DriverPhases ( Phase, HscSource(..), isHsBootOrSig, hscSourceString )
import BasicTypes
import IfaceSyn
import Maybes
import Outputable
import SrcLoc
import Unique
import UniqDFM
import FastString
import StringBuffer ( StringBuffer )
import Fingerprint
import MonadUtils
import Bag
import Binary
import ErrUtils
import NameCache
import Platform
import Util
import UniqDSet
import GHC.Serialized ( Serialized )
import Foreign
import Control.Monad ( guard, liftM, when, ap )
import Data.IORef
import Data.Time
import Exception
import System.FilePath
#ifdef GHCI
import Control.Concurrent
import System.Process ( ProcessHandle )
#endif
-- -----------------------------------------------------------------------------
-- Compilation state
-- -----------------------------------------------------------------------------
-- | Status of a compilation to hard-code
data HscStatus
    = HscNotGeneratingCode
    | HscUpToDate
    | HscUpdateBoot
    | HscUpdateSig
    | HscRecomp CgGuts ModSummary
      -- ^ a recompilation; carries the 'CgGuts' and 'ModSummary' --
      -- NOTE(review): presumably the inputs the code generator needs,
      -- the consumers are outside this chunk.
-- -----------------------------------------------------------------------------
-- The Hsc monad: Passing an environment and warning state
-- | A reader over 'HscEnv' combined with an accumulating bag of
-- 'WarningMessages', layered on IO.
newtype Hsc a = Hsc (HscEnv -> WarningMessages -> IO (a, WarningMessages))

instance Functor Hsc where
    fmap = liftM

instance Applicative Hsc where
    pure a = Hsc $ \_ w -> return (a, w)
    (<*>) = ap

instance Monad Hsc where
    -- Thread the warning bag through: run the first action, then feed
    -- its updated warnings into the continuation.
    Hsc m >>= k = Hsc $ \e w -> do (a, w1) <- m e w
                                   case k a of
                                       Hsc k' -> k' e w1

instance MonadIO Hsc where
    liftIO io = Hsc $ \_ w -> do a <- io; return (a, w)

instance HasDynFlags Hsc where
    getDynFlags = Hsc $ \e w -> return (hsc_dflags e, w)
-- | Execute an 'Hsc' computation under the given environment, starting
-- with no warnings, and print (or, under the session's error settings,
-- throw) whatever warnings it accumulated.
runHsc :: HscEnv -> Hsc a -> IO a
runHsc hsc_env (Hsc hsc) = do
    (result, warnings) <- hsc hsc_env emptyBag
    printOrThrowWarnings (hsc_dflags hsc_env) warnings
    return result
runInteractiveHsc :: HscEnv -> Hsc a -> IO a
-- A variant of runHsc that switches in the DynFlags from the
-- InteractiveContext before running the Hsc computation.
runInteractiveHsc hsc_env
  = runHsc (hsc_env { hsc_dflags = interactive_dflags })
  where
    -- The interactive session keeps its own DynFlags inside the
    -- InteractiveContext; those replace the session-wide flags here.
    interactive_dflags = ic_dflags (hsc_IC hsc_env)
-- -----------------------------------------------------------------------------
-- Source Errors
-- When the compiler (HscMain) discovers errors, it throws an
-- exception in the IO monad.
-- | Wrap a bag of error messages as a 'SourceError' exception value.
mkSrcErr :: ErrorMessages -> SourceError
mkSrcErr = SourceError

-- | Extract the error messages carried by a 'SourceError'.
srcErrorMessages :: SourceError -> ErrorMessages
srcErrorMessages (SourceError msgs) = msgs

-- | Render an 'SDoc' with the given flags and wrap it as a 'GhcApiError'.
mkApiErr :: DynFlags -> SDoc -> GhcApiError
mkApiErr dflags msg = GhcApiError (showSDoc dflags msg)

-- | Throw a single error message as a 'SourceError' in any 'MonadIO'.
throwOneError :: MonadIO m => ErrMsg -> m ab
throwOneError err = liftIO $ throwIO $ mkSrcErr $ unitBag err
-- | A source error is an error that is caused by one or more errors in the
-- source code. A 'SourceError' is thrown by many functions in the
-- compilation pipeline. Inside GHC these errors are merely printed via
-- 'log_action', but API clients may treat them differently, for example,
-- insert them into a list box. If you want the default behaviour, use the
-- idiom:
--
-- > handleSourceError printExceptionAndWarnings $ do
-- > ... api calls that may fail ...
--
-- The 'SourceError's error messages can be accessed via 'srcErrorMessages'.
-- This list may be empty if the compiler failed due to @-Werror@
-- ('Opt_WarnIsError').
--
-- See 'printExceptionAndWarnings' for more information on what to take care
-- of when writing a custom error handler.
newtype SourceError = SourceError ErrorMessages
instance Show SourceError where
show (SourceError msgs) = unlines . map show . bagToList $ msgs
instance Exception SourceError
-- | Perform the given action and call the exception handler if the action
-- throws a 'SourceError'. See 'SourceError' for more information.
handleSourceError :: (ExceptionMonad m) =>
(SourceError -> m a) -- ^ exception handler
-> m a -- ^ action to perform
-> m a
handleSourceError handler act =
gcatch act (\(e :: SourceError) -> handler e)
-- | An error thrown if the GHC API is used in an incorrect fashion.
newtype GhcApiError = GhcApiError String
instance Show GhcApiError where
show (GhcApiError msg) = msg
instance Exception GhcApiError
-- | Given a bag of warnings, turn them into an exception if
-- -Werror is enabled, or print them out otherwise.
printOrThrowWarnings :: DynFlags -> Bag WarnMsg -> IO ()
printOrThrowWarnings dflags warns
  -- At least one warning is fatal under the current flags: throw the
  -- whole bag as a SourceError, with 'warnIsErrorMsg' appended
  -- (presumably the generic "warnings treated as errors" note).
  | anyBag (isWarnMsgFatal dflags) warns
  = throwIO $ mkSrcErr $ warns `snocBag` warnIsErrorMsg dflags
  | otherwise
  = printBagOfErrors dflags warns
handleFlagWarnings :: DynFlags -> [Located String] -> IO ()
handleFlagWarnings dflags warns
= when (wopt Opt_WarnDeprecatedFlags dflags) $ do
-- It would be nicer if warns :: [Located MsgDoc], but that
-- has circular import problems.
let bag = listToBag [ mkPlainWarnMsg dflags loc (text warn)
| L loc warn <- warns ]
printOrThrowWarnings dflags bag
{-
************************************************************************
* *
\subsection{HscEnv}
* *
************************************************************************
-}
-- | HscEnv is like 'Session', except that some of the fields are immutable.
-- An HscEnv is used to compile a single module from plain Haskell source
-- code (after preprocessing) to either C, assembly or C--. Things like
-- the module graph don't change during a single compilation.
--
-- Historical note: \"hsc\" used to be the name of the compiler binary,
-- when there was a separate driver and compiler. To compile a single
-- module, the driver would invoke hsc on the source code... so nowadays
-- we think of hsc as the layer of the compiler that deals with compiling
-- a single module.
data HscEnv
= HscEnv {
hsc_dflags :: DynFlags,
-- ^ The dynamic flag settings
hsc_targets :: [Target],
-- ^ The targets (or roots) of the current session
hsc_mod_graph :: ModuleGraph,
-- ^ The module graph of the current session
hsc_IC :: InteractiveContext,
-- ^ The context for evaluating interactive statements
hsc_HPT :: HomePackageTable,
-- ^ The home package table describes already-compiled
-- home-package modules, /excluding/ the module we
-- are compiling right now.
-- (In one-shot mode the current module is the only
-- home-package module, so hsc_HPT is empty. All other
-- modules count as \"external-package\" modules.
-- However, even in GHCi mode, hi-boot interfaces are
-- demand-loaded into the external-package table.)
--
-- 'hsc_HPT' is not mutable because we only demand-load
-- external packages; the home package is eagerly
-- loaded, module by module, by the compilation manager.
--
-- The HPT may contain modules compiled earlier by @--make@
-- but not actually below the current module in the dependency
-- graph.
--
-- (This changes a previous invariant: changed Jan 05.)
hsc_EPS :: {-# UNPACK #-} !(IORef ExternalPackageState),
-- ^ Information about the currently loaded external packages.
-- This is mutable because packages will be demand-loaded during
-- a compilation run as required.
hsc_NC :: {-# UNPACK #-} !(IORef NameCache),
-- ^ As with 'hsc_EPS', this is side-effected by compiling to
-- reflect sucking in interface files. They cache the state of
-- external interface files, in effect.
hsc_FC :: {-# UNPACK #-} !(IORef FinderCache),
-- ^ The cached result of performing finding in the file system
hsc_type_env_var :: Maybe (Module, IORef TypeEnv)
-- ^ Used for one-shot compilation only, to initialise
-- the 'IfGblEnv'. See 'TcRnTypes.tcg_type_env_var' for
-- 'TcRnTypes.TcGblEnv'. See also Note [hsc_type_env_var hack]
#ifdef GHCI
, hsc_iserv :: MVar (Maybe IServ)
-- ^ interactive server process. Created the first
-- time it is needed.
#endif
}
-- Note [hsc_type_env_var hack]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- hsc_type_env_var is used to initialize tcg_type_env_var, and
-- eventually it is the mutable variable that is queried from
-- if_rec_types to get a TypeEnv. So, clearly, it's something
-- related to knot-tying (see Note [Tying the knot]).
-- hsc_type_env_var is used in two places: initTcRn (where
-- it initializes tcg_type_env_var) and initIfaceCheck
-- (where it initializes if_rec_types).
--
-- But why do we need a way to feed a mutable variable in? Why
-- can't we just initialize tcg_type_env_var when we start
-- typechecking? The problem is we need to knot-tie the
-- EPS, and we may start adding things to the EPS before type
-- checking starts.
--
-- Here is a concrete example. Suppose we are running
-- "ghc -c A.hs", and we have this file system state:
--
-- A.hs-boot A.hi-boot **up to date**
-- B.hs B.hi **up to date**
-- A.hs A.hi **stale**
--
-- The first thing we do is run checkOldIface on A.hi.
-- checkOldIface will call loadInterface on B.hi so it can
-- get its hands on the fingerprints, to find out if A.hi
-- needs recompilation. But loadInterface also populates
-- the EPS! And so if compilation turns out to be necessary,
-- as it is in this case, the thunks we put into the EPS for
-- B.hi need to have the correct if_rec_types mutable variable
-- to query.
--
-- If the mutable variable is only allocated WHEN we start
-- typechecking, then that's too late: we can't get the
-- information to the thunks. So we need to pre-commit
-- to a type variable in 'hscIncrementalCompile' BEFORE we
-- check the old interface.
--
-- This is all a massive hack because arguably checkOldIface
-- should not populate the EPS. But that's a refactor for
-- another day.
#ifdef GHCI
data IServ = IServ
{ iservPipe :: Pipe
, iservProcess :: ProcessHandle
, iservLookupSymbolCache :: IORef (UniqFM (Ptr ()))
, iservPendingFrees :: [HValueRef]
}
#endif
-- | Read the current 'ExternalPackageState' out of its mutable cell in
-- the session environment.
hscEPS :: HscEnv -> IO ExternalPackageState
hscEPS = readIORef . hsc_EPS
-- | A compilation target.
--
-- A target may be supplied with the actual text of the
-- module. If so, use this instead of the file contents (this
-- is for use in an IDE where the file hasn't been saved by
-- the user yet).
data Target
= Target {
targetId :: TargetId, -- ^ module or filename
targetAllowObjCode :: Bool, -- ^ object code allowed?
targetContents :: Maybe (StringBuffer,UTCTime)
-- ^ in-memory text buffer?
}
data TargetId
= TargetModule ModuleName
-- ^ A module name: search for the file
| TargetFile FilePath (Maybe Phase)
-- ^ A filename: preprocess & parse it to find the module name.
-- If specified, the Phase indicates how to compile this file
-- (which phase to start from). Nothing indicates the starting phase
-- should be determined from the suffix of the filename.
deriving Eq
-- | Print a target, prefixing '*' when object code is allowed for it.
pprTarget :: Target -> SDoc
pprTarget (Target tid allow_obj _) =
    prefix <> pprTargetId tid
  where
    prefix = if allow_obj then char '*' else empty
instance Outputable Target where
ppr = pprTarget
-- | Print a 'TargetId': the module name, or the file path.
pprTargetId :: TargetId -> SDoc
pprTargetId tid = case tid of
    TargetModule m -> ppr m
    TargetFile f _ -> text f
instance Outputable TargetId where
ppr = pprTargetId
{-
************************************************************************
* *
\subsection{Package and Module Tables}
* *
************************************************************************
-}
-- | Helps us find information about modules in the home package
type HomePackageTable = DModuleNameEnv HomeModInfo
-- Domain = modules in the home package that have been fully compiled
-- "home" unit id cached here for convenience
-- | Helps us find information about modules in the imported packages
type PackageIfaceTable = ModuleEnv ModIface
-- Domain = modules in the imported packages
-- | Constructs an empty HomePackageTable
emptyHomePackageTable :: HomePackageTable
emptyHomePackageTable = emptyUDFM
-- | Constructs an empty PackageIfaceTable
emptyPackageIfaceTable :: PackageIfaceTable
emptyPackageIfaceTable = emptyModuleEnv
pprHPT :: HomePackageTable -> SDoc
-- A bit aribitrary for now
pprHPT hpt = pprUDFM hpt $ \hms ->
vcat [ hang (ppr (mi_module (hm_iface hm)))
2 (ppr (md_types (hm_details hm)))
| hm <- hms ]
lookupHpt :: HomePackageTable -> ModuleName -> Maybe HomeModInfo
lookupHpt = lookupUDFM
lookupHptDirectly :: HomePackageTable -> Unique -> Maybe HomeModInfo
lookupHptDirectly = lookupUDFM_Directly
eltsHpt :: HomePackageTable -> [HomeModInfo]
eltsHpt = eltsUDFM
filterHpt :: (HomeModInfo -> Bool) -> HomePackageTable -> HomePackageTable
filterHpt = filterUDFM
allHpt :: (HomeModInfo -> Bool) -> HomePackageTable -> Bool
allHpt = allUDFM
mapHpt :: (HomeModInfo -> HomeModInfo) -> HomePackageTable -> HomePackageTable
mapHpt = mapUDFM
delFromHpt :: HomePackageTable -> ModuleName -> HomePackageTable
delFromHpt = delFromUDFM
addToHpt :: HomePackageTable -> ModuleName -> HomeModInfo -> HomePackageTable
addToHpt = addToUDFM
addListToHpt
:: HomePackageTable -> [(ModuleName, HomeModInfo)] -> HomePackageTable
addListToHpt = addListToUDFM
listToHpt :: [(ModuleName, HomeModInfo)] -> HomePackageTable
listToHpt = listToUDFM
lookupHptByModule :: HomePackageTable -> Module -> Maybe HomeModInfo
-- The HPT is indexed by ModuleName, not Module,
-- we must check for a hit on the right Module
lookupHptByModule hpt mod
= case lookupHpt hpt (moduleName mod) of
Just hm | mi_module (hm_iface hm) == mod -> Just hm
_otherwise -> Nothing
-- | Information about modules in the package being compiled
data HomeModInfo
= HomeModInfo {
hm_iface :: !ModIface,
-- ^ The basic loaded interface file: every loaded module has one of
-- these, even if it is imported from another package
hm_details :: !ModDetails,
-- ^ Extra information that has been created from the 'ModIface' for
-- the module, typically during typechecking
hm_linkable :: !(Maybe Linkable)
-- ^ The actual artifact we would like to link to access things in
-- this module.
--
-- 'hm_linkable' might be Nothing:
--
-- 1. If this is an .hs-boot module
--
-- 2. Temporarily during compilation if we pruned away
-- the old linkable because it was out of date.
--
-- After a complete compilation ('GHC.load'), all 'hm_linkable' fields
-- in the 'HomePackageTable' will be @Just@.
--
-- When re-linking a module ('HscMain.HscNoRecomp'), we construct the
-- 'HomeModInfo' by building a new 'ModDetails' from the old
-- 'ModIface' (only).
}
-- | Find the 'ModIface' for a 'Module', searching in both the loaded home
-- and external package module information
lookupIfaceByModule
:: DynFlags
-> HomePackageTable
-> PackageIfaceTable
-> Module
-> Maybe ModIface
lookupIfaceByModule _dflags hpt pit mod
= case lookupHptByModule hpt mod of
Just hm -> Just (hm_iface hm)
Nothing -> lookupModuleEnv pit mod
-- If the module does come from the home package, why do we look in the PIT as well?
-- (a) In OneShot mode, even home-package modules accumulate in the PIT
-- (b) Even in Batch (--make) mode, there is *one* case where a home-package
-- module is in the PIT, namely GHC.Prim when compiling the base package.
-- We could eliminate (b) if we wanted, by making GHC.Prim belong to a package
-- of its own, but it doesn't seem worth the bother.
-- | Find all the instance declarations (of classes and families) from
-- the Home Package Table filtered by the provided predicate function.
-- Used in @tcRnImports@, to select the instances that are in the
-- transitive closure of imports from the currently compiled module.
hptInstances :: HscEnv -> (ModuleName -> Bool) -> ([ClsInst], [FamInst])
hptInstances hsc_env want_this_module
= let (insts, famInsts) = unzip $ flip hptAllThings hsc_env $ \mod_info -> do
guard (want_this_module (moduleName (mi_module (hm_iface mod_info))))
let details = hm_details mod_info
return (md_insts details, md_fam_insts details)
in (concat insts, concat famInsts)
-- | Get the combined VectInfo of all modules in the home package table. In
-- contrast to instances and rules, we don't care whether the modules are
-- "below" us in the dependency sense. The VectInfo of those modules not "below"
-- us does not affect the compilation of the current module.
hptVectInfo :: HscEnv -> VectInfo
hptVectInfo = concatVectInfo . hptAllThings ((: []) . md_vect_info . hm_details)
-- | Get rules from modules "below" this one (in the dependency sense)
hptRules :: HscEnv -> [(ModuleName, IsBootInterface)] -> [CoreRule]
hptRules = hptSomeThingsBelowUs (md_rules . hm_details) False
-- | Get annotations from modules "below" this one (in the dependency sense)
hptAnns :: HscEnv -> Maybe [(ModuleName, IsBootInterface)] -> [Annotation]
hptAnns hsc_env (Just deps) = hptSomeThingsBelowUs (md_anns . hm_details) False hsc_env deps
hptAnns hsc_env Nothing = hptAllThings (md_anns . hm_details) hsc_env
-- | Collect the @extract@ed things from every module in the home
-- package table, regardless of dependency order.
hptAllThings :: (HomeModInfo -> [a]) -> HscEnv -> [a]
hptAllThings extract hsc_env =
    concat [ extract hmi | hmi <- eltsHpt (hsc_HPT hsc_env) ]
-- | Get things from modules "below" this one (in the dependency sense)
-- C.f Inst.hptInstances
hptSomeThingsBelowUs :: (HomeModInfo -> [a]) -> Bool -> HscEnv -> [(ModuleName, IsBootInterface)] -> [a]
hptSomeThingsBelowUs extract include_hi_boot hsc_env deps
| isOneShot (ghcMode (hsc_dflags hsc_env)) = []
| otherwise
= let hpt = hsc_HPT hsc_env
in
[ thing
| -- Find each non-hi-boot module below me
(mod, is_boot_mod) <- deps
, include_hi_boot || not is_boot_mod
-- unsavoury: when compiling the base package with --make, we
-- sometimes try to look up RULES etc for GHC.Prim. GHC.Prim won't
-- be in the HPT, because we never compile it; it's in the EPT
-- instead. ToDo: clean up, and remove this slightly bogus filter:
, mod /= moduleName gHC_PRIM
-- Look it up in the HPT
, let things = case lookupHpt hpt mod of
Just info -> extract info
Nothing -> pprTrace "WARNING in hptSomeThingsBelowUs" msg []
msg = vcat [text "missing module" <+> ppr mod,
text "Probable cause: out-of-date interface files"]
-- This really shouldn't happen, but see Trac #962
-- And get its dfuns
, thing <- things ]
-- | Object files of every linkable in the home package table (modules
-- without a linkable contribute nothing).
hptObjs :: HomePackageTable -> [FilePath]
hptObjs hpt = concatMap (maybe [] linkableObjs . hm_linkable) (eltsHpt hpt)
{-
************************************************************************
* *
\subsection{Metaprogramming}
* *
************************************************************************
-}
-- | The supported metaprogramming result types
data MetaRequest
= MetaE (LHsExpr RdrName -> MetaResult)
| MetaP (LPat RdrName -> MetaResult)
| MetaT (LHsType RdrName -> MetaResult)
| MetaD ([LHsDecl RdrName] -> MetaResult)
| MetaAW (Serialized -> MetaResult)
-- | data constructors not exported to ensure correct result type
data MetaResult
= MetaResE { unMetaResE :: LHsExpr RdrName }
| MetaResP { unMetaResP :: LPat RdrName }
| MetaResT { unMetaResT :: LHsType RdrName }
| MetaResD { unMetaResD :: [LHsDecl RdrName] }
| MetaResAW { unMetaResAW :: Serialized }
-- | A metaprogramming hook: run a splice and deliver the result in the
-- shape selected by the 'MetaRequest'.
type MetaHook f = MetaRequest -> LHsExpr Id -> f MetaResult

-- | Run the hook, asking for an expression result.
metaRequestE :: Functor f => MetaHook f -> LHsExpr Id -> f (LHsExpr RdrName)
metaRequestE hook expr = fmap unMetaResE (hook (MetaE MetaResE) expr)

-- | Run the hook, asking for a pattern result.
metaRequestP :: Functor f => MetaHook f -> LHsExpr Id -> f (LPat RdrName)
metaRequestP hook expr = fmap unMetaResP (hook (MetaP MetaResP) expr)

-- | Run the hook, asking for a type result.
metaRequestT :: Functor f => MetaHook f -> LHsExpr Id -> f (LHsType RdrName)
metaRequestT hook expr = fmap unMetaResT (hook (MetaT MetaResT) expr)

-- | Run the hook, asking for a declaration-list result.
metaRequestD :: Functor f => MetaHook f -> LHsExpr Id -> f [LHsDecl RdrName]
metaRequestD hook expr = fmap unMetaResD (hook (MetaD MetaResD) expr)

-- | Run the hook, asking for a 'Serialized' result.
metaRequestAW :: Functor f => MetaHook f -> LHsExpr Id -> f Serialized
metaRequestAW hook expr = fmap unMetaResAW (hook (MetaAW MetaResAW) expr)
{-
************************************************************************
* *
\subsection{Dealing with Annotations}
* *
************************************************************************
-}
-- | Deal with gathering annotations in from all possible places
-- and combining them into a single 'AnnEnv'
prepareAnnotations :: HscEnv -> Maybe ModGuts -> IO AnnEnv
prepareAnnotations hsc_env mb_guts = do
    eps <- hscEPS hsc_env
    let -- Home-package annotations.  When we are supplied a ModGuts we
        -- restrict the lookup to its dependencies; otherwise we take
        -- annotations from every home package table entry, regardless of
        -- dependency ordering.
        home_env = mkAnnEnv (hptAnns hsc_env (fmap (dep_mods . mg_deps) mb_guts))
        -- Fold in the annotations of the module being compiled, if any.
        with_this = case fmap (mkAnnEnv . mg_anns) mb_guts of
                      Nothing       -> home_env
                      Just this_env -> this_env `plusAnnEnv` home_env
    -- Finally combine with annotations loaded from external packages.
    return (with_this `plusAnnEnv` eps_ann_env eps)
{-
************************************************************************
* *
\subsection{The Finder cache}
* *
************************************************************************
-}
-- | The 'FinderCache' maps modules to the result of
-- searching for that module. It records the results of searching for
-- modules along the search path. On @:load@, we flush the entire
-- contents of this cache.
--
type FinderCache = InstalledModuleEnv InstalledFindResult

-- | The result of searching for an 'InstalledModule'.
data InstalledFindResult
  = InstalledFound ModLocation InstalledModule
      -- ^ Found, together with the location of its files
  | InstalledNoPackage InstalledUnitId
      -- ^ The unit that was supposed to provide the module was not found
  | InstalledNotFound [FilePath] (Maybe InstalledUnitId)
      -- ^ Not found: the paths searched, and (if known) the unit that
      --   should have provided it
-- | The result of searching for an imported module.
--
-- NB: FindResult manages both user source-import lookups
-- (which can result in 'Module') as well as direct imports
-- for interfaces (which always result in 'InstalledModule').
data FindResult
  = Found ModLocation Module
        -- ^ The module was found
  | NoPackage UnitId
        -- ^ The requested package was not found
  | FoundMultiple [(Module, ModuleOrigin)]
        -- ^ _Error_: the module was found in multiple packages
        -- | Not found
  | NotFound
      { fr_paths       :: [FilePath]       -- ^ Places where I looked
      , fr_pkg         :: Maybe UnitId     -- ^ Just p => module is in this package's
                                           --   manifest, but couldn't find
                                           --   the .hi file
      , fr_mods_hidden :: [UnitId]         -- ^ Module is in these packages,
                                           --   but the *module* is hidden
      , fr_pkgs_hidden :: [UnitId]         -- ^ Module is in these packages,
                                           --   but the *package* is hidden
      , fr_suggestions :: [ModuleSuggestion] -- ^ Possible mis-spelled modules
      }
{-
************************************************************************
* *
\subsection{Symbol tables and Module details}
* *
************************************************************************
-}
-- | A 'ModIface' plus a 'ModDetails' summarises everything we know
-- about a compiled module.  The 'ModIface' is the stuff *before* linking,
-- and can be written out to an interface file. The 'ModDetails' is after
-- linking and can be completely recovered from just the 'ModIface'.
--
-- When we read an interface file, we also construct a 'ModIface' from it,
-- except that we explicitly make the 'mi_decls' and a few other fields empty;
-- as when reading we consolidate the declarations etc. into a number of indexed
-- maps and environments in the 'ExternalPackageState'.
data ModIface
  = ModIface {
        mi_module     :: !Module,            -- ^ Name of the module we are for
        mi_sig_of     :: !(Maybe Module),    -- ^ Are we a signature of another module?

        mi_iface_hash :: !Fingerprint,       -- ^ Hash of the whole interface
        mi_mod_hash   :: !Fingerprint,       -- ^ Hash of the ABI only
        mi_flag_hash  :: !Fingerprint,       -- ^ Hash of the important flags
                                             -- used when compiling this module

        mi_orphan     :: !WhetherHasOrphans, -- ^ Whether this module has orphans
        mi_finsts     :: !WhetherHasFamInst, -- ^ Whether this module has family instances
        mi_hsc_src    :: !HscSource,         -- ^ Boot? Signature?

        mi_deps     :: Dependencies,
                -- ^ The dependencies of the module. This is
                -- consulted for directly-imported modules, but not
                -- for anything else (hence lazy)

        mi_usages   :: [Usage],
                -- ^ Usages; kept sorted so that it's easy to decide
                -- whether to write a new iface file (changing usages
                -- doesn't affect the hash of this module)
                -- NOT STRICT!  we read this field lazily from the interface file
                -- It is *only* consulted by the recompilation checker

        mi_exports  :: ![IfaceExport],
                -- ^ Exports
                -- Kept sorted by (mod,occ), to make version comparisons easier
                -- Records the modules that are the declaration points for things
                -- exported by this module, and the 'OccName's of those things

        mi_exp_hash :: !Fingerprint,
                -- ^ Hash of export list

        mi_used_th  :: !Bool,
                -- ^ Module required TH splices when it was compiled.
                -- This disables recompilation avoidance (see #481).

        mi_fixities :: [(OccName,Fixity)],
                -- ^ Fixities
                -- NOT STRICT!  we read this field lazily from the interface file

        mi_warns    :: Warnings,
                -- ^ Warnings
                -- NOT STRICT!  we read this field lazily from the interface file

        mi_anns     :: [IfaceAnnotation],
                -- ^ Annotations
                -- NOT STRICT!  we read this field lazily from the interface file

        mi_decls    :: [(Fingerprint,IfaceDecl)],
                -- ^ Type, class and variable declarations
                -- The hash of an Id changes if its fixity or deprecations change
                -- (as well as its type of course)
                -- Ditto data constructors, class operations, except that
                -- the hash of the parent class/tycon changes

        mi_globals  :: !(Maybe GlobalRdrEnv),
                -- ^ Binds all the things defined at the top level in
                -- the /original source/ code for this module. which
                -- is NOT the same as mi_exports, nor mi_decls (which
                -- may contains declarations for things not actually
                -- defined by the user).  Used for GHCi and for inspecting
                -- the contents of modules via the GHC API only.
                --
                -- (We need the source file to figure out the
                -- top-level environment, if we didn't compile this module
                -- from source then this field contains @Nothing@).
                --
                -- Strictly speaking this field should live in the
                -- 'HomeModInfo', but that leads to more plumbing.

                -- Instance declarations and rules
        mi_insts       :: [IfaceClsInst],     -- ^ Sorted class instance
        mi_fam_insts   :: [IfaceFamInst],     -- ^ Sorted family instances
        mi_rules       :: [IfaceRule],        -- ^ Sorted rules
        mi_orphan_hash :: !Fingerprint,       -- ^ Hash for orphan rules, class and family
                                              -- instances, and vectorise pragmas combined

        mi_vect_info :: !IfaceVectInfo,       -- ^ Vectorisation information

                -- Cached environments for easy lookup
                -- These are computed (lazily) from other fields
                -- and are not put into the interface file
        mi_warn_fn   :: OccName -> Maybe WarningTxt,
                -- ^ Cached lookup for 'mi_warns'
        mi_fix_fn    :: OccName -> Maybe Fixity,
                -- ^ Cached lookup for 'mi_fixities'
        mi_hash_fn   :: OccName -> Maybe (OccName, Fingerprint),
                -- ^ Cached lookup for 'mi_decls'.
                -- The @Nothing@ in 'mi_hash_fn' means that the thing
                -- isn't in decls. It's useful to know that when
                -- seeing if we are up to date wrt. the old interface.
                -- The 'OccName' is the parent of the name, if it has one.

        mi_hpc    :: !AnyHpcUsage,
                -- ^ True if this program uses Hpc at any point in the program.

        mi_trust  :: !IfaceTrustInfo,
                -- ^ Safe Haskell Trust information for this module.

        mi_trust_pkg :: !Bool
                -- ^ Do we require the package this module resides in be trusted
                -- to trust this module? This is used for the situation where a
                -- module is Safe (so doesn't require the package be trusted
                -- itself) but imports some trustworthy modules from its own
                -- package (which does require its own package be trusted).
                -- See Note [RnNames . Trust Own Package]
     }
-- | Old-style accessor for whether or not the ModIface came from an hs-boot
-- file.
mi_boot :: ModIface -> Bool
mi_boot = (== HsBootFile) . mi_hsc_src
-- | Looks up a (possibly cached) fixity from a 'ModIface'.  If one cannot be
-- found, 'defaultFixity' is returned instead.
mi_fix :: ModIface -> OccName -> Fixity
mi_fix iface occ =
  case mi_fix_fn iface occ of
    Just fixity -> fixity
    Nothing     -> defaultFixity
-- | The semantic module for this interface; e.g., if it's a interface
-- for a signature, if 'mi_module' is @p[A=<A>]:A@, 'mi_semantic_module'
-- will be @<A>@.
mi_semantic_module :: ModIface -> Module
-- For an ordinary module 'mi_sig_of' is Nothing and the module itself
-- is returned.
mi_semantic_module iface = maybe (mi_module iface) id (mi_sig_of iface)
-- | The "precise" free holes, e.g., the signatures that this
-- 'ModIface' depends on.
mi_free_holes :: ModIface -> UniqDSet ModuleName
mi_free_holes iface =
  case splitModuleInsts (mi_module iface) of
    (_, Just indef)
        -- A mini-hack: we rely on the fact that 'renameFreeHoles'
        -- drops things that aren't holes.
        -> renameFreeHoles (mkUniqDSet cands) (indefUnitIdInsts (indefModuleUnitId indef))
    _   -> emptyUniqDSet
  where
    -- Candidate holes: every module this interface directly depends on
    cands = map fst (dep_mods (mi_deps iface))
-- | Given a set of free holes, and a unit identifier, rename
-- the free holes according to the instantiation of the unit
-- identifier.  For example, if we have A and B free, and
-- our unit identity is @p[A=<C>,B=impl:B]@, the renamed free
-- holes are just C.
renameFreeHoles :: UniqDSet ModuleName -> [(ModuleName, Module)] -> UniqDSet ModuleName
renameFreeHoles fhs insts =
    unionManyUniqDSets [ renamed hole | hole <- uniqDSetToList fhs ]
  where
    impl_map = listToUFM insts
    renamed hole = case lookupUFM impl_map hole of
                     Just impl_mod -> moduleFreeHoles impl_mod
                     -- It wasn't actually a hole
                     Nothing       -> emptyUniqDSet
-- NB: the cached fields ('mi_globals', 'mi_warn_fn', 'mi_fix_fn',
-- 'mi_hash_fn') are not serialised; 'get' rebuilds the caches and leaves
-- 'mi_globals' as Nothing.  Lazily-read fields go through lazyPut/lazyGet.
instance Binary ModIface where
   put_ bh (ModIface {
                 mi_module    = mod,
                 mi_sig_of    = sig_of,
                 mi_hsc_src   = hsc_src,
                 mi_iface_hash= iface_hash,
                 mi_mod_hash  = mod_hash,
                 mi_flag_hash = flag_hash,
                 mi_orphan    = orphan,
                 mi_finsts    = hasFamInsts,
                 mi_deps      = deps,
                 mi_usages    = usages,
                 mi_exports   = exports,
                 mi_exp_hash  = exp_hash,
                 mi_used_th   = used_th,
                 mi_fixities  = fixities,
                 mi_warns     = warns,
                 mi_anns      = anns,
                 mi_decls     = decls,
                 mi_insts     = insts,
                 mi_fam_insts = fam_insts,
                 mi_rules     = rules,
                 mi_orphan_hash = orphan_hash,
                 mi_vect_info = vect_info,
                 mi_hpc       = hpc_info,
                 mi_trust     = trust,
                 mi_trust_pkg = trust_pkg }) = do
        put_ bh mod
        put_ bh sig_of
        put_ bh hsc_src
        put_ bh iface_hash
        put_ bh mod_hash
        put_ bh flag_hash
        put_ bh orphan
        put_ bh hasFamInsts
        lazyPut bh deps
        lazyPut bh usages
        put_ bh exports
        put_ bh exp_hash
        put_ bh used_th
        put_ bh fixities
        lazyPut bh warns
        lazyPut bh anns
        put_ bh decls
        put_ bh insts
        put_ bh fam_insts
        lazyPut bh rules
        put_ bh orphan_hash
        put_ bh vect_info
        put_ bh hpc_info
        put_ bh trust
        put_ bh trust_pkg

   -- NB: reads must mirror the order of writes in put_ above.
   get bh = do
        mod         <- get bh
        sig_of      <- get bh
        hsc_src     <- get bh
        iface_hash  <- get bh
        mod_hash    <- get bh
        flag_hash   <- get bh
        orphan      <- get bh
        hasFamInsts <- get bh
        deps        <- lazyGet bh
        usages      <- {-# SCC "bin_usages" #-} lazyGet bh
        exports     <- {-# SCC "bin_exports" #-} get bh
        exp_hash    <- get bh
        used_th     <- get bh
        fixities    <- {-# SCC "bin_fixities" #-} get bh
        warns       <- {-# SCC "bin_warns" #-} lazyGet bh
        anns        <- {-# SCC "bin_anns" #-} lazyGet bh
        decls       <- {-# SCC "bin_tycldecls" #-} get bh
        insts       <- {-# SCC "bin_insts" #-} get bh
        fam_insts   <- {-# SCC "bin_fam_insts" #-} get bh
        rules       <- {-# SCC "bin_rules" #-} lazyGet bh
        orphan_hash <- get bh
        vect_info   <- get bh
        hpc_info    <- get bh
        trust       <- get bh
        trust_pkg   <- get bh
        return (ModIface {
                 mi_module      = mod,
                 mi_sig_of      = sig_of,
                 mi_hsc_src     = hsc_src,
                 mi_iface_hash  = iface_hash,
                 mi_mod_hash    = mod_hash,
                 mi_flag_hash   = flag_hash,
                 mi_orphan      = orphan,
                 mi_finsts      = hasFamInsts,
                 mi_deps        = deps,
                 mi_usages      = usages,
                 mi_exports     = exports,
                 mi_exp_hash    = exp_hash,
                 mi_used_th     = used_th,
                 mi_anns        = anns,
                 mi_fixities    = fixities,
                 mi_warns       = warns,
                 mi_decls       = decls,
                 mi_globals     = Nothing,
                 mi_insts       = insts,
                 mi_fam_insts   = fam_insts,
                 mi_rules       = rules,
                 mi_orphan_hash = orphan_hash,
                 mi_vect_info   = vect_info,
                 mi_hpc         = hpc_info,
                 mi_trust       = trust,
                 mi_trust_pkg   = trust_pkg,
                 -- And build the cached values
                 mi_warn_fn     = mkIfaceWarnCache warns,
                 mi_fix_fn      = mkIfaceFixCache fixities,
                 mi_hash_fn     = mkIfaceHashCache decls })
-- | The original names declared in a certain module that are exported
type IfaceExport = AvailInfo
-- | Constructs an empty ModIface: every hash is 'fingerprint0', every
-- list is empty, and the cached lookup functions are the empty caches.
emptyModIface :: Module -> ModIface
emptyModIface mod
  = ModIface { mi_module      = mod,
               mi_sig_of      = Nothing,
               mi_iface_hash  = fingerprint0,
               mi_mod_hash    = fingerprint0,
               mi_flag_hash   = fingerprint0,
               mi_orphan      = False,
               mi_finsts      = False,
               mi_hsc_src     = HsSrcFile,
               mi_deps        = noDependencies,
               mi_usages      = [],
               mi_exports     = [],
               mi_exp_hash    = fingerprint0,
               mi_used_th     = False,
               mi_fixities    = [],
               mi_warns       = NoWarnings,
               mi_anns        = [],
               mi_insts       = [],
               mi_fam_insts   = [],
               mi_rules       = [],
               mi_decls       = [],
               mi_globals     = Nothing,
               mi_orphan_hash = fingerprint0,
               mi_vect_info   = noIfaceVectInfo,
               mi_warn_fn     = emptyIfaceWarnCache,
               mi_fix_fn      = emptyIfaceFixCache,
               mi_hash_fn     = emptyIfaceHashCache,
               mi_hpc         = False,
               mi_trust       = noIfaceTrustInfo,
               mi_trust_pkg   = False }
-- | Constructs cache for the 'mi_hash_fn' field of a 'ModIface':
-- an 'OccEnv' mapping each declared 'OccName' to its (parent, hash) pair.
mkIfaceHashCache :: [(Fingerprint,IfaceDecl)]
                 -> (OccName -> Maybe (OccName, Fingerprint))
mkIfaceHashCache pairs = \occ -> lookupOccEnv env occ
  where
    env = foldr insert_decl emptyOccEnv pairs
    -- Fold in every fingerprinted OccName of one declaration
    insert_decl (fp, decl) env0 =
        foldr insert_one env0 (ifaceDeclFingerprints fp decl)
      where
        insert_one (occ, hash) acc = extendOccEnv acc occ (occ, hash)
-- | The empty 'mi_hash_fn' cache: knows no declarations at all.
emptyIfaceHashCache :: OccName -> Maybe (OccName, Fingerprint)
emptyIfaceHashCache = const Nothing
-- | The 'ModDetails' is essentially a cache for information in the 'ModIface'
-- for home modules only. Information relating to packages will be loaded into
-- global environments in 'ExternalPackageState'.
data ModDetails
  = ModDetails {
        -- The next two fields are created by the typechecker
        md_exports   :: [AvailInfo],     -- ^ What the module exports
        md_types     :: !TypeEnv,        -- ^ Local type environment for this particular module
                                         -- Includes Ids, TyCons, PatSyns
        md_insts     :: ![ClsInst],      -- ^ 'DFunId's for the instances in this module
        md_fam_insts :: ![FamInst],      -- ^ Family instances for this module
        md_rules     :: ![CoreRule],     -- ^ Domain may include 'Id's from other modules
        md_anns      :: ![Annotation],   -- ^ Annotations present in this module: currently
                                         -- they only annotate things also declared in this module
        md_vect_info :: !VectInfo        -- ^ Module vectorisation information
     }
-- | Constructs an empty ModDetails: empty type environment and no
-- exports, instances, rules, or annotations.
emptyModDetails :: ModDetails
emptyModDetails =
    ModDetails { md_exports   = []
               , md_types     = emptyTypeEnv
               , md_insts     = []
               , md_fam_insts = []
               , md_rules     = []
               , md_anns      = []
               , md_vect_info = noVectInfo }
-- | Records the modules directly imported by a module for extracting e.g.
-- usage information, and also to give better error messages
type ImportedMods = ModuleEnv [ImportedModsVal]

-- | Information about one import of a module
data ImportedModsVal
  = ImportedModsVal {
        imv_name        :: ModuleName,    -- ^ The name the module is imported with
        imv_span        :: SrcSpan,       -- ^ the source span of the whole import
        imv_is_safe     :: IsSafeImport,  -- ^ whether this is a safe import
        imv_is_hiding   :: Bool,          -- ^ whether this is a @hiding@ import
        imv_all_exports :: GlobalRdrEnv,  -- ^ all the things the module could provide
        imv_qualified   :: Bool           -- ^ whether this is a qualified import
    }
-- | A ModGuts is carried through the compiler, accumulating stuff as it goes
-- There is only one ModGuts at any time, the one for the module
-- being compiled right now.  Once it is compiled, a 'ModIface' and
-- 'ModDetails' are extracted and the ModGuts is discarded.
data ModGuts
  = ModGuts {
        mg_module    :: !Module,         -- ^ Module being compiled
        mg_hsc_src   :: HscSource,       -- ^ Whether it's an hs-boot module
        mg_loc       :: SrcSpan,         -- ^ For error messages from inner passes
        mg_exports   :: ![AvailInfo],    -- ^ What it exports
        mg_deps      :: !Dependencies,   -- ^ What it depends on, directly or
                                         -- otherwise
        mg_usages    :: ![Usage],        -- ^ What was used?  Used for interfaces.
        mg_used_th   :: !Bool,           -- ^ Did we run a TH splice?
        mg_rdr_env   :: !GlobalRdrEnv,   -- ^ Top-level lexical environment

        -- These fields all describe the things **declared in this module**
        mg_fix_env   :: !FixityEnv,      -- ^ Fixities declared in this module.
                                         -- Used for creating interface files.
        mg_tcs       :: ![TyCon],        -- ^ TyCons declared in this module
                                         -- (includes TyCons for classes)
        mg_insts     :: ![ClsInst],      -- ^ Class instances declared in this module
        mg_fam_insts :: ![FamInst],
                                         -- ^ Family instances declared in this module
        mg_patsyns   :: ![PatSyn],       -- ^ Pattern synonyms declared in this module
        mg_rules     :: ![CoreRule],     -- ^ Rewrite rules of this module.  Before the
                                         -- core pipeline starts, contains all of them;
                                         -- See Note [Overall plumbing for rules] in Rules.hs
        mg_binds     :: !CoreProgram,    -- ^ Bindings for this module
        mg_foreign   :: !ForeignStubs,   -- ^ Foreign exports declared in this module
        mg_warns     :: !Warnings,       -- ^ Warnings declared in the module
        mg_anns      :: [Annotation],    -- ^ Annotations declared in this module
        mg_hpc_info  :: !HpcInfo,        -- ^ Coverage tick boxes in the module
        mg_modBreaks :: !(Maybe ModBreaks), -- ^ Breakpoints for the module
        mg_vect_decls:: ![CoreVect],     -- ^ Vectorisation declarations in this module
                                         --   (produced by desugarer & consumed by vectoriser)
        mg_vect_info :: !VectInfo,       -- ^ Pool of vectorised declarations in the module

        -- The next two fields are unusual, because they give instance
        -- environments for *all* modules in the home package, including
        -- this module, rather than for *just* this module.
        -- Reason: when looking up an instance we don't want to have to
        -- look at each module in the home package in turn
        mg_inst_env     :: InstEnv,      -- ^ Class instance environment for
                                         -- /home-package/ modules (including this
                                         -- one); c.f. 'tcg_inst_env'
        mg_fam_inst_env :: FamInstEnv,   -- ^ Type-family instance environment for
                                         -- /home-package/ modules (including this
                                         -- one); c.f. 'tcg_fam_inst_env'
        mg_safe_haskell :: SafeHaskellMode, -- ^ Safe Haskell mode
        mg_trust_pkg    :: Bool          -- ^ Do we need to trust our
                                         -- own package for Safe Haskell?
                                         -- See Note [RnNames . Trust Own Package]
    }
-- The ModGuts takes on several slightly different forms:
--
-- After simplification, the following fields change slightly:
-- mg_rules Orphan rules only (local ones now attached to binds)
-- mg_binds With rules attached
---------------------------------------------------------
-- The Tidy pass forks the information about this module:
-- * one lot goes to interface file generation (ModIface)
-- and later compilations (ModDetails)
-- * the other lot goes to code generation (CgGuts)
-- | A restricted form of 'ModGuts' for code generation purposes
data CgGuts
  = CgGuts {
        cg_module    :: !Module,
                -- ^ Module being compiled
        cg_tycons    :: [TyCon],
                -- ^ Algebraic data types (including ones that started
                -- life as classes); generate constructors and info
                -- tables. Includes newtypes, just for the benefit of
                -- External Core
        cg_binds     :: CoreProgram,
                -- ^ The tidied main bindings, including
                -- previously-implicit bindings for record and class
                -- selectors, and data constructor wrappers.  But *not*
                -- data constructor workers; reason: we regard them
                -- as part of the code-gen of tycons
        cg_foreign   :: !ForeignStubs,       -- ^ Foreign export stubs
        cg_dep_pkgs  :: ![InstalledUnitId],  -- ^ Dependent packages, used to
                                             -- generate #includes for C code gen
        cg_hpc_info  :: !HpcInfo,            -- ^ Program coverage tick box information
        cg_modBreaks :: !(Maybe ModBreaks)   -- ^ Module breakpoints
    }
-----------------------------------
-- | Foreign export stubs
data ForeignStubs
  = NoStubs
      -- ^ We don't have any stubs
  | ForeignStubs SDoc SDoc
      -- ^ There are some stubs. Parameters:
      --
      -- 1) Header file prototypes for
      --    "foreign exported" functions
      --
      -- 2) C stubs to use when calling
      --    "foreign exported" functions
-- | Append a piece of C stub code to the stubs collected so far.
appendStubC :: ForeignStubs -> SDoc -> ForeignStubs
appendStubC stubs c_code =
  case stubs of
    NoStubs          -> ForeignStubs empty c_code
    ForeignStubs h c -> ForeignStubs h (c $$ c_code)
{-
************************************************************************
* *
The interactive context
* *
************************************************************************
Note [The interactive package]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Type, class, and value declarations at the command prompt are treated
as if they were defined in modules
interactive:Ghci1
interactive:Ghci2
...etc...
with each bunch of declarations using a new module, all sharing a
common package 'interactive' (see Module.interactiveUnitId, and
PrelNames.mkInteractiveModule).
This scheme deals well with shadowing. For example:
ghci> data T = A
ghci> data T = B
ghci> :i A
data Ghci1.T = A -- Defined at <interactive>:2:10
Here we must display info about constructor A, but its type T has been
shadowed by the second declaration. But it has a respectable
qualified name (Ghci1.T), and its source location says where it was
defined.
So the main invariant continues to hold, that in any session an
original name M.T only refers to one unique thing. (In a previous
iteration both the T's above were called :Interactive.T, albeit with
different uniques, which gave rise to all sorts of trouble.)
The details are a bit tricky though:
* The field ic_mod_index counts which Ghci module we've got up to.
It is incremented when extending ic_tythings
* ic_tythings contains only things from the 'interactive' package.
* Module from the 'interactive' package (Ghci1, Ghci2 etc) never go
in the Home Package Table (HPT). When you say :load, that's when we
extend the HPT.
* The 'thisPackage' field of DynFlags is *not* set to 'interactive'.
It stays as 'main' (or whatever -this-unit-id says), and is the
package to which :load'ed modules are added to.
* So how do we arrange that declarations at the command prompt get to
be in the 'interactive' package? Simply by setting the tcg_mod
field of the TcGblEnv to "interactive:Ghci1". This is done by the
call to initTc in initTcInteractive, which in turn get the module
from it 'icInteractiveModule' field of the interactive context.
The 'thisPackage' field stays as 'main' (or whatever -this-unit-id says.
* The main trickiness is that the type environment (tcg_type_env) and
fixity envt (tcg_fix_env), now contain entities from all the
interactive-package modules (Ghci1, Ghci2, ...) together, rather
than just a single module as is usually the case. So you can't use
"nameIsLocalOrFrom" to decide whether to look in the TcGblEnv vs
the HPT/PTE. This is a change, but not a problem provided you
know.
* However, the tcg_binds, tcg_sigs, tcg_insts, tcg_fam_insts, etc fields
of the TcGblEnv, which collect "things defined in this module", all
  refer to stuff defined in a single GHCi command, *not* all the commands
so far.
In contrast, tcg_inst_env, tcg_fam_inst_env, have instances from
all GhciN modules, which makes sense -- they are all "home package"
modules.
Note [Interactively-bound Ids in GHCi]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Ids bound by previous Stmts in GHCi are currently
a) GlobalIds, with
b) An External Name, like Ghci4.foo
See Note [The interactive package] above
c) A tidied type
(a) They must be GlobalIds (not LocalIds) otherwise when we come to
compile an expression using these ids later, the byte code
generator will consider the occurrences to be free rather than
global.
(b) Having an External Name is important because of Note
[GlobalRdrEnv shadowing] in RdrName
(c) Their types are tidied. This is important, because :info may ask
to look at them, and :info expects the things it looks up to have
tidy types
Where do interactively-bound Ids come from?
- GHCi REPL Stmts e.g.
ghci> let foo x = x+1
These start with an Internal Name because a Stmt is a local
construct, so the renamer naturally builds an Internal name for
each of its binders. Then in tcRnStmt they are externalised via
  TcRnDriver.externaliseAndTidyId, so they get Names like Ghci4.foo.
- Ids bound by the debugger etc have Names constructed by
IfaceEnv.newInteractiveBinder; at the call sites it is followed by
mkVanillaGlobal or mkVanillaGlobalWithInfo. So again, they are
all Global, External.
- TyCons, Classes, and Ids bound by other top-level declarations in
GHCi (eg foreign import, record selectors) also get External
Names, with Ghci9 (or 8, or 7, etc) as the module name.
Note [ic_tythings]
~~~~~~~~~~~~~~~~~~
The ic_tythings field contains
* The TyThings declared by the user at the command prompt
(eg Ids, TyCons, Classes)
* The user-visible Ids that arise from such things, which
*don't* come from 'implicitTyThings', notably:
- record selectors
- class ops
The implicitTyThings are readily obtained from the TyThings
but record selectors etc are not
It does *not* contain
* DFunIds (they can be gotten from ic_instances)
* CoAxioms (ditto)
See also Note [Interactively-bound Ids in GHCi]
Note [Override identical instances in GHCi]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you declare a new instance in GHCi that is identical to a previous one,
we simply override the previous one; we don't regard it as overlapping.
e.g. Prelude> data T = A | B
Prelude> instance Eq T where ...
Prelude> instance Eq T where ... -- This one overrides
It's exactly the same for type-family instances. See Trac #7102
-}
-- | Interactive context, recording information about the state of the
-- context in which statements are executed in a GHC session.
data InteractiveContext
  = InteractiveContext {
         ic_dflags :: DynFlags,
             -- ^ The 'DynFlags' used to evaluate interactive expressions
             -- and statements.

         ic_mod_index :: Int,
             -- ^ Each GHCi stmt or declaration brings some new things into
             -- scope. We give them names like interactive:Ghci9.T,
             -- where the ic_index is the '9'.  The ic_mod_index is
             -- incremented whenever we add something to ic_tythings
             -- See Note [The interactive package]

         ic_imports :: [InteractiveImport],
             -- ^ The GHCi top-level scope (ic_rn_gbl_env) is extended with
             -- these imports
             --
             -- This field is only stored here so that the client
             -- can retrieve it with GHC.getContext. GHC itself doesn't
             -- use it, but does reset it to empty sometimes (such
             -- as before a GHC.load). The context is set with GHC.setContext.

         ic_tythings :: [TyThing],
             -- ^ TyThings defined by the user, in reverse order of
             -- definition (ie most recent at the front)
             -- See Note [ic_tythings]

         ic_rn_gbl_env :: GlobalRdrEnv,
             -- ^ The cached 'GlobalRdrEnv', built by
             -- 'InteractiveEval.setContext' and updated regularly
             -- It contains everything in scope at the command line,
             -- including everything in ic_tythings

         ic_instances :: ([ClsInst], [FamInst]),
             -- ^ All instances and family instances created during
             -- this session. These are grabbed en masse after each
             -- update to be sure that proper overlapping is retained.
             -- That is, rather than re-check the overlapping each
             -- time we update the context, we just take the results
             -- from the instance code that already does that.

         ic_fix_env :: FixityEnv,
             -- ^ Fixities declared in let statements

         ic_default :: Maybe [Type],
             -- ^ The current default types, set by a 'default' declaration

#ifdef GHCI
          ic_resume :: [Resume],
             -- ^ The stack of breakpoint contexts
#endif

          ic_monad      :: Name,
             -- ^ The monad that GHCi is executing in

          ic_int_print  :: Name,
             -- ^ The function that is used for printing results
             -- of expressions in ghci and -e mode.

          ic_cwd :: Maybe FilePath
             -- ^ virtual CWD of the program
    }
-- | One element of the GHCi import context (see 'ic_imports').
data InteractiveImport
  = IIDecl (ImportDecl RdrName)
      -- ^ Bring the exports of a particular module
      -- (filtered by an import decl) into scope
  | IIModule ModuleName
      -- ^ Bring into scope the entire top-level envt of
      -- this module, including the things imported
      -- into it.
-- | Constructs an empty InteractiveContext.
-- The first command will define things in Ghci1 (ic_mod_index starts at 1);
-- see Note [The interactive package].
emptyInteractiveContext :: DynFlags -> InteractiveContext
emptyInteractiveContext dflags
  = InteractiveContext {
       ic_dflags     = dflags,
       ic_imports    = [],
       ic_rn_gbl_env = emptyGlobalRdrEnv,
       ic_mod_index  = 1,
       ic_tythings   = [],
       ic_instances  = ([],[]),
       ic_fix_env    = emptyNameEnv,
       ic_monad      = ioTyConName,  -- IO monad by default
       ic_int_print  = printName,    -- System.IO.print by default
       ic_default    = Nothing,
#ifdef GHCI
       ic_resume     = [],
#endif
       ic_cwd        = Nothing }
-- | The synthetic 'Module' in which the current batch of interactive
-- bindings lives, derived from the context's 'ic_mod_index'.
icInteractiveModule :: InteractiveContext -> Module
icInteractiveModule ictxt = mkInteractiveModule (ic_mod_index ictxt)
-- | All 'TyThing's defined interactively and currently visible
-- (useful for e.g. showBindings).
icInScopeTTs :: InteractiveContext -> [TyThing]
icInScopeTTs ictxt = ic_tythings ictxt
-- | Build the 'PrintUnqualified' function from the flags and the
-- context's cached 'GlobalRdrEnv'.
icPrintUnqual :: DynFlags -> InteractiveContext -> PrintUnqualified
icPrintUnqual dflags ictxt = mkPrintUnqualified dflags (ic_rn_gbl_env ictxt)
-- | extendInteractiveContext is called with new TyThings recently defined to update the
-- InteractiveContext to include them.  Ids are easily removed when shadowed,
-- but Classes and TyCons are not.  Some work could be done to determine
-- whether they are entirely shadowed, but as you could still have references
-- to them (e.g. instances for classes or values of the type for TyCons), it's
-- not clear whether removing them is even the appropriate behavior.
extendInteractiveContext :: InteractiveContext
                         -> [TyThing]          -- ^ newly defined things (most recent first)
                         -> [ClsInst] -> [FamInst]  -- ^ new class / family instances
                         -> Maybe [Type]       -- ^ new defaulting types, if any
                         -> FixityEnv          -- ^ fixities declared at the prompt
                         -> InteractiveContext
extendInteractiveContext ictxt new_tythings new_cls_insts new_fam_insts defaults fix_env
  = ictxt { ic_mod_index = ic_mod_index ictxt + 1
            -- Always bump this; even instances should create
            -- a new mod_index (Trac #9426)
          , ic_tythings = new_tythings ++ ic_tythings ictxt
          , ic_rn_gbl_env = ic_rn_gbl_env ictxt `icExtendGblRdrEnv` new_tythings
          , ic_instances = ( new_cls_insts ++ old_cls_insts
                           , new_fam_insts ++ old_fam_insts )
          , ic_default = defaults
          , ic_fix_env = fix_env -- See Note [Fixity declarations in GHCi]
          }
  where
    -- Discard old instances that have been fully overridden
    -- See Note [Override identical instances in GHCi]
    (cls_insts, fam_insts) = ic_instances ictxt
    old_cls_insts = filterOut (\i -> any (identicalClsInstHead i) new_cls_insts) cls_insts
    old_fam_insts = filterOut (\i -> any (identicalFamInstHead i) new_fam_insts) fam_insts
-- | Specialised version of 'extendInteractiveContext' for plain 'Id's.
-- A no-op when the list of new Ids is empty.
extendInteractiveContextWithIds :: InteractiveContext -> [Id] -> InteractiveContext
extendInteractiveContextWithIds ictxt new_ids =
  case map AnId new_ids of
    [] -> ictxt
    new_tythings ->
      ictxt { ic_mod_index  = ic_mod_index ictxt + 1
            , ic_tythings   = new_tythings ++ ic_tythings ictxt
            , ic_rn_gbl_env = ic_rn_gbl_env ictxt `icExtendGblRdrEnv` new_tythings }
-- | Set the 'thisPackage' DynFlag to the magic 'interactive' package,
-- leaving everything else in the session unchanged.
setInteractivePackage :: HscEnv -> HscEnv
setInteractivePackage hsc_env = hsc_env { hsc_dflags = dflags' }
  where
    dflags' = (hsc_dflags hsc_env)
                { thisInstalledUnitId = toInstalledUnitId interactiveUnitId }
-- | Install a new result-printing function (see 'ic_int_print').
setInteractivePrintName :: InteractiveContext -> Name -> InteractiveContext
setInteractivePrintName ictxt name = ictxt { ic_int_print = name }
-- ToDo: should not add Ids to the gbl env here
-- | Add TyThings to the GlobalRdrEnv, earlier ones in the list shadowing
-- later ones, and shadowing existing entries in the GlobalRdrEnv.
icExtendGblRdrEnv :: GlobalRdrEnv -> [TyThing] -> GlobalRdrEnv
icExtendGblRdrEnv env tythings
  = foldr add env tythings  -- Foldr makes things in the front of
                            -- the list shadow things at the back
  where
    -- One at a time, to ensure each shadows the previous ones
    -- (first shadow the names the thing brings into scope, then
    -- add its GREs)
    add thing env
       | is_sub_bndr thing
       = env
       | otherwise
       = foldl extendGlobalRdrEnv env1 (concatMap localGREsFromAvail avail)
       where
          env1  = shadowNames env (concatMap availNames avail)
          avail = tyThingAvailInfo thing
       -- Ugh! The new_tythings may include record selectors, since they
       -- are not implicit-ids, and must appear in the TypeEnv.  But they
       -- will also be brought into scope by the corresponding (ATyCon
       -- tc).  And we want the latter, because that has the correct
       -- parent (Trac #10520)
    is_sub_bndr (AnId f) = case idDetails f of
                             RecSelId {}  -> True
                             ClassOpId {} -> True
                             _            -> False
    is_sub_bndr _ = False
-- | Apply a type/coercion substitution to the types of all interactively
-- bound Ids; non-Id things are left untouched.  A no-op for the empty
-- substitution.
substInteractiveContext :: InteractiveContext -> TCvSubst -> InteractiveContext
substInteractiveContext ictxt subst
  | isEmptyTCvSubst subst = ictxt
  | otherwise             = ictxt { ic_tythings = map substOne (ic_tythings ictxt) }
  where
    substOne (AnId ident) = AnId (setIdType ident (substTyUnchecked subst (idType ident)))
    substOne other        = other
-- Pretty-printing mirrors GHCi's :show imports syntax:
-- a whole-module import is rendered as "*M", an import decl as itself.
instance Outputable InteractiveImport where
  ppr (IIModule m) = char '*' <> ppr m
  ppr (IIDecl d) = ppr d
{-
************************************************************************
* *
Building a PrintUnqualified
* *
************************************************************************
Note [Printing original names]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Deciding how to print names is pretty tricky. We are given a name
P:M.T, where P is the package name, M is the defining module, and T is
the occurrence name, and we have to decide in which form to display
the name given a GlobalRdrEnv describing the current scope.
Ideally we want to display the name in the form in which it is in
scope. However, the name might not be in scope at all, and that's
where it gets tricky. Here are the cases:
1. T uniquely maps to P:M.T ---> "T" NameUnqual
2. There is an X for which X.T
uniquely maps to P:M.T ---> "X.T" NameQual X
3. There is no binding for "M.T" ---> "M.T" NameNotInScope1
4. Otherwise ---> "P:M.T" NameNotInScope2
(3) and (4) apply when the entity P:M.T is not in the GlobalRdrEnv at
all. In these cases we still want to refer to the name as "M.T", *but*
"M.T" might mean something else in the current scope (e.g. if there's
an "import X as M"), so to avoid confusion we avoid using "M.T" if
there's already a binding for it. Instead we write P:M.T.
There's one further subtlety: in case (3), what if there are two
things around, P1:M.T and P2:M.T? Then we don't want to print both of
them as M.T! However only one of the modules P1:M and P2:M can be
exposed (say P2), so we use M.T for that, and P1:M.T for the other one.
This is handled by the qual_mod component of PrintUnqualified, inside
the (ppr mod) of case (3), in Name.pprModulePrefix
Note [Printing unit ids]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In the old days, original names were tied to PackageIds, which directly
corresponded to the entities that users wrote in Cabal files, and were perfectly
suitable for printing when we need to disambiguate packages. However, with
UnitId, the situation can be different: if the key is instantiated with
some holes, we should try to give the user some more useful information.
-}
-- | Creates some functions that work out the best ways to format
-- names for the user according to a set of heuristics.
--
-- See Note [Printing original names] above for the four cases the
-- name-qualification heuristic distinguishes.
mkPrintUnqualified :: DynFlags -> GlobalRdrEnv -> PrintUnqualified
mkPrintUnqualified dflags env = QueryQualify qual_name
                                             (mkQualModule dflags)
                                             (mkQualPackage dflags)
  where
  qual_name mod occ
        | [gre] <- unqual_gres
        , right_name gre
        = NameUnqual   -- If there's a unique entity that's in scope
                       -- unqualified with 'occ' AND that entity is
                       -- the right one, then we can use the unqualified name
        | [] <- unqual_gres
        , any is_name forceUnqualNames
        , not (isDerivedOccName occ)
        = NameUnqual   -- Don't qualify names that come from modules
                       -- that come with GHC, often appear in error messages,
                       -- but aren't typically in scope. Doing this does not
                       -- cause ambiguity, and it reduces the amount of
                       -- qualification in error messages thus improving
                       -- readability.
                       --
                       -- A motivating example is 'Constraint'. It's often not
                       -- in scope, but printing GHC.Prim.Constraint seems
                       -- overkill.
        | [gre] <- qual_gres
        = NameQual (greQualModName gre)
        | null qual_gres
        = if null (lookupGRE_RdrName (mkRdrQual (moduleName mod) occ) env)
          then NameNotInScope1
          else NameNotInScope2
        | otherwise
        = NameNotInScope1   -- Can happen if 'f' is bound twice in the module
                            -- Eg  f = True; g = 0; f = False
        where
          -- Does 'name' refer to exactly the entity mod.occ we are printing?
          is_name :: Name -> Bool
          is_name name = ASSERT2( isExternalName name, ppr name )
                         nameModule name == mod && nameOccName name == occ
          -- Wired-in names printed unqualified even when not in scope
          forceUnqualNames :: [Name]
          forceUnqualNames =
            map tyConName [ constraintKindTyCon, heqTyCon, coercibleTyCon
                          , starKindTyCon, unicodeStarKindTyCon ]
            ++ [ eqTyConName ]
          right_name gre = nameModule_maybe (gre_name gre) == Just mod
          unqual_gres = lookupGRE_RdrName (mkRdrUnqual occ) env
          qual_gres   = filter right_name (lookupGlobalRdrEnv env occ)
    -- we can mention a module P:M without the P: qualifier iff
    -- "import M" would resolve unambiguously to P:M.  (if P is the
    -- current package we can just assume it is unqualified).
-- | Creates a function for formatting modules based on two heuristics:
-- (1) if the module is the current module, don't qualify, and (2) if there
-- is only one exposed package which exports this module, don't qualify.
mkQualModule :: DynFlags -> QueryQualifyModule
mkQualModule dflags mod
  | moduleUnitId mod == thisPackage dflags
  = False  -- the home package never needs qualification

  | [(_, pkgconfig)] <- candidates
  , packageConfigId pkgconfig == moduleUnitId mod
  -- we are given a module P:M; there is exactly one exposed package
  -- exposing a module M, and it is package P, so M is unambiguous
  = False

  | otherwise
  = True
  where
    -- renamed from 'lookup' to avoid shadowing Prelude.lookup
    candidates = lookupModuleInAllPackages dflags (moduleName mod)
-- | Creates a function for formatting packages based on two heuristics:
-- (1) don't qualify if the package in question is "main", and (2) only qualify
-- with a unit id if the package ID would be ambiguous.
mkQualPackage :: DynFlags -> QueryQualifyPackage
mkQualPackage dflags pkg_key
  | pkg_key == mainUnitId || pkg_key == interactiveUnitId
  -- Skip the lookup if it's main, since it won't be in the package
  -- database!
  = False

  | Just pkgid <- mb_pkgid
  , searchPackageId dflags pkgid `lengthIs` 1
  -- we are given a package pkg-0.1@MMM; there is exactly one exposed
  -- package whose package ID is pkg-0.1, so no disambiguation is needed
  = False

  | otherwise
  = True
  where
    mb_pkgid = sourcePackageId <$> lookupPackage dflags pkg_key
-- | A print-unqualified scheme which fully qualifies everything *except*
-- package names, which are only qualified when necessary.
pkgQual :: DynFlags -> PrintUnqualified
pkgQual dflags =
  alwaysQualify { queryQualifyPackage = mkQualPackage dflags }
{-
************************************************************************
* *
Implicit TyThings
* *
************************************************************************
Note [Implicit TyThings]
~~~~~~~~~~~~~~~~~~~~~~~~
DEFINITION: An "implicit" TyThing is one that does not have its own
IfaceDecl in an interface file. Instead, its binding in the type
environment is created as part of typechecking the IfaceDecl for
some other thing.
Examples:
* All DataCons are implicit, because they are generated from the
IfaceDecl for the data/newtype. Ditto class methods.
* Record selectors are *not* implicit, because they get their own
free-standing IfaceDecl.
* Associated data/type families are implicit because they are
included in the IfaceDecl of the parent class. (NB: the
IfaceClass decl happens to use IfaceDecl recursively for the
associated types, but that's irrelevant here.)
* Dictionary function Ids are not implicit.
* Axioms for newtypes are implicit (same as above), but axioms
for data/type family instances are *not* implicit (like DFunIds).
-}
-- | Determine the 'TyThing's brought into scope by another 'TyThing'
-- /other/ than itself. For example, Id's don't have any implicit TyThings
-- as they just bring themselves into scope, but classes bring their
-- dictionary datatype, type constructor and some selector functions into
-- scope, just for a start!
-- N.B. the set of TyThings returned here *must* match the set of
-- names returned by LoadIface.ifaceDeclImplicitBndrs, in the sense that
-- TyThing.getOccName should define a bijection between the two lists.
-- This invariant is used in LoadIface.loadDecl (see note [Tricky iface loop])
-- The order of the list does not matter.
implicitTyThings :: TyThing -> [TyThing]
implicitTyThings (AnId _)       = []   -- Ids bring only themselves
implicitTyThings (ACoAxiom _cc) = []   -- likewise coercion axioms
implicitTyThings (ATyCon tc)    = implicitTyConThings tc
implicitTyThings (AConLike cl)  = implicitConLikeThings cl
-- | Implicit things brought into scope by a data constructor or pattern
-- synonym; see 'implicitTyThings'.
implicitConLikeThings :: ConLike -> [TyThing]
implicitConLikeThings (RealDataCon dc)
  = dataConImplicitTyThings dc
implicitConLikeThings (PatSynCon {})
  = []  -- Pattern synonyms have no implicit Ids; the wrapper and matcher
        -- are not "implicit"; they are simply new top-level bindings,
        -- and they have their own declaration in an interface file
        -- Unless a record pat syn when there are implicit selectors
        -- They are still not included here as `implicitConLikeThings` is
        -- used by `tcTyClsDecls` whilst pattern synonyms are typed checked
        -- by `tcTopValBinds`.
-- | Implicit things brought into scope by a class declaration:
-- its associated type constructors and its selector Ids.
implicitClassThings :: Class -> [TyThing]
implicitClassThings cl
  = -- Does not include default methods, because those Ids may have
    --    their own pragmas, unfoldings etc, not derived from the Class object
    -- associated types
    --    No recursive call for the classATs, because they
    --    are only the family decls; they have no implicit things
    map ATyCon (classATs cl) ++
    -- superclass and operation selectors
    map AnId (classAllSelIds cl)
-- | Implicit things brought into scope by a type constructor:
-- class paraphernalia (if it is a class TyCon), implicit coercion
-- axioms, and, for each data constructor, the ConLike and its
-- implicit Ids.
implicitTyConThings :: TyCon -> [TyThing]
implicitTyConThings tc
  = class_stuff ++
    -- fields (names of selectors)
    -- (possibly) implicit newtype axioms
    -- or type family axioms
    implicitCoTyCon tc ++
    -- for each data constructor in order,
    -- the constructor, worker, and (possibly) wrapper
    [ thing | dc <- tyConDataCons tc
            , thing <- AConLike (RealDataCon dc) : dataConImplicitTyThings dc ]
    -- NB. record selectors are *not* implicit, they have fully-fledged
    -- bindings that pass through the compilation pipeline as normal.
  where
    class_stuff = case tyConClass_maybe tc of
        Nothing -> []
        Just cl -> implicitClassThings cl
-- | For newtypes and closed type families (only), the implicit coercion
-- axiom tycon; empty for everything else.
implicitCoTyCon :: TyCon -> [TyThing]
implicitCoTyCon tc =
  case newTyConCo_maybe tc of
    Just co -> [ACoAxiom (toBranchedAxiom co)]
    Nothing ->
      case isClosedSynFamilyTyConWithAxiom_maybe tc of
        Just co -> [ACoAxiom co]
        Nothing -> []
-- | Returns @True@ if there should be no interface-file declaration
-- for this thing on its own: either it is built-in, or it is part
-- of some other declaration, or it is generated implicitly by some
-- other declaration.
isImplicitTyThing :: TyThing -> Bool
isImplicitTyThing thing =
  case thing of
    AConLike (RealDataCon {}) -> True   -- data cons live in their tycon's decl
    AConLike (PatSynCon {})   -> False  -- pattern synonyms have their own decl
    AnId ident                -> isImplicitId ident
    ATyCon tc                 -> isImplicitTyCon tc
    ACoAxiom ax               -> isImplicitCoAxiom ax
-- | tyThingParent_maybe x returns (Just p)
-- when pprTyThingInContext should print a declaration for p
-- (albeit with some "..." in it) when asked to show x
-- It returns the *immediate* parent.  So a datacon returns its tycon
-- but the tycon could be the associated type of a class, so it in turn
-- might have a parent.
tyThingParent_maybe :: TyThing -> Maybe TyThing
tyThingParent_maybe (AConLike cl) = case cl of
    RealDataCon dc  -> Just (ATyCon (dataConTyCon dc))
    PatSynCon{}     -> Nothing  -- pattern synonyms are free-standing
tyThingParent_maybe (ATyCon tc)   = case tyConAssoc_maybe tc of
                                      Just cls -> Just (ATyCon (classTyCon cls))
                                      Nothing  -> Nothing
tyThingParent_maybe (AnId id)     = case idDetails id of
                                      RecSelId { sel_tycon = RecSelData tc } ->
                                          Just (ATyCon tc)
                                      ClassOpId cls               ->
                                          Just (ATyCon (classTyCon cls))
                                      _other                      -> Nothing
tyThingParent_maybe _other = Nothing
-- | The union of the type and coercion variables free in the types of
-- the given 'TyThing's.
tyThingsTyCoVars :: [TyThing] -> TyCoVarSet
tyThingsTyCoVars tts =
  unionVarSets $ map ttToVarSet tts
  where
    ttToVarSet (AnId id)     = tyCoVarsOfType $ idType id
    ttToVarSet (AConLike cl) = case cl of
        RealDataCon dc  -> tyCoVarsOfType $ dataConRepType dc
        PatSynCon{}     -> emptyVarSet
    ttToVarSet (ATyCon tc)
      = case tyConClass_maybe tc of
          -- for a class, the class type variables
          Just cls -> (mkVarSet . fst . classTvsFds) cls
          -- otherwise the free vars of the tycon's kind
          Nothing -> tyCoVarsOfType $ tyConKind tc
    ttToVarSet (ACoAxiom _)  = emptyVarSet
-- | The Names that a TyThing should bring into scope.  Used to build
-- the GlobalRdrEnv for the InteractiveContext.
tyThingAvailInfo :: TyThing -> [AvailInfo]
tyThingAvailInfo (ATyCon t)
   = case tyConClass_maybe t of
        -- a class exports itself, its methods and its associated types
        Just c  -> [AvailTC n (n : map getName (classMethods c)
                                 ++ map getName (classATs c))
                             [] ]
             where n = getName c
        -- a plain tycon exports itself, its data cons and its fields
        Nothing -> [AvailTC n (n : map getName dcs) flds]
             where n    = getName t
                   dcs  = tyConDataCons t
                   flds = tyConFieldLabels t
tyThingAvailInfo (AConLike (PatSynCon p))
  -- a pattern synonym exports itself plus its record selectors
  = map avail ((getName p) : map flSelector (patSynFieldLabels p))
tyThingAvailInfo t
   = [avail (getName t)]
{-
************************************************************************
* *
TypeEnv
* *
************************************************************************
-}
-- | A map from 'Name's to 'TyThing's, constructed by typechecking
-- local declarations or interface files
type TypeEnv = NameEnv TyThing
-- Accessors that project out particular kinds of TyThing.
emptyTypeEnv    :: TypeEnv
typeEnvElts     :: TypeEnv -> [TyThing]
typeEnvTyCons   :: TypeEnv -> [TyCon]
typeEnvCoAxioms :: TypeEnv -> [CoAxiom Branched]
typeEnvIds      :: TypeEnv -> [Id]
typeEnvPatSyns  :: TypeEnv -> [PatSyn]
typeEnvDataCons :: TypeEnv -> [DataCon]
typeEnvClasses  :: TypeEnv -> [Class]
lookupTypeEnv   :: TypeEnv -> Name -> Maybe TyThing
emptyTypeEnv        = emptyNameEnv
typeEnvElts     env = nameEnvElts env
typeEnvTyCons   env = [tc | ATyCon tc <- typeEnvElts env]
typeEnvCoAxioms env = [ax | ACoAxiom ax <- typeEnvElts env]
typeEnvIds      env = [id | AnId id <- typeEnvElts env]
typeEnvPatSyns  env = [ps | AConLike (PatSynCon ps) <- typeEnvElts env]
typeEnvDataCons env = [dc | AConLike (RealDataCon dc) <- typeEnvElts env]
-- Classes are recovered from their TyCons rather than stored directly.
typeEnvClasses  env = [cl | tc <- typeEnvTyCons env,
                            Just cl <- [tyConClass_maybe tc]]
-- | Build a 'TypeEnv' containing exactly the given things,
-- keyed by their 'Name's.
mkTypeEnv :: [TyThing] -> TypeEnv
mkTypeEnv = extendTypeEnvList emptyTypeEnv
-- | Like 'mkTypeEnv', but also includes the implicit things
-- (see 'implicitTyThings') of each entry.
mkTypeEnvWithImplicits :: [TyThing] -> TypeEnv
mkTypeEnvWithImplicits things =
  plusNameEnv (mkTypeEnv things)
              (mkTypeEnv (concatMap implicitTyThings things))
-- | Build a 'TypeEnv' from Ids, TyCons and family instances, including
-- the implicit things of each TyCon and the (branched) axiom of each
-- family instance.  Representation tycons of the family instances are
-- included alongside the given tycons.
typeEnvFromEntities :: [Id] -> [TyCon] -> [FamInst] -> TypeEnv
typeEnvFromEntities ids tcs famInsts = mkTypeEnv entities
  where
    all_tcs  = tcs ++ famInstsRepTyCons famInsts
    entities = concat
      [ map AnId ids
      , map ATyCon all_tcs
      , concatMap implicitTyConThings all_tcs
      , map (ACoAxiom . toBranchedAxiom . famInstAxiom) famInsts
      ]
lookupTypeEnv = lookupNameEnv
-- Extend the type environment
-- | Insert a single 'TyThing', keyed by its 'Name'.
extendTypeEnv :: TypeEnv -> TyThing -> TypeEnv
extendTypeEnv env thing = extendNameEnv env (getName thing) thing
-- | Insert many 'TyThing's, one at a time, left to right.
extendTypeEnvList :: TypeEnv -> [TyThing] -> TypeEnv
extendTypeEnvList env things = foldl extendTypeEnv env things
-- | Insert Ids wrapped as 'AnId'.
extendTypeEnvWithIds :: TypeEnv -> [Id] -> TypeEnv
extendTypeEnvWithIds env ids
  = extendNameEnvList env [(getName id, AnId id) | id <- ids]
-- | Union of two type environments.
plusTypeEnv :: TypeEnv -> TypeEnv -> TypeEnv
plusTypeEnv env1 env2 = plusNameEnv env1 env2
-- | Find the 'TyThing' for the given 'Name' by using all the resources
-- at our disposal: the compiled modules in the 'HomePackageTable' and the
-- compiled modules in other packages that live in 'PackageTypeEnv'. Note
-- that this does NOT look up the 'TyThing' in the module being compiled: you
-- have to do that yourself, if desired
lookupType :: DynFlags
           -> HomePackageTable
           -> PackageTypeEnv
           -> Name
           -> Maybe TyThing
lookupType dflags hpt pte name
  -- in one-shot, we don't use the HPT
  | isOneShot (ghcMode dflags)
  = lookupNameEnv pte name
  | otherwise
  = case lookupHptByModule hpt mod of
       Just hm -> lookupNameEnv (md_types (hm_details hm)) name
       Nothing -> lookupNameEnv pte name
  where
    -- hole names are rehomed into the current package before the lookup
    mod = ASSERT2( isExternalName name, ppr name )
          if isHoleName name
            then mkModule (thisPackage dflags) (moduleName (nameModule name))
            else nameModule name
-- | As 'lookupType', but with a marginally easier-to-use interface
-- if you have a 'HscEnv'.  Reads the external package state.
lookupTypeHscEnv :: HscEnv -> Name -> IO (Maybe TyThing)
lookupTypeHscEnv hsc_env name = do
    let dflags = hsc_dflags hsc_env
        hpt    = hsc_HPT hsc_env
    eps <- readIORef (hsc_EPS hsc_env)
    return $! lookupType dflags hpt (eps_PTE eps) name
-- | Get the 'TyCon' from a 'TyThing' if it is a type constructor thing.
-- Panics otherwise
tyThingTyCon :: TyThing -> TyCon
tyThingTyCon thing = case thing of
  ATyCon tc -> tc
  _         -> pprPanic "tyThingTyCon" (ppr thing)

-- | Get the 'CoAxiom' from a 'TyThing' if it is a coercion axiom thing.
-- Panics otherwise
tyThingCoAxiom :: TyThing -> CoAxiom Branched
tyThingCoAxiom thing = case thing of
  ACoAxiom ax -> ax
  _           -> pprPanic "tyThingCoAxiom" (ppr thing)

-- | Get the 'DataCon' from a 'TyThing' if it is a data constructor thing.
-- Panics otherwise
tyThingDataCon :: TyThing -> DataCon
tyThingDataCon thing = case thing of
  AConLike (RealDataCon dc) -> dc
  _                         -> pprPanic "tyThingDataCon" (ppr thing)

-- | Get the 'Id' from a 'TyThing' if it is an id *or* data constructor
-- thing (in which case the wrapper Id is returned). Panics otherwise
tyThingId :: TyThing -> Id
tyThingId thing = case thing of
  AnId ident                -> ident
  AConLike (RealDataCon dc) -> dataConWrapId dc
  _                         -> pprPanic "tyThingId" (ppr thing)
{-
************************************************************************
* *
\subsection{MonadThings and friends}
* *
************************************************************************
-}
-- | Class that abstracts out the common ability of the monads in GHC
-- to lookup a 'TyThing' in the monadic environment by 'Name'. Provides
-- a number of related convenience functions for accessing particular
-- kinds of 'TyThing'
--
-- Only 'lookupThing' is primitive; the others have defaults built from
-- it and the (panicking) tyThing* projections above.
class Monad m => MonadThings m where
        lookupThing :: Name -> m TyThing
        lookupId :: Name -> m Id
        lookupId = liftM tyThingId . lookupThing
        lookupDataCon :: Name -> m DataCon
        lookupDataCon = liftM tyThingDataCon . lookupThing
        lookupTyCon :: Name -> m TyCon
        lookupTyCon = liftM tyThingTyCon . lookupThing
{-
************************************************************************
* *
\subsection{Auxiliary types}
* *
************************************************************************
These types are defined here because they are mentioned in ModDetails,
but they are mostly elaborated elsewhere
-}
------------------ Warnings -------------------------
-- | Warning information for a module
data Warnings
  = NoWarnings                          -- ^ Nothing deprecated
  | WarnAll WarningTxt                  -- ^ Whole module deprecated
  | WarnSome [(OccName,WarningTxt)]     -- ^ Some specific things deprecated
     -- Only an OccName is needed because
     --    (1) a deprecation always applies to a binding
     --        defined in the module in which the deprecation appears.
     --    (2) deprecations are only reported outside the defining module.
     -- this is important because, otherwise, if we saw something like
     --
     -- {-# DEPRECATED f "" #-}
     -- f = ...
     -- h = f
     -- g = let f = undefined in f
     --
     -- we'd need more information than an OccName to know to say something
     -- about the use of f in h but not the use of the locally bound f in g
     --
     -- however, because we only report about deprecations from the outside,
     -- and a module can only export one value called f,
     -- an OccName suffices.
     --
     -- this is in contrast with fixity declarations, where we need to map
     -- a Name to its fixity declaration.
  deriving( Eq )
     -- Eq is needed for old/new interface-file comparisons
-- Serialisation for interface files.  The tag bytes (0/1/2) identify
-- the constructor; 'put_' and 'get' must stay in sync with each other.
instance Binary Warnings where
    put_ bh NoWarnings     = putByte bh 0
    put_ bh (WarnAll t) = do
            putByte bh 1
            put_ bh t
    put_ bh (WarnSome ts) = do
            putByte bh 2
            put_ bh ts
    get bh = do
            h <- getByte bh
            case h of
              0 -> return NoWarnings
              1 -> do aa <- get bh
                      return (WarnAll aa)
              -- any other tag is decoded as WarnSome
              _ -> do aa <- get bh
                      return (WarnSome aa)
-- | Constructs the cache for the 'mi_warn_fn' field of a 'ModIface'.
-- For 'WarnSome' the OccEnv is built once and shared by every lookup
-- through the returned function.
mkIfaceWarnCache :: Warnings -> OccName -> Maybe WarningTxt
mkIfaceWarnCache NoWarnings       = const Nothing
mkIfaceWarnCache (WarnAll t)      = const (Just t)
mkIfaceWarnCache (WarnSome pairs) = \occ -> lookupOccEnv env occ
  where env = mkOccEnv pairs

-- | Cache for a module with no warnings at all.
emptyIfaceWarnCache :: OccName -> Maybe WarningTxt
emptyIfaceWarnCache = const Nothing
-- | Combine the warnings of two (parts of a) module.  If either side
-- deprecates the whole module the result does too; note the clause
-- order makes the right-hand 'WarnAll' win when both sides are 'WarnAll'.
plusWarns :: Warnings -> Warnings -> Warnings
plusWarns d NoWarnings = d
plusWarns NoWarnings d = d
plusWarns _ (WarnAll t) = WarnAll t
plusWarns (WarnAll t) _ = WarnAll t
plusWarns (WarnSome v1) (WarnSome v2) = WarnSome (v1 ++ v2)
-- | Creates cached lookup for the 'mi_fix_fn' field of 'ModIface'.
-- The OccEnv is built once (per partial application) and shared by
-- every subsequent lookup through the returned function.
mkIfaceFixCache :: [(OccName, Fixity)] -> OccName -> Maybe Fixity
mkIfaceFixCache pairs =
  let env = mkOccEnv pairs
  in \n -> lookupOccEnv env n

-- | Cache for a module declaring no fixities.
emptyIfaceFixCache :: OccName -> Maybe Fixity
emptyIfaceFixCache = const Nothing
-- | Fixity environment mapping names to their fixities
type FixityEnv = NameEnv FixItem
-- | Fixity information for an 'Name'. We keep the OccName in the range
-- so that we can generate an interface from it
data FixItem = FixItem OccName Fixity
instance Outputable FixItem where
  ppr (FixItem occ fix) = ppr fix <+> ppr occ
-- | A 'FixityEnv' with no declared fixities.
emptyFixityEnv :: FixityEnv
emptyFixityEnv = emptyNameEnv
-- | Look up the fixity of a name, falling back on 'defaultFixity'
-- when the name has no declared fixity.
lookupFixity :: FixityEnv -> Name -> Fixity
lookupFixity env n = maybe defaultFixity get_fix (lookupNameEnv env n)
  where
    get_fix (FixItem _ fix) = fix
{-
************************************************************************
* *
\subsection{WhatsImported}
* *
************************************************************************
-}
-- | Records whether a module has orphans. An \"orphan\" is one of:
--
-- * An instance declaration in a module other than the definition
--   module for one of the type constructors or classes in the instance head
--
-- * A transformation rule in a module other than the one defining
--   the function in the head of the rule
--
-- * A vectorisation pragma
type WhetherHasOrphans   = Bool
-- | Does this module define family instances?
type WhetherHasFamInst = Bool
-- | Did this module originate from a *-boot file?
type IsBootInterface = Bool
-- | Dependency information about ALL modules and packages below this one
-- in the import hierarchy.
--
-- Invariant: the dependencies of a module @M@ never includes @M@.
--
-- Invariant: none of the lists contain duplicates.
data Dependencies
  = Deps { dep_mods   :: [(ModuleName, IsBootInterface)]
                        -- ^ All home-package modules transitively below this one
                        -- I.e. modules that this one imports, or that are in the
                        --      dep_mods of those directly-imported modules
         , dep_pkgs   :: [(InstalledUnitId, Bool)]
                        -- ^ All packages transitively below this module
                        -- I.e. packages to which this module's direct imports belong,
                        --      or that are in the dep_pkgs of those modules
                        -- The bool indicates if the package is required to be
                        -- trusted when the module is imported as a safe import
                        -- (Safe Haskell). See Note [RnNames . Tracking Trust Transitively]
         , dep_orphs  :: [Module]
                        -- ^ Transitive closure of orphan modules (whether
                        -- home or external pkg).
                        --
                        -- (Possible optimization: don't include family
                        -- instance orphans as they are anyway included in
                        -- 'dep_finsts'.  But then be careful about code
                        -- which relies on dep_orphs having the complete list!)
                        -- This does NOT include us, unlike 'imp_orphs'.
         , dep_finsts :: [Module]
                        -- ^ Transitive closure of depended upon modules which
                        -- contain family instances (whether home or external).
                        -- This is used by 'checkFamInstConsistency'.  This
                        -- does NOT include us, unlike 'imp_finsts'.
         }
  deriving( Eq )
        -- Equality used only for old/new comparison in MkIface.addFingerprints
        -- See 'TcRnTypes.ImportAvails' for details on dependencies.
-- Field order in 'put_' and 'get' must agree: mods, pkgs, orphs, finsts.
instance Binary Dependencies where
    put_ bh deps = do put_ bh (dep_mods deps)
                      put_ bh (dep_pkgs deps)
                      put_ bh (dep_orphs deps)
                      put_ bh (dep_finsts deps)
    get bh = do ms <- get bh
                ps <- get bh
                os <- get bh
                fis <- get bh
                return (Deps { dep_mods = ms, dep_pkgs = ps, dep_orphs = os,
                               dep_finsts = fis })
-- | 'Dependencies' with all four dependency lists empty.
noDependencies :: Dependencies
noDependencies = Deps { dep_mods   = []
                      , dep_pkgs   = []
                      , dep_orphs  = []
                      , dep_finsts = [] }
-- | Records modules for which changes may force recompilation of this module
-- See wiki: http://ghc.haskell.org/trac/ghc/wiki/Commentary/Compiler/RecompilationAvoidance
--
-- This differs from Dependencies. A module X may be in the dep_mods of this
-- module (via an import chain) but if we don't use anything from X it won't
-- appear in our Usage
-- | A dependency of this module on some external entity, recorded so
-- that changes to that entity can trigger recompilation.
data Usage
  -- | Module from another package
  = UsagePackageModule {
        usg_mod      :: Module,
           -- ^ External package module depended on
        usg_mod_hash :: Fingerprint,
            -- ^ Cached module fingerprint
        usg_safe :: IsSafeImport
            -- ^ Was this module imported as a safe import
    }
  -- | Module from the current package
  | UsageHomeModule {
        usg_mod_name :: ModuleName,
            -- ^ Name of the module
        usg_mod_hash :: Fingerprint,
            -- ^ Cached module fingerprint
        usg_entities :: [(OccName,Fingerprint)],
            -- ^ Entities we depend on, sorted by occurrence name and fingerprinted.
            -- NB: usages are for parent names only, e.g. type constructors
            -- but not the associated data constructors.
        usg_exports  :: Maybe Fingerprint,
            -- ^ Fingerprint for the export list of this module,
            -- if we directly imported it (and hence we depend on its export list)
        usg_safe :: IsSafeImport
            -- ^ Was this module imported as a safe import
    }                                           -- ^ Module from the current package
  -- | A file upon which the module depends, e.g. a CPP #include, or using TH's
  -- 'addDependentFile'
  | UsageFile {
        usg_file_path  :: FilePath,
        -- ^ External file dependency. From a CPP #include or TH
        -- addDependentFile. Should be absolute.
        usg_file_hash  :: Fingerprint
        -- ^ 'Fingerprint' of the file contents.
        -- Note: We don't consider things like modification timestamps
        -- here, because there's no reason to recompile if the actual
        -- contents don't change.  This previously lead to odd
        -- recompilation behaviors; see #8114
  }
  -- | A requirement which was merged into this one.
  | UsageMergedRequirement {
        usg_mod :: Module,
        usg_mod_hash :: Fingerprint
  }
    deriving( Eq )
        -- Eq is needed only for old/new interface comparison
-- The export list field is (Just v) if we depend on the export list:
-- i.e. we imported the module directly, whether or not we
-- enumerated the things we imported, or just imported
-- everything
-- We need to recompile if M's exports change, because
-- if the import was import M, we might now have a name clash
-- in the importing module.
-- if the import was import M(x) M might no longer export x
-- The only way we don't depend on the export list is if we have
-- import M()
-- And of course, for modules that aren't imported directly we don't
-- depend on their export lists
-- Serialisation for interface files.  Tag bytes 0-3 correspond to the
-- four Usage constructors; the field order read in 'get' must match
-- the order written in 'put_' for each tag.
instance Binary Usage where
    put_ bh usg@UsagePackageModule{} = do
        putByte bh 0
        put_ bh (usg_mod usg)
        put_ bh (usg_mod_hash usg)
        put_ bh (usg_safe     usg)
    put_ bh usg@UsageHomeModule{} = do
        putByte bh 1
        put_ bh (usg_mod_name usg)
        put_ bh (usg_mod_hash usg)
        put_ bh (usg_exports  usg)
        put_ bh (usg_entities usg)
        put_ bh (usg_safe     usg)
    put_ bh usg@UsageFile{} = do
        putByte bh 2
        put_ bh (usg_file_path usg)
        put_ bh (usg_file_hash usg)
    put_ bh usg@UsageMergedRequirement{} = do
        putByte bh 3
        put_ bh (usg_mod      usg)
        put_ bh (usg_mod_hash usg)
    get bh = do
        h <- getByte bh
        case h of
          -- NB: the local names below are historical; for tag 0 'nm' holds
          -- the module and 'mod' holds the fingerprint, matching the order
          -- written by put_ above.
          0 -> do
            nm    <- get bh
            mod   <- get bh
            safe  <- get bh
            return UsagePackageModule { usg_mod = nm, usg_mod_hash = mod, usg_safe = safe }
          1 -> do
            nm    <- get bh
            mod   <- get bh
            exps  <- get bh
            ents  <- get bh
            safe  <- get bh
            return UsageHomeModule { usg_mod_name = nm, usg_mod_hash = mod,
                     usg_exports = exps, usg_entities = ents, usg_safe = safe }
          2 -> do
            fp   <- get bh
            hash <- get bh
            return UsageFile { usg_file_path = fp, usg_file_hash = hash }
          3 -> do
            mod <- get bh
            hash <- get bh
            return UsageMergedRequirement { usg_mod = mod, usg_mod_hash = hash }
          i -> error ("Binary.get(Usage): " ++ show i)
{-
************************************************************************
* *
The External Package State
* *
************************************************************************
-}
-- Synonyms for the external-package views of the corresponding
-- environments, accumulated lazily as interfaces are read in.
type PackageTypeEnv          = TypeEnv
type PackageRuleBase         = RuleBase
type PackageInstEnv          = InstEnv
type PackageFamInstEnv       = FamInstEnv
type PackageVectInfo         = VectInfo
type PackageAnnEnv           = AnnEnv
-- | Information about other packages that we have slurped in by reading
-- their interface files
--
-- Note: every field except 'eps_free_holes' is strict.
data ExternalPackageState
  = EPS {
        eps_is_boot :: !(ModuleNameEnv (ModuleName, IsBootInterface)),
                -- ^ In OneShot mode (only), home-package modules
                -- accumulate in the external package state, and are
                -- sucked in lazily.  For these home-pkg modules
                -- (only) we need to record which are boot modules.
                -- We set this field after loading all the
                -- explicitly-imported interfaces, but before doing
                -- anything else
                --
                -- The 'ModuleName' part is not necessary, but it's useful for
                -- debug prints, and it's convenient because this field comes
                -- direct from 'TcRnTypes.imp_dep_mods'

        eps_PIT :: !PackageIfaceTable,
                -- ^ The 'ModIface's for modules in external packages
                -- whose interfaces we have opened.
                -- The declarations in these interface files are held in the
                -- 'eps_decls', 'eps_inst_env', 'eps_fam_inst_env' and 'eps_rules'
                -- fields of this record, not in the 'mi_decls' fields of the
                -- interface we have sucked in.
                --
                -- What /is/ in the PIT is:
                --
                -- * The Module
                --
                -- * Fingerprint info
                --
                -- * Its exports
                --
                -- * Fixities
                --
                -- * Deprecations and warnings

        eps_free_holes :: InstalledModuleEnv (UniqDSet ModuleName),
                -- ^ Cache for 'mi_free_holes'.  Ordinarily, we can rely on
                -- the 'eps_PIT' for this information, EXCEPT that when
                -- we do dependency analysis, we need to look at the
                -- 'Dependencies' of our imports to determine what their
                -- precise free holes are ('moduleFreeHolesPrecise').  We
                -- don't want to repeatedly reread in the interface
                -- for every import, so cache it here.  When the PIT
                -- gets filled in we can drop these entries.

        eps_PTE :: !PackageTypeEnv,
                -- ^ Result of typechecking all the external package
                -- interface files we have sucked in.  The domain of
                -- the mapping is external-package modules

        eps_inst_env     :: !PackageInstEnv,   -- ^ The total 'InstEnv' accumulated
                                               -- from all the external-package modules
        eps_fam_inst_env :: !PackageFamInstEnv,-- ^ The total 'FamInstEnv' accumulated
                                               -- from all the external-package modules
        eps_rule_base    :: !PackageRuleBase,  -- ^ The total 'RuleEnv' accumulated
                                               -- from all the external-package modules
        eps_vect_info    :: !PackageVectInfo,  -- ^ The total 'VectInfo' accumulated
                                               -- from all the external-package modules
        eps_ann_env      :: !PackageAnnEnv,    -- ^ The total 'AnnEnv' accumulated
                                               -- from all the external-package modules

        eps_mod_fam_inst_env :: !(ModuleEnv FamInstEnv), -- ^ The family instances accumulated from external
                                                         -- packages, keyed off the module that declared them

        eps_stats :: !EpsStats                 -- ^ Statistics about what was loaded from external packages
  }
-- | Accumulated statistics about what we are putting into the 'ExternalPackageState'.
-- \"In\" means stuff that is just /read/ from interface files,
-- \"Out\" means actually sucked in and type-checked
--
-- All seven counters share the single @:: !Int@ signature at the end of
-- the field list, so every one of them is a strict 'Int'.
data EpsStats = EpsStats { n_ifaces_in
                         , n_decls_in, n_decls_out
                         , n_rules_in, n_rules_out
                         , n_insts_in, n_insts_out :: !Int }
addEpsInStats :: EpsStats -> Int -> Int -> Int -> EpsStats
-- ^ Record one newly-read interface: bump the interface counter by one and
-- the decl/inst/rule \"in\" counters by the given amounts.  The \"out\"
-- counters are left untouched.
addEpsInStats stats n_decls n_insts n_rules =
  stats { n_ifaces_in = 1       + n_ifaces_in stats
        , n_decls_in  = n_decls + n_decls_in  stats
        , n_insts_in  = n_insts + n_insts_in  stats
        , n_rules_in  = n_rules + n_rules_in  stats }
{-
Names in a NameCache are always stored as a Global, and have the SrcLoc
of their binding locations.
Actually that's not quite right. When we first encounter the original
name, we might not be at its binding site (e.g. we are reading an
interface file); so we give it 'noSrcLoc' then. Later, when we find
its binding site, we fix it up.
-}
-- Atomically (and strictly) apply an updating function to the name cache
-- held in the 'HscEnv', returning the function's extra result.
updNameCacheIO :: HscEnv
               -> (NameCache -> (NameCache, c))  -- The updating function
               -> IO c
updNameCacheIO hsc_env = atomicModifyIORef' (hsc_NC hsc_env)
-- | Build the platform-appropriate shared-object file name for a library
-- root: on Windows the root is used as-is, elsewhere it gets the
-- conventional @lib@ prefix; the extension comes from 'soExt'.
mkSOName :: Platform -> FilePath -> FilePath
mkSOName platform root = prefixed <.> soExt platform
  where
    prefixed = case platformOS platform of
                 OSMinGW32 -> root
                 _         -> "lib" ++ root
-- | Like 'mkSOName' but always applies the @lib@ prefix, on every platform.
mkHsSOName :: Platform -> FilePath -> FilePath
mkHsSOName platform root = lib_root <.> soExt platform
  where lib_root = "lib" ++ root
-- | Shared-library filename extension for the target operating system.
soExt :: Platform -> FilePath
soExt platform =
  case platformOS platform of
    OSMinGW32 -> "dll"
    OSDarwin  -> "dylib"
    _         -> "so"
{-
************************************************************************
* *
The module graph and ModSummary type
A ModSummary is a node in the compilation manager's
dependency graph, and it's also passed to hscMain
* *
************************************************************************
-}
-- | A ModuleGraph contains all the nodes from the home package (only).
-- There will be a node for each source module, plus a node for each hi-boot
-- module.
--
-- The graph is not necessarily stored in topologically-sorted order.  Use
-- 'GHC.topSortModuleGraph' and 'Digraph.flattenSCC' to achieve this.
type ModuleGraph = [ModSummary]

-- | A module graph with no modules in it.
emptyMG :: ModuleGraph
emptyMG = []
-- | A single node in a 'ModuleGraph'. The nodes of the module graph
-- are one of:
--
-- * A regular Haskell source module
-- * A hi-boot source module
--
data ModSummary
   = ModSummary {
        ms_mod          :: Module,
          -- ^ Identity of the module
        ms_hsc_src      :: HscSource,
          -- ^ The module source either plain Haskell or hs-boot
        ms_location     :: ModLocation,
          -- ^ Location of the various files belonging to the module
        ms_hs_date      :: UTCTime,
          -- ^ Timestamp of source file
        ms_obj_date     :: Maybe UTCTime,
          -- ^ Timestamp of object, if we have one
        ms_iface_date   :: Maybe UTCTime,
          -- ^ Timestamp of hi file, if we *only* are typechecking (it is
          -- 'Nothing' otherwise).
          -- See Note [Recompilation checking when typechecking only] and #9243
        ms_srcimps      :: [(Maybe FastString, Located ModuleName)],
          -- ^ Source imports of the module
        ms_textual_imps :: [(Maybe FastString, Located ModuleName)],
          -- ^ Non-source imports of the module from the module *text*
        ms_parsed_mod   :: Maybe HsParsedModule,
          -- ^ The parsed, nonrenamed source, if we have it.  This is also
          -- used to support "inline module syntax" in Backpack files.
        ms_hspp_file    :: FilePath,
          -- ^ Filename of preprocessed source file
        ms_hspp_opts    :: DynFlags,
          -- ^ Cached flags from @OPTIONS@, @INCLUDE@ and @LANGUAGE@
          -- pragmas in the modules source code
        ms_hspp_buf     :: Maybe StringBuffer
          -- ^ The actual preprocessed source, if we have it
     }
-- | The installed-module identity of the summarised module (the first
-- component of 'splitModuleInsts' applied to 'ms_mod').
ms_installed_mod :: ModSummary -> InstalledModule
ms_installed_mod ms = fst (splitModuleInsts (ms_mod ms))
-- | The 'ModuleName' of the summarised module.
ms_mod_name :: ModSummary -> ModuleName
ms_mod_name ms = moduleName (ms_mod ms)
-- | All textual imports of the module, plus any additional module
-- dependencies implied by the module's own flags ('dynFlagDependencies');
-- the latter carry no package qualifier and no source location.
ms_imps :: ModSummary -> [(Maybe FastString, Located ModuleName)]
ms_imps ms = ms_textual_imps ms ++ flag_imps
  where
    flag_imps = [ (Nothing, noLoc mod_nm)
                | mod_nm <- dynFlagDependencies (ms_hspp_opts ms) ]
-- The ModLocation contains both the original source filename and the
-- filename of the cleaned-up source file after all preprocessing has been
-- done. The point is that the summariser will have to cpp/unlit/whatever
-- all files anyway, and there's no point in doing this twice -- just
-- park the result in a temp file, put the name of it in the location,
-- and let @compile@ read from that file on the way back up.
-- The ModLocation is stable over successive up-sweeps in GHCi, wheres
-- the ms_hs_date and imports can, of course, change
msHsFilePath, msHiFilePath, msObjFilePath :: ModSummary -> FilePath
-- Paths of the (preprocessed) source, interface, and object files,
-- all read off the summary's 'ModLocation'.  The source path is a
-- panic if the location has no source file recorded.
msHsFilePath ms = expectJust "msHsFilePath" (ml_hs_file (ms_location ms))
msHiFilePath    = ml_hi_file  . ms_location
msObjFilePath   = ml_obj_file . ms_location
-- | Did this 'ModSummary' originate from a hs-boot file?
isBootSummary :: ModSummary -> Bool
isBootSummary ms = case ms_hsc_src ms of
                     HsBootFile -> True
                     _          -> False
-- Debug rendering only; shows a subset of the fields.
instance Outputable ModSummary where
   ppr ms
      = sep [text "ModSummary {",
             nest 3 (sep [text "ms_hs_date = " <> text (show (ms_hs_date ms)),
                          text "ms_mod =" <+> ppr (ms_mod ms)
                                <> text (hscSourceString (ms_hsc_src ms)) <> comma,
                          text "ms_textual_imps =" <+> ppr (ms_textual_imps ms),
                          text "ms_srcimps =" <+> ppr (ms_srcimps ms)]),
             char '}'
            ]
-- | One line of progress output for a module being compiled, of the shape
-- @Foo[extra] ( Foo.hs, Foo.o )@.  With 'Opt_HideSourcePaths' set, only the
-- module name is shown.  The second component names either the object file,
-- or "interpreted"/"nothing" depending on the target and the @recomp@ flag.
showModMsg :: DynFlags -> HscTarget -> Bool -> ModSummary -> String
showModMsg dflags target recomp mod_summary = showSDoc dflags $
   if gopt Opt_HideSourcePaths dflags
      then text mod_str
      else hsep
         -- Pad the module name to (at least) 16 columns so the file names
         -- of successive lines align.
         [ text (mod_str ++ replicate (max 0 (16 - length mod_str)) ' ')
         , char '('
         , text (op $ msHsFilePath mod_summary) <> char ','
         , case target of
              HscInterpreted | recomp -> text "interpreted"
              HscNothing              -> text "nothing"
              _                       -> text (op $ msObjFilePath mod_summary)
         , char ')'
         ]
  where
    op      = normalise  -- normalise the file paths for display
    mod     = moduleName (ms_mod mod_summary)
    mod_str = showPpr dflags mod ++ hscSourceString (ms_hsc_src mod_summary)
{-
************************************************************************
*                                                                      *
\subsection{Recompilation}
*                                                                      *
************************************************************************
-}
-- | Indicates whether a given module's source has been modified since it
-- was last compiled.  The three values form a ladder of increasingly
-- strong "nothing to do" guarantees.
data SourceModified
  = SourceModified
       -- ^ the source has been modified
  | SourceUnmodified
       -- ^ the source has not been modified.  Compilation may or may
       -- not be necessary, depending on whether any dependencies have
       -- changed since we last compiled.
  | SourceUnmodifiedAndStable
       -- ^ the source has not been modified, and furthermore all of
       -- its (transitive) dependencies are up to date; it definitely
       -- does not need to be recompiled.  This is important for two
       -- reasons: (a) we can omit the version check in checkOldIface,
       -- and (b) if the module used TH splices we don't need to force
       -- recompilation.
{-
************************************************************************
* *
\subsection{Hpc Support}
* *
************************************************************************
-}
-- | Information about a module's use of Haskell Program Coverage
data HpcInfo
  = HpcInfo                 -- ^ This module was compiled with HPC
     { hpcInfoTickCount :: Int  -- ^ number of ticks in the module
     , hpcInfoHash      :: Int  -- ^ hash identifying this module's tick layout
     }
  | NoHpcInfo               -- ^ This module was /not/ compiled with HPC
     { hpcUsed          :: AnyHpcUsage -- ^ Is hpc used anywhere on the module \*tree\*?
     }
-- | This is used to signal if one of my imports used HPC instrumentation
-- even if there is no module-local HPC usage
type AnyHpcUsage = Bool

-- | HPC info for a module with no local instrumentation; the argument
-- records whether anything in the import tree used HPC.
emptyHpcInfo :: AnyHpcUsage -> HpcInfo
emptyHpcInfo = NoHpcInfo
-- | Find out if HPC is used by this module or any of the modules
-- it depends upon
isHpcUsed :: HpcInfo -> AnyHpcUsage
isHpcUsed info =
  case info of
    HpcInfo {}                   -> True
    NoHpcInfo { hpcUsed = used } -> used
{-
************************************************************************
* *
\subsection{Vectorisation Support}
* *
************************************************************************
The following information is generated and consumed by the vectorisation
subsystem. It communicates the vectorisation status of declarations from one
module to another.
Why do we need both f and f_v in the ModGuts/ModDetails/EPS version VectInfo
below? We need to know `f' when converting to IfaceVectInfo. However, during
vectorisation, we need to know `f_v', whose `Var' we cannot lookup based
on just the OccName easily in a Core pass.
-}
-- |Vectorisation information for 'ModGuts', 'ModDetails' and 'ExternalPackageState'; see also
-- documentation at 'Vectorise.Env.GlobalEnv'.
--
-- NB: The following tables may also include 'Var's, 'TyCon's and 'DataCon's from imported modules,
--     which have been subsequently vectorised in the current module.
--
-- Each mapping pairs an original entity with its vectorised variant,
-- keyed on the original.
data VectInfo
  = VectInfo
    { vectInfoVar            :: DVarEnv (Var    , Var    )    -- ^ @(f, f_v)@ keyed on @f@
    , vectInfoTyCon          :: NameEnv (TyCon  , TyCon)      -- ^ @(T, T_v)@ keyed on @T@
    , vectInfoDataCon        :: NameEnv (DataCon, DataCon)    -- ^ @(C, C_v)@ keyed on @C@
    , vectInfoParallelVars   :: DVarSet                       -- ^ set of parallel variables
    , vectInfoParallelTyCons :: NameSet                       -- ^ set of parallel type constructors
    }
-- |Vectorisation information for 'ModIface'; i.e, the vectorisation information propagated
-- across module boundaries.
--
-- NB: The field 'ifaceVectInfoVar' explicitly contains the workers of data constructors as well as
--     class selectors — i.e., their mappings are /not/ implicitly generated from the data types.
--     Moreover, whether the worker of a data constructor is in 'ifaceVectInfoVar' determines
--     whether that data constructor was vectorised (or is part of an abstractly vectorised type
--     constructor).
--
-- Unlike 'VectInfo', these fields carry only 'Name's; the vectorised
-- counterparts are recovered by the naming conventions described below.
data IfaceVectInfo
  = IfaceVectInfo
    { ifaceVectInfoVar            :: [Name]  -- ^ All variables in here have a vectorised variant
    , ifaceVectInfoTyCon          :: [Name]  -- ^ All 'TyCon's in here have a vectorised variant;
                                             -- the name of the vectorised variant and those of its
                                             -- data constructors are determined by
                                             -- 'OccName.mkVectTyConOcc' and
                                             -- 'OccName.mkVectDataConOcc'; the names of the
                                             -- isomorphisms are determined by 'OccName.mkVectIsoOcc'
    , ifaceVectInfoTyConReuse     :: [Name]  -- ^ The vectorised form of all the 'TyCon's in here
                                             -- coincides with the unconverted form; the name of the
                                             -- isomorphisms is determined by 'OccName.mkVectIsoOcc'
    , ifaceVectInfoParallelVars   :: [Name]  -- iface version of 'vectInfoParallelVar'
    , ifaceVectInfoParallelTyCons :: [Name]  -- iface version of 'vectInfoParallelTyCon'
    }
-- | A 'VectInfo' with every table empty.
noVectInfo :: VectInfo
noVectInfo = VectInfo
  { vectInfoVar            = emptyDVarEnv
  , vectInfoTyCon          = emptyNameEnv
  , vectInfoDataCon        = emptyNameEnv
  , vectInfoParallelVars   = emptyDVarSet
  , vectInfoParallelTyCons = emptyNameSet
  }
-- | Combine two 'VectInfo's by unioning each table pairwise.
plusVectInfo :: VectInfo -> VectInfo -> VectInfo
plusVectInfo vi1 vi2 = VectInfo
  { vectInfoVar            = vectInfoVar            vi1 `plusDVarEnv`  vectInfoVar            vi2
  , vectInfoTyCon          = vectInfoTyCon          vi1 `plusNameEnv`  vectInfoTyCon          vi2
  , vectInfoDataCon        = vectInfoDataCon        vi1 `plusNameEnv`  vectInfoDataCon        vi2
  , vectInfoParallelVars   = vectInfoParallelVars   vi1 `unionDVarSet` vectInfoParallelVars   vi2
  , vectInfoParallelTyCons = vectInfoParallelTyCons vi1 `unionNameSet` vectInfoParallelTyCons vi2
  }
-- | Union a whole list of 'VectInfo's; the empty list yields 'noVectInfo'.
concatVectInfo :: [VectInfo] -> VectInfo
concatVectInfo vis = foldr plusVectInfo noVectInfo vis
-- | An 'IfaceVectInfo' with every list empty.
noIfaceVectInfo :: IfaceVectInfo
noIfaceVectInfo = IfaceVectInfo
  { ifaceVectInfoVar             = []
  , ifaceVectInfoTyCon           = []
  , ifaceVectInfoTyConReuse      = []
  , ifaceVectInfoParallelVars    = []
  , ifaceVectInfoParallelTyCons  = []
  }
-- | Is this 'IfaceVectInfo' completely empty?
isNoIfaceVectInfo :: IfaceVectInfo -> Bool
isNoIfaceVectInfo (IfaceVectInfo vars tcs reuse pvars ptcs)
  = all null [vars, tcs, reuse, pvars, ptcs]
-- Debug rendering of the vectorisation tables, one labelled line each.
instance Outputable VectInfo where
  ppr info = vcat
    [ text "variables       :" <+> ppr (vectInfoVar            info)
    , text "tycons          :" <+> ppr (vectInfoTyCon          info)
    , text "datacons        :" <+> ppr (vectInfoDataCon        info)
    , text "parallel vars   :" <+> ppr (vectInfoParallelVars   info)
    , text "parallel tycons :" <+> ppr (vectInfoParallelTyCons info)
    ]
-- Debug rendering of the interface-level vectorisation tables.
instance Outputable IfaceVectInfo where
  ppr info = vcat
    [ text "variables       :" <+> ppr (ifaceVectInfoVar            info)
    , text "tycons          :" <+> ppr (ifaceVectInfoTyCon          info)
    , text "tycons reuse    :" <+> ppr (ifaceVectInfoTyConReuse     info)
    , text "parallel vars   :" <+> ppr (ifaceVectInfoParallelVars   info)
    , text "parallel tycons :" <+> ppr (ifaceVectInfoParallelTyCons info)
    ]
-- Fields are (de)serialised positionally, in declaration order; 'put_'
-- and 'get' must stay in sync with each other and with the data type.
instance Binary IfaceVectInfo where
    put_ bh (IfaceVectInfo a1 a2 a3 a4 a5) = do
        put_ bh a1
        put_ bh a2
        put_ bh a3
        put_ bh a4
        put_ bh a5
    get bh = do
        a1 <- get bh
        a2 <- get bh
        a3 <- get bh
        a4 <- get bh
        a5 <- get bh
        return (IfaceVectInfo a1 a2 a3 a4 a5)
{-
************************************************************************
* *
\subsection{Safe Haskell Support}
* *
************************************************************************
This stuff here is related to supporting the Safe Haskell extension,
primarily about storing under what trust type a module has been compiled.
-}
-- | Is an import a safe import?
type IsSafeImport = Bool

-- | Safe Haskell information for 'ModIface'
-- Simply a wrapper around SafeHaskellMode to separate iface and flags
newtype IfaceTrustInfo = TrustInfo SafeHaskellMode

-- | Unwrap the stored 'SafeHaskellMode'.
getSafeMode :: IfaceTrustInfo -> SafeHaskellMode
getSafeMode (TrustInfo x) = x

-- | Wrap a 'SafeHaskellMode' for storage in an interface.
setSafeMode :: SafeHaskellMode -> IfaceTrustInfo
setSafeMode = TrustInfo

-- | The default trust info: no Safe Haskell mode recorded.
noIfaceTrustInfo :: IfaceTrustInfo
noIfaceTrustInfo = setSafeMode Sf_None
-- | Encode the Safe Haskell mode as the byte stored in interface files.
-- Must stay in sync with 'numToTrustInfo'.
trustInfoToNum :: IfaceTrustInfo -> Word8
trustInfoToNum (TrustInfo mode) =
  case mode of
    Sf_None        -> 0
    Sf_Unsafe      -> 1
    Sf_Trustworthy -> 2
    Sf_Safe        -> 3
-- | Decode the interface-file byte written by 'trustInfoToNum'.
numToTrustInfo :: Word8 -> IfaceTrustInfo
numToTrustInfo n = case n of
  0 -> setSafeMode Sf_None
  1 -> setSafeMode Sf_Unsafe
  2 -> setSafeMode Sf_Trustworthy
  3 -> setSafeMode Sf_Safe
  4 -> setSafeMode Sf_Safe -- retained for backwards compat, used
                           -- to be Sf_SafeInfered but we no longer
                           -- differentiate.
  _ -> error $ "numToTrustInfo: bad input number! (" ++ show n ++ ")"
-- Human-readable rendering of the stored Safe Haskell mode.
instance Outputable IfaceTrustInfo where
    ppr (TrustInfo Sf_None)        = text "none"
    ppr (TrustInfo Sf_Unsafe)      = text "unsafe"
    ppr (TrustInfo Sf_Trustworthy) = text "trustworthy"
    ppr (TrustInfo Sf_Safe)        = text "safe"
-- The trust mode round-trips through the single byte defined by
-- 'trustInfoToNum' / 'numToTrustInfo'.
instance Binary IfaceTrustInfo where
    put_ bh iftrust = putByte bh $ trustInfoToNum iftrust
    -- Idiomatic fmap instead of @getByte bh >>= (return . numToTrustInfo)@.
    get bh = fmap numToTrustInfo (getByte bh)
{-
************************************************************************
* *
\subsection{Parser result}
* *
************************************************************************
-}
-- | The result of parsing a module's source, before renaming or
-- typechecking have taken place.
data HsParsedModule = HsParsedModule {
    hpm_module    :: Located (HsModule RdrName),
      -- ^ the parse tree itself, with its source span
    hpm_src_files :: [FilePath],
       -- ^ extra source files (e.g. from #includes).  The lexer collects
       -- these from '# <file> <line>' pragmas, which the C preprocessor
       -- leaves behind.  These files and their timestamps are stored in
       -- the .hi file, so that we can force recompilation if any of
       -- them change (#3589)
    hpm_annotations :: ApiAnns
      -- See note [Api annotations] in ApiAnnotation.hs
  }
{-
************************************************************************
* *
\subsection{Linkable stuff}
* *
************************************************************************
This stuff is in here, rather than (say) in Linker.hs, because the Linker.hs
stuff is the *dynamic* linker, and isn't present in a stage-1 compiler
-}
-- | Information we can use to dynamically link modules into the compiler
data Linkable = LM {
  linkableTime     :: UTCTime,          -- ^ Time at which this linkable was built
                                        -- (i.e. when the bytecodes were produced,
                                        --       or the mod date on the files)
  linkableModule   :: Module,           -- ^ The linkable module itself
  linkableUnlinked :: [Unlinked]
    -- ^ Those files and chunks of code we have yet to link.
    --
    -- INVARIANT: A valid linkable always has at least one 'Unlinked' item.
    -- If this list is empty, the Linkable represents a fake linkable, which
    -- is generated in HscNothing mode to avoid recompiling modules.
    --
    -- ToDo: Do items get removed from this list when they get linked?
 }
-- | Does this linkable consist entirely of on-disk objects, and is it
-- non-empty?
isObjectLinkable :: Linkable -> Bool
isObjectLinkable l = case linkableUnlinked l of
    []       -> False
    unlinked -> all isObject unlinked
  -- A linkable with no Unlinked's is treated as a BCO.  We can
  -- generate a linkable with no Unlinked's as a result of
  -- compiling a module in HscNothing mode, and this choice
  -- happens to work well with checkStability in module GHC.
-- | The paths of all object-file ('DotO') parts of this linkable.
linkableObjs :: Linkable -> [FilePath]
linkableObjs l = concatMap objPath (linkableUnlinked l)
  where
    objPath (DotO f) = [f]
    objPath _        = []
-- Debug rendering: build time, module, then the unlinked pieces nested below.
instance Outputable Linkable where
   ppr (LM when_made mod unlinkeds)
      = (text "LinkableM" <+> parens (text (show when_made)) <+> ppr mod)
        $$ nest 3 (ppr unlinkeds)
-------------------------------------------
-- | Objects which have yet to be linked by the compiler
data Unlinked
   = DotO FilePath      -- ^ An object file (.o)
   | DotA FilePath      -- ^ Static archive file (.a)
   | DotDLL FilePath    -- ^ Dynamically linked library file (.so, .dll, .dylib)
   | BCOs CompiledByteCode -- ^ A byte-code object, lives only in memory

#ifndef GHCI
-- Placeholder definitions used when the compiler is built without GHCi
-- support, so that code mentioning these types still compiles.
data CompiledByteCode = CompiledByteCodeUndefined
_unusedCompiledByteCode :: CompiledByteCode
_unusedCompiledByteCode = CompiledByteCodeUndefined

data ModBreaks = ModBreaksUndefined
emptyModBreaks :: ModBreaks
emptyModBreaks = ModBreaksUndefined
#endif
-- Debug rendering; the BCO case depends on whether the compiler was
-- built with GHCi support (no byte code exists without it).
instance Outputable Unlinked where
   ppr (DotO path)   = text "DotO" <+> text path
   ppr (DotA path)   = text "DotA" <+> text path
   ppr (DotDLL path) = text "DotDLL" <+> text path
#ifdef GHCI
   ppr (BCOs bcos)   = text "BCOs" <+> ppr bcos
#else
   ppr (BCOs _)      = text "No byte code"
#endif
-- | Is this an actual file on disk we can link in somehow?
isObject :: Unlinked -> Bool
isObject u = case u of
  DotO _   -> True
  DotA _   -> True
  DotDLL _ -> True
  _        -> False
-- | Is this a bytecode linkable with no file on disk?
isInterpretable :: Unlinked -> Bool
isInterpretable u = not (isObject u)
-- | Retrieve the filename of the linkable if possible. Panic if it is a byte-code object
nameOfObject :: Unlinked -> FilePath
nameOfObject u = case u of
  DotO fn   -> fn
  DotA fn   -> fn
  DotDLL fn -> fn
  other     -> pprPanic "nameOfObject" (ppr other)
-- | Retrieve the compiled byte-code if possible. Panic if it is a file-based linkable
byteCodeOfObject :: Unlinked -> CompiledByteCode
byteCodeOfObject u = case u of
  BCOs bc -> bc
  other   -> pprPanic "byteCodeOfObject" (ppr other)
|
mettekou/ghc
|
compiler/main/HscTypes.hs
|
bsd-3-clause
| 124,442
| 0
| 21
| 37,626
| 15,805
| 8,817
| 6,988
| -1
| -1
|
{-
(c) The University of Glasgow 2006
(c) The AQUA Project, Glasgow University, 1996-1998
TcHsSyn: Specialisations of the @HsSyn@ syntax for the typechecker
This module is an extension of @HsSyn@ syntax, for use in the type
checker.
-}
{-# LANGUAGE CPP, TupleSections #-}
module TcHsSyn (
mkHsConApp, mkHsDictLet, mkHsApp,
hsLitType, hsLPatType, hsPatType,
mkHsAppTy, mkSimpleHsAlt,
nlHsIntLit,
shortCutLit, hsOverLitName,
conLikeResTy,
-- * re-exported from TcMonad
TcId, TcIdSet,
-- * Zonking
-- | For a description of "zonking", see Note [What is zonking?]
-- in TcMType
zonkTopDecls, zonkTopExpr, zonkTopLExpr,
zonkTopBndrs, zonkTyBndrsX, zonkTyBinders,
emptyZonkEnv, mkEmptyZonkEnv,
zonkTcTypeToType, zonkTcTypeToTypes, zonkTyVarOcc,
zonkCoToCo, zonkTcKindToKind,
zonkEvBinds,
-- * Validity checking
checkForRepresentationPolymorphism
) where
#include "HsVersions.h"
import HsSyn
import Id
import TcRnMonad
import PrelNames
import TcType
import TcMType
import TcEvidence
import TysPrim
import TysWiredIn
import Type
import TyCoRep ( TyBinder(..) )
import TyCon
import Coercion
import ConLike
import DataCon
import Name
import Var
import VarSet
import VarEnv
import DynFlags
import Literal
import BasicTypes
import Maybes
import SrcLoc
import Bag
import Outputable
import Util
import UniqFM
import Control.Monad
import Data.List ( partition )
import Control.Arrow ( second )
{-
************************************************************************
* *
\subsection[mkFailurePair]{Code for pattern-matching and other failures}
* *
************************************************************************
Note: If @hsLPatType@ doesn't bear a strong resemblance to @exprType@,
then something is wrong.
-}
-- | Type of a located (type-checked) pattern; simply 'hsPatType' on the
-- payload, ignoring the source span.
hsLPatType :: OutPat Id -> Type
hsLPatType = hsPatType . unLoc
-- | Compute the type of a (type-checked) pattern; see the note above about
-- its required resemblance to @exprType@.  Panics on pattern forms that
-- should have been eliminated by the typechecker.
hsPatType :: Pat Id -> Type
hsPatType (ParPat pat)                = hsLPatType pat
hsPatType (WildPat ty)                = ty
hsPatType (VarPat (L _ var))          = idType var
hsPatType (BangPat pat)               = hsLPatType pat
hsPatType (LazyPat pat)               = hsLPatType pat
hsPatType (LitPat lit)                = hsLitType lit
hsPatType (AsPat var _)               = idType (unLoc var)
hsPatType (ViewPat _ _ ty)            = ty
hsPatType (ListPat _ ty Nothing)      = mkListTy ty
hsPatType (ListPat _ _ (Just (ty,_))) = ty
hsPatType (PArrPat _ ty)              = mkPArrTy ty
hsPatType (TuplePat _ bx tys)         = mkTupleTy bx tys
hsPatType (ConPatOut { pat_con = L _ con, pat_arg_tys = tys })
                                      = conLikeResTy con tys
hsPatType (SigPatOut _ ty)            = ty
hsPatType (NPat _ _ _ ty)             = ty
hsPatType (NPlusKPat _ _ _ _ _ ty)    = ty
hsPatType (CoPat _ _ ty)              = ty
hsPatType p                           = pprPanic "hsPatType" (ppr p)
-- | The (monomorphic) type of a Haskell literal.  For 'HsInteger' and
-- 'HsRat' the type is carried in the literal itself.
hsLitType :: HsLit -> TcType
hsLitType (HsChar _ _)       = charTy
hsLitType (HsCharPrim _ _)   = charPrimTy
hsLitType (HsString _ _)     = stringTy
hsLitType (HsStringPrim _ _) = addrPrimTy
hsLitType (HsInt _ _)        = intTy
hsLitType (HsIntPrim _ _)    = intPrimTy
hsLitType (HsWordPrim _ _)   = wordPrimTy
hsLitType (HsInt64Prim _ _)  = int64PrimTy
hsLitType (HsWord64Prim _ _) = word64PrimTy
hsLitType (HsInteger _ _ ty) = ty
hsLitType (HsRat _ ty)       = ty
hsLitType (HsFloatPrim _)    = floatPrimTy
hsLitType (HsDoublePrim _)   = doublePrimTy
-- Overloaded literals. Here mainly because it uses isIntTy etc
shortCutLit :: DynFlags -> OverLitVal -> TcType -> Maybe (HsExpr TcId)
-- ^ Try to replace an overloaded literal whose type is already known by a
-- plain literal expression.  'Nothing' means no shortcut applies and the
-- normal overloaded-literal machinery must be used.
shortCutLit dflags (HsIntegral src i) ty
  | isIntTy ty  && inIntRange  dflags i = Just (HsLit (HsInt src i))
  | isWordTy ty && inWordRange dflags i = Just (mkLit wordDataCon (HsWordPrim src i))
  | isIntegerTy ty                      = Just (HsLit (HsInteger src i ty))
  | otherwise                           = shortCutLit dflags (HsFractional (integralFractionalLit i)) ty
        -- The 'otherwise' case is important
        -- Consider (3 :: Float).  Syntactically it looks like an IntLit,
        -- so we'll call shortCutIntLit, but of course it's a float
        -- This can make a big difference for programs with a lot of
        -- literals, compiled without -O

shortCutLit _ (HsFractional f) ty
  | isFloatTy ty  = Just (mkLit floatDataCon  (HsFloatPrim f))
  | isDoubleTy ty = Just (mkLit doubleDataCon (HsDoublePrim f))
  | otherwise     = Nothing

shortCutLit _ (HsIsString src s) ty
  | isStringTy ty = Just (HsLit (HsString src s))
  | otherwise     = Nothing
-- | Apply a data constructor's wrapper Id to a literal argument.
mkLit :: DataCon -> HsLit -> HsExpr Id
mkLit con lit = HsApp wrap_fn (nlHsLit lit)
  where
    wrap_fn = nlHsVar (dataConWrapId con)
------------------------------
hsOverLitName :: OverLitVal -> Name
-- Get the canonical 'fromX' name for a particular OverLitVal
hsOverLitName lit = case lit of
  HsIntegral {}   -> fromIntegerName
  HsFractional {} -> fromRationalName
  HsIsString {}   -> fromStringName
{-
************************************************************************
* *
\subsection[BackSubst-HsBinds]{Running a substitution over @HsBinds@}
* *
************************************************************************
The rest of the zonking is done *after* typechecking.
The main zonking pass runs over the bindings
a) to convert TcTyVars to TyVars etc, dereferencing any bindings etc
b) convert unbound TcTyVar to Void
c) convert each TcId to an Id by zonking its type
The type variables are converted by binding mutable tyvars to immutable ones
and then zonking as normal.
The Ids are converted by binding them in the normal Tc envt; that
way we maintain sharing; eg an Id is zonked at its binding site and
all occurrences of that Id point to the common zonked copy
It's all pretty boring stuff, because HsSyn is such a large type, and
the environment manipulation is tiresome.
-}
-- Confused by zonking? See Note [What is zonking?] in TcMType.
type UnboundTyVarZonker = TcTyVar -> TcM Type
        -- How to zonk an unbound type variable: given the variable,
        -- produce the Type to use in its place.
        -- See Note [Zonking the LHS of a RULE]
-- | A ZonkEnv carries around several bits.
-- The UnboundTyVarZonker just zaps unbound meta-tyvars to Any (as
-- defined in zonkTypeZapping), except on the LHS of rules. See
-- Note [Zonking the LHS of a RULE].
--
-- The (TyCoVarEnv TyVar) is just an optimisation: when binding a
-- tyvar or covar, we zonk the kind right away and add a mapping to
-- the env. This prevents re-zonking the kind at every occurrence. But
-- this is *just* an optimisation.
--
-- The final (IdEnv Var) optimises zonking for Ids. It is
-- knot-tied. We must be careful never to put coercion variables
-- (which are Ids, after all) in the knot-tied env, because coercions
-- can appear in types, and we sometimes inspect a zonked type in this
-- module.
--
-- Confused by zonking? See Note [What is zonking?] in TcMType.
-- | Carries everything needed while zonking; see the comment block above
-- for the role of each field.
data ZonkEnv
  = ZonkEnv
      UnboundTyVarZonker   -- How to deal with an unbound meta-tyvar
      (TyCoVarEnv TyVar)   -- Zonked type/coercion variable binders
      (IdEnv    Var)       -- What variables are in scope
        -- Maps an Id or EvVar to its zonked version; both have the same Name
        -- Note that all evidence (coercion variables as well as dictionaries)
        --      are kept in the ZonkEnv
        -- Only *type* abstraction is done by side effect
        -- Is only consulted lazily; hence knot-tying
instance Outputable ZonkEnv where
  -- Only the Id environment is printed; it is the interesting part.
  ppr (ZonkEnv _ _ty_env var_env) = pprUFM var_env (vcat . map ppr)
-- The EvBinds have to already be zonked, but that's usually the case.
-- | A ZonkEnv with no bindings, using the default strategy
-- ('zonkTypeZapping') for unbound meta type variables.
emptyZonkEnv :: ZonkEnv
emptyZonkEnv = mkEmptyZonkEnv zonkTypeZapping
-- | Build a ZonkEnv with empty variable maps and the given strategy
-- for unbound type variables.
mkEmptyZonkEnv :: UnboundTyVarZonker -> ZonkEnv
mkEmptyZonkEnv zonk_unbound_tv = ZonkEnv zonk_unbound_tv emptyVarEnv emptyVarEnv
-- | Extend the knot-tied environment with the given variables.
extendIdZonkEnvRec :: ZonkEnv -> [Var] -> ZonkEnv
extendIdZonkEnvRec (ZonkEnv zonk_ty ty_env id_env) vs
  = ZonkEnv zonk_ty ty_env (extendVarEnvList id_env self_pairs)
  where
    -- NB: do not inspect each var to decide which env to extend; doing
    -- so would end up knot-tying all the env'ts.
    self_pairs = map (\v -> (v, v)) vs
    -- Coercion variables may actually end up in here too.  That is OK:
    -- coercion variables are never looked up in the knot-tied env't, so
    -- zonking them simply doesn't get optimised.  No one gets hurt.
    -- An improvement (?) would be SCC analysis in zonkEvBinds so only
    -- genuinely recursive groups are knot-tied, but the analysis may
    -- well cost more than it saves.
-- | Extend the (non-knot-tied) environment with a mixed list of
-- variables, sorting type/coercion variables and Ids into their maps.
extendZonkEnv :: ZonkEnv -> [Var] -> ZonkEnv
extendZonkEnv (ZonkEnv zonk_ty tyco_env id_env) vars
  = ZonkEnv zonk_ty
            (extendVarEnvList tyco_env (self_mapped tycovars))
            (extendVarEnvList id_env   (self_mapped ids))
  where
    (tycovars, ids) = partition isTyCoVar vars
    self_mapped vs  = [ (v, v) | v <- vs ]
-- | Record a single (already-zonked) Id in the environment.
extendIdZonkEnv1 :: ZonkEnv -> Var -> ZonkEnv
extendIdZonkEnv1 (ZonkEnv zonk_ty ty_env id_env) v
  = ZonkEnv zonk_ty ty_env (extendVarEnv id_env v v)
-- | Record a single (already-zonked) type variable in the environment.
extendTyZonkEnv1 :: ZonkEnv -> TyVar -> ZonkEnv
extendTyZonkEnv1 (ZonkEnv zonk_ty ty_env id_env) tv
  = ZonkEnv zonk_ty (extendVarEnv ty_env tv tv) id_env
-- | Replace the unbound-tyvar zonking strategy, keeping the variable maps.
setZonkType :: ZonkEnv -> UnboundTyVarZonker -> ZonkEnv
setZonkType (ZonkEnv _ ty_env id_env) zonk_ty = ZonkEnv zonk_ty ty_env id_env
-- | All the Ids currently recorded in the environment's Id map.
zonkEnvIds :: ZonkEnv -> [Id]
zonkEnvIds (ZonkEnv _ _ id_env) = varEnvElts id_env
zonkIdOcc :: ZonkEnv -> TcId -> Id
-- Ids defined in this module should be in the envt;
-- ignore others.  (Actually, data constructors are also
-- not LocalVars, even when locally defined, but that is fine.)
-- (Also foreign-imported things aren't currently in the ZonkEnv;
--  that's ok because they don't need zonking.)
--
-- Actually, Template Haskell works in 'chunks' of declarations, and
-- an earlier chunk won't be in the 'env' that the zonking phase
-- carries around.  Instead it'll be in the tcg_gbl_env, already fully
-- zonked.  There's no point in looking it up there (except for error
-- checking), and it's not conveniently to hand; hence the simple
-- 'orElse' case in the LocalVar branch.
--
-- Even without template splices, in module Main, the checking of
-- 'main' is done as a separate chunk.
zonkIdOcc (ZonkEnv _zonk_ty _ty_env id_env) id
  | isLocalVar id = lookupVarEnv id_env id `orElse`
                    id   -- Not found: zonked in an earlier chunk (see above)
  | otherwise     = id
-- | Zonk every occurrence in the list with 'zonkIdOcc'.
zonkIdOccs :: ZonkEnv -> [TcId] -> [Id]
zonkIdOccs env = map (zonkIdOcc env)
-- zonkIdBndr is used *after* typechecking to get the Id's type
-- to its final form.  The ZonkEnv supplies the zonked versions of
-- any type variables free in that type.
zonkIdBndr :: ZonkEnv -> TcId -> TcM Id
zonkIdBndr env id
  = do ty' <- zonkTcTypeToType env (idType id)
       -- Binders may not have a representation-polymorphic type
       ensureNotRepresentationPolymorphic ty'
         (text "In the type of binder" <+> quotes (ppr id))
       return (setIdType id ty')
-- | Zonk a list of binders with 'zonkIdBndr'.
zonkIdBndrs :: ZonkEnv -> [TcId] -> TcM [Id]
zonkIdBndrs env = mapM (zonkIdBndr env)
-- | Zonk top-level binders, starting from the empty environment.
zonkTopBndrs :: [TcId] -> TcM [Id]
zonkTopBndrs = zonkIdBndrs emptyZonkEnv
-- | Zonk the selector binder of a record-field occurrence; the label
-- itself carries no types and is kept unchanged.
zonkFieldOcc :: ZonkEnv -> FieldOcc TcId -> TcM (FieldOcc Id)
zonkFieldOcc env (FieldOcc lbl sel)
  = do { sel' <- zonkIdBndr env sel
       ; return (FieldOcc lbl sel') }
-- | Zonk evidence binders left to right, threading the environment.
zonkEvBndrsX :: ZonkEnv -> [EvVar] -> TcM (ZonkEnv, [Var])
zonkEvBndrsX env vars = mapAccumLM zonkEvBndrX env vars
zonkEvBndrX :: ZonkEnv -> EvVar -> TcM (ZonkEnv, EvVar)
-- Works for dictionaries and coercions
-- Zonks the binder, then records it so later occurrences see it
zonkEvBndrX env var
  = do { var' <- zonkEvBndr env var
       ; return (extendZonkEnv env [var'], var') }
zonkEvBndr :: ZonkEnv -> EvVar -> TcM EvVar
-- Works for dictionaries and coercions
-- Does not extend the ZonkEnv
zonkEvBndr env var
  = do { let var_ty = varType var
       ; ty <-
           -- SCC so the cost of zonking evidence types is visible in profiles
           {-# SCC "zonkEvBndr_zonkTcTypeToType" #-}
           zonkTcTypeToType env var_ty
       ; return (setVarType var ty) }
-- | Zonk an occurrence of an evidence variable into an 'EvTerm'.
-- Coercion variables are zonked via their coercion; other evidence
-- Ids are simply looked up in the environment.
zonkEvVarOcc :: ZonkEnv -> EvVar -> TcM EvTerm
zonkEvVarOcc env v
  | isCoVar v = do { co <- zonkCoVarOcc env v
                   ; return (EvCoercion co) }
  | otherwise = return (EvId (zonkIdOcc env v))
-- | Zonk type-variable binders left to right, threading the environment.
zonkTyBndrsX :: ZonkEnv -> [TyVar] -> TcM (ZonkEnv, [TyVar])
zonkTyBndrsX env tvs = mapAccumLM zonkTyBndrX env tvs
zonkTyBndrX :: ZonkEnv -> TyVar -> TcM (ZonkEnv, TyVar)
-- This guarantees to return a TyVar (not a TcTyVar)
-- then we add it to the envt, so all occurrences are replaced
zonkTyBndrX env tv
  = ASSERT( isImmutableTyVar tv )
    do { ki <- zonkTcTypeToType env (tyVarKind tv)
               -- Internal names tidy up better, for iface files.
       ; let tv' = mkTyVar (tyVarName tv) ki
       ; return (extendTyZonkEnv1 env tv', tv') }
-- | Zonk a telescope of type binders, threading the environment.
zonkTyBinders :: ZonkEnv -> [TcTyBinder] -> TcM (ZonkEnv, [TyBinder])
zonkTyBinders env bndrs = mapAccumLM zonkTyBinder env bndrs
-- | Zonk a single type binder.  Anonymous binders just have their type
-- zonked; named binders go through 'zonkTyBndrX', extending the env.
zonkTyBinder :: ZonkEnv -> TcTyBinder -> TcM (ZonkEnv, TyBinder)
zonkTyBinder env (Anon ty)
  = do { ty' <- zonkTcTypeToType env ty
       ; return (env, Anon ty') }
zonkTyBinder env (Named tv vis)
  = do { (env', tv') <- zonkTyBndrX env tv
       ; return (env', Named tv' vis) }
-- | Zonk a top-level expression, starting from the empty environment.
zonkTopExpr :: HsExpr TcId -> TcM (HsExpr Id)
zonkTopExpr = zonkExpr emptyZonkEnv
-- | Zonk a top-level located expression, starting from the empty environment.
zonkTopLExpr :: LHsExpr TcId -> TcM (LHsExpr Id)
zonkTopLExpr = zonkLExpr emptyZonkEnv
-- | Zonk everything at the top level of a module: evidence bindings,
-- value bindings, rules, vectorisation decls, imported SPECIALISE
-- pragmas and foreign exports.  Also returns all zonked top-level Ids.
zonkTopDecls :: Bag EvBind
             -> LHsBinds TcId
             -> [LRuleDecl TcId] -> [LVectDecl TcId] -> [LTcSpecPrag] -> [LForeignDecl TcId]
             -> TcM ([Id],
                     Bag EvBind,
                     LHsBinds Id,
                     [LForeignDecl Id],
                     [LTcSpecPrag],
                     [LRuleDecl Id],
                     [LVectDecl Id])
zonkTopDecls ev_binds binds rules vects imp_specs fords
  = do  { (env1, ev_binds') <- zonkEvBinds emptyZonkEnv ev_binds
        ; (env2, binds') <- zonkRecMonoBinds env1 binds
                        -- Top level is implicitly recursive
        ; rules' <- zonkRules env2 rules
        ; vects' <- zonkVects env2 vects
        ; specs' <- zonkLTcSpecPrags env2 imp_specs
        ; fords' <- zonkForeignExports env2 fords
        ; return (zonkEnvIds env2, ev_binds', binds', fords', specs', rules', vects') }
---------------------------------------------
-- | Zonk local (let/where) bindings.  Returns an environment extended
-- with the binders, for use when zonking whatever is in their scope.
zonkLocalBinds :: ZonkEnv -> HsLocalBinds TcId -> TcM (ZonkEnv, HsLocalBinds Id)
zonkLocalBinds env EmptyLocalBinds
  = return (env, EmptyLocalBinds)
zonkLocalBinds _ (HsValBinds (ValBindsIn {}))
  = panic "zonkLocalBinds" -- Not in typechecker output
zonkLocalBinds env (HsValBinds (ValBindsOut binds sigs))
  = do  { (env1, new_binds) <- go env binds
        ; return (env1, HsValBinds (ValBindsOut new_binds sigs)) }
  where
    -- Zonk each (RecFlag, binds) group in turn, threading the env
    go env []
      = return (env, [])
    go env ((r,b):bs)
      = do { (env1, b')  <- zonkRecMonoBinds env b
           ; (env2, bs') <- go env1 bs
           ; return (env2, (r,b'):bs') }
zonkLocalBinds env (HsIPBinds (IPBinds binds dict_binds)) = do
    new_binds <- mapM (wrapLocM zonk_ip_bind) binds
    let
        env1 = extendIdZonkEnvRec env [ n | L _ (IPBind (Right n) _) <- new_binds]
    (env2, new_dict_binds) <- zonkTcEvBinds env1 dict_binds
    return (env2, HsIPBinds (IPBinds new_binds new_dict_binds))
  where
    zonk_ip_bind (IPBind n e)
        = do n' <- mapIPNameTc (zonkIdBndr env) n
             e' <- zonkLExpr env e
             return (IPBind n' e')
---------------------------------------------
-- | Zonk a recursive group of bindings.  The environment is knot-tied
-- via 'fixM': it is extended with the *zonked* binders before the
-- right-hand sides are zonked, so occurrences share the zonked copies.
zonkRecMonoBinds :: ZonkEnv -> LHsBinds TcId -> TcM (ZonkEnv, LHsBinds Id)
zonkRecMonoBinds env binds
 = fixM (\ ~(_, new_binds) -> do
        { let env1 = extendIdZonkEnvRec env (collectHsBindsBinders new_binds)
        ; binds' <- zonkMonoBinds env1 binds
        ; return (env1, binds') })
---------------------------------------------
-- | Zonk a bag of bindings; the environment must already contain
-- their binders.
zonkMonoBinds :: ZonkEnv -> LHsBinds TcId -> TcM (LHsBinds Id)
zonkMonoBinds env = mapBagM (zonk_lbind env)
-- | Zonk a single located binding, preserving its source location.
zonk_lbind :: ZonkEnv -> LHsBind TcId -> TcM (LHsBind Id)
zonk_lbind env lbind = wrapLocM (zonk_bind env) lbind
-- Zonk one HsBind; one equation per binding form.  For AbsBinds the
-- value bindings are zonked in a knot-tied environment (fixM), just as
-- in zonkRecMonoBinds.
zonk_bind :: ZonkEnv -> HsBind TcId -> TcM (HsBind Id)
zonk_bind env bind@(PatBind { pat_lhs = pat, pat_rhs = grhss, pat_rhs_ty = ty})
  = do  { (_env, new_pat) <- zonkPat env pat            -- Env already extended
        ; new_grhss <- zonkGRHSs env zonkLExpr grhss
        ; new_ty    <- zonkTcTypeToType env ty
        ; return (bind { pat_lhs = new_pat, pat_rhs = new_grhss, pat_rhs_ty = new_ty }) }
zonk_bind env (VarBind { var_id = var, var_rhs = expr, var_inline = inl })
  = do { new_var  <- zonkIdBndr env var
       ; new_expr <- zonkLExpr env expr
       ; return (VarBind { var_id = new_var, var_rhs = new_expr, var_inline = inl }) }
zonk_bind env bind@(FunBind { fun_id = L loc var, fun_matches = ms
                            , fun_co_fn = co_fn })
  = do { new_var <- zonkIdBndr env var
       ; (env1, new_co_fn) <- zonkCoFn env co_fn
       ; new_ms <- zonkMatchGroup env1 zonkLExpr ms
       ; return (bind { fun_id = L loc new_var, fun_matches = new_ms
                      , fun_co_fn = new_co_fn }) }
zonk_bind env (AbsBinds { abs_tvs = tyvars, abs_ev_vars = evs
                        , abs_ev_binds = ev_binds
                        , abs_exports = exports
                        , abs_binds = val_binds })
  = ASSERT( all isImmutableTyVar tyvars )
    do { (env0, new_tyvars) <- zonkTyBndrsX env tyvars
       ; (env1, new_evs) <- zonkEvBndrsX env0 evs
       ; (env2, new_ev_binds) <- zonkTcEvBinds_s env1 ev_binds
         -- Knot-tie the inner bindings, as for a recursive group
       ; (new_val_bind, new_exports) <- fixM $ \ ~(new_val_binds, _) ->
         do { let env3 = extendIdZonkEnvRec env2
                           (collectHsBindsBinders new_val_binds)
            ; new_val_binds <- zonkMonoBinds env3 val_binds
            ; new_exports   <- mapM (zonkExport env3) exports
            ; return (new_val_binds, new_exports) }
       ; return (AbsBinds { abs_tvs = new_tyvars, abs_ev_vars = new_evs
                          , abs_ev_binds = new_ev_binds
                          , abs_exports = new_exports, abs_binds = new_val_bind }) }
  where
    zonkExport env (ABE{ abe_wrap = wrap
                       , abe_poly = poly_id
                       , abe_mono = mono_id, abe_prags = prags })
        = do new_poly_id <- zonkIdBndr env poly_id
             (_, new_wrap) <- zonkCoFn env wrap
             new_prags <- zonkSpecPrags env prags
             return (ABE{ abe_wrap = new_wrap
                        , abe_poly = new_poly_id
                        , abe_mono = zonkIdOcc env mono_id
                        , abe_prags = new_prags })
zonk_bind env outer_bind@(AbsBindsSig { abs_tvs         = tyvars
                                      , abs_ev_vars     = evs
                                      , abs_sig_export  = poly
                                      , abs_sig_prags   = prags
                                      , abs_sig_ev_bind = ev_bind
                                      , abs_sig_bind    = lbind })
  | L bind_loc bind@(FunBind { fun_id      = L loc local
                             , fun_matches = ms
                             , fun_co_fn   = co_fn }) <- lbind
  = ASSERT( all isImmutableTyVar tyvars )
    do { (env0, new_tyvars)  <- zonkTyBndrsX env  tyvars
       ; (env1, new_evs)     <- zonkEvBndrsX env0 evs
       ; (env2, new_ev_bind) <- zonkTcEvBinds env1 ev_bind
           -- Inline zonk_bind (FunBind ...) because we wish to skip
           -- the check for representation-polymorphic binders. The
           -- local binder in the FunBind in an AbsBindsSig is never actually
           -- bound in Core -- indeed, that's the whole point of AbsBindsSig.
           -- just calling zonk_bind causes #11405.
       ; new_local           <- updateVarTypeM (zonkTcTypeToType env2) local
       ; (env3, new_co_fn)   <- zonkCoFn env2 co_fn
       ; new_ms              <- zonkMatchGroup env3 zonkLExpr ms
           -- If there is a representation polymorphism problem, it will
           -- be caught here:
       ; new_poly_id <- zonkIdBndr env2 poly
       ; new_prags   <- zonkSpecPrags env2 prags
       ; let new_val_bind = L bind_loc (bind { fun_id      = L loc new_local
                                             , fun_matches = new_ms
                                             , fun_co_fn   = new_co_fn })
       ; return (AbsBindsSig { abs_tvs         = new_tyvars
                             , abs_ev_vars     = new_evs
                             , abs_sig_export  = new_poly_id
                             , abs_sig_prags   = new_prags
                             , abs_sig_ev_bind = new_ev_bind
                             , abs_sig_bind    = new_val_bind }) }
  | otherwise
  = pprPanic "zonk_bind" (ppr outer_bind)
zonk_bind env (PatSynBind bind@(PSB { psb_id = L loc id
                                    , psb_args = details
                                    , psb_def = lpat
                                    , psb_dir = dir }))
  = do { id' <- zonkIdBndr env id
       ; details' <- zonkPatSynDetails env details
       ; (env1, lpat') <- zonkPat env lpat
       ; (_env2, dir') <- zonkPatSynDir env1 dir
       ; return $ PatSynBind $
                  bind { psb_id = L loc id'
                       , psb_args = details'
                       , psb_def = lpat'
                       , psb_dir = dir' } }
-- | Zonk the located Id binders inside a pattern synonym's argument details.
zonkPatSynDetails :: ZonkEnv
                  -> HsPatSynDetails (Located TcId)
                  -> TcM (HsPatSynDetails (Located Id))
zonkPatSynDetails env details = traverse (wrapLocM (zonkIdBndr env)) details
-- | Zonk a pattern synonym's direction.  Only explicitly bidirectional
-- synonyms carry a match group that needs zonking.
zonkPatSynDir :: ZonkEnv -> HsPatSynDir TcId -> TcM (ZonkEnv, HsPatSynDir Id)
zonkPatSynDir env dir = case dir of
  Unidirectional           -> return (env, Unidirectional)
  ImplicitBidirectional    -> return (env, ImplicitBidirectional)
  ExplicitBidirectional mg -> do
    mg' <- zonkMatchGroup env zonkLExpr mg
    return (env, ExplicitBidirectional mg')
-- | Zonk the SPECIALISE pragmas attached to a binding.
zonkSpecPrags :: ZonkEnv -> TcSpecPrags -> TcM TcSpecPrags
zonkSpecPrags _   IsDefaultMethod = return IsDefaultMethod
zonkSpecPrags env (SpecPrags ps)  = SpecPrags <$> zonkLTcSpecPrags env ps
-- | Zonk a list of located SPECIALISE pragmas: the wrapper is zonked and
-- the pragma's Id occurrence is looked up in the environment.
zonkLTcSpecPrags :: ZonkEnv -> [LTcSpecPrag] -> TcM [LTcSpecPrag]
zonkLTcSpecPrags env prags = mapM zonk_prag prags
  where
    zonk_prag (L loc (SpecPrag id co_fn inl))
      = do { (_, co_fn') <- zonkCoFn env co_fn
           ; return (L loc (SpecPrag (zonkIdOcc env id) co_fn' inl)) }
{-
************************************************************************
* *
\subsection[BackSubst-Match-GRHSs]{Match and GRHSs}
* *
************************************************************************
-}
-- Zonk a match group; zBody says how to zonk the right-hand-side bodies
-- (expressions vs arrow commands).
zonkMatchGroup :: ZonkEnv
               -> (ZonkEnv -> Located (body TcId) -> TcM (Located (body Id)))
               -> MatchGroup TcId (Located (body TcId)) -> TcM (MatchGroup Id (Located (body Id)))
zonkMatchGroup env zBody (MG { mg_alts = L l ms, mg_arg_tys = arg_tys
                             , mg_res_ty = res_ty, mg_origin = origin })
  = do  { ms' <- mapM (zonkMatch env zBody) ms
        ; arg_tys' <- zonkTcTypeToTypes env arg_tys
        ; res_ty'  <- zonkTcTypeToType env res_ty
        ; return (MG { mg_alts = L l ms', mg_arg_tys = arg_tys'
                     , mg_res_ty = res_ty', mg_origin = origin }) }
-- Zonk a single match: patterns first (extending the env), then the
-- guarded RHSs.  The ignored third field of Match is replaced by Nothing.
zonkMatch :: ZonkEnv
          -> (ZonkEnv -> Located (body TcId) -> TcM (Located (body Id)))
          -> LMatch TcId (Located (body TcId)) -> TcM (LMatch Id (Located (body Id)))
zonkMatch env zBody (L loc (Match mf pats _ grhss))
  = do  { (env1, new_pats) <- zonkPats env pats
        ; new_grhss <- zonkGRHSs env1 zBody grhss
        ; return (L loc (Match mf new_pats Nothing new_grhss)) }
-------------------------------------------------------------------------
-- Zonk guarded right-hand sides.  The local binds are zonked first and
-- their env is in scope for every guard and body.
zonkGRHSs :: ZonkEnv
          -> (ZonkEnv -> Located (body TcId) -> TcM (Located (body Id)))
          -> GRHSs TcId (Located (body TcId)) -> TcM (GRHSs Id (Located (body Id)))
zonkGRHSs env zBody (GRHSs grhss (L l binds)) = do
    (new_env, new_binds) <- zonkLocalBinds env binds
    let
        zonk_grhs (GRHS guarded rhs)
          = do (env2, new_guarded) <- zonkStmts new_env zonkLExpr guarded
               new_rhs <- zBody env2 rhs
               return (GRHS new_guarded new_rhs)
    new_grhss <- mapM (wrapLocM zonk_grhs) grhss
    return (GRHSs new_grhss (L l new_binds))
{-
************************************************************************
* *
\subsection[BackSubst-HsExpr]{Running a zonkitution over a TypeCheckedExpr}
* *
************************************************************************
-}
-- | Zonk expressions: a list of located expressions, one located
-- expression, or one raw expression respectively.
zonkLExprs :: ZonkEnv -> [LHsExpr TcId] -> TcM [LHsExpr Id]
zonkLExpr  :: ZonkEnv -> LHsExpr TcId    -> TcM (LHsExpr Id)
zonkExpr   :: ZonkEnv -> HsExpr TcId     -> TcM (HsExpr Id)

zonkLExprs env = mapM (zonkLExpr env)
zonkLExpr  env = wrapLocM (zonkExpr env)
-- One equation per HsExpr constructor.  The environment is threaded
-- through forms that bind variables (wrappers, local binds, patterns,
-- rebindable-syntax operators); purely structural forms just recurse.
zonkExpr env (HsVar (L l id))
  = return (HsVar (L l (zonkIdOcc env id)))
zonkExpr _ (HsIPVar id)
  = return (HsIPVar id)
zonkExpr _ (HsOverLabel l)
  = return (HsOverLabel l)
zonkExpr env (HsLit (HsRat f ty))
  = do new_ty <- zonkTcTypeToType env ty
       return (HsLit (HsRat f new_ty))
zonkExpr _ (HsLit lit)
  = return (HsLit lit)
zonkExpr env (HsOverLit lit)
  = do  { lit' <- zonkOverLit env lit
        ; return (HsOverLit lit') }
zonkExpr env (HsLam matches)
  = do new_matches <- zonkMatchGroup env zonkLExpr matches
       return (HsLam new_matches)
zonkExpr env (HsLamCase matches)
  = do new_matches <- zonkMatchGroup env zonkLExpr matches
       return (HsLamCase new_matches)
zonkExpr env (HsApp e1 e2)
  = do new_e1 <- zonkLExpr env e1
       new_e2 <- zonkLExpr env e2
       return (HsApp new_e1 new_e2)
zonkExpr env (HsAppTypeOut e t)
  = do new_e <- zonkLExpr env e
       return (HsAppTypeOut new_e t)
       -- NB: the type is an HsType; can't zonk that!
zonkExpr _ e@(HsRnBracketOut _ _)
  = pprPanic "zonkExpr: HsRnBracketOut" (ppr e)
zonkExpr env (HsTcBracketOut body bs)
  = do bs' <- mapM zonk_b bs
       return (HsTcBracketOut body bs')
  where
    zonk_b (PendingTcSplice n e) = do e' <- zonkLExpr env e
                                      return (PendingTcSplice n e')
zonkExpr _ (HsSpliceE s) = WARN( True, ppr s ) -- Should not happen
                           return (HsSpliceE s)
zonkExpr env (OpApp e1 op fixity e2)
  = do new_e1 <- zonkLExpr env e1
       new_op <- zonkLExpr env op
       new_e2 <- zonkLExpr env e2
       return (OpApp new_e1 new_op fixity new_e2)
zonkExpr env (NegApp expr op)
  = do (env', new_op) <- zonkSyntaxExpr env op
       new_expr <- zonkLExpr env' expr
       return (NegApp new_expr new_op)
zonkExpr env (HsPar e)
  = do new_e <- zonkLExpr env e
       return (HsPar new_e)
zonkExpr env (SectionL expr op)
  = do new_expr <- zonkLExpr env expr
       new_op   <- zonkLExpr env op
       return (SectionL new_expr new_op)
zonkExpr env (SectionR op expr)
  = do new_op   <- zonkLExpr env op
       new_expr <- zonkLExpr env expr
       return (SectionR new_op new_expr)
zonkExpr env (ExplicitTuple tup_args boxed)
  = do { new_tup_args <- mapM zonk_tup_arg tup_args
       ; return (ExplicitTuple new_tup_args boxed) }
  where
    zonk_tup_arg (L l (Present e)) = do { e' <- zonkLExpr env e
                                        ; return (L l (Present e')) }
    zonk_tup_arg (L l (Missing t)) = do { t' <- zonkTcTypeToType env t
                                        ; return (L l (Missing t')) }
zonkExpr env (HsCase expr ms)
  = do new_expr <- zonkLExpr env expr
       new_ms <- zonkMatchGroup env zonkLExpr ms
       return (HsCase new_expr new_ms)
zonkExpr env (HsIf Nothing e1 e2 e3)
  = do new_e1 <- zonkLExpr env e1
       new_e2 <- zonkLExpr env e2
       new_e3 <- zonkLExpr env e3
       return (HsIf Nothing new_e1 new_e2 new_e3)
zonkExpr env (HsIf (Just fun) e1 e2 e3)
  = do (env1, new_fun) <- zonkSyntaxExpr env fun
       new_e1 <- zonkLExpr env1 e1
       new_e2 <- zonkLExpr env1 e2
       new_e3 <- zonkLExpr env1 e3
       return (HsIf (Just new_fun) new_e1 new_e2 new_e3)
zonkExpr env (HsMultiIf ty alts)
  = do { alts' <- mapM (wrapLocM zonk_alt) alts
       ; ty'   <- zonkTcTypeToType env ty
       ; return $ HsMultiIf ty' alts' }
  where zonk_alt (GRHS guard expr)
          = do { (env', guard') <- zonkStmts env zonkLExpr guard
               ; expr'          <- zonkLExpr env' expr
               ; return $ GRHS guard' expr' }
zonkExpr env (HsLet (L l binds) expr)
  = do (new_env, new_binds) <- zonkLocalBinds env binds
       new_expr <- zonkLExpr new_env expr
       return (HsLet (L l new_binds) new_expr)
zonkExpr env (HsDo do_or_lc (L l stmts) ty)
  = do (_, new_stmts) <- zonkStmts env zonkLExpr stmts
       new_ty <- zonkTcTypeToType env ty
       return (HsDo do_or_lc (L l new_stmts) new_ty)
zonkExpr env (ExplicitList ty wit exprs)
  = do (env1, new_wit) <- zonkWit env wit
       new_ty <- zonkTcTypeToType env1 ty
       new_exprs <- zonkLExprs env1 exprs
       return (ExplicitList new_ty new_wit new_exprs)
   where zonkWit env Nothing    = return (env, Nothing)
         zonkWit env (Just fln) = second Just <$> zonkSyntaxExpr env fln
zonkExpr env (ExplicitPArr ty exprs)
  = do new_ty <- zonkTcTypeToType env ty
       new_exprs <- zonkLExprs env exprs
       return (ExplicitPArr new_ty new_exprs)
zonkExpr env expr@(RecordCon { rcon_con_expr = con_expr, rcon_flds = rbinds })
  = do  { new_con_expr <- zonkExpr env con_expr
        ; new_rbinds   <- zonkRecFields env rbinds
        ; return (expr { rcon_con_expr = new_con_expr
                       , rcon_flds = new_rbinds }) }
zonkExpr env (RecordUpd { rupd_expr = expr, rupd_flds = rbinds
                        , rupd_cons = cons, rupd_in_tys = in_tys
                        , rupd_out_tys = out_tys, rupd_wrap = req_wrap })
  = do  { new_expr <- zonkLExpr env expr
        ; new_in_tys  <- mapM (zonkTcTypeToType env) in_tys
        ; new_out_tys <- mapM (zonkTcTypeToType env) out_tys
        ; new_rbinds  <- zonkRecUpdFields env rbinds
        ; (_, new_recwrap) <- zonkCoFn env req_wrap
        ; return (RecordUpd { rupd_expr = new_expr, rupd_flds = new_rbinds
                            , rupd_cons = cons, rupd_in_tys = new_in_tys
                            , rupd_out_tys = new_out_tys, rupd_wrap = new_recwrap }) }
zonkExpr env (ExprWithTySigOut e ty)
  = do { e' <- zonkLExpr env e
       ; return (ExprWithTySigOut e' ty) }
zonkExpr env (ArithSeq expr wit info)
  = do (env1, new_wit) <- zonkWit env wit
       -- NOTE(review): 'expr' is zonked with 'env', not 'env1'; if the
       -- witness wrapper bound skolems used in 'expr' they would be
       -- missed -- confirm this is intentional.
       new_expr <- zonkExpr env expr
       new_info <- zonkArithSeq env1 info
       return (ArithSeq new_expr new_wit new_info)
   where zonkWit env Nothing    = return (env, Nothing)
         zonkWit env (Just fln) = second Just <$> zonkSyntaxExpr env fln
zonkExpr env (PArrSeq expr info)
  = do new_expr <- zonkExpr env expr
       new_info <- zonkArithSeq env info
       return (PArrSeq new_expr new_info)
zonkExpr env (HsSCC src lbl expr)
  = do new_expr <- zonkLExpr env expr
       return (HsSCC src lbl new_expr)
zonkExpr env (HsTickPragma src info srcInfo expr)
  = do new_expr <- zonkLExpr env expr
       return (HsTickPragma src info srcInfo new_expr)
-- hdaume: core annotations
zonkExpr env (HsCoreAnn src lbl expr)
  = do new_expr <- zonkLExpr env expr
       return (HsCoreAnn src lbl new_expr)
-- arrow notation extensions
zonkExpr env (HsProc pat body)
  = do  { (env1, new_pat) <- zonkPat env pat
        ; new_body <- zonkCmdTop env1 body
        ; return (HsProc new_pat new_body) }
-- StaticPointers extension
zonkExpr env (HsStatic fvs expr)
  = HsStatic fvs <$> zonkLExpr env expr
zonkExpr env (HsWrap co_fn expr)
  = do (env1, new_co_fn) <- zonkCoFn env co_fn
       new_expr <- zonkExpr env1 expr
       return (HsWrap new_co_fn new_expr)
zonkExpr _ e@(HsUnboundVar {}) = return e
zonkExpr _ expr = pprPanic "zonkExpr" (ppr expr)
-------------------------------------------------------------------------
{-
Note [Skolems in zonkSyntaxExpr]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider rebindable syntax with something like
(>>=) :: (forall x. blah) -> (forall y. blah') -> blah''
The x and y become skolems that are in scope when type-checking the
arguments to the bind. This means that we must extend the ZonkEnv with
these skolems when zonking the arguments to the bind. But the skolems
are different between the two arguments, and so we should theoretically
carry around different environments to use for the different arguments.
However, this becomes a logistical nightmare, especially in dealing with
the more exotic Stmt forms. So, we simplify by making the critical
assumption that the uniques of the skolems are different. (This assumption
is justified by the use of newUnique in TcMType.instSkolTyCoVarX.)
Now, we can safely just extend one environment.
-}
-- See Note [Skolems in zonkSyntaxExpr]
-- The returned env includes any skolems bound by the wrappers, so it
-- must be used when zonking the operator's arguments.
zonkSyntaxExpr :: ZonkEnv -> SyntaxExpr TcId
               -> TcM (ZonkEnv, SyntaxExpr Id)
zonkSyntaxExpr env (SyntaxExpr { syn_expr      = expr
                               , syn_arg_wraps = arg_wraps
                               , syn_res_wrap  = res_wrap })
  = do { (env0, res_wrap')  <- zonkCoFn env res_wrap
       ; expr'              <- zonkExpr env0 expr
       ; (env1, arg_wraps') <- mapAccumLM zonkCoFn env0 arg_wraps
       ; return (env1, SyntaxExpr { syn_expr      = expr'
                                  , syn_arg_wraps = arg_wraps'
                                  , syn_res_wrap  = res_wrap' }) }
-------------------------------------------------------------------------
-- | Zonk arrow commands (located and raw forms).
zonkLCmd :: ZonkEnv -> LHsCmd TcId -> TcM (LHsCmd Id)
zonkCmd  :: ZonkEnv -> HsCmd TcId  -> TcM (HsCmd Id)

zonkLCmd env = wrapLocM (zonkCmd env)
-- One equation per HsCmd constructor; mirrors the zonkExpr traversal.
zonkCmd env (HsCmdWrap w cmd)
  = do { (env1, w') <- zonkCoFn env w
       ; cmd' <- zonkCmd env1 cmd
       ; return (HsCmdWrap w' cmd') }
zonkCmd env (HsCmdArrApp e1 e2 ty ho rl)
  = do new_e1 <- zonkLExpr env e1
       new_e2 <- zonkLExpr env e2
       new_ty <- zonkTcTypeToType env ty
       return (HsCmdArrApp new_e1 new_e2 new_ty ho rl)
zonkCmd env (HsCmdArrForm op fixity args)
  = do new_op <- zonkLExpr env op
       new_args <- mapM (zonkCmdTop env) args
       return (HsCmdArrForm new_op fixity new_args)
zonkCmd env (HsCmdApp c e)
  = do new_c <- zonkLCmd env c
       new_e <- zonkLExpr env e
       return (HsCmdApp new_c new_e)
zonkCmd env (HsCmdLam matches)
  = do new_matches <- zonkMatchGroup env zonkLCmd matches
       return (HsCmdLam new_matches)
zonkCmd env (HsCmdPar c)
  = do new_c <- zonkLCmd env c
       return (HsCmdPar new_c)
zonkCmd env (HsCmdCase expr ms)
  = do new_expr <- zonkLExpr env expr
       new_ms <- zonkMatchGroup env zonkLCmd ms
       return (HsCmdCase new_expr new_ms)
zonkCmd env (HsCmdIf eCond ePred cThen cElse)
  = do { (env1, new_eCond) <- zonkWit env eCond
       ; new_ePred <- zonkLExpr env1 ePred
       ; new_cThen <- zonkLCmd env1 cThen
       ; new_cElse <- zonkLCmd env1 cElse
       ; return (HsCmdIf new_eCond new_ePred new_cThen new_cElse) }
  where
    zonkWit env Nothing  = return (env, Nothing)
    zonkWit env (Just w) = second Just <$> zonkSyntaxExpr env w
zonkCmd env (HsCmdLet (L l binds) cmd)
  = do (new_env, new_binds) <- zonkLocalBinds env binds
       new_cmd <- zonkLCmd new_env cmd
       return (HsCmdLet (L l new_binds) new_cmd)
zonkCmd env (HsCmdDo (L l stmts) ty)
  = do (_, new_stmts) <- zonkStmts env zonkLCmd stmts
       new_ty <- zonkTcTypeToType env ty
       return (HsCmdDo (L l new_stmts) new_ty)
-- | Zonk a located top-level arrow command.
zonkCmdTop :: ZonkEnv -> LHsCmdTop TcId -> TcM (LHsCmdTop Id)
zonkCmdTop env = wrapLocM (zonk_cmd_top env)
-- Zonk a top-level command: the command itself, its stack and result
-- types, and the associated (name, expression) pairs.
zonk_cmd_top :: ZonkEnv -> HsCmdTop TcId -> TcM (HsCmdTop Id)
zonk_cmd_top env (HsCmdTop cmd stack_tys ty ids)
  = do new_cmd       <- zonkLCmd env cmd
       new_stack_tys <- zonkTcTypeToType env stack_tys
       new_ty        <- zonkTcTypeToType env ty
       new_ids       <- mapSndM (zonkExpr env) ids
       return (HsCmdTop new_cmd new_stack_tys new_ty new_ids)
-------------------------------------------------------------------------
-- Zonk an HsWrapper.  Binding forms (WpEvLam, WpTyLam, WpLet, and the
-- composition forms) return an extended environment; the others return
-- the environment unchanged.
zonkCoFn :: ZonkEnv -> HsWrapper -> TcM (ZonkEnv, HsWrapper)
zonkCoFn env WpHole   = return (env, WpHole)
zonkCoFn env (WpCompose c1 c2) = do { (env1, c1') <- zonkCoFn env c1
                                    ; (env2, c2') <- zonkCoFn env1 c2
                                    ; return (env2, WpCompose c1' c2') }
zonkCoFn env (WpFun c1 c2 t1)  = do { (env1, c1') <- zonkCoFn env c1
                                    ; (env2, c2') <- zonkCoFn env1 c2
                                    ; t1'         <- zonkTcTypeToType env2 t1
                                    ; return (env2, WpFun c1' c2' t1') }
zonkCoFn env (WpCast co)   = do { co' <- zonkCoToCo env co
                                ; return (env, WpCast co') }
zonkCoFn env (WpEvLam ev)  = do { (env', ev') <- zonkEvBndrX env ev
                                ; return (env', WpEvLam ev') }
zonkCoFn env (WpEvApp arg) = do { arg' <- zonkEvTerm env arg
                                ; return (env, WpEvApp arg') }
zonkCoFn env (WpTyLam tv)  = ASSERT( isImmutableTyVar tv )
                             do { (env', tv') <- zonkTyBndrX env tv
                                ; return (env', WpTyLam tv') }
zonkCoFn env (WpTyApp ty)  = do { ty' <- zonkTcTypeToType env ty
                                ; return (env, WpTyApp ty') }
zonkCoFn env (WpLet bs)    = do { (env1, bs') <- zonkTcEvBinds env bs
                                ; return (env1, WpLet bs') }
-------------------------------------------------------------------------
-- Zonk an overloaded literal: both its witness expression and its type.
zonkOverLit :: ZonkEnv -> HsOverLit TcId -> TcM (HsOverLit Id)
zonkOverLit env lit@(OverLit { ol_witness = e, ol_type = ty })
  = do  { ty' <- zonkTcTypeToType env ty
        ; e' <- zonkExpr env e
        ; return (lit { ol_witness = e', ol_type = ty' }) }
-------------------------------------------------------------------------
-- | Zonk the endpoint expressions of an arithmetic-sequence form,
-- preserving the constructor shape.
zonkArithSeq :: ZonkEnv -> ArithSeqInfo TcId -> TcM (ArithSeqInfo Id)
zonkArithSeq env info = case info of
    From e              -> From       <$> z e
    FromThen e1 e2      -> FromThen   <$> z e1 <*> z e2
    FromTo e1 e2        -> FromTo     <$> z e1 <*> z e2
    FromThenTo e1 e2 e3 -> FromThenTo <$> z e1 <*> z e2 <*> z e3
  where
    z = zonkLExpr env
-------------------------------------------------------------------------
-- Zonk a list of statements left to right, threading the environment so
-- that each statement sees binders introduced by the earlier ones.
zonkStmts :: ZonkEnv
          -> (ZonkEnv -> Located (body TcId) -> TcM (Located (body Id)))
          -> [LStmt TcId (Located (body TcId))] -> TcM (ZonkEnv, [LStmt Id (Located (body Id))])
zonkStmts env _ []     = return (env, [])
zonkStmts env zBody (s:ss) = do { (env1, s')  <- wrapLocSndM (zonkStmt env zBody) s
                                ; (env2, ss') <- zonkStmts env1 zBody ss
                                ; return (env2, s' : ss') }
zonkStmt :: ZonkEnv
-> (ZonkEnv -> Located (body TcId) -> TcM (Located (body Id)))
-> Stmt TcId (Located (body TcId)) -> TcM (ZonkEnv, Stmt Id (Located (body Id)))
zonkStmt env _ (ParStmt stmts_w_bndrs mzip_op bind_op bind_ty)
= do { (env1, new_bind_op) <- zonkSyntaxExpr env bind_op
; new_bind_ty <- zonkTcTypeToType env1 bind_ty
; new_stmts_w_bndrs <- mapM (zonk_branch env1) stmts_w_bndrs
; let new_binders = [b | ParStmtBlock _ bs _ <- new_stmts_w_bndrs, b <- bs]
env2 = extendIdZonkEnvRec env1 new_binders
; new_mzip <- zonkExpr env2 mzip_op
; return (env2, ParStmt new_stmts_w_bndrs new_mzip new_bind_op new_bind_ty) }
where
zonk_branch env1 (ParStmtBlock stmts bndrs return_op)
= do { (env2, new_stmts) <- zonkStmts env1 zonkLExpr stmts
; (env3, new_return) <- zonkSyntaxExpr env2 return_op
; return (ParStmtBlock new_stmts (zonkIdOccs env3 bndrs) new_return) }
zonkStmt env zBody (RecStmt { recS_stmts = segStmts, recS_later_ids = lvs, recS_rec_ids = rvs
, recS_ret_fn = ret_id, recS_mfix_fn = mfix_id
, recS_bind_fn = bind_id, recS_bind_ty = bind_ty
, recS_later_rets = later_rets, recS_rec_rets = rec_rets
, recS_ret_ty = ret_ty })
= do { (env1, new_bind_id) <- zonkSyntaxExpr env bind_id
; (env2, new_mfix_id) <- zonkSyntaxExpr env1 mfix_id
; (env3, new_ret_id) <- zonkSyntaxExpr env2 ret_id
; new_bind_ty <- zonkTcTypeToType env3 bind_ty
; new_rvs <- zonkIdBndrs env3 rvs
; new_lvs <- zonkIdBndrs env3 lvs
; new_ret_ty <- zonkTcTypeToType env3 ret_ty
; let env4 = extendIdZonkEnvRec env3 new_rvs
; (env5, new_segStmts) <- zonkStmts env4 zBody segStmts
-- Zonk the ret-expressions in an envt that
-- has the polymorphic bindings in the envt
; new_later_rets <- mapM (zonkExpr env5) later_rets
; new_rec_rets <- mapM (zonkExpr env5) rec_rets
; return (extendIdZonkEnvRec env3 new_lvs, -- Only the lvs are needed
RecStmt { recS_stmts = new_segStmts, recS_later_ids = new_lvs
, recS_rec_ids = new_rvs, recS_ret_fn = new_ret_id
, recS_mfix_fn = new_mfix_id, recS_bind_fn = new_bind_id
, recS_bind_ty = new_bind_ty
, recS_later_rets = new_later_rets
, recS_rec_rets = new_rec_rets, recS_ret_ty = new_ret_ty }) }
zonkStmt env zBody (BodyStmt body then_op guard_op ty)
= do (env1, new_then_op) <- zonkSyntaxExpr env then_op
(env2, new_guard_op) <- zonkSyntaxExpr env1 guard_op
new_body <- zBody env2 body
new_ty <- zonkTcTypeToType env2 ty
return (env2, BodyStmt new_body new_then_op new_guard_op new_ty)
zonkStmt env zBody (LastStmt body noret ret_op)
= do (env1, new_ret) <- zonkSyntaxExpr env ret_op
new_body <- zBody env1 body
return (env, LastStmt new_body noret new_ret)
zonkStmt env _ (TransStmt { trS_stmts = stmts, trS_bndrs = binderMap
, trS_by = by, trS_form = form, trS_using = using
, trS_ret = return_op, trS_bind = bind_op
, trS_bind_arg_ty = bind_arg_ty
, trS_fmap = liftM_op })
= do {
; (env1, bind_op') <- zonkSyntaxExpr env bind_op
; bind_arg_ty' <- zonkTcTypeToType env1 bind_arg_ty
; (env2, stmts') <- zonkStmts env1 zonkLExpr stmts
; by' <- fmapMaybeM (zonkLExpr env2) by
; using' <- zonkLExpr env2 using
; (env3, return_op') <- zonkSyntaxExpr env2 return_op
; binderMap' <- mapM (zonkBinderMapEntry env3) binderMap
; liftM_op' <- zonkExpr env3 liftM_op
; let env3' = extendIdZonkEnvRec env3 (map snd binderMap')
; return (env3', TransStmt { trS_stmts = stmts', trS_bndrs = binderMap'
, trS_by = by', trS_form = form, trS_using = using'
, trS_ret = return_op', trS_bind = bind_op'
, trS_bind_arg_ty = bind_arg_ty'
, trS_fmap = liftM_op' }) }
where
zonkBinderMapEntry env (oldBinder, newBinder) = do
let oldBinder' = zonkIdOcc env oldBinder
newBinder' <- zonkIdBndr env newBinder
return (oldBinder', newBinder')
zonkStmt env _ (LetStmt (L l binds))
= do (env1, new_binds) <- zonkLocalBinds env binds
return (env1, LetStmt (L l new_binds))
zonkStmt env zBody (BindStmt pat body bind_op fail_op bind_ty)
= do { (env1, new_bind) <- zonkSyntaxExpr env bind_op
; new_bind_ty <- zonkTcTypeToType env1 bind_ty
; new_body <- zBody env1 body
; (env2, new_pat) <- zonkPat env1 pat
; (_, new_fail) <- zonkSyntaxExpr env1 fail_op
; return (env2, BindStmt new_pat new_body new_bind new_fail new_bind_ty) }
-- Scopes: join > ops (in reverse order) > pats (in forward order)
-- > rest of stmts
zonkStmt env _zBody (ApplicativeStmt args mb_join body_ty)
= do { (env1, new_mb_join) <- zonk_join env mb_join
; (env2, new_args) <- zonk_args env1 args
; new_body_ty <- zonkTcTypeToType env2 body_ty
; return (env2, ApplicativeStmt new_args new_mb_join new_body_ty) }
where
zonk_join env Nothing = return (env, Nothing)
zonk_join env (Just j) = second Just <$> zonkSyntaxExpr env j
get_pat (_, ApplicativeArgOne pat _) = pat
get_pat (_, ApplicativeArgMany _ _ pat) = pat
replace_pat pat (op, ApplicativeArgOne _ a)
= (op, ApplicativeArgOne pat a)
replace_pat pat (op, ApplicativeArgMany a b _)
= (op, ApplicativeArgMany a b pat)
zonk_args env args
= do { (env1, new_args_rev) <- zonk_args_rev env (reverse args)
; (env2, new_pats) <- zonkPats env1 (map get_pat args)
; return (env2, zipWith replace_pat new_pats (reverse new_args_rev)) }
-- these need to go backward, because if any operators are higher-rank,
-- later operators may introduce skolems that are in scope for earlier
-- arguments
zonk_args_rev env ((op, arg) : args)
= do { (env1, new_op) <- zonkSyntaxExpr env op
; new_arg <- zonk_arg env1 arg
; (env2, new_args) <- zonk_args_rev env1 args
; return (env2, (new_op, new_arg) : new_args) }
zonk_args_rev env [] = return (env, [])
zonk_arg env (ApplicativeArgOne pat expr)
= do { new_expr <- zonkLExpr env expr
; return (ApplicativeArgOne pat new_expr) }
zonk_arg env (ApplicativeArgMany stmts ret pat)
= do { (env1, new_stmts) <- zonkStmts env zonkLExpr stmts
; new_ret <- zonkExpr env1 ret
; return (ApplicativeArgMany new_stmts new_ret pat) }
-------------------------------------------------------------------------
zonkRecFields :: ZonkEnv -> HsRecordBinds TcId -> TcM (HsRecordBinds TcId)
-- Zonk the field bindings of a record construction/pattern.
-- The dot-dot count is carried through unchanged.
zonkRecFields env (HsRecFields fields dotdot)
  = do { fields' <- mapM zonk_one fields
       ; return (HsRecFields fields' dotdot) }
  where
    -- Zonk one located field: the selector occurrence first,
    -- then the argument expression.
    zonk_one (L loc field)
      = do { lbl' <- wrapLocM (zonkFieldOcc env) (hsRecFieldLbl field)
           ; arg' <- zonkLExpr env (hsRecFieldArg field)
           ; return (L loc (field { hsRecFieldLbl = lbl'
                                  , hsRecFieldArg = arg' })) }
zonkRecUpdFields :: ZonkEnv -> [LHsRecUpdField TcId] -> TcM [LHsRecUpdField TcId]
-- Zonk the fields of a record update.  Like 'zonkRecFields', except
-- that the (by now resolved) update labels are re-wrapped with
-- 'ambiguousFieldOcc'.
zonkRecUpdFields env = mapM zonk_one
  where
    zonk_one (L loc field)
      = do { lbl' <- wrapLocM (zonkFieldOcc env) (hsRecUpdFieldOcc field)
           ; arg' <- zonkLExpr env (hsRecFieldArg field)
           ; return (L loc (field { hsRecFieldLbl = fmap ambiguousFieldOcc lbl'
                                  , hsRecFieldArg = arg' })) }
-------------------------------------------------------------------------
mapIPNameTc :: (a -> TcM b) -> Either (Located HsIPName) a
            -> TcM (Either (Located HsIPName) b)
-- Apply a monadic function to the 'Right' component of an
-- implicit-parameter binder; a 'Left' (literal IP name) passes
-- through unchanged.
mapIPNameTc _ (Left x)  = return (Left x)
mapIPNameTc f (Right x) = Right <$> f x
        -- Idiomatic replacement for "do r <- f x; return (Right r)"
{-
************************************************************************
* *
\subsection[BackSubst-Pats]{Patterns}
* *
************************************************************************
-}
zonkPat :: ZonkEnv -> OutPat TcId -> TcM (ZonkEnv, OutPat Id)
-- Zonk a located pattern.  The environment is threaded and returned
-- extended, because one pattern may bind something that is used in
-- another (inside it, or to its right).
zonkPat env = wrapLocSndM (zonk_pat env)
zonk_pat :: ZonkEnv -> Pat TcId -> TcM (ZonkEnv, Pat Id)
-- Zonk one pattern.  Returns the environment extended with any binders
-- the pattern introduces, so that siblings to the right can see them.
zonk_pat env (ParPat p)
  = do  { (env', p') <- zonkPat env p
        ; return (env', ParPat p') }

zonk_pat env (WildPat ty)
  = do  { ty' <- zonkTcTypeToType env ty
          -- A wildcard binds nothing, but its type must still not be
          -- representation-polymorphic
        ; ensureNotRepresentationPolymorphic ty'
            (text "In a wildcard pattern")
        ; return (env, WildPat ty') }

zonk_pat env (VarPat (L l v))
  = do  { v' <- zonkIdBndr env v
          -- The variable is a binder: extend the env with it
        ; return (extendIdZonkEnv1 env v', VarPat (L l v')) }

zonk_pat env (LazyPat pat)
  = do  { (env', pat') <- zonkPat env pat
        ; return (env', LazyPat pat') }

zonk_pat env (BangPat pat)
  = do  { (env', pat') <- zonkPat env pat
        ; return (env', BangPat pat') }

zonk_pat env (AsPat (L loc v) pat)
  = do  { v' <- zonkIdBndr env v
          -- NB: the as-bound variable scopes over the sub-pattern
        ; (env', pat') <- zonkPat (extendIdZonkEnv1 env v') pat
        ; return (env', AsPat (L loc v') pat') }

zonk_pat env (ViewPat expr pat ty)
  = do  { expr' <- zonkLExpr env expr
        ; (env', pat') <- zonkPat env pat
        ; ty' <- zonkTcTypeToType env ty
        ; return (env', ViewPat expr' pat' ty') }

zonk_pat env (ListPat pats ty Nothing)
  = do  { ty' <- zonkTcTypeToType env ty
        ; (env', pats') <- zonkPats env pats
        ; return (env', ListPat pats' ty' Nothing) }

zonk_pat env (ListPat pats ty (Just (ty2,wit)))
  = do  { (env', wit') <- zonkSyntaxExpr env wit
          -- The rebindable-syntax witness is zonked first; its env
          -- scopes over the element types and sub-patterns
        ; ty2' <- zonkTcTypeToType env' ty2
        ; ty' <- zonkTcTypeToType env' ty
        ; (env'', pats') <- zonkPats env' pats
        ; return (env'', ListPat pats' ty' (Just (ty2',wit'))) }

zonk_pat env (PArrPat pats ty)
  = do  { ty' <- zonkTcTypeToType env ty
        ; (env', pats') <- zonkPats env pats
        ; return (env', PArrPat pats' ty') }

zonk_pat env (TuplePat pats boxed tys)
  = do  { tys' <- mapM (zonkTcTypeToType env) tys
        ; (env', pats') <- zonkPats env pats
        ; return (env', TuplePat pats' boxed tys') }

zonk_pat env p@(ConPatOut { pat_arg_tys = tys, pat_tvs = tyvars
                          , pat_dicts = evs, pat_binds = binds
                          , pat_args = args, pat_wrap = wrapper })
  = ASSERT( all isImmutableTyVar tyvars )
    do  { new_tys <- mapM (zonkTcTypeToType env) tys
        ; (env0, new_tyvars) <- zonkTyBndrsX env tyvars
          -- Must zonk the existential variables, because their
          -- /kind/ need potential zonking.
          -- cf typecheck/should_compile/tc221.hs
        ; (env1, new_evs) <- zonkEvBndrsX env0 evs
        ; (env2, new_binds) <- zonkTcEvBinds env1 binds
        ; (env3, new_wrapper) <- zonkCoFn env2 wrapper
        ; (env', new_args) <- zonkConStuff env3 args
        ; return (env', p { pat_arg_tys = new_tys,
                            pat_tvs = new_tyvars,
                            pat_dicts = new_evs,
                            pat_binds = new_binds,
                            pat_args = new_args,
                            pat_wrap = new_wrapper}) }

zonk_pat env (LitPat lit) = return (env, LitPat lit)

zonk_pat env (SigPatOut pat ty)
  = do  { ty' <- zonkTcTypeToType env ty
        ; (env', pat') <- zonkPat env pat
        ; return (env', SigPatOut pat' ty') }

zonk_pat env (NPat (L l lit) mb_neg eq_expr ty)
  = do  { (env1, eq_expr') <- zonkSyntaxExpr env eq_expr
        ; (env2, mb_neg') <- case mb_neg of
            Nothing -> return (env1, Nothing)
            Just n  -> second Just <$> zonkSyntaxExpr env1 n
        ; lit' <- zonkOverLit env2 lit
        ; ty' <- zonkTcTypeToType env2 ty
        ; return (env2, NPat (L l lit') mb_neg' eq_expr' ty') }

zonk_pat env (NPlusKPat (L loc n) (L l lit1) lit2 e1 e2 ty)
  = do  { (env1, e1') <- zonkSyntaxExpr env e1
        ; (env2, e2') <- zonkSyntaxExpr env1 e2
        ; n' <- zonkIdBndr env2 n
        ; lit1' <- zonkOverLit env2 lit1
        ; lit2' <- zonkOverLit env2 lit2
        ; ty' <- zonkTcTypeToType env2 ty
          -- n is a binder; extend the env for whatever follows
        ; return (extendIdZonkEnv1 env2 n',
                  NPlusKPat (L loc n') (L l lit1') lit2' e1' e2' ty') }

zonk_pat env (CoPat co_fn pat ty)
  = do { (env', co_fn') <- zonkCoFn env co_fn
       ; (env'', pat') <- zonkPat env' (noLoc pat)
       ; ty' <- zonkTcTypeToType env'' ty
       ; return (env'', CoPat co_fn' (unLoc pat') ty') }

-- Any pattern form not handled above is a typechecker-output invariant
-- violation at this stage
zonk_pat _ pat = pprPanic "zonk_pat" (ppr pat)
---------------------------
zonkConStuff :: ZonkEnv
             -> HsConDetails (OutPat TcId) (HsRecFields id (OutPat TcId))
             -> TcM (ZonkEnv,
                     HsConDetails (OutPat Id) (HsRecFields id (OutPat Id)))
-- Zonk the argument patterns of a constructor pattern, threading the
-- environment so later patterns see earlier binders.
zonkConStuff env (PrefixCon arg_pats)
  = do { (env1, arg_pats') <- zonkPats env arg_pats
       ; return (env1, PrefixCon arg_pats') }

zonkConStuff env (InfixCon lhs rhs)
  = do { (env1, lhs') <- zonkPat env  lhs
       ; (env2, rhs') <- zonkPat env1 rhs
       ; return (env2, InfixCon lhs' rhs') }

zonkConStuff env (RecCon (HsRecFields rpats dd))
  = do { (env1, arg_pats') <- zonkPats env (map (hsRecFieldArg . unLoc) rpats)
         -- Put the zonked argument patterns back into their fields
       ; let rpats' = zipWith (\(L l rp) p' -> L l (rp { hsRecFieldArg = p' }))
                              rpats arg_pats'
       ; return (env1, RecCon (HsRecFields rpats' dd)) }
         -- Field selectors have declared types; hence no zonking
---------------------------
zonkPats :: ZonkEnv -> [OutPat TcId] -> TcM (ZonkEnv, [OutPat Id])
-- Zonk a list of patterns left to right, accumulating their binders
-- into the environment.
zonkPats env []     = return (env, [])
zonkPats env (p:ps)
  = do { (env1, p')  <- zonkPat env p
       ; (env2, ps') <- zonkPats env1 ps
       ; return (env2, p' : ps') }
{-
************************************************************************
* *
\subsection[BackSubst-Foreign]{Foreign exports}
* *
************************************************************************
-}
zonkForeignExports :: ZonkEnv -> [LForeignDecl TcId] -> TcM [LForeignDecl Id]
-- Zonk every located foreign declaration in the list.
zonkForeignExports env = mapM (wrapLocM (zonkForeignExport env))
zonkForeignExport :: ZonkEnv -> ForeignDecl TcId -> TcM (ForeignDecl Id)
-- Zonk a single foreign declaration.  Only exports need work (their
-- bound Id must be zonked); imports are returned unchanged.
zonkForeignExport env (ForeignExport { fd_name = i, fd_co = co, fd_fe = spec })
  = return (ForeignExport { fd_name = fmap (zonkIdOcc env) i
                            -- NOTE(review): fd_sig_ty is deliberately left
                            -- undefined here; presumably it is never forced
                            -- after typechecking -- TODO confirm
                          , fd_sig_ty = undefined, fd_co = co
                          , fd_fe = spec })
zonkForeignExport _ for_imp
  = return for_imp      -- Foreign imports don't need zonking
zonkRules :: ZonkEnv -> [LRuleDecl TcId] -> TcM [LRuleDecl Id]
-- Zonk each located RULE declaration.
zonkRules env = mapM (wrapLocM (zonkRule env))
zonkRule :: ZonkEnv -> RuleDecl TcId -> TcM (RuleDecl Id)
-- Zonk a RULE.  The LHS is zonked with a skolemising zonker so that
-- unbound meta-tyvars become quantifiable skolems rather than Any.
zonkRule env (HsRule name act (vars{-::[RuleBndr TcId]-}) lhs fv_lhs rhs fv_rhs)
  = do { (env_inside, new_bndrs) <- mapAccumLM zonk_bndr env vars

       ; let env_lhs = setZonkType env_inside zonkTvSkolemising
             -- See Note [Zonking the LHS of a RULE]

       ; new_lhs <- zonkLExpr env_lhs    lhs
       ; new_rhs <- zonkLExpr env_inside rhs

       ; return (HsRule name act new_bndrs new_lhs fv_lhs new_rhs fv_rhs) }
  where
   -- Zonk one rule binder, extending the env with the zonked binder
   zonk_bndr env (L l (RuleBndr (L loc v)))
      = do { (env', v') <- zonk_it env v
           ; return (env', L l (RuleBndr (L loc v'))) }
   -- RuleBndrSig should have been eliminated by the typechecker
   zonk_bndr _ (L _ (RuleBndrSig {})) = panic "zonk_bndr RuleBndrSig"

   zonk_it env v
     | isId v     = do { v' <- zonkIdBndr env v
                       ; return (extendIdZonkEnvRec env [v'], v') }
     | otherwise  = ASSERT( isImmutableTyVar v)
                    zonkTyBndrX env v
                    -- DV: used to be return (env,v) but that is plain
                    -- wrong because we may need to go inside the kind
                    -- of v and zonk there!
zonkVects :: ZonkEnv -> [LVectDecl TcId] -> TcM [LVectDecl Id]
-- Zonk each located vectorisation declaration.
zonkVects env decls = mapM (wrapLocM (zonkVect env)) decls
zonkVect :: ZonkEnv -> VectDecl TcId -> TcM (VectDecl Id)
-- Zonk a vectorisation declaration.  The *In forms should have been
-- eliminated by the typechecker, so reaching them here is a panic.
zonkVect env (HsVect s v e)
  = do { v' <- wrapLocM (zonkIdBndr env) v
       ; e' <- zonkLExpr env e
       ; return (HsVect s v' e') }
zonkVect env (HsNoVect s v)
  = do { v' <- wrapLocM (zonkIdBndr env) v
       ; return (HsNoVect s v') }
zonkVect _ (HsVectTypeOut s t rt) = return (HsVectTypeOut s t rt)
zonkVect _ (HsVectTypeIn _ _ _ _) = panic "TcHsSyn.zonkVect: HsVectTypeIn"
zonkVect _ (HsVectClassOut c)     = return (HsVectClassOut c)
zonkVect _ (HsVectClassIn _ _)    = panic "TcHsSyn.zonkVect: HsVectClassIn"
zonkVect _ (HsVectInstOut i)      = return (HsVectInstOut i)
zonkVect _ (HsVectInstIn _)       = panic "TcHsSyn.zonkVect: HsVectInstIn"
{-
************************************************************************
* *
Constraints and evidence
* *
************************************************************************
-}
zonkEvTerm :: ZonkEnv -> EvTerm -> TcM EvTerm
-- Zonk an evidence term, case by constructor.
zonkEvTerm env (EvId v) = ASSERT2( isId v, ppr v )
                          zonkEvVarOcc env v
zonkEvTerm env (EvCoercion co) = do { co' <- zonkCoToCo env co
                                    ; return (EvCoercion co') }
zonkEvTerm env (EvCast tm co) = do { tm' <- zonkEvTerm env tm
                                   ; co' <- zonkCoToCo env co
                                     -- mkEvCast may collapse a Refl cast
                                   ; return (mkEvCast tm' co') }
-- Literals contain nothing zonkable
zonkEvTerm _ (EvLit l) = return (EvLit l)

zonkEvTerm env (EvTypeable ty ev) =
  do { ev' <- zonkEvTypeable env ev
     ; ty' <- zonkTcTypeToType env ty
     ; return (EvTypeable ty' ev') }
zonkEvTerm env (EvCallStack cs)
  = case cs of
      EvCsEmpty -> return (EvCallStack cs)
      EvCsPushCall n l tm -> do { tm' <- zonkEvTerm env tm
                                ; return (EvCallStack (EvCsPushCall n l tm')) }

zonkEvTerm env (EvSuperClass d n) = do { d' <- zonkEvTerm env d
                                       ; return (EvSuperClass d' n) }
zonkEvTerm env (EvDFunApp df tys tms)
  = do { tys' <- zonkTcTypeToTypes env tys
       ; tms' <- mapM (zonkEvTerm env) tms
       ; return (EvDFunApp (zonkIdOcc env df) tys' tms') }
zonkEvTerm env (EvDelayedError ty msg)
  = do { ty' <- zonkTcTypeToType env ty
       ; return (EvDelayedError ty' msg) }
zonkEvTypeable :: ZonkEnv -> EvTypeable -> TcM EvTypeable
-- Zonk the evidence for a Typeable constraint.
zonkEvTypeable env (EvTypeableTyCon kind_evs)
  = EvTypeableTyCon <$> mapM (zonkEvTerm env) kind_evs
zonkEvTypeable env (EvTypeableTyApp fun_ev arg_ev)
  = do { fun_ev' <- zonkEvTerm env fun_ev
       ; arg_ev' <- zonkEvTerm env arg_ev
       ; return (EvTypeableTyApp fun_ev' arg_ev') }
zonkEvTypeable env (EvTypeableTyLit lit_ev)
  = EvTypeableTyLit <$> zonkEvTerm env lit_ev
zonkTcEvBinds_s :: ZonkEnv -> [TcEvBinds] -> TcM (ZonkEnv, [TcEvBinds])
-- Zonk several groups of evidence bindings, flattening the results
-- into one combined EvBinds group.
zonkTcEvBinds_s env groups
  = do { (env', bags) <- mapAccumLM zonk_tc_ev_binds env groups
       ; return (env', [EvBinds (unionManyBags bags)]) }
zonkTcEvBinds :: ZonkEnv -> TcEvBinds -> TcM (ZonkEnv, TcEvBinds)
-- Zonk one group of evidence bindings into concrete EvBinds form.
zonkTcEvBinds env binds
  = do { (env', bag) <- zonk_tc_ev_binds env binds
       ; return (env', EvBinds bag) }
zonk_tc_ev_binds :: ZonkEnv -> TcEvBinds -> TcM (ZonkEnv, Bag EvBind)
-- Dispatch on whether the bindings are still behind a mutable variable
-- or already materialised.
zonk_tc_ev_binds env (EvBinds bs)      = zonkEvBinds env bs
zonk_tc_ev_binds env (TcEvBinds ebv)   = zonkEvBindsVar env ebv
zonkEvBindsVar :: ZonkEnv -> EvBindsVar -> TcM (ZonkEnv, Bag EvBind)
-- Read the accumulated bindings out of the mutable variable, then zonk.
zonkEvBindsVar env (EvBindsVar ref _)
  = do { bind_map <- readMutVar ref
       ; zonkEvBinds env (evBindMapBinds bind_map) }
zonkEvBinds :: ZonkEnv -> Bag EvBind -> TcM (ZonkEnv, Bag EvBind)
-- Zonk a bag of (possibly mutually recursive) evidence bindings.
-- We tie the knot with fixM: env1 is extended with the *zonked*
-- binders taken from the eventual result, so the lazy pattern and
-- collect_ev_bndrs (which forces only eb_lhs) are essential.
zonkEvBinds env binds
  = {-# SCC "zonkEvBinds" #-}
    fixM (\ ~( _, new_binds) -> do
         { let env1 = extendIdZonkEnvRec env (collect_ev_bndrs new_binds)
         ; binds' <- mapBagM (zonkEvBind env1) binds
         ; return (env1, binds') })
  where
    collect_ev_bndrs :: Bag EvBind -> [EvVar]
    collect_ev_bndrs = foldrBag add []
    -- Touch only the LHS of each binding, never the RHS (knot!)
    add (EvBind { eb_lhs = var }) vars = var : vars
zonkEvBind :: ZonkEnv -> EvBind -> TcM EvBind
-- Zonk one evidence binding: the bound variable, then the term,
-- short-circuiting reflexive-equality terms.
zonkEvBind env bind@(EvBind { eb_lhs = var, eb_rhs = term })
  = do { var'  <- {-# SCC "zonkEvBndr" #-} zonkEvBndr env var

         -- Optimise the common case of Refl coercions
         -- See Note [Optimise coercion zonking]
         -- This has a very big effect on some programs (eg Trac #5030)

       ; term' <- case getEqPredTys_maybe (idType var') of
           Just (r, ty1, ty2) | ty1 `eqType` ty2
                  -> return (EvCoercion (mkTcReflCo r ty1))
           _other -> zonkEvTerm env term

       ; return (bind { eb_lhs = var', eb_rhs = term' }) }
{-
************************************************************************
* *
Zonking types
* *
************************************************************************
Note [Zonking mutable unbound type or kind variables]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In zonkTypeZapping, we zonk mutable but unbound type or kind variables to an
arbitrary type. We know if they are unbound even though we don't carry an
environment, because at the binding site for a variable we bind the mutable
var to a fresh immutable one. So the mutable store plays the role of an
environment. If we come across a mutable variable that isn't so bound, it
must be completely free. We zonk the expected kind to make sure we don't get
some unbound meta variable as the kind.
Note that since we have kind polymorphism, zonk_unbound_tyvar will handle both
type and kind variables. Consider the following datatype:
data Phantom a = Phantom Int
The type of Phantom is (forall (k : *). forall (a : k). Int). Both `a` and
`k` are unbound variables. We want to zonk this to
(forall (k : Any *). forall (a : Any (Any *)). Int).
Note [Optimise coercion zonking]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When optimising evidence binds we may come across situations where
a coercion looks like
cv = ReflCo ty
or cv1 = cv2
where the type 'ty' is big. In such cases it is a waste of time to zonk both
* The variable on the LHS
* The coercion on the RHS
Rather, we can zonk the variable, and if its type is (ty ~ ty), we can just
use Refl on the right, ignoring the actual coercion on the RHS.
This can have a very big effect, because the constraint solver sometimes does go
to a lot of effort to prove Refl! (Eg when solving 10+3 = 10+3; cf Trac #5030)
-}
zonkTyVarOcc :: ZonkEnv -> TyVar -> TcM TcType
-- Zonk an occurrence of a type variable.  Mutable meta-variables are
-- followed through the mutable store; everything else goes via the
-- ZonkEnv, like Ids.
zonkTyVarOcc env@(ZonkEnv zonk_unbound_tyvar tv_env _) tv
  | isTcTyVar tv
  = case tcTyVarDetails tv of
      SkolemTv {}    -> lookup_in_env
      RuntimeUnk {}  -> lookup_in_env
      FlatSkol ty    -> zonkTcTypeToType env ty
      MetaTv { mtv_ref = ref }
        -> do { cts <- readMutVar ref
              ; case cts of
                  -- Still unbound: zonk the kind, then apply the
                  -- env's unbound-tyvar policy (zap or skolemise)
                  Flexi -> do { kind <- {-# SCC "zonkKind1" #-}
                                        zonkTcTypeToType env (tyVarKind tv)
                              ; zonk_unbound_tyvar (setTyVarKind tv kind) }
                  Indirect ty -> do { zty <- zonkTcTypeToType env ty
                                      -- Small optimisation: shorten out indirect steps
                                      -- so that the old type may be more easily collected.
                                    ; writeMutVar ref (Indirect zty)
                                    ; return zty } }
  | otherwise
  = lookup_in_env
  where
    lookup_in_env    -- Look up in the env just as we do for Ids
      = case lookupVarEnv tv_env tv of
          Nothing  -> mkTyVarTy <$> updateTyVarKindM (zonkTcTypeToType env) tv
          Just tv' -> return (mkTyVarTy tv')
zonkCoVarOcc :: ZonkEnv -> CoVar -> TcM Coercion
-- Zonk an occurrence of a coercion variable.
zonkCoVarOcc env@(ZonkEnv _ tyco_env _) cv
  = case lookupVarEnv tyco_env cv of    -- don't look in the knot-tied env
      Just cv' -> return (mkCoVarCo cv')
      Nothing  -> mkCoVarCo <$> updateVarTypeM (zonkTcTypeToType env) cv
zonkCoHole :: ZonkEnv -> CoercionHole
           -> Role -> Type -> Type  -- these are all redundant with
                                    -- the details in the hole,
                                    -- unzonked
           -> TcM Coercion
-- Zonk a coercion hole: replace it with its (zonked) filling if one
-- exists; an unfilled hole is tolerated only when type errors have
-- already been reported.
zonkCoHole env h r t1 t2
  = do { contents <- unpackCoercionHole_maybe h
       ; case contents of
           Just co -> do { co <- zonkCoToCo env co
                         ; checkCoercionHole co h r t1 t2 }

             -- This next case should happen only in the presence of
             -- (undeferred) type errors. Originally, I put in a panic
             -- here, but that caused too many uses of `failIfErrsM`.
           Nothing -> do { traceTc "Zonking unfilled coercion hole" (ppr h)
                         ; when debugIsOn $
                           whenNoErrs $
                           MASSERT2( False
                                   , text "Type-correct unfilled coercion hole"
                                     <+> ppr h )
                         ; t1 <- zonkTcTypeToType env t1
                         ; t2 <- zonkTcTypeToType env t2
                         ; return $ mkHoleCo h r t1 t2 } }
zonk_tycomapper :: TyCoMapper ZonkEnv TcM
-- The generic type/coercion traversal shared by zonkTcTypeToType and
-- zonkCoToCo.
zonk_tycomapper = TyCoMapper
  { tcm_smart    = True  -- Establish type invariants
                         -- See Note [Type-checking inside the knot] in TcHsType
  , tcm_tyvar    = zonkTyVarOcc
  , tcm_covar    = zonkCoVarOcc
  , tcm_hole     = zonkCoHole
  , tcm_tybinder = \ze tv _ -> zonkTyBndrX ze tv }
-- Confused by zonking? See Note [What is zonking?] in TcMType.
zonkTcTypeToType :: ZonkEnv -> TcType -> TcM Type
zonkTcTypeToType env ty = mapType zonk_tycomapper env ty
zonkTcTypeToTypes :: ZonkEnv -> [TcType] -> TcM [Type]
-- Zonk each type in the list under the same environment.
zonkTcTypeToTypes env = traverse (zonkTcTypeToType env)
-- | Used during kind-checking in TcTyClsDecls, where it's more convenient
-- to keep the binders and result kind separate.
zonkTcKindToKind :: [TcTyBinder] -> TcKind -> TcM ([TyBinder], Kind)
zonkTcKindToKind bndrs res_k
  = do { (env, bndrs') <- zonkTyBinders emptyZonkEnv bndrs
       ; res_k' <- zonkTcTypeToType env res_k
       ; return (bndrs', res_k') }
zonkCoToCo :: ZonkEnv -> Coercion -> TcM Coercion
-- Zonk a coercion via the shared traversal.
zonkCoToCo env co = mapCoercion zonk_tycomapper env co
zonkTvSkolemising :: UnboundTyVarZonker
-- This variant is used for the LHS of rules
-- See Note [Zonking the LHS of a RULE].
zonkTvSkolemising tv
  = mkTyVarTy <$> skolemiseUnboundMetaTyVar tv vanillaSkolemTv
zonkTypeZapping :: UnboundTyVarZonker
-- This variant is used for everything except the LHS of rules.
-- It zaps unbound type variables to Any, except for RuntimeRep
-- vars which it zonks to PtrRepLifted.
-- Works on both types and kinds.
zonkTypeZapping tv
  = do { let zap_ty
               | isRuntimeRepVar tv = ptrRepLiftedTy
               | otherwise          = anyTypeOfKind (tyVarKind tv)
       ; writeMetaTyVar tv zap_ty
       ; return zap_ty }
---------------------------------------
{- Note [Zonking the LHS of a RULE]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
See also DsBinds Note [Free tyvars on rule LHS]
We need to gather the type variables mentioned on the LHS so we can
quantify over them. Example:
data T a = C
foo :: T a -> Int
foo C = 1
{-# RULES "myrule" foo C = 1 #-}
After type checking the LHS becomes (foo alpha (C alpha)) and we do
not want to zap the unbound meta-tyvar 'alpha' to Any, because that
limits the applicability of the rule. Instead, we want to quantify
over it!
We do this in two stages.
* During zonking, we skolemise 'alpha' to 'a'. We do this by using
zonkTvSkolemising as the UnboundTyVarZonker in the ZonkEnv.
(This is the whole reason that the ZonkEnv has a UnboundTyVarZonker.)
* In DsBinds, we quantify over it. See DsBinds
Note [Free tyvars on rule LHS]
Quantifying here is awkward because (a) the data type is big and (b)
finding the free type vars of an expression is necessarily a monadic
operation. (consider /\a -> f @ b, where b is side-effected to a)
Note [Unboxed tuples in representation polymorphism check]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Recall that all types that have values (that is, lifted and unlifted
types) have kinds that look like (TYPE rep), where (rep :: RuntimeRep)
tells how the values are represented at runtime. Lifted types have
kind (TYPE PtrRepLifted) (for which * is just a synonym) and, say,
Int# has kind (TYPE IntRep).
It would be terrible if the code generator came upon a binder of a type
whose kind is something like TYPE r, where r is a skolem type variable.
The code generator wouldn't know what to do. So we eliminate that case
here.
Although representation polymorphism and the RuntimeRep type catch
most ways of abusing unlifted types, it still isn't quite satisfactory
around unboxed tuples. That's because all unboxed tuple types have kind
TYPE UnboxedTupleRep, which is clearly a lie: it doesn't actually tell
you what the representation is.
Naively, when checking for representation polymorphism, you might think we can
just look for free variables in a type's RuntimeRep. But this misses the
UnboxedTupleRep case.
So, instead, we handle unboxed tuples specially. Only after unboxed tuples
are handled do we look for free tyvars in a RuntimeRep.
We must still be careful in the UnboxedTupleRep case. A binder whose type
has kind UnboxedTupleRep is OK -- only as long as the type is really an
unboxed tuple, which the code generator treats specially. So we do this:
1. Check if the type is an unboxed tuple. If so, recur.
2. Check if the kind is TYPE UnboxedTupleRep. If so, error.
3. Check if the kind has any free variables. If so, error.
In case 1, we have a type that looks like
(# , #) PtrRepLifted IntRep Bool Int#
recalling that
(# , #) :: forall (r1 :: RuntimeRep) (r2 :: RuntimeRep).
TYPE r1 -> TYPE r2 -> TYPE UnboxedTupleRep
It's tempting just to look at the RuntimeRep arguments to make sure
that they are devoid of free variables and not UnboxedTupleRep. This
naive check, though, fails on nested unboxed tuples, like
(# Int#, (# Bool, Void# #) #). Thus, instead of looking at the RuntimeRep
args to the unboxed tuple constructor, we look at the types themselves.
Here are a few examples:
type family F r :: TYPE r
x :: (F r :: TYPE r) -- REJECTED: simple representation polymorphism
where r is an in-scope type variable of kind RuntimeRep
x :: (F PtrRepLifted :: TYPE PtrRepLifted) -- OK
x :: (F IntRep :: TYPE IntRep) -- OK
x :: (F UnboxedTupleRep :: TYPE UnboxedTupleRep) -- REJECTED
x :: ((# Int, Bool #) :: TYPE UnboxedTupleRep) -- OK
-}
-- | According to the rules around representation polymorphism
-- (see https://ghc.haskell.org/trac/ghc/wiki/NoSubKinds), no binder
-- can have a representation-polymorphic type.  This check enforces
-- that rule.  It is a bit regrettable that this error occurs in
-- zonking, after which we should have reported all errors; but it is
-- hard to do earlier, because the property can be established only
-- once all solving is done.  And it isn't really a compositional
-- property of a type system, so an awkward home is no great surprise.
ensureNotRepresentationPolymorphic
  :: Type  -- the binder's zonked type
  -> SDoc  -- context for the error message
  -> TcM ()
ensureNotRepresentationPolymorphic ty doc
  = whenNoErrs (checkForRepresentationPolymorphism doc ty)
    -- whenNoErrs: we sometimes end up zonking bogus definitions of type
    -- forall a. a.  See, for example, test ghci/scripts/T9140
-- See Note [Unboxed tuples in representation polymorphism check]
checkForRepresentationPolymorphism :: SDoc -> Type -> TcM ()
-- Reject a type whose runtime representation is not fully determined:
-- either its kind claims UnboxedTupleRep without being a real unboxed
-- tuple, or its RuntimeRep mentions free variables.
checkForRepresentationPolymorphism extra ty
  | Just (tc, tys) <- splitTyConApp_maybe ty
  , isUnboxedTupleTyCon tc
    -- Case 1 of the Note: a genuine unboxed tuple; recur into the
    -- component types (skipping the RuntimeRep arguments)
  = mapM_ (checkForRepresentationPolymorphism extra) (dropRuntimeRepArgs tys)

  | runtime_rep `eqType` unboxedTupleRepDataConTy
    -- Case 2: kind says UnboxedTupleRep but the type is not one
  = addErr (vcat [ text "The type" <+> quotes (ppr tidy_ty) <+>
                     text "is not an unboxed tuple,"
                 , text "and yet its kind suggests that it has the representation"
                 , text "of an unboxed tuple. This is not allowed." ] $$
            extra)

  | not (isEmptyVarSet (tyCoVarsOfType runtime_rep))
    -- Case 3: the representation mentions free type variables
  = addErr $
    hang (text "A representation-polymorphic type is not allowed here:")
       2 (vcat [ text "Type:" <+> ppr tidy_ty
               , text "Kind:" <+> ppr tidy_ki ]) $$
    extra

  | otherwise
  = return ()
  where
    ki          = typeKind ty
    runtime_rep = getRuntimeRepFromKind "check_type" ki

    (tidy_env, tidy_ty) = tidyOpenType emptyTidyEnv ty
    tidy_ki             = tidyType tidy_env (typeKind ty)
|
vikraman/ghc
|
compiler/typecheck/TcHsSyn.hs
|
bsd-3-clause
| 72,748
| 32
| 19
| 20,637
| 18,845
| 9,573
| 9,272
| -1
| -1
|
{-
(c) The GRASP/AQUA Project, Glasgow University, 1992-1998
\section[RnNames]{Extracting imported and top-level names in scope}
-}
{-# LANGUAGE CPP, NondecreasingIndentation #-}
module RnNames (
rnImports, getLocalNonValBinders, newRecordSelector,
rnExports, extendGlobalRdrEnvRn,
gresFromAvails,
calculateAvails,
reportUnusedNames,
checkConName
) where
#include "HsVersions.h"
import DynFlags
import HsSyn
import TcEnv
import RnEnv
import RnHsDoc ( rnHsDoc )
import LoadIface ( loadSrcInterface )
import TcRnMonad
import PrelNames
import Module
import Name
import NameEnv
import NameSet
import Avail
import FieldLabel
import HscTypes
import RdrName
import RdrHsSyn ( setRdrNameSpace )
import Outputable
import Maybes
import SrcLoc
import BasicTypes ( TopLevelFlag(..), StringLiteral(..) )
import ErrUtils
import Util
import FastString
import FastStringEnv
import ListSetOps
import Id
import Type
import PatSyn
import qualified GHC.LanguageExtensions as LangExt
import Control.Monad
import Data.Either ( partitionEithers, isRight, rights )
-- import qualified Data.Foldable as Foldable
import Data.Map ( Map )
import qualified Data.Map as Map
import Data.Ord ( comparing )
import Data.List ( partition, (\\), find, sortBy )
-- import qualified Data.Set as Set
import System.FilePath ((</>))
import System.IO
{-
************************************************************************
* *
\subsection{rnImports}
* *
************************************************************************
Note [Tracking Trust Transitively]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When we import a package as well as checking that the direct imports are safe
according to the rules outlined in the Note [HscMain . Safe Haskell Trust Check]
we must also check that these rules hold transitively for all dependent modules
and packages. Doing this without caching any trust information would be very
slow as we would need to touch all packages and interface files a module depends
on. To avoid this we make use of the property that if a module's Safe Haskell
mode changes, this triggers a recompilation from that module in the dependency
graph. So we can just worry mostly about direct imports.
There is one trust property that can change for a package though without
recompilation being triggered: package trust. So we must check that all
packages a module transitively depends on to be trusted are still trusted when
we are compiling this module (as due to recompilation avoidance some modules
below may not be considered trusted any more without recompilation being
triggered).
We handle this by augmenting the existing transitive list of packages a module M
depends on with a bool for each package that says if it must be trusted when the
module M is being checked for trust. This list of trust required packages for a
single import is gathered in the rnImportDecl function and stored in an
ImportAvails data structure. The union of these trust required packages for all
imports is done by the rnImports function using the combine function which calls
the plusImportAvails function that is a union operation for the ImportAvails
type. This gives us in an ImportAvails structure all packages required to be
trusted for the module we are currently compiling. Checking that these packages
are still trusted (and that direct imports are trusted) is done in
HscMain.checkSafeImports.
See the note below, [Trust Own Package] for a corner case in this method and
how its handled.
Note [Trust Own Package]
~~~~~~~~~~~~~~~~~~~~~~~~
There is a corner case of package trust checking that the usual transitive check
doesn't cover. (For how the usual check operates see the Note [Tracking Trust
Transitively] below). The case is when you import a -XSafe module M and M
imports a -XTrustworthy module N. If N resides in a different package than M,
then the usual check works as M will record a package dependency on N's package
and mark it as required to be trusted. If N resides in the same package as M
though, then importing M should require its own package be trusted due to N
(since M is -XSafe so doesn't create this requirement by itself). The usual
check fails as a module doesn't record a package dependency of its own package.
So instead we now have a bool field in a modules interface file that simply
states if the module requires its own package to be trusted. This field avoids
us having to load all interface files that the module depends on to see if one
is trustworthy.
Note [Trust Transitive Property]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
So there is an interesting design question in regards to transitive trust
checking. Say I have a module B compiled with -XSafe. B is dependent on a bunch
of modules and packages, some packages it requires to be trusted as its using
-XTrustworthy modules from them. Now if I have a module A that doesn't use safe
haskell at all and simply imports B, should A inherit all the trust
requirements from B? Should A now also require that a package p is trusted since
B required it?
We currently say no but saying yes also makes sense. The difference is, if a
module M that doesn't use Safe Haskell imports a module N that does, should all
the trusted package requirements be dropped since M didn't declare that it cares
about Safe Haskell (so -XSafe is more strongly associated with the module doing
the importing) or should it be done still since the author of the module N that
uses Safe Haskell said they cared (so -XSafe is more strongly associated with
the module that was compiled that used it).
Going with yes is a simpler semantics we think and harder for the user to stuff
up but it does mean that Safe Haskell will affect users who don't care about
Safe Haskell as they might grab a package from Cabal which uses safe haskell (say
network) and that packages imports -XTrustworthy modules from another package
(say bytestring), so requires that package is trusted. The user may now get
compilation errors in code that doesn't do anything with Safe Haskell simply
because they are using the network package. They will have to call 'ghc-pkg
trust network' to get everything working. Due to this invasive nature of going
with yes we have gone with no for now.
-}
-- | Process Import Decls.  See 'rnImportDecl' for a description of what
-- the return types represent.
-- Note: Do the non SOURCE ones first, so that we get a helpful warning
-- for SOURCE ones that are unnecessary
rnImports :: [LImportDecl RdrName]
          -> RnM ([LImportDecl Name], GlobalRdrEnv, ImportAvails, AnyHpcUsage)
rnImports imports = do
    this_mod <- getModule
    let (src_imps, ord_imps) = partition (ideclSource . unLoc) imports
    -- Rename ordinary imports before {-# SOURCE #-} ones, so that we
    -- get a helpful warning for unnecessary SOURCE imports
    ord_stuff <- mapAndReportM (rnImportDecl this_mod) ord_imps
    src_stuff <- mapAndReportM (rnImportDecl this_mod) src_imps
    -- Safe Haskell: See Note [Tracking Trust Transitively]
    let (decls, rdr_env, imp_avails, hpc_usage) = combine (ord_stuff ++ src_stuff)
    return (decls, rdr_env, imp_avails, hpc_usage)
  where
    -- Fold the per-declaration results together
    combine :: [(LImportDecl Name, GlobalRdrEnv, ImportAvails, AnyHpcUsage)]
            -> ([LImportDecl Name], GlobalRdrEnv, ImportAvails, AnyHpcUsage)
    combine = foldr plus ([], emptyGlobalRdrEnv, emptyImportAvails, False)

    plus (decl,  gbl_env1, imp_avails1, hpc_usage1)
         (decls, gbl_env2, imp_avails2, hpc_usage2)
      = ( decl : decls
        , gbl_env1    `plusGlobalRdrEnv` gbl_env2
        , imp_avails1 `plusImportAvails` imp_avails2
        , hpc_usage1 || hpc_usage2 )
-- | Given a located import declaration @decl@ from @this_mod@,
-- calculate the following pieces of information:
--
--  1. An updated 'LImportDecl', where all unresolved 'RdrName' in
--     the entity lists have been resolved into 'Name's,
--
--  2. A 'GlobalRdrEnv' representing the new identifiers that were
--     brought into scope (taking into account module qualification
--     and hiding),
--
--  3. 'ImportAvails' summarizing the identifiers that were imported
--     by this declaration, and
--
--  4. A boolean 'AnyHpcUsage' which is true if the imported module
--     used HPC.
rnImportDecl :: Module -> LImportDecl RdrName
             -> RnM (LImportDecl Name, GlobalRdrEnv, ImportAvails, AnyHpcUsage)
rnImportDecl this_mod
             (L loc decl@(ImportDecl { ideclName = loc_imp_mod_name, ideclPkgQual = mb_pkg
                                     , ideclSource = want_boot, ideclSafe = mod_safe
                                     , ideclQualified = qual_only, ideclImplicit = implicit
                                     , ideclAs = as_mod, ideclHiding = imp_details }))
  = setSrcSpan loc $ do

    -- A package-qualified import (import "pkg" M) is only legal
    -- with -XPackageImports
    when (isJust mb_pkg) $ do
        pkg_imports <- xoptM LangExt.PackageImports
        when (not pkg_imports) $ addErr packageImportErr

    -- If there's an error in loadInterface, (e.g. interface
    -- file not found) we get lots of spurious errors from 'filterImports'
    let imp_mod_name = unLoc loc_imp_mod_name
        doc = ppr imp_mod_name <+> text "is directly imported"

    -- Check for self-import, which confuses the typechecker (Trac #9032)
    -- ghc --make rejects self-import cycles already, but batch-mode may not
    -- at least not until TcIface.tcHiBootIface, which is too late to avoid
    -- typechecker crashes.  (Indirect self imports are not caught until
    -- TcIface, see #10337 tracking how to make this error better.)
    --
    -- Originally, we also allowed 'import {-# SOURCE #-} M', but this
    -- caused bug #10182: in one-shot mode, we should never load an hs-boot
    -- file for the module we are compiling into the EPS.  In principle,
    -- it should be possible to support this mode of use, but we would have to
    -- extend Provenance to support a local definition in a qualified location.
    -- For now, we don't support it, but see #10336
    when (imp_mod_name == moduleName this_mod &&
          (case mb_pkg of  -- If we have import "<pkg>" M, then we should
                           -- check that "<pkg>" is "this" (which is magic)
                           -- or the name of this_mod's package.  Yurgh!
                           -- c.f. GHC.findModule, and Trac #9997
             Nothing                        -> True
             Just (StringLiteral _ pkg_fs)  -> pkg_fs == fsLit "this" ||
                            fsToUnitId pkg_fs == moduleUnitId this_mod))
         (addErr (text "A module cannot import itself:" <+> ppr imp_mod_name))

    -- Check for a missing import list (Opt_WarnMissingImportList also
    -- checks for T(..) items but that is done in checkDodgyImport below)
    case imp_details of
        Just (False, _) -> return () -- Explicit import list
        _  | implicit   -> return () -- Do not bleat for implicit imports
           | qual_only  -> return ()
           | otherwise  -> whenWOptM Opt_WarnMissingImportList $
                           addWarn (Reason Opt_WarnMissingImportList)
                                   (missingImportListWarn imp_mod_name)

    iface <- loadSrcInterface doc imp_mod_name want_boot (fmap sl_fs mb_pkg)

    -- Compiler sanity check: if the import didn't say
    -- {-# SOURCE #-} we should not get a hi-boot file
    WARN( not want_boot && mi_boot iface, ppr imp_mod_name ) do

    -- Issue a user warning for a redundant {- SOURCE -} import
    -- NB that we arrange to read all the ordinary imports before
    -- any of the {- SOURCE -} imports.
    --
    -- in --make and GHCi, the compilation manager checks for this,
    -- and indeed we shouldn't do it here because the existence of
    -- the non-boot module depends on the compilation order, which
    -- is not deterministic.  The hs-boot test can show this up.
    dflags <- getDynFlags
    warnIf NoReason
           (want_boot && not (mi_boot iface) && isOneShot (ghcMode dflags))
           (warnRedundantSourceImport imp_mod_name)
    when (mod_safe && not (safeImportsOn dflags)) $
        addErr (text "safe import can't be used as Safe Haskell isn't on!"
                $+$ ptext (sLit $ "please enable Safe Haskell through either "
                                  ++ "Safe, Trustworthy or Unsafe"))

    let
        qual_mod_name = as_mod `orElse` imp_mod_name
        imp_spec  = ImpDeclSpec { is_mod = imp_mod_name, is_qual = qual_only,
                                  is_dloc = loc, is_as = qual_mod_name }

    -- filter the imports according to the import declaration
    (new_imp_details, gres) <- filterImports iface imp_spec imp_details

    -- for certain error messages, we’d like to know what could be imported
    -- here, if everything were imported
    potential_gres <- mkGlobalRdrEnv . snd <$> filterImports iface imp_spec Nothing

    let gbl_env = mkGlobalRdrEnv gres

        is_hiding | Just (True,_) <- imp_details = True
                  | otherwise                    = False

        -- should the import be safe?
        mod_safe' = mod_safe
                    || (not implicit && safeDirectImpsReq dflags)
                    || (implicit && safeImplicitImpsReq dflags)

    let imv = ImportedModsVal
            { imv_name        = qual_mod_name
            , imv_span        = loc
            , imv_is_safe     = mod_safe'
            , imv_is_hiding   = is_hiding
            , imv_all_exports = potential_gres
            , imv_qualified   = qual_only
            }
    -- Record this import under the interface's module, on top of the
    -- transitive availability information computed by calculateAvails
    let imports
          = (calculateAvails dflags iface mod_safe' want_boot)
                { imp_mods = unitModuleEnv (mi_module iface) [imv] }

    -- Complain if we import a deprecated module
    whenWOptM Opt_WarnWarningsDeprecations (
       case (mi_warns iface) of
          WarnAll txt -> addWarn (Reason Opt_WarnWarningsDeprecations)
                                 (moduleWarn imp_mod_name txt)
          _           -> return ()
     )

    let new_imp_decl = L loc (decl { ideclSafe = mod_safe'
                                   , ideclHiding = new_imp_details })

    return (new_imp_decl, gbl_env, imports, mi_hpc iface)
-- | Calculate the 'ImportAvails' induced by an import of a particular
-- interface, but without 'imp_mods' (the caller fills that in).
calculateAvails :: DynFlags
                -> ModIface      -- ^ the interface of the imported module
                -> IsSafeImport  -- ^ was it imported as a safe import?
                -> IsBootInterface
                -> ImportAvails
calculateAvails dflags iface mod_safe' want_boot =
  let imp_mod    = mi_module iface
      orph_iface = mi_orphan iface
      has_finsts = mi_finsts iface
      deps       = mi_deps iface
      trust      = getSafeMode $ mi_trust iface
      trust_pkg  = mi_trust_pkg iface

      -- If the module exports anything defined in this module, just
      -- ignore it.  Reason: otherwise it looks as if there are two
      -- local definition sites for the thing, and an error gets
      -- reported.  Easiest thing is just to filter them out up
      -- front. This situation only arises if a module imports
      -- itself, or another module that imported it.  (Necessarily,
      -- this involves a loop.)
      --
      -- We do this *after* filterImports, so that if you say
      --      module A where
      --         import B( AType )
      --         type AType = ...
      --
      --      module B( AType ) where
      --         import {-# SOURCE #-} A( AType )
      --
      -- then you won't get a 'B does not export AType' message.

      -- Compute new transitive dependencies

      orphans | orph_iface = ASSERT( not (imp_mod `elem` dep_orphs deps) )
                             imp_mod : dep_orphs deps
              | otherwise  = dep_orphs deps

      finsts | has_finsts = ASSERT( not (imp_mod `elem` dep_finsts deps) )
                            imp_mod : dep_finsts deps
             | otherwise  = dep_finsts deps

      pkg = moduleUnitId (mi_module iface)

      -- Does this import mean we now require our own pkg
      -- to be trusted? See Note [Trust Own Package]
      ptrust = trust == Sf_Trustworthy || trust_pkg

      (dependent_mods, dependent_pkgs, pkg_trust_req)
         | pkg == thisPackage dflags =
            -- Imported module is from the home package
            -- Take its dependent modules and add imp_mod itself
            -- Take its dependent packages unchanged
            --
            -- NB: (dep_mods deps) might include a hi-boot file
            -- for the module being compiled, CM. Do *not* filter
            -- this out (as we used to), because when we've
            -- finished dealing with the direct imports we want to
            -- know if any of them depended on CM.hi-boot, in
            -- which case we should do the hi-boot consistency
            -- check.  See LoadIface.loadHiBootInterface
            ((moduleName imp_mod,want_boot):dep_mods deps,dep_pkgs deps,ptrust)

         | otherwise =
            -- Imported module is from another package
            -- Dump the dependent modules
            -- Add the package imp_mod comes from to the dependent packages
            ASSERT2( not (pkg `elem` (map fst $ dep_pkgs deps))
                   , ppr pkg <+> ppr (dep_pkgs deps) )
            ([], (pkg, False) : dep_pkgs deps, False)

  in ImportAvails {
          imp_mods       = emptyModuleEnv, -- this gets filled in later
          imp_orphs      = orphans,
          imp_finsts     = finsts,
          imp_dep_mods   = mkModDeps dependent_mods,
          imp_dep_pkgs   = map fst $ dependent_pkgs,
          -- Add in the imported modules trusted package
          -- requirements. ONLY do this though if we import the
          -- module as a safe import.
          -- See Note [Tracking Trust Transitively]
          -- and Note [Trust Transitive Property]
          imp_trust_pkgs = if mod_safe'
                               then map fst $ filter snd dependent_pkgs
                               else [],
          -- Do we require our own pkg to be trusted?
          -- See Note [Trust Own Package]
          imp_trust_own_pkg = pkg_trust_req
     }
-- | Warning text for a {-# SOURCE #-} import of a module whose
-- interface is not actually an hi-boot file.
warnRedundantSourceImport :: ModuleName -> SDoc
warnRedundantSourceImport mod_name =
  hsep [ text "Unnecessary {-# SOURCE #-} in the import of module"
       , quotes (ppr mod_name) ]
{-
************************************************************************
* *
\subsection{importsFromLocalDecls}
* *
************************************************************************
From the top-level declarations of this module produce
* the lexical environment
* the ImportAvails
created by its bindings.
Note [Top-level Names in Template Haskell decl quotes]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
See also: Note [Interactively-bound Ids in GHCi] in HscTypes
Note [Looking up Exact RdrNames] in RnEnv
Consider a Template Haskell declaration quotation like this:
module M where
f x = h [d| f = 3 |]
When renaming the declarations inside [d| ...|], we treat the
top level binders specially in three ways
1. We give them an Internal Name, not (as usual) an External one.
This is done by RnEnv.newTopSrcBinder.
2. We make them *shadow* the outer bindings.
See Note [GlobalRdrEnv shadowing]
3. We find out whether we are inside a [d| ... |] by testing the TH
stage. This is a slight hack, because the stage field was really
meant for the type checker, and here we are not interested in the
fields of Brack, hence the error thunks in thRnBrack.
-}
-- | Add the binders described by @avails@ (with their fixities) to the
-- global reader environment and fixity environment.
extendGlobalRdrEnvRn :: [AvailInfo]
                     -> MiniFixityEnv
                     -> RnM (TcGblEnv, TcLclEnv)
-- Updates both the GlobalRdrEnv and the FixityEnv
-- We return a new TcLclEnv only because we might have to
--      delete some bindings from it;
--      see Note [Top-level Names in Template Haskell decl quotes]
extendGlobalRdrEnvRn avails new_fixities
  = do  { (gbl_env, lcl_env) <- getEnvs
        ; stage <- getStage
        ; isGHCi <- getIsGHCi
        ; let rdr_env  = tcg_rdr_env gbl_env
              fix_env  = tcg_fix_env gbl_env
              th_bndrs = tcl_th_bndrs lcl_env
              th_lvl   = thLevel stage

              -- Delete new_occs from global and local envs
              -- If we are in a TemplateHaskell decl bracket,
              --    we are going to shadow them
              -- See Note [GlobalRdrEnv shadowing]
              inBracket = isBrackStage stage

              lcl_env_TH = lcl_env { tcl_rdr = delLocalRdrEnvList (tcl_rdr lcl_env) new_occs }
                           -- See Note [GlobalRdrEnv shadowing]

              lcl_env2 | inBracket = lcl_env_TH
                       | otherwise = lcl_env

              -- Deal with shadowing: see Note [GlobalRdrEnv shadowing]
              want_shadowing = isGHCi || inBracket
              rdr_env1 | want_shadowing = shadowNames rdr_env new_names
                       | otherwise      = rdr_env

              -- Record the TH binding level of the new names
              lcl_env3 = lcl_env2 { tcl_th_bndrs = extendNameEnvList th_bndrs
                                                     [ (n, (TopLevel, th_lvl))
                                                     | n <- new_names ] }

        ; rdr_env2 <- foldlM add_gre rdr_env1 new_gres

        ; let fix_env' = foldl extend_fix_env fix_env new_gres
              gbl_env' = gbl_env { tcg_rdr_env = rdr_env2, tcg_fix_env = fix_env' }

        ; traceRn (text "extendGlobalRdrEnvRn 2" <+> (pprGlobalRdrEnv True rdr_env2))
        ; return (gbl_env', lcl_env3) }
  where
    new_names = concatMap availNames avails
    new_occs  = map nameOccName new_names

    -- If there is a fixity decl for the gre, add it to the fixity env
    extend_fix_env fix_env gre
      | Just (L _ fi) <- lookupFsEnv new_fixities (occNameFS occ)
      = extendNameEnv fix_env name (FixItem occ fi)
      | otherwise
      = fix_env
      where
        name = gre_name gre
        occ  = greOccName gre

    new_gres :: [GlobalRdrElt]  -- New LocalDef GREs, derived from avails
    new_gres = concatMap localGREsFromAvail avails

    add_gre :: GlobalRdrEnv -> GlobalRdrElt -> RnM GlobalRdrEnv
    -- Extend the GlobalRdrEnv with a LocalDef GRE
    -- If there is already a LocalDef GRE with the same OccName,
    --    report an error and discard the new GRE
    -- This establishes INVARIANT 1 of GlobalRdrEnvs
    add_gre env gre
      | not (null dups)    -- Same OccName defined twice
      = do { addDupDeclErr (gre : dups); return env }

      | otherwise
      = return (extendGlobalRdrEnv env gre)
      where
        name = gre_name gre
        occ  = nameOccName name
        dups = filter isLocalGRE (lookupGlobalRdrEnv env occ)
{- *********************************************************************
* *
@getLocalNonValBinders@ returns the names for an HsDecl
It's used for source code.
*** See "THE NAMING STORY" in HsDecls ****
* *
********************************************************************* -}
getLocalNonValBinders :: MiniFixityEnv -> HsGroup RdrName
    -> RnM ((TcGblEnv, TcLclEnv), NameSet)
-- Get all the top-level binders bound in the group *except*
-- for value bindings, which are treated separately
-- Specifically we return AvailInfo for
--      * type decls (incl constructors and record selectors)
--      * class decls (including class ops)
--      * associated types
--      * foreign imports
--      * value signatures (in hs-boot files only)
getLocalNonValBinders fixity_env
     (HsGroup { hs_valds  = binds,
                hs_tyclds = tycl_decls,
                hs_instds = inst_decls,
                hs_fords  = foreign_decls })
  = do  { -- Process all type/class decls *except* family instances
        ; overload_ok <- xoptM LangExt.DuplicateRecordFields
        ; (tc_avails, tc_fldss) <- fmap unzip $ mapM (new_tc overload_ok)
                                                     (tyClGroupConcat tycl_decls)
        ; traceRn (text "getLocalNonValBinders 1" <+> ppr tc_avails)
        ; envs <- extendGlobalRdrEnvRn tc_avails fixity_env
        ; setEnvs envs $ do {
            -- Bring these things into scope first
            -- See Note [Looking up family names in family instances]

          -- Process all family instances
          -- to bring new data constructors into scope
        ; (nti_availss, nti_fldss) <- mapAndUnzipM (new_assoc overload_ok)
                                                   inst_decls

          -- Finish off with value binders:
          --    foreign decls and pattern synonyms for an ordinary module
          --    type sigs in case of a hs-boot file only
        ; is_boot <- tcIsHsBootOrSig
        ; let val_bndrs | is_boot   = hs_boot_sig_bndrs
                        | otherwise = for_hs_bndrs
        ; val_avails <- mapM new_simple val_bndrs

        ; let avails    = concat nti_availss ++ val_avails
              new_bndrs = availsToNameSetWithSelectors avails `unionNameSet`
                          availsToNameSetWithSelectors tc_avails
              flds      = concat nti_fldss ++ concat tc_fldss
        ; traceRn (text "getLocalNonValBinders 2" <+> ppr avails)
        ; (tcg_env, tcl_env) <- extendGlobalRdrEnvRn avails fixity_env

        -- Extend tcg_field_env with new fields (this used to be the
        -- work of extendRecordFieldEnv)
        ; let field_env = extendNameEnvList (tcg_field_env tcg_env) flds
              envs      = (tcg_env { tcg_field_env = field_env }, tcl_env)

        ; traceRn (text "getLocalNonValBinders 3" <+> vcat [ppr flds, ppr field_env])
        ; return (envs, new_bndrs) } }
  where
    ValBindsIn _val_binds val_sigs = binds

    for_hs_bndrs :: [Located RdrName]
    for_hs_bndrs = hsForeignDeclsBinders foreign_decls

    -- In a hs-boot file, the value binders come from the
    --  *signatures*, and there should be no foreign binders
    hs_boot_sig_bndrs = [ L decl_loc (unLoc n)
                        | L decl_loc (TypeSig ns _) <- val_sigs, n <- ns]

    -- the SrcSpan attached to the input should be the span of the
    -- declaration, not just the name
    new_simple :: Located RdrName -> RnM AvailInfo
    new_simple rdr_name = do{ nm <- newTopSrcBinder rdr_name
                            ; return (avail nm) }

    -- Binders (tycon, constructors, record selectors) of one
    -- type/class declaration
    new_tc :: Bool -> LTyClDecl RdrName
           -> RnM (AvailInfo, [(Name, [FieldLabel])])
    new_tc overload_ok tc_decl -- NOT for type/data instances
        = do { let (bndrs, flds) = hsLTyClDeclBinders tc_decl
             ; names@(main_name : sub_names) <- mapM newTopSrcBinder bndrs
             ; flds' <- mapM (newRecordSelector overload_ok sub_names) flds
             ; let fld_env = case unLoc tc_decl of
                     DataDecl { tcdDataDefn = d } -> mk_fld_env d names flds'
                     _                            -> []
             ; return (AvailTC main_name names flds', fld_env) }

    -- Calculate the mapping from constructor names to fields, which
    -- will go in tcg_field_env. It's convenient to do this here where
    -- we are working with a single datatype definition.
    mk_fld_env :: HsDataDefn RdrName -> [Name] -> [FieldLabel] -> [(Name, [FieldLabel])]
    mk_fld_env d names flds = concatMap find_con_flds (dd_cons d)
      where
        find_con_flds (L _ (ConDeclH98 { con_name = L _ rdr
                                       , con_details = RecCon cdflds }))
            = [( find_con_name rdr
               , concatMap find_con_decl_flds (unLoc cdflds) )]
        find_con_flds (L _ (ConDeclGADT
                              { con_names = rdrs
                              , con_type = (HsIB { hsib_body = res_ty})}))
            = map (\ (L _ rdr) -> ( find_con_name rdr
                                  , concatMap find_con_decl_flds cdflds))
                  rdrs
            where
              (_tvs, _cxt, tau) = splitLHsSigmaTy res_ty
              -- Dig the record fields out of the (prefix) GADT
              -- constructor's result type, if it has any
              cdflds = case tau of
                 L _ (HsFunTy
                      (L _ (HsAppsTy
                        [L _ (HsAppPrefix (L _ (HsRecTy flds)))])) _) -> flds
                 L _ (HsFunTy (L _ (HsRecTy flds)) _) -> flds
                 _ -> []
        find_con_flds _ = []

        find_con_name rdr
          = expectJust "getLocalNonValBinders/find_con_name" $
              find (\ n -> nameOccName n == rdrNameOcc rdr) names
        find_con_decl_flds (L _ x)
          = map find_con_decl_fld (cd_fld_names x)

        find_con_decl_fld (L _ (FieldOcc (L _ rdr) _))
          = expectJust "getLocalNonValBinders/find_con_decl_fld" $
              find (\ fl -> flLabel fl == lbl) flds
          where lbl = occNameFS (rdrNameOcc rdr)

    -- Binders arising from one instance declaration (only data family
    -- instances bind new names)
    new_assoc :: Bool -> LInstDecl RdrName
              -> RnM ([AvailInfo], [(Name, [FieldLabel])])
    new_assoc _ (L _ (TyFamInstD {})) = return ([], [])
      -- type instances don't bind new names

    new_assoc overload_ok (L _ (DataFamInstD d))
      = do { (avail, flds) <- new_di overload_ok Nothing d
           ; return ([avail], flds) }
    new_assoc overload_ok (L _ (ClsInstD (ClsInstDecl { cid_poly_ty = inst_ty
                                                      , cid_datafam_insts = adts })))
      | Just (L loc cls_rdr) <- getLHsInstDeclClass_maybe inst_ty
      = do { cls_nm <- setSrcSpan loc $ lookupGlobalOccRn cls_rdr
           ; (avails, fldss)
                    <- mapAndUnzipM (new_loc_di overload_ok (Just cls_nm)) adts
           ; return (avails, concat fldss) }
      | otherwise
      = return ([], [])    -- Do not crash on ill-formed instances
                           -- Eg   instance !Show Int   Trac #3811c

    new_di :: Bool -> Maybe Name -> DataFamInstDecl RdrName
                   -> RnM (AvailInfo, [(Name, [FieldLabel])])
    new_di overload_ok mb_cls ti_decl
        = do { main_name <- lookupFamInstName mb_cls (dfid_tycon ti_decl)
             ; let (bndrs, flds) = hsDataFamInstBinders ti_decl
             ; sub_names <- mapM newTopSrcBinder bndrs
             ; flds' <- mapM (newRecordSelector overload_ok sub_names) flds
             ; let avail    = AvailTC (unLoc main_name) sub_names flds'
                                  -- main_name is not bound here!
                   fld_env  = mk_fld_env (dfid_defn ti_decl) sub_names flds'
             ; return (avail, fld_env) }

    new_loc_di :: Bool -> Maybe Name -> LDataFamInstDecl RdrName
                   -> RnM (AvailInfo, [(Name, [FieldLabel])])
    new_loc_di overload_ok mb_cls (L _ d) = new_di overload_ok mb_cls d
-- | Make the 'FieldLabel' (binding its selector) for one record field,
-- using the first data constructor's name to disambiguate the selector's
-- 'OccName' when -XDuplicateRecordFields is on (first argument).
newRecordSelector :: Bool -> [Name] -> LFieldOcc RdrName -> RnM FieldLabel
newRecordSelector _ [] _ = error "newRecordSelector: datatype has no constructors!"
newRecordSelector overload_ok (dc:_) (L loc (FieldOcc (L _ fld) _))
  = do { selName <- newTopSrcBinder $ L loc $ field
       ; return $ qualFieldLbl { flSelector = selName } }
  where
    fieldOccName = occNameFS $ rdrNameOcc fld
    qualFieldLbl = mkFieldLabelOccs fieldOccName (nameOccName dc) overload_ok
    field | isExact fld = fld
              -- use an Exact RdrName as is to preserve the bindings
              -- of an already renamer-resolved field and its use
              -- sites. This is needed to correctly support record
              -- selectors in Template Haskell. See Note [Binders in
              -- Template Haskell] in Convert.hs and Note [Looking up
              -- Exact RdrNames] in RnEnv.hs.
          | otherwise   = mkRdrUnqual (flSelector qualFieldLbl)
{-
Note [Looking up family names in family instances]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
module M where
type family T a :: *
type instance M.T Int = Bool
We might think that we can simply use 'lookupOccRn' when processing the type
instance to look up 'M.T'. Alas, we can't! The type family declaration is in
the *same* HsGroup as the type instance declaration. Hence, as we are
currently collecting the binders declared in that HsGroup, these binders will
not have been added to the global environment yet.
Solution is simple: process the type family declarations first, extend
the environment, and then process the type instances.
************************************************************************
* *
\subsection{Filtering imports}
* *
************************************************************************
@filterImports@ takes the @ExportEnv@ telling what the imported module makes
available, and filters it through the import spec (if any).
Note [Dealing with imports]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
For import M( ies ), we take the mi_exports of M, and make
imp_occ_env :: OccEnv (Name, AvailInfo, Maybe Name)
One entry for each Name that M exports; the AvailInfo describes just
that Name.
The situation is made more complicated by associated types. E.g.
module M where
class C a where { data T a }
instance C Int where { data T Int = T1 | T2 }
instance C Bool where { data T Int = T3 }
Then M's export_avails are (recall the AvailTC invariant from Avails.hs)
C(C,T), T(T,T1,T2,T3)
Notice that T appears *twice*, once as a child and once as a parent. From
this list we construct a raw list including
T -> (T, T( T1, T2, T3 ), Nothing)
T -> (C, C( C, T ), Nothing)
and we combine these (in function 'combine' in 'imp_occ_env' in
'filterImports') to get
T -> (T, T(T,T1,T2,T3), Just C)
So the overall imp_occ_env is
C -> (C, C(C,T), Nothing)
T -> (T, T(T,T1,T2,T3), Just C)
T1 -> (T1, T(T1,T2,T3), Nothing) -- similarly T2,T3
If we say
import M( T(T1,T2) )
then we get *two* Avails: C(T), T(T1,T2)
Note that the imp_occ_env will have entries for data constructors too,
although we never look up data constructors.
-}
-- | Filter the imported module's exports through the import spec, if
-- any.  See Note [Dealing with imports].
filterImports
    :: ModIface
    -> ImpDeclSpec                     -- The span for the entire import decl
    -> Maybe (Bool, Located [LIE RdrName])    -- Import spec; True => hiding
    -> RnM (Maybe (Bool, Located [LIE Name]), -- Import spec w/ Names
            [GlobalRdrElt])                   -- Same again, but in GRE form
filterImports iface decl_spec Nothing
  = return (Nothing, gresFromAvails (Just imp_spec) (mi_exports iface))
  where
    imp_spec = ImpSpec { is_decl = decl_spec, is_item = ImpAll }

filterImports iface decl_spec (Just (want_hiding, L l import_items))
  = do  -- check for errors, convert RdrNames to Names
        items1 <- mapM lookup_lie import_items

        let items2 :: [(LIE Name, AvailInfo)]
            items2 = concat items1
                -- NB the AvailInfo may have duplicates, and several items
                --    for the same parent; e.g N(x) and N(y)

            names  = availsToNameSet (map snd items2)
            keep n = not (n `elemNameSet` names)
            pruned_avails = filterAvails keep all_avails
            hiding_spec = ImpSpec { is_decl = decl_spec, is_item = ImpAll }

            gres | want_hiding = gresFromAvails (Just hiding_spec) pruned_avails
                 | otherwise   = concatMap (gresFromIE decl_spec) items2

        return (Just (want_hiding, L l (map fst items2)), gres)
  where
    all_avails = mi_exports iface

        -- See Note [Dealing with imports]
    imp_occ_env :: OccEnv (Name,        -- the name
                           AvailInfo,   -- the export item providing the name
                           Maybe Name)  -- the parent of associated types
    imp_occ_env = mkOccEnv_C combine [ (nameOccName n, (n, a, Nothing))
                                     | a <- all_avails, n <- availNames a]
      where
        -- See Note [Dealing with imports]
        -- 'combine' is only called for associated data types which appear
        -- twice in the all_avails. In the example, we combine
        --    T(T,T1,T2,T3) and C(C,T)  to give  (T, T(T,T1,T2,T3), Just C)
        -- NB: the AvailTC can have fields as well as data constructors (Trac #12127)
        combine (name1, a1@(AvailTC p1 _ _), mp1)
                (name2, a2@(AvailTC p2 _ _), mp2)
          = ASSERT( name1 == name2 && isNothing mp1 && isNothing mp2 )
            if p1 == name1 then (name1, a1, Just p2)
                           else (name1, a2, Just p1)
        combine x y = pprPanic "filterImports/combine" (ppr x $$ ppr y)

    -- Look up one RdrName among the imported module's exports
    lookup_name :: RdrName -> IELookupM (Name, AvailInfo, Maybe Name)
    lookup_name rdr | isQual rdr              = failLookupWith (QualImportError rdr)
                    | Just succ <- mb_success = return succ
                    | otherwise               = failLookupWith BadImport
      where
        mb_success = lookupOccEnv imp_occ_env (rdrNameOcc rdr)

    -- Process one import item: report errors, emit warnings, and
    -- return the renamed item with its AvailInfo(s)
    lookup_lie :: LIE RdrName -> TcRn [(LIE Name, AvailInfo)]
    lookup_lie (L loc ieRdr)
        = do (stuff, warns) <- setSrcSpan loc $
                               liftM (fromMaybe ([],[])) $
                               run_lookup (lookup_ie ieRdr)
             mapM_ emit_warning warns
             return [ (L loc ie, avail) | (ie,avail) <- stuff ]
        where
            -- Warn when importing T(..) if T was exported abstractly
            emit_warning (DodgyImport n) = whenWOptM Opt_WarnDodgyImports $
              addWarn (Reason Opt_WarnDodgyImports) (dodgyImportWarn n)
            emit_warning MissingImportList = whenWOptM Opt_WarnMissingImportList $
              addWarn (Reason Opt_WarnMissingImportList) (missingImportListItem ieRdr)
            emit_warning BadImportW = whenWOptM Opt_WarnDodgyImports $
              addWarn (Reason Opt_WarnDodgyImports) (lookup_err_msg BadImport)

            run_lookup :: IELookupM a -> TcRn (Maybe a)
            run_lookup m = case m of
              Failed err  -> addErr (lookup_err_msg err) >> return Nothing
              Succeeded a -> return (Just a)

            lookup_err_msg err = case err of
              BadImport           -> badImportItemErr iface decl_spec ieRdr all_avails
              IllegalImport       -> illegalImportItemErr
              QualImportError rdr -> qualImportItemErr rdr

        -- For each import item, we convert its RdrNames to Names,
        -- and at the same time construct an AvailInfo corresponding
        -- to what is actually imported by this item.
        -- Returns Nothing on error.
        -- We return a list here, because in the case of an import
        -- item like C, if we are hiding, then C refers to *both* a
        -- type/class and a data constructor.  Moreover, when we import
        -- data constructors of an associated family, we need separate
        -- AvailInfos for the data constructors and the family (as they have
        -- different parents).  See Note [Dealing with imports]
    lookup_ie :: IE RdrName -> IELookupM ([(IE Name, AvailInfo)], [IELookupWarning])
    lookup_ie ie = handle_bad_import $ do
      case ie of
        IEVar (L l n) -> do
            (name, avail, _) <- lookup_name n
            return ([(IEVar (L l name), trimAvail avail name)], [])

        IEThingAll (L l tc) -> do
            (name, avail, mb_parent) <- lookup_name tc
            let warns = case avail of
                          Avail {}                     -- e.g. f(..)
                            -> [DodgyImport tc]

                          AvailTC _ subs fs
                            | null (drop 1 subs) && null fs -- e.g. T(..) where T is a synonym
                            -> [DodgyImport tc]

                            | not (is_qual decl_spec)  -- e.g. import M( T(..) )
                            -> [MissingImportList]

                            | otherwise
                            -> []

                renamed_ie = IEThingAll (L l name)
                sub_avails = case avail of
                               Avail {}              -> []
                               AvailTC name2 subs fs -> [(renamed_ie, AvailTC name2 (subs \\ [name]) fs)]
            case mb_parent of
              Nothing     -> return ([(renamed_ie, avail)], warns)
                             -- non-associated ty/cls
              Just parent -> return ((renamed_ie, AvailTC parent [name] []) : sub_avails, warns)
                             -- associated type

        IEThingAbs (L l tc)
            | want_hiding   -- hiding ( C )
                       -- Here the 'C' can be a data constructor
                       --  *or* a type/class, or even both
            -> let tc_name = lookup_name tc
                   dc_name = lookup_name (setRdrNameSpace tc srcDataName)
               in
               case catIELookupM [ tc_name, dc_name ] of
                 []    -> failLookupWith BadImport
                 names -> return ([mkIEThingAbs l name | name <- names], [])
            | otherwise
            -> do nameAvail <- lookup_name tc
                  return ([mkIEThingAbs l nameAvail], [])

        IEThingWith (L l rdr_tc) wc rdr_ns rdr_fs ->
          ASSERT2(null rdr_fs, ppr rdr_fs) do
           (name, AvailTC _ ns subflds, mb_parent) <- lookup_name rdr_tc

           -- Look up the children in the sub-names of the parent
           let subnames = case ns of   -- The tc is first in ns,
                            [] -> []   -- if it is there at all
                                       -- See the AvailTC Invariant in Avail.hs
                            (n1:ns1) | n1 == name -> ns1
                                     | otherwise  -> ns
           case lookupChildren (map Left subnames ++ map Right subflds) rdr_ns of
             Nothing -> failLookupWith BadImport
             Just (childnames, childflds) ->
               case mb_parent of
                 -- non-associated ty/cls
                 Nothing
                   -> return ([(IEThingWith (L l name) wc childnames childflds,
                               AvailTC name (name:map unLoc childnames) (map unLoc childflds))],
                              [])
                 -- associated ty
                 Just parent
                   -> return ([(IEThingWith (L l name) wc childnames childflds,
                                AvailTC name (map unLoc childnames) (map unLoc childflds)),
                               (IEThingWith (L l name) wc childnames childflds,
                                AvailTC parent [name] [])],
                              [])

        _other -> failLookupWith IllegalImport
        -- could be IEModuleContents, IEGroup, IEDoc, IEDocNamed
        -- all errors.

      where
        mkIEThingAbs l (n, av, Nothing    ) = (IEThingAbs (L l n),
                                               trimAvail av n)
        mkIEThingAbs l (n, _,  Just parent) = (IEThingAbs (L l n),
                                               AvailTC parent [n] [])

        -- When hiding, a failed lookup is merely a dodgy-import
        -- warning, not an error
        handle_bad_import m = catchIELookup m $ \err -> case err of
          BadImport | want_hiding -> return ([], [BadImportW])
          _                       -> failLookupWith err
-- | Monad for looking up a single import item; either succeeds or
-- fails with an 'IELookupError'.
type IELookupM = MaybeErr IELookupError

-- | Warnings that may be emitted while processing an import item.
data IELookupWarning
  = BadImportW
  | MissingImportList
  | DodgyImport RdrName
  -- NB. use the RdrName for reporting a "dodgy" import

-- | Ways in which looking up an import item can fail.
data IELookupError
  = QualImportError RdrName
  | BadImport
  | IllegalImport
-- | Abort an import-item lookup with the given error.
failLookupWith :: IELookupError -> IELookupM a
failLookupWith = Failed
-- | Run a lookup, handing any failure over to the supplied handler.
catchIELookup :: IELookupM a -> (IELookupError -> IELookupM a) -> IELookupM a
catchIELookup (Succeeded r) _       = Succeeded r
catchIELookup (Failed err)  handler = handler err
-- | Keep only the successful results, discarding the failures
-- (analogous to 'catMaybes').
catIELookupM :: [IELookupM a] -> [a]
catIELookupM = concatMap successes
  where
    successes (Succeeded a) = [a]
    successes (Failed _)    = []
{-
************************************************************************
* *
\subsection{Import/Export Utils}
* *
************************************************************************
-}
-- | Merge two 'AvailInfo's for the same entity, maintaining the
-- AvailTC invariant that the parent (if present) comes first.
-- Panics (in a debug compiler) if the two avails name different things.
plusAvail :: AvailInfo -> AvailInfo -> AvailInfo
plusAvail a1 a2
  | debugIsOn && availName a1 /= availName a2
  = pprPanic "RnEnv.plusAvail names differ" (hsep [ppr a1,ppr a2])
plusAvail a1@(Avail {})         (Avail {})        = a1
plusAvail (AvailTC _ [] [])     a2@(AvailTC {})   = a2
plusAvail a1@(AvailTC {})       (AvailTC _ [] []) = a1
plusAvail (AvailTC n1 (s1:ss1) fs1) (AvailTC n2 (s2:ss2) fs2)
  = case (n1==s1, n2==s2) of  -- Maintain invariant the parent is first
      (True,True)   -> AvailTC n1 (s1 : (ss1 `unionLists` ss2))
                                  (fs1 `unionLists` fs2)
      (True,False)  -> AvailTC n1 (s1 : (ss1 `unionLists` (s2:ss2)))
                                  (fs1 `unionLists` fs2)
      (False,True)  -> AvailTC n1 (s2 : ((s1:ss1) `unionLists` ss2))
                                  (fs1 `unionLists` fs2)
      (False,False) -> AvailTC n1 ((s1:ss1) `unionLists` (s2:ss2))
                                  (fs1 `unionLists` fs2)
plusAvail (AvailTC n1 ss1 fs1) (AvailTC _ [] fs2)
  = AvailTC n1 ss1 (fs1 `unionLists` fs2)
plusAvail (AvailTC n1 [] fs1)  (AvailTC _ ss2 fs2)
  = AvailTC n1 ss2 (fs1 `unionLists` fs2)
plusAvail a1 a2 = pprPanic "RnEnv.plusAvail" (hsep [ppr a1,ppr a2])
-- | trims an 'AvailInfo' to keep only a single name
trimAvail :: AvailInfo -> Name -> AvailInfo
trimAvail (Avail b n)       _ = Avail b n
trimAvail (AvailTC n ns fs) m = case find ((== m) . flSelector) fs of
    -- m is a field selector: keep just its field label
    Just x  -> AvailTC n [] [x]
    -- otherwise m must be one of the sub-names
    Nothing -> ASSERT( m `elem` ns ) AvailTC n [m] []
-- | filters a list of 'AvailInfo's by the given name predicate,
-- dropping any availability that becomes empty
filterAvails :: (Name -> Bool) -> [AvailInfo] -> [AvailInfo]
filterAvails keep = foldr (filterAvail keep) []
-- | filters an 'AvailInfo' by the given name predicate, consing any
-- surviving availability onto the accumulator
filterAvail :: (Name -> Bool) -> AvailInfo -> [AvailInfo] -> [AvailInfo]
filterAvail keep avail@(Avail _ n) rest
  | keep n    = avail : rest
  | otherwise = rest
filterAvail keep (AvailTC tc ns fs) rest
  | null kept_ns && null kept_fs = rest   -- nothing survived the filter
  | otherwise                    = AvailTC tc kept_ns kept_fs : rest
  where
    kept_ns = filter keep ns
    kept_fs = filter (keep . flSelector) fs
-- | Given an import\/export spec, construct the appropriate 'GlobalRdrElt's.
gresFromIE :: ImpDeclSpec -> (LIE Name, AvailInfo) -> [GlobalRdrElt]
gresFromIE decl_spec (L loc ie, avail) = gresFromAvail provenance avail
  where
    -- For a T(..) item only the parent T itself counts as explicitly
    -- mentioned; every name in any other item form is explicit.
    explicitly_named n = case ie of
      IEThingAll (L _ parent) -> n == parent
      _                       -> True

    provenance name =
      Just (ImpSpec { is_decl = decl_spec
                    , is_item = ImpSome { is_explicit = explicitly_named name
                                        , is_iloc     = loc } })
{-
Note [Children for duplicate record fields]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider the module
{-# LANGUAGE DuplicateRecordFields #-}
module M (F(foo, MkFInt, MkFBool)) where
data family F a
data instance F Int = MkFInt { foo :: Int }
data instance F Bool = MkFBool { foo :: Bool }
The `foo` in the export list refers to *both* selectors! For this
reason, lookupChildren builds an environment that maps the FastString
to a list of items, rather than a single item.
-}
-- | Build an environment mapping each parent 'Name' to the in-scope
-- 'GlobalRdrElt's that list it as their parent (class methods, data
-- constructors, record fields).
mkChildEnv :: [GlobalRdrElt] -> NameEnv [GlobalRdrElt]
mkChildEnv gres = foldr add emptyNameEnv gres
  where
    add gre env = case gre_par gre of
        FldParent p _  -> extendNameEnv_Acc (:) singleton env p gre
        ParentIs  p    -> extendNameEnv_Acc (:) singleton env p gre
        NoParent       -> env   -- parentless entities get no entry
        PatternSynonym -> env   -- pattern synonyms are collected separately
                                -- (see 'findPatSyns')
-- | Select just the pattern-synonym entries from a list of
-- 'GlobalRdrElt's (their 'gre_par' is 'PatternSynonym').
findPatSyns :: [GlobalRdrElt] -> [GlobalRdrElt]
findPatSyns gres =
    [ gre | gre@(GRE { gre_par = PatternSynonym }) <- gres ]
-- | Look up the children recorded for a parent 'Name'; a missing
-- entry simply means the parent has no children in scope.
findChildren :: NameEnv [a] -> Name -> [a]
findChildren env n =
    case lookupNameEnv env n of
      Just kids -> kids
      Nothing   -> []
-- | Resolve the sub-items of an import\/export item against a parent's
-- children, splitting them into plain names and record fields.
lookupChildren :: [Either Name FieldLabel] -> [Located RdrName]
               -> Maybe ([Located Name], [Located FieldLabel])
-- (lookupChildren all_kids rdr_items) maps each rdr_item to its
-- corresponding Name all_kids, if the former exists
-- The matching is done by FastString, not OccName, so that
--    Cls( meth, AssocTy )
-- will correctly find AssocTy among the all_kids of Cls, even though
-- the RdrName for AssocTy may have a (bogus) DataName namespace
-- (Really the rdr_items should be FastStrings in the first place.)
lookupChildren all_kids rdr_items
  = do xs <- mapM doOne rdr_items
       return (fmap concat (partitionEithers xs))
  where
    -- Fails (Nothing) when an item resolves to nothing, or to a mixture
    -- of plain names and field labels.
    doOne (L l r) = case (lookupFsEnv kid_env . occNameFS . rdrNameOcc) r of
      Just [Left n]            -> Just (Left (L l n))
      Just rs | all isRight rs -> Just (Right (map (L l) (rights rs)))
      _                        -> Nothing

    -- See Note [Children for duplicate record fields]
    kid_env = extendFsEnvList_C (++) emptyFsEnv
              [(either (occNameFS . nameOccName) flLabel x, [x]) | x <- all_kids]
-- | Split a list of 'GlobalRdrElt's into ordinary names and record
-- field labels (see 'classifyGRE').
classifyGREs :: [GlobalRdrElt] -> ([Name], [FieldLabel])
classifyGREs gres = partitionEithers [ classifyGRE gre | gre <- gres ]
-- | Decide whether a 'GlobalRdrElt' stands for an ordinary 'Name' or a
-- record-field selector ('FieldLabel').
classifyGRE :: GlobalRdrElt -> Either Name FieldLabel
classifyGRE gre = case gre_par gre of
  FldParent _ Nothing    -> Right (FieldLabel (occNameFS (nameOccName n)) False n)
  -- Just lbl: the field has a mangled selector name distinct from its
  -- label (DuplicateRecordFields), hence the 'True' overloaded flag
  FldParent _ (Just lbl) -> Right (FieldLabel lbl True n)
  _                      -> Left  n
  where
    n = gre_name gre
-- | Combines 'AvailInfo's from the same family
-- 'avails' may have several items with the same availName
-- E.g  import Ix( Ix(..), index )
-- will give Ix(Ix,index,range) and Ix(index)
-- We want to combine these; addAvail does that
nubAvails :: [AvailInfo] -> [AvailInfo]
-- NOTE(review): a lazy left fold accumulates thunks in the NameEnv;
-- foldl' would be preferable if Data.List is imported -- confirm.
nubAvails avails = nameEnvElts (foldl add emptyNameEnv avails)
  where
    -- merge an avail into the env keyed by its family name
    add env avail = extendNameEnv_C plusAvail env (availName avail) avail
{-
************************************************************************
* *
\subsection{Export list processing}
* *
************************************************************************
Processing the export list.
You might think that we should record things that appear in the export
list as ``occurrences'' (using @addOccurrenceName@), but you'd be
wrong. We do check (here) that they are in scope, but there is no
need to slurp in their actual declaration (which is what
@addOccurrenceName@ forces).
Indeed, doing so would big trouble when compiling @PrelBase@, because
it re-exports @GHC@, which includes @takeMVar#@, whose type includes
@ConcBase.StateAndSynchVar#@, and so on...
Note [Exports of data families]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose you see (Trac #5306)
module M where
import X( F )
data instance F Int = FInt
What does M export? AvailTC F [FInt]
or AvailTC F [F,FInt]?
The former is strictly right because F isn't defined in this module.
But then you can never do an explicit import of M, thus
import M( F( FInt ) )
because F isn't exported by M. Nor can you import FInt alone from here
import M( FInt )
because we don't have syntax to support that. (It looks like an import of
the type FInt.)
At one point I implemented a compromise:
* When constructing exports with no export list, or with module M(
module M ), we add the parent to the exports as well.
* But not when you see module M( f ), even if f is a
class method with a parent.
* Nor when you see module M( module N ), with N /= M.
But the compromise seemed too much of a hack, so we backed it out.
You just have to use an explicit export list:
module M( F(..) ) where ...
-}
type ExportAccum        -- The type of the accumulating parameter of
                        -- the main worker function in rnExports
     = ([LIE Name],     -- Export items with Names
        ExportOccMap,   -- Tracks exported occurrence names
        [AvailInfo])    -- The accumulated exported stuff
                        --   Not nub'd!

-- | Starting state: no items processed, no occurrences seen,
-- nothing exported yet.
emptyExportAccum :: ExportAccum
emptyExportAccum = ([], emptyOccEnv, [])

type ExportOccMap = OccEnv (Name, IE RdrName)
        -- Tracks what a particular exported OccName
        --   in an export list refers to, and which item
        --   it came from.  It's illegal to export two distinct things
        --   that have the same occurrence name
-- | Rename the export list of a module, producing the renamed export
-- items and a 'TcGblEnv' extended with the final exports and their uses.
rnExports :: Bool       -- False => no 'module M(..) where' header at all
          -> Maybe (Located [LIE RdrName]) -- Nothing => no explicit export list
          -> TcGblEnv
          -> RnM (Maybe [LIE Name], TcGblEnv)
        -- Complains if two distinct exports have same OccName
        -- Warns about identical exports.
        -- Complains about exports items not in scope
rnExports explicit_mod exports
          tcg_env@(TcGblEnv { tcg_mod     = this_mod,
                              tcg_rdr_env = rdr_env,
                              tcg_imports = imports })
 = unsetWOptM Opt_WarnWarningsDeprecations $
       -- Do not report deprecations arising from the export
       -- list, to avoid bleating about re-exporting a deprecated
       -- thing (especially via 'module Foo' export item)
   do   {
        -- If the module header is omitted altogether, then behave
        -- as if the user had written "module Main(main) where..."
        -- EXCEPT in interactive mode, when we behave as if he had
        -- written "module Main where ..."
        -- Reason: don't want to complain about 'main' not in scope
        --         in interactive mode
        ; dflags <- getDynFlags
        ; let real_exports
                 | explicit_mod = exports
                 | ghcLink dflags == LinkInMemory = Nothing
                 | otherwise
                          = Just (noLoc [noLoc (IEVar (noLoc main_RDR_Unqual))])
                        -- ToDo: the 'noLoc' here is unhelpful if 'main'
                        --       turns out to be out of scope

        ; (rn_exports, avails) <- exports_from_avail real_exports rdr_env imports this_mod
        ; traceRn (ppr avails)
        ; let final_avails = nubAvails avails    -- Combine families
              final_ns     = availsToNameSetWithSelectors final_avails

        ; traceRn (text "rnExports: Exports:" <+> ppr final_avails)

        -- Record the exports in the environment, and mark every
        -- exported name as used (usesOnly) for unused-binding analysis
        ; let new_tcg_env =
                  (tcg_env { tcg_exports    = final_avails,
                             tcg_rn_exports = case tcg_rn_exports tcg_env of
                                                Nothing -> Nothing
                                                Just _  -> rn_exports,
                             tcg_dus = tcg_dus tcg_env `plusDU`
                                       usesOnly final_ns })
        ; return (rn_exports, new_tcg_env) }
-- | Compute the exported 'AvailInfo's from the export list (or its
-- absence).  Returns the renamed items (when there was an explicit
-- list) together with everything exported.
exports_from_avail :: Maybe (Located [LIE RdrName])
                         -- Nothing => no explicit export list
                   -> GlobalRdrEnv
                   -> ImportAvails
                   -> Module
                   -> RnM (Maybe [LIE Name], [AvailInfo])

exports_from_avail Nothing rdr_env _imports _this_mod
   -- The same as (module M) where M is the current module name,
   -- so that's how we handle it, except we also export the data family
   -- when a data instance is exported.
  = let avails = [ fix_faminst $ availFromGRE gre
                 | gre <- globalRdrEnvElts rdr_env
                 , isLocalGRE gre ]
    in return (Nothing, avails)
  where
    -- #11164: when we define a data instance
    -- but not data family, re-export the family
    -- Even though we don't check whether this is actually a data family
    -- only data families can locally define subordinate things (`ns` here)
    -- without locally defining (and instead importing) the parent (`n`)
    fix_faminst (AvailTC n ns flds)
      | not (n `elem` ns)
      = AvailTC n (n:ns) flds

    fix_faminst avail = avail
exports_from_avail (Just (L _ rdr_items)) rdr_env imports this_mod
  = do (ie_names, _, exports) <- foldlM do_litem emptyExportAccum rdr_items
       return (Just ie_names, exports)
  where
    -- Process one export item, reporting errors at its source span
    do_litem :: ExportAccum -> LIE RdrName -> RnM ExportAccum
    do_litem acc lie = setSrcSpan (getLoc lie) (exports_from_item acc lie)

    -- Maps a parent to its in-scope children
    kids_env :: NameEnv [GlobalRdrElt]
    kids_env = mkChildEnv (globalRdrEnvElts rdr_env)

    -- In-scope pattern synonyms; candidates for children of an
    -- exported type (they have no Parent in the GlobalRdrEnv)
    pat_syns :: [GlobalRdrElt]
    pat_syns = findPatSyns (globalRdrEnvElts rdr_env)

    -- Names of all modules brought into scope by some import
    imported_modules = [ imv_name imv
                       | xs <- moduleEnvElts $ imp_mods imports, imv <- xs ]

    exports_from_item :: ExportAccum -> LIE RdrName -> RnM ExportAccum
    exports_from_item acc@(ie_names, occs, exports)
                      (L loc (IEModuleContents (L lm mod)))
        | let earlier_mods = [ mod
                             | (L _ (IEModuleContents (L _ mod))) <- ie_names ]
        , mod `elem` earlier_mods    -- Duplicate export of M
        = do { warn_dup_exports <- woptM Opt_WarnDuplicateExports ;
               warnIf (Reason Opt_WarnDuplicateExports) warn_dup_exports
                      (dupModuleExport mod) ;
               return acc }

        | otherwise
        = do { warnDodgyExports <- woptM Opt_WarnDodgyExports
             ; let { exportValid = (mod `elem` imported_modules)
                                || (moduleName this_mod == mod)
                   ; gre_prs     = pickGREsModExp mod (globalRdrEnvElts rdr_env)
                   ; new_exports = map (availFromGRE . fst) gre_prs
                   ; names       = map (gre_name . fst) gre_prs
                   ; all_gres    = foldr (\(gre1,gre2) gres -> gre1 : gre2 : gres) [] gre_prs
                   }

             ; checkErr exportValid (moduleNotImported mod)
             ; warnIf (Reason Opt_WarnDodgyExports)
                      (warnDodgyExports && exportValid && null gre_prs)
                      (nullModuleExport mod)

             ; traceRn (text "efa" <+> (ppr mod $$ ppr all_gres))
             ; addUsedGREs all_gres

             ; occs' <- check_occs (IEModuleContents (noLoc mod)) occs names
                      -- This check_occs not only finds conflicts
                      -- between this item and others, but also
                      -- internally within this item.  That is, if
                      -- 'M.x' is in scope in several ways, we'll have
                      -- several members of mod_avails with the same
                      -- OccName.
             ; traceRn (vcat [ text "export mod" <+> ppr mod
                             , ppr new_exports ])
             ; return (L loc (IEModuleContents (L lm mod)) : ie_names,
                       occs', new_exports ++ exports) }

    exports_from_item acc@(lie_names, occs, exports) (L loc ie)
        | isDoc ie
        = do new_ie <- lookup_doc_ie ie
             return (L loc new_ie : lie_names, occs, exports)

        | otherwise
        = do (new_ie, avail) <- lookup_ie ie
             if isUnboundName (ieName new_ie)
             then return acc     -- Avoid error cascade
             else do
               occs' <- check_occs ie occs (availNames avail)
               return (L loc new_ie : lie_names, occs', avail : exports)

    -------------
    -- Rename one (non-documentation) export item and compute what it
    -- makes available.
    lookup_ie :: IE RdrName -> RnM (IE Name, AvailInfo)
    lookup_ie (IEVar (L l rdr))
        = do (name, avail) <- lookupGreAvailRn rdr
             return (IEVar (L l name), avail)

    lookup_ie (IEThingAbs (L l rdr))
        = do (name, avail) <- lookupGreAvailRn rdr
             return (IEThingAbs (L l name), avail)

    lookup_ie ie@(IEThingAll n)
        = do
            (n, avail, flds) <- lookup_ie_all ie n
            let name = unLoc n
            return (IEThingAll n, AvailTC name (name:avail) flds)

    lookup_ie ie@(IEThingWith l wc sub_rdrs _)
        = do
            (lname, subs, avails, flds) <- lookup_ie_with ie l sub_rdrs
            -- A trailing wildcard, T(A, B, ..), additionally pulls in
            -- every child of T
            (_, all_avail, all_flds) <-
              case wc of
                NoIEWildcard -> return (lname, [], [])
                IEWildcard _ -> lookup_ie_all ie l
            let name = unLoc lname
            return (IEThingWith lname wc subs [],
                    AvailTC name (name : avails ++ all_avail)
                                 (flds ++ all_flds))

    lookup_ie _ = panic "lookup_ie"     -- Other cases covered earlier

    -- Handle T(a, b, c): resolve T, then each listed child
    lookup_ie_with :: IE RdrName -> Located RdrName -> [Located RdrName]
                   -> RnM (Located Name, [Located Name], [Name], [FieldLabel])
    lookup_ie_with ie (L l rdr) sub_rdrs
        = do name <- lookupGlobalOccRn rdr
             let gres = findChildren kids_env name
                 mchildren =
                  lookupChildren (map classifyGRE (gres ++ pat_syns)) sub_rdrs
             addUsedKids rdr gres
             if isUnboundName name
                then return (L l name, [], [name], [])
                else
                  case mchildren of
                    Nothing -> do
                          addErr (exportItemErr ie)
                          return (L l name, [], [name], [])
                    Just (non_flds, flds) -> do
                          addUsedKids rdr gres
                          return (L l name, non_flds
                                 , map unLoc non_flds
                                 , map unLoc flds)

    -- Handle T(..): resolve T and take all its in-scope children
    lookup_ie_all :: IE RdrName -> Located RdrName
                  -> RnM (Located Name, [Name], [FieldLabel])
    lookup_ie_all ie (L l rdr) =
          do name <- lookupGlobalOccRn rdr
             let gres = findChildren kids_env name
                 (non_flds, flds) = classifyGREs gres
             addUsedKids rdr gres
             warnDodgyExports <- woptM Opt_WarnDodgyExports
             when (null gres) $
                  if isTyConName name
                  then when warnDodgyExports $
                           addWarn (Reason Opt_WarnDodgyExports)
                                   (dodgyExportWarn name)
                  else -- This occurs when you export T(..), but
                       -- only import T abstractly, or T is a synonym.
                       addErr (exportItemErr ie)
             return (L l name, non_flds, flds)

    -------------
    -- Rename Haddock documentation items in the export list
    lookup_doc_ie :: IE RdrName -> RnM (IE Name)
    lookup_doc_ie (IEGroup lev doc) = do rn_doc <- rnHsDoc doc
                                         return (IEGroup lev rn_doc)
    lookup_doc_ie (IEDoc doc)       = do rn_doc <- rnHsDoc doc
                                         return (IEDoc rn_doc)
    lookup_doc_ie (IEDocNamed str)  = return (IEDocNamed str)
    lookup_doc_ie _ = panic "lookup_doc_ie"     -- Other cases covered earlier

    -- In an export item M.T(A,B,C), we want to treat the uses of
    -- A,B,C as if they were M.A, M.B, M.C
    -- Happily pickGREs does just the right thing
    addUsedKids :: RdrName -> [GlobalRdrElt] -> RnM ()
    addUsedKids parent_rdr kid_gres = addUsedGREs (pickGREs parent_rdr kid_gres)
-- | Is this export item a Haddock documentation item (doc chunk,
-- named doc, or section heading)?
isDoc :: IE RdrName -> Bool
isDoc ie =
    case ie of
      IEDoc _      -> True
      IEDocNamed _ -> True
      IEGroup _ _  -> True
      _            -> False
-------------------------------
-- | Check each exported 'Name' against the occurrence map, reporting
-- duplicate exports (warning) and clashing exports of distinct names
-- with the same 'OccName' (error).  Returns the extended map.
check_occs :: IE RdrName -> ExportOccMap -> [Name] -> RnM ExportOccMap
check_occs ie occs names  -- 'names' are the entities specified by 'ie'
  = foldlM check occs names
  where
    check occs name
      = case lookupOccEnv occs name_occ of
          Nothing -> return (extendOccEnv occs name_occ (name, ie))

          Just (name', ie')
            | name == name'   -- Duplicate export
            -- But we don't want to warn if the same thing is exported
            -- by two different module exports. See ticket #4478.
            -> do unless (dupExport_ok name ie ie') $ do
                    warn_dup_exports <- woptM Opt_WarnDuplicateExports
                    warnIf (Reason Opt_WarnDuplicateExports) warn_dup_exports
                           (dupExportWarn name_occ ie ie')
                  return occs

            | otherwise    -- Same occ name but different names: an error
            ->  do { global_env <- getGlobalRdrEnv ;
                     addErr (exportClashErr global_env name' name ie' ie) ;
                     return occs }
      where
        name_occ = nameOccName name
dupExport_ok :: Name -> IE RdrName -> IE RdrName -> Bool
-- The Name is exported by both IEs. Is that ok?
-- "No"  iff the name is mentioned explicitly in both IEs
--        or one of the IEs mentions the name *alone*
-- "Yes" otherwise
--
-- Examples of "no":  module M( f, f )
--                    module M( fmap, Functor(..) )
--                    module M( module Data.List, head )
--
-- Example of "yes"
--    module M( module A, module B ) where
--        import A( f )
--        import B( f )
--
-- Example of "yes" (Trac #2436)
--    module M( C(..), T(..) ) where
--         class C a where { data T a }
--         instance C Int where { data T Int = TInt }
--
-- Example of "yes" (Trac #2436)
--    module Foo ( T ) where
--      data family T a
--    module Bar ( T(..), module Foo ) where
--        import Foo
--        data instance T Int = TInt
dupExport_ok n ie1 ie2
  = not (  single ie1 || single ie2
        || (explicit_in ie1 && explicit_in ie2) )
  where
    -- Is 'n' mentioned explicitly by this item?
    explicit_in (IEModuleContents _) = False                -- module M
    explicit_in (IEThingAll r) = nameOccName n == rdrNameOcc (unLoc r)  -- T(..)
    explicit_in _              = True

    -- Does this item mention exactly one thing?
    single (IEVar {})      = True
    single (IEThingAbs {}) = True
    single _               = False
{-
*********************************************************
* *
\subsection{Unused names}
* *
*********************************************************
-}
-- | Emit warnings about unused imports, unused top-level bindings,
-- and missing type signatures.
reportUnusedNames :: Maybe (Located [LIE RdrName])  -- Export list
                  -> TcGblEnv -> RnM ()
reportUnusedNames _export_decls gbl_env
  = do { traceRn ((text "RUN") <+> (ppr (tcg_dus gbl_env)))
       ; warnUnusedImportDecls gbl_env
       ; warnUnusedTopBinds unused_locals
       ; warnMissingSignatures gbl_env }
  where
    used_names :: NameSet
    used_names = findUses (tcg_dus gbl_env) emptyNameSet
    -- NB: currently, if f x = g, we only treat 'g' as used if 'f' is used
    -- Hence findUses

    -- Collect the defined names from the in-scope environment
    defined_names :: [GlobalRdrElt]
    defined_names = globalRdrEnvElts (tcg_rdr_env gbl_env)

    -- Note that defined_and_used, defined_but_not_used
    -- are both [GRE]; that's why we need defined_and_used
    -- rather than just used_names
    _defined_and_used, defined_but_not_used :: [GlobalRdrElt]
    (_defined_and_used, defined_but_not_used)
        = partition (gre_is_used used_names) defined_names

    kids_env = mkChildEnv defined_names
    -- This is done in mkExports too; duplicated work

    gre_is_used :: NameSet -> GlobalRdrElt -> Bool
    gre_is_used used_names (GRE {gre_name = name})
        = name `elemNameSet` used_names
       || any (\ gre -> gre_name gre `elemNameSet` used_names) (findChildren kids_env name)
                -- A use of C implies a use of T,
                -- if C was brought into scope by T(..) or T(C)

    -- Filter out the ones that are
    --  (a) defined in this module, and
    --  (b) not defined by a 'deriving' clause
    -- The latter have an Internal Name, so we can filter them out easily
    unused_locals :: [GlobalRdrElt]
    unused_locals = filter is_unused_local defined_but_not_used

    is_unused_local :: GlobalRdrElt -> Bool
    is_unused_local gre = isLocalGRE gre && isExternalName (gre_name gre)
{-
*********************************************************
* *
\subsection{Unused imports}
* *
*********************************************************
This code finds which import declarations are unused. The
specification and implementation notes are here:
http://ghc.haskell.org/trac/ghc/wiki/Commentary/Compiler/UnusedImports
-}
-- | Per-import-declaration usage summary, consumed by the
-- unused-import warning and by @-ddump-minimal-imports@.
type ImportDeclUsage
   = ( LImportDecl Name   -- The import declaration
     , [AvailInfo]        -- What *is* used (normalised)
     , [Name] )           -- What is imported but *not* used
-- | Warn about import declarations (or parts of them) that were never
-- used, and optionally dump the minimal import list.
warnUnusedImportDecls :: TcGblEnv -> RnM ()
warnUnusedImportDecls gbl_env
  = do { uses <- readMutVar (tcg_used_gres gbl_env)
       ; let user_imports = filterOut (ideclImplicit . unLoc) (tcg_rn_imports gbl_env)
                      -- This whole function deals only with *user* imports
                      -- both for warning about unnecessary ones, and for
                      -- deciding the minimal ones
             rdr_env = tcg_rdr_env gbl_env
             fld_env = mkFieldEnv rdr_env

       ; let usage :: [ImportDeclUsage]
             usage = findImportUsage user_imports uses

       ; traceRn (vcat [ text "Uses:" <+> ppr uses
                       , text "Import usage" <+> ppr usage])
       ; whenWOptM Opt_WarnUnusedImports $
         mapM_ (warnUnusedImport Opt_WarnUnusedImports fld_env) usage

       ; whenGOptM Opt_D_dump_minimal_imports $
         printMinimalImports usage }
-- | Warn the user about top level binders that lack type signatures.
warnMissingSignatures :: TcGblEnv -> RnM ()
warnMissingSignatures gbl_env
  = do { let exports = availsToNameSet (tcg_exports gbl_env)
             sig_ns  = tcg_sigs gbl_env
               -- We use sig_ns to exclude top-level bindings that are generated by GHC
             binds    = collectHsBindsBinders $ tcg_binds gbl_env
             pat_syns = tcg_patsyns gbl_env

         -- Warn about missing signatures
         -- Do this only when we have a type to offer
       ; warn_missing_sigs  <- woptM Opt_WarnMissingSignatures
       ; warn_only_exported <- woptM Opt_WarnMissingExportedSignatures
       ; warn_pat_syns      <- woptM Opt_WarnMissingPatternSynonymSignatures

         -- Pick the most specific flag that is on; it determines which
         -- WarningFlag the emitted warnings are attributed to
       ; let add_sig_warns
               | warn_only_exported = add_warns Opt_WarnMissingExportedSignatures
               | warn_missing_sigs  = add_warns Opt_WarnMissingSignatures
               | warn_pat_syns      = add_warns Opt_WarnMissingPatternSynonymSignatures
               | otherwise          = return ()

             add_warns flag
                = when warn_pat_syns
                      (mapM_ add_pat_syn_warn pat_syns) >>
                  when (warn_missing_sigs || warn_only_exported)
                      (mapM_ add_bind_warn binds)
                where
                  add_pat_syn_warn p
                    = add_warn name $
                      hang (text "Pattern synonym with no type signature:")
                         2 (text "pattern" <+> pprPrefixName name <+> dcolon <+> pp_ty)
                    where
                      name  = patSynName p
                      pp_ty = pprPatSynType p

                  add_bind_warn id
                    = do { env <- tcInitTidyEnv     -- Why not use emptyTidyEnv?
                         ; let name    = idName id
                               (_, ty) = tidyOpenType env (idType id)
                               ty_msg  = pprSigmaType ty
                         ; add_warn name $
                           hang (text "Top-level binding with no type signature:")
                              2 (pprPrefixName name <+> dcolon <+> ty_msg) }

                  add_warn name msg
                    = when (name `elemNameSet` sig_ns && export_check name)
                           (addWarnAt (Reason flag) (getSrcSpan name) msg)

                  export_check name
                    = not warn_only_exported || name `elemNameSet` exports

       ; add_sig_warns }
{-
Note [The ImportMap]
~~~~~~~~~~~~~~~~~~~~
The ImportMap is a short-lived intermediate data struture records, for
each import declaration, what stuff brought into scope by that
declaration is actually used in the module.
The SrcLoc is the location of the END of a particular 'import'
declaration. Why *END*? Because we don't want to get confused
by the implicit Prelude import. Consider (Trac #7476) the module
import Foo( foo )
main = print foo
There is an implicit 'import Prelude(print)', and it gets a SrcSpan
of line 1:1 (just the point, not a span). If we use the *START* of
the SrcSpan to identify the import decl, we'll confuse the implicit
import Prelude with the explicit 'import Foo'. So we use the END.
It's just a cheap hack; we could equally well use the Span too.
The AvailInfos are the things imported from that decl (just a list,
not normalised).
-}
-- | Keyed by the END 'SrcLoc' of each import decl; maps to the
-- availabilities actually used from that decl.
type ImportMap = Map SrcLoc [AvailInfo]  -- See [The ImportMap]
-- | For each import declaration, work out what it provided that was
-- used, and which explicitly-imported names were never used.
findImportUsage :: [LImportDecl Name]
                -> [GlobalRdrElt]
                -> [ImportDeclUsage]
findImportUsage imports used_gres
  = map unused_decl imports
  where
    import_usage :: ImportMap
    import_usage
      = foldr extendImportMap Map.empty used_gres

    unused_decl decl@(L loc (ImportDecl { ideclHiding = imps }))
      = (decl, nubAvails used_avails, nameSetElems unused_imps)
      where
        used_avails = Map.lookup (srcSpanEnd loc) import_usage `orElse` []
                      -- srcSpanEnd: see Note [The ImportMap]
        used_names   = availsToNameSetWithSelectors used_avails
        used_parents = mkNameSet [n | AvailTC n _ _ <- used_avails]

        unused_imps   -- Not trivial; see eg Trac #7454
          = case imps of
              Just (False, L _ imp_ies) ->
                  foldr (add_unused . unLoc) emptyNameSet imp_ies
              _other -> emptyNameSet -- No explicit import list => no unused-name list

        add_unused :: IE Name -> NameSet -> NameSet
        add_unused (IEVar (L _ n))      acc = add_unused_name n acc
        add_unused (IEThingAbs (L _ n)) acc = add_unused_name n acc
        add_unused (IEThingAll (L _ n)) acc = add_unused_all  n acc
        add_unused (IEThingWith (L _ p) wc ns fs) acc =
          add_wc_all (add_unused_with p xs acc)
          where xs = map unLoc ns ++ map (flSelector . unLoc) fs
                add_wc_all = case wc of
                               NoIEWildcard -> id
                               IEWildcard _ -> add_unused_all p
        add_unused _ acc = acc

        add_unused_name n acc
          | n `elemNameSet` used_names = acc
          | otherwise                  = acc `extendNameSet` n
        add_unused_all n acc
          | n `elemNameSet` used_names   = acc
          | n `elemNameSet` used_parents = acc
          | otherwise                    = acc `extendNameSet` n
        add_unused_with p ns acc
          | all (`elemNameSet` acc1) ns = add_unused_name p acc1
          | otherwise = acc1
          where
            acc1 = foldr add_unused_name acc ns
       -- If you use 'signum' from Num, then the user may well have
       -- imported Num(signum).  We don't want to complain that
       -- Num is not itself mentioned.  Hence the two cases in add_unused_with.
-- If you use 'signum' from Num, then the user may well have
-- imported Num(signum). We don't want to complain that
-- Num is not itself mentioned. Hence the two cases in add_unused_with.
extendImportMap :: GlobalRdrElt -> ImportMap -> ImportMap
-- For each of a list of used GREs, find all the import decls that brought
-- it into scope; choose one of them (bestImport), and record
-- the RdrName in that import decl's entry in the ImportMap
extendImportMap gre imp_map
  = add_imp gre (bestImport (gre_imp gre)) imp_map
  where
    add_imp :: GlobalRdrElt -> ImportSpec -> ImportMap -> ImportMap
    add_imp gre (ImpSpec { is_decl = imp_decl_spec }) imp_map
      = Map.insertWith add decl_loc [avail] imp_map
      where
        add _ avails = avail : avails -- add is really just a specialised (++)
        decl_loc = srcSpanEnd (is_dloc imp_decl_spec)
                   -- For srcSpanEnd see Note [The ImportMap]
        avail    = availFromGRE gre
-- | Emit the appropriate warning for one import declaration, given
-- what it provided that was used and what was never used.
warnUnusedImport :: WarningFlag -> NameEnv (FieldLabelString, Name)
                 -> ImportDeclUsage -> RnM ()
warnUnusedImport flag fld_env (L loc decl, used, unused)
  | Just (False,L _ []) <- ideclHiding decl
                = return ()            -- Do not warn for 'import M()'

  | Just (True, L _ hides) <- ideclHiding decl
  , not (null hides)
  , pRELUDE_NAME == unLoc (ideclName decl)
                = return ()            -- Note [Do not warn about Prelude hiding]
  | null used   = addWarnAt (Reason flag) loc msg1 -- Nothing used; drop entire decl
  | null unused = return ()            -- Everything imported is used; nop
  | otherwise   = addWarnAt (Reason flag) loc msg2 -- Some imports are unused
  where
    msg1 = vcat [pp_herald <+> quotes pp_mod <+> pp_not_used,
                 nest 2 (text "except perhaps to import instances from"
                          <+> quotes pp_mod),
                 text "To import instances alone, use:"
                          <+> text "import" <+> pp_mod <> parens Outputable.empty ]
    msg2 = sep [pp_herald <+> quotes sort_unused,
                text "from module" <+> quotes pp_mod <+> pp_not_used]
    pp_herald  = text "The" <+> pp_qual <+> text "import of"
    pp_qual
      | ideclQualified decl = text "qualified"
      | otherwise           = Outputable.empty
    pp_mod      = ppr (unLoc (ideclName decl))
    pp_not_used = text "is redundant"

    -- Unused record fields are reported as "selector(field)"
    ppr_possible_field n = case lookupNameEnv fld_env n of
                               Just (fld, p) -> ppr p <> parens (ppr fld)
                               Nothing       -> ppr n

    -- Print unused names in a deterministic (lexicographic) order
    sort_unused = pprWithCommas ppr_possible_field $
                  sortBy (comparing nameOccName) unused
{-
Note [Do not warn about Prelude hiding]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We do not warn about
import Prelude hiding( x, y )
because even if nothing else from Prelude is used, it may be essential to hide
x,y to avoid name-shadowing warnings. Example (Trac #9061)
import Prelude hiding( log )
f x = log where log = ()
Note [Printing minimal imports]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To print the minimal imports we walk over the user-supplied import
decls, and simply trim their import lists. NB that
* We do *not* change the 'qualified' or 'as' parts!
* We do not disard a decl altogether; we might need instances
from it. Instead we just trim to an empty import list
-}
-- | Write a @M.imports@ file containing each user import trimmed down
-- to exactly what the module actually used.
printMinimalImports :: [ImportDeclUsage] -> RnM ()
-- See Note [Printing minimal imports]
printMinimalImports imports_w_usage
  = do { imports' <- mapM mk_minimal imports_w_usage
       ; this_mod <- getModule
       ; dflags   <- getDynFlags
       ; liftIO $
         do { h <- openFile (mkFilename dflags this_mod) WriteMode
            ; printForUser dflags h neverQualify (vcat (map ppr imports')) }
              -- The neverQualify is important.  We are printing Names
              -- but they are in the context of an 'import' decl, and
              -- we never qualify things inside there
              -- E.g.   import Blag( f, b )
              -- not    import Blag( Blag.f, Blag.g )!
       }
  where
    mkFilename dflags this_mod
      | Just d <- dumpDir dflags = d </> basefn
      | otherwise                = basefn
      where
        basefn = moduleNameString (moduleName this_mod) ++ ".imports"

    -- Trim one import decl to its used parts (keeping 'qualified'/'as')
    mk_minimal (L l decl, used, unused)
      | null unused
      , Just (False, _) <- ideclHiding decl
      = return (L l decl)
      | otherwise
      = do { let ImportDecl { ideclName    = L _ mod_name
                            , ideclSource  = is_boot
                            , ideclPkgQual = mb_pkg } = decl
           ; iface <- loadSrcInterface doc mod_name is_boot (fmap sl_fs mb_pkg)
           ; let lies = map (L l) (concatMap (to_ie iface) used)
           ; return (L l (decl { ideclHiding = Just (False, L l lies) })) }
      where
        doc = text "Compute minimal imports for" <+> ppr decl

    to_ie :: ModIface -> AvailInfo -> [IE Name]
    -- The main trick here is that if we're importing all the constructors
    -- we want to say "T(..)", but if we're importing only a subset we want
    -- to say "T(A,B,C)".  So we have to find out what the module exports.
    to_ie _ (Avail _ n)
       = [IEVar (noLoc n)]
    to_ie _ (AvailTC n [m] [])
       | n==m = [IEThingAbs (noLoc n)]
    to_ie iface (AvailTC n ns fs)
      = case [(xs,gs) | AvailTC x xs gs <- mi_exports iface
                      , x == n
                      , x `elem` xs    -- Note [Partial export]
                      ] of
           [xs] | all_used xs -> [IEThingAll (noLoc n)]
                | otherwise   -> [IEThingWith (noLoc n) NoIEWildcard
                                             (map noLoc (filter (/= n) ns))
                                             (map noLoc fs)]
                                          -- Note [Overloaded field import]
           _other | all_non_overloaded fs
                           -> map (IEVar . noLoc) $ ns ++ map flSelector fs
                  | otherwise -> [IEThingWith (noLoc n) NoIEWildcard
                                    (map noLoc (filter (/= n) ns)) (map noLoc fs)]
        where
          fld_lbls = map flLabel fs

          all_used (avail_occs, avail_flds)
              = all (`elem` ns) avail_occs
                    && all (`elem` fld_lbls) (map flLabel avail_flds)

          all_non_overloaded = all (not . flIsOverloaded)
{-
Note [Partial export]
~~~~~~~~~~~~~~~~~~~~~
Suppose we have
module A( op ) where
class C a where
op :: a -> a
module B where
import A
f = ..op...
Then the minimal import for module B is
import A( op )
not
import A( C( op ) )
which we would usually generate if C was exported from B. Hence
the (x `elem` xs) test when deciding what to generate.
Note [Overloaded field import]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
On the other hand, if we have
{-# LANGUAGE DuplicateRecordFields #-}
module A where
data T = MkT { foo :: Int }
module B where
import A
f = ...foo...
then the minimal import for module B must be
import A ( T(foo) )
because when DuplicateRecordFields is enabled, field selectors are
not in scope without their enclosing datatype.
************************************************************************
* *
\subsection{Errors}
* *
************************************************************************
-}
-- | Error for a qualified name appearing inside an import item.
qualImportItemErr :: RdrName -> SDoc
qualImportItemErr rdr
  = hang (text "Illegal qualified name in import item:")
       2 (ppr rdr)

-- | Generic "module does not export" message.
badImportItemErrStd :: ModIface -> ImpDeclSpec -> IE RdrName -> SDoc
badImportItemErrStd iface decl_spec ie
  = sep [text "Module", quotes (ppr (is_mod decl_spec)), source_import,
         text "does not export", quotes (ppr ie)]
  where
    source_import | mi_boot iface = text "(hi-boot interface)"
                  | otherwise     = Outputable.empty

-- | Specialised message for importing a data constructor without its
-- parent type: shows the user the T(con) and T(..) import forms.
badImportItemErrDataCon :: OccName -> ModIface -> ImpDeclSpec -> IE RdrName -> SDoc
badImportItemErrDataCon dataType_occ iface decl_spec ie
  = vcat [ text "In module"
             <+> quotes (ppr (is_mod decl_spec))
             <+> source_import <> colon
         , nest 2 $ quotes datacon
             <+> text "is a data constructor of"
             <+> quotes dataType
         , text "To import it use"
         , nest 2 $ quotes (text "import")
             <+> ppr (is_mod decl_spec)
             <> parens_sp (dataType <> parens_sp datacon)
         , text "or"
         , nest 2 $ quotes (text "import")
             <+> ppr (is_mod decl_spec)
             <> parens_sp (dataType <> text "(..)")
         ]
  where
    datacon_occ = rdrNameOcc $ ieName ie
    datacon  = parenSymOcc datacon_occ (ppr datacon_occ)
    dataType = parenSymOcc dataType_occ (ppr dataType_occ)
    source_import | mi_boot iface = text "(hi-boot interface)"
                  | otherwise     = Outputable.empty
    parens_sp d = parens (space <> d <> space)  -- T( f,g )
-- | Pick the most helpful "bad import item" message: if the offending
-- item names a data constructor of some exported type, point the user
-- at the T(con) import syntax; otherwise use the standard message.
badImportItemErr :: ModIface -> ImpDeclSpec -> IE RdrName -> [AvailInfo] -> SDoc
badImportItemErr iface decl_spec ie avails
  = case find checkIfDataCon avails of
      Just con -> badImportItemErrDataCon (availOccName con) iface decl_spec ie
      Nothing  -> badImportItemErrStd iface decl_spec ie
  where
    -- Does this availability contain a data constructor whose textual
    -- name matches the imported item?  (Matched by FastString.)
    checkIfDataCon (AvailTC _ ns _) =
      case find (\n -> importedFS == nameOccNameFS n) ns of
        Just n  -> isDataConName n
        Nothing -> False
    checkIfDataCon _ = False
    availOccName  = nameOccName . availName
    nameOccNameFS = occNameFS . nameOccName
    importedFS = occNameFS . rdrNameOcc $ ieName ie
illegalImportItemErr :: SDoc
illegalImportItemErr = text "Illegal import item"

dodgyImportWarn :: RdrName -> SDoc
dodgyImportWarn item = dodgyMsg (text "import") item

dodgyExportWarn :: Name -> SDoc
dodgyExportWarn item = dodgyMsg (text "export") item

-- | Shared body of the dodgy import\/export warnings: a T(..) item
-- where T has no in-scope constructors or class methods.
dodgyMsg :: (OutputableBndr n, HasOccName n) => SDoc -> n -> SDoc
dodgyMsg kind tc
  = sep [ text "The" <+> kind <+> ptext (sLit "item")
                     <+> quotes (ppr (IEThingAll (noLoc tc)))
                <+> text "suggests that",
          quotes (ppr tc) <+> text "has (in-scope) constructors or class methods,",
          text "but it has none" ]

exportItemErr :: IE RdrName -> SDoc
exportItemErr export_item
  = sep [ text "The export item" <+> quotes (ppr export_item),
          text "attempts to export constructors or class methods that are not visible here" ]
-- | Error for two distinct names with the same 'OccName' both being
-- exported.  The clashing items are reported in source order.
exportClashErr :: GlobalRdrEnv -> Name -> Name -> IE RdrName -> IE RdrName
               -> MsgDoc
exportClashErr global_env name1 name2 ie1 ie2
  = vcat [ text "Conflicting exports for" <+> quotes (ppr occ) <> colon
         , ppr_export ie1' name1'
         , ppr_export ie2' name2' ]
  where
    occ = nameOccName name1
    ppr_export ie name = nest 3 (hang (quotes (ppr ie) <+> text "exports" <+>
                                       quotes (ppr name))
                                    2 (pprNameProvenance (get_gre name)))

    -- get_gre finds a GRE for the Name, so that we can show its provenance
    get_gre name
        = case lookupGRE_Name global_env name of
             (gre:_) -> gre
             []      -> pprPanic "exportClashErr" (ppr name)
    get_loc name = greSrcSpan (get_gre name)
    -- report the earlier-defined name first
    (name1', ie1', name2', ie2') = if get_loc name1 < get_loc name2
                                   then (name1, ie1, name2, ie2)
                                   else (name2, ie2, name1, ie1)
-- | Report multiple declarations of the same name, listing every
-- declaration site.
addDupDeclErr :: [GlobalRdrElt] -> TcRn ()
addDupDeclErr [] = panic "addDupDeclErr: empty list"
addDupDeclErr gres@(gre : _)
  = addErrAt (getSrcSpan (last sorted_names)) $
    -- Report the error at the later location
    vcat [text "Multiple declarations of" <+>
             quotes (ppr (nameOccName name)),
             -- NB. print the OccName, not the Name, because the
             -- latter might not be in scope in the RdrEnv and so will
             -- be printed qualified.
          text "Declared at:" <+>
                   vcat (map (ppr . nameSrcLoc) sorted_names)]
  where
    name = gre_name gre
    sorted_names = sortWith nameSrcLoc (map gre_name gres)
dupExportWarn :: OccName -> IE RdrName -> IE RdrName -> SDoc
dupExportWarn occ_name ie1 ie2
  = hsep [quotes (ppr occ_name),
          text "is exported by", quotes (ppr ie1),
          text "and",            quotes (ppr ie2)]

dupModuleExport :: ModuleName -> SDoc
dupModuleExport mod
  = hsep [text "Duplicate",
          quotes (text "Module" <+> ppr mod),
          text "in export list"]

moduleNotImported :: ModuleName -> SDoc
moduleNotImported mod
  = text "The export item `module" <+> ppr mod <>
    text "' is not imported"

nullModuleExport :: ModuleName -> SDoc
nullModuleExport mod
  = text "The export item `module" <+> ppr mod <> ptext (sLit "' exports nothing")

missingImportListWarn :: ModuleName -> SDoc
missingImportListWarn mod
  = text "The module" <+> quotes (ppr mod) <+> ptext (sLit "does not have an explicit import list")

missingImportListItem :: IE RdrName -> SDoc
missingImportListItem ie
  = text "The import item" <+> quotes (ppr ie) <+> ptext (sLit "does not have an explicit import list")

moduleWarn :: ModuleName -> WarningTxt -> SDoc
moduleWarn mod (WarningTxt _ txt)
  = sep [ text "Module" <+> quotes (ppr mod) <> ptext (sLit ":"),
          nest 2 (vcat (map (ppr . sl_fs . unLoc) txt)) ]
moduleWarn mod (DeprecatedTxt _ txt)
  = sep [ text "Module" <+> quotes (ppr mod)
                        <+> text "is deprecated:",
          nest 2 (vcat (map (ppr . sl_fs . unLoc) txt)) ]

packageImportErr :: SDoc
packageImportErr
  = text "Package-qualified imports are not enabled; use PackageImports"

-- This data decl will parse OK
--      data T = a Int
-- treating "a" as the constructor.
-- It is really hard to make the parser spot this malformation.
-- So the renamer has to check that the constructor is legal
--
-- We can get an operator as the constructor, even in the prefix form:
--      data T = :% Int Int
-- from interface files, which always print in prefix form
checkConName :: RdrName -> TcRn ()
checkConName name = checkErr (isRdrDataCon name) (badDataCon name)

badDataCon :: RdrName -> SDoc
badDataCon name
   = hsep [text "Illegal data constructor name", quotes (ppr name)]
|
GaloisInc/halvm-ghc
|
compiler/rename/RnNames.hs
|
bsd-3-clause
| 89,299
| 3
| 29
| 28,195
| 16,696
| 8,623
| 8,073
| -1
| -1
|
module Internal.Mips.StorableSpec where
import Foreign
import Foreign.C.Types
import Test.Hspec
import Test.QuickCheck
import Hapstone.Internal.Mips
import Internal.Mips.Default
-- | main spec
-- | Top-level spec aggregating the Storable layout/round-trip tests for
-- the MIPS-related types.
spec :: Spec
spec = describe "Hapstone.Internal.Mips" $ do
    mipsOpMemStructSpec
    csMipsOpSpec
    csMipsSpec
-- | Build a MipsOpMemStruct by poking its raw fields into a byte buffer
-- and peeking the struct back, mimicking the C-side memory layout.
getMipsOpMemStruct :: IO MipsOpMemStruct
getMipsOpMemStruct = do
    -- buffer sized like the struct itself (sizeOf gives bytes)
    ptr <- mallocArray (sizeOf mipsOpMemStruct) :: IO (Ptr Word8)
    -- base register poked at offset 0 as a 32 bit value
    poke (castPtr ptr) (fromIntegral $ fromEnum MipsReg15 :: Word32)
    -- displacement poked at offset 8
    poke (plusPtr ptr 8) (0x0123456789abcdef :: Word64)
    peek (castPtr ptr) <* free ptr

-- | The value 'getMipsOpMemStruct' is expected to decode to.
mipsOpMemStruct :: MipsOpMemStruct
mipsOpMemStruct = MipsOpMemStruct MipsReg15 0x0123456789abcdef
-- | MipsOpMemStruct spec
-- | Layout and peek/poke round-trip tests for 'MipsOpMemStruct'.
mipsOpMemStructSpec :: Spec
mipsOpMemStructSpec = describe "Storable MipsOpMemStruct" $ do
    -- expected size: two CUInt-sized slots plus one Word64
    it "is a packed struct" $
        sizeOf (undefined :: MipsOpMemStruct) ==
            sizeOf (0 :: CUInt) * 2 + sizeOf (0 :: Word64)
    it "has matching peek- and poke-implementations" $ property $
        \s@MipsOpMemStruct{} ->
            alloca (\p -> poke p s >> peek p) `shouldReturn` s
    it "parses correctly" $ getMipsOpMemStruct `shouldReturn` mipsOpMemStruct
-- | Build a CsMipsOp by poking a discriminant and payload into a raw
-- buffer and peeking the value back.
getCsMipsOp :: IO CsMipsOp
getCsMipsOp = do
    ptr <- mallocArray (sizeOf csMipsOp) :: IO (Ptr Word8)
    -- discriminant (operand type) at offset 0
    poke (castPtr ptr) (fromIntegral $ fromEnum MipsOpImm :: Int32)
    -- immediate payload at offset 8
    poke (plusPtr ptr 8) (72324 :: Int64)
    peek (castPtr ptr) <* free ptr

-- | The value 'getCsMipsOp' is expected to decode to.
csMipsOp :: CsMipsOp
csMipsOp = Imm 72324

-- | Layout and peek/poke round-trip tests for 'CsMipsOp'.
csMipsOpSpec :: Spec
csMipsOpSpec = describe "Storable CsMipsOp" $ do
    -- expected size: 4-byte tag + 4 bytes padding + 16-byte payload
    it "has a memory-layout we can handle" $
        sizeOf (undefined :: CsMipsOp) == 4 + 4 + 16
    it "has matching peek- and poke-implementations" $ property $
        \s ->
            alloca (\p -> poke p s >> (peek p :: IO CsMipsOp)) `shouldReturn` s
    it "parses correctly" $ getCsMipsOp `shouldReturn` csMipsOp
-- | Build a CsMips by poking an operand count of 1 followed by one
-- operand, then peeking the value back.
getCsMips :: IO CsMips
getCsMips = do
    ptr <- mallocArray (sizeOf csMips) :: IO (Ptr Word8)
    -- operand count at offset 0
    poke (castPtr ptr) (1 :: Word8)
    -- first (and only) operand at offset 8
    poke (plusPtr ptr 8) csMipsOp
    peek (castPtr ptr) <* free ptr

-- | The value 'getCsMips' is expected to decode to.
csMips :: CsMips
csMips = CsMips [csMipsOp]

-- | Layout and peek/poke round-trip tests for 'CsMips'.
csMipsSpec :: Spec
csMipsSpec = describe "Storable CsMips" $ do
    -- expected size: 1-byte count + 7 bytes padding + room for 8 operands
    it "has a memory-layout we can handle" $
        sizeOf (undefined :: CsMips) ==
            1 + 7 + 8 * sizeOf (undefined :: CsMipsOp)
    it "has matching peek- and poke-implementations" $ property $
        \s@CsMips{} ->
            alloca (\p -> poke p s >> peek p) `shouldReturn` s
    it "parses correctly" $ getCsMips `shouldReturn` csMips
|
ibabushkin/hapstone
|
test/Internal/Mips/StorableSpec.hs
|
bsd-3-clause
| 2,583
| 0
| 16
| 575
| 816
| 412
| 404
| 62
| 1
|
module Input
( AppInput
, parseWinInput
, mousePos
, lbp
, lbpPos
, lbDown
, rbp
, rbpPos
, rbDown
, keyPress
, keyPressed
, quitEvent
, module SDL.Input.Keyboard.Codes
) where
import Data.Maybe
import FRP.Yampa
import Linear (V2(..))
import Linear.Affine (Point(..))
import SDL.Input.Keyboard.Codes
import qualified SDL
import Types
-- <| Signal Functions |> --
-- | Current mouse position
mousePos :: SF AppInput (Double,Double)
mousePos = arr inpMousePos

-- | Events that indicate left button click
lbp :: SF AppInput (Event ())
lbp = lbpPos >>^ tagWith ()

-- | Events that indicate left button click and are tagged with mouse position
lbpPos :: SF AppInput (Event (Double,Double))
lbpPos = inpMouseLeft ^>> edgeJust

-- | Is left button down
lbDown :: SF AppInput Bool
lbDown = arr (isJust . inpMouseLeft)

-- | Events that indicate right button click
rbp :: SF AppInput (Event ())
rbp = rbpPos >>^ tagWith ()

-- | Events that indicate right button click and are tagged with mouse position
rbpPos :: SF AppInput (Event (Double,Double))
rbpPos = inpMouseRight ^>> edgeJust

-- | Is right button down
rbDown :: SF AppInput Bool
rbDown = arr (isJust . inpMouseRight)

-- | Events fired on a key press, tagged with the pressed scancode
keyPress :: SF AppInput (Event SDL.Scancode)
keyPress = inpKeyPressed ^>> edgeJust

-- | Events fired when the given key (by scancode) is pressed
keyPressed :: SDL.Scancode -> SF AppInput (Event ())
keyPressed code = keyPress >>^ filterE (code ==) >>^ tagWith ()

-- | Event fired when SDL reports a QuitEvent
quitEvent :: SF AppInput (Event ())
quitEvent = arr inpQuit >>> edge
-- | Exported as abstract type. Fields are accessed with signal functions.
data AppInput = AppInput
    { inpMousePos   :: (Double, Double)       -- ^ Current mouse position
    , inpMouseLeft  :: Maybe (Double, Double) -- ^ Left button currently down
    , inpMouseRight :: Maybe (Double, Double) -- ^ Right button currently down
    , inpKeyPressed :: Maybe SDL.Scancode     -- ^ Currently pressed key, if any
    , inpQuit       :: Bool                   -- ^ SDL's QuitEvent
    }

-- | Neutral starting state: mouse at origin, no buttons held, no key
-- pressed, not quitting.
initAppInput :: AppInput
initAppInput = AppInput { inpMousePos   = (0, 0)
                        , inpMouseLeft  = Nothing
                        , inpMouseRight = Nothing
                        , inpKeyPressed = Nothing
                        , inpQuit       = False
                        }
-- | Filter and transform SDL events into events which are relevant to our
-- application.  Folds each incoming event payload into the running
-- 'AppInput' with 'nextAppInput', starting from 'initAppInput'.
parseWinInput :: SF WinInput AppInput
parseWinInput = accumHoldBy nextAppInput initAppInput
-- | Compute next input
-- FIXME: I am reinventing lenses once again
nextAppInput :: AppInput -> SDL.EventPayload -> AppInput
nextAppInput inp SDL.QuitEvent = inp { inpQuit = True }
nextAppInput inp (SDL.MouseMotionEvent { SDL.mouseMotionEventPos = P (V2 x y) }) =
    inp { inpMousePos = (fromIntegral x, fromIntegral y) }
nextAppInput inp (SDL.KeyboardEvent _ _ SDL.KeyPressed _ keysym) =
    inp { inpKeyPressed = Just $ SDL.keysymScancode keysym }
nextAppInput inp (SDL.KeyboardEvent _ _ SDL.KeyReleased _ _) =
    inp { inpKeyPressed = Nothing }
-- Button events update the left/right press fields; a press records the
-- mouse position held in the current input state.
nextAppInput inp ev@(SDL.MouseButtonEvent{}) = inp { inpMouseLeft  = lmb
                                                   , inpMouseRight = rmb }
    where motion = SDL.mouseButtonEventMotion ev
          button = SDL.mouseButtonEventButton ev
          pos    = inpMousePos inp
          -- choose which half of the (left, right) pair to modify
          inpMod = case (motion,button) of
              (SDL.MouseButtonUp,   SDL.ButtonLeft)  -> first (const Nothing)
              (SDL.MouseButtonDown, SDL.ButtonLeft)  -> first (const (Just pos))
              (SDL.MouseButtonUp,   SDL.ButtonRight) -> second (const Nothing)
              (SDL.MouseButtonDown, SDL.ButtonRight) -> second (const (Just pos))
              _                                      -> id
          (lmb,rmb) = inpMod $ (inpMouseLeft &&& inpMouseRight) inp
-- every other event payload is ignored
nextAppInput inp _ = inp
|
ocramz/yampy-cube
|
src/Input.hs
|
bsd-3-clause
| 3,843
| 0
| 14
| 1,056
| 970
| 542
| 428
| 76
| 5
|
{-# LANGUAGE GeneralizedNewtypeDeriving
, TypeFamilies
#-}
import Data.Default
import Graphics.UI.Toy.Prelude
-- | Toy state: a traversable collection of draggable Cairo diagrams.
newtype State = State (TToy [] (CairoDraggable CairoDiagram))
  deriving (Interactive Gtk, GtkDisplay, Diagrammable Cairo)

-- Vector space associated with State is 2D.
type instance V State = R2

-- | Run the toy starting from the default 'State'.
main :: IO ()
main = runToy (def :: State)
instance Default State where
  {-
  def = State $ TToy
    [ mkDraggable (r2 (x, y)) (circle 5 :: CairoDiagram)
    | x <- [50,60..100], y <- [50, 60..100]
    ]
  -}
  -- Four draggable items: two small circles and the two composite
  -- diagrams 'ppp' and 'qqq' defined below.
  def = State $ TToy
        [
          mkDraggable (r2 (50,50)) (circle 5 :: CairoDiagram)
        -- mkDraggable (r2 (60,50)) (line 0
        , mkDraggable (r2 (100,100)) ppp
        , mkDraggable (r2 (70,50)) (circle 5 :: CairoDiagram)
        , mkDraggable (r2 (110,110)) qqq
        ]
-- | A 2x2 grid of coloured squares.
ppp :: CairoDiagram
-- ppp = text "F" <> square 1 # lw 0
ppp = (s darkred ||| s red) === (s pink ||| s indianred)
  where
    s c = square 10 # fc c

-- | The text "a=1" built from three glyphs placed side by side.
qqq :: CairoDiagram
qqq = ((text' "a") ||| (text' "=") ||| (text' "1"))

-- | Render a string over an invisible backing square sized by string
-- length.  NOTE(review): the scaleY (-1) presumably flips the text to
-- match the rendering coordinate system — confirm against the toy lib.
text' :: String -> CairoDiagram
text' s = text s # scaleY (-1) <> square (fromIntegral (8 * length s)) # lw 0
|
alanz/vh-play
|
src/dt.hs
|
bsd-3-clause
| 1,142
| 0
| 12
| 315
| 373
| 198
| 175
| 23
| 1
|
{-# LANGUAGE InstanceSigs #-} -- Because i love it
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE FlexibleInstances #-}
--{-# LANGUAGE TypeSynonymInstances #-}
module Sky.Implementations.Isomorphism.MonoIso where
import Sky.Classes.Isomorphism
import Sky.Classes.Isomorphism.Monomorphic
import Sky.Classes.IsoFunctor
import Sky.Implementations.Isomorphism
import Data.Functor.Identity
import Control.Category (Category)
import qualified Control.Category as Cat
----------------------------------------------------------------------------------------------------
-- Commonly used isomorphisms
type Iso a b = PackedMonoSemiIso Identity a b

-- | Isomorphism between two fixed values: forward always yields @b@,
-- backward always yields @a@.
isoConst :: a -> b -> Iso a b
isoConst a b = iso (\_ -> b) (\_ -> a)
-- | Cons/uncons isomorphism.  NOTE(review): the backward direction
-- pattern matches on @(x:xs)@ and is therefore partial on the empty
-- list — intentional for an iso, but worth knowing.
isoCons :: Iso (a, [a]) [a]
isoCons = iso (\(x,xs) -> x:xs) (\(x:xs) -> (x,xs))
-- | Join/split between @Either a a@ and @a@, directed by a predicate:
-- splitting tags a value 'Left' when the predicate holds and 'Right'
-- otherwise; joining forgets the tag.
isoAlternative :: forall a. (a -> Bool) -> Iso (Either a a) a
isoAlternative decision = iso joinAlt splitAlt where
    joinAlt :: Either a a -> a
    joinAlt = either id id
    splitAlt v
        | decision v = Left v
        | otherwise  = Right v

-- | Shorthand for 'isoAlternative'.
isoAlt :: forall a. (a -> Bool) -> Iso (Either a a) a
isoAlt = isoAlternative
--isoEither :: Iso a b -> Iso (Either a b) (a, Bool)
-- | Drop a fixed right component; the inverse re-attaches it.
isoFixedRight :: b -> Iso (a,b) a
isoFixedRight fixed = iso fst (\a -> (a, fixed))

-- | Drop a fixed left component; the inverse re-attaches it.
isoFixedLeft :: a -> Iso (a,b) b
isoFixedLeft fixed = iso snd (\b -> (fixed, b))
--isoSwap :: Iso (Either a b) (Either b a)
--isoRebalance :: Iso (Either (Either a b) c) (Either a (Either b c))
--isoSum :: Iso a b -> Iso c d -> Iso (Either a b) (Either c d)
--endoIso :: Iso a b -> Iso (Either a b) (Either a b)
-- "Functors preserve isomorphisms"
--isoFun :: Functor f => Iso a b -> Iso (f a) (f b)
|
xicesky/sky-haskell-playground
|
src/Sky/Implementations/Isomorphism/MonoIso.hs
|
bsd-3-clause
| 1,707
| 0
| 9
| 318
| 479
| 275
| 204
| 28
| 3
|
{-# LANGUAGE TypeSynonymInstances #-}
-- {-# LANGUAGE ExistentialQuantification #-}
{-# LANGUAGE FlexibleInstances #-}
-- {-# LANGUAGE UndecidableInstances #-}
-- {-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE LambdaCase #-}
-- {-# LANGUAGE ViewPatterns #-}
-- {-# LANGUAGE DefaultSignatures #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE CPP #-}
-- ghc options
{-# OPTIONS_GHC -Wall #-}
{-# OPTIONS_GHC -fno-warn-unused-matches #-}
-- {-# OPTIONS_GHC -fno-warn-name-shadowing #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
-- {-# OPTIONS_GHC -fno-warn-missing-signatures #-}
-- {-# OPTIONS_GHC -fno-warn-unused-do-bind #-}
-- {-# OPTIONS_GHC -fno-warn-incomplete-patterns #-}
-- {-# OPTIONS_GHC -fno-warn-incomplete-uni-patterns #-}
-- {-# OPTIONS_GHC -fno-warn-name-shadowing #-}
-- |
-- Copyright : (c) Andreas Reuleaux 2015
-- License : BSD2
-- Maintainer: Andreas Reuleaux <rx@a-rx.info>
-- Stability : experimental
-- Portability: non-portable
--
-- This module provides pretty printing functionality for Pire's
-- abstract and concrete syntax
{-
exmpls
nopos <$> parsingFileS moduleDef_ "test/Nat.pi"
pp <$> parsingFileS moduleDef_ "test/Nat.pi"
(runExceptT $ getModules ["test"] "Nat") >>= return . pp . last . fromRight'
-}
-- A Pretty Printer.
module Pire.Pretty.PP where
import Pire.Syntax.Eps
-- import Pire.Syntax.Binder
import Pire.Syntax.Expr
import Pire.Pretty.Common
import Pire.Pretty.Wrap
import Pire.Pretty.Expr
import Pire.Pretty.Decl ()
import Control.Monad.Reader
import Text.PrettyPrint as TPP
#ifdef MIN_VERSION_GLASGOW_HASKELL
#if MIN_VERSION_GLASGOW_HASKELL(7,10,3,0)
-- ghc >= 7.10.3
#else
-- older ghc versions, but MIN_VERSION_GLASGOW_HASKELL defined
#endif
#else
-- MIN_VERSION_GLASGOW_HASKELL not even defined yet (ghc <= 7.8.x)
import Control.Applicative
#endif
import qualified Data.Text as T
-- -- disp
-- -- dispArg' :: Arg Exp String -> Doc
-- -- dispArg' (Arg ep t) =
-- -- bindParens ep $ dispExp' t
-- | Render an 'Arg' (argument plus erasure annotation), choosing the
-- parenthesisation by the shape of the wrapped expression: atomic forms
-- take the plain binding parens, a few annotation-like forms switch to
-- mandatory parens only when annotations are shown, and everything else
-- is always parenthesised.
dispArg :: Arg T.Text T.Text -> M Doc
dispArg arg@(Arg ep t) = do
  st <- ask
  -- with annotations displayed, the annotation-like cases below must
  -- always be parenthesised
  let annotParens = if showAnnots st
        then mandatoryBindParens
        else bindParens
  let wraparg' (Arg p x) = case x of
        V _          -> bindParens p
        TCon _ []    -> bindParens p
        Type         -> bindParens p
        TyUnit       -> bindParens p
        LitUnit      -> bindParens p
        TyBool       -> bindParens p
        LitBool b    -> bindParens p
        -- Sigma _ -> bindParens p
        -- source positions are transparent: recurse on the inner term
        Position _ a -> wraparg' (Arg p a)
        DCon _ [] _  -> annotParens p
        -- Prod _ _ _ -> annotParens p
        TrustMe _    -> annotParens p
        Refl _       -> annotParens p
        _            -> mandatoryBindParens p
  wraparg' arg <$> dispExpr t
-- dispArg arg@(Arg ep t) = do
-- bindParens ep <$> disp t
-- hm, we do have this in Pretty/Binder.hs - right ?
-- -- dispBinder :: Disp s => Binder s -> M Doc
-- -- dispBinder (Binder v ws) = (<>) <$> (disp v) <*> disp ws
-- -- dispBinder (BinderInBrackets bo bnd bc) = return $ text "error: dispBinder(BinderInBrackets ...)"
-- -- -- dispBinder (BinderInBrackets bo (Binder v ws) bc) = (<>) <$> (disp v) <*> disp ws
-- -- > runIdentity (runReaderT (gatherBinders $ lama "x" $ V "b") initDI)
-- -- ([x],b)
-- -- > runIdentity (runReaderT (gatherBinders $ lama "x" $ V "x") initDI)
-- -- ([x],x)
-- -- >
-- -- > runIdentity (runReaderT (gatherBinders $ lama "a" $ lama "x" $ V "x") initDI)
-- -- ([a,x],x)
-- -- >
-- -- > runIdentity (runReaderT (gatherBinders $ lam "a" $ lam "x" $ V "x" :@ V "z") initDI)
-- -- ([a,x],x z)
-- -- >
-- -- dito for lams
-- -- > runIdentity (runReaderT (gatherBinders $ lams ["a", "x"] $ V "x" :@ V "z") initDI)
-- -- ([a,x],x z)
-- -- >
-- gatherBinders :: Exp String -> M ([Doc], Doc)
-- gatherBinders (Lam n s) =
-- do
-- let db = text n
-- let body = instantiate1 (V n) s
-- (rest, body) <- gatherBinders body
-- return $ (db : rest, body)
-- gatherBinders (LamA n annot s) =
-- do
-- let dn = text n
-- dt <- dispAnnot annot
-- let db = if isEmpty dt then dn else (parens (dn <+> dt))
-- let body = instantiate1 (V n) s
-- (rest, body) <- gatherBinders body
-- return $ (db : rest, body)
-- gatherBinders (ErasedLamA n annot s) =
-- do
-- let dn = text n
-- dt <- dispAnnot annot
-- let body = instantiate1 (V n) s
-- (rest, body) <- gatherBinders body
-- return $ ( brackets (dn <+> dt) : rest, body)
-- -- maybe somehow gather them recursivley for Lams
-- -- (analogous to the other defs) ?
-- -- this is just the n:[] case, though
-- -- gatherBinders (Lams (n:[]) s) =
-- -- do
-- -- let db = text n
-- -- let body = instantiate1 (V n) s
-- -- (rest, body) <- gatherBinders body
-- -- return $ (db : rest, body)
-- gatherBinders (Lams ns s) =
-- do
-- let dbs = [text b | b <- ns]
-- body <- dispExp $ instantiate (\(Name b ()) -> V b) s
-- return $ (dbs, body)
-- gatherBinders body = do
-- db <- dispExp body
-- return ([], db)
-- -- --------------------------------------------------
-- | Render an erasure annotation verbatim by its constructor name.
dispEps :: Eps -> M Doc
dispEps e = return $ case e of
    ErasedP  -> text "ErasedP"
    RuntimeP -> text "RuntimeP"
-- dispEps VarP = return $ text "VarP"
-- dispEps WildP = return $ text "WildP"

instance Disp Eps where
    disp = dispEps
|
reuleaux/pire
|
src/Pire/Pretty/PP.hs
|
bsd-3-clause
| 5,547
| 0
| 15
| 1,437
| 484
| 296
| 188
| 44
| 13
|
-----------------------------------------------------------------------------
-- |
-- Module : Distribution.Client.Unpack
-- Copyright : (c) Andrea Vezzosi 2008
-- Duncan Coutts 2011
-- License : BSD-like
--
-- Maintainer : cabal-devel@haskell.org
-- Stability : provisional
-- Portability : portable
--
--
-----------------------------------------------------------------------------
module Distribution.Client.Unpack (
-- * Commands
unpack,
) where
import Distribution.Package
( PackageId, packageId )
import Distribution.Simple.Setup
( fromFlagOrDefault )
import Distribution.Simple.Utils
( notice, die )
import Distribution.Verbosity
( Verbosity )
import Distribution.Text(display)
import Distribution.Client.Setup
( GlobalFlags(..), UnpackFlags(..) )
import Distribution.Client.Types
import Distribution.Client.Targets
import Distribution.Client.Dependency
import Distribution.Client.FetchUtils
import qualified Distribution.Client.Tar as Tar (extractTarGzFile)
import Distribution.Client.IndexUtils as IndexUtils
( getAvailablePackages )
import System.Directory
( createDirectoryIfMissing, doesDirectoryExist, doesFileExist )
import Control.Monad
( unless, when )
import Data.Monoid
( mempty )
import System.FilePath
( (</>), addTrailingPathSeparator )
-- | Fetch and unpack the requested packages into the destination
-- directory taken from the unpack flags (empty prefix means the
-- current directory).  Doing nothing when no targets are given.
unpack :: Verbosity
       -> [Repo]
       -> GlobalFlags
       -> UnpackFlags
       -> [UserTarget]
       -> IO ()
unpack verbosity _ _ _ [] =
    notice verbosity "No packages requested. Nothing to do."
unpack verbosity repos globalFlags unpackFlags userTargets = do
    -- reject non-tarball targets before doing any real work
    mapM_ checkTarget userTargets
    availableDb <- getAvailablePackages verbosity repos
    pkgSpecifiers <- resolveUserTargets verbosity
                       globalFlags (packageIndex availableDb) userTargets
    -- resolve targets without dependencies; a failure here is fatal
    pkgs <- either (die . unlines . map show) return $
              resolveWithoutDependencies
                (resolverParams availableDb pkgSpecifiers)
    unless (null prefix) $
      createDirectoryIfMissing True prefix
    flip mapM_ pkgs $ \pkg -> do
      -- download (or locate) the tarball, then extract it
      location <- fetchPackage verbosity (packageSource pkg)
      let pkgid = packageId pkg
      case location of
        LocalTarballPackage tarballPath ->
          unpackPackage verbosity prefix pkgid tarballPath
        RemoteTarballPackage _tarballURL tarballPath ->
          unpackPackage verbosity prefix pkgid tarballPath
        RepoTarballPackage _repo _pkgid tarballPath ->
          unpackPackage verbosity prefix pkgid tarballPath
        LocalUnpackedPackage _ ->
          error "Distribution.Client.Unpack.unpack: the impossible happened."
  where
    resolverParams availableDb pkgSpecifiers =
      --TODO: add commandline constraint and preference args for unpack
      standardInstallPolicy mempty availableDb pkgSpecifiers
    prefix = fromFlagOrDefault "" (unpackDestDir unpackFlags)
-- | Reject targets 'unpack' cannot handle: local directories and local
-- .cabal files are not tarballs.
checkTarget :: UserTarget -> IO ()
checkTarget target =
    case target of
      UserTargetLocalDir       dir  -> bail dir
      UserTargetLocalCabalFile file -> bail file
      _                             -> return ()
  where
    bail t = die $ "The 'unpack' command is for tarball packages. "
                ++ "The target '" ++ t ++ "' is not a tarball."
-- | Extract one fetched tarball into @prefix/<pkgid>@, refusing to
-- proceed when a directory or file of that name already exists.
unpackPackage :: Verbosity -> FilePath -> PackageId -> FilePath -> IO ()
unpackPackage verbosity prefix pkgid pkgPath = do
    let pkgdirname = display pkgid
        pkgdir     = prefix </> pkgdirname
        pkgdir'    = addTrailingPathSeparator pkgdir
    existsDir  <- doesDirectoryExist pkgdir
    when existsDir $ die $
     "The directory \"" ++ pkgdir' ++ "\" already exists, not unpacking."
    existsFile  <- doesFileExist pkgdir
    when existsFile $ die $
     "A file \"" ++ pkgdir ++ "\" is in the way, not unpacking."
    notice verbosity $ "Unpacking to " ++ pkgdir'
    Tar.extractTarGzFile prefix pkgdirname pkgPath
|
yihuang/cabal-install
|
Distribution/Client/Unpack.hs
|
bsd-3-clause
| 3,947
| 0
| 14
| 908
| 807
| 419
| 388
| 82
| 4
|
module MSS where
-- | All subsequences of a list, in the order produced by the foldr
-- formulation: for each element, the subsequences of the tail come
-- first, followed by the same subsequences with the element consed on.
subseqs :: [a] -> [[a]]
subseqs []     = [[]]
subseqs (x:xs) = rest ++ map (x:) rest
  where rest = subseqs xs
-- | Maximum subsequence sum: the largest sum attainable by any (not
-- necessarily contiguous) subsequence, with the empty subsequence
-- providing a floor of 0.
mss :: [Integer] -> Integer
mss []     = 0
mss (x:xs) = rest `max` (x + rest)
  where rest = mss xs
|
cutsea110/aop
|
src/MSS.hs
|
bsd-3-clause
| 182
| 0
| 9
| 47
| 109
| 61
| 48
| 6
| 1
|
module Math.FeatureReduction.TStochastic
(
) where
import Math.FeatureReduction.Features
import Math.FeatureReduction.Stochastic
import System.Random
import System.IO
import NanoUtils.Set (randPicks)
import Control.Monad.Random (evalRandIO)
import Data.List (nub)
import Text.Read (readMaybe)
-- | Demo driver: reduce the feature set [1..100] against 'samplePhi'
-- with target 692, printing the level and the current set size at each
-- step, then print the objective value of the complement that was found.
main = do
  hSetBuffering stdout NoBuffering
  gen <- newStdGen
  let xs = [1..100]
      fs = fromList xs
      phi = samplePhi 692
  fs' <- runR fs phi 20 (\f i lvl -> putStrLn $ show lvl ++ " : " ++ show (size f)) manual 692 fs gen -- ++ " : " ++ show f ++ " : " ++ show i) 692 fs gen
  let sub = diff fs fs'
  val <- phi sub
  putStrLn $ show val ++ " : " ++ show sub
-- | Interactive choice callback: show the candidate features, read a
-- selection from stdin, and return it.  Uses 'readMaybe' so malformed
-- input yields 'Nothing' instead of crashing on a partial 'read'.
manual fs lvl = do
  print fs
  putStr "Choose one: "
  ln <- hGetLine stdin
  putStrLn $ "You entered: " ++ show ln
  return (readMaybe ln)
--main = do
-- hSetBuffering stdout NoBuffering
-- gen <- newStdGen
-- let xs = nub.take 50 $ randomRs (1,10000) gen
-- fs = fromList xs
-- phi = samplePhi2 10000
-- val <- phi fs
-- interactive2 phi fs fs
-- interactive phi fs val val 20000 fs
-- interactive2 phi all fs = do
-- target <- phi fs
-- putStrLn $ "FEATURES: " ++ show fs ++ "; TARGET: " ++ show target
-- gen <- newStdGen
-- fs' <- runR fs phi 30 (\f i lvl -> putStrLn $ show lvl ++ " : " ++ show f ++ " : " ++ show i) target fs gen
-- let sub = diff all fs'
-- val <- phi sub
-- putStrLn $ show val ++ " : " ++ show sub
-- (sub',val') <- pickTillDrop phi val sub fs'
-- putStrLn $ show val' ++ " : " ++ show sub'
-- putStrLn "continue?"
-- hGetLine stdin
-- interactive2 phi all sub'
-- | Move two randomly chosen features from @fs@ into the core and
-- evaluate the objective on the enlarged core; when @fs@ is empty,
-- return the core unchanged.
pickTillDrop phi maxVal core fs = do
  case size fs of
    0 -> return (core,maxVal)
    _ -> do
      (rest,ps) <- evalRandIO $ randPicks (size fs) 2 (toList fs)
      let core' = union core (fromList ps)
          fs' = fromList rest
      val <- phi core'
      -- NOTE(review): both branches now return the same pair; the True
      -- branch originally recursed (see the commented-out call), so the
      -- "till drop" looping is effectively disabled.
      case val > maxVal of
        True -> return (core',val) -- pickTillDrop phi val core' fs'
        False -> return (core',val)
-- interactive phi all low target high fs = do
-- putStrLn $ "LOW: " ++ show low ++ "; HIGH: " ++ show high ++
-- "; CURRENT: " ++ show target ++ "; FEATURES: " ++ show fs
-- gen <- newStdGen
-- fs' <- runR all phi 10 (\f i lvl -> putStrLn $ show lvl ++ " : " ++ show f ++ " : " ++ show i) target fs gen
-- let sub = diff all fs'
-- val <- phi sub
-- putStrLn $ show val ++ " : " ++ show sub
-- putStr "Go higher? [y/n] "
-- c <- hGetLine stdin
-- putStrLn ""
-- case c of
-- "y" -> let target' = (target + high)/2
-- in interactive phi all target target' high fs
-- "d" -> return ()
-- _ -> let target' = (low + target)/2
-- in interactive phi all low target' target fs
-- | Sample objective capped at @n@: the sum of the feature values,
-- saturating at @n@.
samplePhi :: Double -> Features -> IO Double
samplePhi n = return . pick . fromIntegral . sum . toList
  where pick x | x > n     = n
               | otherwise = x

-- | Sample objective that penalises overshooting @n@: sums up to @n@
-- are kept, overshoots of at most @n@ are mirrored back down, and
-- larger overshoots score 0.  (An unused local @pick@ duplicated from
-- 'samplePhi' has been removed.)
samplePhi2 n = return . fromIntegral . penalize . sum . toList
  where penalize v = if v - n > 0
                       then if v - n > n then 0 else n - (v - n)
                       else v
|
nanonaren/Reducer
|
Math/FeatureReduction/TStochastic.hs
|
bsd-3-clause
| 3,114
| 0
| 17
| 905
| 638
| 335
| 303
| 46
| 3
|
{-# LANGUAGE OverloadedLists #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE PatternGuards #-}
{-# LANGUAGE PatternSynonyms #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE ViewPatterns #-}
{-| This library only exports a single `dhallToJSON` function for translating a
Dhall syntax tree to a JSON syntax tree (i.e. a `Value`) for the @aeson@
library
NOTE: The @yaml@ library uses the same `Value` type to represent YAML
files, so you can use this to convert Dhall expressions to YAML, too
See the @dhall@ package if you would like to transform Dhall source code
into a Dhall syntax tree. Similarly, see the @aeson@ package if you would
like to translate a JSON syntax tree into JSON.
This package also provides @dhall-to-json@ and @dhall-to-yaml@ executables
which you can use to compile Dhall source code directly to JSON or YAML for
your convenience
Not all Dhall expressions can be converted to JSON since JSON is not a
programming language. The only things you can convert are:
* @Bool@s
* @Natural@s
* @Integer@s
* @Double@s
* @Text@ values
* @List@s
* @Optional@ values
* unions
* records
Dhall @Bool@s translate to JSON bools:
> $ dhall-to-json <<< 'True'
> true
> $ dhall-to-json <<< 'False'
> false
Dhall numbers translate to JSON numbers:
> $ dhall-to-json <<< '+2'
> 2
> $ dhall-to-json <<< '2'
> 2
> $ dhall-to-json <<< '2.3'
> 2.3
Dhall @Text@ translates to JSON text:
> $ dhall-to-json <<< '"ABC"'
> "ABC"
Dhall @List@s translate to JSON lists:
> $ dhall-to-json <<< '[1, 2, 3] : List Natural'
> [
> 1,
> 2,
> 3
> ]
Dhall @Optional@ values translate to @null@ if absent and the unwrapped
value otherwise:
> $ dhall-to-json <<< 'None Natural'
> null
> $ dhall-to-json <<< 'Some 1'
> 1
Dhall records translate to JSON records:
> $ dhall-to-json <<< '{ foo = 1, bar = True }'
> {
> "bar": true,
> "foo": 1
> }
Dhall unions translate to the wrapped value:
> $ dhall-to-json <<< "< Left : Natural | Right : Natural>.Left 2"
> 2
> $ cat config
> let MyType =
> < Person : { age : Natural, name : Text } | Place : { location : Text } >
>
> in [ MyType.Person { age = 47, name = "John" }
> , MyType.Place { location = "North Pole" }
> , MyType.Place { location = "Sahara Desert" }
> , MyType.Person { age = 35, name = "Alice" }
> ]
> $ dhall-to-json <<< "./config"
> [
> {
> "age": 47,
> "name": "John"
> },
> {
> "location": "North Pole"
> },
> {
> "location": "Sahara Desert"
> },
> {
> "age": 35,
> "name": "Alice"
> }
> ]
You can preserve the name of the alternative if you wrap the value in a
record with three fields:
* @contents@: The union literal that you want to preserve the tag of
* @field@: the name of the field that will store the name of the
alternative
* @nesting@: A value of type @\< Inline | Nested : Text \>@.
If @nesting@ is set to @Inline@ and the union literal stored in @contents@
contains a record then the name of the alternative is stored inline within
the same record. For example, this code:
> let Example = < Left : { foo : Natural } | Right : { bar : Bool } >
>
> let Nesting = < Inline | Nested : Text >
>
> in { field = "name"
> , nesting = Nesting.Inline
> , contents = Example.Left { foo = 2 }
> }
... produces this JSON:
> {
> "foo": 2,
> "name": "Left"
> }
If @nesting@ is set to @Nested nestedField@ then the union is stored
underneath a field named @nestedField@. For example, this code:
> let Example = < Left : { foo : Natural } | Right : { bar : Bool } >
>
> let Nesting = < Inline | Nested : Text >
>
> in { field = "name"
> , nesting = Nesting.Nested "value"
> , contents = Example.Left { foo = 2 }
> }
... produces this JSON:
> {
> "name": "Left",
> "value": {
> "foo": 2
> }
> }
You can also translate Dhall expressions encoding weakly-typed JSON
(see: <https://prelude.dhall-lang.org/JSON/Type>):
> $ cat ./example.dhall
> let JSON = https://prelude.dhall-lang.org/JSON/package.dhall
>
> in JSON.object
> [ { mapKey = "foo", mapValue = JSON.null }
> , { mapKey =
> "bar"
> , mapValue =
> JSON.array [ JSON.number 1.0, JSON.bool True ]
> }
> ]
By default, the fields that are evaluated to @null@ will be removed,
but here we're preserving them with the @--preserveNull@ flag.
> $ dhall-to-json --preserveNull <<< './example.dhall'
> {
> "bar": [
> 1,
> true
> ],
> "foo": null
> }
Also, all Dhall expressions are normalized before translation to JSON:
> $ dhall-to-json <<< "True == False"
> false
-}
module Dhall.JSON (
-- * Dhall to JSON
dhallToJSON
, omitNull
, omitEmpty
, parsePreservationAndOmission
, Conversion(..)
, defaultConversion
, convertToHomogeneousMaps
, parseConversion
, SpecialDoubleMode(..)
, handleSpecialDoubles
, codeToValue
-- * Exceptions
, CompileError(..)
) where
import Control.Applicative (empty, (<|>))
import Control.Exception (Exception, throwIO)
import Control.Monad (guard)
import Data.Aeson (ToJSON (..), Value (..))
import Data.Maybe (fromMaybe)
import Data.Text (Text)
import Data.Void (Void)
import Dhall.Core (Binding (..), DhallDouble (..), Expr)
import Dhall.Import (SemanticCacheMode (..))
import Dhall.JSON.Util (pattern FA, pattern V)
import Dhall.Map (Map)
import Options.Applicative (Parser)
import Prelude hiding (getContents)
import Prettyprinter (Pretty)
import qualified Data.Aeson as Aeson
import qualified Data.Foldable as Foldable
import qualified Data.List
import qualified Data.Map
import qualified Data.Ord
import qualified Data.Text
import qualified Data.Vector as Vector
import qualified Dhall.Core as Core
import qualified Dhall.Import
import qualified Dhall.JSON.Compat as JSON.Compat
import qualified Dhall.Map
import qualified Dhall.Optics
import qualified Dhall.Parser
import qualified Dhall.Pretty
import qualified Dhall.TypeCheck
import qualified Dhall.Util
import qualified Lens.Family as Lens
import qualified Options.Applicative
import qualified Prettyprinter.Render.Text as Pretty
import qualified System.FilePath
{-| This is the exception type for errors that might arise when translating
Dhall to JSON
Because the majority of Dhall language features do not translate to JSON
this just returns the expression that failed
-}
-- | The distinct failure modes of the Dhall-to-JSON translation; the
-- 'Show' instance below renders each as a detailed user-facing message.
data CompileError
    = Unsupported (Expr Void Void)
      -- ^ expression with no JSON counterpart
    | SpecialDouble Double
      -- ^ NaN\/Infinity\/-Infinity, which JSON cannot encode
    | BareNone
      -- ^ ❰None❱ used on its own, without a type argument
    | InvalidInlineContents (Expr Void Void) (Expr Void Void)
      -- ^ union contents not compatible with ❰Inline❱ nesting
instance Show CompileError where
show BareNone =
Data.Text.unpack $
_ERROR <> ": ❰None❱ is not valid on its own \n\
\ \n\
\Explanation: The conversion to JSON/YAML does not accept ❰None❱ in isolation as \n\
\a valid way to represent ❰null❱. In Dhall, ❰None❱ is a function whose input is \n\
\a type and whose output is an ❰Optional❱ of that type. \n\
\ \n\
\For example: \n\
\ \n\
\ \n\
\ ┌─────────────────────────────────┐ ❰None❱ is a function whose result is \n\
\ │ None : ∀(a : Type) → Optional a │ an ❰Optional❱ value, but the function \n\
\ └─────────────────────────────────┘ itself is not a valid ❰Optional❱ value \n\
\ \n\
\ \n\
\ ┌─────────────────────────────────┐ ❰None Natural❱ is a valid ❰Optional❱ \n\
\ │ None Natural : Optional Natural │ value (an absent ❰Natural❱ number in \n\
\ └─────────────────────────────────┘ this case) \n\
\ \n\
\ \n\
\ \n\
\The conversion to JSON/YAML only translates the fully applied form to ❰null❱. "
show (SpecialDouble n) =
Data.Text.unpack $
_ERROR <> ": " <> special <> " disallowed in JSON \n\
\ \n\
\Explanation: The JSON standard does not define a canonical way to encode \n\
\❰NaN❱/❰Infinity❱/❰-Infinity❱. You can fix this error by either: \n\
\ \n\
\● Using ❰dhall-to-yaml❱ instead of ❰dhall-to-json❱, since YAML does support \n\
\ ❰NaN❱/❰Infinity❱/❰-Infinity❱ \n\
\ \n\
\● Enabling the ❰--approximate-special-doubles❱ flag which will encode ❰NaN❱ as \n\
\ ❰null❱, ❰Infinity❱ as the maximum ❰Double❱, and ❰-Infinity❱ as the minimum \n\
\❰Double❱ \n\
\ \n\
\● See if there is a way to remove ❰NaN❱/❰Infinity❱/❰-Infinity❱ from the \n\
\ expression that you are converting to JSON "
where
special = Data.Text.pack (show n)
show (Unsupported e) =
Data.Text.unpack $
_ERROR <> ": Cannot translate to JSON \n\
\ \n\
\Explanation: Only primitive values, records, unions, ❰List❱s, and ❰Optional❱ \n\
\values can be translated from Dhall to JSON \n\
\ \n\
\The following Dhall expression could not be translated to JSON: \n\
\ \n\
\" <> insert e
show (InvalidInlineContents record alternativeContents) =
Data.Text.unpack $
_ERROR <> ": Union value is not compatible with ❰Inline❱ nesting. \n\
\ \n\
\Explanation: You can use the ❰Inline❱ nesting to compactly encode a union while \n\
\preserving the name of the alternative. However the alternative must either be \n\
\empty or contain a record value. \n\
\ \n\
\For example: \n\
\ \n\
\ \n\
\ ┌─────────────────────────────────────────────────┐ \n\
\ │ let Example = < Empty | Record : { x : Bool } > │ \n\
\ │ │ \n\
\ │ let Nesting = < Inline | Nested : Text > │ \n\
\ │ │ \n\
\ │ in { field = \"name\" │ \n\
\ │ , nesting = Nesting.Inline │ \n\
\ │ , contents = Example.Empty │ An empty alternative \n\
\ │ } │ is ok. \n\
\ └─────────────────────────────────────────────────┘ \n\
\ \n\
\ \n\
\... is converted to this JSON: \n\
\ \n\
\ \n\
\ ┌─────────────────────┐ \n\
\ │ { \"name\": \"Empty\" } │ \n\
\ └─────────────────────┘ \n\
\ \n\
\ \n\
\ ┌──────────────────────────────────────────────┐ \n\
\ │ ... │ \n\
\ │ │ \n\
\ │ in { field = \"name\" │ \n\
\ │ , nesting = Nesting.Inline │ \n\
\ │ , contents = Example.Record { x = True } │ An alternative containing \n\
\ │ } │ a record value is ok. \n\
\ └──────────────────────────────────────────────┘ \n\
\ \n\
\ \n\
\... is converted to this JSON: \n\
\ \n\
\ \n\
\ ┌─────────────────────────────────┐ \n\
\ │ { \"name\": \"Record\", \"x\": true } │ \n\
\ └─────────────────────────────────┘ \n\
\ \n\
\ \n\
\This isn't valid: \n\
\ \n\
\ \n\
\ ┌──────────────────────────────────────────┐ \n\
\ │ let Example = < Foo : Bool > │ \n\
\ │ │ \n\
\ │ let Nesting = < Inline | Nested : Text > │ \n\
\ │ │ \n\
\ │ in { field = \"name\" │ \n\
\ │ , nesting = Nesting.Inline │ \n\
\ │ , contents = Example.Foo True │ ❰True❱ is not a record \n\
\ │ } │ \n\
\ └──────────────────────────────────────────┘ \n\
\ \n\
\ \n\
\The following Dhall expression could not be translated to JSON: \n\
\ \n\
\" <> insert record <> " \n\
\ \n\
\... because \n\
\ \n\
\" <> insert alternativeContents <> " \n\
\ \n\
\... is not a record."
-- | Shared error-message prefix, re-exported from 'Dhall.Util'.
_ERROR :: Data.Text.Text
_ERROR = Dhall.Util._ERROR

-- | Pretty-print a value, laid out and rendered strictly, for splicing
-- into an error message.
insert :: Pretty a => a -> Text
insert = Pretty.renderStrict . Dhall.Pretty.layout . Dhall.Util.insert

instance Exception CompileError
{-| Convert a Dhall expression to the equivalent JSON expression
>>> :set -XOverloadedStrings
>>> :set -XOverloadedLists
>>> import Core
>>> dhallToJSON (RecordLit [("foo", IntegerLit 1), ("bar", TextLit "ABC")])
Right (Object (fromList [("foo",Number 1.0),("bar",String "ABC")]))
>>> fmap Aeson.encode it
Right "{\"foo\":1,\"bar\":\"ABC\"}"
-}
dhallToJSON
:: Expr s Void
-> Either CompileError Value
-- The input is first beta-normalized and alpha-normalized, so 'loop'
-- only ever sees normal forms; anything without a JSON analogue falls
-- through to the final 'Unsupported' case.
dhallToJSON e0 = loop (Core.alphaNormalize (Core.normalize e0))
where
loop e = case e of
-- Scalar literals map directly onto JSON scalars
Core.BoolLit a -> return (toJSON a)
Core.NaturalLit a -> return (toJSON a)
Core.IntegerLit a -> return (toJSON a)
Core.DoubleLit (DhallDouble a) -> return (toJSON a)
Core.TextLit (Core.Chunks [] a) -> return (toJSON a)
Core.ListLit _ a -> do
a' <- traverse loop a
return (toJSON a')
-- @Some x@ encodes as the encoding of @x@; @None T@ encodes as null
Core.Some a -> do
a' <- loop a
return (toJSON a')
Core.App Core.None _ -> return Aeson.Null
-- Provide a nicer error message for a common user mistake.
--
-- See: https://github.com/dhall-lang/dhall-lang/issues/492
Core.None -> Left BareNone
-- Date/Time/TimeZone literals are rendered to their textual form
_ | Just text <- Dhall.Pretty.temporalToText e ->
loop (Core.TextLit (Core.Chunks [] text))
-- Records: either the special { contents, field, nesting } encoding
-- used to control how unions are tagged, or a plain JSON object
Core.RecordLit a ->
case toOrderedList a of
-- "Nested" nesting: emit { field: alternativeName, nestedField: contents }
[ ( "contents"
, Core.recordFieldValue -> contents
)
, ( "field"
, Core.recordFieldValue -> Core.TextLit
(Core.Chunks [] field)
)
, ( "nesting"
, Core.recordFieldValue -> Core.App
(Core.Field
(Core.Union
[ ("Inline", mInlineType)
, ("Nested", Just Core.Text)
]
)
(FA "Nested")
)
(Core.TextLit
(Core.Chunks [] nestedField)
)
)
] | all (== Core.Record []) mInlineType
, Just (alternativeName, mExpr) <- getContents contents -> do
contents' <- case mExpr of
Just expr -> loop expr
Nothing -> return Aeson.Null
let taggedValue =
Data.Map.fromList
[ ( field
, toJSON alternativeName
)
, ( nestedField
, contents'
)
]
return (Aeson.toJSON taggedValue)
-- "Inline" nesting: splice the tag into the contents record itself
[ ( "contents"
, Core.recordFieldValue -> contents
)
, ( "field"
, Core.recordFieldValue -> Core.TextLit
(Core.Chunks [] field)
)
, ( "nesting"
, Core.recordFieldValue -> nesting
)
] | isInlineNesting nesting
, Just (alternativeName, mExpr) <- getContents contents -> do
kvs0 <- case mExpr of
Just (Core.RecordLit kvs) -> return kvs
-- Inline nesting only makes sense for record payloads
Just alternativeContents ->
Left (InvalidInlineContents e alternativeContents)
Nothing -> return mempty
let name = Core.makeRecordField $ Core.TextLit (Core.Chunks [] alternativeName)
let kvs1 = Dhall.Map.insert field name kvs0
loop (Core.RecordLit kvs1)
-- Ordinary record: convert each field and emit a JSON object
_ -> do
a' <- traverse (loop . Core.recordFieldValue) a
return (Aeson.toJSON (Dhall.Map.toMap a'))
-- A union constructor applied to a payload encodes as the payload;
-- a bare (empty) alternative encodes as its name
Core.App (Core.Field (Core.Union _) _) b -> loop b
Core.Field (Core.Union _) (FA k) -> return (Aeson.toJSON k)
-- A Boehm-Berarducci-encoded JSON value (the Prelude's JSON/Type):
-- interpret the eliminator record directly instead of normalizing it
Core.Lam _ (Core.functionBindingAnnotation -> Core.Const Core.Type)
(Core.Lam _ (Core.functionBindingAnnotation ->
(Core.Record
[ ("array" , Core.recordFieldValue -> Core.Pi _ _ (Core.App Core.List (V 0)) (V 1))
, ("bool" , Core.recordFieldValue -> Core.Pi _ _ Core.Bool (V 1))
, ("null" , Core.recordFieldValue -> V 0)
, ("number", Core.recordFieldValue -> Core.Pi _ _ Core.Double (V 1))
, ("object", Core.recordFieldValue ->
Core.Pi _ _ (Core.App Core.List (Core.Record
[ ("mapKey", Core.recordFieldValue -> Core.Text)
, ("mapValue", Core.recordFieldValue -> V 0)])) (V 1))
, ("string", Core.recordFieldValue -> Core.Pi _ _ Core.Text (V 1))
]
))
value
) -> do
let outer (Core.Field (V 0) (FA "null")) = return Aeson.Null
outer (Core.App (Core.Field (V 0) (FA "bool")) (Core.BoolLit b)) =
return (Aeson.Bool b)
outer (Core.App (Core.Field (V 0) (FA "array")) (Core.ListLit _ xs)) = do
ys <- traverse outer (Foldable.toList xs)
return (Aeson.Array (Vector.fromList ys))
outer (Core.App (Core.Field (V 0) (FA "object")) (Core.ListLit _ xs)) = do
let inner (Core.RecordLit
[ ("mapKey", Core.recordFieldValue -> Core.TextLit (Core.Chunks [] mapKey))
, ("mapValue", Core.recordFieldValue -> mapExpression)]) = do
mapValue <- outer mapExpression
return (mapKey, mapValue)
inner _ = Left (Unsupported e)
ys <- traverse inner (Foldable.toList xs)
return (Aeson.Object (JSON.Compat.objectFromList ys))
outer (Core.App (Core.Field (V 0) (FA "number")) (Core.DoubleLit (DhallDouble n))) =
return (Aeson.toJSON n)
outer (Core.App (Core.Field (V 0) (FA "string")) (Core.TextLit (Core.Chunks [] text))) =
return (toJSON text)
outer _ = Left (Unsupported e)
outer value
-- Same idea, but for the newer encoding that distinguishes
-- "double" and "integer" instead of a single "number" handler
Core.Lam _ (Core.functionBindingAnnotation -> Core.Const Core.Type)
(Core.Lam _ (Core.functionBindingAnnotation ->
(Core.Record
[ ("array" , Core.recordFieldValue -> Core.Pi _ _ (Core.App Core.List (V 0)) (V 1))
, ("bool" , Core.recordFieldValue -> Core.Pi _ _ Core.Bool (V 1))
, ("double", Core.recordFieldValue -> Core.Pi _ _ Core.Double (V 1))
, ("integer", Core.recordFieldValue -> Core.Pi _ _ Core.Integer (V 1))
, ("null" , Core.recordFieldValue -> V 0)
, ("object", Core.recordFieldValue ->
Core.Pi _ _ (Core.App Core.List (Core.Record
[ ("mapKey", Core.recordFieldValue -> Core.Text)
, ("mapValue", Core.recordFieldValue -> V 0)
])) (V 1))
, ("string", Core.recordFieldValue -> Core.Pi _ _ Core.Text (V 1))
]
))
value
) -> do
let outer (Core.Field (V 0) (FA "null")) =
return Aeson.Null
outer (Core.App (Core.Field (V 0) (FA "bool")) (Core.BoolLit b)) =
return (Aeson.Bool b)
outer (Core.App (Core.Field (V 0) (FA "array")) (Core.ListLit _ xs)) = do
ys <- traverse outer (Foldable.toList xs)
return (Aeson.Array (Vector.fromList ys))
outer (Core.App (Core.Field (V 0) (FA "object")) (Core.ListLit _ xs)) = do
let inner (Core.RecordLit
[ ("mapKey", Core.recordFieldValue -> Core.TextLit (Core.Chunks [] mapKey))
, ("mapValue", Core.recordFieldValue -> mapExpression)]) = do
mapValue <- outer mapExpression
return (mapKey, mapValue)
inner _ = Left (Unsupported e)
ys <- traverse inner (Foldable.toList xs)
return (Aeson.Object (JSON.Compat.objectFromList ys))
outer (Core.App (Core.Field (V 0) (FA "double")) (Core.DoubleLit (DhallDouble n))) =
return (Aeson.toJSON n)
outer (Core.App (Core.Field (V 0) (FA "integer")) (Core.IntegerLit n)) =
return (Aeson.toJSON n)
outer (Core.App (Core.Field (V 0) (FA "string")) (Core.TextLit (Core.Chunks [] text))) =
return (toJSON text)
outer _ = Left (Unsupported e)
outer value
_ -> Left (Unsupported e)
-- | Extract the alternative name and optional payload from a union
-- constructor (applied or bare).
getContents :: Expr s Void -> Maybe (Text, Maybe (Expr s Void))
getContents (Core.App
(Core.Field
_
(FA alternativeName)
)
expression
) = Just (alternativeName, Just expression)
getContents (Core.Field _ (FA alternativeName)) = Just (alternativeName, Nothing)
getContents _ = Nothing
-- | Recognize both spellings of the @Inline@ nesting alternative
-- (with and without an explicit empty-record payload).
isInlineNesting :: Expr s Void -> Bool
isInlineNesting (Core.App
(Core.Field
(Core.Union
[ ("Inline", Just (Core.Record []))
, ("Nested", Just Core.Text)
]
)
(FA "Inline")
)
(Core.RecordLit [])
) = True
isInlineNesting (Core.Field
(Core.Union
[ ("Inline", Nothing)
, ("Nested", Just Core.Text)
]
)
(FA "Inline")
) = True
isInlineNesting _ = False
-- | Record fields sorted by key, so the special-record patterns above
-- can match positionally.
toOrderedList :: Ord k => Map k v -> [(k, v)]
toOrderedList =
Data.List.sortBy (Data.Ord.comparing fst)
. Dhall.Map.toList
-- | Omit record fields that are @null@
--
-- Nulls are removed from object members (recursively); note that null
-- *elements of arrays* are kept — arrays are only recursed into.
omitNull :: Value -> Value
omitNull (Object object) = Object fields
where
fields = JSON.Compat.filterObject (/= Null) (fmap omitNull object)
omitNull (Array array) =
Array (fmap omitNull array)
omitNull (String string) =
String string
omitNull (Number number) =
Number number
omitNull (Bool bool) =
Bool bool
omitNull Null =
Null
{-| Omit record fields that are @null@, arrays and records whose transitive
fields are all null
-}
-- Works bottom-up: children are reduced first, then an object/array that
-- became entirely null collapses to 'Null' itself, so emptiness
-- propagates upward.  The top-level result may therefore be 'Null'.
omitEmpty :: Value -> Value
omitEmpty (Object object) =
if null fields then Null else Object fields
where
fields = JSON.Compat.filterObject (/= Null) (fmap omitEmpty object)
omitEmpty (Array array) =
if null elems then Null else Array elems
where
elems = Vector.filter (/= Null) (fmap omitEmpty array)
omitEmpty (String string) =
String string
omitEmpty (Number number) =
Number number
omitEmpty (Bool bool) =
Bool bool
omitEmpty Null =
Null
-- | Parser for command-line options related to omitting fields
--
-- @--omit-empty@ selects 'omitEmpty'; the flag is mandatory for this
-- parser to succeed (it is combined with alternatives elsewhere).
parseOmission :: Parser (Value -> Value)
parseOmission =
Options.Applicative.flag'
omitEmpty
( Options.Applicative.long "omit-empty"
<> Options.Applicative.help "Omit record fields that are null or empty records"
)
-- | Parser for command-line options related to preserving null fields.
--
-- Defaults to 'omitNull'; passing @--preserve-null@ switches to 'id'
-- (i.e. keep null fields untouched).
parseNullPreservation :: Parser (Value -> Value)
parseNullPreservation =
Options.Applicative.flag
omitNull
id
( Options.Applicative.long "preserve-null"
<> Options.Applicative.help "Preserve record fields that are null"
)
-- | Combines parsers for command-line options related to preserving & omitting null fields.
-- Tries @--omit-empty@ first, then falls back to the
-- @--preserve-null@/default-omit behavior.
parsePreservationAndOmission :: Parser (Value -> Value)
parsePreservationAndOmission = parseOmission <|> parseNullPreservation
{-| Specify whether or not to convert association lists of type
@List { mapKey: Text, mapValue : v }@ to records
-}
data Conversion
= NoConversion
| Conversion { mapKey :: Text, mapValue :: Text }
-- | The conventional field names used by the Dhall Prelude's @Map@ type.
defaultConversion :: Conversion
defaultConversion = Conversion
{ mapKey = "mapKey"
, mapValue = "mapValue"
}
{-| Convert association lists to homogeneous maps
This converts an association list of the form:
> [ { mapKey = k0, mapValue = v0 }, { mapKey = k1, mapValue = v1 } ]
... to a record of the form:
> { k0 = v0, k1 = v1 }
-}
convertToHomogeneousMaps :: Conversion -> Expr s Void -> Expr s Void
convertToHomogeneousMaps NoConversion e0 = e0
convertToHomogeneousMaps (Conversion {..}) e0 = loop (Core.normalize e0)
where
-- Structural recursion over the whole syntax tree: every case simply
-- rebuilds the node with converted children, except 'ListLit' (where
-- the actual association-list rewrite happens) and 'Lam' (skipped).
loop e = case e of
Core.Const a ->
Core.Const a
Core.Var v ->
Core.Var v
{- Minor hack: Don't descend into lambda, since the only thing it can
possibly encode is a Boehm-Berarducci-encoded JSON value. In such a
case we do *not* want to perform this rewrite since it will
interfere with decoding the value.
-}
Core.Lam cs a b ->
Core.Lam cs a b
Core.Pi cs a b c ->
Core.Pi cs a b' c'
where
b' = loop b
c' = loop c
Core.App a b ->
Core.App a' b'
where
a' = loop a
b' = loop b
Core.Let (Binding src0 a src1 b src2 c) d ->
Core.Let (Binding src0 a src1 b' src2 c') d'
where
b' = fmap (fmap loop) b
c' = loop c
d' = loop d
Core.Annot a b ->
Core.Annot a' b'
where
a' = loop a
b' = loop b
Core.Bool ->
Core.Bool
Core.BoolLit a ->
Core.BoolLit a
Core.BoolAnd a b ->
Core.BoolAnd a' b'
where
a' = loop a
b' = loop b
Core.BoolOr a b ->
Core.BoolOr a' b'
where
a' = loop a
b' = loop b
Core.BoolEQ a b ->
Core.BoolEQ a' b'
where
a' = loop a
b' = loop b
Core.BoolNE a b ->
Core.BoolNE a' b'
where
a' = loop a
b' = loop b
Core.BoolIf a b c ->
Core.BoolIf a' b' c'
where
a' = loop a
b' = loop b
c' = loop c
Core.Natural ->
Core.Natural
Core.NaturalLit a ->
Core.NaturalLit a
Core.NaturalFold ->
Core.NaturalFold
Core.NaturalBuild ->
Core.NaturalBuild
Core.NaturalIsZero ->
Core.NaturalIsZero
Core.NaturalEven ->
Core.NaturalEven
Core.NaturalOdd ->
Core.NaturalOdd
Core.NaturalToInteger ->
Core.NaturalToInteger
Core.NaturalShow ->
Core.NaturalShow
Core.NaturalSubtract ->
Core.NaturalSubtract
Core.NaturalPlus a b ->
Core.NaturalPlus a' b'
where
a' = loop a
b' = loop b
Core.NaturalTimes a b ->
Core.NaturalTimes a' b'
where
a' = loop a
b' = loop b
Core.Integer ->
Core.Integer
Core.IntegerLit a ->
Core.IntegerLit a
Core.IntegerClamp ->
Core.IntegerClamp
Core.IntegerNegate ->
Core.IntegerNegate
Core.IntegerShow ->
Core.IntegerShow
Core.IntegerToDouble ->
Core.IntegerToDouble
Core.Double ->
Core.Double
Core.DoubleLit a ->
Core.DoubleLit a
Core.DoubleShow ->
Core.DoubleShow
Core.Text ->
Core.Text
Core.TextLit (Core.Chunks a b) ->
Core.TextLit (Core.Chunks a' b)
where
a' = fmap (fmap loop) a
Core.TextAppend a b ->
Core.TextAppend a' b'
where
a' = loop a
b' = loop b
Core.TextReplace ->
Core.TextReplace
Core.TextShow ->
Core.TextShow
Core.Date ->
Core.Date
Core.DateLiteral d ->
Core.DateLiteral d
Core.Time ->
Core.Time
Core.TimeLiteral t p ->
Core.TimeLiteral t p
Core.TimeZone ->
Core.TimeZone
Core.TimeZoneLiteral z ->
Core.TimeZoneLiteral z
Core.List ->
Core.List
-- The interesting case: a list literal that looks like an
-- association list gets rewritten to a record literal
Core.ListLit a b ->
case transform of
Just c -> loop c
Nothing -> Core.ListLit a' b'
where
elements = Foldable.toList b
-- Recognize one element of the association list: a two-field
-- record with the configured key/value field names, whose key
-- is either a text literal or a union alternative
toKeyValue :: Expr s Void -> Maybe (Text, Expr s Void)
toKeyValue (Core.RecordLit m) = do
guard (Foldable.length m == 2)
key <- Core.recordFieldValue <$> Dhall.Map.lookup mapKey m
value <- Core.recordFieldValue <$> Dhall.Map.lookup mapValue m
keyText <- case key of
Core.TextLit (Core.Chunks [] keyText) ->
return keyText
Core.Field (Core.Union _) (FA keyText) ->
return keyText
_ ->
empty
return (keyText, value)
toKeyValue _ =
empty
-- 'Nothing' means "not an association list; leave it alone".
-- An empty list only counts if its type annotation matches
-- the association-list shape
transform =
case elements of
[] ->
case a of
Just (Core.App Core.List (Core.Record m)) -> do
guard (Foldable.length m == 2)
guard (Dhall.Map.member mapKey m)
guard (Dhall.Map.member mapValue m)
return (Core.RecordLit mempty)
_ -> empty
_ -> do
keyValues <- traverse toKeyValue elements
let recordLiteral = Core.makeRecordField <$>
Dhall.Map.fromList keyValues
return (Core.RecordLit recordLiteral)
a' = fmap loop a
b' = fmap loop b
Core.ListAppend a b ->
Core.ListAppend a' b'
where
a' = loop a
b' = loop b
Core.ListBuild ->
Core.ListBuild
Core.ListFold ->
Core.ListFold
Core.ListLength ->
Core.ListLength
Core.ListHead ->
Core.ListHead
Core.ListLast ->
Core.ListLast
Core.ListIndexed ->
Core.ListIndexed
Core.ListReverse ->
Core.ListReverse
Core.Optional ->
Core.Optional
Core.Some a ->
Core.Some a'
where
a' = loop a
Core.None ->
Core.None
Core.Record a ->
Core.Record a'
where
a' = Lens.over Core.recordFieldExprs loop <$> a
Core.RecordLit a ->
Core.RecordLit a'
where
a' = Lens.over Core.recordFieldExprs loop <$> a
Core.Union a ->
Core.Union a'
where
a' = fmap (fmap loop) a
Core.Combine cs a b c ->
Core.Combine cs a b' c'
where
b' = loop b
c' = loop c
Core.CombineTypes cs a b ->
Core.CombineTypes cs a' b'
where
a' = loop a
b' = loop b
Core.Prefer cs a b c ->
Core.Prefer cs a b' c'
where
b' = loop b
c' = loop c
Core.RecordCompletion a b ->
Core.RecordCompletion a' b'
where
a' = loop a
b' = loop b
Core.Merge a b c ->
Core.Merge a' b' c'
where
a' = loop a
b' = loop b
c' = fmap loop c
Core.ToMap a b ->
Core.ToMap a' b'
where
a' = loop a
b' = fmap loop b
Core.ShowConstructor a ->
Core.ShowConstructor a'
where
a' = loop a
Core.Field a b ->
Core.Field a' b
where
a' = loop a
Core.Project a b ->
Core.Project a' b
where
a' = loop a
Core.Assert a ->
Core.Assert a'
where
a' = loop a
Core.Equivalent cs a b ->
Core.Equivalent cs a' b'
where
a' = loop a
b' = loop b
Core.With a b c ->
Core.With a' b c'
where
a' = loop a
c' = loop c
Core.ImportAlt a b ->
Core.ImportAlt a' b'
where
a' = loop a
b' = loop b
Core.Note a b ->
Core.Note a b'
where
b' = loop b
Core.Embed a ->
Core.Embed a
-- | Parser for command-line options related to homogeneous map support
--
-- @--key@/@--value@ override the association-list field names
-- (defaulting to @mapKey@/@mapValue@); @--no-maps@ disables the
-- conversion entirely.
parseConversion :: Parser Conversion
parseConversion =
conversion
<|> noConversion
where
conversion = Conversion <$> parseKeyField <*> parseValueField
where
parseKeyField =
Options.Applicative.strOption
( Options.Applicative.long "key"
<> Options.Applicative.help "Reserved key field name for association lists"
<> Options.Applicative.value "mapKey"
<> Options.Applicative.showDefaultWith Data.Text.unpack
)
parseValueField =
Options.Applicative.strOption
( Options.Applicative.long "value"
<> Options.Applicative.help "Reserved value field name for association lists"
<> Options.Applicative.value "mapValue"
<> Options.Applicative.showDefaultWith Data.Text.unpack
)
noConversion =
Options.Applicative.flag'
NoConversion
( Options.Applicative.long "no-maps"
<> Options.Applicative.help "Disable conversion of association lists to homogeneous maps"
)
-- | This option specifies how to encode @NaN@\/@Infinity@\/@-Infinity@
--
-- See 'handleSpecialDoubles' for how each mode is applied.
data SpecialDoubleMode
= UseYAMLEncoding
-- ^ YAML natively supports @NaN@\/@Infinity@\/@-Infinity@
| ForbidWithinJSON
-- ^ Forbid @NaN@\/@Infinity@\/@-Infinity@ because JSON doesn't support them
| ApproximateWithinJSON
-- ^ Encode @NaN@\/@Infinity@\/@-Infinity@ as
-- @null@\/@1.7976931348623157e308@\/@-1.7976931348623157e308@,
-- respectively
{-| Pre-process an expression containing @NaN@\/@Infinity@\/@-Infinity@,
handling them as specified according to the `SpecialDoubleMode`
-}
handleSpecialDoubles
:: SpecialDoubleMode -> Expr s Void -> Either CompileError (Expr s Void)
-- Applies the chosen rewrite at every sub-expression until a fixpoint.
handleSpecialDoubles specialDoubleMode =
Dhall.Optics.rewriteMOf Core.subExpressions rewrite
where
rewrite =
case specialDoubleMode of
UseYAMLEncoding -> useYAMLEncoding
ForbidWithinJSON -> forbidWithinJSON
ApproximateWithinJSON -> approximateWithinJSON
-- YAML: replace special doubles with their YAML scalar spellings
useYAMLEncoding (Core.DoubleLit (DhallDouble n))
| isInfinite n && 0 < n =
return (Just (Core.TextLit (Core.Chunks [] "inf")))
| isInfinite n && n < 0 =
return (Just (Core.TextLit (Core.Chunks [] "-inf")))
| isNaN n =
return (Just (Core.TextLit (Core.Chunks [] "nan")))
useYAMLEncoding _ =
return Nothing
-- JSON (strict): any special double is a compile error
forbidWithinJSON (Core.DoubleLit (DhallDouble n))
| isInfinite n || isNaN n =
Left (SpecialDouble n)
forbidWithinJSON _ =
return Nothing
-- JSON (lenient): clamp infinities to the largest finite Double
approximateWithinJSON (Core.DoubleLit (DhallDouble n))
| isInfinite n && n > 0 =
return (Just (Core.DoubleLit (DhallDouble 1.7976931348623157e308)))
| isInfinite n && n < 0 =
return (Just (Core.DoubleLit (DhallDouble (-1.7976931348623157e308))))
-- Do nothing for @NaN@, which already encodes to @null@
approximateWithinJSON _ =
return Nothing
{-| Convert a piece of Text carrying a Dhall inscription to an equivalent JSON Value
>>> :set -XOverloadedStrings
>>> import Core
>>> Dhall.JSON.codeToValue defaultConversion ForbidWithinJSON Nothing "{ a = 1 }"
Object (fromList [("a",Number 1.0)])
-}
-- Pipeline: parse -> resolve imports (relative to the source file's
-- directory, or ".") -> type-check -> association-list conversion ->
-- special-double handling -> 'dhallToJSON'.  Any failure is thrown as
-- an 'IO' exception.
codeToValue
:: Conversion
-> SpecialDoubleMode
-> Maybe FilePath -- ^ The source file path. If no path is given, imports
-- are resolved relative to the current directory.
-> Text -- ^ Input text.
-> IO Value
codeToValue conversion specialDoubleMode mFilePath code = do
parsedExpression <- Core.throws (Dhall.Parser.exprFromText (fromMaybe "(input)" mFilePath) code)
let rootDirectory = case mFilePath of
Nothing -> "."
Just fp -> System.FilePath.takeDirectory fp
resolvedExpression <- Dhall.Import.loadRelativeTo rootDirectory UseSemanticCache parsedExpression
_ <- Core.throws (Dhall.TypeCheck.typeOf resolvedExpression)
let convertedExpression =
convertToHomogeneousMaps conversion resolvedExpression
specialDoubleExpression <- Core.throws (handleSpecialDoubles specialDoubleMode convertedExpression)
case dhallToJSON specialDoubleExpression of
Left err -> Control.Exception.throwIO err
Right json -> return json
|
Gabriel439/Haskell-Dhall-Library
|
dhall-json/src/Dhall/JSON.hs
|
bsd-3-clause
| 46,247
| 0
| 29
| 20,747
| 7,438
| 3,738
| 3,700
| 630
| 83
|
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TupleSections #-}
{-# LANGUAGE OverloadedStrings #-}
module Main where
-- This example demonstrates how you can split your update function
-- into separate update functions for parts of your model and then
-- combine them into a single update function operating on the whole
-- model which combines their effects.
import Control.Monad
import Data.Monoid
import Miso
import Miso.String
-- In this slightly contrived example, our model consists of two
-- counters. When one of those counters is incremented, the other is
-- decremented and the other way around.
type Model = (Int, Int)
data Action
= Increment
| Decrement
| NoOp
deriving (Show, Eq)
-- We use a handful of 'Lens' combinators below.  @miso@ does not pull
-- in a lens library, so we define the minimal pieces ourselves; in a
-- real application, depend on @lens@ or @microlens@ instead.

-- | A van Laarhoven lens, as found under the same name in @lens@ and
-- @microlens@.
type Lens s t a b = forall f. Functor f => (a -> f b) -> s -> f t

-- | Run an effectful function over the target of a lens.  @lens@ also
-- offers the infix synonym '%%~'.
--
-- In this example it is only ever used at the specialized type
--
-- @traverseOf :: Lens' m a -> (a -> Effect Action a) -> m -> Effect Action m@
traverseOf :: Functor f => Lens s t a b -> (a -> f b) -> s -> f t
traverseOf l = l

-- | Lens focusing on the first component of a pair (same as in @lens@
-- and @microlens@).
_1 :: Lens (a,c) (b,c) a b
_1 g (x, y) = fmap (\x' -> (x', y)) (g x)

-- | Lens focusing on the second component of a pair (same as in @lens@
-- and @microlens@).
_2 :: Lens (c,a) (c,b) a b
_2 g (x, y) = fmap (\y' -> (x, y')) (g y)
-- | Update function for the first counter in our 'Model':
-- 'Increment' adds one, 'Decrement' subtracts one, 'NoOp' leaves the
-- counter untouched.  No additional effects are scheduled.
updateFirstCounter :: Action -> Int -> Effect Action Int
updateFirstCounter action counter = noEff $
  case action of
    Increment -> counter + 1
    Decrement -> counter - 1
    NoOp      -> counter
-- | Update function for the second counter in our 'Model'.  It moves
-- in the *opposite* direction of the first counter: 'Increment'
-- subtracts one and 'Decrement' adds one.
updateSecondCounter :: Action -> Int -> Effect Action Int
updateSecondCounter action counter = noEff $
  case action of
    Increment -> counter - 1
    Decrement -> counter + 1
    NoOp      -> counter
-- | Combined update function for both counters.
--
-- Each per-counter update is lifted with 'traverseOf' so that it acts
-- on the whole model while leaving the other counter untouched.  The
-- two lifted updates are then composed with '<=<', which also merges
-- their scheduled effects; specialized to 'Effect Action' it reads:
--
-- @(<=<) :: (b -> Effect Action c) -> (a -> Effect Action b) -> a -> Effect Action c
updateModel :: Action -> Model -> Effect Action Model
updateModel act = onFirst <=< onSecond
  where
    -- Lift 'updateFirstCounter' to act on the first tuple component.
    onFirst :: Model -> Effect Action Model
    onFirst = traverseOf _1 (updateFirstCounter act)
    -- Lift 'updateSecondCounter' to act on the second tuple component.
    onSecond :: Model -> Effect Action Model
    onSecond = traverseOf _2 (updateSecondCounter act)
-- | Start the application.  Uses @RecordWildCards@: the bindings in
-- the @where@ clause ('model', 'update', 'view', …) fill in the
-- correspondingly-named fields of 'App'.
main :: IO ()
main = startApp App { initialAction = NoOp, ..}
where
model = (0, 0)
update = updateModel
view = viewModel
events = defaultEvents
subs = []
mountPoint = Nothing
logLevel = Off
-- | Render both counters side by side between a shared pair of
-- +/- buttons.
viewModel :: Model -> View Action
viewModel (x, y) =
div_
[]
[ button_ [onClick Increment] [text "+"]
, text (ms x <> " | " <> ms y)
, button_ [onClick Decrement] [text "-"]
]
|
dmjio/miso
|
examples/compose-update/Main.hs
|
bsd-3-clause
| 3,966
| 0
| 11
| 853
| 710
| 397
| 313
| 54
| 1
|
module Main where
import Data.Maybe
import Language.OpenCc
import Test.QuickCheck
-- | Smoke test: load the Traditional->Simplified OpenCC configuration,
-- convert a sample UTF-8 string, print the result, and free the handle.
-- NOTE(review): 'fromJust' crashes if the conversion fails — acceptable
-- for a test driver only.
main = do
handle <- loadOpenCcConf "zhtw2zhcn_s.ini"
simp <- fmap fromJust $ convertFromUTF8 handle "紐約時報"
putStrLn simp
freeOpenCcConf handle
|
MnO2/hopencc
|
tests/Test.hs
|
bsd-3-clause
| 251
| 0
| 9
| 46
| 65
| 31
| 34
| 9
| 1
|
module Domain.Role where
import Import
import Domain.Types
-- | Persist a new role.  Currently an unimplemented stub: calling it
-- raises a runtime error via 'undefined'.
createRole :: Role -> AppM(Role)
createRole role = undefined
-- | Look up a role by its identifier.  Currently an unimplemented
-- stub: calling it raises a runtime error via 'undefined'.
getRole :: RoleID -> AppM(Role)
getRole rid = undefined
|
vacationlabs/haskell-webapps
|
skeleton/src/Domain/Role.hs
|
mit
| 179
| 0
| 7
| 28
| 60
| 33
| 27
| 7
| 1
|
/*Owner & Copyrights: Vance King Saxbe. A.*//* Copyright (c) <2014> Author Vance King Saxbe. A, and contributors Power Dominion Enterprise, Precieux Consulting and other contributors. Modelled, Architected and designed by Vance King Saxbe. A. with the geeks from GoldSax Consulting and GoldSax Technologies email @vsaxbe@yahoo.com. Development teams from Power Dominion Enterprise, Precieux Consulting. Project sponsored by GoldSax Foundation, GoldSax Group and executed by GoldSax Manager.*/{-# Modelled, Architected and designed by Vance King Saxbe. A. with the geeks from GoldSax Consulting, GoldSax Money, GoldSax Treasury, GoldSax Finance, GoldSax Banking and GoldSax Technologies email @vsaxbe@yahoo.com. Development teams from Power Dominion Enterprise, Precieux Consulting. This Engagement sponsored by GoldSax Foundation, GoldSax Group and executed by GoldSax Manager. LANGUAGE DeriveGeneric #-}
module GoldSaxMachineModule8.APriori.Types where
import Data.Set (Set)
import qualified Data.Set as S
import Control.DeepSeq
import GHC.Generics
-- | A customer: a government organization, a company (with a contact
-- person and that person's duty) or a private individual.
data Client = GovOrg { clientName :: String }
| Company { clientName :: String, person :: Person, duty :: String }
| Individual { person :: Person }
deriving (Show, Eq, Ord, Generic)
-- | The constructor tag of a 'Client', used as a purchase attribute.
data ClientKind = KindGovOrg | KindCompany | KindIndividual
deriving (Show, Eq, Ord, Generic)
-- | Basic personal details for a contact or individual client.
data Person = Person { firstName :: String, lastName :: String, gender :: Gender }
deriving (Show, Eq, Ord, Generic)
data Gender = Male | Female | UnknownGender
deriving (Show, Eq, Ord, Generic)
-- | A purchasable product, identified by a numeric id plus its type.
data Product = Product { productId :: Integer, productType :: ProductType }
deriving (Show, Eq, Ord, Generic)
data ProductType = TimeMachine | TravelGuide | Tool | Trip
deriving (Show, Eq, Ord, Generic)
-- | One purchase: a client together with the products bought.
data Purchase = Purchase { client :: Client, products :: [Product] }
deriving (Show, Eq, Ord, Generic)
-- | An atomic fact about a purchase — these are the "items" mined by
-- the A-Priori algorithm.
data PurchaseInfo = InfoClientKind ClientKind
| InfoClientDuty String
| InfoClientGender Gender
| InfoPurchasedProduct Integer
| InfoPurchasedProductType ProductType
deriving (Show, Eq, Ord, Generic)
-- Generic-derived 'NFData' instances so values can be fully forced
-- (e.g. when evaluated with 'deepseq' in parallel code).
instance NFData Client
instance NFData ClientKind
instance NFData Person
instance NFData Gender
instance NFData Product
instance NFData ProductType
instance NFData Purchase
instance NFData PurchaseInfo
-- | Project the client-related attributes of a purchase into a set of
-- 'PurchaseInfo' facts: always the client kind, plus the duty for
-- companies and the gender for individuals (when known).
clientToPurchaseInfo :: Client -> Set PurchaseInfo
clientToPurchaseInfo client = case client of
  GovOrg {} ->
    S.singleton (InfoClientKind KindGovOrg)
  Company { duty = d } ->
    S.fromList [InfoClientKind KindCompany, InfoClientDuty d]
  Individual { person = Person { gender = g } } -> case g of
    UnknownGender -> S.singleton (InfoClientKind KindIndividual)
    _             -> S.fromList [InfoClientKind KindIndividual, InfoClientGender g]
-- | Collect, for every purchased product, both its identifier and its
-- product type as 'PurchaseInfo' facts.
productsToPurchaseInfo :: [Product] -> Set PurchaseInfo
productsToPurchaseInfo prods = S.fromList (concatMap facts prods)
  where
    -- Each product contributes exactly two facts.
    facts (Product i t) = [InfoPurchasedProduct i, InfoPurchasedProductType t]
-- | Combine the client facts and product facts of a purchase into a
-- single 'Transaction' (the unit mined by A-Priori).
purchaseToTransaction :: Purchase -> Transaction
purchaseToTransaction (Purchase c ps) =
    Transaction (clientFacts `S.union` productFacts)
  where
    clientFacts  = clientToPurchaseInfo c
    productFacts = productsToPurchaseInfo ps
-- | The full set of facts derived from one purchase.
newtype Transaction = Transaction (Set PurchaseInfo) deriving (Eq, Ord, Generic)
-- | A (candidate) frequent item set of purchase facts.
newtype FrequentSet = FrequentSet (Set PurchaseInfo) deriving (Eq, Ord, Generic)
-- | An association rule @antecedent => consequent@.
data AssocRule = AssocRule (Set PurchaseInfo) (Set PurchaseInfo) deriving (Eq, Ord, Generic)
instance NFData Transaction
instance NFData FrequentSet
instance NFData AssocRule
-- | Rendered as @antecedent => consequent@ using the sets' 'Show'.
instance Show AssocRule where
show (AssocRule a b) = show a ++ " => " ++ show b
-- | Support of a candidate set: the fraction of transactions that
-- contain every one of its items.
setSupport :: [Transaction] -> FrequentSet -> Double
setSupport transactions (FrequentSet sElts) =
    fromIntegral matching / fromIntegral total
  where
    total    = length transactions
    matching = length [ () | Transaction tElts <- transactions
                           , sElts `S.isSubsetOf` tElts ]
-- | Confidence of an association rule: the support of antecedent and
-- consequent together, divided by the support of the antecedent alone.
ruleConfidence :: [Transaction] -> AssocRule -> Double
ruleConfidence transactions (AssocRule a b) = jointSupport / baseSupport
  where
    jointSupport = setSupport transactions (FrequentSet (a `S.union` b))
    baseSupport  = setSupport transactions (FrequentSet a)
/*email to provide support at vancekingsaxbe@powerdominionenterprise.com, businessaffairs@powerdominionenterprise.com, For donations please write to fundraising@powerdominionenterprise.com*/
|
VanceKingSaxbeA/GoldSaxMachineStore
|
GoldSaxMachineModule8/src/Chapter8/APriori/Types.hs
|
mit
| 4,573
| 23
| 16
| 970
| 1,259
| 663
| 596
| 68
| 1
|
module Graphics.VinylGL (module Graphics.VinylGL.Uniforms,
module Graphics.VinylGL.Vertex)
where
import Graphics.VinylGL.Uniforms
import Graphics.VinylGL.Vertex
|
spetz911/progames
|
vinyl-gl-master/src/Graphics/VinylGL.hs
|
mit
| 189
| 0
| 5
| 39
| 34
| 23
| 11
| 4
| 0
|
module Pos.Chain.Block.IsHeader
( IsHeader
, IsGenesisHeader
, IsMainHeader (..)
) where
import Control.Lens (Lens')
import Pos.Chain.Block.HasPrevBlock (HasPrevBlock (..))
import Pos.Chain.Block.Header (BlockHeader, GenesisBlockHeader,
HasHeaderHash (..), MainBlockHeader, mainHeaderLeaderKey,
mainHeaderSlot)
import Pos.Chain.Update.BlockVersion (HasBlockVersion (..))
import Pos.Chain.Update.SoftwareVersion (HasSoftwareVersion (..))
import Pos.Core.Common (HasDifficulty (..))
import Pos.Core.Slotting (HasEpochIndex (..), HasEpochOrSlot (..),
SlotId (..))
import Pos.Crypto (PublicKey)
import Pos.Util.Some (Some, applySome, liftLensSome)
----------------------------------------------------------------------------
-- IsHeader
----------------------------------------------------------------------------
{- | A class that lets subpackages use some fields from headers without
depending on cardano-sl:
* 'difficultyL'
* 'epochIndexL'
* 'epochOrSlotG'
* 'prevBlockL'
* 'headerHashG'
-}
class ( HasDifficulty header
, HasEpochIndex header
, HasEpochOrSlot header
, HasPrevBlock header
, HasHeaderHash header) =>
IsHeader header
-- Instances for the existential wrapper 'Some IsHeader' simply
-- delegate every accessor to the wrapped header.
instance HasDifficulty (Some IsHeader) where
difficultyL = liftLensSome difficultyL
instance HasEpochIndex (Some IsHeader) where
epochIndexL = liftLensSome epochIndexL
instance HasEpochOrSlot (Some IsHeader) where
getEpochOrSlot = applySome getEpochOrSlot
instance HasPrevBlock (Some IsHeader) where
prevBlockL = liftLensSome prevBlockL
instance HasHeaderHash (Some IsHeader) where
headerHash = applySome headerHash
instance IsHeader (Some IsHeader)
-- All concrete header types are headers.
instance IsHeader BlockHeader
instance IsHeader MainBlockHeader
instance IsHeader GenesisBlockHeader
----------------------------------------------------------------------------
-- IsGenesisHeader
----------------------------------------------------------------------------
-- | A class for genesis headers.
class IsHeader header => IsGenesisHeader header
-- Delegating instances for the existential wrapper, mirroring the
-- 'Some IsHeader' instances above.
instance HasDifficulty (Some IsGenesisHeader) where
difficultyL = liftLensSome difficultyL
instance HasEpochIndex (Some IsGenesisHeader) where
epochIndexL = liftLensSome epochIndexL
instance HasEpochOrSlot (Some IsGenesisHeader) where
getEpochOrSlot = applySome getEpochOrSlot
instance HasPrevBlock (Some IsGenesisHeader) where
prevBlockL = liftLensSome prevBlockL
instance HasHeaderHash (Some IsGenesisHeader) where
headerHash = applySome headerHash
instance IsHeader (Some IsGenesisHeader)
instance IsGenesisHeader (Some IsGenesisHeader)
instance IsGenesisHeader GenesisBlockHeader
----------------------------------------------------------------------------
-- IsMainHeader
----------------------------------------------------------------------------
{- | A class for main headers. In addition to 'IsHeader', provides:
* 'headerSlotL'
* 'headerLeaderKeyL'
* 'blockVersionL'
* 'softwareVersionL'
-}
class (IsHeader header
,HasBlockVersion header
,HasSoftwareVersion header) =>
IsMainHeader header
where
-- | Id of the slot for which this block was generated.
headerSlotL :: Lens' header SlotId
-- | Public key of slot leader.
headerLeaderKeyL :: Lens' header PublicKey
-- Delegating instances for the existential wrapper.
instance HasDifficulty (Some IsMainHeader) where
difficultyL = liftLensSome difficultyL
instance HasEpochIndex (Some IsMainHeader) where
epochIndexL = liftLensSome epochIndexL
instance HasEpochOrSlot (Some IsMainHeader) where
getEpochOrSlot = applySome getEpochOrSlot
instance HasPrevBlock (Some IsMainHeader) where
prevBlockL = liftLensSome prevBlockL
instance HasHeaderHash (Some IsMainHeader) where
headerHash = applySome headerHash
instance HasBlockVersion (Some IsMainHeader) where
blockVersionL = liftLensSome blockVersionL
instance HasSoftwareVersion (Some IsMainHeader) where
softwareVersionL = liftLensSome softwareVersionL
instance IsHeader (Some IsMainHeader)
instance IsMainHeader (Some IsMainHeader) where
headerSlotL = liftLensSome headerSlotL
headerLeaderKeyL = liftLensSome headerLeaderKeyL
-- The only concrete main-header type delegates to its field lenses.
instance IsMainHeader MainBlockHeader where
headerSlotL = mainHeaderSlot
headerLeaderKeyL = mainHeaderLeaderKey
|
input-output-hk/pos-haskell-prototype
|
chain/src/Pos/Chain/Block/IsHeader.hs
|
mit
| 4,424
| 0
| 7
| 788
| 803
| 436
| 367
| -1
| -1
|
--
--
--
-----------------
-- Exercise 6.37.
-----------------
--
--
--
module E'6'37 where
import B'C'6 ( Image )
import E'6'36 ( superimposeImage )
import Pictures
(
height
, width
)
-- "How would you use Image superimposition to give analogues of above and beside for Images?"
--
-- above: First image above second image, at the second coordinates.
-- beside: First image beside second image, at the second coordinates.
-- | Analogues of @above@ and @beside@ for 'Image's, expressed via
-- superimposition:
--
-- * 'imageAbove':  first image stacked on top of the second, anchored
--   at the second image's coordinates.
-- * 'imageBeside': first image placed to the left of the second,
--   anchored at the second image's coordinates.
imageAbove, imageBeside :: Image -> Image -> Image
imageAbove (topImg, _) (bottomImg, (x, y)) =
    superimposeImage (topImg,    (x, y + toInteger (height topImg)))
                     (bottomImg, (x, y))
imageBeside (leftImg, _) (rightImg, (x, y)) =
    superimposeImage (leftImg,  (x - toInteger (width leftImg), y))
                     (rightImg, (x, y))
-- ... other ways of using different Image superimpositions,
-- coordinate transformations and picture positions exist.
|
pascal-knodel/haskell-craft
|
_/links/E'6'37.hs
|
mit
| 1,086
| 0
| 12
| 327
| 207
| 130
| 77
| 14
| 1
|
{-# LANGUAGE GADTs, EmptyDataDecls, FlexibleInstances, FunctionalDependencies, MultiParamTypeClasses, RankNTypes, StandaloneDeriving, TypeSynonymInstances,UndecidableInstances #-}
module Data.Time.Recurrence.ScheduleDetails
(
-- * ScheduleDetails
ScheduleDetails
, eval
-- * Functional interface to constructors
, enum
, filter
, select
-- * Period Filters
, PeriodFilter (..)
, EnumerablePeriodFilter (..)
, FilterablePeriodFilter (..)
, SelectablePeriodFilter (..)
)
where
import Prelude hiding (filter)
import Control.Monad ((>=>))
import Data.Time.Calendar.Month
import Data.Time.Calendar.WeekDay
import Data.Time.CalendarTime
import Data.Time.Moment hiding (Period(..))
import Data.Time.Recurrence.AndThen
-- | A typed chain of period filters.  The GADT index records which stage a
-- node belongs to (enumerate, filter or select), so chains can only move
-- "forward": enumerations may feed filters, filters may feed selections,
-- but never the other way round.
data ScheduleDetails a where
    -- Single-stage wrappers.
    Enumerate :: EnumerablePeriodFilter -> ScheduleDetails EnumerablePeriodFilter
    Filter :: FilterablePeriodFilter -> ScheduleDetails FilterablePeriodFilter
    Select :: SelectablePeriodFilter -> ScheduleDetails SelectablePeriodFilter
    -- Same-stage composition.
    EPFCons :: ScheduleDetails EnumerablePeriodFilter -> ScheduleDetails EnumerablePeriodFilter -> ScheduleDetails EnumerablePeriodFilter
    FPFCons :: ScheduleDetails FilterablePeriodFilter -> ScheduleDetails FilterablePeriodFilter -> ScheduleDetails FilterablePeriodFilter
    SPFCons :: ScheduleDetails SelectablePeriodFilter -> ScheduleDetails SelectablePeriodFilter -> ScheduleDetails SelectablePeriodFilter
    -- Cross-stage composition (always forward: E -> F -> S).
    EPFConsFPF :: ScheduleDetails EnumerablePeriodFilter -> ScheduleDetails FilterablePeriodFilter -> ScheduleDetails FilterablePeriodFilter
    FPFConsSPF :: ScheduleDetails FilterablePeriodFilter -> ScheduleDetails SelectablePeriodFilter -> ScheduleDetails SelectablePeriodFilter
    EPFConsSPF :: ScheduleDetails EnumerablePeriodFilter -> ScheduleDetails SelectablePeriodFilter -> ScheduleDetails SelectablePeriodFilter
deriving instance Show (ScheduleDetails a)
-- | Lift a raw 'PeriodFilter' into the enumerating stage of a schedule.
enum :: PeriodFilter Month WeekDay NotEnumerable -> ScheduleDetails EnumerablePeriodFilter
enum pf = Enumerate (EPF pf)
-- | Lift a raw 'PeriodFilter' into the filtering stage of a schedule.
filter :: PeriodFilter Month NotFilterable WeekDay -> ScheduleDetails FilterablePeriodFilter
filter pf = Filter (FPF pf)
-- | Lift a raw 'PeriodFilter' into the selecting stage of a schedule.
select :: PeriodFilter Int Int Int -> ScheduleDetails SelectablePeriodFilter
select pf = Select (SPF pf)
-- Short aliases: Bare* is the raw filter newtype, Wrap* its 'ScheduleDetails'
-- wrapper.  The 'AndThen' instances below let '>==>' chain any mixture of
-- bare and wrapped operands, producing a wrapped chain.
type BareEPF = EnumerablePeriodFilter
type WrapEPF = ScheduleDetails EnumerablePeriodFilter
instance AndThen BareEPF BareEPF WrapEPF where
  (>==>) x y = (Enumerate x) `EPFCons` (Enumerate y)
instance AndThen BareEPF WrapEPF WrapEPF where
  (>==>) x y = (Enumerate x) `EPFCons` y
instance AndThen WrapEPF WrapEPF WrapEPF where
  (>==>) x y = x `EPFCons` y
type BareFPF = FilterablePeriodFilter
type WrapFPF = ScheduleDetails FilterablePeriodFilter
instance AndThen BareFPF BareFPF WrapFPF where
  (>==>) x y = (Filter x) `FPFCons` (Filter y)
instance AndThen BareFPF WrapFPF WrapFPF where
  (>==>) x y = (Filter x) `FPFCons` y
instance AndThen WrapFPF WrapFPF WrapFPF where
  (>==>) x y = x `FPFCons` y
type BareSPF = SelectablePeriodFilter
type WrapSPF = ScheduleDetails SelectablePeriodFilter
instance AndThen BareSPF BareSPF WrapSPF where
  (>==>) x y = (Select x) `SPFCons` (Select y)
instance AndThen BareSPF WrapSPF WrapSPF where
  (>==>) x y = (Select x) `SPFCons` y
instance AndThen WrapSPF WrapSPF WrapSPF where
  (>==>) x y = x `SPFCons` y
-- Cross-stage chaining: enumerations feed filters, filters feed selections.
instance AndThen WrapEPF WrapFPF WrapFPF where
  (>==>) x y = x `EPFConsFPF` y
instance AndThen WrapFPF WrapSPF WrapSPF where
  (>==>) x y = x `FPFConsSPF` y
instance AndThen WrapEPF WrapSPF WrapSPF where
  (>==>) x y = x `EPFConsSPF` y
-- | A single calendar criterion.  The three type parameters pick the payload
-- types used for months (@m@), week-days-in-period (@e@) and plain week days
-- (@f@), letting each stage rule out constructors it cannot handle.
data PeriodFilter m e f
    = Seconds [Int]
    | Minutes [Int]
    | Hours [Int]
    | Days [Int]
    | Weeks [Int]
    | WeekDays [f]
    | WeekDaysInWeek [e]
    | WeekDaysInMonth [e]
    | Months [m]
    | YearDays [Int]
    deriving (Read, Show)
-- Uninhabited phantom markers: instantiating a payload slot with one of
-- these makes the corresponding constructors unusable for that stage.
data NotEnumerable
data NotFilterable
-- The Show/Read instances exist only to satisfy the deriving contexts above;
-- they can never be invoked because the types have no values.
instance Show NotEnumerable where
  show _ = undefined
instance Read NotEnumerable where
  readsPrec _ _ = undefined
instance Show NotFilterable where
  show _ = undefined
instance Read NotFilterable where
  readsPrec _ _ = undefined
-- Stage-specific wrappers around 'PeriodFilter'.
newtype EnumerablePeriodFilter = EPF { fromEPF :: PeriodFilter Month WeekDay NotEnumerable } deriving (Read, Show)
newtype FilterablePeriodFilter = FPF { fromFPF :: PeriodFilter Month NotFilterable WeekDay } deriving (Read, Show)
newtype SelectablePeriodFilter = SPF { fromSPF :: PeriodFilter Int Int Int } deriving (Read, Show)
-- | Compile a 'ScheduleDetails' chain into a moment-transforming pipeline.
-- Leaf stages dispatch on the wrapped 'PeriodFilter'; composition
-- constructors are chained with Kleisli composition ('>=>').
eval :: (CalendarTimeConvertible a, Ord a, Moment a) => ScheduleDetails b -> ([a] -> FutureMoments a)
eval (Enumerate x) = case (fromEPF x) of
    (Seconds ss) -> enumSeconds ss
    (Minutes mm) -> enumMinutes mm
    (Hours hh) -> enumHours hh
    -- 'WeekDays' carries 'NotEnumerable' here, so this case is unreachable
    -- by construction.
    (WeekDays _) -> undefined
    (WeekDaysInWeek ww) -> enumWeekDaysInWeek ww
    (WeekDaysInMonth ww) -> enumWeekDaysInMonth ww
    (Days dd) -> enumDays dd
    (Weeks wk) -> enumWeeks wk
    (Months mm) -> enumMonths mm
    (YearDays yy) -> enumYearDays yy
eval (Filter x) = case (fromFPF x) of
    (Seconds ss) -> filterSeconds ss
    (Minutes mm) -> filterMinutes mm
    (Hours hh) -> filterHours hh
    (WeekDays ww) -> filterWeekDays ww
    -- These two carry 'NotFilterable' in the filter stage: unreachable.
    (WeekDaysInWeek _) -> undefined
    (WeekDaysInMonth _) -> undefined
    (Days dd) -> filterDays dd
    (Weeks wk) -> filterWeeks wk
    (Months mm) -> filterMonths mm
    (YearDays yy) -> filterYearDays yy
eval (Select x) = case (fromSPF x) of
    (Seconds ss) -> nthSecond ss
    (Minutes mm) -> nthMinute mm
    (Hours hh) -> nthHour hh
    (WeekDays ww) -> nthWeekDay ww
    (WeekDaysInWeek ww) -> nthWeekDayOfWeek ww
    (WeekDaysInMonth ww) -> nthWeekDayOfMonth ww
    (Weeks wk) -> nthWeek wk
    (Days dd) -> nthDay dd
    -- NOTE(review): 'Months' maps to 'nthDay' rather than an nthMonth-style
    -- selector — looks like a possible copy/paste slip; confirm intended.
    (Months mm) -> nthDay mm
    (YearDays yy) -> nthYearDay yy
eval (EPFCons x y) = eval x >=> eval y
eval (FPFCons x y) = eval x >=> eval y
eval (SPFCons x y) = eval x >=> eval y
eval (EPFConsFPF x y) = eval x >=> eval y
eval (FPFConsSPF x y) = eval x >=> eval y
eval (EPFConsSPF x y) = eval x >=> eval y
|
hellertime/time-recurrence
|
src/Data/Time/Recurrence/ScheduleDetails.hs
|
lgpl-3.0
| 6,184
| 0
| 9
| 1,321
| 1,831
| 966
| 865
| -1
| -1
|
{-# OPTIONS_GHC -fglasgow-exts #-}
-----------------------------------------------------------------------------
-- |
-- Module : Control.Functor.KanExtension.Interpreter
-- Copyright : (C) 2008 Edward Kmett
-- License : BSD-style (see the file LICENSE)
--
-- Maintainer : Edward Kmett <ekmett@gmail.com>
-- Stability : experimental
-- Portability : non-portable (rank-2 polymorphism)
--
-- Ghani and Johann's Interp/InterpT types from ''Initial Algebra Semantics is Enough!''
-- <http://crab.rutgers.edu/~pjohann/tlca07-rev.pdf> and its dual.
----------------------------------------------------------------------------
module Control.Functor.KanExtension.Interpreter
( Interpreter, InterpreterT
, interpreterAlgebra, algebraInterpreter
, Cointerpreter, CointerpreterT
, cointerpreterCoalgebra, coalgebraCointerpreter
) where
import Control.Functor.Extras
import Control.Functor.HigherOrder
import Control.Functor.KanExtension
-- | An interpreter of @y@ into the right Kan extension of @h@ along @g@.
type Interpreter y g h = y :~> Ran g h
-- | Lifts an interpreter for any functor @y@ through one layer of @f@.
type InterpreterT f g h = forall y. Functor y => Interpreter y g h -> Interpreter (f y) g h
-- | An interpreter transformer is exactly a higher-order algebra for @f@
-- carried by @Ran g h@: apply it to the identity interpreter.
interpreterAlgebra :: InterpreterT f g h -> HAlgebra f (Ran g h)
interpreterAlgebra i = i id
-- | Conversely, an algebra yields an interpreter transformer by first
-- mapping the supplied interpreter underneath @f@.
algebraInterpreter :: HFunctor f => HAlgebra f (Ran g h) -> InterpreterT f g h
algebraInterpreter h i = h . hfmap i
-- | Dual notions, using the left Kan extension and coalgebras.
type Cointerpreter y g h = Lan g h :~> y
type CointerpreterT f g h = forall y. Functor y => Cointerpreter y g h -> Cointerpreter (f y) g h
cointerpreterCoalgebra :: CointerpreterT f g h -> HCoalgebra f (Lan g h)
cointerpreterCoalgebra i = i id
coalgebraCointerpreter :: HFunctor f => HCoalgebra f (Lan g h) -> CointerpreterT f g h
coalgebraCointerpreter h i = hfmap i . h
|
urska19/MFP---Samodejno-racunanje-dvosmernih-preslikav
|
Control/Functor/KanExtension/Interpreter.hs
|
apache-2.0
| 1,672
| 4
| 10
| 260
| 382
| 209
| 173
| -1
| -1
|
{-# LANGUAGE Trustworthy #-}
{-# LANGUAGE BangPatterns
, CPP
, ExistentialQuantification
, NoImplicitPrelude
, TypeSynonymInstances
, FlexibleInstances
#-}
module GHC.Event.TimerManager
( -- * Types
TimerManager
-- * Creation
, new
, newWith
, newDefaultBackend
, emControl
-- * Running
, finished
, loop
, step
, shutdown
, cleanup
, wakeManager
-- * Registering interest in timeout events
, TimeoutCallback
, TimeoutKey
, registerTimeout
, updateTimeout
, unregisterTimeout
) where
#include "EventConfig.h"
------------------------------------------------------------------------
-- Imports
import Control.Exception (finally)
import Data.Foldable (sequence_)
import Data.IORef (IORef, atomicModifyIORef', mkWeakIORef, newIORef, readIORef,
writeIORef)
import GHC.Base
import GHC.Clock (getMonotonicTimeNSec)
import GHC.Conc.Signal (runHandlers)
import GHC.Num (Num(..))
import GHC.Real (fromIntegral)
import GHC.Show (Show(..))
import GHC.Event.Control
import GHC.Event.Internal (Backend, Event, evtRead, Timeout(..))
import GHC.Event.Unique (Unique, UniqueSource, newSource, newUnique)
import System.Posix.Types (Fd)
import qualified GHC.Event.Internal as I
import qualified GHC.Event.PSQ as Q
#if defined(HAVE_POLL)
import qualified GHC.Event.Poll as Poll
#else
# error not implemented for this operating system
#endif
------------------------------------------------------------------------
-- Types
-- | A timeout registration cookie.  Opaque to callers; compared by the
-- 'Unique' it wraps.
newtype TimeoutKey = TK Unique
    deriving (Eq)
-- | Callback invoked on timeout events.
type TimeoutCallback = IO ()
-- Lifecycle of a manager: Created -> Running -> (Dying ->) Finished.
data State = Created
           | Running
           | Dying
           | Finished
             deriving (Eq, Show)
-- | A priority search queue, with timeouts as priorities.
type TimeoutQueue = Q.PSQ TimeoutCallback
-- | An edit to apply to a 'TimeoutQueue'.
type TimeoutEdit = TimeoutQueue -> TimeoutQueue
-- | The event manager state.
data TimerManager = TimerManager
    { emBackend      :: !Backend
    , emTimeouts     :: {-# UNPACK #-} !(IORef TimeoutQueue)
    , emState        :: {-# UNPACK #-} !(IORef State)
    , emUniqueSource :: {-# UNPACK #-} !UniqueSource
    , emControl      :: {-# UNPACK #-} !Control
    }
------------------------------------------------------------------------
-- Creation
-- | React to a message arriving on the manager's control file descriptor:
-- a plain wakeup is a no-op (the poll has already returned), a die request
-- moves the state machine to 'Finished', and signals are dispatched to
-- their registered handlers.
handleControlEvent :: TimerManager -> Fd -> Event -> IO ()
handleControlEvent mgr fd _evt = do
  msg <- readControlMessage (emControl mgr) fd
  case msg of
    CMsgWakeup -> return ()
    CMsgDie -> writeIORef (emState mgr) Finished
    CMsgSignal fp s -> runHandlers fp s
-- | Create the platform's default polling backend.  Only poll(2) is wired
-- up here; building on a platform without it is a configure-time error
-- (see the @#error@ in the import section).
newDefaultBackend :: IO Backend
#if defined(HAVE_POLL)
newDefaultBackend = Poll.new
#else
newDefaultBackend = errorWithoutStackTrace "no back end for this platform"
#endif
-- | Create a new timer manager backed by the platform's default backend.
new :: IO TimerManager
new = newDefaultBackend >>= newWith
-- | Create a timer manager over a specific backend.  A weak reference on
-- the state IORef acts as a finalizer: if the manager is garbage-collected
-- without having finished, the backend and control pipe are torn down.
newWith :: Backend -> IO TimerManager
newWith be = do
  timeouts <- newIORef Q.empty
  ctrl <- newControl True
  state <- newIORef Created
  us <- newSource
  _ <- mkWeakIORef state $ do
               -- Finalizer: mark Finished atomically, and only release the
               -- resources if nobody did so already.
               st <- atomicModifyIORef' state $ \s -> (Finished, s)
               when (st /= Finished) $ do
                 I.delete be
                 closeControl ctrl
  let mgr = TimerManager { emBackend = be
                         , emTimeouts = timeouts
                         , emState = state
                         , emUniqueSource = us
                         , emControl = ctrl
                         }
  -- Register interest in reads on both control-pipe fds so 'wakeManager'
  -- and 'shutdown' can interrupt a blocked poll.
  _ <- I.modifyFd be (controlReadFd ctrl) mempty evtRead
  _ <- I.modifyFd be (wakeupReadFd ctrl) mempty evtRead
  return mgr
-- | Asynchronously shut down the timer manager, if it is currently running.
-- Moves the state to 'Dying' and pokes the control pipe so the loop notices.
shutdown :: TimerManager -> IO ()
shutdown mgr = do
  prev <- atomicModifyIORef' (emState mgr) $ \s -> (Dying, s)
  when (prev == Running) $ sendDie (emControl mgr)
-- | Has the manager's event loop terminated?
finished :: TimerManager -> IO Bool
finished mgr = do
  st <- readIORef (emState mgr)
  return (st == Finished)
-- | Release the manager's resources: mark it 'Finished', delete the
-- backend and close the control pipe.  Idempotence is not guaranteed here;
-- callers ('loop', the weak-ref finalizer) coordinate via the state IORef.
cleanup :: TimerManager -> IO ()
cleanup mgr = do
  writeIORef (emState mgr) Finished
  I.delete (emBackend mgr)
  closeControl (emControl mgr)
------------------------------------------------------------------------
-- Event loop
-- | Start handling events.  This function loops until told to stop,
-- using 'shutdown'.
--
-- /Note/: This loop can only be run once per 'TimerManager', as it
-- closes all of its control resources when it finishes.
loop :: TimerManager -> IO ()
loop mgr = do
  -- Atomically claim the Created -> Running transition; any other prior
  -- state means somebody else ran (or is running) the loop.
  state <- atomicModifyIORef' (emState mgr) $ \s -> case s of
    Created -> (Running, s)
    _ -> (s, s)
  case state of
    Created -> go `finally` cleanup mgr
    Dying -> cleanup mgr
    _ -> do cleanup mgr
            errorWithoutStackTrace $ "GHC.Event.Manager.loop: state is already " ++
                show state
 where
  go = do running <- step mgr
          when running go
-- | Run one iteration of the loop: fire all expired callbacks, poll the
-- backend until the next deadline, and report whether the manager is still
-- 'Running'.
step :: TimerManager -> IO Bool
step mgr = do
  timeout <- mkTimeout
  _ <- I.poll (emBackend mgr) (Just timeout) (handleControlEvent mgr)
  state <- readIORef (emState mgr)
  state `seq` return (state == Running)
 where
  -- | Call all expired timer callbacks and return the time to the
  -- next timeout.
  mkTimeout :: IO Timeout
  mkTimeout = do
      now <- getMonotonicTimeNSec
      -- Atomically split the queue into expired entries (run below, outside
      -- the atomic section) and the remaining queue.
      (expired, timeout) <- atomicModifyIORef' (emTimeouts mgr) $ \tq ->
           let (expired, tq') = Q.atMost now tq
               timeout = case Q.minView tq' of
                 Nothing -> Forever
                 Just (Q.E _ t _, _) ->
                     -- This value will always be positive since the call
                     -- to 'atMost' above removed any timeouts <= 'now'
                     let t' = t - now in t' `seq` Timeout t'
           in (tq', (expired, timeout))
      sequence_ $ map Q.value expired
      return timeout
-- | Wake the manager up from a blocked poll by writing to its control pipe.
wakeManager :: TimerManager -> IO ()
wakeManager = sendWakeup . emControl
------------------------------------------------------------------------
-- Registering interest in timeout events
-- | Register a timeout in the given number of microseconds.  The
-- returned 'TimeoutKey' can be used to later unregister or update the
-- timeout.  The timeout is automatically unregistered after the given
-- time has passed.
registerTimeout :: TimerManager -> Int -> TimeoutCallback -> IO TimeoutKey
registerTimeout mgr us cb = do
  !key <- newUnique (emUniqueSource mgr)
  -- A non-positive delay runs the callback synchronously, right here,
  -- without ever touching the queue.
  if us <= 0 then cb
    else do
      now <- getMonotonicTimeNSec
      -- Queue priorities are nanoseconds on the monotonic clock: us * 1000.
      let expTime = fromIntegral us * 1000 + now
      editTimeouts mgr (Q.insert key expTime cb)
  return $ TK key
-- | Unregister an active timeout.  A no-op if the key is not in the queue.
unregisterTimeout :: TimerManager -> TimeoutKey -> IO ()
unregisterTimeout mgr (TK key) = editTimeouts mgr (Q.delete key)
-- | Update an active timeout to fire in the given number of
-- microseconds.
updateTimeout :: TimerManager -> TimeoutKey -> Int -> IO ()
updateTimeout mgr (TK key) us = do
  now <- getMonotonicTimeNSec
  -- Same ns conversion as 'registerTimeout'; 'Q.adjust' leaves the queue
  -- untouched if the key is no longer present.
  let expTime = fromIntegral us * 1000 + now
  editTimeouts mgr (Q.adjust (const expTime) key)
-- | Apply an edit to the timeout queue atomically, waking the manager only
-- when the edit may have changed the next deadline it should sleep until.
editTimeouts :: TimerManager -> TimeoutEdit -> IO ()
editTimeouts mgr g = do
  wake <- atomicModifyIORef' (emTimeouts mgr) f
  when wake (wakeManager mgr)
  where
    f q = (q', wake)
      where
        q' = g q
        -- Conservative wake decision: only skip the wakeup when both the
        -- old and new queues have the same minimum deadline.
        wake = case Q.minView q of
                Nothing -> True
                Just (Q.E _ t0 _, _) ->
                  case Q.minView q' of
                    Just (Q.E _ t1 _, _) ->
                      -- don't wake the manager if the
                      -- minimum element didn't change.
                      t0 /= t1
                    _ -> True
|
ezyang/ghc
|
libraries/base/GHC/Event/TimerManager.hs
|
bsd-3-clause
| 7,765
| 0
| 23
| 2,079
| 1,873
| 988
| 885
| -1
| -1
|
module Language.Haskell.GhcMod.Doc where
import GHC (Ghc, DynFlags)
import qualified GHC as G
import Language.Haskell.GhcMod.Gap (withStyle, showDocWith)
import Outputable (SDoc, PprStyle, mkUserStyle, Depth(AllTheWay), neverQualify)
import Pretty (Mode(..))
-- | Render an 'SDoc' as a normal multi-line page using the given style.
showPage :: DynFlags -> PprStyle -> SDoc -> String
showPage dflag style sdoc = showDocWith dflag PageMode (withStyle dflag style sdoc)
-- | Render an 'SDoc' on a single line using the given style.
showOneLine :: DynFlags -> PprStyle -> SDoc -> String
showOneLine dflag style sdoc = showDocWith dflag OneLineMode (withStyle dflag style sdoc)
-- | Build a user pretty-printing style qualified according to the current
-- interactive context.
getStyle :: Ghc PprStyle
getStyle =
    G.getPrintUnqual >>= \unqual -> return (mkUserStyle unqual AllTheWay)
-- | A user style that never qualifies names.
styleUnqualified :: PprStyle
styleUnqualified = mkUserStyle neverQualify AllTheWay
|
carlohamalainen/ghc-mod
|
Language/Haskell/GhcMod/Doc.hs
|
bsd-3-clause
| 718
| 0
| 8
| 102
| 207
| 116
| 91
| 16
| 1
|
module CSPM.Parser.Tokens (
Token(..), LToken, Model(..),
)
where
import qualified Data.ByteString as B
import CSPM.Syntax.AST (Model(..))
import CSPM.PrettyPrinter
import Util.Annotated
import Util.PrettyPrint
-- | Lexical tokens produced by the CSPM lexer.  Each constructor maps onto
-- the concrete syntax rendered by the 'PrettyPrintable' instance below.
data Token =
    -- Literals and identifiers
    TInteger Int
    | TChar Char
    | TString B.ByteString
    | TFalse
    | TTrue
    | TIdent B.ByteString
    -- Assertion / model-checking directives
    | TPrint B.ByteString
    | TRefines Model
    | TModel Model
    | TTauPriority
    | TPartialOrderReduce
    | TStringOption B.ByteString
    | TDeadlockFree
    | TDivergenceFree
    | TLivelockFree
    | TDeterministic
    | THasTrace
    -- Layout, modules and definitions
    | TNewLine
    | TDefineEqual
    | TModule
    | TExports
    | TEndModule
    | TScope
    | TInstance
    | TOfType
    | TYield
    | TYieldStar
    | TTimed
    -- Punctuation
    | TComma
    | TDot
    | TExclamationMark
    | TQuestionMark
    | TDollar
    | TPipe
    | TDoubleDot
    | TColon
    | TDrawnFrom
    | TTie -- "<->"
    | TDoubleAt
    | TWildCard
    -- Expression keywords
    | TIf
    | TThen
    | TElse
    | TLet
    | TWithin
    | TBackSlash
    | TLambdaDot
    -- Declaration keywords
    | TChannel
    | TAssert
    | TAssertNot
    | TDataType
    | TSubType
    | TExternal
    | TTransparent
    | TNameType
    | TSemiColon
    | TGuard
    -- Boolean and arithmetic operators
    | TNot
    | TAnd
    | TOr
    | TEq
    | TNotEq
    | TLtEq
    | TGtEq
    | TLt
    | TGt
    | TPlus
    | TMinus
    | TTimes
    | TDivide
    | TMod
    -- Sequence syntax
    | TCloseSeq
    | TEmptySeq
    | TConcat
    | THash
    -- Brackets
    | TLParen
    | TRParen
    | TLBrace
    | TRBrace
    | TLPipeBrace
    | TRPipeBrace
    | TLDoubleSqBracket
    | TRDoubleSqBracket
    | TLPipeSqBracket
    | TRPipeSqBracket
    | TLSqBracket
    | TRSqBracket
    -- Process operators
    | TExtChoice
    | TIntChoice
    | TInterleave
    | TPrefix
    | TInterrupt
    | TSlidingChoice
    | TRException
    | TParallel
    | TProject
    | TLSyncInterrupt
    | TRSyncInterrupt
    | TLSyncExtChoice
    | TRSyncExtChoice
    | TLMap
    | TRMap
    | TEOF
    deriving Eq
-- | A token paired with its source location.
type LToken = Located Token
-- | Tokens display as their concrete syntax (via 'prettyPrint').
instance Show Token where
    show = show . prettyPrint
-- | Render each token in its concrete CSPM syntax.
instance PrettyPrintable Token where
    prettyPrint (TInteger i) = int i
    prettyPrint (TChar c) = quotes (char c)
    prettyPrint (TString s) = doubleQuotes (bytestring s)
    prettyPrint TFalse = text "false"
    prettyPrint TTrue = text "true"
    prettyPrint (TIdent s) = bytestring s
    prettyPrint (TPrint s) = text "print" <+> bytestring s
    prettyPrint (TRefines m) = char '[' <> prettyPrint m <> char '='
    prettyPrint (TModel m) = char '[' <> prettyPrint m <> char ']'
    prettyPrint TTauPriority = text "tau priority"
    prettyPrint TPartialOrderReduce = text "partial order reduce"
    prettyPrint (TStringOption s) = char '[' <> bytestring s <> char ']'
    prettyPrint TDeadlockFree = text "deadlock free"
    prettyPrint TDivergenceFree = text "divergence free"
    prettyPrint TLivelockFree = text "livelock free"
    prettyPrint TDeterministic = text "deterministic"
    prettyPrint THasTrace = text "has trace"
    prettyPrint TNewLine = text "<newline>"
    prettyPrint TDefineEqual = char '='
    prettyPrint TModule = text "module"
    prettyPrint TExports = text "exports"
    prettyPrint TEndModule = text "endmodule"
    prettyPrint TScope = text "::"
    prettyPrint TInstance = text "instance"
    -- NOTE: TScope and TOfType render identically ("::"); they are
    -- distinguished only by lexer context.
    prettyPrint TOfType = text "::"
    prettyPrint TYield = text "=>"
    prettyPrint TYieldStar = text "=>*"
    prettyPrint TTimed = text "Timed"
    prettyPrint TComma = char ','
    prettyPrint TDot = char '.'
    prettyPrint TExclamationMark = char '!'
    prettyPrint TQuestionMark = char '?'
    prettyPrint TDollar = char '$'
    prettyPrint TPipe = char '|'
    prettyPrint TDoubleDot = text ".."
    prettyPrint TColon = char ':'
    prettyPrint TDrawnFrom = text "<-"
    prettyPrint TTie = text "<->"
    prettyPrint TDoubleAt = text "@@"
    prettyPrint TWildCard = char '_'
    prettyPrint TIf = text "if"
    prettyPrint TThen = text "then"
    prettyPrint TElse = text "else"
    prettyPrint TLet = text "let"
    prettyPrint TWithin = text "within"
    prettyPrint TBackSlash = char '\\'
    prettyPrint TLambdaDot = char '@'
    prettyPrint TChannel = text "channel"
    prettyPrint TAssert = text "assert"
    -- NOTE: TAssertNot prints the same text as TNot.
    prettyPrint TAssertNot = text "not"
    prettyPrint TDataType = text "datatype"
    prettyPrint TSubType = text "subtype"
    prettyPrint TExternal = text "external"
    prettyPrint TTransparent = text "transparent"
    prettyPrint TNameType = text "nametype"
    prettyPrint TSemiColon = char ';'
    prettyPrint TGuard = char '&'
    prettyPrint TNot = text "not"
    prettyPrint TAnd = text "and"
    prettyPrint TOr = text "or"
    prettyPrint TEq = text "=="
    prettyPrint TNotEq = text "!="
    prettyPrint TLtEq = text "<="
    prettyPrint TGtEq = text ">="
    prettyPrint TLt = char '<'
    prettyPrint TGt = char '>'
    prettyPrint TPlus = char '+'
    prettyPrint TMinus = char '-'
    prettyPrint TTimes = char '*'
    prettyPrint TDivide = char '/'
    prettyPrint TMod = char '%'
    prettyPrint TCloseSeq = char '<'
    prettyPrint TEmptySeq = text "<>"
    prettyPrint TConcat = char '^'
    prettyPrint THash = char '#'
    prettyPrint TLParen = char '('
    prettyPrint TRParen = char ')'
    prettyPrint TLBrace = char '{'
    prettyPrint TRBrace = char '}'
    prettyPrint TLPipeBrace = text "{|"
    prettyPrint TRPipeBrace = text "|}"
    prettyPrint TLDoubleSqBracket = text "[["
    prettyPrint TRDoubleSqBracket = text "]]"
    prettyPrint TLPipeSqBracket = text "[|"
    prettyPrint TRPipeSqBracket = text "|]"
    prettyPrint TLSqBracket = text "["
    prettyPrint TRSqBracket = text "]"
    prettyPrint TExtChoice = text "[]"
    prettyPrint TIntChoice = text "|~|"
    prettyPrint TInterleave = text "|||"
    prettyPrint TPrefix = text "->"
    prettyPrint TInterrupt = text "/\\"
    prettyPrint TSlidingChoice = text "[>"
    prettyPrint TRException = text "|>"
    prettyPrint TParallel = text "||"
    prettyPrint TProject = text "|\\"
    prettyPrint TLSyncInterrupt = text "/+"
    prettyPrint TRSyncInterrupt = text "+\\"
    prettyPrint TLSyncExtChoice = text "[+"
    prettyPrint TRSyncExtChoice = text "+]"
    prettyPrint TLMap = text "(|"
    prettyPrint TRMap = text "|)"
    prettyPrint TEOF = text "EOF"
|
sashabu/libcspm
|
src/CSPM/Parser/Tokens.hs
|
bsd-3-clause
| 6,278
| 0
| 8
| 1,704
| 1,690
| 854
| 836
| 219
| 0
|
{- |
Module : $Header$
Description : theorem hide shift proof rule for development graphs
Copyright : (c) Jorina F. Gerken, Till Mossakowski, Uni Bremen 2002-2006
License : GPLv2 or higher, see LICENSE.txt
Maintainer : Christian.Maeder@dfki.de
Stability : provisional
Portability : non-portable(Logic)
theorem hide shift proof rule for development graphs
Follows Sect. IV:4.4 of the CASL Reference Manual.
-}
{-
References:
T. Mossakowski, S. Autexier and D. Hutter:
Extending Development Graphs With Hiding.
H. Hussmann (ed.): Fundamental Approaches to Software Engineering 2001,
Lecture Notes in Computer Science 2029, p. 269-283,
Springer-Verlag 2001.
-}
module Proofs.TheoremHideShift
( theoremHideShift
, theoremHideShiftFromList
) where
import Logic.Logic
import Static.DevGraph
import Static.DgUtils
import Static.History
import Proofs.EdgeUtils
import Proofs.SimpleTheoremHideShift
(thmHideShift, getInComingGlobalUnprovenEdges)
import Common.LibName
import Common.Result
import Data.Graph.Inductive.Graph as Graph
import qualified Data.Map as Map
import Data.Maybe
{- ----------------------------------------------
Theorem hide shift and auxiliaries
--------------------------------------------- -}
-- | Apply the theorem-hide-shift rule to every node of the named
-- development graph in the library environment.
theoremHideShift :: LibName -> LibEnv -> Result LibEnv
theoremHideShift ln libEnv = return
  (Map.adjust (\ dg -> theoremHideShiftAux (labNodesDG dg) dg) ln libEnv)
{- | assume that the normal forms are computed already.
Returns the graph unchanged if no node qualifies. -}
theoremHideShiftAux :: [LNode DGNodeLab] -> DGraph -> DGraph
theoremHideShiftAux ns dg = let
  -- Nodes with incoming hiding links whose normal form and signature
  -- morphism are both available.
  nodesWHiding = map fst $ filter
    (\ (_, lbl) -> labelHasHiding lbl && isJust (dgn_nf lbl)
    && isJust (dgn_sigma lbl)) ns
  {- all nodes with incoming hiding links
  all the theorem links entering these nodes
  have to replaced by theorem links with the same origin
  but pointing to the normal form of the former target node -}
  ingoingEdges = concatMap (getInComingGlobalUnprovenEdges dg) nodesWHiding
  in foldl theoremHideShiftForEdge dg ingoingEdges
-- | Replace one unproven theorem link: delete the original edge and insert
-- a proven copy whose proof basis comes from the auxiliary rule below.
theoremHideShiftForEdge :: DGraph -> LEdge DGLinkLab -> DGraph
theoremHideShiftForEdge dg edge@(source, target, edgeLab) =
  case maybeResult $ theoremHideShiftForEdgeAux dg edge of
   -- The Result is expected to succeed here; failure is a rule violation.
   Nothing -> error "theoremHideShiftForEdgeAux"
   Just (dg', pbasis) -> let
     provenEdge = (source, target, edgeLab
       { dgl_type = setProof (Proven thmHideShift pbasis) $ dgl_type edgeLab
       , dgl_origin = DGLinkProof
       , dgl_id = defaultEdgeId })
     in insertDGLEdge provenEdge $ changeDGH dg' $ DeleteEdge edge
-- | Insert (or reuse) a global theorem link from the source node to the
-- normal form of the target node, composing the edge morphism with the
-- target's signature morphism.  Returns the updated graph and the proof
-- basis consisting of that (possibly pre-existing) edge.
theoremHideShiftForEdgeAux :: DGraph -> LEdge DGLinkLab
  -> Result (DGraph, ProofBasis)
theoremHideShiftForEdgeAux dg (sn, tn, llab) = do
  -- Irrefutable 'Just' patterns: callers only pass targets whose normal
  -- form and sigma were checked in 'theoremHideShiftAux'.
  let tlab = labDG dg tn
      Just nfNode = dgn_nf tlab
      phi = dgl_morphism llab
      Just muN = dgn_sigma tlab
  cmor <- comp phi muN
  let newEdge = (sn, nfNode, defDGLink cmor globalThm DGLinkProof)
  case tryToGetEdge newEdge dg of
    Nothing -> let
      newGraph = changeDGH dg $ InsertEdge newEdge
      -- Recover the edge id assigned by the history mechanism.
      finalEdge = case getLastChange newGraph of
        InsertEdge final_e -> final_e
        _ -> error "Proofs.Global.globDecompForOneEdgeAux"
      in return
      (newGraph, addEdgeId emptyProofBasis $ getEdgeId finalEdge)
    Just e -> return (dg, addEdgeId emptyProofBasis $ getEdgeId e)
-- | Restricted variant of 'theoremHideShift' that only considers the
-- supplied nodes instead of the whole graph.
theoremHideShiftFromList :: LibName -> [LNode DGNodeLab] -> LibEnv
  -> Result LibEnv
theoremHideShiftFromList ln ls libEnv =
  return (Map.adjust (theoremHideShiftAux ls) ln libEnv)
|
mariefarrell/Hets
|
Proofs/TheoremHideShift.hs
|
gpl-2.0
| 3,623
| 0
| 18
| 767
| 707
| 363
| 344
| 57
| 3
|
{-# LANGUAGE CPP #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE MagicHash #-}
{-# LANGUAGE MultiParamTypeClasses #-}
#ifdef USE_REFLEX_OPTIMIZER
{-# OPTIONS_GHC -fplugin=Reflex.Optimizer #-}
#endif
-- | This module provides a variation of 'Dynamic' values that uses cheap
-- pointer equality checks to reduce the amount of signal propagation needed.
module Reflex.Dynamic.Uniq
( UniqDynamic
, uniqDynamic
, fromUniqDynamic
, alreadyUniqDynamic
) where
import Control.Applicative (Applicative (..))
import GHC.Exts
import Reflex.Class
-- | A 'Dynamic' whose 'updated' 'Event' will never fire with the same value as
-- the 'current' 'Behavior''s contents. In order to maintain this constraint,
-- the value inside a 'UniqDynamic' is always evaluated to
-- <https://wiki.haskell.org/Weak_head_normal_form weak head normal form>.
--
-- Internally, 'UniqDynamic' uses pointer equality as a heuristic to avoid
-- unnecessary update propagation; this is much more efficient than performing
-- full comparisons. However, when the 'UniqDynamic' is converted back into a
-- regular 'Dynamic', a full comparison is performed.
newtype UniqDynamic t a = UniqDynamic { unUniqDynamic :: Dynamic t a }
-- | Construct a 'UniqDynamic' by eliminating redundant updates from a 'Dynamic'.
uniqDynamic :: Reflex t => Dynamic t a -> UniqDynamic t a
-- Each candidate update is compared (by pointer) against the current value
-- sampled from the source Dynamic; unchanged values are filtered out cheaply.
uniqDynamic d = UniqDynamic $ unsafeBuildDynamic (sample $ current d) $ flip pushCheap (updated d) $ \new -> do
  old <- sample $ current d --TODO: Is it better to sample ourselves here?
  return $ unsafeJustChanged old new
-- | Retrieve a normal 'Dynamic' from a 'UniqDynamic'. This will perform a
-- final check using the output type's 'Eq' instance to ensure deterministic
-- behavior.
--
-- WARNING: If used with a type whose 'Eq' instance is not law-abiding -
-- specifically, if there are cases where @x /= x@, 'fromUniqDynamic' may
-- eliminate more 'updated' occurrences than it should. For example, NaN values
-- of 'Double' and 'Float' are considered unequal to themselves by the 'Eq'
-- instance, but can be equal by pointer equality. This may cause 'UniqDynamic'
-- to lose changes from NaN to NaN.
fromUniqDynamic :: (Reflex t, Eq a) => UniqDynamic t a -> Dynamic t a
-- Drops an update when it is equal to the current value by 'superEq'.
fromUniqDynamic (UniqDynamic d) = unsafeDynamic (current d) e'
  where
    -- Only consider values different if they fail both pointer equality /and/
    -- 'Eq' equality.  This is to make things a bit more deterministic in the
    -- case of unlawful 'Eq' instances.  However, it is still possible to
    -- achieve nondeterminism by constructing elements that are identical in
    -- value, unequal according to 'Eq', and nondeterministically equal or
    -- nonequal by pointer quality.  I suspect that it is impossible to make the
    -- behavior deterministic in this case.
    superEq a b = a `unsafePtrEq` b || a == b
    e' = attachWithMaybe (\x x' -> if x' `superEq` x then Nothing else Just x') (current d) (updated d)
-- | Create a UniqDynamic without uniqing it on creation. This will be slightly
-- faster than uniqDynamic when used with a Dynamic whose values are always (or
-- nearly always) different from its previous values; if used with a Dynamic
-- whose values do not change frequently, it may be much slower than uniqDynamic
alreadyUniqDynamic :: Dynamic t a -> UniqDynamic t a
-- No uniqing pass: the caller asserts the Dynamic already never repeats.
alreadyUniqDynamic = UniqDynamic
-- Pointer equality after forcing both arguments to WHNF.  May report False
-- for equal values (never True for unequal ones), so callers may only use
-- it as a conservative filter.
unsafePtrEq :: a -> a -> Bool
unsafePtrEq a b = case a `seq` b `seq` reallyUnsafePtrEquality# a b of
  0# -> False
  _ -> True
-- Keep the new value only when it is not pointer-identical to the old one.
unsafeJustChanged :: a -> a -> Maybe a
unsafeJustChanged old new
  | old `unsafePtrEq` new = Nothing
  | otherwise = Just new
-- | Accumulate directly into a 'UniqDynamic' by wrapping the step function
-- so that results pointer-equal to the previous state are discarded before
-- they ever reach the underlying Dynamic.
instance Reflex t => Accumulator t (UniqDynamic t) where
  accumMaybeM f z e = do
    let f' old change = do
          mNew <- f old change
          return $ unsafeJustChanged old =<< mNew
    d <- accumMaybeMDyn f' z e
    return $ UniqDynamic d
  mapAccumMaybeM f z e = do
    let f' old change = do
          (mNew, output) <- f old change
          return (unsafeJustChanged old =<< mNew, output)
    (d, out) <- mapAccumMaybeMDyn f' z e
    return (UniqDynamic d, out)
-- Standard instances; combining operations re-uniq the result, since a
-- function of distinct inputs can still produce equal outputs.
instance Reflex t => Functor (UniqDynamic t) where
  fmap f (UniqDynamic d) = uniqDynamic $ fmap f d
instance Reflex t => Applicative (UniqDynamic t) where
  pure = UniqDynamic . constDyn
  UniqDynamic a <*> UniqDynamic b = uniqDynamic $ a <*> b
  -- The discarded side is returned unused, so no re-uniqing is needed.
  _ *> b = b
  a <* _ = a
instance Reflex t => Monad (UniqDynamic t) where
  UniqDynamic x >>= f = uniqDynamic $ x >>= unUniqDynamic . f
  _ >> b = b
  return = pure
|
ryantrinkle/reflex
|
src/Reflex/Dynamic/Uniq.hs
|
bsd-3-clause
| 4,511
| 0
| 16
| 896
| 849
| 445
| 404
| 56
| 2
|
module Nums where
main :: Fay ()
-- Prints a negative Double literal; exercises unary negation in Fay.
main = print (-10 :: Double)
|
beni55/fay
|
tests/nums.hs
|
bsd-3-clause
| 63
| 0
| 7
| 14
| 30
| 17
| 13
| 3
| 1
|
{-# LANGUAGE CPP, ScopedTypeVariables, OverloadedStrings #-}
-----------------------------------------------------------------------------
--
-- Module : IDE.Completion
-- Copyright : 2007-2011 Juergen Nicklisch-Franken, Hamish Mackenzie
-- License : GPL
--
-- Maintainer : <maintainer@leksah.org>
-- Stability : provisional
-- Portability :
--
-- |
--
-----------------------------------------------------------------------------
module IDE.Completion (complete, cancel, setCompletionSize) where
import Prelude hiding(getChar, getLine)
import Data.List as List (stripPrefix, isPrefixOf, filter)
import Data.Char
import Data.IORef
import Control.Monad
import Graphics.UI.Gtk as Gtk
import Graphics.UI.Gtk.Gdk.EventM as Gtk
import IDE.Core.State
import IDE.Metainfo.Provider(getDescription,getCompletionOptions)
import IDE.TextEditor as TE
import Control.Monad.IO.Class (MonadIO(..))
import Control.Monad.Trans.Reader (ask)
import qualified Control.Monad.Reader as Gtk (liftIO)
import Control.Monad.Trans.Class (MonadTrans(..))
import Control.Applicative ((<$>))
import IDE.Utils.GUIUtils (getDarkState)
import Data.Text (Text)
import qualified Data.Text as T
(empty, commonPrefixes, pack, unpack, null, stripPrefix,
isPrefixOf)
-- | Trigger or refresh code completion for the given source view.
-- @always@ forces completion even when it was not explicitly requested.
-- If a completion window is already open its options are refreshed;
-- otherwise (when the IDE is idle) a new completion session is started,
-- unless the preferences restrict automatic completion.
complete :: TextEditor editor => EditorView editor -> Bool -> IDEAction
complete sourceView always = do
    currentState' <- readIDE currentState
    prefs' <- readIDE prefs
    (_, completion') <- readIDE completion
    case (currentState',completion') of
        (IsCompleting c, Just (CompletionWindow window tv st)) -> do
            isWordChar <- getIsWordChar sourceView
            updateOptions window tv st sourceView c isWordChar always
        (IsRunning,_) -> when (always || not (completeRestricted prefs'))
                                (initCompletion sourceView always)
        _ -> return ()
-- | Dismiss the completion window, if a completion session is in progress.
-- A no-op in any other IDE state.
cancel :: IDEAction
cancel = do
    currentState' <- readIDE currentState
    (_, completion') <- readIDE completion
    case (currentState',completion') of
        (IsCompleting conn , Just (CompletionWindow window tv st)) -> do
            cancelCompletion window tv st conn
        _ -> return ()
-- | Remember (and, if a window is open, immediately apply) the preferred
-- completion-popup size.  Degenerate sizes (<= 10 px in either dimension)
-- are ignored by the guard on the first equation.
setCompletionSize :: (Int, Int) -> IDEAction
setCompletionSize (x, y) | x > 10 && y > 10 = do
    -- NOTE: the local 'completion' shadows the IDE record field of the
    -- same name; the record update below stores the shadowed snd value.
    (_, completion) <- readIDE completion
    case completion of
        Just (CompletionWindow window _ _) -> liftIO $ windowResize window x y
        Nothing -> return ()
    modifyIDE_ $ \ide -> ide{completion = ((x, y), completion)}
setCompletionSize _ = return ()
-- | Compute the word-character predicate for completion, based on the
-- character immediately before the cursor: identifier characters yield the
-- identifier predicate, operator characters the operator predicate, and
-- anything else a predicate that accepts nothing.  On an @import@ line,
-- '.' also counts as an identifier character (for module names).
getIsWordChar :: forall editor. TextEditor editor => EditorView editor -> IDEM (Char -> Bool)
getIsWordChar sourceView = do
    -- (removed a dead "ideR <- ask" binding whose result was never used)
    buffer <- getBuffer sourceView
    (_, end) <- getSelectionBounds buffer
    sol <- backwardToLineStartC end
    eol <- forwardToLineEndC end
    line <- getSlice buffer sol eol False
    let isImport = "import " `T.isPrefixOf` line
        isIdent a = isAlphaNum a || a == '\'' || a == '_' || (isImport && a == '.')
        isOp a = isSymbol a || a == ':' || a == '\\' || a == '*' || a == '/' || a == '-'
                    || a == '!' || a == '@' || a == '%' || a == '&' || a == '?'
    prev <- backwardCharC end
    mbChar <- getChar prev
    case mbChar of
        Just c | isIdent c -> return isIdent
        Just c | isOp c -> return isOp
        _ -> return $ const False
-- | Create (or reuse) the completion popup for the given source view
-- and show the initial set of options.  The popup is an undecorated
-- utility window containing a horizontal paned view: a tree of
-- candidate names on one side and a description pane on the other.
initCompletion :: forall editor. TextEditor editor => EditorView editor -> Bool -> IDEAction
initCompletion sourceView always = do
    ideR <- ask
    ((width, height), completion') <- readIDE completion
    isWordChar <- getIsWordChar sourceView
    case completion' of
        -- A completion window already exists: rewire its event handlers
        -- to this source view and refresh the options.
        Just (CompletionWindow window' tree' store') -> do
            cids <- addEventHandling window' sourceView tree' store' isWordChar always
            modifyIDE_ (\ide -> ide{currentState = IsCompleting cids})
            updateOptions window' tree' store' sourceView cids isWordChar always
        -- First completion: build the popup from scratch.
        Nothing -> do
            windows <- getWindows
            prefs <- readIDE prefs
            window <- liftIO windowNewPopup
            liftIO $ set window [
                windowTypeHint := WindowTypeHintUtility,
                windowDecorated := False,
                windowResizable := True,
                windowDefaultWidth := width,
                windowDefaultHeight := height,
                windowTransientFor := head windows]
            liftIO $ containerSetBorderWidth window 3
            paned <- liftIO $ hPanedNew
            liftIO $ containerAdd window paned
            nameScrolledWindow <- liftIO $ scrolledWindowNew Nothing Nothing
            liftIO $ widgetSetSizeRequest nameScrolledWindow 250 40
            tree <- liftIO $ treeViewNew
            liftIO $ containerAdd nameScrolledWindow tree
            store <- liftIO $ listStoreNew []
            liftIO $ treeViewSetModel tree store
            -- Use the editor's configured font, or a Monospace fallback.
            font <- liftIO $ case textviewFont prefs of
                Just str -> do
                    fontDescriptionFromString str
                Nothing -> do
                    f <- fontDescriptionNew
                    fontDescriptionSetFamily f ("Monospace" :: Text)
                    return f
            liftIO $ widgetModifyFont tree (Just font)
            column <- liftIO $ treeViewColumnNew
            liftIO $ set column [
                treeViewColumnSizing := TreeViewColumnFixed,
                treeViewColumnMinWidth := 800] -- OSX does not like it if there is no hscroll
            liftIO $ treeViewAppendColumn tree column
            renderer <- liftIO $ cellRendererTextNew
            liftIO $ treeViewColumnPackStart column renderer True
            liftIO $ cellLayoutSetAttributes column renderer store (\name -> [ cellText := name ])
            liftIO $ set tree [treeViewHeadersVisible := False]
            descriptionBuffer <- newDefaultBuffer Nothing ""
            descriptionView <- newView descriptionBuffer (textviewFont prefs)
            preferDark <- getDarkState
            setStyle preferDark descriptionBuffer $ case sourceStyle prefs of
                (False,_) -> Nothing
                (True,v) -> Just v
            descriptionScrolledWindow <- getScrolledWindow descriptionView
            visible <- liftIO $ newIORef False
            activeView <- liftIO $ newIORef Nothing
            treeSelection <- liftIO $ treeViewGetSelection tree
            -- When the selected candidate changes, look up its description
            -- and show it in the description pane.
            liftIO $ on treeSelection treeSelectionSelectionChanged $ do
                treeSelectionSelectedForeach treeSelection $ \treePath -> do
                    rows <- treeSelectionGetSelectedRows treeSelection
                    case rows of
                        [treePath] -> reflectIDE (withWord store treePath (\name -> do
                            description <- getDescription name
                            setText descriptionBuffer description
                            )) ideR
                        _ -> return ()
            liftIO $ panedAdd1 paned nameScrolledWindow
            liftIO $ panedAdd2 paned descriptionScrolledWindow
            cids <- addEventHandling window sourceView tree store isWordChar always
            modifyIDE_ (\ide -> ide{currentState = IsCompleting cids,
                completion = ((width, height), Just (CompletionWindow window tree store))})
            updateOptions window tree store sourceView cids isWordChar always
-- | Wire up all event handlers that drive the completion popup: key
-- press/release handlers on the source view for navigating and
-- accepting candidates, and handlers on the popup itself for
-- mouse-driven resizing and row activation.  Returns the signal
-- connections so they can be disconnected when completion is cancelled.
addEventHandling :: TextEditor editor => Window -> EditorView editor -> TreeView -> ListStore Text
                 -> (Char -> Bool) -> Bool -> IDEM Connections
addEventHandling window sourceView tree store isWordChar always = do
    ideR <- ask
    cidsPress <- TE.onKeyPress sourceView $ do
        keyVal <- lift eventKeyVal
        name <- lift eventKeyName
        modifier <- lift eventModifier
        char <- liftIO $ keyvalToChar keyVal
        Just model <- liftIO $ treeViewGetModel tree
        selection <- liftIO $ treeViewGetSelection tree
        count <- liftIO $ treeModelIterNChildren model Nothing
        Just column <- liftIO $ treeViewGetColumn tree 0
        case (name, modifier, char) of
            -- Tab: complete to the longest common prefix of the options.
            ("Tab", _, _) -> (do
                visible <- liftIO $ get tree widgetVisible
                if visible then (do
                    liftIDE $ tryToUpdateOptions window tree store sourceView True isWordChar always
                    return True
                    )
                    else return False
                )
            -- Return: accept the currently selected candidate (if any).
            ("Return", _, _) -> (do
                visible <- liftIO $ get tree widgetVisible
                if visible then (do
                    maybeRow <- liftIO $ getRow tree
                    case maybeRow of
                        Just row -> (do
                            liftIO $ treeViewRowActivated tree [row] column
                            return True
                            )
                        Nothing -> (do
                            liftIDE cancel
                            return False
                            )
                    )
                    else return False
                )
            -- Down/Up: move the selection, keeping it scrolled into view.
            ("Down", _, _) -> (do
                visible <- liftIO $ get tree widgetVisible
                if visible then (do
                    maybeRow <- liftIO $ getRow tree
                    let newRow = maybe 0 (\row -> row + 1) maybeRow
                    when (newRow < count) $ liftIO $ do
                        treeSelectionSelectPath selection [newRow]
                        treeViewScrollToCell tree (Just [newRow]) Nothing Nothing
                    return True
                    )
                    else return False
                )
            ("Up", _, _) -> (do
                visible <- liftIO $ get tree widgetVisible
                if visible then (do
                    maybeRow <- liftIO $ getRow tree
                    let newRow = maybe 0 (\row -> row - 1) maybeRow
                    when (newRow >= 0) $ liftIO $ do
                        treeSelectionSelectPath selection [newRow]
                        treeViewScrollToCell tree (Just [newRow]) Nothing Nothing
                    return True
                    )
                    else return False
                )
            -- Word characters, BackSpace and Shift keep completion alive;
            -- the option list is refreshed on key release instead.
            (_, _, Just c) | isWordChar c -> (do
                return False
                )
            ("BackSpace", _, _) -> (do
                return False
                )
            (shift, _, _) | (shift == "Shift_L") || (shift == "Shift_R") -> (do
                return False
                )
            -- Anything else dismisses the popup.
            _ -> (do
                liftIDE cancel
                return False
                )
    cidsRelease <- TE.onKeyRelease sourceView $ do
        name <- lift eventKeyName
        modifier <- lift eventModifier
        case (name, modifier) of
            -- Refresh the options after a deletion has taken effect.
            ("BackSpace", _) -> do
                liftIDE $ complete sourceView False
                return False
            _ -> return False
    -- Dragging with the mouse resizes the popup: button-press installs a
    -- resize handler keyed off the grab, motion events invoke it, and
    -- button-release removes it again.
    resizeHandler <- liftIO $ newIORef Nothing
    idButtonPress <- liftIO $ window `on` buttonPressEvent $ do
        button <- eventButton
        (x, y) <- eventCoordinates
        time <- eventTime
#ifdef MIN_VERSION_gtk3
        mbDrawWindow <- Gtk.liftIO $ widgetGetWindow window
#else
        mbDrawWindow <- Gtk.liftIO $ Just <$> widgetGetDrawWindow window
#endif
        case mbDrawWindow of
            Just drawWindow -> do
                status <- Gtk.liftIO $ pointerGrab
                    drawWindow
                    False
                    [PointerMotionMask, ButtonReleaseMask]
                    (Nothing:: Maybe DrawWindow)
                    Nothing
                    time
                when (status == GrabSuccess) $ Gtk.liftIO $ do
                    (width, height) <- windowGetSize window
                    writeIORef resizeHandler $ Just $ \(newX, newY) -> do
                        reflectIDE (
                            setCompletionSize ((width + (floor (newX - x))), (height + (floor (newY - y))))) ideR
            Nothing -> return ()
        return True
    idMotion <- liftIO $ window `on` motionNotifyEvent $ do
        mbResize <- Gtk.liftIO $ readIORef resizeHandler
        case mbResize of
            Just resize -> eventCoordinates >>= (Gtk.liftIO . resize) >> return True
            Nothing -> return False
    idButtonRelease <- liftIO $ window `on` buttonReleaseEvent $ do
        mbResize <- Gtk.liftIO $ readIORef resizeHandler
        case mbResize of
            Just resize -> do
                eventCoordinates >>= (Gtk.liftIO . resize)
                eventTime >>= (Gtk.liftIO . pointerUngrab)
                Gtk.liftIO $ writeIORef resizeHandler Nothing
                return True
            Nothing -> return False
    -- Activating a row (double-click or Return) inserts the completion
    -- and closes the popup.
    idSelected <- liftIO $ on tree rowActivated $ \treePath column -> do
        reflectIDE (withWord store treePath (replaceWordStart sourceView isWordChar)) ideR
        liftIO $ postGUIAsync $ reflectIDE cancel ideR
    return $ concat [cidsPress, cidsRelease, [ConnectC idButtonPress, ConnectC idMotion, ConnectC idButtonRelease, ConnectC idSelected]]
-- | Run an action with the candidate word stored at the given tree
-- path.  Paths with anything other than exactly one index are ignored.
withWord :: ListStore Text -> TreePath -> (Text -> IDEM ()) -> IDEM ()
withWord store treePath action =
    case treePath of
        [row] -> liftIO (listStoreGetValue store row) >>= action
        _     -> return ()
-- | Replace the word under the cursor with the chosen completion
-- 'name'.  Only the missing suffix is inserted; if the characters after
-- the cursor already form part of the completion they are skipped over
-- rather than duplicated.
replaceWordStart :: TextEditor editor => EditorView editor -> (Char -> Bool) -> Text -> IDEM ()
replaceWordStart sourceView isWordChar name = do
    buffer <- getBuffer sourceView
    (selStart, selEnd) <- getSelectionBounds buffer
    start <- findWordStart selStart isWordChar
    wordStart <- getText buffer start selEnd True
    case T.stripPrefix wordStart name of
        Just extra -> do
            end <- findWordEnd selEnd isWordChar
            wordFinish <- getText buffer selEnd end True
            case T.stripPrefix wordFinish extra of
                -- The text after the cursor is a prefix of what we would
                -- insert: jump past it and add only what is still missing.
                Just extra2 | not (T.null wordFinish) -> do
                    selectRange buffer end end
                    insert buffer end extra2
                _ -> insert buffer selEnd extra
        -- The typed word is not a prefix of the completion; do nothing.
        Nothing -> return ()
-- | Tear down the completion popup: clear the candidate store,
-- disconnect all the event handlers installed by 'addEventHandling',
-- hide the window, and mark the IDE as no longer completing.
cancelCompletion :: Window -> TreeView -> ListStore Text -> Connections -> IDEAction
cancelCompletion window tree store connections = do
    liftIO $ listStoreClear (store :: ListStore Text)
    liftIO $ signalDisconnectAll connections
    liftIO $ widgetHide window
    modifyIDE_ (\ide -> ide{currentState = IsRunning})
-- | Refresh the completion options shown in the popup; if there is no
-- word to complete the whole completion is cancelled instead.
updateOptions :: forall editor. TextEditor editor => Window -> TreeView -> ListStore Text -> EditorView editor -> Connections -> (Char -> Bool) -> Bool -> IDEAction
updateOptions window tree store sourceView connections isWordChar always = do
    result <- tryToUpdateOptions window tree store sourceView False isWordChar always
    -- 'unless result' reads better than 'when (not result)'.
    unless result $ cancelCompletion window tree store connections
-- | Recompute the completion candidates for the word to the left of the
-- cursor.  Returns False when there is no word to complete (the cursor
-- sits on a word boundary); otherwise clears the store, kicks off the
-- option lookup, and returns True.
tryToUpdateOptions :: TextEditor editor => Window -> TreeView -> ListStore Text -> EditorView editor -> Bool -> (Char -> Bool) -> Bool -> IDEM Bool
tryToUpdateOptions window tree store sourceView selectLCP isWordChar always = do
    ideR <- ask
    liftIO $ listStoreClear (store :: ListStore Text)
    buffer <- getBuffer sourceView
    (selStart, end) <- getSelectionBounds buffer
    start <- findWordStart selStart isWordChar
    equal <- iterEqual start end
    if equal
        then return False
        else do
            wordStart <- getText buffer start end True
            liftIO $ do -- dont use postGUIAsync - it causes bugs related to several repeated tryToUpdateOptions in thread
                reflectIDE (do
                    options <- getCompletionOptions wordStart
                    processResults window tree store sourceView wordStart options selectLCP isWordChar always) ideR
                return ()
            return True
-- | Scan backwards from 'iter' for the first character that is not a
-- word character; the word starts just after it.  If no such character
-- exists the word starts at the beginning of the buffer (offset 0).
findWordStart :: TextEditor editor => EditorIter editor -> (Char -> Bool) -> IDEM (EditorIter editor)
findWordStart iter isWordChar = do
    boundary <- backwardFindCharC iter (not . isWordChar) Nothing
    maybe (atOffset iter 0) forwardCharC boundary
-- | Scan forwards from 'iter' for the first character that is not a
-- word character; the word ends there.  If no such character exists the
-- word runs to the end of the line.
findWordEnd :: TextEditor editor => EditorIter editor -> (Char -> Bool) -> IDEM (EditorIter editor)
findWordEnd iter isWordChar =
    forwardFindCharC iter (not . isWordChar) Nothing
        >>= maybe (forwardToLineEndC iter) return
-- | The longest common prefix of two 'Text' values, or 'T.empty' when
-- they share no prefix.  (Type signature added; the original had none.)
longestCommonPrefix :: Text -> Text -> Text
longestCommonPrefix a b = case T.commonPrefixes a b of
    Nothing        -> T.empty
    Just (p, _, _) -> p
-- | Populate the completion popup with the candidate 'options' for the
-- word being completed, and position the window near the cursor —
-- flipping to the other side of the cursor and swapping the two panes
-- when it would otherwise run off the current monitor.  Cancels the
-- completion when there are no options, or too many (unless 'always').
processResults :: TextEditor editor => Window -> TreeView -> ListStore Text -> EditorView editor -> Text -> [Text]
               -> Bool -> (Char -> Bool) -> Bool -> IDEAction
processResults window tree store sourceView wordStart options selectLCP isWordChar always = do
    case options of
        [] -> cancel
        -- More than 200 options and not explicitly requested: give up.
        _ | not always && (not . null $ drop 200 options) -> cancel
        _ -> do
            buffer <- getBuffer sourceView
            (selStart, end) <- getSelectionBounds buffer
            start <- findWordStart selStart isWordChar
            currentWordStart <- getText buffer start end True
            -- On Tab (selectLCP) extend the typed word to the longest
            -- common prefix of all candidates.
            newWordStart <- do
                if selectLCP && currentWordStart == wordStart && (not $ null options)
                    then do
                        return $ foldl1 longestCommonPrefix options
                    else
                        return currentWordStart
            when (T.isPrefixOf wordStart newWordStart) $ do
                liftIO $ listStoreClear store
                let newOptions = List.filter (T.isPrefixOf newWordStart) options
                liftIO $ forM_ (take 200 newOptions) (listStoreAppend store)
                Rectangle startx starty width height <- getIterLocation sourceView start
                (wWindow, hWindow) <- liftIO $ windowGetSize window
                (x, y) <- bufferToWindowCoords sourceView (startx, starty+height)
                mbDrawWindow <- getWindow sourceView
                case mbDrawWindow of
                    Nothing -> return ()
                    Just drawWindow -> do
                        (ox, oy) <- liftIO $ drawWindowGetOrigin drawWindow
                        Just namesSW <- liftIO $ widgetGetParent tree
                        (Rectangle _ _ wNames hNames) <- liftIO $ widgetGetAllocation namesSW
                        Just paned <- liftIO $ widgetGetParent namesSW
                        Just first <- liftIO $ panedGetChild1 (castToPaned paned)
                        Just second <- liftIO $ panedGetChild2 (castToPaned paned)
                        screen <- liftIO $ windowGetScreen window
                        -- Which monitor the popup would land on in each
                        -- direction, used to keep it on-screen below.
                        monitor <- liftIO $ screenGetMonitorAtPoint screen (ox+x) (oy+y)
                        monitorLeft <- liftIO $ screenGetMonitorAtPoint screen (ox+x-wWindow+wNames) (oy+y)
                        monitorRight <- liftIO $ screenGetMonitorAtPoint screen (ox+x+wWindow) (oy+y)
                        monitorBelow <- liftIO $ screenGetMonitorAtPoint screen (ox+x) (oy+y+hWindow)
                        wScreen <- liftIO $ screenGetWidth screen
                        hScreen <- liftIO $ screenGetHeight screen
                        -- Not enough room below the cursor: scroll the
                        -- source view so the popup fits.
                        top <- if monitorBelow /= monitor || (oy+y+hWindow) > hScreen
                            then do
                                sourceSW <- getScrolledWindow sourceView
                                (Rectangle _ _ _ hSource) <- liftIO $ widgetGetAllocation sourceSW
                                scrollToIter sourceView end 0.1 (Just (1.0, 1.0 - (fromIntegral hWindow / fromIntegral hSource)))
                                (_, newy) <- bufferToWindowCoords sourceView (startx, starty+height)
                                return (oy+newy)
                            else return (oy+y)
                        -- Not enough room to the right: place the popup to
                        -- the left and swap the panes so the candidate
                        -- list remains next to the cursor.
                        swap <- if (monitorRight /= monitor || (ox+x+wWindow) > wScreen) && monitorLeft == monitor && (ox+x-wWindow+wNames) > 0
                            then do
                                liftIO $ windowMove window (ox+x-wWindow+wNames) top
                                return $ first == namesSW
                            else do
                                liftIO $ windowMove window (ox+x) top
                                return $ first /= namesSW
                        when swap $ liftIO $ do
                            pos <- panedGetPosition (castToPaned paned)
                            containerRemove (castToPaned paned) first
                            containerRemove (castToPaned paned) second
                            panedAdd1 (castToPaned paned) second
                            panedAdd2 (castToPaned paned) first
                            panedSetPosition (castToPaned paned) (wWindow-pos)
                when (not $ null newOptions) $ liftIO $ treeViewSetCursor tree [0] Nothing
                liftIO $ widgetShowAll window
            -- If Tab extended the word, write the extension into the buffer.
            when (newWordStart /= currentWordStart) $
                replaceWordStart sourceView isWordChar newWordStart
-- Return the index of the currently selected row in the completion
-- tree, or Nothing when nothing is selected.
-- NOTE(review): no type signature in the original; the pattern match
-- [row] assumes a flat (single-level) list model, which is what the
-- completion popup uses — confirm if reused elsewhere.
getRow tree = do
    Just model <- treeViewGetModel tree
    selection <- treeViewGetSelection tree
    maybeIter <- treeSelectionGetSelected selection
    case maybeIter of
        Just iter -> (do
            [row] <- treeModelGetPath model iter
            return $ Just row
            )
        Nothing -> return Nothing
|
573/leksah
|
src/IDE/Completion.hs
|
gpl-2.0
| 22,084
| 0
| 32
| 8,129
| 5,896
| 2,841
| 3,055
| 391
| 17
|
-----------------------------------------------------------------------------
-- |
-- Module : Distribution.Simple.Bench
-- Copyright : Johan Tibell 2011
-- License : BSD3
--
-- Maintainer : cabal-devel@haskell.org
-- Portability : portable
--
-- This is the entry point into running the benchmarks in a built
-- package. It performs the \"@.\/setup bench@\" action. It runs
-- benchmarks designated in the package description.
module Distribution.Simple.Bench
( bench
) where
import qualified Distribution.PackageDescription as PD
( PackageDescription(..), BuildInfo(buildable)
, Benchmark(..), BenchmarkInterface(..), benchmarkType, hasBenchmarks )
import Distribution.Simple.BuildPaths ( exeExtension )
import Distribution.Simple.Compiler ( compilerInfo )
import Distribution.Simple.InstallDirs
( fromPathTemplate, initialPathTemplateEnv, PathTemplateVariable(..)
, substPathTemplate , toPathTemplate, PathTemplate )
import qualified Distribution.Simple.LocalBuildInfo as LBI
import Distribution.Simple.Setup ( BenchmarkFlags(..), fromFlag )
import Distribution.Simple.UserHooks ( Args )
import Distribution.Simple.Utils ( die, notice, rawSystemExitCode )
import Distribution.Text
import Control.Monad ( when, unless, forM )
import System.Exit ( ExitCode(..), exitFailure, exitSuccess )
import System.Directory ( doesFileExist )
import System.FilePath ( (</>), (<.>) )
-- | Perform the \"@.\/setup bench@\" action.
--
-- Resolves which benchmarks to run (all enabled ones, or those named on
-- the command line), runs each as an external executable, and exits
-- with failure unless every benchmark succeeded.
bench :: Args                    -- ^positional command-line arguments
      -> PD.PackageDescription   -- ^information from the .cabal file
      -> LBI.LocalBuildInfo      -- ^information from the configure step
      -> BenchmarkFlags          -- ^flags sent to benchmark
      -> IO ()
bench args pkg_descr lbi flags = do
    let verbosity         = fromFlag $ benchmarkVerbosity flags
        benchmarkNames    = args
        pkgBenchmarks     = PD.benchmarks pkg_descr
        -- Only benchmarks that are both enabled and buildable can run.
        enabledBenchmarks = [ t | t <- pkgBenchmarks
                                , PD.benchmarkEnabled t
                                , PD.buildable (PD.benchmarkBuildInfo t) ]

        -- Run one benchmark as a child process, returning its exit code.
        doBench :: PD.Benchmark -> IO ExitCode
        doBench bm =
            case PD.benchmarkInterface bm of
              PD.BenchmarkExeV10 _ _ -> do
                  let cmd = LBI.buildDir lbi </> PD.benchmarkName bm
                            </> PD.benchmarkName bm <.> exeExtension
                      options = map (benchOption pkg_descr lbi bm) $
                                benchmarkOptions flags
                      name = PD.benchmarkName bm
                  -- Check that the benchmark executable exists.
                  exists <- doesFileExist cmd
                  unless exists $ die $
                      "Error: Could not find benchmark program \""
                      ++ cmd ++ "\". Did you build the package first?"

                  notice verbosity $ startMessage name
                  -- This will redirect the child process
                  -- stdout/stderr to the parent process.
                  exitcode <- rawSystemExitCode verbosity cmd options
                  notice verbosity $ finishMessage name exitcode
                  return exitcode

              -- Only the exitcode-stdio-1.0 interface is supported.
              _ -> do
                  notice verbosity $ "No support for running "
                      ++ "benchmark " ++ PD.benchmarkName bm ++ " of type: "
                      ++ show (disp $ PD.benchmarkType bm)
                  exitFailure

    -- No benchmarks at all: nothing to do, succeed immediately.
    unless (PD.hasBenchmarks pkg_descr) $ do
        notice verbosity "Package has no benchmarks."
        exitSuccess

    when (PD.hasBenchmarks pkg_descr && null enabledBenchmarks) $
        die $ "No benchmarks enabled. Did you remember to configure with "
              ++ "\'--enable-benchmarks\'?"

    -- Resolve command-line names against the enabled benchmarks, giving
    -- a dedicated error for a benchmark that exists but is disabled.
    bmsToRun <- case benchmarkNames of
            [] -> return enabledBenchmarks
            names -> forM names $ \bmName ->
                let benchmarkMap = zip enabledNames enabledBenchmarks
                    enabledNames = map PD.benchmarkName enabledBenchmarks
                    allNames = map PD.benchmarkName pkgBenchmarks
                in case lookup bmName benchmarkMap of
                    Just t -> return t
                    _ | bmName `elem` allNames ->
                          die $ "Package configured with benchmark "
                                ++ bmName ++ " disabled."
                      | otherwise -> die $ "no such benchmark: " ++ bmName

    let totalBenchmarks = length bmsToRun
    notice verbosity $ "Running " ++ show totalBenchmarks ++ " benchmarks..."
    exitcodes <- mapM doBench bmsToRun
    let allOk = totalBenchmarks == length (filter (== ExitSuccess) exitcodes)
    unless allOk exitFailure
  where
    startMessage name = "Benchmark " ++ name ++ ": RUNNING...\n"
    finishMessage name exitcode = "Benchmark " ++ name ++ ": "
                               ++ (case exitcode of
                                     ExitSuccess -> "FINISH"
                                     ExitFailure _ -> "ERROR")
-- TODO: This is abusing the notion of a 'PathTemplate'. The result
-- isn't necessarily a path.
--
-- | Expand one benchmark option template, substituting the standard
-- path-template variables plus the benchmark's own name.
benchOption :: PD.PackageDescription
            -> LBI.LocalBuildInfo
            -> PD.Benchmark
            -> PathTemplate
            -> String
benchOption pkg_descr lbi bm template =
    let env = initialPathTemplateEnv
                (PD.package pkg_descr)
                (LBI.localComponentId lbi)
                (compilerInfo (LBI.compiler lbi))
                (LBI.hostPlatform lbi)
              ++ [(BenchmarkNameVar, toPathTemplate (PD.benchmarkName bm))]
    in fromPathTemplate (substPathTemplate env template)
|
trskop/cabal
|
Cabal/Distribution/Simple/Bench.hs
|
bsd-3-clause
| 5,650
| 0
| 22
| 1,780
| 1,098
| 574
| 524
| 92
| 5
|
{-
(c) The University of Glasgow 2006
(c) The GRASP/AQUA Project, Glasgow University, 1992-1998
\section[InstEnv]{Utilities for typechecking instance declarations}
The bits common to TcInstDcls and TcDeriv.
-}
{-# LANGUAGE CPP, DeriveDataTypeable #-}
module InstEnv (
DFunId, InstMatch, ClsInstLookupResult,
OverlapFlag(..), OverlapMode(..), setOverlapModeMaybe,
ClsInst(..), DFunInstType, pprInstance, pprInstanceHdr, pprInstances,
instanceHead, instanceSig, mkLocalInstance, mkImportedInstance,
instanceDFunId, tidyClsInstDFun, instanceRoughTcs,
fuzzyClsInstCmp, orphNamesOfClsInst,
InstEnvs(..), VisibleOrphanModules, InstEnv,
emptyInstEnv, extendInstEnv, deleteFromInstEnv, identicalClsInstHead,
extendInstEnvList, lookupUniqueInstEnv, lookupInstEnv, instEnvElts,
memberInstEnv, instIsVisible,
classInstances, instanceBindFun,
instanceCantMatch, roughMatchTcs
) where
#include "HsVersions.h"
import TcType  -- InstEnv is really part of the type checker,
               -- and depends on TcType in many ways
import CoreSyn ( IsOrphan(..), isOrphan, chooseOrphanAnchor )
import Module
import Class
import Var
import VarSet
import Name
import NameSet
import Unify
import Outputable
import ErrUtils
import BasicTypes
import UniqFM
import Util
import Id

import Data.Data ( Data, Typeable )
import Data.List ( foldl' )
import Data.Maybe ( isJust, isNothing )
{-
************************************************************************
* *
ClsInst: the data type for type-class instances
* *
************************************************************************
-}
-- | A type-class instance: a cached, decomposed view of the dfun Id
-- plus overlap and orphan information.
data ClsInst
  = ClsInst {   -- Used for "rough matching"; see Note [Rough-match field]
                -- INVARIANT: is_tcs = roughMatchTcs is_tys
               is_cls_nm :: Name          -- Class name
             , is_tcs    :: [Maybe Name]  -- Top of type args

               -- Used for "proper matching"; see Note [Proper-match fields]
             , is_tvs :: [TyVar]   -- Fresh template tyvars for full match
                                   -- See Note [Template tyvars are fresh]
             , is_cls :: Class     -- The real class
             , is_tys :: [Type]    -- Full arg types (mentioning is_tvs)
                -- INVARIANT: is_dfun Id has type
                --      forall is_tvs. (...) => is_cls is_tys
                -- (modulo alpha conversion)

             , is_dfun :: DFunId   -- See Note [Haddock assumptions]
             , is_flag :: OverlapFlag  -- See detailed comments with
                                       -- the decl of BasicTypes.OverlapFlag
             , is_orphan :: IsOrphan
    }
  deriving (Data, Typeable)
-- | A fuzzy comparison function for class instances, intended for
-- sorting instances before displaying them to the user.  Orders first
-- by class name, then lexicographically over the rough-match keys,
-- with type variables ('Nothing') sorting before concrete tycons.
fuzzyClsInstCmp :: ClsInst -> ClsInst -> Ordering
fuzzyClsInstCmp lhs rhs =
    stableNameCmp (is_cls_nm lhs) (is_cls_nm rhs) `mappend`
    mconcat (zipWith roughCmp (is_tcs lhs) (is_tcs rhs))
  where
    roughCmp Nothing  Nothing  = EQ
    roughCmp Nothing  (Just _) = LT
    roughCmp (Just _) Nothing  = GT
    roughCmp (Just m) (Just n) = stableNameCmp m n
{-
Note [Template tyvars are fresh]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The is_tvs field of a ClsInst has *completely fresh* tyvars.
That is, they are
* distinct from any other ClsInst
* distinct from any tyvars free in predicates that may
be looked up in the class instance environment
Reason for freshness: we use unification when checking for overlap
etc, and that requires the tyvars to be distinct.
The invariant is checked by the ASSERT in lookupInstEnv'.
Note [Rough-match field]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
The is_cls_nm, is_tcs fields allow a "rough match" to be done
*without* poking inside the DFunId. Poking the DFunId forces
us to suck in all the type constructors etc it involves,
which is a total waste of time if it has no chance of matching
So the Name, [Maybe Name] fields allow us to say "definitely
does not match", based only on the Name.
In is_tcs,
Nothing means that this type arg is a type variable
(Just n) means that this type arg is a
TyConApp with a type constructor of n.
This is always a real tycon, never a synonym!
(Two different synonyms might match, but two
different real tycons can't.)
NB: newtypes are not transparent, though!
Note [Proper-match fields]
~~~~~~~~~~~~~~~~~~~~~~~~~
The is_tvs, is_cls, is_tys fields are simply cached values, pulled
out (lazily) from the dfun id. They are cached here simply so
that we don't need to decompose the DFunId each time we want
to match it. The hope is that the fast-match fields mean
that we often never poke the proper-match fields.
However, note that:
* is_tvs must be a superset of the free vars of is_tys
* is_tvs, is_tys may be alpha-renamed compared to the ones in
the dfun Id
Note [Haddock assumptions]
~~~~~~~~~~~~~~~~~~~~~~~~~~
For normal user-written instances, Haddock relies on
* the SrcSpan of
* the Name of
* the is_dfun of
* an Instance
being equal to
* the SrcSpan of
* the instance head type of
* the InstDecl used to construct the Instance.
-}
-- | The dictionary-function Id witnessing this instance.
instanceDFunId :: ClsInst -> DFunId
instanceDFunId = is_dfun

-- | Apply a tidying function to the instance's dfun Id.
tidyClsInstDFun :: (DFunId -> DFunId) -> ClsInst -> ClsInst
tidyClsInstDFun tidy_dfun ispec
  = ispec { is_dfun = tidy_dfun (is_dfun ispec) }

-- | The "rough match" keys; see Note [Rough-match field].
instanceRoughTcs :: ClsInst -> [Maybe Name]
instanceRoughTcs = is_tcs
-- A ClsInst is named after its dfun.
instance NamedThing ClsInst where
   getName ispec = getName (is_dfun ispec)

-- Rendered as an instance declaration; see 'pprInstance'.
instance Outputable ClsInst where
   ppr = pprInstance
pprInstance :: ClsInst -> SDoc
-- Prints the ClsInst as an instance declaration,
-- with a comment giving where it was defined (and, in debug
-- style, its dfun)
pprInstance ispec
  = hang (pprInstanceHdr ispec)
       2 (vcat [ text "--" <+> pprDefinedAt (getName ispec)
               , ifPprDebug (ppr (is_dfun ispec)) ])

-- * pprInstanceHdr is used in VStudio to populate the ClassView tree
pprInstanceHdr :: ClsInst -> SDoc
-- Prints the ClsInst as an instance declaration
pprInstanceHdr (ClsInst { is_flag = flag, is_dfun = dfun })
  = text "instance" <+> ppr flag <+> pprSigmaType (idType dfun)
-- | Pretty-print several instances, one per line.
pprInstances :: [ClsInst] -> SDoc
pprInstances = vcat . map pprInstance
instanceHead :: ClsInst -> ([TyVar], Class, [Type])
-- Returns the head, using the fresh tyvars from the ClsInst
instanceHead (ClsInst { is_tvs = tvs, is_tys = tys, is_dfun = dfun })
   = (tvs, cls, tys)
   where
     -- Only the class comes from the dfun; tvs/tys are the cached
     -- (fresh) fields of the ClsInst.
     (_, _, cls, _) = tcSplitDFunTy (idType dfun)
-- | Collects the names of concrete types and type constructors that make
-- up the head of a class instance. For instance, given `class Foo a b`:
--
-- `instance Foo (Either (Maybe Int) a) Bool` would yield
--      [Either, Maybe, Int, Bool]
--
-- Used in the implementation of ":info" in GHCi.
--
-- The 'tcSplitSigmaTy' is because of
--        instance Foo a => Baz T where ...
-- The decl is an orphan if Baz and T are both not locally defined,
--        even if Foo *is* locally defined
orphNamesOfClsInst :: ClsInst -> NameSet
orphNamesOfClsInst (ClsInst { is_cls_nm = cls_nm, is_tys = tys })
  = orphNamesOfTypes tys `unionNameSet` unitNameSet cls_nm

-- | The full decomposition of the instance's dfun type, as returned
-- by 'tcSplitDFunTy'.
instanceSig :: ClsInst -> ([TyVar], [Type], Class, [Type])
-- Decomposes the DFunId
instanceSig ispec = tcSplitDFunTy (idType (is_dfun ispec))
mkLocalInstance :: DFunId -> OverlapFlag
                -> [TyVar] -> Class -> [Type]
                -> ClsInst
-- Used for local instances, where we can safely pull on the DFunId.
-- Consider using newClsInst instead; this will also warn if
-- the instance is an orphan.
mkLocalInstance dfun oflag tvs cls tys
  = ClsInst { is_flag = oflag, is_dfun = dfun
            , is_tvs = tvs
            , is_cls = cls, is_cls_nm = cls_name
            , is_tys = tys, is_tcs = roughMatchTcs tys
            , is_orphan = orph
            }
  where
    cls_name = className cls
    dfun_name = idName dfun
    this_mod = ASSERT( isExternalName dfun_name ) nameModule dfun_name
    is_local name = nameIsLocalOrFrom this_mod name

    -- Compute orphanhood. See Note [Orphans] in InstEnv
    (cls_tvs, fds) = classTvsFds cls
    -- For each instance-head argument type, the locally-defined names
    -- it mentions.
    arg_names = [filterNameSet is_local (orphNamesOfType ty) | ty <- tys]

    -- See Note [When exactly is an instance decl an orphan?]
    orph | is_local cls_name   = NotOrphan (nameOccName cls_name)
         | all notOrphan mb_ns = ASSERT( not (null mb_ns) ) head mb_ns
         | otherwise           = IsOrphan

    notOrphan NotOrphan{} = True
    notOrphan _ = False

    mb_ns :: [IsOrphan]  -- One for each fundep; a locally-defined name
                         -- that is not in the "determined" arguments
    mb_ns | null fds  = [choose_one arg_names]
          | otherwise = map do_one fds
    -- For a fundep, only names from arguments NOT determined by the
    -- fundep count towards non-orphanhood.
    do_one (_ltvs, rtvs) = choose_one [ns | (tv,ns) <- cls_tvs `zip` arg_names
                                          , not (tv `elem` rtvs)]

    choose_one nss = chooseOrphanAnchor (nameSetElems (unionNameSets nss))
mkImportedInstance :: Name          -- ^ the class name
                   -> [Maybe Name]  -- ^ rough-match keys, from the
                                    --   interface file
                   -> DFunId
                   -> OverlapFlag
                   -> IsOrphan
                   -> ClsInst
-- Used for imported instances, where we get the rough-match stuff
-- from the interface file
-- The bound tyvars of the dfun are guaranteed fresh, because
-- the dfun has been typechecked out of the same interface file
mkImportedInstance cls_nm mb_tcs dfun oflag orphan
  = ClsInst { is_flag = oflag, is_dfun = dfun
            , is_tvs = tvs, is_tys = tys
            , is_cls_nm = cls_nm, is_cls = cls, is_tcs = mb_tcs
            , is_orphan = orphan }
  where
    (tvs, _, cls, tys) = tcSplitDFunTy (idType dfun)
{-
Note [When exactly is an instance decl an orphan?]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
(see MkIface.instanceToIfaceInst, which implements this)
Roughly speaking, an instance is an orphan if its head (after the =>)
mentions nothing defined in this module.
Functional dependencies complicate the situation though. Consider
module M where { class C a b | a -> b }
and suppose we are compiling module X:
module X where
import M
data T = ...
instance C Int T where ...
This instance is an orphan, because when compiling a third module Y we
might get a constraint (C Int v), and we'd want to improve v to T. So
we must make sure X's instances are loaded, even if we do not directly
use anything from X.
More precisely, an instance is an orphan iff
If there are no fundeps, then at least one of the names in
the instance head is locally defined.
If there are fundeps, then for every fundep, at least one of the
names free in a *non-determined* part of the instance head is
defined in this module.
(Note that these conditions hold trivially if the class is locally
defined.)
************************************************************************
* *
InstEnv, ClsInstEnv
* *
************************************************************************
A @ClsInstEnv@ contains all the instances of that class. The @Id@ inside a
ClsInstEnv mapping is the dfun for that instance.
If class C maps to a list containing the item ([a,b], [t1,t2,t3], dfun), then
forall a b, C t1 t2 t3 can be constructed by dfun
or, to put it another way, we have
instance (...) => C t1 t2 t3, witnessed by dfun
-}
---------------------------------------------------
type InstEnv = UniqFM ClsInstEnv        -- Maps Class to instances for that class

-- | 'InstEnvs' represents the combination of the global type class instance
-- environment, the local type class instance environment, and the set of
-- transitively reachable orphan modules (according to what modules have been
-- directly imported) used to test orphan instance visibility.
data InstEnvs = InstEnvs
        { ie_global  :: InstEnv               -- External-package instances
        , ie_local   :: InstEnv               -- Home-package instances
        , ie_visible :: VisibleOrphanModules  -- Set of all orphan modules transitively
                                              -- reachable from the module being compiled
                                              -- See Note [Instance lookup and orphan instances]
        }

-- | Set of visible orphan modules, according to what modules have been directly
-- imported.  This is based off of the dep_orphs field, which records
-- transitively reachable orphan modules (modules that define orphan instances).
type VisibleOrphanModules = ModuleSet

newtype ClsInstEnv
  = ClsIE [ClsInst]    -- The instances for a particular class, in any order

instance Outputable ClsInstEnv where
  ppr (ClsIE is) = pprInstances is
-- INVARIANTS:
-- * The is_tvs are distinct in each ClsInst
-- of a ClsInstEnv (so we can safely unify them)
-- Thus, the @ClassInstEnv@ for @Eq@ might contain the following entry:
-- [a] ===> dfun_Eq_List :: forall a. Eq a => Eq [a]
-- The "a" in the pattern must be one of the forall'd variables in
-- the dfun type.
-- | An instance environment containing no instances at all.
emptyInstEnv :: InstEnv
emptyInstEnv = emptyUFM
-- | All instances in the environment, across all classes, in no
-- particular order.
instEnvElts :: InstEnv -> [ClsInst]
instEnvElts ie = concatMap (\(ClsIE elts) -> elts) (eltsUFM ie)
-- | Test if an instance is visible, by checking that its origin module
-- is in 'VisibleOrphanModules'.
-- See Note [Instance lookup and orphan instances]
instIsVisible :: VisibleOrphanModules -> ClsInst -> Bool
instIsVisible vis_mods ispec
  -- NB: Instances from the interactive package always are visible. We can't
  -- add interactive modules to the set since we keep creating new ones
  -- as a GHCi session progresses.
  | isInteractiveModule mod     = True
  -- Only orphans need the visibility check; see the Note above.
  | IsOrphan <- is_orphan ispec = mod `elemModuleSet` vis_mods
  | otherwise                   = True
  where
    mod = nameModule (idName (is_dfun ispec))
-- | All visible instances of the given class, home-package instances
-- first, then external-package ones.
classInstances :: InstEnvs -> Class -> [ClsInst]
classInstances (InstEnvs { ie_global = pkg_ie, ie_local = home_ie, ie_visible = vis_mods }) cls
  = lookupVisible home_ie ++ lookupVisible pkg_ie
  where
    lookupVisible env = maybe [] onlyVisible (lookupUFM env cls)
    onlyVisible (ClsIE insts) = filter (instIsVisible vis_mods) insts
-- | Checks for an exact match of ClsInst in the instance environment.
-- We use this when we do signature checking in TcRnDriver
-- ("exact" meaning identical heads up to alpha-renaming; see
-- 'identicalClsInstHead').
memberInstEnv :: InstEnv -> ClsInst -> Bool
memberInstEnv inst_env ins_item@(ClsInst { is_cls_nm = cls_nm } ) =
    maybe False (\(ClsIE items) -> any (identicalClsInstHead ins_item) items)
          (lookupUFM inst_env cls_nm)
-- | Add a list of instances to an instance environment.
-- Uses a strict left fold: the intermediate environments are never
-- needed lazily, so 'foldl' would only build up thunks.
extendInstEnvList :: InstEnv -> [ClsInst] -> InstEnv
extendInstEnvList inst_env ispecs = foldl' extendInstEnv inst_env ispecs
-- | Add one instance to the environment, consing it onto whatever
-- instances its class already has.
extendInstEnv :: InstEnv -> ClsInst -> InstEnv
extendInstEnv inst_env ins_item@(ClsInst { is_cls_nm = cls_nm })
  = addToUFM_C combine inst_env cls_nm (ClsIE [ins_item])
  where
    -- The second argument (the singleton we just passed) is discarded;
    -- the new item is consed onto the existing list.
    combine (ClsIE existing) _ = ClsIE (ins_item : existing)
-- | Remove an instance from the environment, identified up to
-- alpha-renaming of its head (see 'identicalClsInstHead').
deleteFromInstEnv :: InstEnv -> ClsInst -> InstEnv
deleteFromInstEnv inst_env ins_item@(ClsInst { is_cls_nm = cls_nm })
  = adjustUFM adjust inst_env cls_nm
  where
    adjust (ClsIE items) = ClsIE (filterOut (identicalClsInstHead ins_item) items)
identicalClsInstHead :: ClsInst -> ClsInst -> Bool
-- ^ True when the instance heads are the same
-- e.g.  both are   Eq [(a,b)]
-- Used for overriding in GHCi
-- Obviously should be insensitive to alpha-renaming
identicalClsInstHead (ClsInst { is_cls_nm = cls_nm1, is_tcs = rough1, is_tys = tys1 })
                     (ClsInst { is_cls_nm = cls_nm2, is_tcs = rough2, is_tys = tys2 })
  =  cls_nm1 == cls_nm2
  && not (instanceCantMatch rough1 rough2)  -- Fast check for no match, uses the "rough match" fields
  && isJust (tcMatchTys tys1 tys2)          -- Matching both ways makes the heads
  && isJust (tcMatchTys tys2 tys1)          -- equal up to alpha-renaming
{-
************************************************************************
* *
Looking up an instance
* *
************************************************************************
@lookupInstEnv@ looks up in a @InstEnv@, using a one-way match. Since
the env is kept ordered, the first match must be the only one. The
thing we are looking up can have an arbitrary "flexi" part.
Note [Instance lookup and orphan instances]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose we are compiling a module M, and we have a zillion packages
loaded, and we are looking up an instance for C (T W). If we find a
match in module 'X' from package 'p', should X be "in scope"; that is,
is p:X in the transitive closure of modules imported from M?
The difficulty is that the "zillion packages" might include ones loaded
through earlier invocations of the GHC API, or earlier module loads in GHCi.
They might not be in the dependencies of M itself; and if not, the instances
in them should not be visible. Trac #2182, #8427.
There are two cases:
* If the instance is *not an orphan*, then module X defines C, T, or W.
And in order for those types to be involved in typechecking M, it
must be that X is in the transitive closure of M's imports. So we
can use the instance.
* If the instance *is an orphan*, the above reasoning does not apply.
So we keep track of the set of orphan modules transitively below M;
this is the ie_visible field of InstEnvs, of type VisibleOrphanModules.
If module p:X is in this set, then we can use the instance, otherwise
we can't.
Note [Rules for instance lookup]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These functions implement the carefully-written rules in the user
manual section on "overlapping instances". At risk of duplication,
here are the rules. If the rules change, change this text and the
user manual simultaneously. The link may be this:
http://www.haskell.org/ghc/docs/latest/html/users_guide/type-class-extensions.html#instance-overlap
The willingness to be overlapped or incoherent is a property of the
instance declaration itself, controlled as follows:
* An instance is "incoherent"
if it has an INCOHERENT pragma, or
if it appears in a module compiled with -XIncoherentInstances.
* An instance is "overlappable"
if it has an OVERLAPPABLE or OVERLAPS pragma, or
if it appears in a module compiled with -XOverlappingInstances, or
if the instance is incoherent.
 * An instance is "overlapping"
    if it has an OVERLAPPING or OVERLAPS pragma, or
    if it appears in a module compiled with -XOverlappingInstances, or
    if the instance is incoherent.
Now suppose that, in some client module, we are searching for an instance
of the target constraint (C ty1 .. tyn). The search works like this.
* Find all instances I that match the target constraint; that is, the
target constraint is a substitution instance of I. These instance
declarations are the candidates.
* Find all non-candidate instances that unify with the target
constraint. Such non-candidates instances might match when the
target constraint is further instantiated. If all of them are
incoherent, proceed; if not, the search fails.
* Eliminate any candidate IX for which both of the following hold:
* There is another candidate IY that is strictly more specific;
that is, IY is a substitution instance of IX but not vice versa.
* Either IX is overlappable or IY is overlapping.
* If only one candidate remains, pick it. Otherwise if all remaining
candidates are incoherent, pick an arbitrary candidate. Otherwise fail.
Note [Overlapping instances] (NB: these notes are quite old)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Overlap is permitted, but only in such a way that one can make
a unique choice when looking up. That is, overlap is only permitted if
one template matches the other, or vice versa. So this is ok:
[a] [Int]
but this is not
(Int,a) (b,Int)
If overlap is permitted, the list is kept most specific first, so that
the first lookup is the right choice.
For now we just use association lists.
\subsection{Avoiding a problem with overlapping}
Consider this little program:
\begin{pseudocode}
class C a where c :: a
class C a => D a where d :: a
instance C Int where c = 17
instance D Int where d = 13
instance C a => C [a] where c = [c]
instance ({- C [a], -} D a) => D [a] where d = c
instance C [Int] where c = [37]
main = print (d :: [Int])
\end{pseudocode}
What do you think `main' prints (assuming we have overlapping instances, and
all that turned on)? Well, the instance for `D' at type `[a]' is defined to
be `c' at the same type, and we've got an instance of `C' at `[Int]', so the
answer is `[37]', right? (the generic `C [a]' instance shouldn't apply because
the `C [Int]' instance is more specific).
Ghc-4.04 gives `[37]', while ghc-4.06 gives `[17]', so 4.06 is wrong. That
was easy ;-) Let's just consult hugs for good measure. Wait - if I use old
hugs (pre-September99), I get `[17]', and stranger yet, if I use hugs98, it
doesn't even compile! What's going on!?
What hugs complains about is the `D [a]' instance decl.
\begin{pseudocode}
ERROR "mj.hs" (line 10): Cannot build superclass instance
*** Instance : D [a]
*** Context supplied : D a
*** Required superclass : C [a]
\end{pseudocode}
You might wonder what hugs is complaining about. It's saying that you
need to add `C [a]' to the context of the `D [a]' instance (as appears
in comments). But there's that `C [a]' instance decl one line above
that says that I can reduce the need for a `C [a]' instance to the
need for a `C a' instance, and in this case, I already have the
necessary `C a' instance (since we have `D a' explicitly in the
context, and `C' is a superclass of `D').
Unfortunately, the above reasoning indicates a premature commitment to the
generic `C [a]' instance. I.e., it prematurely rules out the more specific
instance `C [Int]'. This is the mistake that ghc-4.06 makes. The fix is to
add the context that hugs suggests (uncomment the `C [a]'), effectively
deferring the decision about which instance to use.
Now, interestingly enough, 4.04 has this same bug, but it's covered up
in this case by a little known `optimization' that was disabled in
4.06. Ghc-4.04 silently inserts any missing superclass context into
an instance declaration. In this case, it silently inserts the `C
[a]', and everything happens to work out.
(See `basicTypes/MkId:mkDictFunId' for the code in question. Search for
`Mark Jones', although Mark claims no credit for the `optimization' in
question, and would rather it stopped being called the `Mark Jones
optimization' ;-)
So, what's the fix? I think hugs has it right. Here's why. Let's try
something else out with ghc-4.04. Let's add the following line:
d' :: D a => [a]
d' = c
Everyone raise their hand who thinks that `d :: [Int]' should give a
different answer from `d' :: [Int]'. Well, in ghc-4.04, it does. The
`optimization' only applies to instance decls, not to regular
bindings, giving inconsistent behavior.
Old hugs had this same bug. Here's how we fixed it: like GHC, the
list of instances for a given class is ordered, so that more specific
instances come before more generic ones. For example, the instance
list for C might contain:
..., C Int, ..., C a, ...
When we go to look for a `C Int' instance we'll get that one first.
But what if we go looking for a `C b' (`b' is unconstrained)? We'll
pass the `C Int' instance, and keep going. But if `b' is
unconstrained, then we don't know yet if the more specific instance
will eventually apply. GHC keeps going, and matches on the generic `C
a'. The fix is to, at each step, check to see if there's a reverse
match, and if so, abort the search. This prevents hugs from
prematurely choosing a generic instance when a more specific one
exists.
--Jeff
v
BUT NOTE [Nov 2001]: we must actually *unify* not reverse-match in
this test. Suppose the instance envt had
..., forall a b. C a a b, ..., forall a b c. C a b c, ...
(still most specific first)
Now suppose we are looking for (C x y Int), where x and y are unconstrained.
C x y Int doesn't match the template {a,b} C a a b
but neither does
C a a b match the template {x,y} C x y Int
But still x and y might subsequently be unified so they *do* match.
Simple story: unify, don't match.
-}
-- | Instantiation of one dfun type variable arising from a match.
type DFunInstType = Maybe Type
        -- Just ty   => Instantiate with this type
        -- Nothing   => Instantiate with any type of this tyvar's kind
        -- See Note [DFunInstType: instantiating types]

-- | A matching instance together with how to instantiate its dfun tyvars.
type InstMatch = (ClsInst, [DFunInstType])

-- | Full result of an instance lookup.
type ClsInstLookupResult
     = ( [InstMatch]     -- Successful matches
       , [ClsInst]       -- These don't match but do unify
       , [InstMatch] )   -- Unsafe overlapped instances under Safe Haskell
                         -- (see Note [Safe Haskell Overlapping Instances] in
                         -- TcSimplify).
{-
Note [DFunInstType: instantiating types]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A successful match is a ClsInst, together with the types at which
the dfun_id in the ClsInst should be instantiated
The instantiating types are (Either TyVar Type)s because the dfun
might have some tyvars that *only* appear in arguments
dfun :: forall a b. C a b, Ord b => D [a]
When we match this against D [ty], we return the instantiating types
[Just ty, Nothing]
where the 'Nothing' indicates that 'b' can be freely instantiated.
(The caller instantiates it to a flexi type variable, which will
presumably later become fixed via functional dependencies.)
-}
-- | Look up an instance in the given instance environment. The given class application must match exactly
-- one instance and the match may not contain any flexi type variables. If the lookup is unsuccessful,
-- yield 'Left errorMessage'.
lookupUniqueInstEnv :: InstEnvs
                    -> Class -> [Type]
                    -> Either MsgDoc (ClsInst, [Type])
lookupUniqueInstEnv instEnv cls tys
  = case lookupInstEnv False instEnv cls tys of
      ([(inst, inst_tys)], _, _)
             | noFlexiVar -> Right (inst, inst_tys')
             | otherwise  -> Left $ text "flexible type variable:" <+>
                                    (ppr $ mkTyConApp (classTyCon cls) tys)
             where
               -- Keep only the fixed instantiations
               inst_tys'  = [ty | Just ty <- inst_tys]
               -- Every dfun tyvar must be determined by the match
               noFlexiVar = all isJust inst_tys
      _other -> Left $ text "instance not found" <+>
                       (ppr $ mkTyConApp (classTyCon cls) tys)
-- | Look up a class application in a single 'InstEnv', returning the
-- matches and the non-matching-but-unifying instances.
lookupInstEnv' :: InstEnv              -- InstEnv to look in
               -> VisibleOrphanModules -- But filter against this
               -> Class -> [Type]      -- What we are looking for
               -> ([InstMatch],        -- Successful matches
                   [ClsInst])          -- These don't match but do unify
-- The second component of the result pair happens when we look up
--      Foo [a]
-- in an InstEnv that has entries for
--      Foo [Int]
--      Foo [b]
-- Then which we choose would depend on the way in which 'a'
-- is instantiated.  So we report that Foo [b] is a match (mapping b->a)
-- but Foo [Int] is a unifier.  This gives the caller a better chance of
-- giving a suitable error message
lookupInstEnv' ie vis_mods cls tys
  = lookup ie
  where
    rough_tcs  = roughMatchTcs tys
    all_tvs    = all isNothing rough_tcs

    --------------
    lookup env = case lookupUFM env cls of
                   Nothing -> ([],[])   -- No instances for this class
                   Just (ClsIE insts) -> find [] [] insts

    --------------
    -- Walk the candidate instances, accumulating matches (ms)
    -- and potential unifiers (us).
    find ms us [] = (ms, us)
    find ms us (item@(ClsInst { is_tcs = mb_tcs, is_tvs = tpl_tvs
                              , is_tys = tpl_tys, is_flag = oflag }) : rest)
      | not (instIsVisible vis_mods item)
      = find ms us rest  -- See Note [Instance lookup and orphan instances]

        -- Fast check for no match, uses the "rough match" fields
      | instanceCantMatch rough_tcs mb_tcs
      = find ms us rest

      | Just subst <- tcMatchTys tpl_tys tys
      = find ((item, map (lookupTyVar subst) tpl_tvs) : ms) us rest

        -- Does not match, so next check whether the things unify
        -- See Note [Overlapping instances] and Note [Incoherent instances]
      | Incoherent _ <- overlapMode oflag
      = find ms us rest

      | otherwise
      = ASSERT2( tyCoVarsOfTypes tys `disjointVarSet` tpl_tv_set,
                 (ppr cls <+> ppr tys <+> ppr all_tvs) $$
                 (ppr tpl_tvs <+> ppr tpl_tys)
                )
                -- Unification will break badly if the variables overlap
                -- They shouldn't because we allocate separate uniques for them
                -- See Note [Template tyvars are fresh]
        case tcUnifyTys instanceBindFun tpl_tys tys of
            Just _  -> find ms (item:us) rest
            Nothing -> find ms us rest
      where
        tpl_tv_set = mkVarSet tpl_tvs
---------------
-- This is the common way to call this function.
lookupInstEnv :: Bool              -- Check Safe Haskell overlap restrictions
              -> InstEnvs          -- External and home package inst-env
              -> Class -> [Type]   -- What we are looking for
              -> ClsInstLookupResult
-- ^ See Note [Rules for instance lookup]
-- ^ See Note [Safe Haskell Overlapping Instances] in TcSimplify
-- ^ See Note [Safe Haskell Overlapping Instances Implementation] in TcSimplify
lookupInstEnv check_overlap_safe
              (InstEnvs { ie_global = pkg_ie
                        , ie_local = home_ie
                        , ie_visible = vis_mods })
              cls
              tys
  = -- pprTrace "lookupInstEnv" (ppr cls <+> ppr tys $$ ppr home_ie) $
    (final_matches, final_unifs, unsafe_overlapped)
  where
    -- Search both environments and merge the results
    (home_matches, home_unifs) = lookupInstEnv' home_ie vis_mods cls tys
    (pkg_matches,  pkg_unifs)  = lookupInstEnv' pkg_ie  vis_mods cls tys
    all_matches = home_matches ++ pkg_matches
    all_unifs   = home_unifs   ++ pkg_unifs
    final_matches = foldr insert_overlapping [] all_matches
        -- Even if the unifs is non-empty (an error situation)
        -- we still prune the matches, so that the error message isn't
        -- misleading (complaining of multiple matches when some should be
        -- overlapped away)

    unsafe_overlapped
     = case final_matches of
         [match] -> check_safe match
         _       -> []

    -- If the selected match is incoherent, discard all unifiers
    final_unifs = case final_matches of
                    (m:_) | is_incoherent m -> []
                    _                       -> all_unifs

    -- NOTE [Safe Haskell isSafeOverlap]
    -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    -- We restrict code compiled in 'Safe' mode from overriding code
    -- compiled in any other mode. The rationale is that code compiled
    -- in 'Safe' mode is code that is untrusted by the ghc user. So
    -- we shouldn't let that code change the behaviour of code the
    -- user didn't compile in 'Safe' mode since that's the code they
    -- trust. So 'Safe' instances can only overlap instances from the
    -- same module. A same instance origin policy for safe compiled
    -- instances.
    check_safe (inst,_)
        = case check_overlap_safe && unsafeTopInstance inst of
                -- make sure it only overlaps instances from the same module
                True -> go [] all_matches
                -- most specific is from a trusted location.
                False -> []
        where
            -- Collect the overlapped matches that violate the
            -- same-module policy
            go bad [] = bad
            go bad (i@(x,_):unchecked) =
                if inSameMod x || isOverlappable x
                    then go bad unchecked
                    else go (i:bad) unchecked

            -- Two instances count as same-origin when both are local
            -- or both come from the same module
            inSameMod b =
                let na = getName $ getName inst
                    la = isInternalName na
                    nb = getName $ getName b
                    lb = isInternalName nb
                in (la && lb) || (nameModule na == nameModule nb)

            isOverlappable i = hasOverlappableFlag $ overlapMode $ is_flag i

    -- We consider the most specific instance unsafe when it both:
    --   (1) Comes from a module compiled as `Safe`
    --   (2) Is an orphan instance, OR, an instance for a MPTC
    unsafeTopInstance inst = isSafeOverlap (is_flag inst) &&
        (isOrphan (is_orphan inst) || classArity (is_cls inst) > 1)
---------------
-- | Does this match arise from an instance marked INCOHERENT
-- (or compiled with -XIncoherentInstances)?
is_incoherent :: InstMatch -> Bool
is_incoherent (inst, _) = incoherent_mode (overlapMode (is_flag inst))
  where
    incoherent_mode (Incoherent _) = True
    incoherent_mode _              = False
---------------
insert_overlapping :: InstMatch -> [InstMatch] -> [InstMatch]
-- ^ Add a new solution, knocking out strictly less specific ones
-- See Note [Rules for instance lookup]
insert_overlapping new_item [] = [new_item]
insert_overlapping new_item (old_item : old_items)
  | new_beats_old        -- New strictly overrides old
  , not old_beats_new
  , new_item `can_override` old_item
  = insert_overlapping new_item old_items

  | old_beats_new        -- Old strictly overrides new
  , not new_beats_old
  , old_item `can_override` new_item
  = old_item : old_items

  -- Discard incoherent instances; see Note [Incoherent instances]
  | is_incoherent old_item       -- Old is incoherent; discard it
  = insert_overlapping new_item old_items
  | is_incoherent new_item       -- New is incoherent; discard it
  = old_item : old_items

  -- Equal or incomparable, and neither is incoherent; keep both
  | otherwise
  = old_item : insert_overlapping new_item old_items
  where

    new_beats_old = new_item `more_specific_than` old_item
    old_beats_new = old_item `more_specific_than` new_item

    -- `instB` can be instantiated to match `instA`
    -- or the two are equal
    (instA,_) `more_specific_than` (instB,_)
      = isJust (tcMatchTys (is_tys instB) (is_tys instA))

    (instA, _) `can_override` (instB, _)
       =  hasOverlappingFlag  (overlapMode (is_flag instA))
       || hasOverlappableFlag (overlapMode (is_flag instB))
       -- Overlap permitted if either the more specific instance
       -- is marked as overlapping, or the more general one is
       -- marked as overlappable.
       -- Latest change described in: Trac #9242.
       -- Previous change: Trac #3877, Dec 10.
{-
Note [Incoherent instances]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
For some classes, the choice of a particular instance does not matter, any one
is good. E.g. consider
class D a b where { opD :: a -> b -> String }
instance D Int b where ...
instance D a Int where ...
g (x::Int) = opD x x -- Wanted: D Int Int
For such classes this should work (without having to add an "instance D Int
Int", and using -XOverlappingInstances, which would then work). This is what
-XIncoherentInstances is for: Telling GHC "I don't care which instance you use;
if you can use one, use it."
Should this logic only work when *all* candidates have the incoherent flag, or
even when all but one have it? The right choice is the latter, which can be
justified by comparing the behaviour with how -XIncoherentInstances worked when
it was only about the unify-check (note [Overlapping instances]):
Example:
class C a b c where foo :: (a,b,c)
instance C [a] b Int
instance [incoherent] [Int] b c
instance [incoherent] C a Int c
Thanks to the incoherent flags,
[Wanted] C [a] b Int
works: Only instance one matches, the others just unify, but are marked
incoherent.
So I can write
(foo :: ([a],b,Int)) :: ([Int], Int, Int).
but if that works then I really want to be able to write
foo :: ([Int], Int, Int)
as well. Now all three instances from above match. None is more specific than
another, so none is ruled out by the normal overlapping rules. One of them is
not incoherent, but we still want this to compile. Hence the
"all-but-one-logic".
The implementation is in insert_overlapping, where we remove matching
incoherent instances as long as there are others.
************************************************************************
* *
Binding decisions
* *
************************************************************************
-}
-- | Binding policy used when unifying a candidate instance head with a
-- target constraint: overlappable skolems stay rigid, any other
-- variable may be bound.
instanceBindFun :: TyCoVar -> BindFlag
instanceBindFun tv | isTcTyVar tv && isOverlappableTyVar tv = Skolem
                   | otherwise                              = BindMe
   -- Note [Binding when looking up instances]
{-
Note [Binding when looking up instances]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When looking up in the instance environment, or family-instance environment,
we are careful about multiple matches, as described above in
Note [Overlapping instances]
The key_tys can contain skolem constants, and we can guarantee that those
are never going to be instantiated to anything, so we should not involve
them in the unification test. Example:
class Foo a where { op :: a -> Int }
instance Foo a => Foo [a] -- NB overlap
instance Foo [Int] -- NB overlap
data T = forall a. Foo a => MkT a
f :: T -> Int
f (MkT x) = op [x,x]
The op [x,x] means we need (Foo [a]). Without the filterVarSet we'd
complain, saying that the choice of instance depended on the instantiation
of 'a'; but of course it isn't *going* to be instantiated.
We do this only for isOverlappableTyVar skolems. For example we reject
g :: forall a => [a] -> Int
g x = op x
on the grounds that the correct instance depends on the instantiation of 'a'
-}
|
tjakway/ghcjvm
|
compiler/types/InstEnv.hs
|
bsd-3-clause
| 38,001
| 0
| 14
| 9,781
| 3,914
| 2,167
| 1,747
| -1
| -1
|
module D1(sumFun) where
{- generalise function 'f' on the sub-expression '(y+1)' with a new
parameter 'z'. This refactoring only affects the current module-}

-- Constant read by the generalised call below.
y=0

-- 'f' after generalisation: 'z' abstracts the former '(y+1)' sub-expression.
f z x =x + z

-- Sums the input list after adding (y + 1) to each element.
sumFun xs = sum $ map (f (y + 1)) xs
|
kmate/HaRe
|
old/testing/generaliseDef/D1_TokOut.hs
|
bsd-3-clause
| 222
| 0
| 10
| 51
| 59
| 32
| 27
| 4
| 1
|
-----------------------------------------------------------------------------
-- |
-- Module : Hadrian.Builder.Sphinx
-- Copyright : (c) Andrey Mokhov 2014-2017
-- License : MIT (see the file LICENSE)
-- Maintainer : andrey.mokhov@gmail.com
-- Stability : experimental
--
-- Support for invoking the documentation utility Sphinx.
-----------------------------------------------------------------------------
module Hadrian.Builder.Sphinx (SphinxMode (..), args) where
import Development.Shake
import Development.Shake.Classes
import GHC.Generics
import Hadrian.Expression
import Hadrian.Utilities
-- | Sphinx can be used in three different modes to convert reStructuredText
-- documents into HTML, LaTeX or Man pages.
data SphinxMode = Html | Latex | Man deriving (Eq, Generic, Show)
instance Binary SphinxMode
instance Hashable SphinxMode
instance NFData SphinxMode
-- | Default command line arguments for invoking the documentation
-- utility Sphinx: select the builder for the given 'SphinxMode', put
-- doctrees in a per-mode cache directory, and pass input/output paths.
args :: (ShakeValue c, ShakeValue b) => SphinxMode -> Args c b
args mode = do
    outPath <- getOutput
    mconcat [ arg "-b", arg modeString
            , arg "-d", arg $ outPath -/- (".doctrees-" ++ modeString)
            , arg =<< getInput
            , arg outPath ]
  where
    -- Sphinx builder name corresponding to the mode
    modeString = case mode of
        Html  -> "html"
        Latex -> "latex"
        Man   -> "man"
|
bgamari/shaking-up-ghc
|
src/Hadrian/Builder/Sphinx.hs
|
bsd-3-clause
| 1,344
| 0
| 11
| 262
| 235
| 132
| 103
| 21
| 3
|
{- Tests let-expressions in do-statements -}
module Main( main ) where

-- Prints "abc"; the middle action uses a let-expression inside do.
foo = do
  putStr "a"
  let x = "b" in putStr x
  putStr "c"

-- Prints "a", runs 'foo', then prints "b" followed by a newline.
main = do
  putStr "a"
  foo
  let x = "b" in putStrLn x
|
snoyberg/ghc
|
testsuite/tests/deSugar/should_run/dsrun002.hs
|
bsd-3-clause
| 231
| 0
| 10
| 93
| 75
| 34
| 41
| 9
| 1
|
module Compose where
-- | A state transformer: a function from an input state to a result
-- paired with the output state.  The LiquidHaskell annotations below
-- refine pre-state (p), post-state (q) and result (r) predicates.
data ST s a = ST {runState :: s -> (a,s)}

{-@ data ST s a <p :: s -> Prop, q :: s -> s -> Prop, r :: s -> a -> Prop>
  = ST (runState :: x:s<p> -> (a<r x>, s<q x>)) @-}

{-@ runState :: forall <p :: s -> Prop, q :: s -> s -> Prop, r :: s -> a -> Prop>. ST <p, q, r> s a -> x:s<p> -> (a<r x>, s<q x>) @-}
{-
cmp :: forall < pref :: s -> Prop, postf :: s -> s -> Prop
, pre :: s -> Prop, postg :: s -> s -> Prop
, post :: s -> s -> Prop
, rg :: s -> a -> Prop
, rf :: s -> b -> Prop
, r :: s -> b -> Prop
>.
{xx:s<pre> -> w:s<postg xx> -> s<postf w> -> s<post xx>}
{ww:s<pre> -> s<postg ww> -> s<pref>}
(ST <pre, postg, rg> s a)
-> (ST <pref, postf, rf> s b)
-> (ST <pre, post, r> s b)
@-}
-- | Sequence two state transformers, discarding the result of the
-- first and threading its output state into the second.
cmp :: (ST s a)
    -> (ST s b)
    -> (ST s b)
cmp (ST g) (ST f) = ST (\x -> case g x of {(_, s) -> f s})
{-@
bind :: forall < pref :: s -> Prop, postf :: s -> s -> Prop
, pre :: s -> Prop, postg :: s -> s -> Prop
, post :: s -> s -> Prop
, rg :: s -> a -> Prop
, rf :: s -> b -> Prop
, r :: s -> b -> Prop
, pref0 :: a -> Prop
>.
{x:s<pre> -> a<rg x> -> a<pref0>}
{x:s<pre> -> y:s<postg x> -> b<rf y> -> b<r x>}
{xx:s<pre> -> w:s<postg xx> -> s<postf w> -> s<post xx>}
{ww:s<pre> -> s<postg ww> -> s<pref>}
(ST <pre, postg, rg> s a)
-> (a<pref0> -> ST <pref, postf, rf> s b)
-> (ST <pre, post, r> s b)
@-}
-- | Monadic bind for 'ST': run the first transformer, feed its result
-- to 'f', and run the resulting transformer on the updated state.
bind :: (ST s a)
     -> (a -> ST s b)
     -> (ST s b)
bind (ST g) f = ST (\x -> case g x of {(y, s) -> (runState (f y)) s})
{-@ unit :: forall s a <p :: s -> Prop>. x:a -> ST <p, {\s v -> v == s}, {\s v -> x == v}> s a @-}
-- | Lift a pure value into 'ST' without touching the state.
unit :: a -> ST s a
unit x = ST (\s -> (x, s))
{-@ incr :: ST <{\x -> x >= 0}, {\x v -> v = x + 1}, {\x v -> v = x}> Nat Nat @-}
-- | Return the current state and increment it.
incr :: ST Int Int
incr = ST $ \x -> (x, x + 1)

{-@ incr' :: ST <{\x -> x >= 0}, {\x v -> v = x + 1}, {\w vw -> vw = w}> Nat Nat @-}
-- | Same behaviour as 'incr', written via 'bind' and 'unit'.
incr' :: ST Int Int
incr' = bind incr (\x -> unit x)
|
mightymoose/liquidhaskell
|
tests/todo/StateConstraints.hs
|
bsd-3-clause
| 2,203
| 0
| 15
| 868
| 338
| 185
| 153
| 16
| 1
|
{-# LANGUAGE ScopedTypeVariables #-}
module Fixme () where
import Language.Haskell.Liquid.Prelude (liquidAssert)
{-@ invariant {v:Int | v >= 0} @-}
{-@ Decrease go 1 @-}
-- | Fold 'f' over the indices from lo upwards until hi, threading an
-- accumulator.  The first argument of 'go' is a decreasing counter
-- that serves as the termination measure.
loop :: Int -> Int -> a -> (Int -> a -> a) -> a
loop lo hi base f = go (hi-lo) base lo
  where
    go (d::Int) acc i
      | i /= hi     = go (d-1) (f i acc) (i + 1)
      | otherwise   = acc

-- Sums the indices 0..9 starting from 0.
poo = loop 0 10 0 (+)
|
mightymoose/liquidhaskell
|
tests/pos/rec_annot_go.hs
|
bsd-3-clause
| 392
| 0
| 11
| 110
| 167
| 90
| 77
| 9
| 1
|
{-# LANGUAGE MagicHash, NoImplicitPrelude, BangPatterns #-}
-----------------------------------------------------------------------------
-- |
-- Module : GHC.CString
-- Copyright : (c) The University of Glasgow 2011
-- License : see libraries/ghc-prim/LICENSE
--
-- Maintainer : cvs-ghc@haskell.org
-- Stability : internal
-- Portability : non-portable (GHC Extensions)
--
-- GHC C strings definitions (previously in GHC.Base).
-- Use GHC.Exts from the base package instead of importing this
-- module directly.
--
-----------------------------------------------------------------------------
module GHC.CString (
unpackCString#, unpackAppendCString#, unpackFoldrCString#,
unpackCStringUtf8#, unpackNBytes#
) where
import GHC.Types
import GHC.Prim
-----------------------------------------------------------------------------
-- Unpacking C strings
-----------------------------------------------------------------------------
-- This code is needed for virtually all programs, since it's used for
-- unpacking the strings of error messages.
-- Used to be in GHC.Base, but was moved to ghc-prim because the new generics
-- stuff uses Strings in the representation, so to give representations for
-- ghc-prim types we need unpackCString#
{-
Note [Inlining unpackCString#]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
There's really no point in ever inlining things like unpackCString# as the loop
doesn't specialise in an interesting way and we can't deforest the list
constructors (we'd want to use unpackFoldrCString# for this). Moreover, it's
pretty small, so there's a danger that it'll be inlined at every literal, which
is a waste.
Moreover, inlining early may interfere with a variety of rules that are supposed
to match unpackCString#,
* BuiltInRules in PrelRules.hs; e.g.
eqString (unpackCString# (Lit s1)) (unpackCString# (Lit s2)
= s1 == s2
* unpacking rules; e.g. in GHC.Base,
unpackCString# a
= build (unpackFoldrCString# a)
* stream fusion rules; e.g. in the `text` library,
unstream (S.map safe (S.streamList (GHC.unpackCString# a)))
= unpackCString# a
-}
-- | Unpack a NUL-terminated C string into a lazy list of 'Char',
-- one byte per character.  See Note [Inlining unpackCString#].
unpackCString# :: Addr# -> [Char]
{-# NOINLINE unpackCString# #-}
unpackCString# addr
  = unpack 0#
  where
    -- Walk the bytes from offset nh until the terminating NUL
    unpack nh
      | isTrue# (ch `eqChar#` '\0'#) = []
      | True                         = C# ch : unpack (nh +# 1#)
      where
        !ch = indexCharOffAddr# addr nh
-- | Like 'unpackCString#', but append the given list after the
-- unpacked string instead of terminating with @[]@.
unpackAppendCString# :: Addr# -> [Char] -> [Char]
{-# NOINLINE unpackAppendCString# #-}
  -- See the NOINLINE note on unpackCString#
unpackAppendCString# addr rest
  = unpack 0#
  where
    unpack nh
      | isTrue# (ch `eqChar#` '\0'#) = rest
      | True                         = C# ch : unpack (nh +# 1#)
      where
        !ch = indexCharOffAddr# addr nh
-- | Fold over the characters of a NUL-terminated C string with
-- cons-function @f@ and terminator @z@.
unpackFoldrCString# :: Addr# -> (Char -> a -> a) -> a -> a

-- Usually the unpack-list rule turns unpackFoldrCString# into unpackCString#
-- It also has a BuiltInRule in PrelRules.hs:
--      unpackFoldrCString# "foo" c (unpackFoldrCString# "baz" c n)
--        =  unpackFoldrCString# "foobaz" c n

{-# NOINLINE unpackFoldrCString# #-}
-- At one stage I had NOINLINE [0] on the grounds that, unlike
-- unpackCString#, there *is* some point in inlining
-- unpackFoldrCString#, because we get better code for the
-- higher-order function call.  BUT there may be a lot of
-- literal strings, and making a separate 'unpack' loop for
-- each is highly gratuitous.  See nofib/real/anna/PrettyPrint.

unpackFoldrCString# addr f z
  = unpack 0#
  where
    unpack nh
      | isTrue# (ch `eqChar#` '\0'#) = z
      | True                         = C# ch `f` unpack (nh +# 1#)
      where
        !ch = indexCharOffAddr# addr nh
-- There's really no point in inlining this for the same reasons as
-- unpackCString. See Note [Inlining unpackCString#] above for details.
-- | Unpack a NUL-terminated, UTF-8 encoded C string into a lazy list
-- of 'Char', decoding 1- to 4-byte sequences.
unpackCStringUtf8# :: Addr# -> [Char]
{-# NOINLINE unpackCStringUtf8# #-}
unpackCStringUtf8# addr
  = unpack 0#
  where
    -- Dispatch on the leading byte to find the sequence length,
    -- then combine the continuation bytes (each carries 6 bits).
    unpack nh
      | isTrue# (ch `eqChar#` '\0'#  ) = []
      | isTrue# (ch `leChar#` '\x7F'#) = C# ch : unpack (nh +# 1#)
      | isTrue# (ch `leChar#` '\xDF'#) =  -- 2-byte sequence
          C# (chr# (((ord# ch                                  -# 0xC0#) `uncheckedIShiftL#`  6#) +#
                     (ord# (indexCharOffAddr# addr (nh +# 1#)) -# 0x80#))) :
          unpack (nh +# 2#)
      | isTrue# (ch `leChar#` '\xEF'#) =  -- 3-byte sequence
          C# (chr# (((ord# ch                                  -# 0xE0#) `uncheckedIShiftL#` 12#) +#
                    ((ord# (indexCharOffAddr# addr (nh +# 1#)) -# 0x80#) `uncheckedIShiftL#`  6#) +#
                     (ord# (indexCharOffAddr# addr (nh +# 2#)) -# 0x80#))) :
          unpack (nh +# 3#)
      | True                           =  -- 4-byte sequence
          C# (chr# (((ord# ch                                  -# 0xF0#) `uncheckedIShiftL#` 18#) +#
                    ((ord# (indexCharOffAddr# addr (nh +# 1#)) -# 0x80#) `uncheckedIShiftL#` 12#) +#
                    ((ord# (indexCharOffAddr# addr (nh +# 2#)) -# 0x80#) `uncheckedIShiftL#`  6#) +#
                     (ord# (indexCharOffAddr# addr (nh +# 3#)) -# 0x80#))) :
          unpack (nh +# 4#)
      where
        !ch = indexCharOffAddr# addr nh
-- There's really no point in inlining this for the same reasons as
-- unpackCString. See Note [Inlining unpackCString#] above for details.
-- | Unpack exactly the given number of bytes (no NUL terminator
-- required), one byte per 'Char'.
unpackNBytes# :: Addr# -> Int# -> [Char]
{-# NOINLINE unpackNBytes# #-}
unpackNBytes#  _addr 0#   = []
unpackNBytes#   addr len# = unpack [] (len# -# 1#)
    where
     -- Walk backwards from the last byte, consing onto the accumulator
     -- so the result comes out in order
     unpack acc i#
      | isTrue# (i# <# 0#)  = acc
      | True                =
         case indexCharOffAddr# addr i# of
            ch -> unpack (C# ch : acc) (i# -# 1#)
|
olsner/ghc
|
libraries/ghc-prim/GHC/CString.hs
|
bsd-3-clause
| 5,692
| 0
| 24
| 1,479
| 1,002
| 536
| 466
| 62
| 1
|
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE helpset PUBLIC "-//Sun Microsystems Inc.//DTD JavaHelp HelpSet Version 2.0//EN" "http://java.sun.com/products/javahelp/helpset_2_0.dtd">
<helpset version="2.0" xml:lang="el-GR">
<title>Online Menu | ZAP Extension</title>
<maps>
<homeID>top</homeID>
<mapref location="map.jhm"/>
</maps>
<view>
<name>TOC</name>
<label>Contents</label>
<type>org.zaproxy.zap.extension.help.ZapTocView</type>
<data>toc.xml</data>
</view>
<view>
<name>Index</name>
<label>Ευρετήριο</label>
<type>javax.help.IndexView</type>
<data>index.xml</data>
</view>
<view>
<name>Search</name>
<label>Αναζήτηση</label>
<type>javax.help.SearchView</type>
<data engine="com.sun.java.help.search.DefaultSearchEngine">
JavaHelpSearch
</data>
</view>
<view>
<name>Favorites</name>
<label>Favorites</label>
<type>javax.help.FavoritesView</type>
</view>
</helpset>
|
ccgreen13/zap-extensions
|
src/org/zaproxy/zap/extension/onlineMenu/resources/help_el_GR/helpset_el_GR.hs
|
apache-2.0
| 998
| 89
| 29
| 160
| 432
| 229
| 203
| -1
| -1
|
{-# LANGUAGE CPP #-}
-------------------------------------------------------------------------------
--
-- | Command-line parser
--
-- This is an abstract command-line parser used by both StaticFlags and
-- DynFlags.
--
-- (c) The University of Glasgow 2005
--
-------------------------------------------------------------------------------
module CmdLineParser
(
processArgs, OptKind(..), GhcFlagMode(..),
CmdLineP(..), getCmdLineState, putCmdLineState,
Flag(..), defFlag, defGhcFlag, defGhciFlag, defHiddenFlag,
errorsToGhcException,
EwM, runEwM, addErr, addWarn, getArg, getCurLoc, liftEwM, deprecate
) where
#include "HsVersions.h"
import Util
import Outputable
import Panic
import Bag
import SrcLoc
import Data.Function
import Data.List
import Control.Monad (liftM, ap)
#if __GLASGOW_HASKELL__ < 709
import Control.Applicative (Applicative(..))
#endif
--------------------------------------------------------
-- The Flag and OptKind types
--------------------------------------------------------
-- | A command-line flag: its name (without the leading "-"), how to
-- parse any argument it takes, and which GHC modes it applies to.
data Flag m = Flag
    {   flagName    :: String,     -- Flag, without the leading "-"
        flagOptKind :: OptKind m,  -- What to do if we see it
        flagGhcMode :: GhcFlagMode -- Which modes this flag affects
    }

-- | A flag available in all modes.
defFlag :: String -> OptKind m -> Flag m
defFlag name optKind = Flag name optKind AllModes

-- | A flag only affecting non-interactive GHC.
defGhcFlag :: String -> OptKind m -> Flag m
defGhcFlag name optKind = Flag name optKind OnlyGhc

-- | A flag only affecting GHCi.
defGhciFlag :: String -> OptKind m -> Flag m
defGhciFlag name optKind = Flag name optKind OnlyGhci

-- | A flag hidden from command-line completion.
defHiddenFlag :: String -> OptKind m -> Flag m
defHiddenFlag name optKind = Flag name optKind HiddenFlag

-- | GHC flag modes describing when a flag has an effect.
data GhcFlagMode
    = OnlyGhc  -- ^ The flag only affects the non-interactive GHC
    | OnlyGhci -- ^ The flag only affects the interactive GHC
    | AllModes -- ^ The flag affects multiple ghc modes
    | HiddenFlag -- ^ This flag should not be seen in cli completion
-- | How a flag consumes its argument, if any, and what action to run.
data OptKind m                             -- Suppose the flag is -f
    = NoArg     (EwM m ())                 -- -f all by itself
    | HasArg    (String -> EwM m ())       -- -farg or -f arg
    | SepArg    (String -> EwM m ())       -- -f arg
    | Prefix    (String -> EwM m ())       -- -farg
    | OptPrefix (String -> EwM m ())       -- -f or -farg (i.e. the arg is optional)
    | OptIntSuffix (Maybe Int -> EwM m ()) -- -f or -f=n; pass n to fn
    | IntSuffix (Int -> EwM m ())          -- -f or -f=n; pass n to fn
    | FloatSuffix (Float -> EwM m ())      -- -f or -f=n; pass n to fn
    | PassFlag  (String -> EwM m ())       -- -f; pass "-f" fn
    | AnySuffix (String -> EwM m ())       -- -f or -farg; pass entire "-farg" to fn
    | PrefixPred    (String -> Bool) (String -> EwM m ())  -- guarded Prefix
    | AnySuffixPred (String -> Bool) (String -> EwM m ())  -- guarded AnySuffix
--------------------------------------------------------
-- The EwM monad
--------------------------------------------------------
type Err = Located String
type Warn = Located String
type Errs = Bag Err
type Warns = Bag Warn
-- EwM ("errors and warnings monad") is a monad
-- transformer for m that adds an (err, warn) state
newtype EwM m a = EwM { unEwM :: Located String -- Current parse arg
                              -> Errs -> Warns
                              -> m (Errs, Warns, a) }
instance Monad m => Functor (EwM m) where
    fmap = liftM
instance Monad m => Applicative (EwM m) where
    pure = return
    (<*>) = ap
instance Monad m => Monad (EwM m) where
    (EwM f) >>= k = EwM (\l e w -> do (e', w', r) <- f l e w
                                      unEwM (k r) l e' w')
    return v = EwM (\_ e w -> return (e, w, v))
-- Run with a placeholder "current arg"; 'setArg' replaces it before
-- any error or warning can be emitted.
runEwM :: EwM m a -> m (Errs, Warns, a)
runEwM action = unEwM action (panic "processArgs: no arg yet") emptyBag emptyBag
-- Set the "current arg" used as the location for errors/warnings
-- emitted while running the given action.
setArg :: Located String -> EwM m () -> EwM m ()
setArg l (EwM f) = EwM (\_ es ws -> f l es ws)
-- Record an error at the current arg's location.
addErr :: Monad m => String -> EwM m ()
addErr e = EwM (\(L loc _) es ws -> return (es `snocBag` L loc e, ws, ()))
-- Record a warning at the current arg's location.
addWarn :: Monad m => String -> EwM m ()
addWarn msg = EwM (\(L loc _) es ws -> return (es, ws `snocBag` L loc msg, ()))
-- Warn that the current arg is deprecated, with an explanation.
deprecate :: Monad m => String -> EwM m ()
deprecate s = do
    arg <- getArg
    addWarn (arg ++ " is deprecated: " ++ s)
-- The text of the arg currently being processed.
getArg :: Monad m => EwM m String
getArg = EwM (\(L _ arg) es ws -> return (es, ws, arg))
-- The source location of the arg currently being processed.
getCurLoc :: Monad m => EwM m SrcSpan
getCurLoc = EwM (\(L loc _) es ws -> return (es, ws, loc))
-- Lift an action of the underlying monad into EwM.
liftEwM :: Monad m => m a -> EwM m a
liftEwM action = EwM (\_ es ws -> do { r <- action; return (es, ws, r) })
--------------------------------------------------------
-- A state monad for use in the command-line parser
--------------------------------------------------------
-- (CmdLineP s) typically instantiates the 'm' in (EwM m) and (OptKind m)
-- | A minimal state monad for the command-line parser: a computation
-- is a function from the current state to a result and a new state.
newtype CmdLineP s a = CmdLineP { runCmdLine :: s -> (a, s) }

instance Functor (CmdLineP s) where
    fmap f (CmdLineP g) = CmdLineP $ \s ->
        let (a, s') = g s in (f a, s')

instance Applicative (CmdLineP s) where
    pure a = CmdLineP $ \s -> (a, s)
    (<*>) = ap

instance Monad (CmdLineP s) where
    CmdLineP g >>= k = CmdLineP $ \s ->
        case g s of (a, s') -> runCmdLine (k a) s'
    return = pure

-- | Read the current parser state.
getCmdLineState :: CmdLineP s s
getCmdLineState = CmdLineP $ \s -> (s, s)

-- | Replace the current parser state.
putCmdLineState :: s -> CmdLineP s ()
putCmdLineState s = CmdLineP $ \_ -> ((), s)
--------------------------------------------------------
-- Processing arguments
--------------------------------------------------------
-- | Walk the argument list, dispatching each "-"-prefixed arg via the
-- parser spec; anything unrecognised is returned as a "spare" arg.
processArgs :: Monad m
            => [Flag m] -- cmdline parser spec
            -> [Located String] -- args
            -> m ( [Located String], -- spare args
                   [Located String], -- errors
                   [Located String] ) -- warnings
processArgs spec args = do
    (errs, warns, spare) <- runEwM action
    return (spare, bagToList errs, bagToList warns)
  where
    action = process args []
    -- process :: [Located String] -> [Located String] -> EwM m [Located String]
    -- Accumulates spare args in reverse; 'setArg' pins the location
    -- used by any errors the flag's action emits.
    process [] spare = return (reverse spare)
    process (locArg@(L _ ('-' : arg)) : args) spare =
        case findArg spec arg of
            Just (rest, opt_kind) ->
                case processOneArg opt_kind rest arg args of
                    Left err ->
                        let b = process args spare
                        in (setArg locArg $ addErr err) >> b
                    Right (action,rest) ->
                        let b = process rest spare
                        in (setArg locArg $ action) >> b
            Nothing -> process args (locArg : spare)
    process (arg : args) spare = process args (arg : spare)
-- | Parse the argument (if any) of one flag occurrence.
-- rest: what followed the matched flag name within the same word;
-- arg:  the whole flag word without its leading '-';
-- args: the remaining command line.
processOneArg :: OptKind m -> String -> String -> [Located String]
              -> Either String (EwM m (), [Located String])
processOneArg opt_kind rest arg args
  = let dash_arg = '-' : arg
        rest_no_eq = dropEq rest
    in case opt_kind of
        NoArg a -> ASSERT(null rest) Right (a, args)
        -- HasArg takes the in-word suffix if present, else the next arg.
        HasArg f | notNull rest_no_eq -> Right (f rest_no_eq, args)
                 | otherwise -> case args of
                                  [] -> missingArgErr dash_arg
                                  (L _ arg1:args1) -> Right (f arg1, args1)
        -- See Trac #9776
        SepArg f -> case args of
                      [] -> missingArgErr dash_arg
                      (L _ arg1:args1) -> Right (f arg1, args1)
        Prefix f | notNull rest_no_eq -> Right (f rest_no_eq, args)
                 | otherwise -> unknownFlagErr dash_arg
        PrefixPred _ f | notNull rest_no_eq -> Right (f rest_no_eq, args)
                       | otherwise -> unknownFlagErr dash_arg
        PassFlag f | notNull rest -> unknownFlagErr dash_arg
                   | otherwise -> Right (f dash_arg, args)
        OptIntSuffix f | null rest -> Right (f Nothing, args)
                       | Just n <- parseInt rest_no_eq -> Right (f (Just n), args)
                       | otherwise -> Left ("malformed integer argument in " ++ dash_arg)
        IntSuffix f | Just n <- parseInt rest_no_eq -> Right (f n, args)
                    | otherwise -> Left ("malformed integer argument in " ++ dash_arg)
        FloatSuffix f | Just n <- parseFloat rest_no_eq -> Right (f n, args)
                      | otherwise -> Left ("malformed float argument in " ++ dash_arg)
        OptPrefix f -> Right (f rest_no_eq, args)
        AnySuffix f -> Right (f dash_arg, args)
        AnySuffixPred _ f -> Right (f dash_arg, args)
-- | Find the spec entry whose name is the longest prefix of arg and
-- whose OptKind accepts the leftover characters. Sorting by leftover
-- length ascending means the head has the shortest leftover, i.e.
-- the longest matching flag name.
findArg :: [Flag m] -> String -> Maybe (String, OptKind m)
findArg spec arg =
    case sortBy (compare `on` (length . fst)) -- prefer longest matching flag
           [ (removeSpaces rest, optKind)
           | flag <- spec,
             let optKind = flagOptKind flag,
             Just rest <- [stripPrefix (flagName flag) arg],
             arg_ok optKind rest arg ]
    of
        [] -> Nothing
        (one:_) -> Just one
-- | Does this OptKind accept what is left of the word after the flag
-- name was stripped off?
arg_ok :: OptKind t -> [Char] -> String -> Bool
arg_ok (NoArg _) rest _ = null rest
arg_ok (HasArg _) _ _ = True
arg_ok (SepArg _) rest _ = null rest
arg_ok (Prefix _) rest _ = notNull rest
arg_ok (PrefixPred p _) rest _ = notNull rest && p (dropEq rest)
arg_ok (OptIntSuffix _) _ _ = True
arg_ok (IntSuffix _) _ _ = True
arg_ok (FloatSuffix _) _ _ = True
arg_ok (OptPrefix _) _ _ = True
arg_ok (PassFlag _) rest _ = null rest
arg_ok (AnySuffix _) _ _ = True
arg_ok (AnySuffixPred p _) _ arg = p arg
-- | Parse an 'Int' that covers the whole string, with no trailing
-- characters: @\"433\"@ => @Just 433@, anything else => @Nothing@.
-- (A leading \'=\' is stripped by the caller via 'dropEq'.)
parseInt :: String -> Maybe Int
parseInt s =
    case reads s of
        [(n, "")] -> Just n
        _         -> Nothing
-- | Parse a 'Float' that covers the whole string; any trailing
-- characters make the parse fail.
parseFloat :: String -> Maybe Float
parseFloat s =
    case reads s of
        [(n, "")] -> Just n
        _         -> Nothing
-- | Discard a single leading \'=\', if present; otherwise return the
-- string unchanged.
dropEq :: String -> String
dropEq str = case str of
    '=' : rest -> rest
    _          -> str
-- | Error result for a flag that matched no spec entry.
unknownFlagErr :: String -> Either String a
unknownFlagErr flag = Left $ "unrecognised flag: " ++ flag

-- | Error result for a flag whose required argument is absent.
missingArgErr :: String -> Either String a
missingArgErr flag = Left $ "missing argument for flag: " ++ flag
--------------------------------------------------------
-- Utils
--------------------------------------------------------
-- See Note [Handling errors when parsing commandline flags]
-- | Render (location, message) pairs as one newline-separated
-- UsageError.
errorsToGhcException :: [(String, -- Location
                          String)] -- Error
                     -> GhcException
errorsToGhcException errs =
    UsageError $ intercalate "\n" $ [ l ++ ": " ++ e | (l, e) <- errs ]
{- Note [Handling errors when parsing commandline flags]
Parsing of static and mode flags happens before any session is started, i.e.,
before the first call to 'GHC.withGhc'. Therefore, to report errors for
invalid usage of these two types of flags, we can not call any function that
needs DynFlags, as there are no DynFlags available yet (unsafeGlobalDynFlags
is not set either). So we always print "on the commandline" as the location,
which is true except for Api users, which is probably ok.
When reporting errors for invalid usage of dynamic flags we /can/ make use of
DynFlags, and we do so explicitly in DynFlags.parseDynamicFlagsFull.
Before, we called unsafeGlobalDynFlags when an invalid (combination of)
flag(s) was given on the commandline, resulting in panics (#9963).
-}
|
TomMD/ghc
|
compiler/main/CmdLineParser.hs
|
bsd-3-clause
| 11,623
| 0
| 18
| 3,471
| 3,365
| 1,750
| 1,615
| 192
| 14
|
{-# LANGUAGE TypeFamilyDependencies, Safe, PolyKinds #-}
{-# LANGUAGE TypeFamilies, TypeOperators, DataKinds, NoImplicitPrelude #-}
-----------------------------------------------------------------------------
-- |
-- Module : Data.Type.Bool
-- License : BSD-style (see the LICENSE file in the distribution)
--
-- Maintainer : libraries@haskell.org
-- Stability : experimental
-- Portability : not portable
--
-- Basic operations on type-level Booleans.
--
-- @since 4.7.0.0
-----------------------------------------------------------------------------
module Data.Type.Bool (
If, type (&&), type (||), Not
) where
import Data.Bool
-- This needs to be in base because (&&) is used in Data.Type.Equality.
-- The other functions do not need to be in base, but seemed to be appropriate
-- here.
-- Note: these are closed type families; equations are tried top to
-- bottom, and the reflexive equations (e.g. @a && a = a@) let an
-- application reduce when both arguments are the same, even if the
-- argument itself is not yet known.
-- | Type-level "If". @If True a b@ ==> @a@; @If False a b@ ==> @b@
type family If cond tru fls where
  If 'True tru fls = tru
  If 'False tru fls = fls
-- | Type-level "and"
type family a && b where
  'False && a = 'False
  'True && a = a
  a && 'False = 'False
  a && 'True = a
  a && a = a
infixr 3 &&
-- | Type-level "or"
type family a || b where
  'False || a = a
  'True || a = 'True
  a || 'False = a
  a || 'True = 'True
  a || a = a
infixr 2 ||
-- | Type-level "not". An injective type family since @4.10.0.0@.
--
-- @since 4.7.0.0
type family Not a = res | res -> a where
  Not 'False = 'True
  Not 'True = 'False
|
snoyberg/ghc
|
libraries/base/Data/Type/Bool.hs
|
bsd-3-clause
| 1,513
| 0
| 7
| 370
| 286
| 171
| 115
| -1
| -1
|
{-# LANGUAGE TypeSynonymInstances, FlexibleInstances #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
module Language.Futhark.CoreTests ()
where
import Test.QuickCheck
import Language.Futhark.Core
import Futhark.Representation.PrimitiveTests()
-- | Generate non-empty names over lowercase ASCII letters.
instance Arbitrary Name where
  arbitrary = nameFromString <$> listOf1 (elements ['a'..'z'])
-- | Combine an arbitrary base name with an arbitrary tag.
instance Arbitrary VName where
  arbitrary = VName <$> arbitrary <*> arbitrary
|
ihc/futhark
|
unittests/Language/Futhark/CoreTests.hs
|
isc
| 413
| 0
| 10
| 49
| 84
| 49
| 35
| 10
| 0
|
{- |
module: CSV
license: LGPL
maintainer: Jaap Weel <weel at ugcs dot caltech dot edu>
stability: provisional
portability: ghc
This module parses and dumps documents that are formatted more or
less according to RFC 4180, \"Common Format and MIME Type for
Comma-Separated Values (CSV) Files\",
<http://www.rfc-editor.org/rfc/rfc4180.txt>.
There are some issues with this RFC. I will describe what these
issues are and how I deal with them.
First, the RFC prescribes CRLF standard network line breaks, but
you are likely to run across CSV files with other line endings, so
we accept any sequence of CRs and LFs as a line break.
Second, there is an optional header line, but the format for the
header line is exactly like a regular record and you can only
figure out whether it exists from the mime type, which may not be
available. I ignore the issues of header lines and simply turn them
into regular records.
Third, there is an inconsistency, in that the formal grammar
specifies that fields can contain only certain US ASCII characters,
but the specification of the MIME type allows for other character
sets. I will allow all characters in fields, except for commas, CRs
and LFs in unquoted fields. This should make it possible to parse
CSV files in any encoding, but it allows for characters such as
tabs that the RFC may be interpreted to forbid even in non-US-ASCII
character sets. -}
{- Copyright (c) Jaap Weel 2007. This library is free software; you
can redistribute it and/or modify it under the terms of the GNU
Lesser General Public License as published by the Free Software
Foundation; either version 2.1 of the License, or (at your option)
any later version. This library is distributed in the hope that it
will be useful, but WITHOUT ANY WARRANTY; without even the implied
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details. You
should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
02110-1301 USA. -}
module CSV (CSV
, Record
, Field
, csv
, parseCSV
, parseCSVFromFile
, parseCSVTest
, printCSV
) where
import Text.ParserCombinators.Parsec
import Data.List (intersperse)
-- | A CSV file is a series of records. According to the RFC, the
-- records all have to have the same length. As an extension, I
-- allow variable length records.
type CSV = [Record]
-- | A record is a series of fields
type Record = [Field]
-- | A field is a string
type Field = String
-- | A Parsec parser for parsing CSV files
csv :: Parser CSV
csv = do x <- record `sepEndBy` many1 (oneOf "\n\r")
         eof
         return x
-- One record: quoted or bare fields, comma separated.
record :: Parser Record
record = (quotedField <|> field) `sepBy` char ','
-- A bare field: any run of characters other than delimiters/quotes.
field :: Parser Field
field = many (noneOf ",\n\r\"")
-- A quoted field: between double quotes, with "" as an escaped quote.
quotedField :: Parser Field
quotedField = between (char '"') (char '"') $
              many (noneOf "\"" <|> try (string "\"\"" >> return '"'))
-- | Given a file name (used only for error messages) and a string to
-- parse, run the parser.
parseCSV :: FilePath -> String -> Either ParseError CSV
parseCSV = parse csv
-- | Given a file name, read from that file and run the parser
parseCSVFromFile :: FilePath -> IO (Either ParseError CSV)
parseCSVFromFile = parseFromFile csv
-- | Given a string, run the parser, and print the result on stdout.
-- (Thin wrapper over Parsec's 'parseTest'; intended for the REPL.)
parseCSVTest :: String -> IO ()
parseCSVTest = parseTest csv
-- | Render a 'CSV' value as CSV text. Every field is emitted quoted,
-- with embedded double quotes doubled; fields are comma separated and
-- each record ends with a newline.
printCSV :: CSV -> String
printCSV = unlines . map renderRecord
    where renderRecord = concat . intersperse "," . map renderField
          renderField f = "\"" ++ concatMap escapeChar f ++ "\""
          escapeChar '"' = "\"\""
          escapeChar c = [c]
|
ladinu/cs457
|
src/CSV.hs
|
mit
| 4,109
| 0
| 12
| 1,009
| 408
| 222
| 186
| 36
| 2
|
{-# htermination plusFM :: FiniteMap (Ratio Int) b -> FiniteMap (Ratio Int) b -> FiniteMap (Ratio Int) b #-}
import FiniteMap
|
ComputationWithBoundedResources/ara-inference
|
doc/tpdb_trs/Haskell/full_haskell/FiniteMap_plusFM_7.hs
|
mit
| 126
| 0
| 3
| 21
| 5
| 3
| 2
| 1
| 0
|
-- TODO: make some kind of bounded float type / look into smart ctors for t;
-- params don't need to go over 1. Maybe create a normalized version with
-- adjacent max?
-- | Module implementing Bezier curves (just the math)
module Main where
import Data.Complex
import Data.List.Split
import System.Environment (getArgs)
-- | A parametric curve in the complex plane; the parameter ranges
-- over [0, 1].
type Curve = Float -> Complex Float

-- | Straight-line homotopy between two curves: at parameter t the
-- result interpolates linearly from @c1 t@ (t = 0) to @c2 t@ (t = 1).
slh :: Curve -> Curve -> Curve
slh c1 c2 t = w1 * c1 t + w2 * c2 t
  where
    w1 = (1 - t) :+ 0
    w2 = t :+ 0
-- | The Bezier curve on the given control points, built by the
-- recursive (de Casteljau style) construction: blend the curve on
-- all-but-the-last points with the curve on all-but-the-first.
recBez :: [Complex Float] -> Curve
recBez [p]    = const p
recBez points = slh (recBez (init points)) (recBez (tail points))
-- | Read a list of float literals as rectangular coordinates of
-- complex numbers, pairing consecutive values. A trailing unpaired
-- value v yields the point v :+ v (matching the original chunking
-- behaviour, where head and last of a singleton chunk coincide).
readPts :: [String] -> [Complex Float]
readPts = pair . map read
  where
    pair []           = []
    pair [v]          = [v :+ v]
    pair (re:im:rest) = (re :+ im) : pair rest
-- | Sample the Bezier curve given by its control points at 101
-- evenly spaced parameters from 0 to 1 inclusive.
genPts :: [Complex Float] -> [Complex Float]
genPts ctrl = [recBez ctrl t | t <- [0.00,0.01..1.00]]
-- | Render each complex number as "re im" -- its rectangular
-- coordinates, space delimited -- one string per point.
printPts :: [Complex Float] -> [String]
printPts = map render
  where
    render z = show (realPart z) ++ " " ++ show (imagPart z)
main = getArgs >>= mapM_ putStrLn . printPts . genPts . readPts
|
sergsr/algos-and-data-structs
|
haskell/misc/Bezier.hs
|
mit
| 1,360
| 0
| 12
| 281
| 382
| 204
| 178
| 20
| 1
|
module Tictactoe.Bencode.Encoder (
stringifyBoard,
stringifyField
) where
import Tictactoe.Base
-- | Bencode a whole board: an "l...e" list wrapping the encoded fields.
stringifyBoard :: Board -> String
stringifyBoard board = "l" ++ stringifyBoardFields board "" ++ "e"
-- | Append the bencoded form of every field to the accumulator.
-- The second argument is kept for backward compatibility with the old
-- accumulator-style interface. The fields are now rendered with
-- 'concatMap' instead of the previous one-field-at-a-time
-- @acc ++ piece@ loop, which was quadratic in the total output
-- length; the produced string is identical.
stringifyBoardFields :: Board -> String -> String
stringifyBoardFields fields acc = acc ++ concatMap stringifyField fields
-- | Bencode one field as a dictionary with keys v, x and y,
-- e.g. (1, 2, 'x') -> "d1:v1:x1:xi1e1:yi2ee".
stringifyField :: BoardField -> String
stringifyField (col, row, mark) =
    concat ["d1:v1:", [mark], "1:xi", show col, "e1:yi", show row, "ee"]
|
viktorasl/tictactoe-bot
|
src/Tictactoe/Bencode/Encoder.hs
|
mit
| 563
| 0
| 11
| 98
| 186
| 99
| 87
| 13
| 1
|
import Data.List
{-baby x=x+x
doubleSmallNumber x = if x <= 100
                        then 2*x
                        else x-}
-- | Label every odd element: "BOOM!" below 10, "BANG!" otherwise.
boomBangs xs = [(x, if x < 10 then "BOOM!" else "BANG!") | x <- xs, odd x]
-- | A list tree: either a flat list or a list of sub-trees.
data Lists a = List [a] | ListOfLists [Lists a]
-- | Collapse a 'Lists' tree into one flat list, left to right.
flatten :: Lists a -> [a]
flatten tree = case tree of
    List xs         -> xs
    ListOfLists lss -> concatMap flatten lss
-- | Circumference of a circle of the given radius.
circumference :: Float -> Float
circumference radius = 2 * pi * radius
-- CHAPTER 4 SYNTAX IN FUNCTIONS
-- | Pattern-matching demo: special-cases the number 7.
lucky :: (Integral a) => a -> String
lucky 7 = "LUCKY NUMBER SEVEN!"
lucky _ = "Sorry, you're out of luck, pal!"
-- | Factorial via 'product' of an enumeration.
factorial :: (Integral a) => a -> a
factorial n = product [1 .. n]
-- | Factorial via explicit recursion.
factorialr :: (Integral a) => a -> a
factorialr n
  | n == 0    = 1
  | otherwise = n * factorialr (n - 1)
-- | Component-wise sum of two 2-D vectors, via fst/snd projections.
addVectors' :: (Num a) => (a, a) -> (a, a) -> (a, a)
addVectors' u v = (fst u + fst v, snd u + snd v)
-- | Component-wise sum of two 2-D vectors, via pattern matching.
addVectors :: (Num a) => (a, a) -> (a, a) -> (a, a)
addVectors (a1, b1) (a2, b2) = (a1 + a2, b1 + b2)
-- | First component of a triple.
first :: (a, b, c) -> a
first (a, _, _) = a
-- | Second component of a triple.
second :: (a, b, c) -> b
second (_, b, _) = b
-- | Third component of a triple.
third :: (a, b, c) -> c
third (_, _, c) = c
-- | Describe a list by how many elements it has (showing up to two).
tell :: (Show a) => [a] -> String
tell list = case list of
    []      -> "The list is empty"
    [x]     -> "The list has one element: " ++ show x
    [x, y]  -> "The list has two elements: " ++ show x ++ " and " ++ show y
    (x:y:_) -> "This list is long. The first two elements are: " ++ show x ++ " and " ++ show y
-- | List length via a comprehension of ones.
length'' :: [a] -> Integer
length'' items = sum [1 | _ <- items]
-- lengthr :: (Num b) => [a] -> b -- also fine
-- | List length via explicit recursion.
lengthr :: [a] -> Integer
lengthr [] = 0
lengthr (_:xs) = 1 + lengthr xs
-- | Sum of a list via explicit recursion.
-- Fixed: the recursive case previously called the library 'sum'
-- instead of 'sum'' itself -- the result was the same, but the
-- definition was not actually recursive as the exercise intends.
sum' :: (Num a) => [a] -> a
sum' [] = 0
sum' (x:xs) = x + sum' xs
-- | Name the first letter of a string; empty input gets a message.
capital :: String -> String
capital "" = "Empty string, whoops!"
capital str@(c:_) = "The first letter of " ++ str ++ " is " ++ [c]
-- | Classify a BMI (weight / height^2) with a guard chain.
-- NOTE(review): 'bmi' and 'cmi' are the same expression; the third
-- guard and its message use 'cmi' apparently only to demonstrate
-- multiple where-bindings. The thresholds come from a lazy
-- list-pattern binding.
bmiTell :: Double -> Double -> String
bmiTell w h
  | bmi <= skinny = "You're underweight, you emo, you!"
  | bmi <= normal = "You're supposedly normal. Pffft, I bet you're ugly!"
  | cmi <= 30 = "You're fat! Lose some weight, fatty!"++" "++show cmi
  | otherwise = "You're a whale, congratulations!"
  where bmi = w/h^2; cmi = w/h^2
        [skinny, normal] = [18.5,25]
-- | Same classification as 'bmiTell' but without the demo extras;
-- written with guards rather than a nested if-chain.
bmiTell' :: Double -> Double -> String
bmiTell' w h
  | q <= 18.5 = "You're underweight, you emo, you!"
  | q <= 25.0 = "You're supposedly normal. Pffft, I bet you're ugly!"
  | q <= 30.0 = "You're fat! Lose some weight, fatty!"
  | otherwise = "You're a whale, congratulations!"
  where q = w/h^2
-- | Larger of two values, written with if/then/else.
max' :: (Ord a) => a -> a -> a
max' a b = if a > b then a else b
-- | Larger of two values, written with guards.
max'' :: (Ord a) => a -> a -> a
max'' a b
  | a > b     = a
  | otherwise = b
-- | Re-implementation of 'compare' for Ord values.
compare' :: (Ord a) => a -> a -> Ordering
compare' a b
  | a > b     = GT
  | a == b    = EQ
  | otherwise = LT
-- | Initials from a first and last name (errors are in Polish, as in
-- the original exercise); empty inputs raise an error.
inicjaly :: String -> String -> String
inicjaly [] (_:_) = error "Nie podano imienia!"
inicjaly (_:_) [] = error "Nie podano nazwiska!"
inicjaly [] [] = error "Nie podano imienia i nazwiska!"
inicjaly (f:_) (l:_) = [f, l]
-- Sample (weight, height) pairs.
listaBMI = [(80.0,1.8),(50,1.7),(95,1.9)]
-- Sample nested number lists.
xxs = [[1,3,5,2,3,1,2,4,5],[1,2,3,4,5,6,7,8,9],[1,2,4,2,1,6,3,1,3,2,3,6]]
-- | BMI of every (weight, height) pair; errors on an empty list.
calcBMIs :: (RealFloat a) =>[(a, a)] -> [a]
calcBMIs [] = error "WTF a gdzie bmi???"
calcBMIs pairs = [w / h ^ 2 | (w, h) <- pairs]
-- | Same as 'calcBMIs' but with a concrete Double type.
-- NOTE(review): the @Fractional a =>@ constraint mentions a type
-- variable that does not occur in the rest of the type; modern GHC
-- rejects such a signature as ambiguous -- confirm the intended
-- compiler version, or drop the constraint.
calcBMIs' :: Fractional a => [(Double,Double)] -> [Double]
calcBMIs' [] = error "BRAK DANHY"
calcBMIs' listaBMI = [bmi w h |(w,h)<-listaBMI]
    where
        bmi weight height = weight/height^2
-- | Surface area of a cylinder: lateral surface plus both end caps.
cylinder :: (RealFloat a) => a -> a -> a
cylinder r h = lateral + 2 * capArea
  where
    lateral = 2 * pi * r * h
    capArea = pi * r^2
-- | Area of a circle of the given radius.
poleKola :: (RealFloat a) => a -> a
poleKola radius = pi * radius^2
-- | BMI of every pair, with the quotient computed inline.
calcBMIsl :: (RealFloat a) => [(a,a)] -> [a]
calcBMIsl pairs = [w / h ^ 2 | (w, h) <- pairs]
-- | Head of a list, erroring on the empty list.
headc :: [a] -> a
headc [] = error "empty list"
headc (x:_) = x
-- | Describe a list as empty, a singleton, or longer.
describeList :: [a] -> String
describeList xs = "The list is " ++ suffix
  where
    suffix = case xs of
        []  -> "empty."
        [_] -> "a singleton list."
        _   -> "a longer list."
-- CHAPTER 5 RECURSION
-- | Maximum of a list by explicit recursion; errors on empty input.
maximum' :: (Ord a) => [a] -> a
maximum' [] = error "empty list"
maximum' [x] = x
maximum' (x:xs) =
    let rest = maximum' xs
    in if x > rest then x else rest
-- | Maximum of a list via 'max' and recursion; errors on empty input.
maximumMax :: (Ord t) => [t] -> t
maximumMax [] = error "empty list"
maximumMax [x] = x
maximumMax (x:xs) = max x (maximumMax xs)
-- | Build a list of @count@ copies of an item; non-positive counts
-- give the empty list.
replicate' :: (Num a, Ord a) => a -> t -> [t]
replicate' count item
  | count <= 0 = []
  | otherwise  = item : replicate' (count - 1) item
-- | Take the first @n@ elements of a list; non-positive @n@ or an
-- exhausted list ends the result.
take' :: (Num a, Ord a) => a -> [t] -> [t]
take' n _ | n <= 0 = []
take' _ [] = []
take' n (x:rest) = x : take' (n - 1) rest
-- | Reverse a list by appending each head after the reversed tail
-- (quadratic, as in the exercise).
reverse' :: [t] -> [t]
reverse' list = case list of
    []     -> []
    (y:ys) -> reverse' ys ++ [y]
-- | Infinite list of one value.
repeat' :: t -> [t]
repeat' v = v : repeat' v
-- | Infinite list of one value (second formulation).
repeat'' :: t -> [t]
repeat'' v = v : repeat'' v
-- | 'replicate' built from the sibling 'take'' and 'repeat''.
replicateTake :: (Integral a) => a -> t -> [t]
replicateTake count item = take' count (repeat' item)
-- | Pair up two lists element-wise, stopping at the shorter one.
myZip :: [a] -> [b] -> [(a,b)]
myZip [] _ = []
myZip _ [] = []
myZip (a:as) (b:bs) = (a, b) : myZip as bs
-- | Membership test by explicit recursion.
myElem :: (Eq t) => t -> [t] -> Bool
myElem _ [] = False
myElem needle (x:xs) = needle == x || myElem needle xs
-- | Quicksort with the head as pivot; elements equal to the pivot go
-- into the "smaller" partition, so duplicates are kept.
myQuicksort :: (Ord t) => [t] -> [t]
myQuicksort [] = []
myQuicksort (pivot:rest) =
    myQuicksort smaller ++ [pivot] ++ myQuicksort larger
  where
    smaller = [e | e <- rest, e <= pivot]
    larger  = [e | e <- rest, e > pivot]
-- CHAPTER 6 HIGHER ORDER FUNCTIONS
-- | Product of three numbers (curried-application demo).
multThree :: (Num a) => a -> a -> a -> a
multThree a b c = a * b * c
-- | 'compare' partially applied to 100 (note the argument order:
-- GT means 100 is greater than the input).
compareWithHundred :: (Num a, Ord a) => a -> Ordering
compareWithHundred x = compare 100 x
-- | Divide by ten.
divideByTen :: (RealFloat a) => a -> a
divideByTen x = x / 10
-- | Is the character an uppercase ASCII letter?
isUpperAlphanum :: Char -> Bool
isUpperAlphanum c = c `elem` ['A'..'Z']
-- | Negation of 'isUpperAlphanum'.
isLowerAlphanum :: Char -> Bool
isLowerAlphanum c = not (isUpperAlphanum c)
-- | Subtract four.
odejmij4 :: (Num a) => a->a
odejmij4 x = x - 4
-- | Apply a function to a value twice.
applyTwice :: (t -> t) -> t -> t
applyTwice g = g . g
-- | zipWith by explicit recursion; stops at the shorter list.
zipWith' :: (t1->t2->t)->[t1]->[t2]->[t]
zipWith' _ [] _ = []
zipWith' _ _ [] = []
zipWith' g (a:as) (b:bs) = g a b : zipWith' g as bs
-- | Is the list empty? (Alias for 'null'.)
isEmpty :: [t] -> Bool
isEmpty xs = null xs
-- | Swap the two arguments of a binary function.
flip' :: (t1->t2->t)->t2->t1->t
flip' g b a = g a b
-- | map by a right fold.
map' :: (t1->t2)->([t1]->[t2])
map' g = foldr (\a acc -> g a : acc) []
-- | Keep the elements satisfying the predicate (explicit recursion).
filter' :: (t->Bool)->[t]->[t]
filter' _ [] = []
filter' keep (x:xs) =
    if keep x then x : rest else rest
  where rest = filter' keep xs
-- | Quicksort using 'filter' to partition around the head pivot.
quicksort' :: (Ord t) => [t] -> [t]
quicksort' [] = []
quicksort' (p:rest) =
    quicksort' (filter (<= p) rest) ++ [p] ++ quicksort' (filter (> p) rest)
-- | Largest number not above 100000 divisible by 3829: the first hit
-- in the descending infinite list (uses the sibling 'headc').
largestDivisible :: Integral a => a
largestDivisible = headc (filter divisible [100000,99999..])
  where
    divisible n = n `mod` 3829 == 0
-- | Longest prefix whose elements all satisfy the predicate.
takeWhile' :: (t->Bool)->[t]->[t]
takeWhile' keep list = case list of
    (x:xs) | keep x -> x : takeWhile' keep xs
    _               -> []
-- | Collatz chain from x down to 1 (guard formulation).
collatz :: Integer -> [Integer]
collatz 1 = [1]
collatz n
  | even n    = n : collatz (n `div` 2)
  | otherwise = n : collatz (3 * n + 1)
-- | Collatz chain, with the successor computed in a where binding.
collatz' :: Integer -> [Integer]
collatz' n = if n == 1 then [1] else n : collatz' next
  where next = if even n then n `div` 2 else 3 * n + 1
-- | Collatz chain over any Integral (guard formulation).
collatzif :: (Integral a) => a -> [a]
collatzif n
  | n == 1    = [n]
  | even n    = n : collatzif (n `div` 2)
  | otherwise = n : collatzif (3 * n + 1)
-- | Collatz chain over any Integral, successor chosen inline.
collatza :: (Integral a) => a -> [a]
collatza 1 = [1]
collatza n = n : collatza (if even n then n `div` 2 else 3 * n + 1)
-- | How many Collatz chains for starts 1..100 are longer than 15.
numLongChains :: Int
numLongChains = length [c | c <- map collatza [1..100], length c > 15]
-- | Same count, returned in any Num type (uses 'collatz').
numLongChains' :: (Num a) => a
numLongChains' =
    fromIntegral (length (filter (\c -> length c > 15) (map collatz [1..100])))
-- | Same count again, with a point-free length predicate.
numLongChains'' :: Int
numLongChains'' = length (filter ((> 15) . length) (map collatza [1..100]))
-- | Sum of three numbers (lambda demo, eta-expanded).
addThree ::(Num a)=>a->a->a->a
addThree x y z = x + y + z
-- | Swap the arguments of a binary function.
flip'' :: (t1->t2->t)->t2->t1->t
flip'' f x y = f y x
-- | Sum via a left fold.
-- Fixed: 'foldl' replaced by the strict 'foldl'' -- the lazy fold
-- builds an O(n) chain of thunks on long lists; the result is the
-- same for any finite list.
sum'' :: (Num a) => [a] -> a
sum'' xs = foldl' (+) 0 xs
-- | Point-free sum via a strict left fold (same fix as 'sum''').
sumf :: (Num a) => [a] -> a
sumf = foldl' (+) 0
-- | Membership test via a strict left fold; @acc || x == e@ is the
-- same truth table as the original @if x == e then True else acc@.
elemf :: (Eq t) => t -> [t] -> Bool
elemf e es = foldl' (\acc x -> acc || x == e) False es
-- | Sum of a non-empty list.
suma :: (Num a) => [a] -> a
suma = foldl1 (+)
-- | Membership test via a right fold (can stop early on a hit).
nalezyDo :: (Eq t) => t -> [t] -> Bool
nalezyDo e = foldr (\x acc -> x == e || acc) False
-- | map over any Foldable via a right fold.
mapf :: (Foldable t) => (a->b) -> t a -> [b]
mapf f = foldr ((:) . f) []
-- | map via a left fold (appends, so quadratic -- kept as a demo).
mapfl :: (t1->t2)->[t1]->[t2]
mapfl f = foldl (\done x -> done ++ [f x]) []
-- | Maximum by explicit recursion; errors (in Polish) on empty input.
maksimum :: (Ord a) => [a] -> a
maksimum [] = error "Pusta lista"
maksimum [x] = x
maksimum (x:xs) =
    let best = maksimum xs
    in if x >= best then x else best
-- | Same as 'maksimum', written with guards and a where binding.
maksimumif :: (Ord a) => [a] -> a
maksimumif [] = error "Pusta lista"
maksimumif [x] = x
maksimumif (x:xs)
  | x > best  = x
  | otherwise = best
  where best = maksimumif xs
-- | Maximum of a non-empty Foldable via foldr1.
maximumf :: (Ord a, Foldable t) => t a -> a
maximumf = foldr1 max
-- | Reverse via a left fold (cons onto the accumulator).
reversef :: (Foldable t) => t a -> [a]
reversef = foldl (\done x -> x : done) []
-- | Reverse via a right fold (appends, quadratic -- kept as a demo).
reversefr :: (Foldable t) => t a -> [a]
reversefr = foldr (\x done -> done ++ [x]) []
-- | Product of a non-empty Foldable.
productf :: (Num a, Foldable t) => t a -> a
productf = foldr1 (*)
-- | filter by explicit recursion.
filtruj :: (t->Bool)->[t]->[t]
filtruj _ [] = []
filtruj keep (x:xs) =
    if keep x then x : rest else rest
  where rest = filtruj keep xs
-- | filter over any Foldable via a right fold.
filterf :: (Foldable t) => (a->Bool)->t a ->[a]
filterf keep = foldr step []
  where step x acc = if keep x then x : acc else acc
-- | First element of a non-empty Foldable.
glowa :: (Foldable t) => t a -> a
glowa = foldr1 const
-- | Last element of a non-empty Foldable.
ostatni :: (Foldable t) => t a -> a
ostatni = foldl1 (\_ latest -> latest)
-- | Head of a list, with a (Polish) error on the empty list.
glowka :: [a]->a
glowka [] = error "glowka w mur"
glowka (x:_)= x
-- | Reverse by explicit recursion (quadratic append).
odwrocListe :: [t]->[t]
odwrocListe list = case list of
    []     -> []
    (x:xs) -> odwrocListe xs ++ [x]
-- | Reverse via a left fold.
revfol :: (Foldable t) => t a -> [a]
revfol = foldl (\done x -> x : done) []
--($) :: (a->b)->a->b
--f $ x = f x
--map ($3) $ map (*) [1..10]
--map ($2)[(*3),(+3),(/3),(^2),sqrt]
--(.) :: (b->c)->(a->b)->a->c
--f . g = \x -> f (g x)
--(.) :: (b->c)->(a->b)->a->c
--f . g = \x -> f (g x)
--numUniques :: (Eq a) => [a] -> Int
--numUniques = \xs -> length $ nub xs
--import qualified Data.List as concatMap
--- CHAPTER 8 MAKING OUR OWN TYPES AND TYPECLASSES ---
-- | A shape is a circle (centre, radius) or an axis-aligned rectangle
-- (two opposite corners).
data Shape = Circle Point Float | Rectangle Point Point deriving Show
-- | A point in the plane.
data Point = Point Float Float deriving Show
-- | Area of a shape.
surface :: Shape -> Float
surface (Circle _ radius) = pi * radius^2
surface (Rectangle (Point x1 y1) (Point x2 y2)) = abs (x2 - x1) * abs (y2 - y1)
-- | Translate a shape by (dx, dy).
nudge :: Float -> Float -> Shape -> Shape
nudge dx dy (Circle (Point cx cy) r) = Circle (Point (cx + dx) (cy + dy)) r
nudge dx dy (Rectangle (Point x1 y1) (Point x2 y2)) =
    Rectangle (Point (x1 + dx) (y1 + dy)) (Point (x2 + dx) (y2 + dy))
-- | A circle of the given radius centred at the origin.
baseCircle :: Float -> Shape
baseCircle r = Circle (Point 0 0) r
-- | A w-by-h rectangle with one corner at the origin.
baseRect :: Float -> Float -> Shape
baseRect w h = Rectangle (Point 0 0) (Point w h)
-- | A person, as a record (record-syntax demo).
data Person = Person { firstName :: String
                     , lastName :: String
                     , age :: Int
                     , height :: Float
                     , phoneNumber :: String
                     , flavor :: String
                     } deriving Show
-- | A car, as a record.
data Car = Car {company :: String, model :: String, year :: Int} deriving (Show)
|
RAFIRAF/HASKELL
|
baby.hs
|
mit
| 12,061
| 22
| 14
| 3,169
| 5,978
| 3,136
| 2,842
| 296
| 4
|
-- |
-- Module: Parser.CodeInfo
-- Copyright: (c) 2015-2016 Martijn Rijkeboer <mrr@sru-systems.com>
-- License: MIT
-- Maintainer: Martijn Rijkeboer <mrr@sru-systems.com>
--
-- CodeInfo type.
module Parser.CodeInfo where
import Data.Text.Lazy (Text)
import System.FilePath (FilePath)
import Prelude (Show, String)
-- | Everything needed to write one generated source file.
data CodeInfo = CodeInfo
  { directories :: !FilePath -- ^ directory part of the output path
  , filename :: !String -- ^ output file name
  , code :: !Text -- ^ the generated source text
  } deriving Show
|
sru-systems/protobuf-simple
|
src/Parser/CodeInfo.hs
|
mit
| 463
| 0
| 9
| 99
| 78
| 50
| 28
| 15
| 0
|
{-# LANGUAGE CPP #-}
{- Windows compatibility layer
Copyright (c) 2005-2011 John Goerzen <jgoerzen@complete.org>
All rights reserved.
For license and copyright information, see the file LICENSE
-}
{- |
Module : System.IO.WindowsCompat
Copyright : Copyright (C) 2005-2011 John Goerzen
License : BSD3
Maintainer : John Goerzen <jgoerzen@complete.org>
Stability : provisional
Portability: portable
Provides some types and related items on Windows to be compatible with
the System.Posix.* libraries
See also "System.IO.StatCompat", which this module re-exports.
On non-Windows platforms, this module does nothing.
On Windows, it re-exports "System.IO.StatCompat". It also provides various
file type information modes that are otherwise in "System.Posix.Types" or
"System.Posix.Files". It also provides
a rudimentary implemention of getFileStatus that emulates the Posix call
to stat(2).
Common usage might be like this:
>import System.Posix.Types
>#if (defined(mingw32_HOST_OS) || defined(mingw32_TARGET_OS) || defined(__MINGW32__))
>import System.IO.WindowsCompat
>#else
>import System.Posix.Files
>#endif
Or, to avoid having to use CPP and make things even easier, just import
"System.IO.PlafCompat", which essentially does the above.
-}
module System.IO.WindowsCompat
#if !(defined(mingw32_HOST_OS) || defined(mingw32_TARGET_OS) || defined(__MINGW32__))
where
#else
(module System.IO.StatCompat, module System.IO.WindowsCompat)
where
import System.Posix.Types
import Data.Bits
import System.IO.StatCompat
import System.Posix.Consts
import System.Time.Utils
import System.Directory
import Data.Time
import Data.Time.Clock.POSIX
-- these types aren't defined here
-- POSIX-style permission bits (octal), mirroring System.Posix.Files.
nullFileMode :: FileMode
nullFileMode = 0
-- Owner read/write/execute bits.
ownerReadMode :: FileMode
ownerReadMode = 0o00400
ownerWriteMode :: FileMode
ownerWriteMode = 0o00200
ownerExecuteMode :: FileMode
ownerExecuteMode = 0o00100
-- Group read/write/execute bits.
groupReadMode :: FileMode
groupReadMode = 0o00040
groupWriteMode :: FileMode
groupWriteMode = 0o00020
groupExecuteMode :: FileMode
groupExecuteMode = 0o00010
-- Other (world) read/write/execute bits.
otherReadMode :: FileMode
otherReadMode = 0o00004
otherWriteMode :: FileMode
otherWriteMode = 0o00002
otherExecuteMode :: FileMode
otherExecuteMode = 0o00001
-- setuid / setgid bits.
setUserIDMode :: FileMode
setUserIDMode = 0o0004000
setGroupIDMode :: FileMode
setGroupIDMode = 0o0002000
-- Read+write for owner, group and other (0o666).
stdFileMode :: FileMode
stdFileMode = ownerReadMode .|. ownerWriteMode .|.
              groupReadMode .|. groupWriteMode .|.
              otherReadMode .|. otherWriteMode
-- Full rwx masks per class, and all of them combined.
ownerModes :: FileMode
ownerModes = 0o00700
groupModes :: FileMode
groupModes = 0o00070
otherModes :: FileMode
otherModes = 0o00007
accessModes :: FileMode
accessModes = ownerModes .|. groupModes .|. otherModes
-- | Whole seconds since the POSIX epoch (fractional part floored).
utcTimeToSeconds :: Num a => UTCTime -> a
utcTimeToSeconds = fromInteger . floor . utcTimeToPOSIXSeconds
----------- stat
type FileStatus = FileStatusCompat
-- | Rudimentary emulation of POSIX stat(2) built on System.Directory
-- queries. Several fields are dummies: device/file/special IDs are
-- -1, owner/group are 0, link count is 1 and fileSize is always 0.
-- NOTE(review): 'isdir' and 'perms' are queried but never used; the
-- mode is chosen from 'isfile' alone -- confirm this is intended.
getFileStatus :: FilePath -> IO FileStatus
getFileStatus fp =
    do isfile <- doesFileExist fp
       isdir <- doesDirectoryExist fp
       perms <- getPermissions fp
       modct <- getModificationTime fp
#if MIN_VERSION_directory(1,2,0)
       let epochtime = utcTimeToSeconds modct
#else
       let epochtime = clockTimeToEpoch modct
#endif
       return $ FileStatusCompat {deviceID = -1,
                                  fileID = -1,
                                  fileMode = if isfile then regularFileMode
                                                       else directoryMode,
                                  linkCount = 1,
                                  fileOwner = 0,
                                  fileGroup = 0,
                                  specialDeviceID = -1,
                                  fileSize = 0, -- fixme: hFileSize?
                                  accessTime = fromInteger epochtime,
                                  modificationTime = fromInteger epochtime,
                                  statusChangeTime = fromInteger epochtime
                                 }
#endif
|
haskellbr/missingh
|
missingh-all/src/System/IO/WindowsCompat.hs
|
mit
| 4,025
| 0
| 8
| 996
| 113
| 72
| 41
| 2
| 0
|
{-# LANGUAGE OverloadedStrings #-}
import TestUtils
import qualified Data.ByteString as B
import VectorsHash
import Crypto.Nettle.Hash
-- | Hash the source bytes with algorithm @a@ and compare the digest
-- against the expected hex string from the vector file.
assertHash :: HashAlgorithm a => (B.ByteString, String) -> Tagged a Assertion
assertHash (src, h) = do
	h' <- hash src
	return $ assertEqualHex "" (hs h) h'
-- | Build the test group for one algorithm: look up its test vectors
-- by name and run 'assertHash' over each of them.
testHash :: HashAlgorithm a => Tagged a Test
testHash = do
	name <- hashName
	vectors <- findHashTestVectors name
	results <- mapM assertHash vectors
	return $ testCases ("testing HashAlgorithm " ++ name) results
	-- return $ debugTestCases ("testing HashAlgorithm " ++ name) results
-- return $ debugTestCases ("testing HashAlgorithm " ++ name) results
-- | Run one test group per supported algorithm; 'witness' pins the
-- Tagged type parameter to select the algorithm instance.
main = defaultMain
	[ testHash `witness` (undefined :: GOSTHASH94)
	, testHash `witness` (undefined :: MD2)
	, testHash `witness` (undefined :: MD4)
	, testHash `witness` (undefined :: MD5)
	, testHash `witness` (undefined :: RIPEMD160)
	, testHash `witness` (undefined :: SHA1)
	, testHash `witness` (undefined :: SHA224)
	, testHash `witness` (undefined :: SHA256)
	, testHash `witness` (undefined :: SHA384)
	, testHash `witness` (undefined :: SHA512)
	, testHash `witness` (undefined :: SHA3_224)
	, testHash `witness` (undefined :: SHA3_256)
	, testHash `witness` (undefined :: SHA3_384)
	, testHash `witness` (undefined :: SHA3_512)
	]
|
stbuehler/haskell-nettle
|
src/Tests/Hash.hs
|
mit
| 1,223
| 5
| 10
| 199
| 391
| 230
| 161
| 30
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.