code
stringlengths 5
1.03M
| repo_name
stringlengths 5
90
| path
stringlengths 4
158
| license
stringclasses 15
values | size
int64 5
1.03M
| n_ast_errors
int64 0
53.9k
| ast_max_depth
int64 2
4.17k
| n_whitespaces
int64 0
365k
| n_ast_nodes
int64 3
317k
| n_ast_terminals
int64 1
171k
| n_ast_nonterminals
int64 1
146k
| loc
int64 -1
37.3k
| cycloplexity
int64 -1
1.31k
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|
-- Module ParseTRL for parsing The Robot Language (TRL)
-- by: Anton Erholt <aerholt@kth.se>
module ParseTRL
(main) where
import System.IO
import Control.Monad
import Text.ParserCombinators.Parsec
import Text.ParserCombinators.Parsec.Expr
import Text.ParserCombinators.Parsec.Language
import qualified Text.ParserCombinators.Parsec.Token as Token
--- Represent the robot operations as data types
-- | AST for TRL robot commands.  'Seq' groups several semicolon-separated
-- statements; the other constructors mirror the language's reserved words.
data Statement = Seq [Statement]
               | TURN Double        -- ^ turn by an angle (unit not shown here -- presumably degrees, TODO confirm)
               | GOFORWARD Integer  -- ^ move forward for an integer amount
               | GOBACKWARD Integer -- ^ move backward for an integer amount
               | DIG Integer        -- ^ dig for an integer amount
               deriving Show
--- Define The Robot Lang
-- | Token-level definition of TRL: '#' line comments, alphanumeric
-- identifiers, and the command keywords as reserved names.  Block
-- comments are disabled (empty start/end markers).
languageDef =
  emptyDef { Token.commentStart    = ""
           , Token.commentEnd      = ""
           , Token.commentLine     = "#"
           , Token.identStart      = letter
           , Token.identLetter     = alphaNum
           , Token.reservedNames   = [ "TURN"
                                     , "GO"
                                     , "DIG"
                                     , "FORWARD"
                                     , "BACKWARD"
                                     ]
           , Token.reservedOpNames = []
           }
--- Build the lexer
-- | Lexer generated from 'languageDef'.
lexer = Token.makeTokenParser languageDef
-- Convenience aliases so the grammar below reads cleanly; each one
-- also consumes trailing whitespace, per Parsec's token convention.
reserved   = Token.reserved   lexer
semi       = Token.semi       lexer
integer    = Token.integer    lexer
double     = Token.float      lexer
identifier = Token.identifier lexer
whiteSpace = Token.whiteSpace lexer
--- Build the parser
-- | Entry-point parser: skip leading whitespace, then parse statements.
trlParser :: Parser Statement
trlParser = whiteSpace >> statement

statement :: Parser Statement
statement = seqOfStatements

-- | One or more statements separated by semicolons.  A single statement
-- is returned unwrapped; several are grouped under 'Seq'.  The pattern
-- match replaces the original @length list == 1 then head list@, which
-- relied on the partial 'head'.
seqOfStatements :: Parser Statement
seqOfStatements = do
  list <- sepBy1 statement' semi   -- sepBy1 guarantees a non-empty list
  return $ case list of
    [single] -> single
    _        -> Seq list
-- | A single TRL command: TURN, GO or DIG.
statement' :: Parser Statement
statement' = choice [turn, go, dig]
-- | Parses a @TURN \<float\>@ command.
turn :: Parser Statement
turn = fmap TURN (reserved "TURN" >> double)
-- | Parses @GO FORWARD \<int\>@ or @GO BACKWARD \<int\>@.
go :: Parser Statement
go = do
  reserved "GO"
  build <- direction
  amount <- integer
  return (build amount)

-- | Maps the direction keyword onto the matching constructor.
direction = choice
  [ reserved "FORWARD"  >> return GOFORWARD
  , reserved "BACKWARD" >> return GOBACKWARD
  ]
-- | Parses a @DIG \<int\>@ command.
dig :: Parser Statement
dig = reserved "DIG" >> fmap DIG integer
--- Build an AST
-- | Parse a TRL program into its AST, crashing with the parse error
-- message on malformed input (test/REPL helper, not production-safe).
parseString :: String -> Statement
parseString str = either (error . show) id (parse trlParser "" str)
-- Simple Tests
-- Sample inputs exercising each statement form (evaluate t1 in GHCi).
s1 = "TURN 3.14"
s2 = "TURN 15.0;TURN 45.0"
s3 = "GO FORWARD 120;GO BACKWARD 10"
-- NOTE(review): 'te' is just an alias for 'parseString'; neither t1
-- nor te is exercised by 'main'.
t1 = parseString s1
te = parseString
main :: IO()
main = do
  putStrLn "Hello world!"
|
antonaut/the-robot-language
|
lexparse.hs
|
mit
| 2,544
| 0
| 10
| 787
| 594
| 318
| 276
| 75
| 2
|
module Y2015.D13Spec (spec) where
import Y2015
import Test.Hspec
-- | The Advent of Code 2015 day 13 example: pairwise happiness deltas
-- for four guests, one sentence per line.
input = unlines [ "Alice would gain 54 happiness units by sitting next to Bob."
                , "Alice would lose 79 happiness units by sitting next to Carol."
                , "Alice would lose 2 happiness units by sitting next to David."
                , "Bob would gain 83 happiness units by sitting next to Alice."
                , "Bob would lose 7 happiness units by sitting next to Carol."
                , "Bob would lose 63 happiness units by sitting next to David."
                , "Carol would lose 62 happiness units by sitting next to Alice."
                , "Carol would gain 60 happiness units by sitting next to Bob."
                , "Carol would gain 55 happiness units by sitting next to David."
                , "David would gain 46 happiness units by sitting next to Alice."
                , "David would lose 7 happiness units by sitting next to Bob."
                , "David would gain 41 happiness units by sitting next to Carol."
                ]
-- | Day 13 spec: the worked example from the puzzle text yields 330.
spec :: Spec
spec = parallel $
  describe "Day 13" $
    describe "solveSeating" $
      it "maximizes happiness in the example case" $
        solveSeating input `shouldBe` 330
|
tylerjl/adventofcode
|
test/Y2015/D13Spec.hs
|
mit
| 1,262
| 0
| 16
| 420
| 116
| 65
| 51
| 21
| 1
|
{-# LANGUAGE OverloadedStrings #-}
module Shifts.Server (runServer) where
import Control.Monad (filterM, liftM)
import Control.Monad.IO.Class (liftIO)
import Control.Monad.Trans.Either
import Data.List (sort)
import Data.Text hiding (filter)
import Data.Time.Calendar
import Data.Time.Clock (getCurrentTime, utctDay)
import Network.Wai
import Network.Wai.Handler.Warp
import Seeds as Seeds
import Servant
import Shifts.API as API
import Shifts.Types
-- | Handler monad for every endpoint (servant 0.4-era EitherT style).
type Handler a = EitherT ServantErr IO a
-- | Endpoint implementations, in the order declared by 'ShiftsAPI'.
server :: Server ShiftsAPI
server =
  currentRoid
  :<|> showRoid
  :<|> listShifts
-- | NOTE(review): stub -- the query parameter is ignored and the first
-- seeded engineer is always returned; 'Prelude.head' crashes if
-- 'Seeds.engineers' is empty.
currentRoid :: Maybe Text -> Handler Eng
currentRoid inDuration = return $ Prelude.head Seeds.engineers
-- | NOTE(review): stub -- 'initials' is unused, same caveats as above.
showRoid :: Text -> Handler Eng
showRoid initials = return $ Prelude.head Seeds.engineers
-- | List all seeded shifts; with a @startingAfter@ duration string,
-- only shifts whose start day is after the resolved date.
listShifts :: Maybe Text -> Handler [Shift]
listShifts startingAfter =
  case startingAfter of
    Nothing          -> return Seeds.shifts
    Just startString -> liftIO (filteredShifts startString)

-- | Shifts starting strictly after the day denoted by the duration
-- string.  FIX: the original signature (@Text -> [Shift]@) could not
-- typecheck because 'parseDurationString' runs in IO; the result must
-- be @IO [Shift]@, lifted into the handler above with 'liftIO'.
filteredShifts :: Text -> IO [Shift]
filteredShifts start = do
  parsedDay <- parseDurationString start
  return $ filter (\s -> _startDay s > parsedDay) Seeds.shifts
-- | Today's date, derived from the current UTC time.
currentDay :: IO Day
currentDay = fmap utctDay getCurrentTime
-- | Resolve a duration string to an absolute day relative to today.
-- Only \"7d\" is recognised; anything else means \"today\".
parseDurationString :: Text -> IO Day
parseDurationString duration = fmap (addDays offset) currentDay
  where
    offset = case duration of
      "7d" -> 7
      _    -> 0
-- | WAI application serving 'ShiftsAPI'.
app :: Application
app = serve API.shiftsAPI server
-- | Run the warp server on the given port (blocks forever).
runServer :: Port -> IO ()
runServer port = run port app
|
tippenein/shifts
|
src/Shifts/Server.hs
|
mit
| 1,483
| 0
| 12
| 248
| 452
| 242
| 210
| 45
| 2
|
import qualified Data.List as List
import qualified Data.Ord as Ord
-- | AoC 2016 day 6: read one message per stdin line, then print the
-- most frequent character of each column (columns via 'transpose').
main = do
  messages <- lines <$> getContents
  let characters = List.transpose messages
  let modalCharacters = map modal characters
  putStrLn modalCharacters
-- | Most frequent element of a (non-empty) list.  'maximumBy' returns
-- the last of equal maxima, matching the original
-- @last . sortBy (comparing length)@ (stable sort) tie behaviour:
-- frequency ties go to the larger element.  Still partial on [].
modal :: Ord a => [a] -> a
modal = head . List.maximumBy (Ord.comparing length) . List.group . List.sort
|
SamirTalwar/advent-of-code
|
2016/AOC_06_1.hs
|
mit
| 313
| 0
| 11
| 54
| 105
| 53
| 52
| 8
| 1
|
module WangsAlgorithm.Parser where
import WangsAlgorithm.Proposition
import Text.ParserCombinators.Parsec
import Text.ParserCombinators.Parsec.Expr
import Data.Char (isSpace)
-- | Parses a proposition: an expression tree built from the operator
-- table below over parenthesised sub-propositions and multi-letter atoms.
parseProp :: Parser Proposition
parseProp = buildExpressionParser operators factor <?> "Proposition"
  where
    factor = do { _ <- char '(' ; x <- parseProp ; _ <- char ')' ; return x }
             <|> fmap Atom (many1 letter)
             <?> "Proposition"
    -- Precedence tiers, tightest first: negation, conjunction,
    -- disjunction, implication (right-associative).  Each connective
    -- accepts several concrete spellings (ASCII and Unicode).
    operators = [ unaries Not ["~", "-", "¬"]
                , binaries And ["&", "^", "∧"]
                , binaries Or ["|", "∨"]
                , binaries Implies ["->", "⊃", "→"] ]
    -- The chainl1-with-(.) trick lets a prefix operator stack
    -- (e.g. "~~p"): repeated occurrences are composed before applying.
    unary c n = Prefix . chainl1 (string n >> return c) $ return (.)
    binary c n = Infix (string n >> return c) AssocRight
    unaries c = map (unary c)
    binaries c = map (binary c)
-- | Parses a comma-separated proposition list without the surrounding
-- brackets; the empty list is accepted.
parseProps :: Parser [Proposition]
parseProps = nonEmptyList <|> return []
  where
    nonEmptyList = do
      firstProp <- parseProp
      rest <- (char ',' >> parseProps) <|> return []
      return (firstProp : rest)
-- | Parses a proposition list enclosed in square brackets.
parsePropList :: Parser [Proposition]
parsePropList = between (char '[') (char ']') parseProps
-- | Parses a sequent: a bracketed list, a turnstile ("|-" or "⊢"),
-- then another bracketed list.
parseSequent :: Parser Sequent
parseSequent = do
  antecedents <- parsePropList
  _ <- string "|-" <|> string "⊢"
  succedents <- parsePropList
  return (antecedents `proves` succedents)
-- | Strip all whitespace from the input, then parse it as a sequent.
readSequent :: String -> Either ParseError Sequent
readSequent = parse parseSequent "Sequent" . filter (not . isSpace)
|
joom/WangsAlgorithm
|
src/WangsAlgorithm/Parser.hs
|
mit
| 1,521
| 0
| 12
| 380
| 528
| 269
| 259
| 37
| 1
|
{-# LANGUAGE GADTs #-}
{-# LANGUAGE RankNTypes #-}
module EventSourceHelper
( Fold
, QueryT, Query, CommandT, TransactionT
, fold, fold1
, trans, query
, runQ, runC, runTX
, ensure, find
, lastHappenedAfter, lastHappenedBefore
) where
import Data.Functor.Identity
import Data.Profunctor
import Control.Arrow ((***))
import Control.Applicative ((<**>), liftA2)
import Control.Monad (join)
-- | Free applicative over a functor @f@: either a pure value, or an
-- effect @f a@ paired with the (free) continuation consuming its result.
data Ap f a where
  Pure :: a -> Ap f a
  Ap :: f a -> Ap f (a -> b) -> Ap f b
instance Functor (Ap f) where
  fmap f (Pure a) = Pure (f a)
  fmap f (Ap x y) = Ap x ((f .) <$> y)
instance Applicative (Ap f) where
  pure = Pure
  Pure f <*> y = fmap f y
  Ap x y <*> z = Ap x $ flip <$> y <*> z
-- | Interpret the free structure back into @f@ itself.
retractAp :: Applicative f => Ap f a -> f a
retractAp (Pure a) = pure a
retractAp (Ap x y) = x <**> retractAp y
-- | Apply a natural transformation to every effect layer.
transAp :: (forall x. f x -> g x) -> Ap f a -> Ap g a
transAp _ (Pure a) = Pure a
transAp f (Ap x y) = Ap (f x) (transAp f y)
-- | A left fold over events @e@ with hidden state @x@: a step function,
-- an initial state, and an effectful finaliser into @f a@.
data Fold f e a where
  Fold :: (e -> x -> x) -> x -> (x -> f a) -> Fold f e a
instance Functor f => Profunctor (Fold f) where
  lmap f (Fold cons nil fin) = Fold (cons . f) nil fin
  rmap f (Fold cons nil fin) = Fold cons nil (fmap f . fin)
instance Functor f => Functor (Fold f e) where
  fmap = rmap
instance Applicative f => Applicative (Fold f e) where
  pure = liftf . pure
  -- Run both folds in parallel over a paired state; (***) applies each
  -- step to its own half of the tuple.
  (Fold cons nil fin) <*> (Fold cons' nil' fin') =
    Fold (liftA2 (***) cons cons') (nil, nil') (\(x, x') -> fin x <*> fin' x')
-- | Collapse an effectful result inside the finaliser.
joinf :: Monad f => Fold f e (f a) -> Fold f e a
joinf (Fold cons nil fin) = Fold cons nil (join . fin)
-- | A fold that ignores its input and always produces @a@.
liftf :: f a -> Fold f e a
liftf a = Fold (\_ _ -> ()) () (const a)
-- | Run a fold over an event list (events are consumed left to right);
-- '<$>' here is composition in the function functor.
-- NOTE(review): lazy 'foldl' -- may build thunks on long event lists.
runf :: Fold f e a -> [e] -> f a
runf (Fold cons nil fin) = fin <$> foldl (flip cons) nil
-- | Natural transformation on the finaliser's effect.
transf :: (forall x. f x -> g x) -> (forall x y. Fold f x y -> Fold g x y)
transf f (Fold cons nil fin) = Fold cons nil (rmap f fin)
-- | Contramap the event type.
emapf :: (e' -> e) -> Fold f e a -> Fold f e' a
emapf f (Fold cons nil fin) = Fold (cons . f) nil fin
-- Free Applicative of Folds.
-- | A query over events @e@ producing @f a@: a free-applicative
-- combination of primitive folds.
newtype QueryT f e a = Query (Ap (Fold f e) (f a))
-- | Flatten a query back into a single fold.
toFold :: Monad f => QueryT f e a -> Fold f e a
toFold (Query x) = joinf . retractAp $ x
-- | Primitive query from a step function and a seed.
fold :: Applicative f => (e -> a -> a) -> a -> QueryT f e a
fold f a = Query $ Ap (Fold f a pure) $ Pure pure
-- | Like 'fold' but seeded with 'mempty'.
fold1 :: (Applicative f, Monoid a) => (e -> a -> a) -> QueryT f e a
fold1 f = fold f mempty
-- | Pure (effect-free) query.
type Query e a = QueryT Identity e a
-- | Collapse an effectful result inside the query.
joinq :: Monad f => QueryT f e (f a) -> QueryT f e a
joinq (Query (Pure a)) = Query $ Ap (liftf a) $ Pure id
joinq (Query (Ap l qf)) = Query $ Ap l $ fmap join <$> qf
-- | Change the query's effect via a natural transformation (applied to
-- every fold layer and to the final result).
trans :: Functor f => (forall x. f x -> g x) -> QueryT f e a -> QueryT g e a
trans f (Query x) = Query $ transAp (transf f) (fmap f x)
-- | Lift a pure query into any applicative effect.
query :: Applicative f => Query e a -> QueryT f e a
query = trans $ pure . runIdentity
instance Functor f => Profunctor (QueryT f) where
  lmap f (Query x) = Query $ transAp (emapf f) x
  rmap f (Query x) = Query $ fmap (fmap f) x
instance Functor f => Functor (QueryT f e) where
  fmap = rmap
instance Applicative f => Applicative (QueryT f e) where
  pure = Query . Pure . pure
  Query x <*> Query y = Query $ liftA2 (<*>) x y
-- | Run a query over an event log.
runQ :: Monad f => QueryT f e a -> [e] -> f a
runQ = runf . toFold
-- A command is a query that returns an event to be appended.
type CommandT f e = QueryT f e [e]
type TransactionT f e = [CommandT f e]
-- | Run a command: the produced events are appended to the input log.
runC :: Monad f => CommandT f e -> [e] -> f [e]
runC q es = (es ++) <$> runQ q es
-- | Run a list of commands in order, threading the growing log so each
-- command sees the events appended by the previous ones.
runTX :: Monad f => TransactionT f e -> [e] -> f [e]
runTX = sequ . map runC
  where sequ (f : fs) es = f es >>= sequ fs
        sequ [] es = pure es
-- | Turn a boolean query into a guard: succeed with () when it holds,
-- otherwise fail the whole Either-effect query with @a@.
ensure :: Query e Bool -> a -> QueryT (Either a) e ()
ensure q a = joinq $ rmap ensure' $ query q
  where ensure' True = Right ()
        ensure' _ = Left a
-- | The first event satisfying the predicate (once a match is held the
-- @Nothing@ guard fails, so later matches are ignored).
find :: (e -> Bool) -> Query e (Maybe e)
find p = fold findE Nothing
  where findE e Nothing | p e = Just e
        findE _ s = s
-- | Index of the last occurrence of @e@ in the event stream, if any.
-- FIX: the original step never advanced the position counter, so every
-- match was reported at index 0 -- which made the happened-before/
-- happened-after comparisons below always False whenever both events
-- were present.  The counter now increments on every event.
lastIndexOf :: (Eq e, Applicative f) => e -> QueryT f e (Maybe Integer)
lastIndexOf e = rmap snd $ fold step (0, Nothing)
  where step e' (i, lastSeen) = (i + 1, if e == e' then Just i else lastSeen)
-- | Did the last occurrence of @e@ come before that of @e'@?
-- NOTE(review): relies on the Ord instance of Maybe, where
-- @Nothing < Just _@ -- an absent event counts as "before"; confirm
-- that is the intended semantics.
lastHappenedBefore :: (Eq e, Applicative f) => e -> e -> QueryT f e Bool
lastHappenedBefore e e' = (<) <$> lastIndexOf e <*> lastIndexOf e'
-- | Mirror image of 'lastHappenedBefore'.
lastHappenedAfter :: (Eq e, Applicative f) => e -> e -> QueryT f e Bool
lastHappenedAfter e e' = (>) <$> lastIndexOf e <*> lastIndexOf e'
|
srijs/haskell-eventsource
|
EventSourceHelper.hs
|
mit
| 4,337
| 0
| 11
| 1,110
| 2,348
| 1,187
| 1,161
| 101
| 2
|
module Main (main) where
import Phong (main)
|
tfausak/phong
|
executable/Main.hs
|
mit
| 46
| 0
| 5
| 8
| 17
| 11
| 6
| 2
| 0
|
-- ------------------------------------------------------
-- Tying the recursive knot
{- from earlier...
data List' a = Nil' | Cons' a (List' a)
data Tree a = Leaf a | Node a (Tree a) (Tree a)
-}
-- | Fixed point of a two-parameter shape @s@: @s@'s second argument is
-- tied back to the whole 'Fix' type, giving recursion without the
-- shape itself recurring.
data Fix s a = FixT {getFix :: s a (Fix s a)}
-- rephrase List_ and Tree_ so that we can express them using the Fix type
-- ('r' represents the recursion of the type)
data List_ a r = Nil_ | Cons_ a r
  deriving Show
data Tree_ a r = Leaf_ a | Node_ a r r
  deriving Show
-- List_ and Tree_ don't explicitly recur, so that Fix
-- "ties the recursive knot around the shape"
-- Now, we can write
type ListF a = Fix List_ a
type TreeF a = Fix Tree_ a
-- We can construct List_ lists in the familiar way:
aList1 = Cons_ 12 Nil_
-- aList :: List_ Integer (List_ a r)
aList2 = Cons_ 12 (Cons_ 13 Nil_)
-- aList2 :: List_ Integer (List_ Integer (List_ a r))
-- To construct ListF lists: each tail is wrapped in FixT.
aListF1 = (Cons_ 13 (FixT Nil_))
-- aListF1 :: List_ Integer (Fix List_ a)
aListF2 = (FixT (Cons_ 13 (FixT Nil_)))
-- aListF2 :: Fix List_ Integer
aListF3 = (Cons_ 12 (FixT (Cons_ 13 (FixT Nil_))))
-- aListF3 :: List_ Integer (Fix List_ Integer)
-- | The running example list [12, 13] used by main1..main7.
aListF :: ListF Integer -- type alias for "Fix List_ Integer"
aListF = FixT (Cons_ 12 (FixT (Cons_ 13 (FixT Nil_))))
-- ------------------------------------------------------
-- Generic Map
-- mapL >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
-- | Map over a ListF: unwrap one Fix layer, transform the head and
-- recurse on the tail, re-wrapping each result.
mapL f listF = case list_ of
    (Cons_ x r) -> FixT $ Cons_ (f x) (mapL f r)
    Nil_ -> FixT Nil_
  where
    list_ = getFix listF
-- This is clumsy because we have to unwrap the list (with getFix)
-- and then re-wrap the result with FixT in both base clauses.
-- | Render a ListF as comma-separated elements ending in "Nil_".
showListF :: (Show a) => ListF a -> String
showListF (FixT (Cons_ x r))
  = (show x) ++ ", " ++ (showListF r)
showListF (FixT (Nil_))
  = "Nil_"
-- | Demo: doubles every element of the example list.
main1 = putStrLn . showListF $ mapL (*2) aListF
-- 24, 26, Nil_
-- gmap with BIFUNCTOR >>>>>>>>>>>>>>>>>>>>>>>
-- Bifunctor is just a Functor that applies 2 functions
-- to a type instead of one:
-- | Functor over both type parameters of a shape.
class Bifunctor s where
  bimap :: (a -> c) -> (b -> d) -> (s a b -> s c d)
-- (already defined in Data.Bifunctor)
instance Bifunctor List_ where
  bimap f g Nil_ = Nil_
  bimap f g (Cons_ x r) = Cons_ (f x) (g r)
instance Bifunctor Tree_ where
  bimap f g (Leaf_ x) = Leaf_ (f x)
  bimap f g (Node_ x rl rr) = Node_ (f x) (g rl) (g rr)
-- note: 'g' is applied to the recursive part(s) of the data type
-- Now that the List_ and Tree_ types are bifunctors,
-- we can write a generic map:
-- | Generic map over any Fix-ed bifunctor shape: unwrap, map f over
-- the elements while recursing into the shape, re-wrap.
gmap :: Bifunctor s => (a -> b) -> Fix s a -> Fix s b
gmap f = FixT . bimap f (gmap f) . getFix
-- * the 1st function passed to bimap is the mapping function 'f'
-- * the 2nd function is (gmap f), which explicitly applies gmap
-- to the recursive part of the data structure.
main2 = putStrLn . showListF $ gmap (*2) aListF
-- 24, 26, Nil_
-- ------------------------------------------------------
-- Generic Fold
-- generic fold with bimap
-- | Generic fold (catamorphism): replaces each FixT wrapper with the
-- algebra f, bottom-up.
gfold :: Bifunctor s => (s a b -> b) -> Fix s a -> b
gfold f = f . bimap id (gfold f) . getFix
-- gfold replaces occurrences of FixT with f:
-- FixT (Cons_ 12 (FixT (Cons_ 13 (FixT Nil_))))
-- f (Cons_ 12 (f (Cons_ 13 (f Nil_))))
-- To fold together a sum, we create an adder (algebra for List_):
addL (Cons_ x r) = x + r
addL Nil_ = 0
-- aListF = FixT (Cons_ 12 (FixT (Cons_ 13 (FixT Nil_))))
main3 = print $ gfold addL aListF
-- 25
-- ------------------------------------------------------
-- Generic Unfold
-- fold consumes data structures, unfold produces them by
-- unfolding a structure from a single value:
-- unfold a List
-- | Unfold a plain list from a seed: emit the seed and keep applying
-- nextF until stopF holds (the stopping value itself is excluded).
unfoldL stopF nextF val
  | stopF val = []
  | otherwise = val : unfoldL stopF nextF (nextF val)
-- | Demo: count down from 10 to -10.
main4 = print $ unfoldL (< (-10)) (\x -> x - 1) 10
-- [10,9,8,7,6,5,4,3,2,1,0,-1,-2,-3,-4,-5,-6,-7,-8,-9,-10]
-- We can use bimap to create a generic unfold:
-- | Generic unfold (anamorphism): grow a Fix-ed structure from a seed
-- by repeatedly applying the coalgebra f.
gunfold :: Bifunctor s => (b -> s a b) -> b -> Fix s a
gunfold f = FixT . bimap id (gunfold f) . f
-- for example, a coalgebra producing the list [n, n-1 .. 1]:
toList 0 = Nil_
toList n = (Cons_ n (n-1))
main5 = putStrLn . showListF $ gunfold toList 10
-- 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, Nil_
{- evaluates as...
gunfold toList 10
(FixT . bimap id (gunfold toList) . toList) 10
(FixT . bimap id (gunfold toList)) (Cons_ 10 9) -- toList 10
FixT $ bimap id (gunfold toList) (Cons_ 10 9)
FixT $ Cons_ (id 10) (bimap id (gunfold toList) 9)
(FixT Cons_ 10 (FixT Cons_ 9 (bimap id (gunfold toList) 8))
....
-}
-- ------------------------------------------------------
-- Generic unfold + fold: hylomorphism
-- | Unfold [100..1] then fold it with addL: sum 1..100.
main6 = print $ gfold addL (gunfold toList 100)
-- 5050
-- unfold and fold are each other's mirror images:
-- gunfold f = FixT . bimap id (gunfold f) . f
-- gfold f = f . bimap id (gfold f) . getFix
-- hylo is their composition:
-- | Fused unfold-then-fold (hylomorphism): no intermediate Fix
-- structure is ever built.
hylo f g = g . bimap id (hylo f g) . f
main7 = print $ hylo toList addL 100
-- 5050
|
uroboros/haskell_design_patterns
|
chapter6/2_origami.hs
|
mit
| 4,928
| 0
| 13
| 1,153
| 1,127
| 605
| 522
| 53
| 2
|
{-# OPTIONS_GHC -F -pgmF htfpp #-}
module Test.Process (htf_thisModulesTests) where
import Test.Framework
import Data.Unique
import Data.Maybe (isNothing, isJust)
import Control.Applicative ((<$>))
import Control.Concurrent (threadDelay, yield)
import Control.Concurrent.MVar (
newEmptyMVar,
takeMVar,
putMVar,
isEmptyMVar
)
import Concurrency.OTP.Process
-- | Sleep for the given number of milliseconds.
ms = threadDelay . (1000*)
-- | 'spawn' returns immediately; 'wait' blocks until the process ends.
test_spawnNewProcessAndWait = do
  resp <- newEmptyMVar
  pid <- spawn $ liftIO $ do
    ms 5
    putMVar resp ()
  isEmptyMVar resp >>= assertBool       -- not yet filled: spawn is async
  wait pid
  isEmptyMVar resp >>= assertBool . not -- filled once the process ran
-- | A linked handler receives 'Normal' when the process ends cleanly.
test_normalTerminate = do
  resp <- newEmptyMVar
  pid <- spawn $ return ()
  linkIO pid $ putMVar resp
  takeMVar resp >>= assertEqual Normal
-- | 'isAlive' tracks the process lifetime across 'wait'.
test_isAlive = do
  pid <- spawn $ liftIO $ ms 10
  isAlive pid >>= assertBool
  wait pid
  isAlive pid >>= assertBool . not
-- | An unlinked handler must not fire; the remaining one still does.
test_unlink = do
  lock <- newEmptyMVar
  handler1 <- newEmptyMVar
  handler2 <- newEmptyMVar
  pid <- spawn $ liftIO $ takeMVar lock
  linkId <- linkIO pid $ putMVar handler1
  linkIO pid $ putMVar handler2
  unlinkIO pid linkId
  putMVar lock ()          -- let the process finish
  takeMVar handler2
  yield
  isEmptyMVar handler1 >>= assertBool
-- | An exception in one linked action must not block the others.
test_failInLinkedAction = do
  handler1 <- newEmptyMVar
  handler2 <- newEmptyMVar
  pid <- spawn $ liftIO $ ms 5
  linkIO pid $ const $ putMVar handler1 () >> error "oops"
  linkIO pid $ const $ putMVar handler2 ()
  takeMVar handler1
  ms 3
  isEmptyMVar handler2 >>= assertBool . not
-- | A process linked to one that terminates normally keeps running.
test_processLinkNormal = do
  pid <- spawn $ liftIO $ ms 10
  pid2 <- spawn $ link pid >> (liftIO $ ms 50)
  wait pid
  ms 1
  isAlive pid2 >>= assertBool
-- | A process linked to one that crashes is taken down with it.
-- NOTE(review): 'waitCell' is bound but never used.
test_processLinkAbnormal = do
  waitCell <- newEmptyMVar
  pid <- spawn $ (liftIO $ ms 10) >> error "something wrong"
  pid2 <- spawn $ link pid >> (liftIO $ ms 50)
  wait pid
  ms 1
  isAlive pid2 >>= assertBool . not
-- | Test payload wrapping a 'Unique' so every message is distinct.
data Message = Message Unique
  deriving (Eq)
instance Show Message where
  show (Message u) = "Message#" ++ show (hashUnique u)
-- | A fresh, globally unique message.
newMessage = Message <$> newUnique
-- | A message sent from IO reaches the process's 'receive'.
test_sendMessage = do
  msg <- newMessage
  resp <- newEmptyMVar
  pid <- spawn $
    receive >>= liftIO . putMVar resp
  sendIO pid msg
  takeMVar resp >>= assertEqual msg
-- | Two sequential sends are received in order, one per 'receive'.
test_send2Messages = do
  msg <- newMessage
  resp <- newEmptyMVar
  pid <- spawn $ do
    receive >>= liftIO . putMVar resp
    receive >>= liftIO . putMVar resp
  sendIO pid msg
  takeMVar resp >>= assertEqual msg
  sendIO pid msg
  takeMVar resp >>= assertEqual msg
-- | With no sender, a 10ms receive timeout yields Nothing.
test_notReceiveMessageWithTimeout = do
  lock <- newEmptyMVar
  spawn $ do
    msg <- receiveWithTimeout $ Just 10
    liftIO $ putMVar lock msg
  takeMVar lock >>= assertBool . isNothing
-- | A message arriving within the timeout yields Just.
test_receiveMessageWithTimeout = do
  lock <- newEmptyMVar
  pid <- spawn $ do
    msg <- receiveWithTimeout $ Just 20
    liftIO $ putMVar lock msg
  ms 10
  sendIO pid ()
  takeMVar lock >>= assertBool . isJust
-- | Ping-pong: the second process sends its own pid, the first replies.
test_2processInteraction = do
  msg <- newMessage
  resp <- newEmptyMVar
  pid <- spawn $ do
    from <- receive
    send from msg
  spawn $ do
    self >>= send pid
    receive >>= liftIO . putMVar resp
  takeMVar resp >>= assertEqual msg
-- | 'exit' ends the process with reason 'Normal'.
test_processExit = do
  reason <- newEmptyMVar
  pid <- spawn $ exit
  linkIO pid $ putMVar reason
  takeMVar reason >>= assertEqual Normal
  isAlive pid >>= assertBool . not
-- | An uncaught 'error' surfaces to linked handlers as 'Error'.
test_processException = do
  reason <- newEmptyMVar
  pid <- spawn $ error "test"
  linkIO pid $ putMVar reason
  takeMVar reason >>= assertEqual (Error "test")
-- | 'terminate' kills a running process with reason 'Aborted'; the
-- first send/receive round proves the process is up beforehand.
test_terminate = do
  msg <- newMessage
  resp <- newEmptyMVar
  reason <- newEmptyMVar
  pid <- spawn $ do
    receive >>= liftIO . putMVar resp
    receive >>= liftIO . putMVar resp
  sendIO pid msg
  takeMVar resp >>= assertEqual msg
  linkIO pid $ putMVar reason
  terminate pid
  sendIO pid msg
  takeMVar reason >>= assertEqual Aborted
|
SPY/haskell-otp
|
tests/Test/Process.hs
|
mit
| 3,796
| 0
| 13
| 852
| 1,365
| 610
| 755
| 136
| 1
|
module Sandbox.MiscSpec (spec) where
import Test.Hspec
import Sandbox.Misc
spec :: Spec
spec = do
  describe "matrixMaxMin" $ do
    -- NOTE(review): expected value 7 -- presumably the minimum of the
    -- row maxima (or dual); confirm against Sandbox.Misc.
    it "Finds min max" $ do
      matrixMaxMin [
          [ 1, 2, 3, 4, 5 ],
          [ 9, 8, 4, 2, 5 ],
          [ 7, 21, 17, 8, 7 ],
          [ 9, 8, 4, 2, 21 ],
          [ 9, 23, 4, 2, 5 ]
        ] `shouldBe` 7
|
olkinn/my-haskell-sandbox
|
test/Sandbox/MiscSpec.hs
|
mit
| 441
| 0
| 16
| 226
| 153
| 93
| 60
| 14
| 1
|
----------------------------------------------------------------
--
-- | aartifact
-- http://www.aartifact.org/
--
-- @src\/IOParse.hs@
--
-- Parser implementation using the Parsec library.
--
----------------------------------------------------------------
--
module IOParser (parseP, program, stdAloneExp) where
import Text.ParserCombinators.Parsec
import qualified Text.ParserCombinators.Parsec.Token as P
import Text.ParserCombinators.Parsec.Expr
import Text.ParserCombinators.Parsec.Language
import Data.List (partition, sort, isPrefixOf)
import Data.Maybe (catMaybes, listToMaybe)
import Data.Ratio
import IOParserOps
import IOSource
import ExpConst
import Exp
import Validation
----------------------------------------------------------------
-- | Exported parse function.
-- | Exported parse function: normalise the input text, run the parser
-- with a fresh state, and turn a parse failure into a 'SyntaxError'
-- statement covering the text from the error position onward.
parseP :: Parse [Stmt] -> String -> String -> IO [Stmt]
parseP p fname tx = return $
    case runParser p (PS tx' [fname] opsState0 [True]) "" tx' of
      Right ss -> ss
      Left e   -> [SyntaxError $ srcTxtFrom tx' (errorPos e)]
  where
    tx' = normIn tx
----------------------------------------------------------------
-- | Parser state representation.
-- The parser state maintains a stack of inclusions
-- (filenames) to detect recursive inclusion dependencies.
type FileSysPath = String
type Flag = Bool
-- | Parser state: the full source text, the inclusion stack (used to
-- detect recursive inclusion), the user-extensible operator tables,
-- and a stack of flags.
data ParseState = PS String [FileSysPath] OpsState [Flag]
type Parse a = GenParser Char ParseState a
-- | The complete source text held in the state.
getSrc :: Parse String
getSrc = do{ PS txt _ _ _ <- getState; return txt }
getPos = getPosition
----------------------------------------------------------------
-- Parser definitions ------------------------------------------
----------------------------------------------------------------
----------------------------------------------------------------
-- | Top-level parsers.
-- | A single standalone expression, wrapped as a Consider statement.
stdAloneExp :: Parse [Stmt]
stdAloneExp = do{ whiteSpace; (e,s,_) <- phraseP; eof
                ; return $ [ExpStmt Consider (e,s)] }
-- | A whole source file: leading text, then statements until EOF.
program :: Parse [Stmt]
program =
  do{ s <- getSrc
    ; pos1 <- getPos
    ; whiteSpace
    ; pos2 <- getPos
    ; ss <- many stmtOrErrP
    ; eof
    ; return $ [Text (srcTxt s pos1 pos2)]++concat ss
    }
----------------------------------------------------------------
-- | Statements.
-- Keyword spellings that open each statement kind.
introKeys = ["Introduce"]
assumpKeys = ["Assume", "Assume that"]
assertKeys = ["Assert", "Assert that"]
considerKeys = ["Consider"]
-- | A statement, or -- on failure -- error recovery: consume one
-- character, retry, and merge the skipped text into a SyntaxError
-- (consecutive errors collapse into one; note 'ss' is shadowed in the
-- case alternatives on purpose).
stmtOrErrP = stmtP <?|>
  do{ src <- getSrc
    ; pos <- getPos
    ; anyChar
    ; pos2 <- getPos
    ; ss <- many stmtOrErrP
    ; errTxt <- return $ srcTxt src pos pos2
    ; return $
        case concat ss of
          (SyntaxError t):ss -> [SyntaxError $ errTxt++t] ++ ss
          ss -> [SyntaxError $ errTxt] ++ ss
    }
-- | Any single statement kind.
stmtP :: Parse [Stmt]
stmtP = introP
    <|> (expStmtP assumpKeys Assume <?> "assumption")
    <|> (expStmtP considerKeys Consider <?> "consideration")
    <|> (expStmtP assertKeys Assert <?> "assertion")
    <|> includeP
-- | "Introduce ..." statement: registers the introduced names in the
-- operator tables held in the parser state, and keeps the surrounding
-- source text for output reconstruction.
introP :: Parse [Stmt]
introP =
  do{ src <- getSrc
    ; p0 <- getPos
    ; keysP introKeys
    ; words <- many wordP
    ; p1 <- getPos
    ; xs <- mathdelims $ sepBy1 nameP commaSep
    ; p2 <- getPos
    ; periodManyP
    ; p3 <- getPos
    ; PS txt b ops flags <- getState
    ; setState $ PS txt b (addOps (map fst xs) ops (introTyp words)) flags
    ; return $ [Text (srcTxt src p0 p1),
                Intro (Src (srcTxt src p1 p2)) (introTyp words) xs,
                Text (srcTxt src p2 p3)]
    }
  <?> "variable or operator introduction"
-- | Generic keyword-introduced expression statement (Assume / Assert /
-- Consider): keyword, optional uuids, then an English/math phrase.
expStmtP ks stmt =
  do{ src <- getSrc
    ; p1 <- getPos
    ; keysP ks
    ; _ <- many uuidP
    ; (e,s,(p2,p3)) <- phraseP
    ; periodManyP
    ; p4 <- getPos
    ; return $ [Text$ srcTxt src p1 p2,
                ExpStmt stmt (e,s),
                Text$ srcTxt src p3 p4]
    }
-- | Parses a \\vdepend ... \\vdependbeg ... \\vdependend inclusion block;
-- the words after \\vdepend form the inclusion's name.
includeP :: Parse [Stmt]
includeP =
  do{ reserved "\\vdepend"
    ; ws <- many1 wordP
    ; reserved "\\vdependbeg"
    -- 'many1' guarantees 'ws' is non-empty; 'unwords' replaces the
    -- original hand-rolled foldr over init/last with the same result.
    ; let n = unwords ws
    ; ss <- ((try $ many stmtP) <?> "ERROR IN INCLUSION [["++n++"]]")
    ; reserved "\\vdependend"
    ; return $ [Include n (concat ss)]
    }
  <?> "inclusion block"
----------------------------------------------------------------
-- | Arithmetic expressions.
-- | Full expression: top-level comma-separated expressions become a tuple.
expP :: Parse Exp
expP =
  (listSepApp commaSep mkTup)
  $ expNoCommaP
-- | Expression without top-level commas: logical then relational
-- operator layers over the comma-free core.
expNoCommaP :: Parse Exp
expNoCommaP =
  (opsP opsLogic).
  (opsRelP)
  $ expNoRelP
-- | Expression without relational operators; the set and arithmetic
-- operator tables come from the (user-extensible) parser state.
-- NOTE(review): the pattern (_:opsSet:opsArith:_, _) is partial --
-- it assumes at least three operator tables are always present.
expNoRelP :: Parse Exp
expNoRelP =
  do{ PS _ _ (_:opsSet:opsArith:_, _) _ <- getState
    ; e <- (opsP opsSet).
           (listSepApp (reservedOp "\\times") mkProd).
           (opsP opsArith)
           $ expAppP
    ; return e
    }
-- | One or more juxtaposed expressions folded into left-nested
-- function application.  'foldl1' replaces the head/tail pair; it is
-- safe because 'many1' guarantees a non-empty list.
expAppP :: Parse Exp
expAppP =
  do { es <- many1 expNoAppP
     ; return $ foldl1 App es
     }
  <?> "expression with functional application"
-- | Expression that is not a bare application: subscript operators
-- layered over the simple alternatives.
expNoAppP :: Parse Exp
expNoAppP =
  opsP opsSubscript
  ( iterP
  <|> ifThenElseP
  <|> (rootP <?|> rootP')
  <|> expAtom
  <|> quantP "\\forall" (mkQ Forall Imp)
  <|> quantP "\\exists" (mkQ Exists And)
  )
  <?> "simple expression"
-- | Atomic expressions: vector iterations, bracketed forms (norms,
-- absolute value, ceil/floor, angle brackets), fractions,
-- probabilities, intervals, literals, variables/constants and sets.
-- Ordering matters: longer/more specific forms are tried (with 'try')
-- before shorter ones that share a prefix.
expAtom :: Parse Exp
expAtom =
  try (expVectorIterP ("+", Plus))
  <|> try (expVectorIterP ("*", Times))
  <|> try (expVectorIterP ("\\cdot", Times))
  <|> try (bracks "||" "||" VectorNorm expP)
  <|> try (bracks "|" "|" (Brack Bar Bar) expP)
  <|> expFracP
  <|> (try expProbP <|> expProbWithCondP)
  <|> expIntervalP
  <|> bracks "\\lceil" "\\rceil" Ceil expP
  <|> bracks "\\lfloor" "\\rfloor" Floor expP
  <|> angles "\\langle" "\\rangle" (Brack Round Round) expP
  <|> braces expP
  <|> parens expP
  <|> expNumP
  -- <|> (try $ phrasePred "\\p" NLPredC)
  -- <|> (try $ phrasePred "\\l" NLPredLC)
  <|> (try varOrConstP)
  <|> (try $ mathbraces setEnumP)
  <|> (try $ mathbraces setCompP)
  <|> (try $ mathbraces setP)
  <?> "atomic expression"
-- | "\interval e" -- an interval built from the following atom.
expIntervalP =
  do{ reserved "\\interval"
    ; e <- expAtom
    ; return $ mkInterval e
    }
-- | Elided iterated vector operation, e.g. "x_1 + ... + x_n":
-- two indexed variables joined by the operator around an ellipsis.
expVectorIterP (opStr,con) =
  do{ e1 <- varP
    ; reservedOp "_"
    ; e1' <- expAtom
    ; reservedOp opStr
    ; reservedOp "\\ldots" <|> reservedOp "..."
    ; reservedOp opStr
    ; e2 <- varP
    ; reservedOp "_"
    ; e2' <- expAtom
    ; return $ App (C (IterVect con)) (T[e1,e1',e2,e2'])
    }
-- | Math-mode quantifier: keyword, variable list, ".", body.
quantP :: String -> ([(Name, Maybe Exp)] -> Exp -> Exp) -> Parse Exp
quantP str cnstrct =
  do{ reserved str
    ; qvs <- quantVarsList
    ; symb "."
    ; e <- expP
    ; return $ cnstrct (addLimits qvs) e
    }
  <?> "quantifier: "++str
-- If no domain is provided for a variable, look further
-- down the list and try to find one.
addLimits [] = []
addLimits ((i, Just e):vts) = (i, Just e):(addLimits vts)
addLimits ((i, Nothing):vts) = (i, nxt vts):(addLimits vts)
  where nxt = \qvs->listToMaybe $ catMaybes (map snd qvs)
-- | Comma-separated quantifier variables, each optionally with a
-- domain ("x \in e"); the domained form is tried first.
quantVarsList = sepBy1 (quantVarWithIn <?|> quantVarNoIn) commaSep
  <?> "quantifier variable"
quantVarNoIn = do{x <- nameP; return (x, Nothing)}
quantVarWithIn =
  do{ x <- nameP
    ; reservedOp "\\in"
    ; e <- expNoCommaP
    ; return (x, Just e)
    }
-- | Iterated (big) operator application, trying the subscripted forms
-- before the bare one.
iterP :: Parse Exp
iterP = iter1P <?|> iter2P <?|> iter0P
-- | Bare iterated operator, e.g. "\sum e".
iter0P :: Parse Exp
iter0P =
  do{ op <- iterOps
    ; e2 <- expNoRelP <|> expAtom
    ; return $ App (C $ Aggregate op) e2
    }
  <?> "iterated operator application"
-- | Set-indexed form: "op_{x \in e1} e2".
iter1P :: Parse Exp
iter1P =
  do{ op <- iterOps
    ; reservedOp "_"
    ; reservedOp "{"
    ; x <- nameP
    ; reservedOp "\\in"
    ; e1 <- expP
    ; reservedOp "}"
    ; e2 <- expNoRelP <|> expAtom
    ; return $ Bind op [x] $ T[bOp In (Var x) e1,e2]
    }
  <?> "iterated operator application"
-- | Range form: "op_{x = e1}^{e1'} e2", desugared to membership in an
-- enumerated range.
iter2P :: Parse Exp
iter2P =
  do{ op <- iterOps
    ; reservedOp "_"; reservedOp "{"
    ; x <- nameP
    ; reservedOp "="
    ; e1 <- expP
    ; reservedOp "}"
    ; reservedOp "^"; reservedOp "{"
    ; e1' <- expP
    ; reservedOp "}"
    ; e2 <- expNoRelP <|> expAtom
    ; return $ Bind op [x] $ T[bOp In (Var x) (bOp SetEnum e1 e1'),e2]
    }
  <?> "iterated operator application"
-- | Any of the iterated operator keywords, from the opsIter table.
iterOps :: Parse Const
iterOps = (foldr (<|>) (last l) (init l))<?>"iterated operator"
  where l = map (\((o,s),_)->do{reserved s; return o}) opsIter
-- | Enumerated set "e1, ..., e2" (inside math braces).
setEnumP :: Parse Exp
setEnumP =
  do{ e1 <- expNoCommaP
    ; commaSep
    ; reservedOp "\\ldots" <|> reservedOp "..."
    ; commaSep
    ; e2 <- expNoCommaP
    ; return $ bOp SetEnum e1 e2
    }
  <?> "set (enumeration)"
-- | Conditional probability "\Pr(e1 | e2)" (parens or square brackets).
expProbWithCondP :: Parse Exp
expProbWithCondP =
  do{ reserved "\\Pr"
    ; (reservedOp "(" <|> reservedOp "[")
    ; e1 <- expP
    ; reservedOp "|"
    ; e2 <- expP
    ; (reservedOp ")" <|> reservedOp "]")
    ; return $ App (C Probability) (T [e1,e2])
    }
  <?> "probability expression (with conditions)"
-- | Unconditional probability "\Pr(e)".
expProbP :: Parse Exp
expProbP =
  do{ reserved "\\Pr"
    ; (reservedOp "(" <|> reservedOp "[")
    ; e <- expP
    ; (reservedOp ")" <|> reservedOp "]")
    ; return $ App (C Probability) e
    }
  <?> "probability expression (without conditions)"
-- | Set comprehension "e1 | conditions" (inside math braces).
setCompP :: Parse Exp
setCompP =
  do{ e1 <- expP
    ; reservedOp "|"
    ; e2 <- listSepApp commaSep listAnd expNoCommaP
    ; return $ mkSetComp e1 e2
    }
  <?> "set (comprehension)"
-- | Explicit set; elements are sorted into a canonical order.
setP :: Parse Exp
setP = listSepApp commaSep ((App (C SetExplicit)).(T).sort) expNoCommaP
  <?> "set (explicit)"
-- | "\if e1 \then e2 \else e3" conditional expression.
ifThenElseP :: Parse Exp
ifThenElseP =
  do{ reserved "\\if" ; e1 <- expP
    ; reserved "\\then" ; e2 <- expP
    ; reserved "\\else" ; e3 <- expP
    ; return $ App (C IfThenElse) (T[e1,e2,e3])
    }
-- | "\frac{e1}{e2}" -- division.
expFracP :: Parse Exp
expFracP =
  do{ reserved "\\frac"
    ; e1 <- between (symb "{") (symb "}") expP
    ; e2 <- between (symb "{") (symb "}") expP
    ; return $ App (C Div) (T[e1,e2])
    }
-- | "\sqrt[e1]{e2}" -- e1-th root.
rootP :: Parse Exp
rootP =
  do{ reserved "\\sqrt"
    ; e1 <- between (symb "[") (symb "]") expP
    ; e2 <- between (symb "{") (symb "}") expP
    ; return $ mkRoot e1 e2
    }
-- | "\sqrt{e2}" -- square root (index defaults to 2).
rootP' :: Parse Exp
rootP' =
  do{ reserved "\\sqrt"
    ; e2 <- between (symb "{") (symb "}") expP
    ; return $ mkRoot (C (N 2)) e2
    }
-- | Chained relational operators: "a < b < c" becomes the conjunction
-- of adjacent comparisons (a < b AND b < c).
opsRelP :: Parse Exp -> Parse Exp
opsRelP p =
  do{ e <- p
    ; PS _ _ (opsRel:_, _)_ <- getState
    ; opLL' <- return $ opLL opsRel
    ; rest <- many$do{ o<-foldr (<|>) (last opLL') (init opLL'); e<-p; return (o,e)}
    ; return $ if rest==[] then e else listAnd $ mkRelExp e rest
    }
  where
    mkRelExp e [(o,e')] = [bOp o e e']
    mkRelExp e ((o,e'):oes) = (bOp o e e'):mkRelExp e' oes
    opLL opsRel = map (\((o,s),_) -> do{reservedOp s; return o}) (head opsRel)
-- | Build a Parsec expression parser from one of the operator tables;
-- prefix and "no-fixity" entries both become prefix applications.
opsP :: OpTable -> Parse Exp -> Parse Exp
opsP t = buildExpressionParser $ map (map o) t
  where
    o ((op, s), Pre) = prefix s $ App (C op)
    o ((op, s), None) = prefix s $ App (C op)
    o ((op, s), a) = binary s (bOp op) (assoc a)
    assoc InL = AssocLeft
    assoc InR = AssocRight
-- | Numeric literal: both naturals and floats become rationals.
-- (The inner 'n'/'d' deliberately shadow the scrutinee name.)
expNumP :: Parse Exp
expNumP =
  do{ n <- naturalOrFloat
    ; return $ case n of
        Left n -> C$N$ toRational n
        Right d -> C$N$ toRational d
    }
  <?> "numeric literal"
-- | A reserved constant keyword.
constP ((op,str),_) = do{reserved str; return $ C op}<?>str
-- | "\nofix{op}" -- an operator used as a bare constant.
opP ((op,str),_) = do{reserved "\\nofix"; braces (reservedOp str); return $ C op}
-- | A constant (built-in or user-introduced) or, failing that, a
-- variable; user constants come from the parser state.
varOrConstP =
  do{ PS _ _ (_,[uCs]) _ <- getState
    ; foldr (<?|>) varP (map opP opsAll ++ map constP (constAll++uCs))
    } <?> "constant or variable"
-- | A variable: any name that is not registered as an operator.
-- NOTE(review): the stray ";" after nameP makes an empty statement --
-- GHC appears to tolerate it, but it looks accidental.
varP =
  do{ x <- nameP;
    ; PS _ _ ops _ <- getState
    ; if (fst x) `elem` (opsStrTbl ops) then pzero else return $ Var x
    } <?> "variable"
----------------------------------------------------------------
-- | English logical expression phrases.
-- English keyword spellings for the phrase-level connectives.
forallKeys = ["for all", "for any" ]
existsKeys = ["there exists", "there exist"]
iffKeys = ["iff", "if and only if"]
impliesKeys = ["implies", "implies that", "if", "then"]
suchThatKeys = ["such that", "s.t."]
-- | Combine two phrase results: apply the binary builder f to the
-- expressions and stitch the source spans (the text between the two
-- operands is recorded in the SrcL).
phraseOp f txt (e,s,(p1,p2)) (e',s',(p1',p2')) =
  (f e e', SrcL ["", srcTxt txt p2 p1'] [s,s'], (p1,p2'))
-- | Operator table for English connectives: "or" binds tighter than
-- iff/implies; longer spellings are listed before their prefixes.
phraseOps s =
  [ [ binary "or" (phraseOp (bOp Or) s) AssocLeft ]
  , [ binary "iff" (phraseOp (bOp Iff) s) AssocLeft
    , binary "if and only if" (phraseOp (bOp Iff) s) AssocLeft
    , binary "implies that" (phraseOp (bOp Imp) s) AssocLeft
    , binary "implies" (phraseOp (bOp Imp) s) AssocLeft
    ]
  ]
-- | English logical phrase: connective layers over and-separated parts.
-- Results carry the expression, its source rendering, and its span.
phraseP :: Parse (Exp, Src, (SourcePos, SourcePos))
phraseP =
  do{ PS txt _ _ _ <- getState
    ; buildExpressionParser (phraseOps txt) phraseAndP
    }
  <?> "logical expression (English)"
-- | Phrases joined by ",", "and" or ", and", folded into conjunction.
phraseAndP :: Parse (Exp, Src, (SourcePos, SourcePos))
phraseAndP =
  do{ src <- getSrc
    ; es <- sepBy1 phraseNoAndP ((try $ do{reserved ","; reserved "and"}) <|> reserved "," <|> reserved "and")
    ; return $ foldr (phraseOp (bOp And) src) (last es) (init es)
    }
-- | A phrase with no top-level "and": quantified phrases, conditionals,
-- negation, bracketed sub-phrases, predicates or a math expression.
phraseNoAndP :: Parse (Exp, Src, (SourcePos, SourcePos))
phraseNoAndP =
  phraseForall
  <|> phraseExists
  <|> phraseConsidering
  <|> phraseInContextForall
  <|> phraseIfThen
  <|> phraseNot
  <|> phraseContradiction
  <|> bracksIgnP "{" "}" (phraseP) --(phraseIsP <?|> phraseP)
  <|> bracksIgnP "(" ")" (phraseP) -- (wStr $ parens phraseP)
  <|> phrasePreds
  <|> (try phrasePredBare')
  <|> phraseMathP
  <|> phraseMakeReport
-- | Natural-language predicate forms: flagged (\p, \l) or bracketed.
phrasePreds =
  phrasePred "\\p" NLPredC
  <|> phrasePred "\\l" NLPredLC
  <|> phrasePredBrack NLPredLC
-- | "for all ..."/"for any ..." -- universal quantification; the body is
-- attached with implication.
phraseForall = phraseQ (keysP forallKeys) (do{commaP;return ""}) Forall Imp
  <?> "universal quantification (English)"
-- | "there exists ... such that ..." -- existential quantification; the
-- body is attached with conjunction.
phraseExists = phraseQ (keysP existsKeys) (keysP suchThatKeys) Exists And
  <?> "existential quantification (English)"
-- NOTE(review): 'p' is run for effect but its RESULT is discarded; the
-- returned pair contains the parser 'p' itself, not its parsed value.
-- This looks like a bug (expected @r <- p; return (r,(p1,p2))@), but fixing
-- it changes the return type -- confirm against callers before changing.
withPos p = do{ p1<-getPos; p; p2<-getPos; return (p,(p1,p2)) }
-- | Generic quantified phrase: quantifier keyword @qP@, a $-delimited
-- variable list, separator @sepP@, then the body phrase.  @q@ is the
-- binder and @o@ the connective used by 'mkQ' to attach the body.
phraseQ qP sepP q o =
  do{ s <- getSrc
    ; p1 <- getPos
    ; qP
    ; p2 <- getPos
    ; vs <- mathdelims quantVarsList
    ; p3 <- getPos
    ; sepP
    ; (e,src,(p4,p5)) <- phraseP
    ; return $ ((mkQ q o) (addLimits vs) e,
                SrcL [srcTxt s p1 p2, srcTxt s p3 p4] [Src (srcTxt s p2 p3), src],
                (p1,p5))
    } <?> "quantified formula (English)"
-- | "considering E, P" -- binds the free variables of E over the pair
-- (E, P) with the 'Considering' binder.
phraseConsidering =
  do{ s <- getSrc
    ; p1 <- getPos
    ; keysP ["considering"]
    ; (e1,src1,(p2,p3)) <- phraseMathP <|> (braces phraseP)
    ; commaP
    ; (e2,src2,(p4,p5)) <- phraseP
    ; return $ (Bind Considering (fv [] e1) (T[e1,e2]),
                SrcL [srcTxt s p1 p2,srcTxt s p3 p4] [src1, src2],
                (p1,p5))
    } <?> "\'considering\' phrase (English)"
-- | "in context for all, P" -- binds the free variables of P with the
-- 'InContextForall' binder.
phraseInContextForall =
  do{ s <- getSrc
    ; p1 <- getPos
    ; keysP ["in context for all"]
    ; commaP
    ; (e,src,(p2,p3)) <- phraseP
    ; return $ (Bind InContextForall (fv [] e) e,
                SrcL [srcTxt s p1 p2] [src],
                (p1,p3))
    } <?> "\'in context for all\' phrase (English)"
-- | "if P then Q" -- builds an implication from the two sub-phrases.
phraseIfThen =
  do{ s <- getSrc
    ; p1 <- getPos
    ; keysP ["if"]
    ; (e1,s1,(p2,p3)) <- phraseP
    ; keysP ["then"]
    ; (e2,s2,(p4,p5)) <- phraseP
    ; return (bOp Imp e1 e2, SrcL [srcTxt s p1 p2, srcTxt s p3 p4] [s1,s2],(p1,p5))
    }
  <?> "\"if ... then ... \" clause (English)"
-- | "not P" / "it is not the case that P" -- logical negation.
phraseNot =
  do{ s <- getSrc
    ; p1 <- getPos
    ; keysP ["it is not the case that", "not"]
    ; (e,src,(p2,p3)) <- phraseP
    ; return (App (C Not) e, SrcL [srcTxt s p1 p2] [src], (p1,p3))
    }
  <?> "logical negation (English)"
-- | The keyword "contradiction" as a nullary formula.
phraseContradiction =
  do{ s <- getSrc
    ; p1 <- getPos
    ; keysP ["contradiction"]
    ; p2 <- getPos
    ; return $ (C Contradiction, Src $ srcTxt s p1 p2, (p1,p2))
    } <?> "contradiction"
-- | A mathematical expression embedded in a phrase, delimited by $...$,
-- #...# or \[...\].
phraseMathP :: Parse (Exp, Src, (SourcePos, SourcePos))
phraseMathP =
  do{ s <- getSrc
    ; pos1 <- getPos
    ; e <- mathdelims expP <|> mathbrackets expP
    ; pos2 <- getPos
    ; return $ (e, Src $ srcTxt s pos1 pos2, (pos1,pos2))
    }
-- | A flagged natural-language predicate: @flagStr{word word $e$ ...}@,
-- mixing words ('Right') and embedded expressions ('Left') via 'mkNLPred'.
phrasePred flagStr con =
  do{ s <- getSrc
    ; p1 <- getPos
    ; reserved flagStr
    ; ews <- braces (many1 (phrasePredSubExp <|> phrasePredWord'' <|> phrasePredWordIs <|> phrasePredSubPred))
    ; p2 <- getPos
    ; return $ (mkNLPred con ews, Src $ srcTxt s p1 p2, (p1,p2))
    }
  <?> "predicate expression (English)"
-- | Same as 'phrasePred' but delimited by square brackets, with no flag.
phrasePredBrack con =
  do{ s <- getSrc
    ; p1 <- getPos
    ; ews <- brackets (many1 (phrasePredSubExp <|> phrasePredWord'' <|> phrasePredWordIs <|> phrasePredSubPred))
    ; p2 <- getPos
    ; return $ (mkNLPred con ews, Src $ srcTxt s p1 p2, (p1,p2))
    }
  <?> "predicate expression (English)"
-- Components of a predicate phrase: embedded expression, word (including
-- the otherwise-reserved "and"/"is"), or a nested predicate.
phrasePredSubExp = do{(e,_,_) <- phraseMathP; return $ Left e}
phrasePredWord'' = do{w <- wordP''; return $ Right w} where
  wordP'' = reservedAsWordP "and" <|> wordP
  reservedAsWordP r = do{_ <- reserved r; return r}
phrasePredWordIs = do{ reserved "is"; return $ Right "is"}
phrasePredSubPred = do{ (e,_,_) <- phrasePreds; return $ Left e}
-- | A bare (undelimited) predicate phrase, wrapped with source/span info.
phrasePredBare' =
  do{ s <- getSrc
    ; p1 <- getPos
    ; e <- phrasePredBare
    ; p2 <- getPos
    ; return $ (e, Src $ srcTxt s p1 p2, (p1,p2))
    }
  <?> "non-delimited predicate expression (English)"
-- Two bare forms: words-only first, then expression-followed-by-words.
phrasePredBare = phrasePredBare2 <|> phrasePredBare1
-- | Leading expression or word followed by at least one word; fails
-- ('pzero') unless some word ('Right') is present.
phrasePredBare1 =
  do{ --ews <- many1 (phrasePredSubExp <|> phrasePredWord <|> phrasePredWordIs)
      e1 <- (phrasePredSubExp <|> phrasePredWord <|> phrasePredWordIs)
    ; ews <- many1 (phrasePredWordIs <|> phrasePredWord)
    ; if hasRight (e1:ews) then return $ mkNLPred NLPredLC (e1:ews) else pzero
    }
  <?> "predicate expression (English)"
-- | Words-only bare predicate; same 'hasRight' guard.
phrasePredBare2 =
  do{ ews <- many1 (phrasePredWordIs <|> phrasePredWord)
    ; if hasRight ews then return $ mkNLPred NLPredLC ews else pzero
    }
  <?> "predicate expression (English)"
-- | "?e1, e2, ...?" -- a report request.  NOTE: the trailing
-- 'many1 anyChar' consumes the entire rest of the input.
phraseMakeReport =
  do{ s <- getSrc
    ; pos1 <- getPos
    ; symb "?"
    ; es <- sepBy expNoCommaP commaSep
    ; symb "?"
    ; pos2 <- getPos
    ; many1 anyChar
    ; return $ (App (C MakeReport) (T es), Src $ srcTxt s pos1 pos2, (pos1,pos2))
    }
-- | True iff the list contains at least one 'Right' element.
hasRight :: [Either a b] -> Bool
hasRight = any isR
  where
    isR (Right _) = True
    isR _         = False
-- | A single word inside a bare predicate, tagged 'Right'.
-- NOTE: Reserved words must be allowed to occur here.
phrasePredWord = do{w <- wordP; return $ Right w}
----------------------------------------------------------------
-- | Punctuation
-- | One punctuation mark.
punctAny = symb "," <|> symb "." <|> symb ";" <|> symb ":"
-- | Zero or more punctuation marks.
punctP = many punctAny <?> "punctuation mark(s): .,:;"
periodP = symb "." <?> "punctuation mark: \".\""
periodManyP = (many $ symb ".") <?> "punctuation mark: \".\""
commaP = many (symb ",") <?> "comma"
-- | One or more spaces/commas, discarded (used as an expression separator).
commaSep = do {skipMany1 $ space <|> char ','; return ()}
-- NOTE(review): 'reserved " "' reserves a literal space -- unusual; verify
-- it behaves as intended with the token parser's whitespace handling.
doubleSlashSep = skipMany1 (reserved " " <|> reserved "\\\\")
----------------------------------------------------------------
-- | Brackets.
-- | Run @p@ between literal delimiters @lstr@/@rstr@, recording the
-- delimiter text as ignorable source ('SrcIg') around @p@'s own source.
bracksIgnP lstr rstr p =
  do{ s <- getSrc
    ; p1 <- getPos
    ; symb lstr
    ; (e,src,(p2,p3)) <- p
    ; symb rstr
    ; p4 <- getPos
    ; return (e, SrcIg [Src (srcTxt s p1 p2), src, Src (srcTxt s p3 p4)], (p1,p4))
    }
-- | Round or square brackets around an expression.  NOTE: mixed pairs such
-- as "(e]" are accepted here; 'mkBrack' receives both bracket kinds.
parens :: Parse Exp -> Parse Exp
parens p =
  do{ b1 <- (brackP "(" Round <|> brackP "[" Square )
    ; e <- p
    ; b2 <- (brackP ")" Round <|> brackP "]" Square )
    ; return $ mkBrack b1 b2 e
    }
  <?> "bracketed/parenthesized expression"
-- | Angle-style delimiters building a vector via 'mkVect'.
-- NOTE(review): the 'Const' argument @c@ is unused -- kept only so the
-- signature matches 'bracks'; confirm before removing.
angles :: String -> String -> Const -> Parse Exp -> Parse Exp
angles l r c p =
  do{ reservedOp l
    ; e <- p
    ; reservedOp r
    ; return $ mkVect e
    } <?> "bracketed expression: "++l++"..."++r
-- | Generic delimiters applying the constant @c@ to the enclosed expression.
bracks :: String -> String -> Const -> Parse Exp -> Parse Exp
bracks l r c p =
  do{ reservedOp l
    ; e <- p
    ; reservedOp r
    ; return $ App (C c) e
    } <?> "bracketed expression: "++l++"..."++r
-- | Parse the literal @str@ and return the tag @ret@ (bracket kind).
brackP str ret = do{symb str; return ret}<?> "bracket"
-- Delimiter helpers built on Parsec's 'between'.
braces p = between (symb "{") (symb "}") p
brackets p = between (symb "[") (symb "]") p
parens0 p = between (symb "(") (symb ")") p
dollarsigns p = between (symb "$") (symb "$") p
mathpounds p = between (symb "#") (symb "#") p
-- | Math mode: $...$ or #...#.
mathdelims p = dollarsigns p <|> mathpounds p
mathbrackets p = between (reservedOp "\\[") (reservedOp "\\]") p
mathbraces p = between (reservedOp "\\{") (reservedOp "\\}") p
----------------------------------------------------------------
-- | Other basic parsers.
-- | Parse an identifier as a 'Name' with index -1; a lone backslash is
-- rejected ('pzero').
nameP :: Parse Name
nameP = do{i <- idP; if i == "\\" then pzero else return (i,-1)}
-- | Parse the reserved word @s@ and return it unchanged.
keyP :: String -> Parse String
keyP s = reserved s >> return s
-- | Parse any one of the given reserved words, trying them left to right.
-- (The alternatives are combined with '<|>', which is associative, so the
-- fold direction does not affect behaviour.)
keysP :: [String] -> Parse String
keysP = foldr1 (<|>) . map keyP
-- | Parse a word: either a literal "-" or a plain identifier via 'wordP''.
wordP = (symb "-" >> return "-") <|> wordP'
-- | Parse a plain word: any identifier that is not a TeX-style control
-- sequence.  The total pattern match replaces the original partial
-- @w !! 0@, which would crash if the identifier were ever empty.
wordP' :: Parse String
wordP' =
  do{ w <- idP
    ; case w of
        ('\\':_) -> pzero      -- reject "\foo" control sequences
        _        -> return w
    }
-- | Parse a UUID-like token delimited by tildes: @~xxxx-xxxx...~@
-- (alphanumerics and dashes).
uuidP :: Parse String
uuidP = do{symb "~"; uuid <- many1 (alphaNum <|> (char '-')); symb "~"; return uuid}
{- wordP'' =
do{ c0 <- letter <|> oneOf "\\"
; cs <- many (alphaNum <|> oneOf "'")
; return $ c0:cs
} -}
-- | Parse one or more occurrences of @p@ separated by @sepP@ and combine
-- the results with @f@.
listSepApp :: Parse b -> ([a] -> a) -> Parse a -> Parse a
listSepApp sepP f p = fmap f (sepBy1 p sepP)
----------------------------------------------------------------
-- | Parsec definitions.
-- | Parsec token parser built from 'langDef'.
lang = P.makeTokenParser langDef
-- | Token-language definition for the input syntax.
-- NOTE(review): 'commentStart'/'commentEnd' are "\\vend"/"\\vbeg", i.e. the
-- text BETWEEN \vend and the next \vbeg is skipped as a comment, so only
-- \vbeg..\vend regions are parsed; presumably intentional -- confirm.
langDef
 = emptyDef
 { commentStart = "\\vend"
 , commentEnd = "\\vbeg"
 , commentLine = "%--"
 , nestedComments = True
 , identStart = letter <|> oneOf "\\"
 , identLetter = alphaNum <|> oneOf "'"
 , opStart = opLetter langDef
 , opLetter = oneOf ""
 , reservedOpNames = constStr
 , reservedNames =
   constStr ++ opsStr ++
   introKeys ++ assumpKeys ++ considerKeys ++ assertKeys ++
   forallKeys ++ existsKeys ++
   iffKeys ++ impliesKeys ++ suchThatKeys ++
   ["and", ", and", "or"] ++
   ["\\not"]++
   ["\\interval"]++
   ["\\forall", "\\exists", "\\if", "\\then", "\\else", "\\sqrt",
    "\\frac",
    "\\Pr",
    "\\nofix", "\\p", "\\l", "\\q",
    ".", "\\\\", "|", "\\[", "\\]",
    "\\{", "\\}", "\\langle", "\\rangle",
    "\\lfloor", "\\rfloor", "\\lceil", "\\rceil",
    "\\vdepend", "\\vdependbeg", "\\vdependend"]
 , caseSensitive = True
 }
-- Shorthands for the token parsers derived from 'lang'.
whiteSpace = P.whiteSpace lang
reserved = P.reserved lang
reservedOp = P.reservedOp lang
symb = P.symbol lang
idP = P.identifier lang
natural = P.natural lang
naturalOrFloat = P.naturalOrFloat lang
-- Helpers producing 'buildExpressionParser' operator-table entries.
binary str f assoc = Infix (do{ reservedOp str; return f }) assoc
prefix str f = Prefix (do{ reservedOp str; return f })
postfix str f = Postfix (do{ reservedOp str; return f })
-- | Backtracking alternative: attempt @p@ with full backtracking ('try')
-- before falling back to @q@.
(<?|>) :: Parse a -> Parse a -> Parse a
p <?|> q = try p <|> q
--eof
|
aartifact/aartifact-verifier
|
src/IOParser.hs
|
mit
| 21,934
| 7
| 26
| 5,284
| 8,346
| 4,362
| 3,984
| 580
| 4
|
-- Copyright (c) Microsoft. All rights reserved.
-- Licensed under the MIT license. See LICENSE file in the project root for full license information.
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
{-|
Copyright : (c) Microsoft
License : MIT
Maintainer : adamsap@microsoft.com
Stability : provisional
Portability : portable
The module exports the built-in code generation templates.
-}
module Language.Bond.Codegen.Templates
( -- * Templates
-- | All codegen templates take at least the following arguments:
--
-- * 'MappingContext' which determines mapping of Bond types to types in
-- the target language
--
-- * base file name, typically the name of the schema definition file
-- without the extension
--
-- * list of 'Import's from the parsed schema definition
--
-- * list of 'Declaration's from the parsed schema definition
--
-- Some templates are parameterized with additional options for
-- customizing the generated code.
--
-- The templates return the name suffix for the target file and lazy
-- 'Text' with the generated code.
-- ** C++
types_h
, types_cpp
, reflection_h
, enum_h
, apply_h
, apply_cpp
, Protocol(..)
-- ** C#
, FieldMapping(..)
, StructMapping(..)
, types_cs
)
where
import Language.Bond.Codegen.Cpp.Apply_cpp
import Language.Bond.Codegen.Cpp.Apply_h
import Language.Bond.Codegen.Cpp.ApplyOverloads
import Language.Bond.Codegen.Cpp.Enum_h
import Language.Bond.Codegen.Cpp.Reflection_h
import Language.Bond.Codegen.Cpp.Types_cpp
import Language.Bond.Codegen.Cpp.Types_h
import Language.Bond.Codegen.Cs.Types_cs
-- redundant imports for haddock
import Language.Bond.Codegen.TypeMapping
import Language.Bond.Syntax.Types
import Data.Text.Lazy
|
alfpark/bond
|
compiler/src/Language/Bond/Codegen/Templates.hs
|
mit
| 1,856
| 0
| 5
| 405
| 161
| 123
| 38
| 24
| 0
|
{-# OPTIONS_GHC -Wall #-}
module Lecture where
-- | Prompt for a number, read it from stdin, and print its successor.
main :: IO ()
main = do
  putStrLn "Please enter a number: "
  n <- readLn
  print (n + 1)
|
BerndSchwarzenbacher/cis194-solutions
|
08-io/Lecture.hs
|
mit
| 152
| 0
| 14
| 30
| 58
| 32
| 26
| 4
| 1
|
-- Since July 20 2013
module Main where
import System.IO
import Control.Exception
-- | Simple command loop: dispatch on read/write/quit, repeating until
-- the user enters "quit".
main :: IO ()
main = do
  putStrLn "Do you want to [read] a file, [write] a file or [quit]?"
  command <- getLine
  case command of
    "quit"  -> putStrLn "Goodbye!"
    "read"  -> read_file >> main
    "write" -> write_file >> main
    _       -> do
      putStrLn ("I don't understand the command " ++ command ++ ".")
      main
-- Prompt for a file name and dump that file's contents to stdout.
-- (Keeps the original's lazy 'readFile' behaviour.)
read_file :: IO ()
read_file = do
  putStrLn "Enter a file name to read:"
  path <- getLine
  contents <- readFile path
  putStr contents
-- Prompt for a file name and copy stdin lines into it until a lone ".".
-- 'bracket' guarantees the handle is closed even if writing fails.
write_file :: IO ()
write_file = do
  putStrLn "Enter a file name to write:"
  path <- getLine
  putStrLn "Enter text (dot on a line by itself to end):"
  bracket (openFile path WriteMode) hClose read_line_and_write
-- Copy stdin to the given handle line by line; a lone "." ends the loop.
read_line_and_write :: Handle -> IO ()
read_line_and_write h = do
  line <- getLine
  case line of
    "." -> return ()
    _   -> do
      hPutStrLn h line
      read_line_and_write h
|
fossilet/yaht
|
ex_5.2.hs
|
mit
| 1,168
| 0
| 15
| 428
| 259
| 119
| 140
| 33
| 4
|
{-# OPTIONS -w -O0 #-}
{- |
Module : CspCASL/ATC_CspCASL.der.hs
Description : generated Typeable, ShATermConvertible instances
Copyright : (c) DFKI Bremen 2008
License : GPLv2 or higher, see LICENSE.txt
Maintainer : Christian.Maeder@dfki.de
Stability : provisional
Portability : non-portable(overlapping Typeable instances)
Automatic derivation of instances via DrIFT-rule Typeable, ShATermConvertible
for the type(s):
'CspCASL.AS_CspCASL.CspBasicExt'
'CspCASL.AS_CspCASL.CHANNEL_DECL'
'CspCASL.AS_CspCASL.PROC_ITEM'
'CspCASL.AS_CspCASL.PARM_PROCNAME'
'CspCASL.AS_CspCASL_Process.EVENT'
'CspCASL.AS_CspCASL_Process.EVENT_SET'
'CspCASL.AS_CspCASL_Process.RenameKind'
'CspCASL.AS_CspCASL_Process.Rename'
'CspCASL.AS_CspCASL_Process.RENAMING'
'CspCASL.AS_CspCASL_Process.PROC_ALPHABET'
'CspCASL.AS_CspCASL_Process.ProcProfile'
'CspCASL.AS_CspCASL_Process.FQ_PROCESS_NAME'
'CspCASL.AS_CspCASL_Process.TypedChanName'
'CspCASL.AS_CspCASL_Process.CommType'
'CspCASL.AS_CspCASL_Process.PROCESS'
'CspCASL.SignCSP.CspSign'
'CspCASL.SignCSP.CspSen'
'CspCASL.SymbItems.CspSymbItems'
'CspCASL.SymbItems.CspSymbMapItems'
'CspCASL.SymbItems.CspSymbKind'
'CspCASL.SymbItems.CspSymb'
'CspCASL.SymbItems.CspType'
'CspCASL.SymbItems.CspSymbMap'
'CspCASL.Symbol.CspSymbType'
'CspCASL.Symbol.CspSymbol'
'CspCASL.Symbol.CspRawSymbol'
'CspCASL.Morphism.CspAddMorphism'
-}
{-
Generated by 'genRules' (automatic rule generation for DrIFT). Don't touch!!
dependency files:
CspCASL/AS_CspCASL.hs
CspCASL/AS_CspCASL_Process.hs
CspCASL/SignCSP.hs
CspCASL/SymbItems.hs
CspCASL/Symbol.hs
CspCASL/Morphism.hs
-}
module CspCASL.ATC_CspCASL () where
import ATerm.Lib
import CASL.AS_Basic_CASL
import CASL.AS_Basic_CASL (FORMULA, SORT, TERM (..), VAR)
import CASL.AS_Basic_CASL (FORMULA, TERM, SORT, SORT_ITEM (..), OpKind (..))
import CASL.AS_Basic_CASL (SORT, VAR, VAR_DECL)
import CASL.ATC_CASL
import CASL.Morphism
import CASL.Morphism as CASL_Morphism
import CASL.Overload
import CASL.Sign
import CASL.Sign as CASL_Sign
import CASL.SymbolParser
import CASL.ToDoc
import Common.AS_Annotation (Annoted)
import Common.AnnoState
import Common.Doc
import Common.Doc hiding (braces)
import Common.DocUtils
import Common.Id
import Common.Keywords
import Common.Lexer
import Common.Lib.Rel (Rel, predecessors, member)
import Common.Parsec
import Common.Result
import Common.Token
import Common.Utils (composeMap)
import Common.Utils (keepMins)
import Control.Monad
import CspCASL.AS_CspCASL
import CspCASL.AS_CspCASL ()
import CspCASL.AS_CspCASL_Process
import CspCASL.CspCASL_Keywords
import CspCASL.Morphism
import CspCASL.Parse_CspCASL_Process
import CspCASL.Print_CspCASL
import CspCASL.Print_CspCASL ()
import CspCASL.SignCSP
import CspCASL.SymbItems
import CspCASL.Symbol
import Data.List
import Data.Ord
import Data.Typeable
import Text.ParserCombinators.Parsec
import qualified CASL.MapSentence as CASL_MapSen
import qualified Common.Lib.MapSet as MapSet
import qualified Common.Lib.Rel as Rel
import qualified CspCASL.LocalTop as LT
import qualified CspCASL.LocalTop as LocalTop
import qualified Data.Map as Map
import qualified Data.Set as Set
{-! for CspCASL.AS_CspCASL.CspBasicExt derive : Typeable !-}
{-! for CspCASL.AS_CspCASL.CHANNEL_DECL derive : Typeable !-}
{-! for CspCASL.AS_CspCASL.PROC_ITEM derive : Typeable !-}
{-! for CspCASL.AS_CspCASL.PARM_PROCNAME derive : Typeable !-}
{-! for CspCASL.AS_CspCASL_Process.EVENT derive : Typeable !-}
{-! for CspCASL.AS_CspCASL_Process.EVENT_SET derive : Typeable !-}
{-! for CspCASL.AS_CspCASL_Process.RenameKind derive : Typeable !-}
{-! for CspCASL.AS_CspCASL_Process.Rename derive : Typeable !-}
{-! for CspCASL.AS_CspCASL_Process.RENAMING derive : Typeable !-}
{-! for CspCASL.AS_CspCASL_Process.PROC_ALPHABET derive : Typeable !-}
{-! for CspCASL.AS_CspCASL_Process.ProcProfile derive : Typeable !-}
{-! for CspCASL.AS_CspCASL_Process.FQ_PROCESS_NAME derive : Typeable !-}
{-! for CspCASL.AS_CspCASL_Process.TypedChanName derive : Typeable !-}
{-! for CspCASL.AS_CspCASL_Process.CommType derive : Typeable !-}
{-! for CspCASL.AS_CspCASL_Process.PROCESS derive : Typeable !-}
{-! for CspCASL.SignCSP.CspSign derive : Typeable !-}
{-! for CspCASL.SignCSP.CspSen derive : Typeable !-}
{-! for CspCASL.SymbItems.CspSymbItems derive : Typeable !-}
{-! for CspCASL.SymbItems.CspSymbMapItems derive : Typeable !-}
{-! for CspCASL.SymbItems.CspSymbKind derive : Typeable !-}
{-! for CspCASL.SymbItems.CspSymb derive : Typeable !-}
{-! for CspCASL.SymbItems.CspType derive : Typeable !-}
{-! for CspCASL.SymbItems.CspSymbMap derive : Typeable !-}
{-! for CspCASL.Symbol.CspSymbType derive : Typeable !-}
{-! for CspCASL.Symbol.CspSymbol derive : Typeable !-}
{-! for CspCASL.Symbol.CspRawSymbol derive : Typeable !-}
{-! for CspCASL.Morphism.CspAddMorphism derive : Typeable !-}
{-! for CspCASL.AS_CspCASL.CspBasicExt derive : ShATermConvertible !-}
{-! for CspCASL.AS_CspCASL.CHANNEL_DECL derive : ShATermConvertible !-}
{-! for CspCASL.AS_CspCASL.PROC_ITEM derive : ShATermConvertible !-}
{-! for CspCASL.AS_CspCASL.PARM_PROCNAME derive : ShATermConvertible !-}
{-! for CspCASL.AS_CspCASL_Process.EVENT derive : ShATermConvertible !-}
{-! for CspCASL.AS_CspCASL_Process.EVENT_SET derive : ShATermConvertible !-}
{-! for CspCASL.AS_CspCASL_Process.RenameKind derive : ShATermConvertible !-}
{-! for CspCASL.AS_CspCASL_Process.Rename derive : ShATermConvertible !-}
{-! for CspCASL.AS_CspCASL_Process.RENAMING derive : ShATermConvertible !-}
{-! for CspCASL.AS_CspCASL_Process.PROC_ALPHABET derive : ShATermConvertible !-}
{-! for CspCASL.AS_CspCASL_Process.ProcProfile derive : ShATermConvertible !-}
{-! for CspCASL.AS_CspCASL_Process.FQ_PROCESS_NAME derive : ShATermConvertible !-}
{-! for CspCASL.AS_CspCASL_Process.TypedChanName derive : ShATermConvertible !-}
{-! for CspCASL.AS_CspCASL_Process.CommType derive : ShATermConvertible !-}
{-! for CspCASL.AS_CspCASL_Process.PROCESS derive : ShATermConvertible !-}
{-! for CspCASL.SignCSP.CspSign derive : ShATermConvertible !-}
{-! for CspCASL.SignCSP.CspSen derive : ShATermConvertible !-}
{-! for CspCASL.SymbItems.CspSymbItems derive : ShATermConvertible !-}
{-! for CspCASL.SymbItems.CspSymbMapItems derive : ShATermConvertible !-}
{-! for CspCASL.SymbItems.CspSymbKind derive : ShATermConvertible !-}
{-! for CspCASL.SymbItems.CspSymb derive : ShATermConvertible !-}
{-! for CspCASL.SymbItems.CspType derive : ShATermConvertible !-}
{-! for CspCASL.SymbItems.CspSymbMap derive : ShATermConvertible !-}
{-! for CspCASL.Symbol.CspSymbType derive : ShATermConvertible !-}
{-! for CspCASL.Symbol.CspSymbol derive : ShATermConvertible !-}
{-! for CspCASL.Symbol.CspRawSymbol derive : ShATermConvertible !-}
{-! for CspCASL.Morphism.CspAddMorphism derive : ShATermConvertible !-}
-- Generated by DrIFT, look but don't touch!
-- DrIFT-generated Typeable/ShATermConvertible boilerplate for the
-- AS_CspCASL types: each constructor is serialised as a 'ShAAppl' node
-- named after the constructor.  Regenerate with DrIFT; do not hand-edit.
_tcCspBasicExtTc :: TyCon
_tcCspBasicExtTc = mkTyCon "CspCASL.AS_CspCASL.CspBasicExt"
instance Typeable CspBasicExt where
    typeOf _ = mkTyConApp _tcCspBasicExtTc []
_tcCHANNEL_DECLTc :: TyCon
_tcCHANNEL_DECLTc = mkTyCon "CspCASL.AS_CspCASL.CHANNEL_DECL"
instance Typeable CHANNEL_DECL where
    typeOf _ = mkTyConApp _tcCHANNEL_DECLTc []
_tcPROC_ITEMTc :: TyCon
_tcPROC_ITEMTc = mkTyCon "CspCASL.AS_CspCASL.PROC_ITEM"
instance Typeable PROC_ITEM where
    typeOf _ = mkTyConApp _tcPROC_ITEMTc []
_tcPARM_PROCNAMETc :: TyCon
_tcPARM_PROCNAMETc = mkTyCon "CspCASL.AS_CspCASL.PARM_PROCNAME"
instance Typeable PARM_PROCNAME where
    typeOf _ = mkTyConApp _tcPARM_PROCNAMETc []
instance ShATermConvertible CspBasicExt where
  toShATermAux att0 xv = case xv of
    Channels a b -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      return $ addATerm (ShAAppl "Channels" [a', b'] []) att2
    ProcItems a b -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      return $ addATerm (ShAAppl "ProcItems" [a', b'] []) att2
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "Channels" [a, b] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      (att2, Channels a' b') }}
    ShAAppl "ProcItems" [a, b] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      (att2, ProcItems a' b') }}
    u -> fromShATermError "CspBasicExt" u
instance ShATermConvertible CHANNEL_DECL where
  toShATermAux att0 xv = case xv of
    ChannelDecl a b -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      return $ addATerm (ShAAppl "ChannelDecl" [a', b'] []) att2
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "ChannelDecl" [a, b] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      (att2, ChannelDecl a' b') }}
    u -> fromShATermError "CHANNEL_DECL" u
-- DrIFT-generated ShATermConvertible instances for PROC_ITEM and
-- PARM_PROCNAME.  Regenerate with DrIFT; do not hand-edit.
instance ShATermConvertible PROC_ITEM where
  toShATermAux att0 xv = case xv of
    Proc_Decl a b c -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      (att3, c') <- toShATerm' att2 c
      return $ addATerm (ShAAppl "Proc_Decl" [a', b', c'] []) att3
    Proc_Defn a b c d -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      (att3, c') <- toShATerm' att2 c
      (att4, d') <- toShATerm' att3 d
      return $ addATerm (ShAAppl "Proc_Defn" [a', b', c', d'] []) att4
    Proc_Eq a b -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      return $ addATerm (ShAAppl "Proc_Eq" [a', b'] []) att2
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "Proc_Decl" [a, b, c] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      case fromShATerm' c att2 of
      { (att3, c') ->
      (att3, Proc_Decl a' b' c') }}}
    ShAAppl "Proc_Defn" [a, b, c, d] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      case fromShATerm' c att2 of
      { (att3, c') ->
      case fromShATerm' d att3 of
      { (att4, d') ->
      (att4, Proc_Defn a' b' c' d') }}}}
    ShAAppl "Proc_Eq" [a, b] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      (att2, Proc_Eq a' b') }}
    u -> fromShATermError "PROC_ITEM" u
instance ShATermConvertible PARM_PROCNAME where
  toShATermAux att0 xv = case xv of
    ParmProcname a b -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      return $ addATerm (ShAAppl "ParmProcname" [a', b'] []) att2
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "ParmProcname" [a, b] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      (att2, ParmProcname a' b') }}
    u -> fromShATermError "PARM_PROCNAME" u
-- DrIFT-generated Typeable boilerplate for the AS_CspCASL_Process types.
-- Regenerate with DrIFT; do not hand-edit.
_tcEVENTTc :: TyCon
_tcEVENTTc = mkTyCon "CspCASL.AS_CspCASL_Process.EVENT"
instance Typeable EVENT where
    typeOf _ = mkTyConApp _tcEVENTTc []
_tcEVENT_SETTc :: TyCon
_tcEVENT_SETTc = mkTyCon "CspCASL.AS_CspCASL_Process.EVENT_SET"
instance Typeable EVENT_SET where
    typeOf _ = mkTyConApp _tcEVENT_SETTc []
_tcRenameKindTc :: TyCon
_tcRenameKindTc = mkTyCon "CspCASL.AS_CspCASL_Process.RenameKind"
instance Typeable RenameKind where
    typeOf _ = mkTyConApp _tcRenameKindTc []
_tcRenameTc :: TyCon
_tcRenameTc = mkTyCon "CspCASL.AS_CspCASL_Process.Rename"
instance Typeable Rename where
    typeOf _ = mkTyConApp _tcRenameTc []
_tcRENAMINGTc :: TyCon
_tcRENAMINGTc = mkTyCon "CspCASL.AS_CspCASL_Process.RENAMING"
instance Typeable RENAMING where
    typeOf _ = mkTyConApp _tcRENAMINGTc []
_tcPROC_ALPHABETTc :: TyCon
_tcPROC_ALPHABETTc = mkTyCon "CspCASL.AS_CspCASL_Process.PROC_ALPHABET"
instance Typeable PROC_ALPHABET where
    typeOf _ = mkTyConApp _tcPROC_ALPHABETTc []
_tcProcProfileTc :: TyCon
_tcProcProfileTc = mkTyCon "CspCASL.AS_CspCASL_Process.ProcProfile"
instance Typeable ProcProfile where
    typeOf _ = mkTyConApp _tcProcProfileTc []
_tcFQ_PROCESS_NAMETc :: TyCon
_tcFQ_PROCESS_NAMETc = mkTyCon "CspCASL.AS_CspCASL_Process.FQ_PROCESS_NAME"
instance Typeable FQ_PROCESS_NAME where
    typeOf _ = mkTyConApp _tcFQ_PROCESS_NAMETc []
_tcTypedChanNameTc :: TyCon
_tcTypedChanNameTc = mkTyCon "CspCASL.AS_CspCASL_Process.TypedChanName"
instance Typeable TypedChanName where
    typeOf _ = mkTyConApp _tcTypedChanNameTc []
_tcCommTypeTc :: TyCon
_tcCommTypeTc = mkTyCon "CspCASL.AS_CspCASL_Process.CommType"
instance Typeable CommType where
    typeOf _ = mkTyConApp _tcCommTypeTc []
_tcPROCESSTc :: TyCon
_tcPROCESSTc = mkTyCon "CspCASL.AS_CspCASL_Process.PROCESS"
instance Typeable PROCESS where
    typeOf _ = mkTyConApp _tcPROCESSTc []
-- DrIFT-generated ShATermConvertible instance for EVENT (one arm per
-- constructor, round-tripping through 'ShAAppl' nodes).  Regenerate with
-- DrIFT; do not hand-edit.
instance ShATermConvertible EVENT where
  toShATermAux att0 xv = case xv of
    TermEvent a b -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      return $ addATerm (ShAAppl "TermEvent" [a', b'] []) att2
    ExternalPrefixChoice a b c -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      (att3, c') <- toShATerm' att2 c
      return $ addATerm (ShAAppl "ExternalPrefixChoice" [a', b',
        c'] []) att3
    InternalPrefixChoice a b c -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      (att3, c') <- toShATerm' att2 c
      return $ addATerm (ShAAppl "InternalPrefixChoice" [a', b',
        c'] []) att3
    ChanSend a b c -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      (att3, c') <- toShATerm' att2 c
      return $ addATerm (ShAAppl "ChanSend" [a', b', c'] []) att3
    ChanNonDetSend a b c d -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      (att3, c') <- toShATerm' att2 c
      (att4, d') <- toShATerm' att3 d
      return $ addATerm (ShAAppl "ChanNonDetSend" [a', b', c',
        d'] []) att4
    ChanRecv a b c d -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      (att3, c') <- toShATerm' att2 c
      (att4, d') <- toShATerm' att3 d
      return $ addATerm (ShAAppl "ChanRecv" [a', b', c', d'] []) att4
    FQTermEvent a b -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      return $ addATerm (ShAAppl "FQTermEvent" [a', b'] []) att2
    FQExternalPrefixChoice a b -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      return $ addATerm (ShAAppl "FQExternalPrefixChoice" [a',
        b'] []) att2
    FQInternalPrefixChoice a b -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      return $ addATerm (ShAAppl "FQInternalPrefixChoice" [a',
        b'] []) att2
    FQChanSend a b c -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      (att3, c') <- toShATerm' att2 c
      return $ addATerm (ShAAppl "FQChanSend" [a', b', c'] []) att3
    FQChanNonDetSend a b c -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      (att3, c') <- toShATerm' att2 c
      return $ addATerm (ShAAppl "FQChanNonDetSend" [a', b', c'] []) att3
    FQChanRecv a b c -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      (att3, c') <- toShATerm' att2 c
      return $ addATerm (ShAAppl "FQChanRecv" [a', b', c'] []) att3
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "TermEvent" [a, b] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      (att2, TermEvent a' b') }}
    ShAAppl "ExternalPrefixChoice" [a, b, c] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      case fromShATerm' c att2 of
      { (att3, c') ->
      (att3, ExternalPrefixChoice a' b' c') }}}
    ShAAppl "InternalPrefixChoice" [a, b, c] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      case fromShATerm' c att2 of
      { (att3, c') ->
      (att3, InternalPrefixChoice a' b' c') }}}
    ShAAppl "ChanSend" [a, b, c] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      case fromShATerm' c att2 of
      { (att3, c') ->
      (att3, ChanSend a' b' c') }}}
    ShAAppl "ChanNonDetSend" [a, b, c, d] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      case fromShATerm' c att2 of
      { (att3, c') ->
      case fromShATerm' d att3 of
      { (att4, d') ->
      (att4, ChanNonDetSend a' b' c' d') }}}}
    ShAAppl "ChanRecv" [a, b, c, d] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      case fromShATerm' c att2 of
      { (att3, c') ->
      case fromShATerm' d att3 of
      { (att4, d') ->
      (att4, ChanRecv a' b' c' d') }}}}
    ShAAppl "FQTermEvent" [a, b] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      (att2, FQTermEvent a' b') }}
    ShAAppl "FQExternalPrefixChoice" [a, b] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      (att2, FQExternalPrefixChoice a' b') }}
    ShAAppl "FQInternalPrefixChoice" [a, b] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      (att2, FQInternalPrefixChoice a' b') }}
    ShAAppl "FQChanSend" [a, b, c] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      case fromShATerm' c att2 of
      { (att3, c') ->
      (att3, FQChanSend a' b' c') }}}
    ShAAppl "FQChanNonDetSend" [a, b, c] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      case fromShATerm' c att2 of
      { (att3, c') ->
      (att3, FQChanNonDetSend a' b' c') }}}
    ShAAppl "FQChanRecv" [a, b, c] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      case fromShATerm' c att2 of
      { (att3, c') ->
      (att3, FQChanRecv a' b' c') }}}
    u -> fromShATermError "EVENT" u
-- DrIFT-generated ShATermConvertible instances for EVENT_SET, RenameKind,
-- Rename and RENAMING.  Regenerate with DrIFT; do not hand-edit.
instance ShATermConvertible EVENT_SET where
  toShATermAux att0 xv = case xv of
    EventSet a b -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      return $ addATerm (ShAAppl "EventSet" [a', b'] []) att2
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "EventSet" [a, b] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      (att2, EventSet a' b') }}
    u -> fromShATermError "EVENT_SET" u
instance ShATermConvertible RenameKind where
  toShATermAux att0 xv = case xv of
    TotOp -> return $ addATerm (ShAAppl "TotOp" [] []) att0
    PartOp -> return $ addATerm (ShAAppl "PartOp" [] []) att0
    BinPred -> return $ addATerm (ShAAppl "BinPred" [] []) att0
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "TotOp" [] _ -> (att0, TotOp)
    ShAAppl "PartOp" [] _ -> (att0, PartOp)
    ShAAppl "BinPred" [] _ -> (att0, BinPred)
    u -> fromShATermError "RenameKind" u
instance ShATermConvertible Rename where
  toShATermAux att0 xv = case xv of
    Rename a b -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      return $ addATerm (ShAAppl "Rename" [a', b'] []) att2
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "Rename" [a, b] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      (att2, Rename a' b') }}
    u -> fromShATermError "Rename" u
instance ShATermConvertible RENAMING where
  toShATermAux att0 xv = case xv of
    Renaming a -> do
      (att1, a') <- toShATerm' att0 a
      return $ addATerm (ShAAppl "Renaming" [a'] []) att1
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "Renaming" [a] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      (att1, Renaming a') }
    u -> fromShATermError "RENAMING" u
-- DrIFT-generated ShATermConvertible instances for PROC_ALPHABET,
-- ProcProfile and FQ_PROCESS_NAME.  Regenerate with DrIFT; do not hand-edit.
instance ShATermConvertible PROC_ALPHABET where
  toShATermAux att0 xv = case xv of
    ProcAlphabet a -> do
      (att1, a') <- toShATerm' att0 a
      return $ addATerm (ShAAppl "ProcAlphabet" [a'] []) att1
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "ProcAlphabet" [a] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      (att1, ProcAlphabet a') }
    u -> fromShATermError "PROC_ALPHABET" u
instance ShATermConvertible ProcProfile where
  toShATermAux att0 xv = case xv of
    ProcProfile a b -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      return $ addATerm (ShAAppl "ProcProfile" [a', b'] []) att2
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "ProcProfile" [a, b] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      (att2, ProcProfile a' b') }}
    u -> fromShATermError "ProcProfile" u
instance ShATermConvertible FQ_PROCESS_NAME where
  toShATermAux att0 xv = case xv of
    PROCESS_NAME a -> do
      (att1, a') <- toShATerm' att0 a
      return $ addATerm (ShAAppl "PROCESS_NAME" [a'] []) att1
    FQ_PROCESS_NAME a b -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      return $ addATerm (ShAAppl "FQ_PROCESS_NAME" [a', b'] []) att2
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "PROCESS_NAME" [a] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      (att1, PROCESS_NAME a') }
    ShAAppl "FQ_PROCESS_NAME" [a, b] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      (att2, FQ_PROCESS_NAME a' b') }}
    u -> fromShATermError "FQ_PROCESS_NAME" u
-- DrIFT-generated ShATermConvertible instances for TypedChanName and
-- CommType.  Regenerate with DrIFT; do not hand-edit.
instance ShATermConvertible TypedChanName where
  toShATermAux att0 xv = case xv of
    TypedChanName a b -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      return $ addATerm (ShAAppl "TypedChanName" [a', b'] []) att2
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "TypedChanName" [a, b] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      (att2, TypedChanName a' b') }}
    u -> fromShATermError "TypedChanName" u
instance ShATermConvertible CommType where
  toShATermAux att0 xv = case xv of
    CommTypeSort a -> do
      (att1, a') <- toShATerm' att0 a
      return $ addATerm (ShAAppl "CommTypeSort" [a'] []) att1
    CommTypeChan a -> do
      (att1, a') <- toShATerm' att0 a
      return $ addATerm (ShAAppl "CommTypeChan" [a'] []) att1
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "CommTypeSort" [a] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      (att1, CommTypeSort a') }
    ShAAppl "CommTypeChan" [a] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      (att1, CommTypeChan a') }
    u -> fromShATermError "CommType" u
-- Machine-generated ShATerm (de)serialization for the 'PROCESS' syntax tree:
-- one branch per constructor, each serializing its fields in order while
-- threading the ATerm table, and a symmetric deserializer keyed on the
-- constructor's tag string.
instance ShATermConvertible PROCESS where
  toShATermAux att0 xv = case xv of
    Skip a -> do
      (att1, a') <- toShATerm' att0 a
      return $ addATerm (ShAAppl "Skip" [a'] []) att1
    Stop a -> do
      (att1, a') <- toShATerm' att0 a
      return $ addATerm (ShAAppl "Stop" [a'] []) att1
    Div a -> do
      (att1, a') <- toShATerm' att0 a
      return $ addATerm (ShAAppl "Div" [a'] []) att1
    Run a b -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      return $ addATerm (ShAAppl "Run" [a', b'] []) att2
    Chaos a b -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      return $ addATerm (ShAAppl "Chaos" [a', b'] []) att2
    PrefixProcess a b c -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      (att3, c') <- toShATerm' att2 c
      return $ addATerm (ShAAppl "PrefixProcess" [a', b', c'] []) att3
    Sequential a b c -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      (att3, c') <- toShATerm' att2 c
      return $ addATerm (ShAAppl "Sequential" [a', b', c'] []) att3
    ExternalChoice a b c -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      (att3, c') <- toShATerm' att2 c
      return $ addATerm (ShAAppl "ExternalChoice" [a', b', c'] []) att3
    InternalChoice a b c -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      (att3, c') <- toShATerm' att2 c
      return $ addATerm (ShAAppl "InternalChoice" [a', b', c'] []) att3
    Interleaving a b c -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      (att3, c') <- toShATerm' att2 c
      return $ addATerm (ShAAppl "Interleaving" [a', b', c'] []) att3
    SynchronousParallel a b c -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      (att3, c') <- toShATerm' att2 c
      return $ addATerm (ShAAppl "SynchronousParallel" [a', b',
        c'] []) att3
    GeneralisedParallel a b c d -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      (att3, c') <- toShATerm' att2 c
      (att4, d') <- toShATerm' att3 d
      return $ addATerm (ShAAppl "GeneralisedParallel" [a', b', c',
        d'] []) att4
    AlphabetisedParallel a b c d e -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      (att3, c') <- toShATerm' att2 c
      (att4, d') <- toShATerm' att3 d
      (att5, e') <- toShATerm' att4 e
      return $ addATerm (ShAAppl "AlphabetisedParallel" [a', b', c', d',
        e'] []) att5
    Hiding a b c -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      (att3, c') <- toShATerm' att2 c
      return $ addATerm (ShAAppl "Hiding" [a', b', c'] []) att3
    RenamingProcess a b c -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      (att3, c') <- toShATerm' att2 c
      return $ addATerm (ShAAppl "RenamingProcess" [a', b', c'] []) att3
    ConditionalProcess a b c d -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      (att3, c') <- toShATerm' att2 c
      (att4, d') <- toShATerm' att3 d
      return $ addATerm (ShAAppl "ConditionalProcess" [a', b', c',
        d'] []) att4
    NamedProcess a b c -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      (att3, c') <- toShATerm' att2 c
      return $ addATerm (ShAAppl "NamedProcess" [a', b', c'] []) att3
    FQProcess a b c -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      (att3, c') <- toShATerm' att2 c
      return $ addATerm (ShAAppl "FQProcess" [a', b', c'] []) att3
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "Skip" [a] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      (att1, Skip a') }
    ShAAppl "Stop" [a] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      (att1, Stop a') }
    ShAAppl "Div" [a] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      (att1, Div a') }
    ShAAppl "Run" [a, b] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      (att2, Run a' b') }}
    ShAAppl "Chaos" [a, b] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      (att2, Chaos a' b') }}
    ShAAppl "PrefixProcess" [a, b, c] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      case fromShATerm' c att2 of
      { (att3, c') ->
      (att3, PrefixProcess a' b' c') }}}
    ShAAppl "Sequential" [a, b, c] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      case fromShATerm' c att2 of
      { (att3, c') ->
      (att3, Sequential a' b' c') }}}
    ShAAppl "ExternalChoice" [a, b, c] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      case fromShATerm' c att2 of
      { (att3, c') ->
      (att3, ExternalChoice a' b' c') }}}
    ShAAppl "InternalChoice" [a, b, c] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      case fromShATerm' c att2 of
      { (att3, c') ->
      (att3, InternalChoice a' b' c') }}}
    ShAAppl "Interleaving" [a, b, c] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      case fromShATerm' c att2 of
      { (att3, c') ->
      (att3, Interleaving a' b' c') }}}
    ShAAppl "SynchronousParallel" [a, b, c] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      case fromShATerm' c att2 of
      { (att3, c') ->
      (att3, SynchronousParallel a' b' c') }}}
    ShAAppl "GeneralisedParallel" [a, b, c, d] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      case fromShATerm' c att2 of
      { (att3, c') ->
      case fromShATerm' d att3 of
      { (att4, d') ->
      (att4, GeneralisedParallel a' b' c' d') }}}}
    ShAAppl "AlphabetisedParallel" [a, b, c, d, e] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      case fromShATerm' c att2 of
      { (att3, c') ->
      case fromShATerm' d att3 of
      { (att4, d') ->
      case fromShATerm' e att4 of
      { (att5, e') ->
      (att5, AlphabetisedParallel a' b' c' d' e') }}}}}
    ShAAppl "Hiding" [a, b, c] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      case fromShATerm' c att2 of
      { (att3, c') ->
      (att3, Hiding a' b' c') }}}
    ShAAppl "RenamingProcess" [a, b, c] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      case fromShATerm' c att2 of
      { (att3, c') ->
      (att3, RenamingProcess a' b' c') }}}
    ShAAppl "ConditionalProcess" [a, b, c, d] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      case fromShATerm' c att2 of
      { (att3, c') ->
      case fromShATerm' d att3 of
      { (att4, d') ->
      (att4, ConditionalProcess a' b' c' d') }}}}
    ShAAppl "NamedProcess" [a, b, c] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      case fromShATerm' c att2 of
      { (att3, c') ->
      (att3, NamedProcess a' b' c') }}}
    ShAAppl "FQProcess" [a, b, c] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      case fromShATerm' c att2 of
      { (att3, c') ->
      (att3, FQProcess a' b' c') }}}
    u -> fromShATermError "PROCESS" u
-- Machine-generated 'Typeable' witnesses (one TyCon value per type, named
-- after its defining module) and the remaining ShATerm (de)serialization
-- instances, following the same tag/argument-list pattern as above.
_tcCspAddMorphismTc :: TyCon
_tcCspAddMorphismTc = mkTyCon "CspCASL.Morphism.CspAddMorphism"
instance Typeable CspAddMorphism where
    typeOf _ = mkTyConApp _tcCspAddMorphismTc []
instance ShATermConvertible CspAddMorphism where
  toShATermAux att0 xv = case xv of
    CspAddMorphism a b -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      return $ addATerm (ShAAppl "CspAddMorphism" [a', b'] []) att2
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "CspAddMorphism" [a, b] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      (att2, CspAddMorphism a' b') }}
    u -> fromShATermError "CspAddMorphism" u
_tcCspSignTc :: TyCon
_tcCspSignTc = mkTyCon "CspCASL.SignCSP.CspSign"
instance Typeable CspSign where
    typeOf _ = mkTyConApp _tcCspSignTc []
_tcCspSenTc :: TyCon
_tcCspSenTc = mkTyCon "CspCASL.SignCSP.CspSen"
instance Typeable CspSen where
    typeOf _ = mkTyConApp _tcCspSenTc []
instance ShATermConvertible CspSign where
  toShATermAux att0 xv = case xv of
    CspSign a b -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      return $ addATerm (ShAAppl "CspSign" [a', b'] []) att2
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "CspSign" [a, b] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      (att2, CspSign a' b') }}
    u -> fromShATermError "CspSign" u
instance ShATermConvertible CspSen where
  toShATermAux att0 xv = case xv of
    ProcessEq a b c d -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      (att3, c') <- toShATerm' att2 c
      (att4, d') <- toShATerm' att3 d
      return $ addATerm (ShAAppl "ProcessEq" [a', b', c', d'] []) att4
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "ProcessEq" [a, b, c, d] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      case fromShATerm' c att2 of
      { (att3, c') ->
      case fromShATerm' d att3 of
      { (att4, d') ->
      (att4, ProcessEq a' b' c' d') }}}}
    u -> fromShATermError "CspSen" u
instance ShATermConvertible CspSymbMap where
  toShATermAux att0 xv = case xv of
    CspSymbMap a b -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      return $ addATerm (ShAAppl "CspSymbMap" [a', b'] []) att2
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "CspSymbMap" [a, b] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      (att2, CspSymbMap a' b') }}
    u -> fromShATermError "CspSymbMap" u
instance ShATermConvertible CspType where
  toShATermAux att0 xv = case xv of
    CaslType a -> do
      (att1, a') <- toShATerm' att0 a
      return $ addATerm (ShAAppl "CaslType" [a'] []) att1
    ProcType a -> do
      (att1, a') <- toShATerm' att0 a
      return $ addATerm (ShAAppl "ProcType" [a'] []) att1
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "CaslType" [a] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      (att1, CaslType a') }
    ShAAppl "ProcType" [a] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      (att1, ProcType a') }
    u -> fromShATermError "CspType" u
instance ShATermConvertible CspSymb where
  toShATermAux att0 xv = case xv of
    CspSymb a b -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      return $ addATerm (ShAAppl "CspSymb" [a', b'] []) att2
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "CspSymb" [a, b] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      (att2, CspSymb a' b') }}
    u -> fromShATermError "CspSymb" u
instance ShATermConvertible CspSymbKind where
  toShATermAux att0 xv = case xv of
    CaslKind a -> do
      (att1, a') <- toShATerm' att0 a
      return $ addATerm (ShAAppl "CaslKind" [a'] []) att1
    ProcessKind -> return $ addATerm (ShAAppl "ProcessKind" [] []) att0
    ChannelKind -> return $ addATerm (ShAAppl "ChannelKind" [] []) att0
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "CaslKind" [a] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      (att1, CaslKind a') }
    ShAAppl "ProcessKind" [] _ -> (att0, ProcessKind)
    ShAAppl "ChannelKind" [] _ -> (att0, ChannelKind)
    u -> fromShATermError "CspSymbKind" u
instance ShATermConvertible CspSymbMapItems where
  toShATermAux att0 xv = case xv of
    CspSymbMapItems a b -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      return $ addATerm (ShAAppl "CspSymbMapItems" [a', b'] []) att2
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "CspSymbMapItems" [a, b] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      (att2, CspSymbMapItems a' b') }}
    u -> fromShATermError "CspSymbMapItems" u
instance ShATermConvertible CspSymbItems where
  toShATermAux att0 xv = case xv of
    CspSymbItems a b -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      return $ addATerm (ShAAppl "CspSymbItems" [a', b'] []) att2
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "CspSymbItems" [a, b] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      (att2, CspSymbItems a' b') }}
    u -> fromShATermError "CspSymbItems" u
_tcCspSymbMapTc :: TyCon
_tcCspSymbMapTc = mkTyCon "CspCASL.SymbItems.CspSymbMap"
instance Typeable CspSymbMap where
    typeOf _ = mkTyConApp _tcCspSymbMapTc []
_tcCspTypeTc :: TyCon
_tcCspTypeTc = mkTyCon "CspCASL.SymbItems.CspType"
instance Typeable CspType where
    typeOf _ = mkTyConApp _tcCspTypeTc []
_tcCspSymbTc :: TyCon
_tcCspSymbTc = mkTyCon "CspCASL.SymbItems.CspSymb"
instance Typeable CspSymb where
    typeOf _ = mkTyConApp _tcCspSymbTc []
_tcCspSymbKindTc :: TyCon
_tcCspSymbKindTc = mkTyCon "CspCASL.SymbItems.CspSymbKind"
instance Typeable CspSymbKind where
    typeOf _ = mkTyConApp _tcCspSymbKindTc []
_tcCspSymbMapItemsTc :: TyCon
_tcCspSymbMapItemsTc = mkTyCon "CspCASL.SymbItems.CspSymbMapItems"
instance Typeable CspSymbMapItems where
    typeOf _ = mkTyConApp _tcCspSymbMapItemsTc []
_tcCspSymbItemsTc :: TyCon
_tcCspSymbItemsTc = mkTyCon "CspCASL.SymbItems.CspSymbItems"
instance Typeable CspSymbItems where
    typeOf _ = mkTyConApp _tcCspSymbItemsTc []
_tcCspSymbTypeTc :: TyCon
_tcCspSymbTypeTc = mkTyCon "CspCASL.Symbol.CspSymbType"
instance Typeable CspSymbType where
    typeOf _ = mkTyConApp _tcCspSymbTypeTc []
_tcCspSymbolTc :: TyCon
_tcCspSymbolTc = mkTyCon "CspCASL.Symbol.CspSymbol"
instance Typeable CspSymbol where
    typeOf _ = mkTyConApp _tcCspSymbolTc []
_tcCspRawSymbolTc :: TyCon
_tcCspRawSymbolTc = mkTyCon "CspCASL.Symbol.CspRawSymbol"
instance Typeable CspRawSymbol where
    typeOf _ = mkTyConApp _tcCspRawSymbolTc []
instance ShATermConvertible CspSymbType where
  toShATermAux att0 xv = case xv of
    CaslSymbType a -> do
      (att1, a') <- toShATerm' att0 a
      return $ addATerm (ShAAppl "CaslSymbType" [a'] []) att1
    ProcAsItemType a -> do
      (att1, a') <- toShATerm' att0 a
      return $ addATerm (ShAAppl "ProcAsItemType" [a'] []) att1
    ChanAsItemType a -> do
      (att1, a') <- toShATerm' att0 a
      return $ addATerm (ShAAppl "ChanAsItemType" [a'] []) att1
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "CaslSymbType" [a] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      (att1, CaslSymbType a') }
    ShAAppl "ProcAsItemType" [a] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      (att1, ProcAsItemType a') }
    ShAAppl "ChanAsItemType" [a] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      (att1, ChanAsItemType a') }
    u -> fromShATermError "CspSymbType" u
instance ShATermConvertible CspSymbol where
  toShATermAux att0 xv = case xv of
    CspSymbol a b -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      return $ addATerm (ShAAppl "CspSymbol" [a', b'] []) att2
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "CspSymbol" [a, b] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      (att2, CspSymbol a' b') }}
    u -> fromShATermError "CspSymbol" u
instance ShATermConvertible CspRawSymbol where
  toShATermAux att0 xv = case xv of
    ACspSymbol a -> do
      (att1, a') <- toShATerm' att0 a
      return $ addATerm (ShAAppl "ACspSymbol" [a'] []) att1
    CspKindedSymb a b -> do
      (att1, a') <- toShATerm' att0 a
      (att2, b') <- toShATerm' att1 b
      return $ addATerm (ShAAppl "CspKindedSymb" [a', b'] []) att2
  fromShATermAux ix att0 = case getShATerm ix att0 of
    ShAAppl "ACspSymbol" [a] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      (att1, ACspSymbol a') }
    ShAAppl "CspKindedSymb" [a, b] _ ->
      case fromShATerm' a att0 of
      { (att1, a') ->
      case fromShATerm' b att1 of
      { (att2, b') ->
      (att2, CspKindedSymb a' b') }}
    u -> fromShATermError "CspRawSymbol" u
|
nevrenato/Hets_Fork
|
CspCASL/ATC_CspCASL.hs
|
gpl-2.0
| 41,264
| 0
| 25
| 10,440
| 13,570
| 7,105
| 6,465
| 968
| 1
|
{-# LANGUAGE CPP #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE TupleSections #-}
{-# OPTIONS_GHC -fno-warn-type-defaults #-}
-----------------------------------------------------------------------------
--
-- Module : Main
-- Copyright : 2007-2011 Juergen Nicklisch-Franken, Hamish Mackenzie
-- License : GPL
--
-- Maintainer : maintainer@leksah.org
-- Stability : provisional
-- Portability :
--
-- |
--
-----------------------------------------------------------------------------
module Main (
main
, collectPackage
) where
import Prelude ()
import Prelude.Compat
import System.Console.GetOpt
(ArgDescr(..), usageInfo, ArgOrder(..), getOpt, OptDescr(..))
import System.Environment (getArgs)
import System.FilePath (takeBaseName, (</>), (<.>))
import Control.Monad (forM_, when, forM)
import Data.Version (showVersion)
import IDE.Utils.FileUtils
import IDE.Utils.Utils
import IDE.Utils.GHCUtils
import IDE.StrippedPrefs
import IDE.Metainfo.WorkspaceCollector
import Data.Maybe(catMaybes, fromJust, mapMaybe, isJust)
import qualified Data.Set as Set (member)
import IDE.Core.CTypes hiding (Extension)
import IDE.Metainfo.SourceDB (buildSourceForPackageDB, getDataDir, version)
import Data.Time
import Control.Exception
(catch, SomeException)
import System.Log
import System.Log.Logger(updateGlobalLogger,rootLoggerName,addHandler,debugM,infoM,errorM,
setLevel)
import System.Log.Handler.Simple(fileHandler)
import Network.Socket
(withSocketsDo, getAddrInfo, SocketType(..),
AddrInfo(..), defaultHints, AddrInfoFlag(..))
import IDE.Utils.Server
import System.IO (Handle, hPutStrLn, hGetLine, hFlush, hClose)
import IDE.HeaderParser(parseTheHeader)
import Data.IORef
import Control.Concurrent (MVar,putMVar)
import IDE.Metainfo.PackageCollector
(collectPackageOnly, collectPackage)
import Data.List (nub, delete, sortBy)
import Data.Ord (comparing)
import System.Directory
(createDirectoryIfMissing, removeFile, doesFileExist,
removeDirectoryRecursive, doesDirectoryExist)
import IDE.Metainfo.SourceCollectorH (PackageCollectStats(..))
import Control.Monad.IO.Class (MonadIO(..))
import qualified Data.Text as T (strip, pack, unpack)
import Data.Text (Text)
import Distribution.Package (pkgName)
import Distribution.Text (display)
import IDE.Utils.Project (filePathToProjectKey)
-- --------------------------------------------------------------------
-- Command line options
--
-- | Command line flags understood by the leksah-server executable.
-- Main modes are 'CollectSystem' and 'ServerCommand'; the rest modify
-- behaviour or configure logging and file locations.
data Flag = CollectSystem
            | ServerCommand (Maybe Text)
            --modifiers
            | Rebuild
            | Sources
            -- | Directory FilePath
            --others
            | VersionF
            | Help
            | Debug
            | Verbosity Text
            | LogFile Text
            | Forever
            | EndWithLast
            | ProjectFile FilePath
            | PackageBuildDir FilePath
            | MetadataOutputDir FilePath
       deriving (Show,Eq)
-- | GetOpt descriptors mapping each command line switch to a 'Flag',
-- with the help text shown by 'usageInfo'.
options :: [OptDescr Flag]
options = [
    -- main functions
          Option ['s'] ["system"] (NoArg CollectSystem)
                "Collects new information for installed packages"
        , Option ['r'] ["server"] (OptArg (ServerCommand . (T.pack <$>)) "Maybe Port")
                "Start as server."
        , Option ['b'] ["rebuild"] (NoArg Rebuild)
                "Modifier for -s and -p: Rebuild metadata"
        , Option ['o'] ["sources"] (NoArg Sources)
                "Modifier for -s: Gather info about pathes to sources"
        , Option ['v'] ["version"] (NoArg VersionF)
                "Show the version number of ide"
        , Option ['h'] ["help"] (NoArg Help)
                "Display command line options"
        , Option ['d'] ["debug"] (NoArg Debug)
                "Write ascii pack files"
        , Option ['e'] ["verbosity"] (ReqArg (Verbosity . T.pack) "Verbosity")
                "One of DEBUG, INFO, NOTICE, WARNING, ERROR, CRITICAL, ALERT, EMERGENCY"
        , Option ['l'] ["logfile"] (ReqArg (LogFile . T.pack) "LogFile")
                "File path for logging messages"
        , Option ['f'] ["forever"] (NoArg Forever)
                "Don't end the server when last connection ends"
        , Option ['c'] ["endWithLast"] (NoArg EndWithLast)
                "End the server when last connection ends"
        , Option ['p'] ["projectFile"] (ReqArg ProjectFile "ProjectFile")
                "Project file to include in collection"
        , Option ['i'] ["input"] (ReqArg PackageBuildDir "PackageBuildDir")
                "Collect metadata for the package that has been built the specified directory"
        , Option ['m'] ["metadata"] (ReqArg MetadataOutputDir "MetadataOutputDir")
                "Output location for metadata files"
    ]
-- | Usage banner prepended to the option listing in help and error output.
header :: String
header = "Usage: leksah-server [OPTION...] files..."
-- | Parse the command line with 'getOpt'.  Yields the recognised flags and
-- any remaining non-option arguments; on a parse error, fails in IO with
-- the collected error messages followed by the usage text.
ideOpts :: [String] -> IO ([Flag], [String])
ideOpts argv =
    case getOpt Permute options argv of
        (flags, rest, []) -> return (flags, rest)
        (_, _, errs) ->
            ioError . userError $ concat errs ++ usageInfo header options
-- ---------------------------------------------------------------------
-- | Main function
--
-- | Entry point.  Parses flags, configures hslogger (level plus optional
-- file handler), then either prints version/help, runs one-shot metadata
-- collection for explicit -i/-m directory pairs, or performs system
-- collection and/or starts the socket server, depending on the flags.
-- Any exception escaping 'inner' is logged and swallowed by 'handler'.
main :: IO ()
main = withSocketsDo $ catch inner handler
  where
    handler (e :: SomeException) = do
        putStrLn $ "leksah-server: " ++ show e
        errorM "leksah-server" (show e)
        return ()
    inner = do
        args <- getArgs
        (o,_) <- ideOpts args
        -- Extract the last-resort defaults: INFO level, no log file.
        let verbosity' = catMaybes $
                map (\case
                        Verbosity s -> Just s
                        _ -> Nothing) o
        let verbosity = case verbosity' of
                            [] -> INFO
                            h:_ -> read $ T.unpack h
        let logFile' = catMaybes $
                map (\case
                        LogFile s -> Just s
                        _ -> Nothing) o
        let logFile = case logFile' of
                            [] -> Nothing
                            h:_ -> Just h
        updateGlobalLogger rootLoggerName (setLevel verbosity)
        when (isJust logFile) $ do
            handler' <- fileHandler (T.unpack $ fromJust logFile) verbosity
            updateGlobalLogger rootLoggerName (addHandler handler')
        infoM "leksah-server" "***server start"
        debugM "leksah-server" $ "args: " ++ show args
        if elem VersionF o
            then putStrLn $ "Leksah Haskell IDE (server), version " ++ showVersion version
            else if elem Help o
                then putStrLn $ "Leksah Haskell IDE (server) " ++ usageInfo header options
                else do
                    let sources = elem Sources o
                    let rebuild = elem Rebuild o
                    let debug = elem Debug o
                    let forever = elem Forever o
                    let endWithLast = elem EndWithLast o
                    -- Every -i dir is paired with every -m dir; normally one of each.
                    let metadataCollectList = [(inDir, outDir) | PackageBuildDir inDir <- o, MetadataOutputDir outDir <- o]
                    forM_ metadataCollectList $ \(inDir, outDir) ->
                        getPackageDBs [] >>= mapM_ (collectOne inDir outDir . pDBsPaths)
                    -- Explicit -i/-m collection short-circuits all other modes.
                    when (null metadataCollectList) $ do
                        dataDir <- getDataDir
                        prefsPath <- getConfigFilePathForLoad strippedPreferencesFilename Nothing dataDir
                        prefs <- readStrippedPrefs prefsPath
                        debugM "leksah-server" $ "prefs " ++ show prefs
                        connRef <- newIORef []
                        let hints = defaultHints { addrFlags = [AI_NUMERICHOST, AI_NUMERICSERV], addrSocketType = Stream }
                        -- -f / -c override the endWithLastConn preference.
                        let newPrefs
                              | forever && not endWithLast = prefs{endWithLastConn = False}
                              | not forever && endWithLast = prefs{endWithLastConn = True}
                              | otherwise = prefs
                        when (elem CollectSystem o) $ do
                            debugM "leksah-server" "collectSystem"
                            collectSystem prefs debug rebuild sources =<< getPackageDBs
                                (mapMaybe filePathToProjectKey [p | ProjectFile p <- o])
                        -- Only the first -r flag is honoured; Nothing means default port.
                        case [s | ServerCommand s <- o] of
                            (Nothing:_) -> do
                                localServerAddr:_ <- getAddrInfo (Just hints) (Just "127.0.0.1") (Just . show $ serverPort prefs)
                                running <- serveOne Nothing (server newPrefs connRef localServerAddr)
                                waitFor running
                                return ()
                            (Just ps:_) -> do
                                let port :: Int = read $ T.unpack ps
                                localServerAddr:_ <- getAddrInfo (Just hints) (Just "127.0.0.1") (Just . show $ port)
                                running <- serveOne Nothing (server newPrefs connRef localServerAddr)
                                waitFor running
                                return ()
                            _ -> return ()
-- | Build the 'Server' value: listen on the given address and handle each
-- connection with 'doCommands'.
server prefs connRef addrInfo = Server addrInfo (doCommands prefs connRef)
-- | Connection entry point: register the connection handle in the shared
-- list, then enter the command loop for this connection.
doCommands :: Prefs -> IORef [Handle] -> (Handle, t1) -> MVar () -> IO ()
doCommands prefs connRef (h,n) mvar = do
    -- Strict variant: avoid building up thunks in the shared IORef as
    -- connections come and go (atomicModifyIORef is lazy in the new value).
    atomicModifyIORef' connRef (\ list -> (h : list, ()))
    doCommands' prefs connRef (h,n) mvar
-- | Command loop for one connection: read a line, 'read'-parse it as a
-- server command, execute it, answer with 'ServerOK' or 'ServerFailed',
-- and recurse.  On a read failure the connection is unregistered; when it
-- was the last one and 'endWithLastConn' is set, the server's wait MVar is
-- filled so the process can exit.
doCommands' :: Prefs -> IORef [Handle] -> (Handle, t1) -> MVar () -> IO ()
doCommands' prefs connRef (h,n) mvar = do
    debugM "leksah-server" "***wait"
    mbLine <- catch (Just <$> hGetLine h)
        (\ (_e :: SomeException) -> do
            infoM "leksah-server" "***lost connection"
            hClose h
            atomicModifyIORef connRef (\ list -> (delete h list,()))
            handles <- readIORef connRef
            case handles of
                [] -> do
                    if endWithLastConn prefs
                        then do
                            infoM "leksah-server" "***lost last connection - exiting"
                            -- we're waiting on that mvar before exiting
                            putMVar mvar ()
                        else infoM "leksah-server" "***lost last connection - waiting"
                    return Nothing
                _ -> return Nothing)
    case mbLine of
        Nothing -> return ()
        Just line -> do
            case read line of
                SystemCommand rebuild sources _extract dbs -> --the extract arg is not used
                    catch (do
                        collectSystem prefs False rebuild sources dbs
                        hPutStrLn h (show ServerOK)
                        hFlush h)
                        (\ (e :: SomeException) -> do
                            hPutStrLn h (show (ServerFailed (T.pack $ show e)))
                            hFlush h)
                WorkspaceCommand rebuild package project packageFile modList ->
                    catch (do
                        collectWorkspace package modList rebuild False project packageFile
                        hPutStrLn h (show ServerOK)
                        hFlush h)
                        (\ (e :: SomeException) -> do
                            hPutStrLn h (show (ServerFailed (T.pack $ show e)))
                            hFlush h)
                ParseHeaderCommand project packageFile filePath ->
                    catch (do
                        res <- parseTheHeader project packageFile filePath
                        hPutStrLn h (show res)
                        hFlush h)
                        (\ (e :: SomeException) -> do
                            hPutStrLn h (show (ServerFailed (T.pack $ show e)))
                            hFlush h)
            doCommands' prefs connRef (h,n) mvar
-- | Collect metadata for all installed packages in the given package DBs
-- that are not already known in the collector directory.  With
-- @forceRebuild@, the collector directory and previous report are removed
-- first.  With @findSources@, source locations are gathered as well.
collectSystem :: Prefs -> Bool -> Bool -> Bool -> [PackageDBs] -> IO()
collectSystem prefs writeAscii forceRebuild findSources dbLists = do
    collectorPath <- getCollectorPath
    when forceRebuild $ do
        exists <- doesDirectoryExist collectorPath
        when exists $ removeDirectoryRecursive collectorPath
        reportPath <- getConfigFilePathForSave "collectSystem.report"
        exists' <- doesFileExist reportPath
        when exists' (removeFile reportPath)
    knownPackages <- findKnownPackages collectorPath
    debugM "leksah-server" $ "collectSystem knownPackages= " ++ show knownPackages
    let pkgId = packageIdentifierToString . packId . getThisPackage
    packageInfos <-
        getSysLibDir Nothing (Just VERSION_ghc) >>= \case
            Nothing -> do
                debugM "leksah-server" $ "collectSystem could not find system lib dir for GHC " <> VERSION_ghc <> " (used to build Leksah)"
                return []
            Just libDir -> concat <$> forM dbLists (\dbs -> do
                let planIds = pDBsPlanPackages dbs
                    isInPlan = maybe (const True) (\p -> (`Set.member` p)) planIds
                inGhcIO libDir [] [] (pDBsPaths dbs) (\ _ ->
                    map (,dbs)
                    . filter (isInPlan . pkgId) <$> getInstalledPackageInfos)
                        `catch` (\(e :: SomeException) -> do
                            debugM "leksah-server" $ "collectSystem error " <> show e
                            return []))
    debugM "leksah-server" $ "collectSystem packageInfos= " ++ show (map (packId . getThisPackage . fst) packageInfos)
    -- NOTE(review): 'nub' is O(n^2); fine for typical package counts but a
    -- Set-based dedup would be safer for very large DBs.
    let newPackages = sortBy (comparing (pkgId. fst)) . nub $
            filter (\pi' -> not $ Set.member (pkgId $ fst pi') knownPackages)
                packageInfos
    if null newPackages
        then infoM "leksah-server" "Metadata collector has nothing to do"
        else do
            when findSources $ liftIO $ buildSourceForPackageDB prefs
            infoM "leksah-server" "update_toolbar 0.0"
            stats <- forM (zip newPackages [1 .. length newPackages]) $ \(package, n) -> do
                let pid = T.unpack . pkgId $ fst package
                liftIO (doesFileExist $ collectorPath </> pid <.> leksahMetadataPathFileExtension) >>= \case
                    True -> debugM "leksah-server" ("Already created metadata for " <> pid) >> return Nothing
                    False -> Just <$> collectPackage writeAscii prefs (length newPackages) (package, n)
            writeStats $ catMaybes stats
    infoM "leksah-server" "Metadata collection has finished"
-- | Collect metadata for the single package built in @fpSourceDir@ (using
-- its in-place package.conf) and write the metadata file into @outDir@.
-- Logs and does nothing when the GHC lib dir, the cabal file, or the
-- matching installed package cannot be found.
collectOne :: FilePath -> FilePath -> [FilePath] -> IO()
collectOne fpSourceDir outDir dbs = do
    packageInfos <- getSysLibDir Nothing (Just VERSION_ghc) >>= \case
        Nothing -> do
            debugM "leksah-server" $ "collectOne could not find system lib dir for GHC " <> VERSION_ghc <> " (used to build Leksah)"
            return []
        Just libDir ->
            inGhcIO libDir [] [] [fpSourceDir </> "dist" </> "package.conf.inplace"] (const getInstalledPackageInfos)
                `catch` (\(e :: SomeException) -> do
                    -- Fixed: these two log messages previously said "coolectOne".
                    debugM "leksah-server" $ "collectOne error " <> show e
                    return [])
    debugM "leksah-server" $ "collectOne packageInfos= " ++ show (map (packId . getThisPackage) packageInfos)
    cabalFileName fpSourceDir >>= \case
        Nothing -> infoM "leksah-server" "Metadata collector could not find cabal file to collect"
        Just cabalFile ->
            -- Match on the cabal file's base name to pick the right package.
            case filter ((==takeBaseName cabalFile) . display . pkgName . packId . getThisPackage) packageInfos of
                [] -> infoM "leksah-server" "Metadata collector could not find package to collect"
                (package:_) -> do
                    createDirectoryIfMissing True outDir
                    collectPackageOnly package dbs
                        (fpSourceDir </> cabalFile)
                        (outDir </> T.unpack (packageIdentifierToString . packId $ getThisPackage package) <.> leksahMetadataSystemFileExtension)
                    infoM "leksah-server" "Metadata collection has finished"
-- | Append a timestamped summary of a collection run (package/module counts,
-- source coverage, first failures) to the "collectSystem.report" file.
writeStats :: [PackageCollectStats] -> IO ()
writeStats stats = do
    reportPath <- getConfigFilePathForSave "collectSystem.report"
    time <- getCurrentTime
    appendFile reportPath (report time)
    where
        report time = "\n++++++++++++++++++++++++++++++\n" ++ show time ++ "\n++++++++++++++++++++++++++++++\n"
                        ++ header' time ++ summary ++ T.unpack details
        header' _time = "\nLeksah system metadata collection "
        summary = "\nSuccess with = " ++ T.unpack packs ++
                  "\nPackages total = " ++ show packagesTotal ++
                  "\nPackages with source = " ++ show packagesWithSource ++
                  "\nPackages retrieved = " ++ show packagesRetrieved ++
                  "\nModules total = " ++ show modulesTotal' ++
                  "\nModules with source = " ++ show modulesWithSource ++
                  "\nPercentage source = " ++ show percentageWithSource
        packagesTotal = length stats
        packagesWithSource = length (filter withSource stats)
        packagesRetrieved = length (filter retrieved stats)
        modulesTotal' = sum (mapMaybe modulesTotal stats)
        modulesWithSource = sum (mapMaybe modulesTotal (filter withSource stats))
        -- Guard against a zero module count, which previously produced
        -- NaN in the report (0/0 in Double arithmetic).
        percentageWithSource
            | modulesTotal' == 0 = 0
            | otherwise = fromIntegral modulesWithSource * 100.0
                            / fromIntegral modulesTotal'
        details = foldr detail "" (filter (isJust . mbError) stats)
        detail stat string = string <> "\n" <> packageString stat <> " " <> (T.strip . fromJust $ mbError stat)
        packs = foldr (\stat string -> string <> packageString stat <> " ")
                      "" (take 10 (filter withSource stats))
                <> if packagesWithSource > 10 then "..." else ""
|
leksah/leksah-server
|
main/Collector.hs
|
gpl-2.0
| 18,655
| 0
| 27
| 6,784
| 4,420
| 2,245
| 2,175
| 320
| 9
|
{-# LANGUAGE TypeSynonymInstances #-}
{-# LANGUAGE FlexibleInstances #-}
module MuCalc.MuModelTest (testList) where
import qualified Data.Set as S
import qualified Data.Map as M
import Control.Applicative
import MuCalc.MuFormula
import MuCalc.States
import MuCalc.MuModel
import MuCalc.Realization
import GRSynth.Utils
import Test.HUnit hiding (State)
import MuCalc.Generators
import Data.Maybe
import Test.Framework
import Test.Framework.Providers.HUnit
import Test.Framework.Providers.QuickCheck2
import Test.QuickCheck
-- | All test groups/cases exported to the test-framework runner.
testList = [ testGroup "Variable parity" variableParityTests
           , testGroup "setOneTrue" setOneTrueTests
           , testGroup "Test formulas" testFormulaTests
           , testCase "allFalseReachable" allFalseReachableTest
           , testCase "exactlyOneTrueReachable" exactlyOneTrueReachableTest
           , testGroup "Fixpoint tests" muTestCases
           ]

-- | State space of boolean 3-vectors
instance State PState where
  encode = id
  decode = Just . id

-- | Realization context with a single variable "A" mapped to 'newBottom'.
aContext = Context True (M.singleton "A" newBottom)

-- | Two-bit model with one proposition ("0", "1") per bit position.
aModel = let base = newMuModel (enumerateStates 2)
             m1 = withProp base "0" (!!0)
             m2 = withProp m1 "1" (!!1)
         in m2
variableParityTests = zipTestCases [ ("Positive", positiveVariableParityTest)
                                   , ("Negative", negativeVariableParityTest)
                                   ]

-- | "A" occurs under an even number of negations: realization must succeed.
positiveVariableParityTest = let f = Or (Atom "0")
                                        (Negation (And (Negation (Variable "A"))
                                                       (Atom "1")))
                                 realization = realizeAux f aContext aModel
                             in assert (isRight realization)

-- | "A" occurs under an odd number of negations: realization must fail (Left).
negativeVariableParityTest = let fs = [ Negation (Variable "A")
                                      , Negation (Negation (Negation (Variable "A")))
                                      , Or (Atom "0")
                                           (Negation (Variable "A"))
                                      ]
                                 realizations = map (\f -> realizeAux f aContext aModel) fs
                             in assert (all isLeft realizations)
-- Tests for action realization --

-- | Transition action: either leave the state unchanged, or flip exactly
-- one False bit to True. The unchanged state comes first, followed by the
-- one-bit successors in ascending index order.
setOneTrue :: PState -> [PState]
setOneTrue s = s : successors
  where
    successors = [ setNthElement s i True
                 | (i, bit) <- zip [0 ..] s
                 , not bit ]
setOneTrueTests = zipTestCases [ ("true-false", s1Test)
                               , ("true-true", s2Test)
                               , ("false-false", s3Test)
                               ]

-- Concrete examples pinning setOneTrue's output and its ordering.
s1Test = setOneTrue [True, False] @?= [[True, False], [True, True]]
s2Test = setOneTrue [True, True] @?= [[True, True]]
s3Test = setOneTrue [False, False] @?= [[False, False], [True, False], [False, True]]

-- | Bit width of the state vectors used below.
n = 3

-- | Three-bit model equipped with the setOneTrue action and one
-- proposition per bit position.
m = let base = newMuModel (enumerateStates 3)
        m1 = withAction base "setOneTrue" setOneTrue
        m2 = withProp m1 "0" (!!0)
        m3 = withProp m2 "1" (!!1)
        m4 = withProp m3 "2" (!!2)
    in m4

-- Formulas characterizing how many bits are set.
allFalse = And (Negation (Atom "0"))
               (And (Negation (Atom "1"))
                    (Negation (Atom "2")))

exactlyOneTrue = Or (And (Atom "0") (And (Negation (Atom "1")) (Negation (Atom "2"))))
                    (Or (And (Atom "1") (And (Negation (Atom "0")) (Negation (Atom "2"))))
                        (And (Atom "2") (And (Negation (Atom "0")) (Negation (Atom "1")))))

-- | Expected satisfying states of 'exactlyOneTrue'.
exactlyOneTrueList = [[True, False, False] , [False, True, False] , [False, False, True]]

exactlyTwoTrue = Or (And (Atom "0") (And (Atom "1") (Negation (Atom "2"))))
                    (Or (And (Atom "1") (And (Atom "2") (Negation (Atom "0"))))
                        (And (Atom "2") (And (Atom "0") (Negation (Atom "1")))))

-- | Expected satisfying states of 'exactlyTwoTrue'.
exactlyTwoTrueList = [[True, True, False] , [True, False, True] , [False, True, True]]

allTrue = And (Atom "0")
              (And (Atom "1")
                   (Atom "2"))
--Test that we've written these formulas correctly
testFormulaTests = zipTestCases [ ("All false", allFalseTest)
                                , ("Exactly one true", exactlyOneTrueTest)
                                , ("Exactly two true", exactlyTwoTrueTest)
                                , ("All true", allTrueTest)
                                ]

allFalseTest = assertRealization (realize allFalse m) (\list ->
                 list @?= [[False, False, False]])
exactlyOneTrueTest = assertRealization (realize exactlyOneTrue m) (\list ->
                       list @?= exactlyOneTrueList)
exactlyTwoTrueTest = assertRealization (realize exactlyTwoTrue m) (\list ->
                       list @?= exactlyTwoTrueList)
allTrueTest = assertRealization (realize allTrue m) (\list ->
                list @?= [[True, True, True]])

--Test the reachability of the states through the action
allFalseReachableTest = let phi = PossiblyNext "setOneTrue" allFalse
                        in assertRealization (realize phi m) (\list ->
                             list @?= [[False, False, False]])
exactlyOneTrueReachableTest = let phi = PossiblyNext "setOneTrue" exactlyOneTrue
                              in assertRealization (realize phi m) (\list ->
                                   list @?= ([False, False, False] : exactlyOneTrueList))

-- | Fixpoint (mu-calculus) test cases.
muTestCases = zipTestCases [ ("Least fixpoint of constant", constantFixpointTest)
                           , ("Least fixpoint of disjunction", simpleOrFixpointTest)
                           , ("Least fixpoint of conjunction", simpleAndFixpointTest)
                           , ("Unbound variable check", unboundVariableCheck)
                           , ("Exactly two true fixpoint", exactlyTwoTrueFixpointTest)
                           , ("Exactly one true fixpoint", exactlyOneTrueFixpointTest)
                           ]
--The least fixpoint of a constant function should be the constant
constantFixpointTest = let phi = Mu "A" exactlyOneTrue
                       in assertRealization (realize phi m) (\list ->
                            list @?= exactlyOneTrueList)

--The least fixpoint of \Z -> Z || phi should be phi
simpleOrFixpointTest = let phi = Mu "A" (Or (exactlyOneTrue) (Variable "A"))
                       in assertRealization (realize phi m) (\list ->
                            list @?= exactlyOneTrueList)

-- The least fixpoint of \Z -> Z && phi is the empty set.
simpleAndFixpointTest = let phi = Mu "A" (And (exactlyOneTrue) (Variable "A"))
                        in assertRealization (realize phi m) (\list ->
                             list @?= [])

-- Realizing a formula mentioning a variable that was never bound must fail.
unboundVariableCheck = let phi = Mu "A" (And (exactlyOneTrue) (Variable "B"))
                       in (realize phi m) @?= Left UnknownVariableError

--These tests rely on the fact that the setOneTrue action only allows you to move
--up the lattice, so that exactlyOneTrue is not reachable from exactlyTwoTrue, and
--exactlyTwoTrue is not reachable from allTrue.
exactlyTwoTrueFixpointTest = let phi = Mu "A" (Or (exactlyTwoTrue)
                                                  (PossiblyNext "setOneTrue" (Variable "A")))
                                 notAllTrue = Negation allTrue
                             in assertRealization (realize phi m) (\set ->
                                  assertRealization (realize notAllTrue m) (\expected ->
                                    set @?= expected))
exactlyOneTrueFixpointTest = let phi = Mu "A" (Or (exactlyOneTrue)
                                                  (PossiblyNext "setOneTrue" (Variable "A")))
                                 atMostOneTrue = Negation (Or allTrue exactlyTwoTrue)
                             in assertRealization (realize phi m) (\set ->
                                  assertRealization (realize atMostOneTrue m) (\expected ->
                                    set @?= expected))
|
johnbcoughlin/mucalc
|
test/MuCalc/MuModelTest.hs
|
gpl-2.0
| 7,869
| 0
| 17
| 2,735
| 2,079
| 1,128
| 951
| 124
| 1
|
module Algorithms.MachineLearning.LinearClassification (
DiscriminantModel,
regressLinearClassificationModel,
) where
import Algorithms.MachineLearning.Framework
import Algorithms.MachineLearning.LinearAlgebra
import Algorithms.MachineLearning.LinearRegression
import Algorithms.MachineLearning.Utilities
import Data.Ord
import Data.List
import Data.Maybe
-- | One-of-K discriminant classifier: one regressed score per class;
-- prediction returns the class with the largest score.
-- NOTE(review): the datatype context below is the deprecated
-- DatatypeContexts feature and adds no safety -- consider removing it.
data (Bounded classes, Enum classes) => DiscriminantModel input classes = DiscriminantModel {
    dm_class_models :: AnyModel input (Vector Double)
  }

instance (Vectorable input, Bounded classes, Enum classes) => Model (DiscriminantModel input classes) input classes where
    -- Score every class at once, then pick the class paired with the
    -- maximal score.
    predict model input = snd $ maximumBy (comparing fst) predictions
      where
        predictions = toList (predict (dm_class_models model) input) `zip` enumAsList

-- | Train a discriminant model by regressing a characteristic (one-hot)
-- vector for each training example's class.
regressLinearClassificationModel :: (Vectorable input, Vectorable classes, Bounded classes, Enum classes, Eq classes)
                                 => [input -> Double]     -- ^ Basis functions
                                 -> DataSet input classes -- ^ Class mapping to use for training
                                 -> DiscriminantModel input classes
regressLinearClassificationModel basis_fns ds = DiscriminantModel { dm_class_models = class_models }
  where
    class_models = AnyModel $ regressLinearModel basis_fns (fmapDataSetTarget classToCharacteristicVector ds)
    indexed_classes = enumAsList `zip` [0..]
    -- One-hot encoding: 1 at the class's enum index, 0 elsewhere.
    classToCharacteristicVector the_class = fromList $ replicate index 0 ++ [1] ++ replicate (size - index - 1) 0
      where
        size = enumSize the_class
        index = fromJust $ lookup the_class indexed_classes
|
batterseapower/machine-learning
|
Algorithms/MachineLearning/LinearClassification.hs
|
gpl-2.0
| 1,676
| 0
| 13
| 361
| 383
| 208
| 175
| -1
| -1
|
-- | Language definitions
{-# OPTIONS -O0 #-}
{-# LANGUAGE TemplateHaskell, FlexibleInstances, TypeFamilies #-}
{-# LANGUAGE MultiParamTypeClasses, UndecidableInstances, ConstraintKinds #-}
module Lamdu.I18N.Language
( Language(..), lFonts, lTitle
) where
import qualified Control.Lens as Lens
import qualified Data.Aeson.TH.Extended as JsonTH
import qualified GUI.Momentu.Direction as Dir
import Lamdu.Config.Folder (HasConfigFolder(..))
import qualified Lamdu.Config.Folder as Folder
import qualified Lamdu.I18N.Fonts as I18N.Fonts
import Lamdu.I18N.LangId (LangId)
import Lamdu.I18N.Texts (Texts)
import Lamdu.Prelude
-- | A language definition: layout direction, identifier, font set,
-- display title and the full set of translated UI texts.
data Language = Language
    { _lDirection :: Dir.Layout
    , _lIdentifier :: LangId
    , _lFonts ::
        I18N.Fonts.ProportionalAndMonospace
        (I18N.Fonts.SansAndSerif
         (I18N.Fonts.RomanAndItalic
          (I18N.Fonts.LightAndBold FilePath)))
    , _lTitle :: Text
    , _lTexts :: Texts Text
    } deriving Eq

Lens.makeLenses ''Language
JsonTH.derivePrefixed "_l" ''Language

-- Language configs are loaded from the "languages" config folder.
instance HasConfigFolder Language where
    type Folder Language = Folder.Language
    configFolderName _ = "languages"

instance Has LangId Language where has = lIdentifier
instance Has Dir.Layout Language where has = lDirection
instance Has (f Text) (Texts Text) => Has (f Text) Language where
    has = lTexts . has
|
Peaker/lamdu
|
src/Lamdu/I18N/Language.hs
|
gpl-3.0
| 1,389
| 0
| 16
| 272
| 322
| 192
| 130
| -1
| -1
|
module Atom
( Atomic
, isTypeTag
, isTypeTagStr
, isAddrStr
, typeOf
, encode
, OSCString
, Blob
, Time
, decode ) where
import qualified Data.ByteString.Lazy as L
import qualified Data.ByteString.Lazy.Char8 as C (append, length, pack, unpack)
import qualified Data.Binary.Put as P (runPut, putWord32be, putWord64be)
import qualified Data.Binary.Get as G (runGet, getWord32be, getWord64be)
import qualified Data.Binary.IEEE754 as F (getFloat32be, putFloat32be)
import qualified Data.Word as W (Word8)
-- | An OSC string; its encoding pads with NULs to a 4-byte boundary.
newtype OSCString = OSCString String
                    deriving (Show, Read, Ord, Eq)

-- | An OSC blob; its encoding is length-prefixed (see 'Atomic' instance).
newtype Blob = Blob String
               deriving (Show, Read, Ord, Eq)

-- | An OSC time tag, stored as a plain Int (64-bit on the wire).
newtype Time = Time Int deriving
               (Bounded, Show, Read, Ord, Eq)

-- | Narrow a Char to a byte (truncates above code point 255).
c2w8 :: Char -> W.Word8
c2w8 = fromIntegral . fromEnum
-- | True iff the string is a non-empty OSC address pattern (starts with '/').
-- Total: returns False on the empty string instead of crashing in 'head'.
isAddrStr :: String -> Bool
isAddrStr ('/':_) = True
isAddrStr _ = False

-- | True iff the string is a non-empty OSC type-tag string (starts with ',').
-- Total: returns False on the empty string instead of crashing in 'head'.
isTypeTagStr :: String -> Bool
isTypeTagStr (',':_) = True
isTypeTagStr _ = False
-- | True iff the character is one of the recognised OSC type-tag
-- characters (including the array brackets).
isTypeTag :: Char -> Bool
isTypeTag c = c `elem` "ifsbhtdScrmTFNI[]"
-- | True iff the byte string's length is a multiple of 4 (OSC alignment).
isLength :: L.ByteString -> Bool
isLength w = 0 == mod (L.length w) 4

-- | Length of a lazy ByteString as a plain Int.
bytesLen :: L.ByteString -> Int
bytesLen b = fromIntegral (C.length b) :: Int

-- | Values that can travel as OSC atoms: each has a type-tag character,
-- a wire encoding/decoding, and an encoded length in bytes.
-- NOTE(review): the 'length' method clashes with Prelude.length; any
-- unqualified use of 'length' in this module may be ambiguous.
class Atomic a where
  typeOf :: a -> Char
  encode :: a -> L.ByteString
  decode :: L.ByteString -> a
  length :: a -> Int
-- | 32-bit big-endian signed integer, tag 'i'.
instance Atomic Int where
  typeOf _ = 'i'
  length _ = 4
  decode = (.) fromIntegral (G.runGet G.getWord32be)
  encode = (P.runPut . P.putWord32be) . fromIntegral

-- | 64-bit big-endian time tag, tag 't'.
instance Atomic Time where
  typeOf _ = 't'
  length _ = 8
  decode w = Time $ fromIntegral (G.runGet G.getWord64be w)
  encode (Time w) = (.) (P.runPut . P.putWord64be) fromIntegral w

-- | 32-bit big-endian IEEE-754 float, tag 'f'.
instance Atomic Float where
  typeOf _ = 'f'
  length _ = 4
  decode = G.runGet F.getFloat32be
  encode = P.runPut . F.putFloat32be
instance Atomic OSCString where
  typeOf _ = 's'
  -- Encoded length, i.e. including NUL padding.
  length = (.) bytesLen encode
  -- NOTE(review): when the byte length is already a multiple of 4 no
  -- padding is emitted, yet OSC 1.0 requires at least one terminating
  -- NUL; the empty string also encodes to zero bytes. Confirm vs. spec.
  encode (OSCString s)
    | null s = L.empty
    | otherwise =
        let b = C.pack s
            p = C.pack $ replicate ((4 - bytesLen b) `mod` 4) '\NUL'
        in C.append b p
  -- Walk the input byte by byte, stopping at a NUL that lands on a
  -- 4-byte boundary; the accumulator is built reversed and flipped at
  -- the end. NOTE(review): partial if no such NUL exists (L.head on
  -- empty), and the stopping NUL appears to be included in the result
  -- because 's' is consed before the reverse -- confirm intent.
  decode m = OSCString $ C.unpack (sparse L.empty m 1)
    where sparse :: L.ByteString -> L.ByteString -> Int -> L.ByteString
          sparse c x i
            | not isNul = sparse s t (succ i)
            | isNul && 0 /= mod i 4 = sparse s t (succ i)
            | otherwise = L.reverse s
            where h = L.head x; s = L.cons h c; t = L.tail x
                  isNul = h == c2w8 '\0'
instance Atomic Blob where
  typeOf _ = 'b'
  length = bytesLen . encode
  -- A blob is its payload encoded as an OSC string, prefixed with the
  -- encoded payload's length as a 32-bit big-endian int.
  encode (Blob w) =
    let b = encode $ OSCString w
        l = encode (bytesLen b)
    in L.append l b
  -- NOTE(review): after reading the 4-byte length this decodes from
  -- 'L.drop (fromIntegral l) w' -- dropping l bytes rather than the
  -- 4-byte header looks wrong (expected roughly
  -- 'L.take (fromIntegral l) (L.drop 4 w)'); confirm intent.
  decode w =
    let l = decode (L.take 4 w) :: Int
        (OSCString s) = decode (L.drop (fromIntegral l) w) :: OSCString
    in Blob s
|
destroyhimmyrobots/osc.hs
|
OSC/Atom.hs
|
gpl-3.0
| 2,677
| 39
| 14
| 661
| 940
| 529
| 411
| 83
| 1
|
-----------------------------------------------------------------------------
--
-- Module : Sensing
-- Copyright :
--
-- | the data for the object which is sensed
-- operation to sense the object
-- possibly with a recursive version
-- and to output the sensed data
--
-----------------------------------------------------------------------------
{-# LANGUAGE
MultiParamTypeClasses
-- , TypeSynonymInstances
-- , FunctionalDependencies
-- , FlexibleInstances
-- , FlexibleContexts
-- , DeriveFunctor
, ScopedTypeVariables
-- , UndecidableInstances
, OverloadedStrings
, TypeFamilies
#-}
{-# OPTIONS_GHC -fno-warn-missing-methods #-}
module Sensing (Sensing (..),
FilePathO2 (FilePathO2),
FilePathX,
infoPath,
FPtype (FileDataObject)
) where
import Strings (t2s, s2t, b2s, s2b, ByteString)
import qualified Data.ByteString as BS (null)
import qualified System.Posix.ByteString as Posix
import System.Posix.FilePath ((</>))
import System.Directory (canonicalizePath)
import Pipes (yield, ListT(..), ListT, liftIO)
import Pipes.Safe (Base, MonadSafe, bracket)
import qualified Control.Exception as E (SomeException, catch)
import Control.Monad (unless, when)
import Control.Applicative (Alternative(..), Applicative(..))
import Data.Text (Text)
-- | File paths are handled as raw bytes (no text encoding assumed).
type FilePathX = ByteString

-- | Enables verbose tracing in the sensing functions below.
debug_fp = False

-- | Classification of a directory entry.
data FPtype = FileDataObject | FPdir | FPspecial | FPunreadable -- | FPzerosize
            | FPlink | FPbrokenLink | FNotFile
            deriving (Show, Read, Eq)
-- these are all the subtypes of FPathO

-- | describes an entry in a directory - file or directory!
data FilePathO2 = FilePathO2 { infoPath :: FilePathX
                             , infoType :: FPtype
                             , infoStatus :: Posix.FileStatus
                             , infoIsReadable :: Bool
                             , infoLink :: FilePathX
                             , infoMD5 :: Text
                             -- , canonicalPath :: FilePath -- CanonicalFilePath
                             }
-- deriving (Eq, Ord, Show)

-- | the operations for sensing an object - e.g. a filePath
class Sensing fpo where
    sense :: FilePath -> IO fpo
    -- ^ does a get stat and keeps the data
    senseRecursive5 :: (MonadSafe m, Base m ~ IO) => fpo -> ListT m fpo
    -- ^ stream the object and, for directories, its contents recursively
    senseReport :: fpo -> String
    -- ^ one-line human-readable description
-- | Sensing for byte-string paths: stat the entry, classify it, and keep
-- the result; directories can be streamed recursively.
instance Sensing FilePathO2 where
    -- Stat @fn@ (without following symlinks) and classify it. The catch
    -- below wraps the whole stat sequence: any exception (typically a
    -- broken symlink) yields an 'FPbrokenLink' record instead.
    sense fn = do
            let fnb = s2b fn
            -- Start from a record of poisoned fields so any field that is
            -- read before being filled in fails loudly via 'undef'.
            let fp = FilePathO2 { infoPath = fnb
                                , infoType = undef "infoType initial"
                                , infoStatus = undef "infoStatus initial"
                                , infoIsReadable = undef "infoisreadable initial"
                                , infoLink = undef "infoLink initial"
                                , infoMD5 = undef "infoMD5 initial" }
            -- c <- canonicalizePath $ fn
            st <- Posix.getSymbolicLinkStatus fnb
            readable <- Posix.fileAccess fnb True False False
            let isRegular = Posix.isRegularFile st
            let isDir = Posix.isDirectory st
            let isLink = Posix.isSymbolicLink st
            -- Classification precedence: unreadable > link > dir > regular.
            let fileType = if not readable then FPunreadable
                            else if isLink then FPlink
                            else if isDir then FPdir
                            else if isRegular then FileDataObject
                            else FPspecial
            let fp2 = fp { infoType = fileType
                         , infoStatus = st
                         , infoIsReadable = readable }
            fp3 <-
                case fileType of
                    FPlink -> do
                        lk <- Posix.readSymbolicLink fnb
                        -- let lk = "not valid"
                        return (fp2 { infoLink = lk })
                    _ -> return fp2
            return fp3
        `E.catch`
        \(e::E.SomeException) -> case e of
            _ -> do
                -- when debug_fp $
                putStrLn . unwords $ ["sense exception - assume broken link", show e]
                -- lk <- Posix.readSymbolicLink . s2b $ fn
                -- NOTE(review): 'infoLink' is left unset on this path;
                -- forcing it on a broken-link record will fail. Confirm
                -- callers never read it for FPbrokenLink entries.
                return $ FilePathO2 { infoPath = s2b fn
                                    , infoType = FPbrokenLink
                                    -- , infoLink = "broken" -- lk
                                    , infoStatus = undef "sense exception status"
                                    , infoIsReadable = undef "sense exception isReadable"
                                    , infoMD5 = undef "infoMD5 exception" }
            -- could be nofile?

    -- Yield the object itself; for directories, additionally recurse
    -- into their contents via 'senseRecursiveFolder5'.
    senseRecursive5 fpo = do
        when debug_fp $ liftIO . putStrLn . unwords $ ["senseRecursive4 start"
                            , b2s . infoPath $ fpo, show . infoType $ fpo]
        case (infoType fpo) of
            FPbrokenLink -> do
                when debug_fp $ liftIO . putStrLn . unwords $
                    ["senseRecursive4 brokenlink", b2s . infoPath $ fpo]
                pure fpo
            FPlink -> pure fpo
            FPunreadable -> pure fpo
            FPspecial -> pure fpo
            FileDataObject -> pure fpo -- ( (pure fdo3) <|> (pure fdt3))
            FPdir -> pure fpo <|> senseRecursiveFolder5 fpo
            _ -> do
                liftIO . putStrLn . unwords $
                    ["senseRecursive4 ignored"
                    , b2s . infoPath $ fpo, show . infoType $ fpo]
                pure fpo

    -- One line: path, type, then link target or file marker.
    senseReport fpo = unwords [path, ftype, "\n\t", md]
        where
            path = b2s . infoPath $ fpo
            ftype = show . infoType $ fpo
            md = case infoType fpo of
                    FileDataObject -> "File \t" ++ path
                    FPlink -> "Link \t" ++ (b2s . infoLink $ fpo)
                    _ -> ""
-- | Stream a directory's entries, recursing into each one.
senseRecursiveFolder5 :: (MonadSafe m, Base m ~ IO) => FilePathO2 -> ListT m FilePathO2
senseRecursiveFolder5 fpo = do
    when debug_fp $ liftIO . putStrLn . unwords $ ["senseRecursiveFolder5"
                        , b2s . infoPath $ fpo, show . infoType $ fpo]
    (fpo'::FilePathO2) <- readDirStream5 fpo
    senseRecursive5 fpo'

-- | List a directory as a 'ListT' stream, sensing each entry (skipping
-- "." and ".."); the dir stream handle is released by 'bracket' even on
-- exceptions.
readDirStream5 fpo = Select $ do
    bracket (Posix.openDirStream . infoPath $ fpo) Posix.closeDirStream $ \dirp -> do
        let loop = do
                file' <- liftIO $ Posix.readDirStream dirp
                -- an empty name marks the end of the directory stream
                unless (BS.null file') $ do
                    when (file' /= "." && file' /= "..") $ do
                        let d = infoPath fpo
                        let d' = d </> file'
                        when debug_fp $ liftIO . putStrLn . unwords $ ["readDir", show d']
                        fpo2 <- liftIO . sense . b2s $ d'
                        when debug_fp $ liftIO . putStrLn . unwords $ ["readDir fpo2"
                                            , b2s . infoPath $ fpo2, show . infoType $ fpo2]
                        yield fpo2
                    loop
        loop

-- | Placeholder for record fields that must never be inspected; aborts
-- with the given tag when forced.
undef s = error s
|
glueckself/mhaskell-ss14
|
Sensing.hs
|
gpl-3.0
| 6,795
| 0
| 27
| 2,558
| 1,472
| 789
| 683
| -1
| -1
|
module Data.NestedSetSpec (main, spec) where
import Data.NestedSet
import Data.Tree
import Test.Hspec
main :: IO ()
main = hspec spec

-- | Covers forest<->nested-set conversion, sibling/parent/child position
-- navigation, and position-based value access and update.
spec :: Spec
spec = describe "Nested set" $ do
    describe "forestToNestedSets" $ do
        it "generates nested sets for empty forest" $
            forestToNestedSets [] `shouldBe` ([]::NestedSets Char)
        it "generates nested sets for one node" $
            forestToNestedSets [Node 'a' []] `shouldBe` [NestedSetsNode (1, 2) 'a' []]
        it "generates nested sets for two nodes" $
            forestToNestedSets [Node 'a' [], Node 'b' []] `shouldBe` [NestedSetsNode (1, 2) 'a' [], NestedSetsNode (3, 4) 'b' []]
        it "generates nested sets for nested nodes" $
            forestToNestedSets [Node 'a' [Node 'b' []]] `shouldBe` [NestedSetsNode (1, 4) 'a' [NestedSetsNode (2, 3) 'b' []]]
        it "generates nested sets for several nested nodes" $
            forestToNestedSets complexForest `shouldBe` complexNestedSets
    describe "nestedSetsToForest" $ do
        it "generates empty forest on empty input" $
            nestedSetsToForest [] `shouldBe` ([]::Forest Char)
        it "generates forest for one node" $
            nestedSetsToForest [NestedSetsNode (1, 2) 'a' []] `shouldBe` [Node 'a' []]
        it "generates forest for two nodes" $
            nestedSetsToForest [NestedSetsNode (1, 2) 'a' [], NestedSetsNode (3, 4) 'b' []] `shouldBe` [Node 'a' [], Node 'b' []]
        it "generates forest for nested nodes" $
            nestedSetsToForest [NestedSetsNode (1, 2) 'a' [NestedSetsNode (3, 4) 'b' []]] `shouldBe` [Node 'a' [Node 'b' []]]
        it "converts nested sets to forest" $
            nestedSetsToForest complexNestedSets `shouldBe` complexForest
    describe "nestedSetsStartPosition" $ do
        it "returns Nothing on empty input" $
            nestedSetsStartPosition [] `shouldBe` Nothing
        it "returns first position on nonempty input" $ do
            nestedSetsStartPosition [NestedSetsNode (1, 2) 'a' []] `shouldBe` Just (1, 2)
            nestedSetsStartPosition complexNestedSets `shouldBe` Just (1, 8)
    describe "nestedSetsNextSiblingPosition" $ do
        it "returns Nothing on empty input" $
            nestedSetsNextSiblingPosition [] (0, 0) `shouldBe` Nothing
        it "returns Nothing if there is just one node" $ do
            nestedSetsNextSiblingPosition [NestedSetsNode (1, 2) 'a' []] (0, 0) `shouldBe` Nothing
            nestedSetsNextSiblingPosition [NestedSetsNode (1, 2) 'a' []] (1, 2) `shouldBe` Nothing
        it "returns position to the second node if the first node is the starting point" $
            nestedSetsNextSiblingPosition [NestedSetsNode (1, 2) 'a' [], NestedSetsNode (3, 4) 'b' []] (1, 2) `shouldBe` Just (3, 4)
        it "returns Nothing if position is not found" $
            nestedSetsNextSiblingPosition [NestedSetsNode (1, 2) 'a' [], NestedSetsNode (3, 4) 'b' []] (1, 1) `shouldBe` Nothing
        it "returns position to the third node if the second node is the starting point" $
            nestedSetsNextSiblingPosition [NestedSetsNode (1, 2) 'a' [], NestedSetsNode (3, 4) 'b' [], NestedSetsNode (5, 6) 'c' []] (3, 4) `shouldBe` Just (5, 6)
        it "advances a nested position" $ do
            nestedSetsNextSiblingPosition complexNestedSets (3, 4) `shouldBe` Just (5, 6)
            nestedSetsNextSiblingPosition complexNestedSets (10, 11) `shouldBe` Just (12, 13)
            nestedSetsNextSiblingPosition complexNestedSets (12, 13) `shouldBe` Nothing
            nestedSetsNextSiblingPosition complexNestedSets (2, 7) `shouldBe` Nothing
    describe "nestedSetsParentPosition" $ do
        it "returns Nothing on empty set" $
            nestedSetsParentPosition [] (0, 0) `shouldBe` Nothing
        it "returns parent position of the first level" $
            nestedSetsParentPosition complexNestedSets (2, 7) `shouldBe` Just (1, 8)
        it "returns Nothing if parent of the first level position is requested" $
            nestedSetsParentPosition [NestedSetsNode (1, 2) 'a' []] (1, 2) `shouldBe` Nothing
        it "returns Nothing if parent of the unknown position is requested" $ do
            nestedSetsParentPosition [NestedSetsNode (1, 2) 'a' []] (4, 9) `shouldBe` Nothing
            nestedSetsParentPosition complexNestedSets (4, 6) `shouldBe` Nothing
    describe "nestedSetsFirstChildPosition" $ do
        it "returns Nothing on empty set" $
            nestedSetsFirstChildPosition [] (0, 0) `shouldBe` Nothing
        it "returns child position of the first level" $ do
            nestedSetsFirstChildPosition complexNestedSets (1, 8) `shouldBe` Just (2, 7)
            nestedSetsFirstChildPosition complexNestedSets (9, 14) `shouldBe` Just (10, 11)
        it "returns child position of a deeper level" $
            nestedSetsFirstChildPosition complexNestedSets (2, 7) `shouldBe` Just (3, 4)
        it "returns Nothing if a node does not have children" $
            nestedSetsFirstChildPosition complexNestedSets (10, 11) `shouldBe` Nothing
    describe "nestedSetsPositionValue" $ do
        it "returns Nothing on empty set" $
            nestedSetsPositionValue [] (0, 0) `shouldBe` (Nothing::Maybe Char)
        it "returns value of first level position" $ do
            nestedSetsPositionValue complexNestedSets (1, 8) `shouldBe` Just 'a'
            nestedSetsPositionValue complexNestedSets (9, 14) `shouldBe` Just 'e'
        it "returns value of deeper position" $ do
            nestedSetsPositionValue complexNestedSets (3, 4) `shouldBe` Just 'c'
            nestedSetsPositionValue complexNestedSets (12, 13) `shouldBe` Just 'g'
        it "returns Nothing if it cannot find the position" $
            nestedSetsPositionValue complexNestedSets (4, 5) `shouldBe` Nothing
    describe "nestedSetsPositionSetValue" $ do
        it "returns unmodified nested set if position is not found" $ do
            let ns = nestedSetsPositionSetValue complexNestedSets (100, 80) 'X'
            ns `shouldBe` complexNestedSets
        it "returns modified nested set" $ do
            let ns = nestedSetsPositionSetValue complexNestedSets (1, 8) 'X'
            nestedSetsPositionValue ns (1, 8) `shouldBe` Just 'X'
-- | Shared example forest: two roots, each with nested children.
complexForest :: Forest Char
complexForest = [Node 'a' [
                    Node 'b' [
                        Node 'c' [],
                        Node 'd' []]],
                 Node 'e' [
                    Node 'f' [],
                    Node 'g' []]]

-- | The nested-set encoding of 'complexForest'.
complexNestedSets :: NestedSets Char
complexNestedSets = [NestedSetsNode (1, 8) 'a' [
                        NestedSetsNode (2, 7) 'b' [
                            NestedSetsNode (3, 4) 'c' [],
                            NestedSetsNode (5, 6) 'd' []]],
                     NestedSetsNode (9, 14) 'e' [
                        NestedSetsNode (10, 11) 'f' [],
                        NestedSetsNode (12, 13) 'g' []]]
|
svalaskevicius/nested-sets
|
test/Data/NestedSetSpec.hs
|
gpl-3.0
| 6,933
| 0
| 18
| 1,926
| 1,992
| 1,033
| 959
| 107
| 1
|
{-# LANGUAGE TemplateHaskell #-}
module Lamdu.Sugar.Types.Tag
( Tag(..), tagName, tagVal, tagInstance
, TagOption(..), toInfo, toPick
, TagChoice(..), tcOptions, tcNewTag
, TagRef(..), tagRefTag, tagRefReplace, tagRefJumpTo
, OptionalTag(..), oTag, oPickAnon
) where
import qualified Control.Lens as Lens
import qualified Lamdu.Calc.Type as T
import Lamdu.Sugar.Internal.EntityId (EntityId)
import Lamdu.Prelude
-- | A tag occurrence with its display name.
data Tag name = Tag
    { _tagName :: name
    , _tagInstance :: EntityId -- Unique across different uses of a tag
    , _tagVal :: T.Tag
    } deriving (Eq, Ord, Generic)

-- | One selectable tag together with the action that picks it.
data TagOption name o = TagOption
    { _toInfo :: Tag name
    , _toPick :: o ()
    } deriving Generic

-- | The choices offered when selecting a tag, including a new-tag option.
data TagChoice name o = TagChoice
    { _tcOptions :: [TagOption name o]
    , _tcNewTag :: TagOption name o
    } deriving Generic

-- | A mutable tag (that can be replaced with a different tag)
data TagRef name i o = TagRef
    { _tagRefTag :: Tag name
    , _tagRefReplace :: i (TagChoice name o)
    , _tagRefJumpTo :: Maybe (o EntityId)
    } deriving Generic

-- | A 'TagRef' together with an extra action ('_oPickAnon') producing an
-- 'EntityId' -- presumably picking the anonymous alternative.
data OptionalTag name i o = OptionalTag
    { _oTag :: TagRef name i o
    , _oPickAnon :: o EntityId
    } deriving Generic

-- Generate lenses for all of the above types in one TH splice.
traverse Lens.makeLenses [''Tag, ''TagOption, ''TagRef, ''TagChoice, ''OptionalTag] <&> concat
|
Peaker/lamdu
|
src/Lamdu/Sugar/Types/Tag.hs
|
gpl-3.0
| 1,325
| 0
| 11
| 307
| 368
| 228
| 140
| 34
| 0
|
{-# LANGUAGE OverloadedStrings #-}
module Slidecoding.Indexer
( indexIO
) where
import Slidecoding.Browser
import Slidecoding.Types
import Codec.Binary.Base64.String as B64 (decode)
import Data.Aeson hiding (encode)
import Data.Aeson as A (encode)
import Data.ByteString.Lazy.Char8 as B (hPutStrLn)
import Data.Text as L (pack)
import System.Directory (createDirectoryIfMissing)
import System.FilePath ((</>), (<.>))
import System.IO (IOMode(..), withFile)
-- | Browse a module and write its description into @dir@ (creating the
-- directory if needed): an index JSON plus one source file per item.
indexIO :: Module -> FilePath -> IO ()
indexIO m dir = do
  createDirectoryIfMissing True dir
  description <- browse m
  writeIndexJson dir description
  writeSources dir description

-- | Write @dir/<module>.json@ describing the module's items.
writeIndexJson :: FilePath -> Description -> IO ()
writeIndexJson dir description = writeJSON file (D' description)
  where file = dir </> m' <.> "json"
        Description (Module _ m') _ = description
-- | Serialize a value to JSON and write it to the given file, replacing
-- any previous contents (the handle is closed by 'withFile').
writeJSON :: ToJSON a => FilePath -> a -> IO ()
writeJSON file o =
  withFile file WriteMode $ \h ->
    B.hPutStrLn h (A.encode o)
-- | Wrapper giving 'Description' a custom JSON shape keyed by module name.
newtype Description' = D' Description

instance ToJSON Description' where
  -- Shape: { "<module>": { "<symbol>": { qname, signature, sourceFile, sourceBase64 } } }
  toJSON (D' (Description (Module _ m) items)) = object [ L.pack m .= object (map toPair items) ]
    where toPair (Item (Symbol s) (Signature sig) (Source src64)) =
            L.pack s .=
              object [ "qname" .= qname s
                     , "signature" .= sig
                     , "sourceFile" .= sourceFile s
                     , "sourceBase64" .= src64
                     ]
          qname s = m ++ "." ++ s
          sourceFile s = m ++ "_" ++ s <.> "hs"

-- | Write each item's (base64-decoded) source to its own @.hs@ file.
writeSources :: FilePath -> Description -> IO ()
writeSources dir (Description m items) = mapM_ (writeItem dir m) items

-- | Write one item's decoded source to @dir/<module>_<symbol>.hs@.
writeItem :: FilePath -> Module -> Item -> IO ()
writeItem dir (Module _ m) (Item (Symbol s) _ (Source src)) =
    writeFile file (B64.decode src)
  where file = dir </> qname <.> "hs"
        qname = m ++ "_" ++ s
|
ptitfred/slidecoding
|
src/Slidecoding/Indexer.hs
|
gpl-3.0
| 2,035
| 0
| 12
| 600
| 677
| 354
| 323
| 44
| 1
|
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# OPTIONS_GHC -fno-warn-duplicate-exports #-}
{-# OPTIONS_GHC -fno-warn-unused-binds #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- |
-- Module : Network.Google.Resource.Redis.Projects.Locations.Operations.Delete
-- Copyright : (c) 2015-2016 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
-- Deletes a long-running operation. This method indicates that the client
-- is no longer interested in the operation result. It does not cancel the
-- operation. If the server doesn\'t support this method, it returns
-- \`google.rpc.Code.UNIMPLEMENTED\`.
--
-- /See:/ <https://cloud.google.com/memorystore/docs/redis/ Google Cloud Memorystore for Redis API Reference> for @redis.projects.locations.operations.delete@.
module Network.Google.Resource.Redis.Projects.Locations.Operations.Delete
(
-- * REST Resource
ProjectsLocationsOperationsDeleteResource
-- * Creating a Request
, projectsLocationsOperationsDelete
, ProjectsLocationsOperationsDelete
-- * Request Lenses
, plodXgafv
, plodUploadProtocol
, plodAccessToken
, plodUploadType
, plodName
, plodCallback
) where
import Network.Google.Prelude
import Network.Google.Redis.Types
-- | A resource alias for @redis.projects.locations.operations.delete@ method which the
-- 'ProjectsLocationsOperationsDelete' request conforms to.
-- (Auto-generated servant route description.)
type ProjectsLocationsOperationsDeleteResource =
     "v1" :>
       Capture "name" Text :>
         QueryParam "$.xgafv" Xgafv :>
           QueryParam "upload_protocol" Text :>
             QueryParam "access_token" Text :>
               QueryParam "uploadType" Text :>
                 QueryParam "callback" Text :>
                   QueryParam "alt" AltJSON :> Delete '[JSON] Empty

-- | Deletes a long-running operation. This method indicates that the client
-- is no longer interested in the operation result. It does not cancel the
-- operation. If the server doesn\'t support this method, it returns
-- \`google.rpc.Code.UNIMPLEMENTED\`.
--
-- /See:/ 'projectsLocationsOperationsDelete' smart constructor.
data ProjectsLocationsOperationsDelete =
  ProjectsLocationsOperationsDelete'
    { _plodXgafv :: !(Maybe Xgafv)
    , _plodUploadProtocol :: !(Maybe Text)
    , _plodAccessToken :: !(Maybe Text)
    , _plodUploadType :: !(Maybe Text)
    , _plodName :: !Text
    , _plodCallback :: !(Maybe Text)
    }
  deriving (Eq, Show, Data, Typeable, Generic)
-- | Creates a value of 'ProjectsLocationsOperationsDelete' with the minimum fields required to make a request.
-- Only the operation name is required; all optional fields start as 'Nothing'.
--
-- Use one of the following lenses to modify other fields as desired:
--
-- * 'plodXgafv'
--
-- * 'plodUploadProtocol'
--
-- * 'plodAccessToken'
--
-- * 'plodUploadType'
--
-- * 'plodName'
--
-- * 'plodCallback'
projectsLocationsOperationsDelete
    :: Text -- ^ 'plodName'
    -> ProjectsLocationsOperationsDelete
projectsLocationsOperationsDelete pPlodName_ =
  ProjectsLocationsOperationsDelete'
    { _plodXgafv = Nothing
    , _plodUploadProtocol = Nothing
    , _plodAccessToken = Nothing
    , _plodUploadType = Nothing
    , _plodName = pPlodName_
    , _plodCallback = Nothing
    }

-- Lenses over the request record (auto-generated accessors).

-- | V1 error format.
plodXgafv :: Lens' ProjectsLocationsOperationsDelete (Maybe Xgafv)
plodXgafv
  = lens _plodXgafv (\ s a -> s{_plodXgafv = a})

-- | Upload protocol for media (e.g. \"raw\", \"multipart\").
plodUploadProtocol :: Lens' ProjectsLocationsOperationsDelete (Maybe Text)
plodUploadProtocol
  = lens _plodUploadProtocol
      (\ s a -> s{_plodUploadProtocol = a})

-- | OAuth access token.
plodAccessToken :: Lens' ProjectsLocationsOperationsDelete (Maybe Text)
plodAccessToken
  = lens _plodAccessToken
      (\ s a -> s{_plodAccessToken = a})

-- | Legacy upload protocol for media (e.g. \"media\", \"multipart\").
plodUploadType :: Lens' ProjectsLocationsOperationsDelete (Maybe Text)
plodUploadType
  = lens _plodUploadType
      (\ s a -> s{_plodUploadType = a})

-- | The name of the operation resource to be deleted.
plodName :: Lens' ProjectsLocationsOperationsDelete Text
plodName = lens _plodName (\ s a -> s{_plodName = a})

-- | JSONP
plodCallback :: Lens' ProjectsLocationsOperationsDelete (Maybe Text)
plodCallback
  = lens _plodCallback (\ s a -> s{_plodCallback = a})
-- | Wires the request record to the REST resource: response type, OAuth
-- scopes, and the client call built from the record's fields.
instance GoogleRequest
           ProjectsLocationsOperationsDelete
         where
        type Rs ProjectsLocationsOperationsDelete = Empty
        type Scopes ProjectsLocationsOperationsDelete =
             '["https://www.googleapis.com/auth/cloud-platform"]
        requestClient ProjectsLocationsOperationsDelete'{..}
          = go _plodName _plodXgafv _plodUploadProtocol
              _plodAccessToken
              _plodUploadType
              _plodCallback
              (Just AltJSON)
              redisService
          where go
                  = buildClient
                      (Proxy ::
                         Proxy ProjectsLocationsOperationsDeleteResource)
                      mempty
|
brendanhay/gogol
|
gogol-redis/gen/Network/Google/Resource/Redis/Projects/Locations/Operations/Delete.hs
|
mpl-2.0
| 5,423
| 0
| 15
| 1,139
| 702
| 413
| 289
| 103
| 1
|
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# OPTIONS_GHC -fno-warn-duplicate-exports #-}
{-# OPTIONS_GHC -fno-warn-unused-binds #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- |
-- Module : Network.Google.Resource.AndroidPublisher.InternalAppsharingartifacts.Uploadbundle
-- Copyright : (c) 2015-2016 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
-- Uploads an app bundle to internal app sharing. If you are using the
-- Google API client libraries, please increase the timeout of the http
-- request before calling this endpoint (a timeout of 2 minutes is
-- recommended). See [Timeouts and
-- Errors](https:\/\/developers.google.com\/api-client-library\/java\/google-api-java-client\/errors)
-- for an example in java.
--
-- /See:/ <https://developers.google.com/android-publisher Google Play Android Developer API Reference> for @androidpublisher.internalappsharingartifacts.uploadbundle@.
module Network.Google.Resource.AndroidPublisher.InternalAppsharingartifacts.Uploadbundle
(
-- * REST Resource
InternalAppsharingartifactsUploadbundleResource
-- * Creating a Request
, internalAppsharingartifactsUploadbundle
, InternalAppsharingartifactsUploadbundle
-- * Request Lenses
, iauXgafv
, iauUploadProtocol
, iauPackageName
, iauAccessToken
, iauUploadType
, iauCallback
) where
import Network.Google.AndroidPublisher.Types
import Network.Google.Prelude
-- | A resource alias for @androidpublisher.internalappsharingartifacts.uploadbundle@ method which the
-- 'InternalAppsharingartifactsUploadbundle' request conforms to.
type InternalAppsharingartifactsUploadbundleResource =
     -- Metadata-only branch: the JSON request without a media payload.
     "androidpublisher" :>
       "v3" :>
         "applications" :>
           "internalappsharing" :>
             Capture "packageName" Text :>
               "artifacts" :>
                 "bundle" :>
                   QueryParam "$.xgafv" Xgafv :>
                     QueryParam "upload_protocol" Text :>
                       QueryParam "access_token" Text :>
                         QueryParam "uploadType" Text :>
                           QueryParam "callback" Text :>
                             QueryParam "alt" AltJSON :>
                               Post '[JSON] InternalAppSharingArtifact
       :<|>
       -- Media-upload branch: same path under the "upload" prefix, with the
       -- bundle bytes carried as an 'AltMedia' request body.
       "upload" :>
         "androidpublisher" :>
           "v3" :>
             "applications" :>
               "internalappsharing" :>
                 Capture "packageName" Text :>
                   "artifacts" :>
                     "bundle" :>
                       QueryParam "$.xgafv" Xgafv :>
                         QueryParam "upload_protocol" Text :>
                           QueryParam "access_token" Text :>
                             QueryParam "uploadType" Text :>
                               QueryParam "callback" Text :>
                                 QueryParam "alt" AltJSON :>
                                   QueryParam "uploadType" AltMedia :>
                                     AltMedia :>
                                       Post '[JSON] InternalAppSharingArtifact
-- | Uploads an app bundle to internal app sharing. If you are using the
-- Google API client libraries, please increase the timeout of the http
-- request before calling this endpoint (a timeout of 2 minutes is
-- recommended). See [Timeouts and
-- Errors](https:\/\/developers.google.com\/api-client-library\/java\/google-api-java-client\/errors)
-- for an example in java.
--
-- /See:/ 'internalAppsharingartifactsUploadbundle' smart constructor.
data InternalAppsharingartifactsUploadbundle =
  InternalAppsharingartifactsUploadbundle'
    { _iauXgafv :: !(Maybe Xgafv)          -- ^ V1 error format selector.
    , _iauUploadProtocol :: !(Maybe Text)  -- ^ Upload protocol for media.
    , _iauPackageName :: !Text             -- ^ Package name of the app (required).
    , _iauAccessToken :: !(Maybe Text)     -- ^ OAuth access token.
    , _iauUploadType :: !(Maybe Text)      -- ^ Legacy upload protocol.
    , _iauCallback :: !(Maybe Text)        -- ^ JSONP callback.
    }
  deriving (Eq, Show, Data, Typeable, Generic)
-- | Creates a value of 'InternalAppsharingartifactsUploadbundle' with the minimum fields required to make a request.
--
-- Use one of the following lenses to modify other fields as desired:
--
-- * 'iauXgafv'
--
-- * 'iauUploadProtocol'
--
-- * 'iauPackageName'
--
-- * 'iauAccessToken'
--
-- * 'iauUploadType'
--
-- * 'iauCallback'
internalAppsharingartifactsUploadbundle
    :: Text -- ^ 'iauPackageName'
    -> InternalAppsharingartifactsUploadbundle
-- Only the package name is mandatory; every optional query parameter
-- starts out unset and can be filled in through the lenses above.
internalAppsharingartifactsUploadbundle pIauPackageName_ =
  InternalAppsharingartifactsUploadbundle'
    { _iauPackageName = pIauPackageName_
    , _iauXgafv = Nothing
    , _iauUploadProtocol = Nothing
    , _iauAccessToken = Nothing
    , _iauUploadType = Nothing
    , _iauCallback = Nothing
    }
-- | V1 error format.
iauXgafv :: Lens' InternalAppsharingartifactsUploadbundle (Maybe Xgafv)
iauXgafv = lens _iauXgafv (\st val -> st { _iauXgafv = val })

-- | Upload protocol for media (e.g. \"raw\", \"multipart\").
iauUploadProtocol :: Lens' InternalAppsharingartifactsUploadbundle (Maybe Text)
iauUploadProtocol =
  lens _iauUploadProtocol (\st val -> st { _iauUploadProtocol = val })

-- | Package name of the app.
iauPackageName :: Lens' InternalAppsharingartifactsUploadbundle Text
iauPackageName =
  lens _iauPackageName (\st val -> st { _iauPackageName = val })

-- | OAuth access token.
iauAccessToken :: Lens' InternalAppsharingartifactsUploadbundle (Maybe Text)
iauAccessToken =
  lens _iauAccessToken (\st val -> st { _iauAccessToken = val })

-- | Legacy upload protocol for media (e.g. \"media\", \"multipart\").
iauUploadType :: Lens' InternalAppsharingartifactsUploadbundle (Maybe Text)
iauUploadType =
  lens _iauUploadType (\st val -> st { _iauUploadType = val })

-- | JSONP
iauCallback :: Lens' InternalAppsharingartifactsUploadbundle (Maybe Text)
iauCallback =
  lens _iauCallback (\st val -> st { _iauCallback = val })
instance GoogleRequest
           InternalAppsharingartifactsUploadbundle
         where
        type Rs InternalAppsharingartifactsUploadbundle =
             InternalAppSharingArtifact
        type Scopes InternalAppsharingartifactsUploadbundle =
             '["https://www.googleapis.com/auth/androidpublisher"]
        requestClient
          InternalAppsharingartifactsUploadbundle'{..}
          -- Metadata-only request: selects the FIRST alternative of the
          -- routing type ('go :<|> _') and never sends a media body.
          = go _iauPackageName _iauXgafv _iauUploadProtocol
              _iauAccessToken
              _iauUploadType
              _iauCallback
              (Just AltJSON)
              androidPublisherService
          where go :<|> _
                  = buildClient
                      (Proxy ::
                         Proxy
                           InternalAppsharingartifactsUploadbundleResource)
                      mempty
instance GoogleRequest
           (MediaUpload InternalAppsharingartifactsUploadbundle)
         where
        type Rs
               (MediaUpload InternalAppsharingartifactsUploadbundle)
             = InternalAppSharingArtifact
        type Scopes
               (MediaUpload InternalAppsharingartifactsUploadbundle)
             = Scopes InternalAppsharingartifactsUploadbundle
        requestClient
          (MediaUpload
             InternalAppsharingartifactsUploadbundle'{..} body)
          -- Media upload: selects the SECOND alternative of the routing
          -- type ('_ :<|> go') and additionally passes the AltMedia marker
          -- plus the raw bundle body.
          = go _iauPackageName _iauXgafv _iauUploadProtocol
              _iauAccessToken
              _iauUploadType
              _iauCallback
              (Just AltJSON)
              (Just AltMedia)
              body
              androidPublisherService
          where _ :<|> go
                  = buildClient
                      (Proxy ::
                         Proxy
                           InternalAppsharingartifactsUploadbundleResource)
                      mempty
|
brendanhay/gogol
|
gogol-android-publisher/gen/Network/Google/Resource/AndroidPublisher/InternalAppsharingartifacts/Uploadbundle.hs
|
mpl-2.0
| 8,045
| 0
| 37
| 2,242
| 975
| 546
| 429
| 156
| 1
|
-- Copyright © 2014 Garrison Jensen
-- License
-- This code and text are dedicated to the public domain.
-- You can copy, modify, distribute and perform the work,
-- even for commercial purposes, all without asking permission.
import Parser
import Nock(Noun(Cell))
import Interpreter
import Optimizer
import System.Environment
import System.IO
import Data.List(intercalate)
-- | Read-eval-print loop: prompt for a Nock expression, evaluate it,
-- and repeat until the user types "quit".
interactiveNock :: IO ()
interactiveNock = do
  putStr "> "
  hFlush stdout
  nockStr <- getLine
  case nockStr of
    "quit" -> return ()
    _ -> do
      case parseString nockStr of
        Just a  -> print $ nock $ optimizer a
        -- putStrLn, not print: print on a String shows it quoted.
        Nothing -> putStrLn "Invalid Nock expression."
      interactiveNock
-- | Evaluate the Nock program stored in @file@. When @input@ parses as a
-- noun, it is paired (as a 'Cell') with the program before evaluation;
-- otherwise the program is evaluated on its own.
readNockFile :: String -> String -> IO ()
readNockFile file input = do
  program <- parseFile file
  let subject = maybe program (`Cell` program) (parseString input)
  print $ nock $ optimizer subject
-- | Usage text printed for the @-h@ flag (string-gap continuation lines).
helpMsg :: String
helpMsg = "Usage: nock [-h] [file [input]]\n\
          \ If program file is not provided, \n\
          \ an interactive nock shell is started.\n\
          \ -h Print help\n\
          \ Examples:\n\
          \ $ nock sub.nock [3 2]\n\
          \ 1\n\
          \ $ nock\n\
          \ > [1 2 3 4]\n\
          \ 4\n\
          \ > [43 [8 [1 0] 8 [1 6 [5 [0 7] 4 0 6] [0 6] 9 2 [0 2] [4 0 6] 0 7] 9 2 0 1]]\n\
          \ 42\n\
          \ > quit\n"
-- | Entry point: @-h@ prints usage, no arguments starts the REPL,
-- otherwise the first argument is a program file and the remaining
-- arguments (joined by spaces) form the input noun.
main :: IO ()
main = do
  args <- getArgs
  case args of
    "-h":_     -> putStr helpMsg
    []         -> interactiveNock
    -- unwords is the idiomatic form of `intercalate " "`.
    file:input -> readNockFile file (unwords input)
|
GarrisonJ/Nock
|
src/Main.hs
|
unlicense
| 1,819
| 0
| 17
| 707
| 321
| 158
| 163
| 33
| 3
|
module Html.Post where
|
mcmaniac/blog.nils.cc
|
src/Html/Post.hs
|
apache-2.0
| 24
| 0
| 3
| 4
| 6
| 4
| 2
| 1
| 0
|
{-# LANGUAGE OverloadedStrings #-}
{-|
Module : Network.NSQ.Parser
Description : Protocol Parser layer for the NSQ client library.
-}
module Network.NSQ.Parser
( message
, decode
, encode
) where
import Data.Word
import Data.Monoid
import Control.Applicative
import Data.Attoparsec.Binary
import Data.Attoparsec.ByteString hiding (count)
import Data.Int
import Prelude hiding (take)
import qualified Data.List as DL
import qualified Data.ByteString as BS
import qualified Data.ByteString.Char8 as C8
import qualified Data.ByteString.Lazy as BL
import qualified Data.ByteString.Lazy.Builder as BL
import qualified Data.Text.Encoding as T
import Network.NSQ.Types
import Network.NSQ.Identify
-- | Decode a raw frame 'BS.ByteString' into a 'Message', yielding
-- 'Nothing' on any parse failure.
decode :: BS.ByteString -> Maybe Message
decode = either (const Nothing) Just . parseOnly message
-- | Convert various nsq messages into useful 'Message' types
command :: BS.ByteString -> Message
command msg =
    case msg of
        "_heartbeat_" -> Heartbeat
        "OK"          -> OK
        "CLOSE_WAIT"  -> CloseWait
        other         -> CatchAllMessage (FTUnknown 99) other -- TODO: Better FrameType
-- | Map nsqd error strings onto their 'ErrorType'; anything
-- unrecognized is preserved verbatim in 'Unknown'.
errorCasting :: BS.ByteString -> ErrorType
errorCasting err =
    case err of
        "E_INVALID"       -> Invalid
        "E_BAD_BODY"      -> BadBody
        "E_BAD_TOPIC"     -> BadTopic
        "E_BAD_MESSAGE"   -> BadMessage
        "E_PUB_FAILED"    -> PubFailed
        "E_MPUB_FAILED"   -> MPubFailed
        "E_FIN_FAILED"    -> FinFailed
        "E_REQ_FAILED"    -> ReqFailed
        "E_TOUCH_FAILED"  -> TouchFailed
        unknown           -> Unknown unknown
-- | Map the numeric wire value onto a 'FrameType'.
frameType :: Int32 -> FrameType
frameType code
    | code == 0 = FTResponse
    | code == 1 = FTError
    | code == 2 = FTMessage
    | otherwise = FTUnknown code
-- TODO: do sanity check such as checking that the size is of a minimal
-- size, then parsing the frameType, then the remainder (fail "messg")
-- | Parse one low-level frame (size word, frame-type word, payload)
-- into a 'Message'.
message :: Parser Message
message = do
    frameSize <- fromIntegral <$> anyWord32be
    ftype <- (frameType . fromIntegral) <$> anyWord32be
    frame ftype (frameSize - 4) -- Taking in accord the frameType
-- | Parse in the frame (remaining portion) of the message in accordance of
-- the 'Frametype'
frame :: FrameType -> Int -> Parser Message
frame FTError size = Error <$> (errorCasting `fmap` take size)
frame FTResponse size = command <$> take size
frame FTMessage size = Message
    -- 8-byte nanosecond timestamp
    <$> (fromIntegral <$> anyWord64be)
    -- 2-byte attempt counter
    <*> anyWord16be
    -- 16 bytes message id
    <*> take 16
    -- Taking in accord timestamp/attempts/msgid (8 + 2 + 16 = 26 bytes)
    <*> take (size - 26)
frame ft size = CatchAllMessage ft <$> take size
-- TODO: this won't work for streaming the data...
-- Should provide two api, one for in memory (ie where we count up the length of the data manualy
-- And a "streaming" version in which we know the actual size before streaming (ie streaming from a file for ex)
-- | Prefix the payload with its big-endian 32-bit length.
sizedData :: BS.ByteString -> BL.Builder
sizedData payload = BL.word32BE payloadLen <> BL.byteString payload
  where payloadLen = fromIntegral (BS.length payload)
-- Body of a foldl to build up a sequence of concat sized data:
-- accumulates (total byte size, message count, concatenated builder).
concatSizedData :: (Word32, Word32, BL.Builder) -> BS.ByteString -> (Word32, Word32, BL.Builder)
concatSizedData (sizeAcc, countAcc, builderAcc) payload =
    ( sizeAcc + 4 + fromIntegral (BS.length payload) -- Add 4 to accord for message size
    , countAcc + 1
    , builderAcc <> sizedData payload
    )
-- | Encode a 'Command' into raw 'ByteString' to send to the network to the
-- nsqd daemon. There are a few gotchas here; You can only have one 'Sub'
-- (topic/channel) per nsqld connection, any other will yield 'Invalid'.
-- Also you can publish to any number of topic without limitation.
encode :: Command -> BS.ByteString
-- The NSQ protocol magic is exactly 4 bytes: two spaces followed by "V2".
encode Protocol = "  V2"
encode NOP = "NOP\n"
encode Cls = "CLS\n"
encode (Identify identify) = BL.toStrict $ BL.toLazyByteString (
        BL.byteString "IDENTIFY\n" <>
        sizedData (BL.toStrict $ encodeMetadata identify)
    )
encode (Sub topic channel ephemeral) = BL.toStrict $ BL.toLazyByteString (
        BL.byteString "SUB " <>
        BL.byteString (T.encodeUtf8 topic) <>
        BL.byteString " " <>
        BL.byteString (T.encodeUtf8 channel) <>
        BL.byteString (if ephemeral then "#ephemeral" else "") <>
        BL.byteString "\n"
    )
encode (Pub topic dat) = BL.toStrict $ BL.toLazyByteString (
        BL.byteString "PUB " <>
        BL.byteString (T.encodeUtf8 topic) <>
        BL.byteString "\n" <>
        sizedData dat
    )
encode (MPub topic dx) = BL.toStrict $ BL.toLazyByteString (
        BL.byteString "MPUB " <>
        BL.byteString (T.encodeUtf8 topic) <>
        BL.byteString "\n" <>
        BL.word32BE (totalSize + 4) <> -- Accord for message count
        BL.word32BE totalCount <>
        content
    )
    where
        (totalSize, totalCount, content) = DL.foldl' concatSizedData (0, 0, mempty) dx
encode (Rdy count) = BL.toStrict $ BL.toLazyByteString (
        BL.byteString "RDY " <>
        BL.byteString (C8.pack $ show count) <>
        BL.byteString "\n"
    )
encode (Fin msg_id) = BL.toStrict $ BL.toLazyByteString (
        BL.byteString "FIN " <>
        BL.byteString msg_id <>
        BL.byteString "\n"
    )
encode (Touch msg_id) = BL.toStrict $ BL.toLazyByteString (
        BL.byteString "TOUCH " <>
        BL.byteString msg_id <>
        BL.byteString "\n"
    )
encode (Req msg_id timeout) = BL.toStrict $ BL.toLazyByteString (
        BL.byteString "REQ " <>
        BL.byteString msg_id <>
        BL.byteString " " <>
        BL.byteString (C8.pack $ show timeout) <>
        BL.byteString "\n"
    )
-- Escape hatch: a raw, caller-provided command, sent verbatim.
encode (Command m) = m
|
pharaun/hsnsq
|
src/Network/NSQ/Parser.hs
|
apache-2.0
| 5,890
| 0
| 16
| 1,342
| 1,373
| 717
| 656
| 112
| 2
|
-- 73702
import Euler(isSpecialSumSet, splitOn)
-- Parse each comma-separated line into a list of numbers.
parseSets = map parseLine
  where parseLine = map read . splitOn ','
-- Sum the members of every special sum set among the input lines.
sumSpecialSets = sum . concat . filter isSpecialSumSet . parseSets
-- Project Euler 105: sum all special sum sets found in the data file.
main :: IO ()
main = do
    contents <- readFile "../files/p105_sets.txt"
    -- print is the idiomatic form of `putStrLn $ show`.
    print $ sumSpecialSets $ lines contents
|
higgsd/euler
|
hs/105.hs
|
bsd-2-clause
| 373
| 0
| 9
| 86
| 134
| 66
| 68
| 10
| 1
|
module Buster.Logger (configureLogger,
defaultLog,
debugM,
infoM,
noticeM,
warningM,
errorM,
criticalM,
alertM,
emergencyM) where
import Control.Applicative ((<$>), (<*>), pure)
import System.IO (stdout, openFile, IOMode(..))
import System.Log.Logger (updateGlobalLogger,
Priority(..),
rootLoggerName,
setLevel,
setHandlers)
import System.Log.Formatter (simpleLogFormatter)
import System.Log.Handler (setFormatter)
import System.Log.Handler.Simple (verboseStreamHandler)
import qualified System.Log.Logger as L
-- | Install a single handler on the root logger: append to the given
-- file when one is supplied, otherwise write to stdout. Verbose mode
-- logs at DEBUG, otherwise only WARNING and above.
configureLogger :: Maybe FilePath -> Bool -> IO ()
configureLogger logFile verbose = do
    stream <- resolveHandle
    rawHandler <- verboseStreamHandler stream logLevel
    let handler = setFormatter rawHandler (simpleLogFormatter "$time [$prio] $msg")
    updateGlobalLogger defaultLog (setHandlers [handler])
    updateGlobalLogger defaultLog (setLevel logLevel)
  where
    resolveHandle = case logFile of
                      Just path -> openFile path AppendMode
                      _         -> return stdout
    logLevel | verbose   = DEBUG
             | otherwise = WARNING
-- | Name of the logger all Buster logging goes to (the root logger).
defaultLog :: String
defaultLog = rootLoggerName

-- | Log a message at DEBUG priority.
debugM :: String -> IO ()
debugM = L.debugM defaultLog

-- | Log a message at INFO priority.
infoM :: String -> IO ()
infoM = L.infoM defaultLog

-- | Log a message at NOTICE priority.
noticeM :: String -> IO ()
noticeM = L.noticeM defaultLog

-- | Log a message at WARNING priority.
warningM :: String -> IO ()
warningM = L.warningM defaultLog

-- | Log a message at ERROR priority.
errorM :: String -> IO ()
errorM = L.errorM defaultLog

-- | Log a message at CRITICAL priority.
criticalM :: String -> IO ()
criticalM = L.criticalM defaultLog

-- | Log a message at ALERT priority.
alertM :: String -> IO ()
alertM = L.alertM defaultLog

-- | Log a message at EMERGENCY priority.
emergencyM :: String -> IO ()
emergencyM = L.emergencyM defaultLog
|
MichaelXavier/Buster
|
src/Buster/Logger.hs
|
bsd-2-clause
| 2,373
| 0
| 10
| 912
| 527
| 285
| 242
| 55
| 3
|
{- Test Suite for Code Generating -}
module GPC.CodeGenTests (codeGenTests) where
import Test.HUnit
--import GPC.CodeGen
import GPC.Tests
import qualified Test.Framework.Providers.API as TFA
import Test.Framework.Providers.HUnit
-- | Return expected and actual program output
-- | These are expected to pass and to be equal
genAssignCheck :: [(String,String)]
genAssignCheck = [asInt]
 where
    -- |Check integer literal assignment (placeholder pair until real
    -- code generation output is wired in)
    asInt = ("placeholder", "placeholder")
-- | Build a single HUnit test case asserting expected == actual.
generateTest :: String -> (String, String) -> TFA.Test
generateTest label (expected, actual) =
    testCase label (assertEqual "" expected actual)
-- | Generate labelled test cases from expected\/actual pairs.
generateTests :: String -> [(String, String)] -> [TFA.Test]
-- zipWith is the idiomatic form of `map (uncurry f) $ zip xs ys`.
generateTests s ps = zipWith generateTest labels ps
  where labels = makeLabels s ps
-- | Test valid assignments
assignGenTests :: [TFA.Test]
assignGenTests = generateTests "CodeGenAssignTest" genAssignCheck
-- | All Test cases to run
codeGenTests :: TFA.Test
-- `concat [xs]` is just `xs`; the redundant concat is dropped.
codeGenTests = TFA.testGroup "CodeGenTests" assignGenTests
|
RossMeikleham/GPC
|
tests/GPC/CodeGenTests.hs
|
bsd-2-clause
| 1,040
| 0
| 8
| 168
| 246
| 143
| 103
| 17
| 1
|
{-# LANGUAGE OverloadedStrings #-}
-----------------------------------------------------------------------------
-- |
-- Module : Graphics.Hoodle.Render.Simple
-- Copyright : (c) 2011, 2012 Ian-Woo Kim
--
-- License : BSD3
-- Maintainer : Ian-Woo Kim <ianwookim@gmail.com>
-- Stability : experimental
-- Portability : GHC
--
-----------------------------------------------------------------------------
module Graphics.Hoodle.Render.Simple where
import Graphics.Rendering.Cairo
import Control.Applicative
import Control.Monad
import Data.Strict.Tuple hiding (fst,snd)
import Data.Hoodle.Simple
import Data.Hoodle.Predefined
import qualified Data.Map as M
import qualified Data.ByteString.Char8 as S
-- | Render a single stroke item onto the cairo surface.
drawOneStroke :: Stroke -> Render ()
drawOneStroke s = do
    case s of
      -- Fixed-width pen stroke: a polyline stroked at width w.
      Stroke _ _ w d -> do
        -- Highlighter strokes are drawn translucent.
        let opacity = if stroke_tool s == "highlighter"
                      then predefined_highlighter_opacity
                      else 1.0
        case M.lookup (stroke_color s) predefined_pencolor of
          Just (r,g,b,a) -> setSourceRGBA r g b (a*opacity)
          Nothing -> setSourceRGBA 0 0 0 1  -- unknown colour name: opaque black
        setLineWidth w
        setLineCap LineCapRound
        setLineJoin LineJoinRound
        drawOneStrokeCurve d
        stroke
      -- Variable-width stroke: rendered as a filled outline polygon.
      VWStroke _ _ d -> do
        let opacity = if stroke_tool s == "highlighter"
                      then predefined_highlighter_opacity
                      else 1.0
        case M.lookup (stroke_color s) predefined_pencolor of
          Just (r,g,b,a) -> setSourceRGBA r g b (a*opacity)
          Nothing -> setSourceRGBA 0 0 0 1
        setFillRule FillRuleWinding
        drawOneVWStrokeCurve d
        fill
      -- Embedded image: only its bounding box is drawn here.
      Img _ (x,y) (Dim w h) -> do
        setSourceRGBA 0 0 0 1
        setLineWidth 10
        rectangle x y w h
        stroke
-- | Trace a polyline through the stroke points; the strict pairs are
-- forced before each cairo call.
drawOneStrokeCurve :: [Pair Double Double] -> Render ()
drawOneStrokeCurve [] = return ()
drawOneStrokeCurve ((px0 :!: py0) : rest) = do
    px0 `seq` py0 `seq` moveTo px0 py0
    mapM_ segment rest
  where segment (px :!: py) = px `seq` py `seq` lineTo px py
-- | Render a variable-width stroke as an outline: walk the points forward
-- offsetting each by half its width along the pen-shape direction, then
-- walk them backward with the opposite offset; the caller fills the
-- resulting shape.
drawOneVWStrokeCurve :: [(Double,Double,Double)] -> Render ()
drawOneVWStrokeCurve [] = return ()
drawOneVWStrokeCurve (_:[]) = return ()
drawOneVWStrokeCurve ((xo,yo,_zo) : xs) = do
    moveTo xo yo
    -- This irrefutable match is safe: this clause only fires with >= 2
    -- points, so xs is non-empty and `reverse xs` has a head.
    let ((xlast,ylast,_zlast):rxs) = reverse xs
    foldM_ forward (xo,yo) xs
    foldM_ backward (xlast,ylast) rxs
  where (dx,dy) = (,) <$> fst <*> snd $ predefinedPenShapeAspectXY
        -- Cross product with the pen aspect: decides which side to offset.
        dir (x,y) = x * dy - y * dx
        forward (x0,y0) (x,y,z) = do if (dir (x-x0,y-y0) > 0)
                                       then lineTo (x+0.5*dx*z) (y+0.5*dy*z)
                                       else lineTo (x-0.5*dx*z) (y-0.5*dy*z)
                                     return (x,y)
        backward (x0,y0) (x,y,z) = do if (dir (x-x0,y-y0) < 0)
                                        then lineTo (x-0.5*dx*z) (y-0.5*dy*z)
                                        else lineTo (x+0.5*dx*z) (y+0.5*dy*z)
                                      return (x,y)
-- | general background drawing (including pdf file)
cairoDrawBackground :: Page -> Render ()
cairoDrawBackground page =
    case page_bkg page of
      bkg@(Background _ _ _) -> cairoDrawBkg (page_dim page) bkg
      BackgroundPdf _ _mdomain _mfilename _pagenum ->
          error "in cairoDrawBackground, pdf drawing is not defined yet"
          -- cairoDrawPdfBkg (Dim w h) mdomain mfilename pagenum
-- | Fill the page with its background colour, then draw the ruling.
-- Pdf-backed backgrounds just get a white rectangle here.
cairoDrawBkg :: Dimension -> Background -> Render ()
cairoDrawBkg (Dim w h) (Background _typ col sty) = do
    maybe (setSourceRGB 1 1 1)
          (\(r,g,b,_a) -> setSourceRGB r g b)
          (M.lookup col predefined_bkgcolor)
    rectangle 0 0 w h
    fill
    cairoDrawRuling w h sty
cairoDrawBkg (Dim w h) (BackgroundPdf _typ _mdomain _mfilename _pagenum) = do
    setSourceRGBA 1 1 1 1
    rectangle 0 0 w h
    fill
-- | Draw the page ruling for the given style name ("plain", "lined",
-- "ruled", "graph"); unknown style names draw nothing.
cairoDrawRuling :: Double -> Double -> S.ByteString -> Render ()
cairoDrawRuling w h style = do
    -- Shared helper: horizontal rules from the top margin down.
    let drawHorizRules = do
          let (r,g,b,a) = predefined_RULING_COLOR
          setSourceRGBA r g b a
          setLineWidth predefined_RULING_THICKNESS
          let drawonerule y = do
                moveTo 0 y
                lineTo w y
                stroke
          mapM_ drawonerule [ predefined_RULING_TOPMARGIN
                            , predefined_RULING_TOPMARGIN+predefined_RULING_SPACING
                            ..
                            h-1 ]
    case style of
      "plain" -> return ()
      "lined" -> do
        drawHorizRules
        -- plus a vertical left-margin line in the margin colour
        let (r2,g2,b2,a2) = predefined_RULING_MARGIN_COLOR
        setSourceRGBA r2 g2 b2 a2
        setLineWidth predefined_RULING_THICKNESS
        moveTo predefined_RULING_LEFTMARGIN 0
        lineTo predefined_RULING_LEFTMARGIN h
        stroke
      "ruled" -> drawHorizRules
      "graph" -> do
        -- evenly spaced full-page grid in both directions
        let (r3,g3,b3,a3) = predefined_RULING_COLOR
        setSourceRGBA r3 g3 b3 a3
        setLineWidth predefined_RULING_THICKNESS
        let drawonegraphvert x = do
              moveTo x 0
              lineTo x h
              stroke
        let drawonegraphhoriz y = do
              moveTo 0 y
              lineTo w y
              stroke
        mapM_ drawonegraphvert [0,predefined_RULING_GRAPHSPACING..w-1]
        mapM_ drawonegraphhoriz [0,predefined_RULING_GRAPHSPACING..h-1]
      _ -> return ()
-- | Draw the background and then the strokes of the first layer of the
-- page (only the first layer is rendered here, as before).
cairoDrawPage :: Page -> Render ()
cairoDrawPage page = do
    cairoDrawBackground page
    setLineCap LineCapRound
    setLineJoin LineJoinRound
    -- Guard against a page with no layers instead of crashing on (!!0).
    case page_layers page of
      []        -> return ()
      (layer:_) -> mapM_ drawOneStroke (layer_strokes layer)
    stroke
|
wavewave/hoodle-render
|
src/trash/Old/SimpleOld.hs
|
bsd-2-clause
| 5,570
| 0
| 17
| 1,678
| 1,821
| 907
| 914
| -1
| -1
|
{-# LANGUAGE TypeOperators
,BangPatterns
#-}
-- |
-- Module : Data.OI.IFun
-- Copyright : (c) Nobuo Yamashita 2012-2016
-- License : BSD3
-- Author : Nobuo Yamashita
-- Maintainer : nobsun@sampou.org
-- Stability : experimental
--
module Data.OI.IFun
(
-- * Type of function with interaction
IFun
-- * IFun combinator
,(|::|)
,(|->|)
,(|<>|)
) where
import Data.OI.Internal
-- | A function from @a@ to @b@ that additionally interacts through @p@.
type IFun p a b = a -> p :-> b

-- | Parallel composition: run the two functions componentwise on paired
-- inputs over paired interactions. The 'case' on 'dePair' (rather than a
-- lazy let) forces the pair split to WHNF — kept as written.
(|::|) :: IFun p a c -> IFun q b d -> IFun (p,q) (a,b) (c,d)
(f |::| g) (a,b) opq = case dePair opq of
  (p,q) -> (f a p, g b q)

-- | Left-to-right composition: @f@ also produces an intermediate @b'@,
-- which is fed (together with @b@) into @g@.
(|->|) :: IFun p a (b',c) -> IFun q (b',b) d -> IFun (p,q) (a,b) (c,d)
(f |->| g) (a,b) opq = case dePair opq of
  (p,q) -> case f a p of
    (b',c) -> (c, g (b',b) q)

-- | Bidirectional composition: @f@ and @g@ exchange intermediates @a'@
-- and @b'@ through mutually recursive where-bindings. This knot-tying
-- relies on the bindings staying lazy — do not force b'/a' eagerly.
(|<>|) :: IFun p (a',a) (b',c) -> IFun q (b',b) (a',d) -> IFun (p,q) (a,b) (c,d)
(f |<>| g) (a,b) opq = case dePair opq of
  (p,q) -> (c,d) where (b',c) = f (a',a) p; (a',d) = g (b',b) q
|
nobsun/oi
|
src/Data/OI/IFun.hs
|
bsd-3-clause
| 946
| 0
| 13
| 249
| 497
| 290
| 207
| 20
| 1
|
{-# LANGUAGE Arrows #-}
module Idris.CmdOptions where
import Idris.AbsSyntaxTree
import Idris.REPL
import IRTS.CodegenCommon
import Options.Applicative
import Options.Applicative.Arrows
import Data.Char
import Data.Maybe
import qualified Text.PrettyPrint.ANSI.Leijen as PP
-- | Parse the real command line (with --help\/--version handling) and
-- post-process the resulting options via 'preProcOpts'.
runArgParser :: IO [Opt]
runArgParser = do
    opts <- execParser $ info parser
              (fullDesc
               <> headerDoc   (Just idrisHeader)
               <> progDescDoc (Just idrisProgDesc)
               <> footerDoc   (Just idrisFooter)
              )
    return $ preProcOpts opts []
  where
    idrisHeader = PP.hsep [PP.text "Idris version", PP.text ver, PP.text ", (C) The Idris Community 2014"]
    idrisProgDesc = PP.vsep [PP.text "Idris is a general purpose pure functional programming language with dependent",
                             PP.text "types. Dependent types allow types to be predicated on values, meaning that",
                             PP.text "some aspects of a program’s behaviour can be specified precisely in the type.",
                             PP.text "It is compiled, with eager evaluation. Its features are influenced by Haskell",
                             PP.text "and ML.",
                             PP.empty,
                             PP.vsep $ map (\x -> PP.indent 4 (PP.text x)) [
                               "+ Full dependent types with dependent pattern matching",
                               "+ where clauses, with rule, simple case expressions",
                               "+ pattern matching let and lambda bindings",
                               "+ Type classes, monad comprehensions",
                               "+ do notation, idiom brackets",
                               "+ syntactic conveniences for lists, tuples, dependent pairs",
                               "+ Totality checking",
                               "+ Coinductive types",
                               "+ Indentation significant syntax, extensible syntax",
                               "+ Tactic based theorem proving (influenced by Coq)",
                               "+ Cumulative universes",
                               "+ Simple foreign function interface (to C)",
                               "+ Hugs style interactive environment"
                             ],
                             PP.empty]
    idrisFooter = PP.vsep [PP.text "It is important to note that Idris is first and foremost a research tool",
                           PP.text "and project. Thus the tooling provided and resulting programs created",
                           PP.text "should not necessarily be seen as production ready nor for industrial use.",
                           PP.empty,
                           PP.text "More details over Idris can be found online here:",
                           PP.empty,
                           PP.indent 4 (PP.text "http://www.idris-lang.org/")]
-- | Parse command-line-style arguments purely (no IO); arguments that do
-- not parse yield an empty option list.
pureArgParser :: [String] -> [Opt]
pureArgParser args =
    maybe [] (\opts -> preProcOpts opts [])
          (getParseResult (execParserPure (prefs idm) (info parser idm) args))
-- | Top-level option parser (arrow notation): flags, then positional
-- FILES, combined and routed through version\/helper handling.
parser :: Parser [Opt]
parser = runA $ proc () -> do
  flags <- asA parseFlags -< ()
  files <- asA (many $ argument (fmap Filename str) (metavar "FILES")) -< ()
  A parseVersion >>> A helper -< (flags ++ files)
-- | One big alternative chain covering every recognized flag; 'many'
-- collects them in command-line order.
parseFlags :: Parser [Opt]
parseFlags = many $
     flag' NoBanner (long "nobanner" <> help "Suppress the banner")
 <|> flag' Quiet (short 'q' <> long "quiet" <> help "Quiet verbosity")
 <|> flag' Ideslave (long "ideslave")
 <|> flag' IdeslaveSocket (long "ideslave-socket")
 <|> (Client <$> strOption (long "client"))
 <|> (OLogging <$> option auto (long "log" <> metavar "LEVEL" <> help "Debugging log level"))
 <|> flag' NoBasePkgs (long "nobasepkgs")
 <|> flag' NoPrelude (long "noprelude")
 <|> flag' NoBuiltins (long "nobuiltins")
 <|> flag' NoREPL (long "check" <> help "Typecheck only, don't start the REPL")
 <|> (Output <$> strOption (short 'o' <> long "output" <> metavar "FILE" <> help "Specify output file"))
 -- <|> flag' TypeCase (long "typecase")
 <|> flag' TypeInType (long "typeintype")
 <|> flag' DefaultTotal (long "total" <> help "Require functions to be total by default")
 <|> flag' DefaultPartial (long "partial")
 <|> flag' WarnPartial (long "warnpartial" <> help "Warn about undeclared partial functions")
 <|> flag' WarnReach (long "warnreach" <> help "Warn about reachable but inaccessible arguments")
 <|> flag' NoCoverage (long "nocoverage")
 <|> flag' ErrContext (long "errorcontext")
 <|> flag' ShowLibs (long "link" <> help "Display link flags")
 <|> flag' ShowLibdir (long "libdir" <> help "Display library directory")
 <|> flag' ShowIncs (long "include" <> help "Display the includes flags")
 <|> flag' Verbose (short 'V' <> long "verbose" <> help "Loud verbosity")
 <|> (IBCSubDir <$> strOption (long "ibcsubdir" <> metavar "FILE" <> help "Write IBC files into sub directory"))
 <|> (ImportDir <$> strOption (short 'i' <> long "idrispath" <> help "Add directory to the list of import paths"))
 <|> flag' WarnOnly (long "warn")
 <|> (Pkg <$> strOption (short 'p' <> long "package"))
 <|> (Port <$> strOption (long "port" <> metavar "PORT" <> help "REPL TCP port"))
 -- Package commands
 <|> (PkgBuild <$> strOption (long "build" <> metavar "IPKG" <> help "Build package"))
 <|> (PkgInstall <$> strOption (long "install" <> metavar "IPKG" <> help "Install package"))
 <|> (PkgREPL <$> strOption (long "repl"))
 <|> (PkgClean <$> strOption (long "clean" <> metavar "IPKG" <> help "Clean package"))
 <|> (PkgMkDoc <$> strOption (long "mkdoc" <> metavar "IPKG" <> help "Generate IdrisDoc for package"))
 <|> (PkgCheck <$> strOption (long "checkpkg" <> metavar "IPKG" <> help "Check package only"))
 <|> (PkgTest <$> strOption (long "testpkg" <> metavar "IPKG" <> help "Run tests for package"))
 -- Misc options
 <|> (BCAsm <$> strOption (long "bytecode"))
 <|> flag' (OutputTy Raw) (short 'S' <> long "codegenonly" <> help "Do no further compilation of code generator output")
 <|> flag' (OutputTy Object) (short 'c' <> long "compileonly" <> help "Compile to object files rather than an executable")
 <|> flag' (OutputTy MavenProject) (long "mvn" <> help "Create a maven project (for Java codegen)")
 <|> (DumpDefun <$> strOption (long "dumpdefuns"))
 <|> (DumpCases <$> strOption (long "dumpcases"))
 <|> ((\s -> UseCodegen $ parseCodegen s) <$> strOption (long "codegen" <> metavar "TARGET" <> help "Select code generator: C, Java, bytecode"))
 <|> (EvalExpr <$> strOption (long "eval" <> short 'e' <> metavar "EXPR" <> help "Evaluate an expression without loading the REPL"))
 <|> flag' (InterpretScript "Main.main") (long "execute" <> help "Execute as idris")
 <|> (InterpretScript <$> strOption (long "exec" <> metavar "EXPR" <> help "Execute as idris"))
 <|> ((\s -> Extension $ getExt s) <$> strOption (long "extension" <> short 'X' <> metavar "EXT" <> help "Turn on language extension (TypeProviders or ErrorReflection)"))
 <|> flag' (OptLevel 3) (long "O3")
 <|> flag' (OptLevel 2) (long "O2")
 <|> flag' (OptLevel 1) (long "O1")
 <|> flag' (OptLevel 0) (long "O0")
 <|> flag' (AddOpt PETransform) (long "partial-eval")
 <|> flag' (RemoveOpt PETransform) (long "no-partial-eval")
 <|> (OptLevel <$> option auto (short 'O' <> long "level"))
 <|> (TargetTriple <$> strOption (long "target" <> metavar "TRIPLE" <> help "Select target triple (for llvm codegen)"))
 <|> (TargetCPU <$> strOption (long "cpu" <> metavar "CPU" <> help "Select target CPU e.g. corei7 or cortex-m3 (for LLVM codegen)"))
 <|> flag' (ColourREPL True) (long "colour" <> long "color" <> help "Force coloured output")
 <|> flag' (ColourREPL False) (long "nocolour" <> long "nocolor" <> help "Disable coloured output")
 where
   -- Parse an extension name via Read; unknown names abort with error.
   getExt s = case maybeRead s of
                Just ext -> ext
                Nothing -> error ("Unknown extension " ++ s)
   maybeRead = fmap fst . listToMaybe . reads
-- | @-v\/--version@: print the version string and exit.
parseVersion :: Parser (a -> a)
parseVersion = infoOption ver (short 'v' <> long "version" <> help "Print version information")
-- | Post-process parsed options, expanding options that imply others
-- (NoBuiltins implies NoPrelude; Output/BCAsm imply NoREPL). Other
-- options are collected through the accumulator.
preProcOpts :: [Opt] -> [Opt] -> [Opt]
preProcOpts [] acc = acc
preProcOpts (opt:rest) acc =
    case opt of
      NoBuiltins -> NoBuiltins : NoPrelude : preProcOpts rest acc
      Output s   -> Output s : NoREPL : preProcOpts rest acc
      BCAsm s    -> BCAsm s : NoREPL : preProcOpts rest acc
      other      -> preProcOpts rest (other : acc)
-- | Resolve a --codegen argument; anything other than "bytecode" is
-- treated as a Via target (lower-cased).
parseCodegen :: String -> Codegen
parseCodegen name
    | name == "bytecode" = Bytecode
    | otherwise          = Via (map toLower name)
|
andyarvanitis/Idris-dev
|
src/Idris/CmdOptions.hs
|
bsd-3-clause
| 9,018
| 1
| 64
| 2,651
| 2,386
| 1,162
| 1,224
| 128
| 2
|
{-# LANGUAGE OverloadedStrings #-}
module Network.IRC.Bot.Part.Dice where
import Control.Monad (replicateM)
import Control.Monad.Trans (liftIO)
import Data.ByteString (ByteString)
import Data.ByteString.Char8 (pack)
import Data.Monoid ((<>))
import Network.IRC.Bot.Log (LogLevel(Debug))
import Network.IRC.Bot.BotMonad (BotMonad(..), maybeZero)
import Network.IRC.Bot.Commands (PrivMsg(..), sendCommand, replyTo)
import Network.IRC.Bot.Parsec (botPrefix, nat, parsecPart)
import System.Random (randomRIO)
import Text.Parsec (ParsecT, (<|>), (<?>), char, skipMany1, space, string, try)
-- | Bot part answering "dice" commands (see 'diceCommand').
dicePart :: (BotMonad m) => m ()
dicePart = parsecPart diceCommand
-- | Parse and answer a dice-roll request of the form
-- @<num-dice>d<num-sides>[+<modifier>]@, replying with the individual
-- rolls and their (modifier-included) sum. Any parse failure falls
-- through to the final @return ()@ and produces no reply.
diceCommand :: (BotMonad m) => ParsecT ByteString () m ()
diceCommand =
    do try $ botPrefix >> string "dice"
       logM Debug "dicePart"
       target <- maybeZero =<< replyTo
       (numDice, numSides, modifier) <- (do
         skipMany1 space
         -- dice count defaults to 1, capped at 100
         nd <- nat <|> return 1
         if nd > 100
          then fail "You can not roll more than 100 dice."
          else do
            char 'd'
            ns <- (do n <- nat
                      if n > 0
                       then return n
                       else fail "The dice must have at least 1 side"
                  )
            -- NB: this 'mod' shadows Prelude.mod inside the block
            mod <- (do char '+' >> nat) <|> return 0
            return (nd, ns, mod)) <?> "dice <num-dice>d<num-sides>[+<modifier>]"
       rolls <- liftIO $ replicateM (fromIntegral numDice) $ randomRIO (1, numSides)
       let results = "You rolled " ++ show numDice ++ " " ++ show numSides ++ "-sided dice with a +" ++ show modifier ++ " modifier: " ++ show rolls ++ " => " ++ show (sum (modifier : rolls))
       sendCommand (PrivMsg Nothing [target] (pack results))
    <|> return ()
|
eigengrau/haskell-ircbot
|
Network/IRC/Bot/Part/Dice.hs
|
bsd-3-clause
| 1,826
| 0
| 22
| 549
| 563
| 304
| 259
| 37
| 3
|
#!/usr/bin/runhaskell
import Distribution.Simple
import System.Process
import System.Exit
main = defaultMainWithHooks simpleUserHooks
|
seereason/haskell-unixutils
|
Setup.hs
|
bsd-3-clause
| 136
| 0
| 5
| 13
| 25
| 14
| 11
| 4
| 1
|
{-# LANGUAGE PackageImports #-}
module System.IO.Unsafe (module M) where
import "base" System.IO.Unsafe as M
|
silkapp/base-noprelude
|
src/System/IO/Unsafe.hs
|
bsd-3-clause
| 114
| 0
| 4
| 18
| 23
| 17
| 6
| 3
| 0
|
-- Properties for Graph
--
module PolyGraph.ReadOnly.Graph.Properties (
isValidGraph
, isValidMorphism
, isValidMorphismSingleEdge
) where
import PolyGraph.ReadOnly (GMorphism(..), isValidGraphDataSet)
import PolyGraph.ReadOnly.Graph (Graph, EdgeSemantics(..))
import PolyGraph.Common (UOPair(..), PairLike(toPair))
import qualified Data.Maybe as M
import qualified Data.Foldable as F
-- This code could be reused better between Graph and DiGraph
-- | A graph is valid when its edge\/vertex data set passes
-- 'isValidGraphDataSet' over the resolved edge endpoints.
isValidGraph :: forall g v e t . Graph g v e t => g -> Bool
isValidGraph = isValidGraphDataSet (toPair . resolveEdge)
-- | to be valid eTrans and resolveEdge needs to commute with the vTrans ignoring the pair order
--
-- BUG FIX: the original `M.isNothing $ F.find (isValidMorphismSingleEdge m) es`
-- returned True only when NO edge satisfied the single-edge condition —
-- the inverse of the intended property. A morphism is valid when EVERY
-- edge commutes, i.e. when no counterexample exists.
isValidMorphism :: forall g v0 e0 t v1 e1 . (Eq v0, Eq v1, EdgeSemantics e0 v0, EdgeSemantics e1 v1) =>
                     [e0] -> GMorphism v0 e0 v1 e1 -> Bool
isValidMorphism es m = F.all (isValidMorphismSingleEdge m) es
-- | NOTE UOPair == is diffrent from OPair ==
-- forcing different implementation for EdgeSemantics and DiEdgeSemantics
--
-- A single edge commutes when mapping its endpoints with 'vTrans' yields
-- (up to unordered-pair equality) the endpoints of the 'eTrans'-mapped edge.
isValidMorphismSingleEdge :: forall g v0 e0 t v1 e1 . (Eq v0, Eq v1, EdgeSemantics e0 v0, EdgeSemantics e1 v1) =>
                     GMorphism v0 e0 v1 e1 -> e0 -> Bool
isValidMorphismSingleEdge m e0 =
       let UOPair(v0a, v0b) = resolveEdge e0
           e1 = eTrans m e0
       in UOPair(vTrans m v0a, vTrans m v0b) == resolveEdge e1
|
rpeszek/GraphPlay
|
src/PolyGraph/ReadOnly/Graph/Properties.hs
|
bsd-3-clause
| 1,448
| 0
| 10
| 355
| 370
| 212
| 158
| -1
| -1
|
-- | Umbrella module: re-exports the database connection layer
-- ("LiftMe.Database.DB") and the training-data interface
-- ("LiftMe.Database.Training") under one import.
module LiftMe.Database
  ( module LiftMe.Database.DB
  , module LiftMe.Database.Training
  ) where
import LiftMe.Database.DB
import LiftMe.Database.Training
|
nilscc/weightlifting.cc
|
src-hs/LiftMe/Database.hs
|
bsd-3-clause
| 158
| 0
| 5
| 21
| 34
| 23
| 11
| 5
| 0
|
-- Non-standard semantics of While in direct style.
-- The semantics provides a sign-detection analysis.
-- We model states as partially ordered maps from variables to numbers.
module While.SignDetection.Main1 where
import qualified Prelude
import Prelude hiding (id, seq)
import SemanticsLib.Main
import qualified SemanticsLib.Map as Map
import While.AbstractSyntax (Var, Stm, factorial)
import While.Fold
import While.DenotationalSemantics.DirectStyle
-- Semantic domains for analysis
type N = Sign
type B = TT
type S = Map Var N
type MA = S -> N
type MB = S -> B
type MS = S -> S
-- Algebra for state transformers
-- Algebra for state transformers over the abstract (sign) domain:
--   * id   : the empty statement leaves the abstract state unchanged
--   * seq  : sequencing is reversed function composition (first stmt first)
--   * cond : dispatch on the three-valued guard; an uncertain guard
--            (TopTT) joins both branch results with 'lub'; an unreachable
--            guard (BottomTT) yields 'bottom'
--   * fix  : loops are solved as a fixed point via 'fixEq2'
strafos :: STrafoAlg MS MB
strafos = STrafoAlg {
    id   = Prelude.id
  , seq  = flip (.)
  , cond = \mb ms1 ms2 s ->
             case mb s of
               TT       -> ms1 s
               FF       -> ms2 s
               TopTT    -> ms1 s `lub` ms2 s
               BottomTT -> bottom
  , fix  = fixEq2
  }
-- Assembly of the semantics
--
-- Instantiates the generic direct-style denotational semantics 'ds' with
-- the sign-analysis domains: three-valued booleans, sign arithmetic,
-- partially ordered maps for states, and the state-transformer algebra.
whileAlg :: WhileAlg MA MB MS
whileAlg = ds ttBooleans
              signNumbers
              statesAsPOrdMaps
              strafos
-- Run the sign-detection analysis on the factorial program, starting from
-- a state in which variable "x" is mapped to Pos.
main =
  do let xpos = Map.update "x" Pos bottom
     print xpos
     print (foldStm whileAlg factorial xpos)
{-
> main
[("x",Pos)]
[("x",TopSign),("y",TopSign)]
-}
|
grammarware/slps
|
topics/implementation/NielsonN07/Haskell/src/While/SignDetection/Main1.hs
|
bsd-3-clause
| 1,274
| 0
| 12
| 365
| 300
| 171
| 129
| 35
| 4
|
-- Copyright (c) 2016-present, Facebook, Inc.
-- All rights reserved.
--
-- This source code is licensed under the BSD-style license found in the
-- LICENSE file in the root directory of this source tree. An additional grant
-- of patent rights can be found in the PATENTS file in the same directory.
module Duckling.Duration.FR.Tests
( tests
) where
import Prelude
import Data.String
import Test.Tasty
import Duckling.Dimensions.Types
import Duckling.Duration.FR.Corpus
import Duckling.Testing.Asserts
-- | French duration tests: the positive corpus must parse, the negative
-- corpus must not.
tests :: TestTree
tests = testGroup "FR Tests" [positive, negative]
  where
    positive = makeCorpusTest [This Duration] corpus
    negative = makeNegativeCorpusTest [This Duration] negativeCorpus
|
rfranek/duckling
|
tests/Duckling/Duration/FR/Tests.hs
|
bsd-3-clause
| 663
| 0
| 9
| 105
| 94
| 58
| 36
| 12
| 1
|
{-
(c) The University of Glasgow 2006
(c) The GRASP/AQUA Project, Glasgow University, 1992-1998
The @TyCon@ datatype
-}
{-# LANGUAGE CPP, DeriveDataTypeable #-}
module TyCon(
-- * Main TyCon data types
TyCon, FieldLabel,
AlgTyConRhs(..), visibleDataCons,
TyConParent(..), isNoParent,
FamTyConFlav(..), Role(..),
-- ** Constructing TyCons
mkAlgTyCon,
mkClassTyCon,
mkFunTyCon,
mkPrimTyCon,
mkKindTyCon,
mkLiftedPrimTyCon,
mkTupleTyCon,
mkSynonymTyCon,
mkFamilyTyCon,
mkPromotedDataCon,
mkPromotedTyCon,
-- ** Predicates on TyCons
isAlgTyCon,
isClassTyCon, isFamInstTyCon,
isFunTyCon,
isPrimTyCon,
isTupleTyCon, isUnboxedTupleTyCon, isBoxedTupleTyCon,
isTypeSynonymTyCon,
isDecomposableTyCon,
isPromotedDataCon, isPromotedTyCon,
isPromotedDataCon_maybe, isPromotedTyCon_maybe,
promotableTyCon_maybe, promoteTyCon,
isDataTyCon, isProductTyCon, isDataProductTyCon_maybe,
isEnumerationTyCon,
isNewTyCon, isAbstractTyCon,
isFamilyTyCon, isOpenFamilyTyCon,
isTypeFamilyTyCon, isDataFamilyTyCon,
isOpenTypeFamilyTyCon, isClosedSynFamilyTyCon_maybe,
isBuiltInSynFamTyCon_maybe,
isUnLiftedTyCon,
isGadtSyntaxTyCon, isDistinctTyCon, isDistinctAlgRhs,
isTyConAssoc, tyConAssoc_maybe,
isRecursiveTyCon,
isImplicitTyCon,
-- ** Extracting information out of TyCons
tyConName,
tyConKind,
tyConUnique,
tyConTyVars,
tyConCType, tyConCType_maybe,
tyConDataCons, tyConDataCons_maybe,
tyConSingleDataCon_maybe, tyConSingleAlgDataCon_maybe,
tyConFamilySize,
tyConStupidTheta,
tyConArity,
tyConRoles,
tyConParent,
tyConTuple_maybe, tyConClass_maybe,
tyConFamInst_maybe, tyConFamInstSig_maybe, tyConFamilyCoercion_maybe,
synTyConDefn_maybe, synTyConRhs_maybe, famTyConFlav_maybe,
algTyConRhs,
newTyConRhs, newTyConEtadArity, newTyConEtadRhs,
unwrapNewTyCon_maybe, unwrapNewTyConEtad_maybe,
-- ** Manipulating TyCons
expandSynTyCon_maybe,
makeTyConAbstract,
newTyConCo, newTyConCo_maybe,
pprPromotionQuote,
-- * Primitive representations of Types
PrimRep(..), PrimElemRep(..),
tyConPrimRep, isVoidRep, isGcPtrRep,
primRepSizeW, primElemRepSizeB,
primRepIsFloat,
-- * Recursion breaking
RecTcChecker, initRecTc, checkRecTc
) where
#include "HsVersions.h"
import {-# SOURCE #-} TypeRep ( Kind, Type, PredType )
import {-# SOURCE #-} DataCon ( DataCon, dataConExTyVars )
import Var
import Class
import BasicTypes
import DynFlags
import ForeignCall
import Name
import NameSet
import CoAxiom
import PrelNames
import Maybes
import Outputable
import Constants
import Util
import qualified Data.Data as Data
import Data.Typeable (Typeable)
{-
-----------------------------------------------
Notes about type families
-----------------------------------------------
Note [Type synonym families]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Type synonym families, also known as "type functions", map directly
onto the type functions in FC:
type family F a :: *
type instance F Int = Bool
..etc...
* Reply "yes" to isTypeFamilyTyCon, and isFamilyTyCon
* From the user's point of view (F Int) and Bool are simply
equivalent types.
* A Haskell 98 type synonym is a degenerate form of a type synonym
family.
* Type functions can't appear in the LHS of a type function:
type instance F (F Int) = ... -- BAD!
* Translation of type family decl:
type family F a :: *
translates to
a FamilyTyCon 'F', whose FamTyConFlav is OpenSynFamilyTyCon
type family G a :: * where
G Int = Bool
G Bool = Char
G a = ()
translates to
a FamilyTyCon 'G', whose FamTyConFlav is ClosedSynFamilyTyCon, with the
appropriate CoAxiom representing the equations
* In the future we might want to support
* injective type families (allow decomposition)
but we don't at the moment [2013]
Note [Data type families]
~~~~~~~~~~~~~~~~~~~~~~~~~
See also Note [Wrappers for data instance tycons] in MkId.hs
* Data type families are declared thus
data family T a :: *
data instance T Int = T1 | T2 Bool
Here T is the "family TyCon".
* Reply "yes" to isDataFamilyTyCon, and isFamilyTyCon
* The user does not see any "equivalent types" as he did with type
synonym families. He just sees constructors with types
T1 :: T Int
T2 :: Bool -> T Int
* Here's the FC version of the above declarations:
data T a
data R:TInt = T1 | T2 Bool
axiom ax_ti : T Int ~ R:TInt
The R:TInt is the "representation TyCons".
It has an AlgTyConParent of
FamInstTyCon T [Int] ax_ti
* The axiom ax_ti may be eta-reduced; see
Note [Eta reduction for data family axioms] in TcInstDcls
* The data constructor T2 has a wrapper (which is what the
source-level "T2" invokes):
$WT2 :: Bool -> T Int
$WT2 b = T2 b `cast` sym ax_ti
* A data instance can declare a fully-fledged GADT:
data instance T (a,b) where
X1 :: T (Int,Bool)
X2 :: a -> b -> T (a,b)
Here's the FC version of the above declaration:
data R:TPair a where
X1 :: R:TPair Int Bool
X2 :: a -> b -> R:TPair a b
axiom ax_pr :: T (a,b) ~ R:TPair a b
$WX1 :: forall a b. a -> b -> T (a,b)
$WX1 a b (x::a) (y::b) = X2 a b x y `cast` sym (ax_pr a b)
The R:TPair are the "representation TyCons".
We have a bit of work to do, to unpick the result types of the
data instance declaration for T (a,b), to get the result type in the
representation; e.g. T (a,b) --> R:TPair a b
The representation TyCon R:TList, has an AlgTyConParent of
FamInstTyCon T [(a,b)] ax_pr
* Notice that T is NOT translated to a FC type function; it just
becomes a "data type" with no constructors, which can be coerced
into R:TInt, R:TPair by the axioms.  These
axioms come into play when (and *only* when) you
- use a data constructor
- do pattern matching
Rather like newtype, in fact
As a result
- T behaves just like a data type so far as decomposition is concerned
- (T Int) is not implicitly converted to R:TInt during type inference.
Indeed the latter type is unknown to the programmer.
- There *is* an instance for (T Int) in the type-family instance
environment, but it is only used for overlap checking
- It's fine to have T in the LHS of a type function:
type instance F (T a) = [a]
It was this last point that confused me! The big thing is that you
should not think of a data family T as a *type function* at all, not
even an injective one! We can't allow even injective type functions
on the LHS of a type function:
type family injective G a :: *
type instance F (G Int) = Bool
is no good, even if G is injective, because consider
type instance G Int = Bool
type instance F Bool = Char
So a data type family is not an injective type function. It's just a
data type with some axioms that connect it to other data types.
Note [Associated families and their parent class]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*Associated* families are just like *non-associated* families, except
that they have a TyConParent of AssocFamilyTyCon, which identifies the
parent class.
However there is an important sharing relationship between
* the tyConTyVars of the parent Class
* the tyConTyvars of the associated TyCon
class C a b where
data T p a
type F a q b
Here the 'a' and 'b' are shared with the 'Class'; that is, they have
the same Unique.
This is important. In an instance declaration we expect
* all the shared variables to be instantiated the same way
* the non-shared variables of the associated type should not
be instantiated at all
instance C [x] (Tree y) where
data T p [x] = T1 x | T2 p
type F [x] q (Tree y) = (x,y,q)
Note [TyCon Role signatures]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Every tycon has a role signature, assigning a role to each of the tyConTyVars
(or of equal length to the tyConArity, if there are no tyConTyVars). An
example demonstrates these best: say we have a tycon T, with parameters a at
nominal, b at representational, and c at phantom. Then, to prove
representational equality between T a1 b1 c1 and T a2 b2 c2, we need to have
nominal equality between a1 and a2, representational equality between b1 and
b2, and nothing in particular (i.e., phantom equality) between c1 and c2. This
might happen, say, with the following declaration:
data T a b c where
MkT :: b -> T Int b c
Data and class tycons have their roles inferred (see inferRoles in TcTyDecls),
as do vanilla synonym tycons. Family tycons have all parameters at role N,
though it is conceivable that we could relax this restriction. (->)'s and
tuples' parameters are at role R. Each primitive tycon declares its roles;
it's worth noting that (~#)'s parameters are at role N. Promoted data
constructors' type arguments are at role R. All kind arguments are at role
N.
************************************************************************
* *
\subsection{The data type}
* *
************************************************************************
-}
-- | TyCons represent type constructors. Type constructors are introduced by
-- things such as:
--
-- 1) Data declarations: @data Foo = ...@ creates the @Foo@ type constructor of
-- kind @*@
--
-- 2) Type synonyms: @type Foo = ...@ creates the @Foo@ type constructor
--
-- 3) Newtypes: @newtype Foo a = MkFoo ...@ creates the @Foo@ type constructor
-- of kind @* -> *@
--
-- 4) Class declarations: @class Foo where@ creates the @Foo@ type constructor
-- of kind @*@
--
-- This data type also encodes a number of primitive, built in type constructors
-- such as those for function and tuple types.
-- If you edit this type, you may need to update the GHC formalism
-- See Note [GHC Formalism] in coreSyn/CoreLint.hs
data TyCon
  = -- | The function type constructor, @(->)@
    FunTyCon {
        tyConUnique :: Unique,   -- ^ A Unique of this TyCon. Invariant:
                                 -- identical to Unique of Name stored in
                                 -- tyConName field.

        tyConName   :: Name,     -- ^ Name of the constructor

        tyConKind   :: Kind,     -- ^ Kind of this TyCon (full kind, not just
                                 -- the return kind)

        tyConArity  :: Arity     -- ^ Number of arguments this TyCon must
                                 -- receive to be considered saturated
                                 -- (including implicit kind variables)
    }

  -- | Algebraic type constructors, which are defined to be those
  -- arising from @data@ and @newtype@ declarations. All these
  -- constructors are lifted and boxed. See 'AlgTyConRhs' for more
  -- information.
  | AlgTyCon {
        tyConUnique :: Unique,   -- ^ A Unique of this TyCon. Invariant:
                                 -- identical to Unique of Name stored in
                                 -- tyConName field.

        tyConName   :: Name,     -- ^ Name of the constructor

        tyConKind   :: Kind,     -- ^ Kind of this TyCon (full kind, not just
                                 -- the return kind)

        tyConArity  :: Arity,    -- ^ Number of arguments this TyCon must
                                 -- receive to be considered saturated
                                 -- (including implicit kind variables)

        tyConTyVars :: [TyVar],  -- ^ The kind and type variables used in the
                                 -- type constructor.
                                 -- Invariant: length tyvars = arity
                                 -- Precisely, this list scopes over:
                                 --
                                 -- 1. The 'algTcStupidTheta'
                                 -- 2. The cached types in algTyConRhs.NewTyCon
                                 -- 3. The family instance types if present
                                 --
                                 -- Note that it does /not/ scope over the data
                                 -- constructors.

        tcRoles     :: [Role],   -- ^ The role for each type variable
                                 -- This list has the same length as tyConTyVars
                                 -- See also Note [TyCon Role signatures]

        tyConCType  :: Maybe CType, -- ^ The C type that should be used
                                    -- for this type when using the FFI
                                    -- and CAPI

        algTcGadtSyntax :: Bool, -- ^ Was the data type declared with GADT
                                 -- syntax? If so, that doesn't mean it's a
                                 -- true GADT; only that the "where" form
                                 -- was used. This field is used only to
                                 -- guide pretty-printing

        algTcStupidTheta :: [PredType], -- ^ The \"stupid theta\" for the data
                                        -- type (always empty for GADTs). A
                                        -- \"stupid theta\" is the context to
                                        -- the left of an algebraic type
                                        -- declaration, e.g. @Eq a@ in the
                                        -- declaration @data Eq a => T a ...@.

        algTcRhs :: AlgTyConRhs, -- ^ Contains information about the
                                 -- data constructors of the algebraic type

        algTcRec :: RecFlag,     -- ^ Tells us whether the data type is part
                                 -- of a mutually-recursive group or not

        algTcParent :: TyConParent, -- ^ Gives the class or family declaration
                                    -- 'TyCon' for derived 'TyCon's representing
                                    -- class or family instances, respectively.
                                    -- See also 'synTcParent'

        tcPromoted :: Maybe TyCon   -- ^ Promoted TyCon, if any
    }

  -- | Represents type synonyms
  | SynonymTyCon {
        tyConUnique :: Unique,   -- ^ A Unique of this TyCon. Invariant:
                                 -- identical to Unique of Name stored in
                                 -- tyConName field.

        tyConName   :: Name,     -- ^ Name of the constructor

        tyConKind   :: Kind,     -- ^ Kind of this TyCon (full kind, not just
                                 -- the return kind)

        tyConArity  :: Arity,    -- ^ Number of arguments this TyCon must
                                 -- receive to be considered saturated
                                 -- (including implicit kind variables)

        tyConTyVars :: [TyVar],  -- ^ List of type and kind variables in this
                                 -- TyCon. Includes implicit kind variables.
                                 -- Invariant: length tyConTyVars = tyConArity

        tcRoles     :: [Role],   -- ^ The role for each type variable
                                 -- This list has the same length as tyConTyVars
                                 -- See also Note [TyCon Role signatures]

        synTcRhs    :: Type      -- ^ Contains information about the expansion
                                 -- of the synonym
    }

  -- | Represents type families
  | FamilyTyCon {
        tyConUnique :: Unique,   -- ^ A Unique of this TyCon. Invariant:
                                 -- identical to Unique of Name stored in
                                 -- tyConName field.

        tyConName   :: Name,     -- ^ Name of the constructor

        tyConKind   :: Kind,     -- ^ Kind of this TyCon (full kind, not just
                                 -- the return kind)

        tyConArity  :: Arity,    -- ^ Number of arguments this TyCon must
                                 -- receive to be considered saturated
                                 -- (including implicit kind variables)

        tyConTyVars :: [TyVar],  -- ^ The kind and type variables used in the
                                 -- type constructor.
                                 -- Invariant: length tyvars = arity
                                 -- Precisely, this list scopes over:
                                 --
                                 -- 1. The 'algTcStupidTheta'
                                 -- 2. The cached types in 'algTyConRhs.NewTyCon'
                                 -- 3. The family instance types if present
                                 --
                                 -- Note that it does /not/ scope over the data
                                 -- constructors.

        famTcFlav :: FamTyConFlav, -- ^ Type family flavour: open, closed,
                                   -- abstract, built-in. See comments for
                                   -- FamTyConFlav

        famTcParent :: TyConParent -- ^ TyCon of enclosing class for
                                   -- associated type families
    }

  -- | Primitive types; cannot be defined in Haskell. This includes
  -- the usual suspects (such as @Int#@) as well as foreign-imported
  -- types and kinds
  | PrimTyCon {
        tyConUnique  :: Unique,  -- ^ A Unique of this TyCon. Invariant:
                                 -- identical to Unique of Name stored in
                                 -- tyConName field.

        tyConName    :: Name,    -- ^ Name of the constructor

        tyConKind    :: Kind,    -- ^ Kind of this TyCon (full kind, not just
                                 -- the return kind)

        tyConArity   :: Arity,   -- ^ Number of arguments this TyCon must
                                 -- receive to be considered saturated
                                 -- (including implicit kind variables)

        tcRoles      :: [Role],  -- ^ The role for each type variable
                                 -- This list has the same length as tyConTyVars
                                 -- See also Note [TyCon Role signatures]

        primTyConRep :: PrimRep, -- ^ Many primitive tycons are unboxed, but
                                 -- some are boxed (represented by
                                 -- pointers). This 'PrimRep' holds that
                                 -- information. Only relevant if tyConKind = *

        isUnLifted   :: Bool     -- ^ Most primitive tycons are unlifted (may
                                 -- not contain bottom) but other are lifted,
                                 -- e.g. @RealWorld@
    }

  -- | Represents a promoted data constructor.
  | PromotedDataCon {           -- See Note [Promoted data constructors]
        tyConUnique :: Unique,  -- ^ Same Unique as the data constructor
        tyConName   :: Name,    -- ^ Same Name as the data constructor
        tyConArity  :: Arity,
        tyConKind   :: Kind,    -- ^ Translated type of the data constructor
        tcRoles     :: [Role],  -- ^ Roles: N for kind vars, R for type vars
        dataCon     :: DataCon  -- ^ Corresponding data constructor
    }

  -- | Represents a promoted type constructor.
  | PromotedTyCon {
        tyConUnique :: Unique,  -- ^ Same Unique as the type constructor
        tyConName   :: Name,    -- ^ Same Name as the type constructor
        tyConArity  :: Arity,   -- ^ n if ty_con :: * -> ... -> *  n times
        tyConKind   :: Kind,    -- ^ Always TysPrim.superKind
        ty_con      :: TyCon    -- ^ Corresponding (unpromoted) type constructor
    }
  deriving Typeable
-- | Names of the fields in an algebraic record type.
-- Currently just a synonym for 'Name'.
type FieldLabel = Name
-- | Represents right-hand-sides of 'TyCon's for algebraic types
data AlgTyConRhs

    -- | Says that we know nothing about this data type, except that
    -- it's represented by a pointer. Used when we export a data type
    -- abstractly into an .hi file.
  = AbstractTyCon
      Bool -- True  <=> It's definitely a distinct data type,
           --           equal only to itself; ie not a newtype
           -- False <=> Not sure
           -- See Note [AbstractTyCon and type equality]

    -- | Represents an open type family without a fixed right hand
    -- side. Additional instances can appear at any time.
    --
    -- These are introduced by either a top level declaration:
    --
    -- > data T a :: *
    --
    -- Or an associated data type declaration, within a class declaration:
    --
    -- > class C a b where
    -- >   data T b :: *
  | DataFamilyTyCon

    -- | Information about those 'TyCon's derived from a @data@
    -- declaration. This includes data types with no constructors at
    -- all.
  | DataTyCon {
        data_cons :: [DataCon],
            -- ^ The data type constructors; can be empty if the
            -- user declares the type to have no constructors
            --
            -- INVARIANT: Kept in order of increasing 'DataCon'
            -- tag (see the tag assignment in DataCon.mkDataCon)

        is_enum :: Bool  -- ^ Cached value: is this an enumeration type?
                         -- See Note [Enumeration types]
    }

  | TupleTyCon {               -- A boxed, unboxed, or constraint tuple
        data_con :: DataCon,   -- NB: it can be an *unboxed* tuple
        tup_sort :: TupleSort  -- ^ Is this a boxed, unboxed or constraint
                               -- tuple?
    }

  -- | Information about those 'TyCon's derived from a @newtype@ declaration
  | NewTyCon {
        data_con :: DataCon,  -- ^ The unique constructor for the @newtype@.
                              -- It has no existentials

        nt_rhs :: Type,       -- ^ Cached value: the argument type of the
                              -- constructor, which is just the representation
                              -- type of the 'TyCon' (remember that @newtype@s
                              -- do not exist at runtime so need a different
                              -- representation type).
                              --
                              -- The free 'TyVar's of this type are the
                              -- 'tyConTyVars' from the corresponding 'TyCon'

        nt_etad_rhs :: ([TyVar], Type),
            -- ^ Same as the 'nt_rhs', but this time eta-reduced.
            -- Hence the list of 'TyVar's in this field may be
            -- shorter than the declared arity of the 'TyCon'.
            -- See Note [Newtype eta]

        nt_co :: CoAxiom Unbranched
            -- The axiom coercion that creates the @newtype@
            -- from the representation 'Type'.
            -- See Note [Newtype coercions]
            -- Invariant: arity = #tvs in nt_etad_rhs;
            -- See Note [Newtype eta]
            -- Watch out! If any newtypes become transparent
            -- again check Trac #1072.
    }
{-
Note [AbstractTyCon and type equality]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
TODO
-}
-- | Extract those 'DataCon's that we are able to learn about. Note
-- that visibility in this sense does not correspond to visibility in
-- the context of any particular user program!  Abstract and data-family
-- right-hand sides expose no constructors.
visibleDataCons :: AlgTyConRhs -> [DataCon]
visibleDataCons rhs = case rhs of
  AbstractTyCon {}              -> []
  DataFamilyTyCon {}            -> []
  DataTyCon  { data_cons = cs } -> cs
  NewTyCon   { data_con  = c }  -> [c]
  TupleTyCon { data_con  = c }  -> [c]
-- | Both type classes as well as family instances imply implicit
-- type constructors. These implicit type constructors refer to their parent
-- structure (ie, the class or family from which they derive) using a type of
-- the following form. We use 'TyConParent' for both algebraic and synonym
-- types, but the variant 'ClassTyCon' will only be used by algebraic 'TyCon's.
data TyConParent
  = -- | An ordinary type constructor has no parent.
    NoParentTyCon

  -- | Type constructors representing a class dictionary.
  -- See Note [ATyCon for classes] in TypeRep
  | ClassTyCon
        Class   -- INVARIANT: the classTyCon of this Class is the
                -- current tycon

  -- | An *associated* type of a class.
  | AssocFamilyTyCon
        Class   -- The class in whose declaration the family is declared
                -- See Note [Associated families and their parent class]

  -- | Type constructors representing an instance of a *data* family.
  -- Parameters:
  --
  --  1) The type family in question
  --
  --  2) Instance types; free variables are the 'tyConTyVars'
  --     of the current 'TyCon' (not the family one). INVARIANT:
  --     the number of types matches the arity of the family 'TyCon'
  --
  --  3) A 'CoTyCon' identifying the representation
  --     type with the type instance family
  | FamInstTyCon          -- See Note [Data type families]
        (CoAxiom Unbranched)  -- The coercion axiom.
                -- Generally of kind   T ty1 ty2 ~ R:T a b c
                -- where T is the family TyCon,
                -- and R:T is the representation TyCon (ie this one)
                -- and a,b,c are the tyConTyVars of this TyCon
                --
                -- BUT may be eta-reduced; see TcInstDcls
                --     Note [Eta reduction for data family axioms]

        -- Cached fields of the CoAxiom, but adjusted to
        -- use the tyConTyVars of this TyCon
        TyCon   -- The family TyCon
        [Type]  -- Argument types (mentions the tyConTyVars of this TyCon)
                -- Match in length the tyConTyVars of the family TyCon

        -- E.g.  data instance T [a] = ...
        -- gives a representation tycon:
        --      data R:TList a = ...
        --      axiom co a :: T [a] ~ R:TList a
        -- with R:TList's algTcParent = FamInstTyCon T [a] co
-- Debug pretty-printer for 'TyConParent' (compiler diagnostics only).
instance Outputable TyConParent where
    ppr NoParentTyCon           = text "No parent"
    ppr (ClassTyCon cls)        = text "Class parent" <+> ppr cls
    ppr (AssocFamilyTyCon cls)  =
        text "Class parent (assoc. family)" <+> ppr cls
    ppr (FamInstTyCon _ tc tys) =
        text "Family parent (family instance)" <+> ppr tc <+> sep (map ppr tys)
-- | Checks the invariants of a 'TyConParent' given the appropriate type class
-- name, if any
okParent :: Name -> TyConParent -> Bool
okParent tc_name parent
  = case parent of
      NoParentTyCon             -> True
      ClassTyCon cls            -> tyConName (classTyCon cls) == tc_name
      AssocFamilyTyCon cls      -> tc_name `elem` map tyConName (classATs cls)
      FamInstTyCon _ fam_tc tys -> length tys == tyConArity fam_tc
-- | True when the 'TyCon' stands alone, i.e. has no class or family parent.
isNoParent :: TyConParent -> Bool
isNoParent parent = case parent of
                      NoParentTyCon -> True
                      _             -> False
--------------------
-- | The flavour of a type family: open, closed, abstract, or built-in.
-- (Stored in the 'famTcFlav' field of a 'FamilyTyCon'.)
data FamTyConFlav
  = -- | An open type synonym family  e.g. @type family F x y :: * -> *@
    OpenSynFamilyTyCon

    -- | A closed type synonym family  e.g.
    -- @type family F x where { F Int = Bool }@
  | ClosedSynFamilyTyCon
      (CoAxiom Branched) -- The one axiom for this family

    -- | A closed type synonym family declared in an hs-boot file with
    -- type family F a where ..
  | AbstractClosedSynFamilyTyCon

    -- | Built-in type family used by the TypeNats solver
  | BuiltInSynFamTyCon BuiltInSynFamily
{-
Note [Closed type families]
~~~~~~~~~~~~~~~~~~~~~~~~~
* In an open type family you can add new instances later. This is the
usual case.
* In a closed type family you can only put equations where the family
is defined.
Note [Promoted data constructors]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A data constructor can be promoted to become a type constructor,
via the PromotedTyCon alternative in TyCon.
* Only data constructors with
(a) no kind polymorphism
(b) no constraints in its type (eg GADTs)
are promoted. Existentials are ok; see Trac #7347.
* The TyCon promoted from a DataCon has the *same* Name and Unique as
the DataCon. Eg. If the data constructor Data.Maybe.Just(unique 78,
say) is promoted to a TyCon whose name is Data.Maybe.Just(unique 78)
* The *kind* of a promoted DataCon may be polymorphic. Example:
type of DataCon Just :: forall (a:*). a -> Maybe a
kind of (promoted) tycon Just :: forall (a:box). a -> Maybe a
The kind is not identical to the type, because of the */box
kind signature on the forall'd variable; so the tyConKind field of
PromotedTyCon is not identical to the dataConUserType of the
DataCon. But it's the same modulo changing the variable kinds,
done by DataCon.promoteType.
* Small note: We promote the *user* type of the DataCon. Eg
data T = MkT {-# UNPACK #-} !(Bool, Bool)
The promoted kind is
MkT :: (Bool,Bool) -> T
*not*
MkT :: Bool -> Bool -> T
Note [Enumeration types]
~~~~~~~~~~~~~~~~~~~~~~~~
We define datatypes with no constructors to *not* be
enumerations; this fixes trac #2578, Otherwise we
end up generating an empty table for
<mod>_<type>_closure_tbl
which is used by tagToEnum# to map Int# to constructors
in an enumeration. The empty table apparently upset
the linker.
Moreover, all the data constructors must be enumerations, meaning
they have type (forall abc. T a b c). GADTs are not enumerations.
For example consider
data T a where
T1 :: T Int
T2 :: T Bool
T3 :: T a
What would [T1 ..] be? [T1,T3] :: T Int? Easiest thing is to exclude them.
See Trac #4528.
Note [Newtype coercions]
~~~~~~~~~~~~~~~~~~~~~~~~
The NewTyCon field nt_co is a CoAxiom which is used for coercing from
the representation type of the newtype, to the newtype itself. For
example,
newtype T a = MkT (a -> a)
the NewTyCon for T will contain nt_co = CoT where CoT t : T t ~ t -> t.
In the case that the right hand side is a type application
ending with the same type variables as the left hand side, we
"eta-contract" the coercion. So if we had
newtype S a = MkT [a]
then we would generate the arity 0 axiom CoS : S ~ []. The
primary reason we do this is to make newtype deriving cleaner.
In the paper we'd write
axiom CoT : (forall t. T t) ~ (forall t. [t])
and then when we used CoT at a particular type, s, we'd say
CoT @ s
which encodes as (TyConApp instCoercionTyCon [TyConApp CoT [], s])
Note [Newtype eta]
~~~~~~~~~~~~~~~~~~
Consider
newtype Parser a = MkParser (IO a) deriving Monad
Are these two types equal (to Core)?
Monad Parser
Monad IO
which we need to make the derived instance for Monad Parser.
Well, yes. But to see that easily we eta-reduce the RHS type of
Parser, in this case to ([], Froogle), so that even unsaturated applications
of Parser will work right. This eta reduction is done when the type
constructor is built, and cached in NewTyCon.
Here's an example that I think showed up in practice
Source code:
newtype T a = MkT [a]
newtype Foo m = MkFoo (forall a. m a -> Int)
w1 :: Foo []
w1 = ...
w2 :: Foo T
w2 = MkFoo (\(MkT x) -> case w1 of MkFoo f -> f x)
After desugaring, and discarding the data constructors for the newtypes,
we get:
w2 = w1 `cast` Foo CoT
so the coercion tycon CoT must have
kind: T ~ []
and arity: 0
************************************************************************
* *
\subsection{PrimRep}
* *
************************************************************************
Note [rep swamp]
GHC has a rich selection of types that represent "primitive types" of
one kind or another. Each of them makes a different set of
distinctions, and mostly the differences are for good reasons,
although it's probably true that we could merge some of these.
Roughly in order of "includes more information":
- A Width (cmm/CmmType) is simply a binary value with the specified
number of bits. It may represent a signed or unsigned integer, a
floating-point value, or an address.
data Width = W8 | W16 | W32 | W64 | W80 | W128
- Size, which is used in the native code generator, is Width +
floating point information.
data Size = II8 | II16 | II32 | II64 | FF32 | FF64 | FF80
it is necessary because e.g. the instruction to move a 64-bit float
on x86 (movsd) is different from the instruction to move a 64-bit
integer (movq), so the mov instruction is parameterised by Size.
- CmmType wraps Width with more information: GC ptr, float, or
other value.
data CmmType = CmmType CmmCat Width
data CmmCat -- "Category" (not exported)
= GcPtrCat -- GC pointer
| BitsCat -- Non-pointer
| FloatCat -- Float
It is important to have GcPtr information in Cmm, since we generate
info tables containing pointerhood for the GC from this. As for
why we have float (and not signed/unsigned) here, see Note [Signed
vs unsigned].
- ArgRep makes only the distinctions necessary for the call and
return conventions of the STG machine. It is essentially CmmType
+ void.
- PrimRep makes a few more distinctions than ArgRep: it divides
non-GC-pointers into signed/unsigned and addresses, information
that is necessary for passing these values to foreign functions.
There's another tension here: whether the type encodes its size in
bytes, or whether its size depends on the machine word size. Width
and CmmType have the size built-in, whereas ArgRep and PrimRep do not.
This means to turn an ArgRep/PrimRep into a CmmType requires DynFlags.
On the other hand, CmmType includes some "nonsense" values, such as
CmmType GcPtrCat W32 on a 64-bit machine.
-}
-- | A 'PrimRep' is an abstraction of a type. It contains information that
-- the code generator needs in order to pass arguments, return results,
-- and store values of this type.
data PrimRep
  = VoidRep                -- ^ No runtime value at all (zero words)
  | PtrRep                 -- ^ Pointer to a boxed Haskell value (a GC pointer)
  | IntRep                 -- ^ Signed, word-sized value
  | WordRep                -- ^ Unsigned, word-sized value
  | Int64Rep               -- ^ Signed, 64 bit value (with 32-bit words only)
  | Word64Rep              -- ^ Unsigned, 64 bit value (with 32-bit words only)
  | AddrRep                -- ^ A pointer, but /not/ to a Haskell value (use 'PtrRep')
  | FloatRep               -- ^ Single-precision floating-point value
  | DoubleRep              -- ^ Double-precision floating-point value
  | VecRep Int PrimElemRep -- ^ A vector
  deriving( Eq, Show )
-- | Element representation of a 'VecRep' vector; fixes the width and
-- signedness/floatness of each lane (see 'primElemRepSizeB' for sizes).
data PrimElemRep
  = Int8ElemRep
  | Int16ElemRep
  | Int32ElemRep
  | Int64ElemRep
  | Word8ElemRep
  | Word16ElemRep
  | Word32ElemRep
  | Word64ElemRep
  | FloatElemRep
  | DoubleElemRep
  deriving( Eq, Show )
-- Render a 'PrimRep' via its derived 'Show' instance.
instance Outputable PrimRep where
    ppr = text . show
-- Render a 'PrimElemRep' via its derived 'Show' instance.
instance Outputable PrimElemRep where
    ppr = text . show
-- | Is this the representation of a value with no runtime content?
isVoidRep :: PrimRep -> Bool
isVoidRep rep = case rep of
                  VoidRep -> True
                  _       -> False
-- | Is this a pointer the garbage collector must follow?
isGcPtrRep :: PrimRep -> Bool
isGcPtrRep rep = case rep of
                   PtrRep -> True
                   _      -> False
-- | Find the size of a 'PrimRep', in words
primRepSizeW :: DynFlags -> PrimRep -> Int
primRepSizeW _      IntRep           = 1
primRepSizeW _      WordRep          = 1
-- 64-bit reps may need more than one word when the target word is smaller
primRepSizeW dflags Int64Rep         = wORD64_SIZE `quot` wORD_SIZE dflags
primRepSizeW dflags Word64Rep        = wORD64_SIZE `quot` wORD_SIZE dflags
primRepSizeW _      FloatRep         = 1    -- NB. might not take a full word
primRepSizeW dflags DoubleRep        = dOUBLE_SIZE dflags `quot` wORD_SIZE dflags
primRepSizeW _      AddrRep          = 1
primRepSizeW _      PtrRep           = 1
primRepSizeW _      VoidRep          = 0
-- vectors: total byte size of all lanes, converted to words
primRepSizeW dflags (VecRep len rep) = len * primElemRepSizeB rep `quot` wORD_SIZE dflags
-- | Size of one vector element, in bytes.
primElemRepSizeB :: PrimElemRep -> Int
primElemRepSizeB r = case r of
  Int8ElemRep    -> 1
  Int16ElemRep   -> 2
  Int32ElemRep   -> 4
  Int64ElemRep   -> 8
  Word8ElemRep   -> 1
  Word16ElemRep  -> 2
  Word32ElemRep  -> 4
  Word64ElemRep  -> 8
  FloatElemRep   -> 4
  DoubleElemRep  -> 8
-- | Return whether the Rep stands for a floating type;
-- returns Nothing for vector types.
primRepIsFloat :: PrimRep -> Maybe Bool
primRepIsFloat rep = case rep of
  FloatRep   -> Just True
  DoubleRep  -> Just True
  VecRep _ _ -> Nothing
  _          -> Just False
{-
************************************************************************
* *
\subsection{TyCon Construction}
* *
************************************************************************
Note: the TyCon constructors all take a Kind as one argument, even though
they could, in principle, work out their Kind from their other arguments.
But to do so they need functions from Types, and that makes a nasty
module mutual-recursion. And they aren't called from many places.
So we compromise, and move their Kind calculation to the call site.
-}
-- | Given the name of the function type constructor and its kind, create the
-- corresponding 'TyCon'. It is recommended to use 'TypeRep.funTyCon' if you want
-- this functionality
mkFunTyCon :: Name -> Kind -> TyCon
mkFunTyCon name kind
  = FunTyCon {
        tyConUnique = nameUnique name,
        tyConName   = name,
        tyConKind   = kind,
        tyConArity  = 2     -- (->) takes exactly two type arguments
    }
-- | This is the making of an algebraic 'TyCon'. Notably, you have to
-- pass in the generic (in the -XGenerics sense) information about the
-- type constructor - you can get hold of it easily (see Generics
-- module)
mkAlgTyCon :: Name
           -> Kind              -- ^ Kind of the resulting 'TyCon'
           -> [TyVar]           -- ^ 'TyVar's scoped over: see 'tyConTyVars'.
                                --   Arity is inferred from the length of this
                                --   list
           -> [Role]            -- ^ The roles for each TyVar
           -> Maybe CType       -- ^ The C type this type corresponds to
                                --   when using the CAPI FFI
           -> [PredType]        -- ^ Stupid theta: see 'algTcStupidTheta'
           -> AlgTyConRhs       -- ^ Information about data constructors
           -> TyConParent
           -> RecFlag           -- ^ Is the 'TyCon' recursive?
           -> Bool              -- ^ Was the 'TyCon' declared with GADT syntax?
           -> Maybe TyCon       -- ^ Promoted version
           -> TyCon
mkAlgTyCon name kind tyvars roles cType stupid rhs parent is_rec gadt_syn prom_tc
  = AlgTyCon {
        tyConName        = name,
        tyConUnique      = nameUnique name,
        tyConKind        = kind,
        tyConArity       = length tyvars,
        tyConTyVars      = tyvars,
        tcRoles          = roles,
        tyConCType       = cType,
        algTcStupidTheta = stupid,
        algTcRhs         = rhs,
        -- Sanity-check the supplied parent at construction time (debug builds).
        algTcParent      = ASSERT2( okParent name parent, ppr name $$ ppr parent ) parent,
        algTcRec         = is_rec,
        algTcGadtSyntax  = gadt_syn,
        tcPromoted       = prom_tc
    }
-- | Simpler specialization of 'mkAlgTyCon' for classes
mkClassTyCon :: Name -> Kind -> [TyVar] -> [Role] -> AlgTyConRhs -> Class
             -> RecFlag -> TyCon
mkClassTyCon name kind tyvars roles rhs clas is_rec
  = mkAlgTyCon name kind tyvars roles Nothing [] rhs (ClassTyCon clas)
      is_rec False
      Nothing    -- Class TyCons are not promoted
-- | Create a tuple 'TyCon' (boxed or unboxed, per the 'TupleSort').
mkTupleTyCon :: Name
             -> Kind          -- ^ Kind of the resulting 'TyCon'
             -> Arity         -- ^ Arity of the tuple
             -> [TyVar]       -- ^ 'TyVar's scoped over: see 'tyConTyVars'
             -> DataCon
             -> TupleSort     -- ^ Whether the tuple is boxed or unboxed
             -> Maybe TyCon   -- ^ Promoted version
             -> TyCon
mkTupleTyCon name kind arity tyvars con sort prom_tc
  = AlgTyCon {
        tyConName        = name,
        tyConUnique      = nameUnique name,
        tyConKind        = kind,
        tyConArity       = arity,
        tyConTyVars      = tyvars,
        -- Every tuple argument is representational.
        tcRoles          = replicate arity Representational,
        tyConCType       = Nothing,
        algTcStupidTheta = [],
        algTcRhs         = TupleTyCon { data_con = con, tup_sort = sort },
        algTcParent      = NoParentTyCon,
        algTcRec         = NonRecursive,
        algTcGadtSyntax  = False,
        tcPromoted       = prom_tc
    }
-- | Create an unlifted primitive 'TyCon', such as @Int#@
mkPrimTyCon :: Name -> Kind -> [Role] -> PrimRep -> TyCon
mkPrimTyCon name kind roles rep
  = mkPrimTyCon' name kind roles rep True

-- | Kind constructors
mkKindTyCon :: Name -> Kind -> TyCon
mkKindTyCon name kind
  = mkPrimTyCon' name kind [] VoidRep True

-- | Create a lifted primitive 'TyCon' such as @RealWorld@
mkLiftedPrimTyCon :: Name -> Kind -> [Role] -> PrimRep -> TyCon
mkLiftedPrimTyCon name kind roles rep
  = mkPrimTyCon' name kind roles rep False

-- Shared worker for the primitive-TyCon constructors above; the final
-- Bool records whether the resulting TyCon is unlifted.
mkPrimTyCon' :: Name -> Kind -> [Role] -> PrimRep -> Bool -> TyCon
mkPrimTyCon' name kind roles rep is_unlifted
  = PrimTyCon {
        tyConName    = name,
        tyConUnique  = nameUnique name,
        tyConKind    = kind,
        tyConArity   = length roles,
        tcRoles      = roles,
        primTyConRep = rep,
        isUnLifted   = is_unlifted
    }
-- | Create a type synonym 'TyCon'
mkSynonymTyCon :: Name -> Kind -> [TyVar] -> [Role] -> Type -> TyCon
mkSynonymTyCon name kind tyvars roles rhs
  = SynonymTyCon {
        tyConName   = name,
        tyConUnique = nameUnique name,
        tyConKind   = kind,
        tyConArity  = length tyvars,
        tyConTyVars = tyvars,
        tcRoles     = roles,
        synTcRhs    = rhs
    }

-- | Create a type family 'TyCon'
mkFamilyTyCon:: Name -> Kind -> [TyVar] -> FamTyConFlav -> TyConParent
             -> TyCon
mkFamilyTyCon name kind tyvars flav parent
  = FamilyTyCon
      { tyConUnique = nameUnique name
      , tyConName   = name
      , tyConKind   = kind
      , tyConArity  = length tyvars
      , tyConTyVars = tyvars
      , famTcFlav   = flav
      , famTcParent = parent
      }
-- | Create a promoted data constructor 'TyCon'
-- Somewhat dodgily, we give it the same Name
-- as the data constructor itself; when we pretty-print
-- the TyCon we add a quote; see the Outputable TyCon instance
mkPromotedDataCon :: DataCon -> Name -> Unique -> Kind -> [Role] -> TyCon
mkPromotedDataCon con name unique kind roles
  = PromotedDataCon {
        tyConName   = name,
        tyConUnique = unique,
        tyConArity  = arity,
        tcRoles     = roles,
        tyConKind   = kind,
        dataCon     = con
    }
  where
    -- One type argument per role.
    arity = length roles

-- | Create a promoted type constructor 'TyCon'
-- Somewhat dodgily, we give it the same Name
-- as the type constructor itself
mkPromotedTyCon :: TyCon -> Kind -> TyCon
mkPromotedTyCon tc kind
  = PromotedTyCon {
        tyConName   = getName tc,
        tyConUnique = getUnique tc,
        tyConArity  = tyConArity tc,
        tyConKind   = kind,
        ty_con      = tc
    }
-- | Is this the built-in function type constructor (see 'mkFunTyCon')?
isFunTyCon :: TyCon -> Bool
isFunTyCon (FunTyCon {}) = True
isFunTyCon _             = False

-- | Test if the 'TyCon' is algebraic but abstract (invisible data constructors)
isAbstractTyCon :: TyCon -> Bool
isAbstractTyCon (AlgTyCon { algTcRhs = AbstractTyCon {} }) = True
isAbstractTyCon _ = False

-- | Make an algebraic 'TyCon' abstract. Panics if the supplied 'TyCon' is not
-- algebraic
makeTyConAbstract :: TyCon -> TyCon
makeTyConAbstract tc@(AlgTyCon { algTcRhs = rhs })
  -- Remember whether the hidden RHS was "distinct"; see 'isDistinctAlgRhs'.
  = tc { algTcRhs = AbstractTyCon (isDistinctAlgRhs rhs) }
makeTyConAbstract tc = pprPanic "makeTyConAbstract" (ppr tc)

-- | Does this 'TyCon' represent something that cannot be defined in Haskell?
isPrimTyCon :: TyCon -> Bool
isPrimTyCon (PrimTyCon {}) = True
isPrimTyCon _              = False

-- | Is this 'TyCon' unlifted (i.e. cannot contain bottom)? Note that this can
-- only be true for primitive and unboxed-tuple 'TyCon's
isUnLiftedTyCon :: TyCon -> Bool
isUnLiftedTyCon (PrimTyCon {isUnLifted = is_unlifted})
  = is_unlifted
isUnLiftedTyCon (AlgTyCon { algTcRhs = rhs } )
  | TupleTyCon { tup_sort = sort } <- rhs
  = not (isBoxed (tupleSortBoxity sort))
isUnLiftedTyCon _ = False
-- | Returns @True@ if the supplied 'TyCon' resulted from either a
-- @data@ or @newtype@ declaration
isAlgTyCon :: TyCon -> Bool
isAlgTyCon (AlgTyCon {}) = True
isAlgTyCon _             = False

isDataTyCon :: TyCon -> Bool
-- ^ Returns @True@ for data types that are /definitely/ represented by
-- heap-allocated constructors. These are scrutinised by Core-level
-- @case@ expressions, and they get info tables allocated for them.
--
-- Generally, the function will be true for all @data@ types and false
-- for @newtype@s, unboxed tuples and type family 'TyCon's. But it is
-- not guaranteed to return @True@ in all cases that it could.
--
-- NB: for a data type family, only the /instance/ 'TyCon's
--     get an info table. The family declaration 'TyCon' does not
isDataTyCon (AlgTyCon {algTcRhs = rhs})
  = case rhs of
      TupleTyCon { tup_sort = sort }
                         -> isBoxed (tupleSortBoxity sort)
      DataTyCon {}       -> True
      NewTyCon {}        -> False
      DataFamilyTyCon {} -> False
      AbstractTyCon {}   -> False   -- We don't know, so return False
isDataTyCon _ = False

-- | 'isDistinctTyCon' is true of 'TyCon's that are equal only to
-- themselves, even via coercions (except for unsafeCoerce).
-- This excludes newtypes, type functions, type synonyms.
-- It relates directly to the FC consistency story:
--     If the axioms are consistent,
--     and  co : S tys ~ T tys, and S,T are "distinct" TyCons,
--     then S=T.
-- Cf Note [Pruning dead case alternatives] in Unify
isDistinctTyCon :: TyCon -> Bool
isDistinctTyCon (AlgTyCon {algTcRhs = rhs}) = isDistinctAlgRhs rhs
isDistinctTyCon (FunTyCon {})               = True
isDistinctTyCon (PrimTyCon {})              = True
isDistinctTyCon (PromotedDataCon {})        = True
isDistinctTyCon _                           = False

-- | As 'isDistinctTyCon', but on the right-hand side of an algebraic 'TyCon'.
isDistinctAlgRhs :: AlgTyConRhs -> Bool
isDistinctAlgRhs (TupleTyCon {})          = True
isDistinctAlgRhs (DataTyCon {})           = True
isDistinctAlgRhs (DataFamilyTyCon {})     = True
-- Abstract TyCons remember whether their hidden RHS was distinct.
isDistinctAlgRhs (AbstractTyCon distinct) = distinct
isDistinctAlgRhs (NewTyCon {})            = False

-- | Is this the 'TyCon' for a @newtype@?
isNewTyCon :: TyCon -> Bool
isNewTyCon (AlgTyCon {algTcRhs = NewTyCon {}}) = True
isNewTyCon _                                   = False
-- | Take a 'TyCon' apart into the 'TyVar's it scopes over, the 'Type' it expands
-- into, and (possibly) a coercion from the representation type to the @newtype@.
-- Returns @Nothing@ if this is not possible.
unwrapNewTyCon_maybe :: TyCon -> Maybe ([TyVar], Type, CoAxiom Unbranched)
unwrapNewTyCon_maybe (AlgTyCon { tyConTyVars = tvs,
                                 algTcRhs = NewTyCon { nt_co = co,
                                                       nt_rhs = rhs }})
  = Just (tvs, rhs, co)
unwrapNewTyCon_maybe _ = Nothing

-- | As 'unwrapNewTyCon_maybe', but uses the eta-reduced right-hand side.
unwrapNewTyConEtad_maybe :: TyCon -> Maybe ([TyVar], Type, CoAxiom Unbranched)
unwrapNewTyConEtad_maybe (AlgTyCon { algTcRhs = NewTyCon { nt_co = co,
                                                           nt_etad_rhs = (tvs,rhs) }})
  = Just (tvs, rhs, co)
unwrapNewTyConEtad_maybe _ = Nothing

isProductTyCon :: TyCon -> Bool
-- True of datatypes or newtypes that have
--   one, non-existential, data constructor
-- See Note [Product types]
isProductTyCon tc@(AlgTyCon {})
  = case algTcRhs tc of
      TupleTyCon {} -> True
      DataTyCon{ data_cons = [data_con] }
                    -> null (dataConExTyVars data_con)
      NewTyCon {}   -> True
      _             -> False
isProductTyCon _ = False

isDataProductTyCon_maybe :: TyCon -> Maybe DataCon
-- True of datatypes (not newtypes) with
--   one, vanilla, data constructor
-- See Note [Product types]
isDataProductTyCon_maybe (AlgTyCon { algTcRhs = rhs })
  = case rhs of
      DataTyCon { data_cons = [con] }
        | null (dataConExTyVars con)   -- non-existential
        -> Just con
      TupleTyCon { data_con = con }
        -> Just con
      _ -> Nothing
isDataProductTyCon_maybe _ = Nothing
{- Note [Product types]
~~~~~~~~~~~~~~~~~~~~~~~
A product type is
* A data type (not a newtype)
* With one, boxed data constructor
* That binds no existential type variables
The main point is that product types are amenable to unboxing for
* Strict function calls; we can transform
f (D a b) = e
to
fw a b = e
via the worker/wrapper transformation. (Question: couldn't this
work for existentials too?)
* CPR for function results; we can transform
f x y = let ... in D a b
to
fw x y = let ... in (# a, b #)
Note that the data constructor /can/ have evidence arguments: equality
constraints, type classes etc. So it can be GADT. These evidence
arguments are simply value arguments, and should not get in the way.
-}
-- | Is this a 'TyCon' representing a regular H98 type synonym (@type@)?
isTypeSynonymTyCon :: TyCon -> Bool
isTypeSynonymTyCon (SynonymTyCon {}) = True
isTypeSynonymTyCon _                 = False

-- As for newtypes, it is in some contexts important to distinguish between
-- closed synonyms and synonym families, as synonym families have no unique
-- right hand side to which a synonym family application can expand.
--
isDecomposableTyCon :: TyCon -> Bool
-- True iff we can decompose (T a b c) into ((T a b) c)
--   I.e. is it injective?
-- Specifically NOT true of synonyms (open and otherwise)
-- Ultimately we may have injective associated types
--      in which case this test will become more interesting
--
-- It'd be unusual to call isDecomposableTyCon on a regular H98
-- type synonym, because you should probably have expanded it first
-- But regardless, it's not decomposable
isDecomposableTyCon (SynonymTyCon {}) = False
isDecomposableTyCon (FamilyTyCon {})  = False
isDecomposableTyCon _other            = True

-- | Is this an algebraic 'TyCon' declared with the GADT syntax?
isGadtSyntaxTyCon :: TyCon -> Bool
isGadtSyntaxTyCon (AlgTyCon { algTcGadtSyntax = res }) = res
isGadtSyntaxTyCon _                                    = False

-- | Is this an algebraic 'TyCon' which is just an enumeration of values?
isEnumerationTyCon :: TyCon -> Bool
-- See Note [Enumeration types] in TyCon
isEnumerationTyCon (AlgTyCon { tyConArity = arity, algTcRhs = rhs })
  = case rhs of
      DataTyCon { is_enum = res } -> res
      TupleTyCon {}               -> arity == 0   -- unit tuple counts
      _                           -> False
isEnumerationTyCon _ = False

-- | Is this a 'TyCon', synonym or otherwise, that defines a family?
isFamilyTyCon :: TyCon -> Bool
isFamilyTyCon (FamilyTyCon {})                       = True
isFamilyTyCon (AlgTyCon {algTcRhs = DataFamilyTyCon {}}) = True
isFamilyTyCon _                                      = False

-- | Is this a 'TyCon', synonym or otherwise, that defines a family with
-- instances?
isOpenFamilyTyCon :: TyCon -> Bool
isOpenFamilyTyCon (FamilyTyCon {famTcFlav = OpenSynFamilyTyCon }) = True
isOpenFamilyTyCon (AlgTyCon {algTcRhs = DataFamilyTyCon }) = True
isOpenFamilyTyCon _ = False

-- | Is this a synonym 'TyCon' that may have further instances appear?
isTypeFamilyTyCon :: TyCon -> Bool
isTypeFamilyTyCon (FamilyTyCon {}) = True
isTypeFamilyTyCon _                = False

-- | Is this an /open/ type family (as opposed to a closed one)?
isOpenTypeFamilyTyCon :: TyCon -> Bool
isOpenTypeFamilyTyCon (FamilyTyCon {famTcFlav = OpenSynFamilyTyCon }) = True
isOpenTypeFamilyTyCon _ = False

-- leave out abstract closed families here
isClosedSynFamilyTyCon_maybe :: TyCon -> Maybe (CoAxiom Branched)
isClosedSynFamilyTyCon_maybe
  (FamilyTyCon {famTcFlav = ClosedSynFamilyTyCon ax}) = Just ax
isClosedSynFamilyTyCon_maybe _ = Nothing

-- | Extract the built-in operations of a built-in synonym family, if any.
isBuiltInSynFamTyCon_maybe :: TyCon -> Maybe BuiltInSynFamily
isBuiltInSynFamTyCon_maybe
  (FamilyTyCon {famTcFlav = BuiltInSynFamTyCon ops }) = Just ops
isBuiltInSynFamTyCon_maybe _ = Nothing

-- | Is this a data family 'TyCon' that may have further instances appear?
isDataFamilyTyCon :: TyCon -> Bool
isDataFamilyTyCon (AlgTyCon {algTcRhs = DataFamilyTyCon {}}) = True
isDataFamilyTyCon _ = False
-- | Are we able to extract information ('TyVar' to class argument list
-- mapping) from a given 'TyCon'?
isTyConAssoc :: TyCon -> Bool
isTyConAssoc tc = isJust (tyConAssoc_maybe tc)

-- | The class this 'TyCon' is associated with, if any.
tyConAssoc_maybe :: TyCon -> Maybe Class
tyConAssoc_maybe tc = case tyConParent tc of
                        AssocFamilyTyCon cls -> Just cls
                        _                    -> Nothing
-- The unit tycon didn't used to be classed as a tuple tycon
-- but I thought that was silly so I've undone it
-- If it can't be for some reason, it should be an AlgTyCon
isTupleTyCon :: TyCon -> Bool
-- ^ Does this 'TyCon' represent a tuple?
--
-- NB: when compiling @Data.Tuple@, the tycons won't reply @True@ to
-- 'isTupleTyCon', because they are built as 'AlgTyCons'.  However they
-- get spat into the interface file as tuple tycons, so I don't think
-- it matters.
isTupleTyCon (AlgTyCon { algTcRhs = TupleTyCon {} }) = True
isTupleTyCon _ = False

-- | The 'TupleSort' of this 'TyCon', if it is a tuple.
tyConTuple_maybe :: TyCon -> Maybe TupleSort
tyConTuple_maybe (AlgTyCon { algTcRhs = rhs })
  | TupleTyCon { tup_sort = sort} <- rhs = Just sort
tyConTuple_maybe _ = Nothing

-- | Is this the 'TyCon' for an unboxed tuple?
isUnboxedTupleTyCon :: TyCon -> Bool
isUnboxedTupleTyCon (AlgTyCon { algTcRhs = rhs })
  | TupleTyCon { tup_sort = sort } <- rhs
  = not (isBoxed (tupleSortBoxity sort))
isUnboxedTupleTyCon _ = False

-- | Is this the 'TyCon' for a boxed tuple?
isBoxedTupleTyCon :: TyCon -> Bool
isBoxedTupleTyCon (AlgTyCon { algTcRhs = rhs })
  | TupleTyCon { tup_sort = sort } <- rhs
  = isBoxed (tupleSortBoxity sort)
isBoxedTupleTyCon _ = False

-- | Is this a recursive 'TyCon'?
isRecursiveTyCon :: TyCon -> Bool
isRecursiveTyCon (AlgTyCon {algTcRec = Recursive}) = True
isRecursiveTyCon _                                 = False

-- | The promoted version of this 'TyCon', if it has one.
promotableTyCon_maybe :: TyCon -> Maybe TyCon
promotableTyCon_maybe (AlgTyCon { tcPromoted = prom }) = prom
promotableTyCon_maybe _                                = Nothing

-- | As 'promotableTyCon_maybe', but panics when there is no promoted version.
promoteTyCon :: TyCon -> TyCon
promoteTyCon tc = case promotableTyCon_maybe tc of
                    Just prom_tc -> prom_tc
                    Nothing      -> pprPanic "promoteTyCon" (ppr tc)
-- | Is this a PromotedTyCon?
isPromotedTyCon :: TyCon -> Bool
isPromotedTyCon (PromotedTyCon {}) = True
isPromotedTyCon _                  = False

-- | Retrieves the promoted TyCon if this is a PromotedTyCon;
isPromotedTyCon_maybe :: TyCon -> Maybe TyCon
isPromotedTyCon_maybe (PromotedTyCon { ty_con = tc }) = Just tc
isPromotedTyCon_maybe _ = Nothing

-- | Is this a PromotedDataCon?
isPromotedDataCon :: TyCon -> Bool
isPromotedDataCon (PromotedDataCon {}) = True
isPromotedDataCon _                    = False

-- | Retrieves the promoted DataCon if this is a PromotedDataCon;
isPromotedDataCon_maybe :: TyCon -> Maybe DataCon
isPromotedDataCon_maybe (PromotedDataCon { dataCon = dc }) = Just dc
isPromotedDataCon_maybe _ = Nothing

-- | Identifies implicit tycons that, in particular, do not go into interface
-- files (because they are implicitly reconstructed when the interface is
-- read).
--
-- Note that:
--
-- * Associated families are implicit, as they are re-constructed from
--   the class declaration in which they reside, and
--
-- * Family instances are /not/ implicit as they represent the instance body
--   (similar to a @dfun@ does that for a class instance).
isImplicitTyCon :: TyCon -> Bool
isImplicitTyCon (FunTyCon {})        = True
isImplicitTyCon (PrimTyCon {})       = True
isImplicitTyCon (PromotedDataCon {}) = True
isImplicitTyCon (PromotedTyCon {})   = True
isImplicitTyCon (AlgTyCon { algTcRhs = TupleTyCon {} })           = True
isImplicitTyCon (AlgTyCon { algTcParent = AssocFamilyTyCon {} })  = True
isImplicitTyCon (AlgTyCon {})                                     = False
isImplicitTyCon (FamilyTyCon { famTcParent = AssocFamilyTyCon {} }) = True
isImplicitTyCon (FamilyTyCon {})                                  = False
isImplicitTyCon (SynonymTyCon {})                                 = False

-- | The C type this 'TyCon' corresponds to (CAPI FFI), if any.
tyConCType_maybe :: TyCon -> Maybe CType
tyConCType_maybe tc@(AlgTyCon {}) = tyConCType tc
tyConCType_maybe _ = Nothing
{-
-----------------------------------------------
-- Expand type-constructor applications
-----------------------------------------------
-}
expandSynTyCon_maybe
  :: TyCon
  -> [tyco]                 -- ^ Arguments to 'TyCon'
  -> Maybe ([(TyVar,tyco)],
            Type,
            [tyco])         -- ^ Returns a 'TyVar' substitution, the body
                            --   type of the synonym (not yet substituted)
                            --   and any arguments remaining from the
                            --   application
-- ^ Expand a type synonym application, if any
expandSynTyCon_maybe tc tys
  | SynonymTyCon { tyConTyVars = tvs, synTcRhs = rhs } <- tc
  , let n_tvs = length tvs
  = case n_tvs `compare` length tys of
      -- Over-saturated: the extra arguments are returned unconsumed.
      LT -> Just (tvs `zip` tys, rhs, drop n_tvs tys)
      -- Exactly saturated: no leftover arguments.
      EQ -> Just (tvs `zip` tys, rhs, [])
      -- Under-saturated: cannot expand.
      GT -> Nothing
  | otherwise
  = Nothing
----------------
-- | As 'tyConDataCons_maybe', but returns the empty list of constructors if no
-- constructors could be found
tyConDataCons :: TyCon -> [DataCon]
-- It's convenient for tyConDataCons to return the
-- empty list for type synonyms etc
tyConDataCons tycon = tyConDataCons_maybe tycon `orElse` []

-- | Determine the 'DataCon's originating from the given 'TyCon', if the 'TyCon'
-- is the sort that can have any constructors (note: this does not include
-- abstract algebraic types)
tyConDataCons_maybe :: TyCon -> Maybe [DataCon]
tyConDataCons_maybe (AlgTyCon {algTcRhs = rhs})
  = case rhs of
      DataTyCon { data_cons = cons } -> Just cons
      NewTyCon { data_con = con }    -> Just [con]
      TupleTyCon { data_con = con }  -> Just [con]
      _                              -> Nothing
tyConDataCons_maybe _ = Nothing

-- | If the given 'TyCon' has a /single/ data constructor, i.e. it is a @data@
-- type with one alternative, a tuple type or a @newtype@ then that constructor
-- is returned. If the 'TyCon' has more than one constructor, or represents a
-- primitive or function type constructor then @Nothing@ is returned. In any
-- other case, the function panics
tyConSingleDataCon_maybe :: TyCon -> Maybe DataCon
tyConSingleDataCon_maybe (AlgTyCon { algTcRhs = rhs })
  = case rhs of
      DataTyCon { data_cons = [c] } -> Just c
      TupleTyCon { data_con = c }   -> Just c
      NewTyCon { data_con = c }     -> Just c
      _                             -> Nothing
tyConSingleDataCon_maybe _ = Nothing

tyConSingleAlgDataCon_maybe :: TyCon -> Maybe DataCon
-- Returns (Just con) for single-constructor
-- *algebraic* data types *not* newtypes
tyConSingleAlgDataCon_maybe (AlgTyCon { algTcRhs = rhs })
  = case rhs of
      DataTyCon { data_cons = [c] } -> Just c
      TupleTyCon { data_con = c }   -> Just c
      _                             -> Nothing
tyConSingleAlgDataCon_maybe _ = Nothing

-- | Determine the number of value constructors a 'TyCon' has. Panics if the
-- 'TyCon' is not algebraic or a tuple
tyConFamilySize :: TyCon -> Int
tyConFamilySize tc@(AlgTyCon { algTcRhs = rhs })
  = case rhs of
      DataTyCon { data_cons = cons } -> length cons
      NewTyCon {}                    -> 1
      TupleTyCon {}                  -> 1
      DataFamilyTyCon {}             -> 0
      _                              -> pprPanic "tyConFamilySize 1" (ppr tc)
tyConFamilySize tc = pprPanic "tyConFamilySize 2" (ppr tc)

-- | Extract an 'AlgTyConRhs' with information about data constructors from an
-- algebraic or tuple 'TyCon'. Panics for any other sort of 'TyCon'
algTyConRhs :: TyCon -> AlgTyConRhs
algTyConRhs (AlgTyCon {algTcRhs = rhs}) = rhs
algTyConRhs other = pprPanic "algTyConRhs" (ppr other)
-- | Get the list of roles for the type parameters of a TyCon
tyConRoles :: TyCon -> [Role]
-- See also Note [TyCon Role signatures]
tyConRoles tc
  = case tc of
      { FunTyCon {}                          -> const_role Representational
      ; AlgTyCon { tcRoles = roles }         -> roles
      ; SynonymTyCon { tcRoles = roles }     -> roles
      ; FamilyTyCon {}                       -> const_role Nominal
      ; PrimTyCon { tcRoles = roles }        -> roles
      ; PromotedDataCon { tcRoles = roles }  -> roles
      ; PromotedTyCon {}                     -> const_role Nominal
      }
  where
    -- One copy of role r per type argument.
    const_role r = replicate (tyConArity tc) r
-- | Extract the bound type variables and type expansion of a @newtype@
-- 'TyCon'. Panics if the 'TyCon' is not a @newtype@
newTyConRhs :: TyCon -> ([TyVar], Type)
newTyConRhs (AlgTyCon {tyConTyVars = tvs, algTcRhs = NewTyCon { nt_rhs = rhs }})
  = (tvs, rhs)
newTyConRhs tycon = pprPanic "newTyConRhs" (ppr tycon)

-- | The number of type parameters that need to be passed to a newtype to
-- resolve it. May be less than in the definition if it can be eta-contracted.
newTyConEtadArity :: TyCon -> Int
newTyConEtadArity (AlgTyCon {algTcRhs = NewTyCon { nt_etad_rhs = tvs_rhs }})
  = length (fst tvs_rhs)
newTyConEtadArity tycon = pprPanic "newTyConEtadArity" (ppr tycon)

-- | Extract the bound type variables and type expansion of an eta-contracted
-- @newtype@ 'TyCon'. Panics if the 'TyCon' is not a @newtype@
newTyConEtadRhs :: TyCon -> ([TyVar], Type)
newTyConEtadRhs (AlgTyCon {algTcRhs = NewTyCon { nt_etad_rhs = tvs_rhs }}) = tvs_rhs
newTyConEtadRhs tycon = pprPanic "newTyConEtadRhs" (ppr tycon)

-- | Extracts the @newtype@ coercion from such a 'TyCon', which can be used to
-- construct something with the @newtype@s type from its representation type
-- (right hand side). If the supplied 'TyCon' is not a @newtype@, returns
-- @Nothing@
newTyConCo_maybe :: TyCon -> Maybe (CoAxiom Unbranched)
newTyConCo_maybe (AlgTyCon {algTcRhs = NewTyCon { nt_co = co }}) = Just co
newTyConCo_maybe _ = Nothing

-- | As 'newTyConCo_maybe', but panics when the 'TyCon' is not a @newtype@.
newTyConCo :: TyCon -> CoAxiom Unbranched
newTyConCo tc = case newTyConCo_maybe tc of
                  Just co -> co
                  Nothing -> pprPanic "newTyConCo" (ppr tc)

-- | Find the primitive representation of a 'TyCon'
tyConPrimRep :: TyCon -> PrimRep
tyConPrimRep (PrimTyCon {primTyConRep = rep}) = rep
-- Everything else (except unboxed tuples) is a heap pointer.
tyConPrimRep tc = ASSERT(not (isUnboxedTupleTyCon tc)) PtrRep

-- | Find the \"stupid theta\" of the 'TyCon'. A \"stupid theta\" is the context
-- to the left of an algebraic type declaration, e.g. @Eq a@ in the declaration
-- @data Eq a => T a ...@
tyConStupidTheta :: TyCon -> [PredType]
tyConStupidTheta (AlgTyCon {algTcStupidTheta = stupid}) = stupid
tyConStupidTheta tycon = pprPanic "tyConStupidTheta" (ppr tycon)

-- | Extract the 'TyVar's bound by a vanilla type synonym
-- and the corresponding (unsubstituted) right hand side.
synTyConDefn_maybe :: TyCon -> Maybe ([TyVar], Type)
synTyConDefn_maybe (SynonymTyCon {tyConTyVars = tyvars, synTcRhs = ty})
  = Just (tyvars, ty)
synTyConDefn_maybe _ = Nothing

-- | Extract the information pertaining to the right hand side of a type synonym
-- (@type@) declaration.
synTyConRhs_maybe :: TyCon -> Maybe Type
synTyConRhs_maybe (SynonymTyCon {synTcRhs = rhs}) = Just rhs
synTyConRhs_maybe _ = Nothing

-- | Extract the flavour of a type family (with all the extra information that
-- it carries)
famTyConFlav_maybe :: TyCon -> Maybe FamTyConFlav
famTyConFlav_maybe (FamilyTyCon {famTcFlav = flav}) = Just flav
famTyConFlav_maybe _ = Nothing

-- | Is this the 'TyCon' for a class instance?
isClassTyCon :: TyCon -> Bool
isClassTyCon (AlgTyCon {algTcParent = ClassTyCon _}) = True
isClassTyCon _                                       = False

-- | If this 'TyCon' is that for a class instance, return the class it is for.
-- Otherwise returns @Nothing@
tyConClass_maybe :: TyCon -> Maybe Class
tyConClass_maybe (AlgTyCon {algTcParent = ClassTyCon clas}) = Just clas
tyConClass_maybe _ = Nothing
----------------------------------------------------------------------------
-- | The parent of this 'TyCon'; 'NoParentTyCon' for free-standing TyCons.
tyConParent :: TyCon -> TyConParent
tyConParent (AlgTyCon {algTcParent = parent})    = parent
tyConParent (FamilyTyCon {famTcParent = parent}) = parent
tyConParent _                                    = NoParentTyCon

----------------------------------------------------------------------------
-- | Is this 'TyCon' that for a data family instance?
isFamInstTyCon :: TyCon -> Bool
isFamInstTyCon tc = case tyConParent tc of
                      FamInstTyCon {} -> True
                      _               -> False

-- | The family 'TyCon', instance types, and coercion axiom, if this is
-- a family-instance 'TyCon'.
tyConFamInstSig_maybe :: TyCon -> Maybe (TyCon, [Type], CoAxiom Unbranched)
tyConFamInstSig_maybe tc
  = case tyConParent tc of
      FamInstTyCon ax f ts -> Just (f, ts, ax)
      _                    -> Nothing

-- | If this 'TyCon' is that of a family instance, return the family in question
-- and the instance types. Otherwise, return @Nothing@
tyConFamInst_maybe :: TyCon -> Maybe (TyCon, [Type])
tyConFamInst_maybe tc
  = case tyConParent tc of
      FamInstTyCon _ f ts -> Just (f, ts)
      _                   -> Nothing

-- | If this 'TyCon' is that of a family instance, return a 'TyCon' which
-- represents a coercion identifying the representation type with the type
-- instance family. Otherwise, return @Nothing@
tyConFamilyCoercion_maybe :: TyCon -> Maybe (CoAxiom Unbranched)
tyConFamilyCoercion_maybe tc
  = case tyConParent tc of
      FamInstTyCon co _ _ -> Just co
      _                   -> Nothing
{-
************************************************************************
* *
\subsection[TyCon-instances]{Instance declarations for @TyCon@}
* *
************************************************************************
@TyCon@s are compared by comparing their @Unique@s.
The strictness analyser needs @Ord@. It is a lexicographic order with
the property @(a<=b) || (b<=a)@.
-}
-- Equality and ordering go via the TyCon's Unique (see 'compare' below).
instance Eq TyCon where
  a == b = case (a `compare` b) of { EQ -> True;  _ -> False }
  a /= b = case (a `compare` b) of { EQ -> False; _ -> True  }

instance Ord TyCon where
  a <= b = case (a `compare` b) of { LT -> True;  EQ -> True;  GT -> False }
  a <  b = case (a `compare` b) of { LT -> True;  EQ -> False; GT -> False }
  a >= b = case (a `compare` b) of { LT -> False; EQ -> True;  GT -> True  }
  a >  b = case (a `compare` b) of { LT -> False; EQ -> False; GT -> True  }
  compare a b = getUnique a `compare` getUnique b

instance Uniquable TyCon where
  getUnique tc = tyConUnique tc

instance Outputable TyCon where
  -- At the moment a promoted TyCon has the same Name as its
  -- corresponding TyCon, so we add the quote to distinguish it here
  ppr tc = pprPromotionQuote tc <> ppr (tyConName tc)

-- | The quote to print before a promoted 'TyCon', if any.
pprPromotionQuote :: TyCon -> SDoc
pprPromotionQuote (PromotedDataCon {}) = char '\''   -- Quote promoted DataCons
                                                     -- in types
pprPromotionQuote (PromotedTyCon {})   = ifPprDebug (char '\'')
pprPromotionQuote _                    = empty       -- However, we don't quote TyCons
                                                     -- in kinds e.g.
                                                     --    type family T a :: Bool -> *
                                                     -- cf Trac #5952.
                                                     -- Except with -dppr-debug

instance NamedThing TyCon where
  getName = tyConName

instance Data.Data TyCon where
  -- don't traverse?
  toConstr _   = abstractConstr "TyCon"
  gunfold _ _  = error "gunfold"
  dataTypeOf _ = mkNoRepType "TyCon"
{-
************************************************************************
* *
Walking over recursive TyCons
* *
************************************************************************
Note [Expanding newtypes and products]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When expanding a type to expose a data-type constructor, we need to be
careful about newtypes, lest we fall into an infinite loop. Here are
the key examples:
newtype Id x = MkId x
newtype Fix f = MkFix (f (Fix f))
newtype T = MkT (T -> T)
Type Expansion
--------------------------
T T -> T
Fix Maybe Maybe (Fix Maybe)
Id (Id Int) Int
Fix Id NO NO NO
Notice that we can expand T, even though it's recursive.
And we can expand Id (Id Int), even though the Id shows up
twice at the outer level.
So, when expanding, we keep track of when we've seen a recursive
newtype at outermost level; and bale out if we see it again.
We sometimes want to do the same for product types, so that the
strictness analyser doesn't unbox infinitely deeply.
The function that manages this is checkRecTc.
-}
-- | Tracks the recursive TyCons seen so far while expanding a type;
-- see Note [Expanding newtypes and products].
newtype RecTcChecker = RC NameSet

-- | A checker that has seen no recursive TyCons yet.
initRecTc :: RecTcChecker
initRecTc = RC emptyNameSet

checkRecTc :: RecTcChecker -> TyCon -> Maybe RecTcChecker
-- Nothing      => Recursion detected
-- Just rec_tcs => Keep going
checkRecTc (RC rec_nts) tc
  | not (isRecursiveTyCon tc)     = Just (RC rec_nts)
  | tc_name `elemNameSet` rec_nts = Nothing
  | otherwise                     = Just (RC (extendNameSet rec_nts tc_name))
  where
    tc_name = tyConName tc
|
christiaanb/ghc
|
compiler/types/TyCon.hs
|
bsd-3-clause
| 70,662
| 0
| 16
| 21,265
| 8,089
| 4,631
| 3,458
| 713
| 7
|
{-# LANGUAGE PackageImports #-}
-- | Pass-through re-export of the @base@ package's "GHC.IP" module.
module GHC.IP (module M) where
import "base" GHC.IP as M
|
silkapp/base-noprelude
|
src/GHC/IP.hs
|
bsd-3-clause
| 94
| 0
| 4
| 18
| 21
| 15
| 6
| 3
| 0
|
-----------------------------------------------------------------------------
-- |
-- Copyright : (C) 2015 Dimitri Sabadie
-- License : BSD3
--
-- Maintainer : Dimitri Sabadie <dimitri.sabadie@gmail.com>
-- Stability : experimental
-- Portability : portable
--
----------------------------------------------------------------------------
import qualified Codec.Picture as JP
import Control.Monad
import Control.Monad.Error.Class
import Control.Monad.Trans
import Control.Monad.Trans.Either
import Data.Bits
import Data.Foldable
import Data.Monoid
import Data.Traversable
import Data.Vector as V ( Vector, (!), cons, convert, fromList, singleton )
import qualified Data.Vector as V ( concat, concatMap, head, replicate, tail )
import qualified Data.Vector.Storable as SV ( Vector, concat )
import Foreign
import Foreign.C.String
import qualified Graphics.Rendering.FreeType.Internal as FT
import qualified Graphics.Rendering.FreeType.Internal.Bitmap as FT
import qualified Graphics.Rendering.FreeType.Internal.Face as FT
import qualified Graphics.Rendering.FreeType.Internal.GlyphSlot as FT
import qualified Graphics.Rendering.FreeType.Internal.PrimitiveTypes as FT
import System.Environment
-- Pixel in the fontmap. [Pixel] is a line of pixels, but Vector Pixel might be
-- preferred though for O(1) on random access.
type Pixel = Word8
-- A bitmap: a vector of pixel lines.
type Bitmap = Vector (Vector Pixel)
-- A glyph is represented as several pixel lines along with information about
-- size and offset (width,rows,top,left,pixels).
type Glyph = (Int,Int,Int,Int,Bitmap)
-- A fontmap is encoded as several glyph lines.
type Fontmap = Vector (Vector Bitmap)
-- Get the alphabet from MonadIO m, one line of symbols per input line.
--
-- Because of the format of the input, it’s possible that holes exist in the
-- alphabet, as following:
--
--   abcdef
--   ghijkl
--   mno
--
-- In that case, the last line has three symbols missing to complete the line.
-- That function is not responsible for taking care of any kind of padding.
-- See 'padAlphabet' for such a use.
getAlphabet :: (MonadIO m) => m [[Char]]
getAlphabet = liftIO $ fmap lines getContents
-- Pad the alphabet by inserting '\0' at end of incomplete lines, so that
-- every line ends up as long as the longest one.
--
-- An empty alphabet is returned unchanged; the previous implementation
-- called 'maximum' on an empty list and crashed in that case.
padAlphabet :: [[Char]] -> [[Char]]
padAlphabet alpha = map fillGaps alpha
  where
    fillGaps line = line ++ replicate (maxWidth - length line) '\0'
    -- Seeding with 0 makes the maximum total on empty input.
    maxWidth = maximum (0 : map length alpha)
-- Lift a FreeType error into 'MonadError String'.
-- A zero error code means success; anything else is rethrown as a
-- human-readable error string.
liftErr :: (MonadError String m) => FT.FT_Error -> m ()
liftErr e
  | e == 0    = pure ()
  | otherwise = throwError $ "FreeType error: " ++ show e

-- That function can be used to turn FreeType functions that return
-- errors into 'MonadError String m' actions.
wrapErr :: (MonadIO m,MonadError String m) => IO FT.FT_Error -> m ()
wrapErr a = liftIO a >>= liftErr
-- Process a font, given by a 'FilePath'. The alphabet, a '[[Char]]'', is
-- provided to select the symbols that have to be written to the output. The
-- output is expressed as a 'String' and refers to both the .png image and the
-- JSON font setup file. Are also needed the points used to render each
-- symbol, the resolution in dpi and the padding to add around each glyphs.
--
-- NOTE(review): 'head alphabet' and the two 'maximumBy' calls below are
-- partial; an empty alphabet (or one with no glyphs) will crash — confirm
-- callers always pass a non-empty alphabet.
-- NOTE(review): 'mergeBitmapLine', 'padBitmap', 'resizeBitmap' and
-- 'fontmapToImage' are defined elsewhere in this file.
processFont :: FilePath      -- ^ path of the font file
            -> [[Char]]      -- ^ alphabet (one sublist per fontmap line)
            -> String        -- ^ output path (.png image)
            -> Int           -- ^ point size used to render each symbol
            -> Int           -- ^ horizontal resolution (dpi)
            -> Int           -- ^ vertical resolution (dpi)
            -> Int           -- ^ padding added around each glyph
            -> IO (Either String ())
processFont fontPath alphabet output pt dpix dpiy padding = do
    traverse_ putStrLn alphabet
    alloca $ \ftlibPtr ->
      alloca $ \facePtr -> runEitherT $ do
        -- Initialise FreeType and load the requested face.
        wrapErr $ FT.ft_Init_FreeType ftlibPtr
        ftlib <- liftIO $ peek ftlibPtr
        wrapErr . withCString fontPath $ \cfontPath ->
          FT.ft_New_Face ftlib cfontPath 0 facePtr
        face <- liftIO $ peek facePtr
        -- Point sizes are expressed in 26.6 fixed point, hence the * 64.
        wrapErr $ FT.ft_Set_Char_Size face 0 (fromIntegral pt * 64)
          (fromIntegral dpix) (fromIntegral dpiy)
        -- Render every glyph, then normalise all bitmaps to the widest /
        -- tallest glyph (plus padding) and merge them into one image.
        fontmap <- traverse (traverse $ createGlyph face) alphabet'
        let bitmaps = fmap (mergeBitmapLine (maxRows + 2 * padding) . fmap (\(w,h,_,_,p) -> padBitmap padding maxWidth $ resizeBitmap w h maxWidth maxRows p)) fontmap
            fontmap' = V.concat (toList fontmap)
            (maxWidth,_,_,_,_) = maximumBy (\(a,_,_,_,_) (b,_,_,_,_) -> compare a b) fontmap'
            (_,maxRows,_,_,_) = maximumBy (\(_,a,_,_,_) (_,b,_,_,_) -> compare a b) fontmap'
            img = JP.ImageY8 $ fontmapToImage
              ((maxWidth + 2 * padding) * length (head alphabet))
              ((maxRows + 2 * padding) * length alphabet) bitmaps
        liftIO $ JP.savePngImage output img
  where
    alphabet' = fromList $ (map fromList) alphabet
-- Create a new glyph by rendering it via FreeType.
--
-- Loads the character with monochrome rendering, rejects glyphs that are
-- not monochrome (pixel_mode /= 1), then extracts the pixels together
-- with the glyph metrics (width, rows, top and left bearings).
createGlyph :: (MonadIO m,MonadError String m)
            => FT.FT_Face
            -> Char
            -> m Glyph
createGlyph face c = do
  liftIO . putStrLn $ "processing character " ++ show c
  wrapErr $ FT.ft_Load_Char face (fromIntegral $ fromEnum c)
    (FT.ft_LOAD_RENDER .|. FT.ft_LOAD_MONOCHROME)
  glyph <- liftIO . peek $ FT.glyph face
  bitmap <- liftIO . peek $ FT.bitmap glyph
  -- pixel_mode 1 is FreeType's 1-bit-per-pixel monochrome format, which
  -- is what 'extractBitmap' expects.
  unless (FT.pixel_mode bitmap == 1) . throwError $ show c ++
    " is not encoded as a monochrome bitmap"
  liftIO $ do
    pixels <- extractBitmap bitmap
    top <- fmap fromIntegral . peek $ FT.bitmap_top glyph
    left <- fmap fromIntegral . peek $ FT.bitmap_left glyph
    let rows = fromIntegral $ FT.rows bitmap
        width = fromIntegral $ FT.width bitmap
    putStrLn $ " width: " ++ show width
    putStrLn $ " rows: " ++ show rows
    putStrLn $ " top: " ++ show top
    putStrLn $ " left: " ++ show left
    pure (width,rows,top,left,pixels)
-- Project the pixel bitmap out of a 'Glyph' tuple.
glyphBitmap :: Glyph -> Bitmap
glyphBitmap glyph = case glyph of
  (_,_,_,_,bitmap) -> bitmap
-- Get both the minimum and the maximum of a foldable in a single pass,
-- using the supplied ordering function. For an empty container the
-- result is ('maxBound','minBound').
minmaxBy :: (Bounded a,Foldable f)
         => (a -> a -> Ordering)
         -> f a
         -> (a,a)
minmaxBy cmp = foldl' step (maxBound, minBound)
  where
    step (lo, hi) x = lo `seq` hi `seq` (smaller x lo, larger hi x)
    smaller a b = case cmp a b of
      LT -> a
      _  -> b
    larger a b = case cmp a b of
      GT -> a
      _  -> b
-- Single-pass minimum and maximum of a foldable, with the natural order.
minmax :: (Bounded a,Ord a,Foldable f) => f a -> (a,a)
minmax xs = minmaxBy compare xs
-- Unpack the eight pixels packed into one byte. FreeType packs monochrome
-- pixels one per bit, most significant bit first; each unpacked pixel is
-- an unnormalized monochrome value: 0 (background) or 255 (foreground).
unpackPixels :: Word8 -> [Word8]
unpackPixels byte =
  [ toPixel (byte .&. mask) | mask <- [128, 64, 32, 16, 8, 4, 2, 1] ]
  where
    toPixel x = if x == 0 then 0 else 255
-- Given a FreeType glyph bitmap, that function extracts a Bitmap.
--
-- Each source row packs 8 pixels per byte; 'unpackPixels' expands them
-- and the result is truncated to the real pixel width. Successive rows
-- are reached by advancing the buffer pointer by the bitmap's pitch.
extractBitmap :: FT.FT_Bitmap -> IO Bitmap
extractBitmap bitmap = fmap fromList $ go 0 (FT.buffer bitmap)
  where
    rows = FT.rows bitmap
    width = FT.width bitmap

    -- NOTE(review): 'col' is actually a row counter (compared against
    -- 'rows'); consider renaming.
    go col buf
      | col < rows = do
          -- ceiling (width / 8) bytes hold one row of packed pixels.
          line <- fmap (fromList . take (fromIntegral width) . concatMap unpackPixels) $ peekArray (ceiling $ fromIntegral width / 8) (castPtr buf)
          nextLines <- go (succ col) (buf `advancePtr` fromIntegral (FT.pitch bitmap))
          pure $ line : nextLines
      | otherwise = pure []
-- 'resizeBitmap width rows maxWidth maxRow bitmap' grows 'bitmap' to
-- 'maxWidth' columns and 'maxRow' rows by appending zero pixels on the
-- right of each line and zero-filled lines at the bottom.
resizeBitmap :: Int -> Int -> Int -> Int -> Bitmap -> Bitmap
resizeBitmap width rows maxWidth maxRow bitmap =
    widened <> V.replicate (maxRow - rows) blankLine
  where
    widened   = fmap (<> V.replicate (maxWidth - width) 0) bitmap
    blankLine = V.replicate maxWidth 0
-- Surround a Bitmap with 'padding' zero pixels on every side (top,
-- bottom, left and right). 'maxWidth' is the width of the incoming
-- lines, used to build the blank padding rows.
padBitmap :: Int -> Int -> Bitmap -> Bitmap
padBitmap padding maxWidth bitmap =
    fmap addSides (vpad <> bitmap <> vpad)
  where
    addSides row = hpad <> row <> hpad
    hpad = V.replicate padding 0
    vpad = V.replicate padding (V.replicate maxWidth 0)
-- Merge a line of Bitmaps.
--
-- Peels one scanline off every glyph bitmap per step and concatenates
-- them, producing one merged scanline per remaining row.
-- NOTE(review): 'V.head'/'V.tail' are partial — this assumes every
-- bitmap in 'line' still has at least 'remainingRows' rows (which
-- 'resizeBitmap'/'padBitmap' appear to guarantee upstream); confirm.
mergeBitmapLine :: Int -> Vector Bitmap -> Vector Bitmap
mergeBitmapLine remainingRows line
  | line == mempty = mempty
  | remainingRows > 0 = fmap V.head line `cons` mergeBitmapLine (pred remainingRows) (fmap V.tail line)
  | otherwise = mempty
-- Build a greyscale image out of the rendered glyphs. 'width' and 'rows'
-- give the pixel dimensions of the final image; the triply nested
-- fontmap structure is flattened into one pixel vector.
fontmapToImage :: Int -> Int -> Fontmap -> JP.Image JP.Pixel8
fontmapToImage width rows fontmap = JP.Image width rows pixels
  where
    flatten = V.concat . toList
    pixels  = convert (flatten (flatten (flatten fontmap)))
-- Program entry point. Expects exactly six arguments: font path, point
-- size, horizontal dpi, vertical dpi, padding and output path; the
-- alphabet itself is read from standard input.
--
-- The original bound 'getArgs' with an irrefutable six-element pattern,
-- which crashed with an opaque pattern-match error on any other arity;
-- a usage message is printed instead.
main :: IO ()
main = do
  args <- getArgs
  case args of
    [fontPath,ptStr,dpiXStr,dpiYStr,paddingStr,output] -> do
      -- NOTE(review): 'read' is still partial on malformed numbers;
      -- consider 'readMaybe' if friendlier errors are wanted.
      let pt = read ptStr
          dpix = read dpiXStr
          dpiy = read dpiYStr
          padding = read paddingStr
      alphabet <- fmap padAlphabet getAlphabet
      putStrLn "alphabet is:"
      traverse_ (putStrLn . (" "++)) alphabet
      r <- processFont fontPath alphabet output pt dpix dpiy padding
      case r of
        Left err -> putStrLn err
        Right () -> putStrLn "a plus dans l'bus"
    _ -> putStrLn "usage: sdffont FONT PT DPIX DPIY PADDING OUTPUT"
|
phaazon/sdffont
|
src/Main.hs
|
bsd-3-clause
| 9,019
| 0
| 24
| 1,976
| 2,564
| 1,357
| 1,207
| -1
| -1
|
{-# LANGUAGE CPP, OverloadedStrings #-}
-- | Compiling abstract syntax trees into 'MOO' computations
module MOO.Compiler (compile, evaluate) where
import Control.Applicative ((<$>), (<*>))
import Control.Monad (forM_, when, unless, void, join, (<=<))
import Control.Monad.Cont (callCC)
import Control.Monad.Reader (asks, local)
import Control.Monad.State (gets)
import Data.Monoid ((<>))
import Database.VCache (deref, vref)
import qualified Data.HashMap.Lazy as HM
import MOO.AST
import MOO.Builtins
import MOO.Object
import MOO.Task
import MOO.Types
# ifdef MOO_WAIF
import MOO.WAIF
# endif
import qualified MOO.List as Lst
import qualified MOO.String as Str
-- | Compile a complete MOO program into a computation in the 'MOO' monad that
-- returns whatever the MOO program returns. The captured continuation is
-- what RETURN statements invoke to deliver that value.
compile :: Program -> MOO Value
compile (Program statements) = callCC (compileStatements statements)
-- Compile a statement list. 'yield' is the continuation a RETURN statement
-- uses to deliver the program's final value (captured by 'compile').
-- Loops capture their own continuations for BREAK ('break') and CONTINUE
-- (installed per-iteration via 'setLoopContinue').
compileStatements :: [Statement] -> (Value -> MOO Value) -> MOO Value
compileStatements (statement:rest) yield = case statement of
  Expression lineNo expr ->
    setLineNumber lineNo >> evaluate expr >> compile' rest

  If lineNo cond (Then thens) elseIfs (Else elses) -> runTick >> do
    compileIf ((lineNo, cond, thens) : map elseIf elseIfs) elses
    compile' rest

    where elseIf :: ElseIf -> (LineNo, Expr, [Statement])
          elseIf (ElseIf lineNo cond thens) = (lineNo, cond, thens)

          -- Try each arm in order; fall through to the else block.
          compileIf :: [(LineNo, Expr, [Statement])] -> [Statement] -> MOO Value
          compileIf ((lineNo,cond,thens):conds) elses = do
            setLineNumber lineNo
            cond' <- evaluate cond
            if truthOf cond' then compile' thens
              else compileIf conds elses
          compileIf [] elses = compile' elses

  ForList lineNo var expr body -> do
    handleDebug $ do
      setLineNumber lineNo
      elts <- getList =<< evaluate expr

      -- 'break' escapes the whole loop; the per-iteration continuation
      -- installed in 'loop' below escapes just the current body.
      callCC $ \break -> do
        pushLoopContext (Just var) (Continuation break)
        loop var elts (compile' body)
        popContext
        return zero
    compile' rest

    where loop :: Id -> [Value] -> MOO a -> MOO ()
          loop var elts body = forM_ elts $ \elt -> runTick >> do
            storeVariable var elt
            callCC $ \k -> setLoopContinue (Continuation k) >> void body

  ForRange lineNo var (start, end) body -> do
    handleDebug $ do
      setLineNumber lineNo
      start' <- evaluate start
      end' <- evaluate end
      -- Ranges may be over integers or object numbers; 'ty' rebuilds the
      -- right Value constructor from the Integer loop counter.
      (ty, s, e) <- case (start', end') of
        (Int s, Int e) -> return (Int . fromInteger, toInteger s, toInteger e)
        (Obj s, Obj e) -> return (Obj . fromInteger, toInteger s, toInteger e)
        (_ , _ ) -> raise E_TYPE

      callCC $ \break -> do
        pushLoopContext (Just var) (Continuation break)
        loop var ty s e (compile' body)
        popContext
        return zero
    compile' rest

    where loop :: Id -> (Integer -> Value) -> Integer -> Integer -> MOO a ->
                  MOO ()
          loop var ty start end body = forM_ [start..end] $ \i -> runTick >> do
            storeVariable var (ty i)
            callCC $ \k -> setLoopContinue (Continuation k) >> void body

  While lineNo var expr body -> do
    callCC $ \break -> do
      pushLoopContext var (Continuation break)
      loop lineNo var (evaluate expr) (compile' body)
      popContext
    compile' rest

    where loop :: LineNo -> Maybe Id -> MOO Value -> MOO a -> MOO ()
          loop lineNo var expr body = runTick >> do
            setLineNumber lineNo
            expr' <- expr
            -- If the loop names a variable, the condition value is stored
            -- into it on every test.
            maybe return storeVariable var expr'
            when (truthOf expr') $ do
              callCC $ \k -> setLoopContinue (Continuation k) >> void body
              loop lineNo var expr body

  Fork lineNo var delay body -> runTick >> do
    handleDebug $ do
      setLineNumber lineNo
      usecs <- getDelay =<< evaluate delay
      checkQueuedTaskLimit

      world <- getWorld
      gen <- newRandomGen
      let taskId = newTaskId world gen
      -- The forked body compiles with 'return' as its yield: its value is
      -- discarded rather than delivered to this task.
      maybe return storeVariable var (Int $ fromIntegral taskId)
      forkTask taskId usecs (compileStatements body return)
      return zero
    compile' rest

  Break name -> breakLoop name
  Continue name -> continueLoop name

  Return _ Nothing -> runTick >> yield zero
  Return lineNo (Just expr) -> runTick >> do
    setLineNumber lineNo
    yield =<< evaluate expr

  TryExcept body excepts -> runTick >> do
    -- Error-code lists are evaluated up front, before the body runs.
    excepts' <- mapM compileExcept excepts
    compile' body `catchException` dispatch excepts'
    compile' rest

    where compileExcept :: Except -> MOO (Maybe [Value], Maybe Id, MOO Value)
          compileExcept (Except lineNo var codes handler) = do
            codes' <- case codes of
              ANY -> return Nothing
              Codes args -> setLineNumber lineNo >> Just <$> expand args
            return (codes', var, compile' handler)

          -- Find the first matching handler (Nothing matches anything),
          -- binding the error info tuple to its variable if one is named.
          dispatch :: [(Maybe [Value], Maybe Id, MOO Value)] -> Exception ->
                      MOO Value
          dispatch ((codes, var, handler):next) except@Exception {
              exceptionCode = code
            , exceptionMessage = message
            , exceptionValue = value
            , exceptionCallStack = Stack errorFrames
            }
            | maybe True (code `elem`) codes = do
                Stack currentFrames <- gets stack
                let traceback = formatFrames True $ take stackLen errorFrames
                    stackLen = length errorFrames - length currentFrames + 1
                    errorInfo = fromList [code, Str message, value, traceback]
                maybe return storeVariable var errorInfo
                handler
            | otherwise = dispatch next except
          dispatch [] except = passException except

  TryFinally body (Finally finally) -> runTick >> do
    let finally' = compile' finally

    -- The finally block runs on both the normal and exceptional paths;
    -- the context is popped before re-raising so it is not run twice.
    pushTryFinallyContext finally'
    compile' body `catchException` \except ->
      popContext >> finally' >> passException except
    popContext

    finally'
    compile' rest

  where compile' :: [Statement] -> MOO Value
        compile' ss = compileStatements ss yield

compileStatements [] _ = return zero
-- | Compile a MOO expression into a computation in the 'MOO' monad. If a MOO
-- exception is raised and the current verb frame's debug bit is not set,
-- return the error code as a MOO value rather than propagating the exception.
evaluate :: Expr -> MOO Value
evaluate (Literal value) = return value
-- Variable reads skip the tick charge applied to all other expressions.
evaluate expr@Variable{} = handleDebug $ fetch (lValue expr)
evaluate expr = runTick >>= \_ -> handleDebug $ case expr of
  List args -> fromList <$> expand args

  PropertyRef{} -> fetch (lValue expr)

  Assign what expr -> evaluate expr >>= store (lValue what)
  Scatter items expr -> evaluate expr >>= usingList (scatterAssign items)

  VerbCall target vname args -> do
    target' <- evaluate target
    vname' <- evaluate vname
    args' <- expand args

    case (target', vname') of
      (this@(Obj oid), Str name) -> callVerb this oid name args'
# ifdef MOO_WAIF
      (this@(Waf waf), Str name) -> callWaifVerb this waf name args'
# endif
      (_ , _ ) -> raise E_TYPE

  BuiltinFunc func args -> expand args >>= callBuiltin func

  x `Plus` y -> binary plus x y
  x `Minus` y -> binary minus x y
  x `Times` y -> binary times x y
  x `Divide` y -> binary divide x y
  x `Remain` y -> binary remain x y
  x `Power` y -> binary power x y

  Negate x -> evaluate x >>= \x' -> case x' of
    Int n -> return (Int $ negate n)
    Flt n -> return (Flt $ negate n)
    _ -> raise E_TYPE

  Conditional cond x y ->
    evaluate cond >>= \cond' -> evaluate $ if truthOf cond' then x else y
  -- And/Or short-circuit and return the deciding operand, MOO-style.
  x `And` y -> evaluate x >>= \v -> if truthOf v then evaluate y else return v
  x `Or` y -> evaluate x >>= \v -> if truthOf v then return v else evaluate y
  Not x -> truthValue . not . truthOf <$> evaluate x

  x `CompareEQ` y -> equality (==) x y
  x `CompareNE` y -> equality (/=) x y
  x `CompareLT` y -> comparison (<) x y
  x `CompareLE` y -> comparison (<=) x y
  x `CompareGT` y -> comparison (>) x y
  x `CompareGE` y -> comparison (>=) x y

  Index{} -> fetch (lValue expr)
  Range{} -> fetch (lValue expr)
  -- '$' inside an index expression: length supplied by 'withIndexLength'.
  Length -> join (asks indexLength)

  -- 'in' yields the 1-based index of the element, or 0 when absent.
  item `In` list -> do
    elt <- evaluate item
    evaluate list >>= usingList
      (return . Int . maybe 0 (fromIntegral . succ) . Lst.elemIndex elt)

  Catch expr codes (Default dv) -> do
    codes' <- case codes of
      ANY -> return Nothing
      Codes args -> Just <$> expand args
    let handler except@Exception { exceptionCode = code }
          | maybe True (code `elem`) codes' = maybe (return code) evaluate dv
          | otherwise = passException except
    evaluate expr `catchException` handler

  where binary :: (Value -> Value -> MOO Value) -> Expr -> Expr -> MOO Value
        binary op x y = evaluate x >>= \x' -> evaluate y >>= \y' -> x' `op` y'
        -- binary op x y = join $ op <$> evaluate x <*> evaluate y

        equality :: (Value -> Value -> Bool) -> Expr -> Expr -> MOO Value
        equality op = binary test
          where test x y = return $ truthValue (x `op` y)

        -- Ordering comparisons additionally require comparable types.
        comparison :: (Value -> Value -> Bool) -> Expr -> Expr -> MOO Value
        comparison op = binary test
          where test x y | comparable x y = return $ truthValue (x `op` y)
                         | otherwise = raise E_TYPE
-- Look up a variable in the current frame, raising E_VARNF when unbound.
fetchVariable :: Id -> MOO Value
fetchVariable var = do
  vars <- frame variables
  case HM.lookup var vars of
    Nothing    -> raise E_VARNF
    Just value -> return value
-- Bind a variable in the current frame, returning the stored value.
storeVariable :: Id -> Value -> MOO Value
storeVariable var value = do
  let update fr = fr { variables = HM.insert var value (variables fr) }
  modifyFrame update
  return value
-- Read a property value from an object (or WAIF). Built-in properties are
-- served directly; others are searched up the parent chain for an
-- inherited ("clear") value, with a permission check on the first object.
fetchProperty :: (Value, StrT) -> MOO Value
fetchProperty (Obj oid, name) = do
  obj <- maybe (raise E_INVIND) return =<< getObject oid
  maybe (search False obj) (handleBuiltin obj) $ builtinProperty name
  where search :: Bool -> Object -> MOO Value
        -- Permission is checked only on the first (non-inherited) hop.
        search skipPermCheck obj = do
          prop <- getProperty obj name
          unless (skipPermCheck || propertyPermR prop) $
            checkPermission (propertyOwner prop)
          case propertyValue prop of
            Just value -> return (deref value)
            Nothing -> do
              parentObj <- maybe (return Nothing) getObject (objectParent obj)
              -- NOTE(review): hitting the root with no value is treated
              -- as an internal invariant violation, hence 'error'.
              maybe (error $ "No inherited value for property " ++
                     Str.toString name) (search True) parentObj

        handleBuiltin :: Object -> (Object -> Value) -> MOO Value
        handleBuiltin obj prop = checkProtectedProperty (toId name) >>
                                 return (prop obj)
# ifdef MOO_WAIF
fetchProperty (Waf waif, name) = fetchWaifProperty waif name
# endif
-- Write a property value on an object (or WAIF), returning the value.
-- Built-in properties go through 'setBuiltinProperty'; user properties
-- require write permission and are stored via a VCache reference.
storeProperty :: (Value, StrT) -> Value -> MOO Value
storeProperty (Obj oid, name) value = do
  obj <- maybe (raise E_INVIND) return =<< getObject oid
  if isBuiltinProperty name
    then checkProtectedProperty (toId name) >>
         setBuiltinProperty (oid, obj) name value
    else modifyProperty obj name $ \prop -> do
           unless (propertyPermW prop) $ checkPermission (propertyOwner prop)
           vspace <- getVSpace
           return prop { propertyValue = Just (vref vspace value) }
  return value
# ifdef MOO_WAIF
storeProperty (Waf waif, name) value = storeWaifProperty waif name value
# endif
-- Run an action with 'indexLength' bound to the length of 'value', so
-- that a 'Length' expression inside an index can resolve it. Values
-- that are neither lists nor strings raise E_TYPE when queried.
withIndexLength :: Value -> MOO a -> MOO a
withIndexLength value = local (\env -> env { indexLength = valueLength })
  where
    valueLength :: MOO Value
    valueLength = case value of
      Lst v -> return (Int (fromIntegral (Lst.length v)))
      Str t -> return (Int (fromIntegral (Str.length t)))
      _     -> raise E_TYPE
-- Apply 'f' to the payload of a list value; E_TYPE for anything else.
usingList :: (LstT -> MOO a) -> Value -> MOO a
usingList f value = case value of
  Lst v -> f v
  _     -> raise E_TYPE
-- Extract a list value as an ordinary Haskell list; E_TYPE otherwise.
getList :: Value -> MOO [Value]
getList = usingList (\v -> return (Lst.toList v))
-- Extract an integer index from a value; E_TYPE for non-integers.
getIndex :: Value -> MOO Int
getIndex value = case value of
  Int i -> return (fromIntegral i)
  _     -> raise E_TYPE
-- Raise E_RANGE unless 'i' is a valid 1-based index into the list.
checkLstRange :: LstT -> Int -> MOO ()
checkLstRange v i
  | i < 1 || i > Lst.length v = raise E_RANGE
  | otherwise                 = return ()
-- Raise E_RANGE unless 'i' is a valid 1-based index into the string;
-- 'Str.compareLength' avoids forcing the full string length.
checkStrRange :: StrT -> Int -> MOO ()
checkStrRange t i
  | i < 1 || t `Str.compareLength` i == LT = raise E_RANGE
  | otherwise                              = return ()
-- A lens-like bundle for assignable expressions: 'fetch' reads the
-- current value, 'store' writes a new one, and 'change' returns the
-- current value together with a setter (used by nested indexed updates).
data LValue = LValue {
    fetch :: MOO Value
  , store :: Value -> MOO Value
  , change :: MOO (Value, Value -> MOO Value)
  }
-- Build the 'LValue' lens for an assignable expression. Variables and
-- property references are directly readable/writable; index and range
-- expressions compose a setter out of the enclosing expression's lens.
-- Any other expression is read-only ('store' is an internal error).
lValue :: Expr -> LValue
lValue (Variable var) = LValue fetch store change
  where fetch = fetchVariable var
        store = storeVariable var
        change = fetch >>= \value -> return (value, store)

lValue (PropertyRef objExpr nameExpr) = LValue fetch store change
  where fetch = getRefs >>= fetchProperty
        store value = getRefs >>= flip storeProperty value
        change = do
          refs <- getRefs
          value <- fetchProperty refs
          return (value, storeProperty refs)

        -- Object and property-name subexpressions are evaluated once per
        -- access; both must have the right types.
        getRefs :: MOO (Value, StrT)
        getRefs = do
          objRef <- evaluate objExpr
          nameRef <- evaluate nameExpr
          case (objRef, nameRef) of
            (obj@Obj{}, Str name) -> return (obj, name)
# ifdef MOO_WAIF
            (waf@Waf{}, Str name) -> return (waf, name)
# endif
            _ -> raise E_TYPE

lValue (expr `Index` index) = LValue fetchIndex storeIndex changeIndex
  where fetchIndex = fst <$> changeIndex
        storeIndex newValue = do
          (_, change) <- getLens
          change newValue
          return newValue
        changeIndex :: MOO (Value, Value -> MOO Value)
        changeIndex = getLens >>= \(maybeValue, setValue) -> case maybeValue of
          Just value -> return (value, setValue)
          Nothing -> raise E_RANGE

        -- Fetch the container through the parent lens, evaluate the index
        -- (with '$' bound to the container's length), and dispatch on
        -- integer vs. string ("association") indexing.
        getLens :: MOO (Maybe Value, Value -> MOO Value)
        getLens = do
          (value, changeExpr) <- change (lValue expr)
          index' <- withIndexLength value (evaluate index)
          case index' of
            Int i -> getIntLens value (fromIntegral i) changeExpr
            Str k -> getStrLens value k changeExpr
            _ -> raise E_TYPE

        -- 1-based integer indexing into lists and strings.
        getIntLens :: Value -> Int -> (Value -> MOO Value) ->
                      MOO (Maybe Value, Value -> MOO Value)
        getIntLens value index' changeExpr = do
          let i = index' - 1 :: Int
          value' <- case value of
            Lst v -> checkLstRange v index' >> return (v Lst.! i)
            Str t -> checkStrRange t index' >> return (Str $ Str.singleton $
                                                       t `Str.index` i)
            _ -> raise E_TYPE
          return (Just value', changeValue value i changeExpr)

          where changeValue :: Value -> Int -> (Value -> MOO Value) ->
                               Value -> MOO Value
                changeValue (Lst v) i changeExpr newValue =
                  changeExpr $ Lst $ Lst.set v newValue i
                -- Storing into a string index requires a 1-character string.
                changeValue (Str t) i changeExpr (Str c) = do
                  when (c `Str.compareLength` 1 /= EQ) $ raise E_INVARG
                  let (s, r) = Str.splitAt i t :: (StrT, StrT)
                  changeExpr $ Str $ Str.concat [s, c, Str.tail r]
                changeValue _ _ _ _ = raise E_TYPE

        -- String-keyed indexing into association lists.
        getStrLens :: Value -> StrT -> (Value -> MOO Value) ->
                      MOO (Maybe Value, Value -> MOO Value)
        getStrLens (Lst lst) key changeExpr = case Lst.assocLens key lst of
          Just (maybeValue, changeList) ->
            return (maybeValue, changeExpr . Lst . changeList . Just)
          Nothing -> raise E_INVIND
        getStrLens _ _ _ = raise E_TYPE

lValue (expr `Range` (start, end)) = LValue fetchRange storeRange changeRange
  where -- An empty range (start > end) yields an empty list/string.
        fetchRange = do
          value <- fetch (lValue expr)
          (start', end') <- getIndices value
          if start' > end'
            then case value of
              Lst{} -> return emptyList
              Str{} -> return emptyString
              _ -> raise E_TYPE
            else let len = end' - start' + 1 in case value of
              Lst v -> do checkLstRange v start' >> checkLstRange v end'
                          return $ Lst $ Lst.slice (start' - 1) len v
              Str t -> do checkStrRange t start' >> checkStrRange t end'
                          return $ Str $ Str.take len $ Str.drop (start' - 1) t
              _ -> raise E_TYPE

        storeRange newValue = do
          (value, changeExpr) <- change (lValue expr)
          startEnd <- getIndices value
          changeValue value startEnd changeExpr newValue
          return newValue

        -- A Range can be assigned to but cannot host a nested update.
        changeRange = error "Illegal Range as lvalue subexpression"

        getIndices :: Value -> MOO (Int, Int)
        getIndices value = withIndexLength value $
          (,) <$> (evaluate start >>= getIndex)
              <*> (evaluate end >>= getIndex)

        -- Splice the replacement between the prefix (before 'start') and
        -- the suffix (after 'end') of the container.
        changeValue :: Value -> (Int, Int) -> (Value -> MOO a) -> Value -> MOO a
        changeValue (Lst v) (start, end) changeExpr (Lst r) = do
          let len = Lst.length v :: Int
          when (end < 0 || start > len + 1) $ raise E_RANGE
          let pre = sublist v 1 (start - 1) :: LstT
              post = sublist v (end + 1) len :: LstT

              sublist :: LstT -> Int -> Int -> LstT
              sublist v s e
                | e < s = Lst.empty
                | otherwise = Lst.slice (s - 1) (e - s + 1) v
          changeExpr $ Lst $ Lst.concat [pre, r, post]
        changeValue (Str t) (start, end) changeExpr (Str r) = do
          when (end < 0 || t `Str.compareLength` (start - 1) == LT) $
            raise E_RANGE
          let pre = substr t 1 (start - 1) :: StrT
              post = substr t (end + 1) (Str.length t) :: StrT

              substr :: StrT -> Int -> Int -> StrT
              substr t s e
                | e < s = Str.empty
                | otherwise = Str.take (e - s + 1) $ Str.drop (s - 1) t
          changeExpr $ Str $ Str.concat [pre, r, post]
        changeValue _ _ _ _ = raise E_TYPE

lValue expr = LValue fetch store change
  where fetch = evaluate expr
        store _ = error "Unmodifiable LValue"
        change = fetch >>= \value -> return (value, store)
-- Perform a MOO scattering assignment: bind required, optional and rest
-- targets from the argument list. Raises E_ARGS when too few arguments
-- are supplied, or too many without a rest target. Returns the original
-- argument list as a value.
scatterAssign :: [ScatterItem] -> LstT -> MOO Value
scatterAssign items args = do
  when (nargs < nreqs || (not haveRest && nargs > ntarg)) $ raise E_ARGS
  walk items args (nargs - nreqs)
  return (Lst args)

  where nargs = Lst.length args :: Int
        nreqs = count required items :: Int
        nopts = count optional items :: Int
        ntarg = nreqs + nopts :: Int
        -- Number of arguments captured by a rest target, if any.
        nrest = if haveRest && nargs >= ntarg then nargs - ntarg else 0 :: Int

        count :: (a -> Bool) -> [a] -> Int
        count p = length . filter p

        haveRest = any rest items :: Bool

        required, optional, rest :: ScatterItem -> Bool
        required ScatRequired{} = True
        required _ = False
        optional ScatOptional{} = True
        optional _ = False
        rest ScatRest{} = True
        rest _ = False

        -- 'noptAvail' is the number of surplus arguments still available
        -- to fill optional targets; unfilled optionals fall back to their
        -- default expressions (evaluated lazily, here).
        walk :: [ScatterItem] -> LstT -> Int -> MOO ()
        walk (item:items) args noptAvail = case item of
          ScatRequired var -> do
            storeVariable var (Lst.head args)
            walk items (Lst.tail args) noptAvail
          ScatOptional var opt
            | noptAvail > 0 -> do
                storeVariable var (Lst.head args)
                walk items (Lst.tail args) (pred noptAvail)
            | otherwise -> do
                maybe (return zero) (storeVariable var <=< evaluate) opt
                walk items args noptAvail
          ScatRest var -> do
            let (s, r) = Lst.splitAt nrest args :: (LstT, LstT)
            storeVariable var (Lst s)
            walk items r noptAvail
        walk [] _ _ = return ()
-- Evaluate an argument list left to right, splicing the elements of
-- 'ArgSplice' lists directly into the result.
expand :: [Argument] -> MOO [Value]
expand [] = return []
expand (arg:args) = case arg of
  ArgNormal expr -> do
    value <- evaluate expr
    rest  <- expand args
    return (value : rest)
  ArgSplice expr -> do
    values <- evaluate expr >>= getList
    rest   <- expand args
    return (values ++ rest)
-- MOO '+': integer/float addition and string concatenation.
plus :: Value -> Value -> MOO Value
plus (Int x) (Int y) = return $ Int (x + y)
plus (Flt x) (Flt y) = checkFloat (x + y)
plus (Str x) (Str y) = return $ Str (x <> y)
plus _       _       = raise E_TYPE
-- MOO '-': integer/float subtraction.
minus :: Value -> Value -> MOO Value
minus (Int x) (Int y) = return $ Int (x - y)
minus (Flt x) (Flt y) = checkFloat (x - y)
minus _       _       = raise E_TYPE
-- MOO '*': integer/float multiplication.
times :: Value -> Value -> MOO Value
times (Int x) (Int y) = return $ Int (x * y)
times (Flt x) (Flt y) = checkFloat (x * y)
times _       _       = raise E_TYPE
-- MOO '/': raises E_DIV on a zero divisor. minBound / -1 would overflow
-- 'quot' (hardware trap), so that single case returns minBound unchanged.
divide :: Value -> Value -> MOO Value
divide (Int _) (Int 0) = raise E_DIV
divide (Int x) (Int (-1))
  | x == minBound = return $ Int x  -- avoid arithmetic overflow exception
divide (Int x) (Int y) = return $ Int (x `quot` y)
divide (Flt _) (Flt 0) = raise E_DIV
divide (Flt x) (Flt y) = checkFloat (x / y)
divide _       _       = raise E_TYPE
-- MOO '%': remainder; E_DIV on a zero divisor. Floats use 'fmod'.
remain :: Value -> Value -> MOO Value
remain (Int _) (Int 0) = raise E_DIV
remain (Int x) (Int y) = return $ Int (x `rem` y)
remain (Flt _) (Flt 0) = raise E_DIV
remain (Flt x) (Flt y) = checkFloat (x `fmod` y)
remain _       _       = raise E_TYPE
-- Floating-point remainder: x minus y times the quotient truncated
-- toward zero.
fmod :: FltT -> FltT -> FltT
fmod x y = x - y * fromInteger (truncate (x / y))
-- MOO '^' (exponentiation). Equation order matters: the guarded integer
-- case handles y >= 0; the equations after it implement negative integer
-- exponents (0^-n is E_DIV, 1 and -1 have exact results, anything else
-- truncates to 0).
power :: Value -> Value -> MOO Value
Flt x `power` Flt y = checkFloat (x ** y)
Flt x `power` Int y = checkFloat (x ^^ y)
Int x `power` Int y | y >= 0 = return $ Int (x ^ y)
-- | y < 0 ...
Int 0 `power` Int _ = raise E_DIV
Int 1 `power` Int _ = return $ Int 1
Int (-1) `power` Int y | even y = return $ Int 1
                       | otherwise = return $ Int (-1)
Int _ `power` Int _ = return $ Int 0
_ `power` _ = raise E_TYPE
|
verement/etamoo
|
src/MOO/Compiler.hs
|
bsd-3-clause
| 21,786
| 0
| 21
| 7,004
| 8,113
| 4,004
| 4,109
| 455
| 35
|
module Main (main) where
import Criterion.Main
import Control.Monad.Writer
import Data.DoList
import Data.DoMonoid
import Data.Text as T (Text, unlines)
-- Run the benchmark suite: the same workloads expressed via Writer,
-- DoList and DoMonoid, grouped by workload kind.
main :: IO ()
main = defaultMain (toList benchmarks)
  where
    benchmarks = do
      benchGroup "Sum" $ do
        whnfBench "Writer"   (sumWriter 1 2 3 4) 5
        whnfBench "DoList"   (sumList 1 2 3 4) 5
        whnfBench "DoMonoid" (sumMonoid 1 2 3 4) 5
      benchGroup "List" $ do
        whnfBench "Writer" (last . listWriter 1 2 3 4) 5
        whnfBench "DoList" (last . listList 1 2 3 4) 5
      benchGroup "Lines" $ do
        whnfBench "Writer" (linesWriter "Line 1\n" "Line 2\n") "Line 3\n"
        whnfBench "List"   (linesList "Line 1" "Line 2") "Line 3"
        whnfBench "Monoid" (linesMonoid "Line 1\n" "Line 2\n") "Line 3\n"
-- The same five-way Sum accumulation expressed three ways. NOINLINE keeps
-- GHC from inlining/optimizing the bodies away, so the benchmark measures
-- the abstraction overhead itself. (Left unrestyled for the same reason.)
sumWriter, sumList, sumMonoid :: Int -> Int -> Int -> Int -> Int -> Sum Int

{-# NOINLINE sumWriter #-}
sumWriter x1 x2 x3 x4 x5 = execWriter $ do
  tell $ Sum x1
  tell $ Sum x2
  tell $ Sum x3
  tell $ Sum x4
  tell $ Sum x5

{-# NOINLINE sumList #-}
sumList x1 x2 x3 x4 x5 = mconcat $ toList $ do
  item $ Sum x1
  item $ Sum x2
  item $ Sum x3
  item $ Sum x4
  item $ Sum x5

{-# NOINLINE sumMonoid #-}
sumMonoid x1 x2 x3 x4 x5 = runDoM $ do
  DoM $ Sum x1
  DoM $ Sum x2
  DoM $ Sum x3
  DoM $ Sum x4
  DoM $ Sum x5
-- The same five-element list construction via Writer and DoList.
-- NOINLINE pins the measured code; left unrestyled for the same reason.
listWriter, listList :: Int -> Int -> Int -> Int -> Int -> [Int]

{-# NOINLINE listWriter #-}
listWriter x1 x2 x3 x4 x5 = execWriter $ do
  tell [x1]
  tell [x2]
  tell [x3]
  tell [x4]
  tell [x5]

{-# NOINLINE listList #-}
listList x1 x2 x3 x4 x5 = toList $ do
  item x1
  item x2
  item x3
  item x4
  item x5
-- Three-way Text concatenation: note the Writer/Monoid variants take
-- newline-terminated inputs while the DoList variant adds newlines via
-- 'T.unlines'. NOINLINE pins the measured code.
linesWriter, linesList, linesMonoid :: Text -> Text -> Text -> Text

{-# NOINLINE linesWriter #-}
linesWriter x1 x2 x3 = execWriter $ do
  tell x1
  tell x2
  tell x3

{-# NOINLINE linesList #-}
linesList x' x2 x3 = T.unlines $ toList $ do
  item x'
  item x2
  item x3

{-# NOINLINE linesMonoid #-}
linesMonoid x1 x2 x3 = runDoM $ do
  DoM x1
  DoM x2
  DoM x3
-- Group a DoList of benchmarks under a name, as a single DoList item.
benchGroup :: String -> DoList Benchmark -> DoList Benchmark
benchGroup name benches = item (bgroup name (toList benches))
-- A single whnf benchmark wrapped as a DoList item.
whnfBench :: String -> (a -> b) -> a -> DoList Benchmark
whnfBench name func arg = item (bench name (whnf func arg))
|
tserduke/do-list
|
bench/Bench.hs
|
bsd-3-clause
| 2,156
| 0
| 14
| 545
| 887
| 417
| 470
| 76
| 1
|
{-# LANGUAGE CPP, ForeignFunctionInterface, BangPatterns #-}
#if __GLASGOW_HASKELL__
{-# LANGUAGE DeriveDataTypeable #-}
#if __GLASGOW_HASKELL__ >= 703
{-# LANGUAGE Unsafe #-}
#endif
#endif
{-# OPTIONS_HADDOCK hide #-}
-- |
-- Module : Data.ByteString.Lazy.Internal
-- Copyright : (c) Don Stewart 2006-2008
-- (c) Duncan Coutts 2006-2011
-- License : BSD-style
-- Maintainer : dons00@gmail.com, duncan@community.haskell.org
-- Stability : unstable
-- Portability : non-portable
--
-- A module containing semi-public 'ByteString' internals. This exposes
-- the 'ByteString' representation and low level construction functions.
-- Modules which extend the 'ByteString' system will need to use this module
-- while ideally most users will be able to make do with the public interface
-- modules.
--
module Data.ByteString.Lazy.Internal (
-- * The lazy @ByteString@ type and representation
ByteString(..), -- instances: Eq, Ord, Show, Read, Data, Typeable
chunk,
foldrChunks,
foldlChunks,
-- * Data type invariant and abstraction function
invariant,
checkInvariant,
-- * Chunk allocation sizes
defaultChunkSize,
smallChunkSize,
chunkOverhead,
-- * Conversion with lists: packing and unpacking
packBytes, packChars,
unpackBytes, unpackChars,
) where
import Prelude hiding (concat)
import qualified Data.ByteString.Internal as S
import qualified Data.ByteString as S (length, take, drop)
import Data.Word (Word8)
import Foreign.Storable (Storable(sizeOf))
#if !(MIN_VERSION_base(4,8,0))
import Data.Monoid (Monoid(..))
#endif
import Control.DeepSeq (NFData, rnf)
#if MIN_VERSION_base(3,0,0)
import Data.String (IsString(..))
#endif
import Data.Typeable (Typeable)
#if MIN_VERSION_base(4,1,0)
import Data.Data (Data(..))
#if MIN_VERSION_base(4,2,0)
import Data.Data (mkNoRepType)
#else
import Data.Data (mkNorepType)
#endif
#else
import Data.Generics (Data(..), mkNorepType)
#endif
-- | A space-efficient representation of a 'Word8' vector, supporting many
-- efficient operations.
--
-- A lazy 'ByteString' contains 8-bit bytes, or by using the operations
-- from "Data.ByteString.Lazy.Char8" it can be interpreted as containing
-- 8-bit characters.
--
-- A lazy ByteString is a (lazy) list of strict, unpacked chunks.
-- Invariant (see 'invariant' below): no chunk is ever empty.
data ByteString = Empty | Chunk {-# UNPACK #-} !S.ByteString ByteString
#if defined(__GLASGOW_HASKELL__)
    deriving (Typeable)
#endif
-- Instances are thin wrappers around the chunk-aware implementations
-- defined at the bottom of this module ('eq', 'cmp', 'append', 'concat').
instance Eq ByteString where
    (==) = eq

instance Ord ByteString where
    compare = cmp

instance Monoid ByteString where
    mempty = Empty
    mappend = append
    mconcat = concat

-- Forcing a lazy ByteString to normal form only needs to walk the spine;
-- the strict chunks are already fully evaluated.
instance NFData ByteString where
    rnf Empty = ()
    rnf (Chunk _ b) = rnf b

-- Show/Read go through String (debugging aid, not a serialization format).
instance Show ByteString where
    showsPrec p ps r = showsPrec p (unpackChars ps) r

instance Read ByteString where
    readsPrec p str = [ (packChars x, y) | (x, y) <- readsPrec p str ]

#if MIN_VERSION_base(3,0,0)
instance IsString ByteString where
    fromString = packChars
#endif

-- Data instance treats a ByteString as an opaque value rebuilt from its
-- byte list; it deliberately has no real constructor representation.
instance Data ByteString where
  gfoldl f z txt = z packBytes `f` unpackBytes txt
  toConstr _ = error "Data.ByteString.Lazy.ByteString.toConstr"
  gunfold _ _ = error "Data.ByteString.Lazy.ByteString.gunfold"
#if MIN_VERSION_base(4,2,0)
  dataTypeOf _ = mkNoRepType "Data.ByteString.Lazy.ByteString"
#else
  dataTypeOf _ = mkNorepType "Data.ByteString.Lazy.ByteString"
#endif
------------------------------------------------------------------------
-- Packing and unpacking from lists
-- Pack a list of bytes into a lazy ByteString, using geometrically
-- growing chunk sizes: starting at 32 bytes and doubling until
-- 'smallChunkSize' is reached.
packBytes :: [Word8] -> ByteString
packBytes = go 32
  where
    go chunkSize bytes = case S.packUptoLenBytes chunkSize bytes of
      (bs, [])   -> chunk bs Empty
      (bs, rest) -> Chunk bs (go (min (chunkSize * 2) smallChunkSize) rest)
-- Pack a list of Chars into a lazy ByteString with the same doubling
-- chunk-size strategy as 'packBytes'.
packChars :: [Char] -> ByteString
packChars = go 32
  where
    go chunkSize cs = case S.packUptoLenChars chunkSize cs of
      (bs, [])   -> chunk bs Empty
      (bs, rest) -> Chunk bs (go (min (chunkSize * 2) smallChunkSize) rest)
-- Unpack a lazy ByteString into a list of bytes, chunk by chunk.
unpackBytes :: ByteString -> [Word8]
unpackBytes bs = case bs of
  Empty      -> []
  Chunk c cs -> S.unpackAppendBytesLazy c (unpackBytes cs)
-- Unpack a lazy ByteString into a list of Chars, chunk by chunk.
unpackChars :: ByteString -> [Char]
unpackChars bs = case bs of
  Empty      -> []
  Chunk c cs -> S.unpackAppendCharsLazy c (unpackChars cs)
------------------------------------------------------------------------

-- | The data type invariant:
-- Every ByteString is either 'Empty' or consists of non-null 'S.ByteString's.
-- All functions must preserve this, and the QC properties must check this.
--
invariant :: ByteString -> Bool
invariant bs = case bs of
  Empty                   -> True
  Chunk (S.PS _ _ len) cs -> len > 0 && invariant cs
-- | Check the invariant lazily as the string is consumed, failing with
-- 'error' on the first empty chunk encountered.
checkInvariant :: ByteString -> ByteString
checkInvariant Empty = Empty
checkInvariant (Chunk c@(S.PS _ _ len) cs) =
  if len > 0
    then Chunk c (checkInvariant cs)
    else error ("Data.ByteString.Lazy: invariant violation:"
                ++ show (Chunk c cs))
------------------------------------------------------------------------

-- | Smart constructor for 'Chunk'. Guarantees the data type invariant
-- by dropping empty strict chunks instead of consing them.
chunk :: S.ByteString -> ByteString -> ByteString
chunk c@(S.PS _ _ len) cs =
  if len == 0 then cs else Chunk c cs
{-# INLINE chunk #-}
-- | Consume the chunks of a lazy ByteString with a natural right fold.
foldrChunks :: (S.ByteString -> a -> a) -> a -> ByteString -> a
foldrChunks step nil = loop
  where loop Empty        = nil
        loop (Chunk c cs) = step c (loop cs)
{-# INLINE foldrChunks #-}
-- | Consume the chunks of a lazy ByteString with a strict, tail-recursive,
-- accumulating left fold. The accumulator is forced on every step to
-- avoid thunk build-up.
foldlChunks :: (a -> S.ByteString -> a) -> a -> ByteString -> a
foldlChunks step = loop
  where loop !acc Empty        = acc
        loop !acc (Chunk c cs) = loop (step acc c) cs
{-# INLINE foldlChunks #-}
------------------------------------------------------------------------
-- The representation uses lists of packed chunks. When we have to convert from
-- a lazy list to the chunked representation, then by default we use this
-- chunk size. Some functions give you more control over the chunk size.
--
-- Measurements here:
-- http://www.cse.unsw.edu.au/~dons/tmp/chunksize_v_cache.png
--
-- indicate that a value around 0.5 to 1 x your L2 cache is best.
-- The following value assumes people have something greater than 128k,
-- and need to share the cache with other programs.
-- | The chunk size used for I\/O. Currently set to 32k, less the memory management overhead
defaultChunkSize :: Int
defaultChunkSize = 32 * 1024 - chunkOverhead
-- | The recommended chunk size. Currently set to 4k, less the memory management overhead
smallChunkSize :: Int
smallChunkSize = 4 * 1024 - chunkOverhead
-- | The memory management overhead: two machine words per heap object.
-- Currently this is tuned for GHC only. ('sizeOf' never inspects its
-- argument, so a dummy zero is as good as 'undefined'.)
chunkOverhead :: Int
chunkOverhead = 2 * sizeOf (0 :: Int)
------------------------------------------------------------------------
-- Implementations for Eq, Ord and Monoid instances

-- Chunk-boundary-insensitive equality: when the leading chunks differ in
-- length, the shorter one is compared against a prefix of the longer,
-- and the remainder of the longer chunk is pushed back for the next step.
eq :: ByteString -> ByteString -> Bool
eq Empty Empty = True
eq Empty _ = False
eq _ Empty = False
eq (Chunk a as) (Chunk b bs) =
  case compare (S.length a) (S.length b) of
    LT -> a == (S.take (S.length a) b) && eq as (Chunk (S.drop (S.length a) b) bs)
    EQ -> a == b && eq as bs
    GT -> (S.take (S.length b) a) == b && eq (Chunk (S.drop (S.length b) a) as) bs
-- Chunk-boundary-insensitive lexicographic comparison, using the same
-- prefix/push-back scheme as 'eq'.
cmp :: ByteString -> ByteString -> Ordering
cmp Empty Empty = EQ
cmp Empty _ = LT
cmp _ Empty = GT
cmp (Chunk a as) (Chunk b bs) =
  case compare (S.length a) (S.length b) of
    LT -> case compare a (S.take (S.length a) b) of
      EQ -> cmp as (Chunk (S.drop (S.length a) b) bs)
      result -> result
    EQ -> case compare a b of
      EQ -> cmp as bs
      result -> result
    GT -> case compare (S.take (S.length b) a) b of
      EQ -> cmp (Chunk (S.drop (S.length b) a) as) bs
      result -> result
-- Concatenate two lazy ByteStrings by re-consing the first string's
-- chunks onto the second.
append :: ByteString -> ByteString -> ByteString
append xs ys = go xs
  where go Empty        = ys
        go (Chunk c cs) = Chunk c (go cs)
-- Concatenate a list of lazy ByteStrings, walking each string's chunks
-- and moving on to the next string when one is exhausted.
concat :: [ByteString] -> ByteString
concat = next
  where
    walk Empty css        = next css
    walk (Chunk c cs) css = Chunk c (walk cs css)
    next []       = Empty
    next (cs:css) = walk cs css
|
DavidAlphaFox/ghc
|
libraries/bytestring/Data/ByteString/Lazy/Internal.hs
|
bsd-3-clause
| 8,457
| 0
| 18
| 1,947
| 1,986
| 1,071
| 915
| 126
| 6
|
{-# LANGUAGE TemplateHaskell #-}
module ErrorHandlingStaged where
import Control.Applicative
import Data.Foldable (foldl')
import Language.Haskell.TH
import Language.Haskell.TH.Syntax
import Prelude hiding (div)
import qualified Prelude as P
import Symantics
-- Staged interpreter with error handling: a code generator that takes
-- two name-to-quoted-expression environments -- one for values, one for
-- functions -- and produces a quoted expression.
-- NOTE(review): the generated code runs in a failure monad (it uses
-- 'Just' and 'fail'); presumably Maybe -- confirm against Symantics.
newtype ErrorHandlingStaged = EHS
  { unEHS :: (String -> ExpQ) -> (String -> ExpQ) -> ExpQ
  }
instance SymExp ErrorHandlingStaged where
  -- Integer literals always succeed.
  int n = EHS $ \_ _ -> [| Just (n :: Int) |]
  -- Variables are resolved at code-generation time via the environment.
  var s = EHS $ \env _ -> [| return $(env s) |]
  -- Function application: evaluate the argument, then apply the
  -- function looked up in the function environment.
  app s (EHS e) = EHS $ \env fenv -> [| do
    x <- $(e env fenv)
    $(fenv s) x
    |]
  -- Arithmetic lifts (+), (-), (*) over the failure monad.
  add (EHS e1) (EHS e2) = EHS $ \env fenv ->
    [| liftA2 (+) $(e1 env fenv) $(e2 env fenv) |]
  sub (EHS e1) (EHS e2) = EHS $ \env fenv ->
    [| liftA2 (-) $(e1 env fenv) $(e2 env fenv) |]
  mul (EHS e1) (EHS e2) = EHS $ \env fenv ->
    [| liftA2 (*) $(e1 env fenv) $(e2 env fenv) |]
  -- Division checks the divisor for zero BEFORE evaluating the
  -- numerator, failing with "Zero division".
  div (EHS e1) (EHS e2) = EHS $ \env fenv -> [| do
    x2 <- $(e2 env fenv)
    if x2 == 0
      then fail "Zero division"
      else do
        x1 <- $(e1 env fenv)
        return $ x1 `P.div` x2
    |]
  -- if-zero: evaluate the scrutinee, then exactly one branch.
  ifz (EHS e1) (EHS e2) (EHS e3) =
    EHS $ \env fenv -> [| do
      x <- $(e1 env fenv)
      if x == 0 then $(e2 env fenv) else $(e3 env fenv)
      |]
instance SymDecl ErrorHandlingStaged where
  -- Generate a (possibly recursive) local function binding: the body e1
  -- is generated with s2 bound to the parameter and s1 bound to the
  -- function itself, so self-reference works; the continuation e sees
  -- only the function binding.
  declaration s1 s2 (EHS e1) (EHS e) = EHS $ \env fenv -> [|
    let f x = $(e1 (ext env s2 [| x |]) (ext fenv s1 [| f |]))
    in $(e env (ext fenv s1 [| f |]))
    |]
instance SymProg ErrorHandlingStaged where
  -- Thread the expression through each declaration, left to right.
  program ds e = foldl' (\acc d -> d acc) e ds
-- Run the generator with the initial environments. env0/fenv0 come from
-- Symantics; presumably they report unbound names -- TODO confirm.
evalEHS :: ErrorHandlingStaged -> ExpQ
evalEHS (EHS f) = f env0 fenv0
-- Generate the code and pretty-print it to stdout.
dumpEHS :: ErrorHandlingStaged -> IO ()
dumpEHS staged = runQ (fmap pprint (evalEHS staged)) >>= putStrLn
|
maoe/MSP
|
ErrorHandlingStaged.hs
|
bsd-3-clause
| 1,737
| 2
| 11
| 456
| 553
| 311
| 242
| 44
| 1
|
module Main
( main -- :: IO ()
) where
import Control.Applicative
import Control.Monad
import Data.List
import System.Directory
import System.FilePath
import Test.DocTest
-- Run doctest over every discovered source file, adding src to the
-- include path so modules resolve.
main :: IO ()
main = allSources >>= \sources -> doctest ("-isrc":sources)
-- All Haskell sources under src, plus compiled objects from the cbits
-- build directory.
allSources :: IO [FilePath]
allSources = (++) <$> getFiles ".hs" "src"
                  <*> getFiles ".o" "dist/build/src/cbits"
-- Every file under @root@ (searched recursively) whose name ends in
-- @ext@.
getFiles :: String -> FilePath -> IO [FilePath]
getFiles ext root = filter (ext `isSuffixOf`) <$> walk root
  where
    -- depth-first walk returning all files in this directory and below
    walk dir = do
      (subdirs, files) <- getFilesAndDirectories dir
      nested <- mapM walk subdirs
      return (files ++ concat nested)
-- Split a directory's contents into (subdirectories, files), skipping
-- the "." and ".." entries; paths are returned prefixed with @dir@.
getFilesAndDirectories :: FilePath -> IO ([FilePath], [FilePath])
getFilesAndDirectories dir = do
  entries <- getDirectoryContents dir
  let paths = [dir </> e | e <- entries, e `notElem` ["..", "."]]
  dirs <- filterM doesDirectoryExist paths
  files <- filterM doesFileExist paths
  return (dirs, files)
|
thoughtpolice/hs-nacl
|
tests/doctests.hs
|
bsd-3-clause
| 933
| 0
| 12
| 243
| 302
| 161
| 141
| 22
| 1
|
-----------------------------------------------------------------------------
-- |
-- Module : Control.Monad.Trans.Writer.Strict
-- Copyright : (c) Andy Gill 2001,
-- (c) Oregon Graduate Institute of Science and Technology, 2001
-- License : BSD-style (see the file LICENSE)
--
-- Maintainer : ross@soi.city.ac.uk
-- Stability : experimental
-- Portability : portable
--
-- The strict 'WriterT' monad transformer, which adds collection of
-- outputs (such as a count or string output) to a given monad.
--
-- This version builds its output strictly; for a lazy version, see
-- "Control.Monad.Trans.Writer.Lazy", which has the same interface.
--
-- This monad transformer provides only limited access to the output
-- during the computation. For more general access, use
-- "Control.Monad.Trans.State" instead.
-----------------------------------------------------------------------------
module Control.Monad.Trans.Writer.Strict (
-- * The Writer monad
Writer,
writer,
runWriter,
execWriter,
mapWriter,
-- * The WriterT monad transformer
WriterT(..),
execWriterT,
mapWriterT,
-- * Writer operations
tell,
listen,
listens,
pass,
censor,
-- * Lifting other operations
liftCallCC,
liftCatch,
) where
import Control.Monad.IO.Class
import Control.Monad.Trans.Class
import Data.Functor.Identity
import Control.Applicative
import Control.Monad
import Control.Monad.Fix
import Data.Monoid
-- ---------------------------------------------------------------------------
-- | A writer monad parameterized by the type @w@ of output to accumulate.
--
-- The 'return' function produces the output 'mempty', while @>>=@
-- combines the outputs of the subcomputations using 'mappend'.
--
-- This is 'WriterT' specialized to the 'Identity' base monad.
type Writer w = WriterT w Identity
-- | Build a writer computation directly from a (result, output) pair.
-- (The inverse of 'runWriter'.)
writer :: (a, w) -> Writer w a
writer pair = WriterT (Identity pair)
-- | Run a writer computation, yielding its (result, output) pair.
-- (The inverse of 'writer'.)
runWriter :: Writer w a -> (a, w)
runWriter m = runIdentity (runWriterT m)
-- | The accumulated output of a writer computation.
--
-- * @'execWriter' m = 'snd' ('runWriter' m)@
execWriter :: Writer w a -> w
execWriter = snd . runWriter
-- | Transform both the result and the output of a computation with the
-- given function.
--
-- * @'runWriter' ('mapWriter' f m) = f ('runWriter' m)@
mapWriter :: ((a, w) -> (b, w')) -> Writer w a -> Writer w' b
mapWriter f = mapWriterT (\i -> Identity (f (runIdentity i)))
-- ---------------------------------------------------------------------------
-- | A writer monad parameterized by:
--
-- * @w@ - the output to accumulate.
--
-- * @m@ - The inner monad.
--
-- The 'return' function produces the output 'mempty', while @>>=@
-- combines the outputs of the subcomputations using 'mappend'.
newtype WriterT w m a = WriterT { runWriterT :: m (a, w) }
-- | Extract the output from a writer computation, discarding the result.
--
-- * @'execWriterT' m = 'liftM' 'snd' ('runWriterT' m)@
execWriterT :: Monad m => WriterT w m a -> m w
execWriterT m = do
    (_, w) <- runWriterT m
    return w
-- | Map both the return value and output of a computation using
-- the given function on the inner monadic action.
--
-- * @'runWriterT' ('mapWriterT' f m) = f ('runWriterT' m)@
mapWriterT :: (m (a, w) -> n (b, w')) -> WriterT w m a -> WriterT w' n b
mapWriterT f m = WriterT $ f (runWriterT m)
-- Note: this is the *strict* WriterT -- the pair patterns below are
-- ordinary (strict) patterns, so each step forces the (result, output)
-- pair to WHNF.
instance (Functor m) => Functor (WriterT w m) where
    fmap f = mapWriterT $ fmap $ \ (a, w) -> (f a, w)
instance (Monoid w, Applicative m) => Applicative (WriterT w m) where
    pure a = WriterT $ pure (a, mempty)
    f <*> v = WriterT $ liftA2 k (runWriterT f) (runWriterT v)
      where k (a, w) (b, w') = (a b, w `mappend` w')
instance (Monoid w, Alternative m) => Alternative (WriterT w m) where
    empty = WriterT empty
    m <|> n = WriterT $ runWriterT m <|> runWriterT n
instance (Monoid w, Monad m) => Monad (WriterT w m) where
    -- return yields the empty output; >>= mappends the two outputs.
    return a = WriterT $ return (a, mempty)
    m >>= k = WriterT $ do
        (a, w) <- runWriterT m
        (b, w') <- runWriterT (k a)
        return (b, w `mappend` w')
    fail msg = WriterT $ fail msg
instance (Monoid w, MonadPlus m) => MonadPlus (WriterT w m) where
    mzero = WriterT mzero
    m `mplus` n = WriterT $ runWriterT m `mplus` runWriterT n
instance (Monoid w, MonadFix m) => MonadFix (WriterT w m) where
    -- the lazy (~) pattern is required here: the knot-tied value must
    -- not be forced before the fixpoint is available
    mfix m = WriterT $ mfix $ \ ~(a, _) -> runWriterT (m a)
instance (Monoid w) => MonadTrans (WriterT w) where
    -- lifted actions contribute no output
    lift m = WriterT $ do
        a <- m
        return (a, mempty)
instance (Monoid w, MonadIO m) => MonadIO (WriterT w m) where
    liftIO = lift . liftIO
-- | @'tell' w@ is an action that produces the output @w@ and returns @()@.
tell :: (Monoid w, Monad m) => w -> WriterT w m ()
tell w = WriterT $ return ((), w)
-- | @'listen' m@ is an action that executes the action @m@ and adds its
-- output to the value of the computation (the output itself is still
-- accumulated as usual).
--
-- * @'runWriterT' ('listen' m) = 'liftM' (\\(a, w) -> ((a, w), w)) ('runWriterT' m)@
listen :: (Monoid w, Monad m) => WriterT w m a -> WriterT w m (a, w)
listen m = WriterT $ do
    (a, w) <- runWriterT m
    return ((a, w), w)
-- | @'listens' f m@ executes the action @m@ and additionally returns
-- the result of applying @f@ to its output.
--
-- * @'listens' f m = 'liftM' (id *** f) ('listen' m)@
--
-- * @'runWriterT' ('listens' f m) = 'liftM' (\\(a, w) -> ((a, f w), w)) ('runWriterT' m)@
listens :: (Monoid w, Monad m) => (w -> b) -> WriterT w m a -> WriterT w m (a, b)
listens f m = liftM (\ (a, w) -> (a, f w)) (listen m)
-- | @'pass' m@ is an action that executes the action @m@, which returns
-- a value and a function, and returns the value, applying the function
-- to the output.
--
-- * @'runWriterT' ('pass' m) = 'liftM' (\\((a, f), w) -> (a, f w)) ('runWriterT' m)@
pass :: (Monoid w, Monad m) => WriterT w m (a, w -> w) -> WriterT w m a
pass m = WriterT $ do
    ((a, f), w) <- runWriterT m
    return (a, f w)
-- | @'censor' f m@ executes the action @m@ and applies @f@ to its
-- accumulated output, leaving the return value unchanged.
--
-- * @'censor' f m = 'pass' ('liftM' (\\x -> (x,f)) m)@
--
-- * @'runWriterT' ('censor' f m) = 'liftM' (\\(a, w) -> (a, f w)) ('runWriterT' m)@
censor :: (Monoid w, Monad m) => (w -> w) -> WriterT w m a -> WriterT w m a
censor f m = pass (liftM (\ x -> (x, f)) m)
-- | Lift a @callCC@ operation to the new monad.
-- The captured continuation restores an empty ('mempty') output.
liftCallCC :: (Monoid w) => ((((a,w) -> m (b,w)) -> m (a,w)) -> m (a,w)) ->
    ((a -> WriterT w m b) -> WriterT w m a) -> WriterT w m a
liftCallCC callCC f = WriterT $
    callCC $ \c ->
    runWriterT (f (\a -> WriterT $ c (a, mempty)))
-- | Lift a @catchError@ operation to the new monad.
-- Output accumulated before the error is handled by the underlying
-- @catchError@; the handler starts from its own output.
liftCatch :: (m (a,w) -> (e -> m (a,w)) -> m (a,w)) ->
    WriterT w m a -> (e -> WriterT w m a) -> WriterT w m a
liftCatch catchError m h =
    WriterT $ runWriterT m `catchError` \e -> runWriterT (h e)
|
ekmett/transformers
|
Control/Monad/Trans/Writer/Strict.hs
|
bsd-3-clause
| 7,006
| 0
| 14
| 1,540
| 1,837
| 1,009
| 828
| 93
| 1
|
{-# LANGUAGE FlexibleContexts, OverloadedStrings, DeriveDataTypeable #-}
module Test.WebDriver.Internal
( mkWDUri, mkRequest
, handleHTTPErr, handleJSONErr, handleHTTPResp
, WDResponse(..)
, InvalidURL(..), HTTPStatusUnknown(..), HTTPConnError(..)
, UnknownCommand(..), ServerError(..)
, FailedCommand(..), failedCommand, mkFailedCommandInfo
, FailedCommandType(..), FailedCommandInfo(..), StackFrame(..)
) where
import Test.WebDriver.Classes
import Test.WebDriver.JSON
import Network.HTTP (simpleHTTP, Request(..), Response(..))
import Network.HTTP.Headers (findHeader, Header(..), HeaderName(..))
import Network.Stream (ConnError)
import Network.URI
import Data.Aeson
import Data.Aeson.Types (Parser, typeMismatch)
import Data.Text as T (Text, unpack, splitOn, null)
import Data.ByteString.Lazy.Char8 (ByteString)
import Data.ByteString.Lazy.Char8 as LBS (length, unpack, null)
import qualified Data.ByteString.Base64.Lazy as B64
import Control.Monad.Base
import Control.Exception.Lifted (throwIO)
import Control.Applicative
import Control.Exception (Exception)
import Data.Typeable (Typeable)
import Data.List (isInfixOf)
import Data.Maybe (fromMaybe)
import Data.String (fromString)
import Data.Word (Word, Word8)
-- Build the absolute URI for a WebDriver endpoint from the session's
-- host, port and base path plus the command-specific path suffix.
-- Throws 'InvalidURL' when either the base or the relative part fails
-- to parse.
mkWDUri :: (SessionState s) => String -> s URI
mkWDUri wdPath = do
  WDSession{wdHost = host,
            wdPort = port,
            wdBasePath = basePath
           } <- getSession
  let urlStr = "http://" ++ host ++ ":" ++ show port
      relPath = basePath ++ wdPath
      mBaseURI = parseAbsoluteURI urlStr
      mRelURI = parseRelativeReference relPath
  case (mBaseURI, mRelURI) of
    (Nothing, _) -> throwIO $ InvalidURL urlStr
    (_, Nothing) -> throwIO $ InvalidURL relPath
    (Just baseURI, Just relURI) -> return $ relURI `relativeTo` baseURI
-- Issue an HTTP request against the WebDriver server: JSON-encode the
-- arguments (Null means "no body"), attach JSON Accept/Content-Type
-- headers plus Content-Length, send it, and record the request in the
-- session for later error reporting. Connection failures are rethrown
-- as 'HTTPConnError'.
mkRequest :: (SessionState s, ToJSON a) =>
             [Header] -> RequestMethod -> Text -> a -> s (Response ByteString)
mkRequest headers method wdPath args = do
  uri <- mkWDUri (T.unpack wdPath)
  let body = case toJSON args of
        Null -> "" --passing Null as the argument indicates no request body
        other -> encode other
      req = Request { rqURI = uri --todo: normalization of headers
                    , rqMethod = method
                    , rqBody = body
                    , rqHeaders = headers ++ [ Header HdrAccept
                                               "application/json;charset=UTF-8"
                                             , Header HdrContentType
                                               "application/json;charset=UTF-8"
                                             , Header HdrContentLength
                                               . show . LBS.length $ body
                                             ]
                    }
  r <- liftBase (simpleHTTP req) >>= either (throwIO . HTTPConnError) return
  modifySession $ \s -> s {lastHTTPRequest = Just req} -- update lastHTTPRequest field
  return r
-- Inspect the HTTP status of a server response and throw the matching
-- exception: 4xx -> 'UnknownCommand' (showing the last request when
-- available); 5xx with a JSON body -> delegate to 'handleJSONErr',
-- otherwise 'ServerError'; 2xx and 302/303 redirects are accepted;
-- anything else -> 'HTTPStatusUnknown'.
handleHTTPErr :: SessionState s => Response ByteString -> s ()
handleHTTPErr r@Response{rspBody = body, rspCode = code, rspReason = reason} =
  case code of
    (4,_,_) -> do
      lastReq <- lastHTTPRequest <$> getSession
      throwIO . UnknownCommand . maybe reason show
        $ lastReq
    (5,_,_) ->
      case findHeader HdrContentType r of
        Just ct
          | "application/json;" `isInfixOf` ct -> parseJSON' body
                                                  >>= handleJSONErr
          | otherwise -> err ServerError
        Nothing ->
          err (ServerError . ("Missing content type. Server response: "++))
    (2,_,_) -> return ()
    (3,0,x) | x `elem` [2,3]
            -> return ()
    _ -> err (HTTPStatusUnknown code)
  where
    -- wrap the HTTP reason phrase in the chosen exception constructor
    err errType = throwIO $ errType reason
-- Turn a server response into a value of the caller's expected type:
-- 204 -> parse Null (no content); 302/303 -> pull the new session ID
-- out of the Location header and store it; otherwise parse the body as
-- a 'WDResponse', reconcile session IDs, and decode its value field.
handleHTTPResp :: (SessionState s, FromJSON a) => Response ByteString -> s a
handleHTTPResp resp@Response{rspBody = body, rspCode = code} =
  case code of
    (2,0,4) -> noReturn
    (3,0,x)
      | x `elem` [2,3] ->
        case findHeader HdrLocation resp of
          Nothing -> throwIO . HTTPStatusUnknown code
                     $ (LBS.unpack body)
          Just loc -> do
            -- session ID is the last non-empty path segment of Location
            let sessId = last . filter (not . T.null) . splitOn "/" . fromString $ loc
            modifySession $ \sess -> sess {wdSessId = Just (SessionId sessId)}
            fromJSON' . String $ sessId
    _
      | LBS.null body -> noReturn
      | otherwise -> do
          sess@WDSession { wdSessId = sessId} <- getSession -- get current session state
          WDResponse { rspSessId = sessId'
                     , rspVal = val} <- parseJSON' body -- parse response body
          case (sessId, (==) <$> sessId <*> sessId') of
            -- if our monad has an uninitialized session ID, initialize it from the response object
            (Nothing, _) -> putSession sess { wdSessId = sessId' }
            -- if the response ID doesn't match our local ID, throw an error.
            (_, Just False) -> throwIO . ServerError $ "Server response session ID (" ++ show sessId'
                               ++ ") does not match local session ID (" ++ show sessId ++ ")"
            -- otherwise nothing needs to be done
            _ -> return ()
          fromJSON' val
  where
    -- a "no content" response still decodes Null into the target type
    noReturn = fromJSON' Null
-- Map a non-zero WebDriver status code in a JSON response to the
-- corresponding 'FailedCommand' exception, filling in the current
-- session and decoding any attached base64 screenshot. Status 0 means
-- success; status 9 is reported as 'UnknownCommand'.
handleJSONErr :: SessionState s => WDResponse -> s ()
handleJSONErr WDResponse{rspStatus = 0} = return ()
handleJSONErr WDResponse{rspVal = val, rspStatus = status} = do
  sess <- getSession
  errInfo <- fromJSON' val
  let screen = B64.decodeLenient <$> errScreen errInfo
      -- attach the live session (the parsed errSess is a placeholder)
      errInfo' = errInfo { errSess = sess
                         , errScreen = screen }
      e errType = throwIO $ FailedCommand errType errInfo'
  case status of
    7 -> e NoSuchElement
    8 -> e NoSuchFrame
    9 -> throwIO . UnknownCommand . errMsg $ errInfo
    10 -> e StaleElementReference
    11 -> e ElementNotVisible
    12 -> e InvalidElementState
    13 -> e UnknownError
    15 -> e ElementIsNotSelectable
    17 -> e JavascriptError
    19 -> e XPathLookupError
    21 -> e Timeout
    23 -> e NoSuchWindow
    24 -> e InvalidCookieDomain
    25 -> e UnableToSetCookie
    26 -> e UnexpectedAlertOpen
    27 -> e NoAlertOpen
    28 -> e ScriptTimeout
    29 -> e InvalidElementCoordinates
    30 -> e IMENotAvailable
    31 -> e IMEEngineActivationFailed
    32 -> e InvalidSelector
    34 -> e MoveTargetOutOfBounds
    51 -> e InvalidXPathSelector
    52 -> e InvalidXPathSelectorReturnType
    405 -> e MethodNotAllowed
    -- any unrecognized code degrades to UnknownError
    _ -> e UnknownError
-- |Internal type representing the JSON response object: the session ID
-- (if any), the numeric wire-protocol status, and the payload value.
data WDResponse = WDResponse { rspSessId :: Maybe SessionId
                             , rspStatus :: Word8
                             , rspVal :: Value
                             }
                  deriving (Eq, Show)
instance FromJSON WDResponse where
  -- "sessionId" and "value" are optional (defaulting to Nothing/Null);
  -- "status" is required.
  parseJSON (Object o) = WDResponse <$> o .:? "sessionId" .!= Nothing
                                    <*> o .: "status"
                                    <*> o .:? "value" .!= Null
  parseJSON v = typeMismatch "WDResponse" v
-- Exception hierarchy for the WebDriver client. Each type below is an
-- instance of 'Exception' (the instance heads precede the data
-- declarations, which is legal in Haskell).
instance Exception InvalidURL
-- |An invalid URL was given
newtype InvalidURL = InvalidURL String
                deriving (Eq, Show, Typeable)
instance Exception HTTPStatusUnknown
-- |An unexpected HTTP status was sent by the server.
data HTTPStatusUnknown = HTTPStatusUnknown (Int, Int, Int) String
                       deriving (Eq, Show, Typeable)
instance Exception HTTPConnError
-- |HTTP connection errors.
newtype HTTPConnError = HTTPConnError ConnError
                     deriving (Eq, Show, Typeable)
instance Exception UnknownCommand
-- |A command was sent to the WebDriver server that it didn't recognize.
newtype UnknownCommand = UnknownCommand String
                    deriving (Eq, Show, Typeable)
instance Exception ServerError
-- |A server-side exception occured
newtype ServerError = ServerError String
                      deriving (Eq, Show, Typeable)
instance Exception FailedCommand
-- |This exception encapsulates a broad variety of exceptions that can
-- occur when a command fails.
data FailedCommand = FailedCommand FailedCommandType FailedCommandInfo
                   deriving (Show, Typeable)
-- |The type of failed command exception that occured.
-- The constructors correspond to the numeric wire-protocol status
-- codes (see 'handleJSONErr' for the mapping).
data FailedCommandType = NoSuchElement
                       | NoSuchFrame
                       | UnknownFrame
                       | StaleElementReference
                       | ElementNotVisible
                       | InvalidElementState
                       | UnknownError
                       | ElementIsNotSelectable
                       | JavascriptError
                       | XPathLookupError
                       | Timeout
                       | NoSuchWindow
                       | InvalidCookieDomain
                       | UnableToSetCookie
                       | UnexpectedAlertOpen
                       | NoAlertOpen
                       | ScriptTimeout
                       | InvalidElementCoordinates
                       | IMENotAvailable
                       | IMEEngineActivationFailed
                       | InvalidSelector
                       | MoveTargetOutOfBounds
                       | InvalidXPathSelector
                       | InvalidXPathSelectorReturnType
                       | MethodNotAllowed
                       deriving (Eq, Ord, Enum, Bounded, Show)
-- |Detailed information about the failed command provided by the server.
data FailedCommandInfo =
  FailedCommandInfo { -- |The error message.
                      errMsg :: String
                      -- |The session associated with
                      -- the exception.
                    , errSess :: WDSession
                      -- |A screen shot of the focused window
                      -- when the exception occured,
                      -- if provided.
                    , errScreen :: Maybe ByteString
                      -- |The "class" in which the exception
                      -- was raised, if provided.
                    , errClass :: Maybe String
                      -- |A stack trace of the exception.
                    , errStack :: [StackFrame]
                    }
-- |Provides a readable printout of the error information, useful for
-- logging. Format: session line, then "class: message", then the
-- indented stack trace.
instance Show FailedCommandInfo where
  show i = showChar '\n'
           . showString "Session: " . sess
           . showChar '\n'
           . showString className . showString ": " . showString (errMsg i)
           . showChar '\n'
           . foldl (\f s-> f . showString "  " . shows s) id (errStack i)
           $ ""
    where
      className = fromMaybe "<unknown exception>" . errClass $ i
      -- "<id> at <host>:<port>" for the associated session
      sess = showString sessId . showString " at "
             . showString host . showChar ':' . shows port
        where
          sessId = case msid of
            Just (SessionId sid) -> T.unpack sid
            Nothing -> "<no session id>"
          WDSession {wdHost = host, wdPort = port, wdSessId = msid } = errSess i
-- |Constructs a FailedCommandInfo from only an error message, filling
-- in the current session and leaving every other field empty.
mkFailedCommandInfo :: SessionState s => String -> s FailedCommandInfo
mkFailedCommandInfo m = do
  sess <- getSession
  return $ FailedCommandInfo {errMsg = m , errSess = sess , errScreen = Nothing
                             , errClass = Nothing , errStack = [] }
-- |Convenience function to throw a 'FailedCommand' locally, with no
-- server-side info beyond the given message.
failedCommand :: SessionState s => FailedCommandType -> String -> s a
failedCommand t m = mkFailedCommandInfo m >>= throwIO . FailedCommand t
-- |An individual stack frame from the stack trace provided by the server
-- during a FailedCommand.
data StackFrame = StackFrame { sfFileName :: String
                             , sfClassName :: String
                             , sfMethodName :: String
                             , sfLineNumber :: Word
                             }
                deriving (Eq)
-- Renders a frame as "Class.method (file:line)" followed by a newline.
instance Show StackFrame where
  show f = showString (sfClassName f) . showChar '.'
           . showString (sfMethodName f) . showChar ' '
           . showParen True ( showString (sfFileName f) . showChar ':'
                              . shows (sfLineNumber f))
           $ "\n"
instance FromJSON FailedCommandInfo where
  -- NOTE(review): errSess is parsed as 'pure undefined' -- a
  -- deliberate placeholder, since 'handleJSONErr' overwrites errSess
  -- with the live session before the value is ever forced. A null
  -- "message" is normalized to the empty string.
  parseJSON (Object o) =
    FailedCommandInfo <$> (req "message" >>= maybe (return "") return)
                      <*> pure undefined
                      <*> opt "screen" Nothing
                      <*> opt "class" Nothing
                      <*> opt "stackTrace" []
    where req :: FromJSON a => Text -> Parser a
          req = (o .:) --required key
          opt :: FromJSON a => Text -> a -> Parser a
          opt k d = o .:? k .!= d --optional key
  parseJSON v = typeMismatch "FailedCommandInfo" v
instance FromJSON StackFrame where
  -- all four fields are required; null strings are normalized to ""
  parseJSON (Object o) = StackFrame <$> reqStr "fileName"
                                    <*> reqStr "className"
                                    <*> reqStr "methodName"
                                    <*> req "lineNumber"
    where req :: FromJSON a => Text -> Parser a
          req = (o .:) -- all keys are required
          reqStr :: Text -> Parser String
          reqStr k = req k >>= maybe (return "") return
  parseJSON v = typeMismatch "StackFrame" v
|
fpco/hs-webdriver
|
src/Test/WebDriver/Internal.hs
|
bsd-3-clause
| 13,500
| 0
| 24
| 4,700
| 3,138
| 1,667
| 1,471
| 260
| 26
|
-- xmonad config used by Cedric Fung on 15" rMBP
-- Origin: Vic Fryzel (http://github.com/vicfryzel/xmonad-config)
-- Modifier: Cedric Fung (http://github.com/vec.io/.linux/xmonad)
import System.IO
import System.Exit
import XMonad
import XMonad.Hooks.DynamicLog
import XMonad.Hooks.ManageDocks
import XMonad.Hooks.ManageHelpers
import XMonad.Hooks.SetWMName
import XMonad.Layout.Fullscreen
import XMonad.Layout.NoBorders
import XMonad.Layout.Spiral
import XMonad.Layout.Tabbed
import XMonad.Util.Run(spawnPipe)
import XMonad.Util.EZConfig(additionalKeys)
import qualified XMonad.StackSet as W
import qualified Data.Map as M
------------------------------------------------------------------------
-- Terminal
-- The preferred terminal program, which is used in a binding below and by
-- certain contrib modules.
--
myTerminal = "/usr/bin/terminator"
------------------------------------------------------------------------
-- Workspaces
-- The default number of workspaces (virtual screens) and their names.
-- Workspace names are also the targets of the doShift rules below.
--
myWorkspaces = ["1:Term","2:Web","3:Code","4:VM","5:Music","6:Video","7:Chat","8:Art","9:BTC"]
------------------------------------------------------------------------
-- Window rules
-- Execute arbitrary actions and WindowSet manipulations when managing
-- a new window. You can use this to, for example, always float a
-- particular program, or have a client always appear on a particular
-- workspace.
--
-- To find the property name associated with a program, use
-- > xprop | grep WM_CLASS
-- and click on the client you're interested in.
--
-- To match on the WM_NAME, you can use 'title' in the same way that
-- 'className' and 'resource' are used below.
--
myManageHook = composeAll
    [ className =? "Firefox" --> doShift "2:Web"
    , className =? "jetbrains-android-studio" --> doShift "3:Code"
    , className =? "Eclipse" --> doShift "3:Code"
    , className =? "VirtualBox" --> doShift "4:VM"
    , stringProperty "WM_NAME" =? "ncmpc" --> doShift "5:Music"
    , className =? "MPlayer" --> doShift "6:Video"
    , className =? "Pidgin" --> doShift "7:Chat"
    , className =? "Skype" --> doShift "7:Chat"
    , className =? "Gimp" --> doShift "8:Art"
    , className =? "Bitcoin-qt" --> doShift "9:BTC"
    , className =? "Screenkey" --> doIgnore
    -- fullscreen windows float over everything without stealing focus
    , isFullscreen --> (doF W.focusDown <+> doFullFloat)]
------------------------------------------------------------------------
-- Layouts
-- You can specify and transform your layouts by modifying these values.
-- If you change layout bindings be sure to use 'mod-shift-space' after
-- restarting (with 'mod-q') to reset your layout state to the new
-- defaults, as xmonad preserves your old layout settings by default.
--
-- The available layouts. Note that each layout is separated by |||,
-- which denotes layout choice.
--
-- The final fullscreen layout sits OUTSIDE avoidStruts, so it covers
-- the status bar as well.
myLayout = avoidStruts (
    tabbed shrinkText tabConfig |||
    Tall 1 (3/100) (1/2) |||
    Mirror (Tall 1 (3/100) (1/2)) |||
    Full) |||
    noBorders (fullscreenFull Full)
------------------------------------------------------------------------
-- Colors and borders
-- Currently based on the ir_black theme.
--
myNormalBorderColor = "#7c7c7c"
myFocusedBorderColor = "#ffb6b0"
-- Colors for text and backgrounds of each tab when in "Tabbed" layout.
tabConfig = defaultTheme {
    activeBorderColor = "#7C7C7C",
    activeTextColor = "#CEFFAC",
    activeColor = "#333333",
    inactiveBorderColor = "#7C7C7C",
    inactiveTextColor = "#EEEEEE",
    inactiveColor = "#000000",
    fontName = "xft:WenQuanYi Micro Hei:size=8:bold:antialias=true",
    -- tall tab decorations -- presumably sized for the rMBP's HiDPI
    -- display (see file header); confirm before reusing elsewhere
    decoHeight = 36
}
-- Color of current window title in xmobar.
xmobarTitleColor = "#FFB6B0"
-- Color of current workspace in xmobar.
xmobarCurrentWorkspaceColor = "#CEFFAC"
-- Width of the window border in pixels.
myBorderWidth = 1
------------------------------------------------------------------------
-- Key bindings
--
-- modMask lets you specify which modkey you want to use. The default
-- is mod1Mask ("left alt"). You may also consider using mod3Mask
-- ("right alt"), which does not conflict with emacs keybindings. The
-- "windows key" is usually mod4Mask.
--
myModMask = mod1Mask
-- The full key-binding table: custom bindings first, then the standard
-- xmonad bindings, then the generated workspace-switching (mod-1..9)
-- and screen-switching (mod-{w,e,r}) bindings.
myKeys conf@(XConfig {XMonad.modMask = modMask}) = M.fromList $
  ----------------------------------------------------------------------
  -- Custom key bindings
  --
  -- Start a terminal. Terminal to start is specified by myTerminal variable.
  [ ((modMask .|. shiftMask, xK_Return),
     spawn $ XMonad.terminal conf)
  -- Lock the screen using xscreensaver.
  , ((modMask .|. controlMask, xK_l),
     spawn "xscreensaver-command -lock")
  -- Launch dmenu via yeganesh.
  -- Use this to launch programs without a key binding.
  , ((modMask, xK_p),
     spawn "exe=`dmenu_path_c | yeganesh` && eval \"exec $exe\"")
  -- Take a screenshot in select mode.
  -- After pressing this key binding, click a window, or draw a rectangle with
  -- the mouse.
  , ((mod4Mask, xK_p),
     spawn "screenshot-select")
  -- Take full screenshot in multi-head mode.
  -- That is, take a screenshot of everything you see.
  , ((mod4Mask .|. shiftMask, xK_p),
     spawn "screenshot")
  -- Volume keys are intentionally disabled (commented out) below.
  -- Mute volume.
  {-, ((0, 0x1008ff12),-}
    {-spawn "amixer -q set Master toggle")-}
  -- Decrease volume.
  {-, ((0, 0x1008ff11),-}
    {-spawn "amixer -q set Master 6%-")-}
  -- Increase volume.
  {-, ((0, 0x1008ff13),-}
    {-spawn "amixer -q set Master 6%+")-}
  -- Audio previous.
  , ((0, 0x1008FF16),
     spawn "/usr/bin/mpc prev")
  -- Play/pause.
  , ((0, 0x1008FF14),
     spawn "/usr/bin/mpc toggle")
  -- Audio next.
  , ((0, 0x1008FF17),
     spawn "/usr/bin/mpc next")
  -- Eject CD tray.
  , ((0, 0x1008FF2C),
     spawn "eject -T")
  --------------------------------------------------------------------
  -- "Standard" xmonad key bindings
  --
  -- Close focused window.
  , ((modMask .|. shiftMask, xK_c),
     kill)
  -- Cycle through the available layout algorithms.
  , ((modMask, xK_space),
     sendMessage NextLayout)
  -- Reset the layouts on the current workspace to default.
  , ((modMask .|. shiftMask, xK_space),
     setLayout $ XMonad.layoutHook conf)
  -- Resize viewed windows to the correct size.
  , ((modMask, xK_n),
     refresh)
  -- Move focus to the next window.
  , ((modMask, xK_Tab),
     windows W.focusDown)
  -- Move focus to the next window.
  , ((modMask, xK_j),
     windows W.focusDown)
  -- Move focus to the previous window.
  , ((modMask, xK_k),
     windows W.focusUp  )
  -- Move focus to the master window.
  , ((modMask, xK_m),
     windows W.focusMaster  )
  -- Swap the focused window and the master window.
  , ((modMask, xK_Return),
     windows W.swapMaster)
  -- Swap the focused window with the next window.
  , ((modMask .|. shiftMask, xK_j),
     windows W.swapDown  )
  -- Swap the focused window with the previous window.
  , ((modMask .|. shiftMask, xK_k),
     windows W.swapUp    )
  -- Shrink the master area.
  , ((modMask, xK_h),
     sendMessage Shrink)
  -- Expand the master area.
  , ((modMask, xK_l),
     sendMessage Expand)
  -- Push window back into tiling.
  , ((modMask, xK_t),
     withFocused $ windows . W.sink)
  -- Increment the number of windows in the master area.
  , ((modMask, xK_comma),
     sendMessage (IncMasterN 1))
  -- Decrement the number of windows in the master area.
  , ((modMask, xK_period),
     sendMessage (IncMasterN (-1)))
  -- Toggle the status bar gap.
  -- TODO: update this binding with avoidStruts, ((modMask, xK_b),
  -- Quit xmonad.
  , ((modMask .|. shiftMask, xK_q),
     io (exitWith ExitSuccess))
  -- Restart xmonad.
  , ((modMask, xK_q),
     restart "xmonad" True)
  ]
  ++
  -- mod-[1..9], Switch to workspace N
  -- mod-shift-[1..9], Move client to workspace N
  [((m .|. modMask, k), windows $ f i)
      | (i, k) <- zip (XMonad.workspaces conf) [xK_1 .. xK_9]
      , (f, m) <- [(W.greedyView, 0), (W.shift, shiftMask)]]
  ++
  -- mod-{w,e,r}, Switch to physical/Xinerama screens 1, 2, or 3
  -- mod-shift-{w,e,r}, Move client to screen 1, 2, or 3
  [((m .|. modMask, key), screenWorkspace sc >>= flip whenJust (windows . f))
      | (key, sc) <- zip [xK_w, xK_e, xK_r] [0..]
      , (f, m) <- [(W.view, 0), (W.shift, shiftMask)]]
------------------------------------------------------------------------
-- Mouse bindings
--
-- Focus rules
-- True if your focus should follow your mouse cursor.
myFocusFollowsMouse :: Bool
myFocusFollowsMouse = True
-- Mouse bindings: mod + left/middle/right button.
myMouseBindings (XConfig {XMonad.modMask = modMask}) = M.fromList $
  [
    -- mod-button1, Set the window to floating mode and move by dragging
    ((modMask, button1),
     (\w -> focus w >> mouseMoveWindow w))
    -- mod-button2, Raise the window to the top of the stack
    , ((modMask, button2),
       (\w -> focus w >> windows W.swapMaster))
    -- mod-button3, Set the window to floating mode and resize by dragging
    , ((modMask, button3),
       (\w -> focus w >> mouseResizeWindow w))
    -- you may also bind events to the mouse scroll wheel (button4 and button5)
  ]
------------------------------------------------------------------------
-- Status bars and logging
-- Perform an arbitrary action on each internal state change or X event.
-- See the 'DynamicLog' extension for examples.
--
-- To emulate dwm's status bar
--
-- > logHook = dynamicLogDzen
--
------------------------------------------------------------------------
-- Startup hook
-- Perform an arbitrary action each time xmonad starts or is restarted
-- with mod-q. Used by, e.g., XMonad.Layout.PerWorkspace to initialize
-- per-workspace layout choices.
--
-- By default, do nothing. Note that 'main' overrides this with
-- setWMName "LG3D" in its record update of 'defaults'.
myStartupHook = return ()
------------------------------------------------------------------------
-- Run xmonad with all the defaults we set up.
-- Spawns xmobar and pipes the formatted status line to it; the record
-- update here overrides the logHook/manageHook/startupHook fields of
-- 'defaults' (manageDocks is composed in so struts are respected).
--
main = do
  xmproc <- spawnPipe "/usr/bin/xmobar ~/.xmonad/xmobar.hs"
  xmonad $ defaults {
      logHook = dynamicLogWithPP $ xmobarPP {
            ppOutput = hPutStrLn xmproc
          , ppTitle = xmobarColor xmobarTitleColor "" . shorten 100
          , ppCurrent = xmobarColor xmobarCurrentWorkspaceColor ""
          , ppSep = "   "}
      , manageHook = manageDocks <+> myManageHook
      , startupHook = setWMName "LG3D"
  }
------------------------------------------------------------------------
-- Combine it all together
-- A structure containing your configuration settings, overriding
-- fields in the default config. Any you don't override, will
-- use the defaults defined in xmonad/XMonad/Config.hs
--
-- No need to modify this.
--
defaults = defaultConfig {
    -- simple stuff
    terminal = myTerminal,
    focusFollowsMouse = myFocusFollowsMouse,
    borderWidth = myBorderWidth,
    modMask = myModMask,
    workspaces = myWorkspaces,
    normalBorderColor = myNormalBorderColor,
    focusedBorderColor = myFocusedBorderColor,
    -- key bindings
    keys = myKeys,
    mouseBindings = myMouseBindings,
    -- hooks, layouts
    layoutHook = smartBorders $ myLayout,
    manageHook = myManageHook,
    startupHook = myStartupHook
}
|
vecio/.linux
|
xmonad/xmonad.hs
|
bsd-3-clause
| 11,318
| 0
| 14
| 2,378
| 1,775
| 1,092
| 683
| 148
| 1
|
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE OverloadedStrings #-}
-- ^
-- Persistence functionality for user-posts
module Persistence.RssReaders.UserPosts
( deleteUserPost
, deleteUserPosts
, indexUserPosts
, indexUserPosts'
, insertUserPost
, insertUserPosts
, mkUserPostId
, mkUserPostId'
, mkUserPostOnCreate
, mkUserPostOnModify
, mkUserPostOnReplace
, mkUserPostsOnCreate
, mkUserPostsOnModify
, mkUserPostsOnReplace
, modifyUserPost
, modifyUserPosts
, replaceUserPost
, replaceUserPosts
) where
import Control.Monad.Except
import Control.Monad.Reader
import Control.Monad.Trans.Control
import Data.Bifunctor
import Data.Bson hiding (lookup, label)
import qualified Data.Map.Strict as Map
import Data.Maybe
import Data.Monoid ((<>))
import Data.Text (Text)
import Data.Time (UTCTime)
import Persistence.Common
import qualified Persistence.ElasticSearch as E
import Persistence.Facade
import Persistence.RssReaders.Common
import Types.Common
import Util.Constants
import Util.Error
-- | Insert a single user-post, reporting the first error if any.
insertUserPost
  :: (MonadIO m, MonadReader ApiConfig m, MonadBaseControl IO m)
  => Record -> ExceptT ApiError m Record
insertUserPost record = toSingle insertUserPosts record

-- | Replace a single user-post, reporting the first error if any.
replaceUserPost
  :: (MonadIO m, MonadReader ApiConfig m, MonadBaseControl IO m)
  => Record -> ExceptT ApiError m Record
replaceUserPost record = toSingle replaceUserPosts record

-- | Modify (patch) a single user-post, reporting the first error if any.
modifyUserPost
  :: (MonadIO m, MonadReader ApiConfig m, MonadBaseControl IO m)
  => Record -> ExceptT ApiError m Record
modifyUserPost record = toSingle modifyUserPosts record

-- | Delete a single user-post by id, reporting the first error if any.
deleteUserPost
  :: (MonadIO m, MonadReader ApiConfig m, MonadBaseControl IO m)
  => RecordId -> ExceptT ApiError m Record
deleteUserPost recordId = toSingle deleteUserPosts recordId
-- | Replace a batch of user-posts via the shared update pipeline.
replaceUserPosts
  :: (MonadIO m, MonadReader ApiConfig m, MonadBaseControl IO m)
  => [Record] -> ApiItemsT [ApiError] m [Record]
replaceUserPosts records = updateUserPosts mkUserPostsOnReplace records

-- | Modify (patch) a batch of user-posts via the shared update pipeline.
modifyUserPosts
  :: (MonadIO m, MonadReader ApiConfig m, MonadBaseControl IO m)
  => [Record] -> ApiItemsT [ApiError] m [Record]
modifyUserPosts records = updateUserPosts mkUserPostsOnModify records
-- | Validate the input, resolve the referenced subscriptions and posts,
-- assemble the user-post documents, and index them.
insertUserPosts
  :: (MonadIO m, MonadReader ApiConfig m, MonadBaseControl IO m)
  => [Record] -> ApiItemsT [ApiError] m [Record]
insertUserPosts input = do
  validated <- validateUserPosts input
  subscriptions <- dbGetExistingMulti subscriptionDefinition (fmap subId validated)
  existingPosts <- dbGetExistingMulti postDefinition (fmap postId validated)
  assembled <- mkUserPostsOnCreate subscriptions existingPosts validated
  indexUserPosts assembled
-- | Shared replace/modify pipeline: validate ids, fetch the current
-- documents, combine them with the input via @mkRecords@, validate the
-- results, and re-index.
updateUserPosts
  :: (MonadIO m, MonadReader ApiConfig m, MonadBaseControl IO m)
  => ([Record] -> [Record] -> ApiItemsT [ApiError] m [(Record, RecordId)])
  -> [Record]
  -> ApiItemsT [ApiError] m [Record]
updateUserPosts mkRecords input = do
  withIds <- validateEsIdMulti input
  current <- toMulti (getUserPosts (fmap getIdValue' withIds))
  combined <- mkRecords current withIds
  validated <- validateMulti' fst validateRecordTuple combined
  indexUserPosts validated
-- ^
-- Delete multiple records by id
deleteUserPosts
  :: (MonadBaseControl IO m, MonadReader ApiConfig m, MonadIO m)
  => [RecordId] -> ApiItemsT [ApiError] m [Record]
deleteUserPosts ids = do
  -- Fetch first so the deleted records can be returned to the caller.
  existing <- getExistingUserPosts ids
  -- Best-effort delete: the runExceptT result is deliberately
  -- discarded, so an Elastic-Search failure does not fail the request.
  _ <- runExceptT $ runEs (E.deleteByIds ids userPostCollection)
  return existing
-- | Fetch multiple user-posts by id, failing per-item for missing ones.
getExistingUserPosts
  :: (MonadIO m, MonadReader ApiConfig m, MonadBaseControl IO m)
  => [RecordId] -> ApiResultsT m
getExistingUserPosts recordIds = esGetExistingMulti userPostDefinition recordIds

-- | Index the given documents and then re-query them by id so the
-- caller receives the documents exactly as stored.
indexUserPosts
  :: (MonadBaseControl IO m, MonadReader ApiConfig m, MonadIO m)
  => [(Record, RecordId)] -> ApiResultsT m
indexUserPosts input = do
  _ <- runExceptT (indexUserPosts' input)
  getExistingUserPosts (fmap snd input)
-- | Index the given documents and refresh the index; a no-op on an
-- empty batch.
indexUserPosts'
  :: (MonadBaseControl IO m, MonadReader ApiConfig m, MonadIO m)
  => [(Record, RecordId)] -> ExceptT ApiError m Text
indexUserPosts' [] = return mempty
indexUserPosts' input = do
  response <- runEs (E.indexDocuments input userPostCollection)
  _ <- runEs E.refreshIndex
  return response
-- | Build replacement documents for a batch of updates.
mkUserPostsOnReplace
  :: (MonadIO m)
  => [Record] -> [Record] -> ApiItemsT [ApiError] m [(Record, RecordId)]
mkUserPostsOnReplace existing input =
  mkUserPostsOnUpdate mkUserPostOnReplace existing input

-- | Build patched documents for a batch of updates.
mkUserPostsOnModify
  :: (MonadIO m)
  => [Record] -> [Record] -> ApiItemsT [ApiError] m [(Record, RecordId)]
mkUserPostsOnModify existing input =
  mkUserPostsOnUpdate mkUserPostOnModify existing input

-- | Apply the per-record update function to each input record, handing
-- it the existing record with the same id (if any).
mkUserPostsOnUpdate
  :: Monad m
  => (Maybe Record -> Record -> m (Either e a))
  -> [Record]
  -> [Record]
  -> ApiItemsT [e] m [a]
mkUserPostsOnUpdate f existing = runAction build
  where
    build record = f (lookupExisting record) record
    lookupExisting record = Map.lookup (getIdValue' record) existingById
    existingById = mkIdIndexedMap existing
-- | Assemble new user-post documents, resolving each input record's
-- subscription and post from the pre-fetched lists.
mkUserPostsOnCreate
  :: MonadIO m
  => [Record]
  -> [Record]
  -> [Record]
  -> ApiItemsT [ApiError] m [(Record, RecordId)]
mkUserPostsOnCreate subs posts = runAction build
  where
    build record =
      mkUserPostOnCreate
        (lookupIn subsById subId record)
        (lookupIn postsById postId record)
        record
    lookupIn table keyOf record = Map.lookup (keyOf record) table
    subsById = mkIdIndexedMap subs
    postsById = mkIdIndexedMap posts
-- | Build one user-post for insertion. Yields a 404 when the referenced
-- post or subscription is missing, and a 400 when the post's feed does
-- not match the subscription's feed.
mkUserPostOnCreate
  :: MonadIO m
  => Maybe Record -- ^ subscription, if found
  -> Maybe Record -- ^ post, if found
  -> Record       -- ^ input user-post
  -> m (Either ApiError (Record, RecordId))
-- NOTE: the missing-post clause comes first, so when both lookups fail
-- the 404 is reported against the post definition.
mkUserPostOnCreate _ Nothing r = return . Left $ mk404Err postDefinition r
mkUserPostOnCreate Nothing _ r = return . Left $ mk404Err subscriptionDefinition r
mkUserPostOnCreate (Just sub) (Just post) input
  | not (postBelongsToSub sub post) =
    return . Left $ mk400Err "Post does not belong to subscription." input
  | otherwise = Right <$> mkUserPostTuple Nothing recId record
  where
    (recId, ids) = getIds sub post
    -- Base document: the embedded post plus selected subscription
    -- fields, with all linking ids applied; the (filtered) user input
    -- is merged on top and wins on conflicts.
    record = mergeRecords base input'
    sub' = includeFields ["title", "notes", "tags"] sub
    base = foldr (uncurry setValue) (mergeRecords sub' $ mkPost post) ids
    input' = excludeFields skipFieldsOnCreate input
-- | Shared update path for replacing/patching one user-post. A missing
-- existing record yields a 404; otherwise @f existing input@ computes
-- the new contents, from which the id and version fields are stripped.
mkUserPostOnUpdate
  :: MonadIO m
  => (Record -> Record -> Record) -- ^ combines existing and input records
  -> Maybe Record                 -- ^ existing record, if found
  -> Record                       -- ^ input record
  -> m (Either ApiError (Record, RecordId))
mkUserPostOnUpdate _ Nothing r = return . Left $ mk404Err userPostDefinition r
mkUserPostOnUpdate f (Just existing) input =
  Right <$>
  mkUserPostTuple
    (Just existing)
    (getIdValue' existing)
    (excludeFields [idLabel, "__v"] $ f existing record)
  where
    -- Protected fields are removed from the input before combining.
    record = excludeFields skipFieldsOnUpdate input

-- | Replace semantics: keep only the protected fields of the existing
-- record, take everything else from the input.
mkUserPostOnReplace
  :: MonadIO m
  => Maybe Record -> Record -> m (Either ApiError (Record, RecordId))
mkUserPostOnReplace = mkUserPostOnUpdate (replaceRecords skipFieldsOnUpdate)

-- | Modify (patch) semantics: merge the input fields into the existing
-- record.
mkUserPostOnModify
  :: MonadIO m
  => Maybe Record -> Record -> m (Either ApiError (Record, RecordId))
mkUserPostOnModify = mkUserPostOnUpdate mergeRecords
-- | Stamp the record with timestamp fields and pair it with its id.
mkUserPostTuple
  :: MonadIO m
  => Maybe Record -> RecordId -> Record -> m (Record, RecordId)
mkUserPostTuple existing recId record = do
  stamped <- addTimestamp existing record
  return (stamped, recId)

-- | A post belongs to a subscription when both reference the same feed.
postBelongsToSub :: Record -> Record -> Bool
postBelongsToSub sub post = subFeedId == postFeedId
  where
    subFeedId = getValue' "feedId" sub :: RecordId
    postFeedId = getValue' "feedId" post
-- | Fetch user-posts by id from the search index.
getUserPosts
  :: (MonadIO m, MonadReader ApiConfig m, MonadBaseControl IO m)
  => [RecordId] -> ExceptT ApiError m [Record]
getUserPosts ids =
  runEsAndExtract (E.getByIds ids (recordCollection userPostDefinition))

-- | Wrap a post's contents (minus linking/meta fields) under a
-- single "post" field.
mkPost :: Record -> Record
mkPost input = Record ["post" =: getDocument embedded]
  where
    embedded = excludeFields droppedFields input
    droppedFields = ["feedId", idLabel, "pubdate", "__v"]
-- ^
-- Add time-stamp dates to a user post record
--
-- The existing record's created-at field is read twice at different
-- result types: 'getValue' is polymorphic in its result, so the field
-- is tried as a 'UTCTime' and, failing that, as 'Text'. Whichever form
-- is present is carried over onto the new record.
addTimestamp
  :: (MonadIO m)
  => Maybe Record -> Record -> m Record
addTimestamp existing new =
  case existingUTCDate of
    Nothing -> mergeDates new existingTextDate
    Just _ -> mergeDates new existingUTCDate
  where
    existingTextDate :: Maybe Text
    existingTextDate = existing >>= getValue createdAtLabel
    existingUTCDate :: Maybe UTCTime
    existingUTCDate = existing >>= getValue createdAtLabel
    -- No prior creation date: stamp as a brand-new record
    -- (setTimestamp True presumably sets createdAt too — TODO confirm).
    mergeDates r Nothing =
      setTimestamp True r
    -- Prior creation date found: restamp, then restore the original
    -- createdAt value so creation time is preserved across updates.
    mergeDates r (Just createdAt) =
      setValue createdAtLabel createdAt <$> setTimestamp False r
-- ^
-- Get all ids required to create a user-post record
--
-- Extracts the linking ids from the subscription and post records and
-- derives the composite user-post id. NOTE: uses 'fromJust' — all four
-- source fields are assumed present (the records come from
-- dbGetExistingMulti); a missing field crashes rather than producing
-- an ApiError.
getIds :: Record -> Record -> (RecordId, [(RecordId, RecordId)])
getIds sub post = (recId, output)
  where
    output = get <$> input
    -- (output label, source label, source record)
    input =
      [ ("feedId", "feedId", sub)
      , ("userId", "userId", sub)
      , ("subscriptionId", idLabel, sub)
      , ("postId", idLabel, post)
      ]
    get (outLabel, label, r) = (outLabel, fromJust $ getValue label r)
    subId' = lookup' "subscriptionId" output
    postId' = lookup' "postId" output
    recId = mkUserPostId subId' postId'
    lookup' name = fromJust . lookup name
-- | Derive a user-post id from the record's own linking fields.
mkUserPostId' :: Record -> RecordId
mkUserPostId' record = mkUserPostId subscriptionId' postId''
  where
    subscriptionId' = getValue' "subscriptionId" record
    postId'' = getValue' "postId" record

-- | Compose a user-post id as @<postId>-<subscriptionId>@.
mkUserPostId :: RecordId -> RecordId -> RecordId
mkUserPostId subId' postId' = mconcat [postId', "-", subId']

-- | The subscription id of a user-post.
subId :: Record -> RecordId
subId record = getValue' "subscriptionId" record

-- | The post id of a user-post.
postId :: Record -> RecordId
postId record = getValue' "postId" record
-- | Validate a record/id pair against the user-post definition, keeping
-- the id attached to the outcome.
validateRecordTuple :: (Record, RecordId)
                    -> ((Record, RecordId), ValidationResult)
validateRecordTuple (record, recordId) =
  first attach (validateRecord userPostDefinition record)
  where
    attach validated = (validated, recordId)

-- | Fields callers may not set when updating a user-post.
skipFieldsOnUpdate :: [Label]
skipFieldsOnUpdate = ["post", "feedId", "userId", "postId", "subscriptionId"]

-- | Fields callers may not set when creating a user-post.
skipFieldsOnCreate :: [Label]
skipFieldsOnCreate =
  ["post", "userId", "feedId", createdAtLabel, updatedAtLabel, idLabel]
|
gabesoft/kapi
|
src/Persistence/RssReaders/UserPosts.hs
|
bsd-3-clause
| 9,631
| 0
| 12
| 1,690
| 2,833
| 1,499
| 1,334
| 235
| 3
|
module Main where
import Lib
import Data.Char
import Prelude hiding (concat, map)
-- INTRO
-- | Walk the whole list, discarding every element; always ends at 'Nil'.
runThrough :: List a -> List a
runThrough Nil = Nil
runThrough (Cons _ rest) = runThrough rest
-- EXO 1
-- | Minimal monad-like class used by the exercises: 'point' lifts a
-- pure value, 'flatMap' sequences a computation ('>>=' analogue).
class Monade m where
  point :: a -> m a
  flatMap :: m a -> (a -> m b) -> m b

-- EXO 2
-- NOTE(review): layout was ambiguous in the original; these stubs are
-- assumed to be top-level exercise functions to be implemented in
-- terms of 'Monade' — confirm against the upstream repo.
map :: m a -> (a -> b) -> m b
map ma f = error "todo"

ap :: m a -> m (a -> b) -> m b
ap ma mf = error "todo"

-- EXO 3
flatten :: m (m a) -> m a
flatten mma = error "todo"

-- EXO 4
-- Instances for the exercise types from Lib; bodies to be filled in.
instance Monade List where
  point x = error "todo"
  flatMap _ _ = error "todo"

instance Monade Box where
  point = error "todo"
  flatMap _ _ = error "todo"
-- EXO 5
-- TODO

-- EXO 9
-- | Map a function over every element inside a List of Boxes. (stub)
composeListAndBox :: List (Box a) -> (a -> b) -> List (Box b)
composeListAndBox l f = error "todo"

-- | Map a function over every element inside a Box of Lists. (stub)
composeBoxAndList :: Box (List a) -> (a -> b) -> Box (List b)
composeBoxAndList l f = error "todo"

-- EXO 8
-- TODO

-- EXO 10
-- TODO
-- | Entry point: the exercises in this file are validated by the
-- type-checker alone; uncomment the blocks below as each EXO is solved.
main :: IO ()
main = do
  putStrLn "Si ça compile, c'est que ça fonctionne :)"
  {-
  -- EXO 1
  print $ list [1, 2, 3, 4]
  print $ list [1, 2, 3, 4] `concat` list [5, 6, 7, 8]
  -}
  {-
  -- EXO 2, 3, 4
  print $ map (list [1, 2, 3, 4]) (+1)
  print $ ap (list [1, 2, 3, 4]) (list [(+1), (+2)])
  print $ flatten (list [list [1, 2, 3], list [4, 5, 6]])
  print $ (list [(+1), (+2), (+3)]) <*> (list [10, 11, 12])
  print $ flatMap (list [1, 2, 3, 4]) (\x -> (list [x-1, x, x+1]))
  -}
  {-
  -- EXO 5
  print $ box 4
  print $ map (box 6) (+1)
  print $ ap (box 4) (box (+1))
  print $ flatten (box (box 2))
  print $ flatten (box (box (box 7)))
  print $ (box (+1)) <*> (box 10)
  print $ flatMap (box 3) (\x -> (box (x-1)))
  print $ flatMap (box 6) (\x -> (box [x-1, x, x+1]))
  -}
  {-
  -- EXO 6, 7
  print $ Full 6 >>= (\x -> Full (x + 10) >>= (\y -> Full (y + 100)))
  -- TODO
  -}
  {-
  -- EXO 8
  print $ composeListAndBox (list [box 1, box 2]) (+1)
  print $ composeBoxAndList (box (list [1, 2, 3, 4])) (+1)
  print $ composeF (+1) (list [box 1, box 2])
  print $ composeF (+1) (box (list [1, 2, 3, 4]))
  -}
  {-
  -- EXO 9
  -- TODO
  -}
  {-
  -- EXO 10
  print $ do
    x <- write 3 "Put 3\n"
    y <- write 5 "Put 5\n"
    z <- write (x + y) "Add both\n"
    return z
  -}
|
mdulac/fp-in-haskell-monad
|
app/Main.hs
|
bsd-3-clause
| 2,129
| 0
| 11
| 551
| 432
| 222
| 210
| 29
| 1
|
{-# LANGUAGE TypeFamilies, FlexibleContexts, PackageImports #-}
module Network.XmlPush (XmlPusher(..), Zero(..), One(..), Two(..)) where
import "monads-tf" Control.Monad.Error
import Control.Monad.Base
import Control.Monad.Trans.Control
import Data.HandleLike
import Data.Pipe
import Text.XML.Pipe
import Network.PeyoTLS.Client
import Network.Sasl
-- | Abstraction over transports that push and pull XML nodes.
--
-- 'NumOfHandle' fixes how many connection handles a pusher needs
-- (see 'Zero', 'One', 'Two'); 'PusherArgs' carries its
-- transport-specific configuration.
class XmlPusher xp where
  type NumOfHandle xp :: * -> *
  type PusherArgs xp :: * -> *
  -- | Construct a pusher from its handles and arguments.
  generate :: (
    ValidateHandle h, MonadBaseControl IO (HandleMonad h),
    MonadError (HandleMonad h), SaslError (ErrorType (HandleMonad h))
    ) =>
    NumOfHandle xp h -> PusherArgs xp h -> HandleMonad h (xp h)
  -- | Producer of incoming XML nodes.
  readFrom :: (HandleLike h, MonadBase IO (HandleMonad h)) =>
    xp h -> Pipe () XmlNode (HandleMonad h) ()
  -- | Consumer of outgoing XML nodes.
  writeTo :: (HandleLike h, MonadBase IO (HandleMonad h)) =>
    xp h -> Pipe XmlNode () (HandleMonad h) ()
-- Handle-count markers used with 'NumOfHandle'.
data Zero a = Zero deriving Show
data One a = One a deriving Show
-- Two optional handles. NOTE(review): unlike 'Zero'/'One' this has no
-- Show instance — possibly intentional; confirm before deriving one.
data Two a = Two (Maybe a) (Maybe a)
|
YoshikuniJujo/xml-push
|
src/Network/XmlPush.hs
|
bsd-3-clause
| 961
| 12
| 12
| 159
| 370
| 206
| 164
| 24
| 0
|
{-# LANGUAGE CPP #-}
{-# LANGUAGE OverloadedStrings #-}
module SpecHelper
( runApiSpec
-- * Fixtures
, Fixture(..)
, fixture
-- * Re-exports
, module Test.Hspec
, module Test.Hspec.Expectations.Lifted
, module Network.Google.Drive
, getCurrentTime
) where
import Test.Hspec hiding
( expectationFailure
, shouldBe
, shouldContain
, shouldMatchList
, shouldReturn
, shouldSatisfy
)
import Test.Hspec.Expectations.Lifted
import Network.Google.Drive
#if __GLASGOW_HASKELL__ < 710
import Control.Applicative ((<$>), (<*>))
#endif
import Data.Time (getCurrentTime)
import LoadEnv (loadEnv)
import Network.Google.OAuth2
import System.Environment (getEnv)
-- | A known local file used as the canonical upload fixture.
data Fixture = Fixture
  { fPath :: FilePath    -- ^ path of the fixture file within the repo
  , fSize :: Int         -- ^ expected size of the file in bytes
  , fContent :: String   -- ^ expected file contents
  }

-- | The single fixture shipped with the test suite; fSize (14) is the
-- byte length of fContent.
fixture :: Fixture
fixture = Fixture
  { fPath = "test/files/upload.txt"
  , fSize = 14
  , fContent = "Local content\n"
  }
-- | Run an API spec and cleanup after
--
-- Create a folder and hand it to the given action. That action should place any
-- working files within the folder. The folder will be removed at the end of the
-- spec run.
--
runApiSpec :: (File -> Api a) -> IO ()
runApiSpec spec = do
  token <- getToken

  runApi_ token $ do
    Just root <- getFile "root"
    folder <- createFile $
      setParent root $ newFolder "google-drive-test" Nothing

    spec folder `finally` deleteFile folder

  where
    -- Run @f@, then run the cleanup @g@ whether or not @f@ failed,
    -- rethrowing @f@'s error afterwards.
    --
    -- FIX: the previous definition, @f >> g `catchError` \e -> ...@,
    -- parsed as @f >> (g `catchError` ...)@ because a backticked
    -- function defaults to infixl 9 (tighter than @>>@) — so when the
    -- spec itself failed, the cleanup never ran and the test folder
    -- was left behind on Drive.
    f `finally` g = (f `catchError` \e -> g >> throwError e) >> g
-- | Load credentials from the environment (via loadEnv) and obtain an
-- OAuth2 access token, cached at the path named by CACHE_FILE.
getToken :: IO OAuth2Token
getToken = do
  loadEnv
  clientId <- getEnv "CLIENT_ID"
  clientSecret <- getEnv "CLIENT_SECRET"
  cacheFile <- getEnv "CACHE_FILE"
  getAccessToken (OAuth2Client clientId clientSecret) driveScopes (Just cacheFile)
|
pbrisbin/google-drive
|
test/SpecHelper.hs
|
mit
| 1,754
| 0
| 13
| 430
| 395
| 227
| 168
| 49
| 1
|
-- | Re-export of Crypto modules.
module Pos.Crypto
( module Pos.Crypto.Configuration
, module Pos.Crypto.Encryption
, module Pos.Crypto.Hashing
, module Pos.Crypto.HD
, module Pos.Crypto.Random
, module Pos.Crypto.Scrypt
, module Pos.Crypto.SecretSharing
, module Pos.Crypto.Signing
) where
import Pos.Crypto.Configuration
import Pos.Crypto.Encryption
import Pos.Crypto.Hashing
import Pos.Crypto.HD
import Pos.Crypto.Random
import Pos.Crypto.Scrypt
import Pos.Crypto.SecretSharing
import Pos.Crypto.Signing
|
input-output-hk/pos-haskell-prototype
|
crypto/Pos/Crypto.hs
|
mit
| 653
| 0
| 5
| 195
| 113
| 78
| 35
| 17
| 0
|
-- |
-- Module: SwiftNav.SBP
-- Copyright: Copyright (C) 2015 Swift Navigation, Inc.
-- License: LGPL-3
-- Maintainer: Mark Fine <dev@swiftnav.com>
-- Stability: experimental
-- Portability: portable
--
-- SBP message containers.
module SwiftNav.SBP
( SBPMsg (..)
, module SwiftNav.SBP.Types
((*- for m in modules *))
, module (((m)))
((*- endfor *))
) where
import BasicPrelude hiding (lookup)
import Control.Lens hiding ((.=))
import Data.Aeson hiding (decode, decode')
import Data.Binary
import Data.Binary.Get
import Data.Binary.Put
import Data.ByteString.Lazy hiding (ByteString)
import Data.ByteString.Builder
import Data.HashMap.Strict
import SwiftNav.CRC16
((*- for m in modules *))
import (((m)))
((*- endfor *))
import SwiftNav.SBP.Types
((* for m in msgs *))
((*- if loop.first *))
-- | An SBP message ADT composed of all defined SBP messages.
--
-- Includes SBPMsgUnknown for valid SBP messages with undefined message
-- types and SBPMsgBadCRC for SBP messages with invalid CRC checksums.
--
-- NOTE(review): this file is a Jinja-style template; the ((* ... *))
-- markers expand to one constructor per generated message type.
data SBPMsg =
     SBP(((m))) (((m))) Msg
((*- else *))
   | SBP(((m))) (((m))) Msg
((*- endif *))
((*- if loop.last *))
   | SBPMsgBadCrc Msg
   | SBPMsgUnknown Msg
   deriving ( Show, Read, Eq )
((*- endif *))
((*- endfor *))
-- | Wire format: a preamble byte followed by an encoded framing 'Msg';
-- decoding dispatches on the embedded message type, falling back to
-- 'SBPMsgBadCrc' / 'SBPMsgUnknown'.
instance Binary SBPMsg where
  get = do
    preamble <- getWord8
    -- Skip bytes until a valid preamble is found (resynchronization).
    if preamble /= msgSBPPreamble then get else do
      sbp <- get
      return $ decode' sbp where
        decode' sbp@Msg {..}
          | checkCrc sbp /= _msgSBPCrc = SBPMsgBadCrc sbp
((*- for m in msgs *))
          | _msgSBPType == (((m | to_global))) = SBP(((m))) (decode (fromStrict _msgSBPPayload)) sbp
((*- endfor *))
          | otherwise = SBPMsgUnknown sbp

  put msg = do
    putWord8 msgSBPPreamble
    -- The original framing 'Msg' is written back unchanged.
    put $ encode' msg
    where
((*- for m in msgs *))
      encode' (SBP(((m))) _ sbp) = sbp
((*- endfor *))
      encode' (SBPMsgUnknown sbp) = sbp
      encode' (SBPMsgBadCrc sbp) = sbp
-- | JSON decoding dispatches on the "msg_type" field; both the typed
-- payload and the framing Msg are parsed from the same object.
instance FromJSON SBPMsg where
  parseJSON obj@(Object o) = do
    msgType <- o .: "msg_type"
    decode' msgType where
      decode' msgType
((*- for m in msgs *))
        | msgType == (((m | to_global))) = SBP(((m))) <$> parseJSON obj <*> parseJSON obj
((*- endfor *))
        | otherwise = SBPMsgUnknown <$> parseJSON obj
  parseJSON _ = mzero
-- | Shallow merge of two JSON values: two objects are unioned
-- (left-biased on duplicate keys); otherwise whichever side is an
-- object wins, and when neither is, the right-hand value is returned.
merge :: Value -> Value -> Value
merge lhs rhs =
  case (lhs, rhs) of
    (Object one, Object two) -> Object (one <> two)
    (_, Object two) -> Object two
    (Object one, _) -> Object one
    (_, v) -> v
-- | JSON encoding merges the typed payload's fields with the framing
-- Msg's fields into one object (see 'merge').
instance ToJSON SBPMsg where
((*- for m in msgs *))
  toJSON (SBP(((m))) msg sbp) = toJSON msg `merge` toJSON sbp
((*- endfor *))
  toJSON (SBPMsgBadCrc sbp) = toJSON sbp
  toJSON (SBPMsgUnknown sbp) = toJSON sbp
|
mookerji/libsbp
|
generator/sbpg/targets/resources/SbpTemplate.hs
|
lgpl-3.0
| 2,693
| 43
| 18
| 618
| 1,024
| 575
| 449
| -1
| -1
|
{-# LANGUAGE BangPatterns #-}
{-# LANGUAGE ConstraintKinds #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE KindSignatures #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE PatternGuards #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE TupleSections #-}
{-# LANGUAGE ViewPatterns #-}
{-# LANGUAGE ScopedTypeVariables #-}
-- | Dealing with the 01-index file and all its cabal files.
module Stack.PackageIndex
( updateAllIndices
, getPackageCaches
, getPackageCachesIO
, getPackageVersions
, getPackageVersionsIO
, lookupPackageVersions
) where
import qualified Codec.Archive.Tar as Tar
import Control.Exception (Exception)
import Control.Exception.Safe (tryIO)
import Control.Monad (unless, when, liftM, void, guard)
import Control.Monad.Catch (throwM)
import qualified Control.Monad.Catch as C
import Control.Monad.IO.Class (MonadIO, liftIO)
import Control.Monad.Logger (logDebug, logInfo, logWarn)
import Control.Monad.Trans.Control
import Crypto.Hash as Hash (hashlazy, Digest, SHA1)
import Data.Aeson.Extended
import qualified Data.ByteArray.Encoding as Mem (convertToBase, Base(Base16))
import qualified Data.ByteString.Char8 as S8
import qualified Data.ByteString.Lazy as L
import Data.Conduit (($$), (=$), (.|), runConduitRes)
import Data.Conduit.Binary (sinkHandle, sourceHandle, sourceFile, sinkFile)
import Data.Conduit.Zlib (ungzip)
import Data.Foldable (forM_)
import Data.IORef
import Data.Int (Int64)
import Data.HashMap.Strict (HashMap)
import qualified Data.HashMap.Strict as HashMap
import Data.Map (Map)
import qualified Data.Map.Strict as Map
import Data.Monoid
import Data.Set (Set)
import qualified Data.Set as Set
import Data.Store.Version
import Data.Store.VersionTagged
import Data.Text (Text)
import qualified Data.Text as T
import Data.Text.Unsafe (unsafeTail)
import Data.Time (getCurrentTime)
import Data.Traversable (forM)
import Data.Typeable (Typeable)
import qualified Hackage.Security.Client as HS
import qualified Hackage.Security.Client.Repository.Cache as HS
import qualified Hackage.Security.Client.Repository.Remote as HS
import qualified Hackage.Security.Client.Repository.HttpLib.HttpClient as HS
import qualified Hackage.Security.Util.Path as HS
import qualified Hackage.Security.Util.Pretty as HS
import Network.HTTP.Client.TLS (getGlobalManager)
import Network.HTTP.Download
import Network.URI (parseURI)
import Path (toFilePath, parseAbsFile)
import Path.IO
import Prelude -- Fix AMP warning
import Stack.Types.BuildPlan (GitSHA1 (..))
import Stack.Types.Config
import Stack.Types.PackageIdentifier
import Stack.Types.PackageIndex
import Stack.Types.PackageName
import Stack.Types.StackT
import Stack.Types.StringError
import Stack.Types.Version
import qualified System.Directory as D
import System.FilePath ((<.>))
import System.IO (IOMode (ReadMode, WriteMode), withBinaryFile)
-- | Populate the package index caches and return them.
--
-- Streams the index tarball entry-by-entry, building a map of package
-- caches (from .cabal entries) plus a Git-SHA1 index of cabal-file
-- contents. A tarball parse failure triggers one automatic index
-- update and retry.
populateCache
    :: (StackMiniM env m, HasConfig env)
    => PackageIndex
    -> m PackageCacheMap
populateCache index = do
    requireIndex index
    -- This uses full on lazy I/O instead of ResourceT to provide some
    -- protections. Caveat emptor
    path <- configPackageIndex (indexName index)
    let loadPIS = do
            $logSticky "Populating index cache ..."
            lbs <- liftIO $ L.readFile $ Path.toFilePath path
            loop 0 (Map.empty, HashMap.empty) (Tar.read lbs)
    (pis, gitPIs) <- loadPIS `C.catch` \e -> do
        $logWarn $ "Exception encountered when parsing index tarball: "
                <> T.pack (show (e :: Tar.FormatError))
        $logWarn "Automatically updating index and trying again"
        updateIndex index
        loadPIS
    when (indexRequireHashes index) $ forM_ (Map.toList pis) $ \(ident, pc) ->
        case pcDownload pc of
            Just _ -> return ()
            Nothing -> throwM $ MissingRequiredHashes (indexName index) ident
    $logStickyDone "Populated index cache."
    return $ PackageCacheMap pis gitPIs
  where
    -- Fold over the tar stream, tracking the current 512-byte block
    -- offset so each cabal file's position in the tarball is recorded.
    loop !blockNo (!m, !hm) (Tar.Next e es) =
        loop (blockNo + entrySizeInBlocks e) (goE blockNo m hm e) es
    loop _ (m, hm) Tar.Done = return (m, hm)
    loop _ _ (Tar.Fail e) = throwM e

    -- Process a single entry: .cabal files add a cache entry plus a
    -- Git-SHA1 mapping; .json / package.json files attach download info.
    goE blockNo m hm e =
        case Tar.entryContent e of
            Tar.NormalFile lbs size ->
                case parseNameVersionSuffix $ Tar.entryPath e of
                    Just (ident, ".cabal") -> addCabal lbs ident size
                    Just (ident, ".json") -> (addJSON id ident lbs, hm)
                    _ ->
                        case parsePackageJSON $ Tar.entryPath e of
                            Just ident -> (addJSON unHSPackageDownload ident lbs, hm)
                            Nothing -> (m, hm)
            _ -> (m, hm)
      where
        addCabal lbs ident size =
            ( Map.insertWith
                -- Keep any download info already attached to the entry.
                (\_ pcOld -> pcNew { pcDownload = pcDownload pcOld })
                ident
                pcNew
                m
            , HashMap.insert gitSHA1 offsetSize hm
            )
          where
            pcNew = PackageCache
                { pcOffsetSize = offsetSize
                , pcDownload = Nothing
                }
            -- File data begins one block after the entry header.
            offsetSize = OffsetSize
                ((blockNo + 1) * 512)
                size

            -- Calculate the Git SHA1 of the contents. This uses the
            -- Git algorithm of prepending "blob <size>\0" to the raw
            -- contents. We use this to be able to share the same SHA
            -- information between the Git and tarball backends.
            gitSHA1 = GitSHA1 $ Mem.convertToBase Mem.Base16 $ hashSHA1 $ L.fromChunks
                $ "blob "
                : S8.pack (show $ L.length lbs)
                : "\0"
                : L.toChunks lbs

            hashSHA1 :: L.ByteString -> Hash.Digest Hash.SHA1
            hashSHA1 = Hash.hashlazy

        addJSON :: FromJSON a
                => (a -> PackageDownload)
                -> PackageIdentifier
                -> L.ByteString
                -> Map PackageIdentifier PackageCache
        addJSON unwrap ident lbs =
            case decode lbs of
                Nothing -> m
                Just (unwrap -> pd) -> Map.insertWith
                    (\_ pc -> pc { pcDownload = Just pd })
                    ident
                    PackageCache
                        { pcOffsetSize = OffsetSize 0 0
                        , pcDownload = Just pd
                        }
                    m

    -- Split at the first '/', returning the pieces on either side.
    breakSlash x
        | T.null z = Nothing
        | otherwise = Just (y, unsafeTail z)
      where
        (y, z) = T.break (== '/') x

    -- Parse "<name>/<version>/<rest>" (normalizing '\\' to '/').
    parseNameVersion t1 = do
        (p', t3) <- breakSlash
            $ T.map (\c -> if c == '\\' then '/' else c)
            $ T.pack t1
        p <- parsePackageName p'
        (v', t5) <- breakSlash t3
        v <- parseVersion v'
        return (p', p, v, t5)

    -- Accept only entries named "<name>/<version>/<name>.<suffix>".
    parseNameVersionSuffix t1 = do
        (p', p, v, t5) <- parseNameVersion t1
        let (t6, suffix) = T.break (== '.') t5
        guard $ t6 == p'
        return (PackageIdentifier p v, suffix)

    -- Accept only entries named "<name>/<version>/package.json".
    parsePackageJSON t1 = do
        (_, p, v, t5) <- parseNameVersion t1
        guard $ t5 == "package.json"
        return $ PackageIdentifier p v
-- | Failures specific to package-index handling.
data PackageIndexException
  = GitNotAvailable IndexName
    -- ^ the index is Git-only but git is not on PATH
  | MissingRequiredHashes IndexName PackageIdentifier
    -- ^ the index requires package hashes but none exist for this package
  deriving Typeable

instance Exception PackageIndexException

-- | Human-readable rendering used when the exception surfaces.
instance Show PackageIndexException where
  show (GitNotAvailable name) = concat
    [ "Package index "
    , T.unpack $ indexNameText name
    , " only provides Git access, and you do not have"
    , " the git executable on your PATH"
    ]
  show (MissingRequiredHashes name ident) = concat
    [ "Package index "
    , T.unpack $ indexNameText name
    , " is configured to require package hashes, but no"
    , " hash is available for "
    , packageIdentifierString ident
    ]
-- | Require that an index be present, updating if it isn't.
requireIndex :: (StackMiniM env m, HasConfig env) => PackageIndex -> m ()
requireIndex index = do
  tarFile <- configPackageIndex (indexName index)
  present <- doesFileExist tarFile
  unless present (updateIndex index)

-- | Update all of the package indices, invalidating the in-memory
-- cache first.
updateAllIndices :: (StackMiniM env m, HasConfig env) => m ()
updateAllIndices = do
  clearPackageCaches
  indices <- view packageIndicesL
  mapM_ updateIndex indices
-- | Update the index tarball, dispatching on the index's configured
-- transport, then mirror it to the legacy 00-index.tar location.
updateIndex :: (StackMiniM env m, HasConfig env) => PackageIndex -> m ()
updateIndex index = do
  let name = indexName index
      url = indexLocation index
  $logSticky $
    "Updating package index " <> indexNameText name <>
    " (mirrored at " <> url <> ") ..."
  case indexType index of
    ITVanilla -> updateIndexHTTP name url
    ITHackageSecurity hs -> updateIndexHackageSecurity name url hs

  -- Copy to the 00-index.tar filename for backwards
  -- compatibility. First wipe out the cache file if present.
  tarFile <- configPackageIndex name
  oldTarFile <- configPackageIndexOld name
  oldCacheFile <- configPackageIndexCacheOld name
  ignoringAbsence (removeFile oldCacheFile)
  runConduitRes $ sourceFile (toFilePath tarFile) .| sinkFile (toFilePath oldTarFile)
-- | Update the index tarball via HTTP
--
-- Downloads the gzipped index and, when it changed (or the unpacked
-- tarball is missing), decompresses it to a temp file before renaming
-- into place so a failed unpack never clobbers the existing index.
updateIndexHTTP :: (StackMiniM env m, HasConfig env)
                => IndexName
                -> Text -- ^ url
                -> m ()
updateIndexHTTP indexName' url = do
    req <- parseRequest $ T.unpack url
    $logInfo ("Downloading package index from " <> url)
    gz <- configPackageIndexGz indexName'
    tar <- configPackageIndex indexName'
    wasDownloaded <- redownload req gz
    toUnpack <-
        if wasDownloaded
            then return True
            else not `liftM` doesFileExist tar
    when toUnpack $ do
        let tmp = toFilePath tar <.> "tmp"
        tmpPath <- parseAbsFile tmp
        -- The on-disk cache describes the old tarball; drop it first.
        deleteCache indexName'
        liftIO $ do
            withBinaryFile (toFilePath gz) ReadMode $ \input ->
                withBinaryFile tmp WriteMode $ \output ->
                    -- Use the modern '.|' pipeline (consistent with
                    -- 'updateIndex' above) instead of the deprecated
                    -- $$ / =$ operators.
                    runConduitRes $ sourceHandle input .| ungzip .| sinkHandle output
            renameFile tmpPath tar
-- | Update the index tarball via Hackage Security
--
-- Bootstraps the TUF repository if needed, then checks for updates.
-- Hackage Security is configured to unpack to @01-index.tar-tmp@; on a
-- real update the cache is invalidated and the temp file is renamed to
-- the final index path.
updateIndexHackageSecurity
    :: (StackMiniM env m, HasConfig env)
    => IndexName
    -> Text -- ^ base URL
    -> HackageSecurity
    -> m ()
updateIndexHackageSecurity indexName' url (HackageSecurity keyIds threshold) = do
    baseURI <-
        case parseURI $ T.unpack url of
            Nothing -> errorString $ "Invalid Hackage Security base URL: " ++ T.unpack url
            Just x -> return x
    manager <- liftIO getGlobalManager
    root <- configPackageIndexRoot indexName'
    logTUF <- embed_ ($logInfo . T.pack . HS.pretty)
    let withRepo = HS.withRepository
            (HS.makeHttpLib manager)
            [baseURI]
            HS.defaultRepoOpts
            HS.Cache
                { HS.cacheRoot = HS.fromAbsoluteFilePath $ toFilePath root
                , HS.cacheLayout = HS.cabalCacheLayout
                    -- Have Hackage Security write to a temporary file
                    -- to avoid invalidating the cache... continued
                    -- below at case didUpdate
                    { HS.cacheLayoutIndexTar = HS.rootPath $ HS.fragment "01-index.tar-tmp"
                    }
                }
            HS.hackageRepoLayout
            HS.hackageIndexLayout
            logTUF
    didUpdate <- liftIO $ withRepo $ \repo -> HS.uncheckClientErrors $ do
        needBootstrap <- HS.requiresBootstrap repo
        when needBootstrap $ do
            HS.bootstrap
                repo
                (map (HS.KeyId . T.unpack) keyIds)
                (HS.KeyThreshold (fromIntegral threshold))
        now <- getCurrentTime
        HS.checkForUpdates repo (Just now)
    case didUpdate of
        HS.HasUpdates -> do
            -- The index actually updated. Delete the old cache, and
            -- then move the temporary unpacked file to its real
            -- location
            tar <- configPackageIndex indexName'
            deleteCache indexName'
            -- "-tmp" matches the cacheLayoutIndexTar override above.
            liftIO $ D.renameFile (toFilePath tar ++ "-tmp") (toFilePath tar)
            $logInfo "Updated package list downloaded"
        HS.NoUpdates -> $logInfo "No updates to your package list were found"
-- | Delete the on-disk package index cache, logging (but not failing
-- on) any I/O error.
deleteCache
  :: (StackMiniM env m, HasConfig env)
  => IndexName -> m ()
deleteCache indexName' = do
  fp <- configPackageIndexCache indexName'
  result <- liftIO (tryIO (removeFile fp))
  case result of
    Left err -> $logDebug ("Could not delete cache: " <> T.pack (show err))
    Right () -> $logDebug ("Deleted index cache at " <> T.pack (toFilePath fp))
-- | Lookup a package's versions from 'IO'.
getPackageVersionsIO
  :: (StackMiniM env m, HasConfig env)
  => m (PackageName -> IO (Set Version))
getPackageVersionsIO = do
  getCaches <- getPackageCachesIO
  return $ \name -> lookupPackageVersions name . fst <$> getCaches

-- | Get the known versions for a given package from the package caches.
--
-- See 'getPackageCaches' for performance notes.
getPackageVersions
  :: (StackMiniM env m, HasConfig env)
  => PackageName
  -> m (Set Version)
getPackageVersions pkgName = do
  (caches, _) <- getPackageCaches
  return (lookupPackageVersions pkgName caches)

-- | All cached versions of the given package.
lookupPackageVersions :: PackageName -> Map PackageIdentifier a -> Set Version
lookupPackageVersions pkgName pkgCaches =
  Set.fromList (concatMap matching (Map.keys pkgCaches))
  where
    matching (PackageIdentifier n v)
      | n == pkgName = [v]
      | otherwise = []
-- | Access the package caches from 'IO'.
--
-- FIXME: This is a temporary solution until a better solution
-- to access the package caches from Stack.Build.ConstructPlan
-- has been found.
getPackageCachesIO
    :: (StackMiniM env m, HasConfig env)
    => m (IO ( Map PackageIdentifier (PackageIndex, PackageCache)
             , HashMap GitSHA1 (PackageIndex, OffsetSize)))
getPackageCachesIO = toIO getPackageCaches
  where
    -- Capture a monadic action as a plain IO action: liftBaseWith's
    -- runner discards the inner result (void), so the value is
    -- smuggled out through an IORef written inside the action. The
    -- "impossible" initial value is only forced if the write is
    -- somehow skipped.
    toIO :: (MonadIO m, MonadBaseControl IO m) => m a -> m (IO a)
    toIO m = do
        runInBase <- liftBaseWith $ \run -> return (void . run)
        return $ do
            i <- newIORef (error "Impossible evaluation in toIO")
            runInBase $ do
                x <- m
                liftIO $ writeIORef i x
            readIORef i
-- | Load the package caches, or create the caches if necessary.
--
-- This has two levels of caching: in memory, and the on-disk cache. So,
-- feel free to call this function multiple times.
getPackageCaches
    :: (StackMiniM env m, HasConfig env)
    => m ( Map PackageIdentifier (PackageIndex, PackageCache)
         , HashMap GitSHA1 (PackageIndex, OffsetSize)
         )
getPackageCaches = do
    config <- view configL
    mcached <- liftIO $ readIORef (configPackageCaches config)
    case mcached of
        Just cached -> return cached
        Nothing -> do
            -- In-memory miss: load every configured index from its
            -- versioned on-disk cache (rebuilding via populateCache on
            -- a miss/version mismatch), tagging each entry with its
            -- index, and combine the per-index maps.
            result <- liftM mconcat $ forM (configPackageIndices config) $ \index -> do
                fp <- configPackageIndexCache (indexName index)
                PackageCacheMap pis' gitPIs <-
                    $(versionedDecodeOrLoad (storeVersionConfig "pkg-v2" "WlAvAaRXlIMkjSmg5G3dD16UpT8="
                                             :: VersionConfig PackageCacheMap))
                    fp
                    (populateCache index)
                return (fmap (index,) pis', fmap (index,) gitPIs)
            liftIO $ writeIORef (configPackageCaches config) (Just result)
            return result
-- | Clear the in-memory hackage index cache. This is needed when the
-- hackage index is updated.
clearPackageCaches :: (StackMiniM env m, HasConfig env) => m ()
clearPackageCaches = do
  ref <- view packageCachesL
  liftIO (writeIORef ref Nothing)

--------------- Lifted from cabal-install, Distribution.Client.Tar:

-- | Return the number of 512-byte blocks an entry occupies in the
-- tarball: one header block plus the (rounded-up) content blocks.
entrySizeInBlocks :: Tar.Entry -> Int64
entrySizeInBlocks entry = 1 + contentBlocks (Tar.entryContent entry)
  where
    contentBlocks (Tar.NormalFile _ size) = bytesToBlocks size
    contentBlocks (Tar.OtherEntryType _ _ size) = bytesToBlocks size
    contentBlocks _ = 0
    -- Round a byte count up to whole blocks; at least one for
    -- non-empty sizes.
    bytesToBlocks s = 1 + ((fromIntegral s - 1) `div` 512)
|
mrkkrp/stack
|
src/Stack/PackageIndex.hs
|
bsd-3-clause
| 17,246
| 0
| 23
| 5,353
| 3,991
| 2,083
| 1,908
| 355
| 10
|
-- (c) The University of Glasgow 2002-2006
{-# LANGUAGE RankNTypes, CPP #-}
module ETA.Iface.IfaceEnv (
newGlobalBinder, newImplicitBinder,
lookupIfaceTop,
lookupOrig, lookupOrigNameCache, extendNameCache,
newIfaceName, newIfaceNames,
extendIfaceIdEnv, extendIfaceTyVarEnv,
tcIfaceLclId, tcIfaceTyVar, lookupIfaceTyVar,
ifaceExportNames,
-- Name-cache stuff
allocateGlobalBinder, initNameCache, updNameCache,
getNameCache, mkNameCacheUpdater, NameCacheUpdater(..)
) where
import ETA.TypeCheck.TcRnMonad
import ETA.Prelude.TysWiredIn
import ETA.Main.HscTypes
import ETA.Types.Type
import ETA.BasicTypes.Var
import ETA.BasicTypes.Name
import ETA.BasicTypes.Avail
import ETA.BasicTypes.Module
import ETA.Utils.UniqFM
import ETA.Utils.FastString
import ETA.BasicTypes.UniqSupply
import ETA.BasicTypes.SrcLoc
import ETA.Utils.Util
import ETA.Utils.Outputable
import ETA.Utils.Exception ( evaluate )
import Data.IORef ( atomicModifyIORef, readIORef )
#include "HsVersions.h"
{-
*********************************************************
* *
Allocating new Names in the Name Cache
* *
*********************************************************
Note [The Name Cache]
~~~~~~~~~~~~~~~~~~~~~
The Name Cache makes sure that, during any invocation of GHC, each
External Name "M.x" has one, and only one globally-agreed Unique.
* The first time we come across M.x we make up a Unique and record that
association in the Name Cache.
* When we come across "M.x" again, we look it up in the Name Cache,
and get a hit.
The functions newGlobalBinder, allocateGlobalBinder do the main work.
When you make an External name, you should probably be calling one
of them.
-}
-- | Make the Name for a thing, given its Module and OccName; used for
-- both source code and interface files.  See Note [The Name Cache].
--
-- The cache may already hold a binding for this thing, because we may
-- have seen an occurrence before; now is the moment when we know its
-- Module and SrcLoc in their full glory.
newGlobalBinder :: Module -> OccName -> SrcSpan -> TcRnIf a b Name
newGlobalBinder mod occ loc = do
    mod `seq` occ `seq` return ()   -- See notes with lookupOrig
    updNameCache $ \cache -> allocateGlobalBinder cache mod occ loc
-- | Pure worker for 'newGlobalBinder': look the name up in the cache,
-- and either reuse its Unique (fixing up the SrcSpan) or mint a fresh
-- Unique and record it.  Returns the (possibly updated) cache.
allocateGlobalBinder
  :: NameCache
  -> Module -> OccName -> SrcSpan
  -> (NameCache, Name)
-- See Note [The Name Cache]
allocateGlobalBinder name_supply mod occ loc
  = case lookupOrigNameCache (nsNames name_supply) mod occ of
        -- A hit in the cache!  We are at the binding site of the name.
        -- This is the moment when we know the SrcLoc
        -- of the Name, so we set this field in the Name we return.
        --
        -- Then (bogus) multiple bindings of the same Name
        -- get different SrcLocs and can be reported as such.
        --
        -- Possible other reason: it might be in the cache because we
        -- encountered an occurrence before the binding site for an
        -- implicitly-imported Name.  Perhaps the current SrcLoc is
        -- better... but not really: it'll still just say 'imported'
        --
        -- IMPORTANT: Don't mess with wired-in names.
        --            Their wired-in-ness is in their NameSort
        --            and their Module is correct.
        Just name | isWiredInName name
                  -> (name_supply, name)
                  | otherwise
                  -> (new_name_supply, name')
                  where
                    uniq            = nameUnique name
                    -- name' is like name, but with the right SrcSpan
                    name'           = mkExternalName uniq mod occ loc
                    new_cache       = extendNameCache (nsNames name_supply) mod occ name'
                    new_name_supply = name_supply {nsNames = new_cache}

        -- Miss in the cache!
        -- Build a completely new Name, and put it in the cache
        _ -> (new_name_supply, name)
             where
               (uniq, us')     = takeUniqFromSupply (nsUniqs name_supply)
               name            = mkExternalName uniq mod occ loc
               new_cache       = extendNameCache (nsNames name_supply) mod occ name
               new_name_supply = name_supply {nsUniqs = us', nsNames = new_cache}
newImplicitBinder :: Name                 -- Base name
                  -> (OccName -> OccName) -- Occurrence name modifier
                  -> TcRnIf m n Name      -- Implicit name
-- Called in BuildTyCl to allocate the implicit binders of type/class decls
-- For source type/class decls, this is the first occurrence
-- For iface ones, the LoadIface has already allocated a suitable name in the cache
newImplicitBinder base_name mk_sys_occ
  | Just mod <- nameModule_maybe base_name
  = newGlobalBinder mod occ loc
  | otherwise           -- When typechecking a [d| decl bracket |],
                        -- TH generates types, classes etc with Internal names,
                        -- so we follow suit for the implicit binders
  = do { uniq <- newUnique
       ; return (mkInternalName uniq occ loc) }
  where
    -- The implicit binder shares the base name's location; only its
    -- OccName is transformed (e.g. to a "$d..." style system name).
    occ = mk_sys_occ (nameOccName base_name)
    loc = nameSrcSpan base_name
-- | Exports read from an interface are already 'AvailInfo's here, so
-- this is simply the monadic identity.
ifaceExportNames :: [IfaceExport] -> TcRnIf gbl lcl [AvailInfo]
ifaceExportNames = return
-- | Look up the unique Name for an original (Module, OccName) pair,
-- allocating a fresh Unique and caching the Name on a miss.
-- See Note [The Name Cache].
lookupOrig :: Module -> OccName -> TcRnIf a b Name
lookupOrig mod occ
  = do { -- First ensure that mod and occ are evaluated
         -- If not, chaos can ensue:
         --   we read the name-cache
         --   then pull on mod (say)
         --   which does some stuff that modifies the name cache
         -- This did happen, with tycon_mod in TcIface.tcIfaceAlt (DataAlt..)
         mod `seq` occ `seq` return ()
         -- ; traceIf (text "lookup_orig" <+> ppr mod <+> ppr occ)
       ; updNameCache $ \name_cache ->
         case lookupOrigNameCache (nsNames name_cache) mod occ of {
           Just name -> (name_cache, name);
           Nothing   ->
             case takeUniqFromSupply (nsUniqs name_cache) of {
               (uniq, us) ->
                 let
                   -- A cache miss: mint the Name with noSrcSpan; the real
                   -- SrcSpan is filled in later by newGlobalBinder.
                   name      = mkExternalName uniq mod occ noSrcSpan
                   new_cache = extendNameCache (nsNames name_cache) mod occ name
                 in (name_cache{ nsUniqs = us, nsNames = new_cache }, name)
         }}}
{-
************************************************************************
* *
Name cache access
* *
************************************************************************
See Note [The Name Cache] above.
Note [Built-in syntax and the OrigNameCache]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You might think that using isBuiltInOcc_maybe in lookupOrigNameCache is
unnecessary because tuple TyCon/DataCons are parsed as Exact RdrNames
and *don't* appear as original names in interface files (because
serialization gives them special treatment), so we will never look
them up in the original name cache.
However, there are two reasons why we might look up an Orig RdrName:
* If you use setRdrNameSpace on an Exact RdrName it may be
turned into an Orig RdrName.
* Template Haskell turns a BuiltInSyntax Name into a TH.NameG
(DsMeta.globalVar), and parses a NameG into an Orig RdrName
(Convert.thRdrName). So, eg $(do { reify '(,); ... }) will
go this route (Trac #8954).
-}
-- | Look a (Module, OccName) pair up in the original-name cache.
-- Built-in syntax bypasses the cache entirely.
lookupOrigNameCache :: OrigNameCache -> Module -> OccName -> Maybe Name
lookupOrigNameCache nc mod occ =
    case isBuiltInOcc_maybe occ of
      -- See Note [Known-key names], 3(c) in PrelNames.
      -- Special case for tuples: there are too many of them to
      -- pre-populate the original-name cache.
      Just builtin -> Just builtin
      Nothing      ->
        lookupModuleEnv nc mod >>= \occ_env -> lookupOccEnv occ_env occ
-- | Insert an external Name into the cache, keyed by its own Module and
-- OccName.  It is a bug (asserted in debug builds) to pass an internal Name.
extendOrigNameCache :: OrigNameCache -> Name -> OrigNameCache
extendOrigNameCache nc name
  = ASSERT2( isExternalName name, ppr name )
    extendNameCache nc (nameModule name) (nameOccName name) name
-- | Insert a Name into the cache under an explicit (Module, OccName) key.
extendNameCache :: OrigNameCache -> Module -> OccName -> Name -> OrigNameCache
extendNameCache nc mod occ name =
    extendModuleEnvWith merge nc mod (unitOccEnv occ name)
  where
    -- Module already present: extend its existing OccName environment.
    merge _new existing = extendOccEnv existing occ name
-- | Read the current name cache out of the top-level environment.
getNameCache :: TcRnIf a b NameCache
getNameCache = do
    HscEnv { hsc_NC = nc_var } <- getTopEnv
    readMutVar nc_var
-- | Atomically apply a modifier function to the name cache, returning
-- the modifier's second result.
updNameCache :: (NameCache -> (NameCache, c)) -> TcRnIf a b c
updNameCache upd_fn =
    getTopEnv >>= \hsc_env -> atomicUpdMutVar' (hsc_NC hsc_env) upd_fn
-- | A function that atomically updates the name cache given a modifier
-- function.  The second result of the modifier function will be the result
-- of the IO action.  Wrapped in a newtype so it can carry the rank-2 type.
newtype NameCacheUpdater = NCU { updateNameCache :: forall c. (NameCache -> (NameCache, c)) -> IO c }
-- | Return a function to atomically update the name cache.
mkNameCacheUpdater :: TcRnIf a b NameCacheUpdater
mkNameCacheUpdater = do
  nc_var <- hsc_NC `fmap` getTopEnv
  let update_nc f = do r <- atomicModifyIORef nc_var f
                       -- atomicModifyIORef installs the new value lazily;
                       -- force the cache afterwards so thunks do not pile
                       -- up inside the IORef.
                       _ <- evaluate =<< readIORef nc_var
                       return r
  return (NCU update_nc)
-- | Build the initial name cache from a unique supply and the
-- known (wired-in) names.
initNameCache :: UniqSupply -> [Name] -> NameCache
initNameCache us names =
    NameCache { nsUniqs = us, nsNames = initOrigNames names }
-- | Build the original-name cache from a list of known Names.
-- Uses a strict left fold ('foldl''): the lazy 'foldl' would build a
-- chain of thunks proportional to the number of wired-in names before
-- the cache is first forced.
initOrigNames :: [Name] -> OrigNameCache
initOrigNames = foldl' extendOrigNameCache emptyModuleEnv
{-
************************************************************************
* *
Type variables and local Ids
* *
************************************************************************
-}
-- | Look up a local Id while typechecking an interface, failing with a
-- scope error if it is not in the local environment.
tcIfaceLclId :: FastString -> IfL Id
tcIfaceLclId occ = do
    lcl <- getLclEnv
    case lookupUFM (if_id_env lcl) occ of
      Nothing     -> failIfM (text "Iface id out of scope: " <+> ppr occ)
      Just ty_var -> return ty_var
-- | Run an action with the given Ids added to the interface-local
-- Id environment, keyed by their occurrence-name strings.
extendIfaceIdEnv :: [Id] -> IfL a -> IfL a
extendIfaceIdEnv ids thing_inside = do
    env <- getLclEnv
    let keyed   = [ (occNameFS (getOccName id), id) | id <- ids ]
        id_env' = addListToUFM (if_id_env env) keyed
    setLclEnv (env { if_id_env = id_env' }) thing_inside
-- | Look up a type variable while typechecking an interface, failing
-- with a scope error if it is not in the local environment.
tcIfaceTyVar :: FastString -> IfL TyVar
tcIfaceTyVar occ = do
    lcl <- getLclEnv
    case lookupUFM (if_tv_env lcl) occ of
      Nothing     -> failIfM (text "Iface type variable out of scope: " <+> ppr occ)
      Just ty_var -> return ty_var
-- | Non-failing variant of 'tcIfaceTyVar': Nothing if out of scope.
lookupIfaceTyVar :: FastString -> IfL (Maybe TyVar)
lookupIfaceTyVar occ =
    fmap (\lcl -> lookupUFM (if_tv_env lcl) occ) getLclEnv
-- | Run an action with the given type variables added to the
-- interface-local tyvar environment, keyed by their occurrence names.
extendIfaceTyVarEnv :: [TyVar] -> IfL a -> IfL a
extendIfaceTyVarEnv tyvars thing_inside = do
    env <- getLclEnv
    let keyed   = [ (occNameFS (getOccName tv), tv) | tv <- tyvars ]
        tv_env' = addListToUFM (if_tv_env env) keyed
    setLclEnv (env { if_tv_env = tv_env' }) thing_inside
{-
************************************************************************
* *
Getting from RdrNames to Names
* *
************************************************************************
-}
-- | Look up a top-level name from the current Iface module.
lookupIfaceTop :: OccName -> IfL Name
lookupIfaceTop occ =
    getLclEnv >>= \env -> lookupOrig (if_mod env) occ
-- | Mint a fresh internal Name (no SrcSpan) for the given OccName.
newIfaceName :: OccName -> IfL Name
newIfaceName occ =
    newUnique >>= \uniq -> return $! mkInternalName uniq occ noSrcSpan
-- | Mint a fresh internal Name for each OccName, drawing uniques from a
-- single new supply.
newIfaceNames :: [OccName] -> IfL [Name]
newIfaceNames occs = do
    supply <- newUniqueSupply
    return (zipWith (\occ uniq -> mkInternalName uniq occ noSrcSpan)
                    occs (uniqsFromSupply supply))
|
pparkkin/eta
|
compiler/ETA/Iface/IfaceEnv.hs
|
bsd-3-clause
| 12,473
| 0
| 21
| 3,685
| 2,043
| 1,100
| 943
| 155
| 2
|
{-
(c) The University of Glasgow 2006
(c) The GRASP/AQUA Project, Glasgow University, 1992-1998
-}
{-# LANGUAGE CPP, DeriveDataTypeable, ScopedTypeVariables #-}
{-# LANGUAGE StandaloneDeriving #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE UndecidableInstances #-} -- Note [Pass sensitive types]
-- in module PlaceHolder
{-# LANGUAGE ConstraintKinds #-}
-- | Abstract Haskell syntax for expressions.
module Eta.HsSyn.HsExpr where
#include "HsVersions.h"
-- friends:
import Eta.HsSyn.HsDecls
import Eta.HsSyn.HsPat
import Eta.HsSyn.HsLit
import Eta.HsSyn.PlaceHolder ( PostTc, PostRn, DataId)
import Eta.HsSyn.HsTypes
import Eta.HsSyn.HsBinds
-- others:
import Eta.TypeCheck.TcEvidence
import Eta.Core.CoreSyn
import Eta.BasicTypes.Var
import Eta.BasicTypes.RdrName
import Eta.BasicTypes.Name
import Eta.BasicTypes.BasicTypes
import Eta.BasicTypes.DataCon
import Eta.BasicTypes.SrcLoc
import Eta.Utils.Util
import Eta.Main.StaticFlags( opt_PprStyle_Debug )
import Eta.Utils.Outputable
import Eta.Utils.FastString
import Eta.Types.Type
import Eta.REPL.RemoteTypes ( ForeignRef )
-- libraries:
import Data.Data hiding (Fixity(..))
import qualified Data.Data as Data (Fixity(..))
import Data.Maybe (isNothing)
{-
************************************************************************
* *
\subsection{Expressions proper}
* *
************************************************************************
-}
-- * Expressions proper
type LHsExpr id = Located (HsExpr id)
-- ^ May have 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnComma' when
-- in a list
-- For details on above see note [Api annotations] in ApiAnnotation
-------------------------
-- | PostTcExpr is an evidence expression attached to the syntax tree by the
-- type checker (c.f. postTcType).
type PostTcExpr = HsExpr Id
-- | We use a PostTcTable where there are a bunch of pieces of evidence, more
-- than is convenient to keep individually.
type PostTcTable = [(Name, PostTcExpr)]
-- | Placeholder evidence expression, used before the type checker has
-- filled in the real thing.
noPostTcExpr :: PostTcExpr
noPostTcExpr = HsLit (HsString "" (fsLit "noPostTcExpr"))

-- | Empty evidence table.
noPostTcTable :: PostTcTable
noPostTcTable = []
-------------------------
-- | SyntaxExpr is like 'PostTcExpr', but it's filled in a little earlier,
-- by the renamer. It's used for rebindable syntax.
--
-- E.g. @(>>=)@ is filled in before the renamer by the appropriate 'Name' for
-- @(>>=)@, and then instantiated by the type checker with its type args
-- etc
type SyntaxExpr id = HsExpr id
-- | Placeholder for a rebindable-syntax slot.
noSyntaxExpr :: SyntaxExpr id -- Before renaming, and sometimes after,
                              -- (if the syntax slot makes no sense)
noSyntaxExpr = HsLit (HsString "" (fsLit "noSyntaxExpr"))
type CmdSyntaxTable id = [(Name, SyntaxExpr id)]
-- See Note [CmdSyntaxTable]
{-
Note [CmdSyntaxTable]
~~~~~~~~~~~~~~~~~~~~~
Used only for arrow-syntax stuff (HsCmdTop), the CmdSyntaxTable keeps
track of the methods needed for a Cmd.
* Before the renamer, this list is an empty list
* After the renamer, it takes the form @[(std_name, HsVar actual_name)]@
For example, for the 'arr' method
* normal case: (GHC.Control.Arrow.arr, HsVar GHC.Control.Arrow.arr)
* with rebindable syntax: (GHC.Control.Arrow.arr, arr_22)
where @arr_22@ is whatever 'arr' is in scope
* After the type checker, it takes the form [(std_name, <expression>)]
where <expression> is the evidence for the method. This evidence is
instantiated with the class, but is still polymorphic in everything
else. For example, in the case of 'arr', the evidence has type
forall b c. (b->c) -> a b c
where 'a' is the ambient type of the arrow. This polymorphism is
important because the desugarer uses the same evidence at multiple
different types.
This is Less Cool than what we normally do for rebindable syntax, which is to
make fully-instantiated piece of evidence at every use site. The Cmd way
is Less Cool because
* The renamer has to predict which methods are needed.
See the tedious RnExpr.methodNamesCmd.
* The desugarer has to know the polymorphic type of the instantiated
method. This is checked by Inst.tcSyntaxName, but is less flexible
than the rest of rebindable syntax, where the type is less
pre-ordained. (And this flexibility is useful; for example we can
typecheck do-notation with (>>=) :: m1 a -> (a -> m2 b) -> m2 b.)
-}
-- | A Haskell expression.
data HsExpr id
= HsVar id -- ^ Variable
| HsIPVar HsIPName -- ^ Implicit parameter
| HsOverLit (HsOverLit id) -- ^ Overloaded literals
| HsLit HsLit -- ^ Simple (non-overloaded) literals
| HsLam (MatchGroup id (LHsExpr id)) -- ^ Lambda abstraction. Currently always a single match
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnLam',
-- 'ApiAnnotation.AnnRarrow',
-- For details on above see note [Api annotations] in ApiAnnotation
| HsLamCase (PostTc id Type) (MatchGroup id (LHsExpr id)) -- ^ Lambda-case
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnLam',
-- 'ApiAnnotation.AnnCase','ApiAnnotation.AnnOpen',
-- 'ApiAnnotation.AnnClose'
-- For details on above see note [Api annotations] in ApiAnnotation
| HsApp (LHsExpr id) (LHsExpr id) -- ^ Application
-- | Operator applications:
-- NB Bracketed ops such as (+) come out as Vars.
-- NB We need an expr for the operator in an OpApp/Section since
-- the typechecker may need to apply the operator to a few types.
| OpApp (LHsExpr id) -- left operand
(LHsExpr id) -- operator
(PostRn id Fixity) -- Renamer adds fixity; bottom until then
(LHsExpr id) -- right operand
-- | Negation operator. Contains the negated expression and the name
-- of 'negate'
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnMinus'
-- For details on above see note [Api annotations] in ApiAnnotation
| NegApp (LHsExpr id)
(SyntaxExpr id)
-- | - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnOpen' @'('@,
-- 'ApiAnnotation.AnnClose' @')'@
-- For details on above see note [Api annotations] in ApiAnnotation
| HsPar (LHsExpr id) -- ^ Parenthesised expr; see Note [Parens in HsSyn]
| SectionL (LHsExpr id) -- operand; see Note [Sections in HsSyn]
(LHsExpr id) -- operator
| SectionR (LHsExpr id) -- operator; see Note [Sections in HsSyn]
(LHsExpr id) -- operand
-- | Used for explicit tuples and sections thereof
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnOpen',
-- 'ApiAnnotation.AnnClose'
-- For details on above see note [Api annotations] in ApiAnnotation
| ExplicitTuple
[LHsTupArg id]
Boxity
-- | - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnCase',
-- 'ApiAnnotation.AnnOf','ApiAnnotation.AnnOpen' @'{'@,
-- 'ApiAnnotation.AnnClose' @'}'@
-- For details on above see note [Api annotations] in ApiAnnotation
| HsCase (LHsExpr id)
(MatchGroup id (LHsExpr id))
-- | - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnIf',
-- 'ApiAnnotation.AnnSemi',
-- 'ApiAnnotation.AnnThen','ApiAnnotation.AnnSemi',
-- 'ApiAnnotation.AnnElse',
-- For details on above see note [Api annotations] in ApiAnnotation
| HsIf (Maybe (SyntaxExpr id)) -- cond function
-- Nothing => use the built-in 'if'
-- See Note [Rebindable if]
(LHsExpr id) -- predicate
(LHsExpr id) -- then part
(LHsExpr id) -- else part
-- | Multi-way if
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnIf'
-- 'ApiAnnotation.AnnOpen','ApiAnnotation.AnnClose',
-- For details on above see note [Api annotations] in ApiAnnotation
| HsMultiIf (PostTc id Type) [LGRHS id (LHsExpr id)]
-- | let(rec)
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnLet',
-- 'ApiAnnotation.AnnOpen' @'{'@,
-- 'ApiAnnotation.AnnClose' @'}'@,'ApiAnnotation.AnnIn'
-- For details on above see note [Api annotations] in ApiAnnotation
| HsLet (HsLocalBinds id)
(LHsExpr id)
-- | - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnDo',
-- 'ApiAnnotation.AnnOpen', 'ApiAnnotation.AnnSemi',
-- 'ApiAnnotation.AnnVbar',
-- 'ApiAnnotation.AnnClose'
-- For details on above see note [Api annotations] in ApiAnnotation
| HsDo (HsStmtContext Name) -- The parameterisation is unimportant
-- because in this context we never use
-- the PatGuard or ParStmt variant
[ExprLStmt id] -- "do":one or more stmts
(PostTc id Type) -- Type of the whole expression
-- | Syntactic list: [a,b,c,...]
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnOpen' @'['@,
-- 'ApiAnnotation.AnnClose' @']'@
-- For details on above see note [Api annotations] in ApiAnnotation
| ExplicitList
(PostTc id Type) -- Gives type of components of list
(Maybe (SyntaxExpr id)) -- For OverloadedLists, the fromListN witness
[LHsExpr id]
-- | Syntactic parallel array: [:e1, ..., en:]
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnOpen' @'[:'@,
-- 'ApiAnnotation.AnnDotdot','ApiAnnotation.AnnComma',
-- 'ApiAnnotation.AnnVbar'
-- 'ApiAnnotation.AnnClose' @':]'@
-- For details on above see note [Api annotations] in ApiAnnotation
| ExplicitPArr
(PostTc id Type) -- type of elements of the parallel array
[LHsExpr id]
-- | Record construction
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnOpen' @'{'@,
-- 'ApiAnnotation.AnnDotdot','ApiAnnotation.AnnClose' @'}'@
-- For details on above see note [Api annotations] in ApiAnnotation
| RecordCon (Located id) -- The constructor. After type checking
-- it's the dataConWrapId of the constructor
PostTcExpr -- Data con Id applied to type args
(HsRecordBinds id)
-- | Record update
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnOpen' @'{'@,
-- 'ApiAnnotation.AnnDotdot','ApiAnnotation.AnnClose' @'}'@
-- For details on above see note [Api annotations] in ApiAnnotation
| RecordUpd (LHsExpr id)
(HsRecordBinds id)
-- (HsMatchGroup Id) -- Filled in by the type checker to be
-- -- a match that does the job
[DataCon] -- Filled in by the type checker to the
-- _non-empty_ list of DataCons that have
-- all the upd'd fields
[PostTc id Type] -- Argument types of *input* record type
[PostTc id Type] -- and *output* record type
-- For a type family, the arg types are of the *instance* tycon,
-- not the family tycon
-- | Expression with an explicit type signature. @e :: type@
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnDcolon'
-- For details on above see note [Api annotations] in ApiAnnotation
| ExprWithTySig
(LHsExpr id)
(LHsType id)
(PostRn id [Name]) -- After renaming, the list of Names
-- contains the named and unnamed
-- wildcards brought in scope by the
-- signature
| ExprWithTySigOut -- TRANSLATION
(LHsExpr id)
(LHsType Name) -- Retain the signature for
-- round-tripping purposes
-- | Arithmetic sequence
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnOpen' @'['@,
-- 'ApiAnnotation.AnnComma','ApiAnnotation.AnnDotdot',
-- 'ApiAnnotation.AnnClose' @']'@
-- For details on above see note [Api annotations] in ApiAnnotation
| ArithSeq
PostTcExpr
(Maybe (SyntaxExpr id)) -- For OverloadedLists, the fromList witness
(ArithSeqInfo id)
-- | Arithmetic sequence for parallel array
--
-- > [:e1..e2:] or [:e1, e2..e3:]
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnOpen' @'[:'@,
-- 'ApiAnnotation.AnnComma','ApiAnnotation.AnnDotdot',
-- 'ApiAnnotation.AnnVbar',
-- 'ApiAnnotation.AnnClose' @':]'@
-- For details on above see note [Api annotations] in ApiAnnotation
| PArrSeq
PostTcExpr
(ArithSeqInfo id)
-- | - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnOpen' @'{-\# SCC'@,
-- 'ApiAnnotation.AnnVal' or 'ApiAnnotation.AnnValStr',
-- 'ApiAnnotation.AnnClose' @'\#-}'@
-- For details on above see note [Api annotations] in ApiAnnotation
| HsSCC SourceText -- Note [Pragma source text] in BasicTypes
FastString -- "set cost centre" SCC pragma
(LHsExpr id) -- expr whose cost is to be measured
-- | - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnOpen' @'{-\# CORE'@,
-- 'ApiAnnotation.AnnVal', 'ApiAnnotation.AnnClose' @'\#-}'@
-- For details on above see note [Api annotations] in ApiAnnotation
| HsCoreAnn SourceText -- Note [Pragma source text] in BasicTypes
FastString -- hdaume: core annotation
(LHsExpr id)
-----------------------------------------------------------
-- MetaHaskell Extensions
-- | - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnOpen',
-- 'ApiAnnotation.AnnOpen','ApiAnnotation.AnnClose',
-- 'ApiAnnotation.AnnClose'
-- For details on above see note [Api annotations] in ApiAnnotation
| HsBracket (HsBracket id)
-- See Note [Pending Splices]
| HsRnBracketOut
(HsBracket Name) -- Output of the renamer is the *original* renamed
-- expression, plus
[PendingRnSplice] -- _renamed_ splices to be type checked
| HsTcBracketOut
(HsBracket Name) -- Output of the type checker is the *original*
-- renamed expression, plus
[PendingTcSplice] -- _typechecked_ splices to be
-- pasted back in by the desugarer
-- | - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnOpen',
-- 'ApiAnnotation.AnnClose'
-- For details on above see note [Api annotations] in ApiAnnotation
| HsSpliceE (HsSplice id)
-----------------------------------------------------------
-- Arrow notation extension
-- | @proc@ notation for Arrows
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnProc',
-- 'ApiAnnotation.AnnRarrow'
-- For details on above see note [Api annotations] in ApiAnnotation
| HsProc (LPat id) -- arrow abstraction, proc
(LHsCmdTop id) -- body of the abstraction
-- always has an empty stack
---------------------------------------
-- static pointers extension
-- | - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnStatic',
-- For details on above see note [Api annotations] in ApiAnnotation
| HsStatic (LHsExpr id)
---------------------------------------
-- The following are commands, not expressions proper
-- They are only used in the parsing stage and are removed
-- immediately in parser.RdrHsSyn.checkCommand
-- | - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.Annlarrowtail',
-- 'ApiAnnotation.Annrarrowtail','ApiAnnotation.AnnLarrowtail',
-- 'ApiAnnotation.AnnRarrowtail'
-- For details on above see note [Api annotations] in ApiAnnotation
| HsArrApp -- Arrow tail, or arrow application (f -< arg)
(LHsExpr id) -- arrow expression, f
(LHsExpr id) -- input expression, arg
(PostTc id Type) -- type of the arrow expressions f,
-- of the form a t t', where arg :: t
HsArrAppType -- higher-order (-<<) or first-order (-<)
Bool -- True => right-to-left (f -< arg)
-- False => left-to-right (arg >- f)
-- | - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnOpen' @'(|'@,
-- 'ApiAnnotation.AnnClose' @'|)'@
-- For details on above see note [Api annotations] in ApiAnnotation
| HsArrForm -- Command formation, (| e cmd1 .. cmdn |)
(LHsExpr id) -- the operator
-- after type-checking, a type abstraction to be
-- applied to the type of the local environment tuple
(Maybe Fixity) -- fixity (filled in by the renamer), for forms that
-- were converted from OpApp's by the renamer
[LHsCmdTop id] -- argument commands
---------------------------------------
-- Haskell program coverage (Hpc) Support
| HsTick
(Tickish id)
(LHsExpr id) -- sub-expression
| HsBinTick
Int -- module-local tick number for True
Int -- module-local tick number for False
(LHsExpr id) -- sub-expression
-- | - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnOpen',
-- 'ApiAnnotation.AnnOpen' @'{-\# GENERATED'@,
-- 'ApiAnnotation.AnnVal','ApiAnnotation.AnnVal',
-- 'ApiAnnotation.AnnColon','ApiAnnotation.AnnVal',
-- 'ApiAnnotation.AnnMinus',
-- 'ApiAnnotation.AnnVal','ApiAnnotation.AnnColon',
-- 'ApiAnnotation.AnnVal',
-- 'ApiAnnotation.AnnClose' @'\#-}'@
-- For details on above see note [Api annotations] in ApiAnnotation
| HsTickPragma -- A pragma introduced tick
SourceText -- Note [Pragma source text] in BasicTypes
(FastString,(Int,Int),(Int,Int)) -- external span for this tick
(LHsExpr id)
---------------------------------------
-- These constructors only appear temporarily in the parser.
-- The renamer translates them into the Right Thing.
| EWildPat -- wildcard
-- | - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnAt'
-- For details on above see note [Api annotations] in ApiAnnotation
| EAsPat (Located id) -- as pattern
(LHsExpr id)
-- | - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnRarrow'
-- For details on above see note [Api annotations] in ApiAnnotation
| EViewPat (LHsExpr id) -- view pattern
(LHsExpr id)
-- | - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnTilde'
-- For details on above see note [Api annotations] in ApiAnnotation
| ELazyPat (LHsExpr id) -- ~ pattern
| HsType (LHsType id) -- Explicit type argument; e.g f {| Int |} x y
---------------------------------------
-- Finally, HsWrap appears only in typechecker output
| HsWrap HsWrapper -- TRANSLATION
(HsExpr id)
| HsUnboundVar RdrName
| HsOverLabel FastString -- ^ Overloaded label (See Note [Overloaded labels]
deriving (Typeable)
deriving instance (DataId id) => Data (HsExpr id)
-- | HsTupArg is used for tuple sections
-- (,a,) is represented by ExplicitTuple [Missing ty1, Present a, Missing ty3]
-- Which in turn stands for (\x:ty1 \y:ty2. (x,a,y))
type LHsTupArg id = Located (HsTupArg id)
-- | - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnComma'
-- For details on above see note [Api annotations] in ApiAnnotation
-- | One argument slot of an explicit tuple (possibly a tuple section).
data HsTupArg id
  = Present (LHsExpr id)     -- ^ The argument
  | Missing (PostTc id Type) -- ^ The argument is missing, but this is its type
  deriving (Typeable)
deriving instance (DataId id) => Data (HsTupArg id)
-- | Is this tuple slot filled (as opposed to a section hole)?
tupArgPresent :: LHsTupArg id -> Bool
tupArgPresent (L _ arg) = case arg of
  Present {} -> True
  Missing {} -> False
{-
Note [Parens in HsSyn]
~~~~~~~~~~~~~~~~~~~~~~
HsPar (and ParPat in patterns, HsParTy in types) is used as follows
* Generally HsPar is optional; the pretty printer adds parens where
necessary. Eg (HsApp f (HsApp g x)) is fine, and prints 'f (g x)'
* HsPars are pretty printed as '( .. )' regardless of whether
  or not they are strictly necessary
* HsPars are respected when rearranging operator fixities.
So a * (b + c) means what it says (where the parens are an HsPar)
Note [Sections in HsSyn]
~~~~~~~~~~~~~~~~~~~~~~~~
Sections should always appear wrapped in an HsPar, thus
HsPar (SectionR ...)
The parser parses sections in a wider variety of situations
(See Note [Parsing sections]), but the renamer checks for those
parens. This invariant makes pretty-printing easier; we don't need
a special case for adding the parens round sections.
Note [Rebindable if]
~~~~~~~~~~~~~~~~~~~~
The rebindable syntax for 'if' is a bit special, because when
rebindable syntax is *off* we do not want to treat
(if c then t else e)
as if it was an application (ifThenElse c t e). Why not?
Because we allow an 'if' to return *unboxed* results, thus
if blah then 3# else 4#
whereas that would not be possible using a call to a polymorphic function
(because you can't call a polymorphic function at an unboxed type).
So we use Nothing to mean "use the old built-in typing rule".
-}
instance OutputableBndr id => Outputable (HsExpr id) where
    ppr = pprExpr
-----------------------
-- pprExpr, pprLExpr, pprBinds call pprDeeper;
-- the underscore versions do not
-- | Pretty-print a located expression (drops the location).
pprLExpr :: OutputableBndr id => LHsExpr id -> SDoc
pprLExpr = pprExpr . unLoc
-- | Pretty-print an expression, eliding detail (via 'pprDeeper') for
-- anything that is neither atomic nor "quiet".
pprExpr :: OutputableBndr id => HsExpr id -> SDoc
pprExpr e = if isAtomicHsExpr e || isQuietHsExpr e
              then printed
              else pprDeeper printed
  where
    printed = ppr_expr e
-- Parentheses do display something, but it gives little info and
-- if we go deeper when we go inside them then we get ugly things
-- like (...); applications don't display anything themselves.
isQuietHsExpr :: HsExpr id -> Bool
isQuietHsExpr e = case e of
  HsPar _       -> True
  HsApp _ _     -> True
  OpApp _ _ _ _ -> True
  _             -> False
-- | Pretty-print local bindings, eliding deep detail.
pprBinds :: (OutputableBndr idL, OutputableBndr idR)
         => HsLocalBindsLR idL idR -> SDoc
pprBinds = pprDeeper . ppr
-----------------------
-- | Worker variant of 'pprLExpr' that never calls 'pprDeeper'.
ppr_lexpr :: OutputableBndr id => LHsExpr id -> SDoc
ppr_lexpr = ppr_expr . unLoc
ppr_expr :: forall id. OutputableBndr id => HsExpr id -> SDoc
ppr_expr (HsVar v) = pprPrefixOcc v
ppr_expr (HsIPVar v) = ppr v
ppr_expr (HsOverLabel l) = char '#' <> ppr l
ppr_expr (HsLit lit) = ppr lit
ppr_expr (HsOverLit lit) = ppr lit
ppr_expr (HsPar e) = parens (ppr_lexpr e)
ppr_expr (HsCoreAnn _ s e)
= vcat [ptext (sLit "HsCoreAnn") <+> ftext s, ppr_lexpr e]
ppr_expr (HsApp e1 e2)
= let (fun, args) = collect_args e1 [e2] in
hang (ppr_lexpr fun) 2 (sep (map pprParendExpr args))
where
collect_args (L _ (HsApp fun arg)) args = collect_args fun (arg:args)
collect_args fun args = (fun, args)
ppr_expr (OpApp e1 op _ e2)
= case unLoc op of
HsVar v -> pp_infixly v
_ -> pp_prefixly
where
pp_e1 = pprDebugParendExpr e1 -- In debug mode, add parens
pp_e2 = pprDebugParendExpr e2 -- to make precedence clear
pp_prefixly
= hang (ppr op) 2 (sep [pp_e1, pp_e2])
pp_infixly v
= sep [pp_e1, sep [pprInfixOcc v, nest 2 pp_e2]]
ppr_expr (NegApp e _) = char '-' <+> pprDebugParendExpr e
ppr_expr (SectionL expr op)
= case unLoc op of
HsVar v -> pp_infixly v
_ -> pp_prefixly
where
pp_expr = pprDebugParendExpr expr
pp_prefixly = hang (hsep [text " \\ x_ ->", ppr op])
4 (hsep [pp_expr, ptext (sLit "x_ )")])
pp_infixly v = (sep [pp_expr, pprInfixOcc v])
ppr_expr (SectionR op expr)
= case unLoc op of
HsVar v -> pp_infixly v
_ -> pp_prefixly
where
pp_expr = pprDebugParendExpr expr
pp_prefixly = hang (hsep [text "( \\ x_ ->", ppr op, ptext (sLit "x_")])
4 (pp_expr <> rparen)
pp_infixly v = sep [pprInfixOcc v, pp_expr]
ppr_expr (ExplicitTuple exprs boxity)
= tupleParens (boxityNormalTupleSort boxity)
(fcat (ppr_tup_args $ map unLoc exprs))
where
ppr_tup_args [] = []
ppr_tup_args (Present e : es) = (ppr_lexpr e <> punc es) : ppr_tup_args es
ppr_tup_args (Missing _ : es) = punc es : ppr_tup_args es
punc (Present {} : _) = comma <> space
punc (Missing {} : _) = comma
punc [] = empty
--avoid using PatternSignatures for stage1 code portability
ppr_expr (HsLam matches)
= pprMatches (LambdaExpr :: HsMatchContext id) matches
ppr_expr (HsLamCase _ matches)
= sep [ sep [ptext (sLit "\\case {")],
nest 2 (pprMatches (CaseAlt :: HsMatchContext id) matches <+> char '}') ]
ppr_expr (HsCase expr matches)
= sep [ sep [ptext (sLit "case"), nest 4 (ppr expr), ptext (sLit "of {")],
nest 2 (pprMatches (CaseAlt :: HsMatchContext id) matches <+> char '}') ]
ppr_expr (HsIf _ e1 e2 e3)
= sep [hsep [ptext (sLit "if"), nest 2 (ppr e1), ptext (sLit "then")],
nest 4 (ppr e2),
ptext (sLit "else"),
nest 4 (ppr e3)]
ppr_expr (HsMultiIf _ alts)
= sep $ ptext (sLit "if") : map ppr_alt alts
where ppr_alt (L _ (GRHS guards expr)) =
sep [ char '|' <+> interpp'SP guards
, ptext (sLit "->") <+> pprDeeper (ppr expr) ]
-- special case: let ... in let ...
ppr_expr (HsLet binds expr@(L _ (HsLet _ _)))
= sep [hang (ptext (sLit "let")) 2 (hsep [pprBinds binds, ptext (sLit "in")]),
ppr_lexpr expr]
ppr_expr (HsLet binds expr)
= sep [hang (ptext (sLit "let")) 2 (pprBinds binds),
hang (ptext (sLit "in")) 2 (ppr expr)]
ppr_expr (HsDo do_or_list_comp stmts _) = pprDo do_or_list_comp stmts
ppr_expr (ExplicitList _ _ exprs)
= brackets (pprDeeperList fsep (punctuate comma (map ppr_lexpr exprs)))
ppr_expr (ExplicitPArr _ exprs)
= paBrackets (pprDeeperList fsep (punctuate comma (map ppr_lexpr exprs)))
ppr_expr (RecordCon con_id _ rbinds)
= hang (ppr con_id) 2 (ppr rbinds)
ppr_expr (RecordUpd aexp rbinds _ _ _)
= hang (pprParendExpr aexp) 2 (ppr rbinds)
ppr_expr (ExprWithTySig expr sig _)
= hang (nest 2 (ppr_lexpr expr) <+> dcolon)
4 (ppr sig)
ppr_expr (ExprWithTySigOut expr sig)
= hang (nest 2 (ppr_lexpr expr) <+> dcolon)
4 (ppr sig)
ppr_expr (ArithSeq _ _ info) = brackets (ppr info)
ppr_expr (PArrSeq _ info) = paBrackets (ppr info)
ppr_expr EWildPat = char '_'
ppr_expr (ELazyPat e) = char '~' <> pprParendExpr e
ppr_expr (EAsPat v e) = ppr v <> char '@' <> pprParendExpr e
ppr_expr (EViewPat p e) = ppr p <+> ptext (sLit "->") <+> ppr e
ppr_expr (HsSCC _ lbl expr)
= sep [ ptext (sLit "{-# SCC") <+> doubleQuotes (ftext lbl) <+> ptext (sLit "#-}"),
pprParendExpr expr ]
ppr_expr (HsWrap co_fn e) = pprHsWrapper (pprExpr e) co_fn
ppr_expr (HsType id) = ppr id
ppr_expr (HsSpliceE s) = pprSplice s
ppr_expr (HsBracket b) = pprHsBracket b
ppr_expr (HsRnBracketOut e []) = ppr e
ppr_expr (HsRnBracketOut e ps) = ppr e $$ ptext (sLit "pending(rn)") <+> ppr ps
ppr_expr (HsTcBracketOut e []) = ppr e
ppr_expr (HsTcBracketOut e ps) = ppr e $$ ptext (sLit "pending(tc)") <+> ppr ps
ppr_expr (HsProc pat (L _ (HsCmdTop cmd _ _ _)))
= hsep [ptext (sLit "proc"), ppr pat, ptext (sLit "->"), ppr cmd]
ppr_expr (HsStatic e)
= hsep [ptext (sLit "static"), pprParendExpr e]
ppr_expr (HsTick tickish exp)
= pprTicks (ppr exp) $
ppr tickish <+> ppr_lexpr exp
ppr_expr (HsBinTick tickIdTrue tickIdFalse exp)
= pprTicks (ppr exp) $
hcat [ptext (sLit "bintick<"),
ppr tickIdTrue,
ptext (sLit ","),
ppr tickIdFalse,
ptext (sLit ">("),
ppr exp,ptext (sLit ")")]
ppr_expr (HsTickPragma _ externalSrcLoc exp)
= pprTicks (ppr exp) $
hcat [ptext (sLit "tickpragma<"),
ppr externalSrcLoc,
ptext (sLit ">("),
ppr exp,
ptext (sLit ")")]
ppr_expr (HsArrApp arrow arg _ HsFirstOrderApp True)
= hsep [ppr_lexpr arrow, larrowt, ppr_lexpr arg]
ppr_expr (HsArrApp arrow arg _ HsFirstOrderApp False)
= hsep [ppr_lexpr arg, arrowt, ppr_lexpr arrow]
ppr_expr (HsArrApp arrow arg _ HsHigherOrderApp True)
= hsep [ppr_lexpr arrow, larrowtt, ppr_lexpr arg]
ppr_expr (HsArrApp arrow arg _ HsHigherOrderApp False)
= hsep [ppr_lexpr arg, arrowtt, ppr_lexpr arrow]
ppr_expr (HsArrForm (L _ (HsVar v)) (Just _) [arg1, arg2])
= sep [pprCmdArg (unLoc arg1), hsep [pprInfixOcc v, pprCmdArg (unLoc arg2)]]
ppr_expr (HsArrForm op _ args)
= hang (ptext (sLit "(|") <+> ppr_lexpr op)
4 (sep (map (pprCmdArg.unLoc) args) <+> ptext (sLit "|)"))
ppr_expr (HsUnboundVar nm)
= ppr nm
{-
HsSyn records exactly where the user put parens, with HsPar.
So generally speaking we print without adding any parens.
However, some code is internally generated, and in some places
parens are absolutely required; so for these places we use
pprParendExpr (but don't print double parens of course).
For operator applications we don't add parens, because the operator
fixities should do the job, except in debug mode (-dppr-debug) so we
can see the structure of the parse tree.
-}
-- | Print an expression, parenthesising it only in debug style
-- (-dppr-debug), where seeing the parse-tree structure matters.
pprDebugParendExpr :: OutputableBndr id => LHsExpr id -> SDoc
pprDebugParendExpr expr = getPprStyle render
  where
    render sty
      | debugStyle sty = pprParendExpr expr
      | otherwise      = pprLExpr expr
-- | Print an expression, wrapping it in parentheses exactly when
-- 'hsExprNeedsParens' says the unparenthesised form would differ.
pprParendExpr :: OutputableBndr id => LHsExpr id -> SDoc
pprParendExpr expr =
    if hsExprNeedsParens (unLoc expr) then parens doc else doc
  where
    -- Using pprLExpr makes sure that we go 'deeper'
    -- I think that is usually (always?) right
    doc = pprLExpr expr
-- | False for expressions where '(e)' and 'e' mean the same thing,
-- i.e. atomic or self-delimiting forms; True otherwise.
hsExprNeedsParens :: HsExpr id -> Bool
hsExprNeedsParens expr = case expr of
  ArithSeq {}       -> False
  PArrSeq {}        -> False
  HsLit {}          -> False
  HsOverLit {}      -> False
  HsVar {}          -> False
  HsUnboundVar {}   -> False
  HsIPVar {}        -> False
  HsOverLabel {}    -> False
  ExplicitTuple {}  -> False
  ExplicitList {}   -> False
  ExplicitPArr {}   -> False
  HsPar {}          -> False
  HsBracket {}      -> False
  HsRnBracketOut {} -> False
  HsTcBracketOut {} -> False
  -- List-comprehension-style do blocks are bracketed already;
  -- other do blocks still need parens
  HsDo sc _ _       -> not (isListCompExpr sc)
  _                 -> True
-- | True of expressions that print as a single token
-- (looking through wrappers and parens).
isAtomicHsExpr :: HsExpr id -> Bool
isAtomicHsExpr expr = case expr of
  HsVar {}        -> True
  HsLit {}        -> True
  HsOverLit {}    -> True
  HsIPVar {}      -> True
  HsOverLabel {}  -> True
  HsUnboundVar {} -> True
  HsWrap _ inner  -> isAtomicHsExpr inner
  HsPar inner     -> isAtomicHsExpr (unLoc inner)
  _               -> False
{-
************************************************************************
* *
\subsection{Commands (in arrow abstractions)}
* *
************************************************************************
We re-use HsExpr to represent these.
-}
-- | A command (arrow-notation body) with a source location attached.
type LHsCmd id = Located (HsCmd id)

-- | A command, in the sense of arrow notation (the body of a @proc@).
data HsCmd id
  -- | - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.Annlarrowtail',
  --         'ApiAnnotation.Annrarrowtail','ApiAnnotation.AnnLarrowtail',
  --         'ApiAnnotation.AnnRarrowtail'

  -- For details on above see note [Api annotations] in ApiAnnotation
  = HsCmdArrApp          -- Arrow tail, or arrow application (f -< arg)
        (LHsExpr id)     -- arrow expression, f
        (LHsExpr id)     -- input expression, arg
        (PostTc id Type) -- type of the arrow expressions f,
                         -- of the form a t t', where arg :: t
        HsArrAppType     -- higher-order (-<<) or first-order (-<)
        Bool             -- True => right-to-left (f -< arg)
                         -- False => left-to-right (arg >- f)

  -- | - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnOpen' @'(|'@,
  --         'ApiAnnotation.AnnClose' @'|)'@

  -- For details on above see note [Api annotations] in ApiAnnotation
  | HsCmdArrForm         -- Command formation,  (| e cmd1 .. cmdn |)
        (LHsExpr id)     -- the operator
                         -- after type-checking, a type abstraction to be
                         -- applied to the type of the local environment tuple
        (Maybe Fixity)   -- fixity (filled in by the renamer), for forms that
                         -- were converted from OpApp's by the renamer
        [LHsCmdTop id]   -- argument commands

  | HsCmdApp    (LHsCmd id)
                (LHsExpr id)

  | HsCmdLam    (MatchGroup id (LHsCmd id))     -- kappa
       -- ^ - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnLam',
       --       'ApiAnnotation.AnnRarrow',

       -- For details on above see note [Api annotations] in ApiAnnotation

  | HsCmdPar    (LHsCmd id)                     -- parenthesised command
    -- ^ - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnOpen' @'('@,
    --             'ApiAnnotation.AnnClose' @')'@

    -- For details on above see note [Api annotations] in ApiAnnotation

  | HsCmdCase   (LHsExpr id)
                (MatchGroup id (LHsCmd id))     -- bodies are HsCmd's
    -- ^ - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnCase',
    --       'ApiAnnotation.AnnOf','ApiAnnotation.AnnOpen' @'{'@,
    --       'ApiAnnotation.AnnClose' @'}'@

    -- For details on above see note [Api annotations] in ApiAnnotation

  | HsCmdIf     (Maybe (SyntaxExpr id))         -- cond function
                (LHsExpr id)                    -- predicate
                (LHsCmd id)                     -- then part
                (LHsCmd id)                     -- else part
    -- ^ - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnIf',
    --       'ApiAnnotation.AnnSemi',
    --       'ApiAnnotation.AnnThen','ApiAnnotation.AnnSemi',
    --       'ApiAnnotation.AnnElse',

    -- For details on above see note [Api annotations] in ApiAnnotation

  | HsCmdLet    (HsLocalBinds id)               -- let(rec)
                (LHsCmd  id)
    -- ^ - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnLet',
    --       'ApiAnnotation.AnnOpen' @'{'@,
    --       'ApiAnnotation.AnnClose' @'}'@,'ApiAnnotation.AnnIn'

    -- For details on above see note [Api annotations] in ApiAnnotation

  | HsCmdDo     [CmdLStmt id]
                (PostTc id Type)                -- Type of the whole expression
    -- ^ - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnDo',
    --             'ApiAnnotation.AnnOpen', 'ApiAnnotation.AnnSemi',
    --             'ApiAnnotation.AnnVbar',
    --             'ApiAnnotation.AnnClose'

    -- For details on above see note [Api annotations] in ApiAnnotation

  | HsCmdCast   TcCoercion     -- A simpler version of HsWrap in HsExpr
                (HsCmd id)     -- If   cmd :: arg1 --> res
                               --      co :: arg1 ~ arg2
                               -- Then (HsCmdCast co cmd) :: arg2 --> res
  deriving (Typeable)
deriving instance (DataId id) => Data (HsCmd id)
-- | Whether an arrow application is higher-order (@-<<@) or
-- first-order (@-<@); see 'HsCmdArrApp'.
data HsArrAppType = HsHigherOrderApp | HsFirstOrderApp
  deriving (Data, Typeable)
{- | Top-level command, introducing a new arrow.
This may occur inside a proc (where the stack is empty) or as an
argument of a command-forming operator.
-}
-- | A top-level command with a source location attached.
type LHsCmdTop id = Located (HsCmdTop id)

data HsCmdTop id
  = HsCmdTop (LHsCmd id)
             (PostTc id Type)    -- Nested tuple of inputs on the command's stack
             (PostTc id Type)    -- return type of the command
             (CmdSyntaxTable id) -- See Note [CmdSyntaxTable]
  deriving (Typeable)
deriving instance (DataId id) => Data (HsCmdTop id)
instance OutputableBndr id => Outputable (HsCmd id) where
    ppr = pprCmd
-----------------------
-- pprCmd and pprLCmd call pprDeeper;
-- the underscore versions do not
-- | Print a located command; see 'pprCmd'.
pprLCmd :: OutputableBndr id => LHsCmd id -> SDoc
pprLCmd = pprCmd . unLoc
-- | Print a command, going 'pprDeeper' unless the command is "quiet"
-- (parens and applications, which print little themselves).
pprCmd :: OutputableBndr id => HsCmd id -> SDoc
pprCmd c =
  if isQuietHsCmd c
    then ppr_cmd c
    else pprDeeper (ppr_cmd c)
isQuietHsCmd :: HsCmd id -> Bool
-- Parentheses do display something, but it gives little info and
-- if we go deeper when we go inside them then we get ugly things
-- like (...)
isQuietHsCmd cmd = case cmd of
  HsCmdPar _   -> True
  -- applications don't display anything themselves
  HsCmdApp _ _ -> True
  _            -> False
-----------------------
-- | Print a located command without going 'pprDeeper'.
ppr_lcmd :: OutputableBndr id => LHsCmd id -> SDoc
ppr_lcmd = ppr_cmd . unLoc
-- | Pretty-print a command.  'ppr_cmd' deliberately mirrors 'ppr_expr'
-- for the forms the two types share; keep the corresponding cases in sync.
ppr_cmd :: forall id. OutputableBndr id => HsCmd id -> SDoc
ppr_cmd (HsCmdPar c) = parens (ppr_lcmd c)

ppr_cmd (HsCmdApp c e)
  = let (fun, args) = collect_args c [e] in
    hang (ppr_lcmd fun) 2 (sep (map pprParendExpr args))
  where
    -- Flatten nested applications so ((f a) b) prints as "f a b"
    collect_args (L _ (HsCmdApp fun arg)) args = collect_args fun (arg:args)
    collect_args fun args = (fun, args)

--avoid using PatternSignatures for stage1 code portability
ppr_cmd (HsCmdLam matches)
  = pprMatches (LambdaExpr :: HsMatchContext id) matches

ppr_cmd (HsCmdCase expr matches)
  = sep [ sep [ptext (sLit "case"), nest 4 (ppr expr), ptext (sLit "of {")],
          nest 2 (pprMatches (CaseAlt :: HsMatchContext id) matches <+> char '}') ]

ppr_cmd (HsCmdIf _ e ct ce)
  = sep [hsep [ptext (sLit "if"), nest 2 (ppr e), ptext (sLit "then")],
         nest 4 (ppr ct),
         ptext (sLit "else"),
         nest 4 (ppr ce)]

-- special case: let ... in let ...
ppr_cmd (HsCmdLet binds cmd@(L _ (HsCmdLet _ _)))
  = sep [hang (ptext (sLit "let")) 2 (hsep [pprBinds binds, ptext (sLit "in")]),
         ppr_lcmd cmd]

ppr_cmd (HsCmdLet binds cmd)
  = sep [hang (ptext (sLit "let")) 2 (pprBinds binds),
         hang (ptext (sLit "in")) 2 (ppr cmd)]

ppr_cmd (HsCmdDo stmts _)  = pprDo ArrowExpr stmts
ppr_cmd (HsCmdCast co cmd) = sep [ ppr_cmd cmd
                                 , ptext (sLit "|>") <+> ppr co ]

ppr_cmd (HsCmdArrApp arrow arg _ HsFirstOrderApp True)
  = hsep [ppr_lexpr arrow, larrowt, ppr_lexpr arg]
ppr_cmd (HsCmdArrApp arrow arg _ HsFirstOrderApp False)
  = hsep [ppr_lexpr arg, arrowt, ppr_lexpr arrow]
ppr_cmd (HsCmdArrApp arrow arg _ HsHigherOrderApp True)
  = hsep [ppr_lexpr arrow, larrowtt, ppr_lexpr arg]
ppr_cmd (HsCmdArrApp arrow arg _ HsHigherOrderApp False)
  = hsep [ppr_lexpr arg, arrowtt, ppr_lexpr arrow]

-- An operator form whose head is a variable with known fixity and exactly
-- two arguments prints infix: cmd1 `op` cmd2
ppr_cmd (HsCmdArrForm (L _ (HsVar v)) (Just _) [arg1, arg2])
  = sep [pprCmdArg (unLoc arg1), hsep [pprInfixOcc v, pprCmdArg (unLoc arg2)]]
ppr_cmd (HsCmdArrForm op _ args)
  -- Use <+> around the banana brackets, for consistency with the
  -- HsArrForm case of ppr_expr (which prints "(| op ... |)")
  = hang (ptext (sLit "(|") <+> ppr_lexpr op)
         4 (sep (map (pprCmdArg.unLoc) args) <+> ptext (sLit "|)"))
-- | Print one argument of a command-forming operator, parenthesising it
-- unless it is an empty, fixity-free 'HsCmdArrForm' (which needs none).
pprCmdArg :: OutputableBndr id => HsCmdTop id -> SDoc
pprCmdArg (HsCmdTop cmd _ _ _) =
  case unLoc cmd of
    HsCmdArrForm _ Nothing [] -> ppr_lcmd cmd
    _                         -> parens (ppr_lcmd cmd)
instance OutputableBndr id => Outputable (HsCmdTop id) where
    ppr arg = pprCmdArg arg
{-
************************************************************************
* *
\subsection{Record binds}
* *
************************************************************************
-}
-- | Record-construction/update field bindings, with expression values.
type HsRecordBinds id = HsRecFields id (LHsExpr id)
{-
************************************************************************
* *
\subsection{@Match@, @GRHSs@, and @GRHS@ datatypes}
* *
************************************************************************
@Match@es are sets of pattern bindings and right hand sides for
functions, patterns or case branches. For example, if a function @g@
is defined as:
\begin{verbatim}
g (x,y) = y
g ((x:ys),y) = y+1,
\end{verbatim}
then \tr{g} has two @Match@es: @(x,y) = y@ and @((x:ys),y) = y+1@.
It is always the case that each element of an @[Match]@ list has the
same number of @pats@s inside it. This corresponds to saying that
a function defined by pattern matching must have the same number of
patterns in each equation.
-}
-- | A group of alternatives (equations) for a function, lambda,
-- or case; each alternative is a 'Match'.
data MatchGroup id body
  = MG { mg_alts    :: [LMatch id body]  -- The alternatives
       , mg_arg_tys :: [PostTc id Type]  -- Types of the arguments, t1..tn
       , mg_res_ty  :: PostTc id Type    -- Type of the result, tr
       , mg_origin  :: Origin }
     -- The type is the type of the entire group
     --      t1 -> ... -> tn -> tr
     -- where there are n patterns
  deriving (Typeable)
deriving instance (Data body,DataId id) => Data (MatchGroup id body)
type LMatch id body = Located (Match id body)
-- ^ May have 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnSemi' when in a
--   list

-- For details on above see note [Api annotations] in ApiAnnotation

-- | One alternative of a 'MatchGroup': patterns plus guarded RHSs.
data Match id body
  = Match {
        m_fun_id_infix :: (Maybe (Located id,Bool)),
          -- fun_id and fun_infix for functions with multiple equations
          -- only present for a RdrName. See note [fun_id in Match]
        m_pats :: [LPat id],              -- The patterns
        m_type :: (Maybe (LHsType id)),
          -- A type signature for the result of the match
          -- Nothing after typechecking
        m_grhss :: (GRHSs id body)
  } deriving (Typeable)
deriving instance (Data body,DataId id) => Data (Match id body)
{-
Note [fun_id in Match]
~~~~~~~~~~~~~~~~~~~~~~
The parser initially creates a FunBind with a single Match in it for
every function definition it sees.
These are then grouped together by getMonoBind into a single FunBind,
where all the Matches are combined.
In the process, all the original FunBind fun_id's bar one are
discarded, including the locations.
This causes a problem for source to source conversions via API
Annotations, so the original fun_ids and infix flags are preserved in
the Match, when it originates from a FunBind.
Example infix function definition requiring individual API Annotations
(&&& ) [] [] = []
xs &&& [] = xs
( &&& ) [] ys = ys
-}
-- | True when the group has no alternatives at all.
isEmptyMatchGroup :: MatchGroup id body -> Bool
isEmptyMatchGroup mg = null (mg_alts mg)
matchGroupArity :: MatchGroup id body -> Arity
-- Precondition: MatchGroup is non-empty
-- This is called before type checking, when mg_arg_tys is not set
matchGroupArity (MG { mg_alts = alts }) =
  case alts of
    first_alt : _ -> length (hsLMatchPats first_alt)
    []            -> panic "matchGroupArity"
-- | The patterns of a located 'Match'.
hsLMatchPats :: LMatch id body -> [LPat id]
hsLMatchPats lmatch = m_pats (unLoc lmatch)
-- | GRHSs are used both for pattern bindings and for Matches
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnVbar',
-- 'ApiAnnotation.AnnEqual','ApiAnnotation.AnnWhere',
-- 'ApiAnnotation.AnnOpen','ApiAnnotation.AnnClose'
-- 'ApiAnnotation.AnnRarrow','ApiAnnotation.AnnSemi'
-- For details on above see note [Api annotations] in ApiAnnotation
data GRHSs id body
  = GRHSs {
      grhssGRHSs :: [LGRHS id body],       -- ^ Guarded RHSs
      grhssLocalBinds :: (HsLocalBinds id) -- ^ The where clause
    } deriving (Typeable)
deriving instance (Data body,DataId id) => Data (GRHSs id body)

type LGRHS id body = Located (GRHS id body)

-- | Guarded Right Hand Side.
data GRHS id body = GRHS [GuardLStmt id] -- Guards
                         body            -- Right hand side
  deriving (Typeable)
deriving instance (Data body,DataId id) => Data (GRHS id body)
-- We know the list must have at least one @Match@ in it.
pprMatches :: (OutputableBndr idL, OutputableBndr idR, Outputable body)
           => HsMatchContext idL -> MatchGroup idR body -> SDoc
pprMatches ctxt (MG { mg_alts = matches })
  = vcat (map (pprMatch ctxt . unLoc) matches)
-- Don't print the type; it's only a place-holder before typechecking
-- Exported to HsBinds, which can't see the defn of HsMatchContext
pprFunBind :: (OutputableBndr idL, OutputableBndr idR, Outputable body)
           => idL -> Bool -> MatchGroup idR body -> SDoc
pprFunBind fun inf = pprMatches (FunRhs fun inf)
-- Exported to HsBinds, which can't see the defn of HsMatchContext
pprPatBind :: forall bndr id body. (OutputableBndr bndr, OutputableBndr id, Outputable body)
           => LPat bndr -> GRHSs id body -> SDoc
pprPatBind pat grhss
  = sep [ ppr pat
        , nest 2 (pprGRHSs (PatBindRhs :: HsMatchContext id) grhss) ]
-- | Print one 'Match': the herald (function name, backslash, or first
-- pattern, depending on the context), the remaining patterns, an optional
-- result type signature, and the guarded RHSs.
pprMatch :: (OutputableBndr idL, OutputableBndr idR, Outputable body)
         => HsMatchContext idL -> Match idR body -> SDoc
pprMatch ctxt (Match _ pats maybe_ty grhss)
  = sep [ sep (herald : map (nest 2 . pprParendLPat) other_pats)
        , nest 2 ppr_maybe_ty
        , nest 2 (pprGRHSs ctxt grhss) ]
  where
    (herald, other_pats)
        = case ctxt of
            FunRhs fun is_infix
                | not is_infix -> (pprPrefixOcc fun, pats)
                        -- f x y z = e
                        -- Not pprBndr; the AbsBinds will
                        -- have printed the signature

                | null pats2 -> (pp_infix, [])
                        -- x &&& y = e

                | otherwise -> (parens pp_infix, pats2)
                        -- (x &&& y) z = e
                where
                  pp_infix = pprParendLPat pat1 <+> pprInfixOcc fun <+> pprParendLPat pat2

            LambdaExpr -> (char '\\', pats)

            _  -> ASSERT( null pats1 )
                  (ppr pat1, [])        -- No parens around the single pat
    -- NB: these bindings are lazy; they are only forced in the branches
    -- above that are reached when the pattern list is long enough
    (pat1:pats1) = pats
    (pat2:pats2) = pats1
    ppr_maybe_ty = case maybe_ty of
                        Just ty -> dcolon <+> ppr ty
                        Nothing -> empty
-- | Print guarded RHSs, followed by the where-clause when it is non-empty.
pprGRHSs :: (OutputableBndr idL, OutputableBndr idR, Outputable body)
         => HsMatchContext idL -> GRHSs idR body -> SDoc
pprGRHSs ctxt (GRHSs grhss binds) = rhs_docs $$ where_doc
  where
    rhs_docs  = vcat (map (pprGRHS ctxt . unLoc) grhss)
    where_doc = ppUnless (isEmptyLocalBinds binds)
                         (text "where" $$ nest 4 (pprBinds binds))
-- | Print a single guarded RHS; the @|@ and guards are omitted
-- when there are no guards.
pprGRHS :: (OutputableBndr idL, OutputableBndr idR, Outputable body)
        => HsMatchContext idL -> GRHS idR body -> SDoc
pprGRHS ctxt (GRHS guards body)
  | null guards = pp_rhs ctxt body
  | otherwise   = sep [char '|' <+> interpp'SP guards, pp_rhs ctxt body]
-- | The context's separator ('=' or '->') followed by the RHS,
-- printed one level deeper.
pp_rhs :: Outputable body => HsMatchContext idL -> body -> SDoc
pp_rhs ctxt rhs = sep_doc <+> pprDeeper (ppr rhs)
  where
    sep_doc = matchSeparator ctxt
{-
************************************************************************
* *
\subsection{Do stmts and list comprehensions}
* *
************************************************************************
-}
-- | A located statement; left and right id types coincide.
type LStmt id body = Located (StmtLR id id body)
-- | A located statement with distinct left/right id types
-- (used across renaming, where the two phases' ids differ).
type LStmtLR idL idR body = Located (StmtLR idL idR body)

type Stmt id body = StmtLR id id body

-- Statements whose body is a command (arrow notation)
type CmdLStmt   id = LStmt id (LHsCmd  id)
type CmdStmt    id = Stmt  id (LHsCmd  id)
-- Statements whose body is an expression
type ExprLStmt  id = LStmt id (LHsExpr id)
type ExprStmt   id = Stmt  id (LHsExpr id)
-- Guard statements (expression bodies)
type GuardLStmt id = LStmt id (LHsExpr id)
type GuardStmt  id = Stmt  id (LHsExpr id)
-- GHCi statements (expression bodies)
type GhciLStmt  id = LStmt id (LHsExpr id)
type GhciStmt   id = Stmt  id (LHsExpr id)
-- The SyntaxExprs in here are used *only* for do-notation and monad
-- comprehensions, which have rebindable syntax. Otherwise they are unused.
-- | API Annotations when in qualifier lists or guards
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnVbar',
-- 'ApiAnnotation.AnnComma','ApiAnnotation.AnnThen',
-- 'ApiAnnotation.AnnBy','ApiAnnotation.AnnBy',
-- 'ApiAnnotation.AnnGroup','ApiAnnotation.AnnUsing'
-- For details on above see note [Api annotations] in ApiAnnotation
-- | A statement in a @do@ block, comprehension, or pattern guard.
data StmtLR idL idR body -- body should always be (LHs**** idR)
  = LastStmt  -- Always the last Stmt in ListComp, MonadComp, PArrComp,
              -- and (after the renamer) DoExpr, MDoExpr
              -- Not used for GhciStmtCtxt, PatGuard, which scope over other stuff
          body
          Bool               -- True <=> return was stripped by ApplicativeDo
          (SyntaxExpr idR)   -- The return operator, used only for
                             -- MonadComp.  For ListComp, PArrComp, we
                             -- use the baked-in 'return'.  For DoExpr,
                             -- MDoExpr, we don't apply a 'return' at
                             -- all.  See Note [Monad Comprehensions]

  -- | - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnLarrow'

  -- For details on above see note [Api annotations] in ApiAnnotation
  | BindStmt (LPat idL)
             body
             (SyntaxExpr idR) -- The (>>=) operator; see Note [The type of bind]
             (SyntaxExpr idR) -- The fail operator
             -- The fail operator is noSyntaxExpr
             -- if the pattern match can't fail

  -- | 'ApplicativeStmt' represents an applicative expression built with
  -- <$> and <*>. It is generated by the renamer, and is desugared into the
  -- appropriate applicative expression by the desugarer, but it is intended
  -- to be invisible in error messages.
  --
  -- For full details, see Note [ApplicativeDo] in RnExpr
  --
  | ApplicativeStmt
             [ ( SyntaxExpr idR
               , ApplicativeArg idL) ]
                      -- [(<$>, e1), (<*>, e2), ..., (<*>, en)]
             (Maybe (SyntaxExpr idR))  -- 'join', if necessary
             (PostTc idR Type)         -- Type of the body

  | BodyStmt body              -- See Note [BodyStmt]
             (SyntaxExpr idR)  -- The (>>) operator
             (SyntaxExpr idR)  -- The `guard` operator; used only in MonadComp
                               -- See notes [Monad Comprehensions]
             (PostTc idR Type) -- Element type of the RHS (used for arrows)

  -- | - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnLet'
  --          'ApiAnnotation.AnnOpen' @'{'@,'ApiAnnotation.AnnClose' @'}'@,

  -- For details on above see note [Api annotations] in ApiAnnotation
  | LetStmt  (HsLocalBindsLR idL idR)

  -- ParStmts only occur in a list/monad comprehension
  | ParStmt  [ParStmtBlock idL idR]
             (SyntaxExpr idR)   -- Polymorphic `mzip` for monad comprehensions
             (SyntaxExpr idR)   -- The `>>=` operator
                                -- See notes [Monad Comprehensions]
            -- After renaming, the ids are the binders
            -- bound by the stmts and used after them

  | TransStmt {
      trS_form  :: TransForm,
      trS_stmts :: [ExprLStmt idL],   -- Stmts to the *left* of the 'group'
                                      -- which generates the tuples to be grouped

      trS_bndrs :: [(idR, idR)],      -- See Note [TransStmt binder map]

      trS_using :: LHsExpr idR,
      trS_by :: Maybe (LHsExpr idR),  -- "by e" (optional)
        -- Invariant: if trS_form = GroupBy, then grp_by = Just e

      trS_ret :: SyntaxExpr idR,      -- The monomorphic 'return' function for
                                      -- the inner monad comprehensions
      trS_bind :: SyntaxExpr idR,     -- The '(>>=)' operator
      trS_fmap :: SyntaxExpr idR      -- The polymorphic 'fmap' function for desugaring
                                      -- Only for 'group' forms
    }                                 -- See Note [Monad Comprehensions]

  -- Recursive statement (see Note [How RecStmt works] below)
  -- | - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnRec'

  -- For details on above see note [Api annotations] in ApiAnnotation
  | RecStmt
     { recS_stmts :: [LStmtLR idL idR body]

       -- The next two fields are only valid after renaming
     , recS_later_ids :: [idR] -- The ids are a subset of the variables bound by the
                               -- stmts that are used in stmts that follow the RecStmt

     , recS_rec_ids :: [idR]   -- Ditto, but these variables are the "recursive" ones,
                               -- that are used before they are bound in the stmts of
                               -- the RecStmt.
        -- An Id can be in both groups
        -- Both sets of Ids are (now) treated monomorphically
        -- See Note [How RecStmt works] for why they are separate

       -- Rebindable syntax
     , recS_bind_fn :: SyntaxExpr idR -- The bind function
     , recS_ret_fn  :: SyntaxExpr idR -- The return function
     , recS_mfix_fn :: SyntaxExpr idR -- The mfix function

       -- These fields are only valid after typechecking
     , recS_later_rets :: [PostTcExpr] -- (only used in the arrow version)
     , recS_rec_rets :: [PostTcExpr] -- These expressions correspond 1-to-1
                                     -- with recS_later_ids and recS_rec_ids,
                                     -- and are the expressions that should be
                                     -- returned by the recursion.
                                     -- They may not quite be the Ids themselves,
                                     -- because the Id may be *polymorphic*, but
                                     -- the returned thing has to be *monomorphic*,
                                     -- so they may be type applications

     , recS_ret_ty :: PostTc idR Type -- The type of
                                      -- do { stmts; return (a,b,c) }
                                      -- With rebindable syntax the type might not
                                      -- be quite as simple as (m (tya, tyb, tyc)).
     }
  deriving (Typeable)
deriving instance (Data body, DataId idL, DataId idR)
  => Data (StmtLR idL idR body)
-- | Which flavour of transform statement a 'TransStmt' is.
data TransForm   -- The 'f' below is the 'using' function, 'e' is the by function
  = ThenForm     -- then f              or    then f by e             (depending on trS_by)
  | GroupForm    -- then group using f  or    then group by e using f (depending on trS_by)
  deriving (Data, Typeable)
-- | One arm of a parallel comprehension ('ParStmt').
data ParStmtBlock idL idR
  = ParStmtBlock
        [ExprLStmt idL]
        [idR]              -- The variables to be returned
        (SyntaxExpr idR)   -- The return operator
  deriving( Typeable )
deriving instance (DataId idL, DataId idR) => Data (ParStmtBlock idL idR)
-- | Applicative Argument: one component of an 'ApplicativeStmt'.
data ApplicativeArg idL
  = ApplicativeArgOne      -- A single statement (BindStmt or BodyStmt)
      (LPat idL)           -- WildPat if it was a BodyStmt (see below)
      (LHsExpr idL)
      Bool                 -- True <=> was a BodyStmt
                           -- False <=> was a BindStmt
                           -- See Note [Applicative BodyStmt]
  | ApplicativeArgMany     -- do { stmts; return vars }
      [ExprLStmt idL]      -- stmts
      (HsExpr idL)         -- return (v1,..,vn), or just (v1,..,vn)
      (LPat idL)           -- (v1,...,vn)
  deriving( Typeable )
deriving instance (DataId idL) => Data (ApplicativeArg idL)
{-
Note [The type of bind in Stmts]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Some Stmts, notably BindStmt, keep the (>>=) bind operator.
We do NOT assume that it has type
(>>=) :: m a -> (a -> m b) -> m b
In some cases (see Trac #303, #1537) it might have a more
exotic type, such as
(>>=) :: m i j a -> (a -> m j k b) -> m i k b
So we must be careful not to make assumptions about the type.
In particular, the monad may not be uniform throughout.
Note [TransStmt binder map]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
The [(idR,idR)] in a TransStmt behaves as follows:
* Before renaming: []
* After renaming:
[ (x27,x27), ..., (z35,z35) ]
These are the variables
bound by the stmts to the left of the 'group'
and used either in the 'by' clause,
or in the stmts following the 'group'
Each item is a pair of identical variables.
* After typechecking:
[ (x27:Int, x27:[Int]), ..., (z35:Bool, z35:[Bool]) ]
Each pair has the same unique, but different *types*.
Note [BodyStmt]
~~~~~~~~~~~~~~~
BodyStmts are a bit tricky, because what they mean
depends on the context. Consider the following contexts:
A do expression of type (m res_ty)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* BodyStmt E any_ty: do { ....; E; ... }
E :: m any_ty
Translation: E >> ...
A list comprehensions of type [elt_ty]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* BodyStmt E Bool: [ .. | .... E ]
[ .. | ..., E, ... ]
[ .. | .... | ..., E | ... ]
E :: Bool
Translation: if E then fail else ...
A guard list, guarding a RHS of type rhs_ty
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* BodyStmt E Bool: f x | ..., E, ... = ...rhs...
E :: Bool
Translation: if E then fail else ...
A monad comprehension of type (m res_ty)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* BodyStmt E Bool: [ .. | .... E ]
E :: Bool
Translation: guard E >> ...
Array comprehensions are handled like list comprehensions.
Note [How RecStmt works]
~~~~~~~~~~~~~~~~~~~~~~~~
Example:
HsDo [ BindStmt x ex
, RecStmt { recS_rec_ids = [a, c]
, recS_stmts = [ BindStmt b (return (a,c))
, LetStmt a = ...b...
, BindStmt c ec ]
, recS_later_ids = [a, b]
, return (a b) ]
Here, the RecStmt binds a,b,c; but
- Only a,b are used in the stmts *following* the RecStmt,
- Only a,c are used in the stmts *inside* the RecStmt
*before* their bindings
Why do we need *both* rec_ids and later_ids? For monads they could be
combined into a single set of variables, but not for arrows. That
follows from the types of the respective feedback operators:
mfix :: MonadFix m => (a -> m a) -> m a
loop :: ArrowLoop a => a (b,d) (c,d) -> a b c
* For mfix, the 'a' covers the union of the later_ids and the rec_ids
* For 'loop', 'c' is the later_ids and 'd' is the rec_ids
Note [Typing a RecStmt]
~~~~~~~~~~~~~~~~~~~~~~~
A (RecStmt stmts) types as if you had written
(v1,..,vn, _, ..., _) <- mfix (\~(_, ..., _, r1, ..., rm) ->
do { stmts
; return (v1,..vn, r1, ..., rm) })
where v1..vn are the later_ids
r1..rm are the rec_ids
Note [Monad Comprehensions]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Monad comprehensions require separate functions like 'return' and
'>>=' for desugaring. These functions are stored in the statements
used in monad comprehensions. For example, the 'return' of the 'LastStmt'
expression is used to lift the body of the monad comprehension:
[ body | stmts ]
=>
stmts >>= \bndrs -> return body
In transform and grouping statements ('then ..' and 'then group ..') the
'return' function is required for nested monad comprehensions, for example:
[ body | stmts, then f, rest ]
=>
f [ env | stmts ] >>= \bndrs -> [ body | rest ]
BodyStmts require the 'Control.Monad.guard' function for boolean
expressions:
[ body | exp, stmts ]
=>
guard exp >> [ body | stmts ]
Parallel statements require the 'Control.Monad.Zip.mzip' function:
[ body | stmts1 | stmts2 | .. ]
=>
mzip stmts1 (mzip stmts2 (..)) >>= \(bndrs1, (bndrs2, ..)) -> return body
In any other context than 'MonadComp', the fields for most of these
'SyntaxExpr's stay bottom.
-}
instance (OutputableBndr idL, OutputableBndr idR)
         => Outputable (ParStmtBlock idL idR) where
    -- Only the statements are shown; the binders and return
    -- operator are renamer/typechecker bookkeeping
    ppr (ParStmtBlock stmts _bndrs _ret) = interpp'SP stmts
instance (OutputableBndr idL, OutputableBndr idR, Outputable body)
         => Outputable (StmtLR idL idR body) where
    ppr = pprStmt
-- | Pretty-print a statement.  'ApplicativeStmt' gets special handling:
-- in user style it is flattened back into ordinary-looking bind/body
-- statements so the Applicative machinery stays invisible in errors.
pprStmt :: forall idL idR body . (OutputableBndr idL, OutputableBndr idR, Outputable body)
        => (StmtLR idL idR body) -> SDoc
pprStmt (LastStmt expr ret_stripped _)
  = ifPprDebug (text "[last]") <+>
    (if ret_stripped then text "return" else empty) <+>
    ppr expr
pprStmt (BindStmt pat expr _ _)  = hsep [ppr pat, larrow, ppr expr]
pprStmt (LetStmt binds)          = hsep [ptext (sLit "let"), pprBinds binds]
pprStmt (BodyStmt expr _ _ _)    = ppr expr
pprStmt (ParStmt stmtss _ _)     = sep (punctuate (ptext (sLit " | ")) (map ppr stmtss))

pprStmt (TransStmt { trS_stmts = stmts, trS_by = by, trS_using = using, trS_form = form })
  = sep $ punctuate comma (map ppr stmts ++ [pprTransStmt by using form])

pprStmt (RecStmt { recS_stmts = segment, recS_rec_ids = rec_ids
                 , recS_later_ids = later_ids })
  = ptext (sLit "rec") <+>
    vcat [ ppr_do_stmts segment
         , ifPprDebug (vcat [ ptext (sLit "rec_ids=") <> ppr rec_ids
                            , ptext (sLit "later_ids=") <> ppr later_ids])]

pprStmt (ApplicativeStmt args mb_join _)
  = getPprStyle $ \style ->
      if userStyle style
         then pp_for_user
         else pp_debug
  where
   -- make all the Applicative stuff invisible in error messages by
   -- flattening the whole ApplicativeStmt nest back to a sequence
   -- of statements.
   pp_for_user = vcat $ concatMap flattenArg args

   -- ppr directly rather than transforming here, because we need to
   -- inject a "return" which is hard when we're polymorphic in the id
   -- type.
   flattenStmt :: ExprLStmt idL -> [SDoc]
   flattenStmt (L _ (ApplicativeStmt args _ _)) = concatMap flattenArg args
   flattenStmt stmt = [ppr stmt]

   flattenArg :: forall a . (a, ApplicativeArg idL) -> [SDoc]
   flattenArg (_, ApplicativeArgOne pat expr isBody)
     | isBody =  -- See Note [Applicative BodyStmt]
       -- The panic'd field is never demanded by ppr
       [ppr (BodyStmt expr noSyntaxExpr noSyntaxExpr (panic "flattenArg: BodyStmt")
             :: ExprStmt idL)]
     | otherwise =
       [ppr (BindStmt pat expr noSyntaxExpr noSyntaxExpr
             :: ExprStmt idL)]
   flattenArg (_, ApplicativeArgMany stmts _ _) =
     concatMap flattenStmt stmts

   -- Debug style: show the <$>/<*> structure as " | "-separated args,
   -- wrapped in "join (...)" when a join is present
   pp_debug =
     let
         ap_expr = sep (punctuate (text " |") (map pp_arg args))
     in
       if isNothing mb_join
          then ap_expr
          else text "join" <+> parens ap_expr

   pp_arg :: (a, ApplicativeArg idL) -> SDoc
   pp_arg (_, ApplicativeArgOne pat expr isBody)
     | isBody =  -- See Note [Applicative BodyStmt]
       ppr (BodyStmt expr noSyntaxExpr noSyntaxExpr (panic "pp_arg: pprStmt")
            :: ExprStmt idL)
     | otherwise =
       ppr (BindStmt pat expr noSyntaxExpr noSyntaxExpr
            :: ExprStmt idL)
   pp_arg (_, ApplicativeArgMany stmts return pat) =
     ppr pat <+>
     text "<-" <+>
     ppr (HsDo DoExpr
               (stmts ++
                [noLoc (LastStmt (noLoc return) False noSyntaxExpr)]) (panic "pprStmt"))
-- | Print @then f by e@; the binders are shown only in debug style.
pprTransformStmt :: OutputableBndr id => [id] -> LHsExpr id -> Maybe (LHsExpr id) -> SDoc
pprTransformStmt bndrs using by
  = sep [ then_doc
        , nest 2 (ppr using)
        , nest 2 (pprBy by) ]
  where
    then_doc = ptext (sLit "then") <+> ifPprDebug (braces (ppr bndrs))
-- | Print the trailing transform part of a 'TransStmt'.
pprTransStmt :: Outputable body => Maybe body -> body -> TransForm -> SDoc
pprTransStmt by using form = case form of
  ThenForm  -> sep [ ptext (sLit "then"), nest 2 (ppr using), nest 2 (pprBy by) ]
  GroupForm -> sep [ ptext (sLit "then group"), nest 2 (pprBy by)
                   , nest 2 (ptext (sLit "using") <+> ppr using) ]
-- | Print an optional @by e@ clause; prints nothing when absent.
pprBy :: Outputable body => Maybe body -> SDoc
pprBy = maybe empty (\e -> text "by" <+> ppr e)
-- | Print a do-block or comprehension in the concrete syntax matching
-- its statement context.
pprDo :: (OutputableBndr id, Outputable body)
      => HsStmtContext any -> [LStmt id body] -> SDoc
pprDo ctxt stmts = case ctxt of
  DoExpr       -> text "do"  <+> ppr_do_stmts stmts
  GhciStmtCtxt -> text "do"  <+> ppr_do_stmts stmts
  ArrowExpr    -> text "do"  <+> ppr_do_stmts stmts
  MDoExpr      -> text "mdo" <+> ppr_do_stmts stmts
  ListComp     -> brackets   $ pprComp stmts
  PArrComp     -> paBrackets $ pprComp stmts
  MonadComp    -> brackets   $ pprComp stmts
  _            -> panic "pprDo" -- PatGuard, ParStmtCxt
ppr_do_stmts :: (OutputableBndr idL, OutputableBndr idR, Outputable body)
             => [LStmtLR idL idR body] -> SDoc
-- Print a bunch of do stmts, with explicit braces and semicolons,
-- so that we are not vulnerable to layout bugs
-- (pprDeeperList presumably truncates very deep output under the user
-- style's depth limit -- defined in Outputable; confirm there)
ppr_do_stmts stmts
  = lbrace <+> pprDeeperList vcat (punctuate semi (map ppr stmts))
           <+> rbrace
pprComp :: (OutputableBndr id, Outputable body)
        => [LStmt id body] -> SDoc
pprComp quals     -- Prints:  body | qual1, ..., qualn
  -- A well-formed comprehension always ends in a LastStmt carrying the
  -- result expression; print that before the bar, the quals after it.
  | not (null quals)
  , L _ (LastStmt body _ _) <- last quals
  = hang (ppr body <+> char '|') 2 (pprQuals (dropTail 1 quals))
  | otherwise
  -- No trailing LastStmt: malformed input, so panic with the quals shown.
  = pprPanic "pprComp" (pprQuals quals)
-- | Show list comprehension qualifiers separated by commas.
pprQuals :: (OutputableBndr id, Outputable body)
         => [LStmt id body] -> SDoc
pprQuals = interpp'SP
{-
************************************************************************
* *
Template Haskell quotation brackets
* *
************************************************************************
-}
-- | A Template Haskell splice: typed, untyped, quasi-quoted, or already run.
data HsSplice id
   = HsTypedSplice       --  $$z  or $$(f 4)
        SpliceDecoration -- Whether $$( ) variant found, for pretty printing
        id               -- A unique name to identify this splice point
        (LHsExpr id)     -- See Note [Pending Splices]
   | HsUntypedSplice     --  $z  or $(f 4)
        SpliceDecoration -- Whether $( ) variant found, for pretty printing
        id               -- A unique name to identify this splice point
        (LHsExpr id)     -- See Note [Pending Splices]
   | HsQuasiQuote        -- See Note [Quasi-quote overview] in TcSplice
        id               -- Splice point
        id               -- Quoter
        SrcSpan          -- The span of the enclosed string
        FastString       -- The enclosed string
   -- AZ:TODO: use XSplice instead of HsSpliced
   | HsSpliced  -- See Note [Delaying modFinalizers in untyped splices] in
                -- RnSplice.
                -- This is the result of splicing a splice. It is produced by
                -- the renamer and consumed by the typechecker. It lives only
                -- between the two.
      ThModFinalizers     -- TH finalizers produced by the splice.
      (HsSplicedThing id) -- The result of splicing
deriving instance (DataId id) => Data (HsSplice id)
-- | A splice can appear with various decorations wrapped around it. This data
-- type captures explicitly how it was originally written, for use in the pretty
-- printer.
data SpliceDecoration
  = HasParens -- ^ @$( splice )@ or @$$( splice )@
  | HasDollar -- ^ @$splice@ or @$$splice@
  | NoParens  -- ^ bare splice
  deriving (Data, Eq, Show)
-- | Decorations are printed via their derived 'Show' instance.
instance Outputable SpliceDecoration where
  ppr = text . show
-- | Is this a typed (@$$@) splice?  Quasi-quotes count as untyped splices.
isTypedSplice :: HsSplice id -> Bool
isTypedSplice s = case s of
  HsTypedSplice {} -> True
  _                -> False
-- | Finalizers produced by a splice with
-- 'Language.Eta.Meta.Syntax.addModFinalizer'
--
-- See Note [Delaying modFinalizers in untyped splices] in RnSplice. For how
-- this is used.
--
-- Think of the type as ForeignRef (TH.Q ()).
newtype ThModFinalizers = ThModFinalizers [ForeignRef ()]
-- A Data instance which ignores the argument of 'ThModFinalizers'.
-- The ForeignRefs cannot be traversed generically, so the instance
-- pretends the constructor has no fields.
instance Data ThModFinalizers where
  gunfold _ z _ = z $ ThModFinalizers []
  toConstr  a   = mkConstr (dataTypeOf a) "ThModFinalizers" [] Data.Prefix
  dataTypeOf a  = mkDataType "HsExpr.ThModFinalizers" [toConstr a]
-- | Haskell Spliced Thing
--
-- Values that can result from running a splice.
data HsSplicedThing id
    = HsSplicedExpr (HsExpr id) -- ^ Haskell Spliced Expression
    | HsSplicedTy   (HsType id) -- ^ Haskell Spliced Type
    | HsSplicedPat  (Pat id)    -- ^ Haskell Spliced Pattern
deriving instance (DataId id) => Data (HsSplicedThing id)
-- See Note [Pending Splices]
-- | The renamer-assigned name of a splice point.
type SplicePointName = Name
-- | Pending Renamer Splice: a splice lifted out of a bracket by the
-- renamer, to be typechecked later.  See Note [Pending Splices].
data PendingRnSplice
  = PendingRnSplice UntypedSpliceFlavour SplicePointName (LHsExpr Name)
  deriving Data
-- | The syntactic position an untyped splice occurred in; distinguishes
-- the four varieties described in Note [Pending Splices].
data UntypedSpliceFlavour
  = UntypedExpSplice
  | UntypedPatSplice
  | UntypedTypeSplice
  | UntypedDeclSplice
  deriving Data
-- | Pending Type-checker Splice
data PendingTcSplice
  -- AZ:TODO: The hard-coded Id feels wrong.
  = PendingTcSplice SplicePointName (LHsExpr Id)
  deriving Data
{-
Note [Pending Splices]
~~~~~~~~~~~~~~~~~~~~~~
When we rename an untyped bracket, we name and lift out all the nested
splices, so that when the typechecker hits the bracket, it can
typecheck those nested splices without having to walk over the untyped
bracket code. So for example
[| f $(g x) |]
looks like
HsBracket (HsApp (HsVar "f") (HsSpliceE _ (g x)))
which the renamer rewrites to
HsRnBracketOut (HsApp (HsVar f) (HsSpliceE sn (g x)))
[PendingRnSplice UntypedExpSplice sn (g x)]
* The 'sn' is the Name of the splice point, the SplicePointName
* The PendingRnSplice gives the splice that splice-point name maps to;
and the typechecker can now conveniently find these sub-expressions
* The other copy of the splice, in the second argument of HsSpliceE
in the renamed first arg of HsRnBracketOut
is used only for pretty printing
There are four varieties of pending splices generated by the renamer,
distinguished by their UntypedSpliceFlavour
* Pending expression splices (UntypedExpSplice), e.g.,
[|$(f x) + 2|]
UntypedExpSplice is also used for
* quasi-quotes, where the pending expression expands to
$(quoter "...blah...")
(see RnSplice.makePending, HsQuasiQuote case)
* cross-stage lifting, where the pending expression expands to
$(lift x)
(see RnSplice.checkCrossStageLifting)
* Pending pattern splices (UntypedPatSplice), e.g.,
[| \$(f x) -> x |]
* Pending type splices (UntypedTypeSplice), e.g.,
[| f :: $(g x) |]
* Pending declaration (UntypedDeclSplice), e.g.,
[| let $(f x) in ... |]
There is a fifth variety of pending splice, which is generated by the type
checker:
* Pending *typed* expression splices, (PendingTcSplice), e.g.,
[||1 + $$(f 2)||]
It would be possible to eliminate HsRnBracketOut and use HsBracketOut for the
output of the renamer. However, when pretty printing the output of the renamer,
e.g., in a type error message, we *do not* want to print out the pending
splices. In contrast, when pretty printing the output of the type checker, we
*do* want to print the pending splices. So splitting them up seems to make
sense, although I hate to add another constructor to HsExpr.
-}
-- Print the result of running a splice: expressions go through ppr_expr
-- (a sibling printer in this file); types and patterns use their own ppr.
instance (OutputableBndr id)
       => Outputable (HsSplicedThing id) where
  ppr (HsSplicedExpr e) = ppr_expr e
  ppr (HsSplicedTy   t) = ppr t
  ppr (HsSplicedPat  p) = ppr p
instance (OutputableBndr id) => Outputable (HsSplice id) where
  ppr = pprSplice

-- | Debug rendering of a pending splice: @<name, expr>@.
pprPendingSplice :: (OutputableBndr id)
                 => SplicePointName -> LHsExpr id -> SDoc
pprPendingSplice n e = angleBrackets (ppr n <> comma <+> ppr e)
-- Print a top-level splice declaration, honouring whether the user wrote
-- an explicit "$( ... )" wrapper or a bare expression.
pprSpliceDecl ::  (OutputableBndr id) => HsSplice id -> SpliceExplicitFlag -> SDoc
pprSpliceDecl e@HsQuasiQuote{} _ = pprSplice e
pprSpliceDecl e ExplicitSplice   = text "$(" <> ppr_splice_decl e <> text ")"
pprSpliceDecl e ImplicitSplice   = ppr_splice_decl e
-- Print the splice body without any "$"/paren herald; the caller
-- (pprSpliceDecl) supplies the wrapper when one was written.
ppr_splice_decl :: (OutputableBndr id) => HsSplice id -> SDoc
ppr_splice_decl (HsUntypedSplice _ n e) = ppr_splice empty n e empty
ppr_splice_decl e = pprSplice e
-- | Pretty-print a splice exactly as the user wrote it: with parens,
-- with a bare dollar (or double dollar for typed splices), or bare.
pprSplice :: (OutputableBndr id) => HsSplice id -> SDoc
pprSplice splice = case splice of
    HsTypedSplice deco n e   -> decorated "$$" deco n e
    HsUntypedSplice deco n e -> decorated "$" deco n e
    HsQuasiQuote n q _ s     -> ppr_quasi n q s
    HsSpliced _ thing        -> ppr thing
  where
    -- Re-create the herald/trailer the decoration records.
    decorated dollars deco n e = case deco of
      HasParens -> ppr_splice (text (dollars ++ "(")) n e (text ")")
      HasDollar -> ppr_splice (text dollars) n e empty
      NoParens  -> ppr_splice empty n e empty
-- | Print a quasi-quote @[quoter| string |]@; the splice-point name is
-- shown (in brackets) only in debug style.
ppr_quasi :: OutputableBndr p => p -> p -> FastString -> SDoc
ppr_quasi n quoter quote
  = hcat [ ifPprDebug (brackets (ppr n))
         , char '[', ppr quoter, vbar, ppr quote, text "|]" ]

-- | Print a splice body between the given herald and trailer,
-- with the splice-point name shown only in debug style.
ppr_splice :: (OutputableBndr id)
           => SDoc -> id -> LHsExpr id -> SDoc -> SDoc
ppr_splice herald n e trail
  = hcat [ herald, ifPprDebug (brackets (ppr n)), ppr e, trail ]
-- | A Template Haskell quotation bracket.
data HsBracket id = ExpBr (LHsExpr id)   -- [|  expr  |]
                  | PatBr (LPat id)      -- [p| pat   |]
                  | DecBrL [LHsDecl id]  -- [d| decls |]; result of parser
                  | DecBrG (HsGroup id)  -- [d| decls |]; result of renamer
                  | TypBr (LHsType id)   -- [t| type  |]
                  | VarBr Bool id        -- True: 'x, False: ''T
                                         -- (The Bool flag is used only in pprHsBracket)
                  | TExpBr (LHsExpr id)  -- [||  expr  ||]
  deriving (Typeable)
deriving instance (DataId id) => Data (HsBracket id)
-- | Is this a typed-expression bracket @[|| ... ||]@?
isTypedBracket :: HsBracket id -> Bool
isTypedBracket br = case br of
  TExpBr {} -> True
  _         -> False
instance OutputableBndr id => Outputable (HsBracket id) where
  ppr = pprHsBracket

-- | Render a bracket with its kind marker: @[|..|]@, @[p|..|]@, @[d|..|]@,
-- @[t|..|]@, @'x@ / @''T@ for name quotes, and @[||..||]@ for typed brackets.
pprHsBracket :: OutputableBndr id => HsBracket id -> SDoc
pprHsBracket br = case br of
  ExpBr e       -> thBrackets empty (ppr e)
  PatBr p       -> thBrackets (char 'p') (ppr p)
  DecBrG gp     -> thBrackets (char 'd') (ppr gp)
  DecBrL ds     -> thBrackets (char 'd') (vcat (map ppr ds))
  TypBr t       -> thBrackets (char 't') (ppr t)
  VarBr True n  -> char '\'' <> ppr n
  VarBr False n -> text "''" <> ppr n
  TExpBr e      -> thTyBrackets (ppr e)
-- | Wrap a body in a Template Haskell bracket @[kind| body |]@.
thBrackets :: SDoc -> SDoc -> SDoc
thBrackets pp_kind pp_body
  = char '[' <> pp_kind <> char '|' <+> pp_body <+> text "|]"

-- | Wrap a body in typed-expression brackets @[|| body ||]@.
thTyBrackets :: SDoc -> SDoc
thTyBrackets pp_body = text "[||" <+> pp_body <+> text "||]"
-- Both pending-splice kinds print as <name, expr>; the renamer version
-- drops its flavour field.
instance Outputable PendingRnSplice where
  ppr (PendingRnSplice _ n e) = pprPendingSplice n e
instance Outputable PendingTcSplice where
  ppr (PendingTcSplice n e) = pprPendingSplice n e
{-
************************************************************************
* *
\subsection{Enumerations and list comprehensions}
* *
************************************************************************
-}
-- | Arithmetic sequence information, e.g. @[e1..]@, @[e1,e2..]@,
-- @[e1..e3]@, @[e1,e2..e3]@.
data ArithSeqInfo id
  = From            (LHsExpr id)
  | FromThen        (LHsExpr id)
                    (LHsExpr id)
  | FromTo          (LHsExpr id)
                    (LHsExpr id)
  | FromThenTo      (LHsExpr id)
                    (LHsExpr id)
                    (LHsExpr id)
  deriving (Typeable)
deriving instance (DataId id) => Data (ArithSeqInfo id)
-- Print the sequence exactly as written, inserting " .. " via pp_dotdot.
instance OutputableBndr id => Outputable (ArithSeqInfo id) where
    ppr (From e1)             = hcat [ppr e1, pp_dotdot]
    ppr (FromThen e1 e2)      = hcat [ppr e1, comma, space, ppr e2, pp_dotdot]
    ppr (FromTo e1 e3)        = hcat [ppr e1, pp_dotdot, ppr e3]
    ppr (FromThenTo e1 e2 e3)
      = hcat [ppr e1, comma, space, ppr e2, pp_dotdot, ppr e3]
-- | The @ .. @ separator used when printing arithmetic sequences.
pp_dotdot :: SDoc
pp_dotdot = text " .. "
{-
************************************************************************
* *
\subsection{HsMatchCtxt}
* *
************************************************************************
-}
-- | The syntactic context a Match occurs in; drives error messages and
-- pretty-printing (see pprMatchContext and friends below).
data HsMatchContext id  -- Context of a Match
  = FunRhs id Bool              -- Function binding for f; True <=> written infix
  | LambdaExpr                  -- Patterns of a lambda
  | CaseAlt                     -- Patterns and guards on a case alternative
  | IfAlt                       -- Guards of a multi-way if alternative
  | ProcExpr                    -- Patterns of a proc
  | PatBindRhs                  -- A pattern binding  eg [y] <- e = e
  | RecUpd                      -- Record update [used only in DsExpr to
                                --    tell matchWrapper what sort of
                                --    runtime error message to generate]
  | StmtCtxt (HsStmtContext id) -- Pattern of a do-stmt, list comprehension,
                                -- pattern guard, etc
  | ThPatSplice                 -- A Template Haskell pattern splice
  | ThPatQuote                  -- A Template Haskell pattern quotation [p| (a,b) |]
  | PatSyn                      -- A pattern synonym declaration
  deriving (Data, Typeable)
-- | The flavour of statement block a Stmt occurs in; the last three
-- constructors wrap an enclosing context.
data HsStmtContext id
  = ListComp
  | MonadComp
  | PArrComp                         -- Parallel array comprehension
  | DoExpr                           -- do { ... }
  | MDoExpr                          -- mdo { ... }  ie recursive do-expression
  | ArrowExpr                        -- do-notation in an arrow-command context
  | GhciStmtCtxt                     -- A command-line Stmt in GHCi pat <- rhs
  | PatGuard (HsMatchContext id)     -- Pattern guard for specified thing
  | ParStmtCtxt (HsStmtContext id)   -- A branch of a parallel stmt
  | TransStmtCtxt (HsStmtContext id) -- A branch of a transform stmt
  deriving (Data, Typeable)
-- | Does this context use the syntax @[ e | quals ]@ (including parallel
-- and transformed branches of such comprehensions)?
isListCompExpr :: HsStmtContext id -> Bool
isListCompExpr ctxt = case ctxt of
  ListComp        -> True
  PArrComp        -> True
  MonadComp       -> True
  ParStmtCtxt c   -> isListCompExpr c
  TransStmtCtxt c -> isListCompExpr c
  _               -> False
-- | Is this (possibly nested inside parallel/transform branches) a
-- monad comprehension context?
isMonadCompExpr :: HsStmtContext id -> Bool
isMonadCompExpr ctxt = case ctxt of
  MonadComp        -> True
  ParStmtCtxt c    -> isMonadCompExpr c
  TransStmtCtxt c  -> isMonadCompExpr c
  _                -> False
-- | The token printed between the patterns of a match and its right-hand
-- side.  Contexts that never reach the printer panic.
matchSeparator :: HsMatchContext id -> SDoc
matchSeparator ctxt = case ctxt of
  FunRhs {}   -> text "="
  CaseAlt     -> text "->"
  IfAlt       -> text "->"
  LambdaExpr  -> text "->"
  ProcExpr    -> text "->"
  PatBindRhs  -> text "="
  StmtCtxt _  -> text "<-"
  RecUpd      -> panic "unused"
  ThPatSplice -> panic "unused"
  ThPatQuote  -> panic "unused"
  PatSyn      -> panic "unused"
pprMatchContext :: Outputable id => HsMatchContext id -> SDoc
-- Prefix the context noun with the matching indefinite article,
-- e.g. "an equation for ...", "a case alternative".
pprMatchContext ctxt
  | want_an ctxt = ptext (sLit "an") <+> pprMatchContextNoun ctxt
  | otherwise    = ptext (sLit "a")  <+> pprMatchContextNoun ctxt
  where
    want_an (FunRhs {}) = True -- Use "an" in front
    want_an ProcExpr    = True
    want_an _           = False
-- | Noun phrase describing a match context, e.g. \"case alternative\".
pprMatchContextNoun :: Outputable id => HsMatchContext id -> SDoc
pprMatchContextNoun (FunRhs fun _)  = text "equation for"
                                      <+> quotes (ppr fun)
pprMatchContextNoun CaseAlt         = text "case alternative"
pprMatchContextNoun IfAlt           = text "multi-way if alternative"
pprMatchContextNoun RecUpd          = text "record-update construct"
pprMatchContextNoun ThPatSplice     = text "Template Haskell pattern splice"
pprMatchContextNoun ThPatQuote      = text "Template Haskell pattern quotation"
pprMatchContextNoun PatBindRhs      = text "pattern binding"
pprMatchContextNoun LambdaExpr      = text "lambda abstraction"
pprMatchContextNoun ProcExpr        = text "arrow abstraction"
pprMatchContextNoun (StmtCtxt ctxt) = text "pattern binding in"
                                      $$ pprStmtContext ctxt
pprMatchContextNoun PatSyn          = text "pattern synonym declaration"
-----------------
pprAStmtContext, pprStmtContext :: Outputable id => HsStmtContext id -> SDoc
-- | Like 'pprStmtContext', but prefixed with the right indefinite article.
pprAStmtContext ctxt = article <+> pprStmtContext ctxt
  where
    article = case ctxt of
      MDoExpr      -> text "an"
      PArrComp     -> text "an"
      GhciStmtCtxt -> text "an"
      _            -> text "a"
-----------------
-- Describe a statement context as a noun phrase (type sig shared with
-- pprAStmtContext above).
pprStmtContext GhciStmtCtxt    = ptext (sLit "interactive Eta REPL command")
pprStmtContext DoExpr          = ptext (sLit "'do' block")
pprStmtContext MDoExpr         = ptext (sLit "'mdo' block")
pprStmtContext ArrowExpr       = ptext (sLit "'do' block in an arrow command")
pprStmtContext ListComp        = ptext (sLit "list comprehension")
pprStmtContext MonadComp       = ptext (sLit "monad comprehension")
pprStmtContext PArrComp        = ptext (sLit "array comprehension")
pprStmtContext (PatGuard ctxt) = ptext (sLit "pattern guard for") $$ pprMatchContext ctxt
-- Drop the inner contexts when reporting errors, else we get
--     Unexpected transform statement
--     in a transformed branch of
--          transformed branch of
--          transformed branch of monad comprehension
pprStmtContext (ParStmtCtxt c)
 | opt_PprStyle_Debug = sep [ptext (sLit "parallel branch of"), pprAStmtContext c]
 | otherwise          = pprStmtContext c
pprStmtContext (TransStmtCtxt c)
 | opt_PprStyle_Debug = sep [ptext (sLit "transformed branch of"), pprAStmtContext c]
 | otherwise          = pprStmtContext c
-- Used to generate the string for a *runtime* error message
-- | Short phrase used to build *runtime* pattern-match failure messages.
matchContextErrString :: Outputable id => HsMatchContext id -> SDoc
matchContextErrString (FunRhs fun _) = text "function" <+> ppr fun
matchContextErrString CaseAlt        = text "case"
matchContextErrString IfAlt          = text "multi-way if"
matchContextErrString PatBindRhs     = text "pattern binding"
matchContextErrString RecUpd         = text "record update"
matchContextErrString LambdaExpr     = text "lambda"
matchContextErrString ProcExpr       = text "proc"
matchContextErrString ThPatSplice    = panic "matchContextErrString" -- Not used at runtime
matchContextErrString ThPatQuote     = panic "matchContextErrString" -- Not used at runtime
matchContextErrString PatSyn         = panic "matchContextErrString" -- Not used at runtime
matchContextErrString (StmtCtxt (ParStmtCtxt c))   = matchContextErrString (StmtCtxt c)
matchContextErrString (StmtCtxt (TransStmtCtxt c)) = matchContextErrString (StmtCtxt c)
matchContextErrString (StmtCtxt (PatGuard _))      = text "pattern guard"
matchContextErrString (StmtCtxt GhciStmtCtxt)      = text "interactive Eta REPL command"
matchContextErrString (StmtCtxt DoExpr)            = text "'do' block"
matchContextErrString (StmtCtxt ArrowExpr)         = text "'do' block"
matchContextErrString (StmtCtxt MDoExpr)           = text "'mdo' block"
matchContextErrString (StmtCtxt ListComp)          = text "list comprehension"
matchContextErrString (StmtCtxt MonadComp)         = text "monad comprehension"
matchContextErrString (StmtCtxt PArrComp)          = text "array comprehension"
-- | @In \<context\>: \<match\>@ -- error-message wrapper around a match.
pprMatchInCtxt :: (OutputableBndr idL, OutputableBndr idR, Outputable body)
               => HsMatchContext idL -> Match idR body -> SDoc
pprMatchInCtxt ctxt match
  = hang (text "In" <+> pprMatchContext ctxt <> colon)
         4 (pprMatch ctxt match)
-- Error-message wrapper around a statement, naming its context.
pprStmtInCtxt :: (OutputableBndr idL, OutputableBndr idR, Outputable body)
              => HsStmtContext idL -> StmtLR idL idR body -> SDoc
pprStmtInCtxt ctxt (LastStmt e _ _)
  | isListCompExpr ctxt      -- For [ e | .. ], do not mutter about "stmts"
  = hang (ptext (sLit "In the expression:")) 2 (ppr e)
pprStmtInCtxt ctxt stmt
  = hang (ptext (sLit "In a stmt of") <+> pprAStmtContext ctxt <> colon)
       2 (ppr_stmt stmt)
  where
    -- For Group and Transform Stmts, don't print the nested stmts!
    ppr_stmt (TransStmt { trS_by = by, trS_using = using
                        , trS_form = form }) = pprTransStmt by using form
    ppr_stmt stmt = pprStmt stmt
|
rahulmutt/ghcvm
|
compiler/Eta/HsSyn/HsExpr.hs
|
bsd-3-clause
| 85,615
| 0
| 18
| 23,892
| 15,234
| 8,008
| 7,226
| 992
| 9
|
module BenchLDA where
import Control.Monad.Trans.State
import Control.Monad (replicateM, forM)
import Control.Applicative ((<$>))
import Text.Printf
import Control.Concurrent (setNumCapabilities)
import Criterion
import Data.Random
import qualified Data.Set as S
import BayesStack.Core.Gibbs
import BayesStack.Models.Topic.LDA
-- | Size parameters for a synthetic LDA benchmark network.
data NetParams = NetParams { nNodes :: Int        -- ^ number of nodes
                           , nItems :: Int        -- ^ number of distinct items
                           , nTopics :: Int       -- ^ number of topics
                           , nItemsPerNode :: Int -- ^ sizes the random edge
                                                  -- draw in 'randomNetwork'
                           }
               deriving (Show, Eq, Ord)
-- Default benchmark parameters.  Note the record is knot-tied: nItems is
-- computed from other fields of this very record, which is fine because
-- record fields are lazy.
netParams = NetParams { nNodes = 50000
                      , nItems = nItemsPerNode netParams * nNodes netParams `div` 10
                      , nTopics = 100
                      , nItemsPerNode = 200
                      }
-- Draw a random network of the given size: uniform node-item edges with
-- fixed hyperparameters.
--
-- NOTE(review): only (nItemsPerNode net) edges are drawn in *total*, not
-- per node as the field name suggests -- confirm this is intended.
randomNetwork :: NetParams -> RVar NetData
randomNetwork net = do
    let nodes = [Node i | i <- [1..nNodes net]]
        items = [Item i | i <- [1..nItems net]]
        nodeItem = do node <- randomElement nodes
                      item <- randomElement items
                      return (node, item)
    edges <- replicateM (nItemsPerNode net) nodeItem
    -- ($!) forces the record before returning it
    return $! NetData { dAlphaTheta = 0.1
                      , dAlphaPhi = 0.1
                      , dNodes = S.fromList nodes
                      , dItems = S.fromList items
                      , dTopics = S.fromList [Topic i | i <- [1..nTopics net]]
                      , dNodeItems = setupNodeItems edges
                      }
-- Enumerate benchmarks for one network.  This runs in the *list monad*:
-- one Benchmark per (updateBlock, threads) combination.
benchmarksForNetwork :: NetParams -> NetData -> ModelInit -> [Benchmark]
benchmarksForNetwork np net init = do
    let sweeps = 100
    updateBlock <- [10, 100, 1000]                                    -- Gibbs update block sizes
    threads <- [1, 2, 3, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26] -- thread counts
    let name = printf "%d topics, %d threads, %d block, %d items per node" (nTopics np) threads updateBlock (nItemsPerNode np)
    -- The do-block below is an IO action; criterion benchmarks it directly
    -- (presumably via its Benchmarkable instance for IO -- confirm against
    -- the criterion version in use).
    return $ bench name $ do
        setNumCapabilities threads
        gibbsUpdate threads updateBlock (model net init)
            $ concat $ replicate sweeps (updateUnits net)
-- | Build the benchmark list for one parameter setting: draw a random
-- network and an initial model state, then enumerate the benchmarks.
benchmarksForNetParams :: NetParams -> RVar [Benchmark]
benchmarksForNetParams np = do
    network <- randomNetwork np
    initial <- randomInitialize network
    return (benchmarksForNetwork np network initial)
-- | One benchmark list per topic-count setting; all other parameters
-- come from 'netParams'.
ldaBenchmarkParams :: RVar [[Benchmark]]
ldaBenchmarkParams =
    mapM benchmarksForNetParams
         [ netParams { nTopics = topics } | topics <- [100, 500, 1000] ]

-- | All LDA benchmarks, flattened into a single criterion group.
ldaBenchmarks :: RVar Benchmark
ldaBenchmarks = fmap (bgroup "LDA" . concat) ldaBenchmarkParams
|
bgamari/bayes-stack
|
network-topic-models/BenchLDA.hs
|
bsd-3-clause
| 2,646
| 0
| 15
| 933
| 728
| 391
| 337
| 56
| 1
|
{-
(c) The GRASP/AQUA Project, Glasgow University, 1992-1998
\section[RnPat]{Renaming of patterns}
Basically dependency analysis.
Handles @Match@, @GRHSs@, @HsExpr@, and @Qualifier@ datatypes. In
general, all of these functions return a renamed thing, and a set of
free variables.
-}
{-# LANGUAGE RankNTypes, ScopedTypeVariables, CPP #-}
module Eta.Rename.RnPat (-- main entry points
rnPat, rnPats, rnBindPat, rnPatAndThen,
NameMaker, applyNameMaker, -- a utility for making names:
localRecNameMaker, topRecNameMaker, -- sometimes we want to make local names,
-- sometimes we want to make top (qualified) names.
isTopRecNameMaker,
rnHsRecFields, HsRecFieldContext(..),
-- CpsRn monad
CpsRn, liftCps,
-- Literals
rnLit, rnOverLit,
-- Pattern Error messages that are also used elsewhere
checkTupSize, patSigErr
) where
-- ENH: thin imports to only what is necessary for patterns
import {-# SOURCE #-} Eta.Rename.RnExpr ( rnLExpr )
import {-# SOURCE #-} Eta.Rename.RnSplice ( rnSplicePat )
import Eta.HsSyn.HsSyn
import Eta.TypeCheck.TcRnMonad
import Eta.TypeCheck.TcHsSyn ( hsOverLitName )
import Eta.Rename.RnEnv
import Eta.Rename.RnTypes
import Eta.Prelude.PrelNames
import Eta.Types.TyCon ( tyConName )
import Eta.BasicTypes.ConLike
import Eta.BasicTypes.DataCon ( dataConTyCon )
import Eta.Types.TypeRep ( TyThing(..) )
import Eta.BasicTypes.Name
import Eta.BasicTypes.NameSet
import Eta.BasicTypes.RdrName
import Eta.BasicTypes.BasicTypes
import Eta.Utils.Util
import Eta.Utils.ListSetOps ( removeDups )
import Eta.Utils.Outputable
import Eta.BasicTypes.SrcLoc
import Eta.Utils.FastString
import Eta.BasicTypes.Literal ( inCharRange )
import Eta.Prelude.TysWiredIn ( nilDataCon )
import Eta.BasicTypes.DataCon ( dataConName )
import Control.Monad ( when, liftM, ap )
import Data.Ratio
import qualified Eta.LanguageExtensions as LangExt
#include "HsVersions.h"
{-
*********************************************************
* *
The CpsRn Monad
* *
*********************************************************
Note [CpsRn monad]
~~~~~~~~~~~~~~~~~~
The CpsRn monad uses continuation-passing style to support this
style of programming:
do { ...
; ns <- bindNames rs
; ...blah... }
where rs::[RdrName], ns::[Name]
The idea is that '...blah...'
a) sees the bindings of ns
b) returns the free variables it mentions
so that bindNames can report unused ones
In particular,
mapM rnPatAndThen [p1, p2, p3]
has a *left-to-right* scoping: it makes the binders in
p1 scope over p2,p3.
-}
-- | Continuation-passing renamer monad: the continuation receives the
-- renamed value and returns a result paired with its free variables,
-- so binders can see which of them were actually used.
newtype CpsRn b = CpsRn { unCpsRn :: forall r. (b -> RnM (r, FreeVars))
                                            -> RnM (r, FreeVars) }
        -- See Note [CpsRn monad]
instance Functor CpsRn where
    fmap = liftM

instance Applicative CpsRn where
    -- 'pure' is the primitive: it just feeds the value to the continuation.
    pure x = CpsRn (\k -> k x)
    (<*>) = ap

instance Monad CpsRn where
    -- Defined in terms of 'pure' (rather than the other way round) so the
    -- AMP default 'return = pure' can never create a definition cycle.
    return = pure
    (CpsRn m) >>= mk = CpsRn (\k -> m (\v -> unCpsRn (mk v) k))
-- | Run a 'CpsRn' computation to completion; the final continuation
-- contributes no free variables of its own.
runCps :: CpsRn a -> RnM (a, FreeVars)
runCps (CpsRn m) = m (\r -> return (r, emptyFVs))
-- | Lift a plain renamer action into 'CpsRn'; it adds no free variables.
liftCps :: RnM a -> CpsRn a
liftCps act = CpsRn (\cont -> act >>= cont)

-- | Lift a renamer action that also returns free variables, merging them
-- with the free variables reported by the continuation.
liftCpsFV :: RnM (a, FreeVars) -> CpsRn a
liftCpsFV act = CpsRn $ \cont ->
  do { (v, fvs_here) <- act
     ; (r, fvs_rest) <- cont v
     ; return (r, fvs_here `plusFV` fvs_rest) }
wrapSrcSpanCps :: (a -> CpsRn b) -> Located a -> CpsRn (Located b)
-- Set the location, and also wrap it around the value returned
wrapSrcSpanCps fn (L loc a)
  = CpsRn (\k -> setSrcSpan loc $
                 unCpsRn (fn a) $ \v ->
                 k (L loc v))

-- Look up a data-constructor occurrence in a pattern, recording the
-- constructor itself as a "use" in the free variables.
lookupConCps :: Located RdrName -> CpsRn (Located Name)
lookupConCps con_rdr
  = CpsRn (\k -> do { con_name <- lookupLocatedOccRn con_rdr
                    ; (r, fvs) <- k con_name
                    ; return (r, addOneFV fvs (unLoc con_name)) })
    -- We add the constructor name to the free vars
    -- See Note [Patterns are uses]
{-
Note [Patterns are uses]
~~~~~~~~~~~~~~~~~~~~~~~~
Consider
module Foo( f, g ) where
data T = T1 | T2
f T1 = True
f T2 = False
g _ = T1
Arguably we should report T2 as unused, even though it appears in a
pattern, because it never occurs in a constructed position. See
Trac #7336.
However, implementing this in the face of pattern synonyms would be
less straightforward, since given two pattern synonyms
pattern P1 <- P2
pattern P2 <- ()
we need to observe the dependency between P1 and P2 so that type
checking can be done in the correct order (just like for value
bindings). Dependencies between bindings is analyzed in the renamer,
where we don't know yet whether P2 is a constructor or a pattern
synonym. So for now, we do report conid occurrences in patterns as
uses.
*********************************************************
* *
Name makers
* *
*********************************************************
Externally abstract type of name makers,
which is how you go from a RdrName to a Name
-}
-- | How to turn a 'RdrName' bound in a pattern into a 'Name'
-- (see the block comment above for the design rationale).
data NameMaker
  = LamMk       -- Lambdas
      Bool      -- True <=> report unused bindings
                --   (even if True, the warning only comes out
                --    if -fwarn-unused-matches is on)

  | LetMk       -- Let bindings, incl top level
                -- Do *not* check for unused bindings
      TopLevelFlag
      MiniFixityEnv
-- | Name maker for top-level (qualified) bindings.
topRecNameMaker :: MiniFixityEnv -> NameMaker
topRecNameMaker = LetMk TopLevel

-- | Does this name maker produce top-level names?
isTopRecNameMaker :: NameMaker -> Bool
isTopRecNameMaker mk = case mk of
  LetMk TopLevel _ -> True
  _                -> False

-- | Name maker for local (non-top-level) let bindings.
localRecNameMaker :: MiniFixityEnv -> NameMaker
localRecNameMaker = LetMk NotTopLevel
-- | Choose a 'NameMaker' for pattern variables bound in the given match
-- context: lambda-style names, with unused-binding warnings suppressed
-- where a warning would be meaningless.
matchNameMaker :: HsMatchContext a -> NameMaker
matchNameMaker ctxt = LamMk (report_unused ctxt)
  where
    -- Do not report unused names in interactive contexts,
    -- i.e. when you type 'x <- e' at the GHCi prompt
    report_unused (StmtCtxt GhciStmtCtxt) = False
    -- also, don't warn in pattern quotes, as there
    -- is no RHS where the variables can be used!
    report_unused ThPatQuote              = False
    report_unused _                       = True
-- | Rename a pattern type signature (e.g. @x :: ty@), in CPS style.
rnHsSigCps :: HsWithBndrs RdrName (LHsType RdrName)
           -> CpsRn (HsWithBndrs Name (LHsType Name))
rnHsSigCps = CpsRn . rnHsBndrSig PatCtx

-- | Like 'newPatName', but keeps the source location of the 'RdrName'.
newPatLName :: NameMaker -> Located RdrName -> CpsRn (Located Name)
newPatLName name_maker rdr_name@(L loc _)
  = fmap (L loc) (newPatName name_maker rdr_name)
newPatName :: NameMaker -> Located RdrName -> CpsRn Name
-- Make a fresh Name for one pattern binder and bring it into scope for
-- the continuation; behaviour depends on the kind of NameMaker.
newPatName (LamMk report_unused) rdr_name
  = CpsRn (\ thing_inside ->
        do { name <- newLocalBndrRn rdr_name
           ; (res, fvs) <- bindLocalNames [name] (thing_inside name)
             -- Warn if the continuation never used the binder
           ; when report_unused $ warnUnusedMatches [name] fvs
             -- The binder itself is not free in the result
           ; return (res, name `delFV` fvs) })

newPatName (LetMk is_top fix_env) rdr_name
  = CpsRn (\ thing_inside ->
        do { name <- case is_top of
                       NotTopLevel -> newLocalBndrRn rdr_name
                       TopLevel    -> newTopSrcBinder rdr_name
           ; bindLocalNames [name] $       -- Do *not* use bindLocalNameFV here
                                           -- See Note [View pattern usage]
             addLocalFixities fix_env [name] $
             thing_inside name })
-- Note: the bindLocalNames is somewhat suspicious
-- because it binds a top-level name as a local name.
-- however, this binding seems to work, and it only exists for
-- the duration of the patterns and the continuation;
-- then the top-level name is added to the global env
-- before going on to the RHSes (see RnSource.lhs).
{-
Note [View pattern usage]
~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
let (r, (r -> x)) = x in ...
Here the pattern binds 'r', and then uses it *only* in the view pattern.
We want to "see" this use, and in let-bindings we collect all uses and
report unused variables at the binding level. So we must use bindLocalNames
here, *not* bindLocalNameFV. Trac #3943.
*********************************************************
* *
External entry points
* *
*********************************************************
There are various entry points to renaming patterns, depending on
(1) whether the names created should be top-level names or local names
(2) whether the scope of the names is entirely given in a continuation
(e.g., in a case or lambda, but not in a let or at the top-level,
because of the way mutually recursive bindings are handled)
 (3) whether a type signature in the pattern can bind
lexically-scoped type variables (for unpacking existential
type vars in data constructors)
(4) whether we do duplicate and unused variable checking
(5) whether there are fixity declarations associated with the names
bound by the patterns that need to be brought into scope with them.
Rather than burdening the clients of this module with all of these choices,
we export the three points in this design space that we actually need:
-}
-- ----------- Entry point 1: rnPats -------------------
-- Binds local names; the scope of the bindings is entirely in the thing_inside
--   * allows type sigs to bind type vars
--   * local namemaker
--   * unused and duplicate checking
--   * no fixities
rnPats :: HsMatchContext Name -- for error messages
       -> [LPat RdrName]
       -> ([LPat Name] -> RnM (a, FreeVars))
       -> RnM (a, FreeVars)
rnPats ctxt pats thing_inside
  = do { -- Capture the envs *before* the pattern binders go in,
         -- so we can detect shadowing afterwards
         envs_before <- getRdrEnvs

         -- (1) rename the patterns, bringing into scope all of the term variables
         -- (2) then do the thing inside.
       ; unCpsRn (rnLPatsAndThen (matchNameMaker ctxt) pats) $ \ pats' -> do
       { -- Check for duplicated and shadowed names
         -- Must do this *after* renaming the patterns
         -- See Note [Collect binders only after renaming] in HsUtils
         -- Because we don't bind the vars all at once, we can't
         --   check incrementally for duplicates;
         -- Nor can we check incrementally for shadowing, else we'll
         --   complain *twice* about duplicates e.g. f (x,x) = ...
       ; addErrCtxt doc_pat $
         checkDupAndShadowedNames envs_before $
         collectPatsBinders pats'
       ; thing_inside pats' } }
  where
    doc_pat = ptext (sLit "In") <+> pprMatchContext ctxt
rnPat :: HsMatchContext Name -- for error messages
      -> LPat RdrName
      -> (LPat Name -> RnM (a, FreeVars))
      -> RnM (a, FreeVars)      -- Variables bound by pattern do not
                                -- appear in the result FreeVars
-- | Rename a single pattern: the singleton-list specialisation of
-- 'rnPats'.  The lazy pattern in the continuation mirrors the
-- irrefutable let-binding the general case would produce.
rnPat ctxt pat thing_inside
  = rnPats ctxt [pat] (\ ~[pat'] -> thing_inside pat')
applyNameMaker :: NameMaker -> Located RdrName -> RnM (Located Name)
-- | Run a 'NameMaker' on one binder, discarding the collected free vars.
applyNameMaker mk rdr = fmap fst (runCps (newPatLName mk rdr))
-- ----------- Entry point 2: rnBindPat -------------------
-- Binds local names; in a recursive scope that involves other bound vars
--      e.g let { (x, Just y) = e1; ... } in ...
--    * does NOT allow type sigs to bind type vars
--    * local namemaker
--    * no unused and duplicate checking
--    * fixities might be coming in
rnBindPat :: NameMaker
          -> LPat RdrName
          -> RnM (LPat Name, FreeVars)
-- Returned FreeVars are the free variables of the pattern,
-- of course excluding variables bound by this pattern
rnBindPat name_maker = runCps . rnLPatAndThen name_maker
{-
*********************************************************
* *
The main event
* *
*********************************************************
-}
-- ----------- Entry point 3: rnLPatAndThen -------------------
-- General version: parametrized by how you make new names
rnLPatsAndThen :: NameMaker -> [LPat RdrName] -> CpsRn [LPat Name]
rnLPatsAndThen mk = mapM (rnLPatAndThen mk)
  -- Despite the map, the monad ensures that each pattern binds
  -- variables that may be mentioned in subsequent patterns in the list
  -- (the CpsRn continuation threads the extended environment along)
--------------------
-- The workhorse
rnLPatAndThen :: NameMaker -> LPat RdrName -> CpsRn (LPat Name)
-- Rename one located pattern, preserving its source span.
rnLPatAndThen nm lpat = wrapSrcSpanCps (rnPatAndThen nm) lpat
rnPatAndThen :: NameMaker -> Pat RdrName -> CpsRn (Pat Name)
-- | Per-pattern worker: one equation per pattern form.  Variables bound
-- here scope over everything the CpsRn continuation renames afterwards
-- (later patterns, view-pattern expressions, the RHS).
rnPatAndThen _  (WildPat _)   = return (WildPat placeHolderType)
rnPatAndThen mk (ParPat pat)  = do { pat' <- rnLPatAndThen mk pat; return (ParPat pat') }
rnPatAndThen mk (LazyPat pat) = do { pat' <- rnLPatAndThen mk pat; return (LazyPat pat') }
rnPatAndThen mk (BangPat pat) = do { pat' <- rnLPatAndThen mk pat; return (BangPat pat') }
rnPatAndThen mk (VarPat rdr)  = do { loc <- liftCps getSrcSpanM
                                   ; name <- newPatName mk (L loc rdr)
                                   ; return (VarPat name) }
     -- we need to bind pattern variables for view pattern expressions
     -- (e.g. in the pattern (x, x -> y) x needs to be bound in the rhs of the tuple)

rnPatAndThen mk (SigPatIn pat sig)
  -- When renaming a pattern type signature (e.g. f (a :: T) = ...), it is
  -- important to rename its type signature _before_ renaming the rest of the
  -- pattern, so that type variables are first bound by the _outermost_ pattern
  -- type signature they occur in. This keeps the type checker happy when
  -- pattern type signatures happen to be nested (#7827)
  --
  -- f ((Just (x :: a) :: Maybe a)
  -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~^       `a' is first bound here
  -- ~~~~~~~~~~~~~~~^                   the same `a' then used here
  = do { sig' <- rnHsSigCps sig
       ; pat' <- rnLPatAndThen mk pat
       ; return (SigPatIn pat' sig') }

rnPatAndThen mk (LitPat lit)
  | HsString src s <- lit
  = do { ovlStr <- liftCps (xoptM LangExt.OverloadedStrings)
       ; if ovlStr
         -- With OverloadedStrings, a string literal pattern is
         -- re-renamed as an overloaded-literal (NPat) pattern
         then rnPatAndThen mk
                           (mkNPat (noLoc (mkHsIsString src s placeHolderType))
                                   Nothing)
         else normal_lit }
  | otherwise = normal_lit
  where
    normal_lit = do { liftCps (rnLit lit); return (LitPat lit) }

rnPatAndThen _ (NPat (L l lit) mb_neg _eq)
  = do { (lit', mb_neg') <- liftCpsFV $ rnOverLit lit
       ; mb_neg' -- See Note [Negative zero]
           <- let negative = do { (neg, fvs) <- lookupSyntaxName negateName
                                ; return (Just neg, fvs) }
                  positive = return (Nothing, emptyFVs)
              in liftCpsFV $ case (mb_neg , mb_neg') of
                               -- a negate witness is needed when exactly one
                               -- of these holds: the pattern was written
                               -- negated, or rnOverLit reported negative zero
                               (Nothing, Just _ ) -> negative
                               (Just _ , Nothing) -> negative
                               (Nothing, Nothing) -> positive
                               (Just _ , Just _ ) -> positive
       ; eq' <- liftCpsFV $ lookupSyntaxName eqName
       ; return (NPat (L l lit') mb_neg' eq') }

rnPatAndThen mk (NPlusKPat rdr (L l lit) _ _)
  = do { new_name <- newPatName mk rdr
       ; (lit', _) <- liftCpsFV $ rnOverLit lit -- See Note [Negative zero]
                                                -- We skip negateName as
                                                -- negative zero doesn't make
                                                -- sense in n + k patterns
       ; minus <- liftCpsFV $ lookupSyntaxName minusName
       ; ge    <- liftCpsFV $ lookupSyntaxName geName
       ; return (NPlusKPat (L (nameSrcSpan new_name) new_name)
                           (L l lit') ge minus) }
        -- The Report says that n+k patterns must be in Integral

rnPatAndThen mk (AsPat rdr pat)
  = do { new_name <- newPatLName mk rdr
       ; pat' <- rnLPatAndThen mk pat
       ; return (AsPat new_name pat') }

rnPatAndThen mk p@(ViewPat expr pat _ty)
  = do { liftCps $ do { vp_flag <- xoptM LangExt.ViewPatterns
                      ; checkErr vp_flag (badViewPat p) }
         -- Because of the way we're arranging the recursive calls,
         -- this will be in the right context
       ; expr' <- liftCpsFV $ rnLExpr expr
       ; pat' <- rnLPatAndThen mk pat
       -- Note: at this point the PreTcType in ty can only be a placeHolder
       -- ; return (ViewPat expr' pat' ty) }
       ; return (ViewPat expr' pat' placeHolderType) }

rnPatAndThen mk (ConPatIn con stuff)
   -- rnConPatAndThen takes care of reconstructing the pattern
   -- The pattern for the empty list needs to be replaced by an empty explicit
   -- list pattern when overloaded lists is turned on.
  = case unLoc con == nameRdrName (dataConName nilDataCon) of
      True  -> do { ol_flag <- liftCps $ xoptM LangExt.OverloadedLists
                  ; if ol_flag then rnPatAndThen mk (ListPat [] placeHolderType Nothing)
                               else rnConPatAndThen mk con stuff}
      False -> rnConPatAndThen mk con stuff

rnPatAndThen mk (ListPat pats _ _)
  = do { opt_OverloadedLists <- liftCps $ xoptM LangExt.OverloadedLists
       ; pats' <- rnLPatsAndThen mk pats
       ; case opt_OverloadedLists of
           -- With OverloadedLists, record the toList witness for the
           -- type checker to use
           True  -> do { (to_list_name,_) <- liftCps $ lookupSyntaxName toListName
                       ; return (ListPat pats' placeHolderType
                                         (Just (placeHolderType, to_list_name)))}
           False -> return (ListPat pats' placeHolderType Nothing) }

rnPatAndThen mk (PArrPat pats _)
  = do { pats' <- rnLPatsAndThen mk pats
       ; return (PArrPat pats' placeHolderType) }

rnPatAndThen mk (TuplePat pats boxed _)
  = do { liftCps $ checkTupSize (length pats)
       ; pats' <- rnLPatsAndThen mk pats
       ; return (TuplePat pats' boxed []) }

-- If a splice has been run already, just rename the result.
rnPatAndThen mk (SplicePat (HsSpliced mfs (HsSplicedPat pat)))
  = SplicePat . HsSpliced mfs . HsSplicedPat <$> rnPatAndThen mk pat

rnPatAndThen mk (SplicePat splice)
  = do { eith <- liftCpsFV $ rnSplicePat splice
       ; case eith of   -- See Note [rnSplicePat] in RnSplice
           Left  not_yet_renamed -> rnPatAndThen mk not_yet_renamed
           Right already_renamed -> return already_renamed }

-- Any pattern form not handled above indicates a renamer bug
rnPatAndThen _ pat = pprPanic "rnLPatAndThen" (ppr pat)
--------------------
rnConPatAndThen :: NameMaker
                -> Located RdrName    -- the constructor
                -> HsConPatDetails RdrName
                -> CpsRn (Pat Name)
-- Rename a constructor pattern, dispatching on prefix/infix/record form.
rnConPatAndThen mk con (PrefixCon pats)
  = do { con'  <- lookupConCps con
       ; pats' <- rnLPatsAndThen mk pats
       ; return (ConPatIn con' (PrefixCon pats')) }

rnConPatAndThen mk con (InfixCon pat1 pat2)
  = do { con'  <- lookupConCps con
       ; pat1' <- rnLPatAndThen mk pat1
       ; pat2' <- rnLPatAndThen mk pat2
         -- mkConOpPatRn uses the operator fixity, so look it up first
       ; fixity <- liftCps $ lookupFixityRn (unLoc con')
       ; liftCps $ mkConOpPatRn con' fixity pat1' pat2' }

rnConPatAndThen mk con (RecCon rpats)
  = do { con'   <- lookupConCps con
       ; rpats' <- rnHsRecPatsAndThen mk con' rpats
       ; return (ConPatIn con' (RecCon rpats')) }
--------------------
rnHsRecPatsAndThen :: NameMaker
                   -> Located Name      -- Constructor
                   -> HsRecFields RdrName (LPat RdrName)
                   -> CpsRn (HsRecFields Name (LPat Name))
-- Rename the fields of a record pattern, including any fields that a
-- ".." expanded into; unused-match warnings are suppressed for the
-- dot-dot-introduced fields.
rnHsRecPatsAndThen mk (L _ con) hs_rec_fields@(HsRecFields { rec_dotdot = dd })
  = do { flds  <- liftCpsFV $ rnHsRecFields (HsRecFieldPat con) VarPat hs_rec_fields
       ; flds' <- mapM rn_field (flds `zip` [1..])
       ; return (HsRecFields { rec_flds = flds', rec_dotdot = dd }) }
  where
    rn_field (L l fld, n') = do { arg' <- rnLPatAndThen (nested_mk dd mk n')
                                                        (hsRecFieldArg fld)
                                ; return (L l (fld { hsRecFieldArg = arg' })) }

    -- Suppress unused-match reporting for fields introduced by ".."
    -- (n counts the explicit fields, which come first in the list,
    -- so positions n' > n are the dot-dot-filled ones)
    nested_mk Nothing  mk                    _  = mk
    nested_mk (Just _) mk@(LetMk {})         _  = mk
    nested_mk (Just n) (LamMk report_unused) n' = LamMk (report_unused && (n' <= n))
{-
************************************************************************
* *
Record fields
* *
************************************************************************
-}
-- | Where a record-field list occurs; drives field lookup,
-- error messages, and the dot-dot expansion rules below.
data HsRecFieldContext
  = HsRecFieldCon Name   -- ^ Record construction, with the data constructor
  | HsRecFieldPat Name   -- ^ Record pattern match, with the data constructor
  | HsRecFieldUpd        -- ^ Record update (constructor not known)
rnHsRecFields
    :: forall arg.
       HsRecFieldContext
    -> (RdrName -> arg) -- When punning, use this to build a new field
    -> HsRecFields RdrName (Located arg)
    -> RnM ([LHsRecField Name (Located arg)], FreeVars)

-- This surprisingly complicated pass
--   a) looks up the field name (possibly using disambiguation)
--   b) fills in puns and dot-dot stuff
-- When we've finished, we've renamed the LHS, but not the RHS,
-- of each x=e binding

rnHsRecFields ctxt mk_arg (HsRecFields { rec_flds = flds, rec_dotdot = dotdot })
  = do { pun_ok      <- xoptM LangExt.RecordPuns
       ; disambig_ok <- xoptM LangExt.DisambiguateRecordFields
       ; parent <- check_disambiguation disambig_ok mb_con
       ; flds1 <- mapM (rn_fld pun_ok parent) flds
       ; mapM_ (addErr . dupFieldErr ctxt) dup_flds
       ; dotdot_flds <- rn_dotdot dotdot mb_con flds1

       -- Check for an empty record update  e {}
       -- NB: don't complain about e { .. }, because rn_dotdot has done that already
       ; case ctxt of
           HsRecFieldUpd | Nothing <- dotdot
                         , null flds
                         -> addErr emptyUpdateErr
           _ -> return ()

       ; let all_flds | null dotdot_flds = flds1
                      | otherwise        = flds1 ++ dotdot_flds
       ; return (all_flds, mkFVs (getFieldIds all_flds)) }
  where
    mb_con = case ctxt of
               HsRecFieldCon con | not (isUnboundName con) -> Just con
               HsRecFieldPat con | not (isUnboundName con) -> Just con
               _ {- update or isUnboundName con -}         -> Nothing
           -- The unbound name test is because if the constructor
           -- isn't in scope the constructor lookup will add an error,
           -- but still return an unbound name.
           -- We don't want that to screw up the dot-dot fill-in stuff.

    doc = case mb_con of
            Nothing  -> ptext (sLit "constructor field name")
            Just con -> ptext (sLit "field of constructor") <+> quotes (ppr con)

    -- Rename one explicit field, expanding a pun (x  ==>  x = x)
    rn_fld pun_ok parent (L l (HsRecField { hsRecFieldId = fld
                                          , hsRecFieldArg = arg
                                          , hsRecPun = pun }))
      = do { fld'@(L loc fld_nm) <- wrapLocM (lookupSubBndrOcc True parent doc) fld
           ; arg' <- if pun
                     then do { checkErr pun_ok (badPun fld)
                             ; return (L loc (mk_arg (mkRdrUnqual (nameOccName fld_nm)))) }
                     else return arg
           ; return (L l (HsRecField { hsRecFieldId = fld'
                                     , hsRecFieldArg = arg'
                                     , hsRecPun = pun })) }

    rn_dotdot :: Maybe Int     -- See Note [DotDot fields] in HsPat
              -> Maybe Name    -- The constructor (Nothing for an update
                               --    or out of scope constructor)
              -> [LHsRecField Name (Located arg)]     -- Explicit fields
              -> RnM [LHsRecField Name (Located arg)] -- Filled in .. fields
    rn_dotdot Nothing _mb_con _flds     -- No ".." at all
      = return []
    rn_dotdot (Just {}) Nothing _flds   -- ".." on record update
      = do { case ctxt of
               HsRecFieldUpd -> addErr badDotDotUpd
               _             -> return ()
           ; return [] }
    rn_dotdot (Just n) (Just con) flds -- ".." on record construction / pat match
      = ASSERT( n == length flds )
        do { loc <- getSrcSpanM -- Rather approximate
           ; dd_flag <- xoptM LangExt.RecordWildCards
           ; checkErr dd_flag (needFlagDotDot ctxt)
           ; (rdr_env, lcl_env) <- getRdrEnvs
           ; con_fields <- lookupConstructorFields con
           ; when (null con_fields) (addErr (badDotDotCon con))
           ; let present_flds = getFieldIds flds
                 parent_tc = find_tycon rdr_env con

                   -- For constructor uses (but not patterns)
                   -- the arg should be in scope (unqualified)
                   -- ignoring the record field itself
                   -- Eg.  data R = R { x,y :: Int }
                   --      f x = R { .. }   -- Should expand to R {x=x}, not R{x=x,y=y}
                 arg_in_scope fld
                   = rdr `elemLocalRdrEnv` lcl_env
                   || notNull [ gre | gre <- lookupGRE_RdrName rdr rdr_env
                                    , case gre_par gre of
                                        ParentIs p -> p /= parent_tc
                                        _          -> True ]
                   where
                     rdr = mkRdrUnqual (nameOccName fld)

                 -- One GRE per constructor field that (a) was not given
                 -- explicitly, (b) is in scope, and (c) for constructor
                 -- uses, has a matching unqualified arg in scope
                 dot_dot_gres = [ head gres
                                | fld <- con_fields
                                , not (fld `elem` present_flds)
                                , let gres = lookupGRE_Name rdr_env fld
                                , not (null gres)  -- Check field is in scope
                                , case ctxt of
                                    HsRecFieldCon {} -> arg_in_scope fld
                                    _other           -> True ]

           ; addUsedRdrNames (map greRdrName dot_dot_gres)
           ; return [ L loc (HsRecField
                               { hsRecFieldId = L loc fld
                               , hsRecFieldArg = L loc (mk_arg arg_rdr)
                               , hsRecPun = False })
                    | gre <- dot_dot_gres
                    , let fld = gre_name gre
                          arg_rdr = mkRdrUnqual (nameOccName fld) ] }

    check_disambiguation :: Bool -> Maybe Name -> RnM Parent
    -- When disambiguation is on, use the constructor's parent type
    -- constructor as the Parent for looking up its fields
    check_disambiguation disambig_ok mb_con
      | disambig_ok, Just con <- mb_con
      = do { env <- getGlobalRdrEnv; return (ParentIs (find_tycon env con)) }
      | otherwise = return NoParent

    find_tycon :: GlobalRdrEnv -> Name {- DataCon -} -> Name {- TyCon -}
    -- Return the parent *type constructor* of the data constructor
    -- That is, the parent of the data constructor.
    -- That's the parent to use for looking up record fields.
    find_tycon env con
      | Just (AConLike (RealDataCon dc)) <- wiredInNameTyThing_maybe con
      = tyConName (dataConTyCon dc)   -- Special case for [], which is built-in syntax
                                      -- and not in the GlobalRdrEnv (Trac #8448)
      | [GRE { gre_par = ParentIs p }] <- lookupGRE_Name env con
      = p
      | otherwise
      = pprPanic "find_tycon" (ppr con $$ ppr (lookupGRE_Name env con))

    dup_flds :: [[RdrName]]
    -- Each list represents a RdrName that occurred more than once
    -- (the list contains all occurrences)
    -- Each list in dup_fields is non-empty
    (_, dup_flds) = removeDups compare (getFieldIds flds)
getFieldIds :: [LHsRecField id arg] -> [id]
-- | Strip the locations and extract each record field's identifier.
getFieldIds = map (unLoc . hsRecFieldId . unLoc)
-- Error: ".." used without the RecordWildCards extension
needFlagDotDot :: HsRecFieldContext -> SDoc
needFlagDotDot ctxt = vcat [ptext (sLit "Illegal `..' in record") <+> pprRFC ctxt,
                            ptext (sLit "Use RecordWildCards to permit this")]

-- Error: ".." used with a constructor that has no labelled fields
badDotDotCon :: Name -> SDoc
badDotDotCon con
  = vcat [ ptext (sLit "Illegal `..' notation for constructor") <+> quotes (ppr con)
         , nest 2 (ptext (sLit "The constructor has no labelled fields")) ]

-- Error: ".." in a record update, where it can never be resolved
badDotDotUpd :: SDoc
badDotDotUpd = ptext (sLit "You cannot use `..' in a record update")

-- Error: a record update with no fields at all, e.g.  e {}
emptyUpdateErr :: SDoc
emptyUpdateErr = ptext (sLit "Empty record update")

-- Error: field punning used without the NamedFieldPuns extension
badPun :: Located RdrName -> SDoc
badPun fld = vcat [ptext (sLit "Illegal use of punning for field") <+> quotes (ppr fld),
                   ptext (sLit "Use NamedFieldPuns to permit this")]

-- Error: the same field name given more than once in one record
dupFieldErr :: HsRecFieldContext -> [RdrName] -> SDoc
dupFieldErr ctxt dups
  = hsep [ptext (sLit "duplicate field name"),
          quotes (ppr (head dups)),
          ptext (sLit "in record"), pprRFC ctxt]

-- Short noun for the record-field context, used in the messages above
pprRFC :: HsRecFieldContext -> SDoc
pprRFC (HsRecFieldCon {}) = ptext (sLit "construction")
pprRFC (HsRecFieldPat {}) = ptext (sLit "pattern")
pprRFC (HsRecFieldUpd {}) = ptext (sLit "update")
{-
************************************************************************
* *
\subsubsection{Literals}
* *
************************************************************************
When literals occur we have to make sure
that the types and classes they involve
are made available.
-}
rnLit :: HsLit -> RnM ()
-- Check a non-overloaded literal; only character literals are
-- range-checked here, everything else passes through.
rnLit (HsChar _ c) = checkErr (inCharRange c) (bogusCharError c)
rnLit _ = return ()
-- Turn a Fractional-looking literal which happens to be an integer into an
-- Integer-looking literal.
-- (Applied only under the NumDecimals extension; see its use in rnOverLit.)
generalizeOverLitVal :: OverLitVal -> OverLitVal
generalizeOverLitVal (HsFractional (FL {fl_text=src,fl_neg=neg,fl_value=val}))
    | denominator val == 1 = HsIntegral (IL {il_text=src,il_neg=neg,il_value=numerator val})
generalizeOverLitVal lit = lit
isNegativeZeroOverLit :: HsOverLit t -> Bool
-- | True for literals whose value is zero but which carry a negation
-- sign, e.g. @-0@ or @-0.0@.  See Note [Negative zero].
isNegativeZeroOverLit lit = neg_zero (ol_val lit)
  where
    neg_zero (HsIntegral   i) = il_neg i && il_value i == 0
    neg_zero (HsFractional f) = fl_neg f && fl_value f == 0
    neg_zero _                = False
{-
Note [Negative zero]
~~~~~~~~~~~~~~~~~~~~~~~~~
There were problems with negative zero in conjunction with Negative Literals
extension. Numeric literal value is contained in Integer and Rational types
inside IntegralLit and FractionalLit. These types cannot represent negative
zero value. So we had to add explicit field 'neg' which would hold information
about literal sign. Here in rnOverLit we use it to detect negative zeroes and
in this case return not only literal itself but also negateName so that users
can apply it explicitly. In this case it stays negative zero. Trac #13211
-}
rnOverLit :: HsOverLit t ->
             RnM ((HsOverLit Name, Maybe (HsExpr Name)), FreeVars)
-- | Rename an overloaded literal: resolve its "from" witness (possibly
-- rebindable via RebindableSyntax), and, for negative-zero literals,
-- additionally return a negate witness so callers can apply it
-- explicitly.  See Note [Negative zero].
rnOverLit origLit
  = do { opt_NumDecimals <- xoptM LangExt.NumDecimals
         -- Under NumDecimals, a fractional literal with an integral
         -- value is re-classified as an integral literal
       ; let { lit@(OverLit {ol_val=val})
                 | opt_NumDecimals = origLit {ol_val = generalizeOverLitVal (ol_val origLit)}
                 | otherwise       = origLit
             }
       ; let std_name = hsOverLitName val
       ; (from_thing_name, fvs1)
           <- lookupSyntaxName std_name
         -- rebindable iff the lookup resolved to something other than
         -- the standard name
       ; let rebindable = case from_thing_name of
                            HsVar v -> v /= std_name
                            _       -> panic "rnOverLit"
       ; let lit' = lit { ol_witness = from_thing_name
                        , ol_rebindable = rebindable
                        , ol_type = placeHolderType }
       ; if isNegativeZeroOverLit lit'
         then do { (negate_name, fvs2)
                     <- lookupSyntaxName negateName
                 ; return ((lit' { ol_val = negateOverLitVal val }, Just negate_name)
                          , fvs1 `plusFV` fvs2) }
         else return ((lit', Nothing), fvs1) }
{-
************************************************************************
* *
\subsubsection{Errors}
* *
************************************************************************
-}
-- Error: pattern type signature used without ScopedTypeVariables
patSigErr :: Outputable a => a -> SDoc
patSigErr ty
  =  (ptext (sLit "Illegal signature in pattern:") <+> ppr ty)
        $$ nest 4 (ptext (sLit "Use ScopedTypeVariables to permit it"))

-- Error: character literal outside the representable character range
bogusCharError :: Char -> SDoc
bogusCharError c
  = ptext (sLit "character literal out of range: '\\") <> char c <> char '\''

-- Error: view pattern used without the ViewPatterns extension
badViewPat :: Pat RdrName -> SDoc
badViewPat pat = vcat [ptext (sLit "Illegal view pattern: ") <+> ppr pat,
                       ptext (sLit "Use ViewPatterns to enable view patterns")]
|
rahulmutt/ghcvm
|
compiler/Eta/Rename/RnPat.hs
|
bsd-3-clause
| 33,607
| 9
| 21
| 10,679
| 6,507
| 3,385
| 3,122
| -1
| -1
|
{-# LANGUAGE DeriveDataTypeable #-}
-----------------------------------------------------------------------------
-- |
-- Module : XMonad.Actions.DynamicWorkspaces
-- Copyright : (c) David Roundy <droundy@darcs.net>
-- License : BSD3-style (see LICENSE)
--
-- Maintainer : none
-- Stability : unstable
-- Portability : unportable
--
-- Provides bindings to add and delete workspaces.
--
-----------------------------------------------------------------------------
module XMonad.Actions.DynamicWorkspaces (
-- * Usage
-- $usage
addWorkspace, addWorkspacePrompt,
appendWorkspace, appendWorkspacePrompt,
addWorkspaceAt,
removeWorkspace,
removeWorkspaceByTag,
removeEmptyWorkspace,
removeEmptyWorkspaceByTag,
removeEmptyWorkspaceAfter,
removeEmptyWorkspaceAfterExcept,
addHiddenWorkspace, addHiddenWorkspaceAt,
withWorkspace,
selectWorkspace, renameWorkspace,
renameWorkspaceByName,
toNthWorkspace, withNthWorkspace,
setWorkspaceIndex, withWorkspaceIndex,
WorkspaceIndex
) where
import XMonad hiding (workspaces)
import XMonad.StackSet hiding (filter, modify, delete)
import XMonad.Prompt.Workspace ( Wor(Wor), workspacePrompt )
import XMonad.Prompt ( XPConfig, mkXPrompt )
import XMonad.Util.WorkspaceCompare ( getSortByIndex )
import Data.List (find)
import Data.Maybe (isNothing)
import Control.Monad (when)
import qualified Data.Map.Strict as Map
import qualified XMonad.Util.ExtensibleState as XS
-- $usage
-- You can use this module with the following in your @~\/.xmonad\/xmonad.hs@ file:
--
-- > import XMonad.Actions.DynamicWorkspaces
-- > import XMonad.Actions.CopyWindow(copy)
--
-- Then add keybindings like the following:
--
-- > , ((modm .|. shiftMask, xK_BackSpace), removeWorkspace)
-- > , ((modm .|. shiftMask, xK_v ), selectWorkspace def)
-- > , ((modm, xK_m ), withWorkspace def (windows . W.shift))
-- > , ((modm .|. shiftMask, xK_m ), withWorkspace def (windows . copy))
-- > , ((modm .|. shiftMask, xK_r ), renameWorkspace def)
--
-- > -- mod-[1..9] %! Switch to workspace N in the list of workspaces
-- > -- mod-shift-[1..9] %! Move client to workspace N in the list of workspaces
-- > ++
-- > zip (zip (repeat (modm)) [xK_1..xK_9]) (map (withNthWorkspace W.greedyView) [0..])
-- > ++
-- > zip (zip (repeat (modm .|. shiftMask)) [xK_1..xK_9]) (map (withNthWorkspace W.shift) [0..])
--
-- Alternatively, you can associate indexes (which don't depend on the
-- workspace list order) to workspaces by using following keybindings:
--
-- > -- mod-[1..9] %! Switch to workspace of index N
-- > -- mod-control-[1..9] %! Set index N to the current workspace
-- > ++
-- > zip (zip (repeat (modm)) [xK_1..xK_9]) (map (withWorkspaceIndex W.greedyView) [1..])
-- > ++
-- > zip (zip (repeat (modm .|. controlMask)) [xK_1..xK_9]) (map (setWorkspaceIndex) [1..])
--
-- For detailed instructions on editing your key bindings, see
-- "XMonad.Doc.Extending#Editing_key_bindings". See also the documentation for
-- "XMonad.Actions.CopyWindow", 'windows', 'shift', and 'XPConfig'.
-- | The user-visible name (tag) of a workspace.
type WorkspaceTag = String
-- | The workspace index is mapped to a workspace tag by the user and
-- can be updated.
type WorkspaceIndex  = Int
-- | Internal dynamic project state that stores a mapping between
-- workspace indexes and workspace tags.
data DynamicWorkspaceState = DynamicWorkspaceState
  { workspaceIndexMap :: Map.Map WorkspaceIndex WorkspaceTag
    -- ^ user-assigned index -> workspace tag
  }
  deriving (Typeable, Read, Show)
-- Persist the index map across xmonad restarts via the
-- extensible-state store (hence PersistentExtension + Read/Show).
instance ExtensionClass DynamicWorkspaceState where
  initialValue = DynamicWorkspaceState Map.empty
  extensionType = PersistentExtension
-- | Register the current workspace's tag under the given index.
setWorkspaceIndex :: WorkspaceIndex -> X ()
setWorkspaceIndex widx = do
  currentWs <- gets (currentTag . windowset)
  XS.modify $ \st ->
    st { workspaceIndexMap = Map.insert widx currentWs (workspaceIndexMap st) }
-- | Apply a window-set operation to the workspace registered under the
-- given index; do nothing when no workspace is registered there.
withWorkspaceIndex :: (String -> WindowSet -> WindowSet) -> WorkspaceIndex -> X ()
withWorkspaceIndex job widx = do
  registered <- XS.gets (Map.lookup widx . workspaceIndexMap)
  case registered of
    Just t  -> windows (job t)
    Nothing -> return ()
-- | Prompt-completion function: keep only the candidates that start
-- with the string typed so far.
mkCompl :: [String] -> String -> IO [String]
mkCompl candidates typed = return (filter matches candidates)
  where matches cand = take (length typed) cand == typed
-- | Prompt for a workspace name and run the given action on it,
-- creating the workspace (hidden) first if it does not exist yet.
withWorkspace :: XPConfig -> (String -> X ()) -> X ()
withWorkspace c job = do ws <- gets (workspaces . windowset)
                         sort <- getSortByIndex
                         let ts = map tag $ sort ws
                             -- create-on-demand wrapper around the job
                             job' t | t `elem` ts = job t
                                    | otherwise   = addHiddenWorkspace t >> job t
                         mkXPrompt (Wor "") c (mkCompl ts) job'
-- | Prompt for a new name for the current workspace and apply it.
renameWorkspace :: XPConfig -> X ()
renameWorkspace conf = workspacePrompt conf renameWorkspaceByName
-- | Rename the current workspace to the given string.  A pre-existing
-- hidden workspace with that name is removed first (via
-- 'removeWorkspace''), and the index map is rewritten so indexes that
-- pointed at the old tag follow the rename.
renameWorkspaceByName :: String -> X ()
renameWorkspaceByName w = do old <- gets (currentTag . windowset)
                             windows $ \s -> let sett wk = wk { tag = w }
                                                 setscr scr = scr { workspace = sett $ workspace scr }
                                                 sets q = q { current = setscr $ current q }
                                             in sets $ removeWorkspace' w s
                             updateIndexMap old w
  where updateIndexMap old new = do
          wmap <- XS.gets workspaceIndexMap
          XS.modify $ \s -> s {workspaceIndexMap = Map.map (\t -> if t == old then new else t) wmap}
-- | Run an action on the tag of the workspace at position @wnum@ in the
-- sorted workspace list; do nothing if the index is out of range.
toNthWorkspace :: (String -> X ()) -> Int -> X ()
toNthWorkspace job wnum = do
  sort <- getSortByIndex
  tags <- gets (map tag . sort . workspaces . windowset)
  mapM_ job (take 1 (drop wnum tags))
-- | Apply a window-set operation to the workspace at position @wnum@ in
-- the sorted workspace list; do nothing if the index is out of range.
withNthWorkspace :: (String -> WindowSet -> WindowSet) -> Int -> X ()
withNthWorkspace job wnum = do
  sort <- getSortByIndex
  tags <- gets (map tag . sort . workspaces . windowset)
  mapM_ (windows . job) (take 1 (drop wnum tags))
-- | Prompt for a workspace name and view it, creating it first if no
-- workspace with that name exists yet.
selectWorkspace :: XPConfig -> X ()
selectWorkspace conf = workspacePrompt conf $ \w -> do
  known <- gets (tagMember w . windowset)
  if known
    then windows (greedyView w)
    else addWorkspace w
-- | Add a new workspace with the given name, or do nothing if a
-- workspace with the given name already exists; then switch to the
-- newly created workspace.  New workspaces go to the front of the list.
addWorkspace :: String -> X ()
addWorkspace = addWorkspaceAt (:)

-- | Same as 'addWorkspace', but appends the workspace to the end of the
-- list of workspaces instead.
appendWorkspace :: String -> X()
appendWorkspace = addWorkspaceAt (\w ws -> ws ++ [w])
-- | Adds a new workspace with the given name to the current list of
-- workspaces and switches to it.  The caller supplies the function that
-- decides where in the list the new workspace is inserted.
addWorkspaceAt :: (WindowSpace -> [WindowSpace] -> [WindowSpace]) -> String -> X ()
addWorkspaceAt add newtag = do
  addHiddenWorkspaceAt add newtag
  windows (greedyView newtag)
-- | Prompt for the name of a new workspace, add it if it does not
-- already exist, and switch to it.
addWorkspacePrompt :: XPConfig -> X ()
-- (no completions are offered: any name is accepted)
addWorkspacePrompt conf = mkXPrompt (Wor "New workspace name: ") conf (const (return [])) addWorkspace

-- | Prompt for the name of a new workspace, appending it to the end of the list of workspaces
-- if it does not already exist, and switch to it.
appendWorkspacePrompt :: XPConfig -> X ()
appendWorkspacePrompt conf = mkXPrompt (Wor "New workspace name: ") conf (const (return [])) appendWorkspace
-- | Add a new hidden workspace with the given name, or do nothing if
-- a workspace with the given name already exists. Takes a function to insert
-- the workspace at an arbitrary spot in the list.
addHiddenWorkspaceAt :: (WindowSpace -> [WindowSpace] -> [WindowSpace]) -> String -> X ()
addHiddenWorkspaceAt add newtag =
  whenX (gets (not . tagMember newtag . windowset)) $ do
    -- the new workspace starts on the user's configured layout
    l <- asks (layoutHook . config)
    windows (addHiddenWorkspace' add newtag l)
-- | Add a new hidden workspace with the given name (at the front of the
-- list), or do nothing if a workspace with the given name already exists.
addHiddenWorkspace :: String -> X ()
addHiddenWorkspace = addHiddenWorkspaceAt (:)
-- | Remove the current workspace if it contains no windows.
removeEmptyWorkspace :: X ()
removeEmptyWorkspace = removeEmptyWorkspaceByTag =<< gets (currentTag . windowset)

-- | Remove the current workspace.
removeWorkspace :: X ()
removeWorkspace = removeWorkspaceByTag =<< gets (currentTag . windowset)

-- | Remove the workspace with the given tag, but only if it contains no
-- windows.
removeEmptyWorkspaceByTag :: String -> X ()
removeEmptyWorkspaceByTag t = whenX (isEmpty t) (removeWorkspaceByTag t)
-- | Remove workspace with specific tag.
--
-- If the workspace to remove is currently focused, view some hidden
-- workspace first, because 'removeWorkspace'' only deletes hidden
-- workspaces.  Does nothing when there is no hidden workspace at all.
removeWorkspaceByTag :: String -> X ()
removeWorkspaceByTag torem = do
    s <- gets windowset
    case s of
        StackSet { current = Screen { workspace = cur }, hidden = (w:_) } -> do
            -- "torem" is the current workspace, so move focus away first
            when (torem==tag cur) $ windows $ view $ tag w
            windows $ removeWorkspace' torem
        _ -> return ()
-- | Remove the current workspace after an operation if it is empty and hidden.
-- Can be used to remove a workspace if it is empty when leaving it. The
-- operation may only change workspace once, otherwise the workspace will not
-- be removed.
removeEmptyWorkspaceAfter :: X () -> X ()
-- (the no-exceptions specialisation of the function below)
removeEmptyWorkspaceAfter = removeEmptyWorkspaceAfterExcept []
-- | Like 'removeEmptyWorkspaceAfter' but takes a list of sticky
-- workspaces whose entries will never be removed.
removeEmptyWorkspaceAfterExcept :: [String] -> X () -> X ()
removeEmptyWorkspaceAfterExcept sticky act = do
  before <- gets (currentTag . windowset)
  act
  after <- gets (currentTag . windowset)
  let leftAndRemovable = before /= after && before `notElem` sticky
  when leftAndRemovable (removeEmptyWorkspaceByTag before)
-- | Does the workspace with the given tag hold no windows?  Also
-- answers True when no workspace with that tag exists at all.
isEmpty :: String -> X Bool
isEmpty t = do
  allWs <- gets (workspaces . windowset)
  return $ case find ((== t) . tag) allWs of
    Nothing -> True
    Just ws -> isNothing (stack ws)
-- Insert a new, empty workspace with the given tag and layout into the
-- hidden list of the StackSet, using the supplied insertion function.
addHiddenWorkspace' :: (Workspace i l a -> [Workspace i l a] -> [Workspace i l a]) -> i -> l -> StackSet i l a sid sd -> StackSet i l a sid sd
addHiddenWorkspace' add newtag l s@(StackSet { hidden = ws }) = s { hidden = add (Workspace newtag l Nothing) ws }
-- | Remove the hidden workspace with the given tag from the StackSet, if
-- it exists. All the windows in that workspace are moved to the current
-- workspace.
removeWorkspace' :: (Eq i) => i -> StackSet i l a sid sd -> StackSet i l a sid sd
removeWorkspace' torem s@(StackSet { current = scr@(Screen { workspace = wc })
, hidden = hs })
= let (xs, ys) = break ((== torem) . tag) hs
in removeWorkspace'' xs ys
where meld Nothing Nothing = Nothing
meld x Nothing = x
meld Nothing x = x
meld (Just x) (Just y) = differentiate (integrate x ++ integrate y)
removeWorkspace'' xs (y:ys) = s { current = scr { workspace = wc { stack = meld (stack y) (stack wc) } }
, hidden = xs ++ ys }
removeWorkspace'' _ _ = s
|
f1u77y/xmonad-contrib
|
XMonad/Actions/DynamicWorkspaces.hs
|
bsd-3-clause
| 12,081
| 7
| 18
| 3,457
| 2,507
| 1,322
| 1,185
| 142
| 5
|
{-
(c) The GRASP/AQUA Project, Glasgow University, 1992-1998
************************************************************************
* *
\section[OccurAnal]{Occurrence analysis pass}
* *
************************************************************************
The occurrence analyser re-typechecks a core expression, returning a new
core expression with (hopefully) improved usage information.
-}
{-# LANGUAGE CPP, BangPatterns #-}
module ETA.SimplCore.OccurAnal (
occurAnalysePgm, occurAnalyseExpr, occurAnalyseExpr_NoBinderSwap
) where
#include "HsVersions.h"
import ETA.Core.CoreSyn
import ETA.Core.CoreFVs
import ETA.Core.CoreUtils ( exprIsTrivial, isDefaultAlt, isExpandableApp,
stripTicksTopE, mkTicks )
import ETA.BasicTypes.Id
import ETA.BasicTypes.Name( localiseName )
import ETA.BasicTypes.BasicTypes
import ETA.BasicTypes.Module( Module )
import ETA.Types.Coercion
import ETA.BasicTypes.VarSet
import ETA.BasicTypes.VarEnv
import ETA.BasicTypes.Var
import ETA.BasicTypes.Demand ( argOneShots, argsOneShots )
import ETA.Utils.Maybes ( orElse )
import ETA.Utils.Digraph ( SCC(..), stronglyConnCompFromEdgedVerticesR )
import ETA.BasicTypes.Unique
import ETA.Utils.UniqFM
import ETA.Utils.UniqSet
import ETA.Utils.Util
import ETA.Utils.Outputable
import ETA.Utils.FastString
import Data.List
import Control.Arrow ( second )
{-
************************************************************************
* *
\subsection[OccurAnal-main]{Counting occurrences: main function}
* *
************************************************************************
Here's the externally-callable interface:
-}
-- | Occurrence-analyse a whole program.  If, after analysis, some
-- occurrences escape the top of the binding list (which can happen when
-- RULES for imported Ids connect bindings "backwards"), all top-level
-- bindings are glommed into one Rec and re-analysed.
-- See Note [Glomming].
occurAnalysePgm :: Module       -- Used only in debug output
                -> (Activation -> Bool)
                -> [CoreRule] -> [CoreVect] -> VarSet
                -> CoreProgram -> CoreProgram
occurAnalysePgm this_mod active_rule imp_rules vects vectVars binds
  | isEmptyVarEnv final_usage
  = occ_anald_binds
  | otherwise           -- See Note [Glomming]
  = WARN( True, hang (text "Glomming in" <+> ppr this_mod <> colon)
                     2 (ppr final_usage ) )
    occ_anald_glommed_binds
  where
    init_env = initOccEnv active_rule
    (final_usage, occ_anald_binds) = go init_env binds
    -- Glommed path: flatten every binding into one Rec and re-run.
    (_, occ_anald_glommed_binds) = occAnalRecBind init_env imp_rules_edges
                                                  (flattenBinds occ_anald_binds)
                                                  initial_uds
          -- It's crucial to re-analyse the glommed-together bindings
          -- so that we establish the right loop breakers. Otherwise
          -- we can easily create an infinite loop (Trac #9583 is an example)

    -- Seed usage: free vars of the imported rules and vectorisation
    -- declarations, so that everything they mention stays alive.
    initial_uds = addIdOccs emptyDetails
                            (rulesFreeVars imp_rules `unionVarSet`
                             vectsFreeVars vects `unionVarSet`
                             vectVars)
    -- The RULES and VECTORISE declarations keep things alive! (For VECTORISE declarations,
    -- we only get them *until* the vectoriser runs. Afterwards, these dependencies are
    -- reflected in 'vectors' — see Note [Vectorisation declarations and occurrences].)

    -- Map each local free var of an imported rule's LHS args to the
    -- local free vars of that rule's RHS.
    -- Note [Preventing loops due to imported functions rules]
    imp_rules_edges = foldr (plusVarEnv_C unionVarSet) emptyVarEnv
                            [ mapVarEnv (const maps_to) $
                                getUniqSet (exprFreeIds arg `delVarSetList` ru_bndrs imp_rule)
                            | imp_rule <- imp_rules
                            , let maps_to = exprFreeIds (ru_rhs imp_rule)
                                            `delVarSetList` ru_bndrs imp_rule
                            , arg <- ru_args imp_rule ]

    -- Analyse the top-level bindings back to front, threading the usage
    -- of everything that follows each binding.
    go :: OccEnv -> [CoreBind] -> (UsageDetails, [CoreBind])
    go _ []
        = (initial_uds, [])
    go env (bind:binds)
        = (final_usage, bind' ++ binds')
        where
            (bs_usage, binds')   = go env binds
            (final_usage, bind') = occAnalBind env imp_rules_edges bind bs_usage
occurAnalyseExpr :: CoreExpr -> CoreExpr
-- Occurrence-analyse an expression, keeping the annotated expression and
-- discarding the usage information.  The binder-swap transformation is on.
occurAnalyseExpr expr = occurAnalyseExpr' True expr
occurAnalyseExpr_NoBinderSwap :: CoreExpr -> CoreExpr
-- As 'occurAnalyseExpr', but with the binder-swap transformation disabled.
occurAnalyseExpr_NoBinderSwap expr = occurAnalyseExpr' False expr
occurAnalyseExpr' :: Bool -> CoreExpr -> CoreExpr
-- Shared worker: the Bool says whether to perform the binder swap.
occurAnalyseExpr' enable_binder_swap expr = expr'
  where
    (_usage, expr') = occAnal analysis_env expr
    -- To be conservative, treat every inlining and rule as active.
    analysis_env = (initOccEnv (const True)) { occ_binder_swap = enable_binder_swap }
{-
************************************************************************
* *
\subsection[OccurAnal-main]{Counting occurrences: main function}
* *
************************************************************************
Bindings
~~~~~~~~
-}
occAnalBind :: OccEnv           -- The incoming OccEnv
            -> IdEnv IdSet      -- Mapping from FVs of imported RULE LHSs to RHS FVs
            -> CoreBind
            -> UsageDetails     -- Usage details of scope
            -> (UsageDetails,   -- Of the whole let(rec)
                [CoreBind])
-- Dispatch on the binding form; each form has its own specialised worker.
occAnalBind env imp_rules_edges bind body_usage
  = case bind of
      NonRec bndr rhs -> occAnalNonRecBind env imp_rules_edges bndr rhs body_usage
      Rec pairs       -> occAnalRecBind env imp_rules_edges pairs body_usage
-----------------
occAnalNonRecBind :: OccEnv -> IdEnv IdSet -> Var -> CoreExpr
                  -> UsageDetails -> (UsageDetails, [CoreBind])
-- Analyse a non-recursive binding against the usage details of its scope.
occAnalNonRecBind env imp_rules_edges binder rhs body_usage
  | isTyVar binder                    -- A type let; we don't gather usage info
  = (body_usage, [NonRec binder rhs])

  | not (binder `usedIn` body_usage)  -- Dead binding: drop it entirely
  = (body_usage, [])

  | otherwise                         -- Live: analyse the RHS as well
  = (body_usage' +++ rhs_usage_final, [NonRec tagged_binder rhs'])
  where
    (body_usage', tagged_binder) = tagBinder body_usage binder
    (rhs_usage, rhs')            = occAnalNonRecRhs env tagged_binder rhs

    -- Unfoldings and RULES act like extra right-hand sides:
    -- see Note [Rules are extra RHSs] and Note [Rule dependency info]
    usage_with_unf   = addIdOccs rhs_usage (idUnfoldingVars binder)
    usage_with_rules = addIdOccs usage_with_unf (idRuleVars binder)

    -- See Note [Preventing loops due to imported functions rules]
    rhs_usage_final = case lookupVarEnv imp_rules_edges binder of
                        Nothing   -> usage_with_rules
                        Just vars -> addIdOccs usage_with_rules vars
-----------------
-- | Analyse a recursive group of bindings.
occAnalRecBind :: OccEnv -> IdEnv IdSet -> [(Var,CoreExpr)]
               -> UsageDetails -> (UsageDetails, [CoreBind])
occAnalRecBind env imp_rules_edges pairs body_usage
  = foldr occAnalRec (body_usage, []) sccs
        -- For a recursive group, we
        --      * occ-analyse all the RHSs
        --      * compute strongly-connected components
        --      * feed those components to occAnalRec
  where
    -- All the binders of this Rec group
    bndr_set = mkVarSet (map fst pairs)

    sccs :: [SCC (Node Details)]
    sccs = {-# SCC "occAnalBind.scc" #-} stronglyConnCompFromEdgedVerticesR nodes

    -- One graph node per binding; its edges go to the group binders
    -- mentioned by the RHS / rules / unfolding (built in makeNode).
    nodes :: [Node Details]
    nodes = {-# SCC "occAnalBind.assoc" #-} map (makeNode env imp_rules_edges bndr_set) pairs
{-
Note [Dead code]
~~~~~~~~~~~~~~~~
Dropping dead code for a cyclic Strongly Connected Component is done
in a very simple way:
the entire SCC is dropped if none of its binders are mentioned
in the body; otherwise the whole thing is kept.
The key observation is that dead code elimination happens after
dependency analysis: so 'occAnalBind' processes SCCs instead of the
original term's binding groups.
Thus 'occAnalBind' does indeed drop 'f' in an example like
letrec f = ...g...
g = ...(...g...)...
in
...g...
when 'g' no longer uses 'f' at all (eg 'f' does not occur in a RULE in
'g'). 'occAnalBind' first consumes 'CyclicSCC g' and then it consumes
'AcyclicSCC f', where 'body_usage' won't contain 'f'.
------------------------------------------------------------
Note [Forming Rec groups]
~~~~~~~~~~~~~~~~~~~~~~~~~
We put bindings {f = ef; g = eg } in a Rec group if "f uses g"
and "g uses f", no matter how indirectly. We do a SCC analysis
with an edge f -> g if "f uses g".
More precisely, "f uses g" iff g should be in scope wherever f is.
That is, g is free in:
a) the rhs 'ef'
b) or the RHS of a rule for f (Note [Rules are extra RHSs])
c) or the LHS or a rule for f (Note [Rule dependency info])
These conditions apply regardless of the activation of the RULE (eg it might be
inactive in this phase but become active later). Once a Rec is broken up
it can never be put back together, so we must be conservative.
The principle is that, regardless of rule firings, every variable is
always in scope.
* Note [Rules are extra RHSs]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
A RULE for 'f' is like an extra RHS for 'f'. That way the "parent"
keeps the specialised "children" alive. If the parent dies
(because it isn't referenced any more), then the children will die
too (unless they are already referenced directly).
To that end, we build a Rec group for each cyclic strongly
connected component,
*treating f's rules as extra RHSs for 'f'*.
More concretely, the SCC analysis runs on a graph with an edge
from f -> g iff g is mentioned in
(a) f's rhs
(b) f's RULES
These are rec_edges.
Under (b) we include variables free in *either* LHS *or* RHS of
the rule. The former might seems silly, but see Note [Rule
dependency info]. So in Example [eftInt], eftInt and eftIntFB
will be put in the same Rec, even though their 'main' RHSs are
both non-recursive.
* Note [Rule dependency info]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
The VarSet in a SpecInfo is used for dependency analysis in the
occurrence analyser. We must track free vars in *both* lhs and rhs.
Hence use of idRuleVars, rather than idRuleRhsVars in occAnalBind.
Why both? Consider
x = y
RULE f x = v+4
Then if we substitute y for x, we'd better do so in the
rule's LHS too, so we'd better ensure the RULE appears to mention 'x'
as well as 'v'
* Note [Rules are visible in their own rec group]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We want the rules for 'f' to be visible in f's right-hand side.
And we'd like them to be visible in other functions in f's Rec
group. E.g. in Note [Specialisation rules] we want f' rule
to be visible in both f's RHS, and fs's RHS.
This means that we must simplify the RULEs first, before looking
at any of the definitions. This is done by Simplify.simplRecBind,
when it calls addLetIdInfo.
------------------------------------------------------------
Note [Choosing loop breakers]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Loop breaking is surprisingly subtle. First read the section 4 of
"Secrets of the GHC inliner". This describes our basic plan.
We avoid infinite inlinings by choosing loop breakers, and
ensuring that a loop breaker cuts each loop.
Fundamentally, we do SCC analysis on a graph. For each recursive
group we choose a loop breaker, delete all edges to that node,
re-analyse the SCC, and iterate.
But what is the graph? NOT the same graph as was used for Note
[Forming Rec groups]! In particular, a RULE is like an equation for
'f' that is *always* inlined if it is applicable. We do *not* disable
rules for loop-breakers. It's up to whoever makes the rules to make
sure that the rules themselves always terminate. See Note [Rules for
recursive functions] in Simplify.lhs
Hence, if
f's RHS (or its INLINE template if it has one) mentions g, and
g has a RULE that mentions h, and
h has a RULE that mentions f
then we *must* choose f to be a loop breaker. Example: see Note
[Specialisation rules].
In general, take the free variables of f's RHS, and augment it with
all the variables reachable by RULES from those starting points. That
is the whole reason for computing rule_fv_env in occAnalBind. (Of
course we only consider free vars that are also binders in this Rec
group.) See also Note [Finding rule RHS free vars]
Note that when we compute this rule_fv_env, we only consider variables
free in the *RHS* of the rule, in contrast to the way we build the
Rec group in the first place (Note [Rule dependency info])
Note that if 'g' has RHS that mentions 'w', we should add w to
g's loop-breaker edges. More concretely there is an edge from f -> g
iff
(a) g is mentioned in f's RHS `xor` f's INLINE rhs
(see Note [Inline rules])
(b) or h is mentioned in f's RHS, and
g appears in the RHS of an active RULE of h
or a transitive sequence of active rules starting with h
Why "active rules"? See Note [Finding rule RHS free vars]
Note that in Example [eftInt], *neither* eftInt *nor* eftIntFB is
chosen as a loop breaker, because their RHSs don't mention each other.
And indeed both can be inlined safely.
Note again that the edges of the graph we use for computing loop breakers
are not the same as the edges we use for computing the Rec blocks.
That's why we compute
- rec_edges for the Rec block analysis
- loop_breaker_edges for the loop breaker analysis
* Note [Finding rule RHS free vars]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider this real example from Data Parallel Haskell
tagZero :: Array Int -> Array Tag
{-# INLINE [1] tagZero #-}
tagZero xs = pmap (\x -> fromBool (x==0)) xs
{-# RULES "tagZero" [~1] forall xs n.
pmap fromBool <blah blah> = tagZero xs #-}
So tagZero's RHS mentions pmap, and pmap's RULE mentions tagZero.
However, tagZero can only be inlined in phase 1 and later, while
the RULE is only active *before* phase 1. So there's no problem.
To make this work, we look for the RHS free vars only for
*active* rules. That's the reason for the occ_rule_act field
of the OccEnv.
* Note [Weak loop breakers]
~~~~~~~~~~~~~~~~~~~~~~~~~
There is a last nasty wrinkle. Suppose we have
Rec { f = f_rhs
RULE f [] = g
h = h_rhs
g = h
...more...
}
Remember that we simplify the RULES before any RHS (see Note
[Rules are visible in their own rec group] above).
So we must *not* postInlineUnconditionally 'g', even though
its RHS turns out to be trivial. (I'm assuming that 'g' is
not chosen as a loop breaker.) Why not? Because then we
drop the binding for 'g', which leaves it out of scope in the
RULE!
Here's a somewhat different example of the same thing
Rec { g = h
; h = ...f...
; f = f_rhs
RULE f [] = g }
Here the RULE is "below" g, but we *still* can't postInlineUnconditionally
g, because the RULE for f is active throughout. So the RHS of h
might rewrite to h = ...g...
So g must remain in scope in the output program!
We "solve" this by:
Make g a "weak" loop breaker (OccInfo = IAmLoopBreaker True)
iff g is a "missing free variable" of the Rec group
A "missing free variable" x is one that is mentioned in an RHS or
INLINE or RULE of a binding in the Rec group, but where the
dependency on x may not show up in the loop_breaker_edges (see
Note [Choosing loop breakers] above).
A normal "strong" loop breaker has IAmLoopBreaker False. So
Inline postInlineUnconditionally
IAmLoopBreaker False no no
IAmLoopBreaker True yes no
other yes yes
The **sole** reason for this kind of loop breaker is so that
postInlineUnconditionally does not fire. Ugh. (Typically it'll
inline via the usual callSiteInline stuff, so it'll be dead in the
next pass, so the main Ugh is the tiresome complication.)
Note [Rules for imported functions]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider this
f = /\a. B.g a
RULE B.g Int = 1 + f Int
Note that
* The RULE is for an imported function.
* f is non-recursive
Now we
can get
f Int --> B.g Int Inlining f
--> 1 + f Int Firing RULE
and so the simplifier goes into an infinite loop. This
would not happen if the RULE was for a local function,
because we keep track of dependencies through rules. But
that is pretty much impossible to do for imported Ids. Suppose
f's definition had been
f = /\a. C.h a
where (by some long and devious process), C.h eventually inlines to
B.g. We could only spot such loops by exhaustively following
unfoldings of C.h etc, in case we reach B.g, and hence (via the RULE)
f.
Note that RULES for imported functions are important in practice; they
occur a lot in the libraries.
We regard this potential infinite loop as a *programmer* error.
It's up to the programmer not to write silly rules like
RULE f x = f x
and the example above is just a more complicated version.
Note [Preventing loops due to imported functions rules]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider:
import GHC.Base (foldr)
{-# RULES "filterList" forall p. foldr (filterFB (:) p) [] = filter p #-}
filter p xs = build (\c n -> foldr (filterFB c p) n xs)
filterFB c p = ...
f = filter p xs
Note that filter is not a loop-breaker, so what happens is:
f = filter p xs
= {inline} build (\c n -> foldr (filterFB c p) n xs)
= {inline} foldr (filterFB (:) p) [] xs
= {RULE} filter p xs
We are in an infinite loop.
A more elaborate example (that I actually saw in practice when I went to
mark GHC.List.filter as INLINABLE) is as follows. Say I have this module:
{-# LANGUAGE RankNTypes #-}
module GHCList where
import Prelude hiding (filter)
import GHC.Base (build)
{-# INLINABLE filter #-}
filter :: (a -> Bool) -> [a] -> [a]
filter p [] = []
filter p (x:xs) = if p x then x : filter p xs else filter p xs
{-# NOINLINE [0] filterFB #-}
filterFB :: (a -> b -> b) -> (a -> Bool) -> a -> b -> b
filterFB c p x r | p x = x `c` r
| otherwise = r
{-# RULES
"filter" [~1] forall p xs. filter p xs = build (\c n -> foldr
(filterFB c p) n xs)
"filterList" [1] forall p. foldr (filterFB (:) p) [] = filter p
#-}
Then (because RULES are applied inside INLINABLE unfoldings, but inlinings
are not), the unfolding given to "filter" in the interface file will be:
filter p [] = []
filter p (x:xs) = if p x then x : build (\c n -> foldr (filterFB c p) n xs)
else build (\c n -> foldr (filterFB c p) n xs)
Note that because this unfolding does not mention "filter", filter is not
marked as a strong loop breaker. Therefore at a use site in another module:
filter p xs
= {inline}
case xs of [] -> []
(x:xs) -> if p x then x : build (\c n -> foldr (filterFB c p) n xs)
else build (\c n -> foldr (filterFB c p) n xs)
build (\c n -> foldr (filterFB c p) n xs)
= {inline} foldr (filterFB (:) p) [] xs
= {RULE} filter p xs
And we are in an infinite loop again, except that this time the loop is producing an
infinitely large *term* (an unrolling of filter) and so the simplifier finally
dies with "ticks exhausted"
Because of this problem, we make a small change in the occurrence analyser
designed to mark functions like "filter" as strong loop breakers on the basis that:
1. The RHS of filter mentions the local function "filterFB"
2. We have a rule which mentions "filterFB" on the LHS and "filter" on the RHS
So for each RULE for an *imported* function we are going to add
dependency edges between the *local* FVS of the rule LHS and the
*local* FVS of the rule RHS. We don't do anything special for RULES on
local functions because the standard occurrence analysis stuff is
pretty good at getting loop-breakerness correct there.
It is important to note that even with this extra hack we aren't always going to get
things right. For example, it might be that the rule LHS mentions an imported Id,
and another module has a RULE that can rewrite that imported Id to one of our local
Ids.
Note [Specialising imported functions]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
BUT for *automatically-generated* rules, the programmer can't be
responsible for the "programmer error" in Note [Rules for imported
functions]. In paricular, consider specialising a recursive function
defined in another module. If we specialise a recursive function B.g,
we get
g_spec = .....(B.g Int).....
RULE B.g Int = g_spec
Here, g_spec doesn't look recursive, but when the rule fires, it
becomes so. And if B.g was mutually recursive, the loop might
not be as obvious as it is here.
To avoid this,
* When specialising a function that is a loop breaker,
give a NOINLINE pragma to the specialised function
Note [Glomming]
~~~~~~~~~~~~~~~
RULES for imported Ids can make something at the top refer to something at the bottom:
f = \x -> B.g (q x)
h = \y -> 3
RULE: B.g (q x) = h x
Applying this rule makes f refer to h, although f doesn't appear to
depend on h. (And, as in Note [Rules for imported functions], the
dependency might be more indirect. For example, f might mention C.t
rather than B.g, where C.t eventually inlines to B.g.)
NOTICE that this cannot happen for rules whose head is a
locally-defined function, because we accurately track dependencies
through RULES. It only happens for rules whose head is an imported
function (B.g in the example above).
Solution:
- When simplifying, bring all top level identifiers into
scope at the start, ignoring the Rec/NonRec structure, so
that when 'h' pops up in f's rhs, we find it in the in-scope set
(as the simplifier generally expects). This happens in simplTopBinds.
- In the occurrence analyser, if there are any out-of-scope
occurrences that pop out of the top, which will happen after
firing the rule: f = \x -> h x
h = \y -> 3
then just glom all the bindings into a single Rec, so that
the *next* iteration of the occurrence analyser will sort
them all out. This part happens in occurAnalysePgm.
------------------------------------------------------------
Note [Inline rules]
~~~~~~~~~~~~~~~~~~~
None of the above stuff about RULES applies to Inline Rules,
stored in a CoreUnfolding. The unfolding, if any, is simplified
at the same time as the regular RHS of the function (ie *not* like
Note [Rules are visible in their own rec group]), so it should be
treated *exactly* like an extra RHS.
Or, rather, when computing loop-breaker edges,
* If f has an INLINE pragma, and it is active, we treat the
INLINE rhs as f's rhs
* If it's inactive, we treat f as having no rhs
* If it has no INLINE pragma, we look at f's actual rhs
There is a danger that we'll be sub-optimal if we see this
f = ...f...
[INLINE f = ..no f...]
where f is recursive, but the INLINE is not. This can just about
happen with a sufficiently odd set of rules; eg
foo :: Int -> Int
{-# INLINE [1] foo #-}
foo x = x+1
bar :: Int -> Int
{-# INLINE [1] bar #-}
bar x = foo x + 1
{-# RULES "foo" [~1] forall x. foo x = bar x #-}
Here the RULE makes bar recursive; but its INLINE pragma remains
non-recursive. It's tempting to then say that 'bar' should not be
a loop breaker, but an attempt to do so goes wrong in two ways:
a) We may get
$df = ...$cfoo...
$cfoo = ...$df....
[INLINE $cfoo = ...no-$df...]
But we want $cfoo to depend on $df explicitly so that we
put the bindings in the right order to inline $df in $cfoo
and perhaps break the loop altogether. (Maybe this
b)
Example [eftInt]
~~~~~~~~~~~~~~~
Example (from GHC.Enum):
eftInt :: Int# -> Int# -> [Int]
eftInt x y = ...(non-recursive)...
{-# INLINE [0] eftIntFB #-}
eftIntFB :: (Int -> r -> r) -> r -> Int# -> Int# -> r
eftIntFB c n x y = ...(non-recursive)...
{-# RULES
"eftInt" [~1] forall x y. eftInt x y = build (\ c n -> eftIntFB c n x y)
"eftIntList" [1] eftIntFB (:) [] = eftInt
#-}
Note [Specialisation rules]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider this group, which is typical of what SpecConstr builds:
fs a = ....f (C a)....
f x = ....f (C a)....
{-# RULE f (C a) = fs a #-}
So 'f' and 'fs' are in the same Rec group (since f refers to fs via its RULE).
But watch out! If 'fs' is not chosen as a loop breaker, we may get an infinite loop:
- the RULE is applied in f's RHS (see Note [Self-recursive rules] in Simplify
- fs is inlined (say it's small)
- now there's another opportunity to apply the RULE
This showed up when compiling Control.Concurrent.Chan.getChanContents.
-}
-- | A graph node for SCC analysis: the payload, the key of this node
-- (the binder's Unique), and the keys of the nodes it has edges to.
type Node details = (details, Unique, [Unique]) -- Keys are gotten from the Unique,
                                                -- which is gotten from the Id.
-- | Per-binder information for one element of a recursive group, used
-- both for forming the Rec blocks and for choosing loop breakers.
data Details
  = ND { nd_bndr :: Id          -- Binder

       , nd_rhs  :: CoreExpr    -- RHS, already occ-analysed

       , nd_uds  :: UsageDetails  -- Usage from RHS, and RULES, and stable unfoldings
                                  -- ignoring phase (ie assuming all are active)
                                  -- See Note [Forming Rec groups]

       , nd_inl  :: IdSet       -- Free variables of
                                --   the stable unfolding (if present and active)
                                --   or the RHS (if not)
                                -- but excluding any RULES
                                -- This is the IdSet that may be used if the Id is inlined

       , nd_weak :: IdSet       -- Binders of this Rec that are mentioned in nd_uds
                                -- but are *not* in nd_inl.  These are the ones whose
                                -- dependencies might not be respected by loop_breaker_edges
                                -- See Note [Weak loop breakers]

       , nd_active_rule_fvs :: IdSet   -- Free variables of the RHS of active RULES
  }
instance Outputable Details where
  -- Render as ND{bndr = ..., uds = ..., ...} for debug output.
  ppr nd = ptext (sLit "ND") <> braces (sep fields)
    where
      fields = [ ptext (sLit "bndr =") <+> ppr (nd_bndr nd)
               , ptext (sLit "uds =") <+> ppr (nd_uds nd)
               , ptext (sLit "inl =") <+> ppr (nd_inl nd)
               , ptext (sLit "weak =") <+> ppr (nd_weak nd)
               , ptext (sLit "rule =") <+> ppr (nd_active_rule_fvs nd)
               ]
-- | Build one SCC-analysis node for a binder of a Rec group:
-- occ-analyse its RHS and collect the free-variable sets that the
-- Rec-forming and loop-breaker phases need.
makeNode :: OccEnv -> IdEnv IdSet -> VarSet -> (Var, CoreExpr) -> Node Details
makeNode env imp_rules_edges bndr_set (bndr, rhs)
  = (details, varUnique bndr, nonDetKeysUniqSet node_fvs)
  where
    details = ND { nd_bndr = bndr
                 , nd_rhs = rhs'
                 , nd_uds = rhs_usage3
                 , nd_weak = node_fvs `minusVarSet` inl_fvs
                 , nd_inl = inl_fvs
                 , nd_active_rule_fvs = active_rule_fvs }

    -- Constructing the edges for the main Rec computation
    -- See Note [Forming Rec groups]
    (rhs_usage1, rhs') = occAnalRecRhs env rhs
    rhs_usage2 = addIdOccs rhs_usage1 all_rule_fvs   -- Note [Rules are extra RHSs]
                                                     -- Note [Rule dependency info]
    -- Stable-unfolding free vars count as occurrences too
    rhs_usage3 = case mb_unf_fvs of
                   Just unf_fvs -> addIdOccs rhs_usage2 unf_fvs
                   Nothing      -> rhs_usage2
    -- Edges only to binders of this very group
    node_fvs = udFreeVars bndr_set rhs_usage3

    -- Finding the free variables of the rules
    is_active = occ_rule_act env :: Activation -> Bool
    rules = filterOut isBuiltinRule (idCoreRules bndr)
    rules_w_fvs :: [(Activation, VarSet)]    -- Find the RHS fvs
    rules_w_fvs = maybe id (\ids -> ((AlwaysActive, ids):)) (lookupVarEnv imp_rules_edges bndr)
                  -- See Note [Preventing loops due to imported functions rules]
                  [ (ru_act rule, fvs)
                  | rule <- rules
                  , let fvs = exprFreeVars (ru_rhs rule)
                              `delVarSetList` ru_bndrs rule
                  , not (isEmptyVarSet fvs) ]
    -- Both LHS and RHS rule fvs: see Note [Rule dependency info]
    all_rule_fvs = rule_lhs_fvs `unionVarSet` rule_rhs_fvs
    rule_rhs_fvs = mapUnionVarSet snd rules_w_fvs
    rule_lhs_fvs = mapUnionVarSet (\ru -> exprsFreeVars (ru_args ru)
                                          `delVarSetList` ru_bndrs ru) rules
    -- RHS fvs of only the *currently active* rules: used when choosing
    -- loop breakers (Note [Finding rule RHS free vars])
    active_rule_fvs = unionVarSets [fvs | (a,fvs) <- rules_w_fvs, is_active a]

    -- Finding the free variables of the INLINE pragma (if any)
    unf = realIdUnfolding bndr -- Ignore any current loop-breaker flag
    mb_unf_fvs = stableUnfoldingVars unf

    -- Find the "nd_inl" free vars; for the loop-breaker phase
    inl_fvs = case mb_unf_fvs of
                Nothing -> udFreeVars bndr_set rhs_usage1 -- No INLINE, use RHS
                Just unf_fvs -> unf_fvs
                      -- We could check for an *active* INLINE (returning
                      -- emptyVarSet for an inactive one), but is_active
                      -- isn't the right thing (it tells about
                      -- RULE activation), so we'd need more plumbing
-----------------------------
-- | Fold one strongly-connected component into the accumulated
-- (usage, bindings) pair of the scope that follows it.
occAnalRec :: SCC (Node Details)
           -> (UsageDetails, [CoreBind])
           -> (UsageDetails, [CoreBind])

-- The NonRec case is just like a Let (NonRec ...) above
occAnalRec (AcyclicSCC (ND { nd_bndr = bndr, nd_rhs = rhs, nd_uds = rhs_uds}, _, _))
           (body_uds, binds)
  | not (bndr `usedIn` body_uds)
  = (body_uds, binds)           -- See Note [Dead code]

  | otherwise                   -- It's mentioned in the body
  = (body_uds' +++ rhs_uds,
     NonRec tagged_bndr rhs : binds)
  where
    (body_uds', tagged_bndr) = tagBinder body_uds bndr

-- The Rec case is the interesting one
-- See Note [Loop breaking]
occAnalRec (CyclicSCC nodes) (body_uds, binds)
  | not (any (`usedIn` body_uds) bndrs) -- NB: look at body_uds, not total_uds
  = (body_uds, binds)                   -- See Note [Dead code]

  | otherwise   -- At this point we always build a single Rec
  = -- pprTrace "occAnalRec" (vcat
    --   [ text "tagged nodes" <+> ppr tagged_nodes
    --   , text "lb edges" <+> ppr loop_breaker_edges])
    (final_uds, Rec pairs : binds)

  where
    bndrs    = [b | (ND { nd_bndr = b }, _, _) <- nodes]
    bndr_set = mkVarSet bndrs

    ----------------------------
    -- Tag the binders with their occurrence info
    tagged_nodes = map tag_node nodes
    -- Usage of body plus of every RHS in the group
    total_uds = foldl add_uds body_uds nodes
    -- Exclude the group's own binders from what escapes upwards
    final_uds = total_uds `minusVarEnv` (getUniqSet bndr_set)
    add_uds usage_so_far (nd, _, _) = usage_so_far +++ nd_uds nd

    tag_node :: Node Details -> Node Details
    tag_node (details@ND { nd_bndr = bndr }, k, ks)
      = (details { nd_bndr = setBinderOcc total_uds bndr }, k, ks)

    ---------------------------
    -- Now reconstruct the cycle
    pairs :: [(Id,CoreExpr)]
    pairs | isEmptyVarSet weak_fvs = reOrderNodes 0 bndr_set weak_fvs tagged_nodes []
          | otherwise = loopBreakNodes 0 bndr_set weak_fvs loop_breaker_edges []
          -- If weak_fvs is empty, the loop_breaker_edges will include all
          -- the edges in tagged_nodes, so there isn't any point in doing
          -- a fresh SCC computation that will yield a single CyclicSCC result.

    weak_fvs :: VarSet
    weak_fvs = mapUnionVarSet (nd_weak . fstOf3) nodes

    -- See Note [Choosing loop breakers] for loop_breaker_edges
    loop_breaker_edges = map mk_node tagged_nodes
    -- Edges follow nd_inl, extended transitively through active-rule fvs
    mk_node (details@(ND { nd_inl = inl_fvs }), k, _)
      = (details, k, nonDetKeysUniqSet (extendFvs_ rule_fv_env inl_fvs))

    ------------------------------------
    rule_fv_env :: IdEnv IdSet
    -- Maps a variable f to the variables from this group
    --      mentioned in RHS of active rules for f
    -- Domain is *subset* of bound vars (others have no rule fvs)
    rule_fv_env = transClosureFV (mkVarEnv init_rule_fvs)
    init_rule_fvs   -- See Note [Finding rule RHS free vars]
      = [ (b, trimmed_rule_fvs)
        | (ND { nd_bndr = b, nd_active_rule_fvs = rule_fvs },_,_) <- nodes
        , let trimmed_rule_fvs = rule_fvs `intersectVarSet` bndr_set
        , not (isEmptyVarSet trimmed_rule_fvs)]
{-
@loopBreakSCC@ is applied to the list of (binder,rhs) pairs for a cyclic
strongly connected component (there's guaranteed to be a cycle). It returns the
same pairs, but
a) in a better order,
b) with some of the Ids having a IAmALoopBreaker pragma
The "loop-breaker" Ids are sufficient to break all cycles in the SCC. This means
that the simplifier can guarantee not to loop provided it never records an inlining
for these no-inline guys.
Furthermore, the order of the binds is such that if we neglect dependencies
on the no-inline Ids then the binds are topologically sorted. This means
that the simplifier will generally do a good job if it works from top to bottom,
recording inlinings for any Ids which aren't marked as "no-inline" as it goes.
-}
-- | One binding pair, as produced by the loop-breaker machinery.
type Binding = (Id,CoreExpr)

-- | Mark a node's binder as a *strong* loop breaker and return its
-- binding.  Strong loop breakers are the Ids for which the simplifier
-- never records an inlining, which is what breaks the cycle.
mk_loop_breaker :: Node Details -> Binding
mk_loop_breaker (ND { nd_bndr = bndr, nd_rhs = rhs}, _, _)
  = (setIdOccInfo bndr strongLoopBreaker, rhs)
-- | Emit a binding for a node that was NOT chosen as a strong loop
-- breaker.  If the binder is mentioned in the RHS of an active rule in
-- this group (@used_in_rules@) it is still tagged as a *weak* loop
-- breaker.  See Note [Weak loop breakers]
mk_non_loop_breaker :: VarSet -> Node Details -> Binding
mk_non_loop_breaker used_in_rules (ND { nd_bndr = bndr, nd_rhs = rhs}, _, _)
  | bndr `elemVarSet` used_in_rules = (setIdOccInfo bndr weakLoopBreaker, rhs)
  | otherwise                       = (bndr, rhs)
udFreeVars :: VarSet -> UsageDetails -> VarSet
-- Find the subset of bndrs that are mentioned in uds
-- TODO: Change may cause bugs
-- NOTE(review): this intersects the underlying UniqFMs and then calls
-- unsafeUFMToUniqSet, which assumes the retained elements (the variables
-- from 'bndrs') are keyed by their own uniques -- see the pre-existing
-- TODO above; confirm before relying on this for anything new.
udFreeVars bndrs uds = unsafeUFMToUniqSet $
  intersectUFM_C (\b _ -> b) (getUniqSet bndrs) uds
loopBreakNodes :: Int
               -> VarSet        -- All binders
               -> VarSet        -- Binders whose dependencies may be "missing"
                                -- See Note [Weak loop breakers]
               -> [Node Details]
               -> [Binding]     -- Append these to the end
               -> [Binding]
-- Return the bindings sorted into a plausible order, and marked with loop breakers.
loopBreakNodes depth bndr_set weak_fvs nodes binds
  = go (stronglyConnCompFromEdgedVerticesR nodes) binds
  where
    -- Process each SCC in dependency order, consing its bindings on
    go [] binds = binds
    go (scc:sccs) binds = loop_break_scc scc (go sccs binds)

    -- An acyclic node needs no strong loop breaker; a one-node cycle
    -- (self-recursive binding) must itself be the breaker; a larger
    -- cycle is handed to reOrderNodes to choose breaker(s)
    loop_break_scc scc binds
      = case scc of
          AcyclicSCC node  -> mk_non_loop_breaker weak_fvs node : binds
          CyclicSCC [node] -> mk_loop_breaker node : binds
          CyclicSCC nodes  -> reOrderNodes depth bndr_set weak_fvs nodes binds
reOrderNodes :: Int -> VarSet -> VarSet -> [Node Details] -> [Binding] -> [Binding]
-- Choose a loop breaker, mark it no-inline,
-- do SCC analysis on the rest, and recursively sort them out
reOrderNodes _ _ _ [] _ = panic "reOrderNodes"
reOrderNodes depth bndr_set weak_fvs (node : nodes) binds
  = -- pprTrace "reOrderNodes" (text "unchosen" <+> ppr unchosen $$
    --                          text "chosen" <+> ppr chosen_nodes) $
    loopBreakNodes new_depth bndr_set weak_fvs unchosen $
    (map mk_loop_breaker chosen_nodes ++ binds)
  where
    (chosen_nodes, unchosen) = choose_loop_breaker (score node) [node] [] nodes

    approximate_loop_breaker = depth >= 2
    new_depth | approximate_loop_breaker = 0
              | otherwise                = depth+1
        -- After two iterations (d=0, d=1) give up
        -- and approximate, returning to d=0

    choose_loop_breaker :: Int             -- Best score so far
                        -> [Node Details]  -- Nodes with this score
                        -> [Node Details]  -- Nodes with higher scores
                        -> [Node Details]  -- Unprocessed nodes
                        -> ([Node Details], [Node Details])
        -- This loop looks for the bind with the lowest score
        -- to pick as the loop breaker.  The rest accumulate in
    choose_loop_breaker _ loop_nodes acc []
      = (loop_nodes, acc)  -- Done

        -- If approximate_loop_breaker is True, we pick *all*
        -- nodes with lowest score, else just one
        -- See Note [Complexity of loop breaking]
    choose_loop_breaker loop_sc loop_nodes acc (node : nodes)
      | sc < loop_sc  -- Lower score so pick this new one
      = choose_loop_breaker sc [node] (loop_nodes ++ acc) nodes

      | approximate_loop_breaker && sc == loop_sc
      = choose_loop_breaker loop_sc (node : loop_nodes) acc nodes

      | otherwise  -- Higher score so don't pick it
      = choose_loop_breaker loop_sc loop_nodes (node : acc) nodes
      where
        sc = score node

    score :: Node Details -> Int  -- Higher score => less likely to be picked as loop breaker
    score (ND { nd_bndr = bndr, nd_rhs = rhs }, _, _)
      | not (isId bndr) = 100     -- A type or coercion variable is never a loop breaker

      | isDFunId bndr = 9   -- Never choose a DFun as a loop breaker
                            -- Note [DFuns should not be loop breakers]

      | Just be_very_keen <- hasStableCoreUnfolding_maybe (idUnfolding bndr)
      = if be_very_keen then 6    -- Note [Loop breakers and INLINE/INLINEABLE pragmas]
                        else 3
          -- Data structures are more important than INLINE pragmas
          -- so that dictionary/method recursion unravels
          -- Note that this case hits all stable unfoldings, so we
          -- never look at 'rhs' for stable unfoldings. That's right, because
          -- 'rhs' is irrelevant for inlining things with a stable unfolding

      | is_con_app rhs = 5  -- Data types help with cases: Note [Constructor applications]

      | exprIsTrivial rhs = 10  -- Practically certain to be inlined
        -- Used to have also: && not (isExportedId bndr)
        -- But I found this sometimes cost an extra iteration when we have
        --      rec { d = (a,b); a = ...df...; b = ...df...; df = d }
        -- where df is the exported dictionary. Then df makes a really
        -- bad choice for loop breaker

        -- If an Id is marked "never inline" then it makes a great loop breaker
        -- The only reason for not checking that here is that it is rare
        -- and I've never seen a situation where it makes a difference,
        -- so it probably isn't worth the time to test on every binder
        --      | isNeverActive (idInlinePragma bndr) = -10

      | isOneOcc (idOccInfo bndr) = 2  -- Likely to be inlined

      | canUnfold (realIdUnfolding bndr) = 1
          -- The Id has some kind of unfolding
          -- Ignore loop-breaker-ness here because that is what we are setting!

      | otherwise = 0

    -- Checking for a constructor application
    -- Cheap and cheerful; the simplifier moves casts out of the way
    -- The lambda case is important to spot x = /\a. C (f a)
    -- which comes up when C is a dictionary constructor and
    -- f is a default method.
    -- Example: the instance for Show (ST s a) in GHC.ST
    --
    -- However we *also* treat (\x. C p q) as a con-app-like thing,
    --      Note [Closure conversion]
    is_con_app (Var v)    = isConLikeId v
    is_con_app (App f _)  = is_con_app f
    is_con_app (Lam _ e)  = is_con_app e
    is_con_app (Tick _ e) = is_con_app e
    is_con_app _          = False
{-
Note [Complexity of loop breaking]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The loop-breaking algorithm knocks out one binder at a time, and
performs a new SCC analysis on the remaining binders. That can
behave very badly in tightly-coupled groups of bindings; in the
worst case it can be (N**2)*log N, because it does a full SCC
on N, then N-1, then N-2 and so on.
To avoid this, we switch plans after 2 (or whatever) attempts:
Plan A: pick one binder with the lowest score, make it
a loop breaker, and try again
Plan B: pick *all* binders with the lowest score, make them
all loop breakers, and try again
Since there are only a small finite number of scores, this will
terminate in a constant number of iterations, rather than O(N)
iterations.
You might think that it's very unlikely, but RULES make it much
more likely. Here's a real example from Trac #1969:
Rec { $dm = \d.\x. op d
{-# RULES forall d. $dm Int d = $s$dm1
forall d. $dm Bool d = $s$dm2 #-}
dInt = MkD .... opInt ...
dBool = MkD .... opBool ...
opInt = $dm dInt
opBool = $dm dBool
$s$dm1 = \x. op dInt
$s$dm2 = \x. op dBool }
The RULES stuff means that we can't choose $dm as a loop breaker
(Note [Choosing loop breakers]), so we must choose at least (say)
opInt *and* opBool, and so on. The number of loop breakers is
linear in the number of instance declarations.
Note [Loop breakers and INLINE/INLINEABLE pragmas]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Avoid choosing a function with an INLINE pragma as the loop breaker!
If such a function is mutually-recursive with a non-INLINE thing,
then the latter should be the loop-breaker.
It's vital to distinguish between INLINE and INLINEABLE (the
Bool returned by hasStableCoreUnfolding_maybe). If we start with
Rec { {-# INLINEABLE f #-}
f x = ...f... }
and then worker/wrapper it through strictness analysis, we'll get
Rec { {-# INLINEABLE $wf #-}
$wf p q = let x = (p,q) in ...f...
{-# INLINE f #-}
f x = case x of (p,q) -> $wf p q }
Now it is vital that we choose $wf as the loop breaker, so we can
inline 'f' in '$wf'.
Note [DFuns should not be loop breakers]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It's particularly bad to make a DFun into a loop breaker. See
Note [How instance declarations are translated] in TcInstDcls
We give DFuns a higher score than ordinary CONLIKE things because
if there's a choice we want the DFun to be the non-loop breaker. Eg
rec { sc = /\ a \$dC. $fBWrap (T a) ($fCT @ a $dC)
$fCT :: forall a_afE. (Roman.C a_afE) => Roman.C (Roman.T a_afE)
{-# DFUN #-}
$fCT = /\a \$dC. MkD (T a) ((sc @ a $dC) |> blah) ($ctoF @ a $dC)
}
Here 'sc' (the superclass) looks CONLIKE, but we'll never get to it
if we can't unravel the DFun first.
Note [Constructor applications]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It's really really important to inline dictionaries. Real
example (the Enum Ordering instance from GHC.Base):
rec f = \ x -> case d of (p,q,r) -> p x
g = \ x -> case d of (p,q,r) -> q x
d = (v, f, g)
Here, f and g occur just once; but we can't inline them into d.
On the other hand we *could* simplify those case expressions if
we didn't stupidly choose d as the loop breaker.
But we won't because constructor args are marked "Many".
Inlining dictionaries is really essential to unravelling
the loops in static numeric dictionaries, see GHC.Float.
Note [Closure conversion]
~~~~~~~~~~~~~~~~~~~~~~~~~
We treat (\x. C p q) as a high-score candidate in the letrec scoring algorithm.
The immediate motivation came from the result of a closure-conversion transformation
which generated code like this:
data Clo a b = forall c. Clo (c -> a -> b) c
($:) :: Clo a b -> a -> b
Clo f env $: x = f env x
rec { plus = Clo plus1 ()
; plus1 _ n = Clo plus2 n
; plus2 Zero n = n
; plus2 (Succ m) n = Succ (plus $: m $: n) }
If we inline 'plus' and 'plus1', everything unravels nicely. But if
we choose 'plus1' as the loop breaker (which is entirely possible
otherwise), the loop does not unravel nicely.
@occAnalRhs@ deals with the question of bindings where the Id is marked
by an INLINE pragma. For these we record that anything which occurs
in its RHS occurs many times. This pessimistically assumes that the
inlined binder also occurs many times in its scope, but if it doesn't
we'll catch it next time round. At worst this costs an extra simplifier pass.
ToDo: try using the occurrence info for the inline'd binder.
[March 97] We do the same for atomic RHSs. Reason: see notes with loopBreakSCC.
[June 98, SLPJ] I've undone this change; I don't understand it. See notes with loopBreakSCC.
-}
-- | Occurrence-analyse the right-hand side of a recursive binding,
-- in an RHS context.  The returned usage details cover only the RHS,
-- and *not* the RULE or INLINE template for the Id.
occAnalRecRhs :: OccEnv -> CoreExpr     -- Rhs
              -> (UsageDetails, CoreExpr)
occAnalRecRhs env = occAnal (rhsCtxt env)
occAnalNonRecRhs :: OccEnv
                 -> Id -> CoreExpr    -- Binder and rhs
                     -- Binder is already tagged with occurrence info
                 -> (UsageDetails, CoreExpr)
-- Returned usage details covers only the RHS,
-- and *not* the RULE or INLINE template for the Id
occAnalNonRecRhs env bndr rhs
  = occAnal rhs_env rhs
  where
    -- See Note [Use one-shot info]
    -- Push the binder's demand info into the environment as one-shot
    -- information for the lambdas in the RHS
    env1 = env { occ_one_shots = argOneShots OneShotLam dmd }

    -- See Note [Cascading inlines]
    -- If the binder will certainly be inlined next round, analyse the
    -- RHS in an ordinary context rather than an RHS context
    rhs_env | certainly_inline = env1
            | otherwise        = rhsCtxt env1

    certainly_inline -- See Note [Cascading inlines]
      = case idOccInfo bndr of
          OneOcc in_lam one_br _ -> not in_lam && one_br && active && not_stable
          _                      -> False

    dmd        = idDemandInfo bndr
    active     = isAlwaysActive (idInlineActivation bndr)
    not_stable = not (isStableUnfolding (idUnfolding bndr))
-- | Add a non-committal occurrence (NoOccInfo) for every Id in
-- @id_set@ to the usage details; type variables in the set are ignored.
addIdOccs :: UsageDetails -> VarSet -> UsageDetails
addIdOccs usage id_set = foldVarSet add usage id_set
  where
    add v u | isId v    = addOneOcc u v NoOccInfo
            | otherwise = u
    -- Give a non-committal binder info (i.e NoOccInfo) because
    --   a) Many copies of the specialised thing can appear
    --   b) We don't want to substitute a BIG expression inside a RULE
    --      even if that's the only occurrence of the thing
    --      (Same goes for INLINE.)
{-
Note [Cascading inlines]
~~~~~~~~~~~~~~~~~~~~~~~~
By default we use an rhsCtxt for the RHS of a binding. This tells the
occ anal that it's looking at an RHS, which has an effect in
occAnalApp. In particular, for constructor applications, it makes
the arguments appear to have NoOccInfo, so that we don't inline into
them. Thus x = f y
k = Just x
we do not want to inline x.
But there's a problem. Consider
x1 = a0 : []
x2 = a1 : x1
x3 = a2 : x2
g = f x3
First time round, it looks as if x1 and x2 occur as an arg of a
let-bound constructor ==> give them a many-occurrence.
But then x3 is inlined (unconditionally as it happens) and
next time round, x2 will be, and the next time round x1 will be
Result: multiple simplifier iterations. Sigh.
So, when analysing the RHS of x3 we notice that x3 will itself
definitely inline the next time round, and so we analyse x3's rhs in
an ordinary context, not rhsCtxt. Hence the "certainly_inline" stuff.
Annoyingly, we have to approximate SimplUtils.preInlineUnconditionally.
If we say "yes" when preInlineUnconditionally says "no" the simplifier iterates
indefinitely:
x = f y
k = Just x
inline ==>
k = Just (f y)
float ==>
x1 = f y
k = Just x1
This is worse than the slow cascade, so we only want to say "certainly_inline"
if it really is certain. Look at the note with preInlineUnconditionally
for the various clauses.
Expressions
~~~~~~~~~~~
-}
occAnal :: OccEnv
        -> CoreExpr
        -> (UsageDetails,       -- Gives info only about the "interesting" Ids
            CoreExpr)

-- Types and literals mention no interesting Ids
occAnal _   expr@(Type _) = (emptyDetails,         expr)
occAnal _   expr@(Lit _)  = (emptyDetails,         expr)

-- A variable occurrence: record one occurrence, in a boring context
occAnal env expr@(Var v)  = (mkOneOcc env v False, expr)
    -- At one stage, I gathered the idRuleVars for v here too,
    -- which in a way is the right thing to do.
    -- But that went wrong right after specialisation, when
    -- the *occurrences* of the overloaded function didn't have any
    -- rules in them, so the *specialised* versions looked as if they
    -- weren't used at all.

-- A coercion mentions its coercion variables
occAnal _ (Coercion co)
  = (addIdOccs emptyDetails (coVarsOfCo co), Coercion co)
    -- See Note [Gather occurrences of coercion variables]
{-
Note [Gather occurrences of coercion variables]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We need to gather info about what coercion variables appear, so that
we can sort them into the right place when doing dependency analysis.
-}
occAnal env (Tick tickish body)
  -- A soft-scoped tick does not restrict inlining, so the body's
  -- usage details pass through unchanged
  | tickish `tickishScopesLike` SoftScope
  = (usage, Tick tickish body')

  | Breakpoint _ ids <- tickish
  = (usage_lam +++ mkVarEnv (zip ids (repeat NoOccInfo)), Tick tickish body')
    -- never substitute for any of the Ids in a Breakpoint

  | otherwise
  = (usage_lam, Tick tickish body')
  where
    !(usage,body') = occAnal env body
    -- for a non-soft tick scope, we can inline lambdas only
    usage_lam = mapVarEnv markInsideLam usage
occAnal env (Cast expr co)
  = case occAnal env expr of { (usage, expr') ->
    let usage1 = markManyIf (isRhsEnv env) usage
        usage2 = addIdOccs usage1 (coVarsOfCo co)
          -- See Note [Gather occurrences of coercion variables]
    in (usage2, Cast expr' co)
        -- If we see let x = y `cast` co
        -- then mark y as 'Many' so that we don't
        -- immediately inline y again.
    }

-- Applications: strip ticks and arguments, then hand off to occAnalApp
occAnal env app@(App _ _)
  = occAnalApp env (collectArgsTicks tickishFloatable app)
-- Ignore type variables altogether
--   (a) occurrences inside type lambdas only not marked as InsideLam
--   (b) type variables not in environment

occAnal env (Lam x body) | isTyVar x
  = case occAnal env body of { (body_usage, body') ->
    (body_usage, Lam x body')
    }

-- For value lambdas we do a special hack.  Consider
--      (\x. \y. ...x...)
-- If we did nothing, x is used inside the \y, so would be marked
-- as dangerous to dup.  But in the common case where the abstraction
-- is applied to two arguments this is over-pessimistic.
-- So instead, we just mark each binder with its occurrence
-- info in the *body* of the multiple lambda.
-- Then, the simplifier is careful when partially applying lambdas.

occAnal env expr@(Lam _ _)
  = case occAnal env_body body of { (body_usage, body') ->
    let
        (final_usage, tagged_binders) = tagLamBinders body_usage binders'
                          -- Use binders' to put one-shot info on the lambdas

        -- Only mark occurrences InsideLam if some binder in the
        -- group is not one-shot
        really_final_usage
          | all isOneShotBndr binders' = final_usage
          | otherwise = mapVarEnv markInsideLam final_usage
    in
    (really_final_usage, mkLams tagged_binders body') }
  where
    (binders, body)      = collectBinders expr
    (env_body, binders') = oneShotGroup env binders
occAnal env (Case scrut bndr ty alts)
  = case occ_anal_scrut scrut alts     of { (scrut_usage, scrut') ->
    case mapAndUnzip occ_anal_alt alts of { (alts_usage_s, alts') ->
    let
        alts_usage  = foldr combineAltsUsageDetails emptyDetails alts_usage_s
        (alts_usage1, tagged_bndr) = tag_case_bndr alts_usage bndr
        total_usage = scrut_usage +++ alts_usage1
    in
    total_usage `seq` (total_usage, Case scrut' tagged_bndr ty alts') }}
  where
        -- Note [Case binder usage]
        -- ~~~~~~~~~~~~~~~~~~~~~~~~
        -- The case binder gets a usage of either "many" or "dead", never "one".
        -- Reason: we like to inline single occurrences, to eliminate a binding,
        -- but inlining a case binder *doesn't* eliminate a binding.
        -- We *don't* want to transform
        --      case x of w { (p,q) -> f w }
        -- into
        --      case x of w { (p,q) -> f (p,q) }
    tag_case_bndr usage bndr
      = case lookupVarEnv usage bndr of
          Nothing -> (usage,                  setIdOccInfo bndr IAmDead)
          Just _  -> (usage `delVarEnv` bndr, setIdOccInfo bndr NoOccInfo)

    alt_env      = mkAltEnv env scrut bndr
    occ_anal_alt = occAnalAlt alt_env

    -- A variable scrutinee of a case with at least one non-default
    -- alternative counts as an "interesting context" occurrence
    occ_anal_scrut (Var v) (alt1 : other_alts)
      | not (null other_alts) || not (isDefaultAlt alt1)
      = (mkOneOcc env v True, Var v)  -- The 'True' says that the variable occurs
                                      -- in an interesting context; the case has
                                      -- at least one non-default alternative
    occ_anal_scrut (Tick t e) alts
      | t `tickishScopesLike` SoftScope
        -- No reason to not look through all ticks here, but only
        -- for soft-scoped ticks we can do so without having to
        -- update returned occurrence info (see occAnal)
      = second (Tick t) $ occ_anal_scrut e alts

    occ_anal_scrut scrut _alts
      = occAnal (vanillaCtxt env) scrut  -- No need for rhsCtxt

occAnal env (Let bind body)
  = case occAnal env body                            of { (body_usage, body') ->
    case occAnalBind env emptyVarEnv bind body_usage of { (final_usage, new_binds) ->
    (final_usage, mkLets new_binds body') }}
occAnalArgs :: OccEnv -> [CoreExpr] -> [OneShots] -> (UsageDetails, [CoreExpr])
-- Analyse a list of arguments, threading through the stack of
-- one-shot information for the remaining value arguments
occAnalArgs _ [] _
  = (emptyDetails, [])

occAnalArgs env (arg:args) one_shots
  | isTypeArg arg
    -- Type arguments contribute no usage and consume no one-shot entry
  = case occAnalArgs env args one_shots of { (uds, args') ->
    (uds, arg:args') }

  | otherwise
    -- Value argument: analyse in an argument context, popping one
    -- entry off the one-shot stack
  = case argCtxt env one_shots           of { (arg_env, one_shots') ->
    case occAnal arg_env arg             of { (uds1, arg') ->
    case occAnalArgs env args one_shots' of { (uds2, args') ->
    (uds1 +++ uds2, arg':args') }}}
{-
Applications are dealt with specially because we want
the "build hack" to work.
Note [Arguments of let-bound constructors]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
f x = let y = expensive x in
let z = (True,y) in
(case z of {(p,q)->q}, case z of {(p,q)->q})
We feel free to duplicate the WHNF (True,y), but that means
that y may be duplicated thereby.
If we aren't careful we duplicate the (expensive x) call!
Constructors are rather like lambdas in this way.
-}
occAnalApp :: OccEnv
           -> (Expr CoreBndr, [Arg CoreBndr], [Tickish Id])
           -> (UsageDetails, Expr CoreBndr)
-- Analyse an application, already decomposed into head, arguments
-- and floated-out ticks by collectArgsTicks
occAnalApp env (Var fun, args, ticks)
  | null ticks = (uds, mkApps (Var fun) args')
  | otherwise  = (uds, mkTicks ticks $ mkApps (Var fun) args')
  where
    uds = fun_uds +++ final_args_uds

    !(args_uds, args') = occAnalArgs env args one_shots
    !final_args_uds = markManyIf (isRhsEnv env && is_exp) args_uds
       -- We mark the free vars of the argument of a constructor or PAP
       -- as "many", if it is the RHS of a let(rec).
       -- This means that nothing gets inlined into a constructor argument
       -- position, which is what we want.  Typically those constructor
       -- arguments are just variables, or trivial expressions.
       --
       -- This is the *whole point* of the isRhsEnv predicate
       -- See Note [Arguments of let-bound constructors]

    n_val_args = valArgCount args
    fun_uds    = mkOneOcc env fun (n_val_args > 0)
    is_exp     = isExpandableApp fun n_val_args
       -- See Note [CONLIKE pragma] in BasicTypes
       -- The definition of is_exp should match that in
       -- Simplify.prepareRhs

    one_shots  = argsOneShots (idStrictness fun) n_val_args
                 -- See Note [Use one-shot info]

-- The head is not a plain variable (e.g. a lambda or a cast)
occAnalApp env (fun, args, ticks)
  = (fun_uds +++ args_uds, mkTicks ticks $ mkApps fun' args')
  where
    !(fun_uds, fun') = occAnal (addAppCtxt env args) fun
        -- The addAppCtxt is a bit cunning.  One iteration of the simplifier
        -- often leaves behind beta redexs like
        --      (\x y -> e) a1 a2
        -- Here we would like to mark x,y as one-shot, and treat the whole
        -- thing much like a let.  We do this by pushing some True items
        -- onto the context stack.
    !(args_uds, args') = occAnalArgs env args []
-- | When the flag is set, mark every occurrence in the usage details
-- as "many"; otherwise return the details unchanged.
markManyIf :: Bool          -- If this is true
           -> UsageDetails  -- Then do markMany on this
           -> UsageDetails
markManyIf cond uds
  | cond      = mapVarEnv markMany uds
  | otherwise = uds
{-
Note [Use one-shot information]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The occurrence analyser propagates one-shot-lambda information in two situations:
* Applications: eg build (\cn -> blah)
Propagate one-shot info from the strictness signature of 'build' to
the \cn
* Let-bindings: eg let f = \c. let ... in \n -> blah
in (build f, build f)
Propagate one-shot info from the demand-info on 'f' to the
lambdas in its RHS (which may not be syntactically at the top)
Some of this is done by the demand analyser, but this way it happens
much earlier, taking advantage of the strictness signature of
imported functions.
Note [Binders in case alternatives]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
case x of y { (a,b) -> f y }
We treat 'a', 'b' as dead, because they don't physically occur in the
case alternative. (Indeed, a variable is dead iff it doesn't occur in
its scope in the output of OccAnal.) It really helps to know when
binders are unused. See esp the call to isDeadBinder in
Simplify.mkDupableAlt
In this example, though, the Simplifier will bring 'a' and 'b' back to
life, because it binds 'y' to (a,b) (imagine y got inlined and
scrutinised y).
-}
occAnalAlt :: (OccEnv, Maybe (Id, CoreExpr))
           -> CoreAlt
           -> (UsageDetails, Alt IdWithOccInfo)
-- Analyse one case alternative.  The Maybe is the binder-swap proxy
-- binding produced by mkAltEnv; see Note [Binder swap]
occAnalAlt (env, scrut_bind) (con, bndrs, rhs)
  = case occAnal env rhs of { (rhs_usage1, rhs1) ->
    let
        (alt_usg, tagged_bndrs) = tagLamBinders rhs_usage1 bndrs
                                  -- See Note [Binders in case alternatives]
        (alt_usg', rhs2) =
          wrapAltRHS env scrut_bind alt_usg tagged_bndrs rhs1
    in
    (alt_usg', (con, tagged_bndrs, rhs2)) }
wrapAltRHS :: OccEnv
           -> Maybe (Id, CoreExpr)  -- proxy mapping generated by mkAltEnv
           -> UsageDetails          -- usage for entire alt (p -> rhs)
           -> [Var]                 -- alt binders
           -> CoreExpr              -- alt RHS
           -> (UsageDetails, CoreExpr)
-- If the binder-swap proxy applies to this alternative, wrap the RHS
-- in a let-binding for the scrutinee variable; see Note [Binder swap]
wrapAltRHS env (Just (scrut_var, let_rhs)) alt_usg bndrs alt_rhs
  | occ_binder_swap env
  , scrut_var `usedIn` alt_usg -- bndrs are not present in alt_usg so this
                               -- handles condition (a) in Note [Binder swap]
  , not captured               -- See condition (b) in Note [Binder swap]
  = ( alt_usg' +++ let_rhs_usg
    , Let (NonRec tagged_scrut_var let_rhs') alt_rhs )
  where
    captured = any (`usedIn` let_rhs_usg) bndrs
    -- The rhs of the let may include coercion variables
    -- if the scrutinee was a cast, so we must gather their
    -- usage. See Note [Gather occurrences of coercion variables]
    (let_rhs_usg, let_rhs') = occAnal env let_rhs
    (alt_usg', tagged_scrut_var) = tagBinder alt_usg scrut_var

-- No proxy, or the guards above failed: leave the RHS alone
wrapAltRHS _ _ alt_usg _ alt_rhs
  = (alt_usg, alt_rhs)
{-
************************************************************************
* *
OccEnv
* *
************************************************************************
-}
-- | Environment carried around by the occurrence analyser.
data OccEnv
  = OccEnv { occ_encl        :: !OccEncl      -- Enclosing context information
           , occ_one_shots   :: !OneShots     -- Tells about linearity
           , occ_gbl_scrut   :: GlobalScruts
           , occ_rule_act    :: Activation -> Bool  -- Which rules are active
             -- See Note [Finding rule RHS free vars]
           , occ_binder_swap :: !Bool -- enable the binder_swap
             -- See CorePrep Note [Dead code in CorePrep]
           }

type GlobalScruts = IdSet   -- See Note [Binder swap on GlobalId scrutinees]
-----------------------------
-- OccEncl is used to control whether to inline into constructor arguments
-- For example:
--      x = (p,q)               -- Don't inline p or q
--      y = /\a -> (p a, q a)   -- Still don't inline p or q
--      z = f (p,q)             -- Do inline p,q; it may make a rule fire
-- So OccEncl tells enough about the context to know what to do when
-- we encounter a constructor application or PAP.

data OccEncl
  = OccRhs       -- RHS of let(rec), albeit perhaps inside a type lambda
                 -- Don't inline into constructor args here
  | OccVanilla   -- Argument of function, body of lambda, scrutinee of case etc.
                 -- Do inline into constructor args here

instance Outputable OccEncl where
  ppr OccRhs     = ptext (sLit "occRhs")
  ppr OccVanilla = ptext (sLit "occVanilla")

-- | Stack of one-shot information for the arguments an expression
-- will be applied to, innermost first.
type OneShots = [OneShotInfo]
        -- []                   No info
        --
        -- one_shot_info:ctxt   Analysing a function-valued expression that
        --                      will be applied as described by one_shot_info
-- | The initial environment: vanilla context, no one-shot info,
-- empty global-scrutinee set, binder swap enabled.
initOccEnv :: (Activation -> Bool) -> OccEnv
initOccEnv active_rule
  = OccEnv { occ_encl        = OccVanilla
           , occ_one_shots   = []
           , occ_gbl_scrut   = emptyVarSet -- PE emptyVarEnv emptyVarSet
           , occ_rule_act    = active_rule
           , occ_binder_swap = True }
-- | Reset the environment for analysing an ordinary sub-expression:
-- vanilla enclosing context, no one-shot information.
vanillaCtxt :: OccEnv -> OccEnv
vanillaCtxt env = env { occ_one_shots = [], occ_encl = OccVanilla }
-- | Reset the environment for analysing the RHS of a let(rec):
-- RHS enclosing context, no one-shot information.
rhsCtxt :: OccEnv -> OccEnv
rhsCtxt env = env { occ_one_shots = [], occ_encl = OccRhs }
-- | Environment for analysing one argument: vanilla context, with the
-- next entry popped from the one-shot stack (empty if the stack is
-- exhausted).  Returns the remaining stack for later arguments.
argCtxt :: OccEnv -> [OneShots] -> (OccEnv, [OneShots])
argCtxt env shots_stack
  = case shots_stack of
      []            -> (env { occ_encl = OccVanilla, occ_one_shots = [] }, [])
      (shots:rest)  -> (env { occ_encl = OccVanilla, occ_one_shots = shots }, rest)
-- | Are we analysing the right-hand side of a let(rec)?
isRhsEnv :: OccEnv -> Bool
isRhsEnv env = case occ_encl env of
                 OccRhs     -> True
                 OccVanilla -> False
oneShotGroup :: OccEnv -> [CoreBndr]
             -> ( OccEnv
                , [CoreBndr] )
-- The result binders have one-shot-ness set that they might not have had originally.
-- This happens in (build (\cn -> e)).  Here the occurrence analyser
-- linearity context knows that c,n are one-shot, and it records that fact in
-- the binder. This is useful to guide subsequent float-in/float-out transformations
oneShotGroup env@(OccEnv { occ_one_shots = ctxt }) bndrs
  = go ctxt bndrs []
  where
    -- Walk the binders, consuming one entry of the one-shot stack per
    -- value binder (type binders consume nothing); the processed
    -- binders accumulate in reverse in rev_bndrs

    -- All binders consumed: return the leftover stack
    go ctxt [] rev_bndrs
      = ( env { occ_one_shots = ctxt, occ_encl = OccVanilla }
        , reverse rev_bndrs )

    -- Stack exhausted: remaining binders pass through untouched
    go [] bndrs rev_bndrs
      = ( env { occ_one_shots = [], occ_encl = OccVanilla }
        , reverse rev_bndrs ++ bndrs )

    go ctxt (bndr:bndrs) rev_bndrs
      | isId bndr = case ctxt of
                      []                -> go [] bndrs (bndr : rev_bndrs)
                      (one_shot : ctxt) -> go ctxt bndrs (bndr': rev_bndrs)
                        where
                          bndr' = updOneShotInfo bndr one_shot
      | otherwise = go ctxt bndrs (bndr:rev_bndrs)
-- | Before analysing the head of an application, push one OneShotLam
-- marker per value argument onto the context stack, so binders of a
-- beta-redex head get marked one-shot.
addAppCtxt :: OccEnv -> [Arg CoreBndr] -> OccEnv
addAppCtxt env@(OccEnv { occ_one_shots = ctxt }) args
  = env { occ_one_shots = replicate (valArgCount args) OneShotLam ++ ctxt }
transClosureFV :: UniqFM VarSet -> UniqFM VarSet
-- If (f,g), (g,h) are in the input, then (f,h) is in the output
-- as well as (f,g), (g,h)
-- Iterates to a fixed point: each pass extends every entry's
-- free-var set via extendFvs, stopping when no entry changed
transClosureFV env
  | no_change = env
  | otherwise = transClosureFV (listToUFM new_fv_list)
  where
    (no_change, new_fv_list) = mapAccumL bump True (nonDetUFMToList env)
    -- The accumulator stays True only while every entry so far was
    -- already closed
    bump no_change (b,fvs)
      | no_change_here = (no_change, (b,fvs))
      | otherwise      = (False,     (b,new_fvs))
      where
        (new_fvs, no_change_here) = extendFvs env fvs
-------------
-- | Like 'extendFvs', but discard the Bool fixed-point flag.
extendFvs_ :: UniqFM VarSet -> VarSet -> VarSet
extendFvs_ env = fst . extendFvs env
extendFvs :: UniqFM VarSet -> VarSet -> (VarSet, Bool)
-- (extendFVs env s) returns
--      (s `union` env(s), env(s) `subset` s)
-- i.e. one step of the transitive closure, plus a flag saying whether
-- the step added anything new (False => it did)
extendFvs env s
  | isNullUFM env
  = (s, True)
  | otherwise
  = (s `unionVarSet` extras, extras `subVarSet` s)
  where
    extras :: VarSet    -- env(s)
    extras = foldUFM unionVarSet emptyVarSet $
             intersectUFM_C (\x _ -> x) env (getUniqSet s)
{-
************************************************************************
* *
Binder swap
* *
************************************************************************
Note [Binder swap]
~~~~~~~~~~~~~~~~~~
We do these two transformations right here:
(1) case x of b { pi -> ri }
==>
case x of b { pi -> let x=b in ri }
(2) case (x |> co) of b { pi -> ri }
==>
case (x |> co) of b { pi -> let x = b |> sym co in ri }
Why (2)? See Note [Case of cast]
In both cases, in a particular alternative (pi -> ri), we only
add the binding if
(a) x occurs free in (pi -> ri)
(ie it occurs in ri, but is not bound in pi)
(b) the pi does not bind b (or the free vars of co)
We need (a) and (b) for the inserted binding to be correct.
For the alternatives where we inject the binding, we can transfer
all x's OccInfo to b. And that is the point.
Notice that
* The deliberate shadowing of 'x'.
* That (a) rapidly becomes false, so no bindings are injected.
The reason for doing these transformations here is because it allows
us to adjust the OccInfo for 'x' and 'b' as we go.
* Suppose the only occurrences of 'x' are the scrutinee and in the
ri; then this transformation makes it occur just once, and hence
get inlined right away.
* If we do this in the Simplifier, we don't know whether 'x' is used
in ri, so we are forced to pessimistically zap b's OccInfo even
though it is typically dead (ie neither it nor x appear in the
ri). There's nothing actually wrong with zapping it, except that
it's kind of nice to know which variables are dead. My nose
tells me to keep this information as robustly as possible.
The Maybe (Id,CoreExpr) passed to occAnalAlt is the extra let-binding
{x=b}; it's Nothing if the binder-swap doesn't happen.
There is a danger though. Consider
let v = x +# y
in case (f v) of w -> ...v...v...
And suppose that (f v) expands to just v. Then we'd like to
use 'w' instead of 'v' in the alternative. But it may be too
late; we may have substituted the (cheap) x+#y for v in the
same simplifier pass that reduced (f v) to v.
I think this is just too bad. CSE will recover some of it.
Note [Case of cast]
~~~~~~~~~~~~~~~~~~~
Consider case (x `cast` co) of b { I# ->
... (case (x `cast` co) of {...}) ...
We'd like to eliminate the inner case. That is the motivation for
equation (2) in Note [Binder swap]. When we get to the inner case, we
inline x, cancel the casts, and away we go.
Note [Binder swap on GlobalId scrutinees]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When the scrutinee is a GlobalId we must take care in two ways
i) In order to *know* whether 'x' occurs free in the RHS, we need its
occurrence info. BUT, we don't gather occurrence info for
GlobalIds. That's what the (small) occ_gbl_scrut env in
OccEnv is for: it says "gather occurrence info for these".
ii) We must call localiseId on 'x' first, in case it's a GlobalId, or
has an External Name. See, for example, SimplEnv Note [Global Ids in
the substitution].
Note [Zap case binders in proxy bindings]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
From the original
case x of cb(dead) { p -> ...x... }
we will get
case x of cb(live) { p -> let x = cb in ...x... }
Core Lint never expects to find an *occurrence* of an Id marked
as Dead, so we must zap the OccInfo on cb before making the
binding x = cb. See Trac #5028.
Historical note [no-case-of-case]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We *used* to suppress the binder-swap in case expressions when
-fno-case-of-case is on. Old remarks:
"This happens in the first simplifier pass,
and enhances full laziness. Here's the bad case:
f = \ y -> ...(case x of I# v -> ...(case x of ...) ... )
If we eliminate the inner case, we trap it inside the I# v -> arm,
which might prevent some full laziness happening. I've seen this
in action in spectral/cichelli/Prog.hs:
[(m,n) | m <- [1..max], n <- [1..max]]
Hence the check for NoCaseOfCase."
However, now the full-laziness pass itself reverses the binder-swap, so this
check is no longer necessary.
Historical note [Suppressing the case binder-swap]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This old note describes a problem that is also fixed by doing the
binder-swap in OccAnal:
There is another situation when it might make sense to suppress the
case-expression binder-swap. If we have
case x of w1 { DEFAULT -> case x of w2 { A -> e1; B -> e2 }
...other cases .... }
We'll perform the binder-swap for the outer case, giving
case x of w1 { DEFAULT -> case w1 of w2 { A -> e1; B -> e2 }
...other cases .... }
But there is no point in doing it for the inner case, because w1 can't
be inlined anyway. Furthermore, doing the case-swapping involves
zapping w2's occurrence info (see paragraphs that follow), and that
forces us to bind w2 when doing case merging. So we get
case x of w1 { A -> let w2 = w1 in e1
B -> let w2 = w1 in e2
...other cases .... }
This is plain silly in the common case where w2 is dead.
Even so, I can't see a good way to implement this idea. I tried
not doing the binder-swap if the scrutinee was already evaluated
but that failed big-time:
data T = MkT !Int
case v of w { MkT x ->
case x of x1 { I# y1 ->
case x of x2 { I# y2 -> ...
Notice that because MkT is strict, x is marked "evaluated". But to
eliminate the last case, we must either make sure that x (as well as
x1) has unfolding MkT y1. The straightforward thing to do is to do
the binder-swap. So this whole note is a no-op.
It's fixed by doing the binder-swap in OccAnal because we can do the
binder-swap unconditionally and still get occurrence analysis
information right.
-}
mkAltEnv :: OccEnv -> CoreExpr -> Id -> (OccEnv, Maybe (Id, CoreExpr))
-- Does three things: a) makes the occ_one_shots = OccVanilla
--                    b) extends the GlobalScruts if possible
--                    c) returns a proxy mapping, binding the scrutinee
--                       to the case binder, if possible
mkAltEnv env@(OccEnv { occ_gbl_scrut = pe }) scrut case_bndr
  = case stripTicksTopE (const True) scrut of
      Var v           -> add_scrut v case_bndr'
      Cast (Var v) co -> add_scrut v (Cast case_bndr' (mkSymCo co))
                          -- See Note [Case of cast]
      _               -> (env { occ_encl = OccVanilla }, Nothing)
  where
    add_scrut v rhs = ( env { occ_encl = OccVanilla, occ_gbl_scrut = pe `extendVarSet` v }
                      , Just (localise v, rhs) )

    case_bndr' = Var (zapIdOccInfo case_bndr)
                 -- See Note [Zap case binders in proxy bindings]

    -- Localise the scrut_var before shadowing it; we're making a
    -- new binding for it, and it might have an External Name, or
    -- even be a GlobalId; Note [Binder swap on GlobalId scrutinees]
    -- Also we don't want any INLINE or NOINLINE pragmas!
    localise scrut_var = mkLocalId (localiseName (idName scrut_var)) (idType scrut_var)
{-
************************************************************************
* *
\subsection[OccurAnal-types]{OccEnv}
* *
************************************************************************
-}
type UsageDetails = IdEnv OccInfo  -- A finite map from ids to their usage
                -- INVARIANT: never IAmDead
                -- (Deadness is signalled by not being in the map at all)

-- | Combine usage details: '(+++)' for code that is executed together
-- (occurrences add up), 'combineAltsUsageDetails' for alternative
-- branches of a case (occurrences are or-ed).
(+++), combineAltsUsageDetails
        :: UsageDetails -> UsageDetails -> UsageDetails

(+++) usage1 usage2
  = plusVarEnv_C addOccInfo usage1 usage2

combineAltsUsageDetails usage1 usage2
  = plusVarEnv_C orOccInfo usage1 usage2
-- | Record one occurrence of @id@ with the given 'OccInfo', merging
-- it with any occurrences already recorded (via 'addOccInfo').
addOneOcc :: UsageDetails -> Id -> OccInfo -> UsageDetails
addOneOcc usage id info = plusVarEnv_C addOccInfo usage one_occ
  where
    one_occ = unitVarEnv id info
    -- ToDo: make this more efficient

-- | Usage details recording no occurrences at all.
emptyDetails :: UsageDetails
emptyDetails = emptyVarEnv

-- | Is @v@ used?  Exported Ids are conservatively treated as
-- always used, since we cannot see their occurrences.
usedIn :: Id -> UsageDetails -> Bool
usedIn v details = isExportedId v || v `elemVarEnv` details
type IdWithOccInfo = Id

tagLamBinders :: UsageDetails          -- Of scope
              -> [Id]                  -- Binders
              -> (UsageDetails,        -- Details with binders removed
                  [IdWithOccInfo])     -- Tagged binders
-- Used for lambda and case binders
-- It copes with the fact that lambda bindings can have a
-- stable unfolding, used for join points
tagLamBinders usage binders = usage' `seq` (usage', bndrs')
  where
    -- mapAccumR threads the usage right-to-left through the binders.
    (usage', bndrs') = mapAccumR tag_lam usage binders
    -- Tag one binder with its recorded occurrence info, delete it from
    -- the usage, and add any occurrences from its unfolding.
    tag_lam usage bndr = (usage2, setBinderOcc usage bndr)
      where
        usage1 = usage `delVarEnv` bndr
        usage2 | isId bndr = addIdOccs usage1 (idUnfoldingVars bndr)
               | otherwise = usage1
-- | Delete @binder@ from the usage details and return the binder
-- tagged with the occurrence info recorded for it there.
tagBinder :: UsageDetails          -- Of scope
          -> Id                    -- Binders
          -> (UsageDetails,        -- Details with binders removed
              IdWithOccInfo)       -- Tagged binders
tagBinder usage binder = usage_sans_bndr `seq` (usage_sans_bndr, tagged_bndr)
  where
    usage_sans_bndr = usage `delVarEnv` binder
    tagged_bndr     = setBinderOcc usage binder
-- | Tag a binder with the occurrence info recorded for it in @usage@.
setBinderOcc :: UsageDetails -> CoreBndr -> CoreBndr
setBinderOcc usage bndr
  | isTyVar bndr      = bndr
  | isExportedId bndr = case idOccInfo bndr of
                          NoOccInfo -> bndr
                          _         -> setIdOccInfo bndr NoOccInfo
            -- Don't use local usage info for visible-elsewhere things
            -- BUT *do* erase any IAmALoopBreaker annotation, because we're
            -- about to re-generate it and it shouldn't be "sticky"
  | otherwise = setIdOccInfo bndr occ_info
  where
    -- Absence from the usage map signals a dead binder (see the
    -- UsageDetails invariant).
    occ_info = lookupVarEnv usage bndr `orElse` IAmDead
{-
************************************************************************
* *
\subsection{Operations over OccInfo}
* *
************************************************************************
-}
-- | Record a single occurrence of @id@: local Ids get a OneOcc entry;
-- Ids in the global-scrutinee set get NoOccInfo; any other global is
-- not tracked at all.
mkOneOcc :: OccEnv -> Id -> InterestingCxt -> UsageDetails
mkOneOcc env id int_cxt
  | isLocalId id
  = unitVarEnv id (OneOcc False True int_cxt)
  | id `elemVarSet` occ_gbl_scrut env
  = unitVarEnv id NoOccInfo
  | otherwise
  = emptyDetails
-- | 'markMany' discards single-occurrence information entirely;
-- 'markInsideLam' records that a single occurrence is under a lambda.
markMany, markInsideLam :: OccInfo -> OccInfo

markMany _ = NoOccInfo

markInsideLam occ =
  case occ of
    OneOcc _ one_br int_cxt -> OneOcc True one_br int_cxt
    _                       -> occ
addOccInfo, orOccInfo :: OccInfo -> OccInfo -> OccInfo

-- Combining two non-dead occurrences always yields Many.
-- (Argument is never IAmDead)
addOccInfo a1 a2 = ASSERT( not (isDeadOcc a1 || isDeadOcc a2) )
                   NoOccInfo  -- Both branches are at least One

-- (orOccInfo orig new) is used
-- when combining occurrence info from branches of a case
orOccInfo (OneOcc in_lam1 _ int_cxt1)
          (OneOcc in_lam2 _ int_cxt2)
  = OneOcc (in_lam1 || in_lam2)
           False  -- False, because it occurs in both branches
           (int_cxt1 && int_cxt2)
orOccInfo a1 a2 = ASSERT( not (isDeadOcc a1 || isDeadOcc a2) )
                  NoOccInfo
|
pparkkin/eta
|
compiler/ETA/SimplCore/OccurAnal.hs
|
bsd-3-clause
| 76,427
| 0
| 16
| 21,201
| 8,083
| 4,418
| 3,665
| 561
| 7
|
-- |
-- Module : Database.Enumerator
-- Copyright : (c) 2004 Oleg Kiselyov, Alistair Bayley
-- License : BSD-style
-- Maintainer : oleg@pobox.com, alistair@abayley.org
-- Stability : experimental
-- Portability : non-portable
-- Abstract database interface, providing a left-fold enumerator
-- and cursor operations.
-- There is a stub: "Database.Stub.Enumerator".
-- This lets you run the test cases without having a working DBMS installation.
-- This isn't so valuable now, because it's dead easy to install Sqlite,
-- but it's still there if you want to try it.
-- Additional reading:
-- * <http://pobox.com/~oleg/ftp/Haskell/misc.html#fold-stream>
-- * <http://pobox.com/~oleg/ftp/papers/LL3-collections-enumerators.txt>
-- * <http://www.eros-os.org/pipermail/e-lang/2004-March/009643.html>
-- Note that there are a few functions that are exported from each DBMS-specific
-- implementation which are exposed to the API user, and which are part of
-- the Takusen API, but are not (necessarily) in this module.
-- They include:
-- * @connect@ (obviously DBMS specific)
-- * @prepareQuery, prepareLargeQuery, prepareCommand, sql, sqlbind, prefetch, cmdbind@
-- These functions will typically have the same names and intentions,
-- but their specific types and usage may differ between DBMS.
{-# LANGUAGE CPP #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE OverlappingInstances #-}
{-# LANGUAGE UndecidableInstances #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE FunctionalDependencies #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE ScopedTypeVariables #-}
module Database.Enumerator
(
-- * Usage
-- $usage_example
-- ** Iteratee Functions
-- $usage_iteratee
-- ** result and result'
-- $usage_result
-- ** Rank-2 types, ($), and the monomorphism restriction
-- $usage_rank2_types
-- ** Bind Parameters
-- $usage_bindparms
-- ** Multiple (and nested) Result Sets
-- $usage_multiresultset
-- * Sessions and Transactions
DBM -- The data constructor is not exported
, IE.ISession, IE.ConnectA
, withSession, withContinuedSession
, commit, rollback, beginTransaction
, withTransaction
, IE.IsolationLevel(..)
, execDDL, execDML, inquire
-- * Exceptions and handlers
, IE.DBException(..)
, formatDBException, basicDBExceptionReporter
, reportRethrow, reportRethrowMsg
, catchDB, catchDBError, ignoreDBError, IE.throwDB
, IE.ColNum, IE.RowNum
, IE.SqlState, IE.SqlStateClass, IE.SqlStateSubClass
-- * Preparing and Binding
, IE.PreparedStmt -- data constructor not exported
, withPreparedStatement
, withBoundStatement
, IE.Statement, IE.Command, IE.EnvInquiry
, IE.PreparationA, IE.IPrepared
, IE.BindA, IE.DBBind, IE.bindP
-- * Iteratees and Cursors
, IE.IQuery, doQuery, IE.DBType
, IterResult, IterAct
, IE.currentRowNum, NextResultSet(..), RefCursor(..)
, cursorIsEOF, cursorCurrent, cursorNext
, withCursor
, IE.Position
-- * Utilities
, ifNull, result, result'
) where
import Prelude hiding (catch)
import Data.Dynamic
import Data.IORef
-- import Data.Time
import Data.Maybe (fromMaybe, isNothing)
-- import Control.Monad (liftM)
-- import Control.Monad.Trans (liftIO)
import Control.Applicative (Applicative)
import Control.Exception
import Control.Monad.Fix
import Control.Monad.Reader
import Control.Exception.MonadIO
import qualified Database.InternalEnumerator as IE
-- import System.Time
-- -----------------------------------------------------------
-- -----------------------------------------------------------
-- | 'IterResult' and 'IterAct' give us some type sugar.
-- Without them, the types of iteratee functions become
-- quite unwieldy.
-- | @Left seed@ terminates the query early; @Right seed@ continues
-- with the next row (see the $usage_result section below).
type IterResult seedType = Either seedType seedType

-- | The shape of an iteratee's final argument and its result.
type IterAct m seedType = seedType -> m (IterResult seedType)

-- | Catch 'Database.InternalEnumerator.DBException's thrown in the 'DBM'
-- monad.
catchDB :: CaughtMonadIO m => m a -> (IE.DBException -> m a) -> m a
catchDB = gcatch
-- |This simple handler reports the error to @stdout@ and swallows it
-- i.e. it doesn't propagate.
basicDBExceptionReporter :: CaughtMonadIO m => IE.DBException -> m ()
basicDBExceptionReporter = liftIO . putStrLn . formatDBException

-- | This handler reports the error and propagates it
-- (usually to force the program to halt).
reportRethrow :: CaughtMonadIO m => IE.DBException -> m a
reportRethrow = reportRethrowMsg ""

-- | Same as 'reportRethrow', but you can prefix some text to the error
-- (perhaps to indicate which part of your program raised it).
reportRethrowMsg :: CaughtMonadIO m => String -> IE.DBException -> m a
reportRethrowMsg prefix e = do
  liftIO (putStr prefix)
  basicDBExceptionReporter e
  IE.throwDB e
-- | A show for 'Database.InternalEnumerator.DBException's.
formatDBException :: IE.DBException -> String
formatDBException exc = case exc of
    IE.DBError (ssc, sssc) e m -> withSqlState ssc sssc e m
    IE.DBFatal (ssc, sssc) e m -> withSqlState ssc sssc e m
    IE.DBUnexpectedNull r c ->
      "Unexpected null in row " ++ show r ++ ", column " ++ show c ++ "."
    IE.DBNoData -> "Fetch: no more data."
  where
    -- Shared rendering for error/fatal cases: SQLSTATE class and
    -- subclass, the numeric code, then the message text.
    withSqlState ssc sssc e m = ssc ++ sssc ++ " " ++ show e ++ ": " ++ m
-- |If you want to trap a specific error number, use this.
-- It passes anything else up.
catchDBError :: (CaughtMonadIO m) =>
  Int -> m a -> (IE.DBException -> m a) -> m a
catchDBError n action handler = catchDB action trap
  where
    -- Only a DBError whose numeric code equals @n@ is handled;
    -- every other exception is re-thrown untouched.
    trap dberror =
      case dberror of
        IE.DBError _ e _ | e == n -> handler dberror
        _ -> IE.throwDB dberror

-- | Analogous to 'catchDBError', but ignores specific errors instead
-- (propagates anything else).
-- NOTE(review): when the error is ignored the result is 'undefined',
-- so the caller must not demand the returned value.
ignoreDBError :: (CaughtMonadIO m) => Int -> m a -> m a
ignoreDBError n action = catchDBError n action (\_ -> return undefined)
-- --------------------------------------------------------------------
-- -- ** Session monad
-- --------------------------------------------------------------------
-- The DBM data constructor is NOT exported.
-- One may think to quantify over sess in |withSession|. We won't need
-- any mark then, I gather.
-- The quantification over Session is quite bothersome: need to enumerate
-- all class constraints for the Session (like IQuery, DBType, etc).
-- | The DBM monad: a 'ReaderT' carrying the back-end session over IO.
-- The @mark@ phantom type, combined with rank-2 quantification in
-- 'withSession', prevents marked values (cursors, statements) escaping.
-- NOTE(review): this uses a datatype context (@IE.ISession sess =>@),
-- a deprecated feature; kept since removing it changes the interface.
newtype IE.ISession sess => DBM mark sess a = DBM (ReaderT sess IO a)
#ifndef __HADDOCK__
  deriving (Functor, Monad, Applicative, MonadIO, MonadFix, MonadReader sess)
#else
-- Haddock can't cope with the "MonadReader sess" instance
  deriving (Functor, Monad, Applicative, MonadIO, MonadFix)
#endif
-- Unwrap the DBM newtype to get at the underlying ReaderT action.
unDBM (DBM x) = x

-- Exception catching in DBM simply delegates to the underlying
-- ReaderT/IO instance of CaughtMonadIO.
instance IE.ISession si => CaughtMonadIO (DBM mark si) where
  gcatch a h = DBM ( gcatch (unDBM a) (unDBM . h) )
  gcatchJust p a h = DBM ( gcatchJust p (unDBM a) (unDBM . h) )
-- | Typeable constraint is to prevent the leakage of Session and other
-- marked objects.
withSession :: (Typeable a, IE.ISession sess) =>
  IE.ConnectA sess -> (forall mark. DBM mark sess a) -> IO a
withSession (IE.ConnectA connecta) action =
  bracket
    connecta       -- acquire the session
    IE.disconnect  -- always released, even on exception
    (\sess -> runReaderT (unDBM action) sess)
-- | Persistent database connections.
-- This issue has been brought up by Shanky Surana. The following design
-- is inspired by that exchange.
-- On one hand, implementing persistent connections is easy. One may say we should
-- have added them long time ago, to match HSQL, HDBC, and similar
-- database interfaces. Alas, implementing persistent connection
-- safely is another matter. The simplest design is like the following
-- > withContinuedSession :: (Typeable a, IE.ISession sess) =>
-- > IE.ConnectA sess -> (forall mark. DBM mark sess a) ->
-- > IO (a, IE.ConnectA sess)
-- > withContinuedSession (IE.ConnectA connecta) m = do
-- > conn <- connecta
-- > r <- runReaderT (unDBM m) conn
-- > return (r,(return conn))
-- so that the connection object is returned as the result and can be
-- used again with withContinuedSession or withSession. The problem is
-- that nothing prevents us from writing:
-- > (r1,conn) <- withContinuedSession (connect "...") query1
-- > r2 <- withSession conn query2
-- > r3 <- withSession conn query3
-- That is, we store the suspended connection and then use it twice.
-- But the first withSession closes the connection. So, the second
-- withSession gets an invalid session object. Invalid in a sense that
-- even memory may be deallocated, so there is no telling what happens
-- next. Also, as we can see, it is difficult to handle errors and
-- automatically dispose of the connections if the fatal error is
-- encountered.
-- All these problems are present in other interfaces... In the
-- case of a suspended connection, the problem is how to enforce the
-- /linear/ access to a variable. It can be enforced, via a
-- state-changing monad. The implementation below makes
-- the non-linear use of a suspended connection a run-time checkable
-- condition. It will be generic and safe - fatal errors close the
-- connection, an attempt to use a closed connection raises an error, and
-- we cannot reuse a connection. We have to write:
-- > (r1, conn1) <- withContinuedSession conn ...
-- > (r2, conn2) <- withContinuedSession conn1 ...
-- > (r3, conn3) <- withContinuedSession conn2 ...
-- etc. If we reuse a suspended connection or use a closed connection,
-- we get a run-time (exception). That is of course not very
-- satisfactory - and yet better than a segmentation fault.
withContinuedSession :: (Typeable a, IE.ISession sess) =>
  IE.ConnectA sess -> (forall mark. DBM mark sess a)
  -> IO (a, IE.ConnectA sess)
withContinuedSession (IE.ConnectA connecta) m =
  do conn <- connecta  -- this invalidates connecta
     -- Note: if there was an error, then disconnect,
     -- but don't disconnect in the success case
     -- (the connecta action will raise an error if the
     -- connection is re-used).
     r <- runReaderT (unDBM m) conn
        `catch` (\e@(SomeException _) -> IE.disconnect conn >> throw e)
     -- make a new, one-shot connecta: the IORef records whether this
     -- suspended connection has already been resumed once.
     hasbeenused <- newIORef False
     let connecta = do
           fl <- readIORef hasbeenused
           when fl $ error "connecta has been re-used"
           writeIORef hasbeenused True
           return conn
     return (r,IE.ConnectA connecta)
-- | Start a transaction at the given isolation level.
beginTransaction ::
  (MonadReader s (ReaderT s IO), IE.ISession s) =>
  IE.IsolationLevel -> DBM mark s ()
beginTransaction il = DBM $ do
  sess <- ask
  lift (IE.beginTransaction sess il)

-- | Commit the current transaction.
commit :: IE.ISession s => DBM mark s ()
commit = DBM $ ask >>= \sess -> lift (IE.commit sess)

-- | Roll back the current transaction.
rollback :: IE.ISession s => DBM mark s ()
rollback = DBM $ ask >>= \sess -> lift (IE.rollback sess)

-- Not exported; shared implementation for 'execDDL' and 'execDML'.
executeCommand :: IE.Command stmt s => stmt -> DBM mark s Int
executeCommand stmt = DBM $ do
  sess <- ask
  lift (IE.executeCommand sess stmt)
-- | DDL operations don't manipulate data, so we return no information.
-- If there is a problem, an exception will be raised.
execDDL :: IE.Command stmt s => stmt -> DBM mark s ()
execDDL stmt = do
  _ <- executeCommand stmt  -- a row count is meaningless for DDL
  return ()

-- | Returns the number of rows affected.
execDML :: IE.Command stmt s => stmt -> DBM mark s Int
execDML = executeCommand

-- | Allows arbitrary actions to be run in the DBM monad.
-- The back-end developer must supply instances of EnvInquiry,
-- which is hidden away in "Database.InternalEnumerator".
-- An example of this is 'Database.Sqlite.Enumerator.LastInsertRowid'.
inquire :: IE.EnvInquiry key s result => key -> DBM mark s result
inquire key = DBM $ do
  sess <- ask
  lift (IE.inquire key sess)
-- --------------------------------------------------------------------
-- -- ** Statements; Prepared statements
-- --------------------------------------------------------------------
-- Not exported: runs the back-end's preparation action against the
-- current session and wraps the result as a PreparedStmt.
executePreparation :: IE.IPrepared stmt sess bstmt bo =>
  IE.PreparationA sess stmt -> DBM mark sess (IE.PreparedStmt mark stmt)
executePreparation (IE.PreparationA action) = DBM $ do
  sess <- ask
  lift (liftM IE.PreparedStmt (action sess))

-- | Wraps a prepared statement; presumably signals that its next
-- result-set should be processed (see $usage_multiresultset) — TODO confirm.
data NextResultSet mark stmt = NextResultSet (IE.PreparedStmt mark stmt)

-- | Marks a value as a reference to a cursor.
data RefCursor a = RefCursor a
-- The exception handling in withPreparedStatement looks awkward,
-- but there's a good reason...
-- Suppose there's some sort of error when we call destroyStmt.
-- The exception handler also must call destroyStmt (because the exception
-- might have also come from the invocation of action), but calling destroyStmt
-- might also raise a new exception (for example, a different error is raised
-- if you re-try a failed CLOSE-cursor, because the transaction is aborted).
-- So we wrap this call with a catch, and ensure that the original exception
-- is preserved and re-raised.
-- | Prepare a statement and run a DBM action over it.
-- This gives us the ability to re-use a statement,
-- for example by passing different bind values for each execution.
-- The Typeable constraint is to prevent the leakage of marked things.
-- The type of bound statements should not be exported (and should not be
-- in Typeable) so the bound statement can't leak either.
withPreparedStatement ::
  (Typeable a, IE.IPrepared stmt sess bstmt bo)
  => IE.PreparationA sess stmt
  -- ^ preparation action to create prepared statement;
  --   this action is usually created by @prepareQuery\/Command@
  -> (IE.PreparedStmt mark stmt -> DBM mark sess a)
  -- ^ DBM action that takes a prepared statement
  -> DBM mark sess a
withPreparedStatement pa action = do
  ps <- executePreparation pa
  gcatch ( do
      v <- action ps
      destroyStmt ps
      return v
    )
    -- On failure (from the action OR the first destroyStmt), destroy
    -- the statement and re-raise the ORIGINAL exception; an exception
    -- from this second destroyStmt (e2) is deliberately discarded so
    -- the original is preserved — see the comment block above.
    (\e@(SomeException _) -> gcatch (destroyStmt ps >> throw e) (\e2@(SomeException _) -> throw e))
-- Not exported.
-- Releases the prepared statement via the back-end's destroyStmt.
destroyStmt :: (IE.ISession sess, IE.IPrepared stmt sess bstmt bo)
  => IE.PreparedStmt mark stmt -> DBM mark sess ()
destroyStmt (IE.PreparedStmt stmt) = DBM $ do
  sess <- ask
  lift (IE.destroyStmt sess stmt)
-- | Applies a prepared statement to bind variables to get a bound statement,
-- which is passed to the provided action.
-- Note that by the time it is passed to the action, the query or command
-- has usually been executed.
-- A bound statement would normally be an instance of
-- 'Database.InternalEnumerator.Statement', so it can be passed to
-- 'Database.Enumerator.doQuery'
-- in order to process the result-set, and also an instance of
-- 'Database.InternalEnumerator.Command', so that we can write
-- re-usable DML statements (inserts, updates, deletes).
-- The Typeable constraint is to prevent the leakage of marked things.
-- The type of bound statements should not be exported (and should not be
-- in Typeable) so the bound statement can't leak either.
withBoundStatement ::
  (Typeable a, IE.IPrepared stmt s bstmt bo)
  => IE.PreparedStmt mark stmt
  -- ^ prepared statement created by withPreparedStatement
  -> [IE.BindA s stmt bo]
  -- ^ bind values
  -> (bstmt -> DBM mark s a)
  -- ^ action to run over bound statement
  -> DBM mark s a
withBoundStatement (IE.PreparedStmt stmt) binds action = DBM $ do
  sess <- ask
  -- bindRun hands the bound statement to a plain-IO continuation, so
  -- unwrap the DBM action and run its ReaderT against this session.
  lift (IE.bindRun sess stmt binds (\bound -> runReaderT (unDBM (action bound)) sess))
-- --------------------------------------------------------------------
-- -- ** Buffers and QueryIteratee
-- --------------------------------------------------------------------
-- |The class QueryIteratee is not for the end user. It provides the
-- interface between the low- and the middle-layers of Takusen. The
-- middle-layer - enumerator - is database-independent then.
class MonadIO m => QueryIteratee m q i seed b |
    i -> m, i -> seed, q -> b where
  -- | Fetch one column value per buffer and apply the iteratee.
  iterApply :: q -> [b] -> seed -> i -> m (IterResult seed)
  -- | Allocate one buffer per iteratee argument, starting at the
  -- given column position.
  allocBuffers :: q -> i -> IE.Position -> m [b]

-- |This instance of the class is the terminating case
-- i.e. where the iteratee function has one argument left.
-- The argument is applied, and the result returned.
instance (IE.DBType a q b, MonadIO m) =>
    QueryIteratee m q (a -> seed -> m (IterResult seed)) seed b where
  iterApply q [buf] seed fn = do
    v <- liftIO $ IE.fetchCol q buf
    fn v seed
  allocBuffers r _ n = liftIO $
    sequence [IE.allocBufferFor (undefined::a) r n]

-- |This instance of the class implements the starting and continuation cases.
instance (QueryIteratee m q i' seed b, IE.DBType a q b)
    => QueryIteratee m q (a -> i') seed b where
  iterApply q (buffer:moreBuffers) seed fn = do
    v <- liftIO $ IE.fetchCol q buffer
    -- Partially apply the iteratee to this column's value and recurse
    -- over the remaining buffers.
    iterApply q moreBuffers seed (fn v)
  allocBuffers q fn n = do
    buffer <- liftIO $ IE.allocBufferFor (undefined::a) q n
    moreBuffers <- allocBuffers q (undefined::i') (n+1)
    return (buffer:moreBuffers)
-- --------------------------------------------------------------------
-- -- ** A Query monad and cursors
-- --------------------------------------------------------------------
-- Type sugar for the left-fold machinery below.
type CollEnumerator i m s = i -> s -> m s
type Self           i m s = i -> s -> m s
type CFoldLeft      i m s = Self i m s -> CollEnumerator i m s

-- |A DBCursor is an IORef-mutable-pair @(a, Maybe f)@, where @a@ is the result-set so far,
-- and @f@ is an IO action that fetches and returns the next row (when applied to True),
-- or closes the cursor (when applied to False).
-- If @Maybe@ f is @Nothing@, then the result-set has been exhausted
-- (or the iteratee function terminated early),
-- and the cursor has already been closed.
newtype DBCursor mark ms a =
  DBCursor (IORef (a, Maybe (Bool-> ms (DBCursor mark ms a))))
-- | The left-fold interface.
doQuery :: (IE.Statement stmt sess q,
    QueryIteratee (DBM mark sess) q i seed b,
    IE.IQuery q sess b) =>
  stmt  -- ^ query
  -> i  -- ^ iteratee function
  -> seed  -- ^ seed value
  -> DBM mark sess seed
doQuery stmt iteratee seed = do
  (lFoldLeft, finalizer) <- doQueryMaker stmt iteratee
  -- 'fix' ties the fold-step to itself to drive the whole result-set;
  -- on any exception run the finalizer (free buffers, destroy the
  -- query) and then re-raise.
  gcatch (fix lFoldLeft iteratee seed)
    (\e@(SomeException _) -> do
      finalizer
      liftIO (throw e)
    )
-- An auxiliary function, not seen by the user.
-- Sets up a query: creates the query object, allocates column
-- buffers, and returns the fold-step function plus a finaliser.
doQueryMaker stmt iteratee = do
    sess <- ask
    -- if buffer allocation raises an exception
    -- (which it might) then we need to clean up the query object.
    query <- liftIO (IE.makeQuery sess stmt)
    buffers <- gcatch (allocBuffers query iteratee 1)
      (\e@(SomeException _) -> liftIO (IE.destroyQuery query >> throw e) )
    let
      -- Free the column buffers and destroy the query object.
      finaliser =
        liftIO (mapM_ (IE.freeBuffer query) buffers >> IE.destroyQuery query)
      -- One fold step: fetch a row; if one arrived, apply the iteratee
      -- and either recurse via self (Right) or finalise and stop (Left
      -- or end of data).
      hFoldLeft self iteratee initialSeed = do
        let
          handle seed True = iterApply query buffers seed iteratee
                             >>= handleIter
          handle seed False = finaliser >> return seed
          handleIter (Right seed) = self iteratee seed
          handleIter (Left seed) = finaliser >> return seed
        liftIO (IE.fetchOneRow query) >>= handle initialSeed
    return (hFoldLeft, finaliser)
-- Another auxiliary function, also not seen by the user.
-- Builds a cursor: an IORef holding (accumulated seed, Maybe step),
-- where the stored step either advances the fold by one row (True)
-- or closes the cursor (False).
openCursor stmt iteratee seed = do
    ref <- liftIO (newIORef (seed,Nothing))
    (lFoldLeft, finalizer) <- doQueryMaker stmt iteratee
    -- Replace the accumulated value, keeping the stored continuation.
    let update v = liftIO $ modifyIORef ref (\ (_, f) -> (v, f))
    let
      -- Record the final seed, run the finaliser, mark cursor closed.
      close finalseed = do
        liftIO$ modifyIORef ref (const (finalseed, Nothing))
        finalizer
        return (DBCursor ref)
    let
      -- k' ties the knot: each step stores a continuation k'' that
      -- either advances (k' again) or closes, then runs one fold step.
      k' fni seed' =
        let
          k fni' seed'' = do
            let k'' flag = if flag then k' fni' seed'' else close seed''
            liftIO$ modifyIORef ref (const (seed'', Just k''))
            return seed''
        in do
          liftIO$ modifyIORef ref (const (seed', Nothing))
          lFoldLeft k fni seed' >>= update
          return $ DBCursor ref
    k' iteratee seed
-- |cursorIsEOF's return value tells you if there are any more rows or not.
-- If you call 'cursorNext' when there are no more rows,
-- a 'DBNoData' exception is thrown.
-- Cursors are automatically closed and freed when:
-- * the iteratee returns @Left a@
-- * the query result-set is exhausted.
-- To make life easier, we've created a 'withCursor' function,
-- which will clean up if an error (exception) occurs,
-- or the code exits early.
-- You can nest them to get interleaving, if you desire:
-- > withCursor query1 iter1 [] $ \c1 -> do
-- > withCursor query2 iter2 [] $ \c2 -> do
-- > r1 <- cursorCurrent c1
-- > r2 <- cursorCurrent c2
-- > ...
-- > return something
-- Note that the type of the functions below is set up so to perpetuate
-- the mark.
cursorIsEOF :: DBCursor mark (DBM mark s) a -> DBM mark s Bool
cursorIsEOF (DBCursor ref) = do
  (_, fetch) <- liftIO (readIORef ref)
  -- Nothing means the cursor was already closed (result-set exhausted
  -- or the iteratee stopped early).
  return (isNothing fetch)

-- |Returns the results fetched so far, processed by iteratee function.
cursorCurrent :: DBCursor mark (DBM mark s) a -> DBM mark s a
cursorCurrent (DBCursor ref) = do
  (acc, _) <- liftIO (readIORef ref)
  return acc

-- |Advance the cursor. Returns the cursor. The return value is usually ignored.
cursorNext :: DBCursor mark (DBM mark s) a
  -> DBM mark s (DBCursor mark (DBM mark s) a)
cursorNext (DBCursor ref) = do
  (_, fetch) <- liftIO (readIORef ref)
  case fetch of
    Nothing -> IE.throwDB IE.DBNoData  -- no more rows to fetch
    Just f  -> f True                  -- True = fetch the next row

-- Returns the cursor. The return value is usually ignored.
-- This function is not available to the end user (i.e. not exported).
-- The cursor is closed automatically when its region exits.
cursorClose c@(DBCursor ref) = do
  (_, fetch) <- liftIO (readIORef ref)
  case fetch of
    Nothing -> return c  -- already closed
    Just f  -> f False   -- False = close the cursor
-- |Ensures cursor resource is properly tidied up in exceptional cases.
-- Propagates exceptions after closing cursor.
-- The Typeable constraint is to prevent cursors and other marked values
-- (like cursor computations) from escaping.
withCursor ::
  ( Typeable a, IE.Statement stmt sess q
  , QueryIteratee (DBM mark sess) q i seed b
  , IE.IQuery q sess b
  ) =>
  stmt  -- ^ query
  -> i  -- ^ iteratee function
  -> seed  -- ^ seed value
  -> (DBCursor mark (DBM mark sess) seed -> DBM mark sess a)  -- ^ action taking cursor parameter
  -> DBM mark sess a
-- gbracket guarantees cursorClose runs even if the action throws.
withCursor stmt iteratee seed =
  gbracket (openCursor stmt iteratee seed) cursorClose
-- Although withTransaction has the same structure as a bracket,
-- we can't use bracket because the resource-release action
-- (commit or rollback) differs between the success and failure cases.
-- |Perform an action as a transaction: commit afterwards,
-- unless there was an exception, in which case rollback.
withTransaction :: (IE.ISession s) =>
  IE.IsolationLevel -> DBM mark s a -> DBM mark s a
withTransaction isolation action = do
  beginTransaction isolation
  let commitAfter = do
        v <- action
        commit
        return v
  -- On any exception: roll back, then re-raise the original exception.
  gcatch commitAfter (\e@(SomeException _) -> rollback >> throw e)
-- --------------------------------------------------------------------
-- -- ** Misc.
-- --------------------------------------------------------------------
-- |Useful utility function, for SQL weenies.
ifNull :: Maybe a  -- ^ nullable value
  -> a  -- ^ value to substitute if first parameter is null i.e. 'Data.Maybe.Nothing'
  -> a
ifNull Nothing  fallback = fallback
ifNull (Just x) _        = x
-- | Another useful utility function.
-- Use this to return a value from an iteratee function (the one passed to
-- 'Database.Enumerator.doQuery').
-- Note that you should probably nearly always use the strict version.
result :: (Monad m) => IterAct m a
result = return . Right

-- |A strict version. This is recommended unless you have a specific need for laziness,
-- as the lazy version will gobble stack and heap.
-- If you have a large result-set (in the order of 10-100K rows or more),
-- it is likely to exhaust the standard 1M GHC stack.
-- Whether or not 'result' eats memory depends on what @x@ does:
-- if it's a delayed computation then it almost certainly will.
-- This includes consing elements onto a list,
-- and arithmetic operations (counting, summing, etc).
result' :: (Monad m) => IterAct m a
-- ($!) forces the seed to WHNF before it is wrapped in Right; do not
-- "simplify" this — moving the seq changes when the thunk is forced.
result' x = return (Right $! x)
-- That's the code... now for the documentation.
-- ====================================================================
-- == Usage notes
-- ====================================================================
-- $usage_example
-- Let's look at some example code:
-- > -- sample code, doesn't necessarily compile
-- > module MyDbExample is
-- >
-- > import Database.Oracle.Enumerator
-- > import Database.Enumerator
-- > ...
-- >
-- > query1Iteratee :: (Monad m) => Int -> String -> Double -> IterAct m [(Int, String, Double)]
-- > query1Iteratee a b c accum = result' ((a, b, c):accum)
-- >
-- > -- non-query actions.
-- > otherActions session = do
-- > execDDL (sql "create table blah")
-- > execDML (cmdbind "insert into blah (...) values (?, ?, ?, ...)" [bindP "v1", bindP (1::Int), ...])
-- > commit
-- > -- Use withTransaction to delimit a transaction.
-- > -- It will commit at the end, or rollback if an error occurs.
-- > withTransaction Serialisable ( do
-- > execDML (sql "update blah ...")
-- > execDML (sql "insert into blah ...")
-- > )
-- >
-- > main :: IO ()
-- > main = do
-- > withSession (connect "user" "password" "server") ( do
-- > -- simple query, returning reversed list of rows.
-- > r <- doQuery (sql "select a, b, c from x") query1Iteratee []
-- > liftIO $ putStrLn $ show r
-- > otherActions session
-- > )
-- Notes:
-- * connection is made by 'Database.Enumerator.withSession',
-- which also disconnects when done i.e. 'Database.Enumerator.withSession'
-- delimits the connection.
-- You must pass it a connection action, which is back-end specific,
-- and created by calling the 'Database.Sqlite.Enumerator.connect'
-- function from the relevant back-end.
-- * inside the session, the usual transaction delimiter commands are usable
-- e.g. 'Database.Enumerator.beginTransaction' 'Database.InternalEnumerator.IsolationLevel',
-- 'Database.Enumerator.commit', 'Database.Enumerator.rollback', and
-- 'Database.Enumerator.withTransaction'.
-- We also provide 'Database.Enumerator.execDML' and 'Database.Enumerator.execDDL'.
-- * non-DML and -DDL commands - i.e. queries - are processed by
-- 'Database.Enumerator.doQuery' (this is the API for our left-fold).
-- See more explanation and examples below in /Iteratee Functions/ and
-- /Bind Parameters/ sections.
-- The first argument to 'Database.Enumerator.doQuery' must be an instance of
-- 'Database.InternalEnumerator.Statement'.
-- Each back-end will provide a useful set of @Statement@ instances
-- and associated constructor functions for them.
-- For example, currently all back-ends have:
-- * for basic, all-text statements (no bind variables, default row-caching)
-- which can be used as queries or commands:
-- > sql "select ..."
-- * for a select with bind variables:
-- > sqlbind "select ?, ... where col = ? and ..." [bindP ..., bindP ...]
-- * for a select with bind variables and row caching:
-- > prefetch 100 "select ?, ... where col = ? and ..." [bindP ..., bindP ...]
-- * for a DML command with bind variables:
-- > cmdbind "insert into ... values (?, ?, ...)" [bindP ..., bindP ...]
-- * for a reusable prepared statement: we have to first create the
-- prepared statement, and then bind in a separate step.
-- This separation lets us re-use prepared statements:
-- > let stmt = prepareQuery (sql "select ? ...")
-- > withPreparedStatement stmt $ \pstmt ->
-- > withBoundStatement pstmt [bindP ..., bindP ...] $ \bstmt -> do
-- > result <- doQuery bstmt iter seed
-- > ...
-- The PostgreSQL backend additionally requires that when preparing statements,
-- you (1) give a name to the prepared statement,
-- and (2) specify types for the bind parameters.
-- The list of bind-types is created by applying the
-- 'Database.PostgreSQL.Enumerator.bindType' function
-- to dummy values of the appropriate types. e.g.
-- > let stmt = prepareQuery "stmtname" (sql "select ...") [bindType "", bindType (0::Int)]
-- > withPreparedStatement stmt $ \pstmt -> ...
-- A longer explanation of prepared statements and
-- bind variables is in the Bind Parameters section below.
-- $usage_iteratee
-- 'Database.Enumerator.doQuery' takes an iteratee function, of n arguments.
-- Argument n is the accumulator (or seed).
-- For each row that is returned by the query,
-- the iteratee function is called with the data from that row in
-- arguments 1 to n-1, and the current accumulated value in the argument n.
-- The iteratee function returns the next value of the accumulator,
-- wrapped in an 'Data.Either.Either'.
-- If the 'Data.Either.Either' value is @Left@, then the query will terminate,
-- returning the wrapped accumulator\/seed value.
-- If the value is @Right@, then the query will continue, with the next row
-- being fed to the iteratee function, along with the new accumulator\/seed value.
-- In the example above, @query1Iteratee@ simply conses the new row (as a tuple)
-- to the front of the accumulator.
-- The initial seed passed to 'Database.Enumerator.doQuery' was an empty list.
-- Consing the rows to the front of the list results in a list
-- with the rows in reverse order.
-- The types of values that can be used as arguments to the iteratee function
-- are back-end specific; they must be instances of the class
-- 'Database.InternalEnumerator.DBType'.
-- Most backends directly support the usual lowest-common-denominator set
-- supported by most DBMS's: 'Data.Int.Int', 'Data.Char.String',
-- 'Prelude.Double', 'Data.Time.UTCTime'.
-- ('Data.Int.Int64' is often, but not always, supported.)
-- By directly support we mean there is type-specific marshalling code
-- implemented.
-- Indirect support for 'Text.Read.Read'- and 'Text.Show.Show'-able types
-- is supported by marshalling to and from 'Data.Char.String's.
-- This is done automatically by the back-end;
-- there is no need for user-code to perform the marshalling,
-- as long as instances of 'Text.Read.Read' and 'Text.Show.Show' are defined.
-- The iteratee function operates in the 'DBM' monad,
-- so if you want to do IO in it you must use 'Control.Monad.Trans.liftIO'
-- (e.g. @liftIO $ putStrLn \"boo\"@ ) to lift the IO action into 'DBM'.
-- The iteratee function is not restricted to just constructing lists.
-- For example, a simple counter function would ignore its arguments,
-- and the accumulator would simply be the count e.g.
-- > counterIteratee :: (Monad m) => Int -> IterAct m Int
-- > counterIteratee _ i = result' $ (1 + i)
-- The iteratee function that you pass to 'Database.Enumerator.doQuery'
-- needs type information,
-- at least for the arguments if not the return type (which is typically
-- determined by the type of the seed).
-- The type synonyms 'IterAct' and 'IterResult' give some convenience
-- in writing type signatures for iteratee functions:
-- > type IterResult seedType = Either seedType seedType
-- > type IterAct m seedType = seedType -> m (IterResult seedType)
-- Without them, the type for @counterIteratee@ would be:
-- > counterIteratee :: (Monad m) => Int -> Int -> m (Either Int Int)
-- which doesn't seem so onerous, but for more elaborate seed types
-- (think large tuples) it certainly helps e.g.
-- > iter :: Monad m =>
-- > String -> Double -> UTCTime -> [(String, Double, UTCTime)]
-- > -> m (Either [(String, Double, UTCTime)] [(String, Double, UTCTime)] )
-- reduces to (by using 'IterAct' and 'IterResult'):
-- > iter :: Monad m =>
-- > String -> Double -> UTCTime -> IterAct m [(String, Double, UTCTime)]
-- $usage_result
-- The 'result' (lazy) and @result\'@ (strict) functions are another convenient shorthand
-- for returning values from iteratee functions. The return type from an iteratee is actually
-- @Either seed seed@, where you return @Right@ if you want processing to continue,
-- or @Left@ if you want processing to stop before the result-set is exhausted.
-- The common case is:
-- > query1Iteratee a b c accum = return (Right ((a, b, c):accum))
-- which we can write as
-- > query1Iteratee a b c accum = result $ (a, b, c):accum)
-- We have lazy and strict versions of @result@. The strict version is almost certainly
-- the one you want to use. If you come across a case where the lazy function is useful,
-- please tell us about it. The lazy function tends to exhaust the stack for large result-sets,
-- whereas the strict function does not.
-- This is due to the accumulation of a large number of unevaluated thunks,
-- and will happen even for simple arithmetic operations such as counting or summing.
-- If you use the lazy function and you have stack\/memory problems, do some profiling.
-- With GHC:
-- * ensure the iteratee has its own cost-centre (make it a top-level function)
-- * compile with @-prof -auto-all@
-- * run with @+RTS -p -hr -RTS@
-- * run @hp2ps@ over the resulting @.hp@ file to get a @.ps@ document, and take a look at it.
-- Retainer sets are listed on the RHS, and are prefixed with numbers e.g. (13)CAF, (2)SYSTEM.
-- At the bottom of the @.prof@ file you'll find the full descriptions of the retainer sets.
-- Match the number in parentheses on the @.ps@ graph with a SET in the @.prof@ file;
-- the one at the top of the @.ps@ graph is the one using the most memory.
-- You'll probably find that the lazy iteratee is consuming all of the stack with lazy thunks,
-- which is why we recommend the strict function.
-- $usage_rank2_types
-- In some examples we use the application operator ($) instead of parentheses
-- (some might argue that this is a sign of developer laziness).
-- At first glance, ($) and conventional function application via juxtaposition
-- seem to be interchangeable e.g.
-- > liftIO (putStrLn (show x))
-- looks equivalent to
-- > liftIO $ putStrLn $ show x
-- But they're not, because Haskell's type system gives us a nice compromise.
-- In a Hindley-Milner type system (like ML) there is no difference between
-- ($) and function application, because polymorphic functions are not
-- first-class and cannot be passed to other functions.
-- At the other end of the scale, ($) and function application in System F
-- are equivalent, because polymorphic functions can be passed to other
-- functions. However, type inference in System F is undecidable.
-- Haskell hits the sweet spot: maintaining full inference,
-- and permitting rank-2 polymorphism, in exchange for very few
-- type annotations. Only functions that take polymorphic functions (and
-- thus are higher-rank) need type signatures. Rank-2 types can't be
-- inferred. The function ($) is a regular, rank-1 function, and so
-- it can't take polymorphic functions as arguments and return
-- polymorphic functions.
-- Here's an example where ($) fails:
-- we supply a simple test program in the README file.
-- If you change the @withSession@ line to use ($), like so
-- (and remove the matching end-parenthesis):
-- > withSession (connect "sqlite_db") $ do
-- then you get the error:
-- > Main.hs:7:38:
-- > Couldn't match expected type `forall mark. DBM mark Session a'
-- > against inferred type `a1 b'
-- > In the second argument of `($)', namely
-- > ...
-- Another way of rewriting it is like this, where we separate the
-- 'Database.Enumerator.DBM' action into another function:
-- > {-# OPTIONS -fglasgow-exts #-}
-- > module Main where
-- > import Database.Sqlite.Enumerator
-- > import Control.Monad.Trans (liftIO)
-- > main = flip catchDB reportRethrow $
-- > withSession (connect "sqlite_db") hello
-- >
-- > hello = withTransaction RepeatableRead $ do
-- > let iter (s::String) (_::String) = result s
-- > result <- doQuery (sql "select 'Hello world.'") iter ""
-- > liftIO (putStrLn result)
-- which gives this error:
-- > Main.hs:9:2:
-- > Inferred type is less polymorphic than expected
-- > Quantified type variable `mark' is mentioned in the environment:
-- > hello :: DBM mark Session () (bound at Main.hs:15:0)
-- > ...
-- This is just the monomorphism restriction in action.
-- Sans a type signature, the function `hello' is monomorphised
-- (that is, `mark' is replaced with (), per GHC rules).
-- This is easily fixed by adding this type declaration:
-- > hello :: DBM mark Session ()
-- $usage_bindparms
-- Support for bind variables varies between DBMS's.
-- We call 'Database.Enumerator.withPreparedStatement' function to prepare
-- the statement, and then call 'Database.Enumerator.withBoundStatement'
-- to provide the bind values and execute the query.
-- The value returned by 'Database.Enumerator.withBoundStatement'
-- is an instance of the 'Database.InternalEnumerator.Statement' class,
-- so it can be passed to 'Database.Enumerator.doQuery' for result-set processing.
-- When we call 'Database.Enumerator.withPreparedStatement', we must pass
-- it a \"preparation action\", which is simply an action that returns
-- the prepared query. The function to create this action varies between backends,
-- and by convention is called 'Database.PostgreSQL.Enumerator.prepareQuery'.
-- For DML statements, you must use 'Database.PostgreSQL.Enumerator.prepareCommand',
-- as the library needs to do something different depending on whether or not the
-- statement returns a result-set.
-- For queries with large result-sets, we provide
-- 'Database.PostgreSQL.Enumerator.prepareLargeQuery',
-- which takes an extra parameter: the number of rows to prefetch
-- in a network call to the server.
-- This aids performance in two ways:
-- 1. you can limit the number of rows that come back to the
-- client, in order to use less memory, and
-- 2. the client library will cache rows, so that a network call to
-- the server is not required for every row processed.
-- With PostgreSQL, we must specify the types of the bind parameters
-- when the query is prepared, so the 'Database.PostgreSQL.Enumerator.prepareQuery'
-- function takes a list of 'Database.PostgreSQL.Enumerator.bindType' values.
-- Also, PostgreSQL requires that prepared statements are named,
-- although you can use \"\" as the name.
-- With Sqlite and Oracle, we simply pass the query text to
-- 'Database.PostgreSQL.Sqlite.prepareQuery',
-- so things are slightly simpler for these backends.
-- Perhaps an example will explain it better:
-- > postgresBindExample = do
-- > let
-- > query = sql "select blah from blahblah where id = ? and code = ?"
-- > iter :: (Monad m) => String -> IterAct m [String]
-- > iter s acc = result $ s:acc
-- > bindVals = [bindP (12345::Int), bindP "CODE123"]
-- > bindTypes = [bindType (0::Int), bindType ""]
-- > withPreparedStatement (prepareQuery "stmt1" query bindTypes) $ \pstmt -> do
-- > withBoundStatement pstmt bindVals $ \bstmt -> do
-- > actual <- doQuery bstmt iter []
-- > liftIO (print actual)
-- Note that we pass @bstmt@ to 'Database.Enumerator.doQuery';
-- this is the bound statement object created by
-- 'Database.Enumerator.withBoundStatement'.
-- The Oracle\/Sqlite example code is almost the same, except for the
-- call to 'Database.Sqlite.Enumerator.prepareQuery':
-- > sqliteBindExample = do
-- > let
-- > query = sql "select blah from blahblah where id = ? and code = ?"
-- > iter :: (Monad m) => String -> IterAct m [String]
-- > iter s acc = result $ s:acc
-- > bindVals = [bindP (12345::Int), bindP "CODE123"]
-- > withPreparedStatement (prepareQuery query) $ \pstmt -> do
-- > withBoundStatement pstmt bindVals $ \bstmt -> do
-- > actual <- doQuery bstmt iter []
-- > liftIO (print actual)
-- It can be a bit tedious to always use the @withPreparedStatement+withBoundStatement@
-- combination, so for the case where you don't plan to re-use the query,
-- we support a short-cut for bundling the query text and parameters.
-- The next example is valid for PostgreSQL, Sqlite, and Oracle
-- (the Sqlite implementation provides a dummy 'Database.Sqlite.Enumerator.prefetch'
-- function to ensure we have a consistent API).
-- Sqlite has no facility for prefetching - it's an embedded database, so no
-- network round-trip - so the Sqlite implementation ignores the prefetch count:
-- > bindShortcutExample = do
-- > let
-- > iter :: (Monad m) => String -> IterAct m [String]
-- > iter s acc = result $ s:acc
-- > bindVals = [bindP (12345::Int), bindP "CODE123"]
-- > query = prefetch 1000 "select blah from blahblah where id = ? and code = ?" bindVals
-- > actual <- doQuery query iter []
-- > liftIO (print actual)
-- A caveat of using prefetch with PostgreSQL is that you must be inside a transaction.
-- This is because the PostgreSQL implementation uses a cursor and \"FETCH FORWARD\"
-- to implement fetching a block of rows in a single network call,
-- and PostgreSQL requires that cursors are only used inside transactions.
-- It can be as simple as wrapping calls to 'Database.Enumerator.doQuery' by
-- 'Database.Enumerator.withTransaction',
-- or you may prefer to delimit your transactions elsewhere (the API supports
-- 'Database.InternalEnumerator.beginTransaction' and
-- 'Database.InternalEnumerator.commit', if you prefer to use them):
-- > withTransaction RepeatableRead $ do
-- > actual <- doQuery query iter []
-- > liftIO (print actual)
-- You may have noticed that for 'Data.Int.Int' and 'Prelude.Double' literal
-- bind values, we have to tell the compiler the type of the literal.
-- This is due to interaction with the numeric literal defaulting mechanism.
-- For non-numeric literals the compiler can (usually) determine the correct types to use.
-- If you omit type information for numeric literals, from GHC the error
-- message looks something like this:
-- > Database/PostgreSQL/Test/Enumerator.lhs:194:4:
-- > Overlapping instances for Database.InternalEnumerator.DBBind a
-- > Session
-- > Database.PostgreSQL.PGEnumerator.PreparedStmt
-- > Database.PostgreSQL.PGEnumerator.BindObj
-- > arising from use of `bindP' at Database/PostgreSQL/Test/Enumerator.lhs:194:4-8
-- > Matching instances:
-- > Imported from Database.PostgreSQL.PGEnumerator:
-- > instance (Database.InternalEnumerator.DBBind (Maybe a)
-- > Session
-- > Database.PostgreSQL.PGEnumerator.PreparedStmt
-- > Database.PostgreSQL.PGEnumerator.BindObj) =>
-- > Database.InternalEnumerator.DBBind a
-- > Session
-- > Database.PostgreSQL.PGEnumerator.PreparedStmt
-- > Database.PostgreSQL.PGEnumerator.BindObj
-- > Imported from Database.PostgreSQL.PGEnumerator:
-- > instance Database.InternalEnumerator.DBBind (Maybe Double)
-- > ....
-- $usage_multiresultset
-- Support for returning multiple result sets from a single
-- statement exists for PostgreSQL and Oracle.
-- Such functionality does not exist in Sqlite.
-- The general idea is to invoke a database procedure or function which
-- returns cursor variables. The variables can be processed by
-- 'Database.Enumerator.doQuery' in one of two styles: linear or nested.
-- /Linear style:/
-- If we assume the existence of the following PostgreSQL function,
-- which is used in the test suite in "Database.PostgreSQL.Test.Enumerator":
-- > CREATE OR REPLACE FUNCTION takusenTestFunc() RETURNS SETOF refcursor AS $$
-- > DECLARE refc1 refcursor; refc2 refcursor;
-- > BEGIN
-- > OPEN refc1 FOR SELECT n*n from t_natural where n < 10 order by 1;
-- > RETURN NEXT refc1;
-- > OPEN refc2 FOR SELECT n, n*n, n*n*n from t_natural where n < 10 order by 1;
-- > RETURN NEXT refc2;
-- > END;$$ LANGUAGE plpgsql;
-- ... then this code shows how linear processing of cursors would be done:
-- > withTransaction RepeatableRead $ do
-- > withPreparedStatement (prepareQuery "stmt1" (sql "select * from takusenTestFunc()") []) $ \pstmt -> do
-- > withBoundStatement pstmt [] $ \bstmt -> do
-- > dummy <- doQuery bstmt iterMain []
-- > result1 <- doQuery (NextResultSet pstmt) iterRS1 []
-- > result2 <- doQuery (NextResultSet pstmt) iterRS2 []
-- > where
-- > iterMain :: (Monad m) => (RefCursor String) -> IterAct m [RefCursor String]
-- > iterMain c acc = result (acc ++ [c])
-- > iterRS1 :: (Monad m) => Int -> IterAct m [Int]
-- > iterRS1 i acc = result (acc ++ [i])
-- > iterRS2 :: (Monad m) => Int -> Int -> Int -> IterAct m [(Int, Int, Int)]
-- > iterRS2 i i2 i3 acc = result (acc ++ [(i, i2, i3)])
-- Notes:
-- * the use of a 'Database.Enumerator.RefCursor' 'Data.Char.String'
-- type in the iteratee function indicates
-- to the backend that it should save each cursor value returned,
-- which it does by stuffing them into a list attached to the
-- prepared statement object.
-- This means that we /must/ use 'Database.Enumerator.withPreparedStatement'
-- to create a prepared statement object; the prepared statement object
-- is the container for the cursors returned.
-- * in this example we choose to discard the results of the first iteratee.
-- This is not necessary, but in this case the only column is a
-- 'Database.Enumerator.RefCursor', and the values are already saved
-- in the prepared statement object.
-- * saved cursors are consumed one-at-a-time by calling 'Database.Enumerator.doQuery',
-- passing 'Database.Enumerator.NextResultSet' @pstmt@
-- (i.e. passing the prepared statement object wrapped by
-- 'Database.Enumerator.NextResultSet').
-- This simply pulls the next cursor off the list
-- - they're processed in the order they were pushed on (FIFO) -
-- and processes it with the given iteratee.
-- * if you try to process too many cursors i.e. make too many calls
-- to 'Database.Enumerator.doQuery' passing 'Database.Enumerator.NextResultSet' @pstmt@,
-- then an exception will be thrown.
-- OTOH, failing to process returned cursors will not raise errors,
-- but the cursors will remain open on the server according to whatever scoping
-- rules the server applies.
-- For PostgreSQL, this will be until the transaction (or session) ends.
-- /Nested style:/
-- The linear style of cursor processing is the only style supported by
-- MS SQL Server and ODBC (which we do not yet support).
-- However, PostgreSQL and Oracle also support using nested cursors in queries.
-- Again for PostgreSQL, assuming we have these functions in the database:
-- > CREATE OR REPLACE FUNCTION takusenTestFunc(lim int4) RETURNS refcursor AS $$
-- > DECLARE refc refcursor;
-- > BEGIN
-- > OPEN refc FOR SELECT n, takusenTestFunc2(n) from t_natural where n < lim order by n;
-- > RETURN refc;
-- > END; $$ LANGUAGE plpgsql;
-- > CREATE OR REPLACE FUNCTION takusenTestFunc2(lim int4) RETURNS refcursor AS $$
-- > DECLARE refc refcursor;
-- > BEGIN
-- > OPEN refc FOR SELECT n from t_natural where n < lim order by n;
-- > RETURN refc;
-- > END; $$ LANGUAGE plpgsql;
-- ... then this code shows how nested queries might work:
-- > selectNestedMultiResultSet = do
-- > let
-- > q = "SELECT n, takusenTestFunc(n) from t_natural where n < 10 order by n"
-- > iterMain (i::Int) (c::RefCursor String) acc = result' ((i,c):acc)
-- > iterInner (i::Int) (c::RefCursor String) acc = result' ((i,c):acc)
-- > iterInner2 (i::Int) acc = result' (i:acc)
-- > withTransaction RepeatableRead $ do
-- > rs <- doQuery (sql q) iterMain []
-- > flip mapM_ rs $ \(outer, c) -> do
-- > rs <- doQuery c iterInner []
-- > flip mapM_ rs $ \(inner, c) -> do
-- > rs <- doQuery c iterInner2 []
-- > flip mapM_ rs $ \i -> do
-- > liftIO (putStrLn (show outer ++ " " ++ show inner ++ " " ++ show i))
-- Just to make it clear: the outer query returns a result-set that includes
-- a 'Database.Enumerator.RefCursor' column. Each cursor from that column is passed to
-- 'Database.Enumerator.doQuery' to process its result-set;
-- here we use 'Control.Monad.mapM_' to apply an IO action to the list returned by
-- 'Database.Enumerator.doQuery'.
-- For Oracle the example is slightly different.
-- The reason it's different is that:
-- * Oracle requires that the parent cursor must remain open
-- while processing the children
-- (in the PostgreSQL example, 'Database.Enumerator.doQuery'
-- closes the parent cursor after constructing the list,
-- before the list is processed. This is OK because PostgreSQL
-- keeps the child cursors open on the server until they are explicitly
-- closed, or the transaction or session ends).
-- * our current Oracle implementation prevents marshalling
-- of the cursor in the result-set buffer to a Haskell value,
-- so each fetch overwrites the buffer value with a new cursor.
-- This means you have to fully process a given cursor before
-- fetching the next one.
-- Contrast this with the PostgreSQL example above,
-- where the entire result-set is processed to give a
-- list of RefCursor values, and then we run a list of actions
-- over this list with 'Control.Monad.mapM_'.
-- This is possible because PostgreSQL refcursors are just the
-- database cursor names, which are Strings, which we can marshal
-- to Haskell values easily.
-- > selectNestedMultiResultSet = do
-- > let
-- > q = "select n, cursor(SELECT nat2.n, cursor"
-- > ++ " (SELECT nat3.n from t_natural nat3 where nat3.n < nat2.n order by n)"
-- > ++ " from t_natural nat2 where nat2.n < nat.n order by n)"
-- > ++ " from t_natural nat where n < 10 order by n"
-- > iterMain (outer::Int) (c::RefCursor StmtHandle) acc = do
-- > rs <- doQuery c (iterInner outer) []
-- > result' ((outer,c):acc)
-- > iterInner outer (inner::Int) (c::RefCursor StmtHandle) acc = do
-- > rs <- doQuery c (iterInner2 outer inner) []
-- > result' ((inner,c):acc)
-- > iterInner2 outer inner (i::Int) acc = do
-- > liftIO (putStrLn (show outer ++ " " ++ show inner ++ " " ++ show i))
-- > result' (i:acc)
-- > withTransaction RepeatableRead $ do
-- > rs <- doQuery (sql q) iterMain []
-- > return ()
-- Note that the PostgreSQL example can also be written like this
-- (except, of course, that the actual query text is that
-- from the PostgreSQL example).
-- --------------------------------------------------------------------
-- -- Haddock notes:
-- --------------------------------------------------------------------
-- The best way (that I've found) to get a decent introductory/explanatory
-- section for the module is to break the explanation into named chunks
-- (these begin with -- $<chunk-name>),
-- put the named chunks at the end, and reference them in the export list.
-- You *can* write the introduction inline, as part of the module description,
-- but Haddock has no way to make headings.
-- Instead, if you make an explicit export-list then you can use
-- the "-- *", "-- **", etc, syntax to give section headings.
-- (Note: if you don't use an explicit export list, then Haddock will use "-- *" etc
-- comments to make headings. The headings will appear in the docs in the same locations
-- as they do in the source, as do functions, data types, etc.)
-- - One blank line continues a comment block. Two or more end it.
-- - The module comment must contain an empty line between "Portability: ..." and the description.
-- - bullet-lists:
-- - items must be preceded by an empty line.
-- - each list item must start with "*".
-- - code-sections:
-- - must be preceded by an empty line.
-- - use " >" rather than @...@, because "@" allows markup translation, where " >" doesn't.
-- - @inline code (monospaced font)@
-- - /emphasised text/
-- - links: "Another.Module", 'someIdentifier' (same module),
-- 'Another.Module.someIdentifier', <http:/www.haskell.org/haddock>
|
paulrzcz/takusen-oracle
|
Database/Enumerator.hs
|
bsd-3-clause
| 53,838
| 0
| 21
| 12,133
| 4,910
| 2,859
| 2,051
| 265
| 3
|
module Machine where
-- $Id$
import Machine.Class
import Machine.Vorrechnen
import Machine.Akzeptieren
import Machine.Fun
|
Erdwolf/autotool-bonn
|
src/Machine.hs
|
gpl-2.0
| 128
| 0
| 4
| 19
| 25
| 16
| 9
| 5
| 0
|
-- | A description of the platform we're compiling for.
--
module Platform (
Platform(..),
Arch(..),
OS(..),
ArmISA(..),
ArmISAExt(..),
ArmABI(..),
target32Bit,
isARM,
osElfTarget,
osMachOTarget,
platformUsesFrameworks,
platformBinariesAreStaticLibs,
)
where
-- | Contains enough information for the native code generator to emit
-- code for this platform.
data Platform
= Platform {
platformArch :: Arch,
platformOS :: OS,
-- Word size in bytes (i.e. normally 4 or 8,
-- for 32bit and 64bit platforms respectively)
platformWordSize :: {-# UNPACK #-} !Int,
-- NOTE(review): the four Bool fields look like toolchain/codegen feature
-- flags (unregisterised build, GNU non-exec stack, .ident directive,
-- subsections-via-symbols) -- confirm against the build-system settings.
platformUnregisterised :: Bool,
platformHasGnuNonexecStack :: Bool,
platformHasIdentDirective :: Bool,
platformHasSubsectionsViaSymbols :: Bool
}
deriving (Read, Show, Eq)
-- | Architectures that the native code generator knows about.
-- TODO: It might be nice to extend these constructors with information
-- about what instruction set extensions an architecture might support.
--
-- Note that 'ArchARM' is the only constructor carrying extra payload:
-- the ISA revision, supported ISA extensions, and the ABI in use.
data Arch
= ArchUnknown
| ArchX86
| ArchX86_64
| ArchPPC
| ArchPPC_64
| ArchSPARC
| ArchARM
{ armISA :: ArmISA
, armISAExt :: [ArmISAExt]
, armABI :: ArmABI
}
| ArchAlpha
| ArchMipseb
| ArchMipsel
| ArchJavaScript
deriving (Read, Show, Eq)
-- | Is this an ARM architecture (any ISA revision, extensions or ABI)?
isARM :: Arch -> Bool
isARM arch = case arch of
  ArchARM {} -> True
  _          -> False
-- | Operating systems that the native code generator knows about.
-- Having OSUnknown should produce a sensible default, but no promises.
data OS
= OSUnknown
| OSLinux
| OSDarwin
| OSiOS
| OSSolaris2
| OSMinGW32
| OSFreeBSD
| OSDragonFly
| OSOpenBSD
| OSNetBSD
| OSKFreeBSD
| OSHaiku
| OSOsf3
| OSQNXNTO
| OSAndroid
deriving (Read, Show, Eq)
-- | ARM Instruction Set Architecture, Extensions and ABI
--
data ArmISA
= ARMv5
| ARMv6
| ARMv7
deriving (Read, Show, Eq)
-- | ARM instruction-set extensions (vector floating point / SIMD units).
data ArmISAExt
= VFPv2
| VFPv3
| VFPv3D16
| NEON
| IWMMX2
deriving (Read, Show, Eq)
-- NOTE(review): SOFT\/SOFTFP\/HARD presumably name the ARM floating-point
-- calling conventions -- confirm against the code that consumes 'ArmABI'.
data ArmABI
= SOFT
| SOFTFP
| HARD
deriving (Read, Show, Eq)
-- | True when the platform's word size is 4 bytes, i.e. a 32-bit target.
target32Bit :: Platform -> Bool
target32Bit = (== 4) . platformWordSize
-- | This predicate tells us whether the OS supports ELF-like shared libraries.
--
-- Defaulting to False is safe; it means don't rely on any ELF-specific
-- functionality. It is important to have a default for portability,
-- otherwise we have to answer this question for every new platform we
-- compile on (even unreg).
osElfTarget :: OS -> Bool
osElfTarget os = case os of
  OSLinux     -> True
  OSFreeBSD   -> True
  OSDragonFly -> True
  OSOpenBSD   -> True
  OSNetBSD    -> True
  OSSolaris2  -> True
  OSDarwin    -> False
  OSiOS       -> False
  OSMinGW32   -> False
  OSKFreeBSD  -> True
  OSHaiku     -> True
  OSOsf3      -> False -- I don't know if this is right, but as
                       -- per the comment above it's safe
  OSQNXNTO    -> False
  OSAndroid   -> True
  OSUnknown   -> False
-- | This predicate tells us whether the OS supports Mach-O shared libraries.
osMachOTarget :: OS -> Bool
osMachOTarget os = case os of
  OSDarwin -> True
  _        -> False
-- | Do binaries on this OS link against Apple-style frameworks?
-- True exactly for Darwin and iOS.
osUsesFrameworks :: OS -> Bool
osUsesFrameworks os = os `elem` [OSDarwin, OSiOS]
-- | Lift 'osUsesFrameworks' to whole platforms.
platformUsesFrameworks :: Platform -> Bool
platformUsesFrameworks platform = osUsesFrameworks (platformOS platform)
-- | On this OS, are "binaries" really static libraries? True only for iOS.
osBinariesAreStaticLibs :: OS -> Bool
osBinariesAreStaticLibs os = os == OSiOS
-- | Lift 'osBinariesAreStaticLibs' to whole platforms.
platformBinariesAreStaticLibs :: Platform -> Bool
platformBinariesAreStaticLibs = osBinariesAreStaticLibs . platformOS
|
jwiegley/ghc-release
|
compiler/utils/Platform.hs
|
gpl-3.0
| 4,167
| 0
| 9
| 1,300
| 690
| 407
| 283
| 108
| 1
|
module Models.Chapter where
import BasicPrelude ( Show
, Text
)
import Data.Aeson ( ToJSON )
import GHC.Generics ( Generic )
import Models.SubChapter
import Models.Section
-- | A chapter, carrying its display name, chapter number, source URL
-- and body content.
data Chapter =
Chapter {
name :: Text,
number :: Text,
url :: Text,
content :: ChapterContent
} deriving (Generic, Show)
-- | Chapter body: either a flat list of 'Section's, or a list of
-- 'SubChapter's each containing their own sections.
data ChapterContent = SimpleChapterContent [Section]
| ComplexChapterContent [SubChapter]
deriving (Generic, Show)
-- JSON encoding comes from the GHC.Generics-based defaults of 'ToJSON'.
instance ToJSON ChapterContent
instance ToJSON Chapter
|
dogweather/nevada-revised-statutes-parser
|
src/Models/Chapter.hs
|
bsd-3-clause
| 740
| 0
| 8
| 339
| 136
| 80
| 56
| 19
| 0
|
<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE helpset PUBLIC "-//Sun Microsystems Inc.//DTD JavaHelp HelpSet Version 2.0//EN" "http://java.sun.com/products/javahelp/helpset_2_0.dtd">
<helpset version="2.0" xml:lang="ru-RU">
<title>Правила активного сканирования - Альфа | Расширение ZAP </title>
<maps>
<homeID>top</homeID>
<mapref location="map.jhm"/>
</maps>
<view>
<name>TOC</name>
<label>СОДЕРЖАНИЕ </label>
<type>org.zaproxy.zap.extension.help.ZapTocView</type>
<data>toc.xml</data>
</view>
<view>
<name>Index</name>
<label>Индекс</label>
<type>javax.help.IndexView</type>
<data>index.xml</data>
</view>
<view>
<name>Search</name>
<label>Поиск</label>
<type>javax.help.SearchView</type>
<data engine="com.sun.java.help.search.DefaultSearchEngine">
JavaHelpSearch
</data>
</view>
<view>
<name>Favorites</name>
<label>Избранное</label>
<type>javax.help.FavoritesView</type>
</view>
</helpset>
|
thc202/zap-extensions
|
addOns/ascanrulesAlpha/src/main/javahelp/org/zaproxy/zap/extension/ascanrulesAlpha/resources/help_ru_RU/helpset_ru_RU.hs
|
apache-2.0
| 1,077
| 78
| 67
| 164
| 552
| 276
| 276
| -1
| -1
|
module Vectorise.Utils.Base
( voidType
, newLocalVVar
, mkDataConTag, dataConTagZ
, mkWrapType
, mkClosureTypes
, mkPReprType
, mkPDataType, mkPDatasType
, splitPrimTyCon
, mkBuiltinCo
, wrapNewTypeBodyOfWrap
, unwrapNewTypeBodyOfWrap
, wrapNewTypeBodyOfPDataWrap
, unwrapNewTypeBodyOfPDataWrap
, wrapNewTypeBodyOfPDatasWrap
, unwrapNewTypeBodyOfPDatasWrap
, pdataReprTyCon
, pdataReprTyConExact
, pdatasReprTyConExact
, pdataUnwrapScrut
, preprSynTyCon
) where
import Vectorise.Monad
import Vectorise.Vect
import Vectorise.Builtins
import CoreSyn
import CoreUtils
import FamInstEnv
import Coercion
import Type
import TyCon
import DataCon
import MkId
import DynFlags
import FastString
#include "HsVersions.h"
-- Simple Types ---------------------------------------------------------------
-- | The builtin \'void\' type: the builtin 'voidTyCon' applied to no arguments.
voidType :: VM Type
voidType = mkBuiltinTyConApp voidTyCon []
-- Name Generation ------------------------------------------------------------
-- | Make a fresh vectorised variable pair named after @fs@: a variable of the
-- scalar type @vty@ together with one of the corresponding 'PData' type.
newLocalVVar :: FastString -> Type -> VM VVar
newLocalVVar fs vty
  = do { lty     <- mkPDataType vty
       ; vectVar <- newLocalVar fs vty
       ; liftVar <- newLocalVar fs lty
       ; return (vectVar, liftVar)
       }
-- Constructors ---------------------------------------------------------------
-- | Make a Core 'Int' literal for the zero-based tag of a data constructor.
mkDataConTag :: DynFlags -> DataCon -> CoreExpr
mkDataConTag dflags con = mkIntLitInt dflags (dataConTagZ con)
-- | Zero-based constructor tag ('dataConTag' counts from 'fIRST_TAG').
dataConTagZ :: DataCon -> Int
dataConTagZ = subtract fIRST_TAG . dataConTag
-- Type Construction ----------------------------------------------------------
-- |Make an application of the 'Wrap' type constructor.
--
mkWrapType :: Type -> VM Type
mkWrapType ty = mkBuiltinTyConApp wrapTyCon [ty]
-- |Make an application of the closure type constructor.
--
mkClosureTypes :: [Type] -> Type -> VM Type
mkClosureTypes = mkBuiltinTyConApps closureTyCon
-- |Make an application of the 'PRepr' type constructor.
--
mkPReprType :: Type -> VM Type
mkPReprType ty = mkBuiltinTyConApp preprTyCon [ty]
-- | Make an application of the 'PData' tycon to some argument.
--
mkPDataType :: Type -> VM Type
mkPDataType ty = mkBuiltinTyConApp pdataTyCon [ty]
-- | Make an application of the 'PDatas' tycon to some argument.
--
mkPDatasType :: Type -> VM Type
mkPDatasType ty = mkBuiltinTyConApp pdatasTyCon [ty]
-- Make an application of a builtin type constructor to some arguments.
--
-- Looks the tycon up in the vectoriser's 'Builtins' record via 'builtin',
-- then applies it to @tys@ with 'mkTyConApp'.
mkBuiltinTyConApp :: (Builtins -> TyCon) -> [Type] -> VM Type
mkBuiltinTyConApp get_tc tys
= do { tc <- builtin get_tc
; return $ mkTyConApp tc tys
}
-- Make a cascading application of a builtin type constructor.
--
-- Because the fold is a 'foldr' over the binary 'mk', the applications nest
-- to the right, ending in @ty@:  @tc t1 (tc t2 (... (tc tn ty)))@.
mkBuiltinTyConApps :: (Builtins -> TyCon) -> [Type] -> Type -> VM Type
mkBuiltinTyConApps get_tc tys ty
= do { tc <- builtin get_tc
; return $ foldr (mk tc) ty tys
}
where
mk tc ty1 ty2 = mkTyConApp tc [ty1,ty2]
-- Type decomposition ---------------------------------------------------------
-- |Checks if a type is a nullary application of a type constructor defined in
-- 'GHC.Prim' (e.g., 'Int#'); if so, returns that type constructor.
--
splitPrimTyCon :: Type -> Maybe TyCon
splitPrimTyCon ty
  = case splitTyConApp_maybe ty of
      Just (tycon, []) | isPrimTyCon tycon -> Just tycon
      _                                    -> Nothing
-- Coercion Construction -----------------------------------------------------
-- |Make a representational coercion to some builtin type.
--
-- The coercion is a nullary 'mkTyConAppCo' at role 'Representational'.
mkBuiltinCo :: (Builtins -> TyCon) -> VM Coercion
mkBuiltinCo get_tc
= do { tc <- builtin get_tc
; return $ mkTyConAppCo Representational tc []
}
-- Wrapping and unwrapping the 'Wrap' newtype ---------------------------------
-- |Apply the constructor wrapper of the 'Wrap' /newtype/.
--
-- Delegates to 'wrapNewTypeBody' (from 'MkId') on the builtin 'wrapTyCon'.
wrapNewTypeBodyOfWrap :: CoreExpr -> Type -> VM CoreExpr
wrapNewTypeBodyOfWrap e ty
= do { wrap_tc <- builtin wrapTyCon
; return $ wrapNewTypeBody wrap_tc [ty] e
}
-- |Strip the constructor wrapper of the 'Wrap' /newtype/.
--
-- Inverse of 'wrapNewTypeBodyOfWrap', via 'unwrapNewTypeBody'.
unwrapNewTypeBodyOfWrap :: CoreExpr -> Type -> VM CoreExpr
unwrapNewTypeBodyOfWrap e ty
= do { wrap_tc <- builtin wrapTyCon
; return $ unwrapNewTypeBody wrap_tc [ty] e
}
-- |Apply the constructor wrapper of the 'PData' /newtype/ instance of 'Wrap'.
--
-- @pwrap_tc@ is the representation tycon of the 'PData' data-family instance
-- for 'Wrap', obtained via 'pdataReprTyConExact'.
wrapNewTypeBodyOfPDataWrap :: CoreExpr -> Type -> VM CoreExpr
wrapNewTypeBodyOfPDataWrap e ty
= do { wrap_tc <- builtin wrapTyCon
; pwrap_tc <- pdataReprTyConExact wrap_tc
; return $ wrapNewTypeBody pwrap_tc [ty] e
}
-- |Strip the constructor wrapper of the 'PData' /newtype/ instance of 'Wrap'.
--
-- Unlike the wrapping direction, this additionally unwraps the family-instance
-- scrutinee with 'unwrapFamInstScrut' before stripping the newtype wrapper.
unwrapNewTypeBodyOfPDataWrap :: CoreExpr -> Type -> VM CoreExpr
unwrapNewTypeBodyOfPDataWrap e ty
= do { wrap_tc <- builtin wrapTyCon
; pwrap_tc <- pdataReprTyConExact wrap_tc
; return $ unwrapNewTypeBody pwrap_tc [ty] (unwrapFamInstScrut pwrap_tc [ty] e)
}
-- |Apply the constructor wrapper of the 'PDatas' /newtype/ instance of 'Wrap'.
--
-- Same shape as 'wrapNewTypeBodyOfPDataWrap', but for the 'PDatas' family
-- (via 'pdatasReprTyConExact').
wrapNewTypeBodyOfPDatasWrap :: CoreExpr -> Type -> VM CoreExpr
wrapNewTypeBodyOfPDatasWrap e ty
= do { wrap_tc <- builtin wrapTyCon
; pwrap_tc <- pdatasReprTyConExact wrap_tc
; return $ wrapNewTypeBody pwrap_tc [ty] e
}
-- |Strip the constructor wrapper of the 'PDatas' /newtype/ instance of 'Wrap'.
--
-- Unwraps the family-instance scrutinee first, then strips the newtype wrapper.
unwrapNewTypeBodyOfPDatasWrap :: CoreExpr -> Type -> VM CoreExpr
unwrapNewTypeBodyOfPDatasWrap e ty
= do { wrap_tc <- builtin wrapTyCon
; pwrap_tc <- pdatasReprTyConExact wrap_tc
; return $ unwrapNewTypeBody pwrap_tc [ty] (unwrapFamInstScrut pwrap_tc [ty] e)
}
-- 'PData' representation types ----------------------------------------------
-- |Get the representation tycon of the 'PData' data family for a given type.
--
-- This tycon does not appear explicitly in the source program — see Note [PData TyCons] in
-- 'Vectorise.Generic.Description':
--
-- @pdataReprTyCon {Sum2} = {PDataSum2}@
--
-- The type for which we look up a 'PData' instance may be more specific than the type in the
-- instance declaration. In that case the second component of the result will be more specific than
-- a set of distinct type variables.
--
-- Implemented by a 'lookupFamInst' on the builtin 'pdataTyCon' applied to @ty@.
pdataReprTyCon :: Type -> VM (TyCon, [Type])
pdataReprTyCon ty
= do
{ FamInstMatch { fim_instance = famInst
, fim_tys = tys } <- builtin pdataTyCon >>= (`lookupFamInst` [ty])
; return (dataFamInstRepTyCon famInst, tys)
}
-- |Get the representation tycon of the 'PData' data family for a given type constructor.
--
-- For example, for a binary type constructor 'T', we determine the representation type constructor
-- for 'PData (T a b)'.
--
pdataReprTyConExact :: TyCon -> VM TyCon
pdataReprTyConExact tycon
= do { -- look up the representation tycon; if there is a match at all, it will be exact
; -- (i.e.,' _tys' will be distinct type variables)
; (ptycon, _tys) <- pdataReprTyCon (tycon `mkTyConApp` mkTyVarTys (tyConTyVars tycon))
; return ptycon
}
-- |Get the representation tycon of the 'PDatas' data family for a given type constructor.
--
-- For example, for a binary type constructor 'T', we determine the representation type constructor
-- for 'PDatas (T a b)'.
--
pdatasReprTyConExact :: TyCon -> VM TyCon
pdatasReprTyConExact tycon
= do { -- look up the representation tycon; if there is a match at all, it will be exact
; (FamInstMatch { fim_instance = ptycon }) <- pdatasReprTyCon (tycon `mkTyConApp` mkTyVarTys (tyConTyVars tycon))
; return $ dataFamInstRepTyCon ptycon
}
where
-- local analogue of the top-level 'pdataReprTyCon', but for 'pdatasTyCon'
pdatasReprTyCon ty = builtin pdatasTyCon >>= (`lookupFamInst` [ty])
-- |Unwrap a 'PData' representation scrutinee.
--
-- Returns the (unchanged) vectorised expression, the lifted expression with
-- its family-instance wrapper stripped, and the data constructor of the
-- 'PData' representation tycon.
pdataUnwrapScrut :: VExpr -> VM (CoreExpr, CoreExpr, DataCon)
pdataUnwrapScrut (ve, le)
= do { (tc, arg_tys) <- pdataReprTyCon ty
; let [dc] = tyConDataCons tc -- partial match: relies on the invariant that
                              -- the 'PData' representation tycon has exactly
                              -- one data constructor
; return (ve, unwrapFamInstScrut tc arg_tys le, dc)
}
where
ty = exprType ve
-- 'PRepr' representation types ----------------------------------------------
-- |Get the representation tycon of the 'PRepr' type family for a given type,
-- by looking up a family instance of the builtin 'preprTyCon' at @ty@.
preprSynTyCon :: Type -> VM FamInstMatch
preprSynTyCon ty = builtin preprTyCon >>= (`lookupFamInst` [ty])
|
lukexi/ghc-7.8-arm64
|
compiler/vectorise/Vectorise/Utils/Base.hs
|
bsd-3-clause
| 8,046
| 3
| 13
| 1,545
| 1,547
| 840
| 707
| 129
| 1
|
-- A missing kind check made GHC 6.4 crash on this one
-- (should_fail regression test: in 'baa', 'f' is applied to an argument
-- in 'f a' but also used unapplied as the result type, so its kind
-- cannot be resolved; a correct compiler must reject this, not crash).
module ShoudlFail where

class Foo f where
  baa :: f a -> f

instance Foo Maybe where
  baa z = z
|
snoyberg/ghc
|
testsuite/tests/typecheck/should_fail/tcfail135.hs
|
bsd-3-clause
| 167
| 0
| 8
| 52
| 42
| 22
| 20
| 5
| 0
|
{-# LANGUAGE CPP #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE TupleSections #-}
-- | Test suite for Stack.Dot
module Stack.DotSpec where
import Control.Monad (filterM)
import Data.Foldable as F
import Data.Functor.Identity
import Data.List ((\\))
import qualified Data.Map as Map
import Data.Maybe (fromMaybe)
import Data.Set (Set)
import qualified Data.Set as Set
import Data.Text (Text)
import Stack.Types
import Test.Hspec
import Test.Hspec.QuickCheck (prop)
import Test.QuickCheck (forAll,choose,Gen)
import Stack.Dot
-- | Fixed placeholder version attached to every stubbed dependency.
dummyVersion :: Version
dummyVersion =
  case parseVersionFromString "0.0.0.0" of
    Just v  -> v
    Nothing -> error "dotspec: parser error"
-- | Test suite for dependency resolution and graph pruning.
--
-- The fixture graph declares "one" and "two" as the locally requested
-- packages; their transitive dependencies are supplied by 'stubLoader'.
spec :: Spec
spec = do
  let graph =
        Map.mapKeys pkgName
          . fmap (\p -> (Set.map pkgName p, Just dummyVersion))
          . Map.fromList $ [("one",Set.fromList ["base","free"])
                           ,("two",Set.fromList ["base","free","mtl","transformers","one"])
                           ]
  describe "Stack.Dot" $ do
    it "does nothing if depth is 0" $
      resolveDependencies (Just 0) graph stubLoader `shouldBe` return graph

    it "with depth 1, more dependencies are resolved" $ do
      let graph' = Map.insert (pkgName "cycle")
                              (Set.singleton (pkgName "cycle"), Just dummyVersion)
                              graph
          resultGraph = runIdentity (resolveDependencies (Just 0) graph stubLoader)
          resultGraph' = runIdentity (resolveDependencies (Just 1) graph' stubLoader)
      Map.size resultGraph < Map.size resultGraph' `shouldBe` True

    it "cycles are ignored" $ do
      let graph' = Map.insert (pkgName "cycle")
                              (Set.singleton (pkgName "cycle"), Just dummyVersion)
                              graph
          resultGraph = resolveDependencies Nothing graph stubLoader
          resultGraph' = resolveDependencies Nothing graph' stubLoader
      -- Adding only a self-cycle must grow the resolved graph by exactly one node.
      fmap Map.size resultGraph' `shouldBe` fmap ((+1) . Map.size) resultGraph

    -- True iff the package occurs in any dependency set of the graph.
    let graphElem e = Set.member e . Set.unions . Map.elems

    prop "requested packages are pruned" $ do
      let resolvedGraph = runIdentity (resolveDependencies Nothing graph stubLoader)
          allPackages g = Set.map show (Map.keysSet g `Set.union` F.fold (fmap fst g))
      forAll (sublistOf (Set.toList (allPackages resolvedGraph))) $ \toPrune ->
        let pruned = pruneGraph [pkgName "one", pkgName "two"] toPrune resolvedGraph
        in Set.null (allPackages pruned `Set.intersection` Set.fromList toPrune)

    -- BUG FIX: description previously misspelled as "orhpans".
    prop "pruning removes orphans" $ do
      let resolvedGraph = runIdentity (resolveDependencies Nothing graph stubLoader)
          allPackages g = Set.map show (Map.keysSet g `Set.union` F.fold (fmap fst g))
          orphans g = Map.filterWithKey (\k _ -> not (graphElem k g)) g
      forAll (sublistOf (Set.toList (allPackages resolvedGraph))) $ \toPrune ->
        let pruned = pruneGraph [pkgName "one", pkgName "two"] toPrune resolvedGraph
        in null (Map.keys (orphans (fmap fst pruned)) \\ [pkgName "one", pkgName "two"])
{- Helper functions below -}
-- Backport from QuickCheck 2.8 to 2.7.6
-- Generates a random sublist by keeping each element with probability 1/2.
sublistOf :: [a] -> Gen [a]
sublistOf = filterM (const (choose (False, True)))
-- Unsafe internal helper to create a package name
-- (crashes if the text is not a valid package name; test-only code).
pkgName :: Text -> PackageName
pkgName txt =
  case parsePackageName txt of
    Just name -> name
    Nothing   -> error "Internal error during package name creation in DotSpec.pkgName"
-- Stub, simulates the function to load package dependencies.
-- Every result carries 'dummyVersion'; names not matched below get no deps.
--
-- NOTE(review): several case keys look truncated relative to the package
-- names used in the dependency sets above (e.g. "cont" vs "contravariant",
-- "ghc" vs "ghc-prim", "integer" vs "integer-gmp", "template" vs
-- "template-haskell", "unordered" vs "unordered-containers", "prelude" vs
-- "prelude-extras"). Unmatched names fall through to the [] default —
-- confirm whether 'show name' really yields these short forms.
stubLoader :: PackageName -> Identity (Set PackageName, Maybe Version)
stubLoader name = return . (, Just dummyVersion) . Set.fromList . map pkgName $ case show name of
  "StateVar" -> ["stm","transformers"]
  "array" -> []
  "bifunctors" -> ["semigroupoids","semigroups","tagged"]
  "binary" -> ["array","bytestring","containers"]
  "bytestring" -> ["deepseq","ghc-prim","integer-gmp"]
  "comonad" -> ["containers","contravariant","distributive"
               ,"semigroups","tagged","transformers","transformers-compat"
               ]
  "cont" -> ["StateVar","semigroups","transformers","transformers-compat","void"]
  "containers" -> ["array","deepseq","ghc-prim"]
  "deepseq" -> ["array"]
  "distributive" -> ["ghc-prim","tagged","transformers","transformers-compat"]
  "free" -> ["bifunctors","comonad","distributive","mtl"
            ,"prelude-extras","profunctors","semigroupoids"
            ,"semigroups","template-haskell","transformers"
            ]
  "ghc" -> []
  "hashable" -> ["bytestring","ghc-prim","integer-gmp","text"]
  "integer" -> []
  "mtl" -> ["transformers"]
  "nats" -> []
  "one" -> ["free"]
  "prelude" -> []
  "profunctors" -> ["comonad","distributive","semigroupoids","tagged","transformers"]
  "semigroupoids" -> ["comonad","containers","contravariant","distributive"
                     ,"semigroups","transformers","transformers-compat"
                     ]
  "semigroups" -> ["bytestring","containers","deepseq","hashable"
                  ,"nats","text","unordered-containers"
                  ]
  "stm" -> ["array"]
  "tagged" -> ["template-haskell"]
  "template" -> []
  "text" -> ["array","binary","bytestring","deepseq","ghc-prim","integer-gmp"]
  "transformers" -> []
  "two" -> ["free","mtl","one","transformers"]
  "unordered" -> ["deepseq","hashable"]
  "void" -> ["ghc-prim","hashable","semigroups"]
  _ -> []
|
phadej/stack
|
src/test/Stack/DotSpec.hs
|
bsd-3-clause
| 5,466
| 0
| 24
| 1,255
| 1,555
| 836
| 719
| 100
| 30
|
-- Exercises a case expression on a Bool scrutinee; always prints "Ney!".
main = putStrLn message
  where
    message = case False of
      True  -> "Hello!"
      False -> "Ney!"
|
seereason/ghcjs
|
test/fay/case2.hs
|
mit
| 103
| 0
| 9
| 50
| 28
| 14
| 14
| 3
| 2
|
module TH_bracket2 where

-- Template Haskell declaration bracket containing a datatype and its
-- 'Show' instance; the test checks that the bracket type-checks.
d_show = [d| data A = A
             instance Show A where
               show _ = "A"
         |]
|
green-haskell/ghc
|
testsuite/tests/th/TH_bracket2.hs
|
bsd-3-clause
| 129
| 0
| 4
| 61
| 13
| 10
| 3
| -1
| -1
|
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE QuasiQuotes #-}
{-# LANGUAGE TemplateHaskell #-}
module Futhark.CodeGen.Backends.COpenCL.Boilerplate
( generateBoilerplate,
profilingEvent,
copyDevToDev,
copyDevToHost,
copyHostToDev,
copyScalarToDev,
copyScalarFromDev,
commonOptions,
failureSwitch,
costCentreReport,
kernelRuntime,
kernelRuns,
sizeLoggingCode,
)
where
import Control.Monad.State
import qualified Data.Map as M
import Data.Maybe
import qualified Data.Text as T
import qualified Futhark.CodeGen.Backends.GenericC as GC
import Futhark.CodeGen.Backends.GenericC.Options
import Futhark.CodeGen.ImpCode.OpenCL
import Futhark.CodeGen.OpenCL.Heuristics
import Futhark.CodeGen.RTS.C (freeListH, openclH)
import Futhark.Util (chunk, zEncodeString)
import Futhark.Util.Pretty (prettyOneLine)
import qualified Language.C.Quote.OpenCL as C
import qualified Language.C.Syntax as C
-- | Number of runtime arguments the error message interpolates.
errorMsgNumArgs :: ErrorMsg a -> Int
errorMsgNumArgs msg = length (errorMsgArgTypes msg)
-- | Generate a C @switch@ on @failure_idx@ that formats the matching
-- failure message (with its runtime arguments and backtrace) into
-- @ctx->error@.  Literal '%' characters are escaped for printf.
failureSwitch :: [FailureMsg] -> C.Stm
failureSwitch failures =
  let printfEscape =
        let escapeChar '%' = "%%"
            escapeChar c = [c]
         in concatMap escapeChar
      onPart (ErrorString s) = printfEscape s
      -- FIXME: bogus for non-ints.
      onPart ErrorVal {} = "%lld"
      -- One case per failure: format the message with args[0..n-1].
      onFailure i (FailureMsg emsg@(ErrorMsg parts) backtrace) =
        let msg = concatMap onPart parts ++ "\n" ++ printfEscape backtrace
            msgargs = [[C.cexp|args[$int:j]|] | j <- [0 .. errorMsgNumArgs emsg -1]]
         in [C.cstm|case $int:i: {ctx->error = msgprintf($string:msg, $args:msgargs); break;}|]
      failure_cases =
        zipWith onFailure [(0 :: Int) ..] failures
   in [C.cstm|switch (failure_idx) { $stms:failure_cases }|]
-- | Names of the built-in cost centres used to attribute profiling time
-- to the different kinds of memory transfers.
copyDevToDev, copyDevToHost, copyHostToDev, copyScalarToDev, copyScalarFromDev :: Name
copyDevToDev = "copy_dev_to_dev"
copyDevToHost = "copy_dev_to_host"
copyHostToDev = "copy_host_to_dev"
copyScalarToDev = "copy_scalar_to_dev"
copyScalarFromDev = "copy_scalar_from_dev"
-- | C expression yielding the event pointer to pass to an enqueue call:
-- NULL when profiling is off or paused, otherwise an event that records
-- into the given cost centre's runtime/run counters.
profilingEvent :: Name -> C.Exp
profilingEvent name =
  [C.cexp|(ctx->profiling_paused || !ctx->profiling) ? NULL
          : opencl_get_event(&ctx->opencl,
                             &ctx->$id:(kernelRuns name),
                             &ctx->$id:(kernelRuntime name))|]
-- | Called after most code has been generated to generate the bulk of
-- the boilerplate.
--
-- Emits, in order: the tuning-parameter metadata tables, the
-- configuration struct and its setters, the context struct, the
-- early/late context initialisers, context construction/destruction,
-- synchronisation, and the profiling hooks.
generateBoilerplate ::
  T.Text ->
  T.Text ->
  [Name] ->
  M.Map KernelName KernelSafety ->
  [PrimType] ->
  M.Map Name SizeClass ->
  [FailureMsg] ->
  GC.CompilerM OpenCL () ()
generateBoilerplate opencl_code opencl_prelude cost_centres kernels types sizes failures = do
  final_inits <- GC.contextFinalInits
  let (ctx_opencl_fields, ctx_opencl_inits, top_decls, later_top_decls) =
        openClDecls cost_centres kernels (opencl_prelude <> opencl_code)
  mapM_ GC.earlyDecl top_decls
  -- Static tables describing the tuning parameters (names, z-encoded
  -- variable names, and size classes), indexed consistently.
  let size_name_inits = map (\k -> [C.cinit|$string:(pretty k)|]) $ M.keys sizes
      size_var_inits = map (\k -> [C.cinit|$string:(zEncodeString (pretty k))|]) $ M.keys sizes
      size_class_inits = map (\c -> [C.cinit|$string:(pretty c)|]) $ M.elems sizes
      num_sizes = M.size sizes
  GC.earlyDecl [C.cedecl|static const char *tuning_param_names[] = { $inits:size_name_inits };|]
  GC.earlyDecl [C.cedecl|static const char *tuning_param_vars[] = { $inits:size_var_inits };|]
  GC.earlyDecl [C.cedecl|static const char *tuning_param_classes[] = { $inits:size_class_inits };|]
  let size_decls = map (\k -> [C.csdecl|typename int64_t *$id:k;|]) $ M.keys sizes
  GC.earlyDecl [C.cedecl|struct tuning_params { $sdecls:size_decls };|]
  -- Public configuration struct; 'in_use' guards against sharing one
  -- config between two live contexts.
  cfg <- GC.publicDef "context_config" GC.InitDecl $ \s ->
    ( [C.cedecl|struct $id:s;|],
      [C.cedecl|struct $id:s { int in_use;
                               struct opencl_config opencl;
                               typename int64_t tuning_params[$int:num_sizes];
                               int num_build_opts;
                               const char **build_opts;
                             };|]
    )
  let size_value_inits = zipWith sizeInit [0 .. M.size sizes -1] (M.elems sizes)
      -- Initialise each tuning parameter to its declared default (0 if none).
      sizeInit i size = [C.cstm|cfg->tuning_params[$int:i] = $int:val;|]
        where
          val = fromMaybe 0 $ sizeDefault size
  GC.publicDef_ "context_config_new" GC.InitDecl $ \s ->
    ( [C.cedecl|struct $id:cfg* $id:s(void);|],
      [C.cedecl|struct $id:cfg* $id:s(void) {
                  struct $id:cfg *cfg = (struct $id:cfg*) malloc(sizeof(struct $id:cfg));
                  if (cfg == NULL) {
                    return NULL;
                  }
                  cfg->in_use = 0;
                  cfg->num_build_opts = 0;
                  cfg->build_opts = (const char**) malloc(sizeof(const char*));
                  cfg->build_opts[0] = NULL;
                  $stms:size_value_inits
                  opencl_config_init(&cfg->opencl, $int:num_sizes,
                                     tuning_param_names, tuning_param_vars,
                                     cfg->tuning_params, tuning_param_classes);
                  return cfg;
                }|]
    )
  GC.publicDef_ "context_config_free" GC.InitDecl $ \s ->
    ( [C.cedecl|void $id:s(struct $id:cfg* cfg);|],
      [C.cedecl|void $id:s(struct $id:cfg* cfg) {
                  assert(!cfg->in_use);
                  free(cfg->build_opts);
                  free(cfg);
                }|]
    )
  -- build_opts is kept as a NULL-terminated array; each append reallocs
  -- one extra slot for the terminator.
  GC.publicDef_ "context_config_add_build_option" GC.InitDecl $ \s ->
    ( [C.cedecl|void $id:s(struct $id:cfg* cfg, const char *opt);|],
      [C.cedecl|void $id:s(struct $id:cfg* cfg, const char *opt) {
                  cfg->build_opts[cfg->num_build_opts] = opt;
                  cfg->num_build_opts++;
                  cfg->build_opts = (const char**) realloc(cfg->build_opts, (cfg->num_build_opts+1) * sizeof(const char*));
                  cfg->build_opts[cfg->num_build_opts] = NULL;
                }|]
    )
  GC.publicDef_ "context_config_set_debugging" GC.InitDecl $ \s ->
    ( [C.cedecl|void $id:s(struct $id:cfg* cfg, int flag);|],
      [C.cedecl|void $id:s(struct $id:cfg* cfg, int flag) {
                  cfg->opencl.profiling = cfg->opencl.logging = cfg->opencl.debugging = flag;
                }|]
    )
  GC.publicDef_ "context_config_set_profiling" GC.InitDecl $ \s ->
    ( [C.cedecl|void $id:s(struct $id:cfg* cfg, int flag);|],
      [C.cedecl|void $id:s(struct $id:cfg* cfg, int flag) {
                  cfg->opencl.profiling = flag;
                }|]
    )
  GC.publicDef_ "context_config_set_logging" GC.InitDecl $ \s ->
    ( [C.cedecl|void $id:s(struct $id:cfg* cfg, int flag);|],
      [C.cedecl|void $id:s(struct $id:cfg* cfg, int flag) {
                  cfg->opencl.logging = flag;
                }|]
    )
  GC.publicDef_ "context_config_set_device" GC.InitDecl $ \s ->
    ( [C.cedecl|void $id:s(struct $id:cfg* cfg, const char *s);|],
      [C.cedecl|void $id:s(struct $id:cfg* cfg, const char *s) {
                  set_preferred_device(&cfg->opencl, s);
                }|]
    )
  GC.publicDef_ "context_config_set_platform" GC.InitDecl $ \s ->
    ( [C.cedecl|void $id:s(struct $id:cfg* cfg, const char *s);|],
      [C.cedecl|void $id:s(struct $id:cfg* cfg, const char *s) {
                  set_preferred_platform(&cfg->opencl, s);
                }|]
    )
  GC.publicDef_ "context_config_select_device_interactively" GC.InitDecl $ \s ->
    ( [C.cedecl|void $id:s(struct $id:cfg* cfg);|],
      [C.cedecl|void $id:s(struct $id:cfg* cfg) {
                  select_device_interactively(&cfg->opencl);
                }|]
    )
  GC.publicDef_ "context_config_list_devices" GC.InitDecl $ \s ->
    ( [C.cedecl|void $id:s(struct $id:cfg* cfg);|],
      [C.cedecl|void $id:s(struct $id:cfg* cfg) {
                  (void)cfg;
                  list_devices();
                }|]
    )
  GC.publicDef_ "context_config_dump_program_to" GC.InitDecl $ \s ->
    ( [C.cedecl|void $id:s(struct $id:cfg* cfg, const char *path);|],
      [C.cedecl|void $id:s(struct $id:cfg* cfg, const char *path) {
                  cfg->opencl.dump_program_to = path;
                }|]
    )
  GC.publicDef_ "context_config_load_program_from" GC.InitDecl $ \s ->
    ( [C.cedecl|void $id:s(struct $id:cfg* cfg, const char *path);|],
      [C.cedecl|void $id:s(struct $id:cfg* cfg, const char *path) {
                  cfg->opencl.load_program_from = path;
                }|]
    )
  GC.publicDef_ "context_config_dump_binary_to" GC.InitDecl $ \s ->
    ( [C.cedecl|void $id:s(struct $id:cfg* cfg, const char *path);|],
      [C.cedecl|void $id:s(struct $id:cfg* cfg, const char *path) {
                  cfg->opencl.dump_binary_to = path;
                }|]
    )
  GC.publicDef_ "context_config_load_binary_from" GC.InitDecl $ \s ->
    ( [C.cedecl|void $id:s(struct $id:cfg* cfg, const char *path);|],
      [C.cedecl|void $id:s(struct $id:cfg* cfg, const char *path) {
                  cfg->opencl.load_binary_from = path;
                }|]
    )
  GC.publicDef_ "context_config_set_default_group_size" GC.InitDecl $ \s ->
    ( [C.cedecl|void $id:s(struct $id:cfg* cfg, int size);|],
      [C.cedecl|void $id:s(struct $id:cfg* cfg, int size) {
                  cfg->opencl.default_group_size = size;
                  cfg->opencl.default_group_size_changed = 1;
                }|]
    )
  GC.publicDef_ "context_config_set_default_num_groups" GC.InitDecl $ \s ->
    ( [C.cedecl|void $id:s(struct $id:cfg* cfg, int num);|],
      [C.cedecl|void $id:s(struct $id:cfg* cfg, int num) {
                  cfg->opencl.default_num_groups = num;
                }|]
    )
  -- NOTE(review): for the next three setters the declaration names the
  -- parameter 'num' while the definition names it 'size'.  Harmless in C
  -- (prototype parameter names are ignored) but worth unifying.
  GC.publicDef_ "context_config_set_default_tile_size" GC.InitDecl $ \s ->
    ( [C.cedecl|void $id:s(struct $id:cfg* cfg, int num);|],
      [C.cedecl|void $id:s(struct $id:cfg* cfg, int size) {
                  cfg->opencl.default_tile_size = size;
                  cfg->opencl.default_tile_size_changed = 1;
                }|]
    )
  GC.publicDef_ "context_config_set_default_reg_tile_size" GC.InitDecl $ \s ->
    ( [C.cedecl|void $id:s(struct $id:cfg* cfg, int num);|],
      [C.cedecl|void $id:s(struct $id:cfg* cfg, int size) {
                  cfg->opencl.default_reg_tile_size = size;
                }|]
    )
  GC.publicDef_ "context_config_set_default_threshold" GC.InitDecl $ \s ->
    ( [C.cedecl|void $id:s(struct $id:cfg* cfg, int num);|],
      [C.cedecl|void $id:s(struct $id:cfg* cfg, int size) {
                  cfg->opencl.default_threshold = size;
                }|]
    )
  -- Setting a tuning parameter first tries the generated table, then the
  -- handful of built-in defaults; returns nonzero if the name is unknown.
  GC.publicDef_ "context_config_set_tuning_param" GC.InitDecl $ \s ->
    ( [C.cedecl|int $id:s(struct $id:cfg* cfg, const char *param_name, size_t new_value);|],
      [C.cedecl|int $id:s(struct $id:cfg* cfg, const char *param_name, size_t new_value) {
                  for (int i = 0; i < $int:num_sizes; i++) {
                    if (strcmp(param_name, tuning_param_names[i]) == 0) {
                      cfg->tuning_params[i] = new_value;
                      return 0;
                    }
                  }
                  if (strcmp(param_name, "default_group_size") == 0) {
                    cfg->opencl.default_group_size = new_value;
                    return 0;
                  }
                  if (strcmp(param_name, "default_num_groups") == 0) {
                    cfg->opencl.default_num_groups = new_value;
                    return 0;
                  }
                  if (strcmp(param_name, "default_threshold") == 0) {
                    cfg->opencl.default_threshold = new_value;
                    return 0;
                  }
                  if (strcmp(param_name, "default_tile_size") == 0) {
                    cfg->opencl.default_tile_size = new_value;
                    return 0;
                  }
                  if (strcmp(param_name, "default_reg_tile_size") == 0) {
                    cfg->opencl.default_reg_tile_size = new_value;
                    return 0;
                  }
                  return 1;
                }|]
    )
  (fields, init_fields, free_fields) <- GC.contextContents
  -- Public context struct combining the generic compiler fields with the
  -- OpenCL-specific state (kernels, failure buffers, tuning params).
  ctx <- GC.publicDef "context" GC.InitDecl $ \s ->
    ( [C.cedecl|struct $id:s;|],
      [C.cedecl|struct $id:s {
                  struct $id:cfg* cfg;
                  int detail_memory;
                  int debugging;
                  int profiling;
                  int profiling_paused;
                  int logging;
                  typename lock_t lock;
                  char *error;
                  typename FILE *log;
                  $sdecls:fields
                  $sdecls:ctx_opencl_fields
                  typename cl_mem global_failure;
                  typename cl_mem global_failure_args;
                  struct opencl_context opencl;
                  struct tuning_params tuning_params;
                  // True if a potentially failing kernel has been enqueued.
                  typename cl_int failure_is_an_option;
                };|]
    )
  mapM_ GC.earlyDecl later_top_decls
  -- Early initialisation: everything that does not need a live OpenCL
  -- program (flags, logging, profiling records, lock).
  GC.earlyDecl
    [C.cedecl|static void init_context_early(struct $id:cfg *cfg, struct $id:ctx* ctx) {
                ctx->opencl.cfg = cfg->opencl;
                ctx->detail_memory = cfg->opencl.debugging;
                ctx->debugging = cfg->opencl.debugging;
                ctx->profiling = cfg->opencl.profiling;
                ctx->profiling_paused = 0;
                ctx->logging = cfg->opencl.logging;
                ctx->error = NULL;
                ctx->log = stderr;
                ctx->opencl.profiling_records_capacity = 200;
                ctx->opencl.profiling_records_used = 0;
                ctx->opencl.profiling_records =
                  malloc(ctx->opencl.profiling_records_capacity *
                         sizeof(struct profiling_record));
                create_lock(&ctx->lock);
                ctx->failure_is_an_option = 0;
                $stms:init_fields
                $stms:ctx_opencl_inits
              }|]
  let set_tuning_params =
        zipWith
          (\i k -> [C.cstm|ctx->tuning_params.$id:k = &cfg->tuning_params[$int:i];|])
          [(0 :: Int) ..]
          $ M.keys sizes
      -- Largest argument count over all failure messages; sizes the
      -- global_failure_args buffer.
      max_failure_args =
        foldl max 0 $ map (errorMsgNumArgs . failureError) failures
  -- Late initialisation: needs the compiled cl_program (failure buffers,
  -- kernel creation, constants).
  GC.earlyDecl
    [C.cedecl|static int init_context_late(struct $id:cfg *cfg, struct $id:ctx* ctx, typename cl_program prog) {
                typename cl_int error;
                typename cl_int no_error = -1;
                ctx->global_failure =
                  clCreateBuffer(ctx->opencl.ctx,
                                 CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
                                 sizeof(cl_int), &no_error, &error);
                OPENCL_SUCCEED_OR_RETURN(error);
                // The +1 is to avoid zero-byte allocations.
                ctx->global_failure_args =
                  clCreateBuffer(ctx->opencl.ctx,
                                 CL_MEM_READ_WRITE,
                                 sizeof(int64_t)*($int:max_failure_args+1), NULL, &error);
                OPENCL_SUCCEED_OR_RETURN(error);
                // Load all the kernels.
                $stms:(map loadKernel (M.toList kernels))
                $stms:final_inits
                $stms:set_tuning_params
                init_constants(ctx);
                // Clear the free list of any deallocations that occurred while initialising constants.
                OPENCL_SUCCEED_OR_RETURN(opencl_free_all(&ctx->opencl));
                // The program will be properly freed after all the kernels have also been freed.
                OPENCL_SUCCEED_OR_RETURN(clReleaseProgram(prog));
                return futhark_context_sync(ctx);
              }|]
  let set_required_types =
        [ [C.cstm|required_types |= OPENCL_F64; |]
          | FloatType Float64 `elem` types
        ]
  GC.publicDef_ "context_new" GC.InitDecl $ \s ->
    ( [C.cedecl|struct $id:ctx* $id:s(struct $id:cfg* cfg);|],
      [C.cedecl|struct $id:ctx* $id:s(struct $id:cfg* cfg) {
                  assert(!cfg->in_use);
                  struct $id:ctx* ctx = (struct $id:ctx*) malloc(sizeof(struct $id:ctx));
                  if (ctx == NULL) {
                    return NULL;
                  }
                  ctx->cfg = cfg;
                  ctx->cfg->in_use = 1;
                  int required_types = 0;
                  $stms:set_required_types
                  init_context_early(cfg, ctx);
                  typename cl_program prog = setup_opencl(&ctx->opencl, opencl_program, required_types, cfg->build_opts);
                  init_context_late(cfg, ctx, prog);
                  return ctx;
                }|]
    )
  GC.publicDef_ "context_new_with_command_queue" GC.InitDecl $ \s ->
    ( [C.cedecl|struct $id:ctx* $id:s(struct $id:cfg* cfg, typename cl_command_queue queue);|],
      [C.cedecl|struct $id:ctx* $id:s(struct $id:cfg* cfg, typename cl_command_queue queue) {
                  assert(!cfg->in_use);
                  struct $id:ctx* ctx = (struct $id:ctx*) malloc(sizeof(struct $id:ctx));
                  if (ctx == NULL) {
                    return NULL;
                  }
                  ctx->cfg = cfg;
                  ctx->cfg->in_use = 1;
                  int required_types = 0;
                  $stms:set_required_types
                  init_context_early(cfg, ctx);
                  typename cl_program prog = setup_opencl_with_command_queue(&ctx->opencl, queue, opencl_program, required_types, cfg->build_opts);
                  init_context_late(cfg, ctx, prog);
                  return ctx;
                }|]
    )
  GC.publicDef_ "context_free" GC.InitDecl $ \s ->
    ( [C.cedecl|void $id:s(struct $id:ctx* ctx);|],
      [C.cedecl|void $id:s(struct $id:ctx* ctx) {
                  $stms:free_fields
                  free_constants(ctx);
                  free_lock(&ctx->lock);
                  $stms:(map releaseKernel (M.toList kernels))
                  OPENCL_SUCCEED_FATAL(clReleaseMemObject(ctx->global_failure));
                  OPENCL_SUCCEED_FATAL(clReleaseMemObject(ctx->global_failure_args));
                  teardown_opencl(&ctx->opencl);
                  ctx->cfg->in_use = 0;
                  free(ctx);
                }|]
    )
  -- Synchronisation also surfaces any deferred kernel failure via the
  -- failure index read back from the device.
  GC.publicDef_ "context_sync" GC.MiscDecl $ \s ->
    ( [C.cedecl|int $id:s(struct $id:ctx* ctx);|],
      [C.cedecl|int $id:s(struct $id:ctx* ctx) {
                  // Check for any delayed error.
                  typename cl_int failure_idx = -1;
                  if (ctx->failure_is_an_option) {
                    OPENCL_SUCCEED_OR_RETURN(
                      clEnqueueReadBuffer(ctx->opencl.queue,
                                          ctx->global_failure,
                                          CL_FALSE,
                                          0, sizeof(typename cl_int), &failure_idx,
                                          0, NULL, $exp:(profilingEvent copyScalarFromDev)));
                    ctx->failure_is_an_option = 0;
                  }
                  OPENCL_SUCCEED_OR_RETURN(clFinish(ctx->opencl.queue));
                  if (failure_idx >= 0) {
                    // We have to clear global_failure so that the next entry point
                    // is not considered a failure from the start.
                    typename cl_int no_failure = -1;
                    OPENCL_SUCCEED_OR_RETURN(
                      clEnqueueWriteBuffer(ctx->opencl.queue, ctx->global_failure, CL_TRUE,
                                           0, sizeof(cl_int), &no_failure,
                                           0, NULL, NULL));
                    typename int64_t args[$int:max_failure_args+1];
                    OPENCL_SUCCEED_OR_RETURN(
                      clEnqueueReadBuffer(ctx->opencl.queue,
                                          ctx->global_failure_args,
                                          CL_TRUE,
                                          0, sizeof(args), &args,
                                          0, NULL, $exp:(profilingEvent copyDevToHost)));
                    $stm:(failureSwitch failures)
                    return FUTHARK_PROGRAM_ERROR;
                  }
                  return 0;
                }|]
    )
  GC.publicDef_ "context_get_command_queue" GC.InitDecl $ \s ->
    ( [C.cedecl|typename cl_command_queue $id:s(struct $id:ctx* ctx);|],
      [C.cedecl|typename cl_command_queue $id:s(struct $id:ctx* ctx) {
                  return ctx->opencl.queue;
                }|]
    )
  GC.onClear
    [C.citem|if (ctx->error == NULL) {
               ctx->error = OPENCL_SUCCEED_NONFATAL(opencl_free_all(&ctx->opencl));
             }|]
  GC.profileReport [C.citem|OPENCL_SUCCEED_FATAL(opencl_tally_profiling_records(&ctx->opencl));|]
  mapM_ GC.profileReport $
    costCentreReport $
      cost_centres ++ M.keys kernels
-- | Build the OpenCL-specific context fields, their initialisation
-- statements, the top-level boilerplate definitions (emitted before the
-- context struct), and the post-setup definitions (emitted after).
openClDecls ::
  [Name] ->
  M.Map KernelName KernelSafety ->
  T.Text ->
  ([C.FieldGroup], [C.Stm], [C.Definition], [C.Definition])
openClDecls cost_centres kernels opencl_program =
  (ctx_fields, ctx_inits, openCL_boilerplate, openCL_load)
  where
    opencl_program_fragments =
      -- Some C compilers limit the size of literal strings, so
      -- chunk the entire program into small bits here, and
      -- concatenate it again at runtime.
      [ [C.cinit|$string:s|]
        | s <- chunk 2000 $ T.unpack opencl_program
      ]
    -- One cl_kernel handle per kernel, plus runtime/run counters for
    -- every cost centre and kernel.
    ctx_fields =
      [ [C.csdecl|int total_runs;|],
        [C.csdecl|long int total_runtime;|]
      ]
        ++ [ [C.csdecl|typename cl_kernel $id:name;|]
             | name <- M.keys kernels
           ]
        ++ concat
          [ [ [C.csdecl|typename int64_t $id:(kernelRuntime name);|],
              [C.csdecl|int $id:(kernelRuns name);|]
            ]
            | name <- cost_centres ++ M.keys kernels
          ]
    -- Zero all counters at context creation.
    ctx_inits =
      [ [C.cstm|ctx->total_runs = 0;|],
        [C.cstm|ctx->total_runtime = 0;|]
      ]
        ++ concat
          [ [ [C.cstm|ctx->$id:(kernelRuntime name) = 0;|],
              [C.cstm|ctx->$id:(kernelRuns name) = 0;|]
            ]
            | name <- cost_centres ++ M.keys kernels
          ]
    openCL_load =
      [ [C.cedecl|
          void post_opencl_setup(struct opencl_context *ctx, struct opencl_device_option *option) {
            $stms:(map sizeHeuristicsCode sizeHeuristicsTable)
          }|]
      ]
    -- NULL-terminated fragment array reassembled at runtime.
    program_fragments = opencl_program_fragments ++ [[C.cinit|NULL|]]
    openCL_boilerplate =
      [C.cunit|
        $esc:("typedef cl_mem fl_mem_t;")
        $esc:(T.unpack freeListH)
        $esc:(T.unpack openclH)
        static const char *opencl_program[] = {$inits:program_fragments};|]
-- | Statement creating one kernel from the compiled program and, per its
-- safety level, pre-binding the global failure buffers as fixed arguments.
loadKernel :: (KernelName, KernelSafety) -> C.Stm
loadKernel (name, safety) =
  [C.cstm|{
    ctx->$id:name = clCreateKernel(prog, $string:(pretty (C.toIdent name mempty)), &error);
    OPENCL_SUCCEED_FATAL(error);
    $items:set_args
    if (ctx->debugging) {
      fprintf(ctx->log, "Created kernel %s.\n", $string:(pretty name));
    }
  }|]
  where
    set_global_failure =
      [C.citem|OPENCL_SUCCEED_FATAL(
                 clSetKernelArg(ctx->$id:name, 0, sizeof(typename cl_mem),
                                &ctx->global_failure));|]
    set_global_failure_args =
      [C.citem|OPENCL_SUCCEED_FATAL(
                 clSetKernelArg(ctx->$id:name, 2, sizeof(typename cl_mem),
                                &ctx->global_failure_args));|]
    -- Cheap kernels only take the failure flag (arg 0); full-safety
    -- kernels additionally take the failure-args buffer (arg 2).
    set_args = case safety of
      SafetyNone -> []
      SafetyCheap -> [set_global_failure]
      SafetyFull -> [set_global_failure, set_global_failure_args]
-- | Statement releasing a kernel handle created by 'loadKernel'.
releaseKernel :: (KernelName, KernelSafety) -> C.Stm
releaseKernel (name, _) = [C.cstm|OPENCL_SUCCEED_FATAL(clReleaseKernel(ctx->$id:name));|]
-- | Name of the context field accumulating a kernel's total runtime.
kernelRuntime :: KernelName -> Name
kernelRuntime name = name <> "_total_runtime"
-- | Name of the context field counting a kernel's invocations.
kernelRuns :: KernelName -> Name
kernelRuns name = name <> "_runs"
-- | Build the profiling report: one line per cost centre (padded so the
-- columns align) plus a cumulative total, while also folding each
-- centre's counters into the context-wide totals.
costCentreReport :: [Name] -> [C.BlockItem]
costCentreReport names = report_kernels ++ [report_total]
  where
    longest_name = foldl max 0 $ map (length . pretty) names
    report_kernels = concatMap reportKernel names
    -- Pad every name to the longest one so the report columns line up.
    format_string name =
      let padding = replicate (longest_name - length name) ' '
       in unwords
            [ name ++ padding,
              "ran %5d times; avg: %8ldus; total: %8ldus\n"
            ]
    reportKernel name =
      let runs = kernelRuns name
          total_runtime = kernelRuntime name
       in [ [C.citem|
              str_builder(&builder,
                          $string:(format_string (pretty name)),
                          ctx->$id:runs,
                          (long int) ctx->$id:total_runtime / (ctx->$id:runs != 0 ? ctx->$id:runs : 1),
                          (long int) ctx->$id:total_runtime);
            |],
            [C.citem|ctx->total_runtime += ctx->$id:total_runtime;|],
            [C.citem|ctx->total_runs += ctx->$id:runs;|]
          ]
    report_total =
      [C.citem|
        str_builder(&builder, "%d operations with cumulative runtime: %6ldus\n",
                    ctx->total_runs, ctx->total_runtime);
      |]
-- | C statement applying one size heuristic: if the current device matches
-- the heuristic's platform substring and device type, and the targeted
-- configuration value is still 0 (i.e. not user-set), compute it from the
-- heuristic expression, querying device info as needed.
sizeHeuristicsCode :: SizeHeuristic -> C.Stm
sizeHeuristicsCode (SizeHeuristic platform_name device_type which (TPrimExp what)) =
  [C.cstm|
   if ($exp:which' == 0 &&
       strstr(option->platform_name, $string:platform_name) != NULL &&
       (option->device_type & $exp:(clDeviceType device_type)) == $exp:(clDeviceType device_type)) {
     $items:get_size
   }|]
  where
    clDeviceType DeviceGPU = [C.cexp|CL_DEVICE_TYPE_GPU|]
    clDeviceType DeviceCPU = [C.cexp|CL_DEVICE_TYPE_CPU|]
    -- The configuration lvalue targeted by this heuristic.
    which' = case which of
      LockstepWidth -> [C.cexp|ctx->lockstep_width|]
      NumGroups -> [C.cexp|ctx->cfg.default_num_groups|]
      GroupSize -> [C.cexp|ctx->cfg.default_group_size|]
      TileSize -> [C.cexp|ctx->cfg.default_tile_size|]
      RegTileSize -> [C.cexp|ctx->cfg.default_reg_tile_size|]
      Threshold -> [C.cexp|ctx->cfg.default_threshold|]
    -- Compile the heuristic expression; the accumulated map holds one
    -- declaration-and-query item group per device info used.
    get_size =
      let (e, m) = runState (GC.compilePrimExp onLeaf what) mempty
       in concat (M.elems m) ++ [[C.citem|$exp:which' = $exp:e;|]]

    -- Emit (once per distinct info) a clGetDeviceInfo query for
    -- CL_DEVICE_<s>, memoised under the full query name.
    onLeaf (DeviceInfo s) = do
      let s' = "CL_DEVICE_" ++ s
          v = s ++ "_val"
      m <- get
      -- BUG FIX: previously this looked up key 's' but inserted under
      -- 's'', so the memoisation check could never hit and the Just
      -- branch was unreachable.  Use 's'' consistently.
      case M.lookup s' m of
        Nothing ->
          -- XXX: Cheating with the type here; works for the infos we
          -- currently use because we zero-initialise and assume a
          -- little-endian platform, but should be made more
          -- size-aware in the future.
          modify $
            M.insert
              s'
              [C.citems|size_t $id:v = 0;
                        clGetDeviceInfo(ctx->device, $id:s',
                                        sizeof($id:v), &$id:v,
                                        NULL);|]
        Just _ -> return ()
      return [C.cexp|$id:v|]
-- Output size information if logging is enabled.
--
-- The autotuner depends on the format of this output, so use caution if
-- changing it.
sizeLoggingCode :: VName -> Name -> C.Exp -> GC.CompilerM op () ()
sizeLoggingCode v key x' = do
  GC.stm
    [C.cstm|if (ctx->logging) {
              fprintf(ctx->log, "Compared %s <= %ld: %s.\n", $string:(prettyOneLine key), (long)$exp:x', $id:v ? "true" : "false");
            }|]
-- Options that are common to multiple GPU-like backends.
-- Each entry wires a command-line flag to the corresponding
-- futhark_context_config_* setter generated above.
commonOptions :: [Option]
commonOptions =
  [ Option
      { optionLongName = "device",
        optionShortName = Just 'd',
        optionArgument = RequiredArgument "NAME",
        optionDescription = "Use the first OpenCL device whose name contains the given string.",
        optionAction = [C.cstm|futhark_context_config_set_device(cfg, optarg);|]
      },
    Option
      { optionLongName = "default-group-size",
        optionShortName = Nothing,
        optionArgument = RequiredArgument "INT",
        optionDescription = "The default size of OpenCL workgroups that are launched.",
        optionAction = [C.cstm|futhark_context_config_set_default_group_size(cfg, atoi(optarg));|]
      },
    Option
      { optionLongName = "default-num-groups",
        optionShortName = Nothing,
        optionArgument = RequiredArgument "INT",
        optionDescription = "The default number of OpenCL workgroups that are launched.",
        optionAction = [C.cstm|futhark_context_config_set_default_num_groups(cfg, atoi(optarg));|]
      },
    Option
      { optionLongName = "default-tile-size",
        optionShortName = Nothing,
        optionArgument = RequiredArgument "INT",
        optionDescription = "The default tile size used when performing two-dimensional tiling.",
        optionAction = [C.cstm|futhark_context_config_set_default_tile_size(cfg, atoi(optarg));|]
      },
    Option
      { optionLongName = "default-reg-tile-size",
        optionShortName = Nothing,
        optionArgument = RequiredArgument "INT",
        optionDescription = "The default register tile size used when performing two-dimensional tiling.",
        optionAction = [C.cstm|futhark_context_config_set_default_reg_tile_size(cfg, atoi(optarg));|]
      },
    Option
      { optionLongName = "default-threshold",
        optionShortName = Nothing,
        optionArgument = RequiredArgument "INT",
        optionDescription = "The default parallelism threshold.",
        optionAction = [C.cstm|futhark_context_config_set_default_threshold(cfg, atoi(optarg));|]
      }
  ]
|
diku-dk/futhark
|
src/Futhark/CodeGen/Backends/COpenCL/Boilerplate.hs
|
isc
| 30,745
| 0
| 18
| 10,553
| 3,521
| 2,099
| 1,422
| 320
| 8
|
{- contains the main gui functions
-}
module Language.Astview.Gui.Actions where
import Language.Astview.Gui.Types
import Language.Astview.Language
import Language.Astview.SmallestSrcLocContainingCursor
(smallestSrcLocContainingCursorPos)
import Language.Astview.DataTree(flatten)
import Prelude hiding (span,writeFile)
import Data.List (find)
import Control.Monad (when,unless,void,zipWithM_)
import Data.Char (toLower)
import System.IO (withFile,IOMode(..),hPutStr,hClose)
import System.FilePath (takeExtension,takeFileName)
import qualified Data.ByteString.Char8 as BS (hGetContents,unpack)
import Data.Tree ( Tree(Node) )
#if __GLASGOW_HASKELL__ < 710
import Control.Applicative((<$>))
#endif
import Graphics.UI.Gtk hiding (Language,response,bufferChanged)
import Graphics.UI.Gtk.SourceView
-- -------------------------------------------------------------------
-- * filemenu menu actions
-- -------------------------------------------------------------------
-- |removes the first column from the given tree view, if one exists
clearTreeView :: TreeView -> IO ()
clearTreeView view = do
  maybeColumn <- treeViewGetColumn view 0
  case maybeColumn of
    Nothing  -> return ()
    Just col -> void $ treeViewRemoveColumn view col
-- | resets the GUI: clears the tree view, empties the source buffer
-- and sets the window title back to the unsaved-document placeholder
actionEmptyGUI :: AstAction ()
actionEmptyGUI ref = do
  g <- getGui ref
  clearTreeView =<< getTreeView ref
  flip textBufferSetText ("" :: String) =<< getSourceBuffer ref
  windowSetTitleSuffix (window g) unsavedDoc
-- | updates the sourceview with a given file and parses the file
actionLoadHeadless :: FilePath -> AstAction ()
actionLoadHeadless file ref = do
  setCurrentFile file ref
  w <- getWindow ref
  windowSetTitleSuffix w (takeFileName file)
  buffer <- getSourceBuffer ref
  -- read via strict ByteString inside withFile so the handle is closed
  -- deterministically and the whole content is in memory before use
  textBufferSetText buffer =<< withFile file ReadMode (fmap BS.unpack . BS.hGetContents)
  -- loading a file leaves the buffer unmodified: clear the dirty marker
  deleteStar ref
  actionReparse ref
-- |tries to find a language based on the extension of
-- current file name
getLanguageByExtension :: AstAction (Maybe Language)
getLanguageByExtension ref = do
  file  <- getCurrentFile ref
  langs <- getKnownLanguages ref
  let matches lang = takeExtension file `elem` exts lang
  return (find matches langs)
-- |returns the explicitly selected language if there is one,
-- otherwise falls back to detection by the current file extension
getLanguage :: AstAction (Maybe Language)
getLanguage ref =
  getActiveLanguage ref >>= \activeLang ->
    maybe (getLanguageByExtension ref) (return . Just) activeLang
-- |parses the current source buffer content with the given language;
-- list nodes are flattened when the corresponding gui option is set
actionGetAst :: Language -> AstAction (Either Error Ast)
actionGetAst l ref = do
  plain <- getText =<< getSourceBuffer ref
  flattening <- getFlattenLists ref
  return $ (if flattening then flatten else id) <$> parse l plain

-- | parses the contents of the sourceview with the selected language
-- and rebuilds the gtk tree view: new model, a single text column
-- rendered with the configured font size
actionParse :: Language -> AstAction (Tree String)
actionParse l ref = do
  buffer <- getSourceBuffer ref
  view <- getTreeView ref
  sourceBufferSetHighlightSyntax buffer True
  setupSyntaxHighlighting buffer l
  tree <- buildTree <$> actionGetAst l ref
  -- drop the old column before installing the freshly built model
  clearTreeView view
  model <- treeStoreNew [tree]
  treeViewSetModel view model
  col <- treeViewColumnNew
  renderer <- cellRendererTextNew
  cellLayoutPackStart col renderer True
  fontsize <- getFontsize ref
  cellLayoutSetAttributes
    col
    renderer
    model
    (\row -> [ cellText := row
             , cellTextSize := (fromInteger . toInteger) fontsize
             ] )
  treeViewAppendColumn view col
  return tree
-- |constructs the tree which will be presented by our gtk-treeview:
-- a successful parse shows the labelled ast, a failure a one-node
-- error message
buildTree :: Either Error Ast -> Tree String
buildTree (Right t)  = fmap label (ast t)
buildTree (Left err) = Node (render err) [] where
  render Err                 = "Parse error"
  render (ErrMessage m)      = m
  render (ErrLocation pos m) = "Parse error at:"++show pos++": "++m
-- |uses the name of given language to establish syntax highlighting in
-- source buffer
setupSyntaxHighlighting :: SourceBuffer -> Language -> IO ()
setupSyntaxHighlighting buffer language = do
  langManager <- sourceLanguageManagerGetDefault
  -- gtksourceview language ids are lower case, hence the conversion
  maybeLang <- sourceLanguageManagerGetLanguage
    langManager
    (map toLower $ syntax language)
  case maybeLang of
    Just lang -> do
      sourceBufferSetHighlightSyntax buffer True
      sourceBufferSetLanguage buffer (Just lang)
    -- unknown language name: turn highlighting off entirely
    Nothing -> sourceBufferSetHighlightSyntax buffer False
-- |saves current file if a file is active or calls "save as"-dialog
--
-- Consistency fix: compare against the 'unsavedDoc' constant (the same
-- value 'actionEmptyGUI' installs as title placeholder) instead of the
-- duplicated string literal "Unsaved document".
actionSave :: AstAction ()
actionSave ref = do
  file <- getCurrentFile ref
  text <- getText =<< getSourceBuffer ref
  if file == unsavedDoc
    then actionDlgSave ref
    else do
      deleteStar ref
      writeFile file text
-- |sets up a simple filechooser dialog, whose response to Ok
-- is given by argument function
actionMkDialog :: FileChooserAction -> (FileChooserDialog -> t -> IO ()) -> t -> IO()
actionMkDialog fileChooser actionOnOkay ref = do
  dia <- fileChooserDialogNew
    (Just ("astview" :: String))
    Nothing
    fileChooser
    []
  -- NOTE(review): the stockOpen button label is used even when the
  -- dialog performs a save action -- confirm whether this is intended
  zipWithM_ (dialogAddButton dia) [stockCancel ,stockOpen]
    [ResponseCancel,ResponseOk]
  widgetShowAll dia
  response <- dialogRun dia
  case response of
    ResponseCancel -> return ()
    ResponseOk -> actionOnOkay dia ref
    _ -> return ()
  widgetHide dia

-- |launches the "save as"-dialog
actionSaveAs :: AstAction ()
actionSaveAs = actionMkDialog FileChooserActionSave onOkay where
  onOkay dia ref = do
    maybeFile <- fileChooserGetFilename dia
    case maybeFile of
      Nothing-> return ()
      Just file -> do
        setCurrentFile file ref
        -- NOTE(review): unlike actionDlgSave this neither resets the
        -- changed flag nor updates the window title -- confirm
        writeFile file =<< getText =<< getSourceBuffer ref
-- |removes @*@ from window title if existing and updates state
deleteStar :: AstAction ()
deleteStar ref = do
  w <- getWindow ref
  (t :: String) <- get w windowTitle
  bufferChanged <- getChanged ref
  -- 'drop 1' is total, unlike the previous 'tail', so an unexpectedly
  -- empty title can no longer crash the gui
  when bufferChanged $ set w [windowTitle := drop 1 t]
  setChanged False ref
-- -------------------------------------------------------------------
-- ** editmenu menu actions
-- -------------------------------------------------------------------
-- |moves selected source to clipboard (cut = copy, then delete)
actionCutSource :: AstAction ()
actionCutSource ref =
  actionCopySource ref >> actionDeleteSource ref
-- |copies selected source to clipboard
actionCopySource :: AstAction ()
actionCopySource ref = do
  buffer <- getSourceBuffer ref
  (start,end) <- textBufferGetSelectionBounds buffer
  clipBoard <- clipboardGet selectionClipboard
  s :: String <- textBufferGetText buffer start end True
  clipboardSetText clipBoard s

-- |pastes text from clipboard at current cursor position
actionPasteSource :: AstAction ()
actionPasteSource ref = do
  buffer <- getSourceBuffer ref
  clipBoard <- clipboardGet selectionClipboard
  -- the clipboard is queried asynchronously; the callback inserts the
  -- retrieved text (if any) at the cursor
  clipboardRequestText clipBoard (insertAt buffer) where
    insertAt :: SourceBuffer -> Maybe String -> IO ()
    insertAt buff m = whenJust m (textBufferInsertAtCursor buff)
-- |deletes selected source, discarding gtk's success flag
actionDeleteSource :: AstAction ()
actionDeleteSource ref = do
  buffer <- getSourceBuffer ref
  _ <- textBufferDeleteSelection buffer False False
  return ()
-- |launches a dialog which displays the text position associated to
-- last clicked tree node.
actionJumpToTextLoc :: AstAction ()
actionJumpToTextLoc ref = do
  maybeLang <- getLanguage ref
  case maybeLang of
    Nothing -> return ()
    Just lang -> do
      astOrError <- actionGetAst lang ref
      case astOrError of
        Left _ -> return ()
        Right (Ast ast) -> do
          gtkPath <- getPath ref
          unless (null gtkPath) $ do
            -- drop the first path component -- NOTE(review): apparently
            -- the gtk path carries an extra root element; confirm
            let astPath = tail gtkPath
                loc = ast `at` astPath
            case loc of
              Nothing -> return ()
              Just l -> actionSelectSrcLoc l ref

-- |selects the given source location in gui textview
actionSelectSrcLoc :: SrcSpan -> AstAction ()
actionSelectSrcLoc (SrcSpan (SrcPos bl br) (SrcPos el er)) ref = do
  textBuffer <- getSourceBuffer ref
  -- we need to subtract 1 since lines and offsets start with 0;
  -- the max guards against a negative offset
  let getIter line row = textBufferGetIterAtLineOffset textBuffer (line-1) (0 `max` row-1)
  begin <- getIter bl br
  end <- getIter el er
  textBufferSelectRange textBuffer begin end
-- |resolves a path in the tree to the source span stored at the
-- addressed node; Nothing when the path leaves the tree
at :: Tree AstNode -> Path -> Maybe SrcSpan
at (Node n _ ) []     = srcspan n
at (Node _ cs) (i:is) = child i cs >>= (`at` is) where
  -- safe list indexing: Nothing for negative or out-of-range indices
  child :: Int -> [a] -> Maybe a
  child idx xs = lookup idx (zip [0..] xs)
-- |returns the current cursor position in a source view.
-- return type: (line,row)
getCursorPosition :: AstAction SrcSpan
getCursorPosition ref = do
  (startIter,endIter) <- textBufferGetSelectionBounds =<< getSourceBuffer ref
  lineStart <- textIterGetLine startIter
  rowStart <- textIterGetLineOffset startIter
  lineEnd <- textIterGetLine endIter
  rowEnd <- textIterGetLineOffset endIter
  -- gtk counts from 0, the span representation from 1
  -- NOTE(review): `span` here comes from an Astview import, Prelude's
  -- span is hidden at the top of this module
  return $ span (lineStart+1) (rowStart+1) (lineEnd+1) (rowEnd+1)
-- |opens tree position associated with current cursor position.
actionJumpToSrcLoc :: AstAction ()
actionJumpToSrcLoc ref =
  whenJustM (actionGetAssociatedPath ref) $ \p -> activatePath p ref
-- |returns the shortest path in tree which is associated with the
-- current selected source location.
actionGetAssociatedPath :: AstAction (Maybe Path)
actionGetAssociatedPath ref = do
  sele <- getCursorPosition ref
  maybeLang <- getLanguage ref
  case maybeLang of
    Nothing -> return Nothing
    Just lang -> do
      astOrError <- actionGetAst lang ref
      case astOrError of
        -- no language or unparsable source: nothing to associate
        Left _ -> return Nothing
        Right ast ->
          return $ smallestSrcLocContainingCursorPos sele ast

-- |select tree path
activatePath :: Path -> AstAction ()
activatePath p ref = do
  view <- getTreeView ref
  -- expand the ancestors, then the node itself, then move the cursor
  treeViewExpandToPath view p
  treeViewExpandRow view p True
  treeViewSetCursor view p Nothing
-- -------------------------------------------------------------------
-- ** other actions
-- -------------------------------------------------------------------

-- | adds '*' to window title if file changed and sets state
actionBufferChanged :: AstAction ()
actionBufferChanged ref = do
  w <- fmap window (getGui ref)
  t <- get w windowTitle
  c <- getChanged ref
  -- prepend the star only on the first change, never twice
  unless c $ set w [windowTitle := '*':t]
  setChanged True ref

-- | destroys window widget
actionQuit :: AstAction ()
actionQuit ref = do
  isChanged <- getChanged ref
  -- offer to save pending changes before tearing the window down
  when isChanged $ actionQuitWorker ref
  actionQuitForce ref
-- |ends program with force: destroys the main window unconditionally
actionQuitForce :: AstAction ()
actionQuitForce ref = getGui ref >>= widgetDestroy . window
-- |asks the user whether pending changes should be saved before closing
actionQuitWorker :: AstAction ()
actionQuitWorker ref = do
  file <- getCurrentFile ref
  dialog <- messageDialogNew Nothing [] MessageQuestion ButtonsYesNo
    ("Save changes to document \""++takeFileName file ++ "\" before closing?")
  containerSetBorderWidth dialog 2
  widgetShowAll dialog
  response <- dialogRun dialog
  case response of
    ResponseYes -> actionSave ref
    -- any other answer (No, Escape, ...) discards the changes
    _ -> actionQuitForce ref
  widgetHide dialog

-- | launches open dialog
actionDlgOpen :: AstAction ()
actionDlgOpen = actionMkDialog FileChooserActionOpen onOkay where
  onOkay dia ref = whenJustM (fileChooserGetFilename dia) $ \file ->
    actionLoadHeadless file ref

-- | launches save dialog
-- NOTE(review): near-duplicate of actionSaveAs, but this one also
-- resets the changed flag and window title -- consider unifying
actionDlgSave :: AstAction ()
actionDlgSave = actionMkDialog FileChooserActionSave onOkay where
  onOkay dia ref = do
    maybeFile <- fileChooserGetFilename dia
    case maybeFile of
      Nothing-> return ()
      Just file -> do
        g <- getGui ref
        setChanged False ref
        setCurrentFile file ref
        writeFile file =<< getText =<< getSourceBuffer ref
        set (window g) [windowTitle := takeFileName file]

-- |applies current parser to sourcebuffer; a no-op when no language
-- can be determined
actionReparse :: AstAction ()
actionReparse ref =
  whenJustM (getLanguage ref) $ \l -> void $ actionParse l ref
-- |returns the path of the first selected tree row, or the empty path
-- when nothing is selected
actionGetPath :: AstAction Path
actionGetPath ref = do
  selection <- treeViewGetSelection =<< getTreeView ref
  rows <- treeSelectionGetSelectedRows selection
  case rows of
    []      -> return []
    (p : _) -> return p
-- -------------------------------------------------------------------
-- ** Helpers
-- -------------------------------------------------------------------
-- |runs the action on the wrapped value, does nothing on Nothing
-- (similar to @when@)
whenJust :: Monad m => Maybe a -> (a -> m ()) -> m ()
whenJust m action = maybe (return ()) action m
-- |similar to @whenJust@, but value is inside a monad
whenJustM :: Monad m => m(Maybe a) -> (a -> m ()) -> m ()
whenJustM val action = val >>= \m -> whenJust m action
-- |returns the complete text in given text buffer
getText :: TextBufferClass c => c -> IO String
getText tb = do
  start <- textBufferGetStartIter tb
  end <- textBufferGetEndIter tb
  textBufferGetText tb start end True

-- |uses the given string to set the title of given window with
-- suffix "-astview". Window titles should only be set by this
-- function, hence it replaces the corresponding gtk function.
windowSetTitleSuffix :: WindowClass w => w -> String -> IO ()
windowSetTitleSuffix win title = set win [windowTitle := title++" - astview" ]
-- |safe function to write files: withFile closes the handle on exit
-- and on exception, so the previous explicit hClose inside the
-- callback was redundant (a double close) and has been removed
writeFile :: FilePath -> String -> IO ()
writeFile f str = withFile f WriteMode (\h -> hPutStr h str)
|
jokusi/Astview
|
src/gui/Language/Astview/Gui/Actions.hs
|
mit
| 13,195
| 0
| 24
| 2,654
| 3,618
| 1,727
| 1,891
| -1
| -1
|
-- | Tests for Project Euler problem 12 (highly divisible triangular number).
module P012Spec where

import qualified P012 as P
import Test.Hspec

main :: IO ()
main = hspec spec

spec :: Spec
spec = do
  -- the triangular-number sequence itself
  describe "triangles" $
    it "三角数の数列" $
      take 10 P.triangles `shouldBe` [1,3,6,10,15,21,28,36,45,55]
  -- first triangular number with more than N divisors
  describe "solveBasic" $
    it "約数の個数がN個を超える最初の三角数" $
      map P.solveBasic [0,1,2,5] `shouldBe` [1,3,6,28]
|
yyotti/euler_haskell
|
test/P012Spec.hs
|
mit
| 392
| 0
| 10
| 74
| 157
| 90
| 67
| 13
| 1
|
module KMC.Config where
import KMC.Types
import qualified Data.Vector as V
import qualified KMC.Lattice as KL
import qualified Data.HashMap.Strict as Map
import qualified Data.Heap as H
-- xdim = 300
-- ydim = 300
-- simulation end time and output increment (units as used by the driver)
tEnd = 20.0
tIncrement = 1.0
--pressureA = 0.55
-- state constructors
-- NOTE(review): field order assumed to be State site energy occupancy
-- counter per KMC.Types -- confirm
emptyState e = State (Site 0) e Empty 0
occState e s = State (Site 0) e s 0
-- occupancy tags for the two adsorbate species, vacancies and wildcards
co = Occupied 0
o = Occupied 1
vac = Empty
wild = Wild
-- two points referencing each other, i.e. a nearest-neighbour pair
twoLinkedPoints = V.fromList [[1],[0]]
-- |simple cubic lattice of xdim*ydim sites, all initially empty
lattice xdim ydim = Lattice
  (KL.simpleCubicGraph xdim ydim)
  (V.generate (xdim*ydim) emptyState)
  (V.replicate (xdim*ydim) 0.0)
  (KL.simpleCubic xdim ydim)
-- |reaction bookkeeping for the three reactions below; all per-site
-- structures start out empty
rData pA xdim ydim = ReactionData
  (V.fromList [o_ads pA, co_ads pA, o_co])
  (Map.empty)
  (V.empty)
  (V.replicate (xdim*ydim) [])
  (H.empty)
  ([])
  (V.replicate (xdim*ydim) V.empty)
-- |O adsorption onto two adjacent vacant sites (rate scaled by 1 - pA)
o_ads pA = Reaction
  (twoLinkedPoints)
  (twoLinkedPoints)
  (V.fromList [occState 0 vac, occState 1 vac])
  (V.fromList [occState 0 o, occState 1 o])
  (V.replicate 2 1)
  ((1.0 - pA)*10/8)
  ([])
-- |CO adsorption onto a single vacant site (rate scaled by pA)
co_ads pA = Reaction
  (V.replicate 1 [])
  (V.replicate 1 [])
  (V.singleton (occState 0 vac))
  (V.singleton (occState 0 co))
  (V.singleton 0)
  (10 * pA)
  ([])
-- |surface reaction of adjacent O and CO, leaving both sites vacant
o_co = Reaction
  (twoLinkedPoints)
  (twoLinkedPoints)
  (V.fromList [occState 0 o, occState 1 co])
  (V.fromList [occState 0 vac, occState 1 vac])
  (V.replicate 2 1)
  (0.125 * 10.0^5)
  ([])
|
NaevaTheCat/KMC-haskell
|
src/KMC/Config.hs
|
mit
| 1,675
| 0
| 10
| 593
| 643
| 344
| 299
| 52
| 1
|
main :: IO ()
main = print (smallMult 2520) -- 2520 already covers the divisors 1..10
-- | Smallest positive number divisible by x and by every integer in
-- 11..20. With x = 2520 (= lcm [1..10]) this is the smallest number
-- evenly divisible by all of 1..20 (Project Euler 5): 232792560.
--
-- BUG FIX: the previous trial-division loop had base case
-- @go x 11 = x@, so divisor 11 was never tested and the wrong answer
-- 21162960 was produced; it was also extremely slow. Folding with lcm
-- is correct and immediate.
smallMult :: Int -> Int
smallMult x = foldr lcm x [11..20]
|
mre/the-coding-interview
|
problems/euler/5/nodivremainder.hs
|
mit
| 258
| 0
| 11
| 91
| 122
| 60
| 62
| 9
| 2
|
module ReverseCompliment
(reverseCompliment)
where
-- | Reverse complement of a DNA string: the sequence is reversed and
-- each base swapped with its partner (A<->T, C<->G); any other
-- character is left unchanged.
reverseCompliment :: String -> String
reverseCompliment = map complementBase . reverse
  where
    complementBase 'A' = 'T'
    complementBase 'T' = 'A'
    complementBase 'C' = 'G'
    complementBase 'G' = 'C'
    complementBase c   = c
|
brodyberg/LearnHaskell
|
Web/sqlitetest/Rosalind/3_ComplimentingDNA/ReverseCompliment.hs
|
mit
| 263
| 0
| 12
| 100
| 79
| 42
| 37
| 11
| 5
|
module Channel.BitcoinSpec (main, spec) where
import Helper
import System.Random
import qualified Data.ByteString.Lazy as L
import Channel.Command
import Channel.Event
import Channel.Bitcoin
main :: IO ()
main = hspec spec

spec :: Spec
spec = do
  describe "bitcoin" $ do
    it "makes Alonzo convert from and to BTC" $ do
      run "alonzo" [] mockedHttp (mkStdGen 0) (bitcoin $ Message "you" "alonzo: 1 BTC") `shouldReturn` [Say "1 BTC is 100.20 EUR (at rate 100.20)"]
  where
    -- stubbed http layer: serve the ticker fixture, fail on any other url
    mockedHttp "http://data.mtgox.com/api/1/BTCEUR/ticker" = L.readFile "test/fixtures/bitcoin.json"
    mockedHttp url = error ("unexpected request to " ++ url)
|
beni55/alonzo
|
test/Channel/BitcoinSpec.hs
|
mit
| 700
| 0
| 17
| 168
| 173
| 92
| 81
| 16
| 2
|
module Cabalg.Version where
import Data.Version (showVersion)
import Paths_cabalg (version)
-- | The package version from the Cabal-generated Paths_cabalg module,
-- rendered as a dotted string; the qualified reference avoids a clash
-- with this very binding.
version :: String
version = showVersion Paths_cabalg.version
|
dmalikov/cabalg
|
src/Cabalg/Version.hs
|
mit
| 175
| 0
| 6
| 38
| 39
| 23
| 16
| 5
| 1
|
{-# LANGUAGE BangPatterns #-}
-- | A reimplementation of Data.IntMap in terms of Data.WordMap.
module Data.IntMap.Bounded (
module Data.IntMap.Bounded.Lazy
, insertWith'
, insertWithKey'
, fold
, foldWithKey
) where
import Prelude hiding (foldr)
import Data.IntMap.Bounded.Lazy
import qualified Data.IntMap.Bounded.Strict as S
-- The four bindings below are deprecated compatibility shims for the
-- pre-0.5 Data.IntMap API; each simply forwards to the modern name.

-- | /Deprecated./ As of version 0.5, replaced by
-- 'Data.IntMap.Strict.insertWith'.
--
-- /O(log n)/. Same as 'insertWith', but the result of the combining function
-- is evaluated to WHNF before inserted to the map.
{-# INLINE insertWith' #-}
insertWith' :: (a -> a -> a) -> Key -> a -> IntMap a -> IntMap a
insertWith' = S.insertWith

-- | /Deprecated./ As of version 0.5, replaced by
-- 'Data.IntMap.Strict.insertWithKey'.
--
-- /O(log n)/. Same as 'insertWithKey', but the result of the combining
-- function is evaluated to WHNF before inserted to the map.
{-# INLINE insertWithKey' #-}
insertWithKey' :: (Key -> a -> a -> a) -> Key -> a -> IntMap a -> IntMap a
insertWithKey' = S.insertWithKey

-- | /Deprecated./ As of version 0.5, replaced by 'foldr'.
--
-- /O(n)/. Fold the values in the map using the given
-- right-associative binary operator. This function is an equivalent
-- of 'foldr' and is present for compatibility only.
{-# INLINE fold #-}
fold :: (a -> b -> b) -> b -> IntMap a -> b
fold = foldr

-- | /Deprecated./ As of version 0.5, replaced by 'foldrWithKey'.
--
-- /O(n)/. Fold the keys and values in the map using the given
-- right-associative binary operator. This function is an equivalent
-- of 'foldrWithKey' and is present for compatibility only.
{-# INLINE foldWithKey #-}
foldWithKey :: (Key -> a -> b -> b) -> b -> IntMap a -> b
foldWithKey = foldrWithKey
|
gereeter/bounded-intmap
|
src/Data/IntMap/Bounded.hs
|
mit
| 1,745
| 0
| 9
| 315
| 251
| 153
| 98
| 22
| 1
|
{-# LANGUAGE CPP #-}
module GHCJS.DOM.SQLTransactionErrorCallback (
#if (defined(ghcjs_HOST_OS) && defined(USE_JAVASCRIPTFFI)) || !defined(USE_WEBKIT)
module GHCJS.DOM.JSFFI.Generated.SQLTransactionErrorCallback
#else
#endif
) where
#if (defined(ghcjs_HOST_OS) && defined(USE_JAVASCRIPTFFI)) || !defined(USE_WEBKIT)
import GHCJS.DOM.JSFFI.Generated.SQLTransactionErrorCallback
#else
#endif
|
plow-technologies/ghcjs-dom
|
src/GHCJS/DOM/SQLTransactionErrorCallback.hs
|
mit
| 394
| 0
| 5
| 33
| 33
| 26
| 7
| 4
| 0
|
import System.Exit
import System.Environment
import System.IO
import System.Console.GetOpt
import System.Directory
import System.Posix.Files (getFileStatus, isDirectory, fileExist)
import Control.Monad (when, sequence)
import qualified Control.Exception as Ex
import Data.Char (isSpace)
import Data.List (isSuffixOf)
import Text.VCard
import Text.VCard.Format.Directory
import qualified Data.ByteString.Lazy as B
import qualified Data.ByteString.Lazy.Char8 as CB
-- command line usage text
usageHeader = "Usage: pplqq [OPTIONS] DIRECTORY PATTERN"
usageString = usageInfo usageHeader options
-- print usage / an error message, then exit with a failure code
printUsage = putStrLn usageString >> exitFailure
printErrorMsg msg = putStrLn msg >> exitFailure

-- | supported command line flags
data Flag = Name
options :: [OptDescr Flag]
options = [ Option ['n'] ["name"] (NoArg Name) "searches the name" ]
-- | strips leading and trailing whitespace
trim :: String -> String
trim s = reverse (dropWhile isSpace (reverse (dropWhile isSpace s)))
-- | trims the directory name and guarantees a trailing slash
fixDirName dir
  | "/" `isSuffixOf` trimmed = trimmed
  | otherwise                = trimmed ++ "/"
  where trimmed = trim dir
-- | splits argv into (directory, pattern, flags); exits with usage
-- text on any other shape.
--
-- BUG FIX: the trailing-slash normalisation (fixDirName) was applied
-- to the PATTERN instead of the DIRECTORY, although the directory is
-- later concatenated directly with file names (see allVCards) and is
-- the argument that needs the trailing slash.
getArgsAndOpts :: [String] -> IO( String, String, [Flag] )
getArgsAndOpts argv =
  case getOpt RequireOrder options argv of
    ( o, [d,p], [] ) -> return (fixDirName d, p, o)
    _ -> printUsage
-- | aborts with an error message unless the argument exists and is a
-- directory (fixes the "direcoty" typo in the user-facing message)
checkDir :: String -> IO()
checkDir dir = do
  exists <- fileExist dir
  when (not exists) $ printErrorMsg (dir ++ " does not exist")
  isDir <- fmap isDirectory $ getFileStatus dir
  when (not isDir) $ printErrorMsg ( dir ++ " is not a directory" )
-- | reads and parses one .vcf file; "unsave" (sic: unsafe) because
-- parse failures escape as exceptions (they are caught in 'toVCards')
toVCards_unsave :: String -> IO( [VCard] )
toVCards_unsave filename = do
  handle <- openFile filename ReadMode
  input <- B.hGetContents handle
  -- NOTE(review): $! only forces WHNF of the result; with lazy
  -- hGetContents the handle stays open until the parse is fully
  -- consumed -- confirm readVCards is strict enough
  return $! readVCards filename (insertCarriageReturns input)
-- | does the file name carry the vcard extension?
isVCard :: String -> Bool
isVCard name = ".vcf" `isSuffixOf` name
-- | like 'toVCards_unsave', but any exception is reported and turned
-- into an empty result so one broken file cannot abort the whole scan
toVCards :: String -> IO( [VCard] )
toVCards path = Ex.catch ( toVCards_unsave path ) handler
  where handler = (\e -> ( print ("Error in file " ++ path ++ (show e) ) >> (return [] ) ) )::( Ex.SomeException -> IO [VCard] )
-- | parses every .vcf file directly inside the directory and
-- concatenates the results. The directory must exist (the program
-- exits otherwise) and its name must end in '/' because it is
-- concatenated directly with the plain names from getDirectoryContents.
allVCards :: String -> IO( [VCard] )
allVCards directory = do
  checkDir directory
  fileNames <- getDirectoryContents directory
  let fullFilePaths = map (directory ++) fileNames
  -- mapM replaces the non-idiomatic `sequence $ map ...`
  listOfLists <- mapM toVCards ( filter isVCard fullFilePaths )
  return $ concat listOfLists
-- split into lines after discarding any existing carriage returns
toLines bs = CB.lines (CB.filter (/= '\r') bs)
-- the CRLF terminator required by the vcard line format
nl = CB.pack "\r\n"
-- | normalise all line endings of the input to CRLF
insertCarriageReturns :: B.ByteString -> B.ByteString
insertCarriageReturns input = CB.intercalate nl (toLines input)
main :: IO ()
main = do
  ( dir, _pattern, _opts ) <- getArgs >>= getArgsAndOpts
  cards <- allVCards dir
  print (length cards)
  exitSuccess
|
axelGschaider/pplqq
|
pplqq.hs
|
mit
| 2,735
| 0
| 15
| 656
| 896
| 471
| 425
| 66
| 2
|
{-# LANGUAGE FlexibleContexts, NoMonomorphismRestriction #-}
{-# LANGUAGE TypeOperators, DataKinds #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TemplateHaskell #-}
module Control.Eff.Writer.Strict.Test (testGroups) where
import Test.HUnit hiding (State)
import Control.Eff
import Control.Eff.Writer.Strict
import Utils
import Test.Framework.TH
import Test.Framework.Providers.HUnit
-- all HUnit cases in this module, discovered via Template Haskell
testGroups = [ $(testGroupGenerator) ]

-- NOTE(review): asserts that the value extracted by runLastWriter is
-- the undefined one -- see Utils.assertUndefined for the exact check
case_Strict1_Writer_runLastWriter :: Assertion
case_Strict1_Writer_runLastWriter = let
  ((), Just m) = run $ runLastWriter $ mapM_ tell [undefined, ()]
  in
  assertUndefined (m :: ())

-- the writer effect must survive lifting through monadBaseControl
case_Strict1_Writer_monadBaseControl :: Assertion
case_Strict1_Writer_monadBaseControl = runLift (runListWriter act) @=? Just ((), [i])
  where
    i = 10 :: Int
    act = doThing (tell i)
|
suhailshergill/extensible-effects
|
test/Control/Eff/Writer/Strict/Test.hs
|
mit
| 812
| 0
| 12
| 110
| 195
| 114
| 81
| 20
| 1
|
{-# LANGUAGE TypeSynonymInstances #-}
module Main (main) where
import BenchCommon (Map(..), commonMain)
import qualified Data.ByteString.Char8 as C
import Data.Maybe (fromJust)
import qualified Data.Trie as T
import Prelude hiding (lookup)
-- the benchmarked structure: a bytestring trie with Int payloads
type DataTrie = T.Trie Int

-- | adapts Data.Trie to the common benchmark interface
instance Map DataTrie where
  empty = T.empty
  null = T.null
  insert = T.insert
  delete = T.delete
  -- partial: assumes the key is present (fromJust)
  lookup k = fromJust . T.lookup k
  keys = T.keys
  -- every prefix of k (including k itself) that is a member of the
  -- trie, collected by repeatedly chopping the last byte; submap
  -- narrows the search space on each hit
  prefixes k m = prefixes' k m []
    where
      prefixes' k m a
        | C.null k = a
        | T.member k m = prefixes' (C.take (C.length k - 1) k) (T.submap k m) (k:a)
        | otherwise = prefixes' (C.take (C.length k - 1) k) m a

main :: IO ()
main = commonMain (empty :: DataTrie)
|
cpettitt/haskell-ptree
|
test/perf/BenchTrie.hs
|
mit
| 765
| 0
| 15
| 220
| 300
| 161
| 139
| 22
| 1
|
-- C->Haskell Compiler: main module
--
-- Copyright (c) [1999..2005] Manuel M T Chakravarty
--
-- This file is free software; you can redistribute it and/or modify
-- it under the terms of the GNU General Public License as published by
-- the Free Software Foundation; either version 2 of the License, or
-- (at your option) any later version.
--
-- This file is distributed in the hope that it will be useful,
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- GNU General Public License for more details.
--
--- Description ---------------------------------------------------------------
--
-- Language: Haskell 98
--
-- This is the main module of the compiler. It sets the version, processes
-- the command line arguments, and controls the compilation process.
--
-- Usage:
-- ------
--
-- c2hs [ option... ] [header-file] binding-file
--
-- The compiler is supposed to emit a Haskell program that expands all hooks
-- in the given binding file.
--
-- File name suffix:
-- -----------------
--
-- Note: These also depend on suffixes defined in the compiler proper.
--
-- .h C header file
-- .i pre-processeed C header file
-- .hs Haskell file
-- .chs Haskell file with C->Haskell hooks (binding file)
-- .chi C->Haskell interface file
--
-- Options:
-- --------
--
-- -C CPPOPTS
-- --cppopts=CPPOPTS
-- Pass the additional options CPPOPTS to the C preprocessor.
--
-- Repeated occurences accumulate.
--
-- -c CPP
-- --cpp=CPP
-- Use the executable CPP to invoke CPP.
--
-- In the case of repeated occurences, the last takes effect.
--
-- -d TYPE
-- --dump=TYPE
-- Dump intermediate representation:
--
-- + if TYPE is `trace', trace the compiler phases (to stderr)
-- + if TYPE is `genbind', trace binding generation (to stderr)
-- + if TYPE is `ctrav', trace C declaration traversal (to stderr)
-- + if TYPE is `chs', dump the binding file (insert `.dump' into the
-- file name to avoid overwriting the original file)
--
-- -h, -?
-- --help
-- Dump brief usage information to stderr.
--
-- -i DIRS
-- --include=DIRS
-- Search the colon separated list of directories DIRS when searching
-- for .chi files.
--
-- -k
-- --keep
-- Keep the intermediate file that contains the pre-processed C header
-- (it carries the suffix `.i').
--
-- -l
-- --copy-library
-- Copies the library module `C2HS' into the same directory where the
-- generated code from the binding file is placed.
--
-- -o FILE
-- --output=FILE
-- Place output in file FILE.
--
-- If `-o' is not specified, the default is to put the output for
-- `source.chs' in `source.hs' in the same directory that contains the
-- binding file. If specified, the emitted C header file is put into
-- the same directory as the output file. The same holds for
-- C->Haskell interface file. All generated files also share the
-- basename.
--
-- -p PLATFORM
-- --platform=PLATFORM
-- Generate output for the given PLATFORM. By default we generate
-- output for the platform that c2hs executes on.
--
-- -t PATH
-- --output-dir=PATH
-- Place generated files in the directory PATH.
--
-- If this option as well as the `-o' option is given, the basename of
-- the file specified with `-o' is put in the directory specified with
-- `-t'.
--
-- -v,
-- --version
-- Print (on standard output) the version and copyright
-- information of the compiler (before doing anything else).
--
--- TODO ----------------------------------------------------------------------
--
module Main (main)
where
-- standard libraries
import Data.List (intersperse, partition)
import Control.Monad (when, unless)
import Data.Version (showVersion)
import System.Console.GetOpt
(ArgOrder(..), OptDescr(..), ArgDescr(..), usageInfo, getOpt)
import qualified System.FilePath as FilePath
(takeExtension, dropExtension, takeBaseName)
import System.FilePath ((<.>), (</>))
import System.IO (stderr, openFile, IOMode(..))
import System.IO.Error (ioeGetErrorString, ioeGetFileName)
import System.Process (runProcess, waitForProcess)
-- c2hs modules
import C2HS.State (CST, runC2HS, fatal, fatalsHandledBy,
SwitchBoard(..), Traces(..), setTraces,
traceSet, setSwitch, getSwitch, putTraceStr)
import qualified System.CIO as CIO
import C2HS.C (hsuffix, isuffix, loadAttrC)
import C2HS.CHS (loadCHS, dumpCHS, hssuffix, chssuffix, dumpCHI)
import C2HS.Gen.Header (genHeader)
import C2HS.Gen.Bind (expandHooks)
import C2HS.Version (versnum, version, copyright, disclaimer)
import C2HS.Config (cppopts, libfname, PlatformSpec(..),
defaultPlatformSpec, platformSpecDB)
import qualified C2HS.Config as CConf
import Paths_c2hs (getDataDir)
-- | wrapper running the compiler
--
main :: IO ()
main = runC2HS compile

-- option handling
-- ===============

-- | header is output in case of help, before the descriptions of the options;
-- errTrailer is output after an error message
--
header :: String
header =
  version ++ "\n" ++ copyright ++ "\n" ++ disclaimer
  ++ "\n\nUsage: c2hs [ option... ] [header-file] binding-file\n"

-- help text shown after the generated option list / after option errors
trailer, errTrailer :: String
trailer = "\n\
\The header file must be a C header file matching the given \
\binding file.\n\
\The dump TYPE can be\n\
\ trace -- trace compiler phases\n\
\ genbind -- trace binding generation\n\
\ ctrav -- trace C declaration traversal\n\
\ chs -- dump the binding file (adds `.dump' to the name)\n"
errTrailer = "Try the option `--help' on its own for more information.\n"
-- | supported option types
--
data Flag = CPPOpts String -- ^ additional options for C preprocessor
          | CPP String -- ^ program name of C preprocessor
          | Dump DumpType -- ^ dump internal information
          | Help -- ^ print brief usage information
          | Keep -- ^ keep the .i file
          | Library -- ^ copy library module @C2HS@
          | Include String -- ^ list of directories to search .chi files
          | Output String -- ^ file where the generated file should go
          | Platform String -- ^ target platform to generate code for
          | OutDir String -- ^ directory where generated files should go
          | Version -- ^ print version information on stdout
          | NumericVersion -- ^ print numeric version on stdout
          | Error String -- ^ error occurred during processing of options
          deriving Eq

-- | the kinds of intermediate output selectable with @--dump@
data DumpType = Trace -- ^ compiler trace
              | GenBind -- ^ trace "C2HS.Gen.Bind"
              | CTrav -- ^ trace "C2HS.C.CTrav"
              | CHS -- ^ dump binding file
              deriving Eq
-- | option description suitable for "Distribution.GetOpt"
--
-- (one entry per 'Flag' constructor, in the order of the usage text)
options :: [OptDescr Flag]
options = [
  Option ['C']
         ["cppopts"]
         (ReqArg CPPOpts "CPPOPTS")
         "pass CPPOPTS to the C preprocessor",
  Option ['c']
         ["cpp"]
         (ReqArg CPP "CPP")
         "use executable CPP to invoke C preprocessor",
  Option ['d']
         ["dump"]
         (ReqArg dumpArg "TYPE")
         "dump internal information (for debugging)",
  Option ['h', '?']
         ["help"]
         (NoArg Help)
         "brief help (the present message)",
  Option ['i']
         ["include"]
         (ReqArg Include "INCLUDE")
         "include paths for .chi files",
  Option ['k']
         ["keep"]
         (NoArg Keep)
         "keep pre-processed C header",
  Option ['l']
         ["copy-library"]
         (NoArg Library)
         "copy `C2HS' library module in",
  Option ['o']
         ["output"]
         (ReqArg Output "FILE")
         "output result to FILE (should end in .hs)",
  Option ['p']
         ["platform"]
         (ReqArg Platform "PLATFORM")
         "platform to use for cross compilation",
  Option ['t']
         ["output-dir"]
         (ReqArg OutDir "PATH")
         "place generated files in PATH",
  Option ['v']
         ["version"]
         (NoArg Version)
         "show version information",
  Option []
         ["numeric-version"]
         (NoArg NumericVersion)
         "show version number"]
-- | convert argument of 'Dump' option
--
dumpArg :: String -> Flag
dumpArg arg = case arg of
  "trace"   -> Dump Trace
  "genbind" -> Dump GenBind
  "ctrav"   -> Dump CTrav
  "chs"     -> Dump CHS
  _         -> Error "Illegal dump type."
-- | main process (set up base configuration, analyse command line, and execute
-- compilation process)
--
-- * Exceptions are caught and reported
--
compile :: CST s ()
compile =
  do
    setup
    cmdLine <- CIO.getArgs
    case getOpt RequireOrder options cmdLine of
      -- no file arguments: only allowed for the stand-alone actions
      (opts, [] , [])
        | noCompOpts opts -> doExecute opts Nothing
      (opts, args, []) -> case parseArgs args of
        justargs@(Just _) -> doExecute opts justargs
        Nothing -> raiseErrs [wrongNoOfArgsErr]
      -- GetOpt reported parse errors
      (_ , _ , errs) -> raiseErrs errs
  where
    -- These options can be used without specifying a binding module. Then,
    -- the corresponding action is executed without any compilation to take
    -- place. (There can be --data and --output-dir (-t) options in addition
    -- to the action.)
    --
    aloneOptions = [Help, Version, NumericVersion, Library]
    --
    noCompOpts opts = let nonDataOpts = filter nonDataOrDir opts
                      in
                      (not . null) nonDataOpts &&
                      all (`elem` aloneOptions) nonDataOpts
      where
        nonDataOrDir (OutDir _) = False
        nonDataOrDir _ = True
    --
    -- split the positional arguments into the single .chs binding file
    -- and any number of .h header files (order independent)
    parseArgs :: [FilePath] -> Maybe (FilePath, [FilePath])
    parseArgs = parseArgs' [] Nothing
      where parseArgs' hs (Just chs) [] = Just (chs, reverse hs)
            parseArgs' hs Nothing (file:files)
              | FilePath.takeExtension file == '.':chssuffix
              = parseArgs' hs (Just file) files
            parseArgs' hs chs (file:files)
              | FilePath.takeExtension file == '.':hsuffix
              = parseArgs' (file:hs) chs files
            parseArgs' _ _ _ = Nothing
    --
    doExecute opts args = do
      execute opts args
        `fatalsHandledBy` failureHandler
      CIO.exitWith CIO.ExitSuccess
    --
    wrongNoOfArgsErr =
      "There must be exactly one binding file (suffix .chs),\n\
      \and optionally one or more header files (suffix .h).\n"
    --
    -- exception handler
    --
    failureHandler err =
      do
        let msg = ioeGetErrorString err
            fnMsg = case ioeGetFileName err of
                      Nothing -> ""
                      Just s -> " (file: `" ++ s ++ "')"
        CIO.hPutStrLn stderr (msg ++ fnMsg)
        CIO.exitWith $ CIO.ExitFailure 1
-- | set up base configuration
--
setup :: CST s ()
setup = do
  -- install the configured C preprocessor and its default options
  setCPP CConf.cpp
  addCPPOpts cppopts
-- | output error message(s) and terminate with a failure code
-- (never returns)
raiseErrs :: [String] -> CST s a
raiseErrs errs = do
  CIO.hPutStr stderr (concat errs)
  CIO.hPutStr stderr errTrailer
  CIO.exitWith $ CIO.ExitFailure 1
-- Process tasks
-- -------------

-- | execute the compilation task
--
-- * if 'Help' is present, emit the help message and ignore the rest
-- * if 'Version' is present, do it first (and only once)
-- * actual compilation is only invoked if we have one or two extra arguments
-- (otherwise, it is just skipped)
--
execute :: [Flag] -> Maybe (FilePath, [FilePath]) -> CST s ()
execute opts args | Help `elem` opts = help
                  | otherwise =
  do
    -- collapse repeated version flags to a single occurrence
    let (vs,opts') = partition (\opt -> opt == Version
                                     || opt == NumericVersion) opts
    mapM_ processOpt (atMostOne vs ++ opts')
    case args of
      Just (bndFile, headerFiles) -> do
        let bndFileWithoutSuffix = FilePath.dropExtension bndFile
        computeOutputName bndFileWithoutSuffix
        process headerFiles bndFileWithoutSuffix
          `fatalsHandledBy` die
      Nothing ->
        computeOutputName "." -- we need the output name for library copying
    copyLibrary
      `fatalsHandledBy` die
  where
    -- keeps only the last element of the list (if any)
    atMostOne = (foldl (\_ x -> [x]) [])
    --
    die ioerr =
      do
        name <- CIO.getProgName
        CIO.putStr $ name ++ ": " ++ ioeGetErrorString ioerr ++ "\n"
        CIO.exitWith $ CIO.ExitFailure 1
-- | emit help message
--
-- Prints the usage text, trailer, and the list of supported target
-- platforms (with the default marked) to stdout.
help :: CST s ()
help =
  do
    CIO.putStr (usageInfo header options)
    CIO.putStr trailer
    CIO.putStr $ "PLATFORM can be " ++ hosts ++ "\n"
    CIO.putStr $ "  (default is " ++ identPS defaultPlatformSpec ++ ")\n"
  where
    -- comma-separated list of every known platform identifier
    hosts = (concat . intersperse ", " . map identPS) platformSpecDB
-- | process an option
--
-- * 'Help' cannot occur
--
-- Dispatches each command-line flag to the switch-setting action it
-- stands for; 'Version'/'NumericVersion' print and return.
processOpt :: Flag -> CST s ()
processOpt (CPPOpts cppopt ) = addCPPOpts [cppopt]
processOpt (CPP cpp ) = setCPP cpp
processOpt (Dump dt ) = setDump dt
processOpt (Keep ) = setKeep
processOpt (Library ) = setLibrary
processOpt (Include dirs ) = setInclude dirs
processOpt (Output fname ) = setOutput fname
processOpt (Platform fname ) = setPlatform fname
processOpt (OutDir fname ) = setOutDir fname
processOpt Version = do
  CIO.putStrLn version
  platform <- getSwitch platformSB
  CIO.putStr " build platform is "
  CIO.print platform
processOpt NumericVersion = CIO.putStrLn (showVersion versnum)
processOpt (Error msg ) = abort msg
-- | emit error message and raise an error
--
-- Used for malformed command lines: prints the message and the standard
-- trailer to stderr, then raises a fatal error.
abort :: String -> CST s ()
abort msg = do
  CIO.hPutStrLn stderr msg
  CIO.hPutStr stderr errTrailer
  fatal "Error in command line options"
-- | Compute the base name for all generated files (Haskell, C header, and .chi
-- file)
--
-- * The result is available from the 'outputSB' switch
--
computeOutputName :: FilePath -> CST s ()
computeOutputName base = setSwitch pick
  where
    -- an explicitly requested output name (non-empty 'outputSB') wins;
    -- otherwise fall back to the binding file's base name
    pick sb | null (outputSB sb) = sb { outputSB = base }
            | otherwise          = sb
-- | Copy the C2HS library if requested
--
-- When the 'librarySB' switch is set, copies the library module from
-- the package data directory into the configured output directory.
copyLibrary :: CST s ()
copyLibrary =
  do
    outdir <- getSwitch outDirSB
    library <- getSwitch librarySB
    datadir <- CIO.liftIO getDataDir
    let libFullName = datadir </> libfname
        libDestName = outdir </> libfname
    when library $
      CIO.readFile libFullName >>= CIO.writeFile libDestName
-- set switches
-- ------------
-- | Append options for the C preprocessor
--
-- Options accumulate: later calls extend (never replace) the list.
addCPPOpts :: [String] -> CST s ()
addCPPOpts newOpts =
  setSwitch (\sb -> sb { cppOptsSB = cppOptsSB sb ++ newOpts })
-- | Select the executable used as the C preprocessor
--
setCPP :: FilePath -> CST s ()
setCPP path = setSwitch (\sb -> sb { cppSB = path })
-- | Enable one dump/trace category in the trace switches
--
setDump :: DumpType -> CST s ()
setDump dumpType = setTraces $ \ts ->
  case dumpType of
    Trace   -> ts { tracePhasesSW  = True }
    GenBind -> ts { traceGenBindSW = True }
    CTrav   -> ts { traceCTravSW   = True }
    CHS     -> ts { dumpCHSSW      = True }
-- | Request that the pre-processed header file be kept around
--
setKeep :: CST s ()
setKeep = setSwitch (\sb -> sb { keepSB = True })
-- | Request that the library module be copied into the output directory
--
setLibrary :: CST s ()
setLibrary = setSwitch (\sb -> sb { librarySB = True })
-- | set the search directories for .chi files
--
-- * Several -i flags are accumulated. Later paths have higher priority.
--
-- * The current directory is always searched last because it is the
-- standard value in the compiler state.
--
-- New paths are PREPENDED to the existing search path, which gives
-- them the higher priority described above.
setInclude :: String -> CST s ()
setInclude str = do
  let fp = makePath str ""
  setSwitch $ \sb -> sb {chiPathSB = fp ++ (chiPathSB sb)}
  where
    -- split a search-path string on ':' (or Windows-style "/:")
    -- separators, honouring backslash escapes and skipping spaces
    -- and empty components; 'path' accumulates the current component
    makePath ('\\':r:em) path = makePath em (path ++ ['\\',r])
    makePath (' ':r) path = makePath r path
    makePath (':':r) "" = makePath r ""
    makePath (':':r) path = path : makePath r ""
    makePath ('/':':':r) path = path : makePath r ""
    makePath (r:emain) path = makePath emain (path ++ [r])
    makePath "" "" = []
    makePath "" path = [path]
-- | set the output file name
--
-- Rejects (via 'raiseErrs', which exits) any name that does not end in
-- the Haskell suffix; stores the name without its extension.
setOutput :: FilePath -> CST s ()
setOutput fname = do
  when (FilePath.takeExtension fname /= '.':hssuffix) $
    raiseErrs ["Output file should end in .hs!\n"]
  setSwitch $ \sb -> sb {outputSB = FilePath.dropExtension fname}
-- | set platform
--
-- Looks the identifier up in the platform database; an unknown name is
-- a fatal error (raiseErrs exits the process).
setPlatform :: String -> CST s ()
setPlatform platform =
  case lookup platform platformAL of
    Nothing -> raiseErrs ["Unknown platform `" ++ platform ++ "'\n"]
    Just p -> setSwitch $ \sb -> sb {platformSB = p}
  where
    -- association list keyed on the textual platform identifier
    platformAL = [(identPS p, p) | p <- platformSpecDB]
-- | Select the directory that generated files are written to
--
setOutDir :: FilePath -> CST s ()
setOutDir dir = setSwitch (\sb -> sb { outDirSB = dir })
-- | Record the name of the header file the generated code refers to
--
setHeader :: FilePath -> CST s ()
setHeader file = setSwitch (\sb -> sb { headerSB = file })
-- compilation process
-- -------------------
-- | read the binding module, construct a header, run it through CPP, read it,
-- and finally generate the Haskell target
--
-- * the header file name (first argument) may be empty; otherwise, it already
--   contains the right suffix
--
-- * the binding file name has been stripped of the .chs suffix
--
process :: [FilePath] -> FilePath -> CST s ()
process headerFiles bndFile =
  do
    -- load the Haskell binding module
    --
    (chsMod , warnmsgs) <- loadCHS bndFile
    CIO.putStr warnmsgs
    traceCHSDump chsMod
    --
    -- extract CPP and inline-C embedded in the .chs file (all CPP and
    -- inline-C fragments are removed from the .chs tree and conditionals are
    -- replaced by structured conditionals)
    --
    (header', strippedCHSMod, headerwarnmsgs) <- genHeader chsMod
    CIO.putStr headerwarnmsgs
    --
    -- create new header file, make it #include `headerFile', and emit
    -- CPP and inline-C of .chs file into the new header
    --
    outFName <- getSwitch outputSB
    outDir <- getSwitch outDirSB
    let newHeader = outFName <.> chssuffix <.> hsuffix
        newHeaderFile = outDir </> newHeader
        preprocFile = FilePath.takeBaseName outFName <.> isuffix
    CIO.writeFile newHeaderFile $ concat $
      [ "#include \"" ++ headerFile ++ "\"\n"
      | headerFile <- headerFiles ]
      ++ header'
    --
    -- Check if we can get away without having to keep a separate .chs.h file
    --
    -- BUG FIX: this previously tested `null header', which named the
    -- top-level usage-info string `header' (never empty) instead of the
    -- generated fragments `header'' bound above, so the shortcut never fired.
    case headerFiles of
      [headerFile] | null header'
        -> setHeader headerFile -- the generated .hs file will directly
                                -- refer to this header rather than going
                                -- through a one-line .chs.h file.
      _ -> setHeader newHeader
    --
    -- run C preprocessor over the header
    --
    cpp <- getSwitch cppSB
    cppOpts <- getSwitch cppOptsSB
    let args = cppOpts ++ [newHeaderFile]
    tracePreproc (unwords (cpp:args))
    exitCode <- CIO.liftIO $ do
      preprocHnd <- openFile preprocFile WriteMode
      cppproc <- runProcess cpp args
        Nothing Nothing Nothing (Just preprocHnd) Nothing
      waitForProcess cppproc
    case exitCode of
      CIO.ExitFailure _ -> fatal "Error during preprocessing custom header file"
      _ -> return ()
    --
    -- load and analyse the C header file
    --
    (cheader, preprocMsgs) <- loadAttrC preprocFile
    CIO.putStr preprocMsgs
    --
    -- remove the pre-processed header and if we no longer need it, remove the
    -- custom header file too.
    --
    keep <- getSwitch keepSB
    unless keep $ do
      CIO.removeFile preprocFile
      -- same fix as above: the wrapper header is disposable exactly when
      -- genHeader produced no fragments of its own
      case headerFiles of
        [_headerFile] | null header'
          -> CIO.removeFile newHeaderFile
        _ -> return () -- keep it since we'll need it to compile the .hs file
    --
    -- expand binding hooks into plain Haskell
    --
    (hsMod, chi, hooksMsgs) <- expandHooks cheader strippedCHSMod
    CIO.putStr hooksMsgs
    --
    -- output the result
    --
    dumpCHS (outDir </> outFName) hsMod True
    dumpCHI (outDir </> outFName) chi -- different suffix will be appended
  where
    -- trace the exact preprocessor invocation when phase tracing is on
    tracePreproc cmd = putTraceStr tracePhasesSW $
      "Invoking cpp as `" ++ cmd ++ "'...\n"
    -- dump the parsed CHS tree when the corresponding switch is set
    traceCHSDump mod' = do
      flag <- traceSet dumpCHSSW
      when flag $
        (do
           CIO.putStr ("...dumping CHS to `" ++ chsName
                       ++ "'...\n")
           dumpCHS chsName mod' False)
    chsName = FilePath.takeBaseName bndFile <.> "dump"
|
jrockway/c2hs
|
src/Main.hs
|
gpl-2.0
| 21,478
| 0
| 17
| 6,368
| 4,056
| 2,210
| 1,846
| 323
| 9
|
{-# LANGUAGE OverloadedStrings, PackageImports, TupleSections #-}
module Robotics.Thingomatic.Monad where
import Robotics.Thingomatic.Points
import Robotics.Thingomatic.Config
import "monads-fd" Control.Monad.State
import "monads-fd" Control.Monad.Trans
import qualified Data.ByteString as B
type ByteString = B.ByteString
-- | Mutable pieces of the printer driver: the sink that emitted bytes
-- are sent to, the current feed rate, and the machine configuration.
data PrinterState = PState {output::ByteString->IO (), -- ^ sink for emitted gcode bytes
                            feedrate::Double,          -- ^ current feed rate
                            conf::PrinterConfig        -- ^ machine configuration
                           }
-- | Printing computations: a head position ('Point3') plus the
-- 'PrinterState', threaded through IO.
type Print = StateT (Point3,PrinterState) IO
-- | The head's current location (first half of the state pair).
getLocation::Print Point3
getLocation = gets fst
-- | The current 'PrinterState' (second half of the state pair).
getPrinterState::Print PrinterState
getPrinterState = gets snd
-- | Replace the stored head location, leaving the printer state alone.
setLocation::Point3->Print ()
setLocation newLoc = do
  (_, printerState) <- get
  put (newLoc, printerState)
-- | Replace the stored printer state, leaving the location alone.
setPrinterState::PrinterState->Print ()
setPrinterState newState = do
  (loc, _) <- get
  put (loc, newState)
-- | Send a chunk of bytes to the configured output action.
emit::ByteString->Print ()
emit bytes = do
  st <- getPrinterState
  liftIO (output st bytes)
-- | Run a printing program from the origin with feed rate 1000, sending
-- all emitted bytes to the given IO action.  (Renamed locals so neither
-- 'Prelude.init' nor the 'conf' field accessor is shadowed.)
runWithIOAction::PrinterConfig->(ByteString->IO ())->Print a->IO a
runWithIOAction cfg sink program = evalStateT program startState
  where startState = ((0,0,0), PState {output = sink, feedrate = 1000, conf = cfg})
-- | Read the current feed rate out of the printer state.
getFeedrate::Print Double
getFeedrate = do
  st <- getPrinterState
  return (feedrate st)
-- | Overwrite the feed rate, keeping the rest of the printer state.
setFeedrate::Double->Print ()
setFeedrate newRate = do
  st <- getPrinterState
  setPrinterState st { feedrate = newRate }
-- | Run an action at a temporary feed rate, restoring the previous
-- rate afterwards and returning the action's result.
withRate::Double->Print a->Print a
withRate tempRate action = do
  savedRate <- getFeedrate
  setFeedrate tempRate
  result <- action
  setFeedrate savedRate
  return result
-- | Feed the current machine configuration to a printing action.
withConfig::(PrinterConfig->Print a)->Print a
withConfig action = do
  cfg <- fmap conf getPrinterState
  action cfg
-- | The current machine configuration.
getConfig = withConfig return
|
matthewSorensen/weft
|
Robotics/Thingomatic/Monad.hs
|
gpl-3.0
| 1,616
| 0
| 11
| 337
| 531
| 284
| 247
| 39
| 1
|
{-# LANGUAGE RankNTypes, FlexibleContexts #-}
module Tests.AssertionSafety where
import Logic.SecPAL.AssertionSafety
import Logic.SecPAL.Language
import Logic.General.Pretty
import Text.Parsec
import Logic.SecPAL.Parser
import Data.Functor.Identity
import Tests.Testable
-- | All flatness tests, in the order they are reported.
testFlatness :: [Test]
testFlatness = [ testIsFlat1
               , testIsFlat2
               ]
-- | Run a parser over an input, crashing (via 'error') on parse
-- failure; only used to construct test fixtures.
make :: forall s t c. Stream s Identity t => Parsec s () c -> s -> c
make p input = either (error . show) id (parse p "" input)
-- | Parse a SecPAL fact fixture (crashes on malformed input).
makeFact :: String -> Fact
makeFact = make pFact
-- | Parse a SecPAL assertion fixture without the safety check applied.
makeAssertion :: String -> Assertion
makeAssertion = make pAssertionUnsafe
-- | A plain fact is flat.
testIsFlat1 :: Test
testIsFlat1 =
  let secpal = makeFact "Bob can-read(f)"
  in
  Test{ description=pShow secpal, result=test .flat $ secpal }
-- | A nested can-say fact is not flat.
testIsFlat2 :: Test
testIsFlat2 =
  let secpal = makeFact "Charlie can-say 0 Bob can-read(f)"
  in
  Test{ description=pShow secpal, result=test . not . flat $ secpal }
-- | Assertions expected to pass the safety check.
testSafe :: [Test]
testSafe = [ testSafe1
           , testSafe2
           , testSafe3
           , testSafe4
           , testSafe5
           , testSafe6
           ]
-- | Assertions expected to FAIL the safety check.
testUnsafe :: [Test]
testUnsafe = [ testUnSafe1
             , testUnSafe2
             , testUnSafe3
             , testUnSafe4
             , testUnSafe5
             ]
-- Positive safety cases: each fixture below should satisfy 'safe'.
testSafe1 :: Test
testSafe1 =
  let secpal = makeAssertion "A says B can-read(Foo);"
  in Test{ description=pShow secpal, result=test . safe $ secpal }
testSafe2 :: Test
testSafe2 =
  let secpal = makeAssertion "A says B can-read(Foo) if B can(x,y);"
  in Test{description=pShow secpal, result=test . safe $ secpal }
testSafe3 :: Test
testSafe3 =
  let secpal = makeAssertion "A says B can-read(Foo) if B can(x,y): ! x = y;"
  in Test{description=pShow secpal, result=test . safe $ secpal }
testSafe4 :: Test
testSafe4 =
  let secpal = makeAssertion "A says B can(x,y) if B can(x,y);"
  in Test{description=pShow secpal, result=test . safe $ secpal }
testSafe5 :: Test
testSafe5 =
  let secpal = makeAssertion "A says z can(x,y) if z can(x,Foo), z can-read(y);"
  in Test{description=pShow secpal, result=test . safe $ secpal }
testSafe6 :: Test
testSafe6 =
  let secpal = makeAssertion "A says B can-say 0 x can(y,z);"
  in Test{description=pShow secpal, result=test . safe $ secpal }
-- Negative safety cases: each fixture below should be rejected by 'safe'.
testUnSafe1 :: Test
testUnSafe1 =
  let secpal = makeAssertion "A says B can(x,Foo);"
  in Test{description=pShow secpal, result=test . not . safe $ secpal }
testUnSafe2 :: Test
testUnSafe2 =
  let secpal = makeAssertion "A says z can-read(Foo) if B can(x,y);"
  in Test{description=pShow secpal, result=test . not . safe $ secpal }
testUnSafe3 :: Test
testUnSafe3 =
  let secpal = makeAssertion "A says B can-read(Foo) if B can(x,y): ! w = y;"
  in Test{description=pShow secpal, result=test . not . safe $ secpal }
testUnSafe4 :: Test
testUnSafe4 =
  let secpal = makeAssertion "A says B can(x,y) if B can-say 0 C can(x,y);"
  in Test{description=pShow secpal, result=test . not . safe $ secpal }
testUnSafe5 :: Test
testUnSafe5 =
  let secpal = makeAssertion "A says w can-say 0 x can(y,z);"
  in Test{description=pShow secpal, result=test . not . safe $ secpal }
-- The assertion context from ESSoS paper
-- | Safety checks over the worked app-store example; every assertion
-- in the paper's context is expected to be safe.
testESSoS :: [Test]
testESSoS = [ agTest1
            , agTest2
            , agTest3
            , agTest4
            , agTest5
            , agTest6
            , agTest7
            ]
agTest1 :: Test
agTest1 =
  let secpal = makeAssertion "anyone says app meets(policy) if evidence shows-meets(app, policy);"
  in Test{description=pShow secpal, result=test . safe $ secpal }
agTest2 :: Test
agTest2 =
  let secpal = makeAssertion "Phone says app is-installable if app meets(NotMalware), app meets(NoInfoLeaks);"
  in Test{description=pShow secpal, result = test . safe $ secpal }
agTest3 :: Test
agTest3 =
  let secpal = makeAssertion "Phone says Google can-say inf app meets(NotMalware);"
  in Test{description=pShow secpal, result=test . safe $ secpal }
agTest4 :: Test
agTest4 =
  let secpal = makeAssertion "Google says AVChecker can-say 0 app meets(NotMalware);"
  in Test{description=pShow secpal, result=test . safe $ secpal }
agTest5 :: Test
agTest5 =
  let secpal = makeAssertion "Phone says NILInferer can-say 0 app meets(NoInfoLeaks);"
  in Test{description=pShow secpal, result=test . safe $ secpal }
agTest6 :: Test
agTest6 =
  let secpal = makeAssertion "AVChecker says Game meets(NotMalware);"
  in Test{description=pShow secpal, result=test . safe $ secpal }
agTest7 :: Test
agTest7 =
  let secpal = makeAssertion "NILInferer says Evidence shows-meets(Game, Policy);"
  in Test{description=pShow secpal, result=test . safe $ secpal }
|
bogwonch/SecPAL
|
tests/Tests/AssertionSafety.hs
|
gpl-3.0
| 4,735
| 1
| 11
| 1,081
| 1,292
| 701
| 591
| 121
| 2
|
module P24Anagram where
import Data.Char (isSpace)
import Data.List (sort)
-- | Prompt for two strings and report whether they are anagrams of
-- each other (whitespace ignored, case-sensitive).
main :: IO ()
main = do
  putStrLn "Enter 2 strings and I'll tell you if they are anagrams:"
  putStr "First: "
  xs <- getLine
  putStr "Second: "
  ys <- getLine
  -- the closing quote lives in each branch so the sentence reads correctly
  putStrLn $ "'" ++ xs ++ "' and '" ++ ys ++ if xs `isAnagram` ys
                                                then "' are anagrams."
                                                else "' are not anagrams."
-- | Two strings are anagrams when, ignoring whitespace, they contain
-- the same characters with the same multiplicities (case-sensitive).
--
-- Simplified: the standard-library 'sort' replaces the hand-rolled
-- merge sort, and the separate length check is dropped because equal
-- sorted lists necessarily have equal lengths.
isAnagram :: String -> String -> Bool
isAnagram xs ys = canonical xs == canonical ys
  where
    -- canonical form: strip whitespace, then sort; two strings are
    -- anagrams exactly when their canonical forms coincide
    canonical = sort . filter (not . isSpace)
-- | Do two lists contain the same number of elements?
--
-- Walks both lists in lock-step instead of computing both lengths in
-- full, so it stops as soon as the shorter list runs out.
sameLength :: [a] -> [a] -> Bool
sameLength [] [] = True
sameLength (_:xs) (_:ys) = sameLength xs ys
sameLength _ _ = False
-- | Merge two lists, each already ordered by the predicate @p@, into a
-- single ordered list.  Ties keep the left-hand element first.
merge :: (a -> a -> Bool) -> [a] -> [a] -> [a]
merge p = go
  where
    go [] bs = bs
    go as [] = as
    go as@(a:as') bs@(b:bs')
      | a `p` b   = a : go as' bs
      | otherwise = b : go as bs'
-- | Deal a list's elements alternately into two sublists (evens to the
-- first, odds to the second, by position).
split :: [a] -> ([a],[a])
split []         = ([],[])
split [only]     = ([only],[])
split (a:b:rest) = (a:as, b:bs)
  where
    (as,bs) = split rest
-- | Merge sort ordered by the predicate @p@.  The merge and split
-- steps are inlined as local helpers ('fuse' and 'halve'), mirroring
-- the top-level 'merge' and 'split'.
mergeSort :: (Ord a,Eq a) => (a -> a -> Bool) -> [a] -> [a]
mergeSort p = go
  where
    go []  = []
    go [x] = [x]
    go xs  = fuse (go front) (go back)
      where (front, back) = halve xs
    -- merge two p-ordered lists (same algorithm as 'merge')
    fuse [] bs = bs
    fuse as [] = as
    fuse as@(a:as') bs@(b:bs')
      | a `p` b   = a : fuse as' bs
      | otherwise = b : fuse as bs'
    -- alternate elements into two sublists (same algorithm as 'split')
    halve (a:b:rest) = let (as,bs) = halve rest in (a:as, b:bs)
    halve [a]        = ([a],[])
    halve []         = ([],[])
|
ciderpunx/57-exercises-for-programmers
|
src/P24Anagram.hs
|
gpl-3.0
| 1,337
| 0
| 11
| 441
| 615
| 327
| 288
| 39
| 2
|
{- |
Module : $Header$
Description : Module to control random number generation
Copyright : (c) Michal Parusinski
License : GPLv3
Maintainer : mparusinski@gmail.com
Stability : experimental
Portability : portable
Random-number generators built on 'System.Random': a stateful
'Generator' over 'StdGen', a bounded variant, and helpers for
reseeding the global generator.
-}
module Generator.RandomGenerator where
import Generator.Generator
import System.Random
import Control.Monad.State
-- | A 'Generator' that draws one value from the threaded 'StdGen'
-- state and stores the advanced generator back for the next draw.
simpleRandGenerator :: (Integral a, Random a) => Generator StdGen a
simpleRandGenerator
  = Generator $
      do randomGenerator <- get
         let (output, nextGenerator) = random randomGenerator
         put nextGenerator
         return output
-- | Restrict a generator's output to the inclusive range
-- [lowBound, upBound] by composing it with a bounding rule.
-- NOTE(review): `|>` and 'generateBoundedRule' come from
-- "Generator.Generator"; exact bounding semantics live there.
boundRandGenerator :: (Integral a) =>
                      Generator StdGen a -> a -> a -> Generator StdGen a
boundRandGenerator generator lowBound upBound
  = boundRule |> generator
  where boundRule = generateBoundedRule lowBound upBound
-- | Reseed the process-global random generator from a freshly drawn
-- random number.
updateIORandomGenerator :: IO ()
updateIORandomGenerator = do
  seed <- randomIO
  setStdGen (mkStdGen seed)
-- | Reseed the process-global random generator with a caller-supplied
-- seed (useful for reproducible runs).
updateIORandomGeneratorWithNum :: Int -> IO ()
updateIORandomGeneratorWithNum seed = setStdGen (mkStdGen seed)
|
mparusinski/Haskell-number-theory-library
|
Generator/RandomGenerator.hs
|
gpl-3.0
| 1,228
| 0
| 11
| 263
| 239
| 117
| 122
| 27
| 1
|
module Slash where
import Slash.Handler
import Data.Word
import Graphics.Vty
import System.Posix.IO (stdInput)
import System.Posix.Terminal
-- | The editor state: buffer text, cursor position, the active event
-- handler, the Vty terminal handle, and arbitrary user data.
data Slash a = Slash
    { slashContent :: String            -- ^ current buffer contents
    , point :: (Word, Word)             -- ^ cursor position
    , handler :: Handler (Slash a)      -- ^ event handler driving 'input'
    , vty :: Vty                        -- ^ terminal handle
    , userData :: a                     -- ^ caller-supplied state
    }
-- | Units of text that editing commands can operate on.
data TextUnit = Word
-- | Start the editor with initial user data and a handler; saves the
-- terminal attributes up front and restores them when the loop exits.
slash :: a -> Handler (Slash a) -> IO ()
slash u h = do
    oattrs <- getTerminalAttributes stdInput
    v <- mkVty
    input $ Slash "" (0, 0) h v u
    setTerminalAttributes stdInput oattrs Immediately
-- | Main event loop: redraw the buffer, wait for a key event, and
-- either stop (Escape) or hand the event to the installed handler.
input :: Slash a -> IO ()
input s = do
    (update . vty $ s) $ pic_for_image (string def_attr (slashContent s))
    ev <- next_event (vty s)
    case ev of
        EvKey KEsc _ -> putStrLn "Bye"
        k -> input . handler s k $ s
-- | Apply a function to the user-supplied state held by the editor.
changeUserData :: (a -> a) -> Slash a -> Slash a
changeUserData f sl = sl { userData = f (userData sl) }
-- | The text currently shown on screen (the whole buffer, for now).
visibleContent :: Slash a -> String
visibleContent sl = slashContent sl
-- | Apply a function to the buffer text.
changeText :: (String -> String) -> Slash a -> Slash a
changeText f sl = sl { slashContent = f (slashContent sl) }
-- | Append a single character to the end of the buffer.
putKey :: Char -> Slash a -> Slash a
putKey ch = changeText (\txt -> txt ++ [ch])
-- | Append a string to the end of the buffer.
putString :: String -> Slash a -> Slash a
putString str = changeText (\txt -> txt ++ str)
-- | Remove the last @n@ characters from the buffer (the whole buffer
-- when @n@ exceeds its length; no-op for non-positive @n@).
delete :: Int -> Slash a -> Slash a
delete n = changeText (\txt -> take (length txt - n) txt)
-- | Remove the last @n@ units of text; for 'Word', drops the final
-- @n@ whitespace-separated words and rejoins with single spaces.
deleteBy :: TextUnit -> Int -> Slash a -> Slash a
deleteBy Word n =
  changeText (\txt -> unwords (reverse (drop n (reverse (words txt)))))
|
josuf107/Slash
|
Slash.hs
|
gpl-3.0
| 1,404
| 0
| 12
| 340
| 596
| 304
| 292
| 40
| 2
|
module Test where
import QHaskell.MyPrelude
import qualified Tests.ADTUntypedNamed as AUN
import qualified Tests.ADTUntypedDebruijn as AUD
import qualified Tests.GADTTyped as GTD
import qualified Tests.GADTFirstOrder as GFO
import qualified Tests.GADTHigherOrder as GHO
import qualified Tests.Conversion as CNV
import QHaskell.Normalisation ()
import QHaskell.Simplification ()
import QHaskell.CSE ()
import QHaskell.Expression.Utils.Reuse.GADTHigherOrder ()
import QHaskell.Expression.Utils.Equality.GADTHigherOrder ()
import QHaskell.Expression.Utils.Show.GADTFirstOrder ()
import QHaskell.Expression.Utils.Show.GADTHigherOrder ()
import QHaskell.Expression.Utils.ADTUntypedDebruijn ()
import QHaskell.Expression.Utils.ADTUntypedNamed ()
import QHaskell.Expression.Utils.GADTTyped ()
-- | Run every sub-suite and report a single pass/fail verdict.
main :: IO ()
main = print (if allPass then "Pass!" else "Fail!")
  where
    allPass = AUN.test && AUD.test && GTD.test
           && GFO.test && GHO.test && CNV.test
-- Todo:
-- * Check for exotic terms
-- * Weakening of HOAS
-- + getting rid of Tmp
-- * Bidirectional translations
-- * Using Type classes to do lifting and colifting for ADTValue and GADTValue
-- to reuse Vanilla Prelude
-- * check for all exhaustive patterns and transform them
-- * Conversion of GHO (x :-> y) ~> (MFS x -> MFS y)
-- * Free Fusion for Church / Ahman's Containers
-- * Supporting F
-- * Scope Proofing Quotations (e.g. Sam's misunderstanding) [EncodingTypes.txt]
-- * Support for Syntactic Sugar in Quotations (e.g. use TH-Desugar)
-- * Add sqrt and memorize (for Float) to Preludes that do not have it
-- * Write the code required for memorize
-- * Use macros for polymorphic datatypes
-- * Generate polymorphic datatypes
-- * Check all external imports are only via MyPrelude
-- * Shift Whole Pipeline to the compile time!
-- * Use let in FFT and CRC
-- * Use eta
|
shayan-najd/QHaskell
|
Test.hs
|
gpl-3.0
| 1,908
| 0
| 13
| 362
| 237
| 163
| 74
| 24
| 2
|
module IO.VideoInit where
--External libraries
import qualified Graphics.UI.SDL as SDL
--Local modules
import qualified IO.Color as Color
import qualified IO.CP437Display as CP437
import qualified IO.KeyInput as KeyInput
import qualified IO.Render as Render
--Eventually these should probably not be hard-coded.
-- Window dimensions in pixels.
width = 800
height = 600
--SDL initialization boilerplate
-- NOTE(review): this binding shadows Prelude.init; presumably intended
-- to be used qualified as IO.VideoInit.init — confirm at call sites.
init = SDL.withInit [SDL.InitVideo] videoSetup
--SDL initialization boilerplate
-- Creates a 24-bit software-surface window, enables unicode key input,
-- draws the initial image, and enters the key-input loop.
videoSetup = do screen <- SDL.setVideoMode width height 24 [SDL.SWSurface]
                SDL.setCaption "Test" ""
                SDL.enableUnicode True
                Render.display Render.image (0,0)
                KeyInput.waitForKeyPress KeyInput.initialState
|
MortimerMcMire315/rogue_nads
|
src/IO/VideoInit.hs
|
gpl-3.0
| 724
| 0
| 10
| 146
| 153
| 89
| 64
| 14
| 1
|
{-# OPTIONS_GHC -Wall #-}
{-# LANGUAGE FlexibleContexts, OverloadedStrings, ViewPatterns, ScopedTypeVariables,
Rank2Types, GeneralizedNewtypeDeriving
#-}
module HMumps.Runtime(RunState(..),
Env(..),
emptyState,
eval,
exec,
Normalizable(..),
setX, setY,
addX, addY,
RunMonad,
step
)
where
import Prelude hiding (lookup,break,map)
import Data.Char (chr)
import Data.String
import Data.Map
import Data.MValue hiding (join)
import qualified Data.MValue as M
import Data.MArray
import Data.Monoid
import HMumps.Routine
import HMumps.SyntaxTree
import HMumps.Parsers
import Control.Applicative hiding (empty)
import Control.Monad.State
import Control.Monad.Error
import System.Exit(exitWith, ExitCode(..))
-- | The interpreter monad: String-typed errors layered over a stack of
-- 'RunState' frames, on top of IO.
newtype RunMonad a = RM {runRunMonad :: ErrorT String (StateT [RunState] IO) a}
    deriving (Functor, Monad, MonadIO, MonadState [RunState], MonadError String)
-- | Run one 'RunMonad' action inside any outer monad that carries the
-- same frame stack: the stack is threaded in and back out, and errors
-- are surfaced as a 'Left' rather than thrown.
step :: (MonadState [RunState] m, MonadIO m) => RunMonad a -> m (Either String a)
step k
 = do
   s <- get
   (a, s') <- liftIO $ flip runStateT s $ runErrorT $ runRunMonad k
   put s'
   return a
-- |Anything you may ever want to strip indirection off of should
-- be an instance of this class
class Normalizable a where
    normalize :: a -> RunMonad a

-- Each instance follows the same recipe: evaluate the indirection
-- expression to a string, re-parse that string as the same syntactic
-- category, and recurse until no indirection remains; any non-indirect
-- value normalizes to itself.  Parse failures go through
-- 'normalizeError', which prints the error and aborts.
instance Normalizable Vn where
    normalize (IndirectVn expr subs)
     = do result <- eval expr
          let str = asString result
          case parse parseVn "Indirect VN" str of
            Right (IndirectVn expr' subs') -> normalize $ IndirectVn expr' (subs' ++ subs)
            Right (Lvn label subs') -> return $ Lvn label (subs' ++ subs)
            Right (Gvn label subs') -> return $ Gvn label (subs' ++ subs)
            Left err -> normalizeError err
    normalize x = return x
instance Normalizable WriteArg where
    normalize (WriteIndirect expr)
     = do result <- eval expr
          let str = asString result
          case parse parseWriteArg "Indirect Write Argument" str of
            Right wa -> case wa of
                          w@(WriteIndirect _) -> normalize w
                          w -> return w
            Left err -> normalizeError err
    normalize x = return x
instance Normalizable KillArg where
    normalize (KillIndirect expr)
     = do
       result <- eval expr
       let str = asString result
       case parse (mlist1 parseKillArg) "Indirect KILL argument" str of
         Right args -> do
            args' <- mapM normalize args
            case args' of
              [arg] -> return arg
              _ -> return $ KillArgList args'
         Left err -> normalizeError err
    normalize x = return x
instance Normalizable NewArg where
    normalize (NewIndirect expr)
     = do
       result <- eval expr
       let str = asString result
       case parse (mlist1 parseNewArg) "Indirect NEW argument" str of
         Right args -> do
           args' <- mapM normalize args
           case args' of
             [arg] -> return arg
             _ -> return $ NewArgList args'
         Left err -> normalizeError err
    normalize x = return x
instance Normalizable DoArg where
    normalize (DoArgIndirect expr)
     = do
       result <- eval expr
       let str = asString result
       case parse (mlist1 parseDoArg) "Indirect DO argument" str of
         Right args -> do
           args' <- mapM normalize args
           case args' of
             [arg] -> return arg
             _ -> return $ DoArgList args'
         Left err -> normalizeError err
    normalize x = return x
instance Normalizable Routineref where
    normalize (RoutinerefIndirect expr)
     = do
       result <- eval expr
       let str = asString result
       case parse parseRoutineRef "Indirect routine ref" str of
         Right ref -> normalize ref
         Left err -> normalizeError err
    normalize x = return x
instance Normalizable Label where
    normalize (LabelIndirect expr)
     = do
       str <- asString <$> eval expr
       case parse parseLabel "Indirect label" str of
         Right lbl -> normalize lbl
         Left err -> normalizeError err
    normalize x = return x
instance Normalizable GotoArg where
    normalize (GotoArgIndirect expr)
     = do
       str <- asString <$> eval expr
       case parse parseGotoArg "GOTO argument" str of
         Right arg -> return arg
         Left err -> normalizeError err
    normalize x = return x
-- | Report a parse error hit while resolving indirection, then abort
-- the computation (via 'fail' with an empty message).
normalizeError :: (Show a, MonadIO m) => a -> m b
normalizeError err = do
  liftIO (print err)
  fail ""
-- | Remove any KillArgList constructors, flattening arbitrarily nested
-- argument lists into a single flat list.
flattenKillArgs :: [KillArg] -> [KillArg]
flattenKillArgs = concatMap expand
  where
    expand (KillArgList nested) = flattenKillArgs nested
    expand arg                  = [arg]
-- | Remove any NewArgList constructors, flattening arbitrarily nested
-- argument lists into a single flat list.
flattenNewArgs :: [NewArg] -> [NewArg]
flattenNewArgs = concatMap expand
  where
    expand (NewArgList nested) = flattenNewArgs nested
    expand arg                 = [arg]
-- | One stack frame of the interpreter: an optional local-variable
-- environment plus the label tables used for DO and GOTO dispatch.
data RunState = RunState { env :: Maybe Env
                         , tags :: Routine
                         , gotoTags :: Routine
                         }
-- | The initial stack: a single empty frame.
emptyState :: [RunState]
emptyState = [emptyFrame]
-- | A frame with no environment and label tables that resolve nothing.
emptyFrame :: RunState
emptyFrame = RunState Nothing (\_ -> Nothing) (\_ -> Nothing)
-- | A local-variable environment: a tag controlling lookup fall-through
-- plus the name-to-entry map.
data Env = Env EnvTag (Map String EnvEntry)
-- | 'NormalEnv' lets lookups fall through to older frames on a miss;
-- 'StopEnv' (from exclusive NEW) makes a miss final.
data EnvTag = NormalEnv
            | StopEnv
  deriving Eq
-- | Either an actual array binding, or a redirect to continue the
-- search under (possibly) another name in older frames.
data EnvEntry = LookBack (Maybe Name)
              | Entry MArray
-- | Delete a local variable: walk down the frame stack, following
-- LookBack redirects, and remove the first real 'Entry' found; a miss
-- in a StopEnv frame ends the search.
killLocal :: Name -> RunMonad ()
killLocal = modify . go
  where go _ [] = []
        go label (f:fs)
          | noEnvFrame f = f : go label fs
          | otherwise
          = case f of
              RunState (Just (Env envTag envMap)) rou gRou
                -> case label `lookup` envMap of
                     Nothing
                       | envTag == StopEnv -> f:fs
                       | otherwise -> f : go label fs
                     Just (Entry _ary)
                       -> (RunState (Just (Env envTag (label `delete` envMap))) rou gRou) : fs
                     Just (LookBack Nothing) -> f : go label fs
                     Just (LookBack (Just newLabel)) -> f : go newLabel fs
              _ -> error "Fatal error in KILL"
        -- frames without an environment are skipped transparently
        noEnvFrame (RunState Nothing _ _) = True
        noEnvFrame _ = False
-- | NEW a single local variable in the top frame: bind it to an empty
-- array in a NormalEnv, or un-hide it in a StopEnv.
--
-- NOTE(review): the 'fail' below runs inside a pure function of type
-- [RunState] -> [RunState], i.e. in the LIST monad — it yields [] and
-- the message is never shown, silently emptying the stack; confirm
-- whether an empty stack is really unreachable here.
new :: Name -> RunMonad ()
new label
  = modify $ \state ->
     case state of
       [] -> fail "NEW called with an empty stack!"
       (x:xs) -> go x : xs
  where
    go (RunState Nothing r gr) = RunState (Just (Env NormalEnv (insert label (Entry mEmpty) empty))) r gr
    go (RunState (Just ev) r gr)
      = let newEnv
              = case ev of
                  Env NormalEnv eMap -> Env NormalEnv $ insert label (Entry mEmpty) eMap
                  Env StopEnv eMap -> Env StopEnv $ delete label eMap
        in RunState (Just newEnv) r gr
-- | Exclusive NEW: replace the top frame's environment with a StopEnv
-- in which only the listed names survive — each kept name either
-- carries its old entry forward or becomes a look-back to older
-- frames; every other name is hidden.
--
-- NOTE(review): as in 'new', the 'fail' below is in the list monad and
-- silently produces an empty stack rather than reporting the message.
newExclusive :: [Name] -> RunMonad ()
newExclusive labels
  = modify $ \state ->
     case state of
       [] -> fail "NEW called with an empty stack!"
       (x:xs) -> go x : xs
  where
    go (RunState oldEnv r gr)
      = let newEnv = foldr addLabel (Env StopEnv mempty) labels
        in RunState (Just newEnv) r gr
      where
        addLabel label@(inEnv oldEnv -> Just entry) (Env _StopEnv eMap)
          = Env StopEnv $ insert label entry eMap
        addLabel label (Env _StopEnv eMap)
          = Env StopEnv $ insert label (LookBack Nothing) eMap
        inEnv Nothing _ = Nothing
        inEnv (Just (Env _ eMap)) lbl
          = lbl `lookup` eMap
-- | Pure lookup of a local variable over the frame environments.
-- The accumulator's outer Maybe means "search finished"; the inner
-- Maybe is the final answer.  LookBack entries may rename the variable
-- being searched for; a miss in a StopEnv finishes with Nothing.
fetch' :: String -> [RunState] -> Maybe MArray
fetch' str xs = join . fst $ foldl helper (Nothing,str) [x | Just x <- fmap env xs] where
    helper :: (Maybe (Maybe MArray),Name) -> Env -> (Maybe (Maybe MArray), Name)
    helper rhs@(Just _, _) _ = rhs
    helper (_,name) (Env tag m) = case tag of
      NormalEnv -> case name `lookup` m of
                     Nothing -> (Nothing, name)
                     Just (Entry ma) -> (Just (Just ma), name)
                     Just (LookBack newname') -> case newname' of
                       Just newname -> (Nothing, newname)
                       Nothing -> (Nothing, name)
      StopEnv -> case name `lookup` m of
                   Nothing -> (Just Nothing, name)
                   Just (Entry ma) -> (Just (Just ma), name)
                   Just (LookBack newname') -> case newname' of
                     Just newname -> (Nothing, newname)
                     Nothing -> (Nothing, name)
-- |Returns the MArray associated with the named local var, or the empty MArray
fetch :: String -> RunMonad MArray
fetch name = do
  stack <- get
  case fetch' name stack of
    Just arr -> return arr
    Nothing  -> return mEmpty
-- | Pure write of a local variable into the frame stack: store in the
-- innermost frame that owns the name, following LookBack redirects
-- (which may rename the variable); the bottom frame is the fallback
-- owner, gaining a fresh NormalEnv if it has none.
put' :: String -> MArray -> [RunState] -> [RunState]
put' _ _ [] = error "SET called with an empty stack"
put' str ma (x:[]) = case (env x) of
  Nothing -> x {env = Just $ Env NormalEnv (insert str (Entry ma) empty)} : []
  Just (Env tag m) -> x {env = Just $ Env tag (insert str (Entry ma) m)} : []
put' str ma (x:xs) = case (env x) of
  Nothing -> x : (put' str ma xs)
  Just (Env tag m) -> let enter = x {env = Just $ Env tag (insert str (Entry ma) m)} : xs in
    case str `lookup` m of
      Nothing -> case tag of
                   NormalEnv -> x : (put' str ma xs)
                   StopEnv -> enter
      Just (Entry _) -> enter
      Just (LookBack Nothing) -> x : (put' str ma xs)
      Just (LookBack (Just str')) -> x : (put' str' ma xs)
-- | Bind a local variable to an array on whichever frame owns it.
setVar :: String -> MArray -> RunMonad ()
setVar name arr = get >>= put . put' name arr
-- | Set one subscripted slot of a local variable, creating the
-- variable when it does not yet exist.
change :: String -> [MValue] -> MValue -> RunMonad ()
change name subs val = do
  arr <- fetch name
  setVar name (arrayUpdate arr subs val)
-- | KILL a local variable: with no subscripts the whole variable is
-- removed; with subscripts only that subtree is pruned.
kill :: Name -> [MValue] -> RunMonad ()
kill name [] = killLocal name
kill name subs = fetch name >>= setVar name . flip killSub subs
-- | OR together a list of monadic booleans, short-circuiting (running
-- no further actions) as soon as one yields True; empty list is False.
orM :: Monad m => [m Bool] -> m Bool
orM = foldr combine (return False)
  where combine action rest = action >>= \b -> if b then return True else rest
-- |A return value of 'Nothing' indicates we did not quit, and should not unroll the stack.
-- A return value of 'Just Nothing' means we should quit with no return value.
-- A return value of 'Just (Just mv)' means that we should quit with a return value of mv.
exec :: Line -> RunMonad (Maybe (Maybe MValue))
exec [] = return Nothing
-- special commands which (may) use the rest of the command list, or may
-- return without processing the entire list
--
-- FOR loops are desugared: the loop body is the remainder of the line,
-- cycled forever by 'forInf'; counted forms prepend the initial SET and
-- append the increment (and, for ForArg3, the terminating QUIT test).
exec (ForInf:cmds) = forInf (cycle cmds)
exec ((For vn farg):cmds) = case farg of
  ForArg1 expr -> exec $ (Set Nothing [([vn],expr)]) : ForInf : cmds
  ForArg2 exprStart exprInc ->
    do mStart <- eval exprStart
       mInc <- eval exprInc
       exec $ (Set Nothing [([vn],ExpLit mStart)]) : ForInf : cmds ++
              [Set Nothing [([vn],ExpBinop Add (ExpVn vn) (ExpLit mInc))]]
  ForArg3 exprStart exprInc exprTest ->
    do mStart <- eval exprStart
       mInc <- eval exprInc
       mTest <- eval exprTest
       -- the direction of the termination test depends on the sign of
       -- the limit relative to zero
       exec $ (Set Nothing [([vn],ExpLit mStart)]) : ForInf : cmds ++
              [Quit (Just $ if mToBool (mTest `mLT` 0)
                            then ExpBinop LessThan (ExpVn vn) (ExpLit (mTest + 1))
                            else ExpBinop GreaterThan (ExpVn vn) (ExpLit (mTest - 1))) Nothing,
               Set Nothing [([vn],ExpBinop Add (ExpVn vn) (ExpLit mInc))]]
exec ((Break cond):cmds) = do
  condition <- evalCond cond
  when condition $ break
  exec cmds
-- ELSE runs the rest of the line only when $test is false
exec (Else:cmds) = do t <- getTest
                      if not t
                      then exec cmds
                      else return Nothing
-- IF sets $test and runs the rest of the line only when some
-- argument evaluates truthy
exec ((If xs):cmds) = do let xs' = fmap eval xs
                         cond <- orM $ (liftM . liftM) mToBool xs'
                         if cond
                         then setTest True >> exec cmds
                         else setTest False >> return Nothing
exec ((Halt cond):cmds)
  = do
    condition <- evalCond cond
    if condition
      then liftIO (exitWith ExitSuccess) >> return Nothing
      else exec cmds
exec ((Quit cond arg):cmds)
  = do
    condition <- evalCond cond
    if condition
      then case arg of
             Nothing -> return $ Just Nothing
             Just expr -> do
               mv <- eval expr
               return $ Just $ Just mv
      else exec cmds
exec ((Goto cond args):cmds)
  = do
    condition <- evalCond cond
    if condition
      then execGotoArgs args
      else exec cmds
  where
    -- try each GOTO argument in turn until one's condition holds
    execGotoArgs [] = exec cmds
    execGotoArgs (arg:rest)
      = do
        GotoArg argCond entryRef <- normalize arg
        condition <- evalCond argCond
        if condition
          then do
            (rou,tag) <- unpackEntryRef entryRef
            liftM Just $ goto rou tag
          else execGotoArgs rest
    -- split an entry reference into (optional routine, label)
    unpackEntryRef :: EntryRef -> RunMonad (Maybe Name, Name)
    unpackEntryRef entryRef =
      case entryRef of
        Routine rRef -> do
          Routineref name <- normalize rRef
          return (Just name, "")
        Subroutine label' Nothing Nothing -> do
          label <- labelName label'
          return (Nothing, label)
        Subroutine label' Nothing (Just rRef) -> do
          label <- labelName label'
          Routineref name <- normalize rRef
          return (Just name, label)
        Subroutine _ Just{} _ -> fail "unable to process numberic offsets for DO or GOTO"
    labelName :: Label -> RunMonad Name
    labelName label' = do
      label <- normalize label'
      case label of
        Label name -> return name
        LabelInt{} -> fail "Unable to handle numeric labels"
        _ -> error "Fatal error handling entry reference"
-- regular commands go through the command driver
exec (cmd:cmds)
  = do
    go cmd
    exec cmds
  where
    go Nop = return ()
    go (Write cond ws) = do
      condition <- evalCond cond
      when condition $ write ws
    go (Set cond sas) = do
      condition <- evalCond cond
      when condition $ set sas
    -- XECUTE: parse and run a string as commands in a fresh frame
    go (Xecute cond arg) = do
      condition <- evalCond cond
      when condition $ do
        str <- asString `liftM` eval arg
        case parse parseCommands "XECUTE" str of
          Left _err -> fail "" -- todo, better error message
          Right xcmds -> do
            modify (emptyFrame:)
            res <- exec $ xcmds ++ [Quit Nothing Nothing]
            case res of
              Just (Just{}) -> fail "XECUTE cannot return with a value"
              _ -> return ()
            modify tail
    -- the "routine" argument is only for use with GOTO,
    -- so we ignore it for now
    go (Block cond rou doLines) = do
      condition <- evalCond cond
      when condition $ do
        RunState _ r _ <- gets head
        modify (emptyFrame {tags = r,gotoTags=rou}:)
        doBlockLines doLines
        modify tail
      where
        doBlockLines [] = return ()
        doBlockLines (doCmds:rest)
          = do
            res <- exec doCmds
            case res of
              Nothing -> doBlockLines rest
              Just Nothing -> return ()
              Just Just{} -> fail "Argumentless DO block cannot quit with a value"
    go (Kill cond args) = do
      condition <- evalCond cond
      when condition $ case args of
        [] -> fail "Sorry, I don't know how to kill everything"
        _ -> do
          args' <- flattenKillArgs `liftM` (mapM normalize args)
          forM_ args' $ \arg ->
            case arg of
              KillSelective vn'
                -> do
                  vn <- normalize vn'
                  case vn of
                    Lvn name subs' -> do
                      subs <- mapM eval subs'
                      kill name subs
                    _ -> fail "I can only kill locals, sorry"
              _ -> fail "I can only do selective kills, sorry!"
    go (New cond args) = do
      condition <- evalCond cond
      when condition $ case args of
        [] -> newExclusive []
        _ -> do
          args' <- flattenNewArgs `liftM` (mapM normalize args)
          forM_ args' $ \arg ->
            case arg of
              NewSelective name -> new name
              NewExclusive names -> newExclusive names
              _ -> error "Fatal error processing arguments to NEW"
    go (Do cond args) = do
      condition <- evalCond cond
      when condition $ forM_ args $ \arg' -> do
        arg <- normalize arg'
        case arg of
          DoArgList argList -> mapM_ processDo argList
          _ -> processDo arg
    go c = fail $ "Sorry, I don't know how to execute: " ++ (takeWhile (\x -> not (x==' ')) $ show c)
-- | Execute one DO argument: resolve the entry reference to a
-- (routine, label) pair and invoke 'sub'; numeric labels and numeric
-- offsets are unsupported and fail.
processDo :: DoArg -> RunMonad ()
processDo (DoArg cond entryRef args)
  = do
    condition <- evalCond cond
    when condition $ do
      case entryRef of
        Routine routineRef'
          -> do
            Routineref rou <- normalize routineRef'
            sub (Just rou) "" args
        Subroutine label' Nothing Nothing
          -> do
            label <- normalize label'
            case label of
              Label name -> sub Nothing name args
              LabelInt{} -> fail "Cannot use numeric labels"
              _ -> error "fatal error in DO"
        Subroutine label' Nothing (Just rouRef')
          -> do
            Routineref rou <- normalize rouRef'
            label <- normalize label'
            case label of
              Label name -> sub (Just rou) name args
              LabelInt{} -> fail "Cannot use numeric labels"
              _ -> error "fatal error in DO"
        Subroutine _ Just{} _ -> fail "Unable to execute DO with a numeric offset"
processDo _ = error "fatal error in DO"
-- | Evaluate a command's optional postconditional; an absent
-- condition counts as true.
evalCond :: Maybe Expression -> RunMonad Bool
evalCond = maybe (return True) (fmap mToBool . eval)
-- | Execute SET arguments: for each, evaluate the right-hand side once
-- and assign it to every (normalized) target variable.
set :: [SetArg] -> RunMonad ()
set [] = return ()
set ((vns,expr):ss) = do vns' <- mapM normalize vns
                         mv <- eval expr
                         mapM_ (setHelper mv) vns' >> set ss
  where setHelper mv (Lvn name subs) = do subs' <- mapM eval subs
                                          change name subs' mv
        setHelper _ (Gvn _ _) = fail "We don't supposrt global variables yet. sorry."
        setHelper _ (IndirectVn _ _) = fail "Variable name should be normalized"
-- | Execute WRITE arguments: expressions are printed and $x advanced
-- by the printed width; format codes go through 'writeFormat'.
write :: [WriteArg] -> RunMonad ()
write = mapM_ f
  where f wa = do
          wa' <- normalize wa
          case wa' of
            WriteExpression expr -> do m <- eval expr
                                       let s = asString m
                                       liftIO $ putStr s
                                       addX $ fromIntegral $ length s
            WriteFormat fs -> writeFormat fs
            WriteIndirect _ -> fail "write argument should be normalized"
-- | Interpret WRITE format codes, maintaining the "$x" (column) and
-- "$y" (row) cursor intrinsics as output is produced.
writeFormat :: [WriteFormatCode] -> RunMonad ()
writeFormat = mapM_ f
    where
      f Formfeed = liftIO (putChar '\f') >> setY 1
      f Newline = liftIO (putChar '\n') >> setX 1 >> addY 1
      -- Tab: pad with spaces up to column n; a no-op when already past it.
      f (Tab n) = do x <- getX
                     let n' = fromIntegral n
                     if x >= n'
                      then return ()
                      else do liftIO (putStr $ (replicate . floor) (n'-x) ' ')
                              setX n'
-- | Store the output column into the "$x" intrinsic (see 'writeFormat').
setX :: MValue -> RunMonad ()
setX = change "$x" []
-- | Store the output row into the "$y" intrinsic (see 'writeFormat').
setY :: MValue -> RunMonad ()
setY = change "$y" []
-- | Read the output column back from "$x".
getX :: RunMonad MValue
getX = getLocal "$x" []
-- | Read the output row back from "$y".
--
-- BUG FIX: this previously read the plain local "y", while 'setY'
-- writes "$y" (and 'setX'/'getX' consistently use "$x"), so rows
-- recorded by 'setY'/'addY' were never observed here.
getY :: RunMonad MValue
getY = getLocal "$y" []
-- | Advance the "$x" output column by @n@.
addX :: Int -> RunMonad ()
addX n = getX >>= \col -> setX (col + fromIntegral n)

-- | Advance the "$y" output row by @n@.
addY :: Int -> RunMonad ()
addY n = getY >>= \row -> setY (row + fromIntegral n)
-- | Look up a local variable at the given subscripts; a node that is
-- absent reads as the empty string, per M semantics in this interpreter.
getLocal :: String -> [MValue] -> RunMonad MValue
getLocal label subs =
    fetch label >>= \ma ->
        return (maybe (fromString "") id (mIndex ma subs))
-- | Fetch the sub-array of a local rooted at the given subscripts, if any.
getLocalArray :: String -> [MValue] -> RunMonad (Maybe MArray)
getLocalArray label subs = (\ma -> mSubArray ma subs) `liftM` fetch label
-- | Run the body of an argumentless FOR: execute the line's commands,
-- honouring an argumentless QUIT (optionally postconditioned) as the
-- loop exit.
forInf :: Line -> RunMonad (Maybe (Maybe MValue))
forInf ((Quit cond Nothing):xs) = case cond of
     Nothing -> return Nothing
     Just expr -> do mv <- eval expr
                     if mToBool mv
                        then return Nothing
                        else forInf xs
forInf ((Quit _ _):_) = fail "QUIT with argument in a for loop"
forInf (cmd:xs) = exec [cmd] >> forInf xs
-- NOTE(review): when the line is exhausted this spins forever doing no
-- work; an argumentless FOR presumably should re-run the original line
-- (original author marked this "dumb") -- TODO confirm intended behavior.
forInf [] = forInf [] -- dumb
-- | The BREAK command is not implemented; executing it always fails.
break :: RunMonad ()
break = fail "BREAK not working"
-- | Read the "$test" intrinsic as a Bool.
getTest :: RunMonad Bool
getTest = liftM mToBool (getLocal "$test" [])

-- | Store a Bool into the "$test" intrinsic.
setTest :: Bool -> RunMonad ()
setTest b = change "$test" [] (boolToM b)
-- | Evaluate an expression to an MValue: literals, variable reads,
-- unary and binary operators, and function / built-in calls.
eval :: Expression -> RunMonad MValue
eval (ExpLit m) = return m
eval (ExpVn vn) = do vn' <- normalize vn
                     case vn' of
                       Lvn label subs -> do mvs <- mapM eval subs
                                            getLocal label mvs
                       Gvn _ _ -> fail "Globals not yet implemented"
                       IndirectVn _ _ -> fail "normalized VNs should not be indirect"
eval (ExpUnop unop expr) = do mv <- eval expr
                              return $ case unop of
                                         UNot -> mNot mv
                                         -- unary plus: adding 0 presumably
                                         -- coerces to numeric -- TODO confirm
                                         -- against MValue's Num instance.
                                         UPlus -> mv+0
                                         UMinus -> negate mv
eval (ExpBinop binop lexp rexp)
    = do lv <- eval lexp
         rv <- eval rexp
         -- Both operands are evaluated (left first) before dispatch;
         -- there is no short-circuiting for And/Or.
         return $ case binop of
                    Concat -> lv `mConcat` rv
                    Add -> lv + rv
                    Sub -> lv - rv
                    Mult -> lv * rv
                    Div -> lv / rv
                    Rem -> lv `mRem` rv
                    Quot -> lv `mQuot` rv
                    Pow -> lv `mPow` rv
                    And -> lv `mAnd` rv
                    Or -> lv `mOr` rv
                    Equal -> boolToM $ lv == rv
                    LessThan -> lv `mLT` rv
                    GreaterThan -> lv `mGT` rv
                    Follows -> lv `follows` rv
                    Contains -> lv `contains` rv
                    SortsAfter -> boolToM $ lv > rv
-- eval (Pattern _ _) = fail "Can't evaluate pattern matches"
-- An empty routine name means a call into the current routine.
eval (FunCall label "" args) = function Nothing label args
eval (FunCall label rou args) = function (Just rou) label args
eval (ExpBifCall bif) = evalBif bif
-- | Invoke a tag as a function ($$-style call): the callee must QUIT
-- with a value, otherwise this fails.
function :: Maybe Name -> Name -> [FunArg] -> RunMonad MValue
function routine tag args =
    call routine tag args >>=
        maybe (fail "Function quit without returning a value") return
-- | Invoke a tag as a subroutine (DO-style call): the callee must QUIT
-- without a value, otherwise this fails.
sub :: Maybe Name -> Name -> [FunArg] -> RunMonad ()
sub routine tag args =
    call routine tag args >>=
        maybe (return ()) (\_ -> fail "Subroutine quit with a value!")
-- | Dispatch a call: no routine means a tag in the current routine; an
-- empty tag means entry at the tag named after the routine itself.
call :: Maybe Name -> Name -> [FunArg] -> RunMonad (Maybe MValue)
call mroutine tag args = case mroutine of
    Nothing -> localCall tag args
    Just routine
        | null tag  -> call (Just routine) routine args
        | otherwise -> remoteCall tag routine args
-- | Call a tag in the current routine: look it up in the tag table of
-- the top stack frame and run it via 'funcall'.
localCall :: Name -> [FunArg] -> RunMonad (Maybe MValue)
localCall label args = do (r :: Routine) <- (tags . head) `liftM` get
                          case r label of
                            Nothing -> fail $ "Noline: " ++ label
                            Just (argnames, cmds) -> funcall args argnames cmds r
-- | Call a tag in another routine: load (and parse) the routine from
-- disk via 'openRemote', then look the tag up there.
remoteCall :: Name -> Name -> [FunArg] -> RunMonad (Maybe MValue)
remoteCall label routine args
    = openRemote routine $ \r ->
      case r label of
        Nothing -> fail $ "Noline: " ++ label ++ "^" ++ routine
        Just (argnames, cmds) -> funcall args argnames cmds r
-- | Execute a GOTO: transfer control to a tag either in the current
-- routine (using the frame's goto tag table) or in a remote routine.
goto :: Maybe Name -> Name -> RunMonad (Maybe MValue)
goto Nothing tag
    = do
  s <- gets head
  doGoto tag (tags s) (gotoTags s)
goto (Just rouName) tag
    = openRemote rouName $ \r -> doGoto tag r r
-- | Common GOTO machinery: the target tag must take no arguments.
-- Rewrites the top stack frame's tag tables in place (no new frame is
-- pushed) and then runs the target's lines.
doGoto :: Name -> Routine -> Routine -> RunMonad (Maybe MValue)
doGoto tag r gr
    = case gr tag of
        Nothing -> fail $ "Noline: " ++ tag
        Just ([], cmds) -> do
          modify $ \(s:ss) -> s {tags=r,gotoTags=gr} : ss
          runLines cmds
        Just{} -> fail "Error in GOTO: tag should not take arguments"
-- | Load the routine named @routine@ from the file "<routine>.hmumps"
-- in the current directory, parse and pack it, and hand it to the
-- continuation.  A parse failure becomes a monadic failure.
-- NOTE(review): uses lazy 'readFile', so read errors may surface inside
-- the parse rather than here -- TODO consider a strict read.
openRemote :: MonadIO m => Name -> (Routine -> m a) -> m a
openRemote routine k
    = do
  let filename = routine ++ ".hmumps"
  text <- liftIO $ readFile filename
  case parse parseFile filename text of
    Left a -> (fail . show) a
    Right f -> let r = pack $ transform f in
               k r
-- | Evaluate a built-in function call ($CHAR, $X, $Y, $TEST, $ORDER,
-- and the replace built-in).
evalBif :: BifCall -> RunMonad MValue
-- $CHAR: one character per argument, from its integer code point.
evalBif (BifChar args') = do
  args <- mapM eval args'
  let str = fmap (chr . asInt) args
  return $ fromString str
evalBif BifX = getX
evalBif BifY = getY
evalBif BifTest = boolToM `liftM` getTest
-- $ORDER: step the last subscript of a local; missing nodes and
-- exhausted orders both yield the empty string.
evalBif (BifOrder vn' expForward) = do
  vn <- normalize vn'
  case vn of
    Lvn label subs' -> do
           subs <- mapM eval subs'
           case unSnoc subs of
             Nothing -> fail "Cannot $ORDER with no subscripts"
             Just (rest,lastSub)
                 -> do
               ma <- getLocalArray label rest
               case ma of
                 Nothing -> return ""
                 Just a -> do
                    -- Direction defaults to forward when no second
                    -- argument was supplied.
                    forward <- case expForward of
                                 Nothing -> return True
                                 Just ex -> mToBool `liftM` eval ex
                    case order a forward lastSub of
                      Nothing -> return ""
                      Just v -> return v
    Gvn{} -> fail "$ORDER on globals is not supported"
    _ -> error "Fatal error in ORDER"
-- Replace every occurrence of needle in haystack with replacement,
-- via split-then-join.
evalBif (BifReplace haystack' needle' replacement') = do
  haystack <- eval haystack'
  needle <- eval needle'
  replacement <- eval replacement'
  return $ M.join replacement $ M.split needle haystack
-- evalBif bif = fail $ "oops! I don't know what to do with " ++ show bif
-- | Split a finite list into its initial segment and its final element.
-- Yields Nothing for the empty list.
unSnoc :: [a] -> Maybe ([a],a)
unSnoc xs = case reverse xs of
              []     -> Nothing
              (y:ys) -> Just (reverse ys, y)
-- | Bind actual arguments to formal parameter names, push a fresh
-- stack frame with that environment, run the tag's lines, then pop the
-- frame.  Fewer actuals than formals is allowed; more is an error.
funcall :: [FunArg] -> [Name] -> [Line] -> Routine -> RunMonad (Maybe MValue)
funcall args' argnames cmds r =
    let (pairs, remainder) = zipRem args' argnames in
    case remainder of
      Just (Left _) -> fail "Supplied too many parameters to function"
      _ -> do m <- foldM helper empty pairs
              let newframe = RunState (Just $ Env NormalEnv m) r r
              modify (newframe:)
              x <- runLines cmds
              -- Pop the frame pushed above before returning the result.
              modify tail
              return x
    where helper :: Map Name EnvEntry -> (FunArg, Name) -> RunMonad (Map Name EnvEntry)
          -- Expressions are passed by value; names are passed by
          -- reference (LookBack into the caller's frame).
          helper m (arg,name) = case arg of
            FunArgExp expr -> do mval <- eval expr
                                 let entry = Entry $ arrayUpdate mEmpty [] mval
                                 return $ insert name entry m
            FunArgName name' -> return $ insert name (LookBack $ Just name') m
-- | Execute lines in order; stop early and propagate the first
-- QUIT-style result a line produces.
runLines :: [Line] -> RunMonad (Maybe MValue)
runLines []     = return Nothing
runLines (l:ls) = exec l >>= maybe (runLines ls) return
-- | Like 'zip', but also report the unconsumed tail of whichever list
-- was longer (Left = leftovers from the first list, Right = second).
zipRem :: [a] -> [b] -> ([(a,b)],Maybe (Either [a] [b]))
zipRem (x:xs) (y:ys) =
    let (pairs, leftover) = zipRem xs ys
    in ((x,y) : pairs, leftover)
zipRem [] [] = ([], Nothing)
zipRem ls [] = ([], Just (Left ls))
zipRem [] rs = ([], Just (Right rs))
|
aslatter/hmumps
|
HMumps/Runtime.hs
|
gpl-3.0
| 28,642
| 0
| 28
| 10,797
| 9,508
| 4,612
| 4,896
| 663
| 37
|
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# OPTIONS_GHC -fno-warn-duplicate-exports #-}
{-# OPTIONS_GHC -fno-warn-unused-binds #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- |
-- Module : Network.Google.Resource.BigQueryDataTransfer.Projects.TransferConfigs.Runs.TransferLogs.List
-- Copyright : (c) 2015-2016 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
-- Returns user facing log messages for the data transfer run.
--
-- /See:/ <https://cloud.google.com/bigquery-transfer/ BigQuery Data Transfer API Reference> for @bigquerydatatransfer.projects.transferConfigs.runs.transferLogs.list@.
module Network.Google.Resource.BigQueryDataTransfer.Projects.TransferConfigs.Runs.TransferLogs.List
(
-- * REST Resource
ProjectsTransferConfigsRunsTransferLogsListResource
-- * Creating a Request
, projectsTransferConfigsRunsTransferLogsList
, ProjectsTransferConfigsRunsTransferLogsList
-- * Request Lenses
, ptcrtllParent
, ptcrtllXgafv
, ptcrtllUploadProtocol
, ptcrtllMessageTypes
, ptcrtllAccessToken
, ptcrtllUploadType
, ptcrtllPageToken
, ptcrtllPageSize
, ptcrtllCallback
) where
import Network.Google.BigQueryDataTransfer.Types
import Network.Google.Prelude
-- | A resource alias for @bigquerydatatransfer.projects.transferConfigs.runs.transferLogs.list@ method which the
-- 'ProjectsTransferConfigsRunsTransferLogsList' request conforms to.
type ProjectsTransferConfigsRunsTransferLogsListResource
     =
     -- Path: v1/{+parent}/transferLogs, followed by the query
     -- parameters declared on the request record below.
     "v1" :>
       Capture "parent" Text :>
         "transferLogs" :>
           QueryParam "$.xgafv" Xgafv :>
             QueryParam "upload_protocol" Text :>
               QueryParams "messageTypes"
                 ProjectsTransferConfigsRunsTransferLogsListMessageTypes
                 :>
                 QueryParam "access_token" Text :>
                   QueryParam "uploadType" Text :>
                     QueryParam "pageToken" Text :>
                       QueryParam "pageSize" (Textual Int32) :>
                         QueryParam "callback" Text :>
                           QueryParam "alt" AltJSON :>
                             Get '[JSON] ListTransferLogsResponse
-- | Returns user facing log messages for the data transfer run.
--
-- /See:/ 'projectsTransferConfigsRunsTransferLogsList' smart constructor.
data ProjectsTransferConfigsRunsTransferLogsList =
  ProjectsTransferConfigsRunsTransferLogsList'
    { _ptcrtllParent :: !Text -- ^ required parent run name (path capture)
    , _ptcrtllXgafv :: !(Maybe Xgafv) -- ^ V1 error format
    , _ptcrtllUploadProtocol :: !(Maybe Text) -- ^ upload protocol
    , _ptcrtllMessageTypes :: !(Maybe [ProjectsTransferConfigsRunsTransferLogsListMessageTypes]) -- ^ severity filter
    , _ptcrtllAccessToken :: !(Maybe Text) -- ^ OAuth access token
    , _ptcrtllUploadType :: !(Maybe Text) -- ^ legacy upload protocol
    , _ptcrtllPageToken :: !(Maybe Text) -- ^ pagination token
    , _ptcrtllPageSize :: !(Maybe (Textual Int32)) -- ^ page size
    , _ptcrtllCallback :: !(Maybe Text) -- ^ JSONP callback
    }
  deriving (Eq, Show, Data, Typeable, Generic)
-- | Creates a value of 'ProjectsTransferConfigsRunsTransferLogsList' with the minimum fields required to make a request.
--
-- Use one of the following lenses to modify other fields as desired:
--
-- * 'ptcrtllParent'
--
-- * 'ptcrtllXgafv'
--
-- * 'ptcrtllUploadProtocol'
--
-- * 'ptcrtllMessageTypes'
--
-- * 'ptcrtllAccessToken'
--
-- * 'ptcrtllUploadType'
--
-- * 'ptcrtllPageToken'
--
-- * 'ptcrtllPageSize'
--
-- * 'ptcrtllCallback'
projectsTransferConfigsRunsTransferLogsList
    :: Text -- ^ 'ptcrtllParent'
    -> ProjectsTransferConfigsRunsTransferLogsList
projectsTransferConfigsRunsTransferLogsList pPtcrtllParent_ =
  -- Only the parent is required; every optional query parameter
  -- starts unset (Nothing).
  ProjectsTransferConfigsRunsTransferLogsList'
    { _ptcrtllParent = pPtcrtllParent_
    , _ptcrtllXgafv = Nothing
    , _ptcrtllUploadProtocol = Nothing
    , _ptcrtllMessageTypes = Nothing
    , _ptcrtllAccessToken = Nothing
    , _ptcrtllUploadType = Nothing
    , _ptcrtllPageToken = Nothing
    , _ptcrtllPageSize = Nothing
    , _ptcrtllCallback = Nothing
    }
-- | Required. Transfer run name in the form:
-- \`projects\/{project_id}\/transferConfigs\/{config_id}\/runs\/{run_id}\`
-- or
-- \`projects\/{project_id}\/locations\/{location_id}\/transferConfigs\/{config_id}\/runs\/{run_id}\`
ptcrtllParent :: Lens' ProjectsTransferConfigsRunsTransferLogsList Text
ptcrtllParent = lens _ptcrtllParent (\ rq v -> rq {_ptcrtllParent = v})

-- | V1 error format.
ptcrtllXgafv :: Lens' ProjectsTransferConfigsRunsTransferLogsList (Maybe Xgafv)
ptcrtllXgafv = lens _ptcrtllXgafv (\ rq v -> rq {_ptcrtllXgafv = v})

-- | Upload protocol for media (e.g. \"raw\", \"multipart\").
ptcrtllUploadProtocol :: Lens' ProjectsTransferConfigsRunsTransferLogsList (Maybe Text)
ptcrtllUploadProtocol =
    lens _ptcrtllUploadProtocol (\ rq v -> rq {_ptcrtllUploadProtocol = v})

-- | Message types to return. If not populated - INFO, WARNING and ERROR
-- messages are returned.
ptcrtllMessageTypes :: Lens' ProjectsTransferConfigsRunsTransferLogsList [ProjectsTransferConfigsRunsTransferLogsListMessageTypes]
ptcrtllMessageTypes =
    lens _ptcrtllMessageTypes (\ rq v -> rq {_ptcrtllMessageTypes = v})
        . _Default
        . _Coerce

-- | OAuth access token.
ptcrtllAccessToken :: Lens' ProjectsTransferConfigsRunsTransferLogsList (Maybe Text)
ptcrtllAccessToken =
    lens _ptcrtllAccessToken (\ rq v -> rq {_ptcrtllAccessToken = v})

-- | Legacy upload protocol for media (e.g. \"media\", \"multipart\").
ptcrtllUploadType :: Lens' ProjectsTransferConfigsRunsTransferLogsList (Maybe Text)
ptcrtllUploadType =
    lens _ptcrtllUploadType (\ rq v -> rq {_ptcrtllUploadType = v})

-- | Pagination token, which can be used to request a specific page of
-- \`ListTransferLogsRequest\` list results. For multiple-page results,
-- \`ListTransferLogsResponse\` outputs a \`next_page\` token, which can be
-- used as the \`page_token\` value to request the next page of list
-- results.
ptcrtllPageToken :: Lens' ProjectsTransferConfigsRunsTransferLogsList (Maybe Text)
ptcrtllPageToken =
    lens _ptcrtllPageToken (\ rq v -> rq {_ptcrtllPageToken = v})

-- | Page size. The default page size is the maximum value of 1000 results.
ptcrtllPageSize :: Lens' ProjectsTransferConfigsRunsTransferLogsList (Maybe Int32)
ptcrtllPageSize =
    lens _ptcrtllPageSize (\ rq v -> rq {_ptcrtllPageSize = v})
        . mapping _Coerce

-- | JSONP
ptcrtllCallback :: Lens' ProjectsTransferConfigsRunsTransferLogsList (Maybe Text)
ptcrtllCallback =
    lens _ptcrtllCallback (\ rq v -> rq {_ptcrtllCallback = v})
instance GoogleRequest
           ProjectsTransferConfigsRunsTransferLogsList
         where
        type Rs ProjectsTransferConfigsRunsTransferLogsList =
             ListTransferLogsResponse
        -- OAuth scopes accepted for this method.
        type Scopes
               ProjectsTransferConfigsRunsTransferLogsList
             =
             '["https://www.googleapis.com/auth/bigquery",
               "https://www.googleapis.com/auth/cloud-platform",
               "https://www.googleapis.com/auth/cloud-platform.read-only"]
        -- Arguments are passed positionally, in the order the
        -- Resource type above declares its captures/params.
        requestClient
          ProjectsTransferConfigsRunsTransferLogsList'{..}
          = go _ptcrtllParent _ptcrtllXgafv
              _ptcrtllUploadProtocol
              (_ptcrtllMessageTypes ^. _Default)
              _ptcrtllAccessToken
              _ptcrtllUploadType
              _ptcrtllPageToken
              _ptcrtllPageSize
              _ptcrtllCallback
              (Just AltJSON)
              bigQueryDataTransferService
          where go
                  = buildClient
                      (Proxy ::
                         Proxy
                           ProjectsTransferConfigsRunsTransferLogsListResource)
                      mempty
|
brendanhay/gogol
|
gogol-bigquerydatatransfer/gen/Network/Google/Resource/BigQueryDataTransfer/Projects/TransferConfigs/Runs/TransferLogs/List.hs
|
mpl-2.0
| 8,034
| 0
| 19
| 1,713
| 989
| 575
| 414
| 153
| 1
|
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
-- Derived from AWS service descriptions, licensed under Apache 2.0.
-- |
-- Module : Test.AWS.Gen.S3
-- Copyright : (c) 2013-2015 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
module Test.AWS.Gen.S3 where
import Data.Proxy
import Test.AWS.Fixture
import Test.AWS.Prelude
import Test.Tasty
import Network.AWS.S3
import Test.AWS.S3.Internal
-- Auto-generated: the actual test selection needs to be manually placed into
-- the top-level so that real test data can be incrementally added.
--
-- This commented snippet is what the entire set should look like:
-- fixtures :: TestTree
-- fixtures =
-- [ testGroup "request"
-- [ testPutBucketRequestPayment $
-- putBucketRequestPayment
--
-- , testPutObject $
-- putObject
--
-- , testDeleteObject $
-- deleteObject
--
-- , testPutBucketLogging $
-- putBucketLogging
--
-- , testListBuckets $
-- listBuckets
--
-- , testDeleteBucket $
-- deleteBucket
--
-- , testCreateBucket $
-- createBucket
--
-- , testDeleteBucketTagging $
-- deleteBucketTagging
--
-- , testPutObjectACL $
-- putObjectACL
--
-- , testPutBucketTagging $
-- putBucketTagging
--
-- , testGetBucketLocation $
-- getBucketLocation
--
-- , testGetBucketNotificationConfiguration $
-- getBucketNotificationConfiguration
--
-- , testGetObject $
-- getObject
--
-- , testPutBucketReplication $
-- putBucketReplication
--
-- , testGetBucketWebsite $
-- getBucketWebsite
--
-- , testGetBucketRequestPayment $
-- getBucketRequestPayment
--
-- , testDeleteBucketReplication $
-- deleteBucketReplication
--
-- , testGetBucketLifecycle $
-- getBucketLifecycle
--
-- , testListObjectVersions $
-- listObjectVersions
--
-- , testHeadBucket $
-- headBucket
--
-- , testPutBucketLifecycle $
-- putBucketLifecycle
--
-- , testDeleteBucketLifecycle $
-- deleteBucketLifecycle
--
-- , testCreateMultipartUpload $
-- createMultipartUpload
--
-- , testUploadPart $
-- uploadPart
--
-- , testGetBucketReplication $
-- getBucketReplication
--
-- , testPutBucketWebsite $
-- putBucketWebsite
--
-- , testDeleteBucketWebsite $
-- deleteBucketWebsite
--
-- , testCompleteMultipartUpload $
-- completeMultipartUpload
--
-- , testListMultipartUploads $
-- listMultipartUploads
--
-- , testListObjects $
-- listObjects
--
-- , testDeleteBucketPolicy $
-- deleteBucketPolicy
--
-- , testAbortMultipartUpload $
-- abortMultipartUpload
--
-- , testPutBucketPolicy $
-- putBucketPolicy
--
-- , testGetObjectTorrent $
-- getObjectTorrent
--
-- , testDeleteObjects $
-- deleteObjects
--
-- , testPutBucketNotificationConfiguration $
-- putBucketNotificationConfiguration
--
-- , testGetBucketVersioning $
-- getBucketVersioning
--
-- , testDeleteBucketCORS $
-- deleteBucketCORS
--
-- , testPutBucketCORS $
-- putBucketCORS
--
-- , testGetBucketCORS $
-- getBucketCORS
--
-- , testGetObjectACL $
-- getObjectACL
--
-- , testRestoreObject $
-- restoreObject
--
-- , testHeadObject $
-- headObject
--
-- , testPutBucketVersioning $
-- putBucketVersioning
--
-- , testGetBucketTagging $
-- getBucketTagging
--
-- , testCopyObject $
-- copyObject
--
-- , testGetBucketPolicy $
-- getBucketPolicy
--
-- , testGetBucketLogging $
-- getBucketLogging
--
-- , testGetBucketACL $
-- getBucketACL
--
-- , testListParts $
-- listParts
--
-- , testUploadPartCopy $
-- uploadPartCopy
--
-- , testPutBucketACL $
-- putBucketACL
--
-- ]
-- , testGroup "response"
-- [ testPutBucketRequestPaymentResponse $
-- putBucketRequestPaymentResponse
--
-- , testPutObjectResponse $
-- putObjectResponse
--
-- , testDeleteObjectResponse $
-- deleteObjectResponse
--
-- , testPutBucketLoggingResponse $
-- putBucketLoggingResponse
--
-- , testListBucketsResponse $
-- listBucketsResponse
--
-- , testDeleteBucketResponse $
-- deleteBucketResponse
--
-- , testCreateBucketResponse $
-- createBucketResponse
--
-- , testDeleteBucketTaggingResponse $
-- deleteBucketTaggingResponse
--
-- , testPutObjectACLResponse $
-- putObjectACLResponse
--
-- , testPutBucketTaggingResponse $
-- putBucketTaggingResponse
--
-- , testGetBucketLocationResponse $
-- getBucketLocationResponse
--
-- , testGetBucketNotificationConfigurationResponse $
-- notificationConfiguration
--
-- , testGetObjectResponse $
-- getObjectResponse
--
-- , testPutBucketReplicationResponse $
-- putBucketReplicationResponse
--
-- , testGetBucketWebsiteResponse $
-- getBucketWebsiteResponse
--
-- , testGetBucketRequestPaymentResponse $
-- getBucketRequestPaymentResponse
--
-- , testDeleteBucketReplicationResponse $
-- deleteBucketReplicationResponse
--
-- , testGetBucketLifecycleResponse $
-- getBucketLifecycleResponse
--
-- , testListObjectVersionsResponse $
-- listObjectVersionsResponse
--
-- , testHeadBucketResponse $
-- headBucketResponse
--
-- , testPutBucketLifecycleResponse $
-- putBucketLifecycleResponse
--
-- , testDeleteBucketLifecycleResponse $
-- deleteBucketLifecycleResponse
--
-- , testCreateMultipartUploadResponse $
-- createMultipartUploadResponse
--
-- , testUploadPartResponse $
-- uploadPartResponse
--
-- , testGetBucketReplicationResponse $
-- getBucketReplicationResponse
--
-- , testPutBucketWebsiteResponse $
-- putBucketWebsiteResponse
--
-- , testDeleteBucketWebsiteResponse $
-- deleteBucketWebsiteResponse
--
-- , testCompleteMultipartUploadResponse $
-- completeMultipartUploadResponse
--
-- , testListMultipartUploadsResponse $
-- listMultipartUploadsResponse
--
-- , testListObjectsResponse $
-- listObjectsResponse
--
-- , testDeleteBucketPolicyResponse $
-- deleteBucketPolicyResponse
--
-- , testAbortMultipartUploadResponse $
-- abortMultipartUploadResponse
--
-- , testPutBucketPolicyResponse $
-- putBucketPolicyResponse
--
-- , testGetObjectTorrentResponse $
-- getObjectTorrentResponse
--
-- , testDeleteObjectsResponse $
-- deleteObjectsResponse
--
-- , testPutBucketNotificationConfigurationResponse $
-- putBucketNotificationConfigurationResponse
--
-- , testGetBucketVersioningResponse $
-- getBucketVersioningResponse
--
-- , testDeleteBucketCORSResponse $
-- deleteBucketCORSResponse
--
-- , testPutBucketCORSResponse $
-- putBucketCORSResponse
--
-- , testGetBucketCORSResponse $
-- getBucketCORSResponse
--
-- , testGetObjectACLResponse $
-- getObjectACLResponse
--
-- , testRestoreObjectResponse $
-- restoreObjectResponse
--
-- , testHeadObjectResponse $
-- headObjectResponse
--
-- , testPutBucketVersioningResponse $
-- putBucketVersioningResponse
--
-- , testGetBucketTaggingResponse $
-- getBucketTaggingResponse
--
-- , testCopyObjectResponse $
-- copyObjectResponse
--
-- , testGetBucketPolicyResponse $
-- getBucketPolicyResponse
--
-- , testGetBucketLoggingResponse $
-- getBucketLoggingResponse
--
-- , testGetBucketACLResponse $
-- getBucketACLResponse
--
-- , testListPartsResponse $
-- listPartsResponse
--
-- , testUploadPartCopyResponse $
-- uploadPartCopyResponse
--
-- , testPutBucketACLResponse $
-- putBucketACLResponse
--
-- ]
-- ]
-- Requests
-- Request-serialisation fixtures: each test checks the request value
-- against its recorded YAML fixture via 'req'.  (PutObject/UploadPart
-- have streaming bodies and therefore have no request fixture.)

testPutBucketRequestPayment :: PutBucketRequestPayment -> TestTree
testPutBucketRequestPayment = req "PutBucketRequestPayment" "fixture/PutBucketRequestPayment.yaml"

testDeleteObject :: DeleteObject -> TestTree
testDeleteObject = req "DeleteObject" "fixture/DeleteObject.yaml"

testPutBucketLogging :: PutBucketLogging -> TestTree
testPutBucketLogging = req "PutBucketLogging" "fixture/PutBucketLogging.yaml"

testListBuckets :: ListBuckets -> TestTree
testListBuckets = req "ListBuckets" "fixture/ListBuckets.yaml"

testDeleteBucket :: DeleteBucket -> TestTree
testDeleteBucket = req "DeleteBucket" "fixture/DeleteBucket.yaml"

testCreateBucket :: CreateBucket -> TestTree
testCreateBucket = req "CreateBucket" "fixture/CreateBucket.yaml"

testDeleteBucketTagging :: DeleteBucketTagging -> TestTree
testDeleteBucketTagging = req "DeleteBucketTagging" "fixture/DeleteBucketTagging.yaml"

testPutObjectACL :: PutObjectACL -> TestTree
testPutObjectACL = req "PutObjectACL" "fixture/PutObjectACL.yaml"

testPutBucketTagging :: PutBucketTagging -> TestTree
testPutBucketTagging = req "PutBucketTagging" "fixture/PutBucketTagging.yaml"

testGetBucketLocation :: GetBucketLocation -> TestTree
testGetBucketLocation = req "GetBucketLocation" "fixture/GetBucketLocation.yaml"

testGetBucketNotificationConfiguration :: GetBucketNotificationConfiguration -> TestTree
testGetBucketNotificationConfiguration = req "GetBucketNotificationConfiguration" "fixture/GetBucketNotificationConfiguration.yaml"

testGetObject :: GetObject -> TestTree
testGetObject = req "GetObject" "fixture/GetObject.yaml"

testPutBucketReplication :: PutBucketReplication -> TestTree
testPutBucketReplication = req "PutBucketReplication" "fixture/PutBucketReplication.yaml"

testGetBucketWebsite :: GetBucketWebsite -> TestTree
testGetBucketWebsite = req "GetBucketWebsite" "fixture/GetBucketWebsite.yaml"

testGetBucketRequestPayment :: GetBucketRequestPayment -> TestTree
testGetBucketRequestPayment = req "GetBucketRequestPayment" "fixture/GetBucketRequestPayment.yaml"

testDeleteBucketReplication :: DeleteBucketReplication -> TestTree
testDeleteBucketReplication = req "DeleteBucketReplication" "fixture/DeleteBucketReplication.yaml"

testGetBucketLifecycle :: GetBucketLifecycle -> TestTree
testGetBucketLifecycle = req "GetBucketLifecycle" "fixture/GetBucketLifecycle.yaml"

testListObjectVersions :: ListObjectVersions -> TestTree
testListObjectVersions = req "ListObjectVersions" "fixture/ListObjectVersions.yaml"

testHeadBucket :: HeadBucket -> TestTree
testHeadBucket = req "HeadBucket" "fixture/HeadBucket.yaml"

testPutBucketLifecycle :: PutBucketLifecycle -> TestTree
testPutBucketLifecycle = req "PutBucketLifecycle" "fixture/PutBucketLifecycle.yaml"

testDeleteBucketLifecycle :: DeleteBucketLifecycle -> TestTree
testDeleteBucketLifecycle = req "DeleteBucketLifecycle" "fixture/DeleteBucketLifecycle.yaml"

testCreateMultipartUpload :: CreateMultipartUpload -> TestTree
testCreateMultipartUpload = req "CreateMultipartUpload" "fixture/CreateMultipartUpload.yaml"

testGetBucketReplication :: GetBucketReplication -> TestTree
testGetBucketReplication = req "GetBucketReplication" "fixture/GetBucketReplication.yaml"

testPutBucketWebsite :: PutBucketWebsite -> TestTree
testPutBucketWebsite = req "PutBucketWebsite" "fixture/PutBucketWebsite.yaml"

testDeleteBucketWebsite :: DeleteBucketWebsite -> TestTree
testDeleteBucketWebsite = req "DeleteBucketWebsite" "fixture/DeleteBucketWebsite.yaml"

testCompleteMultipartUpload :: CompleteMultipartUpload -> TestTree
testCompleteMultipartUpload = req "CompleteMultipartUpload" "fixture/CompleteMultipartUpload.yaml"
testListMultipartUploads :: ListMultipartUploads -> TestTree
testListMultipartUploads = req "ListMultipartUploads" "fixture/ListMultipartUploads.yaml"

testListObjects :: ListObjects -> TestTree
testListObjects = req "ListObjects" "fixture/ListObjects.yaml"

testDeleteBucketPolicy :: DeleteBucketPolicy -> TestTree
testDeleteBucketPolicy = req "DeleteBucketPolicy" "fixture/DeleteBucketPolicy.yaml"

testAbortMultipartUpload :: AbortMultipartUpload -> TestTree
testAbortMultipartUpload = req "AbortMultipartUpload" "fixture/AbortMultipartUpload.yaml"

testPutBucketPolicy :: PutBucketPolicy -> TestTree
testPutBucketPolicy = req "PutBucketPolicy" "fixture/PutBucketPolicy.yaml"

testGetObjectTorrent :: GetObjectTorrent -> TestTree
testGetObjectTorrent = req "GetObjectTorrent" "fixture/GetObjectTorrent.yaml"

testDeleteObjects :: DeleteObjects -> TestTree
testDeleteObjects = req "DeleteObjects" "fixture/DeleteObjects.yaml"

testPutBucketNotificationConfiguration :: PutBucketNotificationConfiguration -> TestTree
testPutBucketNotificationConfiguration = req "PutBucketNotificationConfiguration" "fixture/PutBucketNotificationConfiguration.yaml"

testGetBucketVersioning :: GetBucketVersioning -> TestTree
testGetBucketVersioning = req "GetBucketVersioning" "fixture/GetBucketVersioning.yaml"

testDeleteBucketCORS :: DeleteBucketCORS -> TestTree
testDeleteBucketCORS = req "DeleteBucketCORS" "fixture/DeleteBucketCORS.yaml"

testPutBucketCORS :: PutBucketCORS -> TestTree
testPutBucketCORS = req "PutBucketCORS" "fixture/PutBucketCORS.yaml"

testGetBucketCORS :: GetBucketCORS -> TestTree
testGetBucketCORS = req "GetBucketCORS" "fixture/GetBucketCORS.yaml"

testGetObjectACL :: GetObjectACL -> TestTree
testGetObjectACL = req "GetObjectACL" "fixture/GetObjectACL.yaml"

testRestoreObject :: RestoreObject -> TestTree
testRestoreObject = req "RestoreObject" "fixture/RestoreObject.yaml"

testHeadObject :: HeadObject -> TestTree
testHeadObject = req "HeadObject" "fixture/HeadObject.yaml"

testPutBucketVersioning :: PutBucketVersioning -> TestTree
testPutBucketVersioning = req "PutBucketVersioning" "fixture/PutBucketVersioning.yaml"

testGetBucketTagging :: GetBucketTagging -> TestTree
testGetBucketTagging = req "GetBucketTagging" "fixture/GetBucketTagging.yaml"

testCopyObject :: CopyObject -> TestTree
testCopyObject = req "CopyObject" "fixture/CopyObject.yaml"

testGetBucketPolicy :: GetBucketPolicy -> TestTree
testGetBucketPolicy = req "GetBucketPolicy" "fixture/GetBucketPolicy.yaml"

testGetBucketLogging :: GetBucketLogging -> TestTree
testGetBucketLogging = req "GetBucketLogging" "fixture/GetBucketLogging.yaml"

testGetBucketACL :: GetBucketACL -> TestTree
testGetBucketACL = req "GetBucketACL" "fixture/GetBucketACL.yaml"

testListParts :: ListParts -> TestTree
testListParts = req "ListParts" "fixture/ListParts.yaml"

testUploadPartCopy :: UploadPartCopy -> TestTree
testUploadPartCopy = req "UploadPartCopy" "fixture/UploadPartCopy.yaml"

testPutBucketACL :: PutBucketACL -> TestTree
testPutBucketACL = req "PutBucketACL" "fixture/PutBucketACL.yaml"
-- Responses
-- Response-deserialisation fixtures: each test decodes the recorded
-- protocol fixture via 'res' against the operation named by the Proxy.

testPutBucketRequestPaymentResponse :: PutBucketRequestPaymentResponse -> TestTree
testPutBucketRequestPaymentResponse =
    res "PutBucketRequestPaymentResponse" "fixture/PutBucketRequestPaymentResponse.proto" s3
        (Proxy :: Proxy PutBucketRequestPayment)

testPutObjectResponse :: PutObjectResponse -> TestTree
testPutObjectResponse =
    res "PutObjectResponse" "fixture/PutObjectResponse.proto" s3
        (Proxy :: Proxy PutObject)

testDeleteObjectResponse :: DeleteObjectResponse -> TestTree
testDeleteObjectResponse =
    res "DeleteObjectResponse" "fixture/DeleteObjectResponse.proto" s3
        (Proxy :: Proxy DeleteObject)

testPutBucketLoggingResponse :: PutBucketLoggingResponse -> TestTree
testPutBucketLoggingResponse =
    res "PutBucketLoggingResponse" "fixture/PutBucketLoggingResponse.proto" s3
        (Proxy :: Proxy PutBucketLogging)

testListBucketsResponse :: ListBucketsResponse -> TestTree
testListBucketsResponse =
    res "ListBucketsResponse" "fixture/ListBucketsResponse.proto" s3
        (Proxy :: Proxy ListBuckets)

testDeleteBucketResponse :: DeleteBucketResponse -> TestTree
testDeleteBucketResponse =
    res "DeleteBucketResponse" "fixture/DeleteBucketResponse.proto" s3
        (Proxy :: Proxy DeleteBucket)

testCreateBucketResponse :: CreateBucketResponse -> TestTree
testCreateBucketResponse =
    res "CreateBucketResponse" "fixture/CreateBucketResponse.proto" s3
        (Proxy :: Proxy CreateBucket)

testDeleteBucketTaggingResponse :: DeleteBucketTaggingResponse -> TestTree
testDeleteBucketTaggingResponse =
    res "DeleteBucketTaggingResponse" "fixture/DeleteBucketTaggingResponse.proto" s3
        (Proxy :: Proxy DeleteBucketTagging)

testPutObjectACLResponse :: PutObjectACLResponse -> TestTree
testPutObjectACLResponse =
    res "PutObjectACLResponse" "fixture/PutObjectACLResponse.proto" s3
        (Proxy :: Proxy PutObjectACL)

testPutBucketTaggingResponse :: PutBucketTaggingResponse -> TestTree
testPutBucketTaggingResponse =
    res "PutBucketTaggingResponse" "fixture/PutBucketTaggingResponse.proto" s3
        (Proxy :: Proxy PutBucketTagging)

testGetBucketLocationResponse :: GetBucketLocationResponse -> TestTree
testGetBucketLocationResponse =
    res "GetBucketLocationResponse" "fixture/GetBucketLocationResponse.proto" s3
        (Proxy :: Proxy GetBucketLocation)

-- Note: this response type is the bare NotificationConfiguration record.
testGetBucketNotificationConfigurationResponse :: NotificationConfiguration -> TestTree
testGetBucketNotificationConfigurationResponse =
    res "GetBucketNotificationConfigurationResponse" "fixture/GetBucketNotificationConfigurationResponse.proto" s3
        (Proxy :: Proxy GetBucketNotificationConfiguration)
testPutBucketReplicationResponse :: PutBucketReplicationResponse -> TestTree
testPutBucketReplicationResponse = res
"PutBucketReplicationResponse"
"fixture/PutBucketReplicationResponse.proto"
s3
(Proxy :: Proxy PutBucketReplication)
testGetBucketWebsiteResponse :: GetBucketWebsiteResponse -> TestTree
testGetBucketWebsiteResponse = res
"GetBucketWebsiteResponse"
"fixture/GetBucketWebsiteResponse.proto"
s3
(Proxy :: Proxy GetBucketWebsite)
testGetBucketRequestPaymentResponse :: GetBucketRequestPaymentResponse -> TestTree
testGetBucketRequestPaymentResponse = res
"GetBucketRequestPaymentResponse"
"fixture/GetBucketRequestPaymentResponse.proto"
s3
(Proxy :: Proxy GetBucketRequestPayment)
testDeleteBucketReplicationResponse :: DeleteBucketReplicationResponse -> TestTree
testDeleteBucketReplicationResponse = res
"DeleteBucketReplicationResponse"
"fixture/DeleteBucketReplicationResponse.proto"
s3
(Proxy :: Proxy DeleteBucketReplication)
testGetBucketLifecycleResponse :: GetBucketLifecycleResponse -> TestTree
testGetBucketLifecycleResponse = res
"GetBucketLifecycleResponse"
"fixture/GetBucketLifecycleResponse.proto"
s3
(Proxy :: Proxy GetBucketLifecycle)
testListObjectVersionsResponse :: ListObjectVersionsResponse -> TestTree
testListObjectVersionsResponse = res
"ListObjectVersionsResponse"
"fixture/ListObjectVersionsResponse.proto"
s3
(Proxy :: Proxy ListObjectVersions)
-- | Round-trip the @HeadBucket@ response against its recorded fixture.
testHeadBucketResponse :: HeadBucketResponse -> TestTree
testHeadBucketResponse =
    res "HeadBucketResponse" "fixture/HeadBucketResponse.proto" s3 op
  where
    op = Proxy :: Proxy HeadBucket
-- | Round-trip the @PutBucketLifecycle@ response against its recorded fixture.
testPutBucketLifecycleResponse :: PutBucketLifecycleResponse -> TestTree
testPutBucketLifecycleResponse =
    res "PutBucketLifecycleResponse" "fixture/PutBucketLifecycleResponse.proto" s3 op
  where
    op = Proxy :: Proxy PutBucketLifecycle
-- | Round-trip the @DeleteBucketLifecycle@ response against its recorded fixture.
testDeleteBucketLifecycleResponse :: DeleteBucketLifecycleResponse -> TestTree
testDeleteBucketLifecycleResponse =
    res "DeleteBucketLifecycleResponse" "fixture/DeleteBucketLifecycleResponse.proto" s3 op
  where
    op = Proxy :: Proxy DeleteBucketLifecycle
-- | Round-trip the @CreateMultipartUpload@ response against its recorded fixture.
testCreateMultipartUploadResponse :: CreateMultipartUploadResponse -> TestTree
testCreateMultipartUploadResponse =
    res "CreateMultipartUploadResponse" "fixture/CreateMultipartUploadResponse.proto" s3 op
  where
    op = Proxy :: Proxy CreateMultipartUpload
-- | Round-trip the @UploadPart@ response against its recorded fixture.
testUploadPartResponse :: UploadPartResponse -> TestTree
testUploadPartResponse =
    res "UploadPartResponse" "fixture/UploadPartResponse.proto" s3 op
  where
    op = Proxy :: Proxy UploadPart
-- | Round-trip the @GetBucketReplication@ response against its recorded fixture.
testGetBucketReplicationResponse :: GetBucketReplicationResponse -> TestTree
testGetBucketReplicationResponse =
    res "GetBucketReplicationResponse" "fixture/GetBucketReplicationResponse.proto" s3 op
  where
    op = Proxy :: Proxy GetBucketReplication
-- | Round-trip the @PutBucketWebsite@ response against its recorded fixture.
testPutBucketWebsiteResponse :: PutBucketWebsiteResponse -> TestTree
testPutBucketWebsiteResponse =
    res "PutBucketWebsiteResponse" "fixture/PutBucketWebsiteResponse.proto" s3 op
  where
    op = Proxy :: Proxy PutBucketWebsite
-- | Round-trip the @DeleteBucketWebsite@ response against its recorded fixture.
testDeleteBucketWebsiteResponse :: DeleteBucketWebsiteResponse -> TestTree
testDeleteBucketWebsiteResponse =
    res "DeleteBucketWebsiteResponse" "fixture/DeleteBucketWebsiteResponse.proto" s3 op
  where
    op = Proxy :: Proxy DeleteBucketWebsite
-- | Round-trip the @CompleteMultipartUpload@ response against its recorded fixture.
testCompleteMultipartUploadResponse :: CompleteMultipartUploadResponse -> TestTree
testCompleteMultipartUploadResponse =
    res "CompleteMultipartUploadResponse" "fixture/CompleteMultipartUploadResponse.proto" s3 op
  where
    op = Proxy :: Proxy CompleteMultipartUpload
-- | Round-trip the @ListMultipartUploads@ response against its recorded fixture.
testListMultipartUploadsResponse :: ListMultipartUploadsResponse -> TestTree
testListMultipartUploadsResponse =
    res "ListMultipartUploadsResponse" "fixture/ListMultipartUploadsResponse.proto" s3 op
  where
    op = Proxy :: Proxy ListMultipartUploads
-- | Round-trip the @ListObjects@ response against its recorded fixture.
testListObjectsResponse :: ListObjectsResponse -> TestTree
testListObjectsResponse =
    res "ListObjectsResponse" "fixture/ListObjectsResponse.proto" s3 op
  where
    op = Proxy :: Proxy ListObjects
-- | Round-trip the @DeleteBucketPolicy@ response against its recorded fixture.
testDeleteBucketPolicyResponse :: DeleteBucketPolicyResponse -> TestTree
testDeleteBucketPolicyResponse =
    res "DeleteBucketPolicyResponse" "fixture/DeleteBucketPolicyResponse.proto" s3 op
  where
    op = Proxy :: Proxy DeleteBucketPolicy
-- | Round-trip the @AbortMultipartUpload@ response against its recorded fixture.
testAbortMultipartUploadResponse :: AbortMultipartUploadResponse -> TestTree
testAbortMultipartUploadResponse =
    res "AbortMultipartUploadResponse" "fixture/AbortMultipartUploadResponse.proto" s3 op
  where
    op = Proxy :: Proxy AbortMultipartUpload
-- | Round-trip the @PutBucketPolicy@ response against its recorded fixture.
testPutBucketPolicyResponse :: PutBucketPolicyResponse -> TestTree
testPutBucketPolicyResponse =
    res "PutBucketPolicyResponse" "fixture/PutBucketPolicyResponse.proto" s3 op
  where
    op = Proxy :: Proxy PutBucketPolicy
-- | Round-trip the @DeleteObjects@ response against its recorded fixture.
testDeleteObjectsResponse :: DeleteObjectsResponse -> TestTree
testDeleteObjectsResponse =
    res "DeleteObjectsResponse" "fixture/DeleteObjectsResponse.proto" s3 op
  where
    op = Proxy :: Proxy DeleteObjects
-- | Round-trip the @PutBucketNotificationConfiguration@ response against its recorded fixture.
testPutBucketNotificationConfigurationResponse :: PutBucketNotificationConfigurationResponse -> TestTree
testPutBucketNotificationConfigurationResponse =
    res "PutBucketNotificationConfigurationResponse" "fixture/PutBucketNotificationConfigurationResponse.proto" s3 op
  where
    op = Proxy :: Proxy PutBucketNotificationConfiguration
-- | Round-trip the @GetBucketVersioning@ response against its recorded fixture.
testGetBucketVersioningResponse :: GetBucketVersioningResponse -> TestTree
testGetBucketVersioningResponse =
    res "GetBucketVersioningResponse" "fixture/GetBucketVersioningResponse.proto" s3 op
  where
    op = Proxy :: Proxy GetBucketVersioning
-- | Round-trip the @DeleteBucketCORS@ response against its recorded fixture.
testDeleteBucketCORSResponse :: DeleteBucketCORSResponse -> TestTree
testDeleteBucketCORSResponse =
    res "DeleteBucketCORSResponse" "fixture/DeleteBucketCORSResponse.proto" s3 op
  where
    op = Proxy :: Proxy DeleteBucketCORS
-- | Round-trip the @PutBucketCORS@ response against its recorded fixture.
testPutBucketCORSResponse :: PutBucketCORSResponse -> TestTree
testPutBucketCORSResponse =
    res "PutBucketCORSResponse" "fixture/PutBucketCORSResponse.proto" s3 op
  where
    op = Proxy :: Proxy PutBucketCORS
-- | Round-trip the @GetBucketCORS@ response against its recorded fixture.
testGetBucketCORSResponse :: GetBucketCORSResponse -> TestTree
testGetBucketCORSResponse =
    res "GetBucketCORSResponse" "fixture/GetBucketCORSResponse.proto" s3 op
  where
    op = Proxy :: Proxy GetBucketCORS
-- | Round-trip the @GetObjectACL@ response against its recorded fixture.
testGetObjectACLResponse :: GetObjectACLResponse -> TestTree
testGetObjectACLResponse =
    res "GetObjectACLResponse" "fixture/GetObjectACLResponse.proto" s3 op
  where
    op = Proxy :: Proxy GetObjectACL
-- | Round-trip the @RestoreObject@ response against its recorded fixture.
testRestoreObjectResponse :: RestoreObjectResponse -> TestTree
testRestoreObjectResponse =
    res "RestoreObjectResponse" "fixture/RestoreObjectResponse.proto" s3 op
  where
    op = Proxy :: Proxy RestoreObject
-- | Round-trip the @HeadObject@ response against its recorded fixture.
testHeadObjectResponse :: HeadObjectResponse -> TestTree
testHeadObjectResponse =
    res "HeadObjectResponse" "fixture/HeadObjectResponse.proto" s3 op
  where
    op = Proxy :: Proxy HeadObject
-- | Round-trip the @PutBucketVersioning@ response against its recorded fixture.
testPutBucketVersioningResponse :: PutBucketVersioningResponse -> TestTree
testPutBucketVersioningResponse =
    res "PutBucketVersioningResponse" "fixture/PutBucketVersioningResponse.proto" s3 op
  where
    op = Proxy :: Proxy PutBucketVersioning
-- | Round-trip the @GetBucketTagging@ response against its recorded fixture.
testGetBucketTaggingResponse :: GetBucketTaggingResponse -> TestTree
testGetBucketTaggingResponse =
    res "GetBucketTaggingResponse" "fixture/GetBucketTaggingResponse.proto" s3 op
  where
    op = Proxy :: Proxy GetBucketTagging
-- | Round-trip the @CopyObject@ response against its recorded fixture.
testCopyObjectResponse :: CopyObjectResponse -> TestTree
testCopyObjectResponse =
    res "CopyObjectResponse" "fixture/CopyObjectResponse.proto" s3 op
  where
    op = Proxy :: Proxy CopyObject
-- | Round-trip the @GetBucketPolicy@ response against its recorded fixture.
testGetBucketPolicyResponse :: GetBucketPolicyResponse -> TestTree
testGetBucketPolicyResponse =
    res "GetBucketPolicyResponse" "fixture/GetBucketPolicyResponse.proto" s3 op
  where
    op = Proxy :: Proxy GetBucketPolicy
-- | Round-trip the @GetBucketLogging@ response against its recorded fixture.
testGetBucketLoggingResponse :: GetBucketLoggingResponse -> TestTree
testGetBucketLoggingResponse =
    res "GetBucketLoggingResponse" "fixture/GetBucketLoggingResponse.proto" s3 op
  where
    op = Proxy :: Proxy GetBucketLogging
-- | Round-trip the @GetBucketACL@ response against its recorded fixture.
testGetBucketACLResponse :: GetBucketACLResponse -> TestTree
testGetBucketACLResponse =
    res "GetBucketACLResponse" "fixture/GetBucketACLResponse.proto" s3 op
  where
    op = Proxy :: Proxy GetBucketACL
-- | Round-trip the @ListParts@ response against its recorded fixture.
testListPartsResponse :: ListPartsResponse -> TestTree
testListPartsResponse =
    res "ListPartsResponse" "fixture/ListPartsResponse.proto" s3 op
  where
    op = Proxy :: Proxy ListParts
-- | Round-trip the @UploadPartCopy@ response against its recorded fixture.
testUploadPartCopyResponse :: UploadPartCopyResponse -> TestTree
testUploadPartCopyResponse =
    res "UploadPartCopyResponse" "fixture/UploadPartCopyResponse.proto" s3 op
  where
    op = Proxy :: Proxy UploadPartCopy
-- | Round-trip the @PutBucketACL@ response against its recorded fixture.
testPutBucketACLResponse :: PutBucketACLResponse -> TestTree
testPutBucketACLResponse =
    res "PutBucketACLResponse" "fixture/PutBucketACLResponse.proto" s3 op
  where
    op = Proxy :: Proxy PutBucketACL
|
fmapfmapfmap/amazonka
|
amazonka-s3/test/Test/AWS/Gen/S3.hs
|
mpl-2.0
| 27,743
| 0
| 7
| 5,552
| 2,929
| 1,715
| 1,214
| 509
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.