code
stringlengths 2
1.05M
| repo_name
stringlengths 5
101
| path
stringlengths 4
991
| language
stringclasses 3
values | license
stringclasses 5
values | size
int64 2
1.05M
|
|---|---|---|---|---|---|
module Language.ContextSemantics.CallByNeedLambda where
import Language.ContextSemantics.Expressions
import Language.ContextSemantics.Utilities ()
import Language.ContextSemantics.Output
import Control.Arrow (second)
import Data.List (nub)
import Data.List.Zipper
import Data.Maybe
import Data.Nthable
import Prelude hiding (fst, snd)
--
-- Context semantics
--
-- | Tokens that make up a context. 'White'/'Black' distinguish the two
-- auxiliary ports of an application or lambda node, 'LeftT'/'RightT' the two
-- branches of a sharing node, 'Bracket' packs two context levels into one
-- (used when crossing a bracket node), and 'Symbol' names a variable.
data Token = White | Black | LeftT | RightT | Bracket [Token] [Token] | Symbol String

instance Show Token where
    show White = "⚪"
    show Black = "⚫"
    show LeftT = "L"
    show RightT = "R"
    show (Bracket ts1 ts2) = "<" ++ show ts1 ++ "," ++ show ts2 ++ ">"
    show (Symbol s) = s
    -- Render token lists without the usual list punctuation.
    -- NOTE(review): showCompactList presumably comes from a local module
    -- (Utilities is imported for instances only) — confirm its origin.
    showList = showCompactList
-- | A port maps an incoming context — a zipper over per-level token stacks —
-- to either an error message or the 'Output' produced by propagating the
-- context through the net.
type Port = Zipper [Token] -> Either String (Output (Zipper [Token]))

-- | Pop the head token of the stack under the zipper's cursor.
-- Fails with a descriptive message when that stack is empty.
popAtCursor :: Zipper [Token] -> Either String (Token, Zipper [Token])
popAtCursor tss = case cursor tss of
    (t:ts) -> return (t, replace ts tss)
    []     -> Left $ "popAtCursor: malformed incoming context " ++ show tss
-- | Push a token onto the stack under the zipper's cursor.
pushAtCursor :: Token -> Zipper [Token] -> Zipper [Token]
pushAtCursor t tss = replace stack' tss
  where stack' = t : cursor tss
-- | Application node. A context arriving at the principal port is routed to
-- the continuation or the argument according to the token popped off the
-- cursor level; contexts arriving at the other two ports leave through the
-- principal port with the matching token pushed on.
app :: Port -> Port -> Port -> (Port, Port, Port)
app princp_out cont_out arg_out = (princp_in, cont_in, arg_in)
  where
    princp_in tss = do
        (tok, tss'') <- popAtCursor tss
        case tok of
            White -> cont_out tss''
            Black -> arg_out tss''
            _     -> Left $ "app: principal port got malformed incoming context " ++ show tss
    cont_in = princp_out . pushAtCursor White
    arg_in  = princp_out . pushAtCursor Black
-- | Abstraction node. A context arriving at the principal port is routed to
-- the body or the bound parameter according to the token popped off the
-- cursor level; contexts arriving at the other two ports leave through the
-- principal port with the matching token pushed on.
lam :: Port -> Port -> Port -> (Port, Port, Port)
lam princp_out body_out param_out = (princp_in, body_in, param_in)
  where
    princp_in tss = do
        (tok, tss'') <- popAtCursor tss
        case tok of
            White -> body_out tss''
            Black -> param_out tss''
            _     -> Left $ "lam: principal port got malformed incoming context " ++ show tss
    body_in  = princp_out . pushAtCursor White
    param_in = princp_out . pushAtCursor Black
-- | Sharing node. A context arriving at the principal port is routed to the
-- left or right sharer according to the token popped off the cursor level;
-- contexts arriving from either sharer leave through the principal port with
-- the matching token pushed on.
share :: Port -> Port -> Port -> (Port, Port, Port)
share princp_out left_out right_out = (princp_in, left_in, right_in)
  where
    princp_in tss = do
        (tok, tss'') <- popAtCursor tss
        case tok of
            LeftT  -> left_out tss''
            RightT -> right_out tss''
            _      -> Left $ "share: principal port got malformed incoming context " ++ show tss
    left_in  = princp_out . pushAtCursor LeftT
    right_in = princp_out . pushAtCursor RightT
-- | Entering a box shifts the context zipper one level to the right.
enterBox :: Port -> Port
enterBox entering = entering . right

-- | Leaving a box shifts the context zipper one level to the left.
leaveBox :: Port -> Port
leaveBox leaving = leaving . left
-- | Croissant node for variable @s@: mediates between the forced (demand)
-- side and the boxed (definition) side. Travelling towards the box inserts a
-- fresh context level holding just the variable's symbol; travelling back out
-- requires exactly that level and removes it.
croissant :: String -> Port -> Port -> (Port, Port)
croissant s forced_out boxed_out = (forced_in, boxed_in)
  where
    forced_in tss = boxed_out (insert [Symbol s] tss)
    boxed_in tss = case cursor tss of
        -- Only accept the level this croissant itself introduced
        [Symbol s'] | s == s' -> forced_out (delete tss)
        _ -> Left $ "croissant: boxed port got malformed incoming context " ++ show tss
-- | Bracket node: travelling towards the waiting side merges the two
-- outermost context levels into a single 'Bracket' token; travelling the
-- other way splits that token back into two levels.
bracket :: Port -> Port -> (Port, Port)
bracket merged_out waiting_out = (merged_in, waiting_in)
  where
    -- Pack the cursor level and the one to its right into one Bracket level
    merged_in tss = waiting_out (insert ([Bracket (cursor tss) (cursor (right tss))]) (delete (delete tss)))
    waiting_in tss = case cursor tss of
        -- Unpack: the shallow level goes back on top of the deep one
        [Bracket shallow deep] -> merged_out $ insert shallow $ insert deep $ delete tss
        _ -> Left $ "bracket: waiting port got malformed incoming context " ++ show tss
-- | A free variable is a sink: any context reaching it is reported as the
-- final output, labelled with the variable's name.
fv :: String -> Port
fv name tss = Right (Output name tss)
--
-- Translation from traditional CBN lambda calculus
--
-- | Top-level translation: wire the expression's root to an \"Input\" free
-- variable and each of its free variables to a sink named after itself.
exprSemantics :: Expr -> (Port, [(String, Port)])
exprSemantics e = exprSemantics' (fv "Input") initialEnv e
  where initialEnv = [(v, fv v) | v <- freeVars e]
-- | Translate an expression into its context-semantics net. Takes the port
-- the expression's output is wired to and an environment mapping every
-- in-scope variable to its definition-site port; returns the expression's
-- input port together with usage ports for the variables it mentions.
-- Note the pervasive knot-tying: the where-bindings below are mutually
-- recursive and rely on laziness, so their structure must not be disturbed.
exprSemantics' :: Port -> [(String, Port)] -> Expr -> (Port, [(String, Port)])
exprSemantics' out_port env (V v) = (forced_port, [(v, boxed_port)])
  where (forced_port, boxed_port) = croissant v out_port (lookupInEnv env v)
exprSemantics' out_port env (e1 :@ e2) = (c, usg)
  where (e1_port, usg1) = exprSemantics' r env1 e1
        -- If you send a signal out of e2 then it must leave the box - hence the modifications
        -- to the environment and the port we supply
        (e2_port, usg2) = exprSemantics' (leaveBox a) (map (second leaveBox) env2') e2
        -- Both expressions in the application might refer to the same free variable, and we need
        -- to insert share nodes if that happens
        (env1, env2, usg) = combineUsages env usg1 usg2'
        -- If you send a signal to the usages originating from e2 then you implicitly enter the box.
        -- Furthermore, we need to make sure that before you enter the box you go through a bracket
        -- node -- inserting these is the job of bracketUsages
        (env2', usg2') = bracketUsages env2 (map (second enterBox) usg2)
        -- Finally, build the app node. Remember that e2 is boxed, so we need to enterBox on its input port
        (r, c, a) = app e1_port out_port (enterBox e2_port)
exprSemantics' out_port env (Lam v e) = (r, filter ((/= v) . fst) usg)
  where (e_port, usg) = exprSemantics' b ((v, p) : env) e
        -- If the binder is never used, plug its port with a labelled sink
        v_port = (fv $ "Plug for " ++ v) `fromMaybe` lookup v usg
        (r, b, p) = lam out_port e_port v_port
-- | Merge the variable usages of the two sides of an application. A variable
-- used by both sides gets a share node spliced in front of its
-- definition-site port; the two returned environments tell each side which
-- port now stands for each of its variables.
combineUsages :: [(String, Port)] -> [(String, Port)] -> [(String, Port)] -> ([(String, Port)], [(String, Port)], [(String, Port)])
combineUsages env usg1 usg2 = (catMaybes env1_mbs, catMaybes env2_mbs, usg)
  where
    (usg, env1_mbs, env2_mbs) = unzip3 [combineUsage v (lookup v usg1) (lookup v usg2)
                                       | v <- nub $ map fst (usg1 ++ usg2)]
    -- If both sides of the usage refer to the same variable, we need to insert a share node and
    -- adjust the usage and environment appropriately to interdict all communication between the
    -- use and definition sites
    combineUsage v mb_p1 mb_p2 = case (mb_p1, mb_p2) of
        -- Unreachable: v was drawn from usg1 ++ usg2 above
        (Nothing, Nothing) -> error "combineUsage"
        (Just p1, Nothing) -> ((v, p1), Just (v, p), Nothing)
        (Nothing, Just p2) -> ((v, p2), Nothing, Just (v, p))
        (Just p1, Just p2) -> let (p_in, l_in, r_in) = share p p1 p2
                              in ((v, p_in), Just (v, l_in), Just (v, r_in))
      where p = lookupInEnv env v
-- | For every usage originating from a boxed expression, interpose a bracket
-- node between the use site and its definition site before any signal leaves
-- the box. Returns the adjusted environment entries alongside the adjusted
-- usage entries.
bracketUsages :: [(String, Port)] -> [(String, Port)] -> ([(String, Port)], [(String, Port)])
bracketUsages env usgs = unzip [bracketOne u | u <- usgs]
  where
    bracketOne (v, p) = ((v, merged), (v, waiting))
      where (merged, waiting) = bracket p (lookupInEnv env v)
-- | Look up a variable's port in the environment, failing loudly when the
-- variable is unbound (an unbound variable here is a translation bug).
lookupInEnv :: [(String, Port)] -> String -> Port
lookupInEnv env v = fromMaybe (error ("No binding for " ++ v)) (lookup v env)
--
-- Examples
--
-- | Feed hand-built initial contexts into the example nets and print
-- whatever emerges at the other side.
examples :: IO ()
examples = do
    printUTF8 $ identity $ fromList [[White]]
    printUTF8 $ identity_app $ fromList [[]]
    printUTF8 $ self_app $ fromList [[White]]
    printUTF8 $ self_app $ fromList [[Black, LeftT, Symbol "x"], [Black, Symbol "α"]]
    printUTF8 $ fst dead_var $ fromList [[Black]]
    printUTF8 $ fst dead_var $ fromList [[White]]
    printUTF8 $ snd dead_var $ fromList [[Symbol "x"], [Symbol "α"]]
    printUTF8 $ fst app_to_fv $ fromList [[]]
    printUTF8 $ fst app_to_fv_in_lam $ fromList [[White]]
    printUTF8 $ snd app_to_fv_in_lam $ fromList [[Symbol "x"], [Black, Symbol "α"], [White]]
-- (\x.x) @ y
-- Port wired to the input of the lambda
identity :: Port
identity = r1
  where
    inp = fv "Input"
    -- Lambda whose body is its own binder, mediated by a croissant node
    (r1, b1, p1) = lam inp f2 b2
    (f2, b2) = croissant "x" b1 p1
-- (\x.x) @ y
-- Port wired to the input of the application
identity_app :: Port
identity_app = c1
  where
    inp = fv "Input"
    y = fv "y"
    -- The argument y sits inside a box, hence enterBox on its port
    (r1, c1, _a1) = app r2 inp (enterBox y)
    (r2, b2, p2) = lam r1 f3 b3
    (f3, b3) = croissant "x" b2 p2
-- | Self-application under a binder: \x. x x
self_app :: Port
self_app = fst $ exprSemantics $ Lam "x" $ V "x" :@ V "x"

-- | \y. x — the binder is dead; also exposes the port of the free variable x.
dead_var :: (Port, Port)
dead_var = (p, lookupInEnv fvs "x")
  where (p, fvs) = exprSemantics $ Lam "y" $ V "x"

-- | x @ y with both variables free; exposes all three ports.
app_to_fv :: (Port, Port, Port)
app_to_fv = (p, lookupInEnv fvs "x", lookupInEnv fvs "y")
  where (p, fvs) = exprSemantics $ V "x" :@ V "y"

-- | \y. x y — application to a free variable inside a lambda.
app_to_fv_in_lam :: (Port, Port)
app_to_fv_in_lam = (p, lookupInEnv fvs "x")
  where (p, fvs) = exprSemantics $ Lam "y" $ V "x" :@ V "y"
|
batterseapower/context-semantics
|
Language/ContextSemantics/CallByNeedLambda.hs
|
Haskell
|
bsd-3-clause
| 8,490
|
module Chp82
where
{-- Derived instances --}
{--
We explained that a typeclass is a sort of an interface that defines some behavior. A type can be made an instance of a typeclass if it supports that behavior.
--}
{--
We also mentioned that they're often confused with classes in languages like Java, Python, C++ and the like, which then baffles a lot of people. In those languages, classes are a blueprint from which we then create objects that contain state and can do some actions.
Typeclasses are more like interfaces. We don't make data from typeclasses. Instead, we first make our data type and then we think about what it can act like.
If it can act like something that can be equated, we make it an instance of the Eq typeclass.
--}
{--
Haskell can derive the behavior of our types in these contexts if we use the deriving keyword when making our data type.
--}
-- | Example record type. Deriving Eq yields structural equality, which
-- requires every field type (String, Int) to be an Eq instance itself.
data Person = Person { firstName :: String
                     , lastName :: String
                     , age :: Int
                     } deriving (Eq)
{--
When we derive the Eq instance for a type and then try to compare two values of that type with == or /=, Haskell will see if the value constructors match (there's only one value constructor here though) and then it will check if all the data contained inside matches by testing each pair of fields with ==.
There's only one catch though, the types of all the fields also have to be part of the Eq typeclass.
But since both String and Int are, we're OK. Let's test our Eq instance.
--}
mikeD = Person {firstName = "Michael", lastName = "Diamond", age = 43}
{--
mikeD == Person {firstName = "Michael", lastName = "Diamond", age = 43}
True
we can use it as the "a" for all functions that have a class constraint of "Eq a" in their type signature, such as "elem".
The Show and Read typeclasses are for things that can be converted to or from strings, respectively. Like with Eq, if a type's constructors have fields, their type has to be a part of Show or Read if we want to make our type an instance of them.
--}
-- | Like 'Person', but also deriving Show and Read so values can be
-- rendered to and parsed back from strings.
data Person2 = Person2 { firstName2 :: String
                       , lastName2 :: String
                       , age2 :: Int
                       } deriving (Eq, Show, Read)
{--
`Read` is pretty much the inverse typeclass of `Show`. `Show` is for converting values of our a type to a string, `Read` is for converting strings to values of our type.
Remember though, when we use the `read` function, we have to use an explicit type annotation to tell Haskell which type we want to get as a result.
If we don't make the type we want as a result explicit, Haskell doesn't know which type we want.
--}
res3 = read "Person2 {firstName2 =\"Michael\", lastName2 =\"Diamond\", age2 = 43}" :: Person2
{--
If we use the result of our read later on in a way that Haskell can infer that it should read it as a person, we don't have to use type annotation.
read "Person {firstName =\"Michael\", lastName =\"Diamond\", age = 43}" == mikeD
True
We can also read parameterized types, but we have to fill in the type parameters.
So we can't do:
read "Just 't'" :: Maybe a
but we can do:
read "Just 't'" :: Maybe Char
--}
{--
We can derive instances for the `Ord` type class, which is for types that have values that can be ordered.
If we compare two values of the same type that were made using different constructors, the value which was made with a constructor that's defined first is considered smaller.
data Bool = False | True deriving (Ord)
--}
-- | Bool derives Ord with False declared first, so False < True:
-- 'compare' yields GT here and (>) yields True.
-- (Added the missing top-level signatures.)
res4 :: Ordering
res4 = True `compare` False

res5 :: Bool
res5 = True > False
{--
In the `Maybe a` data type, the `Nothing` value constructor is specified before the Just value constructor, so a value of `Nothing` is always smaller than a value of Just something, even if that something is minus one billion trillion.
But if we compare two Just values, then it goes to compare what's inside them.
But we can't do something like Just (*3) > Just (*2), because (*3) and (*2) are functions, which aren't instances of Ord.
--}
data Day1 = Monday1 | Tuesday1 | Wednesday1 | Thursday1 | Friday1 | Saturday1 | Sunday1
{--
Because all the value constructors are nullary (take no parameters, i.e. fields), we can make it part of the Enum typeclass. The Enum typeclass is for things that have predecessors and successors.
We can also make it part of the Bounded typeclass, which is for things that have a lowest possible value and highest possible value.
--}
-- | Because every constructor is nullary, Enum (predecessors/successors)
-- and Bounded (minBound/maxBound) can be derived alongside the usual classes.
data Day2 = Monday | Tuesday | Wednesday | Thursday | Friday | Saturday | Sunday
    deriving (Eq, Ord, Show, Read, Bounded, Enum)

-- The first constructor is the lower bound.
res6 = minBound :: Day2
-- Monday
{--
Type synonyms
Type synonyms don't really do anything per se, they're just about giving some types different names so that they make more sense to someone reading our code and documentation.
Here's how the standard library defines String as a synonym for [Char].
--}
type String2 = [Char]
{--
Type synonyms can also be parameterized. If we want a type that represents an association list type but still want it to be general so it can use any type as the keys and values, we can do this:
--}
type AssocList k v = [(k,v)]
{--
Now, a function that gets the value by a key in an association list can have a type of (Eq k) => k -> AssocList k v -> Maybe v.
AssocList is a type constructor that takes two types and produces a concrete type, like AssocList Int String, for instance.
When I talk about concrete types I mean like fully applied types like Map Int String or if we're dealin' with one of them polymorphic functions, [a] or (Ord a) => Maybe a and stuff.
And like, sometimes me and the boys say that Maybe is a type, but we don't mean that, cause every idiot knows Maybe is a type constructor.
When I apply an extra type to Maybe, like "Maybe String", then I have a concrete type. You know, values can only have types that are concrete types!
--}
{--
Partial applied type constructor
Just like we can partially apply functions to get new functions, we can partially apply type parameters and get new type constructors from them.
Just like we call a function with too few parameters to get back a new function, we can specify a type constructor with too few type parameters and get back a partially applied type constructor.
type IntMap v = Map Int v
or we can do this
type IntMap = Map Int
When you do a qualified import, type constructors also have to be preceded with a module name. So you'd write type IntMap = Map.Map Int.
Make sure that you really understand the distinction between type constructors and value constructors.
Just because we made a type synonym called `IntMap` or `AssocList` doesn't mean that we can do stuff like:
AssocList [(1,2),(4,5),(7,9)]
All it means is that we can refer to its type by using different names.
We can do:
[(1,2),(3,5),(8,9)] :: AssocList Int Int
which will make the numbers inside assume a type of Int.
Type synonyms (and types generally) can only be used in the type portion of Haskell.
We're in Haskell's type portion whenever we're defining new types (so in data and type declarations) or when we're located after a "::". The "::" is in type declarations or in type annotations.
--}
-- | Whether a locker is currently occupied or available.
data LockerState = Taken | Free deriving (Show, Eq)

-- | Synonym for a locker code.
type Code = String
{--
Simple stuff. We introduce a new data type to represent whether a locker is taken or free and we make a type synonym for the locker code.
--}
|
jamesyang124/haskell-playground
|
src/Chp82.hs
|
Haskell
|
bsd-3-clause
| 7,414
|
module Snap.Snaplet.Config.Tests where
------------------------------------------------------------------------------
import Control.Concurrent
import Control.Concurrent.Async
import Control.Monad
import qualified Data.ByteString.Char8 as BS
import qualified Data.Configurator.Types as C
import Data.Function
import qualified Data.Map as Map
#if !MIN_VERSION_base(4,11,0)
import Data.Semigroup
import Data.Monoid hiding ((<>))
#else
import Data.Monoid
#endif
import Data.Typeable
import System.Environment
------------------------------------------------------------------------------
import Snap.Core
import Snap.Http.Server.Config
import Snap.Snaplet
import Snap.Snaplet.Config
import Snap.Snaplet.Heist
import Snap.Snaplet.Test.Common.App
import Snap.Snaplet.Internal.Initializer
import qualified Snap.Test as ST
import Snap.Snaplet.Test
import Test.Framework
import Test.Framework.Providers.HUnit
import Test.Framework.Providers.QuickCheck2
import Test.QuickCheck
import Test.HUnit hiding (Test)
------------------------------------------------------------------------------
-- | Test group checking that AppConfig's Monoid instance obeys the monoid
-- laws and that its Typeable rendering is as expected.
configTests :: Test
configTests = testGroup "Snaplet Config"
    [ testProperty "Monoid left identity" monoidLeftIdentity
    , testProperty "Monoid right identity" monoidRightIdentity
    , testProperty "Monoid associativity" monoidAssociativity
    , testCase "Verify Typeable instance" verTypeable
    -- , testCase "Config options used" appConfigGetsToConfig
    ]
-- | Newtype wrapper around AppConfig so the test instances below
-- (Show/Eq/Arbitrary/Semigroup/Monoid) can be defined without orphans.
newtype ArbAppConfig = ArbAppConfig { unArbAppConfig :: AppConfig }

instance Show ArbAppConfig where
    show (ArbAppConfig (AppConfig a)) =
        "ArbAppConfig (AppConfig " ++ show a ++ ")"

-- Equality is judged on the environment name only.
instance Eq ArbAppConfig where
    a == b = ((==) `on` (appEnvironment . unArbAppConfig)) a b

instance Arbitrary ArbAppConfig where
    arbitrary = liftM (ArbAppConfig . AppConfig) arbitrary

instance Semigroup ArbAppConfig where
    a <> b = ArbAppConfig $ ((<>) `on` unArbAppConfig) a b

instance Monoid ArbAppConfig where
    mempty = ArbAppConfig mempty
-- On older base, Monoid is not a superclass of Semigroup, so mappend must
-- be wired to (<>) explicitly.
#if !MIN_VERSION_base(4,11,0)
    mappend = (<>)
#endif
-- | 'mempty' must be a left identity for '<>'.
monoidLeftIdentity :: ArbAppConfig -> Bool
monoidLeftIdentity cfg = mempty <> cfg == cfg
-- | 'mempty' must be a right identity for '<>'.
monoidRightIdentity :: ArbAppConfig -> Bool
monoidRightIdentity cfg = cfg <> mempty == cfg
-- | '<>' must be associative.
monoidAssociativity :: ArbAppConfig -> ArbAppConfig -> ArbAppConfig -> Bool
monoidAssociativity x y z = ((x <> y) <> z) == (x <> (y <> z))
------------------------------------------------------------------------------
-- | 'typeOf' rendering differs across base versions: from base-4.7 onwards
-- Typeable shows only the unqualified type constructor name.
verTypeable :: Assertion
verTypeable =
    assertEqual "Unexpected Typeable behavior"
#if MIN_VERSION_base(4,7,0)
        "AppConfig"
#else
        "Snap.Snaplet.Config.AppConfig"
#endif
        (show . typeOf $ (undefined :: AppConfig))
------------------------------------------------------------------------------
-- | Smoke test (currently disabled in 'configTests' above): boot the snaplet
-- server twice with different command-line arguments, give it half a second,
-- then tear it down. It only exercises the config code path; it asserts
-- nothing about the resulting configuration.
appConfigGetsToConfig :: Assertion
appConfigGetsToConfig = do
    opts <- completeConfig =<<
            commandLineAppConfig defaultConfig :: IO (Config Snap AppConfig)
    a <- async . withArgs ["-p", "8001","-e","otherEnv"] $
         serveSnaplet opts appInit
    -- Let the server boot before cancelling it.
    threadDelay 500000
    cancel a
    b <- async . withArgs ["--environment","devel"] $ serveSnaplet defaultConfig appInit
    threadDelay 500000
    cancel b
    --TODO - Don't just run the server to touch the config code. Check some values
|
snapframework/snap
|
test/suite/Snap/Snaplet/Config/Tests.hs
|
Haskell
|
bsd-3-clause
| 3,308
|
--------------------------------------------------------------------------------
{-# LANGUAGE OverloadedStrings #-}
module NumberSix.Handlers.TryRuby
( ruby
, handler
) where
--------------------------------------------------------------------------------
import Control.Applicative ((<$>))
import Control.Monad (mzero)
import Control.Monad.Trans (liftIO)
import Data.Aeson (FromJSON, Value (..), parseJSON, (.:))
import Data.Text (Text)
import qualified Data.Text.Encoding as T
import qualified Network.HTTP.Conduit as HC
--------------------------------------------------------------------------------
import NumberSix.Bang
import NumberSix.Irc
import NumberSix.Util
import NumberSix.Util.Error
import NumberSix.Util.Http
--------------------------------------------------------------------------------
-- | Outcome of evaluating a Ruby snippet remotely: either the program's
-- output or the error text reported by the service.
data Result
    = Success Text
    | Error Text
    deriving (Show)
--------------------------------------------------------------------------------
-- | Parse the service's JSON reply: a "success" flag selects whether the
-- payload lives under "output" (success) or "result" (error).
instance FromJSON Result where
    parseJSON (Object o) = do
        ok <- o .: "success"
        if ok
            then Success <$> o .: "output"
            else Error <$> o .: "result"
    parseJSON _ = mzero
--------------------------------------------------------------------------------
-- | Evaluate a snippet of Ruby via the tryruby.org endpoint and return its
-- textual output (or the reported error; a canned random error message when
-- the response fails to parse).
ruby :: Text -> IO Text
ruby cmd = do
    -- NOTE(review): the endpoint URL and the PUT + urlencoded "cmd" body
    -- mimic the site's own AJAX calls — confirm they are still current.
    bs <- http "http://tryruby.org/levels/1/challenges/0" (setPut . setCmd)
    case parseJsonEither bs of
        Right (Success x) -> return x
        Right (Error x) -> return x
        Left _ -> randomError
  where
    setCmd :: Monad m => HC.Request m -> HC.Request m
    setCmd = HC.urlEncodedBody [("cmd", T.encodeUtf8 cmd)]
    setPut rq = rq {HC.method = "PUT"}
--------------------------------------------------------------------------------
-- | IRC bang handler: "!ruby <code>" (or the "@" alias) evaluates code
-- through 'ruby'.
handler :: UninitializedHandler
handler = makeBangHandler "TryRuby" ["@","!ruby"] $ liftIO . ruby
|
itkovian/number-six
|
src/NumberSix/Handlers/TryRuby.hs
|
Haskell
|
bsd-3-clause
| 1,999
|
module Main where
import Network
import System.IO (hPutStrLn, hClose, Handle)
import Control.Concurrent (forkIO)
-- | Bind the listening socket and serve quote connections forever.
main :: IO ()
main = startServer >>= handleConnections qotdService
-- | Listen on TCP port 17 (the standard quote-of-the-day port — TODO confirm
-- that is the intent; requires privileges on most systems).
startServer :: IO Socket
startServer = listenOn (PortNumber 17)
-- | Accept connections in an infinite loop, running the given service for
-- each accepted handle on its own thread and closing the handle when the
-- service finishes.
handleConnections :: (Handle -> IO ()) -> Socket -> IO ()
handleConnections handler socket = do
    (handle, _, _) <- accept socket
    -- BUGFIX: log after accept returns — previously this printed before any
    -- client had actually connected, which was misleading.
    putStrLn "Received connection..."
    _ <- forkIO $ handler handle >> hClose handle
    handleConnections handler socket
-- | The quote "service": write a single fixed line to the client.
qotdService :: Handle -> IO ()
qotdService client = hPutStrLn client "Hello world"
|
anler/tcp-quotes
|
app/Main.hs
|
Haskell
|
bsd-3-clause
| 581
|
--
-- xmonad example config file.
--
-- A template showing all available configuration hooks,
-- and how to override the defaults in your own xmonad.hs conf file.
--
-- Normally, you'd only override those defaults you care about.
--
import XMonad
import Data.Monoid
import System.Exit
import qualified XMonad.StackSet as W
import qualified Data.Map as M
-- The preferred terminal program, which is used in a binding below and by
-- certain contrib modules.
--
-- (Added the missing top-level type signatures throughout this section so
-- the file is clean under -Wall.)
myTerminal :: String
myTerminal = "xterm"

-- Whether focus follows the mouse pointer.
myFocusFollowsMouse :: Bool
myFocusFollowsMouse = True

-- Whether clicking on a window to focus also passes the click to the window
myClickJustFocuses :: Bool
myClickJustFocuses = False

-- Width of the window border in pixels.
--
myBorderWidth :: Dimension
myBorderWidth = 1

-- modMask lets you specify which modkey you want to use. The default
-- is mod1Mask ("left alt"). You may also consider using mod3Mask
-- ("right alt"), which does not conflict with emacs keybindings. The
-- "windows key" is usually mod4Mask.
--
myModMask :: KeyMask
myModMask = mod1Mask

-- The default number of workspaces (virtual screens) and their names.
-- By default we use numeric strings, but any string may be used as a
-- workspace name. The number of workspaces is determined by the length
-- of this list.
--
-- A tagging example:
--
-- > workspaces = ["web", "irc", "code" ] ++ map show [4..9]
--
myWorkspaces :: [String]
myWorkspaces = ["1","2","3","4","5","6","7","8","9"]

-- Border colors for unfocused and focused windows, respectively.
--
myNormalBorderColor, myFocusedBorderColor :: String
myNormalBorderColor  = "#dddddd"
myFocusedBorderColor = "#ff0000"
------------------------------------------------------------------------
-- Key bindings. Add, modify or remove key bindings here.
--
-- | Key bindings, built from the config's modifier key.
-- BUGFIX: the xmessage help binding previously used 'modMask' (the XConfig
-- record field selector, of type @XConfig l -> KeyMask@, which does not
-- typecheck with '.|.') instead of the pattern-bound modifier 'modm'.
myKeys conf@(XConfig {XMonad.modMask = modm}) = M.fromList $

    -- launch a terminal
    [ ((modm .|. shiftMask, xK_Return), spawn $ XMonad.terminal conf)

    -- launch dmenu
    , ((modm,               xK_p     ), spawn "dmenu_run")

    -- launch gmrun
    , ((modm .|. shiftMask, xK_p     ), spawn "gmrun")

    -- close focused window
    , ((modm .|. shiftMask, xK_c     ), kill)

    -- Rotate through the available layout algorithms
    , ((modm,               xK_space ), sendMessage NextLayout)

    -- Reset the layouts on the current workspace to default
    , ((modm .|. shiftMask, xK_space ), setLayout $ XMonad.layoutHook conf)

    -- Resize viewed windows to the correct size
    , ((modm,               xK_n     ), refresh)

    -- Move focus to the next window
    , ((modm,               xK_Tab   ), windows W.focusDown)

    -- Move focus to the next window
    , ((modm,               xK_j     ), windows W.focusDown)

    -- Move focus to the previous window
    , ((modm,               xK_k     ), windows W.focusUp  )

    -- Move focus to the master window
    , ((modm,               xK_m     ), windows W.focusMaster  )

    -- Swap the focused window and the master window
    , ((modm,               xK_Return), windows W.swapMaster)

    -- Swap the focused window with the next window
    , ((modm .|. shiftMask, xK_j     ), windows W.swapDown  )

    -- Swap the focused window with the previous window
    , ((modm .|. shiftMask, xK_k     ), windows W.swapUp    )

    -- Shrink the master area
    , ((modm,               xK_h     ), sendMessage Shrink)

    -- Expand the master area
    , ((modm,               xK_l     ), sendMessage Expand)

    -- Push window back into tiling
    , ((modm,               xK_t     ), withFocused $ windows . W.sink)

    -- Increment the number of windows in the master area
    , ((modm              , xK_comma ), sendMessage (IncMasterN 1))

    -- Deincrement the number of windows in the master area
    , ((modm              , xK_period), sendMessage (IncMasterN (-1)))

    -- Toggle the status bar gap
    -- Use this binding with avoidStruts from Hooks.ManageDocks.
    -- See also the statusBar function from Hooks.DynamicLog.
    --
    -- , ((modm              , xK_b     ), sendMessage ToggleStruts)

    -- Quit xmonad
    , ((modm .|. shiftMask, xK_q     ), io (exitWith ExitSuccess))

    -- Restart xmonad
    , ((modm              , xK_q     ), spawn "xmonad --recompile; xmonad --restart")

    -- Run xmessage with a summary of the default keybindings (useful for beginners)
    , ((modm .|. shiftMask, xK_slash ), spawn ("echo \"" ++ help ++ "\" | xmessage -file -"))
    ]
    ++

    --
    -- mod-[1..9], Switch to workspace N
    -- mod-shift-[1..9], Move client to workspace N
    --
    [((m .|. modm, k), windows $ f i)
        | (i, k) <- zip (XMonad.workspaces conf) [xK_1 .. xK_9]
        , (f, m) <- [(W.greedyView, 0), (W.shift, shiftMask)]]
    ++

    --
    -- mod-{w,e,r}, Switch to physical/Xinerama screens 1, 2, or 3
    -- mod-shift-{w,e,r}, Move client to screen 1, 2, or 3
    --
    [((m .|. modm, key), screenWorkspace sc >>= flip whenJust (windows . f))
        | (key, sc) <- zip [xK_w, xK_e, xK_r] [0..]
        , (f, m) <- [(W.view, 0), (W.shift, shiftMask)]]
------------------------------------------------------------------------
-- Mouse bindings: default actions bound to mouse events
--
-- | Mouse bindings: mod+button1 floats and moves, mod+button2 raises to the
-- top of the stack, mod+button3 floats and resizes.
myMouseBindings (XConfig {XMonad.modMask = modm}) = M.fromList $

    -- mod-button1, Set the window to floating mode and move by dragging
    [ ((modm, button1), (\w -> focus w >> mouseMoveWindow w
                                       >> windows W.shiftMaster))

    -- mod-button2, Raise the window to the top of the stack
    , ((modm, button2), (\w -> focus w >> windows W.shiftMaster))

    -- mod-button3, Set the window to floating mode and resize by dragging
    , ((modm, button3), (\w -> focus w >> mouseResizeWindow w
                                       >> windows W.shiftMaster))

    -- you may also bind events to the mouse scroll wheel (button4 and button5)
    ]
------------------------------------------------------------------------
-- Layouts:
-- You can specify and transform your layouts by modifying these values.
-- If you change layout bindings be sure to use 'mod-shift-space' after
-- restarting (with 'mod-q') to reset your layout state to the new
-- defaults, as xmonad preserves your old layout settings by default.
--
-- The available layouts. Note that each layout is separated by |||,
-- which denotes layout choice.
--
-- | Layout hook: three layouts cycled with the NextLayout message.
myLayout = tiled ||| Mirror tiled ||| Full
  where
    -- default tiling algorithm partitions the screen into two panes
    tiled = Tall nmaster delta ratio

    -- The default number of windows in the master pane
    nmaster = 1

    -- Default proportion of screen occupied by master pane
    ratio = 1/2

    -- Percent of screen to increment by when resizing panes
    delta = 3/100
------------------------------------------------------------------------
-- Window rules:
-- Execute arbitrary actions and WindowSet manipulations when managing
-- a new window. You can use this to, for example, always float a
-- particular program, or have a client always appear on a particular
-- workspace.
--
-- To find the property name associated with a program, use
-- > xprop | grep WM_CLASS
-- and click on the client you're interested in.
--
-- To match on the WM_NAME, you can use 'title' in the same way that
-- 'className' and 'resource' are used below.
--
-- | Per-window rules matched on WM_CLASS ('className') or resource name.
myManageHook = composeAll
    [ className =? "MPlayer"        --> doFloat
    , className =? "Gimp"           --> doFloat
    , resource  =? "desktop_window" --> doIgnore
    , resource  =? "kdesktop"       --> doIgnore ]
------------------------------------------------------------------------
-- Event handling
-- * EwmhDesktops users should change this to ewmhDesktopsEventHook
--
-- Defines a custom handler function for X Events. The function should
-- return (All True) if the default handler is to be run afterwards. To
-- combine event hooks use mappend or mconcat from Data.Monoid.
--
myEventHook = mempty
------------------------------------------------------------------------
-- Status bars and logging
-- Perform an arbitrary action on each internal state change or X event.
-- See the 'XMonad.Hooks.DynamicLog' extension for examples.
--
myLogHook = return ()
------------------------------------------------------------------------
-- Startup hook
-- Perform an arbitrary action each time xmonad starts or is restarted
-- with mod-q. Used by, e.g., XMonad.Layout.PerWorkspace to initialize
-- per-workspace layout choices.
--
-- By default, do nothing.
myStartupHook = return ()
------------------------------------------------------------------------
-- Now run xmonad with all the defaults we set up.
-- Run xmonad with the settings you specify. No need to modify this.
--
main = xmonad defaults
-- A structure containing your configuration settings, overriding
-- fields in the default config. Any you don't override, will
-- use the defaults defined in xmonad/XMonad/Config.hs
--
-- No need to modify this.
--
-- | The configuration record handed to xmonad; each field overrides the
-- corresponding default from XMonad/Config.hs.
-- NOTE(review): newer xmonad deprecates 'defaultConfig' in favour of 'def' —
-- confirm the targeted xmonad version before changing.
defaults = defaultConfig {
      -- simple stuff
        terminal           = myTerminal,
        focusFollowsMouse  = myFocusFollowsMouse,
        clickJustFocuses   = myClickJustFocuses,
        borderWidth        = myBorderWidth,
        modMask            = myModMask,
        workspaces         = myWorkspaces,
        normalBorderColor  = myNormalBorderColor,
        focusedBorderColor = myFocusedBorderColor,

      -- key bindings
        keys               = myKeys,
        mouseBindings      = myMouseBindings,

      -- hooks, layouts
        layoutHook         = myLayout,
        manageHook         = myManageHook,
        handleEventHook    = myEventHook,
        logHook            = myLogHook,
        startupHook        = myStartupHook
    }
|
markus1189/xmonad-710
|
man/xmonad.hs
|
Haskell
|
bsd-3-clause
| 9,677
|
import Distribution.Simple

-- | Standard Cabal setup script: delegate to Cabal's default build entry
-- point. (Added the missing type signature.)
main :: IO ()
main = defaultMain
|
solidsnack/maccatcher
|
Setup.hs
|
Haskell
|
bsd-3-clause
| 74
|
{-|
This module provides the /Remove Weak Suffixes/ processor.
Let @Wl#@ be forward closed, then
@
|- <S# / W# + W, Q, T#> :f
-------------------------------------
|- <S# / W# + Wl# + W, Q, T#> :f
@
-}
module Tct.Trs.Processor.DP.DPGraph.RemoveWeakSuffixes
( removeWeakSuffixesDeclaration
, removeWeakSuffixes
) where
import qualified Data.Set as S
import qualified Data.Rewriting.Rule as R (Rule)
import qualified Tct.Core.Common.Pretty as PP
import qualified Tct.Core.Common.Xml as Xml
import qualified Tct.Core.Data as T
import Tct.Common.ProofCombinators
import Tct.Trs.Data
import qualified Tct.Trs.Data.Rules as RS
import Tct.Trs.Data.DependencyGraph
import qualified Tct.Trs.Data.Problem as Prob
data RemoveWeakSuffixes = RemoveWeakSuffixes deriving Show
-- | Proof object: on success, records the dependency graph together with the
-- removed (node, rule) pairs; otherwise marks the failed application.
data RemoveWeakSuffixesProof
    = RemoveWeakSuffixesProof
      { wdg_ :: DG F V
      , removable_ :: [(NodeId, R.Rule F V)] }
    | RemoveWeakSuffixesFail
    deriving Show
instance T.Processor RemoveWeakSuffixes where
    type ProofObject RemoveWeakSuffixes = ApplicationProof RemoveWeakSuffixesProof
    type In RemoveWeakSuffixes = Trs
    type Out RemoveWeakSuffixes = Trs

    -- an scc in the congruence graph is considered weak if all rules in the scc are weak
    -- compute maximal weak suffix bottom-up
    execute RemoveWeakSuffixes prob =
        maybe remtail (\s -> T.abortWith (Inapplicable s :: ApplicationProof RemoveWeakSuffixesProof)) (Prob.isDTProblem' prob)
      where
        remtail
            -- No weak leaf SCCs: nothing to remove, report failure
            | null initials = T.abortWith (Applicable RemoveWeakSuffixesFail)
            | otherwise = T.succeedWith1 (Applicable proof) T.fromId nprob
          where
            -- An SCC is removable only if none of its rules is strict
            onlyWeaks = not . any (isStrict . snd) . theSCC

            -- Walk predecessors upward from the weak leaves, marking a node
            -- removable once all of its successors are already marked
            computeTails [] lfs = lfs
            computeTails (n:ns) lfs
                | n `S.member` lfs = computeTails ns lfs
                | otherwise = computeTails (ns++preds) lfs'
              where
                (lpreds, _, cn, lsucs) = context cdg n
                sucs = map snd lsucs
                preds = map snd lpreds
                lfs' = if S.fromList sucs `S.isSubsetOf` lfs && onlyWeaks cn
                         then S.insert n lfs
                         else lfs

            -- congruence graph
            cdg = Prob.congruenceGraph prob
            initials = [n | (n,cn) <- withNodeLabels' cdg (leafs cdg), onlyWeaks cn]
            cdgTail = S.toList $ computeTails initials S.empty

            -- dependency graph
            wdg = Prob.dependencyGraph prob
            wdgLabTail = fmap theRule `fmap` concatMap (theSCC . lookupNodeLabel' cdg) cdgTail
            (wdgTail, rs) = unzip wdgLabTail

            -- Drop the removable pairs from the weak DPs and from both graphs
            nprob = prob
                { Prob.weakDPs = Prob.weakDPs prob `RS.difference` RS.fromList rs
                , Prob.dpGraph = DependencyGraph
                  { dependencyGraph = wdg `removeNodes` wdgTail
                  , congruenceGraph = cdg `removeNodes` cdgTail }}
            proof = RemoveWeakSuffixesProof { wdg_ = wdg, removable_ = wdgLabTail }
--- * instances ------------------------------------------------------------------------------------------------------
-- | Strategy declaration for the weak-suffix removal processor.
removeWeakSuffixesDeclaration :: T.Declaration ('[] T.:-> TrsStrategy)
removeWeakSuffixesDeclaration =
  T.declare "removeWeakSuffixes" description () (T.Apply RemoveWeakSuffixes)
  where
    description =
      [ "Removes trailing paths that do not need to be oriented."
      , "Only applicable if the strict component is empty."]
-- | Removes trailing weak paths.
-- A dependency pair is on a trailing weak path if it is from the weak components and all successors in the dependency
-- graph are on trailing weak paths.
--
-- Only applicable on DP-problems as obtained by 'dependencyPairs' or 'dependencyTuples'. Also
-- not applicable when @strictTrs prob /= RS.empty@.
removeWeakSuffixes :: TrsStrategy
removeWeakSuffixes = T.declFun removeWeakSuffixesDeclaration
--- * proofdata ------------------------------------------------------------------------------------------------------
-- | Human-readable rendering of the proof object.
instance PP.Pretty RemoveWeakSuffixesProof where
  pretty RemoveWeakSuffixesFail = PP.text "The dependency graph contains no sub-graph of weak DPs closed under successors."
  pretty proof@RemoveWeakSuffixesProof{} = PP.vcat
    [ PP.text "Consider the dependency graph"
    , PP.indent 2 (PP.pretty (wdg_ proof))
    , PP.text "The following weak DPs constitute a sub-graph of the DG that is closed under successors. The DPs are removed."
    , PP.indent 2 (PP.listing' (removable_ proof)) ]
-- | XML rendering of the proof object.
instance Xml.Xml RemoveWeakSuffixesProof where
  toXml RemoveWeakSuffixesFail = Xml.elt "removeWeakSuffixes" []
  toXml proof@RemoveWeakSuffixesProof{} =
    Xml.elt "removeWeakSuffixes"
      [ Xml.toXml (wdg_ proof)
      , Xml.elt "removeWeakSuffix" (map Xml.toXml (removable_ proof)) ]
|
ComputationWithBoundedResources/tct-trs
|
src/Tct/Trs/Processor/DP/DPGraph/RemoveWeakSuffixes.hs
|
Haskell
|
bsd-3-clause
| 4,815
|
{-# LANGUAGE UnicodeSyntax #-}
import Prelude.Unicode

-- | A binary tree: empty, or a value with two subtrees.
data Tree a = Empty | Branch a (Tree a) (Tree a)
  deriving (Show, Eq)

-- | Example tree; its leaves (nodes with two Empty children) are 4 and 2.
tree4 = Branch 1
          (Branch 2 Empty (Branch 4 Empty Empty))
          (Branch 2 Empty Empty)

-- | Count the leaves of a tree.
countLeaves ∷ Tree a → Int
countLeaves = length . leaves

-- | Collect the values stored in the leaves, left to right.
leaves ∷ Tree a → [a]
leaves t = case t of
  Empty                -> []
  Branch x Empty Empty -> [x]
  Branch _ l r         -> leaves l ++ leaves r
|
m00nlight/99-problems
|
haskell/p-61.hs
|
Haskell
|
bsd-3-clause
| 514
|
{-
******************************************************************************
* I N V A D E R S *
* *
* Module: Command *
* Purpose: The Invader command type. *
* Author: Henrik Nilsson *
* *
* Copyright (c) Yale University, 2003 *
* *
******************************************************************************
-}
module Command (
Command(..)
) where
-- | Top-level commands understood by the game.
data Command =
    CmdQuit    -- Quit Invaders.
  | CmdNewGame -- Play game.
  | CmdFreeze  -- Freeze game.
  | CmdResume  -- Resume game.
|
ivanperez-keera/SpaceInvaders
|
src/Command.hs
|
Haskell
|
bsd-3-clause
| 1,079
|
{-# LANGUAGE MultiWayIf #-}
{-# LANGUAGE TemplateHaskell #-}
module Types.Posts
( ClientMessage
, newClientMessage
, cmDate
, cmType
, cmText
, ClientMessageType(..)
, Attachment
, mkAttachment
, attachmentName
, attachmentFileId
, attachmentURL
, ClientPostType(..)
, ClientPost
, toClientPost
, cpUserOverride
, cpMarkdownSource
, cpUser
, cpText
, cpType
, cpReactions
, cpPending
, cpOriginalPost
, cpInReplyToPost
, cpDate
, cpChannelId
, cpAttachments
, cpDeleted
, cpPostId
, unEmote
, postIsLeave
, postIsJoin
, postIsTopicChange
, postIsEmote
, getBlocks
)
where
import Prelude ()
import Prelude.MH
import Cheapskate ( Blocks )
import qualified Cheapskate as C
import qualified Data.Map.Strict as Map
import qualified Data.Sequence as Seq
import qualified Data.Text as T
import Data.Time.Clock ( getCurrentTime )
import Lens.Micro.Platform ( makeLenses )
import Network.Mattermost.Lenses
import Network.Mattermost.Types
import Types.Common
-- * Client Messages
-- | A 'ClientMessage' is a message given to us by our client,
-- like help text or an error message.
data ClientMessage = ClientMessage
  { _cmText :: Text               -- ^ the message text
  , _cmDate :: ServerTime         -- ^ creation time (local time wrapped as server time)
  , _cmType :: ClientMessageType  -- ^ determines how the message is formatted
  } deriving (Eq, Show)
-- | Create a new 'ClientMessage' value. This is a message generated
-- by this Matterhorn client and not by (or visible to) the Server.
-- These should be visible, but not necessarily integrated into any
-- special position in the output stream (i.e., they should generally
-- appear at the bottom of the messages display, but subsequent
-- messages should follow them), so this is a special place where
-- there is an assumed approximation of equality between local time
-- and server time.
newClientMessage :: (MonadIO m) => ClientMessageType -> Text -> m ClientMessage
newClientMessage msgType body = do
  timestamp <- liftIO getCurrentTime
  return (ClientMessage body (ServerTime timestamp) msgType)
-- | We format 'ClientMessage' values differently depending on
-- their 'ClientMessageType'
data ClientMessageType =
    Informative
  | Error
  | DateTransition
  | NewMessagesTransition
  | UnknownGap -- ^ marks region where server may have messages unknown locally
  deriving (Eq, Show)
-- ** 'ClientMessage' Lenses
makeLenses ''ClientMessage
-- * Mattermost Posts
-- | A 'ClientPost' is a temporary internal representation of
-- the Mattermost 'Post' type, with unnecessary information
-- removed and some preprocessing done.
data ClientPost = ClientPost
  { _cpText           :: Blocks           -- ^ parsed Markdown AST (see 'toClientPost')
  , _cpMarkdownSource :: Text             -- ^ Markdown source, after @/me@ unwrapping
  , _cpUser           :: Maybe UserId     -- ^ author, if known
  , _cpUserOverride   :: Maybe Text       -- ^ username override from the post props
  , _cpDate           :: ServerTime       -- ^ creation time
  , _cpType           :: ClientPostType   -- ^ normal message or special event
  , _cpPending        :: Bool             -- ^ initialised to 'False' by 'toClientPost'
  , _cpDeleted        :: Bool             -- ^ initialised to 'False' by 'toClientPost'
  , _cpAttachments    :: Seq Attachment   -- ^ initialised empty by 'toClientPost'
  , _cpInReplyToPost  :: Maybe PostId     -- ^ parent post, if this is a reply
  , _cpPostId         :: PostId           -- ^ server-side post id
  , _cpChannelId      :: ChannelId        -- ^ channel this post belongs to
  , _cpReactions      :: Map.Map Text Int -- ^ reaction name to count
  , _cpOriginalPost   :: Post             -- ^ the unmodified server post
  } deriving (Show)
-- | An attachment has a very long URL associated, as well as
-- an actual file URL
data Attachment = Attachment
  { _attachmentName   :: Text   -- ^ display name
  , _attachmentURL    :: Text   -- ^ attachment URL
  , _attachmentFileId :: FileId -- ^ server-side file identifier
  } deriving (Eq, Show)
-- | Construct an 'Attachment' from its name, URL, and file id.
mkAttachment :: Text -> Text -> FileId -> Attachment
mkAttachment = Attachment
-- | A Mattermost 'Post' value can represent either a normal
-- chat message or one of several special events.
data ClientPostType =
    NormalPost
  | Emote       -- ^ a @/me@ post
  | Join        -- ^ user joined a channel
  | Leave       -- ^ user left a channel
  | TopicChange -- ^ channel header change
  deriving (Eq, Show)
-- ** Creating 'ClientPost' Values
-- | Parse text as Markdown and extract the AST
getBlocks :: Text -> Blocks
getBlocks src =
    let C.Doc _ blocks = C.markdown C.def src
    in blocks
-- | Determine the internal 'PostType' based on a 'Post'
postClientPostType :: Post -> ClientPostType
postClientPostType post
  | postIsEmote post       = Emote
  | postIsJoin post        = Join
  | postIsLeave post       = Leave
  | postIsTopicChange post = TopicChange
  | otherwise              = NormalPost
-- | Find out whether a 'Post' represents a topic change
postIsTopicChange :: Post -> Bool
postIsTopicChange post = PostTypeHeaderChange == postType post
-- | Find out whether a 'Post' is from a @/me@ command
postIsEmote :: Post -> Bool
postIsEmote p =
    p^.postPropsL.postPropsOverrideIconUrlL == Just (""::Text)
    && "*" `T.isPrefixOf` msg
    && "*" `T.isSuffixOf` msg
  where
    msg = sanitizeUserText $ postMessage p
-- | Find out whether a 'Post' is a user joining a channel
postIsJoin :: Post -> Bool
postIsJoin post = post^.postTypeL == PostTypeJoinChannel
-- | Find out whether a 'Post' is a user leaving a channel
postIsLeave :: Post -> Bool
postIsLeave post = post^.postTypeL == PostTypeLeaveChannel
-- | Undo the automatic formatting of posts generated by @/me@-commands
--
-- Fix: the original unconditionally applied @T.init . T.tail@ to any text
-- both starting and ending with @*@.  The one-character text @"*"@
-- satisfies both checks, making @T.init@ throw on the empty result of
-- @T.tail@.  We now require at least two characters before stripping.
unEmote :: ClientPostType -> Text -> Text
unEmote Emote t
  | T.length t >= 2 && "*" `T.isPrefixOf` t && "*" `T.isSuffixOf` t =
      T.init (T.tail t)
  | otherwise = t
unEmote _ t = t
-- | Convert a Mattermost 'Post' to a 'ClientPost', passing in a
-- 'ParentId' if it has a known one.
toClientPost :: Post -> Maybe PostId -> ClientPost
toClientPost p parentId =
  -- Unwrap /me formatting before parsing the Markdown.
  let src = unEmote (postClientPostType p) $ sanitizeUserText $ postMessage p
  in ClientPost { _cpText           = getBlocks src <> getAttachmentText p
                , _cpMarkdownSource = src
                , _cpUser           = postUserId p
                , _cpUserOverride   = p^.postPropsL.postPropsOverrideUsernameL
                , _cpDate           = postCreateAt p
                , _cpType           = postClientPostType p
                , _cpPending        = False
                , _cpDeleted        = False
                , _cpAttachments    = Seq.empty -- attachments are not extracted here
                , _cpInReplyToPost  = parentId
                , _cpPostId         = p^.postIdL
                , _cpChannelId      = p^.postChannelIdL
                , _cpReactions      = Map.empty
                , _cpOriginalPost   = p
                }
-- | Right now, instead of treating 'attachment' properties specially, we're
-- just going to roll them directly into the message text
getAttachmentText :: Post -> Blocks
getAttachmentText p =
  case p^.postPropsL.postPropsAttachmentsL of
    Nothing -> Seq.empty
    Just attachments ->
      -- Each attachment becomes a blockquote of its text plus its fallback.
      fmap (C.Blockquote . render) attachments
  where render att = getBlocks (att^.ppaTextL) <> getBlocks (att^.ppaFallbackL)
-- ** 'ClientPost' Lenses
makeLenses ''Attachment
makeLenses ''ClientPost
|
aisamanra/matterhorn
|
src/Types/Posts.hs
|
Haskell
|
bsd-3-clause
| 6,677
|
-- Copyright (c) 2016-present, Facebook, Inc.
-- All rights reserved.
--
-- This source code is licensed under the BSD-style license found in the
-- LICENSE file in the root directory of this source tree.
{-# LANGUAGE OverloadedStrings #-}
module Duckling.Numeral.HU.Corpus
( corpus ) where
import Data.String
import Prelude
import Duckling.Locale
import Duckling.Numeral.Types
import Duckling.Resolve
import Duckling.Testing.Types
-- | Hungarian numeral corpus: Hungarian-locale context, default test
-- options, and the example groups below.
corpus :: Corpus
corpus = (testContext {locale = makeLocale HU Nothing}, testOptions, allExamples)
-- | Positive examples pairing surface strings with expected numeral values.
--
-- NOTE(review): the value 20 appears in two separate groups below
-- ("20"/"húsz" and later "húsz" alone); harmless, but they could be merged.
allExamples :: [Example]
allExamples = concat
  [ examples (NumeralValue 0)
             [ "0"
             , "nulla"
             , "zéró"
             ]
  , examples (NumeralValue 1)
             [ "1"
             , "egy"
             ]
  , examples (NumeralValue 2)
             [ "kettő"
             ]
  , examples (NumeralValue 3)
             [ "három"
             ]
  , examples (NumeralValue 4)
             [ "négy"
             ]
  , examples (NumeralValue 5)
             [ "öt"
             ]
  , examples (NumeralValue 6)
             [ "hat"
             ]
  , examples (NumeralValue 7)
             [ "hét"
             ]
  , examples (NumeralValue 8)
             [ "nyolc"
             ]
  , examples (NumeralValue 9)
             [ "kilenc"
             ]
  , examples (NumeralValue 11)
             [ "tizenegy"
             ]
  , examples (NumeralValue 15)
             [ "tizenöt"
             ]
  , examples (NumeralValue 17)
             [ "tizenhét"
             ]
  , examples (NumeralValue 20)
             [ "20"
             , "húsz"
             ]
  , examples (NumeralValue 22)
             [ "huszonkettő"
             ]
  , examples (NumeralValue 24)
             [ "24"
             , "huszonnégy"
             ]
  , examples (NumeralValue 26)
             [ "huszonhat"
             ]
  , examples (NumeralValue 28)
             [ "huszonnyolc"
             ]
  , examples (NumeralValue 10)
             [ "tíz"
             ]
  , examples (NumeralValue 20)
             [ "húsz"
             ]
  , examples (NumeralValue 50)
             [ "ötven"
             ]
  , examples (NumeralValue 34)
             [ "harmincnégy"
             ]
  ]
|
facebookincubator/duckling
|
Duckling/Numeral/HU/Corpus.hs
|
Haskell
|
bsd-3-clause
| 2,236
|
module AI
( search
, module AI.Types
) where
import Types
import AI.Types
import Text.Printf
import qualified AI.API.My as My
import qualified AI.API.Tzaar as Tzaar
import qualified AI.API.GameTree as GameTree
-- | Dispatch a game-tree search to the requested algorithm/implementation
-- pair.  All nine Algorithm x Implementation combinations are listed
-- explicitly; the final equation is a defensive error for any pair added
-- later without a corresponding case here.
search :: Board b => Algorithm -> Implementation -> Evaluation -> Position b -> Depth -> (PV, Score)
search Minimax   My       = My.minimax
search AlphaBeta My       = My.alphabeta
search Negascout My       = My.negascout
search Minimax   GameTree = GameTree.minimax
search AlphaBeta GameTree = GameTree.alphabeta
search Negascout GameTree = GameTree.negascout
search Minimax   Tzaar    = Tzaar.minimax
search AlphaBeta Tzaar    = Tzaar.alphabeta
search Negascout Tzaar    = Tzaar.negascout
search a i = error $ printf "Unsupported algorithm-implementation pair: (%s, %s)"
                            (show a) (show i)
|
sphynx/hamisado
|
AI.hs
|
Haskell
|
bsd-3-clause
| 795
|
{-# LANGUAGE BangPatterns #-}
-- |
-- Module: Data.Aeson.Encoding.Builder
-- Copyright: (c) 2011 MailRank, Inc.
-- (c) 2013 Simon Meier <iridcode@gmail.com>
-- License: BSD3
-- Maintainer: Bryan O'Sullivan <bos@serpentine.com>
-- Stability: experimental
-- Portability: portable
--
-- Efficiently serialize a JSON value using the UTF-8 encoding.
module Data.Aeson.Encoding.Builder
(
encodeToBuilder
, null_
, bool
, array
, emptyArray_
, emptyObject_
, object
, text
, string
, unquoted
, quote
, scientific
, day
, localTime
, utcTime
, timeOfDay
, zonedTime
, ascii2
, ascii4
, ascii5
) where
import Prelude ()
import Prelude.Compat
import Data.Aeson.Internal.Time
import Data.Aeson.Types.Internal (Value (..))
import Data.ByteString.Builder as B
import Data.ByteString.Builder.Prim as BP
import Data.ByteString.Builder.Scientific (scientificBuilder)
import Data.Char (chr, ord)
import Data.Monoid ((<>))
import Data.Scientific (Scientific, base10Exponent, coefficient)
import Data.Text.Encoding (encodeUtf8BuilderEscaped)
import Data.Time (UTCTime(..))
import Data.Time.Calendar (Day(..), toGregorian)
import Data.Time.LocalTime
import Data.Word (Word8)
import qualified Data.HashMap.Strict as HMS
import qualified Data.Text as T
import qualified Data.Vector as V
-- | Encode a JSON value to a "Data.ByteString" 'B.Builder'.
--
-- Use this function if you are encoding over the wire, or need to
-- prepend or append further bytes to the encoded JSON value.
encodeToBuilder :: Value -> Builder
encodeToBuilder v = case v of
  Null     -> null_
  Bool b   -> bool b
  Number n -> scientific n
  String s -> text s
  Array a  -> array a
  Object o -> object o
-- | Encode a JSON null.
-- Emitted as a single bounded-primitive write of the four ASCII characters.
null_ :: Builder
null_ = BP.primBounded (ascii4 ('n',('u',('l','l')))) ()
-- | Encode a JSON boolean.
-- 'BP.condB id' picks the @true@ branch when the input is 'True'.
bool :: Bool -> Builder
bool = BP.primBounded (BP.condB id (ascii4 ('t',('r',('u','e'))))
                                   (ascii5 ('f',('a',('l',('s','e'))))))
-- | Encode a JSON array.
array :: V.Vector Value -> Builder
array vs
  | V.null vs = emptyArray_
  | otherwise =
      B.char8 '[' <>
      encodeToBuilder (V.unsafeHead vs) <>
      V.foldr addElem (B.char8 ']') (V.unsafeTail vs)
  where
    -- Every element after the first is preceded by a comma.
    addElem e rest = B.char8 ',' <> encodeToBuilder e <> rest
-- | Encode a JSON object.
object :: HMS.HashMap T.Text Value -> Builder
object m = case HMS.toList m of
    []     -> emptyObject_
    (p:ps) -> B.char8 '{' <> pair p <> foldr addPair (B.char8 '}') ps
  where
    addPair kv rest = B.char8 ',' <> pair kv <> rest
    pair (k, v)     = text k <> B.char8 ':' <> encodeToBuilder v
-- | Encode a JSON string.
text :: T.Text -> Builder
text = quote . unquoted
-- | Encode a JSON string, without enclosing quotes.
-- UTF-8 encodes the text while escaping ASCII control/special characters
-- via 'escapeAscii'.
unquoted :: T.Text -> Builder
unquoted = encodeUtf8BuilderEscaped escapeAscii
-- | Add quotes surrounding a builder
quote :: Builder -> Builder
quote inner = B.char8 '"' <> inner <> B.char8 '"'
-- | Encode a JSON string.
-- Characters above ASCII are UTF-8 encoded directly; ASCII characters go
-- through the same escaping primitive used for 'Text'.
string :: String -> Builder
string t = B.char8 '"' <> BP.primMapListBounded go t <> B.char8 '"'
  where go = BP.condB (> '\x7f') BP.charUtf8 (c2w >$< escapeAscii)
-- | Bounded primitive escaping one ASCII byte of a JSON string:
-- backslash and double quote get a backslash escape, printable bytes
-- (>= 0x20) pass through, the common control characters get their short
-- escapes, and everything else falls back to a @\\uXXXX@ hex escape.
escapeAscii :: BP.BoundedPrim Word8
escapeAscii =
    BP.condB (== c2w '\\'  ) (ascii2 ('\\','\\')) $
    BP.condB (== c2w '\"'  ) (ascii2 ('\\','"' )) $
    BP.condB (>= c2w '\x20') (BP.liftFixedToBounded BP.word8) $
    BP.condB (== c2w '\n'  ) (ascii2 ('\\','n' )) $
    BP.condB (== c2w '\r'  ) (ascii2 ('\\','r' )) $
    BP.condB (== c2w '\t'  ) (ascii2 ('\\','t' )) $
    BP.liftFixedToBounded hexEscape -- fallback for chars < 0x20
  where
    hexEscape :: BP.FixedPrim Word8
    hexEscape = (\c -> ('\\', ('u', fromIntegral c))) BP.>$<
        BP.char8 >*< BP.char8 >*< BP.word16HexFixed
{-# INLINE escapeAscii #-}
-- | Convert a character to its byte value (truncating to 8 bits).
c2w :: Char -> Word8
c2w = fromIntegral . ord
-- | Encode a JSON number.
-- Non-negative exponents are rendered as a plain integer (coefficient
-- scaled up); negative exponents use the scientific notation builder.
scientific :: Scientific -> Builder
scientific s
  | e < 0     = scientificBuilder s
  | otherwise = B.integerDec (coefficient s * 10 ^ e)
  where
    e = base10Exponent s
-- | Encode an empty JSON array, @[]@.
emptyArray_ :: Builder
emptyArray_ = BP.primBounded (ascii2 ('[',']')) ()

-- | Encode an empty JSON object, @{}@.
emptyObject_ :: Builder
emptyObject_ = BP.primBounded (ascii2 ('{','}')) ()
-- The asciiN helpers emit N statically-known ASCII characters as one
-- fixed-width primitive; the runtime argument is ignored ('const cs').
ascii2 :: (Char, Char) -> BP.BoundedPrim a
ascii2 cs = BP.liftFixedToBounded $ const cs BP.>$< BP.char7 >*< BP.char7
{-# INLINE ascii2 #-}

ascii4 :: (Char, (Char, (Char, Char))) -> BP.BoundedPrim a
ascii4 cs = BP.liftFixedToBounded $ const cs >$<
    BP.char7 >*< BP.char7 >*< BP.char7 >*< BP.char7
{-# INLINE ascii4 #-}

ascii5 :: (Char, (Char, (Char, (Char, Char)))) -> BP.BoundedPrim a
ascii5 cs = BP.liftFixedToBounded $ const cs >$<
    BP.char7 >*< BP.char7 >*< BP.char7 >*< BP.char7 >*< BP.char7
{-# INLINE ascii5 #-}

ascii6 :: (Char, (Char, (Char, (Char, (Char, Char))))) -> BP.BoundedPrim a
ascii6 cs = BP.liftFixedToBounded $ const cs >$<
    BP.char7 >*< BP.char7 >*< BP.char7 >*< BP.char7 >*< BP.char7 >*< BP.char7
{-# INLINE ascii6 #-}

ascii8 :: (Char, (Char, (Char, (Char, (Char, (Char, (Char, Char)))))))
       -> BP.BoundedPrim a
ascii8 cs = BP.liftFixedToBounded $ const cs >$<
    BP.char7 >*< BP.char7 >*< BP.char7 >*< BP.char7 >*<
    BP.char7 >*< BP.char7 >*< BP.char7 >*< BP.char7
{-# INLINE ascii8 #-}
-- | Encode a 'Day' in ISO-8601 YYYY-MM-DD form.
day :: Day -> Builder
day dd = encodeYear yr <>
         BP.primBounded (ascii6 ('-',(mh,(ml,('-',(dh,dl)))))) ()
  where
    (yr,m,d)   = toGregorian dd
    !(T mh ml) = twoDigits m
    !(T dh dl) = twoDigits d
    -- Years >= 1000 print directly; 0..999 are zero-padded to four
    -- digits; small negative years get a sign plus padding; anything
    -- else falls through to plain decimal.
    encodeYear y
      | y >= 1000  = B.integerDec y
      | y >= 0     = BP.primBounded (ascii4 (padYear y)) ()
      | y >= -999  = BP.primBounded (ascii5 ('-',padYear (- y))) ()
      | otherwise  = B.integerDec y
    padYear y =
      let (ab,c) = fromIntegral y `quotRem` 10
          (a,b)  = ab `quotRem` 10
      in ('0',(digit a,(digit b,digit c)))
{-# INLINE day #-}
-- | Encode a 'TimeOfDay', via the fixed 64-bit representation.
timeOfDay :: TimeOfDay -> Builder
timeOfDay = timeOfDay64 . toTimeOfDay64
{-# INLINE timeOfDay #-}
-- | Encode an hh:mm:ss time, appending fractional seconds only when
-- non-zero, with trailing zero digit groups truncated.
timeOfDay64 :: TimeOfDay64 -> Builder
timeOfDay64 (TOD h m s)
  | frac == 0 = hhmmss -- omit subseconds if 0
  | otherwise = hhmmss <> BP.primBounded showFrac frac
  where
    hhmmss  = BP.primBounded (ascii8 (hh,(hl,(':',(mh,(ml,(':',(sh,sl)))))))) ()
    !(T hh hl)  = twoDigits h
    !(T mh ml)  = twoDigits m
    !(T sh sl)  = twoDigits (fromIntegral real)
    -- Split picoseconds into whole seconds and the fractional remainder.
    (real,frac) = s `quotRem` pico
    -- '.' followed by up to 12 fractional digits, truncating zero tails
    -- in groups: 12 -> 6 -> 3 -> 2 -> 1 digits.
    showFrac = (\x -> ('.', x)) >$< (BP.liftFixedToBounded BP.char7 >*< trunc12)
    trunc12 = (`quotRem` micro) >$<
              BP.condB (\(_,y) -> y == 0) (fst >$< trunc6) (digits6 >*< trunc6)
    digits6 = ((`quotRem` milli) . fromIntegral) >$< (digits3 >*< digits3)
    trunc6  = ((`quotRem` milli) . fromIntegral) >$<
              BP.condB (\(_,y) -> y == 0) (fst >$< trunc3) (digits3 >*< trunc3)
    digits3 = (`quotRem` 10) >$< (digits2 >*< digits1)
    digits2 = (`quotRem` 10) >$< (digits1 >*< digits1)
    digits1 = BP.liftFixedToBounded (digit >$< BP.char7)
    trunc3  = BP.condB (== 0) BP.emptyB $
              (`quotRem` 100) >$< (digits1 >*< trunc2)
    trunc2  = BP.condB (== 0) BP.emptyB $
              (`quotRem` 10) >$< (digits1 >*< trunc1)
    trunc1  = BP.condB (== 0) BP.emptyB digits1
    pico       = 1000000000000 -- number of picoseconds  in 1 second
    micro      = 1000000       -- number of microseconds in 1 second
    milli      = 1000          -- number of milliseconds in 1 second
-- | Encode a time zone offset: @Z@ for UTC, otherwise @+hh:mm@ / @-hh:mm@.
timeZone :: TimeZone -> Builder
timeZone (TimeZone off _ _)
  | off == 0  = B.char7 'Z'
  | otherwise = BP.primBounded (ascii6 (s,(hh,(hl,(':',(mh,ml)))))) ()
  where
    !s         = if off < 0 then '-' else '+'
    !(T hh hl) = twoDigits h
    !(T mh ml) = twoDigits m
    -- off is in minutes; split into hours and minutes.
    (h,m)      = abs off `quotRem` 60
{-# INLINE timeZone #-}
-- | Encode date and time separated by the ISO-8601 @T@.
dayTime :: Day -> TimeOfDay64 -> Builder
dayTime d t = day d <> B.char7 'T' <> timeOfDay64 t
{-# INLINE dayTime #-}

-- | Encode a 'UTCTime' with a trailing @Z@ designator.
utcTime :: UTCTime -> B.Builder
utcTime (UTCTime d s) = dayTime d (diffTimeOfDay64 s) <> B.char7 'Z'
{-# INLINE utcTime #-}

-- | Encode a 'LocalTime' (no zone designator).
localTime :: LocalTime -> Builder
localTime (LocalTime d t) = dayTime d (toTimeOfDay64 t)
{-# INLINE localTime #-}

-- | Encode a 'ZonedTime': local time followed by its zone offset.
zonedTime :: ZonedTime -> Builder
zonedTime (ZonedTime t z) = localTime t <> timeZone z
{-# INLINE zonedTime #-}
-- | A strict, unpacked pair of characters (a two-digit rendering).
data T = T {-# UNPACK #-} !Char {-# UNPACK #-} !Char

-- | Render a value in 0..99 as two ASCII digits, zero-padded.
twoDigits :: Int -> T
twoDigits a = T (digit hi) (digit lo)
  where (hi,lo) = a `quotRem` 10
-- | Convert a digit value (0-9) to its ASCII digit character.
digit :: Int -> Char
digit = chr . (+ 48)
|
sol/aeson
|
Data/Aeson/Encoding/Builder.hs
|
Haskell
|
bsd-3-clause
| 8,539
|
module Expression.AST where
-- | A named variable.
data Variable = Variable String deriving Eq

-- | Abstract syntax for arithmetic expressions over variables,
-- including first and second time derivatives of a variable.
data Expression = Const Int
                | Var Variable
                | BinExpr Expression BinOp Expression
                | MultiExpr MultiOp [Expression]
                | TimeDerivative Variable  -- ^ first time derivative
                | TimeDerivative2 Variable -- ^ second time derivative

-- | Binary operators.
data BinOp = Plus
           | Minus
           | Mult
           | Quotient
           | Exp

-- | Operators taking a list of operands.
data MultiOp = Sum
             | Product
|
Zomega/thesis
|
Wurm/CAS/Expression/AST.hs
|
Haskell
|
mit
| 354
|
module Main where
--import Test.Framework (defaultMain, Test, testGroup)
import qualified Data.ByteString.Lazy.Builder.BasicEncoding.Tests
import qualified Data.ByteString.Lazy.Builder.Tests
import TestFramework
-- | Run the whole builder test suite.
main :: IO ()
main = defaultMain tests

-- | All test groups: the Builder tests and the BasicEncoding tests.
tests :: [Test]
tests =
  [ testGroup "Builder"
      Data.ByteString.Lazy.Builder.Tests.tests
  , testGroup "BasicEncoding"
      Data.ByteString.Lazy.Builder.BasicEncoding.Tests.tests
  ]
|
meiersi/bytestring-builder
|
tests/builder/TestSuite.hs
|
Haskell
|
bsd-3-clause
| 470
|
{-# LANGUAGE ScopedTypeVariables #-}
module Types where
import Debug.Trace
import Test.QuickCheck
type Longitude = Double -- -180 .. 180
type Latitude  = Double -- -90 .. 90

-- | A point on the globe: (longitude, latitude) in degrees.
newtype Geographic = Geographic (Longitude,Latitude)
  deriving Show

type ScreenX = Double -- -1 .. 1, + on right
type ScreenY = Double -- -1 .. 1, + on top

type Distance    = Double -- nominally meters
type Inclination = Double -- "latitude", In Radians, 0 is ahead, + is up, normalized to +/- pi/2
type Azimuth     = Double -- "longitude", In Radians, 0 is ahead, + is right, normalized to +/- pi

-- | Spherical coordinates: (radius, inclination, azimuth).
newtype Spherical = Spherical (Distance,Inclination,Azimuth)
  deriving Show

-- x == in/out, in+
-- y == left/right, right+
-- z == up/down, up+
newtype Cartesian = Cartesian (Double,Double,Double) -- in nominal meters
  deriving Show

-- | A point in normalized screen space.
newtype ScreenCoord = ScreenCoord (Double,Double)
  deriving Show

type CanvasCoord = (Double,Double)
-- | Convert radians to degrees.
radian2degree :: Double -> Double
radian2degree r = r * (180 / pi)
-- | Convert degrees to radians.
degree2radian :: Double -> Double
degree2radian d = d / (180 / pi)
-- From http://stackoverflow.com/questions/24234609/standard-way-to-normalize-an-angle-to-%CF%80-radians-in-java
-- | Normalize an angle in radians into the interval [-pi, pi).
radian :: Double -> Double
radian t = t - 2 * pi * fromIntegral (floor ((t + pi) / (2 * pi)))
-- We define that the Spherical is a sphere 5 units (meters) radius
geographicToSpherical :: Geographic -> Spherical
geographicToSpherical (Geographic (lon, lat)) =
    Spherical (5, degree2radian lat, degree2radian lon)
-- | Convert Cartesian to spherical coordinates.
cartesian2Spherical :: Cartesian -> Spherical
cartesian2Spherical (Cartesian (0,0,0)) = Spherical (0,0,0) -- choice: angles at the origin are undefined
cartesian2Spherical (Cartesian (x,y,z)) = mkSpherical (r,t,u)
  where
    r = sqrt (x^2 + y^2 + z^2)
    t = asin (z / r) -- polar angle, "latitude", 0 .. pi/2
    u = atan2 y x    -- azimuth angle, "longitude", -pi .. pi
-- | Smart constructor for 'Spherical': normalizes the polar and azimuth
-- angles via 'radian'; the radius passes through unchanged.
--
-- Fix: the original emitted a 'traceShow' of its arguments on every call
-- (leftover debugging on stderr); that has been removed.  The fold-over
-- branches normalizing inclination past +/- pi/2 were already commented
-- out upstream, so inclination is still only normalized to +/- pi:
--   | t < -pi / 2 = mkSpherical (r,-t,u' + pi)
--   | t >  pi / 2 = mkSpherical (r,-t,u' - pi)
mkSpherical :: (Double,Double,Double) -> Spherical
mkSpherical (r, t, u) = Spherical (r, radian t, radian u)
-- | Convert spherical coordinates back to Cartesian.
spherical2Cartesian :: Spherical -> Cartesian
spherical2Cartesian (Spherical (rad, incl, azim)) = Cartesian (x, y, z)
  where
    x = rad * cos incl * cos azim
    y = rad * cos incl * sin azim
    z = rad * sin incl
-- | Types that can be viewed in both spherical and Cartesian coordinates.
class Coordinate c where
  toSpherical :: c -> Spherical
  toCartesian :: c -> Cartesian

instance Coordinate Cartesian where
  toSpherical = cartesian2Spherical
  toCartesian = id

instance Coordinate Spherical where
  toSpherical = id
  toCartesian = spherical2Cartesian

-- Geographic points go through the fixed-radius sphere first.
instance Coordinate Geographic where
  toSpherical = toSpherical . geographicToSpherical
  toCartesian = toCartesian . geographicToSpherical
-- | Linear interpolation: @lerp2 a b s@ is @a@ at @s = 0@ and @b@ at @s = 1@.
class Lerp a where
  lerp2 :: a -> a -> Double -> a

instance Lerp Double where
  lerp2 a b s = b * s + a * (1 - s)

instance Lerp Geographic where
  lerp2 (Geographic a) (Geographic b) s = Geographic (lerp2 a b s)

-- Pairs and triples interpolate componentwise.
instance (Lerp a, Lerp b) => Lerp (a,b) where
  lerp2 (a1,a2) (b1,b2) s = (lerp2 a1 b1 s,lerp2 a2 b2 s)

instance (Lerp a, Lerp b, Lerp c) => Lerp (a,b,c) where
  lerp2 (a1,a2,a3) (b1,b2,b3) s = (lerp2 a1 b1 s,lerp2 a2 b2 s,lerp2 a3 b3 s)
-- | Run an action over each consecutive pair of @n+1@ evenly-spaced
-- interpolation points between @a@ and @b@.
interpolate :: (Monad m, Lerp a) => Int -> a -> a -> (a -> a -> m ()) -> m ()
interpolate n a b f = mapM_ (uncurry f) (zip js (tail js))
  where js = joints n a b

-- | The @n+1@ evenly-spaced interpolation points from @a@ to @b@.
joints :: Lerp a => Int -> a -> a -> [a]
joints n a b = [ lerp2 a b (fromIntegral k / fromIntegral n) | k <- [0 .. n] ]
-- wrap around if line is too long
-- | Euclidean distance between two canvas points.
distance :: CanvasCoord -> CanvasCoord -> Double
distance (ax, ay) (bx, by) = sqrt (dx * dx + dy * dy)
  where
    dx = ax - bx
    dy = ay - by
------------------------------------------------------------------------
-- QC
------------------------------------------------------------------------
instance Arbitrary Cartesian where
  arbitrary = fmap Cartesian arbitrary
  -- shrink (Cartesian a) = [ Cartesian c | c <- shrink a ]

-- | Round-tripping through spherical coordinates preserves a Cartesian
-- point up to 'eqish' tolerance, provided it is not too close to the
-- origin (where the angles are ill-conditioned).
prop_c2s_then_s2c (c@(Cartesian (x,y,z))) =
  abs (x*x + y*y + z*z) > 1e-10 ==> -- make sure you are not *too* close to the origin
  -- label (show (x,y,z)) $
  c `eqish` spherical2Cartesian (cartesian2Spherical c)
-- | Approximate equality, for comparing floating-point results.
class Eqish a where
  eqish :: a -> a -> Bool

instance Eqish Double where
  eqish a b = abs (a - b) < 1e-6

instance (Eqish a, Eqish b, Eqish c) => Eqish (a,b,c) where
  eqish (a,b,c) (a',b',c') = eqish a a' && eqish b b' && eqish c c'

instance Eqish Cartesian where
  eqish (Cartesian c1) (Cartesian c2) = c1 `eqish` c2
|
andygill/willowbrae
|
projections/Types.hs
|
Haskell
|
bsd-3-clause
| 4,672
|
{-# LANGUAGE CPP, NoMonomorphismRestriction #-}
#include "fusion-phases.h"
module Data.Array.Parallel.Unlifted.Stream.Segments
( streamSegsFromNestedUSSegd
, streamSegsFromVectorsUSSegd
, streamSegsFromVectorsUVSegd
, streamSegsFromVectorsUSSegdSegmap
, streamSegsFromVectorsUSSegd_split)
where
import Data.Vector.Fusion.Bundle.Monadic (Bundle(..), fromStream)
import Data.Vector.Fusion.Bundle.Size
import Data.Vector.Fusion.Stream.Monadic (Stream(..), Step(..))
import Data.Array.Parallel.Unlifted.Sequential.Vector (Unbox, Vector, index)
import Data.Array.Parallel.Unlifted.Vectors (Unboxes, Vectors)
import Data.Array.Parallel.Unlifted.Sequential.USegd (USegd(..))
import Data.Array.Parallel.Unlifted.Sequential.USSegd (USSegd(..))
import Data.Array.Parallel.Unlifted.Sequential.UVSegd (UVSegd(..))
import qualified Data.Array.Parallel.Unlifted.Vectors as US
import qualified Data.Array.Parallel.Unlifted.Sequential.USegd as USegd
import qualified Data.Array.Parallel.Unlifted.Sequential.USSegd as USSegd
import qualified Data.Array.Parallel.Unlifted.Sequential.Vector as U
import qualified Data.Vector as V
import qualified Data.Primitive.ByteArray as P
import System.IO.Unsafe
-- Nested -----------------------------------------------------------------------------------------
-- | Stream some physical segments from many data arrays.
---
-- * TODO: make this more efficient, and fix fusion.
-- We should be able to eliminate a lot of the indexing happening in the
-- inner loop by being cleverer about the loop state.
--
-- * TODO: If this is contiguous then we can stream the lot without worrying
-- about jumping between segments. EXCEPT that this information must be
-- statically visible else streamSegs won't fuse, so we can't have an
-- ifThenElse checking the manifest flag.
streamSegsFromNestedUSSegd
        :: (Unbox a, Monad m)
        => V.Vector (Vector a)  -- ^ Source arrays.
        -> USSegd               -- ^ Segment descriptor defining segments based on source vectors.
        -> Bundle m v a
streamSegsFromNestedUSSegd
        pdatas
        ussegd@(USSegd _ starts sources usegd)
 = let  here     = "streamSegsFromNestedUSSegd"
        -- length of each segment
        pseglens = USegd.takeLengths usegd
        -- Stream step function; the state is
        -- (current physical segment index, element index within it).
        -- We've finished streaming this pseg
        {-# INLINE_INNER fn #-}
        fn (pseg, ix)
         -- All psegs are done.
         | pseg >= USSegd.length ussegd
         = return $ Done
         -- Current pseg is done
         | ix >= U.index here pseglens pseg
         = return $ Skip (pseg + 1, 0)
         -- Stream an element from this pseg
         | otherwise
         = let  !srcid  = index here sources pseg
                !pdata  = pdatas `V.unsafeIndex` srcid
                !start  = index here starts pseg
                !result = index here pdata (start + ix)
           in   return $ Yield result (pseg, ix + 1)
   in   fromStream (Stream fn (0, 0)) Unknown
{-# INLINE_STREAM streamSegsFromNestedUSSegd #-}
-- Vectors ----------------------------------------------------------------------------------------
-- | Stream segments from a `Vectors`.
--
-- * There must be at least one segment in the `USSegd`, but this is not checked.
--
-- * No bounds checking is done for the `USSegd`.
--
streamSegsFromVectorsUSSegd
        :: (Unboxes a, Monad m)
        => Vectors a            -- ^ Vectors holding source data.
        -> USSegd               -- ^ Scattered segment descriptor
        -> Bundle m v a
streamSegsFromVectorsUSSegd
        vectors
        ussegd@(USSegd _ segStarts segSources usegd)
 = segStarts `seq` segSources `seq` usegd `seq` vectors `seq`
   let  -- NOTE(review): the error-location tag below has a typo ("strem");
        -- it is a runtime string and is deliberately left untouched here.
        here       = "stremSegsFromVectorsUSSegd"
        -- Length of each segment
        !segLens   = USegd.takeLengths usegd
        -- Total number of segments.
        !segsTotal = USSegd.length ussegd
        -- Total number of elements to stream.
        !elements  = USegd.takeElements usegd
        -- seg, ix of that seg in usegd, length of seg, elem in seg
        {-# INLINE_INNER fnSeg #-}
        fnSeg (ixSeg, baSeg, ixEnd, ixElem)
         = ixSeg `seq` baSeg `seq`
           if ixElem >= ixEnd                 -- Was that the last elem in the current seg?
            then if ixSeg + 1 >= segsTotal    -- Was that last seg?
                  -- That was the last seg, we're done.
                  then return $ Done
                  -- Move to the next seg.
                  else let ixSeg'    = ixSeg + 1
                           sourceSeg = index here segSources ixSeg'
                           startSeg  = index here segStarts  ixSeg'
                           lenSeg    = index here segLens    ixSeg'
                           (arr, startArr, _)
                                     = US.unsafeIndexUnpack vectors sourceSeg
                       in  return $ Skip
                                  ( ixSeg'
                                  , arr
                                  , startArr + startSeg + lenSeg
                                  , startArr + startSeg)
            -- Stream the next element from the segment.
            else let !result = P.indexByteArray baSeg ixElem
                 in  return $ Yield result (ixSeg, baSeg, ixEnd, ixElem + 1)
        -- Starting state of the stream.
        -- CAREFUL:
        --  The ussegd might not contain any segments, so we can't initialise the state
        --  just by taking the first segment length etc from the ussegd.
        --  On the other hand, we don't want to use an extra case expression to test for
        --  this sitution, as that could break fusion.
        --  Instead, start with a dummy state which forces the loop to grab the first
        --  segment, if there are any.
        !dummy  = unsafePerformIO
                $ P.newByteArray 0 >>= P.unsafeFreezeByteArray
        !initState
                = ( -1      -- force fnSeg loop to load first seg
                  , dummy   -- dummy array data to start with
                  , 0       -- force fnSeg loop to load first seg
                  , 0)
        -- It's important that we set the result stream size, so Data.Vector
        -- doesn't need to add code to grow the result when it overflows.
   in   fromStream (Stream fnSeg initState) (Exact elements)
{-# INLINE_STREAM streamSegsFromVectorsUSSegd #-}
-- Vectors ----------------------------------------------------------------------------------------
-- | Stream segments from a `Vectors`.
--
-- * There must be at least one segment in the `USSegd`, but this is not checked.
--
-- * No bounds checking is done for the `USSegd`.
--
streamSegsFromVectorsUVSegd
        :: (Unboxes a, Monad m)
        => Vectors a            -- ^ Vectors holding source data.
        -> UVSegd               -- ^ Scattered segment descriptor
        -> Bundle m v a
-- Delegate to the segmap-driven streamer: the virtual segment map selects
-- which physical segments of the underlying USSegd get streamed.
streamSegsFromVectorsUVSegd
        vectors
        (UVSegd _ _ segmap _ ussegd)
 = streamSegsFromVectorsUSSegdSegmap vectors ussegd segmap
{-# INLINE_STREAM streamSegsFromVectorsUVSegd #-}
-- | Stream segments from a `Vectors`, visiting the physical segments of
-- the `USSegd` in the order given by the segmap of virtual segment ids.
streamSegsFromVectorsUSSegdSegmap
        :: (Unboxes a, Monad m)
        => Vectors a            -- ^ Vectors holding source data.
        -> USSegd               -- ^ Scattered segment descriptor
        -> Vector Int           -- ^ Segmap
        -> Bundle m v a
streamSegsFromVectorsUSSegdSegmap
        vectors ussegd@(USSegd _ segStarts segSources usegd) segmap
 = segStarts `seq` segSources `seq` usegd `seq` segmap `seq`
   let  -- NOTE(review): the error-location tag below has a typo ("strem");
        -- it is a runtime string and is deliberately left untouched here.
        here        = "stremSegsFromVectorsUVSegd"
        -- Total number of elements to be streamed
        !lengths    = USSegd.takeLengths ussegd
        !elemsTotal = U.sum $ U.map (U.index here lengths) segmap
        -- Total number of segments.
        !segsTotal  = U.length segmap
        -- Length of each physical segment.
        !segLens    = USegd.takeLengths usegd
        -- seg, ix of that seg in usegd, length of seg, elem in seg
        {-# INLINE_INNER fnSeg #-}
        fnSeg (ixSeg, baSeg, ixEnd, ixElem)
         = ixSeg `seq` baSeg `seq`
           if ixElem >= ixEnd                 -- Was that the last elem in the current seg?
            then if ixSeg + 1 >= segsTotal    -- Was that last seg?
                  -- That was the last seg, we're done.
                  then return $ Done
                  -- Move to the next seg: translate the virtual segment
                  -- index through the segmap to a physical segment.
                  else let ixSeg'    = ixSeg + 1
                           ixPSeg    = index here segmap     ixSeg'
                           sourceSeg = index here segSources ixPSeg
                           startSeg  = index here segStarts  ixPSeg
                           lenSeg    = index here segLens    ixPSeg
                           (arr, startArr, _)
                                     = US.unsafeIndexUnpack vectors sourceSeg
                       in  return $ Skip
                                  ( ixSeg'
                                  , arr
                                  , startArr + startSeg + lenSeg
                                  , startArr + startSeg)
            -- Stream the next element from the segment.
            else let !result = P.indexByteArray baSeg ixElem
                 in  return $ Yield result (ixSeg, baSeg, ixEnd, ixElem + 1)
        -- Starting state of the stream.
        -- Dummy state forces the loop to load the first segment (if any)
        -- without a fusion-breaking case expression.
        !dummy  = unsafePerformIO
                $ P.newByteArray 0 >>= P.unsafeFreezeByteArray
        !initState
                = ( -1      -- force fnSeg loop to load first seg
                  , dummy   -- dummy array data to start with
                  , 0       -- force fnSeg loop to load first seg
                  , 0)
        -- It's important that we set the result stream size, so Data.Vector
        -- doesn't need to add code to grow the result when it overflows.
   in   fromStream (Stream fnSeg initState) (Exact elemsTotal)
{-# INLINE_STREAM streamSegsFromVectorsUSSegdSegmap #-}
-- | Stream segments from a `Vectors`, for a slice of the virtual segment
--   space: the (segd, seg_off) pair names the first segment of the slice
--   within vsegids, and el_off is the element offset into that first segment.
streamSegsFromVectorsUSSegd_split
        :: (Unboxes a, Monad m)
        => Vectors a            -- ^ Vectors holding source data.
        -> USSegd               -- ^ Scattered segment descriptor
        -> Vector Int           -- ^ Virtual segment ids
        -> ((USegd,Int),Int)    -- ^ Segmap
        -> Bundle m v a
streamSegsFromVectorsUSSegd_split
        !vectors !ussegd
        !vsegids ((!segd,!seg_off),!el_off)
 = let  here            = "streamSegsFromVectorsUSSegd_split"

        -- Total number of elements to be streamed.
        !lengths        = USegd.takeLengths segd
        !elemsTotal     = U.sum lengths

        -- Total number of segments.
        !segsTotal      = U.length lengths

        !segStarts      = USSegd.takeStarts  ussegd
        !segSources     = USSegd.takeSources ussegd

        -- Virtual segment id of a segment in this slice.
        vsegid seg      = index here vsegids (seg + seg_off)
        {-# INLINE vsegid #-}

        -- Source vector that holds a physical segment.
        source pseg     = index here segSources pseg
        {-# INLINE source #-}

        -- Start of a physical segment within its source vector.
        start pseg      = index here segStarts pseg
        {-# INLINE start #-}

        -- Length of a segment of this slice.
        len seg         = index here lengths seg
        {-# INLINE len #-}

        -- seg, ix of that seg in usegd, length of seg, elem in seg
        {-# INLINE_INNER fnSeg #-}
        fnSeg (!ixSeg, !baSeg, !ixEnd, !ixElem)
         = if ixElem >= ixEnd                   -- Was that the last elem in the current seg?
            then if ixSeg + 1 >= segsTotal      -- Was that last seg?
                    -- That was the last seg, we're done.
                    then return $ Done

                    -- Move to the next seg.
                    else let ixSeg'     = ixSeg + 1
                             ixPSeg     = vsegid ixSeg'
                             sourceSeg  = source ixPSeg
                             startSeg   = start  ixPSeg
                             lenSeg     = len    ixSeg'
                             -- The element offset only applies to the first
                             -- segment of the slice.
                             el_off'    = if ixSeg' == 0 then el_off else 0
                             (arr, startArr, _)
                                        = US.unsafeIndexUnpack vectors sourceSeg
                         in  return $ Skip
                                ( ixSeg'
                                , arr
                                , startArr + startSeg + el_off' + lenSeg
                                , startArr + startSeg + el_off')

            -- Stream the next element from the segment.
            else let !result = P.indexByteArray baSeg ixElem
                 in  return $ Yield result (ixSeg, baSeg, ixEnd, ixElem + 1)

        -- Starting state of the stream.
        -- Dummy state that forces fnSeg to load the first segment, if any;
        -- avoids a separate emptiness check that could break fusion.
        !dummy  = unsafePerformIO
                $ P.newByteArray 0 >>= P.unsafeFreezeByteArray

        !initState
         =      ( -1            -- force fnSeg loop to load first seg
                , dummy         -- dummy array data to start with
                , 0             -- force fnSeg loop to load first seg
                , 0)

        -- It's important that we set the result stream size, so Data.Vector
        -- doesn't need to add code to grow the result when it overflows.
   in   fromStream (Stream fnSeg initState) (Exact elemsTotal)
{-# INLINE_STREAM streamSegsFromVectorsUSSegd_split #-}
|
mainland/dph
|
dph-prim-seq/Data/Array/Parallel/Unlifted/Stream/Segments.hs
|
Haskell
|
bsd-3-clause
| 13,609
|
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE EmptyDataDecls #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE TupleSections #-}
-- | Resolving a build plan for a set of packages in a given Stackage
-- snapshot.
module Stack.BuildPlan
( BuildPlanException (..)
, BuildPlanCheck (..)
, checkSnapBuildPlan
, DepError(..)
, DepErrors
, gpdPackageDeps
, gpdPackages
, gpdPackageName
, MiniBuildPlan(..)
, MiniPackageInfo(..)
, loadResolver
, loadMiniBuildPlan
, removeSrcPkgDefaultFlags
, resolveBuildPlan
, selectBestSnapshot
, getToolMap
, shadowMiniBuildPlan
, showItems
, showPackageFlags
, parseCustomMiniBuildPlan
) where
import Control.Applicative
import Control.Exception (assert)
import Control.Monad (liftM, forM, unless)
import Control.Monad.Catch
import Control.Monad.IO.Class
import Control.Monad.Logger
import Control.Monad.Reader (asks)
import Control.Monad.State.Strict (State, execState, get, modify,
put)
import Control.Monad.Trans.Control (MonadBaseControl)
import qualified Crypto.Hash.SHA256 as SHA256
import Data.Aeson.Extended (WithJSONWarnings(..), logJSONWarnings)
import Data.Store.VersionTagged
import qualified Data.ByteString as S
import qualified Data.ByteString.Base64.URL as B64URL
import qualified Data.ByteString.Char8 as S8
import Data.Either (partitionEithers)
import qualified Data.Foldable as F
import qualified Data.HashSet as HashSet
import Data.List (intercalate)
import Data.List.NonEmpty (NonEmpty(..))
import qualified Data.List.NonEmpty as NonEmpty
import Data.Map (Map)
import qualified Data.Map as Map
import Data.Maybe (fromMaybe, mapMaybe, isNothing)
import Data.Monoid
import Data.Set (Set)
import qualified Data.Set as Set
import Data.Text (Text)
import qualified Data.Text as T
import Data.Text.Encoding (encodeUtf8)
import qualified Data.Traversable as Tr
import Data.Typeable (Typeable)
import Data.Yaml.Extra (decodeEither', decodeFileEither)
import qualified Distribution.Package as C
import Distribution.PackageDescription (GenericPackageDescription,
flagDefault, flagManual,
flagName, genPackageFlags,
executables, exeName, library, libBuildInfo, buildable)
import qualified Distribution.PackageDescription as C
import Distribution.System (Platform)
import Distribution.Text (display)
import qualified Distribution.Version as C
import Network.HTTP.Download
import Path
import Path.IO
import Prelude -- Fix AMP warning
import Stack.Constants
import Stack.Fetch
import Stack.Package
import Stack.PackageIndex
import Stack.Types.BuildPlan
import Stack.Types.FlagName
import Stack.Types.PackageIdentifier
import Stack.Types.PackageIndex
import Stack.Types.PackageName
import Stack.Types.Version
import Stack.Types.Config
import Stack.Types.Urls
import Stack.Types.Compiler
import Stack.Types.StackT
-- | Things that can go wrong while resolving or loading a build plan.
data BuildPlanException
    = UnknownPackages
        (Path Abs File) -- stack.yaml file
        (Map PackageName (Maybe Version, Set PackageName)) -- truly unknown
        (Map PackageName (Set PackageIdentifier)) -- shadowed
    | SnapshotNotFound SnapName
      -- ^ The named snapshot does not exist.
    | FilepathInDownloadedSnapshot T.Text
      -- ^ A snapshot downloaded from a URL used a filepath resolver, which
      -- is not allowed for downloaded (assumed-immutable) snapshots.
    | NeitherCompilerOrResolverSpecified T.Text
      -- ^ A custom snapshot specified neither a compiler nor a resolver.
    deriving (Typeable)
instance Exception BuildPlanException
-- | Human-readable rendering of build-plan failures, including recommended
-- stack.yaml edits for the user.
instance Show BuildPlanException where
    show (SnapshotNotFound snapName) = unlines
        [ "SnapshotNotFound " ++ snapName'
        , "Non existing resolver: " ++ snapName' ++ "."
        , "For a complete list of available snapshots see https://www.stackage.org/snapshots"
        ]
        where snapName' = show $ renderSnapName snapName
    show (UnknownPackages stackYaml unknown shadowed) =
        unlines $ unknown' ++ shadowed'
      where
        -- Messages for packages missing from the build plan.
        unknown' :: [String]
        unknown'
            | Map.null unknown = []
            | otherwise = concat
                [ ["The following packages do not exist in the build plan:"]
                , map go (Map.toList unknown)
                , case mapMaybe goRecommend $ Map.toList unknown of
                    [] -> []
                    rec ->
                        ("Recommended action: modify the extra-deps field of " ++
                         toFilePath stackYaml ++
                         " to include the following:")
                        : (rec
                           ++ ["Note: further dependencies may need to be added"])
                , case mapMaybe getNoKnown $ Map.toList unknown of
                    [] -> []
                    noKnown ->
                        [ "There are no known versions of the following packages:"
                        , intercalate ", " $ map packageNameString noKnown
                        ]
                ]
          where
            -- Render one unknown package, listing its dependents when known.
            go (dep, (_, users)) | Set.null users = packageNameString dep
            go (dep, (_, users)) = concat
                [ packageNameString dep
                , " (used by "
                , intercalate ", " $ map packageNameString $ Set.toList users
                , ")"
                ]
            -- Suggest an extra-dep line when a candidate version is known.
            goRecommend (name, (Just version, _)) =
                Just $ "- " ++ packageIdentifierString (PackageIdentifier name version)
            goRecommend (_, (Nothing, _)) = Nothing
            -- Packages for which no version at all is known.
            getNoKnown (name, (Nothing, _)) = Just name
            getNoKnown (_, (Just _, _)) = Nothing
        -- Messages for packages shadowed by local packages.
        shadowed' :: [String]
        shadowed'
            | Map.null shadowed = []
            | otherwise = concat
                [ ["The following packages are shadowed by local packages:"]
                , map go (Map.toList shadowed)
                , ["Recommended action: modify the extra-deps field of " ++
                   toFilePath stackYaml ++
                   " to include the following:"]
                , extraDeps
                , ["Note: further dependencies may need to be added"]
                ]
          where
            go (dep, users) | Set.null users = concat
                [ packageNameString dep
                , " (internal stack error: this should never be null)"
                ]
            go (dep, users) = concat
                [ packageNameString dep
                , " (used by "
                , intercalate ", "
                    $ map (packageNameString . packageIdentifierName)
                    $ Set.toList users
                , ")"
                ]
            -- One extra-dep suggestion per shadowing identifier.
            extraDeps = map (\ident -> "- " ++ packageIdentifierString ident)
                      $ Set.toList
                      $ Set.unions
                      $ Map.elems shadowed
    show (FilepathInDownloadedSnapshot url) = unlines
        [ "Downloaded snapshot specified a 'resolver: { location: filepath }' "
        , "field, but filepaths are not allowed in downloaded snapshots.\n"
        , "Filepath specified: " ++ T.unpack url
        ]
    show (NeitherCompilerOrResolverSpecified url) =
        "Failed to load custom snapshot at " ++
        T.unpack url ++
        ", because no 'compiler' or 'resolver' is specified."
-- | Determine the necessary packages to install to have the given set of
-- packages available.
--
-- This function will not provide test suite and benchmark dependencies.
--
-- This may fail if a target package is not present in the @BuildPlan@.
resolveBuildPlan :: (MonadThrow m, MonadIO m, MonadReader env m, HasBuildConfig env, MonadLogger m, HasHttpManager env, MonadBaseControl IO m,MonadCatch m)
                 => MiniBuildPlan
                 -> (PackageName -> Bool) -- ^ is it shadowed by a local package?
                 -> Map PackageName (Set PackageName) -- ^ required packages, and users of it
                 -> m ( Map PackageName (Version, Map FlagName Bool)
                      , Map PackageName (Set PackageName)
                      )
resolveBuildPlan mbp isShadowed packages
    -- Everything resolved cleanly: report what to install and who uses it.
    | Map.null (rsUnknown rs) && Map.null (rsShadowed rs) = return (rsToInstall rs, rsUsedBy rs)
    | otherwise = do
        bconfig <- asks getBuildConfig
        caches <- getPackageCaches
        -- For each unknown package, look up the highest version in the
        -- package index so we can recommend a concrete extra-dep.
        let maxVer =
                Map.fromListWith max $
                map toTuple $
                Map.keys caches
            unknown = flip Map.mapWithKey (rsUnknown rs) $ \ident x ->
                (Map.lookup ident maxVer, x)
        throwM $ UnknownPackages
            (bcStackYaml bconfig)
            unknown
            (rsShadowed rs)
  where
    rs = getDeps mbp isShadowed packages
-- | Accumulator used by 'getDeps' while walking the dependency graph.
data ResolveState = ResolveState
    { rsVisited   :: Map PackageName (Set PackageName) -- ^ set of shadowed dependencies
    , rsUnknown   :: Map PackageName (Set PackageName) -- ^ packages not in the plan, and their users
    , rsShadowed  :: Map PackageName (Set PackageIdentifier) -- ^ packages shadowed by locals, and the identifiers needing them
    , rsToInstall :: Map PackageName (Version, Map FlagName Bool) -- ^ resolved versions and flag assignments
    , rsUsedBy    :: Map PackageName (Set PackageName) -- ^ reverse-dependency map
    }
-- | Assemble a 'MiniBuildPlan' from the compiler version, the core (GHC
-- boot) packages, and the remaining packages from the package index.
toMiniBuildPlan :: (MonadIO m, MonadLogger m, MonadReader env m, HasHttpManager env, MonadMask m, HasConfig env, MonadBaseControl IO m)
                => CompilerVersion -- ^ Compiler version
                -> Map PackageName Version -- ^ cores
                -> Map PackageName (Version, Map FlagName Bool, [Text], Maybe GitSHA1) -- ^ non-core packages
                -> m MiniBuildPlan
toMiniBuildPlan compilerVersion corePackages packages = do
    -- Determine the dependencies of all of the packages in the build plan. We
    -- handle core packages specially, because some of them will not be in the
    -- package index. For those, we allow missing packages to exist, and then
    -- remove those from the list of dependencies, since there's no way we'll
    -- ever reinstall them anyway.
    (cores, missingCores) <- addDeps True compilerVersion
        $ fmap (, Map.empty, [], Nothing) corePackages
    (extras, missing) <- addDeps False compilerVersion packages
    -- Non-core packages must all resolve; enforced by the assertion.
    assert (Set.null missing) $ return MiniBuildPlan
        { mbpCompilerVersion = compilerVersion
        , mbpPackages = Map.unions
            [ fmap (removeMissingDeps (Map.keysSet cores)) cores
            , extras
            , Map.fromList $ map goCore $ Set.toList missingCores
            ]
        }
  where
    -- Synthesize a minimal entry for a core package absent from the index.
    goCore (PackageIdentifier name version) = (name, MiniPackageInfo
        { mpiVersion = version
        , mpiFlags = Map.empty
        , mpiGhcOptions = []
        , mpiPackageDeps = Set.empty
        , mpiToolDeps = Set.empty
        , mpiExes = Set.empty
        , mpiHasLibrary = True
        , mpiGitSHA1 = Nothing
        })
    -- Core packages may only depend on other core packages; drop the rest.
    removeMissingDeps cores mpi = mpi
        { mpiPackageDeps = Set.intersection cores (mpiPackageDeps mpi)
        }
-- | Add in the resolved dependencies from the package index
addDeps :: (MonadIO m, MonadLogger m, MonadReader env m, HasHttpManager env, MonadMask m, HasConfig env, MonadBaseControl IO m)
        => Bool -- ^ allow missing
        -> CompilerVersion -- ^ Compiler version
        -> Map PackageName (Version, Map FlagName Bool, [Text], Maybe GitSHA1)
        -> m (Map PackageName MiniPackageInfo, Set PackageIdentifier)
addDeps allowMissing compilerVersion toCalc = do
    menv <- getMinimalEnvOverride
    platform <- asks $ configPlatform . getConfig
    -- Resolve package identifiers against the index; when missing packages
    -- are allowed (core packages), collect them instead of failing.
    (resolvedMap, missingIdents) <-
        if allowMissing
            then do
                (missingNames, missingIdents, m) <-
                    resolvePackagesAllowMissing shaMap Set.empty
                assert (Set.null missingNames)
                    $ return (m, missingIdents)
            else do
                m <- resolvePackages menv shaMap Set.empty
                return (m, Set.empty)
    -- Group resolved packages by index so each index's cabal files can be
    -- read in one pass.
    let byIndex = Map.fromListWith (++) $ flip map (Map.toList resolvedMap)
            $ \(ident, rp) ->
                let (cache, ghcOptions, sha) =
                        case Map.lookup (packageIdentifierName ident) toCalc of
                            Nothing -> (Map.empty, [], Nothing)
                            Just (_, x, y, z) -> (x, y, z)
                 in (indexName $ rpIndex rp,
                     [( ident
                      , rpCache rp
                      , sha
                      , (cache, ghcOptions, sha)
                      )])
    -- Parse each cabal file and compute the package's MiniPackageInfo.
    res <- forM (Map.toList byIndex) $ \(indexName', pkgs) -> withCabalFiles indexName' pkgs
        $ \ident (flags, ghcOptions, mgitSha) cabalBS -> do
            (_warnings,gpd) <- readPackageUnresolvedBS Nothing cabalBS
            let packageConfig = PackageConfig
                    { packageConfigEnableTests = False
                    , packageConfigEnableBenchmarks = False
                    , packageConfigFlags = flags
                    , packageConfigGhcOptions = ghcOptions
                    , packageConfigCompilerVersion = compilerVersion
                    , packageConfigPlatform = platform
                    }
                name = packageIdentifierName ident
                pd = resolvePackageDescription packageConfig gpd
                exes = Set.fromList $ map (ExeName . T.pack . exeName) $ executables pd
                -- A package never depends on itself.
                notMe = Set.filter (/= name) . Map.keysSet
            return (name, MiniPackageInfo
                { mpiVersion = packageIdentifierVersion ident
                , mpiFlags = flags
                , mpiGhcOptions = ghcOptions
                , mpiPackageDeps = notMe $ packageDependencies pd
                , mpiToolDeps = Map.keysSet $ packageToolDependencies pd
                , mpiExes = exes
                , mpiHasLibrary = maybe
                    False
                    (buildable . libBuildInfo)
                    (library pd)
                , mpiGitSHA1 = mgitSha
                })
    return (Map.fromList $ concat res, missingIdents)
  where
    -- Identifier -> known cabal-file git SHA, used for cabal file lookup.
    shaMap = Map.fromList
        $ map (\(n, (v, _f, _ghcOptions, gitsha)) -> (PackageIdentifier n v, gitsha))
        $ Map.toList toCalc
-- | Resolve all packages necessary to install for the needed packages.
getDeps :: MiniBuildPlan
        -> (PackageName -> Bool) -- ^ is it shadowed by a local package?
        -> Map PackageName (Set PackageName)
        -> ResolveState
getDeps mbp isShadowed packages =
    execState (mapM_ (uncurry goName) $ Map.toList packages) ResolveState
        { rsVisited = Map.empty
        , rsUnknown = Map.empty
        , rsShadowed = Map.empty
        , rsToInstall = Map.empty
        , rsUsedBy = Map.empty
        }
  where
    toolMap = getToolMap mbp

    -- | Returns a set of shadowed packages we depend on.
    goName :: PackageName -> Set PackageName -> State ResolveState (Set PackageName)
    goName name users = do
        -- Even though we could check rsVisited first and short-circuit things
        -- earlier, lookup in mbpPackages first so that we can produce more
        -- usable error information on missing dependencies
        rs <- get
        put rs
            { rsUsedBy = Map.insertWith Set.union name users $ rsUsedBy rs
            }
        case Map.lookup name $ mbpPackages mbp of
            Nothing -> do
                -- Not in the plan: record as unknown along with its users.
                modify $ \rs' -> rs'
                    { rsUnknown = Map.insertWith Set.union name users $ rsUnknown rs'
                    }
                return Set.empty
            Just mpi -> case Map.lookup name (rsVisited rs) of
                -- Already processed: reuse the cached shadowed set.
                Just shadowed -> return shadowed
                Nothing -> do
                    -- Mark as visited (empty set) before recursing so that
                    -- dependency cycles terminate.
                    put rs { rsVisited = Map.insert name Set.empty $ rsVisited rs }
                    let depsForTools = Set.unions $ mapMaybe (flip Map.lookup toolMap) (Set.toList $ mpiToolDeps mpi)
                    let deps = Set.filter (/= name) (mpiPackageDeps mpi <> depsForTools)
                    shadowed <- fmap F.fold $ Tr.forM (Set.toList deps) $ \dep ->
                        if isShadowed dep
                            then do
                                -- Direct dependency on a shadowed package.
                                modify $ \rs' -> rs'
                                    { rsShadowed = Map.insertWith
                                        Set.union
                                        dep
                                        (Set.singleton $ PackageIdentifier name (mpiVersion mpi))
                                        (rsShadowed rs')
                                    }
                                return $ Set.singleton dep
                            else do
                                -- Recurse; propagate transitively-shadowed deps.
                                shadowed <- goName dep (Set.singleton name)
                                let m = Map.fromSet (\_ -> Set.singleton $ PackageIdentifier name (mpiVersion mpi)) shadowed
                                modify $ \rs' -> rs'
                                    { rsShadowed = Map.unionWith Set.union m $ rsShadowed rs'
                                    }
                                return shadowed
                    modify $ \rs' -> rs'
                        { rsToInstall = Map.insert name (mpiVersion mpi, mpiFlags mpi) $ rsToInstall rs'
                        , rsVisited = Map.insert name shadowed $ rsVisited rs'
                        }
                    return shadowed
-- | Map from tool (executable) name to the packages providing it.
--
-- We no longer treat package names themselves as tool names, following the
-- discussion at:
-- https://github.com/commercialhaskell/stack/issues/308#issuecomment-112076704
-- Only explicitly-declared executable names are considered.
getToolMap :: MiniBuildPlan -> Map Text (Set PackageName)
getToolMap mbp =
    Map.unionsWith Set.union (concatMap exeEntries (Map.toList (mbpPackages mbp)))
  where
    -- One singleton map per executable a package declares.
    exeEntries (pname, mpi) =
        [ Map.singleton (unExeName exe) (Set.singleton pname)
        | exe <- Set.toList (mpiExes mpi)
        ]
-- | Resolve a 'Resolver' into a 'MiniBuildPlan' paired with its loaded form.
loadResolver
    :: (MonadIO m, MonadLogger m, MonadReader env m, HasHttpManager env, HasConfig env, HasGHCVariant env, MonadBaseControl IO m, MonadMask m)
    => Maybe (Path Abs File)
    -> Resolver
    -> m (MiniBuildPlan, LoadedResolver)
loadResolver mconfigPath resolver =
    case resolver of
        ResolverSnapshot snap -> do
            mbp <- loadMiniBuildPlan snap
            return (mbp, ResolverSnapshot snap)
        -- TODO(mgsloan): Not sure what this FIXME means
        -- FIXME instead of passing the stackYaml dir we should maintain
        -- the file URL in the custom resolver always relative to stackYaml.
        ResolverCustom name url -> do
            (mbp, hash) <- parseCustomMiniBuildPlan mconfigPath url
            return (mbp, ResolverCustomLoaded name url hash)
        ResolverCompiler compiler ->
            -- No packages at all: the plan carries only the compiler version.
            return
                ( MiniBuildPlan
                    { mbpCompilerVersion = compiler
                    , mbpPackages = mempty
                    }
                , ResolverCompiler compiler
                )
-- | Load up a 'MiniBuildPlan', preferably from cache
loadMiniBuildPlan
    :: (MonadIO m, MonadLogger m, MonadReader env m, HasHttpManager env, HasConfig env, HasGHCVariant env, MonadBaseControl IO m, MonadMask m)
    => SnapName
    -> m MiniBuildPlan
loadMiniBuildPlan name = do
    path <- configMiniBuildPlanCache name
    -- Use the versioned on-disk cache; on a miss, load the full build plan
    -- and convert it, applying the hard-coded fixes.
    $(versionedDecodeOrLoad miniBuildPlanVC) path $ liftM buildPlanFixes $ do
        bp <- loadBuildPlan name
        toMiniBuildPlan
            (siCompilerVersion $ bpSystemInfo bp)
            (siCorePackages $ bpSystemInfo bp)
            (fmap goPP $ bpPackages bp)
  where
    -- Extract (version, flag overrides, ghc options, cabal-file git SHA)
    -- from one plan package.
    goPP pp =
        ( ppVersion pp
        , pcFlagOverrides $ ppConstraints pp
        -- TODO: store ghc options in BuildPlan?
        , []
        , ppCabalFileInfo pp
            >>= fmap (GitSHA1 . encodeUtf8)
              . Map.lookup "GitSHA1"
              . cfiHashes
        )
-- | Some hard-coded fixes for build plans, hopefully to be irrelevant over
-- time.
buildPlanFixes :: MiniBuildPlan -> MiniBuildPlan
buildPlanFixes mbp = mbp
    { mbpPackages = Map.fromList
        [ (name, mpi { mpiFlags = fixFlags (packageNameString name) (mpiFlags mpi) })
        | (name, mpi) <- Map.toList (mbpPackages mbp)
        ]
    }
  where
    -- Force known-problematic flags off for specific packages.
    fixFlags "persistent-sqlite" = Map.insert $(mkFlagName "systemlib") False
    fixFlags "yaml"              = Map.insert $(mkFlagName "system-libyaml") False
    fixFlags _                   = id
-- | Load the 'BuildPlan' for the given snapshot. Will load from a local copy
-- if available, otherwise downloading from Github.
loadBuildPlan :: (MonadIO m, MonadThrow m, MonadLogger m, MonadReader env m, HasHttpManager env, HasConfig env)
              => SnapName
              -> m BuildPlan
loadBuildPlan name = do
    env <- ask
    let stackage = getStackRoot env
    file' <- parseRelFile $ T.unpack file
    let fp = buildPlanDir stackage </> file'
    $logDebug $ "Decoding build plan from: " <> T.pack (toFilePath fp)
    eres <- liftIO $ decodeFileEither $ toFilePath fp
    case eres of
        Right bp -> return bp
        -- Local copy missing or unparseable: download a fresh copy and
        -- decode that instead; any decode failure now is rethrown.
        Left e -> do
            $logDebug $ "Decoding build plan from file failed: " <> T.pack (show e)
            ensureDir (parent fp)
            url <- buildBuildPlanUrl name file
            req <- parseRequest $ T.unpack url
            $logSticky $ "Downloading " <> renderSnapName name <> " build plan ..."
            $logDebug $ "Downloading build plan from: " <> url
            _ <- redownload req fp
            $logStickyDone $ "Downloaded " <> renderSnapName name <> " build plan."
            liftIO (decodeFileEither $ toFilePath fp) >>= either throwM return
  where
    file = renderSnapName name <> ".yaml"
-- | Compute the download URL for a snapshot's build-plan file.
buildBuildPlanUrl :: (MonadReader env m, HasConfig env) => SnapName -> Text -> m Text
buildBuildPlanUrl name file = do
    urls <- asks (configUrls . getConfig)
    -- LTS and nightly snapshots live under different base URLs.
    let base = case name of
            LTS _ _   -> urlsLtsBuildPlans urls
            Nightly _ -> urlsNightlyBuildPlans urls
    return (base <> "/" <> file)
-- | Name/version pairs declared by the given package descriptions.
gpdPackages :: [GenericPackageDescription] -> Map PackageName Version
gpdPackages gpds = Map.fromList
    [ toPair (C.package (C.packageDescription gpd)) | gpd <- gpds ]
  where
    -- Convert a Cabal identifier into stack's name/version types.
    toPair (C.PackageIdentifier name version) =
        (fromCabalPackageName name, fromCabalVersion version)
-- | Name of the package described by a 'GenericPackageDescription'.
gpdPackageName :: GenericPackageDescription -> PackageName
gpdPackageName gpd =
    fromCabalPackageName (C.pkgName (C.package (C.packageDescription gpd)))
-- | Dependency constraints of a package, resolved for a given compiler,
-- platform and flag assignment.  The package's self-dependency is excluded.
gpdPackageDeps
    :: GenericPackageDescription
    -> CompilerVersion
    -> Platform
    -> Map FlagName Bool
    -> Map PackageName VersionRange
gpdPackageDeps gpd cv platform flags =
    Map.filterWithKey (\depName _ -> depName /= name) (packageDependencies pkgDesc)
  where
    name    = gpdPackageName gpd
    pkgDesc = resolvePackageDescription pkgConfig gpd
    -- Tests and benchmarks enabled so all dependencies are visible.
    pkgConfig = PackageConfig
        { packageConfigEnableTests = True
        , packageConfigEnableBenchmarks = True
        , packageConfigFlags = flags
        , packageConfigGhcOptions = []
        , packageConfigCompilerVersion = cv
        , packageConfigPlatform = platform
        }
-- | Remove source-package flags that match their default values, then drop
-- package entries left with no flags set at all.
removeSrcPkgDefaultFlags :: [C.GenericPackageDescription]
                         -> Map PackageName (Map FlagName Bool)
                         -> Map PackageName (Map FlagName Bool)
removeSrcPkgDefaultFlags gpds flags =
    Map.filter (not . Map.null)
        (Map.differenceWith dropDefaults flags defaults)
  where
    -- Default flag assignment for every known package.
    defaults = Map.unions (map gpdDefaultFlags gpds)

    -- Keep only flags whose value differs from the default.
    dropDefaults fs defs = Just (Map.differenceWith keepIfDifferent fs defs)
    keepIfDifferent v v'
        | v == v'   = Nothing
        | otherwise = Just v

    gpdDefaultFlags gpd = Map.singleton
        (gpdPackageName gpd)
        (Map.fromList (map getDefault (C.genPackageFlags gpd)))
    getDefault f = (fromCabalFlagName (C.flagName f), C.flagDefault f)
-- | Find the set of @FlagName@s necessary to get the given
-- @GenericPackageDescription@ to compile against the given @BuildPlan@. Will
-- only modify non-manual flags, and will prefer default values for flags.
-- Returns the plan which produces least number of dep errors
selectPackageBuildPlan
    :: Platform
    -> CompilerVersion
    -> Map PackageName Version
    -> GenericPackageDescription
    -> (Map PackageName (Map FlagName Bool), DepErrors)
selectPackageBuildPlan platform compiler pool gpd =
    (selectPlan . limitSearchSpace . NonEmpty.map makePlan) flagCombinations
  where
    -- Pick the plan with the fewest dependency errors.  Because foldr1 is
    -- lazy in its right argument, the first error-free plan (leftmost,
    -- i.e. closest to the defaults) short-circuits the search.
    selectPlan :: NonEmpty (a, DepErrors) -> (a, DepErrors)
    selectPlan = F.foldr1 fewerErrors
      where
        fewerErrors p1 p2
            | nErrors p1 == 0 = p1
            | nErrors p1 <= nErrors p2 = p1
            | otherwise = p2
          where nErrors = Map.size . snd

    -- Avoid exponential complexity in flag combinations making us sad pandas.
    -- See: https://github.com/commercialhaskell/stack/issues/543
    limitSearchSpace :: NonEmpty a -> NonEmpty a
    limitSearchSpace (x :| xs) = x :| take (maxFlagCombinations - 1) xs
      where maxFlagCombinations = 128

    -- Check one complete flag assignment against the pool.
    makePlan :: [(FlagName, Bool)] -> (Map PackageName (Map FlagName Bool), DepErrors)
    makePlan flags = checkPackageBuildPlan platform compiler pool (Map.fromList flags) gpd

    -- All flag assignments to try, default values first; manual flags are
    -- never varied.  (mapM in the NonEmpty monad = cartesian product.)
    flagCombinations :: NonEmpty [(FlagName, Bool)]
    flagCombinations = mapM getOptions (genPackageFlags gpd)
      where
        getOptions :: C.Flag -> NonEmpty (FlagName, Bool)
        getOptions f
            | flagManual f = (fname, flagDefault f) :| []
            | flagDefault f = (fname, True) :| [(fname, False)]
            | otherwise = (fname, False) :| [(fname, True)]
          where fname = (fromCabalFlagName . flagName) f
-- | Check whether, with the given flag assignment, a package's dependency
-- constraints can be satisfied against a build plan or pool of packages.
checkPackageBuildPlan
    :: Platform
    -> CompilerVersion
    -> Map PackageName Version
    -> Map FlagName Bool
    -> GenericPackageDescription
    -> (Map PackageName (Map FlagName Bool), DepErrors)
checkPackageBuildPlan platform compiler pool flags gpd =
    (Map.singleton name flags, depErrors)
  where
    name        = gpdPackageName gpd
    constraints = gpdPackageDeps gpd compiler platform flags
    depErrors   = checkPackageDeps name constraints pool
-- | Checks if the given package dependencies can be satisfied by the given set
-- of packages. Will fail if a package is either missing or has a version
-- outside of the version range.
checkPackageDeps :: PackageName -- ^ package using dependencies, for constructing DepErrors
                 -> Map PackageName VersionRange -- ^ dependency constraints
                 -> Map PackageName Version -- ^ Available package pool or index
                 -> DepErrors
checkPackageDeps myName deps packages =
    Map.unionsWith combineDepError
        [ checkDep depName range | (depName, range) <- Map.toList deps ]
  where
    -- One dependency yields either no errors or a single DepError entry.
    checkDep :: PackageName -> VersionRange -> DepErrors
    checkDep depName range =
        case Map.lookup depName packages of
            Nothing -> mkError depName Nothing range
            Just v
                | withinRange v range -> Map.empty
                | otherwise -> mkError depName (Just v) range
    mkError depName mv range = Map.singleton depName DepError
        { deVersion = mv
        , deNeededBy = Map.singleton myName range
        }
-- | Dependency errors keyed by the offending dependency name.
type DepErrors = Map PackageName DepError

-- | Why a dependency could not be satisfied.
data DepError = DepError
    { deVersion :: !(Maybe Version) -- ^ version found in the pool, if any
    , deNeededBy :: !(Map PackageName VersionRange) -- ^ who needs it, and at what version range
    } deriving Show
-- | Combine two 'DepError's for the same 'Version'.
combineDepError :: DepError -> DepError -> DepError
combineDepError (DepError v1 by1) (DepError v2 by2) =
    -- Both errors must refer to the same found version; merge the ranges
    -- required by each user.
    assert (v1 == v2) $
        DepError v1 (Map.unionWith C.intersectVersionRanges by1 by2)
-- | Given a bundle of packages (a list of @GenericPackageDescriptions@'s) to
-- build and an available package pool (snapshot) check whether the bundle's
-- dependencies can be satisfied. If flags is passed as Nothing flag settings
-- will be chosen automatically.
checkBundleBuildPlan
    :: Platform
    -> CompilerVersion
    -> Map PackageName Version
    -> Maybe (Map PackageName (Map FlagName Bool))
    -> [GenericPackageDescription]
    -> (Map PackageName (Map FlagName Bool), DepErrors)
checkBundleBuildPlan platform compiler pool flags gpds =
    ( Map.unionsWith dupError (map fst plans)
    , Map.unionsWith combineDepError (map snd plans))
  where
    plans = map (pkgPlan flags) gpds
    -- No flags given: search for the best flag assignment per package.
    pkgPlan Nothing gpd =
        selectPackageBuildPlan platform compiler pool' gpd
    -- Explicit flags: check exactly that assignment.
    pkgPlan (Just f) gpd =
        checkPackageBuildPlan platform compiler pool' (flags' f gpd) gpd
    -- Idiom fix: fromMaybe instead of the old `maybe Map.empty id`.
    flags' f gpd = fromMaybe Map.empty (Map.lookup (gpdPackageName gpd) f)
    -- Packages of the bundle itself also count as available.
    pool' = Map.union (gpdPackages gpds) pool
    dupError _ _ = error "Bug: Duplicate packages are not expected here"
-- | Outcome of checking a set of packages against one snapshot.
data BuildPlanCheck =
    BuildPlanCheckOk (Map PackageName (Map FlagName Bool))
    -- ^ All dependencies satisfied, with the chosen flag assignment.
    | BuildPlanCheckPartial (Map PackageName (Map FlagName Bool)) DepErrors
    -- ^ Some dependencies could not be satisfied.
    | BuildPlanCheckFail (Map PackageName (Map FlagName Bool)) DepErrors
        CompilerVersion
    -- ^ Errors involve wired-in compiler packages: the snapshot's compiler
    -- itself is incompatible.
-- | Compare 'BuildPlanCheck', where GT means a better plan.
compareBuildPlanCheck :: BuildPlanCheck -> BuildPlanCheck -> Ordering
compareBuildPlanCheck (BuildPlanCheckPartial _ e1) (BuildPlanCheckPartial _ e2) =
    -- Note: order of comparison flipped, since it's better to have fewer errors.
    compare (Map.size e2) (Map.size e1)
compareBuildPlanCheck (BuildPlanCheckFail _ e1 _) (BuildPlanCheckFail _ e2 _) =
    -- Fewer user packages impacted by compiler conflicts is better.
    let numUserPkgs e = Map.size $ Map.unions (Map.elems (fmap deNeededBy e))
     in compare (numUserPkgs e2) (numUserPkgs e1)
compareBuildPlanCheck BuildPlanCheckOk{} BuildPlanCheckOk{} = EQ
compareBuildPlanCheck BuildPlanCheckOk{} BuildPlanCheckPartial{} = GT
compareBuildPlanCheck BuildPlanCheckOk{} BuildPlanCheckFail{} = GT
compareBuildPlanCheck BuildPlanCheckPartial{} BuildPlanCheckFail{} = GT
-- NOTE: pattern order matters; all remaining mixed combinations mean the
-- left plan is worse.
compareBuildPlanCheck _ _ = LT
-- | Render check results for user-facing reporting; a fully-OK result
-- prints nothing.
instance Show BuildPlanCheck where
    show BuildPlanCheckOk {} = ""
    show (BuildPlanCheckPartial f e) = T.unpack $ showDepErrors f e
    show (BuildPlanCheckFail f e c) = T.unpack $ showCompilerErrors f e c
-- | Check a set of 'GenericPackageDescription's and a set of flags against a
-- given snapshot. Returns how well the snapshot satisfies the dependencies of
-- the packages.
checkSnapBuildPlan
    :: ( MonadIO m, MonadMask m, MonadLogger m, MonadReader env m
       , HasHttpManager env, HasConfig env, HasGHCVariant env
       , MonadBaseControl IO m)
    => [GenericPackageDescription]
    -> Maybe (Map PackageName (Map FlagName Bool))
    -> SnapName
    -> m BuildPlanCheck
checkSnapBuildPlan gpds flags snap = do
    platform <- asks (configPlatform . getConfig)
    mbp <- loadMiniBuildPlan snap
    let
        compiler = mbpCompilerVersion mbp
        snapPkgs = fmap mpiVersion $ mbpPackages mbp
        (f, errs) = checkBundleBuildPlan platform compiler snapPkgs flags gpds
        cerrs = compilerErrors compiler errs
    if Map.null errs
        then return $ BuildPlanCheckOk f
        else if Map.null cerrs
            -- Plain dependency errors only; the snapshot is partially usable.
            -- (Removed the previous redundant @do return@ wrapper here.)
            then return $ BuildPlanCheckPartial f errs
            -- Errors mention wired-in packages: the compiler itself conflicts.
            else return $ BuildPlanCheckFail f cerrs compiler
  where
    compilerErrors compiler errs
        | whichCompiler compiler == Ghc = ghcErrors errs
        -- FIXME not sure how to handle ghcjs boot packages
        | otherwise = Map.empty

    isGhcWiredIn p _ = p `HashSet.member` wiredInPackages
    ghcErrors = Map.filterWithKey isGhcWiredIn
-- | Find a snapshot and set of flags that is compatible with and matches as
-- best as possible with the given 'GenericPackageDescription's.
selectBestSnapshot
    :: ( MonadIO m, MonadMask m, MonadLogger m, MonadReader env m
       , HasHttpManager env, HasConfig env, HasGHCVariant env
       , MonadBaseControl IO m)
    => [GenericPackageDescription]
    -> NonEmpty SnapName
    -> m (SnapName, BuildPlanCheck)
selectBestSnapshot gpds snaps = do
    $logInfo $ "Selecting the best among "
               <> T.pack (show (NonEmpty.length snaps))
               <> " snapshots...\n"
    -- Fold over candidate snapshots; the monadic fold lets us stop checking
    -- as soon as one snapshot matches completely.
    F.foldr1 go (NonEmpty.map getResult snaps)
  where
    -- Short-circuit on a fully-matching snapshot, otherwise keep the better
    -- of the two results.
    go mold mnew = do
        old@(_snap, bpc) <- mold
        case bpc of
            BuildPlanCheckOk {} -> return old
            _ -> fmap (betterSnap old) mnew

    -- Check one snapshot and log a per-snapshot summary.
    getResult snap = do
        result <- checkSnapBuildPlan gpds Nothing snap
        reportResult result snap
        return (snap, result)

    betterSnap (s1, r1) (s2, r2)
      | compareBuildPlanCheck r1 r2 /= LT = (s1, r1)
      | otherwise = (s2, r2)

    reportResult BuildPlanCheckOk {} snap = do
        $logInfo $ "* Matches " <> renderSnapName snap
        $logInfo ""
    reportResult r@BuildPlanCheckPartial {} snap = do
        $logWarn $ "* Partially matches " <> renderSnapName snap
        $logWarn $ indent $ T.pack $ show r
    reportResult r@BuildPlanCheckFail {} snap = do
        $logWarn $ "* Rejected " <> renderSnapName snap
        $logWarn $ indent $ T.pack $ show r

    indent t = T.unlines $ fmap (" " <>) (T.lines t)
-- | Render each item as an indented bullet line, one item per line.
showItems :: Show a => [a] -> Text
showItems items = T.concat [bullet item | item <- items]
  where
    bullet item = T.concat [T.pack " - ", T.pack (show item), T.pack "\n"]
-- | Render one package's flag assignment, or the empty string when the
-- package has no flags set.
showPackageFlags :: PackageName -> Map FlagName Bool -> Text
showPackageFlags pkg fl
    | Map.null fl = ""
    | otherwise = T.concat
        [ " - "
        , T.pack $ packageNameString pkg
        , ": "
        , T.pack $ intercalate ", "
            $ map formatFlags (Map.toList fl)
        , "\n"
        ]
  where
    formatFlags (f, v) = show f ++ " = " ++ show v
-- | Render the keys of a package map as bullet lines.
showMapPackages :: Map PackageName a -> Text
showMapPackages = showItems . Map.keys
-- | Render dependency errors that implicate the snapshot's compiler.
showCompilerErrors
    :: Map PackageName (Map FlagName Bool)
    -> DepErrors
    -> CompilerVersion
    -> Text
showCompilerErrors flags errs compiler =
    T.concat
        [ compilerVersionText compiler
        , " cannot be used for these packages:\n"
        , showMapPackages affected
        , showDepErrors flags errs -- TODO only in debug mode
        ]
  where
    -- Every package that needs one of the conflicting dependencies.
    affected = Map.unions (Map.elems (fmap deNeededBy errs))
-- | Render dependency errors, followed by the flag assignments of the
-- packages involved (when any flags were set).
showDepErrors :: Map PackageName (Map FlagName Bool) -> DepErrors -> Text
showDepErrors flags errs =
    T.concat
        [ T.concat $ map formatError (Map.toList errs)
        , if T.null flagVals then ""
          else ("Using package flags:\n" <> flagVals)
        ]
  where
    -- One paragraph per failing dependency: its status, then its users.
    formatError (depName, DepError mversion neededBy) = T.concat
        [ showDepVersion depName mversion
        , T.concat (map showRequirement (Map.toList neededBy))
        ]

    showDepVersion depName mversion = T.concat
        [ T.pack $ packageNameString depName
        , case mversion of
            Nothing -> " not found"
            Just version -> T.concat
                [ " version "
                , T.pack $ versionString version
                , " found"
                ]
        , "\n"
        ]

    showRequirement (user, range) = T.concat
        [ " - "
        , T.pack $ packageNameString user
        , " requires "
        , T.pack $ display range
        , "\n"
        ]

    -- Flags of every package that requires a failing dependency.
    flagVals = T.concat (map showFlags userPkgs)
    userPkgs = Map.keys $ Map.unions (Map.elems (fmap deNeededBy errs))
    showFlags pkg = maybe "" (showPackageFlags pkg) (Map.lookup pkg flags)
-- | Remove the given shadowed packages from a 'MiniBuildPlan', splitting the
-- remainder into packages whose dependencies are all still met (the new
-- plan) and those left with unmet dependencies.
shadowMiniBuildPlan :: MiniBuildPlan
                    -> Set PackageName
                    -> (MiniBuildPlan, Map PackageName MiniPackageInfo)
shadowMiniBuildPlan (MiniBuildPlan cv pkgs0) shadowed =
    (MiniBuildPlan cv (Map.fromList met), Map.fromList unmet)
  where
    -- Plan without the shadowed packages themselves.
    pkgs1 = Map.difference pkgs0 $ Map.fromSet (\_ -> ()) shadowed

    -- For each remaining package: are all its (transitive) deps still met?
    -- Results are memoized in the State map; a dependency cycle is fatal.
    depsMet = flip execState Map.empty $ mapM_ (check Set.empty) (Map.keys pkgs1)
    check visited name
        | name `Set.member` visited =
            error $ "shadowMiniBuildPlan: cycle detected, your MiniBuildPlan is broken: " ++ show (visited, name)
        | otherwise = do
            m <- get
            case Map.lookup name m of
                Just x -> return x
                Nothing ->
                    case Map.lookup name pkgs1 of
                        Nothing
                            | name `Set.member` shadowed -> return False
                            -- In this case, we have to assume that we're
                            -- constructing a build plan on a different OS or
                            -- architecture, and therefore different packages
                            -- are being chosen. The common example of this is
                            -- the Win32 package.
                            | otherwise -> return True
                        Just mpi -> do
                            let visited' = Set.insert name visited
                            ress <- mapM (check visited') (Set.toList $ mpiPackageDeps mpi)
                            let res = and ress
                            modify $ \m' -> Map.insert name res m'
                            return res

    -- Partition: Left = all deps met, Right = some dependency unmet.
    (met, unmet) = partitionEithers $ map toEither $ Map.toList pkgs1
    toEither pair@(name, _) =
        wrapper pair
      where
        wrapper =
            case Map.lookup name depsMet of
                Just True -> Left
                Just False -> Right
                -- check was run for every key of pkgs1, so this is unreachable.
                Nothing -> assert False Right
-- This works differently for snapshots fetched from URL and those
-- fetched from file:
--
-- 1) If downloading the snapshot from a URL, assume the fetched data is
-- immutable. Hash the URL in order to determine the location of the
-- cached download. The file contents of the snapshot determines the
-- hash for looking up cached MBP.
--
-- 2) If loading the snapshot from a file, load all of the involved
-- snapshot files. The hash used to determine the cached MBP is the hash
-- of the concatenation of the parent's hash with the snapshot contents.
--
-- Why this difference? We want to make it easy to simply edit snapshots
-- in the filesystem, but we want caching for remote snapshots. In order
-- to avoid reparsing / reloading all the yaml for remote snapshots, we
-- need a different hash system.
-- TODO: This could probably be more efficient if it first merged the
-- custom snapshots, and then applied them to the MBP. It is nice to
-- apply directly, because then we have the guarantee that it's
-- semantically identical to snapshot extension. If this optimization is
-- implemented, note that the direct Monoid for CustomSnapshot is not
-- correct. Crucially, if a package is present in the snapshot, its
-- flags and ghc-options are not based on settings from prior snapshots.
-- TODO: This semantics should be discussed / documented more.
-- TODO: allow a hash check in the resolver. This adds safety /
-- correctness, allowing you to ensure that you are indeed getting the
-- right custom snapshot.
-- TODO: Allow custom plan to specify a name.
-- | Load a custom snapshot, from either a URL or a local file path, yielding
-- the resulting 'MiniBuildPlan' together with the hash identifying it.
-- See the long comment above for how caching differs between URL-fetched
-- and file-based snapshots.
parseCustomMiniBuildPlan
    :: (MonadIO m, MonadMask m, MonadLogger m, MonadReader env m, HasHttpManager env, HasConfig env, HasGHCVariant env, MonadBaseControl IO m)
    => Maybe (Path Abs File) -- ^ Root directory for when url is a filepath
    -> T.Text
    -> m (MiniBuildPlan, SnapshotHash)
parseCustomMiniBuildPlan mconfigPath0 url0 = do
    $logDebug $ "Loading " <> url0 <> " build plan"
    case parseUrlThrow $ T.unpack url0 of
        Just req -> downloadCustom url0 req
        Nothing ->
            case mconfigPath0 of
                Nothing -> throwM $ FilepathInDownloadedSnapshot url0
                Just configPath -> do
                    (getMbp, hash) <- readCustom configPath url0
                    mbp <- getMbp
                    -- NOTE: We make the choice of only writing a cache
                    -- file for the full MBP, not the intermediate ones.
                    -- This isn't necessarily the best choice if we want
                    -- to share work extended snapshots. I think only
                    -- writing this one is more efficient for common
                    -- cases.
                    binaryPath <- getBinaryPath hash
                    alreadyCached <- doesFileExist binaryPath
                    unless alreadyCached $ $(versionedEncodeFile miniBuildPlanVC) binaryPath mbp
                    return (mbp, hash)
  where
    -- Fetch a remote snapshot; the cache key is the hash of the fetched
    -- yaml contents (the URL hash only names the download location).
    downloadCustom url req = do
        let urlHash = S8.unpack $ trimmedSnapshotHash $ doHash $ encodeUtf8 url
        hashFP <- parseRelFile $ urlHash ++ ".yaml"
        customPlanDir <- getCustomPlanDir
        let cacheFP = customPlanDir </> $(mkRelDir "yaml") </> hashFP
        _ <- download req cacheFP
        yamlBS <- liftIO $ S.readFile $ toFilePath cacheFP
        let yamlHash = doHash yamlBS
        binaryPath <- getBinaryPath yamlHash
        liftM (, yamlHash) $ $(versionedDecodeOrLoad miniBuildPlanVC) binaryPath $ do
            (cs, mresolver) <- decodeYaml yamlBS
            parentMbp <- case (csCompilerVersion cs, mresolver) of
                (Nothing, Nothing) -> throwM (NeitherCompilerOrResolverSpecified url)
                (Just cv, Nothing) -> return (compilerBuildPlan cv)
                -- NOTE: ignoring the parent's hash, even though
                -- there could be one. URL snapshot's hash are
                -- determined just from their contents.
                (_, Just resolver) -> liftM fst (loadResolver Nothing resolver)
            applyCustomSnapshot cs parentMbp
    -- Load a file-based snapshot; returns a deferred MBP action plus the
    -- hash (parent hash combined with this file's contents, per the
    -- comment above this function).
    readCustom configPath path = do
        yamlFP <- resolveFile (parent configPath) (T.unpack $ fromMaybe path $
            T.stripPrefix "file://" path <|> T.stripPrefix "file:" path)
        yamlBS <- liftIO $ S.readFile $ toFilePath yamlFP
        (cs, mresolver) <- decodeYaml yamlBS
        (getMbp, hash) <- case mresolver of
            Just (ResolverCustom _ url ) ->
                case parseUrlThrow $ T.unpack url of
                    Just req -> do
                        let getMbp = do
                                -- Ignore custom hash, under the
                                -- assumption that the URL is sufficient
                                -- for identity.
                                (mbp, _) <- downloadCustom url req
                                return mbp
                        return (getMbp, doHash yamlBS)
                    Nothing -> do
                        (getMbp0, SnapshotHash hash0) <- readCustom yamlFP url
                        let hash = doHash (hash0 <> yamlBS)
                            getMbp = do
                                binaryPath <- getBinaryPath hash
                                -- Idea here is to not waste time
                                -- writing out intermediate cache files,
                                -- but check for them.
                                exists <- doesFileExist binaryPath
                                if exists
                                    then do
                                        eres <- $(versionedDecodeFile miniBuildPlanVC) binaryPath
                                        case eres of
                                            Just mbp -> return mbp
                                            -- Invalid format cache file, remove.
                                            Nothing -> do
                                                removeFile binaryPath
                                                getMbp0
                                    else getMbp0
                        return (getMbp, hash)
            Just resolver -> do
                -- NOTE: in the cases where we don't have a hash, the
                -- normal resolver name is enough. Since this name is
                -- part of the yaml file, it ends up in our hash.
                let hash = doHash yamlBS
                    getMbp = do
                        (mbp, resolver') <- loadResolver (Just configPath) resolver
                        let mhash = customResolverHash resolver'
                        assert (isNothing mhash) (return mbp)
                return (getMbp, hash)
            Nothing -> do
                case csCompilerVersion cs of
                    Nothing -> throwM (NeitherCompilerOrResolverSpecified path)
                    Just cv -> do
                        let hash = doHash yamlBS
                            getMbp = return (compilerBuildPlan cv)
                        return (getMbp, hash)
        return (applyCustomSnapshot cs =<< getMbp, hash)
    -- Cache file location for the serialized MBP of a given hash.
    getBinaryPath hash = do
        binaryFilename <- parseRelFile $ S8.unpack (trimmedSnapshotHash hash) ++ ".bin"
        customPlanDir <- getCustomPlanDir
        return $ customPlanDir </> $(mkRelDir "bin") </> binaryFilename
    -- Decode the snapshot yaml, logging (not failing on) JSON warnings.
    decodeYaml yamlBS = do
        WithJSONWarnings res warnings <-
            either (throwM . ParseCustomSnapshotException url0) return $
            decodeEither' yamlBS
        logJSONWarnings (T.unpack url0) warnings
        return res
    -- Minimal plan for a bare compiler version: no packages at all.
    compilerBuildPlan cv = MiniBuildPlan
        { mbpCompilerVersion = cv
        , mbpPackages = mempty
        }
    getCustomPlanDir = do
        root <- asks $ configStackRoot . getConfig
        return $ root </> $(mkRelDir "custom-plan")
    -- SHA256, base64url-encoded.
    doHash = SnapshotHash . B64URL.encode . SHA256.hash
-- | Overlay a 'CustomSnapshot' onto a parent 'MiniBuildPlan': optionally
-- replace the compiler version, drop the requested packages, and add the
-- snapshot's own packages. Flags and ghc-options for the added packages
-- come solely from the custom snapshot, not from the parent (see the
-- Monoid caveat in the comment block above 'parseCustomMiniBuildPlan').
applyCustomSnapshot
    :: (MonadIO m, MonadLogger m, MonadReader env m, HasHttpManager env, HasConfig env, MonadBaseControl IO m, MonadMask m)
    => CustomSnapshot
    -> MiniBuildPlan
    -> m MiniBuildPlan
applyCustomSnapshot cs mbp0 = do
    let CustomSnapshot mcompilerVersion
                       packages
                       dropPackages
                       (PackageFlags flags)
                       ghcOptions
            = cs
        -- Pair each package identifier with its per-package build inputs.
        addFlagsAndOpts :: PackageIdentifier -> (PackageName, (Version, Map FlagName Bool, [Text], Maybe GitSHA1))
        addFlagsAndOpts (PackageIdentifier name ver) =
            ( name
            , ( ver
              , Map.findWithDefault Map.empty name flags
              -- NOTE: similar to 'allGhcOptions' in Stack.Types.Build
              , ghcOptionsFor name ghcOptions
              -- we add a Nothing since we don't yet collect Git SHAs for custom snapshots
              , Nothing
              )
            )
        packageMap = Map.fromList $ map addFlagsAndOpts $ Set.toList packages
        -- Snapshot's compiler wins when specified; else keep the parent's.
        cv = fromMaybe (mbpCompilerVersion mbp0) mcompilerVersion
        -- Parent packages minus the explicitly dropped ones.
        packages0 =
             mbpPackages mbp0 `Map.difference` (Map.fromSet (\_ -> ()) dropPackages)
    mbp1 <- toMiniBuildPlan cv mempty packageMap
    -- Left-biased union: the custom snapshot's packages override any
    -- surviving parent packages with the same name.
    return $ MiniBuildPlan
        { mbpCompilerVersion = cv
        , mbpPackages = Map.union (mbpPackages mbp1) packages0
        }
|
AndrewRademacher/stack
|
src/Stack/BuildPlan.hs
|
Haskell
|
bsd-3-clause
| 47,672
|
{-# LANGUAGE CPP, OverloadedStrings, RecordWildCards, ScopedTypeVariables #-}
import Control.Monad (forM)
import Data.Aeson (eitherDecode)
import Data.Aeson.Encode
import Data.Aeson.Parser (value)
import Data.Aeson.Types
import Data.Char (toUpper)
import Test.Framework (Test, defaultMain, testGroup)
import Test.Framework.Providers.QuickCheck2 (testProperty)
import Test.Framework.Providers.HUnit (testCase)
import Test.HUnit (Assertion, assertFailure, assertEqual)
import Test.QuickCheck (Arbitrary(..))
import qualified Data.Vector as V
import qualified Data.Attoparsec.Lazy as L
import qualified Data.ByteString.Lazy.Char8 as L
import qualified Data.Text as T
import qualified Data.Text.Lazy.Builder as TLB
import qualified Data.Text.Lazy.Encoding as TLE
import qualified Data.HashMap.Strict as H
import Data.Time.Clock (UTCTime(..))
import Data.Time (ZonedTime(..))
import Instances ()
import Types
import Encoders
import Properties.Deprecated (deprecatedTests)
#ifdef GHC_GENERICS
import Data.Int
import qualified Data.Map as Map
#endif
-- | Converting a camelCase name to snake_case ('camelTo' from aeson) and
-- back (local 'camelFrom') must be the identity.
roundTripCamel :: String -> Assertion
roundTripCamel name = assertEqual "" name (camelFrom '_' $ camelTo '_' name)
  where
    -- Rebuild a camelCase word from its separator-split pieces. Total:
    -- an empty split result (empty input string) maps back to "".
    camelFrom c s = case split c s of
                      []     -> ""
                      p : ps -> concat $ p : map capitalize ps
    split c s = map L.unpack $ L.split c $ L.pack s
    -- Total replacement for the original 'toUpper (head t) : tail t',
    -- which crashed on empty segments (e.g. consecutive separators).
    capitalize []       = []
    capitalize (t : ts) = toUpper t : ts
-- | Encoding a finite Double must round-trip through 'read'; non-finite
-- values (NaN, +/-Infinity) must serialize as JSON null.
encodeDouble :: Double -> Double -> Bool
encodeDouble num denom =
    if isNaN ratio || isInfinite ratio
        then encode ratio == "null"
        else read (L.unpack (encode ratio)) == ratio
  where
    ratio = num / denom
-- | Integers must encode to exactly their decimal 'show' representation.
encodeInteger :: Integer -> Bool
encodeInteger n = L.pack (show n) == encode n
-- | A hand-written parser must invert the matching encoder: encoding a
-- value to a 'Value' and parsing it back yields the original.
toParseJSON :: (Arbitrary a, Eq a) => (Value -> Parser a) -> (a -> Value) -> a -> Bool
toParseJSON parsejson tojson x =
    case parse parsejson (tojson x) of
      Success y -> y == x
      Error _   -> False
-- | Round-trip a value through its serialized JSON bytes: encode, re-parse
-- the raw bytes with attoparsec's 'value' parser, run 'fromJSON', and
-- compare with the supplied equality (allowing approximate comparison for
-- floating-point payloads). The second argument is only a type witness.
roundTrip :: (FromJSON a, ToJSON a) => (a -> a -> Bool) -> a -> a -> Bool
roundTrip eq _ i =
    case fmap fromJSON . L.parse value . encode . toJSON $ i of
      L.Done _ (Success v) -> v `eq` i
      _ -> False
-- | 'roundTrip' specialised to exact equality.
roundTripEq :: (Eq a, FromJSON a, ToJSON a) => a -> a -> Bool
roundTripEq = roundTrip (==)
-- | 'toJSON' followed by 'fromJSON' must reproduce the input value.
toFromJSON :: (Arbitrary a, Eq a, FromJSON a, ToJSON a) => a -> Bool
toFromJSON x =
    case fromJSON (toJSON x) of
      Success y -> y == x
      Error _   -> False
-- | 'modifyFailure' must apply the given transformation to a failing
-- parser's error message (here, prefixing @added@ onto @orig@).
modifyFailureProp :: String -> String -> Bool
modifyFailureProp orig added =
    result == Error (added ++ orig)
  where
    -- A parser that always fails with @orig@, wrapped to prefix @added@.
    parser = const $ modifyFailure (added ++) $ fail orig
    result :: Result ()
    result = parse parser ()
-- | Build the encoder-comparison suite (IO: it lists fixture files), then
-- run it together with the static test groups.
main :: IO ()
main = do
    cmpSuite <- encoderComparisonTests
    defaultMain (cmpSuite : tests)
#ifdef GHC_GENERICS
-- Shorthands for the large generic product/sum fixture types used in the
-- GHC-generics round-trip tests below.
type P6 = Product6 Int Bool String (Approx Double) (Int, Approx Double) ()
type S4 = Sum4 Int8 ZonedTime T.Text (Map.Map String Int)
#endif
--------------------------------------------------------------------------------
-- Value properties
--------------------------------------------------------------------------------
-- | True iff the JSON value is a string literal.
isString :: Value -> Bool
isString v = case v of
    String _ -> True
    _        -> False
-- | True iff the value is a two-element array whose first element is a
-- string (the shape produced by the TwoElemArray sum encoding).
is2ElemArray :: Value -> Bool
is2ElemArray v = case v of
    Array a -> V.length a == 2 && isString (V.head a)
    _       -> False
-- | True iff the value is an object carrying both \"tag\" and \"contents\"
-- keys (the TaggedObject encoding of a constructor with fields).
isTaggedObjectValue :: Value -> Bool
isTaggedObjectValue v = case v of
    Object o -> "tag" `H.member` o && "contents" `H.member` o
    _        -> False
-- | True iff the value is an object carrying a \"tag\" key.
isTaggedObject :: Value -> Bool
isTaggedObject v = case v of
    Object o -> "tag" `H.member` o
    _        -> False
-- | True iff the value is an object with exactly one key (the
-- ObjectWithSingleField sum encoding).
isObjectWithSingleField :: Value -> Bool
isObjectWithSingleField v = case v of
    Object o -> H.size o == 1
    _        -> False
--------------------------------------------------------------------------------
-- | The static test suite: encoder properties, camelCase conversion,
-- round-trips through JSON text and through 'Value', failure-message
-- handling, and the template-haskell (plus, when available, GHC.Generics)
-- sum-encoding variants.
tests :: [Test]
tests = [
  testGroup "encode" [
      testProperty "encodeDouble" encodeDouble
    , testProperty "encodeInteger" encodeInteger
    ],
  testGroup "camelCase" [
      testCase "camelTo" $ roundTripCamel "aName"
    , testCase "camelTo" $ roundTripCamel "another"
    , testCase "camelTo" $ roundTripCamel "someOtherName"
    ],
  testGroup "roundTrip" [
      testProperty "Bool" $ roundTripEq True
    , testProperty "Double" $ roundTripEq (1 :: Approx Double)
    , testProperty "Int" $ roundTripEq (1::Int)
    , testProperty "Integer" $ roundTripEq (1::Integer)
    , testProperty "String" $ roundTripEq (""::String)
    , testProperty "Text" $ roundTripEq T.empty
    , testProperty "Foo" $ roundTripEq (undefined::Foo)
    , testProperty "DotNetTime" $ roundTripEq (undefined :: DotNetTime)
    , testProperty "UTCTime" $ roundTripEq (undefined :: UTCTime)
    , testProperty "ZonedTime" $ roundTripEq (undefined::ZonedTime)
#ifdef GHC_GENERICS
    , testGroup "ghcGenerics" [
        testProperty "OneConstructor" $ roundTripEq OneConstructor
      , testProperty "Product2" $ roundTripEq (undefined :: Product2 Int Bool)
      , testProperty "Product6" $ roundTripEq (undefined :: P6)
      , testProperty "Sum4" $ roundTripEq (undefined :: S4)
      ]
#endif
    ],
  testGroup "toFromJSON" [
      testProperty "Integer" (toFromJSON :: Integer -> Bool)
    , testProperty "Double" (toFromJSON :: Double -> Bool)
    , testProperty "Maybe Integer" (toFromJSON :: Maybe Integer -> Bool)
    , testProperty "Either Integer Double" (toFromJSON :: Either Integer Double -> Bool)
    , testProperty "Either Integer Integer" (toFromJSON :: Either Integer Integer -> Bool)
    ],
  testGroup "deprecated" deprecatedTests,
  testGroup "failure messages" [
      testProperty "modify failure" modifyFailureProp
    ],
  -- Derived-via-TH encoders/decoders, one subgroup per sum encoding.
  testGroup "template-haskell" [
    testGroup "Nullary" [
        testProperty "string" (isString . thNullaryToJSONString)
      , testProperty "2ElemArray" (is2ElemArray . thNullaryToJSON2ElemArray)
      , testProperty "TaggedObject" (isTaggedObjectValue . thNullaryToJSONTaggedObject)
      , testProperty "ObjectWithSingleField" (isObjectWithSingleField . thNullaryToJSONObjectWithSingleField)
      , testGroup "roundTrip" [
          testProperty "string" (toParseJSON thNullaryParseJSONString thNullaryToJSONString)
        , testProperty "2ElemArray" (toParseJSON thNullaryParseJSON2ElemArray thNullaryToJSON2ElemArray)
        , testProperty "TaggedObject" (toParseJSON thNullaryParseJSONTaggedObject thNullaryToJSONTaggedObject)
        , testProperty "ObjectWithSingleField" (toParseJSON thNullaryParseJSONObjectWithSingleField thNullaryToJSONObjectWithSingleField)
        ]
    ]
    , testGroup "SomeType" [
        testProperty "2ElemArray" (is2ElemArray . (thSomeTypeToJSON2ElemArray :: SomeTypeToJSON))
      , testProperty "TaggedObject" (isTaggedObject . (thSomeTypeToJSONTaggedObject :: SomeTypeToJSON))
      , testProperty "ObjectWithSingleField" (isObjectWithSingleField . (thSomeTypeToJSONObjectWithSingleField :: SomeTypeToJSON))
      , testGroup "roundTrip" [
          testProperty "2ElemArray" (toParseJSON thSomeTypeParseJSON2ElemArray (thSomeTypeToJSON2ElemArray :: SomeTypeToJSON))
        , testProperty "TaggedObject" (toParseJSON thSomeTypeParseJSONTaggedObject (thSomeTypeToJSONTaggedObject :: SomeTypeToJSON))
        , testProperty "ObjectWithSingleField" (toParseJSON thSomeTypeParseJSONObjectWithSingleField (thSomeTypeToJSONObjectWithSingleField :: SomeTypeToJSON))
        ]
    ]
  ]
#ifdef GHC_GENERICS
  -- Generic deriving must agree with the TH deriving ("eq" subgroups).
  , testGroup "GHC-generics" [
      testGroup "Nullary" [
          testProperty "string" (isString . gNullaryToJSONString)
        , testProperty "2ElemArray" (is2ElemArray . gNullaryToJSON2ElemArray)
        , testProperty "TaggedObject" (isTaggedObjectValue . gNullaryToJSONTaggedObject)
        , testProperty "ObjectWithSingleField" (isObjectWithSingleField . gNullaryToJSONObjectWithSingleField)
        , testGroup "eq" [
            testProperty "string" (\n -> gNullaryToJSONString n == thNullaryToJSONString n)
          , testProperty "2ElemArray" (\n -> gNullaryToJSON2ElemArray n == thNullaryToJSON2ElemArray n)
          , testProperty "TaggedObject" (\n -> gNullaryToJSONTaggedObject n == thNullaryToJSONTaggedObject n)
          , testProperty "ObjectWithSingleField" (\n -> gNullaryToJSONObjectWithSingleField n == thNullaryToJSONObjectWithSingleField n)
          ]
        , testGroup "roundTrip" [
            testProperty "string" (toParseJSON gNullaryParseJSONString gNullaryToJSONString)
          , testProperty "2ElemArray" (toParseJSON gNullaryParseJSON2ElemArray gNullaryToJSON2ElemArray)
          , testProperty "TaggedObject" (toParseJSON gNullaryParseJSONTaggedObject gNullaryToJSONTaggedObject)
          , testProperty "ObjectWithSingleField" (toParseJSON gNullaryParseJSONObjectWithSingleField gNullaryToJSONObjectWithSingleField)
          ]
      ]
    , testGroup "SomeType" [
        testProperty "2ElemArray" (is2ElemArray . (gSomeTypeToJSON2ElemArray :: SomeTypeToJSON))
      , testProperty "TaggedObject" (isTaggedObject . (gSomeTypeToJSONTaggedObject :: SomeTypeToJSON))
      , testProperty "ObjectWithSingleField" (isObjectWithSingleField . (gSomeTypeToJSONObjectWithSingleField :: SomeTypeToJSON))
      , testGroup "eq" [
          testProperty "2ElemArray" (\n -> (gSomeTypeToJSON2ElemArray :: SomeTypeToJSON) n == thSomeTypeToJSON2ElemArray n)
        , testProperty "TaggedObject" (\n -> (gSomeTypeToJSONTaggedObject :: SomeTypeToJSON) n == thSomeTypeToJSONTaggedObject n)
        , testProperty "ObjectWithSingleField" (\n -> (gSomeTypeToJSONObjectWithSingleField :: SomeTypeToJSON) n == thSomeTypeToJSONObjectWithSingleField n)
        ]
      , testGroup "roundTrip" [
          testProperty "2ElemArray" (toParseJSON gSomeTypeParseJSON2ElemArray (gSomeTypeToJSON2ElemArray :: SomeTypeToJSON))
        , testProperty "TaggedObject" (toParseJSON gSomeTypeParseJSONTaggedObject (gSomeTypeToJSONTaggedObject :: SomeTypeToJSON))
        , testProperty "ObjectWithSingleField" (toParseJSON gSomeTypeParseJSONObjectWithSingleField (gSomeTypeToJSONObjectWithSingleField :: SomeTypeToJSON))
        ]
      ]
    ]
#endif
  ]
------------------------------------------------------------------------------
-- Comparison between bytestring and text encoders
------------------------------------------------------------------------------
-- | For each benchmark fixture file, check that the ByteString encoder
-- ('encode') and the Text-builder encoder ('encodeToTextBuilder') produce
-- byte-identical output. IO: the fixtures are read from disk at test time.
encoderComparisonTests :: IO Test
encoderComparisonTests = do
  encoderTests <- forM testFiles $ \file0 -> do
      let file = "benchmarks/json-data/" ++ file0
      return $ testCase file $ do
          inp <- L.readFile file
          case eitherDecode inp of
            Left  err -> assertFailure $ "Decoding failure: " ++ err
            Right val -> assertEqual "" (encode val) (encodeViaText val)
  return $ testGroup "Compare bytestring and text encoders" encoderTests
 where
  -- Encode through the Text builder, then UTF-8 encode to lazy bytes so
  -- the two pipelines are directly comparable.
  encodeViaText :: Value -> L.ByteString
  encodeViaText =
      TLE.encodeUtf8 . TLB.toLazyText . encodeToTextBuilder . toJSON
  testFiles =
    [ "example.json"
    , "integers.json"
    , "jp100.json"
    , "numbers.json"
    , "twitter10.json"
    , "twitter20.json"
    , "geometry.json"
    , "jp10.json"
    , "jp50.json"
    , "twitter1.json"
    , "twitter100.json"
    , "twitter50.json"
    ]
|
maximkulkin/aeson
|
tests/Properties.hs
|
Haskell
|
bsd-3-clause
| 12,070
|
{-# OPTIONS_GHC -fno-warn-warnings-deprecations #-}
{-# LANGUAGE CPP #-}
{-# LANGUAGE OverloadedStrings #-}
#ifndef MIN_VERSION_base
#define MIN_VERSION_base(x,y,z) 1
#endif
module Test.System.GPIO.TypesSpec (spec) where
import Protolude
import System.GPIO.Types
import Data.Bits (unsafeShiftL, unsafeShiftR)
import Test.Hspec
import Test.QuickCheck (property)
-- Test all our hand-derived instances and functions.
-- PinValue mimics Bool with respect to Bits and FiniteBits.
--
-- | Lift an @Int -> Bool@ function to produce a 'PinValue' instead.
ib2v :: (Int -> Bool) -> Int -> PinValue
ib2v f = boolToValue . f
-- | Run a @Bool -> Int@ function on the Bool underlying a 'PinValue'.
bi2v :: (Bool -> Int) -> PinValue -> Int
bi2v f = f . valueToBool
-- | Transport a unary Bool function across the PinValue/Bool isomorphism.
bb2v :: (Bool -> Bool) -> PinValue -> PinValue
bb2v f = boolToValue . f . valueToBool
-- | Transport a binary Bool function across the PinValue/Bool isomorphism.
bbb2v :: (Bool -> Bool -> Bool) -> PinValue -> PinValue -> PinValue
bbb2v f x y = boolToValue (f (valueToBool x) (valueToBool y))
-- | Transport a @Bool -> Int -> Bool@ function (e.g. 'shift') across the
-- PinValue/Bool isomorphism, leaving the Int argument untouched.
bib2v :: (Bool -> Int -> Bool) -> PinValue -> Int -> PinValue
bib2v f x = boolToValue . f (valueToBool x)
#if MIN_VERSION_base(4,8,0)
-- | Properties for the FiniteBits methods introduced in base-4.8.0.0;
-- compiled out entirely on older base (see the #else branch).
newBase :: Spec
newBase =
  do context "implements the new base-4.8.0.0 FiniteBits typeclass methods" $
       do it "countLeadingZeros" $ property $
            \a -> countLeadingZeros a == bi2v countLeadingZeros a
          it "countTrailingZeros" $ property $
            \a -> countTrailingZeros a == bi2v countTrailingZeros a
#else
-- | No-op on pre-4.8 base, which lacks these methods.
newBase :: Spec
newBase = return ()
#endif
-- | Specs for the hand-derived instances on the core GPIO types. The
-- 'PinValue' Bits/FiniteBits instances are checked method-by-method
-- against 'Bool', which they are stated to mimic (via the b*2v adapters
-- defined above).
spec :: Spec
spec =
  do describe "Pin" $
       do it "pinNumber" $ property $
            \p@(Pin n) -> n == pinNumber p
     describe "PinDirection" $
       do it "invertDirection" $
            invertDirection In == Out
            && invertDirection Out == In
     describe "PinValue" $
       do it "invertValue" $ property $
            \a -> invertValue a == complement a
          it "valueToBool" $
            valueToBool Low == False
            && valueToBool High == True
          it "boolToValue" $
            boolToValue False == Low
            && boolToValue True == High
          -- Every Bits method must agree with Bool's implementation.
          context "implements the Bits typeclass" $
            do it "(.&.)" $ property $
                 \a b -> a .&. b == bbb2v (.&.) a b
               it "(.|.)" $ property $
                 \a b -> a .|. b == bbb2v (.|.) a b
               it "xor" $ property $
                 \a b -> a `xor` b == bbb2v xor a b
               it "complement" $ property $
                 \a -> complement a == bb2v complement a
               it "shift" $ property $
                 \a n -> a `shift` n == bib2v shift a n
               it "rotate" $ property $
                 \a n -> a `rotate` n == bib2v rotate a n
               it "zeroBits" $ property $
                 (zeroBits :: PinValue) == boolToValue (zeroBits:: Bool)
               it "bit" $ property $
                 \n -> bit n == ib2v bit n
               it "setBit" $ property $
                 \a n -> a `setBit` n == bib2v setBit a n
               it "clearBit" $ property $
                 \a n -> a `clearBit` n == bib2v clearBit a n
               it "complementBit" $ property $
                 \a n -> a `complementBit` n == bib2v complementBit a n
               it "testBit" $ property $
                 \a n -> testBit a n == testBit (valueToBool a) n
               it "bitSizeMaybe" $ property $
                 \a -> bitSizeMaybe a == bitSizeMaybe (valueToBool a)
               it "bitSize" $ property $
                 \a -> bitSize a == bitSize (valueToBool a)
               it "isSigned" $ property $
                 \a -> isSigned a == isSigned (valueToBool a)
               it "shiftL" $ property $
                 \a n -> a `shiftL` n == bib2v shiftL a n
               it "unsafeShiftL" $ property $
                 \a n -> a `unsafeShiftL` n == bib2v unsafeShiftL a n
               it "shiftR" $ property $
                 \a n -> a `shiftR` n == bib2v shiftR a n
               it "unsafeShiftR" $ property $
                 \a n -> a `unsafeShiftR` n == bib2v unsafeShiftR a n
               it "rotateL" $ property $
                 \a n -> a `rotateL` n == bib2v rotateL a n
               it "rotateR" $ property $
                 \a n -> a `rotateR` n == bib2v rotateR a n
               it "popCount" $ property $
                 \a -> popCount a == popCount (valueToBool a)
          context "implements the FiniteBits typeclass" $
            do it "finiteBitSize" $ property $
                 \a -> finiteBitSize a == bi2v finiteBitSize a
               -- base-version-dependent methods (see 'newBase' above).
               newBase
|
dhess/gpio
|
test/Test/System/GPIO/TypesSpec.hs
|
Haskell
|
bsd-3-clause
| 4,438
|
{-|
Module : System.GPIO.Linux.Sysfs.IO
Description : Linux @sysfs@ GPIO operations in IO
Copyright : (c) 2019, Drew Hess
License : BSD3
Maintainer : Drew Hess <src@drewhess.com>
Stability : experimental
Portability : non-portable
The actual Linux @sysfs@ implementation. This implementation will only
function properly on Linux systems with a @sysfs@ subsystem,
obviously.
-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE ForeignFunctionInterface #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE InterruptibleFFI #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE PackageImports #-}
{-# LANGUAGE Trustworthy #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE UndecidableInstances #-}
module System.GPIO.Linux.Sysfs.IO
( -- * SysfsIOT transformer
SysfsIOT(..)
) where
import Protolude hiding (bracket)
import Control.Monad.Base (MonadBase)
import Control.Monad.Catch (MonadCatch, MonadMask, MonadThrow, bracket)
import Control.Monad.Cont (MonadCont)
import Control.Monad.Fix (MonadFix)
import Control.Monad.Logger (MonadLogger, MonadLoggerIO)
import Control.Monad.RWS (MonadRWS)
import Control.Monad.Trans.Class (MonadTrans(..))
import Control.Monad.Trans.Control
(ComposeSt, MonadBaseControl(..), MonadTransControl(..),
defaultLiftBaseWith, defaultRestoreM)
import Control.Monad.Writer (MonadWriter)
import qualified Data.ByteString as BS (readFile, writeFile)
import Foreign.C.Error (throwErrnoIfMinus1Retry)
import Foreign.C.Types (CInt(..))
import qualified System.Directory as D (doesDirectoryExist, doesFileExist, getDirectoryContents)
import "unix" System.Posix.IO (OpenMode(ReadOnly, WriteOnly), closeFd, defaultFileFlags, openFd)
import "unix-bytestring" System.Posix.IO.ByteString (fdWrite)
import System.GPIO.Linux.Sysfs.Monad (MonadSysfs(..))
-- | An instance of 'MonadSysfs' which runs 'MonadSysfs' operations in
-- IO. This instance must be run on an actual Linux @sysfs@ GPIO
-- filesystem and will fail in any other environment.
--
-- == Interactions with threads
--
-- Some parts of this implementation use the Haskell C FFI, and may
-- block on C I/O operations. (Specifically, 'pollFile' will block in
-- the C FFI until its event is triggered.) When using this
-- implementation with GHC, you should compile your program with the
-- @-threaded@ option, so that threads performing these blocking
-- operations do not block other Haskell threads in the system.
--
-- Note that the C FFI bits in this implementation are marked as
-- 'interruptible', so that, on versions of GHC later than 7.8.1,
-- functions such as 'Control.Concurent.throwTo' will work properly
-- when targeting a Haskell thread that uses this implementation.
--
-- (On Haskell implementations other than GHC, the threading
-- implications are unknown; see the implementation's notes on how its
-- threading system interacts with the C FFI.)
newtype SysfsIOT m a = SysfsIOT
  { runSysfsIOT :: m a -- ^ Unwrap, running the action in the inner monad.
  } deriving ( Functor
             , Alternative
             , Applicative
             , Monad
             , MonadBase b
             , MonadFix
             , MonadPlus
             , MonadThrow
             , MonadCatch
             , MonadMask
             , MonadCont
             , MonadIO
             , MonadReader r
             , MonadError e
             , MonadWriter w
             , MonadState s
             , MonadRWS r w s
             , MonadLogger
             , MonadLoggerIO
             )
-- | Lifting is just the newtype wrapper; no effects are added.
instance MonadTrans SysfsIOT where
  lift = SysfsIOT
-- | Standard monad-control boilerplate: delegate to the inner monad via
-- the default Compose-based combinators.
instance MonadBaseControl b m => MonadBaseControl b (SysfsIOT m) where
  type StM (SysfsIOT m) a = ComposeSt SysfsIOT m a
  liftBaseWith = defaultLiftBaseWith
  restoreM = defaultRestoreM
  {-# INLINABLE liftBaseWith #-}
  {-# INLINABLE restoreM #-}
-- | The transformer carries no state of its own ('StT' is the bare
-- result), so lift/restore are just wrapping and unwrapping.
instance MonadTransControl SysfsIOT where
  type StT SysfsIOT a = a
  liftWith f = SysfsIOT $ f runSysfsIOT
  restoreT = SysfsIOT
  {-# INLINABLE liftWith #-}
  {-# INLINABLE restoreT #-}
-- | Each sysfs primitive is the corresponding real filesystem action run
-- via 'liftIO'; the low-level fd-based operations delegate to the helpers
-- defined below.
instance (MonadIO m, MonadThrow m) => MonadSysfs (SysfsIOT m) where
  doesDirectoryExist = liftIO . D.doesDirectoryExist
  doesFileExist = liftIO . D.doesFileExist
  getDirectoryContents = liftIO . D.getDirectoryContents
  readFile = liftIO . BS.readFile
  writeFile fn bs = liftIO $ BS.writeFile fn bs
  unlockedWriteFile fn bs = liftIO $ unlockedWriteFileIO fn bs
  pollFile fn timeout = liftIO $ pollFileIO fn timeout
-- | Write a file through a raw POSIX file descriptor ('openFd'/'fdWrite')
-- rather than the Handle machinery (hence "unlocked"); 'bracket'
-- guarantees the descriptor is closed even if the write throws.
unlockedWriteFileIO :: FilePath -> ByteString -> IO ()
unlockedWriteFileIO fn bs =
  bracket
    (openFd fn WriteOnly Nothing defaultFileFlags)
    closeFd
    (\fd -> void $ fdWrite fd bs)
-- C-side poll helper; marked 'interruptible' so asynchronous exceptions
-- can cancel a blocked call (see the module comment above).
foreign import ccall interruptible "pollSysfs" pollSysfs :: CInt -> CInt -> IO CInt
-- | Open the file read-only and block in the C 'pollSysfs' helper until it
-- reports an event or the timeout elapses (timeout units are defined by
-- the C helper -- presumably milliseconds as with poll(2); confirm in
-- cbits). EINTR is retried by 'throwErrnoIfMinus1Retry', and 'bracket'
-- guarantees the descriptor is closed.
pollFileIO :: FilePath -> Int -> IO CInt
pollFileIO fn timeout =
  bracket
    (openFd fn ReadOnly Nothing defaultFileFlags)
    closeFd
    (\fd -> throwErrnoIfMinus1Retry "pollSysfs" $ pollSysfs (fromIntegral fd) (fromIntegral timeout))
|
dhess/gpio
|
src/System/GPIO/Linux/Sysfs/IO.hs
|
Haskell
|
bsd-3-clause
| 4,945
|
{-# LANGUAGE CPP #-}
#ifndef MIN_VERSION_profunctors
#define MIN_VERSION_profunctors(x,y,z) 0
#endif
-----------------------------------------------------------------------------
-- |
-- Module : Data.Machine.Mealy
-- Copyright : (C) 2012 Edward Kmett
-- License : BSD-style (see the file LICENSE)
--
-- Maintainer : Edward Kmett <ekmett@gmail.com>
-- Stability : provisional
-- Portability : portable
--
-- <http://en.wikipedia.org/wiki/Mealy_machine>
----------------------------------------------------------------------------
module Data.Machine.Mealy
( Mealy(..)
, unfoldMealy
, logMealy
) where
import Control.Applicative
import Control.Arrow
import Control.Category
import Data.Machine.Plan
import Data.Machine.Type
import Data.Machine.Process
import Data.Profunctor
import Data.Pointed
import Data.Semigroup
import Data.Sequence as Seq
import Prelude hiding ((.),id)
-- | 'Mealy' machines: each step maps an input @a@ to an output @b@ paired
-- with the machine to use for the next input.
newtype Mealy a b = Mealy { runMealy :: a -> (b, Mealy a b) }
-- | Map over outputs; the input side is untouched. @(<$)@ short-circuits
-- to the constant machine, since all outputs would be replaced anyway.
instance Functor (Mealy a) where
  fmap f (Mealy m) = Mealy $ \a -> case m a of
    (b, n) -> (f b, fmap f n)
  {-# INLINE fmap #-}
  b <$ _ = pure b
  {-# INLINE (<$) #-}
-- | 'pure' is the machine that constantly outputs @b@. @(<*>)@ feeds the
-- same input to both machines and applies the first's output to the
-- second's; @(<*)@ and @(*>)@ keep only the relevant side, since the
-- other machine's outputs would be discarded.
instance Applicative (Mealy a) where
  pure b = r where r = Mealy (const (b, r))
  {-# INLINE pure #-}
  Mealy m <*> Mealy n = Mealy $ \a -> case m a of
    (f, m') -> case n a of
      (b, n') -> (f b, m' <*> n')
  m <* _ = m
  {-# INLINE (<*) #-}
  _ *> n = n
  {-# INLINE (*>) #-}
-- | Same constant machine as 'pure'.
instance Pointed (Mealy a) where
  point b = r where r = Mealy (const (b, r))
  {-# INLINE point #-}
-- | A 'Mealy' machine modeled with explicit state.
unfoldMealy :: (s -> a -> (b, s)) -> s -> Mealy a b
unfoldMealy step = loop
  where
    loop s = Mealy $ \x -> case step s x of
      (out, s') -> (out, loop s')
{-# INLINE unfoldMealy #-}
-- | slow diagonalization
instance Monad (Mealy a) where
  return b = r where r = Mealy (const (b, r))
  {-# INLINE return #-}
  -- Run @m@ on the input, build the machine @f b@, and use only its first
  -- output for this same input; the continuation re-binds the advanced
  -- @m'@ (this is the "slow diagonalization" noted above).
  m >>= f = Mealy $ \a -> case runMealy m a of
    (b, m') -> (fst (runMealy (f b) a), m' >>= f)
  {-# INLINE (>>=) #-}
  _ >> n = n
  {-# INLINE (>>) #-}
-- | Map over the input side ('lmap') by pre-composing, rebuilding each
-- successor machine; 'rmap' is just 'fmap'. A fused 'dimap' is provided
-- when the profunctors version exposes it as a class method.
instance Profunctor Mealy where
  rmap = fmap
  {-# INLINE rmap #-}
  lmap f = go where
    go (Mealy m) = Mealy $ \a -> case m (f a) of
      (b, n) -> (b, go n)
  {-# INLINE lmap #-}
#if MIN_VERSION_profunctors(3,1,1)
  dimap f g = go where
    go (Mealy m) = Mealy $ \a -> case m (f a) of
      (b, n) -> (g b, go n)
  {-# INLINE dimap #-}
#endif
-- | Interpret a Mealy machine as a machines-style process: await an
-- input, yield the corresponding output, continue with the successor.
instance Automaton Mealy where
  auto = construct . go where
    go (Mealy f) = await >>= \a -> case f a of
      (b, m) -> do
        yield b
        go m
  {-# INLINE auto #-}
-- | Machines compose like functions; both machines advance on each step.
instance Category Mealy where
  id = Mealy (\a -> (a, id))
  Mealy bc . Mealy ab = Mealy $ \ a -> case ab a of
    (b, nab) -> case bc b of
      (c, nbc) -> (c, nbc . nab)
-- | 'arr' lifts a pure function into a stateless machine; 'first' threads
-- the second tuple component through unchanged.
instance Arrow Mealy where
  arr f = r where r = Mealy (\a -> (f a, r))
  {-# INLINE arr #-}
  first (Mealy m) = Mealy $ \(a,c) -> case m a of
    (b, n) -> ((b, c), first n)
-- | Sum routing: only the machine whose branch receives input advances;
-- the other machine is carried along unchanged.
instance ArrowChoice Mealy where
  left m = Mealy $ \a -> case a of
    Left l -> case runMealy m l of
      (b, m') -> (Left b, left m')
    Right r -> (Right r, left m)
  right m = Mealy $ \a -> case a of
    Left l -> (Left l, right m)
    Right r -> case runMealy m r of
      (b, m') -> (Right b, right m')
  m +++ n = Mealy $ \a -> case a of
    Left b -> case runMealy m b of
      (c, m') -> (Left c, m' +++ n)
    Right b -> case runMealy n b of
      (c, n') -> (Right c, m +++ n')
  m ||| n = Mealy $ \a -> case a of
    Left b -> case runMealy m b of
      (d, m') -> (d, m' ||| n)
    Right b -> case runMealy n b of
      (d, n') -> (d, m ||| n')
#if MIN_VERSION_profunctors(3,2,0)
-- Strength/choice come directly from the Arrow/ArrowChoice instances.
instance Strong Mealy where
  first' = first
instance Choice Mealy where
  left' = left
  right' = right
#endif
-- | Fast-forward a machine through a buffered sequence of inputs, then
-- run it on one final input.
driveMealy :: Mealy a b -> Seq a -> a -> (b, Mealy a b)
driveMealy m buffered final =
  case viewl buffered of
    EmptyL  -> runMealy m final
    y :< ys -> driveMealy (snd (runMealy m y)) ys final
-- | Accumulate history: each output is the semigroup-fold of every input
-- seen so far.
logMealy :: Semigroup a => Mealy a a
logMealy = Mealy $ \first0 -> (first0, withAcc first0)
  where
    withAcc acc = Mealy $ \x -> let acc' = acc <> x in (acc', withAcc acc')
{-# INLINE logMealy #-}
-- | 'app' replays the full input history (buffered in a 'Seq') into each
-- newly supplied machine via 'driveMealy' -- correct, but each step costs
-- time proportional to the history length.
instance ArrowApply Mealy where
  app = go Seq.empty where
    go xs = Mealy $ \(m,x) -> case driveMealy m xs x of
      (c, _) -> (c, go (xs |> x))
  {-# INLINE app #-}
|
fumieval/machines
|
src/Data/Machine/Mealy.hs
|
Haskell
|
bsd-3-clause
| 4,321
|
-- !!! make sure context of EQ is minimised in interface file.
--
module ShouldSucceed where
-- Simple enumeration, part of the original test fixture.
data NUM = ONE | TWO
-- ORD's superclass is Num; EQ's superclasses are ORD and Show. Per the
-- header comment, the test checks EQ's context is minimised in the
-- interface file.
class (Num a) => ORD a
class (ORD a, Show a) => EQ a where
    (===) :: a -> a -> Bool
|
forked-upstream-packages-for-ghcjs/ghc
|
testsuite/tests/typecheck/should_compile/tc077.hs
|
Haskell
|
bsd-3-clause
| 200
|
{-# LANGUAGE Safe #-}
-----------------------------------------------------------------------------
-- |
-- Module : Data.STRef.Lazy
-- Copyright : (c) The University of Glasgow 2001
-- License : BSD-style (see the file libraries/base/LICENSE)
--
-- Maintainer : libraries@haskell.org
-- Stability : experimental
-- Portability : non-portable (uses Control.Monad.ST.Lazy)
--
-- Mutable references in the lazy ST monad.
--
-----------------------------------------------------------------------------
module Data.STRef.Lazy (
-- * STRefs
ST.STRef, -- abstract
newSTRef,
readSTRef,
writeSTRef,
modifySTRef
) where
import Control.Monad.ST.Lazy
import qualified Data.STRef as ST
-- | Allocate a reference holding the given value.
newSTRef :: a -> ST s (ST.STRef s a)
-- | Read the current contents of a reference.
readSTRef :: ST.STRef s a -> ST s a
-- | Replace the contents of a reference.
writeSTRef :: ST.STRef s a -> a -> ST s ()
-- | Apply a function to the contents of a reference.
modifySTRef :: ST.STRef s a -> (a -> a) -> ST s ()
-- Each operation is the strict-ST primitive lifted into lazy ST.
newSTRef = strictToLazyST . ST.newSTRef
readSTRef = strictToLazyST . ST.readSTRef
writeSTRef ref = strictToLazyST . ST.writeSTRef ref
modifySTRef ref = strictToLazyST . ST.modifySTRef ref
|
tolysz/prepare-ghcjs
|
spec-lts8/base/Data/STRef/Lazy.hs
|
Haskell
|
bsd-3-clause
| 1,132
|
{-# LANGUAGE DeriveGeneric, DatatypeContexts #-}
module CannotDoRep1_1 where
import GHC.Generics
-- We do not support datatypes with context
-- (per the module comment and test name, this declaration is expected to
-- be rejected: deriving Generic1 for a DatatypeContexts type).
data (Show a) => Context a = Context a deriving Generic1
|
urbanslug/ghc
|
testsuite/tests/generics/GenCannotDoRep1_1.hs
|
Haskell
|
bsd-3-clause
| 201
|
-- | Verbatim re-export of "GHC.Float" under the Rebase namespace.
module Rebase.GHC.Float
(
  module GHC.Float
)
where

import GHC.Float
|
nikita-volkov/rebase
|
library/Rebase/GHC/Float.hs
|
Haskell
|
mit
| 71
|
{-# LANGUAGE ScopedTypeVariables #-}
module Model.Post where
import Import
import qualified Database.Esqueleto as E
import Database.Esqueleto((^.))
-- | Fetch one page of posts, newest first.
--
-- Pages are 1-based: page @p@ with @n@ posts per page skips @(p-1)*n@ rows.
-- Non-positive arguments yield an empty list instead of querying the DB.
getPosts :: Int -> Int -> DB [Entity Post]
getPosts page postsPerPage
    | page > 0 && postsPerPage > 0 = selectList
        []
        [ Desc PostCreated
        , LimitTo postsPerPage
        , OffsetBy $ (page - 1) * postsPerPage
        ]
    | otherwise = return []
-- | Convert an 'Event' into a 'Post', carrying over user, creation time,
-- title and content ('RecordWildCards' brings the event fields into scope).
mkPostFromEvent :: Event -> Post
mkPostFromEvent Event {..} =
    Post eventUser eventCreated Nothing eventTitle eventContent
|
isankadn/yesod-testweb-full
|
Model/Post.hs
|
Haskell
|
mit
| 524
|
{-# OPTIONS -Wall -Werror #-}
import Control.Applicative
import Control.Exception
import Control.Monad
import Data.List
import System.Directory
import System.Environment
import System.IO
-- dist .lib include
-- | Entry point: expects three arguments — the dist file to edit, the
-- library directory and the include directory (extra arguments are ignored).
main :: IO ()
main = getArgs >>= write
    where
        -- NOTE: 'readFile' is lazy; 'writeIncludes' forces the contents via
        -- 'hPutStr' before the original file is removed.
        write (dist : lib : inc : _) = readFile dist >>= writeIncludes dist inc >> writePragmas dist lib
        write _ = error "write"
-- | Append a @#pragma comment( lib, ... )@ line to the file for every
-- @.lib@ found in the given directory.
writePragmas :: FilePath -> FilePath -> IO ()
writePragmas x = appendFile x . makeContents <=< getDirectoryContents
-- | Prepend @#include <llvm/...>@ lines (one per header found under the
-- include directory) to the given file contents, going through a temp file
-- that is cleaned up if anything throws.
writeIncludes :: FilePath -> FilePath -> String -> IO ()
writeIncludes x y zs = bracketOnError (openTempFile "." "temp") finalize editTemp
    where
        -- On exception: close and drop the half-written temp file.
        finalize (tempName, tempHandle) = hClose tempHandle >> removeFile tempName
        -- Happy path: write includes + original contents, then replace the
        -- original file (remove + rename; not atomic).
        editTemp (tempName, tempHandle) = searchIncludes y
            >>= hPutStr tempHandle . (++ zs) . unlines . map addinc
            >> hClose tempHandle >> removeFile x >> renameFile tempName x
        addinc s = "#include <llvm/" ++ s ++ ">"
-- | Recursively collect every header file (as a path relative to the given
-- directory) under a directory tree.
searchIncludes :: FilePath -> IO [FilePath]
searchIncludes dir = searchDir dir =<< getDirectoryContents dir
-- | Walk a directory listing: keep @.h@ files, skip every other dotted entry
-- (which also filters out "." and ".."), and recurse into the rest as
-- subdirectories.
searchDir :: FilePath -> [FilePath] -> IO [FilePath]
searchDir _ [] = return []
searchDir x (z : zs)
    | ".h" `isSuffixOf` z = (z :) <$> res
    | '.' `elem` z = res
    | otherwise = (++) <$> indir <*> res
    where
        x' = x ++ '/' : z
        -- Results from the subdirectory, re-prefixed with its name.
        indir = map ((z ++) . ('/' :)) <$> searchIncludes x'
        -- Results from the remaining siblings.
        res = searchDir x zs
-- | Turn a directory listing into pragma text: a leading blank line followed
-- by one @#pragma comment( lib, ... )@ line per @.lib@ entry, in order.
makeContents :: [FilePath] -> String
makeContents files = unlines ("" : map pragmaLine libs)
  where
    libs = filter (".lib" `isSuffixOf`) files
    pragmaLine lib = "#pragma comment( lib, \"" ++ lib ++ "\" )"
|
MichaeGon/pragma-maker-for-LLVM
|
pragma-maker-old.hs
|
Haskell
|
mit
| 1,604
|
-- Warm-up exercises from "Learn You a Haskell".

-- Double a single number.
doubleMe n = n + n

-- Double two numbers and add the results.
doubleUs a b = doubleMe a + doubleMe b

-- Double a number unless it is already bigger than 100.
doubleSmallNumber n
  | n > 100   = n
  | otherwise = n * 2

-- Same as 'doubleSmallNumber', then add one.
doubleSmallNumber' n = doubleSmallNumber n + 1

-- Shout at every odd number: small ones go BOOM, the rest go BANG.
boomBangs ns = [ shout n | n <- ns, odd n ]
  where shout n = if n < 10 then "BOOM!" else "BANG!"
|
Sgoettschkes/learning
|
haskell/LearnYouAHaskell/old/baby.hs
|
Haskell
|
mit
| 312
|
module Main where
import Test.Tasty (defaultMain, testGroup, TestTree)
import Utrecht.MasterMind.Test
-- | Run the whole suite with tasty's default runner.
main :: IO ()
main = defaultMain tests
-- | Top-level test tree; currently only the MasterMind suite.
tests :: TestTree
tests = testGroup "All tests" [
        masterMindSuite
    ]
|
kandersen/Utrecht
|
test/Test.hs
|
Haskell
|
mit
| 215
|
{-# LANGUAGE FlexibleContexts, FlexibleInstances, GADTs #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE OverloadedStrings, PatternSynonyms #-}
{-# LANGUAGE RankNTypes, ScopedTypeVariables, StandaloneDeriving #-}
{-# LANGUAGE TemplateHaskell, TypeFamilies, ConstraintKinds #-}
{-# OPTIONS_GHC -fno-warn-partial-type-signatures -fno-warn-missing-signatures #-}
module Commands.RHS.Types where
import Commands.Extra (Exists)
import Control.Lens hiding (Empty) -- TODO
import Data.List.NonEmpty (NonEmpty (..))
import qualified Data.List.NonEmpty as NonEmpty
import Control.Applicative
import Data.Monoid
import GHC.Exts (IsList (..), IsString (..))
-- import Text.Printf
import Prelude
-- | A non-terminal that ignores its grammatical parameters and only carries
-- a name of type @n@.
newtype ConstNonTerminal n t (f :: (* -> *)) a = ConstNonTerminal n
{-| a grammatical right hand side.
-}
data RHS n t f a where
 Pure :: a -> RHS n t f a -- Applicative 'pure'
 Apply :: (RHS n t f (x -> a)) -> (f x) -> RHS n t f a -- Applicative '<*>'
 (:<*>) :: (RHS n t f (x -> a)) -> (RHS n t f x) -> RHS n t f a -- Applicative '<*>'
 Alter :: [RHS n t f a] -> RHS n t f a -- Alternative '<|>' / Monoid '<>'
 Opt :: (Maybe x -> a) -> RHS n t f x -> RHS n t f a -- Alternative 'optional'
 Many :: ([x] -> a) -> RHS n t f x -> RHS n t f a -- Alternative 'many'
 Some :: (NonEmpty x -> a) -> RHS n t f x -> RHS n t f a -- Alternative 'some'
 -- grammar-specific stuff
 Terminal :: (t -> a) -> !t -> RHS n t f a -- grammatical terminal symbol (Coyoneda'd)
 NonTerminal :: (n t f a) -> RHS n t f a -> RHS n t f a -- grammatical non-terminal symbol
 Terminals :: (t -> a) -> RHS n t f a -- a placeholder for a set of terminals (e.g. set of all terminal symbols in the grammar. see 'getTerminals')
-- | @pattern Empty = Alter []@
pattern Empty :: forall (n :: * -> (* -> *) -> * -> *) t (f :: * -> *) a. RHS n t f a
pattern Empty = Alter []
-- | @ConstraintKinds@
type Functor'RHS n t f = (Functor (n t f), Functor f)
-- type RHSFunctorC n t f = (Functor f, Functor (n t f)) ConstraintKinds
deriving instance (Functor (n t f)) => (Functor (RHS n t f)) -- TODO expects constraint:
-- deriving instance () => Data (RHS n t f a)
-- | lawful (coincides with 'Alternative' instance)
-- NOTE(review): on GHC >= 8.4 'Monoid' requires a 'Semigroup' instance;
-- presumably one exists or the codebase targets an older GHC — confirm.
instance (Functor f, Functor (n t f)) => Monoid (RHS n t f a) where
 mempty = Empty
 mappend = (<|>)
{- | mostly lawful. 'fmap' and 'pure' behave lawfully.
left-distributivity of '<*>' over '<|>' is intentionally violated. that is, we want @(x \<|> y) \<*> z@ to be preserved, not to be distributed into @(x \<*> z) \<|> (y \<*> z)@. this helps:
* when @(x \<|> y)@ is actually the infinite @(x \<|> y \<|> ...)@, interpreting the undistributed @(x \<|> y) \<*> z@ might terminate while the @(x \<|> y \<|> ...) \<*> z@ may terminate while the distributed @(x \<*> z) \<|> (y \<*> z) \<|> ...@ will not.
* when the interpretation (e.g. a chart parser) can increase performance by sharing such "inner alternation".
'<*>' is left-associated.
-}
instance (Functor f, Functor (n t f)) => Applicative (RHS n t f) where
 pure = Pure
 Pure xa <*> tx = fmap xa tx -- Functor
 -- Pure {id} <*> x = fmap {id} x = x -- Identity
 -- Pure f <*> Pure x = fmap f (Pure x) = Pure (f x) -- Homomorphism
 txa <*> Pure x = fmap ($ x) txa -- Interchange
 Empty <*> _ = Empty -- left-Annihilation (?)
 _ <*> Empty = Empty -- right-Annihilation
 txa <*> (tyx `Apply` fy) = ((.) <$> txa <*> tyx) `Apply` fy -- Composition
 txa <*> (tyx :<*> ty) = ((.) <$> txa <*> tyx) :<*> ty -- Composition
 -- every remaining case simply wraps the pair in (:<*>), preserving the
 -- right-hand structure instead of distributing into it:
 txa <*> r@(Alter _txs) = txa :<*> r -- NO left-Distributivity
 txa <*> r@(Opt _ysa _ty) = txa :<*> r -- NOTE doesn't distribute, intentionally
 txa <*> r@(Many _ysa _ty) = txa :<*> r -- NOTE doesn't distribute, intentionally
 txa <*> r@(Some _ysa _ty) = txa :<*> r -- NOTE doesn't distribute, intentionally
 txa <*> r@(Terminal _i _t) = txa :<*> r
 txa <*> r@(NonTerminal _l _r) = txa :<*> r -- NOTE preserving sharing is critical for the observers sharing interface
 txa <*> r@(Terminals _i) = txa :<*> r -- NOTE greatly simplifies "self-referential" grammars (self-recursive grammars are already simple)
-- | lawful.
instance (Functor f, Functor (n t f)) => Alternative (RHS n t f) where
 empty = Empty
 Empty <|> y = y -- Left-Identity
 x <|> Empty = x -- Right-Identity
 x <|> y = Alter (toRHSList x <> toRHSList y) -- Associativity
 {-# INLINE (<|>) #-}
 many = Many id
 {-# INLINE many #-}
 some = fmap NonEmpty.toList . Some id
 {-# INLINE some #-}
{- | both token and result must be an (instance of) 'IsString'.
(see <http://chrisdone.com/posts/haskell-constraint-trick the constraint trick>)
@t@ can default to String.
-}
instance (IsString t, Show t, a ~ t) => IsString (RHS n t f a) where --TODO remove Show constraint or show it's needed for defaulting
 -- A string literal becomes a single 'Terminal' whose result is the token itself.
 fromString s = Terminal id t where t = fromString s
 {-# INLINEABLE fromString #-}
-- instance (IsString t, Show t, a ~ t) => IsString (RHS n t f a) where fromString = Terminal id . fromString
-- instance (IsString t, Show t, a ~ String) => IsString (RHS n t f a) where fromString = Terminal show . fromString
-- instance (IsString t, Show t) => IsString (RHS n t f String) where fromString = Terminal show . fromString
-- instance (IsString t) => IsString (RHS n String f t) where fromString = Terminal fromString
-- instance (IsString t, Show t) => IsString (RHS n t f t) where fromString = Terminal id . fromString
-- | @([r1,r2,r3] :: RHS n t f a)@ is @('mconcat' [r1,r2,r3])@ is @('asum' [r1,r2,r3])@ is @(r1 '<|>' r2 '<|>' r3)@
instance IsList (RHS n t f a) where
 type Item (RHS n t f a) = RHS n t f a
 fromList = Alter -- the constructor (rather than a method like "asum") avoids the (Functor f) constraint
 toList = toRHSList
-- ================================================================ --
{-| a "lowered" (existentially-quantified) right hand side.
-}
type RHS0 n t f = Exists (RHS n t f)
-- | e.g. @('RHS' (ConstName n) t f a)@
data ConstName n t (f :: * -> *) a = ConstName { _unConstName :: !n } deriving (Functor)
-- KindSignatures because: f being phantom, it's kind is inferred to be nullary (I think)
-- TODO is PolyKinds better? (f :: k)
deriving instance Show n => Show (ConstName n t f a)
-- | A right-hand side that is polymorphic in its result type.
data SomeRHS n t f = SomeRHS { _unSomeRHS :: forall x. RHS n t f x }
-- ================================================================ --
-- | View an RHS as a list of alternatives: 'Alter' is flattened, anything
-- else becomes a singleton list.
toRHSList :: RHS n t f a -> [RHS n t f a]
toRHSList (Alter xs) = xs
toRHSList x = [x]
{-# INLINE toRHSList #-}
-- ================================================================ --
-- lenses
-- | Traverse the name of the outermost 'NonTerminal', if any.
_RHSName :: Traversal' (RHS n t f a) (n t f a)
_RHSName = _NonTerminal._1
-- | Prism onto the 'NonTerminal' constructor (name paired with its body).
_NonTerminal :: Prism' (RHS n t f a) (n t f a, RHS n t f a)
_NonTerminal = prism (uncurry NonTerminal) $ \case
 NonTerminal l r -> Right (l, r)
 r -> Left r
-- makePrisms ''RHS
makeLenses ''ConstName
makeLenses ''SomeRHS
--TODO refactor -? to .? conflicts with lens?
|
sboosali/commands
|
commands-core/sources/Commands/RHS/Types.hs
|
Haskell
|
mit
| 7,485
|
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE TypeFamilies #-}
-------------------------------------------
-- |
-- Module : Web.Stripe.Dispute
-- Copyright : (c) David Johnson, 2014
-- Maintainer : djohnson.m@gmail.com
-- Stability : experimental
-- Portability : POSIX
--
-- < https:/\/\stripe.com/docs/api#disputes >
--
-- @
-- {-\# LANGUAGE OverloadedStrings \#-}
-- import Web.Stripe
-- import Web.Stripe.Charge
-- import Web.Stripe.Dispute
--
-- main :: IO ()
-- main = do
-- let config = StripeConfig (StripeKey "secret_key")
-- result <- stripe config $ getCharge (ChargeId "charge_id")
-- case result of
-- (Left stripeError) -> print stripeError
-- (Right (Charge { chargeDispute = dispute })) ->
-- case dispute of
-- (Just dispute) -> print dispute
-- Nothing -> print "no dispute on this charge"
-- @
module Web.Stripe.Dispute
( -- * API
UpdateDispute
, updateDispute
, CloseDispute
, closeDispute
-- * Types
, ChargeId (..)
, Dispute (..)
, DisputeReason (..)
, DisputeStatus (..)
, Evidence (..)
, MetaData (..)
) where
import Web.Stripe.StripeRequest (Method (POST),
StripeHasParam, StripeRequest (..),
StripeReturn,
mkStripeRequest)
import Web.Stripe.Util ((</>))
import Web.Stripe.Types (ChargeId (..), Dispute (..),
DisputeReason (..),
DisputeStatus (..),
Evidence (..), MetaData(..))
import Web.Stripe.Types.Util (getChargeId)
------------------------------------------------------------------------------
-- | `Dispute` to be updated
--
-- Issues a POST to @charges/{id}/dispute@; evidence and metadata may be
-- attached via the 'StripeHasParam' instances below.
updateDispute
    :: ChargeId -- ^ The ID of the Charge being disputed
    -> StripeRequest UpdateDispute
updateDispute
    chargeId = request
  where request = mkStripeRequest POST url params
        url = "charges" </> getChargeId chargeId </> "dispute"
        params = []
-- | Phantom request tag for updating a dispute.
data UpdateDispute
type instance StripeReturn UpdateDispute = Dispute
instance StripeHasParam UpdateDispute Evidence
instance StripeHasParam UpdateDispute MetaData
------------------------------------------------------------------------------
-- | `Dispute` to be closed
--
-- Issues a POST to @charges/{id}/dispute/close@; takes no extra parameters.
closeDispute
    :: ChargeId -- ^ The ID of the Charge being disputed
    -> StripeRequest CloseDispute
closeDispute
    chargeId = request
  where request = mkStripeRequest POST url params
        url = "charges" </> getChargeId chargeId </> "dispute" </> "close"
        params = []
-- | Phantom request tag for closing a dispute.
data CloseDispute
type instance StripeReturn CloseDispute = Dispute
|
dmjio/stripe
|
stripe-core/src/Web/Stripe/Dispute.hs
|
Haskell
|
mit
| 2,913
|
module Game.Poker.Cards
( Suit(..)
, Card
, allCards
, cardSuit
, cardNumber
, cardStrength
) where
-- | 4 types of card
--
-- >>> Hearts -- Show
-- Hearts
--
-- >>> read "Hearts" :: Suit -- Read
-- Hearts
--
-- >>> Hearts == Hearts -- Eq
-- True
--
-- >>> Hearts == Spades -- Eq
-- False
--
-- >>> Hearts < Diamonds -- Ord
-- True
--
-- >>> succ Hearts -- Enum
-- Diamonds
--
-- Ordering and enumeration follow declaration order:
-- Hearts < Diamonds < Clubs < Spades.
data Suit = Hearts | Diamonds | Clubs | Spades
  deriving (Show, Read, Eq, Ord, Enum)
-- | One playing card
--
-- The 'Int' is the card number; 'allCards' uses 2..14 with 14 = ace.
--
-- >>> Card 1 Hearts == Card 2 Hearts -- Eq
-- False
--
-- >>> Card 1 Hearts < Card 2 Hearts -- Ord
-- True
data Card = Card Int Suit
  deriving (Eq, Ord)
-- | Render a card number as a fixed-width two-character string.
--
-- In order that "K < A" equals True,
-- consider 14 as the ace
--
-- >>> showCardNumber 14
-- "A_"
--
-- >>> showCardNumber 4
-- "4_"
showCardNumber :: Int -> String
showCardNumber n = case n of
  14 -> "A_"
  13 -> "K_"
  12 -> "Q_"
  11 -> "J_"
  10 -> "10"
  _  -> show n ++ "_"
-- | Show typeclass of Card
--
-- One-letter suit prefix followed by the two-character number rendering
-- from 'showCardNumber'.
--
-- >>> show $ Card 1 Hearts
-- "H1_"
--
-- >>> show $ Card 14 Diamonds
-- "DA_"
--
-- >>> show $ Card 11 Clubs
-- "CJ_"
--
-- >>> show $ Card 10 Spades
-- "S10"
instance Show Card where
  show (Card i Hearts) = "H" ++ showCardNumber i
  show (Card i Diamonds) = "D" ++ showCardNumber i
  show (Card i Clubs) = "C" ++ showCardNumber i
  show (Card i Spades) = "S" ++ showCardNumber i
-- | All 52 cards, grouped by suit (Hearts first); numbers run 2..14 within
-- each suit.
--
-- >>> length allCards
-- 52
--
-- >>> take 13 $ allCards
-- [H2_,H3_,H4_,H5_,H6_,H7_,H8_,H9_,H10,HJ_,HQ_,HK_,HA_]
--
-- >>> reverse $ take 13 $ reverse allCards
-- [S2_,S3_,S4_,S5_,S6_,S7_,S8_,S9_,S10,SJ_,SQ_,SK_,SA_]
--
allCards :: [Card]
allCards = concatMap perSuit [Hearts ..]
  where
    perSuit s = [ Card n s | n <- [2 .. 14] ]
-- | Extract the 'Suit' of a card.
--
-- >>> cardSuit $ Card 10 Hearts
-- Hearts
cardSuit :: Card -> Suit
cardSuit (Card _ s) = s
-- | Extract the number of a card.
--
-- >>> cardNumber $ Card 10 Hearts
-- 10
cardNumber :: Card -> Int
cardNumber (Card n _) = n
-- | Strength of a card; currently just its number (2..14, ace high).
-- Fix: the binding previously had no top-level type signature (a -Wall
-- warning, and fragile under the monomorphism restriction).
--
-- >>> cardStrength . head $ allCards
-- 2
cardStrength :: Card -> Int
cardStrength = cardNumber
|
tobynet/java-poker
|
src/Game/Poker/Cards.hs
|
Haskell
|
mit
| 2,264
|
{-# LANGUAGE RankNTypes, ImpredicativeTypes, LiberalTypeSynonyms #-}
module Treb.Config (withTrebEnv) where
import qualified Hasql as H
import qualified Hasql.Postgres as HP
import qualified Data.Map as M
import qualified Data.ByteString.Lazy as B
import qualified Data.ByteString.Char8 as BC
import qualified Database.MySQL.Simple as MySQL
import Control.Concurrent.STM.TVar
import Control.Exception
import Control.Monad
import Control.Monad.IO.Class
import Control.Monad.Trans.Except
import Control.Monad.STM
import Data.Aeson
import Data.Bool
import Data.Bits (xor)
import Data.Either (either)
import Data.Maybe
import Network.URI
import System.FilePath
import System.INotify
import System.Directory
import System.Random
import System.Environment (getArgs)
import System.IO.Error
import Text.Read (readEither)
import Treb.Combinators
import Treb.JSON ()
import Treb.Types
import Treb.Routes.Types
-- Exported Functions --
-- | Construct a Trebuchet environment and pass it into a function in IO. This
-- environment includes database connections and similar.
-- If building the environment fails, the error is printed to stdout with an
-- "ERROR: " prefix and the callback is never invoked.
withTrebEnv :: (TrebEnv -> IO ()) -> IO ()
withTrebEnv f = runExceptT getEnv >>= either (putStrLn . ("ERROR: " ++)) f
-- Hidden Functions --
-- | Build the full 'TrebEnv': parse CLI arguments, acquire the PostgreSQL
-- pool, validate SSL and job-template settings, start the inotify watcher,
-- connect to the OpenAtrium MySQL database (unless in debug mode) and
-- initialise the shared TVars. Fails with a human-readable message.
getEnv :: ExceptT String IO TrebEnv
getEnv = do
    -- Generate configuration from command line arguments
    conf <- processArgs defaultTrebConfig =<< liftIO getArgs
    -- Create a pool of connections to PostgreSQL
    pgPool <- getPool conf
    -- Check that SSL-related command line arguments are well formed
    exceptIf
        (isJust (confSSLCertPath conf) `xor` isJust (confSSLCertKeyPath conf))
        $ "SSL requires both -c/--ssl-certificate and -k/--ssl-certificate-key to be set."
    -- Check that the job template directory exists.
    -- BUG FIX: this previously used 'doesFileExist' (which is False for a
    -- directory) and threw when the check came back True, so the "not found"
    -- error could never fire and a missing directory slipped through.
    let jobTemplateDir = confJobTemplateDir conf
    cwd <- liftIO getCurrentDirectory
    jobTemplateDirExists <- liftIO $ doesDirectoryExist jobTemplateDir
    exceptIf
        (not jobTemplateDirExists)
        $ "Job template directory '" ++ (cwd </> jobTemplateDir) ++ "' not found."
    -- Create TVar for updating the job templates available to HTTP request handlers
    jobTemplates <- liftIO $ newTVarIO []
    -- Begin watching job_templates directory and automatically update the internal job templates accordingly
    liftIO $ do
        let updateJobTemplates = getJobTemplates jobTemplateDir >>= atomically . writeTVar jobTemplates
        putStrLn "Initializing event watchers for job templates directory."
        inotify <- initINotify
        addWatch inotify [Create, Delete, Modify, MoveIn, MoveOut] jobTemplateDir $
            const $ updateJobTemplates
                >> putStrLn "Job Templates Updated."
        putStrLn "> Done."
        putStrLn "Settings initial Job Templates."
        updateJobTemplates
        putStrLn "> Done."
    -- Connect to the Drupal/OpenAtrium MySQL database for authentication and authorization
    drupalMySQLConn <- unlessDebugMode conf $ do
        mapM_ (\(attr, msg) ->
                  exceptIf
                      (isNothing $ attr conf)
                      $ msg ++ " for OpenAtrium database not given.")
              [ (confOAHost, "Host")
              , (confOAPort, "Port")
              , (confOADatabase, "Database name")
              , (confOAUsername, "Username")
              , (confOAPassword, "Password") ] :: ExceptT String IO ()
        liftIO $ putStrLn "Connecting to Drupal/OpenAtrium MySQL database."
        -- fromJust is safe below: presence was checked just above.
        oaPort <- either throwE return $ readEither $ fromJust $ confOAPort conf
        ret <- liftIO $ MySQL.connect $
            MySQL.defaultConnectInfo
                { MySQL.connectHost = fromJust $ confOAHost conf
                , MySQL.connectPort = oaPort
                , MySQL.connectDatabase = fromJust $ confOADatabase conf
                , MySQL.connectUser = fromJust $ confOAUsername conf
                , MySQL.connectPassword = fromJust $ confOAPassword conf }
        liftIO $ putStrLn "> Done."
        return ret
    activeUploads <- liftIO $ newTVarIO M.empty
    uploadIdGen <- liftIO $ newTVarIO =<< getStdGen
    -- Validate and parse the base URI.
    maybe (throwE "No --base-uri specified.")
          (bool (throwE "Invalid --base-uri given.")
                (return ()))
          (isURI <$> confBaseURI conf)
    baseURI <- fromMaybe
        (throwE "Failed to parse value given to --base-uri.")
        (confBaseURI conf >>= fmap pure . parseURI)
    -- Construct the Trebuchet environment
    return TrebEnv
        { trebEnvJobTemplates = jobTemplates
        , trebEnvDrupalMySQLConn = drupalMySQLConn
        , trebEnvUsername = Nothing
        , trebEnvConfig = conf
        , trebEnvPgPool = pgPool
        , trebEnvActiveUploads = activeUploads
        , trebEnvCurrentUser = Nothing
        , trebEnvUploadIdGen = uploadIdGen
        , trebEnvBaseURI = baseURI }
-- | Fold the raw command-line argument list into a 'TrebConfig'.
-- Clauses are tried top to bottom; unknown arguments abort with an error.
processArgs :: TrebConfig -> [String] -> ExceptT String IO TrebConfig
processArgs conf [] = pure conf
processArgs conf (x:xs) | x == "-d" || x == "--debug" = processArgs (conf { confDebugMode = True }) xs
processArgs conf (x:y:xs) | x == "-c" || x == "--ssl-certificate" = processArgs (conf { confSSLCertPath = Just y }) xs
processArgs conf (x:y:xs) | x == "-k" || x == "--ssl-certificate-key" = processArgs (conf { confSSLCertKeyPath = Just y }) xs
processArgs conf (x:y:xs) | x == "-t" || x == "--job-template-directory" = processArgs (conf { confJobTemplateDir = y }) xs
processArgs conf (x:y:xs) | x == "-p" || x == "--port" = either
                                                             throwE
                                                             (\p -> processArgs (conf { confPort = p }) xs)
                                                             (readEither y)
processArgs conf (x:y:xs) | x == "-H" || x == "--oa-host" = processArgs (conf { confOAHost = Just y }) xs
processArgs conf (x:y:xs) | x == "-P" || x == "--oa-port" = processArgs (conf { confOAPort = Just y }) xs
processArgs conf (x:y:xs) | x == "-D" || x == "--oa-database" = processArgs (conf { confOADatabase = Just y }) xs
processArgs conf (x:y:xs) | x == "-U" || x == "--oa-username" = processArgs (conf { confOAUsername = Just y }) xs
-- BUG FIX: "-P" was also listed for the password, but the earlier --oa-port
-- clause always matched "-P" first, so the password could never be set via
-- the short flag. Only the (previously reachable) long form is kept.
processArgs conf (x:y:xs) | x == "--oa-password" = processArgs (conf { confOAPassword = Just y }) xs
processArgs conf (x:y:xs) | x == "-C" || x == "--oa-cookie-domain" = processArgs (conf { confOADomain = Just y }) xs
processArgs conf (x:y:xs) | x == "-h" || x == "--pg-host" = processArgs (conf { confPGHost = Just y }) xs
processArgs conf (x:y:xs) | x == "-b" || x == "--pg-port" = processArgs (conf { confPGPort = Just y }) xs
processArgs conf (x:y:xs) | x == "-u" || x == "--pg-username" = processArgs (conf { confPGUsername = Just y }) xs
processArgs conf (x:y:xs) | x == "-w" || x == "--pg-password" = processArgs (conf { confPGPassword = Just y }) xs
processArgs conf (x:y:xs) | x == "-s" || x == "--pg-database" = processArgs (conf { confPGDatabase = Just y }) xs
processArgs conf (x:y:xs) | x == "-m" || x == "--pg-pool-max" = processArgs (conf { confPGPoolMax = Just y }) xs
processArgs conf (x:y:xs) | x == "-l" || x == "--pg-conn-lifetime" = processArgs (conf { confPGConnLifetime = Just y }) xs
processArgs conf (x:y:xs) | x == "-B" || x == "--base-uri" = processArgs (conf { confBaseURI = Just y }) xs
processArgs conf (x:_) = throwE $ "ERROR: Invalid command-line argument \'" ++ x ++ "\'."
-- | Validate the PostgreSQL-related settings and acquire a hasql pool.
getPool :: TrebConfig -> ExceptT String IO (H.Pool HP.Postgres)
getPool conf = do
    -- Every PG option is required; report the first missing one.
    mapM_ (\(attr, msg) ->
              exceptIf
                  (isNothing $ attr conf)
                  $ msg ++ " for PostgreSQL database not given.")
          [ (confPGHost, "Host")
          , (confPGPort, "Port")
          , (confPGUsername, "Username")
          , (confPGPassword, "Password")
          , (confPGDatabase, "Database name")
          , (confPGPoolMax, "Maximum pool size")
          , (confPGConnLifetime, "Connection duration") ]
    -- fromJust is safe below: presence was checked just above.
    pgPort <- either throwE return $ readEither $ fromJust $ confPGPort conf
    pgPoolMax <- either throwE return $ readEither $ fromJust $ confPGPoolMax conf
    pgConnLifetime <- either throwE return $ readEither $ fromJust $ confPGConnLifetime conf
    maybe
        (throwE "Invalid PostgreSQL pool settings.")
        (liftIO . uncurry H.acquirePool)
        $ (,) <$> (HP.ParamSettings <$> fmap BC.pack (confPGHost conf)
                                    <*> pure pgPort
                                    <*> fmap BC.pack (confPGUsername conf)
                                    <*> fmap BC.pack (confPGPassword conf)
                                    <*> fmap BC.pack (confPGDatabase conf))
              <*> (fromMaybe Nothing $ H.poolSettings <$> pure pgPoolMax
                                                      <*> pure pgConnLifetime)
-- | Read and decode every job-template JSON file in the given directory.
-- A missing directory is created (and treated as empty) rather than fatal;
-- files that fail to decode are reported on stdout and skipped.
getJobTemplates :: FilePath -> IO [JobTemplate]
getJobTemplates templateDir = do
    -- Get a list of job template file names
    templateFiles' <- getDirectoryContents templateDir `catch` \e ->
        if isDoesNotExistError e then do
            fullTemplateDir <- makeAbsolute templateDir
            putStrLn $ "ERROR: Job template specification directory '" ++ fullTemplateDir ++ "' does not exist."
            createDirectoryIfMissing False fullTemplateDir
            putStrLn $ "Made new directory '" ++ fullTemplateDir ++ "'."
            return []
        else
            throw e
    templateFiles <- filterM doesFileExist $ map (templateDir </>) templateFiles'
    -- Get a list of decoded job templates
    jobTemplates <- mapM (fmap eitherDecode . B.readFile) templateFiles
    -- Print an error on each failure to decode a job template.
    let parseResults = [ either (Left . ((,) f)) Right t | (f, t) <- zip templateFiles jobTemplates ]
    results <- mapM (either printError (return . Just)) parseResults
    -- Return only successfully parsed job templates
    return $ map fromJust $ filter isJust results
    where
        -- NOTE: the local binding 'error' shadows Prelude.error here.
        printError (file, error) = do
            putStrLn $ "ERROR: Failed to parse job template JSON: " ++ file ++ "\n\n" ++ error
            return Nothing
-- | Baseline configuration: debug off, port 3000, "job_templates" as the
-- template directory, everything else unset.
-- Fix: the binding previously had no top-level type signature.
defaultTrebConfig :: TrebConfig
defaultTrebConfig = TrebConfig
    { confDebugMode      = False
    , confPort           = 3000
    , confJobTemplateDir = "job_templates"
    , confSSLCertPath    = Nothing
    , confSSLCertKeyPath = Nothing
    , confOAHost         = Nothing
    , confOAPort         = Nothing
    , confOADatabase     = Nothing
    , confOAUsername     = Nothing
    , confOAPassword     = Nothing
    , confOADomain       = Nothing
    , confPGHost         = Nothing
    , confPGPort         = Nothing
    , confPGUsername     = Nothing
    , confPGPassword     = Nothing
    , confPGDatabase     = Nothing
    , confPGPoolMax      = Nothing
    , confPGConnLifetime = Nothing
    , confBaseURI        = Nothing }
-- | Run the action only when debug mode is enabled; 'Nothing' otherwise.
-- Idiom fix: @action >>= return . Just@ replaced by @Just <$> action@.
ifDebugMode :: Monad m => TrebConfig -> m a -> m (Maybe a)
ifDebugMode conf action = bool (return Nothing) (Just <$> action) (confDebugMode conf)
-- | Run the action only when debug mode is disabled; 'Nothing' otherwise.
-- Idiom fix: @action >>= return . Just@ replaced by @Just <$> action@.
unlessDebugMode :: Monad m => TrebConfig -> m a -> m (Maybe a)
unlessDebugMode conf action = bool (Just <$> action) (return Nothing) (confDebugMode conf)
|
MadSciGuys/trebuchet
|
src/Treb/Config.hs
|
Haskell
|
mit
| 11,175
|
module Main where
import Codec.Picture
import Codec.Picture.Types
import Data.Maybe (fromJust)
import Data.Word (Word8)
import Data.List as L (transpose,foldl')
import Text.Printf (printf)
import Control.Arrow ((&&&))
import Options.Applicative
import qualified Data.ByteString as B
import System.IO (stdin)
-- | Command-line options for the converter.
data Options = Options
  { srcFile :: String -- ^ source image path, or "-" for stdin
  , width :: Int -- ^ output width in characters
  , height :: Int -- ^ output height in characters
  , trueColor :: Bool -- ^ True = use 256-color escapes (set by --256-colors; despite the name)
  }
-- | optparse-applicative parser matching the 'Options' fields positionally.
options :: Parser Options
options = Options
    <$> argument str (metavar "SRC" <> help "source file (or - for stdin)")
    <*> argument auto (metavar "WIDTH" <> help "resulting width")
    <*> argument auto (metavar "HEIGHT" <> help "resulting height")
    <*> switch (long "256-colors" <> short 'c' <> help "only use 256-color-mode for old terminals")
-- | 'options' wrapped with --help support and program description.
opthelp :: ParserInfo Options
opthelp = info (helper <*> options)
    ( fullDesc
   <> progDesc "An image to ASCII-Converter"
   <> header "img2ascii - convert images to console-compatible text"
    )
-- | Parse the command line, then run the converter.
main :: IO ()
main = run =<< execParser opthelp
-- | Decode the image (from file or stdin), downsample it to the requested
-- character grid and print it, resetting ANSI attributes after every row.
run :: Options -> IO ()
run (Options src w h redcol) = do
    src' <- if src == "-" then B.getContents else B.readFile src
    case decodeImage src' of
        Left err -> putStrLn err
        Right img ->
            case extractDynImage img >>= pixelize w h of
                Nothing -> return ()
                Just (f,b) ->
                    -- (f,b) are the per-cell foreground/background images.
                    let str = if redcol then img2ascii conv256 (f,b) else img2ascii conv (f,b)
                    in mapM_ (\x -> putStr x >> putStrLn "\x1b[0m") (concat <$> str)
-- | Split a list into chunks of @c@ elements; the final chunk may be shorter.
chunksof :: Int -> [a] -> [[a]]
chunksof _ [] = []
chunksof c xs = front : chunksof c rest
  where
    (front, rest) = splitAt c xs
-- | Render one character cell as a truecolor (24-bit) ANSI escape sequence:
-- background from the second pixel, foreground from the first, and a glyph
-- chosen from the foreground pixel's luma.
conv :: (PixelRGB8,PixelRGB8) -> String
conv (fp@(PixelRGB8 fr fg fb),PixelRGB8 br bg bb) = printf "\x1b[48;2;%d;%d;%dm\x1b[38;2;%d;%d;%dm%c" br bg bb fr fg fb (lumi.computeLuma $ fp)
    where
        -- Map luma to a density ramp of ASCII glyphs.
        lumi :: Word8 -> Char
        lumi x
            | x > 225 = '@'
            | x > 180 = 'O'
            | x > 150 = 'X'
            | x > 50 = 'o'
            | x > 25 = 'x'
            | x > 10 = '.'
            | otherwise = ' '
-- | Like 'conv' but for old terminals: colors are quantised into the
-- 256-color palette's 6x6x6 RGB cube.
conv256 :: (PixelRGB8,PixelRGB8) -> String
conv256 (fp@(PixelRGB8 fr fg fb),PixelRGB8 br bg bb) = printf "\x1b[48;5;%dm\x1b[38;5;%dm%c" bcolor fcolor (lumi.computeLuma $ fp)
    where
        -- converts [0..255] -> [0..5]
        s = (`div` 51)
        -- conversion: 6x6x6 rgb-cube so color is red * 36 + green * 6 + blue + 16 offset with red/green/blue in [0..5]
        bcolor = s br * 36 + s bg * 6 + s bb + 16
        fcolor = s fr * 36 + s fg * 6 + s fb + 16
        -- Map luma to a density ramp of ASCII glyphs (same ramp as 'conv').
        lumi :: Word8 -> Char
        lumi x
            | x > 225 = '@'
            | x > 180 = 'O'
            | x > 150 = 'X'
            | x > 50 = 'o'
            | x > 25 = 'x'
            | x > 10 = '.'
            | otherwise = ' '
-- | Render the (foreground, background) image pair row by row: each cell is
-- the renderer applied to the pixel pair at that coordinate.
img2ascii :: ((PixelRGB8,PixelRGB8) -> String) -> (Image PixelRGB8,Image PixelRGB8) -> [[String]]
img2ascii c (fg@(Image w h _),bg@(Image w' h' _)) = (fmap.fmap) (c.(uncurry (pixelAt fg) &&& uncurry (pixelAt bg))) [[(x,y) | x <- [0..w-1]] | y <- [0..h-1]]
-- | Downsample to a @tw@ x @th@ grid, producing two images: per-cell
-- brightest pixel (foreground) and per-cell darkest pixel (background).
-- Returns Nothing when either sampling window is zero (i.e. a zero-extent
-- source image).
pixelize :: Int -> Int -> Image PixelRGB8 -> Maybe (Image PixelRGB8,Image PixelRGB8)
pixelize tw th im@(Image iw ih id) =
    if windoww == 0 || windowh == 0 then
        Nothing
    else Just (snd $ generateFoldImage (folder filterfun windoww windowh) im tw th,
               snd $ generateFoldImage (folder filterfuninv windoww windowh) im tw th)
    where
        -- Source pixels per output cell, in each dimension.
        windoww = (fromIntegral iw) / fromIntegral tw
        windowh = fromIntegral ih / fromIntegral th
        -- Reduce the pixels around an output cell's anchor with the given
        -- selection function.
        -- NOTE(review): the dy range also uses dw (derived from the
        -- horizontal window ww) — presumably a vertical radius from wh was
        -- intended; confirm before changing.
        folder :: ((PixelRGB8, Int, Int) -> (PixelRGB8, Int, Int) -> (PixelRGB8, Int, Int)) -> Double -> Double -> Image PixelRGB8 -> Int -> Int -> (Image PixelRGB8, PixelRGB8)
        folder f ww wh im@(Image iw ih id) x y = (im,(\(a,_,_) -> a) $ L.foldl' f (pixelAt im x' y',0,0)
                   [ (pixelAt im (x'+dx) (y'+dy),dx,dy)
                   | dx <- [-dw..dw]
                   , dy <- [-dw..dw]
                   , x'+dx > 0 && x'+dx < iw
                   , y'+dy > 0 && y'+dy < ih
                   ])
            where
                dw = floor $ ww
                x' = floor $ fromIntegral x * ww
                y' = floor $ fromIntegral y * wh
        -- Keep the brighter of two pixels (by luma).
        filterfun :: (PixelRGB8,Int,Int) -> (PixelRGB8, Int, Int) -> (PixelRGB8,Int,Int)
        filterfun (x@(PixelRGB8 r g b),_,_) (y@(PixelRGB8 r' g' b'),_,_) = if computeLuma x > computeLuma y then (x,0,0) else (y,0,0)
        -- Keep the darker of two pixels (by luma).
        filterfuninv :: (PixelRGB8,Int,Int) -> (PixelRGB8, Int, Int) -> (PixelRGB8,Int,Int)
        filterfuninv (x@(PixelRGB8 r g b),_,_) (y@(PixelRGB8 r' g' b'),_,_) = if computeLuma x < computeLuma y then (x,0,0) else (y,0,0)
-- | Convert a decoded 'DynamicImage' to RGB8 where a conversion exists;
-- 16-bit and floating-point formats are unsupported and yield Nothing.
extractDynImage :: DynamicImage -> Maybe (Image PixelRGB8)
extractDynImage image =
    case image of
        ImageY8 img -> Just $ promoteImage img
        ImageY16 img -> Nothing
        ImageYF img -> Nothing
        ImageYA8 img -> Just $ promoteImage img
        ImageYA16 img -> Nothing
        ImageRGB8 img -> Just img
        ImageRGB16 img -> Nothing
        ImageRGBF img -> Nothing
        -- Alpha is simply dropped.
        ImageRGBA8 img -> Just $ pixelMap dropTransparency img
        ImageRGBA16 img -> Nothing
        ImageYCbCr8 img -> Just $ convertImage img
        ImageCMYK8 img -> Just $ convertImage img
        ImageCMYK16 img -> Nothing
|
Drezil/img2ascii
|
src/Main.hs
|
Haskell
|
mit
| 5,308
|
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE TypeSynonymInstances #-}
{-# LANGUAGE FlexibleInstances #-}
module Celtchar.Novel.Structure where
import Data.Yaml
import GHC.Generics
-- | Supported novel languages.
data Language = French | English
    deriving (Generic)
-- | A single source document, referenced by file path.
data Document = Document FilePath
    deriving (Generic, Show)
instance FromJSON Document where
    -- A document is encoded as a bare string (its path).
    parseJSON v = Document <$> parseJSON v
-- | A chapter: an optional title plus the documents making up its body.
data Chapter = Chapter { chapterTitle :: Maybe String
                       , documents :: [Document]
                       }
    deriving (Generic, Show)
instance FromJSON Chapter where
    -- "title" is optional; "documents" is required.
    parseJSON (Object v) = Chapter <$> v .:? "title"
                                   <*> v .: "documents"
    -- Fix: a non-object input previously crashed with a pattern-match
    -- failure; it is now reported as an ordinary parse error.
    parseJSON _ = fail "Chapter: expected an object"
-- | A part: a mandatory title plus its chapters.
data Part = Part { partTitle :: String
                 , chapters :: [Chapter]
                 }
    deriving (Generic, Show)
instance FromJSON Part where
    parseJSON (Object v) = Part <$> v .: "title"
                                <*> v .: "chapters"
    -- Fix: a non-object input previously crashed with a pattern-match
    -- failure; it is now reported as an ordinary parse error.
    parseJSON _ = fail "Part: expected an object"
-- | The manuscript proper: an ordered list of parts.
data Manuscript = Manuscript [Part]
    deriving (Generic, Show)
instance FromJSON Manuscript where
    -- Encoded as a bare array of parts.
    parseJSON v = Manuscript <$> parseJSON v
instance FromJSON Language where
    -- Only the exact strings "english" and "french" are accepted.
    parseJSON (String "english") = pure English
    parseJSON (String "french") = pure French
    parseJSON _ = fail "unknown language"
-- | Lower-case names, matching the strings accepted by the FromJSON instance.
instance Show Language where
    show English = "english"
    show French = "french"
-- | Top-level novel description: metadata plus optional front matter and
-- appendix around the manuscript.
data Novel = Novel { author :: String
                   , language :: Language
                   , novelTitle :: String
                   , frontmatter :: Maybe [Chapter]
                   , manuscript :: Manuscript
                   , appendix :: Maybe [Chapter]
                   }
    deriving (Generic, Show)
instance FromJSON Novel where
    -- "frontmatter" and "appendix" are optional; everything else required.
    parseJSON (Object v) = Novel <$> v .: "author"
                                 <*> v .: "language"
                                 <*> v .: "title"
                                 <*> v .:? "frontmatter"
                                 <*> v .: "manuscript"
                                 <*> v .:? "appendix"
    -- Fix: a non-object input previously crashed with a pattern-match
    -- failure; it is now reported as an ordinary parse error.
    parseJSON _ = fail "Novel: expected an object"
-- | Load a novel description from a YAML file, returning either a
-- pretty-printed parse error or the parsed 'Novel'.
getNovelStructure :: FilePath -> IO (Either String Novel)
getNovelStructure conf = do
    decoded <- decodeFileEither conf
    pure $ case decoded of
        Right novel -> Right novel
        Left ex -> Left (prettyPrintParseException ex)
|
ogma-project/celtchar
|
src/Celtchar/Novel/Structure.hs
|
Haskell
|
mit
| 2,311
|
module Handler.EditThread where
import Authentification (isModeratorBySession, getThreadPermissions)
import Captcha
import CustomForms (threadMForm)
import Import
import Helper (spacesToMinus)
import Widgets (threadWidget, postWidget, accountLinksW)
-- | Render the edit form for a thread. Only the thread's author or a
-- moderator may see it; everyone else is redirected away.
getEditThreadR :: ThreadId -> Handler Html
getEditThreadR tid = do
    -- db && auth
    (thread, isMod) <- runDB $ do
        t <- get404 tid
        isMod <- isModeratorBySession
        return (t, isMod)
    isAuthor <- getThreadPermissions thread
    case (isAuthor || isMod) of
        True -> do
            -- captcha: store the expected answer in the session for the POST handler
            equation <- liftIO $ createMathEq
            setSession "captcha" (eqResult equation)
            -- form, pre-filled with the current title and content
            (widget, enctype) <- generateFormPost $ threadMForm equation "Update thread" (Just $ threadTitle thread) (Just $ threadContent thread)
            -- widgets
            let headline = threadTitle thread
                leftWidget = threadWidget isMod tid thread
                rightWidget = postWidget enctype widget
            defaultLayout $(widgetFile "left-right-layout")
        (_) -> redirectUltDest HomeR
-- | Handle the submitted edit form: re-check permissions, verify the
-- captcha answer stored by the GET handler, then replace the thread while
-- keeping its existing posts and creator.
postEditThreadR :: ThreadId -> Handler Html
postEditThreadR tid = do
    -- captcha: read the expected answer, then immediately store a fresh one
    -- for any re-rendered form
    captcha <- getCaptchaBySession
    equation <- liftIO $ createMathEq
    setSession "captcha" (eqResult equation)
    -- db & auth
    (thread, isMod) <- runDB $ do
        t <- get404 tid
        isMod <- isModeratorBySession
        return (t, isMod)
    isAuthor <- getThreadPermissions thread
    -- widgets
    let headline = threadTitle thread
        leftWidget = threadWidget isMod tid thread
    case (isAuthor || isMod) of
        True -> do
            ((result, widget),enctype)<- runFormPost $ threadMForm equation "Update thread" (Just $ threadTitle thread) (Just $ threadContent thread)
            case result of
                (FormSuccess mthread) -> do
                    let newThread = mthread (threadCreator thread)
                    case (threadCaptcha newThread) == captcha of
                        True -> do
                            -- keep original posts and creator regardless of the form data
                            runDB $ replace tid $ newThread {threadPosts = (threadPosts thread), threadCreator = (threadCreator thread)}
                            redirect $ ThreadR (spacesToMinus $ threadTitle newThread)
                        False -> do
                            let rightWidget = [whamlet|<span .simpleBlack> Sorry, the captcha is wrong|] >> postWidget enctype widget
                            defaultLayout $(widgetFile "left-right-layout")
                (FormFailure (err:_)) -> do
                    let rightWidget = [whamlet|<span .simpleBlack> #{err}|] >> postWidget enctype widget
                    defaultLayout $(widgetFile "left-right-layout")
                (_) -> do
                    let rightWidget = [whamlet|<span .simpleBlack> Something went wrong, please try again|] >> postWidget enctype widget
                    defaultLayout $(widgetFile "left-right-layout")
        (_) -> redirectUltDest HomeR
|
cirquit/HaskellPie
|
HaskellPie/Handler/EditThread.hs
|
Haskell
|
mit
| 3,045
|
module Lexer where
import Text.Parsec.String (Parser)
import Text.Parsec.Language (emptyDef)
import qualified Text.Parsec.Token as Tok
-- | Token parser for the Kaleidoscope language: '#' line comments,
-- reserved operators @+ * - ;@ and reserved words @def@ / @extern@.
lexer :: Tok.TokenParser ()
lexer = Tok.makeTokenParser style
  where
    ops = ["+","*","-",";"]
    names = ["def","extern"]
    style = emptyDef {
             Tok.commentLine = "#"
           ,Tok.reservedOpNames = ops
           ,Tok.reservedNames = names
           }

-- The remaining definitions are thin, whitespace-aware wrappers around the
-- shared 'lexer'; each consumes trailing whitespace/comments after its token.

-- | Integer literal.
integer :: Parser Integer
integer = Tok.integer lexer
-- | Floating-point literal.
float :: Parser Double
float = Tok.float lexer
-- | Parser bracketed by '(' and ')'.
parens :: Parser a -> Parser a
parens = Tok.parens lexer
-- | Zero or more occurrences separated by commas.
commaSep :: Parser a -> Parser [a]
commaSep = Tok.commaSep lexer
-- | Zero or more occurrences separated by semicolons.
semiSep :: Parser a -> Parser [a]
semiSep = Tok.semiSep lexer
-- | Identifier that is not a reserved word.
identifier :: Parser String
identifier = Tok.identifier lexer
-- | Match a specific reserved word.
reserved :: String -> Parser ()
reserved = Tok.reserved lexer
-- | Match a specific reserved operator.
reservedOp :: String -> Parser ()
reservedOp = Tok.reservedOp lexer
|
raulfpl/kaleidoscope
|
src/chapter2/Lexer.hs
|
Haskell
|
mit
| 946
|
{-# LANGUAGE MultiParamTypeClasses #-}
module Core
(
NonUnitVector
, UnitVector
, Point(..)
, Ray(..)
, Transform(..)
, RayPosition
, VectorUnaryOps(..)
, VectorBinaryOps(..)
, RefractiveIndex
, RayWithMedium(..)
, vector
, normal
, unsafeForceUnitVector
, origin
, to
, normalize
, normalizeWithLength
, at
, toRayPosition
, magnitude
, magnitudeSquared
, unitX
, unitY
, unitZ
, perpendiculars
, calculateReflection
, calculateRefraction
, refractiveIndexAir
, refractiveIndexGlass
)
where
import Numeric.FastMath ( )
import Control.DeepSeq ( NFData(..) )
-- | Raw 3D vector of strict components.  Not exported directly; wrapped by
-- 'NonUnitVector' / 'UnitVector' so unit-ness is tracked in the types.
data Vector = Vector !Double !Double !Double
instance NFData Vector where
    rnf !(Vector !_ !_ !_) = ()
-- | A vector of arbitrary (possibly non-unit) length.
newtype NonUnitVector = NonUnitVector Vector
instance NFData NonUnitVector where
    rnf !(NonUnitVector !(Vector !_ !_ !_)) = ()
-- | A vector whose invariant is unit length; constructed only via
-- 'normalize', 'normal', the unit axes, or 'unsafeForceUnitVector'.
newtype UnitVector = UnitVector Vector
instance NFData UnitVector where
    rnf !(UnitVector !(Vector !_ !_ !_)) = ()
-- | A position in 3D space (distinct from a displacement vector).
data Point = Point !Double !Double !Double
instance NFData Point where
    rnf !(Point !_ !_ !_) = ()
-- | Half-line from an origin point along a unit direction.
data Ray = Ray
  { rayOrigin    :: !Point
  , rayDirection :: !UnitVector
  }
-- | Scalar parameter along a ray (distance from the origin).
newtype RayPosition = RayPosition Double
  deriving (Eq, Ord)
-- | Things that can be translated by a displacement vector.
class Transform t where
    translate :: NonUnitVector -> t -> t
-- | Operations on a single vector, shared by both vector flavours.
class VectorUnaryOps v where
    neg :: v -> v
    vectorValues :: v -> (Double, Double, Double)
    (|*|) :: v -> Double -> NonUnitVector
    rotateAround :: v -> UnitVector -> Double -> v
-- | Pairwise operations for any combination of the two vector flavours;
-- @(|-|)@ has a default in terms of @(|+|)@ and 'neg'.
class (VectorUnaryOps v1, VectorUnaryOps v2) => VectorBinaryOps v1 v2 where
    (|.|) :: v1 -> v2 -> Double
    (|+|) :: v1 -> v2 -> NonUnitVector
    (|-|) :: v1 -> v2 -> NonUnitVector
    (|-|) v1 v2 = v1 |+| neg v2
    cross :: v1 -> v2 -> NonUnitVector
-- | Translating a point adds the displacement componentwise.
instance Transform Point where
    translate (NonUnitVector (Vector !vx !vy !vz)) (Point !px !py !pz) =
        Point (px + vx)
              (py + vy)
              (pz + vz)
-- | Translating a ray moves its origin; the direction is unchanged.
instance Transform Ray where
    translate !v (Ray !ro !rd) =
        Ray { rayOrigin = translate v ro
            , rayDirection = rd
            }
instance VectorUnaryOps NonUnitVector where
    neg (NonUnitVector (Vector !xv !yv !zv)) =
        vector (-xv)
               (-yv)
               (-zv)
    vectorValues (NonUnitVector (Vector !x !y !z)) =
        (x, y, z)
    (|*|) (NonUnitVector (Vector !vx !vy !vz)) !s =
        vector (vx * s)
               (vy * s)
               (vz * s)
    -- Rodrigues' rotation formula: rotate v by angle theta about axis k.
    -- Result is non-unit, so no renormalisation is needed here.
    rotateAround v k theta =
        (v |*| cosTheta ) |+| ((k `cross` v) |*| sinTheta) |+| (k |*| ((k |.| v) * (1.0 - cosTheta)))
      where
        cosTheta = cos theta
        sinTheta = sin theta
instance VectorUnaryOps UnitVector where
    -- Negation preserves unit length, so the wrapper is rebuilt directly.
    neg (UnitVector (Vector !xv !yv !zv)) =
        UnitVector (Vector (-xv)
                           (-yv)
                           (-zv))
    vectorValues (UnitVector (Vector !x !y !z)) =
        (x, y, z)
    (|*|) (UnitVector (Vector !vx !vy !vz)) !s =
        vector (vx * s)
               (vy * s)
               (vz * s)
    -- Rodrigues again, but renormalised to repair floating-point drift and
    -- keep the UnitVector invariant.
    rotateAround v k theta =
        normalize ((v |*| cosTheta ) |+|
                   ((k `cross` v) |*| sinTheta) |+|
                   (k |*| ((k |.| v) * (1.0 - cosTheta))))
      where
        cosTheta = cos theta
        sinTheta = sin theta
-- The four instances below are the same dot/add/cross arithmetic repeated
-- for every (NonUnit, Unit) pairing; only the pattern-matched wrappers
-- differ.  Kept expanded (rather than via a shared helper) — presumably for
-- inlining/strictness with FastMath; TODO confirm before deduplicating.
instance VectorBinaryOps NonUnitVector NonUnitVector where
    (|.|) (NonUnitVector (Vector !vx !vy !vz)) (NonUnitVector (Vector !wx !wy !wz)) =
        vx * wx
        + vy * wy
        + vz * wz
    (|+|) (NonUnitVector (Vector !vx !vy !vz)) (NonUnitVector (Vector !wx !wy !wz)) =
        vector (vx + wx)
               (vy + wy)
               (vz + wz)
    cross (NonUnitVector (Vector !vx !vy !vz)) (NonUnitVector (Vector !wx !wy !wz)) =
        vector (vy * wz - vz * wy)
               (vz * wx - vx * wz)
               (vx * wy - vy * wx)
instance VectorBinaryOps NonUnitVector UnitVector where
    (|.|) (NonUnitVector (Vector !vx !vy !vz)) (UnitVector (Vector !wx !wy !wz)) =
        vx * wx
        + vy * wy
        + vz * wz
    (|+|) (NonUnitVector (Vector !vx !vy !vz)) (UnitVector (Vector !wx !wy !wz)) =
        vector (vx + wx)
               (vy + wy)
               (vz + wz)
    cross (NonUnitVector (Vector !vx !vy !vz)) (UnitVector (Vector !wx !wy !wz)) =
        vector (vy * wz - vz * wy)
               (vz * wx - vx * wz)
               (vx * wy - vy * wx)
instance VectorBinaryOps UnitVector UnitVector where
    (|.|) (UnitVector (Vector !vx !vy !vz)) (UnitVector (Vector !wx !wy !wz)) =
        vx * wx
        + vy * wy
        + vz * wz
    (|+|) (UnitVector (Vector !vx !vy !vz)) (UnitVector (Vector !wx !wy !wz)) =
        vector (vx + wx)
               (vy + wy)
               (vz + wz)
    cross (UnitVector (Vector !vx !vy !vz)) (UnitVector (Vector !wx !wy !wz)) =
        vector (vy * wz - vz * wy)
               (vz * wx - vx * wz)
               (vx * wy - vy * wx)
instance VectorBinaryOps UnitVector NonUnitVector where
    (|.|) (UnitVector (Vector !vx !vy !vz)) (NonUnitVector (Vector !wx !wy !wz)) =
        vx * wx
        + vy * wy
        + vz * wz
    (|+|) (UnitVector (Vector !vx !vy !vz)) (NonUnitVector (Vector !wx !wy !wz)) =
        vector (vx + wx)
               (vy + wy)
               (vz + wz)
    cross (UnitVector (Vector !vx !vy !vz)) (NonUnitVector (Vector !wx !wy !wz)) =
        vector (vy * wz - vz * wy)
               (vz * wx - vx * wz)
               (vx * wy - vy * wx)
-- | Smart constructor for an arbitrary-length vector.
vector :: Double -> Double -> Double -> NonUnitVector
vector !x !y !z =
    NonUnitVector (Vector x y z)
-- | The three axis-aligned unit vectors.
unitX :: UnitVector
unitX = UnitVector (Vector 1.0 0.0 0.0)
unitY :: UnitVector
unitY = UnitVector (Vector 0.0 1.0 0.0)
unitZ :: UnitVector
unitZ = UnitVector (Vector 0.0 0.0 1.0)
-- | Build a unit vector from components, normalising the result.
normal :: Double -> Double -> Double -> UnitVector
normal !x !y !z =
    normalize $ vector x y z
-- | Re-tag a vector as unit length WITHOUT normalising.  Caller must
-- guarantee the invariant — this is the one escape hatch around the types.
unsafeForceUnitVector :: NonUnitVector -> UnitVector
unsafeForceUnitVector (NonUnitVector v) =
    UnitVector v
-- | The world origin.
origin :: Point
origin = Point 0.0 0.0 0.0
-- | Displacement from the first point to the second (@p `to` q@ = q − p).
to :: Point -> Point -> NonUnitVector
to (Point !px !py !pz) (Point !qx !qy !qz)
    = vector (qx - px)
             (qy - py)
             (qz - pz)
-- | Scale a vector to unit length.  Divides each component by the
-- magnitude (via a single strict reciprocal multiply).  Undefined for the
-- zero vector, whose magnitude is 0.
normalize :: NonUnitVector -> UnitVector
normalize !v = UnitVector (Vector sx sy sz)
  where
    (!sx, !sy, !sz) = vectorValues (v |*| invLen)
    !invLen = 1.0 / magnitude v
-- | Like 'normalize' but also returns the original length as a
-- 'RayPosition' (useful for intersection distances), computing the
-- magnitude only once.
normalizeWithLength :: NonUnitVector -> (UnitVector, RayPosition)
normalizeWithLength !v =
    (UnitVector (Vector nx ny nz), RayPosition mag)
  where
    !mag = magnitude v
    !m = 1.0 / mag
    (NonUnitVector (Vector !nx !ny !nz)) = v |*| m
-- | Euclidean length.
magnitude :: NonUnitVector -> Double
magnitude =
    sqrt . magnitudeSquared
-- | Squared length; cheaper than 'magnitude' when only comparisons matter.
magnitudeSquared :: NonUnitVector -> Double
magnitudeSquared (NonUnitVector (Vector !vx !vy !vz)) =
    vx * vx
    + vy * vy
    + vz * vz
-- | Point reached by walking distance @t@ along the ray from its origin.
at :: Ray -> RayPosition -> Point
at (Ray !ro !rd) (RayPosition !t) =
    translate (rd |*| t) ro
-- | Wrap a raw distance as a 'RayPosition'.
toRayPosition :: Double -> RayPosition
toRayPosition =
    RayPosition
-- | Two vectors perpendicular to @n@ (and to each other), forming a local
-- basis: project out the n-component of whichever axis is least parallel
-- to n, then cross for the third direction.  Results are not normalised.
perpendiculars :: UnitVector -> (NonUnitVector, NonUnitVector)
perpendiculars n =
    (vb1, vb2)
  where
    (!nx, !ny, _) = vectorValues n
    !vb1pre = if abs nx > abs ny then unitY else unitX
    vb1 = vb1pre |-| (n |*| (n |.| vb1pre))
    vb2 = n `cross` vb1
-- | Mirror an incoming direction about the surface normal:
-- r = i + 2·c1·N with c1 = −(N·i); 'incoming' points toward the surface.
calculateReflection :: UnitVector -> UnitVector -> UnitVector
calculateReflection !incoming !surfaceNormal =
    normalize (incoming |+| (surfaceNormal |*| (2 * c1)))
  where
    !c1 = - (surfaceNormal |.| incoming)
-- | Refractive index of a medium (dimensionless ratio to vacuum).
newtype RefractiveIndex = RefractiveIndex Double
-- | A ray paired with the medium it is currently travelling through.
data RayWithMedium = RayWithMedium Ray RefractiveIndex
refractiveIndexAir :: RefractiveIndex
refractiveIndexAir = RefractiveIndex 1.0
refractiveIndexGlass :: RefractiveIndex
refractiveIndexGlass = RefractiveIndex 1.5
-- | Refract a unit direction crossing from medium @ri1@ into @ri2@, or
-- totally internally reflect when Snell's law admits no transmitted ray.
--
-- With n = ri1/ri2 and cosθi = −(i·N) (same incoming-toward-surface
-- convention as 'calculateReflection'), sin²θt = n²(1 − cos²θi).  If
-- sin²θt > 1 the ray reflects and stays in the @ri1@ medium; otherwise the
-- transmitted direction t = n·i + (n·cosθi − √(1 − sin²θt))·N is returned
-- with the new medium @ri2@.
--
-- BUGFIX: the original set @cosThetaI = acos (incoming |.| surfaceNormal)@,
-- storing an *angle* where the formulas require the cosine itself; the dot
-- product (negated) already is that cosine.
calculateRefraction :: UnitVector -> UnitVector -> RefractiveIndex -> RefractiveIndex -> (UnitVector, RefractiveIndex)
calculateRefraction !incoming !surfaceNormal (RefractiveIndex !ri1) (RefractiveIndex !ri2) =
    if sin2ThetaT > 1.0
        then (calculateReflection incoming surfaceNormal, (RefractiveIndex ri1))
        else (normalize ((incoming |*| ri1ri2) |+| (surfaceNormal |*| factor)), (RefractiveIndex ri2))
  where
    !sin2ThetaT = (ri1ri2 * ri1ri2) * (1.0 - cosThetaI * cosThetaI)
    !ri1ri2 = ri1 / ri2
    !cosThetaI = - (incoming |.| surfaceNormal)
    factor = ri1ri2 * cosThetaI - sqrt (1.0 - sin2ThetaT)
|
stu-smith/rendering-in-haskell
|
src/experiment08/Core.hs
|
Haskell
|
mit
| 8,275
|
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE StrictData #-}
{-# LANGUAGE TupleSections #-}
-- | http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html
module Stratosphere.ResourceProperties.CloudFrontDistributionOrigin where
import Stratosphere.ResourceImports
import Stratosphere.ResourceProperties.CloudFrontDistributionCustomOriginConfig
import Stratosphere.ResourceProperties.CloudFrontDistributionOriginCustomHeader
import Stratosphere.ResourceProperties.CloudFrontDistributionS3OriginConfig
-- | Full data type definition for CloudFrontDistributionOrigin. See
-- 'cloudFrontDistributionOrigin' for a more convenient constructor.
-- NOTE(review): generated-style Stratosphere binding; code kept verbatim,
-- comments only.  Optional (Maybe) fields are omitted from the JSON when
-- Nothing, required fields always serialise.
data CloudFrontDistributionOrigin =
  CloudFrontDistributionOrigin
  { _cloudFrontDistributionOriginCustomOriginConfig :: Maybe CloudFrontDistributionCustomOriginConfig
  , _cloudFrontDistributionOriginDomainName :: Val Text
  , _cloudFrontDistributionOriginId :: Val Text
  , _cloudFrontDistributionOriginOriginCustomHeaders :: Maybe [CloudFrontDistributionOriginCustomHeader]
  , _cloudFrontDistributionOriginOriginPath :: Maybe (Val Text)
  , _cloudFrontDistributionOriginS3OriginConfig :: Maybe CloudFrontDistributionS3OriginConfig
  } deriving (Show, Eq)
-- | Serialise to the CloudFormation JSON shape; 'catMaybes' drops unset
-- optional properties so they are absent rather than null.
instance ToJSON CloudFrontDistributionOrigin where
  toJSON CloudFrontDistributionOrigin{..} =
    object $
    catMaybes
    [ fmap (("CustomOriginConfig",) . toJSON) _cloudFrontDistributionOriginCustomOriginConfig
    , (Just . ("DomainName",) . toJSON) _cloudFrontDistributionOriginDomainName
    , (Just . ("Id",) . toJSON) _cloudFrontDistributionOriginId
    , fmap (("OriginCustomHeaders",) . toJSON) _cloudFrontDistributionOriginOriginCustomHeaders
    , fmap (("OriginPath",) . toJSON) _cloudFrontDistributionOriginOriginPath
    , fmap (("S3OriginConfig",) . toJSON) _cloudFrontDistributionOriginS3OriginConfig
    ]
-- | Constructor for 'CloudFrontDistributionOrigin' containing required fields
-- as arguments.
cloudFrontDistributionOrigin
  :: Val Text -- ^ 'cfdoDomainName'
  -> Val Text -- ^ 'cfdoId'
  -> CloudFrontDistributionOrigin
cloudFrontDistributionOrigin domainNamearg idarg =
  CloudFrontDistributionOrigin
  { _cloudFrontDistributionOriginCustomOriginConfig = Nothing
  , _cloudFrontDistributionOriginDomainName = domainNamearg
  , _cloudFrontDistributionOriginId = idarg
  , _cloudFrontDistributionOriginOriginCustomHeaders = Nothing
  , _cloudFrontDistributionOriginOriginPath = Nothing
  , _cloudFrontDistributionOriginS3OriginConfig = Nothing
  }
-- Lenses over each field follow, one per CloudFormation property.
-- | http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html#cfn-cloudfront-distribution-origin-customoriginconfig
cfdoCustomOriginConfig :: Lens' CloudFrontDistributionOrigin (Maybe CloudFrontDistributionCustomOriginConfig)
cfdoCustomOriginConfig = lens _cloudFrontDistributionOriginCustomOriginConfig (\s a -> s { _cloudFrontDistributionOriginCustomOriginConfig = a })
-- | http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html#cfn-cloudfront-distribution-origin-domainname
cfdoDomainName :: Lens' CloudFrontDistributionOrigin (Val Text)
cfdoDomainName = lens _cloudFrontDistributionOriginDomainName (\s a -> s { _cloudFrontDistributionOriginDomainName = a })
-- | http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html#cfn-cloudfront-distribution-origin-id
cfdoId :: Lens' CloudFrontDistributionOrigin (Val Text)
cfdoId = lens _cloudFrontDistributionOriginId (\s a -> s { _cloudFrontDistributionOriginId = a })
-- | http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html#cfn-cloudfront-distribution-origin-origincustomheaders
cfdoOriginCustomHeaders :: Lens' CloudFrontDistributionOrigin (Maybe [CloudFrontDistributionOriginCustomHeader])
cfdoOriginCustomHeaders = lens _cloudFrontDistributionOriginOriginCustomHeaders (\s a -> s { _cloudFrontDistributionOriginOriginCustomHeaders = a })
-- | http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html#cfn-cloudfront-distribution-origin-originpath
cfdoOriginPath :: Lens' CloudFrontDistributionOrigin (Maybe (Val Text))
cfdoOriginPath = lens _cloudFrontDistributionOriginOriginPath (\s a -> s { _cloudFrontDistributionOriginOriginPath = a })
-- | http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html#cfn-cloudfront-distribution-origin-s3originconfig
cfdoS3OriginConfig :: Lens' CloudFrontDistributionOrigin (Maybe CloudFrontDistributionS3OriginConfig)
cfdoS3OriginConfig = lens _cloudFrontDistributionOriginS3OriginConfig (\s a -> s { _cloudFrontDistributionOriginS3OriginConfig = a })
|
frontrowed/stratosphere
|
library-gen/Stratosphere/ResourceProperties/CloudFrontDistributionOrigin.hs
|
Haskell
|
mit
| 4,862
|
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE StrictData #-}
{-# LANGUAGE TupleSections #-}
-- | http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-secretsmanager-secrettargetattachment.html
module Stratosphere.Resources.SecretsManagerSecretTargetAttachment where
import Stratosphere.ResourceImports
-- | Full data type definition for SecretsManagerSecretTargetAttachment. See
-- 'secretsManagerSecretTargetAttachment' for a more convenient constructor.
-- NOTE(review): generated-style Stratosphere binding; code kept verbatim,
-- comments only.  All three properties are required.
data SecretsManagerSecretTargetAttachment =
  SecretsManagerSecretTargetAttachment
  { _secretsManagerSecretTargetAttachmentSecretId :: Val Text
  , _secretsManagerSecretTargetAttachmentTargetId :: Val Text
  , _secretsManagerSecretTargetAttachmentTargetType :: Val Text
  } deriving (Show, Eq)
-- | Emit the CloudFormation resource type and its property map.
instance ToResourceProperties SecretsManagerSecretTargetAttachment where
  toResourceProperties SecretsManagerSecretTargetAttachment{..} =
    ResourceProperties
    { resourcePropertiesType = "AWS::SecretsManager::SecretTargetAttachment"
    , resourcePropertiesProperties =
        hashMapFromList $ catMaybes
        [ (Just . ("SecretId",) . toJSON) _secretsManagerSecretTargetAttachmentSecretId
        , (Just . ("TargetId",) . toJSON) _secretsManagerSecretTargetAttachmentTargetId
        , (Just . ("TargetType",) . toJSON) _secretsManagerSecretTargetAttachmentTargetType
        ]
    }
-- | Constructor for 'SecretsManagerSecretTargetAttachment' containing
-- required fields as arguments.
secretsManagerSecretTargetAttachment
  :: Val Text -- ^ 'smstaSecretId'
  -> Val Text -- ^ 'smstaTargetId'
  -> Val Text -- ^ 'smstaTargetType'
  -> SecretsManagerSecretTargetAttachment
secretsManagerSecretTargetAttachment secretIdarg targetIdarg targetTypearg =
  SecretsManagerSecretTargetAttachment
  { _secretsManagerSecretTargetAttachmentSecretId = secretIdarg
  , _secretsManagerSecretTargetAttachmentTargetId = targetIdarg
  , _secretsManagerSecretTargetAttachmentTargetType = targetTypearg
  }
-- Lenses over each field follow, one per CloudFormation property.
-- | http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-secretsmanager-secrettargetattachment.html#cfn-secretsmanager-secrettargetattachment-secretid
smstaSecretId :: Lens' SecretsManagerSecretTargetAttachment (Val Text)
smstaSecretId = lens _secretsManagerSecretTargetAttachmentSecretId (\s a -> s { _secretsManagerSecretTargetAttachmentSecretId = a })
-- | http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-secretsmanager-secrettargetattachment.html#cfn-secretsmanager-secrettargetattachment-targetid
smstaTargetId :: Lens' SecretsManagerSecretTargetAttachment (Val Text)
smstaTargetId = lens _secretsManagerSecretTargetAttachmentTargetId (\s a -> s { _secretsManagerSecretTargetAttachmentTargetId = a })
-- | http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-secretsmanager-secrettargetattachment.html#cfn-secretsmanager-secrettargetattachment-targettype
smstaTargetType :: Lens' SecretsManagerSecretTargetAttachment (Val Text)
smstaTargetType = lens _secretsManagerSecretTargetAttachmentTargetType (\s a -> s { _secretsManagerSecretTargetAttachmentTargetType = a })
|
frontrowed/stratosphere
|
library-gen/Stratosphere/Resources/SecretsManagerSecretTargetAttachment.hs
|
Haskell
|
mit
| 3,164
|
{-
******************************************************************************
* JSHOP *
* *
* Module: TestSuite *
* Purpose: A set of tests to run on a selection of inputs *
* Author: Nick Brunt *
* *
* Copyright (c) Nick Brunt, 2011 - 2012 *
* Subject to MIT License as stated in root directory *
* *
******************************************************************************
-}
module TestSuite where
-- Standard library imports
import System.Directory
import System.CPUTime
import Data.Maybe
import Control.Monad
-- JSHOP module imports
import Utilities
-- Test result data structures
-- | One complete test-suite run, as persisted (one 'show'n record per line)
-- in 'testResultsFile' and read back with 'read'.
data TestResults =
    TestResults {
        testNum :: Int, -- sequence number of this run in the log
        message :: String, -- free-text message supplied on the command line
        strucTests :: [Test], -- parser structure tests
        libTests :: [Test], -- real-world library tests
        time :: Double, -- wall time of the run, seconds
        average :: Float -- mean compression percentage over libTests
    }
    deriving (Read, Show)
-- | Result of minifying a single input file.
data Test =
    Test {
        name :: String,
        result :: Bool, -- True = pass, False = fail
        errorMsg :: String, -- parser error text; empty on success
        inputSize :: Int, -- characters in
        outputSize :: Int, -- characters out
        reduction :: Int, -- inputSize - outputSize
        percentage :: Float -- output as a percentage of input
    }
    deriving (Read, Show)
-- | Neutral starting record; runs fill in fields as they go.
defaultTest :: Test
defaultTest =
    Test {
        name = "",
        result = False,
        errorMsg = "",
        inputSize = 0,
        outputSize = 0,
        reduction = 0,
        percentage = 0
    }
-- | Append-only log of 'TestResults' records, one per line.
testResultsFile :: String
testResultsFile = "tests/testResults.log"
-- Structure tests: fixtures exercising every JS construct the parser knows.
funcFile :: String
funcFile = "tests/structure/functions.js"
exprFile :: String
exprFile = "tests/structure/expressions.js"
statFile :: String
statFile = "tests/structure/statements.js"
-- | Run the full test suite: parse/regenerate the structure fixtures and
-- every file under @tests/libraries@, pretty-print the results, and append
-- them to 'testResultsFile' when a log message was supplied.
--
-- Fixes: the message was taken with partial 'head' (crashed on @Just []@);
-- the directory filter used partial 'head'; 'when' replaces the
-- @if/else putStr ""@ tail.
runTests :: Maybe [String] -> IO()
runTests mbArgs = do
    startTime <- getCPUTime
    putStrLn "Starting test suite"
    putStrLn "-------------------\n"
    -- First argument (if present) is the log message; total unlike 'head'.
    let msg = fromMaybe "No message" (listToMaybe =<< mbArgs)
    -- Structure tests run every possible JavaScript control structure
    -- through the program to test that they can be fully parsed.
    funcTest <- runParseTest (defaultTest {name="Functions"}) funcFile
    exprTest <- runParseTest (defaultTest {name="Expressions"}) exprFile
    statTest <- runParseTest (defaultTest {name="Statements"}) statFile
    -- Library tests run a set of JavaScript libraries through the program
    -- to test real world code and also to check compression ratios.
    files <- getDirectoryContents "tests/libraries"
    -- Filter out "..", "." and hidden files, then add the path.
    -- ('take 1' is total, unlike 'head'.)
    let names = filter (\x -> take 1 x /= ".") files
    let libs = ["tests/libraries/" ++ f | f <- names]
    let libTests = [defaultTest {name=libName} | libName <- names]
    libTests' <- zipWithM runParseTest libTests libs
    nextTestNum <- getNextTestNum
    endTime <- getCPUTime
    let testResults = TestResults {
            testNum = nextTestNum,
            message = msg,
            strucTests = [funcTest, exprTest, statTest],
            libTests = libTests',
            time = calcTime startTime endTime,
            average = mean $ map percentage libTests'
        }
    -- Pretty print results
    putStrLn $ ppTestResults testResults ""
    -- Persist only when a real message was supplied.
    when (msg /= "No message") $
        if nextTestNum == 0 then
            writeFile testResultsFile (show testResults)
        else
            appendFile testResultsFile ('\n':(show testResults))
-- | Parse one JavaScript file, regenerate it, write the minified output,
-- and record pass/fail plus size statistics into the given 'Test'.
--
-- Fixes: the failure branch bound a local named @error@, shadowing
-- 'Prelude.error' (renamed to @parseErr@); the unused @state@ binding is
-- now explicitly discarded.
runParseTest :: Test -> String -> IO Test
runParseTest test file = do
    input <- readFile file
    let parseOutput = parseJS input
    case parseOutput of
        Left parseErr -> do
            -- Parse failure: keep the message; output sizes stay at 0.
            return (test {
                result = False,
                errorMsg = parseErr,
                inputSize = length input
            })
        Right (tree, _state) -> do
            let output = genJS tree
            let outFile = outTestFile file
            -- Write the minified output next to the other results.
            saveFile outFile output
            return (test {
                result = True,
                inputSize = length input,
                outputSize = length output,
                reduction = (length input) - (length output),
                percentage = calcRatio input output
            })
    where
        -- Map "dir/name.js" to "tests/outputLibraries/name.min.js" by
        -- splicing ".min" (reversed: "nim.") in front of the extension.
        outTestFile :: String -> String
        outTestFile inFile = "tests/outputLibraries/" ++ minFile
            where
                file = reverse $ takeWhile (/='/') $ reverse inFile
                minFile = reverse $ takeWhile (/='.') (reverse file)
                          ++ ".nim" ++ dropWhile (/='.') (reverse file)
-- | Pretty-print every logged run, one 'TestResults' per log line.
showPastResults :: IO()
showPastResults = do
    f <- readFile testResultsFile
    let results = [ppTestResults tr "" | tr <- map read (lines f)]
    mapM_ putStrLn results
-- | Pretty-print the most recent run.
-- NOTE(review): 'last' is partial — crashes if the log file is empty.
showLastResult :: IO()
showLastResult = do
    f <- readFile testResultsFile
    putStrLn $ ppTestResults (read $ last (lines f)) ""
-- | Pretty-print run number @n@ (0-based line index).
-- NOTE(review): '(!!)' is partial — crashes for an out-of-range index.
showResult :: Int -> IO()
showResult n = do
    f <- readFile testResultsFile
    putStrLn $ ppTestResults (read $ (lines f) !! n) ""
-- | Print each logged run's average compression percentage with its
-- message.  (The original used an identity list comprehension,
-- @[tr | tr <- map read ...]@, which is just @map read ...@.)
showAverages :: IO()
showAverages = do
    putStrLn "Percentage of output to input:\n"
    f <- readFile testResultsFile
    -- One TestResults record per log line.
    let tests = map read (lines f)
    let strings = ["Test " ++ (show $ testNum t) ++
                   " average:\t" ++ (show $ average t) ++
                   " \t" ++ (message t) | t <- tests]
    mapM_ putStrLn strings
-- | Next run number = count of records already in the log.
-- NOTE(review): throws if 'testResultsFile' does not exist yet — presumably
-- the first run is expected to be invoked after the file is created; verify.
getNextTestNum :: IO Int
getNextTestNum = do
    f <- readFile testResultsFile
    return $ length $ lines f
-- | Render a whole run as a difference list ('ShowS'); 'spc', 'nl',
-- 'indent' and 'ppSeq' come from Utilities.
ppTestResults :: TestResults -> ShowS
ppTestResults (TestResults {testNum = n, message = m,
                            strucTests = sts, libTests = lts,
                            time = t, average = a}) =
    showString "Test number" . spc . showString (show n) . nl
    . indent 1 . showString "Message:" . spc . showString m . nl . nl
    . indent 1 . showString "STRUCTURE TESTS" . nl
    . ppSeq 1 ppTest sts . nl
    . indent 1 . showString "LIBRARY TESTS" . nl
    . ppSeq 1 ppTest lts . nl
    . showString "Completed in" . spc . showString (take 5 (show t))
    . spc . showString "seconds" . nl
    . showString "Average compression:" . spc
    . showString (take 5 (show a)) . showChar '%' . nl
-- | Render one file's result at the given indent level; numbers shown with
-- 'take 5' to clip long decimals.
ppTest :: Int -> Test -> ShowS
ppTest idnt (Test {name = n, result = r, errorMsg = e,
                   inputSize = i, outputSize = o,
                   reduction = d, percentage = p}) =
    indent idnt . showString n . nl
    . indent (idnt+1) . showString "Result:" . spc . ppResult r . nl
    . ppErrorMsg (idnt+1) e
    . indent (idnt+1) . showString "Input size:" . spc . showString (show i) . nl
    . indent (idnt+1) . showString "Output size:" . spc . showString (show o) . nl
    . indent (idnt+1) . showString "Reduced by:" . spc . showString (show d) . nl
    . indent (idnt+1) . showString "Percentage of original:" . spc
    . showString (take 5 (show p)) . showChar '%' . nl
-- | Render a pass/fail flag as the literal text "PASS" or "FAIL".
ppResult :: Bool -> ShowS
ppResult passed = showString label
    where
        label = if passed then "PASS" else "FAIL"
-- | Render an error message at the given indent, or nothing when the
-- message is empty (the success case).
ppErrorMsg :: Int -> String -> ShowS
ppErrorMsg _ "" = showString ""
ppErrorMsg n msg = indent n . showString "Error message:"
                   . spc . showString msg . nl
|
nbrunt/JSHOP
|
src/TestSuite.hs
|
Haskell
|
mit
| 7,600
|
module Tables.A004489 (a004489) where
import Helpers.BaseRepresentation (toBase, fromBase)
import Helpers.ListHelpers (zipWithPadding)
import Helpers.Table (tableByAntidiagonals)
-- | OEIS A004489, read by antidiagonals: the carry-less base-3 sum of n
-- and k — each pair of base-3 digits is added mod 3, then the digit string
-- is reinterpreted in base 3.  'zipWithPadding' pads the shorter digit
-- list with 0 so differing lengths line up.
a004489 :: Int -> Int
a004489 i = fromBase 3 $ map tertSum $ zipWithPadding 0 (base3 n) (base3 k) where
  (n, k) = tableByAntidiagonals i
  tertSum (n', k') = (n' + k') `mod` 3
  base3 = toBase 3
|
peterokagey/haskellOEIS
|
src/Tables/A004489.hs
|
Haskell
|
apache-2.0
| 376
|
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
module Openshift.V1beta1.Scale where
import GHC.Generics
import Data.Text
import Kubernetes.V1.ObjectMeta
import Openshift.V1beta1.ScaleSpec
import Openshift.V1beta1.ScaleStatus
import qualified Data.Aeson
-- | represents a scaling request for a resource.
-- | represents a scaling request for a resource.
-- JSON instances are derived via 'GHC.Generics', so field names map
-- directly to JSON keys; every field is optional (Maybe).
data Scale = Scale
  { kind :: Maybe Text -- ^ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
  , apiVersion :: Maybe Text -- ^ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
  , metadata :: Maybe ObjectMeta -- ^ Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.
  , spec :: Maybe ScaleSpec -- ^ defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
  , status :: Maybe ScaleStatus -- ^ current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.
  } deriving (Show, Eq, Generic)
-- Generic (field-name-keyed) JSON codecs.
instance Data.Aeson.FromJSON Scale
instance Data.Aeson.ToJSON Scale
|
minhdoboi/deprecated-openshift-haskell-api
|
openshift/lib/Openshift/V1beta1/Scale.hs
|
Haskell
|
apache-2.0
| 1,616
|
{-# LANGUAGE PackageImports, OverloadedStrings, TypeFamilies #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
module TlsIo (
TlsIo, evalTlsIo, liftIO, throwError, readCached, randomByteString,
Partner(..), opponent, isCiphered,
readContentType, writeContentType, readVersion, writeVersion,
readLen, writeLen,
setVersion, setClientRandom, setServerRandom,
getClientRandom, getServerRandom, getCipherSuite,
cacheCipherSuite, flushCipherSuite,
encryptRSA, generateKeys, updateHash, finishedHash, clientVerifySign,
encryptMessage, decryptMessage,
updateSequenceNumber, updateSequenceNumberSmart,
TlsServer, runOpen, tPut, tGetByte, tGetLine, tGet, tGetContent, tClose,
debugPrintKeys,
getRandomGen, setRandomGen,
SecretKey(..),
) where
import Prelude hiding (read)
import System.IO
import System.IO.Error
import Control.Concurrent.STM
import Control.Applicative
import "monads-tf" Control.Monad.Error
import "monads-tf" Control.Monad.State
import Data.Maybe
import Data.Word
import qualified Data.ByteString as BS
import qualified Data.ByteString.Char8 as BSC
import "crypto-random" Crypto.Random
import qualified Crypto.Hash.SHA256 as SHA256
import qualified Crypto.PubKey.HashDescr as RSA
import qualified Crypto.PubKey.RSA as RSA
import qualified Crypto.PubKey.RSA.Prim as RSA
import qualified Crypto.PubKey.RSA.PKCS15 as RSA
import qualified Crypto.PubKey.ECC.ECDSA as ECDSA
import qualified CryptoTools as CT
import Basic
import Data.HandleLike
import Data.ASN1.Encoding
import Data.ASN1.Types
import Data.ASN1.BinaryEncoding
import Content
-- | The TLS client monad: String-typed errors over mutable connection
-- state over IO.  @cnt@ is the cached-content element type.
type TlsIo cnt = ErrorT String (StateT (TlsClientState cnt) IO)
-- | Mutable per-connection handshake state.  Maybe fields start Nothing
-- and are filled in as the handshake progresses.
data TlsClientState cnt = TlsClientState {
	tlssHandle :: Handle, -- underlying socket/file handle
	tlssContentCache :: [cnt], -- records read ahead but not yet consumed
	tlssVersion :: Maybe CT.MSVersion, -- negotiated protocol version
	tlssClientWriteCipherSuite :: CipherSuite, -- active suite, client->server
	tlssServerWriteCipherSuite :: CipherSuite, -- active suite, server->client
	tlssCachedCipherSuite :: CipherSuite, -- negotiated but not yet activated
	tlssMasterSecret :: Maybe BS.ByteString,
	tlssClientRandom :: Maybe BS.ByteString,
	tlssServerRandom :: Maybe BS.ByteString,
	tlssClientWriteMacKey :: Maybe BS.ByteString,
	tlssServerWriteMacKey :: Maybe BS.ByteString,
	tlssClientWriteKey :: Maybe BS.ByteString,
	tlssServerWriteKey :: Maybe BS.ByteString,
	tlssRandomGen :: SystemRNG,
	tlssSha256Ctx :: SHA256.Ctx, -- running handshake-transcript hash
	tlssClientSequenceNumber :: Word64,
	tlssServerSequenceNumber :: Word64
 }
-- | Expose an established connection through the generic handle interface.
instance HandleLike TlsServer where
	type HandleMonad TlsServer = IO
	hlPut = tPut
	hlGet = tGet
	hlGetLine = tGetLine
	hlGetContent = tGetContent
	hlClose = tClose
-- | Pre-handshake state: NULL cipher suites, empty cache, fresh RNG and
-- hash context, sequence numbers at zero.
initTlsClientState :: EntropyPool -> Handle -> TlsClientState cnt
initTlsClientState ep sv = TlsClientState {
	tlssHandle = sv,
	tlssContentCache = [],
	tlssVersion = Nothing,
	tlssClientWriteCipherSuite = CipherSuite KeyExNULL MsgEncNULL,
	tlssServerWriteCipherSuite = CipherSuite KeyExNULL MsgEncNULL,
	tlssCachedCipherSuite = CipherSuite KeyExNULL MsgEncNULL,
	tlssMasterSecret = Nothing,
	tlssClientRandom = Nothing,
	tlssServerRandom = Nothing,
	tlssClientWriteMacKey = Nothing,
	tlssServerWriteMacKey = Nothing,
	tlssClientWriteKey = Nothing,
	tlssServerWriteKey = Nothing,
	tlssRandomGen = cprgCreate ep,
	tlssSha256Ctx = SHA256.init,
	tlssClientSequenceNumber = 0,
	tlssServerSequenceNumber = 0
 }
-- | Run a handshake action over a handle and freeze the resulting state
-- into a 'TlsServer' with TVar-backed mutable parts.
-- NOTE(review): the 'fromJust' calls crash if the handshake action did not
-- populate version and all four keys — callers must pass a complete
-- handshake; confirm this contract at the call sites.
runOpen :: TlsIo cnt () -> Handle -> IO TlsServer
runOpen opn sv = do
	ep <- createEntropyPool
	(_, tlss) <- opn `runTlsIo` initTlsClientState ep sv
	tvgen <- atomically . newTVar $ tlssRandomGen tlss
	tvcsn <- atomically . newTVar $ tlssClientSequenceNumber tlss
	tvssn <- atomically . newTVar $ tlssServerSequenceNumber tlss
	tvbfr <- atomically $ newTVar ""
	return TlsServer {
		tlsVersion = fromJust $ tlssVersion tlss,
		tlsCipherSuite = tlssClientWriteCipherSuite tlss,
		tlsHandle = tlssHandle tlss,
		tlsBuffer = tvbfr,
		tlsRandomGen = tvgen,
		tlsClientWriteMacKey = fromJust $ tlssClientWriteMacKey tlss,
		tlsServerWriteMacKey = fromJust $ tlssServerWriteMacKey tlss,
		tlsClientWriteKey = fromJust $ tlssClientWriteKey tlss,
		tlsServerWriteKey = fromJust $ tlssServerWriteKey tlss,
		tlsClientSequenceNumber = tvcsn,
		tlsServerSequenceNumber = tvssn
	}
-- | Run a 'TlsIo' action, returning its result and final state.
-- NOTE(review): a Left is turned into an impure 'error' (uncatchable as a
-- typed failure) — intentional fail-fast, apparently; confirm.
runTlsIo :: TlsIo cnt a -> TlsClientState cnt -> IO (a, TlsClientState cnt)
runTlsIo io st = do
	(ret, st') <- runErrorT io `runStateT` st
	case ret of
		Right r -> return (r, st')
		Left err -> error err
-- | Like 'runTlsIo' but starts from a fresh state and discards it.
evalTlsIo :: TlsIo cnt a -> EntropyPool -> Handle -> IO a
evalTlsIo io ep sv = do
	ret <- runErrorT io `evalStateT` initTlsClientState ep sv
	case ret of
		Right r -> return r
		Left err -> error err
-- | Pop one item from the content cache, refilling it via @rd@ when empty.
-- NOTE(review): the @r : cch' <- rd@ bind is partial — if @rd@ yields []
-- this fails in the monad's 'fail'; confirm @rd@ never returns empty.
readCached :: TlsIo cnt [cnt] -> TlsIo cnt cnt
readCached rd = do
	tlss@TlsClientState{ tlssContentCache = cch } <- get
	case cch of
		[] -> do
			r : cch' <- rd
			put tlss { tlssContentCache = cch' }
			return r
		r : cch' -> do
			put tlss { tlssContentCache = cch' }
			return r
-- | Draw @len@ random bytes from the connection RNG, storing the advanced
-- generator back into the state so the same bytes are never produced twice.
randomByteString :: Int -> TlsIo cnt BS.ByteString
randomByteString len = do
	st <- get
	let (bs, gen') = cprgGenerate len (tlssRandomGen st)
	put st { tlssRandomGen = gen' }
	return bs
-- | Which side of the connection a key/cipher direction belongs to.
data Partner = Server | Client deriving (Show, Eq)
-- | The other side.
opponent :: Partner -> Partner
opponent Server = Client
opponent Client = Server
-- | True once the given side's write cipher suite is no longer NULL,
-- i.e. after ChangeCipherSpec for that direction.
isCiphered :: Partner -> TlsIo cnt Bool
isCiphered partner = (/= CipherSuite KeyExNULL MsgEncNULL) <$> gets (case partner of
	Client -> tlssClientWriteCipherSuite
	Server -> tlssServerWriteCipherSuite)
-- Record-layer field readers/writers: 1-byte content type, 2-byte version,
-- and length-prefixed payloads, all via the low-level 'read' / 'write'.
readContentType :: TlsIo cnt ContentType
readContentType = byteStringToContentType <$> read 1
writeContentType :: ContentType -> TlsIo cnt ()
writeContentType = write . contentTypeToByteString
readVersion :: TlsIo cnt Version
readVersion = byteStringToVersion <$> read 2
writeVersion :: Version -> TlsIo cnt ()
writeVersion = write . versionToByteString
-- | Read an @n@-byte big-endian length, then that many payload bytes.
readLen :: Int -> TlsIo cnt BS.ByteString
readLen n = read . byteStringToInt =<< read n
-- | Write the payload length in @n@ bytes, then the payload.
writeLen :: Int -> BS.ByteString -> TlsIo cnt ()
writeLen n bs = write (intToByteString n $ BS.length bs) >> write bs
-- | Read exactly @n@ bytes from the connection handle, failing in the
-- 'TlsIo' error channel on a short read (EOF or closed socket).
--
-- Fixes the diagnostic text: it claimed \"Basic.read\" although this is
-- TlsIo's reader, and misspelled \"actual\" as \"actural\".
read :: Int -> TlsIo cnt BS.ByteString
read n = do
	r <- liftIO . flip BS.hGet n =<< gets tlssHandle
	if BS.length r == n then return r else throwError $
		"TlsIo.read:\n" ++
		"\texpected: " ++ show n ++ "byte\n" ++
		"\tactual  : " ++ show (BS.length r) ++ "byte\n"
-- | Write raw bytes to the connection handle.
write :: BS.ByteString -> TlsIo cnt ()
write dat = liftIO . flip BS.hPut dat =<< gets tlssHandle
-- | Record the negotiated protocol version; only versions CryptoTools
-- knows ('CT.versionToVersion') are accepted.
setVersion :: Version -> TlsIo cnt ()
setVersion v = do
	tlss <- get
	case CT.versionToVersion v of
		Just v' -> put tlss { tlssVersion = Just v' }
		_ -> throwError "setVersion: Not implemented"
-- | Store the hello randoms for later key derivation.
setClientRandom, setServerRandom :: Random -> TlsIo cnt ()
setClientRandom (Random cr) = do
	tlss <- get
	put $ tlss { tlssClientRandom = Just cr }
setServerRandom (Random sr) = do
	tlss <- get
	put $ tlss { tlssServerRandom = Just sr }
getClientRandom, getServerRandom :: TlsIo cnt (Maybe BS.ByteString)
getClientRandom = gets tlssClientRandom
getServerRandom = gets tlssServerRandom
-- | The negotiated-but-not-yet-activated suite (see 'flushCipherSuite').
getCipherSuite :: TlsIo cnt CipherSuite
getCipherSuite = gets tlssCachedCipherSuite
-- | Remember the suite chosen in ServerHello; it becomes active only when
-- flushed on ChangeCipherSpec.
cacheCipherSuite :: CipherSuite -> TlsIo cnt ()
cacheCipherSuite cs = do
	tlss <- get
	put $ tlss { tlssCachedCipherSuite = cs }
-- | Activate the cached suite for one direction (on ChangeCipherSpec).
flushCipherSuite :: Partner -> TlsIo cnt ()
flushCipherSuite p = do
	tlss <- get
	case p of
		Client -> put tlss {
			tlssClientWriteCipherSuite = tlssCachedCipherSuite tlss }
		Server -> put tlss {
			tlssServerWriteCipherSuite = tlssCachedCipherSuite tlss }
-- | PKCS#1 v1.5-encrypt a payload with the server's public key, threading
-- the RNG through the state.
--
-- Fix: the original used an irrefutable @(Right e, g')@ pattern, so an
-- RSA-level failure (e.g. message too long for the modulus) surfaced as an
-- uncatchable pattern-match crash; it is now reported through the 'TlsIo'
-- error channel.  The advanced generator is stored either way.
encryptRSA :: RSA.PublicKey -> BS.ByteString -> TlsIo cnt BS.ByteString
encryptRSA pub pln = do
	g <- gets tlssRandomGen
	let (eres, g') = RSA.encrypt g pub pln
	tlss <- get
	put tlss { tlssRandomGen = g' }
	case eres of
		Right e -> return e
		Left err -> throwError $ "encryptRSA: " ++ show err
-- | Derive the master secret and the four session keys from the
-- pre-master secret and cache them in the state.  Requires version,
-- client random and server random to have been set already.
generateKeys :: BS.ByteString -> TlsIo cnt ()
generateKeys pms = do
    -- liftIO $ putStrLn $ "Pre Master Secret: " ++ show pms
    mv <- gets tlssVersion
    mcr <- gets $ (CT.ClientRandom <$>) . tlssClientRandom
    msr <- gets $ (CT.ServerRandom <$>) . tlssServerRandom
    -- MAC-key length follows the suite's hash: SHA-1 = 20, SHA-256 = 32
    mkl <- do
        cs <- gets tlssCachedCipherSuite
        case cs of
            CipherSuite _ AES_128_CBC_SHA -> return 20
            CipherSuite _ AES_128_CBC_SHA256 -> return 32
            _ -> throwError "TlsIO.generateKeys: error"
    case (mv, mcr, msr) of
        (Just v, Just cr, Just sr) -> do
            let ms = CT.generateMasterSecret v pms cr sr
                ems = CT.generateKeyBlock v cr sr ms $
                        mkl * 2 + 32
                -- NOTE(review): partial pattern -- crashes if the key
                -- block is shorter than 2*mkl + 32 bytes
                [cwmk, swmk, cwk, swk] =
                    divide [ mkl, mkl, 16, 16 ] ems
            -- liftIO . putStrLn $ "KEYS: " ++ show [cwmk, swmk, cwk, swk]
            tlss <- get
            put $ tlss {
                tlssMasterSecret = Just ms,
                tlssClientWriteMacKey = Just cwmk,
                tlssServerWriteMacKey = Just swmk,
                tlssClientWriteKey = Just cwk,
                tlssServerWriteKey = Just swk }
        _ -> throwError "No version / No (client/server) random"
    where
    -- split a byte string into chunks of the given sizes; stops early
    -- (dropping remaining sizes) once the input is exhausted
    divide [] _ = []
    divide (n : ns) bs
        | bs == BS.empty = []
        | otherwise = let (x, xs) = BS.splitAt n bs in x : divide ns xs
-- | Fold @bs@ into the running SHA-256 handshake-transcript hash.
updateHash :: BS.ByteString -> TlsIo cnt ()
updateHash bs = do
    tlss@TlsClientState{ tlssSha256Ctx = sha256 } <- get
    -- liftIO . putStrLn $ "PRE : " ++ show (SHA256.finalize sha256)
    -- liftIO . putStrLn $ show bs
    -- liftIO . putStrLn $ "POST: " ++ show (SHA256.finalize $ SHA256.update sha256 bs)
    put tlss { tlssSha256Ctx = SHA256.update sha256 bs }

-- | Compute the Finished-message verify data for @partner@ from the
-- master secret and the current transcript hash.  TLS 1.2 only; any
-- other (or missing) version is an error.
finishedHash :: Partner -> TlsIo cnt BS.ByteString
finishedHash partner = do
    mms <- gets tlssMasterSecret
    sha256 <- SHA256.finalize <$> gets tlssSha256Ctx
    mv <- gets tlssVersion
    case (mv, mms) of
        (Just CT.TLS12, Just ms) -> return $ case partner of
            -- the Bool flags the client (True) vs server (False) label
            Client -> CT.generateFinished CT.TLS12 True ms sha256
            Server -> CT.generateFinished CT.TLS12 False ms sha256
        _ -> throwError "finishedHash: No version / No master secrets"
-- | Private keys usable for the CertificateVerify signature.
class SecretKey sk where
    sign :: sk -> BS.ByteString -> BS.ByteString
    algorithm :: sk -> (HashAlgorithm, SignatureAlgorithm)

instance SecretKey RSA.PrivateKey where
    -- NOTE(review): the @Right hashed@ pattern is partial; a
    -- padSignature failure crashes instead of returning an error.
    sign sk bd = let
        Right hashed = RSA.padSignature
            (RSA.public_size $ RSA.private_pub sk)
            (RSA.digestToASN1 RSA.hashDescrSHA256 bd) in
        RSA.dp Nothing sk hashed
    algorithm _ = (HashAlgorithmSha256, SignatureAlgorithmRsa)

instance SecretKey ECDSA.PrivateKey where
    -- SECURITY(review): signWith is given the constant nonce 4649.
    -- Reusing a fixed k for two different messages reveals the ECDSA
    -- private key; this needs a random or RFC 6979 deterministic nonce.
    sign sk = encodeSignature . fromJust . ECDSA.signWith 4649 sk id
    algorithm _ = (HashAlgorithmSha256, SignatureAlgorithmEcdsa)

-- | DER-encode an ECDSA signature as SEQUENCE { r INTEGER, s INTEGER }.
encodeSignature :: ECDSA.Signature -> BS.ByteString
encodeSignature (ECDSA.Signature r s) =
    encodeASN1' DER [Start Sequence, IntVal r, IntVal s, End Sequence]
-- | Sign the current handshake-transcript hash with the client key
-- (CertificateVerify).
clientVerifySign :: SecretKey sk => sk -> TlsIo cnt BS.ByteString
clientVerifySign pkys = do
    sha256 <- gets $ SHA256.finalize . tlssSha256Ctx
    return $ sign pkys sha256

-- | Gather version, cipher suite, write key, sequence number and MAC
-- key for @partner@'s write direction in one call.
getVsnCsMwkSnMmk :: Partner -> TlsIo cnt (Maybe CT.MSVersion, CipherSuite, Maybe BS.ByteString,
    Word64, Maybe BS.ByteString)
getVsnCsMwkSnMmk partner = do
    vrsn <- gets tlssVersion
    cs <- cipherSuite partner
    mwk <- writeKey partner
    sn <- sequenceNumber partner
    mmk <- macKey partner
    return (vrsn, cs, mwk, sn, mmk)
-- | Protect one record for @partner@'s write direction.  NULL suites
-- pass the message through unchanged; otherwise only TLS 1.2 with
-- AES_128_CBC_SHA(256) is implemented.
encryptMessage :: Partner ->
    ContentType -> Version -> BS.ByteString -> TlsIo cnt BS.ByteString
encryptMessage partner ct v msg = do
    (vrsn, cs, mwk, sn, mmk) <- getVsnCsMwkSnMmk partner
    gen <- gets tlssRandomGen
    -- choose the MAC hash from the suite; Nothing = plaintext pass-through
    mhs <- case cs of
        CipherSuite _ AES_128_CBC_SHA -> return $ Just CT.hashSha1
        CipherSuite _ AES_128_CBC_SHA256 -> return $ Just CT.hashSha256
        CipherSuite KeyExNULL MsgEncNULL -> return Nothing
        _ -> throwError "TlsIo.encryptMessage"
    case (vrsn, mhs, mwk, mmk) of
        (Just CT.TLS12, Just hs, Just wk, Just mk)
            -> do let (ret, gen') =
                          CT.encryptMessage hs gen wk sn mk ct v msg
                  -- keep the advanced RNG state for the next record
                  tlss <- get
                  put tlss{ tlssRandomGen = gen' }
                  return ret
        (_, Nothing, _, _) -> return msg
        _ -> throwError $ "TlsIO.encryptMessage:\n" ++
            "\tNo keys or not implemented cipher suite"
-- | Verify-and-decrypt one record from @partner@'s write direction.
-- NULL suites pass the ciphertext through; otherwise only TLS 1.2 with
-- AES_128_CBC_SHA(256) is implemented.
--
-- The two AES branches previously duplicated the decrypt-and-case code
-- and differed only in the MAC hash; they now share one helper.
decryptMessage :: Partner ->
    ContentType -> Version -> BS.ByteString -> TlsIo cnt BS.ByteString
decryptMessage partner ct v enc = do
    (vrsn, cs, mwk, sn, mmk) <- getVsnCsMwkSnMmk partner
    case (vrsn, cs, mwk, mmk) of
        (Just CT.TLS12, CipherSuite _ AES_128_CBC_SHA, Just key, Just mk)
            -> decryptWith CT.hashSha1 key sn mk
        (Just CT.TLS12, CipherSuite _ AES_128_CBC_SHA256, Just key, Just mk)
            -> decryptWith CT.hashSha256 key sn mk
        (_, CipherSuite KeyExNULL MsgEncNULL, _, _) -> return enc
        _ -> throwError "TlsIO.decryptMessage: No keys or Bad cipher suite"
    where
    -- run CT.decryptMessage and lift its Either into the TlsIo error channel
    decryptWith hs key sn mk =
        either throwError return $ CT.decryptMessage hs key sn mk ct v enc
-- Per-partner accessors for the write-side session parameters.

cipherSuite :: Partner -> TlsIo cnt CipherSuite
cipherSuite Client = gets tlssClientWriteCipherSuite
cipherSuite Server = gets tlssServerWriteCipherSuite

writeKey :: Partner -> TlsIo cnt (Maybe BS.ByteString)
writeKey Client = gets tlssClientWriteKey
writeKey Server = gets tlssServerWriteKey

macKey :: Partner -> TlsIo cnt (Maybe BS.ByteString)
macKey Client = gets tlssClientWriteMacKey
macKey Server = gets tlssServerWriteMacKey

sequenceNumber :: Partner -> TlsIo cnt Word64
sequenceNumber Client = gets tlssClientSequenceNumber
sequenceNumber Server = gets tlssServerSequenceNumber
-- | Bump @partner@'s write-side sequence number by one.
updateSequenceNumber :: Partner -> TlsIo cnt ()
updateSequenceNumber partner = do
    tlss <- get
    put $ case partner of
        Client -> tlss { tlssClientSequenceNumber =
            succ (tlssClientSequenceNumber tlss) }
        Server -> tlss { tlssServerSequenceNumber =
            succ (tlssServerSequenceNumber tlss) }

-- | Bump the sequence number only when that direction is already
-- ciphered.
updateSequenceNumberSmart :: Partner -> TlsIo cnt ()
updateSequenceNumberSmart partner = do
    ciphered <- isCiphered partner
    when ciphered $ updateSequenceNumber partner
-- | Connection handle after the handshake: negotiated parameters, the
-- derived session keys, and mutable per-direction sequence numbers and
-- RNG shared via 'TVar's.
data TlsServer = TlsServer {
    tlsVersion :: CT.MSVersion,
    tlsCipherSuite :: CipherSuite,
    tlsHandle :: Handle,
    tlsBuffer :: TVar BS.ByteString, -- leftover plaintext between reads
    tlsRandomGen :: TVar SystemRNG,
    tlsClientWriteMacKey :: BS.ByteString,
    tlsServerWriteMacKey :: BS.ByteString,
    tlsClientWriteKey :: BS.ByteString,
    tlsServerWriteKey :: BS.ByteString,
    tlsClientSequenceNumber :: TVar Word64,
    tlsServerSequenceNumber :: TVar Word64
 }
-- | Send application data over the TLS connection.
tPut :: TlsServer -> BS.ByteString -> IO ()
tPut ts msg = tPutWithCT ts ContentTypeApplicationData msg
-- | Encrypt @msg@ and send it as one record of content type @ct@,
-- atomically consuming one client sequence number and advancing the RNG.
tPutWithCT :: TlsServer -> ContentType -> BS.ByteString -> IO ()
tPutWithCT ts ct msg = do
    -- MAC hash determined by the negotiated suite
    hs <- case cs of
        CipherSuite _ AES_128_CBC_SHA -> return CT.hashSha1
        CipherSuite _ AES_128_CBC_SHA256 -> return CT.hashSha256
        _ -> error "TlsIo.tPutWithCT"
    ebody <- atomically $ do
        gen <- readTVar tvgen
        sn <- readTVar tvsn
        let (e, gen') = enc hs gen sn
        writeTVar tvgen gen'
        writeTVar tvsn $ succ sn
        return e
    -- record header: type, version, 2-byte length, then the ciphertext
    BS.hPut h $ BS.concat [
        contentTypeToByteString ct,
        versionToByteString v,
        lenBodyToByteString 2 ebody]
    where
    cs = tlsCipherSuite ts
    h = tlsHandle ts
    key = tlsClientWriteKey ts
    mk = tlsClientWriteMacKey ts
    v = Version 3 3 -- the TLS 1.2 wire version
    tvsn = tlsClientSequenceNumber ts
    tvgen = tlsRandomGen ts
    enc hs gen sn = CT.encryptMessage hs gen key sn mk ct v msg
-- | Read one whole application-data record.  A close_notify alert is
-- echoed back and surfaced to the caller as an EOF 'IOError'; any other
-- content type is unhandled.
tGetWhole :: TlsServer -> IO BS.ByteString
tGetWhole ts = do
    ret <- tGetWholeWithCT ts
    case ret of
        (ContentTypeApplicationData, ad) -> return ad
        (ContentTypeAlert, "\SOH\NUL") -> do
            tPutWithCT ts ContentTypeAlert "\SOH\NUL"
            ioError $ mkIOError
                eofErrorType "tGetWhole" (Just h) Nothing
        -- fixed typo in the error message: "impolemented" -> "implemented"
        _ -> error "not implemented yet"
    where
    h = tlsHandle ts
-- | Receive one TLS record: read the header fields, then the body,
-- decrypt it with the server write keys and return (type, plaintext).
-- Consumes one server sequence number per record.
tGetWholeWithCT :: TlsServer -> IO (ContentType, BS.ByteString)
tGetWholeWithCT ts = do
    hs <- case cs of
        CipherSuite _ AES_128_CBC_SHA -> return CT.hashSha1
        CipherSuite _ AES_128_CBC_SHA256 -> return CT.hashSha256
        _ -> error "TlsIo.tGetWholeWithCT"
    ct <- byteStringToContentType <$> BS.hGet h 1
    v <- byteStringToVersion <$> BS.hGet h 2
    -- 2-byte length prefix, then that many bytes of ciphertext
    enc <- BS.hGet h . byteStringToInt =<< BS.hGet h 2
    sn <- atomically $ do
        n <- readTVar tvsn
        writeTVar tvsn $ succ n
        return n
    case dec hs sn ct v enc of
        Right r -> return (ct, r)
        Left err -> error err
    where
    cs = tlsCipherSuite ts
    h = tlsHandle ts
    key = tlsServerWriteKey ts
    mk = tlsServerWriteMacKey ts
    tvsn = tlsServerSequenceNumber ts
    dec hs sn = CT.decryptMessage hs key sn mk
-- | Read a single byte, refilling the buffer from the next whole TLS
-- record when it is empty.
--
-- The two branches previously duplicated the uncons/write-back logic;
-- it is now shared, with each branch keeping its original error text.
tGetByte :: TlsServer -> IO Word8
tGetByte ts = do
    bfr <- atomically . readTVar $ tlsBuffer ts
    if BS.null bfr
        then tGetWhole ts >>= takeByte "tGetByte: empty data"
        else takeByte "tGetByte: never occur" bfr
    where
    -- pop the first byte and store the remainder back into the buffer
    takeByte msg bs0 = atomically $ case BS.uncons bs0 of
        Just (b, bs) -> do
            writeTVar (tlsBuffer ts) bs
            return b
        _ -> error msg
-- | Read exactly @n@ bytes, pulling additional records as needed.
tGet :: TlsServer -> Int -> IO BS.ByteString
tGet ts n = do
    bfr <- atomically . readTVar $ tlsBuffer ts
    if n <= BS.length bfr then atomically $ do
        let (ret, bfr') = BS.splitAt n bfr
        writeTVar (tlsBuffer ts) bfr'
        return ret
    -- buffer too short: consume all of it (the next record overwrites
    -- the buffer), then recurse for the remaining bytes
    else do msg <- tGetWhole ts
            atomically $ writeTVar (tlsBuffer ts) msg
            (bfr `BS.append`) <$> tGet ts (n - BS.length bfr)
-- | Split off the first line of a byte string.  Accepts CRLF, bare CR
-- and bare LF terminators; the terminator is dropped from both halves.
-- Returns 'Nothing' when no terminator is present.
--
-- The previous version looked for a CR anywhere in the input first, so
-- an LF occurring before a CR (e.g. @"a\nb\rc"@) produced a bogus
-- multi-line "line".  Splitting at the first terminator fixes that.
splitOneLine :: BS.ByteString -> Maybe (BS.ByteString, BS.ByteString)
splitOneLine bs = case BSC.uncons rest of
    Just ('\r', rest') -> case BSC.uncons rest' of
        Just ('\n', rest'') -> Just (l, rest'')
        _ -> Just (l, rest')
    Just ('\n', rest') -> Just (l, rest')
    _ -> Nothing
    where
    (l, rest) = BSC.break (\c -> c == '\r' || c == '\n') bs
-- | Read one line (terminated by CRLF, CR or LF), buffering across
-- records; the terminator is not returned.
tGetLine :: TlsServer -> IO BS.ByteString
tGetLine ts = do
    bfr <- atomically . readTVar $ tlsBuffer ts
    case splitOneLine bfr of
        Just (l, ls) -> atomically $ do
            writeTVar (tlsBuffer ts) ls
            return l
        -- no full line buffered yet: fetch the next record and retry
        _ -> do msg <- tGetWhole ts
                atomically $ writeTVar (tlsBuffer ts) msg
                (bfr `BS.append`) <$> tGetLine ts

-- | Return whatever is buffered, or the next whole record when the
-- buffer is empty.
tGetContent :: TlsServer -> IO BS.ByteString
tGetContent ts = do
    bfr <- atomically . readTVar $ tlsBuffer ts
    if BS.null bfr then tGetWhole ts else atomically $ do
        writeTVar (tlsBuffer ts) BS.empty
        return bfr
-- | Dump the derived session keys to stdout.  Debugging only --
-- printing key material is unsafe anywhere near production.
-- NOTE(review): the @Just@ pattern binds are partial; calling this
-- before 'generateKeys' crashes with a pattern-match failure.
debugPrintKeys :: TlsIo cnt ()
debugPrintKeys = do
    Just ms <- gets tlssMasterSecret
    Just cwmk <- gets tlssClientWriteMacKey
    Just swmk <- gets tlssServerWriteMacKey
    Just cwk <- gets tlssClientWriteKey
    Just swk <- gets tlssServerWriteKey
    -- Just cwi <- gets tlssClientWriteIv
    -- Just swi <- gets tlssServerWriteIv
    liftIO $ do
        putStrLn "### GENERATED KEYS ###"
        putStrLn $ "\tMaster Secret : " ++ show ms
        putStrLn $ "\tClntWr MAC Key: " ++ showKeySingle cwmk
        putStrLn $ "\tSrvrWr MAC Key: " ++ showKeySingle swmk
        putStrLn $ "\tClntWr Key    : " ++ showKeySingle cwk
        putStrLn $ "\tSrvrWr Key    : " ++ showKeySingle swk
        -- putStrLn $ "\tClntWr IV     : " ++ showKeySingle cwi
        -- putStrLn $ "\tSrvrWr IV     : " ++ showKeySingle swi
-- | Shut the connection down: send close_notify, await the peer's
-- close_notify (printing anything unexpected), then close the handle.
tClose :: TlsServer -> IO ()
tClose ts = do
    tPutWithCT ts ContentTypeAlert closeNotify
    reply <- tGetWholeWithCT ts
    if reply == (ContentTypeAlert, closeNotify)
        then return ()
        else print reply
    hClose (tlsHandle ts)
    where
    closeNotify = "\SOH\NUL"
-- | Access and replace the connection's RNG state.
getRandomGen :: TlsIo cnt SystemRNG
getRandomGen = gets tlssRandomGen

setRandomGen :: SystemRNG -> TlsIo cnt ()
setRandomGen g = get >>= \tlss -> put tlss { tlssRandomGen = g }
|
YoshikuniJujo/forest
|
subprojects/tls-analysis/client/TlsIo.hs
|
Haskell
|
bsd-3-clause
| 18,760
|
-----------------------------------------------------------------------------
-- |
-- Module : Text.ParserCombinators.Parsec.Prim
-- Copyright : (c) Daan Leijen 1999-2001
-- License : BSD-style (see the file libraries/parsec/LICENSE)
--
-- Maintainer : daan@cs.uu.nl
-- Stability : provisional
-- Portability : portable
--
-- The primitive parser combinators.
--
-----------------------------------------------------------------------------
module Text.ParserCombinators.Parsec.Prim
( -- operators: label a parser, alternative
(<?>), (<|>)
-- basic types
, Parser, GenParser
, runParser, parse, parseFromFile, parseTest
-- primitive parsers:
-- instance Functor Parser : fmap
-- instance Monad Parser : return, >>=, fail
-- instance MonadPlus Parser : mzero (pzero), mplus (<|>)
, token, tokens, tokenPrim, tokenPrimEx
, try, label, labels, unexpected, pzero
-- primitive because of space behaviour
, many, skipMany
-- user state manipulation
, getState, setState, updateState
-- state manipulation
, getPosition, setPosition
, getInput, setInput
, getParserState, setParserState
) where
import Prelude
import Text.ParserCombinators.Parsec.Pos
import Text.ParserCombinators.Parsec.Error
import Control.Monad
{-# INLINE parsecMap #-}
{-# INLINE parsecReturn #-}
{-# INLINE parsecBind #-}
{-# INLINE parsecZero #-}
{-# INLINE parsecPlus #-}
{-# INLINE token #-}
{-# INLINE tokenPrim #-}
-----------------------------------------------------------
-- Operators:
-- <?> gives a name to a parser (which is used in error messages)
-- <|> is the choice operator
-----------------------------------------------------------
-- '<?>' names a parser for error messages; '<|>' is choice via 'mplus'.
infix 0 <?>
infixr 1 <|>

-- | Label parser @p@ with @msg@; the label is used when @p@ fails
-- without consuming input.
(<?>) :: GenParser tok st a -> String -> GenParser tok st a
p <?> msg = label p msg

-- | First-match choice: @p2@ is tried only when @p1@ fails without
-- consuming input.
(<|>) :: GenParser tok st a -> GenParser tok st a -> GenParser tok st a
p1 <|> p2 = mplus p1 p2
-----------------------------------------------------------
-- User state combinators
-----------------------------------------------------------
-- | Return the current user state.
getState :: GenParser tok st st
getState = getParserState >>= \state -> return (stateUser state)

-- | Replace the user state.
setState :: st -> GenParser tok st ()
setState = updateState . const

-- | Apply @f@ to the user state.
updateState :: (st -> st) -> GenParser tok st ()
updateState f =
    updateParserState (\(State input pos user) -> State input pos (f user))
        >> return ()
-----------------------------------------------------------
-- Parser state combinators
-----------------------------------------------------------
-- | Current source position.
getPosition :: GenParser tok st SourcePos
getPosition = getParserState >>= \state -> return (statePos state)

-- | Remaining input stream.
getInput :: GenParser tok st [tok]
getInput = getParserState >>= \state -> return (stateInput state)

-- | Overwrite the current source position.
setPosition :: SourcePos -> GenParser tok st ()
setPosition pos =
    updateParserState (\(State input _ user) -> State input pos user)
        >> return ()

-- | Overwrite the remaining input stream.
setInput :: [tok] -> GenParser tok st ()
setInput input =
    updateParserState (\(State _ pos user) -> State input pos user)
        >> return ()

-- | Fetch the complete parser state.
getParserState :: GenParser tok st (State tok st)
getParserState = updateParserState id

-- | Install a new state; the result is the state prior to the update.
setParserState :: State tok st -> GenParser tok st (State tok st)
setParserState st = updateParserState (const st)
-----------------------------------------------------------
-- Parser definition.
-- GenParser tok st a:
-- General parser for tokens of type "tok",
-- a user state "st" and a result type "a"
-----------------------------------------------------------
-- | Parser over 'Char' tokens with no user state.
type Parser a = GenParser Char () a

-- | A parser is a state-transition function; the 'Consumed' wrapper
-- records whether any input was consumed, which drives choice and
-- error reporting in the combinators below.
newtype GenParser tok st a = Parser (State tok st -> Consumed (Reply tok st a))
-- unwrap the parser function
runP (Parser p) = p

data Consumed a = Consumed a --input is consumed
                | Empty !a --no input is consumed

data Reply tok st a = Ok !a !(State tok st) ParseError --parsing succeeded with "a"
                    | Error ParseError --parsing failed

data State tok st = State { stateInput :: [tok]
                          , statePos :: !SourcePos
                          , stateUser :: !st
                          }
-----------------------------------------------------------
-- run a parser
-----------------------------------------------------------
-- | Run parser @p@ on the contents of file @fname@.
-- NOTE(review): uses lazy 'readFile'; the file handle stays open until
-- the input is fully forced.
parseFromFile :: Parser a -> SourceName -> IO (Either ParseError a)
parseFromFile p fname
    = do{ input <- readFile fname
        ; return (parse p fname input)
        }

-- | Run @p@ on @input@ and print either the parse error or the result.
parseTest :: Show a => GenParser tok () a -> [tok] -> IO ()
parseTest p input
    = case (runParser p () "" input) of
        Left err -> do{ putStr "parse error at "
                      ; print err
                      }
        Right x  -> print x
-- | Run parser @p@ over @input@, reporting errors against @name@.
parse :: GenParser tok () a -> SourceName -> [tok] -> Either ParseError a
parse p name input
    = runParser p () name input

-- | Most general entry point: run @p@ with initial user state @st@.
runParser :: GenParser tok st a -> st -> SourceName -> [tok] -> Either ParseError a
runParser p st name input
    = case parserReply (runP p (State input (initialPos name) st)) of
        Ok x _ _ -> Right x
        Error err -> Left err

-- strip the Consumed/Empty wrapper; at top level only the reply matters
parserReply result
    = case result of
        Consumed reply -> reply
        Empty reply -> reply
-----------------------------------------------------------
-- Functor: fmap
-----------------------------------------------------------
instance Functor (GenParser tok st) where
  fmap f p = parsecMap f p

-- | Map @f@ over the parse result.  The mapped value is forced with
-- 'seq' before re-wrapping, so 'fmap' is slightly stricter than the
-- equivalent bind-and-return.
parsecMap :: (a -> b) -> GenParser tok st a -> GenParser tok st b
parsecMap f (Parser p)
    = Parser (\state ->
        case (p state) of
          Consumed reply -> Consumed (mapReply reply)
          Empty reply -> Empty (mapReply reply)
      )
    where
      mapReply reply
        = case reply of
            Ok x state err -> let fx = f x
                              in seq fx (Ok fx state err)
            Error err -> Error err
-----------------------------------------------------------
-- Monad: return, sequence (>>=) and fail
-----------------------------------------------------------
-- NOTE(review): pre-AMP instance (no Functor/Applicative superclass
-- chain declared here, and 'fail' lives in Monad) -- this targets a
-- pre-GHC-7.10-era compiler; confirm the build setup.
instance Monad (GenParser tok st) where
  return x = parsecReturn x
  p >>= f = parsecBind p f
  fail msg = parsecFail msg

-- | Succeed without consuming input; the reply carries an "unknown"
-- error at the current position for later merging.
parsecReturn :: a -> GenParser tok st a
parsecReturn x
  = Parser (\state -> Empty (Ok x state (unknownError state)))

-- | Sequencing.  The whole step is Consumed when either side consumed
-- input; the pending error of an empty first step is merged into the
-- second step's reply.
parsecBind :: GenParser tok st a -> (a -> GenParser tok st b) -> GenParser tok st b
parsecBind (Parser p) f
  = Parser (\state ->
      case (p state) of
        Consumed reply1
          -> Consumed $
             case (reply1) of
               Ok x state1 err1 -> case runP (f x) state1 of
                                     Empty reply2 -> mergeErrorReply err1 reply2
                                     Consumed reply2 -> reply2
               Error err1 -> Error err1
        Empty reply1
          -> case (reply1) of
               Ok x state1 err1 -> case runP (f x) state1 of
                                     Empty reply2 -> Empty (mergeErrorReply err1 reply2)
                                     other -> other
               Error err1 -> Empty (Error err1)
    )

-- merge a pending error from an earlier empty step into a reply
mergeErrorReply err1 reply
  = case reply of
      Ok x state err2 -> Ok x state (mergeError err1 err2)
      Error err2 -> Error (mergeError err1 err2)

-- | Fail without consuming input, with @msg@ at the current position.
parsecFail :: String -> GenParser tok st a
parsecFail msg
  = Parser (\state ->
      Empty (Error (newErrorMessage (Message msg) (statePos state))))
-----------------------------------------------------------
-- MonadPlus: alternative (mplus) and mzero
-----------------------------------------------------------
instance MonadPlus (GenParser tok st) where
  mzero = parsecZero
  mplus p1 p2 = parsecPlus p1 p2

-- | Parser that always fails without consuming input.
pzero :: GenParser tok st a
pzero = parsecZero

parsecZero :: GenParser tok st a
parsecZero
  = Parser (\state -> Empty (Error (unknownError state)))

-- | Predictive choice: @p2@ is tried only when @p1@ fails *without*
-- consuming input; when @p2@ also consumes nothing, the errors of
-- both branches are merged.
parsecPlus :: GenParser tok st a -> GenParser tok st a -> GenParser tok st a
parsecPlus (Parser p1) (Parser p2)
  = Parser (\state ->
      case (p1 state) of
        Empty (Error err) -> case (p2 state) of
                               Empty reply -> Empty (mergeErrorReply err reply)
                               consumed -> consumed
        other -> other
    )
{-
-- variant that favors a consumed reply over an empty one, even it is not the first alternative.
empty@(Empty reply) -> case reply of
Error err ->
case (p2 state) of
Empty reply -> Empty (mergeErrorReply err reply)
consumed -> consumed
ok ->
case (p2 state) of
Empty reply -> empty
consumed -> consumed
consumed -> consumed
-}
-----------------------------------------------------------
-- Primitive Parsers:
-- try, token(Prim), label, unexpected and updateState
-----------------------------------------------------------
-- | Backtracking: when @p@ fails after consuming input, pretend
-- nothing was consumed (and reset the error position), so '<|>' can
-- still try the next alternative.
try :: GenParser tok st a -> GenParser tok st a
try (Parser p)
  = Parser (\state@(State input pos user) ->
      case (p state) of
        Consumed (Error err) -> Empty (Error (setErrorPos pos err))
        Consumed ok -> Consumed ok -- was: Empty ok
        empty -> empty
    )

-- | Token parser for streams where each token carries its own position.
token :: (tok -> String) -> (tok -> SourcePos) -> (tok -> Maybe a) -> GenParser tok st a
token show tokpos test
  = tokenPrim show nextpos test
  where
    -- position of the next token, or of the current one at end of input
    nextpos _ _ (tok:toks) = tokpos tok
    nextpos _ tok [] = tokpos tok
-- | 'tokenPrimEx' without a user-state update function.
tokenPrim :: (tok -> String) -> (SourcePos -> tok -> [tok] -> SourcePos) -> (tok -> Maybe a) -> GenParser tok st a
tokenPrim show nextpos test
  = tokenPrimEx show nextpos Nothing test

-- | The most primitive token recogniser. The expression @tokenPrimEx show nextpos mbnextstate test@,
-- recognises tokens when @test@ returns @Just x@ (and returns the value @x@). Tokens are shown in
-- error messages using @show@. The position is calculated using @nextpos@, and finally, @mbnextstate@,
-- can hold a function that updates the user state on every token recognised (nice to count tokens :-).
-- The function is packed into a 'Maybe' type for performance reasons.
tokenPrimEx :: (tok -> String) ->
               (SourcePos -> tok -> [tok] -> SourcePos) ->
               Maybe (SourcePos -> tok -> [tok] -> st -> st) ->
               (tok -> Maybe a) ->
               GenParser tok st a
tokenPrimEx show nextpos mbNextState test
    -- the Maybe is inspected once, outside the per-token lambda, so the
    -- no-update path pays nothing for the optional state function
  = case mbNextState of
      Nothing
        -> Parser (\state@(State input pos user) ->
             case input of
               (c:cs) -> case test c of
                           Just x -> let newpos = nextpos pos c cs
                                         newstate = State cs newpos user
                                     in seq newpos $ seq newstate $
                                        Consumed (Ok x newstate (newErrorUnknown newpos))
                           Nothing -> Empty (sysUnExpectError (show c) pos)
               [] -> Empty (sysUnExpectError "" pos)
           )
      Just nextState
        -> Parser (\state@(State input pos user) ->
             case input of
               (c:cs) -> case test c of
                           Just x -> let newpos = nextpos pos c cs
                                         newuser = nextState pos c cs user
                                         newstate = State cs newpos newuser
                                     in seq newpos $ seq newstate $
                                        Consumed (Ok x newstate (newErrorUnknown newpos))
                           Nothing -> Empty (sysUnExpectError (show c) pos)
               [] -> Empty (sysUnExpectError "" pos)
           )
-- | Attach a single expected-item name to @p@.
label :: GenParser tok st a -> String -> GenParser tok st a
label p msg
  = labels p [msg]

-- | Replace the "expecting" items of @p@'s error with @msgs@ -- but
-- only when @p@ did not consume input; a consumed-input error keeps
-- its own, more precise message.
labels :: GenParser tok st a -> [String] -> GenParser tok st a
labels (Parser p) msgs
  = Parser (\state ->
      case (p state) of
        Empty reply -> Empty $
          case (reply) of
            Error err -> Error (setExpectErrors err msgs)
            Ok x state1 err | errorIsUnknown err -> reply
                            | otherwise -> Ok x state1 (setExpectErrors err msgs)
        other -> other
    )
-- | Apply @f@ to the parser state; the result value is the state
-- *before* the update.
updateParserState :: (State tok st -> State tok st) -> GenParser tok st (State tok st)
updateParserState f
  = Parser (\state -> let newstate = f state
                      in Empty (Ok state newstate (unknownError newstate)))

-- | Fail (without consuming) with an "unexpected @msg@" message.
unexpected :: String -> GenParser tok st a
unexpected msg
  = Parser (\state -> Empty (Error (newErrorMessage (UnExpect msg) (statePos state))))

-- install the given "expecting" messages on an error
setExpectErrors err [] = setErrorMessage (Expect "") err
setExpectErrors err [msg] = setErrorMessage (Expect msg) err
setExpectErrors err (msg:msgs) = foldr (\msg err -> addErrorMessage (Expect msg) err)
                                       (setErrorMessage (Expect msg) err) msgs

sysUnExpectError msg pos = Error (newErrorMessage (SysUnExpect msg) pos)
unknownError state = newErrorUnknown (statePos state)
-----------------------------------------------------------
-- Parsers unfolded for space:
-- if many and skipMany are not defined as primitives,
-- they will overflow the stack on large inputs
-----------------------------------------------------------
-- | Zero or more @p@; results are accumulated with cons and reversed
-- at the end.
many :: GenParser tok st a -> GenParser tok st [a]
many p
  = do{ xs <- manyAccum (:) p
      ; return (reverse xs)
      }

-- | Zero or more @p@, discarding the results.
skipMany :: GenParser tok st a -> GenParser tok st ()
skipMany p
  = do{ manyAccum (\x xs -> []) p
      ; return ()
      }

-- | Tail-recursive worker for 'many'/'skipMany'.  An empty success
-- means @p@ accepts the empty string and would loop forever -- hence
-- the 'error' calls on those paths.
manyAccum :: (a -> [a] -> [a]) -> GenParser tok st a -> GenParser tok st [a]
manyAccum accum (Parser p)
  = Parser (\state ->
      let walk xs state r = case r of
                              Empty (Error err) -> Ok xs state err
                              Empty ok -> error "Text.ParserCombinators.Parsec.Prim.many: combinator 'many' is applied to a parser that accepts an empty string."
                              Consumed (Error err) -> Error err
                              Consumed (Ok x state' err) -> let ys = accum x xs
                                                            in seq ys (walk ys state' (p state'))
      in case (p state) of
           Empty reply -> case reply of
                            Ok x state' err -> error "Text.ParserCombinators.Parsec.Prim.many: combinator 'many' is applied to a parser that accepts an empty string."
                            Error err -> Empty (Ok [] state err)
           consumed -> Consumed $ walk [] state consumed)
-----------------------------------------------------------
-- Parsers unfolded for speed:
-- tokens
-----------------------------------------------------------
{- specification of @tokens@:
tokens showss nextposs s
= scan s
where
scan [] = return s
scan (c:cs) = do{ token show nextpos c <?> shows s; scan cs }
show c = shows [c]
nextpos pos c = nextposs pos [c]
-}
-- | Match the token sequence @s@ exactly.  The result is Consumed as
-- soon as the first token matches ('walk1' vs 'walk'); errors report
-- the whole expected sequence rendered with @shows@.
tokens :: Eq tok => ([tok] -> String) -> (SourcePos -> [tok] -> SourcePos) -> [tok] -> GenParser tok st [tok]
tokens shows nextposs s
  = Parser (\state@(State input pos user) ->
      let
        ok cs = let newpos = nextposs pos s
                    newstate = State cs newpos user
                in seq newpos $ seq newstate $
                   (Ok s newstate (newErrorUnknown newpos))
        errEof = Error (setErrorMessage (Expect (shows s))
                         (newErrorMessage (SysUnExpect "") pos))
        errExpect c = Error (setErrorMessage (Expect (shows s))
                              (newErrorMessage (SysUnExpect (shows [c])) pos))
        walk [] cs = ok cs
        walk xs [] = errEof
        walk (x:xs) (c:cs)| x == c = walk xs cs
                          | otherwise = errExpect c
        walk1 [] cs = Empty (ok cs)
        walk1 xs [] = Empty (errEof)
        walk1 (x:xs) (c:cs)| x == c = Consumed (walk xs cs)
                           | otherwise = Empty (errExpect c)
      in walk1 s input)
|
OS2World/DEV-UTIL-HUGS
|
libraries/Text/ParserCombinators/Parsec/Prim.hs
|
Haskell
|
bsd-3-clause
| 17,579
|
-- | All Pythagorean triplets (x, y, z) with x < y < z and x + y + z == n.
-- The original brute force enumerated z independently and filtered on
-- x+y+z == n (O(n^3), "takes a minute or so" at n = 1000); here z is
-- fixed as n - x - y, cutting the search to O(n^2) while producing the
-- same triples in the same order.
triplet n =
    [ (x, y, z)
    | x <- [1 .. n - 1]
    , y <- [x + 1 .. n - x]
    , let z = n - x - y
    , z > y
    , x * x + y * y == z * z
    ]

-- | Project Euler problem 9: triplets summing to 1000.
problem9 = triplet 1000
|
thomas-oo/projectEulerHaskell
|
src/Problem9.hs
|
Haskell
|
bsd-3-clause
| 293
|
{-# LANGUAGE GeneralizedNewtypeDeriving, DeriveGeneric #-}
module MidiRhythm.NotePress (
Duration(..),
Press(..),
NotePress(..),
Velocity(..),
Pitch(..),
ElapsedTime(..),
PressCount(..),
) where
import qualified Numeric.NonNegative.Wrapper as NonNeg
import GHC.Generics
-- | Non-negative elapsed time (presumably MIDI ticks -- units not
-- shown here; confirm at the call sites).
newtype ElapsedTime = ElapsedTime NonNeg.Integer
  deriving (Show, Eq, Ord, Num, Integral, Real, Enum)
-- | Non-negative note velocity (the type only enforces non-negativity).
newtype Velocity = Velocity NonNeg.Int
  deriving (Show, Eq, Ord, Num, Integral, Real, Enum)
-- | A duration is just an elapsed time.
type Duration = ElapsedTime
-- | Non-negative pitch number.
newtype Pitch = Pitch NonNeg.Int
  deriving (Show, Eq, Ord, Num, Integral, Real, Enum)
-- | A count of presses.
newtype PressCount = PressCount NonNeg.Int
  deriving (Show, Eq, Ord, Num, Integral, Real, Enum)
-- | A key press: onset time, velocity and duration (no pitch).
data Press = Press ElapsedTime Velocity Duration deriving (Show, Eq, Ord)
-- | A press together with its pitch.
data NotePress = NotePress {
  notePressTime :: ElapsedTime,
  notePressVelocity :: Velocity,
  notePressDuration :: Duration,
  notePressPitch :: Pitch
} deriving (Show, Eq, Ord, Generic)
|
a10nik/midiRhythm
|
src/MidiRhythm/NotePress.hs
|
Haskell
|
bsd-3-clause
| 979
|
{-# LANGUAGE StandaloneDeriving
#-}
import Data.List (concatMap, nub)
import Data.Ratio
import Data.ByteString.Char8 (pack)
import Test.QuickCheck
import Data.Trie
import qualified Data.ByteString.UTF8 as UTF8
import qualified Text.JSONb as JSONb
-- | Property: every structured sample in 'structure_tests' parses to
-- its expected JSON value.
prop_structures_parse = samples structure_tests

-- | Pick a random test case and check it, classifying QuickCheck
-- output by the case's tag list (third tuple component).
samples tests = forAll (elements tests) with_classifiers
  where
    with_classifiers :: (String, JSONb.JSON, [String]) -> Property
    with_classifiers = compound (property . array_parse) classifiers
      where
        -- apply all classifier wrappers in turn
        compound = foldl (flip ($))
    array_parse (s, j, info) = rt s == Right j
    classifiers = (fmap classifier . nub . concatMap third) tests
      where
        third (_,_,t) = t
        classifier string p x = classify (string `elem` third x) string $ p x
-- | Round-trip an 'Integer' through its decimal rendering and the JSON
-- parser; 'collect' buckets the tested magnitudes.
prop_integer_round_trip :: Integer -> Property
prop_integer_round_trip n = collect bin $ case (rt . show) n of
  Right (JSONb.Number r) -> r == fromIntegral n
  _ -> False
  where
    -- NOTE(review): the final guard reads @n >= -100@ but, after the
    -- earlier guards have matched everything below 100, it only fires
    -- for n >= 100 -- so the [100..) bucket label is correct even
    -- though the guard looks wrong.
    bin
      | n == 0 = Bounds (Open (-1)) (Open 1)
      | n >= 1 && n < 100 = Bounds (Closed 1) (Open 100)
      | n > -100 && n <= -1 = Bounds (Open (-100)) (Closed (-1))
      | n <= -100 = Bounds Infinite (Closed (-100))
      | n >= -100 = Bounds (Closed (100)) Infinite
-- | Round-trip a 'Double' through 'show' and the JSON parser, checking
-- the parsed 'Rational' converts back exactly; magnitudes bucketed as
-- in 'prop_integer_round_trip' (same quirky-but-correct final guard).
prop_double_round_trip :: Double -> Property
prop_double_round_trip n = collect bin $ case (rt . show) n of
  Right (JSONb.Number r) -> fromRational r == n
  _ -> False
  where
    bin
      | n < 1 && n > -1 = Bounds (Open (-1)) (Open 1)
      | n >= 1 && n < 100 = Bounds (Closed 1) (Open 100)
      | n > -100 && n <= -1 = Bounds (Open (-100)) (Closed (-1))
      | n <= -100 = Bounds Infinite (Closed (-100))
      | n >= -100 = Bounds (Closed (100)) Infinite
-- | Round-trip a string through JSON encode/decode (as UTF-8 bytes);
-- samples containing non-ASCII or escaped characters are classified.
prop_string_round_trip s = high . escapes $ case round_trip bytes of
  Right (JSONb.String b) -> bytes == b
  _ -> False
  where
    bytes = UTF8.fromString s
    round_trip = JSONb.decode . JSONb.encode JSONb.Compact . JSONb.String
    high = classify (any (> '\x7f') s) "above ASCII"
    escapes = classify (any JSONb.escaped s) "escaped chars"
-- | A labelled value range, used only for 'collect' output.
-- NOTE(review): GADT-style declarations need the GADTs extension,
-- which is not in this file's pragma list (only StandaloneDeriving) --
-- presumably enabled elsewhere; confirm against the build config.
data Bounds n where
  Bounds :: (Show n, Num n) => Bound n -> Bound n -> Bounds n

-- | Render in interval notation, e.g. @[1..100)@.
instance (Show n) => Show (Bounds n) where
  show (Bounds l r) = case (l, r) of
    (Open l, Open r) -> "(" ++ show l ++ ".." ++ show r ++ ")"
    (Closed l, Closed r) -> "[" ++ show l ++ ".." ++ show r ++ "]"
    (Closed l, Open r) -> "[" ++ show l ++ ".." ++ show r ++ ")"
    (Open l, Closed r) -> "(" ++ show l ++ ".." ++ show r ++ "]"
    (Closed l, Infinite) -> "[" ++ show l ++ ".."
    (Infinite, Closed r) -> ".." ++ show r ++ "]"
    (Open l, Infinite) -> "(" ++ show l ++ ".."
    (Infinite, Open r) -> ".." ++ show r ++ ")"
    (Infinite, Infinite) -> ".."

-- | One endpoint: open, closed, or absent.
data Bound n where
  Open :: (Show n, Num n) => n -> Bound n
  Closed :: (Show n, Num n) => n -> Bound n
  Infinite :: (Show n, Num n) => Bound n
-- round-trip helper: parse with a trailing space appended (see below)
rt = JSONb.decode . pack . (++ " ")
{- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    We have to add a space so that the number parser terminates.
 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -}
-- | Structured parse cases: (input text, expected JSON value,
-- classifier tags).  Several inputs deliberately exercise odd spacing
-- and trailing commas.
structure_tests =
  [ ( "[ 7, 6 ]", (JSONb.Array . fmap JSONb.Number) [7, 6]
    , ["array", "excessive spacing", "integers"] )
  , ( "[]", JSONb.Array []
    , ["array", "compact spacing", "empty"] )
  , ( "[ ]", JSONb.Array []
    , ["array", "normal spacing", "empty"] )
  , ( "[7,6]", (JSONb.Array . fmap JSONb.Number) [7, 6]
    , ["array", "compact spacing", "integers"] )
  , ( "[7.6, 21]", (JSONb.Array . fmap JSONb.Number) [7.6, 21.0]
    , ["array", "normal spacing", "floats"] )
  , ( "[22.0 ,7.6,]", (JSONb.Array . fmap JSONb.Number) [22, 7.6]
    , ["array", "weird comma spacing", "extra comma", "floats"] )
  , ( "[\"22.0\" ,7.6,]"
    , JSONb.Array [(JSONb.String . pack) "22.0", JSONb.Number 7.6]
    , ["array", "weird comma spacing", "extra comma", "floats", "strings"] )
  , ( "{ \"ixion\":6 }"
    , (JSONb.Object . fromList) [(pack "ixion", JSONb.Number 6)]
    , ["object", "no commas", "integers"] )
  , ( "{ \"Ack\":\"Success\" ,\"Build\" :\"e605_core_Bundled_8000231_R1\"}"
    , (JSONb.Object . fromList)
        [ (pack "Ack", JSONb.String (pack "Success"))
        , ( pack "Build"
          , JSONb.String (pack "e605_core_Bundled_8000231_R1") ) ]
    , ["object", "random spacing", "strings"] )
  , ( "{\n\"Ack\"\n:\n\"Success\" , \"Build\":\"e605_core_Bundled_8000231_R1\"}"
    , (JSONb.Object . fromList)
        [ (pack "Ack", JSONb.String (pack "Success"))
        , ( pack "Build"
          , JSONb.String (pack "e605_core_Bundled_8000231_R1") ) ]
    , ["object", "newlines", "strings"] )
  , ( "{}", JSONb.Object empty
    , ["object", "compact spacing", "empty"] )
  , ( "{ }", JSONb.Object empty
    , ["object", "normal spacing", "empty"] )
  , ( "{\"Ack\":\"Success\",\"Build\":\"e605_core_Bundled_8000231_R1\"}"
    , (JSONb.Object . fromList)
        [ (pack "Ack", JSONb.String (pack "Success"))
        , ( pack "Build"
          , JSONb.String (pack "e605_core_Bundled_8000231_R1") ) ]
    , ["object", "compact spacing", "strings"] )
  ]
|
solidsnack/JSONb
|
test/SimpleUnits.hs
|
Haskell
|
bsd-3-clause
| 5,575
|
{-
(c) The AQUA Project, Glasgow University, 1993-1998
\section[Simplify]{The main module of the simplifier}
-}
{-# LANGUAGE CPP #-}
module Simplify ( simplTopBinds, simplExpr ) where
#include "HsVersions.h"
import DynFlags
import SimplMonad
import Type hiding ( substTy, extendTvSubst, substTyVar )
import SimplEnv
import SimplUtils
import FamInstEnv ( FamInstEnv )
import Literal ( litIsLifted ) --, mkMachInt ) -- temporalily commented out. See #8326
import Id
import MkId ( seqId, voidPrimId )
import MkCore ( mkImpossibleExpr, castBottomExpr )
import IdInfo
import Name ( mkSystemVarName, isExternalName )
import Coercion hiding ( substCo, substTy, substCoVar, extendTvSubst )
import OptCoercion ( optCoercion )
import FamInstEnv ( topNormaliseType_maybe )
import DataCon ( DataCon, dataConWorkId, dataConRepStrictness
, isMarkedStrict ) --, dataConTyCon, dataConTag, fIRST_TAG )
--import TyCon ( isEnumerationTyCon ) -- temporalily commented out. See #8326
import CoreMonad ( Tick(..), SimplifierMode(..) )
import CoreSyn
import Demand ( StrictSig(..), dmdTypeDepth, isStrictDmd )
import PprCore ( pprCoreExpr )
import CoreUnfold
import CoreUtils
import CoreArity
--import PrimOp ( tagToEnumKey ) -- temporalily commented out. See #8326
import Rules ( lookupRule, getRules )
import TysPrim ( voidPrimTy ) --, intPrimTy ) -- temporalily commented out. See #8326
import BasicTypes ( TopLevelFlag(..), isTopLevel, RecFlag(..) )
import MonadUtils ( foldlM, mapAccumLM, liftIO )
import Maybes ( orElse )
--import Unique ( hasKey ) -- temporalily commented out. See #8326
import Control.Monad
import Data.List ( mapAccumL )
import Outputable
import FastString
import Pair
import Util
import ErrUtils
{-
The guts of the simplifier is in this module, but the driver loop for
the simplifier is in SimplCore.hs.
-----------------------------------------
*** IMPORTANT NOTE ***
-----------------------------------------
The simplifier used to guarantee that the output had no shadowing, but
it does not do so any more. (Actually, it never did!) The reason is
documented with simplifyArgs.
-----------------------------------------
*** IMPORTANT NOTE ***
-----------------------------------------
Many parts of the simplifier return a bunch of "floats" as well as an
expression. This is wrapped as a datatype SimplUtils.FloatsWith.
All "floats" are let-binds, not case-binds, but some non-rec lets may
be unlifted (with RHS ok-for-speculation).
-----------------------------------------
ORGANISATION OF FUNCTIONS
-----------------------------------------
simplTopBinds
- simplify all top-level binders
- for NonRec, call simplRecOrTopPair
- for Rec, call simplRecBind
------------------------------
simplExpr (applied lambda) ==> simplNonRecBind
simplExpr (Let (NonRec ...) ..) ==> simplNonRecBind
simplExpr (Let (Rec ...) ..) ==> simplify binders; simplRecBind
------------------------------
simplRecBind [binders already simplified]
- use simplRecOrTopPair on each pair in turn
simplRecOrTopPair [binder already simplified]
Used for: recursive bindings (top level and nested)
top-level non-recursive bindings
Returns:
- check for PreInlineUnconditionally
- simplLazyBind
simplNonRecBind
Used for: non-top-level non-recursive bindings
beta reductions (which amount to the same thing)
Because it can deal with strict args, it takes a
"thing-inside" and returns an expression
- check for PreInlineUnconditionally
- simplify binder, including its IdInfo
- if strict binding
simplStrictArg
mkAtomicArgs
completeNonRecX
else
simplLazyBind
addFloats
simplNonRecX: [given a *simplified* RHS, but an *unsimplified* binder]
Used for: binding case-binder and constr args in a known-constructor case
- check for PreInlineUnconditionally
- simplify binder
- completeNonRecX
------------------------------
simplLazyBind: [binder already simplified, RHS not]
Used for: recursive bindings (top level and nested)
top-level non-recursive bindings
non-top-level, but *lazy* non-recursive bindings
[must not be strict or unboxed]
Returns floats + an augmented environment, not an expression
- substituteIdInfo and add result to in-scope
[so that rules are available in rec rhs]
- simplify rhs
- mkAtomicArgs
- float if exposes constructor or PAP
- completeBind
completeNonRecX: [binder and rhs both simplified]
- if the thing needs case binding (unlifted and not ok-for-spec)
build a Case
else
completeBind
addFloats
completeBind: [given a simplified RHS]
[used for both rec and non-rec bindings, top level and not]
- try PostInlineUnconditionally
- add unfolding [this is the only place we add an unfolding]
- add arity
Right hand sides and arguments
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In many ways we want to treat
(a) the right hand side of a let(rec), and
(b) a function argument
in the same way. But not always! In particular, we would
like to leave these arguments exactly as they are, so they
will match a RULE more easily.
f (g x, h x)
g (+ x)
It's harder to make the rule match if we ANF-ise the constructor,
or eta-expand the PAP:
f (let { a = g x; b = h x } in (a,b))
g (\y. + x y)
On the other hand if we see the let-defns
p = (g x, h x)
q = + x
then we *do* want to ANF-ise and eta-expand, so that p and q
can be safely inlined.
Even floating lets out is a bit dubious. For let RHS's we float lets
out if that exposes a value, so that the value can be inlined more vigorously.
For example
r = let x = e in (x,x)
Here, if we float the let out we'll expose a nice constructor. We did experiments
that showed this to be a generally good thing. But it was a bad thing to float
lets out unconditionally, because that meant they got allocated more often.
For function arguments, there's less reason to expose a constructor (it won't
get inlined). Just possibly it might make a rule match, but I'm pretty skeptical.
So for the moment we don't float lets out of function arguments either.
Eta expansion
~~~~~~~~~~~~~~
For eta expansion, we want to catch things like
case e of (a,b) -> \x -> case a of (p,q) -> \y -> r
If the \x was on the RHS of a let, we'd eta expand to bring the two
lambdas together. And in general that's a good thing to do. Perhaps
we should eta expand wherever we find a (value) lambda? Then the eta
expansion at a let RHS can concentrate solely on the PAP case.
************************************************************************
* *
\subsection{Bindings}
* *
************************************************************************
-}
-- | Simplify all the top-level bindings of a module.  The bindings are
-- processed left to right, threading the 'SimplEnv' through so that each
-- binding sees the results of the previous ones.
simplTopBinds :: SimplEnv -> [InBind] -> SimplM SimplEnv
simplTopBinds env0 binds0
  = do  { -- Put all the top-level binders into scope at the start
          -- so that if a transformation rule has unexpectedly brought
          -- anything into scope, then we don't get a complaint about that.
          -- It's rather as if the top-level binders were imported.
          -- See note [Glomming] in OccurAnal.
        ; env1 <- simplRecBndrs env0 (bindersOfBinds binds0)
        ; env2 <- simpl_binds env1 binds0
          -- Tick bookkeeping for the end of the simplifier run
        ; freeTick SimplifierDone
        ; return env2 }
  where
        -- We need to track the zapped top-level binders, because
        -- they should have their fragile IdInfo zapped (notably occurrence info)
        -- That's why we run down binds and bndrs' simultaneously.
        --
        -- Fold over the bindings, accumulating the environment.
        simpl_binds :: SimplEnv -> [InBind] -> SimplM SimplEnv
        simpl_binds env []           = return env
        simpl_binds env (bind:binds) = do { env' <- simpl_bind env bind
                                          ; simpl_binds env' binds }

        simpl_bind env (Rec pairs)  = simplRecBind env TopLevel pairs
        simpl_bind env (NonRec b r) = simplRecOrTopPair env' TopLevel NonRecursive b b' r
            where
              -- The binder was already put in scope above, so look up its
              -- simplified version and attach its (substituted) RULES to it.
              (env', b') = addBndrRules env b (lookupRecBndr env b)
{-
************************************************************************
* *
\subsection{Lazy bindings}
* *
************************************************************************
simplRecBind is used for
* recursive bindings only
-}
-- | Simplify a group of recursive bindings.  First attach the (substituted)
-- rules to every binder, then simplify each pair in turn starting from an
-- environment with an empty float set, and finally merge the accumulated
-- floats back into the original environment.
simplRecBind :: SimplEnv -> TopLevelFlag
             -> [(InId, InExpr)]
             -> SimplM SimplEnv
simplRecBind env0 top_lvl pairs0
  = do  { let (env_with_info, triples) = mapAccumL add_rules env0 pairs0
        ; env1 <- go (zapFloats env_with_info) triples
        ; return (env0 `addRecFloats` env1) }
        -- addFloats adds the floats from env1,
        -- _and_ updates env0 with the in-scope set from env1
  where
    add_rules :: SimplEnv -> (InBndr,InExpr) -> (SimplEnv, (InBndr, OutBndr, InExpr))
        -- Add the (substituted) rules to the binder
    add_rules env (bndr, rhs) = (env', (bndr, bndr', rhs))
        where
          (env', bndr') = addBndrRules env bndr (lookupRecBndr env bndr)

    -- Walk the (old binder, new binder, rhs) triples left to right,
    -- threading the environment through.
    go env [] = return env
    go env ((old_bndr, new_bndr, rhs) : pairs)
        = do { env' <- simplRecOrTopPair env top_lvl Recursive old_bndr new_bndr rhs
             ; go env' pairs }
{-
simplOrTopPair is used for
* recursive bindings (whether top level or not)
* top-level non-recursive bindings
It assumes the binder has already been simplified, but not its IdInfo.
-}
-- | Simplify one binding pair: a member of a recursive group, or a
-- top-level non-recursive binding.  The binder has already been
-- simplified, but not its IdInfo.
simplRecOrTopPair :: SimplEnv
                  -> TopLevelFlag -> RecFlag
                  -> InId -> OutBndr -> InExpr  -- Binder and rhs
                  -> SimplM SimplEnv    -- Returns an env that includes the binding
simplRecOrTopPair env top_lvl is_rec old_bndr new_bndr rhs
  = do { dflags <- getDynFlags
       ; trace_bind dflags $
           if preInlineUnconditionally dflags env top_lvl old_bndr rhs
                    -- Check for unconditional inline
           then do tick (PreInlineUnconditionally old_bndr)
                   -- Inline: record a substitution instead of a binding
                   return (extendIdSubst env old_bndr (mkContEx env rhs))
           else simplLazyBind env top_lvl is_rec old_bndr new_bndr rhs env }
  where
    -- With -dverbose-core2core, announce each binder before working on it;
    -- otherwise just run the action.
    trace_bind dflags thing_inside
      | not (dopt Opt_D_verbose_core2core dflags)
      = thing_inside
      | otherwise
      = pprTrace "SimplBind" (ppr old_bndr) thing_inside
        -- trace_bind emits a trace for each top-level binding, which
        -- helps to locate the tracing for inlining and rule firing
{-
simplLazyBind is used for
* [simplRecOrTopPair] recursive bindings (whether top level or not)
* [simplRecOrTopPair] top-level non-recursive bindings
* [simplNonRecE] non-top-level *lazy* non-recursive bindings
Nota bene:
1. It assumes that the binder is *already* simplified,
and is in scope, and its IdInfo too, except unfolding
2. It assumes that the binder type is lifted.
3. It does not check for pre-inline-unconditionally;
that should have been done already.
-}
-- | Simplify the RHS of a lazy binding, float lets out of it where that is
-- beneficial, and complete the binding.  See the Note-style commentary just
-- above for the callers and preconditions.
simplLazyBind :: SimplEnv
              -> TopLevelFlag -> RecFlag
              -> InId -> OutId          -- Binder, both pre-and post simpl
                                        -- The OutId has IdInfo, except arity, unfolding
              -> InExpr -> SimplEnv     -- The RHS and its environment
              -> SimplM SimplEnv
-- Precondition: rhs obeys the let/app invariant
simplLazyBind env top_lvl is_rec bndr bndr1 rhs rhs_se
  = -- pprTrace "simplLazyBind" ((ppr bndr <+> ppr bndr1) $$ ppr rhs $$ ppr (seIdSubst rhs_se)) $
    do  { let   rhs_env     = rhs_se `setInScope` env
                -- Split off leading type lambdas, but only if there is no
                -- value lambda underneath (see comment below).
                (tvs, body) = case collectTyBinders rhs of
                                (tvs, body) | not_lam body -> (tvs,body)
                                            | otherwise    -> ([], rhs)
                not_lam (Lam _ _)  = False
                not_lam (Tick t e) | not (tickishFloatable t)
                                   = not_lam e -- eta-reduction could float
                not_lam _          = True
        -- Do not do the "abstract tyvar" thing if there's
        -- a lambda inside, because it defeats eta-reduction
        --    f = /\a. \x. g a x
        -- should eta-reduce.

        ; (body_env, tvs') <- simplBinders rhs_env tvs
                -- See Note [Floating and type abstraction] in SimplUtils

        -- Simplify the RHS
        ; let rhs_cont = mkRhsStop (substTy body_env (exprType body))
        ; (body_env1, body1) <- simplExprF body_env body rhs_cont
        -- ANF-ise a constructor or PAP rhs
        ; (body_env2, body2) <- prepareRhs top_lvl body_env1 bndr1 body1

        -- Decide whether to float the lets gathered in body_env2 out of
        -- the RHS; if not, wrap them back around the simplified body.
        ; (env', rhs')
            <-  if not (doFloatFromRhs top_lvl is_rec False body2 body_env2)
                then                    -- No floating, revert to body1
                     do { rhs' <- mkLam tvs' (wrapFloats body_env1 body1) rhs_cont
                        ; return (env, rhs') }

                else if null tvs then   -- Simple floating
                     do { tick LetFloatFromLet
                        ; return (addFloats env body_env2, body2) }

                else                    -- Do type-abstraction first
                     do { tick LetFloatFromLet
                        ; (poly_binds, body3) <- abstractFloats tvs' body_env2 body2
                        ; rhs' <- mkLam tvs' body3 rhs_cont
                        ; env' <- foldlM (addPolyBind top_lvl) env poly_binds
                        ; return (env', rhs') }

        ; completeBind env' top_lvl bndr bndr1 rhs' }
{-
A specialised variant of simplNonRec used when the RHS is already simplified,
notably in knownCon. It uses case-binding where necessary.
-}
-- | Bind an already-simplified RHS to a (not yet simplified) binder,
-- dropping the binding when the binder is dead and turning a coercion RHS
-- into a coercion substitution.
simplNonRecX :: SimplEnv
             -> InId            -- Old binder
             -> OutExpr         -- Simplified RHS
             -> SimplM SimplEnv
-- Precondition: rhs satisfies the let/app invariant
simplNonRecX env bndr new_rhs
  | isDeadBinder bndr   -- Not uncommon; e.g. case (a,b) of c { (p,q) -> p }
  = return env          -- Here c is dead, and we avoid creating
                        --   the binding c = (a,b)

  | Coercion co <- new_rhs
    -- Coercions live in the coercion substitution, not as bindings
  = return (extendCvSubst env bndr co)

  | otherwise
  = do  { (env', bndr') <- simplBinder env bndr
        ; completeNonRecX NotTopLevel env' (isStrictId bndr) bndr bndr' new_rhs }
          -- simplNonRecX is only used for NotTopLevel things
-- | Finish a non-recursive binding whose binder and RHS are both already
-- simplified: ANF-ise the RHS, decide whether to float its lets out, and
-- complete the binding.
completeNonRecX :: TopLevelFlag -> SimplEnv
                -> Bool                 -- Strict binding (forces float decision)
                -> InId                 -- Old binder
                -> OutId                -- New binder
                -> OutExpr              -- Simplified RHS
                -> SimplM SimplEnv
-- Precondition: rhs satisfies the let/app invariant
--               See Note [CoreSyn let/app invariant] in CoreSyn
completeNonRecX top_lvl env is_strict old_bndr new_bndr new_rhs
  = do  { (env1, rhs1) <- prepareRhs top_lvl (zapFloats env) new_bndr new_rhs
          -- NOTE(review): the float decision uses NotTopLevel/NonRecursive
          -- even though top_lvl is in scope — presumably because this path
          -- is only reached for non-top-level things; confirm with callers.
        ; (env2, rhs2) <-
                if doFloatFromRhs NotTopLevel NonRecursive is_strict rhs1 env1
                then do { tick LetFloatFromLet
                        ; return (addFloats env env1, rhs1) }   -- Add the floats to the main env
                else return (env, wrapFloats env1 rhs1)         -- Wrap the floats around the RHS
        ; completeBind env2 NotTopLevel old_bndr new_bndr rhs2 }
{-
{- No, no, no! Do not try preInlineUnconditionally in completeNonRecX
Doing so risks exponential behaviour, because new_rhs has been simplified once already
In the cases described by the following comment, postInlineUnconditionally will
catch many of the relevant cases.
-- This happens; for example, the case_bndr during case of
-- known constructor: case (a,b) of x { (p,q) -> ... }
-- Here x isn't mentioned in the RHS, so we don't want to
-- create the (dead) let-binding let x = (a,b) in ...
--
-- Similarly, single occurrences can be inlined vigourously
-- e.g. case (f x, g y) of (a,b) -> ....
-- If a,b occur once we can avoid constructing the let binding for them.
Furthermore in the case-binding case preInlineUnconditionally risks extra thunks
-- Consider case I# (quotInt# x y) of
-- I# v -> let w = J# v in ...
-- If we gaily inline (quotInt# x y) for v, we end up building an
-- extra thunk:
-- let w = J# (quotInt# x y) in ...
-- because quotInt# can fail.
| preInlineUnconditionally env NotTopLevel bndr new_rhs
= thing_inside (extendIdSubst env bndr (DoneEx new_rhs))
-}
----------------------------------
prepareRhs takes a putative RHS, checks whether it's a PAP or
constructor application and, if so, converts it to ANF, so that the
resulting thing can be inlined more easily. Thus
x = (f a, g b)
becomes
t1 = f a
t2 = g b
x = (t1,t2)
We also want to deal well cases like this
v = (f e1 `cast` co) e2
Here we want to make e1,e2 trivial and get
x1 = e1; x2 = e2; v = (f x1 `cast` co) v2
That's what the 'go' loop in prepareRhs does
-}
-- | ANF-ise a putative RHS: if it is a constructor application or PAP,
-- bind its non-trivial arguments to fresh variables (as floats in the env)
-- so the resulting RHS can be inlined more easily.  See the commentary
-- just above for examples.
prepareRhs :: TopLevelFlag -> SimplEnv -> OutId -> OutExpr -> SimplM (SimplEnv, OutExpr)
-- Adds new floats to the env iff that allows us to return a good RHS
prepareRhs top_lvl env id (Cast rhs co)    -- Note [Float coercions]
  | Pair ty1 _ty2 <- coercionKind co       -- Do *not* do this if rhs has an unlifted type
  , not (isUnLiftedType ty1)               -- see Note [Float coercions (unlifted)]
  = do  { (env', rhs') <- makeTrivialWithInfo top_lvl env sanitised_info rhs
        ; return (env', Cast rhs' co) }
  where
    -- Keep only strictness and demand info on the new binder;
    -- see Note [Preserve strictness when floating coercions]
    sanitised_info = vanillaIdInfo `setStrictnessInfo` strictnessInfo info
                                   `setDemandInfo`     demandInfo info
    info = idInfo id

prepareRhs top_lvl env0 _ rhs0
  = do  { (_is_exp, env1, rhs1) <- go 0 env0 rhs0
        ; return (env1, rhs1) }
  where
    -- 'go' walks down the application spine counting value arguments.
    -- The Bool result says whether the head is expandable (constructor
    -- or PAP); only then do we trivialise the arguments.
    go n_val_args env (Cast rhs co)
        = do { (is_exp, env', rhs') <- go n_val_args env rhs
             ; return (is_exp, env', Cast rhs' co) }
    go n_val_args env (App fun (Type ty))
        = do { (is_exp, env', rhs') <- go n_val_args env fun
             ; return (is_exp, env', App rhs' (Type ty)) }
    go n_val_args env (App fun arg)
        = do { (is_exp, env', fun') <- go (n_val_args+1) env fun
             ; case is_exp of
                True  -> do { (env'', arg') <- makeTrivial top_lvl env' arg
                            ; return (True, env'', App fun' arg') }
                -- Not expandable: discard env' and keep the original app
                False -> return (False, env, App fun arg) }
    go n_val_args env (Var fun)
        = return (is_exp, env, Var fun)
        where
          is_exp = isExpandableApp fun n_val_args   -- The fun a constructor or PAP
                        -- See Note [CONLIKE pragma] in BasicTypes
                        -- The definition of is_exp should match that in
                        -- OccurAnal.occAnalApp

    go n_val_args env (Tick t rhs)
        -- We want to be able to float bindings past this
        -- tick. Non-scoping ticks don't care.
        | tickishScoped t == NoScope
        = do { (is_exp, env', rhs') <- go n_val_args env rhs
             ; return (is_exp, env', Tick t rhs') }

        -- On the other hand, for scoping ticks we need to be able to
        -- copy them on the floats, which in turn is only allowed if
        -- we can obtain non-counting ticks.
        | not (tickishCounts t) || tickishCanSplit t
        = do { (is_exp, env', rhs') <- go n_val_args (zapFloats env) rhs
               -- Re-attach a non-counting copy of the tick to every float
             ; let tickIt (id, expr) = (id, mkTick (mkNoCount t) expr)
                   floats' = seFloats $ env `addFloats` mapFloats env' tickIt
             ; return (is_exp, env' { seFloats = floats' }, Tick t rhs') }

    go _ env other
        = return (False, env, other)
{-
Note [Float coercions]
~~~~~~~~~~~~~~~~~~~~~~
When we find the binding
x = e `cast` co
we'd like to transform it to
x' = e
x = x `cast` co -- A trivial binding
There's a chance that e will be a constructor application or function, or something
like that, so moving the coercion to the usage site may well cancel the coercions
and lead to further optimisation. Example:
data family T a :: *
data instance T Int = T Int
foo :: Int -> Int -> Int
foo m n = ...
where
x = T m
go 0 = 0
go n = case x of { T m -> go (n-m) }
-- This case should optimise
Note [Preserve strictness when floating coercions]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In the Note [Float coercions] transformation, keep the strictness info.
Eg
f = e `cast` co -- f has strictness SSL
When we transform to
f' = e -- f' also has strictness SSL
f = f' `cast` co -- f still has strictness SSL
Its not wrong to drop it on the floor, but better to keep it.
Note [Float coercions (unlifted)]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
BUT don't do [Float coercions] if 'e' has an unlifted type.
This *can* happen:
foo :: Int = (error (# Int,Int #) "urk")
`cast` CoUnsafe (# Int,Int #) Int
If do the makeTrivial thing to the error call, we'll get
foo = case error (# Int,Int #) "urk" of v -> v `cast` ...
But 'v' isn't in scope!
These strange casts can happen as a result of case-of-case
bar = case (case x of { T -> (# 2,3 #); F -> error "urk" }) of
(# p,q #) -> p+q
-}
-- | Ensure a value argument is trivial, binding it to a fresh variable in
-- the environment if necessary.  Cast and type arguments ('CastBy',
-- 'TyArg') are returned unchanged.
makeTrivialArg :: SimplEnv -> ArgSpec -> SimplM (SimplEnv, ArgSpec)
makeTrivialArg env arg_spec
  = case arg_spec of
      ValArg e -> do { (env1, e1) <- makeTrivial NotTopLevel env e
                     ; return (env1, ValArg e1) }
      _        -> return (env, arg_spec)   -- CastBy, TyArg: nothing to do
-- | Bind the expression to a fresh variable unless it is already trivial,
-- returning the (now trivial) expression.  The new binder carries vanilla
-- IdInfo; use 'makeTrivialWithInfo' to propagate strictness/demand info.
makeTrivial :: TopLevelFlag -> SimplEnv -> OutExpr -> SimplM (SimplEnv, OutExpr)
makeTrivial top_lvl env = makeTrivialWithInfo top_lvl env vanillaIdInfo
-- | Like 'makeTrivial', but the caller supplies the IdInfo to put on the
-- fresh binder (used to propagate strictness/demand info when floating
-- coercions).
makeTrivialWithInfo :: TopLevelFlag -> SimplEnv -> IdInfo
                    -> OutExpr -> SimplM (SimplEnv, OutExpr)
-- Propagate strictness and demand info to the new binder
-- Note [Preserve strictness when floating coercions]
-- Returned SimplEnv has same substitution as incoming one
makeTrivialWithInfo top_lvl env info expr
  | exprIsTrivial expr                          -- Already trivial
  || not (bindingOk top_lvl expr expr_ty)       -- Cannot trivialise
                                                --   See Note [Cannot trivialise]
  = return (env, expr)
  | otherwise           -- See Note [Take care] below
  = do  { uniq <- getUniqueM
        ; let name = mkSystemVarName uniq (fsLit "a")
              var  = mkLocalIdWithInfo name expr_ty info
          -- Add the binding (var = expr) to the environment ...
        ; env'  <- completeNonRecX top_lvl env False var var expr
          -- ... and look the variable up again, in case completeNonRecX
          -- substituted it away.
        ; expr' <- simplVar env' var
        ; return (env', expr') }
        -- The simplVar is needed because we're constructing a new binding
        --     a = rhs
        -- And if rhs is of form (rhs1 |> co), then we might get
        --     a1 = rhs1
        --     a  = a1 |> co
        -- and now a's RHS is trivial and can be substituted out, and that
        -- is what completeNonRecX will do
        -- To put it another way, it's as if we'd simplified
        --    let var = e in var
  where
    expr_ty = exprType expr
-- | True iff an expression of the given type may be let-bound at this
-- level.  The only forbidden combination is an unlifted value at top
-- level; nested bindings are always acceptable.
-- Precondition: the type is the type of the expression.
bindingOk :: TopLevelFlag -> CoreExpr -> Type -> Bool
bindingOk top_lvl _ expr_ty
  = not (isTopLevel top_lvl && isUnLiftedType expr_ty)
{-
Note [Cannot trivialise]
~~~~~~~~~~~~~~~~~~~~~~~~
Consider this
f :: Int -> Addr#
foo :: Bar
foo = Bar (f 3)
Then we can't ANF-ise foo, even though we'd like to, because
we can't make a top-level binding for the Addr# (f 3). And if
so we don't want to turn it into
foo = let x = f 3 in Bar x
because we'll just end up inlining x back, and that makes the
simplifier loop. Better not to ANF-ise it at all.
A case in point is literal strings (a MachStr is not regarded as
trivial):
foo = Ptr "blob"#
We don't want to ANF-ise this.
************************************************************************
* *
\subsection{Completing a lazy binding}
* *
************************************************************************
completeBind
* deals only with Ids, not TyVars
* takes an already-simplified binder and RHS
* is used for both recursive and non-recursive bindings
* is used for both top-level and non-top-level bindings
It does the following:
- tries discarding a dead binding
- tries PostInlineUnconditionally
- add unfolding [this is the only place we add an unfolding]
- add arity
It does *not* attempt to do let-to-case. Why? Because it is used for
- top-level bindings (when let-to-case is impossible)
- many situations where the "rhs" is known to be a WHNF
(so let-to-case is inappropriate).
Nor does it do the atomic-argument thing
-}
-- | Finish a binding whose binder and RHS have both been simplified:
-- eta-expand the RHS, refresh the unfolding, then either inline the
-- binding away (postInlineUnconditionally) or add it as a float with
-- updated IdInfo.
completeBind :: SimplEnv
             -> TopLevelFlag            -- Flag stuck into unfolding
             -> InId                    -- Old binder
             -> OutId -> OutExpr        -- New binder and RHS
             -> SimplM SimplEnv
-- completeBind may choose to do its work
--      * by extending the substitution (e.g. let x = y in ...)
--      * or by adding to the floats in the envt
--
-- Precondition: rhs obeys the let/app invariant
completeBind env top_lvl old_bndr new_bndr new_rhs
 | isCoVar old_bndr
 = case new_rhs of
     -- A coercion RHS goes into the coercion substitution ...
     Coercion co -> return (extendCvSubst env old_bndr co)
     -- ... anything else becomes an ordinary binding
     _           -> return (addNonRec env new_bndr new_rhs)

 | otherwise
 = ASSERT( isId new_bndr )
   do { let old_info = idInfo old_bndr
            old_unf  = unfoldingInfo old_info
            occ_info = occInfo old_info

        -- Do eta-expansion on the RHS of the binding
        -- See Note [Eta-expanding at let bindings] in SimplUtils
      ; (new_arity, final_rhs) <- tryEtaExpandRhs env new_bndr new_rhs

        -- Simplify the unfolding
      ; new_unfolding <- simplUnfolding env top_lvl old_bndr final_rhs old_unf

      ; dflags <- getDynFlags
      ; if postInlineUnconditionally dflags env top_lvl new_bndr occ_info
                                     final_rhs new_unfolding

                        -- Inline and discard the binding
        then do  { tick (PostInlineUnconditionally old_bndr)
                 ; return (extendIdSubst env old_bndr (DoneEx final_rhs)) }
                -- Use the substitution to make quite, quite sure that the
                -- substitution will happen, since we are going to discard the binding

        else
   do { let info1 = idInfo new_bndr `setArityInfo` new_arity

              -- Unfolding info: Note [Setting the new unfolding]
            info2 = info1 `setUnfoldingInfo` new_unfolding

              -- Demand info: Note [Setting the demand info]
              --
              -- We also have to nuke demand info if for some reason
              -- eta-expansion *reduces* the arity of the binding to less
              -- than that of the strictness sig. This can happen: see Note [Arity decrease].
            info3 | isEvaldUnfolding new_unfolding
                    || (case strictnessInfo info2 of
                          StrictSig dmd_ty -> new_arity < dmdTypeDepth dmd_ty)
                  = zapDemandInfo info2 `orElse` info2
                  | otherwise
                  = info2

            final_id = new_bndr `setIdInfo` info3

      ; -- pprTrace "Binding" (ppr final_id <+> ppr new_unfolding) $
        return (addNonRec env final_id final_rhs) } }
                -- The addNonRec adds it to the in-scope set too
------------------------------
-- | Add an already-processed binding (typically one produced by
-- abstractFloats) to the environment, computing an unfolding for
-- non-recursive binders but skipping postInlineUnconditionally.
addPolyBind :: TopLevelFlag -> SimplEnv -> OutBind -> SimplM SimplEnv
-- Add a new binding to the environment, complete with its unfolding
-- but *do not* do postInlineUnconditionally, because we have already
-- processed some of the scope of the binding
-- We still want the unfolding though.  Consider
--      let
--            x = /\a. let y = ... in Just y
--      in body
-- Then we float the y-binding out (via abstractFloats and addPolyBind)
-- but 'x' may well then be inlined in 'body' in which case we'd like the
-- opportunity to inline 'y' too.
--
-- INVARIANT: the arity is correct on the incoming binders

addPolyBind top_lvl env (NonRec poly_id rhs)
  = do  { unfolding <- simplUnfolding env top_lvl poly_id rhs noUnfolding
                        -- Assumes that poly_id did not have an INLINE prag
                        -- which is perhaps wrong.  ToDo: think about this
        ; let final_id = setIdInfo poly_id $
                         idInfo poly_id `setUnfoldingInfo` unfolding
        ; return (addNonRec env final_id rhs) }

addPolyBind _ env bind@(Rec _)
  = return (extendFloats env bind)
        -- Hack: letrecs are more awkward, so we extend "by steam"
        -- without adding unfoldings etc.  At worst this leads to
        -- more simplifier iterations
------------------------------
-- | Compute the unfolding for a binder given its simplified RHS and its
-- old unfolding: stable unfoldings (INLINE/INLINABLE, DFuns) are
-- re-simplified and kept; everything else gets a fresh unfolding built
-- from the new RHS.
simplUnfolding :: SimplEnv -> TopLevelFlag
               -> InId
               -> OutExpr
               -> Unfolding -> SimplM Unfolding
-- Note [Setting the new unfolding]
simplUnfolding env top_lvl id new_rhs unf
  = case unf of
      DFunUnfolding { df_bndrs = bndrs, df_con = con, df_args = args }
        -> do { (env', bndrs') <- simplBinders rule_env bndrs
              ; args' <- mapM (simplExpr env') args
              ; return (mkDFunUnfolding bndrs' con args') }

      CoreUnfolding { uf_tmpl = expr, uf_src = src, uf_guidance = guide }
        | isStableSource src
        -> do { expr' <- simplExpr rule_env expr
              ; case guide of
                  UnfWhen { ug_arity = arity, ug_unsat_ok = sat_ok }  -- Happens for INLINE things
                     -> let guide' = UnfWhen { ug_arity = arity, ug_unsat_ok = sat_ok
                                             , ug_boring_ok = inlineBoringOk expr' }
                        -- Refresh the boring-ok flag, in case expr'
                        -- has got small. This happens, notably in the inlinings
                        -- for dfuns for single-method classes; see
                        -- Note [Single-method classes] in TcInstDcls.
                        -- A test case is Trac #4138
                        in return (mkCoreUnfolding src is_top_lvl expr' guide')
                            -- See Note [Top-level flag on inline rules] in CoreUnfold

                  _other              -- Happens for INLINABLE things
                     -> bottoming `seq` -- See Note [Force bottoming field]
                        do { dflags <- getDynFlags
                           ; return (mkUnfolding dflags src is_top_lvl bottoming expr') } }
                -- If the guidance is UnfIfGoodArgs, this is an INLINABLE
                -- unfolding, and we need to make sure the guidance is kept up
                -- to date with respect to any changes in the unfolding.

      _other -> bottoming `seq`     -- See Note [Force bottoming field]
                do { dflags <- getDynFlags
                   ; return (mkUnfolding dflags InlineRhs is_top_lvl bottoming new_rhs) }
          -- We make an  unfolding *even for loop-breakers*.
          -- Reason: (a) It might be useful to know that they are WHNF
          --         (b) In TidyPgm we currently assume that, if we want to
          --             expose the unfolding then indeed we *have* an unfolding
          --             to expose.  (We could instead use the RHS, but currently
          --             we don't.)  The simple thing is always to have one.
  where
    bottoming  = isBottomingId id
    is_top_lvl = isTopLevel top_lvl
    act        = idInlineActivation id
    -- Simplify inside a stable unfolding with a mode matching its activation
    rule_env   = updMode (updModeForStableUnfoldings act) env
         -- See Note [Simplifying inside stable unfoldings] in SimplUtils
{-
Note [Force bottoming field]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We need to force bottoming, or the new unfolding holds
on to the old unfolding (which is part of the id).
Note [Arity decrease]
~~~~~~~~~~~~~~~~~~~~~
Generally speaking the arity of a binding should not decrease. But it *can*
legitimately happen because of RULES. Eg
f = g Int
where g has arity 2, will have arity 2. But if there's a rewrite rule
g Int --> h
where h has arity 1, then f's arity will decrease. Here's a real-life example,
which is in the output of Specialise:
Rec {
$dm {Arity 2} = \d.\x. op d
{-# RULES forall d. $dm Int d = $s$dm #-}
dInt = MkD .... opInt ...
opInt {Arity 1} = $dm dInt
$s$dm {Arity 0} = \x. op dInt }
Here opInt has arity 1; but when we apply the rule its arity drops to 0.
That's why Specialise goes to a little trouble to pin the right arity
on specialised functions too.
Note [Setting the new unfolding]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* If there's an INLINE pragma, we simplify the RHS gently. Maybe we
should do nothing at all, but simplifying gently might get rid of
more crap.
* If not, we make an unfolding from the new RHS. But *only* for
non-loop-breakers. Making loop breakers not have an unfolding at all
means that we can avoid tests in exprIsConApp, for example. This is
important: if exprIsConApp says 'yes' for a recursive thing, then we
can get into an infinite loop
If there's an stable unfolding on a loop breaker (which happens for
INLINEABLE), we hang on to the inlining. It's pretty dodgy, but the
user did say 'INLINE'. May need to revisit this choice.
Note [Setting the demand info]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If the unfolding is a value, the demand info may
go pear-shaped, so we nuke it. Example:
let x = (a,b) in
case x of (p,q) -> h p q x
Here x is certainly demanded. But after we've nuked
the case, we'll get just
let x = (a,b) in h a b x
and now x is not demanded (I'm assuming h is lazy)
This really happens. Similarly
let f = \x -> e in ...f..f...
After inlining f at some of its call sites the original binding may
(for example) be no longer strictly demanded.
The solution here is a bit ad hoc...
************************************************************************
* *
\subsection[Simplify-simplExpr]{The main function: simplExpr}
* *
************************************************************************
The reason for this OutExprStuff stuff is that we want to float *after*
simplifying a RHS, not before. If we do so naively we get quadratic
behaviour as things float out.
To see why it's important to do it after, consider this (real) example:
let t = f x
in fst t
==>
let t = let a = e1
b = e2
in (a,b)
in fst t
==>
let a = e1
b = e2
t = (a,b)
in
a -- Can't inline a this round, cos it appears twice
==>
e1
Each of the ==> steps is a round of simplification. We'd save a
whole round if we float first. This can cascade. Consider
let f = g d
in \x -> ...f...
==>
let f = let d1 = ..d.. in \y -> e
in \x -> ...f...
==>
let d1 = ..d..
in \x -> ...(\y ->e)...
Only in this second round can the \y be applied, and it
might do the same again.
-}
-- | Simplify an expression with a boring (Stop) continuation.
-- The stop continuation carries the *output* type of the expression,
-- i.e. its type after applying the environment's substitution.
simplExpr :: SimplEnv -> CoreExpr -> SimplM CoreExpr
simplExpr env expr
  = let out_ty :: OutType
        out_ty = substTy env (exprType expr)
    in simplExprC env expr (mkBoringStop out_ty)
simplExprC :: SimplEnv -> CoreExpr -> SimplCont -> SimplM CoreExpr
-- | Simplify an expression in the given continuation, and wrap any
-- floated bindings produced along the way back around the result.
-- Floats are zapped on entry so we only wrap what *this* call created.
simplExprC env expr cont
  = simplExprF (zapFloats env) expr cont >>= \ (env', expr') ->
    return (wrapFloats env' expr')
--------------------------------------------------
simplExprF :: SimplEnv -> InExpr -> SimplCont
           -> SimplM (SimplEnv, OutExpr)
-- | Main simplifier entry for an expression-in-continuation.
-- A thin wrapper over 'simplExprF1'; it exists only to host the
-- (currently disabled) debug trace below.
simplExprF env e cont
  = {- pprTrace "simplExprF" (vcat
      [ ppr e
      , text "cont =" <+> ppr cont
      , text "inscope =" <+> ppr (seInScope env)
      , text "tvsubst =" <+> ppr (seTvSubst env)
      , text "idsubst =" <+> ppr (seIdSubst env)
      , text "cvsubst =" <+> ppr (seCvSubst env)
      {- , ppr (seFloats env) -}
      ]) $ -}
    simplExprF1 env e cont
simplExprF1 :: SimplEnv -> InExpr -> SimplCont
            -> SimplM (SimplEnv, OutExpr)
-- | Dispatch on the shape of the expression.  Each Core constructor
-- gets its own specialised handler.
simplExprF1 env (Var v)        cont = simplIdF env v cont
simplExprF1 env (Lit lit)      cont = rebuild env (Lit lit) cont
simplExprF1 env (Tick t expr)  cont = simplTick env t expr cont
simplExprF1 env (Cast body co) cont = simplCast env body co cont
simplExprF1 env (Coercion co)  cont = simplCoercionF env co cont
simplExprF1 env (Type ty)      cont = ASSERT( contIsRhsOrArg cont )
                                      rebuild env (Type (substTy env ty)) cont

-- An application: push the argument onto the continuation and simplify
-- the function.  A type argument is substituted immediately; a value
-- argument is delayed (NoDup) together with its environment.
simplExprF1 env (App fun arg) cont
  = simplExprF env fun $
    case arg of
      Type ty -> ApplyToTy { sc_arg_ty  = substTy env ty
                           , sc_hole_ty = substTy env (exprType fun)
                           , sc_cont    = cont }
      _       -> ApplyToVal { sc_arg = arg, sc_env = env
                            , sc_dup = NoDup, sc_cont = cont }

simplExprF1 env expr@(Lam {}) cont
  = simplLam env zapped_bndrs body cont
        -- The main issue here is under-saturated lambdas
        --   (\x1. \x2. e) arg1
        -- Here x1 might have "occurs-once" occ-info, because occ-info
        -- is computed assuming that a group of lambdas is applied
        -- all at once.  If there are too few args, we must zap the
        -- occ-info, UNLESS the remaining binders are one-shot
  where
    (bndrs, body) = collectBinders expr
    zapped_bndrs | need_to_zap = map zap bndrs
                 | otherwise   = bndrs

    need_to_zap = any zappable_bndr (drop n_args bndrs)
    n_args = countArgs cont
        -- NB: countArgs counts all the args (incl type args)
        -- and likewise drop counts all binders (incl type lambdas)

    zappable_bndr b = isId b && not (isOneShotBndr b)
    -- Type lambdas carry no occ-info, so only Ids need zapping
    zap b | isTyVar b = b
          | otherwise = zapLamIdInfo b

simplExprF1 env (Case scrut bndr _ alts) cont
  = simplExprF env scrut (Select NoDup bndr alts env cont)

simplExprF1 env (Let (Rec pairs) body) cont
  = do  { env' <- simplRecBndrs env (map fst pairs)
                -- NB: bndrs' don't have unfoldings or rules
                -- We add them as we go down

        ; env'' <- simplRecBind env' NotTopLevel pairs
        ; simplExprF env'' body cont }

simplExprF1 env (Let (NonRec bndr rhs) body) cont
  = simplNonRecE env bndr (rhs, env) ([], body) cont
---------------------------------
simplType :: SimplEnv -> InType -> SimplM OutType
-- | Apply the environment's type substitution, forcing the result with
-- 'seqType'.  Kept monadic just so we can do the seqType.
simplType env ty
  = let ty' = substTy env ty
    in seqType ty' `seq` return ty'
---------------------------------
simplCoercionF :: SimplEnv -> InCoercion -> SimplCont
               -> SimplM (SimplEnv, OutExpr)
-- | Simplify a coercion expression, then rebuild it in the continuation.
simplCoercionF env co cont
  = simplCoercion env co >>= \ co' -> rebuild env (Coercion co') cont
simplCoercion :: SimplEnv -> InCoercion -> SimplM OutCoercion
-- | Optimise a coercion under the current coercion-variable
-- substitution, forcing the optimised result with 'seqCo'.
simplCoercion env co
  = seqCo opt_co `seq` return opt_co
  where
    opt_co = optCoercion (getCvSubst env) co
-----------------------------------
-- | Push a TickIt context outwards past applications and cases, as
-- long as this is a non-scoping tick, to let case and application
-- optimisations apply.
simplTick :: SimplEnv -> Tickish Id -> InExpr -> SimplCont
          -> SimplM (SimplEnv, OutExpr)
simplTick env tickish expr cont
  -- A scoped tick turns into a continuation, so that we can spot
  -- (scc t (\x . e)) in simplLam and eliminate the scc.  If we didn't do
  -- it this way, then it would take two passes of the simplifier to
  -- reduce ((scc t (\x . e)) e').
  -- NB, don't do this with counting ticks, because if the expr is
  -- bottom, then rebuildCall will discard the continuation.

-- XXX: we cannot do this, because the simplifier assumes that
-- the context can be pushed into a case with a single branch. e.g.
--    scc<f>  case expensive of p -> e
-- becomes
--    case expensive of p -> scc<f> e
--
-- So I'm disabling this for now.  It just means we will do more
-- simplifier iterations than necessary in some cases.

--  | tickishScoped tickish && not (tickishCounts tickish)
--  = simplExprF env expr (TickIt tickish cont)

  -- For unscoped or soft-scoped ticks, we are allowed to float in new
  -- cost, so we simply push the continuation inside the tick.  This
  -- has the effect of moving the tick to the outside of a case or
  -- application context, allowing the normal case and application
  -- optimisations to fire.
  | tickish `tickishScopesLike` SoftScope
  = do { (env', expr') <- simplExprF env expr cont
       ; return (env', mkTick tickish expr')
       }

  -- Push tick inside if the context looks like this will allow us to
  -- do a case-of-case - see Note [case-of-scc-of-case]
  | Select {} <- cont, Just expr' <- push_tick_inside
  = simplExprF env expr' cont

  -- We don't want to move the tick, but we might still want to allow
  -- floats to pass through with appropriate wrapping (or not, see
  -- wrap_floats below)
  --- | not (tickishCounts tickish) || tickishCanSplit tickish
  -- = wrap_floats

  | otherwise
  = no_floating_past_tick

 where
  -- Try to push tick inside a case, see Note [case-of-scc-of-case].
  push_tick_inside =
    case expr0 of
      Case scrut bndr ty alts
        -> Just $ Case (tickScrut scrut) bndr ty (map tickAlt alts)
      _other -> Nothing
   where (ticks, expr0) = stripTicksTop movable (Tick tickish expr)
         movable t = not (tickishCounts t) ||
                     t `tickishScopesLike` NoScope ||
                     tickishCanSplit t
         tickScrut e = foldr mkTick e ticks
         -- Alternatives get annotated with all ticks that scope in some way,
         -- but we don't want to count entries.
         tickAlt (c,bs,e) = (c,bs, foldr mkTick e ts_scope)
         ts_scope = map mkNoCount $
                        filter (not . (`tickishScopesLike` NoScope)) ticks

  -- Simplify inside the tick, keeping this tick's floats local: the
  -- inner floats are wrapped before the tick is re-applied, and only
  -- the outer floats survive.
  no_floating_past_tick =
    do { let (inc,outc) = splitCont cont
       ; (env', expr') <- simplExprF (zapFloats env) expr inc
       ; let tickish' = simplTickish env tickish
       ; (env'', expr'') <- rebuild (zapFloats env')
                                    (wrapFloats env' expr')
                                    (TickIt tickish' outc)
       ; return (addFloats env env'', expr'')
       }

  -- Alternative version that wraps outgoing floats with the tick.  This
  -- results in ticks being duplicated, as we don't make any attempt to
  -- eliminate the tick if we re-inline the binding (because the tick
  -- semantics allows unrestricted inlining of HNFs), so I'm not doing
  -- this any more.  FloatOut will catch any real opportunities for
  -- floating.
  --
  -- wrap_floats =
  --   do { let (inc,outc) = splitCont cont
  --      ; (env', expr') <- simplExprF (zapFloats env) expr inc
  --      ; let tickish' = simplTickish env tickish
  --      ; let wrap_float (b,rhs) = (zapIdStrictness (setIdArity b 0),
  --                                  mkTick (mkNoCount tickish') rhs)
  --             -- when wrapping a float with mkTick, we better zap the Id's
  --             -- strictness info and arity, because it might be wrong now.
  --      ; let env'' = addFloats env (mapFloats env' wrap_float)
  --      ; rebuild env'' expr' (TickIt tickish' outc)
  --      }

  -- Apply the environment's substitution to the Ids mentioned by a
  -- breakpoint; other ticks carry no Ids and pass through unchanged.
  simplTickish env tickish
    | Breakpoint n ids <- tickish
    = Breakpoint n (map (getDoneId . substId env) ids)
    | otherwise = tickish

  -- Push type application and coercion inside a tick
  splitCont :: SimplCont -> (SimplCont, SimplCont)
  splitCont cont@(ApplyToTy { sc_cont = tail }) = (cont { sc_cont = inc }, outc)
    where (inc,outc) = splitCont tail
  splitCont (CastIt co c) = (CastIt co inc, outc)
    where (inc,outc) = splitCont c
  splitCont other = (mkBoringStop (contHoleType other), other)

  getDoneId (DoneId id) = id
  getDoneId (DoneEx e)  = getIdFromTrivialExpr e -- Note [substTickish] in CoreSubst
  getDoneId other = pprPanic "getDoneId" (ppr other)
-- Note [case-of-scc-of-case]
-- It's pretty important to be able to transform case-of-case when
-- there's an SCC in the way. For example, the following comes up
-- in nofib/real/compress/Encode.hs:
--
-- case scctick<code_string.r1>
-- case $wcode_string_r13s wild_XC w1_s137 w2_s138 l_aje
-- of _ { (# ww1_s13f, ww2_s13g, ww3_s13h #) ->
-- (ww1_s13f, ww2_s13g, ww3_s13h)
-- }
-- of _ { (ww_s12Y, ww1_s12Z, ww2_s130) ->
-- tick<code_string.f1>
-- (ww_s12Y,
-- ww1_s12Z,
-- PTTrees.PT
-- @ GHC.Types.Char @ GHC.Types.Int wild2_Xj ww2_s130 r_ajf)
-- }
--
-- We really want this case-of-case to fire, because then the 3-tuple
-- will go away (indeed, the CPR optimisation is relying on this
-- happening). But the scctick is in the way - we need to push it
-- inside to expose the case-of-case. So we perform this
-- transformation on the inner case:
--
-- scctick c (case e of { p1 -> e1; ...; pn -> en })
-- ==>
-- case (scctick c e) of { p1 -> scc c e1; ...; pn -> scc c en }
--
-- So we've moved a constant amount of work out of the scc to expose
-- the case. We only do this when the continuation is interesting: in
-- for now, it has to be another Case (maybe generalise this later).
{-
************************************************************************
* *
\subsection{The main rebuilder}
* *
************************************************************************
-}
rebuild :: SimplEnv -> OutExpr -> SimplCont -> SimplM (SimplEnv, OutExpr)
-- | Plug a fully-simplified expression back into its continuation.
-- At this point the substitution in the SimplEnv should be irrelevant
-- only the in-scope set and floats should matter
rebuild env expr cont
  = case cont of
      Stop {}        -> return (env, expr)
      TickIt t cont  -> rebuild env (mkTick t expr) cont
      CastIt co cont -> rebuild env (mkCast expr co) cont
         -- NB: mkCast implements the (Coercion co |> g) optimisation

      Select _ bndr alts se cont   -> rebuildCase (se `setFloats` env) expr bndr alts cont
      StrictArg info _ cont        -> rebuildCall env (info `addValArgTo` expr) cont
      StrictBind b bs body se cont -> do { env' <- simplNonRecX (se `setFloats` env) b expr
                                           -- expr satisfies let/app since it started life
                                           -- in a call to simplNonRecE
                                         ; simplLam env' bs body cont }

      ApplyToTy { sc_arg_ty = ty, sc_cont = cont}
        -> rebuild env (App expr (Type ty)) cont

      ApplyToVal { sc_arg = arg, sc_env = se, sc_dup = dup_flag, sc_cont = cont}
        -- See Note [Avoid redundant simplification]
        | isSimplified dup_flag -> rebuild env (App expr arg) cont
        | otherwise             -> do { arg' <- simplExpr (se `setInScope` env) arg
                                      ; rebuild env (App expr arg') cont }
{-
************************************************************************
* *
\subsection{Lambdas}
* *
************************************************************************
-}
simplCast :: SimplEnv -> InExpr -> Coercion -> SimplCont
          -> SimplM (SimplEnv, OutExpr)
-- | Simplify (body |> co0) in the given continuation, pushing the
-- (simplified) coercion into the continuation where possible so that
-- beta-reduction and case-of-case can still fire underneath casts.
simplCast env body co0 cont0
  = do  { co1 <- simplCoercion env co0
        ; -- pprTrace "simplCast" (ppr co1) $
          simplExprF env body (addCoerce co1 cont0) }
  where
       addCoerce co cont = add_coerce co (coercionKind co) cont

       add_coerce _co (Pair s1 k1) cont     -- co :: ty~ty
         | s1 `eqType` k1 = cont            -- is a no-op

       add_coerce co1 (Pair s1 _k2) (CastIt co2 cont)
         | (Pair _l1 t1) <- coercionKind co2
                --      e |> (g1 :: S1~L) |> (g2 :: L~T1)
                -- ==>
                --      e,                       if S1=T1
                --      e |> (g1 . g2 :: S1~T1)  otherwise
                --
                -- For example, in the initial form of a worker
                -- we may find  (coerce T (coerce S (\x.e))) y
                -- and we'd like it to simplify to e[y/x] in one round
                -- of simplification
         , s1 `eqType` t1  = cont            -- The coerces cancel out
         | otherwise       = CastIt (mkTransCo co1 co2) cont

       add_coerce co (Pair s1s2 _t1t2) cont@(ApplyToTy { sc_arg_ty = arg_ty, sc_cont = tail })
                -- (f |> g) ty  --->   (f ty) |> (g @ ty)
                -- This implements the PushT rule from the paper
         | Just (tyvar,_) <- splitForAllTy_maybe s1s2
         = ASSERT( isTyVar tyvar )
           cont { sc_cont = addCoerce new_cast tail }
         where
           new_cast = mkInstCo co arg_ty

       add_coerce co (Pair s1s2 t1t2) (ApplyToVal { sc_arg = arg, sc_env = arg_se
                                                  , sc_dup = dup, sc_cont = cont })
         | isFunTy s1s2   -- This implements the Push rule from the paper
         , isFunTy t1t2   -- Check t1t2 to ensure 'arg' is a value arg
                -- (e |> (g :: s1s2 ~ t1->t2)) f
                -- ===>
                -- (e (f |> (arg g :: t1~s1))
                --       |> (res g :: s2->t2)
                --
                -- t1t2 must be a function type, t1->t2, because it's applied
                -- to something but s1s2 might conceivably not be
                --
                -- When we build the ApplyTo we can't mix the out-types
                -- with the InExpr in the argument, so we simply substitute
                -- to make it all consistent.  It's a bit messy.
                -- But it isn't a common case.
                --
                -- Example of use: Trac #995
         = ApplyToVal { sc_arg  = mkCast arg' (mkSymCo co1)
                      , sc_env  = zapSubstEnv arg_se
                      , sc_dup  = dup
                      , sc_cont = addCoerce co2 cont }
         where
           -- we split coercion t1->t2 ~ s1->s2 into t1 ~ s1 and
           -- t2 ~ s2 with left and right on the curried form:
           --    (->) t1 t2 ~ (->) s1 s2
           [co1, co2] = decomposeCo 2 co
           arg'       = substExpr (text "move-cast") arg_se' arg
           arg_se'    = arg_se `setInScope` env

       -- Fallback: no push rule applies, keep the cast in the continuation.
       add_coerce co _ cont = CastIt co cont
{-
************************************************************************
* *
\subsection{Lambdas}
* *
************************************************************************
Note [Zap unfolding when beta-reducing]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lambda-bound variables can have stable unfoldings, such as
$j = \x. \b{Unf=Just x}. e
See Note [Case binders and join points] below; the unfolding for lets
us optimise e better. However when we beta-reduce it we want to
revert to using the actual value, otherwise we can end up in the
stupid situation of
let x = blah in
let b{Unf=Just x} = y
in ...b...
Here it'd be far better to drop the unfolding and use the actual RHS.
-}
simplLam :: SimplEnv -> [InId] -> InExpr -> SimplCont
         -> SimplM (SimplEnv, OutExpr)
-- | Simplify a (possibly multi-binder) lambda against its continuation,
-- performing beta-reduction when arguments are available in the
-- continuation.
simplLam env [] body cont = simplExprF env body cont

        -- Beta reduction
simplLam env (bndr:bndrs) body (ApplyToTy { sc_arg_ty = arg_ty, sc_cont = cont })
  = do { tick (BetaReduction bndr)
       ; simplLam (extendTvSubst env bndr arg_ty) bndrs body cont }

simplLam env (bndr:bndrs) body (ApplyToVal { sc_arg = arg, sc_env = arg_se
                                           , sc_cont = cont })
  = do  { tick (BetaReduction bndr)
        ; simplNonRecE env (zap_unfolding bndr) (arg, arg_se) (bndrs, body) cont }
  where
    zap_unfolding bndr  -- See Note [Zap unfolding when beta-reducing]
      | isId bndr, isStableUnfolding (realIdUnfolding bndr)
      = setIdUnfolding bndr NoUnfolding
      | otherwise = bndr

      -- discard a non-counting tick on a lambda.  This may change the
      -- cost attribution slightly (moving the allocation of the
      -- lambda elsewhere), but we don't care: optimisation changes
      -- cost attribution all the time.
simplLam env bndrs body (TickIt tickish cont)
  | not (tickishCounts tickish)
  = simplLam env bndrs body cont

        -- Not enough args, so there are real lambdas left to put in the result
simplLam env bndrs body cont
  = do  { (env', bndrs') <- simplLamBndrs env bndrs
        ; body' <- simplExpr env' body
        ; new_lam <- mkLam bndrs' body' cont
        ; rebuild env' new_lam cont }
------------------
simplNonRecE :: SimplEnv
             -> InBndr                  -- The binder
             -> (InExpr, SimplEnv)      -- Rhs of binding (or arg of lambda)
             -> ([InBndr], InExpr)      -- Body of the let/lambda
                                        --      \xs.e
             -> SimplCont
             -> SimplM (SimplEnv, OutExpr)

-- simplNonRecE is used for
--  * non-top-level non-recursive lets in expressions
--  * beta reduction
--
-- It deals with strict bindings, via the StrictBind continuation,
-- which may abort the whole process
--
-- Precondition: rhs satisfies the let/app invariant
--               Note [CoreSyn let/app invariant] in CoreSyn
--
-- The "body" of the binding comes as a pair of ([InId],InExpr)
-- representing a lambda; so we recurse back to simplLam
-- Why?  Because of the binder-occ-info-zapping done before
--       the call to simplLam in simplExprF (Lam ...)

        -- First deal with type applications and type lets
        --   (/\a. e) (Type ty)   and   (let a = Type ty in e)
simplNonRecE env bndr (Type ty_arg, rhs_se) (bndrs, body) cont
  = ASSERT( isTyVar bndr )
    do  { ty_arg' <- simplType (rhs_se `setInScope` env) ty_arg
        ; simplLam (extendTvSubst env bndr ty_arg') bndrs body cont }

simplNonRecE env bndr (rhs, rhs_se) (bndrs, body) cont
  = do dflags <- getDynFlags
       case () of
         -- Inline the RHS unconditionally: record it in the substitution
         -- and carry on with the body.
         _ | preInlineUnconditionally dflags env NotTopLevel bndr rhs
           -> do { tick (PreInlineUnconditionally bndr)
                 ; -- pprTrace "preInlineUncond" (ppr bndr <+> ppr rhs) $
                   simplLam (extendIdSubst env bndr (mkContEx rhs_se rhs)) bndrs body cont }

           -- A strict binder: simplify the RHS first, with a StrictBind
           -- continuation that will resume on the body.
           | isStrictId bndr          -- Includes coercions
           -> simplExprF (rhs_se `setFloats` env) rhs
                         (StrictBind bndr bndrs body env cont)

           -- The ordinary lazy case: bind it with simplLazyBind.
           | otherwise
           -> ASSERT( not (isTyVar bndr) )
              do { (env1, bndr1) <- simplNonRecBndr env bndr
                 ; let (env2, bndr2) = addBndrRules env1 bndr bndr1
                 ; env3 <- simplLazyBind env2 NotTopLevel NonRecursive bndr bndr2 rhs rhs_se
                 ; simplLam env3 bndrs body cont }
{-
************************************************************************
* *
Variables
* *
************************************************************************
-}
simplVar :: SimplEnv -> InVar -> SimplM OutExpr
-- | Look an InVar up in the environment and return the simplified
-- expression it stands for.  Type and coercion variables are handled
-- by direct substitution; Ids go through the Id substitution.
simplVar env var
  | isTyVar var = return $ Type (substTyVar env var)
  | isCoVar var = return $ Coercion (substCoVar env var)
  | otherwise
  = case substId env var of
      DoneId v'            -> return (Var v')
      DoneEx e             -> return e
      ContEx tvs cvs ids e -> simplExpr (setSubstEnv env tvs cvs ids) e
simplIdF :: SimplEnv -> InId -> SimplCont -> SimplM (SimplEnv, OutExpr)
-- | Simplify an occurrence of an Id in a continuation.  A DoneEx
-- template is already simplified, so the substitution is zapped before
-- re-entering the simplifier -- see Note [zapSubstEnv] below.
simplIdF env var cont
  = case substId env var of
      DoneEx e             -> simplExprF (zapSubstEnv env) e cont
      ContEx tvs cvs ids e -> simplExprF (setSubstEnv env tvs cvs ids) e cont
      DoneId var1          -> completeCall env var1 cont
-- Note [zapSubstEnv]
-- The template is already simplified, so don't re-substitute.
-- This is VITAL. Consider
-- let x = e in
-- let y = \z -> ...x... in
-- \ x -> ...y...
-- We'll clone the inner \x, adding x->x' in the id_subst
-- Then when we inline y, we must *not* replace x by x' in
-- the inlined copy!!
---------------------------------------------------------
-- Dealing with a call site
completeCall :: SimplEnv -> OutId -> SimplCont -> SimplM (SimplEnv, OutExpr)
-- | Deal with a call site whose head is an out-Id: first try inlining
-- via 'callSiteInline'; if that fails, fall through to 'rebuildCall'
-- (which will also try rewrite rules).
completeCall env var cont
  = do  {   ------------- Try inlining ----------------
          dflags <- getDynFlags
        ; let  (lone_variable, arg_infos, call_cont) = contArgs cont
               n_val_args = length arg_infos
               interesting_cont = interestingCallContext call_cont
               unfolding    = activeUnfolding env var
               maybe_inline = callSiteInline dflags var unfolding
                                             lone_variable arg_infos interesting_cont
        ; case maybe_inline of {
            Just expr      -- There is an inlining!
              ->  do { checkedTick (UnfoldingDone var)
                     ; dump_inline dflags expr cont
                     ; simplExprF (zapSubstEnv env) expr cont }

            ; Nothing -> do               -- No inlining!
        { rule_base <- getSimplRules
        ; let info = mkArgInfo var (getRules rule_base var) n_val_args call_cont
        ; rebuildCall env info cont
    }}}
  where
    -- Emit the -ddump-inlinings trace at the appropriate verbosity.
    dump_inline dflags unfolding cont
      | not (dopt Opt_D_dump_inlinings dflags) = return ()
      | not (dopt Opt_D_verbose_core2core dflags)
      = when (isExternalName (idName var)) $
        liftIO $ printOutputForUser dflags alwaysQualify $
        sep [text "Inlining done:", nest 4 (ppr var)]
      | otherwise
      = liftIO $ printOutputForUser dflags alwaysQualify $
        sep [text "Inlining done: " <> ppr var,
             nest 4 (vcat [text "Inlined fn: " <+> nest 2 (ppr unfolding),
                           text "Cont: " <+> ppr cont])]
rebuildCall :: SimplEnv
            -> ArgInfo
            -> SimplCont
            -> SimplM (SimplEnv, OutExpr)
-- | Rebuild a call whose head did not inline, accumulating simplified
-- arguments in the ArgInfo and finally trying rewrite rules.
rebuildCall env (ArgInfo { ai_fun = fun, ai_args = rev_args, ai_strs = [] }) cont
  -- When we run out of strictness args, it means
  -- that the call is definitely bottom; see SimplUtils.mkArgInfo
  -- Then we want to discard the entire strict continuation.  E.g.
  --    * case (error "hello") of { ... }
  --    * (error "Hello") arg
  --    * f (error "Hello") where f is strict
  --    etc
  -- Then, especially in the first of these cases, we'd like to discard
  -- the continuation, leaving just the bottoming expression.  But the
  -- type might not be right, so we may have to add a coerce.
  | not (contIsTrivial cont)     -- Only do this if there is a non-trivial
  = return (env, castBottomExpr res cont_ty)  -- continuation to discard, else we do it
  where                                       -- again and again!
    res     = argInfoExpr fun rev_args
    cont_ty = contResultType cont

rebuildCall env info (CastIt co cont)
  = rebuildCall env (addCastTo info co) cont

rebuildCall env info (ApplyToTy { sc_arg_ty = arg_ty, sc_cont = cont })
  = rebuildCall env (info `addTyArgTo` arg_ty) cont

rebuildCall env info@(ArgInfo { ai_encl = encl_rules, ai_type = fun_ty
                              , ai_strs = str:strs, ai_discs = disc:discs })
            (ApplyToVal { sc_arg = arg, sc_env = arg_se
                        , sc_dup = dup_flag, sc_cont = cont })
  | isSimplified dup_flag     -- See Note [Avoid redundant simplification]
  = rebuildCall env (addValArgTo info' arg) cont

  | str                 -- Strict argument
  = -- pprTrace "Strict Arg" (ppr arg $$ ppr (seIdSubst env) $$ ppr (seInScope env)) $
    simplExprF (arg_se `setFloats` env) arg
               (StrictArg info' cci cont)
                -- Note [Shadowing]

  | otherwise                     -- Lazy argument
        -- DO NOT float anything outside, hence simplExprC
        -- There is no benefit (unlike in a let-binding), and we'd
        -- have to be very careful about bogus strictness through
        -- floating a demanded let.
  = do  { arg' <- simplExprC (arg_se `setInScope` env) arg
                             (mkLazyArgStop (funArgTy fun_ty) cci)
        ; rebuildCall env (addValArgTo info' arg') cont }
  where
    -- Consume one strictness flag and one discount per value argument.
    info' = info { ai_strs = strs, ai_discs = discs }
    cci | encl_rules = RuleArgCtxt
        | disc > 0   = DiscArgCtxt  -- Be keener here
        | otherwise  = BoringCtxt   -- Nothing interesting

rebuildCall env (ArgInfo { ai_fun = fun, ai_args = rev_args, ai_rules = rules }) cont
  | null rules
  = rebuild env (argInfoExpr fun rev_args) cont      -- No rules, common case

  | otherwise
  = do {  -- We've accumulated a simplified call in <fun,rev_args>
          -- so try rewrite rules; see Note [RULEs apply to simplified arguments]
          -- See also Note [Rules for recursive functions]
        ; let env' = zapSubstEnv env  -- See Note [zapSubstEnv];
                                      -- and NB that 'rev_args' are all fully simplified
        ; mb_rule <- tryRules env' rules fun (reverse rev_args) cont
        ; case mb_rule of {
             Just (rule_rhs, cont') -> simplExprF env' rule_rhs cont'

                 -- Rules don't match
           ; Nothing -> rebuild env (argInfoExpr fun rev_args) cont      -- No rules
    } }
{-
Note [RULES apply to simplified arguments]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It's very desirable to try RULES once the arguments have been simplified, because
doing so ensures that rule cascades work in one pass. Consider
{-# RULES g (h x) = k x
f (k x) = x #-}
...f (g (h x))...
Then we want to rewrite (g (h x)) to (k x) and only then try f's rules. If
we match f's rules against the un-simplified RHS, it won't match. This
makes a particularly big difference when superclass selectors are involved:
op ($p1 ($p2 (df d)))
We want all this to unravel in one sweep.
Note [Avoid redundant simplification]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Because RULES apply to simplified arguments, there's a danger of repeatedly
simplifying already-simplified arguments. An important example is that of
(>>=) d e1 e2
Here e1, e2 are simplified before the rule is applied, but don't really
participate in the rule firing. So we mark them as Simplified to avoid
re-simplifying them.
Note [Shadowing]
~~~~~~~~~~~~~~~~
This part of the simplifier may break the no-shadowing invariant
Consider
f (...(\a -> e)...) (case y of (a,b) -> e')
where f is strict in its second arg
If we simplify the innermost one first we get (...(\a -> e)...)
Simplifying the second arg makes us float the case out, so we end up with
case y of (a,b) -> f (...(\a -> e)...) e'
So the output does not have the no-shadowing invariant. However, there is
no danger of getting name-capture, because when the first arg was simplified
we used an in-scope set that at least mentioned all the variables free in its
static environment, and that is enough.
We can't just do innermost first, or we'd end up with a dual problem:
case x of (a,b) -> f e (...(\a -> e')...)
I spent hours trying to recover the no-shadowing invariant, but I just could
not think of an elegant way to do it. The simplifier is already knee-deep in
continuations. We have to keep the right in-scope set around; AND we have
to get the effect that finding (error "foo") in a strict arg position will
discard the entire application and replace it with (error "foo"). Getting
all this at once is TOO HARD!
************************************************************************
* *
Rewrite rules
* *
************************************************************************
-}
tryRules :: SimplEnv -> [CoreRule]
         -> Id -> [ArgSpec] -> SimplCont
         -> SimplM (Maybe (CoreExpr, SimplCont))
-- | Try to fire one of the given rewrite rules on a fully-simplified
-- call; return the rule RHS and the leftover continuation on success.
-- The SimplEnv already has zapSubstEnv applied to it
tryRules env rules fn args call_cont
  | null rules
  = return Nothing
{- Disabled until we fix #8326
  | fn `hasKey` tagToEnumKey   -- See Note [Optimising tagToEnum#]
  , [_type_arg, val_arg] <- args
  , Select dup bndr ((_,[],rhs1) : rest_alts) se cont <- call_cont
  , isDeadBinder bndr
  = do { dflags <- getDynFlags
       ; let enum_to_tag :: CoreAlt -> CoreAlt
                -- Takes   K -> e  into   tagK# -> e
                -- where tagK# is the tag of constructor K
             enum_to_tag (DataAlt con, [], rhs)
               = ASSERT( isEnumerationTyCon (dataConTyCon con) )
                 (LitAlt tag, [], rhs)
              where
                tag = mkMachInt dflags (toInteger (dataConTag con - fIRST_TAG))
             enum_to_tag alt = pprPanic "tryRules: tagToEnum" (ppr alt)

             new_alts = (DEFAULT, [], rhs1) : map enum_to_tag rest_alts
             new_bndr = setIdType bndr intPrimTy
                 -- The binder is dead, but should have the right type
       ; return (Just (val_arg, Select dup new_bndr new_alts se cont)) }
-}
  | otherwise
  = do { dflags <- getDynFlags
       ; case lookupRule dflags (getUnfoldingInRuleMatch env) (activeRule env)
                         fn (argInfoAppArgs args) rules of {
           Nothing               -> return Nothing ;   -- No rule matches
           Just (rule, rule_rhs) ->
             do { checkedTick (RuleFired (ru_name rule))
                ; let cont' = pushSimplifiedArgs env
                                                 (drop (ruleArity rule) args)
                                                 call_cont
                      -- (ruleArity rule) says how many args the rule consumed
                ; dump dflags rule rule_rhs
                ; return (Just (rule_rhs, cont')) }}}
  where
    -- Emit rule-firing traces according to the active dump flags.
    dump dflags rule rule_rhs
      | dopt Opt_D_dump_rule_rewrites dflags
      = log_rule dflags Opt_D_dump_rule_rewrites "Rule fired" $ vcat
          [ text "Rule:" <+> ftext (ru_name rule)
          , text "Before:" <+> hang (ppr fn) 2 (sep (map ppr args))
          , text "After: " <+> pprCoreExpr rule_rhs
          , text "Cont: " <+> ppr call_cont ]

      | dopt Opt_D_dump_rule_firings dflags
      = log_rule dflags Opt_D_dump_rule_firings "Rule fired:" $
          ftext (ru_name rule)

      | otherwise
      = return ()

    log_rule dflags flag hdr details
      = liftIO . dumpSDoc dflags alwaysQualify flag "" $
          sep [text hdr, nest 4 details]
{-
Note [Optimising tagToEnum#]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If we have an enumeration data type:
data Foo = A | B | C
Then we want to transform
case tagToEnum# x of ==> case x of
A -> e1 DEFAULT -> e1
B -> e2 1# -> e2
C -> e3 2# -> e3
thereby getting rid of the tagToEnum# altogether. If there was a DEFAULT
alternative we retain it (remember it comes first). If not the case must
be exhaustive, and we reflect that in the transformed version by adding
a DEFAULT. Otherwise Lint complains that the new case is not exhaustive.
See #8317.
Note [Rules for recursive functions]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You might think that we shouldn't apply rules for a loop breaker:
doing so might give rise to an infinite loop, because a RULE is
rather like an extra equation for the function:
RULE: f (g x) y = x+y
Eqn: f a y = a-y
But it's too drastic to disable rules for loop breakers.
Even the foldr/build rule would be disabled, because foldr
is recursive, and hence a loop breaker:
foldr k z (build g) = g k z
So it's up to the programmer: rules can cause divergence
************************************************************************
* *
Rebuilding a case expression
* *
************************************************************************
Note [Case elimination]
~~~~~~~~~~~~~~~~~~~~~~~
The case-elimination transformation discards redundant case expressions.
Start with a simple situation:
case x# of ===> let y# = x# in e
y# -> e
(when x#, y# are of primitive type, of course). We can't (in general)
do this for algebraic cases, because we might turn bottom into
non-bottom!
The code in SimplUtils.prepareAlts has the effect of generalising this
idea to look for a case where we're scrutinising a variable, and we
know that only the default case can match. For example:
case x of
0# -> ...
DEFAULT -> ...(case x of
0# -> ...
DEFAULT -> ...) ...
Here the inner case is first trimmed to have only one alternative, the
DEFAULT, after which it's an instance of the previous case. This
really only shows up in eliminating error-checking code.
Note that SimplUtils.mkCase combines identical RHSs. So
case e of ===> case e of DEFAULT -> r
True -> r
False -> r
Now again the case may be eliminated by the CaseElim transformation.
This includes things like (==# a# b#)::Bool so that we simplify
case ==# a# b# of { True -> x; False -> x }
to just
x
This particular example shows up in default methods for
comparison operations (e.g. in (>=) for Int.Int32)
Note [Case elimination: lifted case]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If a case over a lifted type has a single alternative, and is being used
as a strict 'let' (all isDeadBinder bndrs), we may want to do this
transformation:
case e of r ===> let r = e in ...r...
_ -> ...r...
(a) 'e' is already evaluated (it may be so if e is a variable)
Specifically we check (exprIsHNF e). In this case
we can just allocate the WHNF directly with a let.
or
(b) 'x' is not used at all and e is ok-for-speculation
The ok-for-spec bit checks that we don't lose any
exceptions or divergence.
NB: it'd be *sound* to switch from case to let if the
scrutinee was not yet WHNF but was guaranteed to
converge; but sticking with case means we won't build a
thunk
or
(c) 'x' is used strictly in the body, and 'e' is a variable
Then we can just substitute 'e' for 'x' in the body.
See Note [Eliminating redundant seqs]
For (b), the "not used at all" test is important. Consider
case (case a ># b of { True -> (p,q); False -> (q,p) }) of
r -> blah
The scrutinee is ok-for-speculation (it looks inside cases), but we do
not want to transform to
let r = case a ># b of { True -> (p,q); False -> (q,p) }
in blah
because that builds an unnecessary thunk.
Note [Eliminating redundant seqs]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If we have this:
case x of r { _ -> ..r.. }
where 'r' is used strictly in (..r..), the case is effectively a 'seq'
on 'x', but since 'r' is used strictly anyway, we can safely transform to
(...x...)
Note that this can change the error behaviour. For example, we might
transform
case x of { _ -> error "bad" }
--> error "bad"
which might be puzzling if 'x' is currently lambda-bound, but later gets
let-bound to (error "good").
Nevertheless, the paper "A semantics for imprecise exceptions" allows
this transformation. If you want to fix the evaluation order, use
'pseq'. See Trac #8900 for an example where the loss of this
transformation bit us in practice.
See also Note [Empty case alternatives] in CoreSyn.
Just for reference, the original code (added Jan 13) looked like this:
|| case_bndr_evald_next rhs
case_bndr_evald_next :: CoreExpr -> Bool
-- See Note [Case binder next]
case_bndr_evald_next (Var v) = v == case_bndr
case_bndr_evald_next (Cast e _) = case_bndr_evald_next e
case_bndr_evald_next (App e _) = case_bndr_evald_next e
case_bndr_evald_next (Case e _ _ _) = case_bndr_evald_next e
case_bndr_evald_next _ = False
(This came up when fixing Trac #7542. See also Note [Eta reduction of
an eval'd function] in CoreUtils.)
Note [Case elimination: unlifted case]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
case a +# b of r -> ...r...
Then we do case-elimination (to make a let) followed by inlining,
to get
.....(a +# b)....
If we have
case indexArray# a i of r -> ...r...
we might like to do the same, and inline the (indexArray# a i).
But indexArray# is not okForSpeculation, so we don't build a let
in rebuildCase (lest it get floated *out*), so the inlining doesn't
happen either.
This really isn't a big deal I think. The let can be
Further notes about case elimination
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider: test :: Integer -> IO ()
test = print
Turns out that this compiles to:
Print.test
= \ eta :: Integer
eta1 :: Void# ->
case PrelNum.< eta PrelNum.zeroInteger of wild { __DEFAULT ->
case hPutStr stdout
(PrelNum.jtos eta ($w[] @ Char))
eta1
of wild1 { (# new_s, a4 #) -> PrelIO.lvl23 new_s }}
Notice the strange '<' which has no effect at all. This is a funny one.
It started like this:
f x y = if x < 0 then jtos x
else if y==0 then "" else jtos x
At a particular call site we have (f v 1). So we inline to get
if v < 0 then jtos x
else if 1==0 then "" else jtos x
Now simplify the 1==0 conditional:
if v<0 then jtos v else jtos v
Now common-up the two branches of the case:
case (v<0) of DEFAULT -> jtos v
Why don't we drop the case? Because it's strict in v. It's technically
wrong to drop even unnecessary evaluations, and in practice they
may be a result of 'seq' so we *definitely* don't want to drop those.
I don't really know how to improve this situation.
-}
---------------------------------------------------------
--      Eliminate the case if possible
--      See the Notes on case elimination above

rebuildCase, reallyRebuildCase
   :: SimplEnv
   -> OutExpr          -- Scrutinee
   -> InId             -- Case binder
   -> [InAlt]          -- Alternatives (increasing order)
   -> SimplCont
   -> SimplM (SimplEnv, OutExpr)

--------------------------------------------------
--      1. Eliminate the case if there's a known constructor
--------------------------------------------------

rebuildCase env scrut case_bndr alts cont
  | Lit lit <- scrut    -- No need for same treatment as constructors
                        -- because literals are inlined more vigorously
  , not (litIsLifted lit)
  = do  { tick (KnownBranch case_bndr)
        ; case findAlt (LitAlt lit) alts of
            Nothing           -> missingAlt env case_bndr alts cont
            Just (_, bs, rhs) -> simple_rhs bs rhs }

  | Just (con, ty_args, other_args) <- exprIsConApp_maybe (getUnfoldingInRuleMatch env) scrut
        -- Works when the scrutinee is a variable with a known unfolding
        -- as well as when it's an explicit constructor application
  = do  { tick (KnownBranch case_bndr)
        ; case findAlt (DataAlt con) alts of
            Nothing                 -> missingAlt env case_bndr alts cont
            Just (DEFAULT, bs, rhs) -> simple_rhs bs rhs
            Just (_, bs, rhs)       -> knownCon env scrut con ty_args other_args
                                                case_bndr bs rhs cont
        }
  where
    -- The matching alternative binds nothing, so we just bind the
    -- case binder to the scrutinee and simplify the RHS
    simple_rhs bs rhs = ASSERT( null bs )
                        do { env' <- simplNonRecX env case_bndr scrut
                               -- scrut is a constructor application,
                               -- hence satisfies let/app invariant
                           ; simplExprF env' rhs cont }

--------------------------------------------------
--      2. Eliminate the case if scrutinee is evaluated
--------------------------------------------------

rebuildCase env scrut case_bndr alts@[(_, bndrs, rhs)] cont
  -- See if we can get rid of the case altogether
  -- See Note [Case elimination]
  -- mkCase made sure that if all the alternatives are equal,
  -- then there is now only one (DEFAULT) rhs

  -- 2a.  Dropping the case altogether, if
  --      a) it binds nothing (so it's really just a 'seq')
  --      b) evaluating the scrutinee has no side effects
  | is_plain_seq
  , exprOkForSideEffects scrut
          -- The entire case is dead, so we can drop it
          -- if the scrutinee converges without having imperative
          --   side effects or raising a Haskell exception
          -- See Note [PrimOp can_fail and has_side_effects] in PrimOp
  = simplExprF env rhs cont

  -- 2b.  Turn the case into a let, if
  --      a) it binds only the case-binder
  --      b) unlifted case: the scrutinee is ok-for-speculation
  --           lifted case: the scrutinee is in HNF (or will later be demanded)
  | all_dead_bndrs
  , if is_unlifted
    then exprOkForSpeculation scrut  -- See Note [Case elimination: unlifted case]
    else exprIsHNF scrut             -- See Note [Case elimination: lifted case]
      || scrut_is_demanded_var scrut
  = do { tick (CaseElim case_bndr)
       ; env' <- simplNonRecX env case_bndr scrut
       ; simplExprF env' rhs cont }

  -- 2c. Try the seq rules if
  --     a) it binds only the case binder
  --     b) a rule for seq applies
  -- See Note [User-defined RULES for seq] in MkId
  | is_plain_seq
  = do { let rhs'     = substExpr (text "rebuild-case") env rhs
             env'     = zapSubstEnv env
             scrut_ty = substTy env (idType case_bndr)
             -- Build the argument list for a saturated call of seq,
             -- against which the user-written seq rules can match
             out_args = [ TyArg { as_arg_ty  = scrut_ty
                                , as_hole_ty = seq_id_ty }
                        , TyArg { as_arg_ty  = exprType rhs'
                                , as_hole_ty = applyTy seq_id_ty scrut_ty }
                        , ValArg scrut, ValArg rhs']
                      -- Lazily evaluated, so we don't do most of this

       ; rule_base <- getSimplRules
       ; mb_rule <- tryRules env' (getRules rule_base seqId) seqId out_args cont
       ; case mb_rule of
           Just (rule_rhs, cont') -> simplExprF env' rule_rhs cont'
           Nothing                -> reallyRebuildCase env scrut case_bndr alts cont }
  where
    is_unlifted    = isUnLiftedType (idType case_bndr)
    all_dead_bndrs = all isDeadBinder bndrs       -- bndrs are [InId]
    is_plain_seq   = all_dead_bndrs && isDeadBinder case_bndr -- Evaluation *only* for effect
    seq_id_ty      = idType seqId

    scrut_is_demanded_var :: CoreExpr -> Bool
            -- See Note [Eliminating redundant seqs]
    scrut_is_demanded_var (Cast s _) = scrut_is_demanded_var s
    scrut_is_demanded_var (Var _)    = isStrictDmd (idDemandInfo case_bndr)
    scrut_is_demanded_var _          = False

rebuildCase env scrut case_bndr alts cont
  = reallyRebuildCase env scrut case_bndr alts cont
--------------------------------------------------
--      3. Catch-all case
--------------------------------------------------

reallyRebuildCase env scrut case_bndr alts cont
  = do  {       -- Prepare the continuation;
                -- The new subst_env is in place
          (env', dup_cont, nodup_cont) <- prepareCaseCont env alts cont

        -- Simplify the alternatives
        ; (scrut', case_bndr', alts') <- simplAlts env' scrut case_bndr alts dup_cont

        ; dflags <- getDynFlags
        ; let alts_ty' = contResultType dup_cont
        ; case_expr <- mkCase dflags scrut' case_bndr' alts_ty' alts'

        -- Notice that rebuild gets the in-scope set from env', not alt_env
        -- (which in any case is only built in simplAlts)
        -- The case binder does *not* scope over the whole returned case-expression
        ; rebuild env' case_expr nodup_cont }
{-
simplCaseBinder checks whether the scrutinee is a variable, v. If so,
try to eliminate uses of v in the RHSs in favour of case_bndr; that
way, there's a chance that v will now only be used once, and hence
inlined.
Historical note: we use to do the "case binder swap" in the Simplifier
so there were additional complications if the scrutinee was a variable.
Now the binder-swap stuff is done in the occurrence analyser; see
OccurAnal Note [Binder swap].
Note [knownCon occ info]
~~~~~~~~~~~~~~~~~~~~~~~~
If the case binder is not dead, then neither are the pattern bound
variables:
case <any> of x { (a,b) ->
case x of { (p,q) -> p } }
Here (a,b) both look dead, but come alive after the inner case is eliminated.
The point is that we bring into the envt a binding
let x = (a,b)
after the outer case, and that makes (a,b) alive. At least we do unless
the case binder is guaranteed dead.
Note [Case alternative occ info]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When we are simply reconstructing a case (the common case), we always
zap the occurrence info on the binders in the alternatives. Even
if the case binder is dead, the scrutinee is usually a variable, and *that*
can bring the case-alternative binders back to life.
See Note [Add unfolding for scrutinee]
Note [Improving seq]
~~~~~~~~~~~~~~~~~~~
Consider
type family F :: * -> *
type instance F Int = Int
... case e of x { DEFAULT -> rhs } ...
where x::F Int. Then we'd like to rewrite (F Int) to Int, getting
case e `cast` co of x'::Int
I# x# -> let x = x' `cast` sym co
in rhs
so that 'rhs' can take advantage of the form of x'.
Notice that Note [Case of cast] (in OccurAnal) may then apply to the result.
Nota Bene: We only do the [Improving seq] transformation if the
case binder 'x' is actually used in the rhs; that is, if the case
is *not* a *pure* seq.
a) There is no point in adding the cast to a pure seq.
b) There is a good reason not to: doing so would interfere
with seq rules (Note [Built-in RULES for seq] in MkId).
In particular, this [Improving seq] thing *adds* a cast
while [Built-in RULES for seq] *removes* one, so they
just flip-flop.
You might worry about
case v of x { __DEFAULT ->
... case (v `cast` co) of y { I# -> ... }}
This is a pure seq (since x is unused), so [Improving seq] won't happen.
But it's ok: the simplifier will replace 'v' by 'x' in the rhs to get
case v of x { __DEFAULT ->
... case (x `cast` co) of y { I# -> ... }}
Now the outer case is not a pure seq, so [Improving seq] will happen,
and then the inner case will disappear.
The need for [Improving seq] showed up in Roman's experiments. Example:
foo :: F Int -> Int -> Int
foo t n = t `seq` bar n
where
bar 0 = 0
bar n = bar (n - case t of TI i -> i)
Here we'd like to avoid repeated evaluating t inside the loop, by
taking advantage of the `seq`.
At one point I did transformation in LiberateCase, but it's more
robust here. (Otherwise, there's a danger that we'll simply drop the
'seq' altogether, before LiberateCase gets to see it.)
-}
simplAlts :: SimplEnv
          -> OutExpr            -- Scrutinee
          -> InId               -- Case binder
          -> [InAlt]            -- Non-empty
          -> SimplCont
          -> SimplM (OutExpr, OutId, [OutAlt])  -- Includes the continuation
-- Like simplExpr, this just returns the simplified alternatives;
-- it does not return an environment
-- The returned alternatives can be empty, none are possible

simplAlts env scrut case_bndr alts cont'
  = do  { let env0 = zapFloats env

        ; (env1, case_bndr1) <- simplBinder env0 case_bndr

        -- Try Note [Improving seq]: may cast the scrutinee and
        -- replace the case binder
        ; fam_envs <- getFamEnvs
        ; (alt_env', scrut', case_bndr') <- improveSeq fam_envs env1 scrut
                                                       case_bndr case_bndr1 alts

        ; (imposs_deflt_cons, in_alts) <- prepareAlts scrut' case_bndr' alts
          -- NB: it's possible that the returned in_alts is empty: this is handled
          -- by the caller (rebuildCase) in the missingAlt function

        ; alts' <- mapM (simplAlt alt_env' (Just scrut') imposs_deflt_cons case_bndr' cont') in_alts
        ; -- pprTrace "simplAlts" (ppr case_bndr $$ ppr alts_ty $$ ppr alts_ty' $$ ppr alts $$ ppr cont') $
          return (scrut', case_bndr', alts') }
------------------------------------
improveSeq :: (FamInstEnv, FamInstEnv) -> SimplEnv
           -> OutExpr -> InId -> OutId -> [InAlt]
           -> SimplM (SimplEnv, OutExpr, OutId)
-- Note [Improving seq]
-- For a single-DEFAULT case that is not a pure seq, try to normalise
-- the type of the case binder (e.g. rewrite a type-family application),
-- casting the scrutinee accordingly so the RHS sees the concrete type.
improveSeq fam_envs env scrut case_bndr case_bndr1 [(DEFAULT,_,_)]
  | not (isDeadBinder case_bndr) -- Not a pure seq!  See Note [Improving seq]
  , Just (co, ty2) <- topNormaliseType_maybe fam_envs (idType case_bndr1)
  = do { case_bndr2 <- newId (fsLit "nt") ty2
        -- Substitute the old case binder by the new one, cast back
        -- to the original type
       ; let rhs  = DoneEx (Var case_bndr2 `Cast` mkSymCo co)
             env2 = extendIdSubst env case_bndr rhs
       ; return (env2, scrut `Cast` co, case_bndr2) }

improveSeq _ env scrut _ case_bndr1 _
  = return (env, scrut, case_bndr1)
------------------------------------
simplAlt :: SimplEnv
         -> Maybe OutExpr  -- The scrutinee
         -> [AltCon]       -- These constructors can't be present when
                           -- matching the DEFAULT alternative
         -> OutId          -- The case binder
         -> SimplCont
         -> InAlt
         -> SimplM OutAlt
-- Simplify one case alternative, recording in the environment what
-- the case binder (and scrutinee, if a variable) is known to be
-- within that alternative.

simplAlt env _ imposs_deflt_cons case_bndr' cont' (DEFAULT, bndrs, rhs)
  = ASSERT( null bndrs )
    do  { let env' = addBinderUnfolding env case_bndr'
                                        (mkOtherCon imposs_deflt_cons)
                -- Record the constructors that the case-binder *can't* be.
        ; rhs' <- simplExprC env' rhs cont'
        ; return (DEFAULT, [], rhs') }

simplAlt env scrut' _ case_bndr' cont' (LitAlt lit, bndrs, rhs)
  = ASSERT( null bndrs )
    do  { env' <- addAltUnfoldings env scrut' case_bndr' (Lit lit)
        ; rhs' <- simplExprC env' rhs cont'
        ; return (LitAlt lit, [], rhs') }

simplAlt env scrut' _ case_bndr' cont' (DataAlt con, vs, rhs)
  = do  {       -- Deal with the pattern-bound variables
                -- Mark the ones that are in ! positions in the
                -- data constructor as certainly-evaluated.
                -- NB: simplLamBinders preserves this eval info
        ; let vs_with_evals = add_evals (dataConRepStrictness con)
        ; (env', vs') <- simplLamBndrs env vs_with_evals

                -- Bind the case-binder to (con args)
        ; let inst_tys' = tyConAppArgs (idType case_bndr')
              con_app :: OutExpr
              con_app   = mkConApp2 con inst_tys' vs'

        ; env'' <- addAltUnfoldings env' scrut' case_bndr' con_app
        ; rhs' <- simplExprC env'' rhs cont'
        ; return (DataAlt con, vs', rhs') }
  where
        -- add_evals records the evaluated-ness of the bound variables of
        -- a case pattern.  This is *important*.  Consider
        --      data T = T !Int !Int
        --
        --      case x of { T a b -> T (a+1) b }
        --
        -- We really must record that b is already evaluated so that we don't
        -- go and re-evaluate it when constructing the result.
        -- See Note [Data-con worker strictness] in MkId.hs
    add_evals the_strs
        = go vs the_strs
        where
          go [] [] = []
          -- Type variables carry no strictness annotation; pass through
          go (v:vs') strs | isTyVar v = v : go vs' strs
          go (v:vs') (str:strs)
            | isMarkedStrict str = evald_v  : go vs' strs
            | otherwise          = zapped_v : go vs' strs
            where
              zapped_v = zapIdOccInfo v   -- See Note [Case alternative occ info]
              evald_v  = zapped_v `setIdUnfolding` evaldUnfolding
          go _ _ = pprPanic "cat_evals" (ppr con $$ ppr vs $$ ppr the_strs)
addAltUnfoldings :: SimplEnv -> Maybe OutExpr -> OutId -> OutExpr -> SimplM SimplEnv
-- Inside one case alternative, record that the case binder (and, when
-- the scrutinee is a variable, the scrutinee itself) is equal to the
-- given constructor application.
addAltUnfoldings env scrut case_bndr con_app
  = do { dflags <- getDynFlags
       ; let con_app_unf  = mkSimpleUnfolding dflags con_app
             env_with_bndr = addBinderUnfolding env case_bndr con_app_unf

             -- See Note [Add unfolding for scrutinee]
             env_with_scrut =
               case scrut of
                 Just (Var v)           -> addBinderUnfolding env_with_bndr v con_app_unf
                 Just (Cast (Var v) co) -> addBinderUnfolding env_with_bndr v $
                                           mkSimpleUnfolding dflags (Cast con_app (mkSymCo co))
                 _                      -> env_with_bndr

       ; traceSmpl "addAltUnf" (vcat [ppr case_bndr <+> ppr scrut, ppr con_app])
       ; return env_with_scrut }
addBinderUnfolding :: SimplEnv -> Id -> Unfolding -> SimplEnv
-- Attach the given unfolding to the binder in the in-scope set.
-- In a debug compiler, first warn if the unfolding template's type
-- does not match the binder's type.
addBinderUnfolding env bndr unf
  | debugIsOn, Just tmpl <- maybeUnfoldingTemplate unf
  = WARN( not (eqType (idType bndr) (exprType tmpl)),
          ppr bndr $$ ppr (idType bndr) $$ ppr tmpl $$ ppr (exprType tmpl) )
    env_with_unf
  | otherwise
  = env_with_unf
  where
    env_with_unf = modifyInScope env (bndr `setIdUnfolding` unf)
zapBndrOccInfo :: Bool -> Id -> Id
-- Zap the occurrence info on a pattern binder, unless asked to keep it.
-- Consider  case e of b { (a,b) -> ... }
-- If we bind b to (a,b) in "...", and b is not dead, then the
-- deadness info on a and b must be zapped.
zapBndrOccInfo keep_occ_info pat_id =
  if keep_occ_info then pat_id else zapIdOccInfo pat_id
{-
Note [Add unfolding for scrutinee]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In general it's unlikely that a variable scrutinee will appear
in the case alternatives case x of { ...x unlikely to appear... }
because the binder-swap in OccAnal has got rid of all such occurrences
See Note [Binder swap] in OccAnal.
BUT it is still VERY IMPORTANT to add a suitable unfolding for a
variable scrutinee, in simplAlt. Here's why
case x of y
(a,b) -> case b of c
I# v -> ...(f y)...
There is no occurrence of 'b' in the (...(f y)...). But y gets
the unfolding (a,b), and *that* mentions b. If f has a RULE
RULE f (p, I# q) = ...
we want that rule to match, so we must extend the in-scope env with a
suitable unfolding for 'y'. It's *essential* for rule matching; but
it's also good for case-elimination -- suppose that 'f' was inlined
and did multi-level case analysis, then we'd solve it in one
simplifier sweep instead of two.
Exactly the same issue arises in SpecConstr;
see Note [Add scrutinee to ValueEnv too] in SpecConstr
HOWEVER, given
case x of y { Just a -> r1; Nothing -> r2 }
we do not want to add the unfolding x -> y to 'x', which might seem cool,
since 'y' itself has different unfoldings in r1 and r2. Reason: if we
did that, we'd have to zap y's deadness info and that is a very useful
piece of information.
So instead we add the unfolding x -> Just a, and x -> Nothing in the
respective RHSs.
************************************************************************
* *
\subsection{Known constructor}
* *
************************************************************************
We are a bit careful with occurrence info. Here's an example
(\x* -> case x of (a*, b) -> f a) (h v, e)
where the * means "occurs once". This effectively becomes
case (h v, e) of (a*, b) -> f a)
and then
let a* = h v; b = e in f a
and then
f (h v)
All this should happen in one sweep.
-}
knownCon :: SimplEnv
         -> OutExpr                             -- The scrutinee
         -> DataCon -> [OutType] -> [OutExpr]   -- The scrutinee (in pieces)
         -> InId -> [InBndr] -> InExpr          -- The alternative
         -> SimplCont
         -> SimplM (SimplEnv, OutExpr)
-- The scrutinee is a constructor application, and we know which
-- alternative matches: bind the pattern variables to the constructor
-- arguments, bind the case binder, and simplify the RHS.

knownCon env scrut dc dc_ty_args dc_args bndr bs rhs cont
  = do  { env'  <- bind_args env bs dc_args
        ; env'' <- bind_case_bndr env'
        ; simplExprF env'' rhs cont }
  where
    zap_occ = zapBndrOccInfo (isDeadBinder bndr)    -- bndr is an InId

                  -- Ugh!
    bind_args env' [] _  = return env'

    bind_args env' (b:bs') (Type ty : args)
      = ASSERT( isTyVar b )
        bind_args (extendTvSubst env' b ty) bs' args

    bind_args env' (b:bs') (arg : args)
      = ASSERT( isId b )
        do { let b' = zap_occ b
             -- Note that the binder might be "dead", because it doesn't
             -- occur in the RHS; and simplNonRecX may therefore discard
             -- it via postInlineUnconditionally.
             -- Nevertheless we must keep it if the case-binder is alive,
             -- because it may be used in the con_app.  See Note [knownCon occ info]
           ; env'' <- simplNonRecX env' b' arg  -- arg satisfies let/app invariant
           ; bind_args env'' bs' args }

    -- Mismatched binders/arguments indicate a compiler bug upstream
    bind_args _ _ _ =
      pprPanic "bind_args" $ ppr dc $$ ppr bs $$ ppr dc_args $$
                             text "scrut:" <+> ppr scrut

       -- It's useful to bind bndr to scrut, rather than to a fresh
       -- binding      x = Con arg1 .. argn
       -- because very often the scrut is a variable, so we avoid
       -- creating, and then subsequently eliminating, a let-binding
       -- BUT, if scrut is a not a variable, we must be careful
       -- about duplicating the arg redexes; in that case, make
       -- a new con-app from the args
    bind_case_bndr env
      | isDeadBinder bndr   = return env
      | exprIsTrivial scrut = return (extendIdSubst env bndr (DoneEx scrut))
      | otherwise           = do { dc_args <- mapM (simplVar env) bs
                                         -- dc_ty_args are already OutTypes,
                                         -- but bs are InBndrs
                                 ; let con_app = Var (dataConWorkId dc)
                                                 `mkTyApps` dc_ty_args
                                                 `mkApps`   dc_args
                                 ; simplNonRecX env bndr con_app }
-------------------
missingAlt :: SimplEnv -> Id -> [InAlt] -> SimplCont -> SimplM (SimplEnv, OutExpr)
                -- This isn't strictly an error, although it is unusual.
                -- It's possible that the simplifier might "see" that
                -- an inner case has no accessible alternatives before
                -- it "sees" that the entire branch of an outer case is
                -- inaccessible.  So we simply put an error case here instead.
missingAlt env case_bndr _ cont
  = WARN( True, ptext (sLit "missingAlt") <+> ppr case_bndr )
    return (env, mkImpossibleExpr (contResultType cont))
{-
************************************************************************
* *
\subsection{Duplicating continuations}
* *
************************************************************************
-}
prepareCaseCont :: SimplEnv
                -> [InAlt] -> SimplCont
                -> SimplM (SimplEnv,
                           SimplCont,   -- Dupable part
                           SimplCont)   -- Non-dupable part
-- We are considering
--     K[case _ of { p1 -> r1; ...; pn -> rn }]
-- where K is some enclosing continuation for the case
-- Goal: split K into two pieces Kdup,Knodup so that
--       a) Kdup can be duplicated
--       b) Knodup[Kdup[e]] = K[e]
-- The idea is that we'll transform thus:
--           Knodup[ (case _ of { p1 -> Kdup[r1]; ...; pn -> Kdup[rn] }
--
-- We may also return some extra bindings in SimplEnv (that scope over
-- the entire continuation)
--
-- When case-of-case is off, just make the entire continuation non-dupable

prepareCaseCont env alts cont
  | not (sm_case_case (getMode env)) = return (env, mkBoringStop (contHoleType cont), cont)
  | not (many_alts alts)             = return (env, cont, mkBoringStop (contResultType cont))
  | otherwise                        = mkDupableCont env cont
  where
    many_alts :: [InAlt] -> Bool  -- True iff strictly > 1 non-bottom alternative
    many_alts []  = False         -- See Note [Bottom alternatives]
    many_alts [_] = False
    -- Bottom alternatives don't count: they'll disappear when the
    -- duplicated continuation is pushed into them
    many_alts (alt:alts)
      | is_bot_alt alt = many_alts alts
      | otherwise      = not (all is_bot_alt alts)

    is_bot_alt (_,_,rhs) = exprIsBottom rhs
{-
Note [Bottom alternatives]
~~~~~~~~~~~~~~~~~~~~~~~~~~
When we have
case (case x of { A -> error .. ; B -> e; C -> error ..)
of alts
then we can just duplicate those alts because the A and C cases
will disappear immediately. This is more direct than creating
join points and inlining them away; and in some cases we would
not even create the join points (see Note [Single-alternative case])
and we would keep the case-of-case which is silly. See Trac #4930.
-}
mkDupableCont :: SimplEnv -> SimplCont
              -> SimplM (SimplEnv, SimplCont, SimplCont)
-- Split a continuation into a duplicable part and a non-duplicable
-- part; see prepareCaseCont for the specification.

mkDupableCont env cont
  | contIsDupable cont
  = return (env, cont, mkBoringStop (contResultType cont))

mkDupableCont _ (Stop {}) = panic "mkDupableCont"     -- Handled by previous eqn

-- A cast can simply be pushed into the dupable part
mkDupableCont env (CastIt ty cont)
  = do  { (env', dup, nodup) <- mkDupableCont env cont
        ; return (env', CastIt ty dup, nodup) }

-- Duplicating ticks for now, not sure if this is good or not
mkDupableCont env cont@(TickIt {})
  = return (env, mkBoringStop (contHoleType cont), cont)

mkDupableCont env cont@(StrictBind {})
  = return (env, mkBoringStop (contHoleType cont), cont)
        -- See Note [Duplicating StrictBind]

mkDupableCont env (StrictArg info cci cont)
        -- See Note [Duplicating StrictArg]
  = do { (env', dup, nodup) <- mkDupableCont env cont
       ; (env'', args')     <- mapAccumLM makeTrivialArg env' (ai_args info)
       ; return (env'', StrictArg (info { ai_args = args' }) cci dup, nodup) }

mkDupableCont env cont@(ApplyToTy { sc_cont = tail })
  = do  { (env', dup_cont, nodup_cont) <- mkDupableCont env tail
        ; return (env', cont { sc_cont = dup_cont }, nodup_cont ) }

mkDupableCont env (ApplyToVal { sc_arg = arg, sc_env = se, sc_cont = cont })
  =     -- e.g.         [...hole...] (...arg...)
        --      ==>
        --              let a = ...arg...
        --              in [...hole...] a
    do  { (env', dup_cont, nodup_cont) <- mkDupableCont env cont
        ; arg' <- simplExpr (se `setInScope` env') arg
        ; (env'', arg'') <- makeTrivial NotTopLevel env' arg'
        ; let app_cont = ApplyToVal { sc_arg = arg'', sc_env = zapSubstEnv env''
                                    , sc_dup = OkToDup, sc_cont = dup_cont }
        ; return (env'', app_cont, nodup_cont) }

mkDupableCont env cont@(Select _ case_bndr [(_, bs, _rhs)] _ _)
--  See Note [Single-alternative case]
--  | not (exprIsDupable rhs && contIsDupable case_cont)
--  | not (isDeadBinder case_bndr)
  | all isDeadBinder bs  -- InIds
    && not (isUnLiftedType (idType case_bndr))
    -- Note [Single-alternative-unlifted]
  = return (env, mkBoringStop (contHoleType cont), cont)

mkDupableCont env (Select _ case_bndr alts se cont)
  =     -- e.g.         (case [...hole...] of { pi -> ei })
        --      ===>
        --              let ji = \xij -> ei
        --              in case [...hole...] of { pi -> ji xij }
    do  { tick (CaseOfCase case_bndr)
        ; (env', dup_cont, nodup_cont) <- prepareCaseCont env alts cont
                -- NB: We call prepareCaseCont here.  If there is only one
                -- alternative, then dup_cont may be big, but that's ok
                -- because we push it into the single alternative, and then
                -- use mkDupableAlt to turn that simplified alternative into
                -- a join point if it's too big to duplicate.
                -- And this is important: see Note [Fusing case continuations]

        ; let alt_env = se `setInScope` env'

        ; (alt_env', case_bndr') <- simplBinder alt_env case_bndr
        ; alts' <- mapM (simplAlt alt_env' Nothing [] case_bndr' dup_cont) alts
        -- Safe to say that there are no handled-cons for the DEFAULT case
                -- NB: simplBinder does not zap deadness occ-info, so
                -- a dead case_bndr' will still advertise its deadness
                -- This is really important because in
                --      case e of b { (# p,q #) -> ... }
                -- b is always dead, and indeed we are not allowed to bind b to (# p,q #),
                -- which might happen if e was an explicit unboxed pair and b wasn't marked dead.
                -- In the new alts we build, we have the new case binder, so it must retain
                -- its deadness.
        -- NB: we don't use alt_env further; it has the substEnv for
        --     the alternatives, and we don't want that

        ; (env'', alts'') <- mkDupableAlts env' case_bndr' alts'
        ; return (env'',  -- Note [Duplicated env]
                  Select OkToDup case_bndr' alts'' (zapSubstEnv env'')
                         (mkBoringStop (contHoleType nodup_cont)),
                  nodup_cont) }
mkDupableAlts :: SimplEnv -> OutId -> [InAlt]
              -> SimplM (SimplEnv, [InAlt])
-- Absorbs the continuation into the new alternatives,
-- threading the environment left-to-right through each alternative
mkDupableAlts env case_bndr' the_alts
  = mapAccumLM (\cur_env alt -> mkDupableAlt cur_env case_bndr' alt) env the_alts
mkDupableAlt :: SimplEnv -> OutId -> (AltCon, [CoreBndr], CoreExpr)
             -> SimplM (SimplEnv, (AltCon, [CoreBndr], CoreExpr))
-- If the alternative's RHS is too big to duplicate, float it out as a
-- join point ("$j") bound in the returned SimplEnv, and replace the
-- RHS with a saturated call to that join point.
mkDupableAlt env case_bndr (con, bndrs', rhs') = do
  dflags <- getDynFlags
  if exprIsDupable dflags rhs'  -- Note [Small alternative rhs]
   then return (env, (con, bndrs', rhs'))
   else
    do  { let rhs_ty'  = exprType rhs'
              scrut_ty = idType case_bndr
              case_bndr_w_unf
                = case con of
                      DEFAULT    -> case_bndr
                      DataAlt dc -> setIdUnfolding case_bndr unf
                          where
                                 -- See Note [Case binders and join points]
                             unf = mkInlineUnfolding Nothing rhs
                             rhs = mkConApp2 dc (tyConAppArgs scrut_ty) bndrs'

                      LitAlt {} -> WARN( True, ptext (sLit "mkDupableAlt")
                                                <+> ppr case_bndr <+> ppr con )
                                   case_bndr
                           -- The case binder is alive but trivial, so why has
                           -- it not been substituted away?

              used_bndrs' | isDeadBinder case_bndr = filter abstract_over bndrs'
                          | otherwise              = bndrs' ++ [case_bndr_w_unf]

              abstract_over bndr
                  | isTyVar bndr = True -- Abstract over all type variables just in case
                  | otherwise    = not (isDeadBinder bndr)
                        -- The deadness info on the new Ids is preserved by simplBinders

        ; (final_bndrs', final_args)    -- Note [Join point abstraction]
                <- if (any isId used_bndrs')
                   then return (used_bndrs', varsToCoreExprs used_bndrs')
                   else do { rw_id <- newId (fsLit "w") voidPrimTy
                           ; return ([setOneShotLambda rw_id], [Var voidPrimId]) }

        ; join_bndr <- newId (fsLit "$j") (mkPiTypes final_bndrs' rhs_ty')
                -- Note [Funky mkPiTypes]

        ; let   -- We make the lambdas into one-shot-lambdas.  The
                -- join point is sure to be applied at most once, and doing so
                -- prevents the body of the join point being floated out by
                -- the full laziness pass
                really_final_bndrs     = map one_shot final_bndrs'
                one_shot v | isId v    = setOneShotLambda v
                           | otherwise = v
                join_rhs   = mkLams really_final_bndrs rhs'
                join_arity = exprArity join_rhs
                join_call  = mkApps (Var join_bndr) final_args

        ; env' <- addPolyBind NotTopLevel env (NonRec (join_bndr `setIdArity` join_arity) join_rhs)
        ; return (env', (con, bndrs', join_call)) }
        -- See Note [Duplicated env]
{-
Note [Fusing case continuations]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It's important to fuse two successive case continuations when the
first has one alternative. That's why we call prepareCaseCont here.
Consider this, which arises from thunk splitting (see Note [Thunk
splitting] in WorkWrap):
let
x* = case (case v of {pn -> rn}) of
I# a -> I# a
in body
The simplifier will find
(Var v) with continuation
Select (pn -> rn) (
Select [I# a -> I# a] (
StrictBind body Stop
So we'll call mkDupableCont on
Select [I# a -> I# a] (StrictBind body Stop)
There is just one alternative in the first Select, so we want to
simplify the rhs (I# a) with continuation (StrictBind body Stop)
Supposing that body is big, we end up with
let $j a = <let x = I# a in body>
in case v of { pn -> case rn of
I# a -> $j a }
This is just what we want because the rn produces a box that
the case rn cancels with.
See Trac #4957 for a fuller example.
Note [Case binders and join points]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider this
case (case .. ) of c {
I# c# -> ....c....
If we make a join point with c but not c# we get
$j = \c -> ....c....
But if later inlining scrutinises the c, thus
$j = \c -> ... case c of { I# y -> ... } ...
we won't see that 'c' has already been scrutinised. This actually
happens in the 'tabulate' function in wave4main, and makes a significant
difference to allocation.
An alternative plan is this:
$j = \c# -> let c = I# c# in ...c....
but that is bad if 'c' is *not* later scrutinised.
So instead we do both: we pass 'c' and 'c#' , and record in c's inlining
(a stable unfolding) that it's really I# c#, thus
$j = \c# -> \c[=I# c#] -> ...c....
Absence analysis may later discard 'c'.
NB: take great care when doing strictness analysis;
see Note [Lamba-bound unfoldings] in DmdAnal.
Also note that we can still end up passing stuff that isn't used. Before
strictness analysis we have
let $j x y c{=(x,y)} = (h c, ...)
in ...
After strictness analysis we see that h is strict, we end up with
let $j x y c{=(x,y)} = ($wh x y, ...)
and c is unused.
Note [Duplicated env]
~~~~~~~~~~~~~~~~~~~~~
Some of the alternatives are simplified, but have not been turned into a join point
So they *must* have a zapped subst-env.  So we can't use completeNonRecX to
bind the join point, because it might to do PostInlineUnconditionally, and
we'd lose that when zapping the subst-env. We could have a per-alt subst-env,
but zapping it (as we do in mkDupableCont, the Select case) is safe, and
at worst delays the join-point inlining.
Note [Small alternative rhs]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It is worth checking for a small RHS because otherwise we
get extra let bindings that may cause an extra iteration of the simplifier to
inline back in place. Quite often the rhs is just a variable or constructor.
The Ord instance of Maybe in PrelMaybe.hs, for example, took several extra
iterations because the version with the let bindings looked big, and so wasn't
inlined, but after the join points had been inlined it looked smaller, and so
was inlined.
NB: we have to check the size of rhs', not rhs.
Duplicating a small InAlt might invalidate occurrence information
However, if it *is* dupable, we return the *un* simplified alternative,
because otherwise we'd need to pair it up with an empty subst-env....
but we only have one env shared between all the alts.
(Remember we must zap the subst-env before re-simplifying something).
Rather than do this we simply agree to re-simplify the original (small) thing later.
Note [Funky mkPiTypes]
~~~~~~~~~~~~~~~~~~~~~~
Notice the funky mkPiTypes.  If the constructor has existentials
it's possible that the join point will be abstracted over
type variables as well as term variables.
Example: Suppose we have
data T = forall t. C [t]
Then faced with
case (case e of ...) of
C t xs::[t] -> rhs
We get the join point
let j :: forall t. [t] -> ...
j = /\t \xs::[t] -> rhs
in
case (case e of ...) of
C t xs::[t] -> j t xs
Note [Join point abstraction]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Join points always have at least one value argument,
for several reasons
* If we try to lift a primitive-typed something out
for let-binding-purposes, we will *caseify* it (!),
with potentially-disastrous strictness results. So
instead we turn it into a function: \v -> e
where v::Void#. The value passed to this function is void,
which generates (almost) no code.
* CPR. We used to say "&& isUnLiftedType rhs_ty'" here, but now
we make the join point into a function whenever used_bndrs'
is empty. This makes the join-point more CPR friendly.
Consider: let j = if .. then I# 3 else I# 4
in case .. of { A -> j; B -> j; C -> ... }
Now CPR doesn't w/w j because it's a thunk, so
that means that the enclosing function can't w/w either,
which is a lose. Here's the example that happened in practice:
kgmod :: Int -> Int -> Int
kgmod x y = if x > 0 && y < 0 || x < 0 && y > 0
then 78
else 5
* Let-no-escape. We want a join point to turn into a let-no-escape
so that it is implemented as a jump, and one of the conditions
for LNE is that it's not updatable. In CoreToStg, see
Note [What is a non-escaping let]
* Floating. Since a join point will be entered once, no sharing is
gained by floating out, but something might be lost by doing
so because it might be allocated.
I have seen a case alternative like this:
True -> \v -> ...
It's a bit silly to add the realWorld dummy arg in this case, making
$j = \s v -> ...
True -> $j s
(the \v alone is enough to make CPR happy) but I think it's rare
There's a slight infelicity here: we pass the overall
case_bndr to all the join points if it's used in *any* RHS,
because we don't know its usage in each RHS separately
Note [Duplicating StrictArg]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The original plan had (where E is a big argument)
e.g. f E [..hole..]
==> let $j = \a -> f E a
in $j [..hole..]
But this is terrible! Here's an example:
&& E (case x of { T -> F; F -> T })
Now, && is strict so we end up simplifying the case with
an ArgOf continuation. If we let-bind it, we get
let $j = \v -> && E v
in simplExpr (case x of { T -> F; F -> T })
(ArgOf (\r -> $j r)
And after simplifying more we get
let $j = \v -> && E v
in case x of { T -> $j F; F -> $j T }
Which is a Very Bad Thing
What we do now is this
f E [..hole..]
==> let a = E
in f a [..hole..]
Now if the thing in the hole is a case expression (which is when
we'll call mkDupableCont), we'll push the function call into the
branches, which is what we want. Now RULES for f may fire, and
call-pattern specialisation. Here's an example from Trac #3116
go (n+1) (case l of
1 -> bs'
_ -> Chunk p fpc (o+1) (l-1) bs')
If we can push the call for 'go' inside the case, we get
call-pattern specialisation for 'go', which is *crucial* for
this program.
Here is the (&&) example:
&& E (case x of { T -> F; F -> T })
==> let a = E in
case x of { T -> && a F; F -> && a T }
Much better!
Notice that
* Arguments to f *after* the strict one are handled by
the ApplyToVal case of mkDupableCont. Eg
f [..hole..] E
* We can only do the let-binding of E because the function
part of a StrictArg continuation is an explicit syntax
tree. In earlier versions we represented it as a function
(CoreExpr -> CoreExpr) which we couldn't take apart.
Do *not* duplicate StrictBind and StrictArg continuations. We gain
nothing by propagating them into the expressions, and we do lose a
lot.
The desire not to duplicate is the entire reason that
mkDupableCont returns a pair of continuations.
Note [Duplicating StrictBind]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Unlike StrictArg, there doesn't seem anything to gain from
duplicating a StrictBind continuation, so we don't.
Note [Single-alternative cases]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This case is just like the ArgOf case. Here's an example:
data T a = MkT !a
...(MkT (abs x))...
Then we get
case (case x of I# x' ->
case x' <# 0# of
True -> I# (negate# x')
False -> I# x') of y {
DEFAULT -> MkT y
Because the (case x) has only one alternative, we'll transform to
case x of I# x' ->
case (case x' <# 0# of
True -> I# (negate# x')
False -> I# x') of y {
DEFAULT -> MkT y
But now we do *NOT* want to make a join point etc, giving
case x of I# x' ->
let $j = \y -> MkT y
in case x' <# 0# of
True -> $j (I# (negate# x'))
False -> $j (I# x')
In this case the $j will inline again, but suppose there was a big
strict computation enclosing the original call to MkT. Then, it won't
"see" the MkT any more, because it's big and won't get duplicated.
And, what is worse, nothing was gained by the case-of-case transform.
So, in circumstances like these, we don't want to build join points
and push the outer case into the branches of the inner one. Instead,
don't duplicate the continuation.
When should we use this strategy? We should not use it on *every*
single-alternative case:
e.g. case (case ....) of (a,b) -> (# a,b #)
Here we must push the outer case into the inner one!
Other choices:
* Match [(DEFAULT,_,_)], but in the common case of Int,
the alternative-filling-in code turned the outer case into
case (...) of y { I# _ -> MkT y }
* Match on single alternative plus (not (isDeadBinder case_bndr))
Rationale: pushing the case inwards won't eliminate the construction.
But there's a risk of
case (...) of y { (a,b) -> let z=(a,b) in ... }
Now y looks dead, but it'll come alive again. Still, this
seems like the best option at the moment.
* Match on single alternative plus (all (isDeadBinder bndrs))
Rationale: this is essentially seq.
* Match when the rhs is *not* duplicable, and hence would lead to a
join point. This catches the disaster-case above. We can test
the *un-simplified* rhs, which is fine. It might get bigger or
smaller after simplification; if it gets smaller, this case might
fire next time round. NB also that we must test contIsDupable
case_cont *too*, because case_cont might be big!
HOWEVER: I found that this version doesn't work well, because
we can get let x = case (...) of { small } in ...case x...
When x is inlined into its full context, we find that it was a bad
idea to have pushed the outer case inside the (...) case.
Note [Single-alternative-unlifted]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Here's another single-alternative where we really want to do case-of-case:
data Mk1 = Mk1 Int# | Mk2 Int#
M1.f =
\r [x_s74 y_s6X]
case
case y_s6X of tpl_s7m {
M1.Mk1 ipv_s70 -> ipv_s70;
M1.Mk2 ipv_s72 -> ipv_s72;
}
of
wild_s7c
{ __DEFAULT ->
case
case x_s74 of tpl_s7n {
M1.Mk1 ipv_s77 -> ipv_s77;
M1.Mk2 ipv_s79 -> ipv_s79;
}
of
wild1_s7b
{ __DEFAULT -> ==# [wild1_s7b wild_s7c];
};
};
So the outer case is doing *nothing at all*, other than serving as a
join-point. In this case we really want to do case-of-case and decide
whether to use a real join point or just duplicate the continuation:
let $j s7c = case x of
Mk1 ipv77 -> (==) s7c ipv77
Mk2 ipv79 -> (==) s7c ipv79
in
case y of
Mk1 ipv70 -> $j ipv70
Mk2 ipv72 -> $j ipv72
Hence: check whether the case binder's type is unlifted, because then
the outer case is *not* a seq.
-}
|
gcampax/ghc
|
compiler/simplCore/Simplify.hs
|
Haskell
|
bsd-3-clause
| 118,609
|
#!/usr/bin/env runhaskell
import Distribution.PackageDescription
import Distribution.Simple
import Distribution.Simple.LocalBuildInfo
-- | Standard Cabal @Setup.hs@ entry point: defer to the default build hooks.
main :: IO ()
main = defaultMain
|
jeffwheeler/pointedlist
|
Setup.hs
|
Haskell
|
bsd-3-clause
| 168
|
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE FunctionalDependencies #-}
module Data.StreamLike where
import Data.ListLike as LL
import Data.Word
import Data.Bits
import Data.Functor
import Data.ByteString as B
import Control.Comonad
-- | Stream type. @s@ is a stream such as a list or ByteString; @c@ is the
-- type of the stream's elements.  (An earlier revision also carried a size
-- type @i@; this class no longer has one.)
--
-- NOTE: 'head' and 'tail' deliberately shadow the Prelude names; import
-- this module qualified or hide the Prelude versions.
class StreamLike s c where
  head :: s -> c
  tail :: s -> s
-- take :: Int -> s -> s
-- drop :: Int -> s -> s
-- splitAt :: Int -> s -> (s,s)
-- null :: s -> Bool
-- empty :: s
-- length :: s -> Int
-- span :: (c->Bool) -> s -> (s,s)
-- toList :: s -> [c]
-- conv :: (Integral a, Integral b) => a -> b
-- conv = fromInteger . toInteger
-- | A stream @s@ paired with a running fold: the @a -> c -> a@ step
-- function and the accumulator of type @a@ that is advanced as elements
-- are consumed (see the 'StreamLike' instance below).
data Container s c a = Container (a -> c -> a) s a
-- instance (LL.ListLike s c) => Functor (Container a s c) where
-- fmap f (Container a b) = Container a (f b)
-- class Listable s c where
-- foobar :: s -> c
-- instance (LL.ListLike s c) => Listable (Container x s) c where
-- 'head' inspects the underlying stream; 'tail' advances the stream and
-- folds the consumed element into the accumulator.
instance (LL.ListLike s c) => StreamLike (Container s c a) c where
  head (Container _ stream _) = LL.head stream
  tail (Container step stream acc) =
    Container step (LL.tail stream) (step acc (LL.head stream))
-- take (Container a b) = LL.take
-- drop (Container a b) = LL.drop
-- splitAt (Container a b) = LL.splitAt
-- null (Container a b) = LL.null
-- empty (Container a b) = LL.empty
-- length (Container a b) = LL.length
-- span (Container a b) = LL.span
-- toList (Container a b) = LL.toList
-- instance StreamLike ByteString Word8 Int where
-- head = B.head
-- tail = B.tail
-- take = B.take
-- drop = B.drop
-- splitAt n = B.splitAt n
-- null = B.null
-- empty = B.empty
-- length = B.length
-- span = B.span
-- toList = B.unpack
|
ierton/yteratee
|
src/Data/StreamLike.hs
|
Haskell
|
bsd-3-clause
| 1,841
|
{-# LANGUAGE MultiParamTypeClasses #-}
-- | The Scale module implements scales.
module Music.Diatonic.Scale (
Scale, Scl(..),
majorScale, minorScale, majorPentatonicScale, minorPentatonicScale, minorHarmonicScale, minorMelodicScale,
tetrachord
) where
import Music.Diatonic
-- | A scale: diatonic or pentatonic (with explicit 'Quality'), or one of
-- the two altered minor scales.  The 'Note' is the tonic.
data Scale = Diatonic Quality Note
           | Pentatonic Quality Note
           | Harmonic Note
           | Melodic Note
  deriving (Eq)

-- | Things that have an associated 'Scale'.
class Scl a where
  scale :: a -> Scale
instance Nte Scale where
  -- Map the tonic through @f@ and rebuild the same kind of scale.
  noteMap f (Diatonic Major n)   = majorScale . f $ n
  noteMap f (Diatonic Minor n)   = minorScale . f $ n
  noteMap f (Pentatonic Major n) = majorPentatonicScale . f $ n
  noteMap f (Pentatonic Minor n) = minorPentatonicScale . f $ n
  noteMap f (Harmonic n)         = minorHarmonicScale . f $ n
  noteMap f (Melodic n)          = minorMelodicScale . f $ n
  -- Combine two scales by combining their tonics.
  notePlus f s1 s2 = f (tonic s1) (tonic s2)
instance Nts Scale where
  -- Major scale: two stacked tetrachords; 'init' drops the duplicated
  -- upper tonic, leaving the seven scale degrees.
  notes (Diatonic Major n) = init $ tc1 ++ tc2
    where tc1 = tetrachord n
          tc2 = tetrachord (Maj2nd `above` last tc1)
  -- Natural minor: lower degrees 3, 6 and 7 of the parallel major.
  notes (Diatonic Minor n) = zipWith ($) [id, id, lower, id, id, lower, lower] (notes . majorScale $ n)
  -- Pentatonics: drop degrees 4 and 7 (major), or 2 and 6 (minor).
  notes (Pentatonic Major n) = concat . zipWith ($) [return, return, return, const [], return, return, const []] $ (notes . majorScale $ n)
  notes (Pentatonic Minor n) = concat . zipWith ($) [return, const [], return, return, return, const [], return] $ (notes . minorScale $ n)
  -- Harmonic minor raises the 7th; melodic minor raises the 6th and 7th.
  notes (Harmonic n) = zipWith ($) [id, id, id, id, id, id, raise] $ (notes . minorScale $ n)
  notes (Melodic n) = zipWith ($) [id, id, id, id, id, raise, raise] $ (notes . minorScale $ n)
-- Harmonic and melodic scales are always minor; the others carry their
-- quality explicitly.
instance Qual Scale where
  quality s = case s of
    Diatonic q _   -> q
    Pentatonic q _ -> q
    Harmonic _     -> Minor
    Melodic _      -> Minor
-- Render as the tonic, an "m" suffix for minor scales, and a family tag
-- for the non-diatonic ones.
instance Show Scale where
  show (Harmonic n) = show n ++ "m (harmonic)"
  show (Melodic n)  = show n ++ "m (melodic)"
  show s@(Pentatonic _ n) = show n ++ minorSuffix s ++ " (pentatonic)"
  show s@(Diatonic _ n)   = show n ++ minorSuffix s
instance Deg Scale Note where
  -- The first degree of any scale is its tonic.
  first (Diatonic q t) = t
  first (Pentatonic q t) = t
  first (Harmonic t) = t
  first (Melodic t) = t
  -- Pair every note of the scale with its degree relative to the tonic.
  degrees s = map (\n -> (notePlus degree (first s) n, n)) ns
    where ns = notes s
instance Equiv Scale where
  -- Two scales are equivalent when their tonics are equivalent and they
  -- are the same kind of scale (compared with the tonic normalised to C).
  equiv s1 s2 = enote && etype
    where enote = notePlus equiv s1 s2
          etype = (toC $# s1) == (toC $# s2)
          toC = const C
-- Smart constructors: each one fixes the scale family and takes the tonic.

-- | Creates a 'Major' diatonic 'Scale' using the given 'Note' as the tonic.
majorScale :: Note -> Scale
majorScale = Diatonic Major

-- | Creates a 'Minor' diatonic 'Scale' using the given 'Note' as the tonic.
minorScale :: Note -> Scale
minorScale = Diatonic Minor

-- | Creates a 'Major' pentatonic 'Scale' using the given 'Note' as the tonic.
majorPentatonicScale :: Note -> Scale
majorPentatonicScale = Pentatonic Major

-- | Creates a 'Minor' pentatonic 'Scale' using the given 'Note' as the tonic.
minorPentatonicScale :: Note -> Scale
minorPentatonicScale = Pentatonic Minor

-- | Creates a 'Minor' harmonic 'Scale' using the given 'Note' as the tonic.
minorHarmonicScale :: Note -> Scale
minorHarmonicScale = Harmonic

-- | Creates a 'Minor' melodic 'Scale' using the given 'Note' as the tonic.
minorMelodicScale :: Note -> Scale
minorMelodicScale = Melodic
-- | Returns a tetrachord using the given 'Note' as the starting note.
--
-- > tetrachord G == [G,A,B,C]
tetrachord :: Note -> [Note]
tetrachord root = scanl step root [Maj2nd, Maj2nd, Min2nd]
  where step note interval = interval `above` note
|
xpika/music-diatonic
|
Music/Diatonic/Scale.hs
|
Haskell
|
bsd-3-clause
| 3,559
|
-----------------------------------------------------------------------------
-- |
-- Module : Network.HTTP.HandleStream
-- Copyright : (c) 2008- Sigbjorn Finne
-- License : BSD
--
-- Maintainer : Sigbjorn Finne <sigbjorn.finne@gmail.com>
-- Stability : experimental
-- Portability : non-portable (not tested)
--
-- A 'HandleStream'-based version of "Network.HTTP" interface.
--
-- For more detailed information about what the individual exports do, please consult
-- the documentation for "Network.HTTP". /Notice/ however that the functions here do
-- not perform any kind of normalization prior to transmission (or receipt); you are
-- responsible for doing any such yourself, or, if you prefer, just switch to using
-- "Network.HTTP" function instead.
--
-----------------------------------------------------------------------------
module Network.HTTP.HandleStream
( simpleHTTP -- :: Request ty -> IO (Result (Response ty))
, simpleHTTP_ -- :: HStream ty => HandleStream ty -> Request ty -> IO (Result (Response ty))
, sendHTTP -- :: HStream ty => HandleStream ty -> Request ty -> IO (Result (Response ty))
, sendHTTP_notify -- :: HStream ty => HandleStream ty -> Request ty -> IO () -> IO (Result (Response ty))
, receiveHTTP -- :: HStream ty => HandleStream ty -> IO (Result (Request ty))
, respondHTTP -- :: HStream ty => HandleStream ty -> Response ty -> IO ()
, simpleHTTP_debug -- :: FilePath -> Request DebugString -> IO (Response DebugString)
) where
-----------------------------------------------------------------
------------------ Imports --------------------------------------
-----------------------------------------------------------------
import Network.BufferType
import Network.Stream ( fmapE, Result )
import Network.StreamDebugger ( debugByteStream )
import Network.TCP (HStream(..), HandleStream )
import Network.HTTP.Base
import Network.HTTP.Headers
import Network.HTTP.Utils ( trim, readsOne )
import Data.Char (toLower)
import Data.Maybe (fromMaybe)
import Control.Monad (when)
-----------------------------------------------------------------
------------------ Misc -----------------------------------------
-----------------------------------------------------------------
-- | @simpleHTTP@ transmits a resource across a non-persistent connection.
-- A fresh stream is opened to the request's authority (port 80 when the
-- URL does not specify one).
simpleHTTP :: HStream ty => Request ty -> IO (Result (Response ty))
simpleHTTP req = do
  auth <- getAuth req
  conn <- openStream (host auth) (fromMaybe 80 (port auth))
  simpleHTTP_ conn req
-- | @simpleHTTP_debug debugFile req@ behaves like 'simpleHTTP', but logs
-- the HTTP operation via the debug file @debugFile@.
simpleHTTP_debug :: HStream ty => FilePath -> Request ty -> IO (Result (Response ty))
simpleHTTP_debug httpLogFile req = do
  auth <- getAuth req
  rawConn <- openStream (host auth) (fromMaybe 80 (port auth))
  loggedConn <- debugByteStream httpLogFile rawConn
  simpleHTTP_ loggedConn req
-- | Like 'simpleHTTP', but acting on an already opened stream.
-- (Currently a straight synonym for 'sendHTTP'.)
simpleHTTP_ :: HStream ty => HandleStream ty -> Request ty -> IO (Result (Response ty))
simpleHTTP_ = sendHTTP
-- | @sendHTTP hStream httpRequest@ transmits @httpRequest@ over
-- @hStream@, but does not alter the status of the connection, nor request it to be
-- closed upon receiving the response.
--
-- Equivalent to 'sendHTTP_notify' with a no-op completion action.
sendHTTP :: HStream ty => HandleStream ty -> Request ty -> IO (Result (Response ty))
sendHTTP conn rq = sendHTTP_notify conn rq (return ())
-- | @sendHTTP_notify hStream httpRequest action@ behaves like 'sendHTTP', but
-- lets you supply an IO @action@ to execute once the request has been successfully
-- transmitted over the connection. Useful when you want to set up tracing of
-- request transmission and its performance.
sendHTTP_notify :: HStream ty
                => HandleStream ty
                -> Request ty
                -> IO ()
                -> IO (Result (Response ty))
sendHTTP_notify conn rq onSendComplete = do
  -- If the request itself carries "Connection: close", mark the stream so
  -- it gets closed once the exchange finishes.
  when providedClose $ (closeOnEnd conn True)
  -- Any IO failure while talking to the peer closes the connection before
  -- the exception is re-thrown.
  catchIO (sendMain conn rq onSendComplete)
          (\e -> do { close conn; ioError e })
  where
    providedClose = findConnClose (rqHeaders rq)
-- From RFC 2616, section 8.2.3:
-- 'Because of the presence of older implementations, the protocol allows
-- ambiguous situations in which a client may send "Expect: 100-
-- continue" without receiving either a 417 (Expectation Failed) status
-- or a 100 (Continue) status. Therefore, when a client sends this
-- header field to an origin server (possibly via a proxy) from which it
-- has never seen a 100 (Continue) status, the client SHOULD NOT wait
-- for an indefinite period before sending the request body.'
--
-- Since we would wait forever, I have disabled use of 100-continue for now.
-- Send the request line + headers, then the body without waiting for a
-- "100 Continue" (see the RFC 2616 section 8.2.3 note above), run the
-- caller's completion action, and hand the first response head over to
-- 'switchResponse'.
-- NOTE(review): the 'Result's returned by both writeBlock calls are
-- discarded; a write failure surfaces only via exceptions.
sendMain :: HStream ty
         => HandleStream ty
         -> Request ty
         -> (IO ())
         -> IO (Result (Response ty))
sendMain conn rqst onSendComplete = do
  --let str = if null (rqBody rqst)
  --              then show rqst
  --              else show (insertHeader HdrExpect "100-continue" rqst)
  writeBlock conn (buf_fromStr bufferOps $ show rqst)
  -- write body immediately, don't wait for 100 CONTINUE
  writeBlock conn (rqBody rqst)
  onSendComplete
  rsp <- getResponseHead conn
  switchResponse conn True False rsp rqst
-- Hmmm, this could go bad if we keep getting "100 Continue"
-- responses... Except this should never happen according
-- to the RFC.
-- Drive the request/response state machine: resend the body on
-- "100 Continue", retry once on Expect-related failures, and read the
-- response entity according to its framing headers.
switchResponse :: HStream ty
               => HandleStream ty
               -> Bool {- allow retry? -}
               -> Bool {- is body sent? -}
               -> Result ResponseData
               -> Request ty
               -> IO (Result (Response ty))
switchResponse _ _ _ (Left e) _ = return (Left e)
-- retry on connreset?
-- if we attempt to use the same socket then there is an excellent
-- chance that the socket is not in a completely closed state.
switchResponse conn allow_retry bdy_sent (Right (cd,rn,hdrs)) rqst =
  case matchResponse (rqMethod rqst) cd of
    Continue
      | not bdy_sent -> do {- Time to send the body -}
          writeBlock conn (rqBody rqst) >>= either (return . Left)
            (\ _ -> do
               rsp <- getResponseHead conn
               switchResponse conn allow_retry True rsp rqst)
      | otherwise -> do {- keep waiting -}
          rsp <- getResponseHead conn
          switchResponse conn allow_retry bdy_sent rsp rqst
    Retry -> do {- Request with "Expect" header failed.
                   Trouble is the request contains Expects
                   other than "100-Continue" -}
      -- Resend the whole request (head and body as one write) and do not
      -- allow any further retries.
      writeBlock conn ((buf_append bufferOps)
                       (buf_fromStr bufferOps (show rqst))
                       (rqBody rqst))
      rsp <- getResponseHead conn
      switchResponse conn False bdy_sent rsp rqst
    Done -> do
      -- No entity expected; honour a "Connection: close" response header.
      when (findConnClose hdrs)
           (closeOnEnd conn True)
      return (Right $ Response cd rn hdrs (buf_empty bufferOps))
    DieHorribly str -> do
      close conn
      return (responseParseError "Invalid response:" str)
    ExpectEntity -> do
      -- Choose the transfer decoder: chunked Transfer-Encoding wins, then
      -- Content-Length, then read-until-close (hopefulTransfer).
      r <- fmapE (\ (ftrs,bdy) -> Right (Response cd rn (hdrs++ftrs) bdy)) $
            maybe (maybe (hopefulTransfer bo (readLine conn) [])
                         (\ x ->
                            readsOne (linearTransfer (readBlock conn))
                                     (return$responseParseError "unrecognized content-length value" x)
                                     x)
                         cl)
                  (ifChunked (chunkedTransfer bo (readLine conn) (readBlock conn))
                             (uglyDeathTransfer "sendHTTP"))
                  tc
      case r of
        Left{} -> do
          close conn
          return r
        Right (Response _ _ hs _) -> do
          when (findConnClose hs)
               (closeOnEnd conn True)
          return r
  where
    tc = lookupHeader HdrTransferEncoding hdrs
    cl = lookupHeader HdrContentLength hdrs
    bo = bufferOps
-- reads and parses headers
-- (Reads lines until the first empty line, then parses them as a response
-- status line plus headers.)
getResponseHead :: HStream ty => HandleStream ty -> IO (Result ResponseData)
getResponseHead conn =
  fmapE (\es -> parseResponseHead (map (buf_toStr bufferOps) es))
        (readTillEmpty1 bufferOps (readLine conn))
-- | @receiveHTTP hStream@ reads a 'Request' from the 'HandleStream' @hStream@
receiveHTTP :: HStream bufTy => HandleStream bufTy -> IO (Result (Request bufTy))
receiveHTTP conn = getRequestHead >>= either (return . Left) processRequest
  where
    -- reads and parses headers
    getRequestHead :: IO (Result RequestData)
    getRequestHead = do
      fmapE (\es -> parseRequestHead (map (buf_toStr bufferOps) es))
            (readTillEmpty1 bufferOps (readLine conn))
    -- Read the request body according to the framing headers: chunked
    -- Transfer-Encoding wins over Content-Length; with neither present the
    -- body is treated as empty.
    processRequest (rm,uri,hdrs) =
      fmapE (\ (ftrs,bdy) -> Right (Request uri rm (hdrs++ftrs) bdy)) $
        maybe
          (maybe (return (Right ([], buf_empty bo))) -- hopefulTransfer ""
                 (\ x -> readsOne (linearTransfer (readBlock conn))
                                  (return$responseParseError "unrecognized Content-Length value" x)
                                  x)
                 cl)
          (ifChunked (chunkedTransfer bo (readLine conn) (readBlock conn))
                     (uglyDeathTransfer "receiveHTTP"))
          tc
      where
        -- FIXME : Also handle 100-continue.
        tc = lookupHeader HdrTransferEncoding hdrs
        cl = lookupHeader HdrContentLength hdrs
        bo = bufferOps
-- | @respondHTTP hStream httpResponse@ transmits an HTTP 'Response' over
-- the 'HandleStream' @hStream@. It could be used to implement simple web
-- server interactions, performing the dual role to 'sendHTTP'.
respondHTTP :: HStream ty => HandleStream ty -> Response ty -> IO ()
respondHTTP conn rsp = do
  -- Status line + headers first, then the body right away (no
  -- 100-continue handshake); write results are deliberately ignored.
  _ <- writeBlock conn (buf_fromStr bufferOps $ show rsp)
  _ <- writeBlock conn (rspBody rsp)
  return ()
------------------------------------------------------------------------------

-- | Normalise a header value for comparison: trim surrounding whitespace
-- and lowercase the rest.
headerName :: String -> String
headerName = map toLower . trim
-- | Select the first argument when the (normalised) transfer-encoding
-- string says "chunked", otherwise the second.
ifChunked :: a -> a -> String -> a
ifChunked chunked fallback enc
  | headerName enc == "chunked" = chunked
  | otherwise                   = fallback
|
astro/HTTPbis
|
Network/HTTP/HandleStream.hs
|
Haskell
|
bsd-3-clause
| 10,063
|
module Sexy.Instances.Plus.Double where
import Sexy.Classes (Plus(..))
import Sexy.Data (Double)
import qualified Prelude as P
-- | Addition for 'Double' delegates directly to the Prelude's @(+)@.
instance Plus Double where
  (+) = (P.+)
|
DanBurton/sexy
|
src/Sexy/Instances/Plus/Double.hs
|
Haskell
|
bsd-3-clause
| 169
|
-----------------------------------------------------------------------------
--
-- Module : GameOfLife.Ui.Text
-- Copyright : 2016 Author name here
-- License : BSD3
--
-- Maintainer : bnazariy@gmail.com
-- Stability :
-- Portability :
--
-- |
--
-----------------------------------------------------------------------------
module GameOfLife.Ui.Text (
showGridEffective, runGameContiniously
) where
import GameOfLife
import Data.List(intercalate, elemIndex)
import Data.List.Split(splitOn)
import System.Console.ANSI
import Control.Monad(forM_, when)
import Data.Maybe(fromMaybe)
import Control.Concurrent(threadDelay)
-- Redraws only the lines that contain at least one live cell.
--
-- NOTE(review): the original computed each row's index with 'elemIndex',
-- which returns the index of the *first* equal row; with duplicate rows
-- later duplicates were never redrawn (and the scan was O(n^2)).  Pairing
-- each row with its own position fixes both problems.
showGridEffective :: RenderFunc
showGridEffective _ [] = return ()
showGridEffective True _ = return ()
showGridEffective False g = do
  -- Indexes of the rows that contain at least one live cell.
  let liveRows = [i | (i, row) <- zip [0 ..] g, or row]
  forM_ liveRows $ \y -> do
    setCursorPosition y 0
    clearLine
    forM_ (render y) $ \ch -> do
      -- Live cells are highlighted in red.
      when (ch == '@') $
        setSGR [SetColor Foreground Vivid Red]
      putChar ch
      setSGR [Reset]
  where
    render y = [if cell then '@' else ' ' | cell <- g !! y]
-- Run the game generation after generation, forever.
-- Takes GameOptions, a delay multiplier and a render function.
-- The render function receives a Bool and the Grid:
--   True  -> the implementation should clear the screen;
--   False -> it should draw the new generation.
runGameContiniously :: GameFunc
runGameContiniously opts delay renderF = do
  let gen = nextGeneration $ grid opts
  renderF True gen
  renderF False gen
  threadDelay ((100 * 60) * delay)
  -- Tail-recurses forever; the original had an unreachable 'return ()'
  -- after this call, which has been removed.
  runGameContiniously (createOpts gen 10) delay renderF
-- | Render a fixed number of generations ('runs opts') in one go,
-- separated by newlines.
runGame :: GameOptions -> IO ()
runGame opts = putStr (intercalate "\n" rendered)
  where
    generations = take (runs opts) (iterate nextGeneration (grid opts))
    rendered    = map showGrid generations
-- | Render a grid as text: '@' for a live cell, '-' for a dead one, one
-- line per row, with a trailing newline.  The width is taken from the
-- first row (like the original, ragged grids index out of range).
showGrid :: Grid -> String
showGrid [] = ""
showGrid g = intercalate "\n" rows ++ "\n"
  where
    w = length (head g)
    h = length g
    rows = [row y | y <- [0 .. h - 1]]
    row y = [glyph ((g !! y) !! x) | x <- [0 .. w - 1]]
    glyph alive = if alive then '@' else '-'
|
AM636E/HaskellGameOfLife
|
src/GameOfLife/Ui/Text.hs
|
Haskell
|
bsd-3-clause
| 2,502
|
import NLP.DictParser
-- | Read the whole of stdin and print the result of parsing it with
-- 'parseString' (from NLP.DictParser).
main :: IO ()
main = getContents >>= \c -> print (parseString c)
|
mwotton/dictparser
|
src/main.hs
|
Haskell
|
bsd-3-clause
| 99
|
module Main where
import Data.Graph.Inductive.Graph
import Data.Graph.Inductive.Tree (Gr)
import System.Exit
import Flow
-- Node identifiers for the tiny example network.
source = 1
sink = 2
node = 3

-- Example capacity graph:
--   source -> sink (capacity 10), source -> "a" (3), "a" -> sink (1).
graph :: Gr String Int
graph = insEdge (node, sink, 1) . insEdge (source, node, 3) . insEdge (source, sink, 10) $
  insNodes [(source, "source"), (sink, "sink"), (node, "a")] empty
-- | Report that the given graph admits no flow solution, print it, and
-- exit with a failure status.
noSolution :: (Show a, Show b, DynGraph g) => g a b -> IO ()
noSolution gr = do
  putStrLn "There is no solution for:"
  prettyPrint gr
  exitFailure
-- | Pretty-print a flow problem: first the capacity graph, then the
-- computed flow graph.
printProblem :: DynGraph g => AFlow g -> IO ()
printProblem (Flow cap flow) = do
  putStrLn "-- Problem"
  prettyPrint cap
  putStrLn "\n"
  putStrLn "-- Solution"
  prettyPrint flow
main :: IO ()
main = do
  -- Show the initial (zero) flow, then the maximal flow if one exists;
  -- 'noSolution' exits with failure otherwise.
  let prob = initialFlow graph source
  putStrLn "\n#\n# Initial Flow\n#\n"
  printProblem prob
  putStrLn "\n#\n# Solved Problem\n#\n"
  maybe (noSolution graph) printProblem $ maximalFlow graph source sink
|
thsutton/mf
|
src/Main.hs
|
Haskell
|
bsd-3-clause
| 911
|
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE UndecidableInstances #-}
{-# LANGUAGE Strict #-}
module Layers.Pool where
import Network
import Util
import Static
import Data.Singletons.TypeLits
import Data.Array.Repa
import Data.Serialize
-- | Parameter-free 2x2 max-pooling layer marker.
data Pool = Pool

-- Pool carries no state, so (de)serialisation is trivial.
instance Serialize Pool where
  put _ = return ()
  get = return Pool

instance Creatable Pool where
  seeded _ = Pool

-- No trainable parameters, hence a unit gradient type.
instance Updatable Pool where
  type Gradient Pool = ()
-- 2x2 max pooling: halves both spatial dimensions of the input shape.
instance ( KnownNat h, KnownNat (Halve h), KnownNat w, KnownNat (Halve w), KnownNat bat, KnownNat d
         ) => Layer (ZZ ::. bat ::. d ::. h ::. w) Pool where
  type LOutput (ZZ ::. bat ::. d ::. h ::. w) Pool =
    (ZZ ::. bat ::. d ::. Halve h ::. Halve w)

  -- Forward: every output cell is the maximum of the corresponding
  -- 2x2 window of the input.
  runForward _ x = sComputeP$ sTraverse x f
    where f lx (b:.y:.x) = maximum [ lx$ b :. 2*y :. 2*x
                                   , lx$ b :. 2*y+1 :. 2*x
                                   , lx$ b :. 2*y :. 2*x+1
                                   , lx$ b :. 2*y+1 :. 2*x+1 ]

  -- Backward: route the incoming gradient to each input position whose
  -- value equals its window's maximum.
  -- NOTE(review): on ties, every tied position receives the full
  -- gradient (it is not split) — confirm this is intended.
  runBackwards _ (SArray x) (SArray y) (SArray dy)
    = do dx <- sComputeP$ sFromFunction f
         return ((), dx)
    where
      -- Map an input position to the output position of its window.
      halve (b:.y:.x) = b:. y `div` 2 :. x `div` 2
      f pos
        | x ! pos == y ! halve pos = dy ! halve pos
        | otherwise = 0
|
jonascarpay/convoluted
|
src/Layers/Pool.hs
|
Haskell
|
bsd-3-clause
| 1,383
|
-- | Quick, hacky sendmail wrapper
module Sihemo.Sendmail
( sendmail
) where
import System.Process (readProcess)
-- | Quick, hacky wrapper around @/usr/sbin/sendmail -t@: the recipient
-- and subject are passed as message headers on stdin.
sendmail :: String -- ^ Recipient
         -> String -- ^ Subject
         -> [String] -- ^ Content (lines)
         -> IO () -- ^ Blocks until mail is sent
sendmail recipient subject body = do
    _ <- readProcess "/usr/sbin/sendmail" ["-t"] message
    return ()
  where
    message = unlines (headers ++ body)
    headers =
      [ "To: " ++ recipient
      , "Subject: " ++ subject
      , ""
      ]
|
jaspervdj/sihemo
|
src/Sihemo/Sendmail.hs
|
Haskell
|
bsd-3-clause
| 497
|
module Derivative where
import qualified Data.Map as M
import Control.Monad (sequence)
-- | Symbolic expression AST.
data Expr = Con String          -- ^ Named constant, looked up in the environment
          | Num Double          -- ^ Literal
          | Fun String Int      -- ^ Function symbol with its derivative order
          | Add [Expr]
          | Mul [Expr]
          | Div Expr Expr
          | Neg Expr
          | Cos Expr
          | Sin Expr
          | Ln Expr
          | Exp Expr
          | Pow Expr Double deriving (Eq, Ord)

-- | Evaluate an expression in an environment mapping 'Con'/'Fun' keys to
-- values.  Returns 'Nothing' when a required key is absent.
evaluate :: Expr -> M.Map Expr Double -> Maybe Double
evaluate (Num a) _ = Just a
evaluate (Con a) t = t M.!? Con a
evaluate (Fun a b) t = t M.!? Fun a b
evaluate (Add xs) t = sum <$> traverse (`evaluate` t) xs
evaluate (Mul xs) t = product <$> traverse (`evaluate` t) xs
evaluate (Div a b) t = (/) <$> evaluate a t <*> evaluate b t
evaluate (Neg a) t = negate <$> evaluate a t
evaluate (Cos a) t = cos <$> evaluate a t
evaluate (Sin a) t = sin <$> evaluate a t
-- 'log' is already the natural logarithm; the original divided the result
-- by @log (exp 1)@, i.e. by 1.0 — a redundant no-op, now removed.
evaluate (Ln a) t = log <$> evaluate a t
evaluate (Exp a) t = exp <$> evaluate a t
evaluate (Pow a b) t = (** b) <$> evaluate a t
-- | Symbolic derivative with respect to the single independent variable
-- ('Fun' symbols have their derivative order bumped).
derivative :: Expr -> Expr
derivative (Num _) = Num 0
derivative (Con _) = Num 0
derivative (Fun f o) = Fun f (o+1)
derivative (Add es) = Add (fmap derivative es)
derivative (Mul []) = Num 0
-- Product rule, peeling one factor at a time.
derivative (Mul (e:es)) = Add [Mul (derivative e : es), Mul [derivative (Mul es), e]]
-- Quotient rule: (u/v)' = (u'v - u v') / v^2.
-- BUG FIX: the original returned only the numerator (u'v - u v'),
-- which is wrong whenever the denominator is not constant.
derivative (Div e1 e2) = Div (Add [Mul [derivative e1, e2], Neg (Mul [e1, derivative e2])]) (Pow e2 2)
derivative (Neg e) = Neg (derivative e)
derivative (Cos e) = Neg (Mul [derivative e, Sin e])
derivative (Sin e) = Mul [derivative e, Cos e]
derivative (Exp e) = Mul [derivative e, Exp e]
derivative (Ln e) = Div (derivative e) e
derivative (Pow _ 0) = Num 0
derivative (Pow e n) = Mul [Num n, derivative e, Pow e (n-1)]
-- | Partial derivative of the first expression with respect to the 'Fun'
-- symbol given as the second argument.
-- NOTE(review): partial — the @Fun@ clause only matches when the second
-- argument is itself a @Fun@ (as in the original).
partialDerivative :: Expr -> Expr -> Expr
partialDerivative (Num _) _ = Num 0
partialDerivative (Con _) _ = Num 0
partialDerivative (Fun f o) (Fun f2 o2) = if f == f2 && o == o2 then Num 1 else Num 0
partialDerivative (Add es) f = Add (fmap (`partialDerivative` f) es)
partialDerivative (Mul []) _ = Num 0
partialDerivative (Mul (e:es)) f = Add [Mul (partialDerivative e f : es), Mul [partialDerivative (Mul es) f, e]]
-- Quotient rule: d(u/v) = (du*v - u*dv) / v^2.
-- BUG FIX: the original returned only the numerator, mirroring the same
-- defect as in 'derivative'.
partialDerivative (Div e1 e2) f = Div (Add [Mul [partialDerivative e1 f, e2], Neg (Mul [e1, partialDerivative e2 f])]) (Pow e2 2)
partialDerivative (Neg e) f = Neg (partialDerivative e f)
partialDerivative (Cos e) f = Neg (Mul [partialDerivative e f, Sin e])
partialDerivative (Sin e) f = Mul [partialDerivative e f, Cos e]
partialDerivative (Exp e) f = Mul [partialDerivative e f, Exp e]
partialDerivative (Ln e) f = Div (partialDerivative e f) e
partialDerivative (Pow _ 0) _ = Num 0
partialDerivative (Pow e n) f = Mul [Num n, partialDerivative e f, Pow e (n-1)]
-- | One shallow simplification pass.  Known limitations (kept as-is):
-- zero-product detection only looks at the top level of a 'Mul';
-- singleton 'Add'/'Mul' are not collapsed; 'Pow', 'Cos', 'Sin' and 'Ln'
-- are not descended into; repeated application may simplify further.
simplify :: Expr -> Expr
simplify (Mul []) = Num 1
simplify (Mul es) = if elem (Num 0) es then Num 0 else Mul (fmap simplify es)
simplify (Add []) = Num 0
-- Drop literal zero terms, then simplify the remaining ones.
simplify (Add es) = Add $ fmap simplify (filter (/= (Num 0)) es)
simplify (Div (Num 0) _) = Num 0
simplify (Div e1 e2) = Div (simplify e1) (simplify e2)
simplify (Exp (Num 0)) = Num 1
simplify (Exp e) = Exp (simplify e)
simplify (Neg e) = Neg (simplify e)
simplify (Fun s o) = Fun s o
simplify (Con s) = Con s
simplify o = o
-- | Human-readable rendering of expressions.
instance Show Expr where
  show (Con s) = s
  show (Num f) = show f
  -- Derivative order rendered as primes: Fun "x" 2 -> x''
  show (Fun s o) = s ++ replicate o '\''
  -- Empty sums/products render as the empty string.
  -- BUG FIX: the original used 'show ""', which rendered a pair of
  -- literal quote characters instead of nothing.
  show (Add []) = ""
  show (Add [e]) = show e
  show (Add (e:es)) = show e ++ " + " ++ show (Add es)
  show (Mul []) = ""
  show (Mul [e]) = show e
  show (Mul (e:es)) = show e ++ "." ++ show (Mul es)
  show (Div e1 e2) = "(" ++ show e1 ++ " / " ++ show e2 ++ ")"
  show (Neg e) = "-" ++ show e
  show (Cos e) = "cos(" ++ show e ++ ")"
  show (Sin e) = "sin(" ++ show e ++ ")"
  -- NOTE(review): unlike Cos/Sin, Ln does not parenthesise its argument;
  -- kept as-is to preserve the existing output format.
  show (Ln e) = "ln" ++ show e
  show (Exp e) = "e^(" ++ show e ++ ")"
  show (Pow e f) = show e ++ "^(" ++ show f ++ ")"
|
GintMist/double-pendulum
|
src/derivative.hs
|
Haskell
|
bsd-3-clause
| 3,964
|
module Main where
import System.Environment
import Data.Tree
import Data.Char
data Op = Plus | Minus | Times | Div deriving Show
data Elem = Op Op | Int Int deriving Show
type Expr = Tree Elem

-- | Tokenise an arithmetic expression: digit runs become 'Int' tokens,
-- the four operator characters become 'Op' tokens, spaces are skipped,
-- and any other character aborts with "lex error".
lexer :: String -> [Elem]
lexer "" = []
lexer whole@(c : rest)
  | c == ' '  = lexer rest
  | isDigit c = let (digits, remainder) = span isDigit whole
                in Int (read digits) : lexer remainder
  | otherwise = case lookup c operators of
      Just op -> Op op : lexer rest
      Nothing -> error "lex error"
  where
    operators = [ ('+', Plus), ('-', Minus), ('*', Times), ('/', Div) ]
-- | Parse a token stream into a right-leaning expression tree.
-- Note: the grammar is right associative and ignores precedence
-- (e.g. @1-2-3@ parses as @1-(2-3)@), matching the original behaviour.
parser :: [ Elem ] -> Expr
parser [ e@( Int _ ) ] = Node e [ ]
parser ( e1@( Int _ ) : e2@( Op _ ) : rest ) = Node e2 [ Node e1 [ ], parser rest ]
-- The original was non-exhaustive and crashed with a pattern-match
-- failure on malformed input; fail with an explicit message instead.
parser _ = error "parse error"
-- | Evaluate an expression tree with integer arithmetic ('div' for
-- division).  Partial: a 'Node' whose shape does not match one of these
-- cases (e.g. an operator node without exactly two children) crashes
-- with a pattern-match failure.
eval :: Expr -> Int
eval ( Node ( Int i ) [ ] ) = i
eval ( Node ( Op Plus ) [ e1, e2 ] ) = eval e1 + eval e2
eval ( Node ( Op Minus ) [ e1, e2 ] ) = eval e1 - eval e2
eval ( Node ( Op Times ) [ e1, e2 ] ) = eval e1 * eval e2
eval ( Node ( Op Div ) [ e1, e2 ] ) = eval e1 `div` eval e2
main :: IO ()
main = do
  -- Expects exactly one command-line argument holding the expression;
  -- any other arity crashes on the irrefutable list pattern.
  [ expr ] <- getArgs
  print $ eval $ parser $ lexer expr
|
YoshikuniJujo/toyhaskell_haskell
|
tests/testOp.hs
|
Haskell
|
bsd-3-clause
| 1,089
|
-- |
-- Module: WildBind.X11.KeySym
-- Description: Re-export KeySyms
-- Maintainer: Toshio Ito <debug.ito@gmail.com>
--
-- This module re-exports X11 'KeySym's.
--
-- @since 0.2.0.0
module WildBind.X11.KeySym
( -- * The type
KeySym,
-- * Alphabet
xK_a,
xK_b,
xK_c,
xK_d,
xK_e,
xK_f,
xK_g,
xK_h,
xK_i,
xK_j,
xK_k,
xK_l,
xK_m,
xK_n,
xK_o,
xK_p,
xK_q,
xK_r,
xK_s,
xK_t,
xK_u,
xK_v,
xK_w,
xK_x,
xK_y,
xK_z,
xK_A,
xK_B,
xK_C,
xK_D,
xK_E,
xK_F,
xK_G,
xK_H,
xK_I,
xK_J,
xK_K,
xK_L,
xK_M,
xK_N,
xK_O,
xK_P,
xK_Q,
xK_R,
xK_S,
xK_T,
xK_U,
xK_V,
xK_W,
xK_X,
xK_Y,
xK_Z,
-- * Numbers
xK_0,
xK_1,
xK_2,
xK_3,
xK_4,
xK_5,
xK_6,
xK_7,
xK_8,
xK_9,
-- * ASCII symbols
xK_space,
xK_exclam,
xK_quotedbl,
xK_numbersign,
xK_dollar,
xK_percent,
xK_ampersand,
xK_apostrophe,
xK_quoteright,
xK_parenleft,
xK_parenright,
xK_asterisk,
xK_plus,
xK_comma,
xK_minus,
xK_period,
xK_slash,
xK_colon,
xK_semicolon,
xK_less,
xK_equal,
xK_greater,
xK_question,
xK_at,
xK_bracketleft,
xK_backslash,
xK_bracketright,
xK_asciicircum,
xK_underscore,
xK_grave,
xK_quoteleft,
xK_braceleft,
xK_bar,
xK_braceright,
xK_asciitilde,
-- * Control keys
xK_BackSpace,
xK_Tab,
xK_Linefeed,
xK_Clear,
xK_Return,
xK_Pause,
xK_Scroll_Lock,
xK_Sys_Req,
xK_Escape,
xK_Delete,
xK_Multi_key,
xK_Codeinput,
xK_SingleCandidate,
xK_MultipleCandidate,
xK_PreviousCandidate,
xK_Home,
xK_Left,
xK_Up,
xK_Right,
xK_Down,
xK_Prior,
xK_Page_Up,
xK_Next,
xK_Page_Down,
xK_End,
xK_Begin,
xK_Select,
xK_Print,
xK_Execute,
xK_Insert,
xK_Undo,
xK_Redo,
xK_Menu,
xK_Find,
xK_Cancel,
xK_Help,
xK_Break,
xK_Mode_switch,
xK_script_switch,
xK_Num_Lock,
-- * Number pad keys
xK_KP_Space,
xK_KP_Tab,
xK_KP_Enter,
xK_KP_F1,
xK_KP_F2,
xK_KP_F3,
xK_KP_F4,
xK_KP_Home,
xK_KP_Left,
xK_KP_Up,
xK_KP_Right,
xK_KP_Down,
xK_KP_Prior,
xK_KP_Page_Up,
xK_KP_Next,
xK_KP_Page_Down,
xK_KP_End,
xK_KP_Begin,
xK_KP_Insert,
xK_KP_Delete,
xK_KP_Equal,
xK_KP_Multiply,
xK_KP_Add,
xK_KP_Separator,
xK_KP_Subtract,
xK_KP_Decimal,
xK_KP_Divide,
xK_KP_0,
xK_KP_1,
xK_KP_2,
xK_KP_3,
xK_KP_4,
xK_KP_5,
xK_KP_6,
xK_KP_7,
xK_KP_8,
xK_KP_9,
-- * Function keys
xK_F1,
xK_F2,
xK_F3,
xK_F4,
xK_F5,
xK_F6,
xK_F7,
xK_F8,
xK_F9,
xK_F10,
xK_F11,
xK_L1,
xK_F12,
xK_L2,
xK_F13,
xK_L3,
xK_F14,
xK_L4,
xK_F15,
xK_L5,
xK_F16,
xK_L6,
xK_F17,
xK_L7,
xK_F18,
xK_L8,
xK_F19,
xK_L9,
xK_F20,
xK_L10,
xK_F21,
xK_R1,
xK_F22,
xK_R2,
xK_F23,
xK_R3,
xK_F24,
xK_R4,
xK_F25,
xK_R5,
xK_F26,
xK_R6,
xK_F27,
xK_R7,
xK_F28,
xK_R8,
xK_F29,
xK_R9,
xK_F30,
xK_R10,
xK_F31,
xK_R11,
xK_F32,
xK_R12,
xK_F33,
xK_R13,
xK_F34,
xK_R14,
xK_F35,
xK_R15,
-- * Modifier keys
xK_Shift_L,
xK_Shift_R,
xK_Control_L,
xK_Control_R,
xK_Caps_Lock,
xK_Shift_Lock,
xK_Meta_L,
xK_Meta_R,
xK_Alt_L,
xK_Alt_R,
xK_Super_L,
xK_Super_R,
xK_Hyper_L,
xK_Hyper_R,
-- * Alphabet with accent and ligatures
xK_Agrave,
xK_Aacute,
xK_Acircumflex,
xK_Atilde,
xK_Adiaeresis,
xK_Aring,
xK_AE,
xK_Ccedilla,
xK_Egrave,
xK_Eacute,
xK_Ecircumflex,
xK_Ediaeresis,
xK_Igrave,
xK_Iacute,
xK_Icircumflex,
xK_Idiaeresis,
xK_ETH,
xK_Eth,
xK_Ntilde,
xK_Ograve,
xK_Oacute,
xK_Ocircumflex,
xK_Otilde,
xK_Odiaeresis,
xK_multiply,
xK_Ooblique,
xK_Ugrave,
xK_Uacute,
xK_Ucircumflex,
xK_Udiaeresis,
xK_Yacute,
xK_THORN,
xK_Thorn,
xK_ssharp,
xK_agrave,
xK_aacute,
xK_acircumflex,
xK_atilde,
xK_adiaeresis,
xK_aring,
xK_ae,
xK_ccedilla,
xK_egrave,
xK_eacute,
xK_ecircumflex,
xK_ediaeresis,
xK_igrave,
xK_iacute,
xK_icircumflex,
xK_idiaeresis,
xK_eth,
xK_ntilde,
xK_ograve,
xK_oacute,
xK_ocircumflex,
xK_otilde,
xK_odiaeresis,
xK_division,
xK_oslash,
xK_ugrave,
xK_uacute,
xK_ucircumflex,
xK_udiaeresis,
xK_yacute,
xK_thorn,
xK_ydiaeresis,
-- * Other symbols
xK_nobreakspace,
xK_exclamdown,
xK_cent,
xK_sterling,
xK_currency,
xK_yen,
xK_brokenbar,
xK_section,
xK_diaeresis,
xK_copyright,
xK_ordfeminine,
xK_guillemotleft,
xK_notsign,
xK_hyphen,
xK_registered,
xK_macron,
xK_degree,
xK_plusminus,
xK_twosuperior,
xK_threesuperior,
xK_acute,
xK_mu,
xK_paragraph,
xK_periodcentered,
xK_cedilla,
xK_onesuperior,
xK_masculine,
xK_guillemotright,
xK_onequarter,
xK_onehalf,
xK_threequarters,
xK_questiondown,
-- * special keysym
xK_VoidSymbol,
) where
import Graphics.X11.Xlib
|
debug-ito/wild-bind
|
wild-bind-x11/src/WildBind/X11/KeySym.hs
|
Haskell
|
bsd-3-clause
| 7,315
|
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeOperators #-}
-- |
-- Module : Data.Array.Accelerate.Math.DFT
-- Copyright : [2012] Manuel M T Chakravarty, Gabriele Keller, Trevor L. McDonell
-- License : BSD3
--
-- Maintainer : Manuel M T Chakravarty <chak@cse.unsw.edu.au>
-- Stability : experimental
-- Portability : non-portable (GHC extensions)
--
-- Compute the Discrete Fourier Transform (DFT) along the lower order dimension
-- of an array.
--
-- This uses a naïve algorithm which takes O(n^2) time. However, you can
-- transform an array with an arbitrary extent, unlike with FFT which requires
-- each dimension to be a power of two.
--
-- The `dft` and `idft` functions compute the roots of unity as needed. If you
-- need to transform several arrays with the same extent than it is faster to
-- compute the roots once using `rootsOfUnity` or `inverseRootsOfUnity`
-- respectively, then call `dftG` directly.
--
-- You can also compute single values of the transform using `dftGS`
--
module Data.Array.Accelerate.Math.DFT (
dft, idft, dftG, dftGS,
) where
import Prelude as P hiding ((!!))
import Data.Array.Accelerate as A
import Data.Array.Accelerate.Math.DFT.Roots
import Data.Array.Accelerate.Data.Complex
-- | Compute the DFT along the low order dimension of an array.
--
-- The roots of unity are derived from the input's extent on every call;
-- when transforming many arrays of the same shape, compute them once and
-- use 'dftG' directly.
dft :: (Shape sh, Slice sh, Elt e, IsFloating e)
    => Acc (Array (sh:.Int) (Complex e))
    -> Acc (Array (sh:.Int) (Complex e))
dft arr = dftG roots arr
  where roots = rootsOfUnity (shape arr)
-- | Compute the inverse DFT along the low order dimension of an array.
--
-- Runs the generic transform with the inverse roots, then rescales every
-- element by 1/n (n = extent of the innermost dimension).
idft :: (Shape sh, Slice sh, Elt e, IsFloating e)
     => Acc (Array (sh:.Int) (Complex e))
     -> Acc (Array (sh:.Int) (Complex e))
idft arr = A.map (/ scale) (dftG roots arr)
  where
    sh    = shape arr
    n     = indexHead sh
    roots = inverseRootsOfUnity sh
    -- Normalisation factor as a complex constant (imaginary part zero).
    scale = lift (A.fromIntegral n :+ constant 0)
-- | Generic function for computation of forward and inverse DFT. This function
-- is also useful if you transform many arrays of the same extent, and don't
-- want to recompute the roots for each one.
--
-- The extent of the input and roots must match.
--
-- Each output element is an inner product of the input's innermost row with
-- a row of root powers, computed by extending both arrays one dimension and
-- folding that new axis away.
dftG :: forall sh e. (Shape sh, Slice sh, Elt e, IsFloating e)
     => Acc (Array (sh:.Int) (Complex e))       -- ^ roots of unity
     -> Acc (Array (sh:.Int) (Complex e))       -- ^ input array
     -> Acc (Array (sh:.Int) (Complex e))
dftG roots arr
  = A.fold (+) (constant (0 :+ 0))
  $ A.zipWith (*) arr' roots'
  where
    base   = shape arr
    l      = indexHead base
    -- New innermost axis ranges over every linear index of the input.
    extend = lift (base :. shapeSize base)
    -- Extend the entirety of the input arrays into a higher dimension, reading
    -- roots from the appropriate places and then reduce along this axis.
    --
    -- In the calculation for 'roots'', 'i' is the index into the extended
    -- dimension, with corresponding base index 'ix' which we are attempting to
    -- calculate the single DFT value of. The rest proceeds as per 'dftGS'.
    --
    arr'   = A.generate extend (\ix' -> let i = indexHead ix' in arr !! i)
    roots' = A.generate extend (\ix' -> let ix :. i = unlift ix'
                                            sh :. n = unlift (fromIndex base i) :: Exp sh :. Exp Int
                                            k       = indexHead ix
                                        in
                                        roots ! lift (sh :. (k*n) `mod` l))
-- | Compute a single value of the DFT.
--
-- The value at index @k@ is the inner product of the input's row with the
-- k-th powers of the roots of unity (indices taken modulo the row length).
dftGS :: forall sh e. (Shape sh, Slice sh, Elt e, IsFloating e)
      => Exp (sh :. Int)                        -- ^ index of the value we want
      -> Acc (Array (sh:.Int) (Complex e))      -- ^ roots of unity
      -> Acc (Array (sh:.Int) (Complex e))      -- ^ input array
      -> Acc (Scalar (Complex e))
dftGS ix roots arr
  = let k      = indexHead ix
        l      = indexHead (shape arr)
        -- all the roots we need to multiply with
        roots' = A.generate (shape arr)
                   (\ix' -> let sh :. n = unlift ix' :: Exp sh :. Exp Int
                            in  roots ! lift (sh :. (k*n) `mod` l))
    in
    A.foldAll (+) (constant (0 :+ 0)) $ A.zipWith (*) arr roots'
|
thielema/accelerate-fft
|
Data/Array/Accelerate/Math/DFT.hs
|
Haskell
|
bsd-3-clause
| 4,284
|
{-# LANGUAGE CPP #-}
{-# LANGUAGE ForeignFunctionInterface #-}
{-# LANGUAGE JavaScriptFFI #-}
{-# OPTIONS_HADDOCK hide #-}
module JavaScript.Blob (
Blob
, readBlob
, isBlob
) where
import Control.Exception (mask_)
import Data.ByteString (ByteString)
#ifdef ghcjs_HOST_OS
import GHCJS.Foreign (bufferByteString)
import GHCJS.Types (JSRef)
#else
import JavaScript.NoGHCJS
#endif
-- | Uninhabited tag type identifying JavaScript @Blob@ values.
data Blob_

-- | A reference to a JavaScript @Blob@ object.
type Blob = JSRef Blob_
#ifdef ghcjs_HOST_OS
-- Asynchronously read a Blob with a FileReader; the GHCJS continuation $c
-- is invoked with the resulting ArrayBuffer once 'loadend' fires.
foreign import javascript interruptible "var reader = new FileReader();\
reader.addEventListener('loadend', function() {\
$c(reader.result);\
});\
reader.readAsArrayBuffer($1);"
  ffi_readBlob :: Blob -> IO (JSRef a)
-- Synchronous @instanceof Blob@ check on an arbitrary JS reference.
foreign import javascript unsafe "$1 instanceof Blob"
  ffi_blobCheck :: JSRef a -> IO Bool
#else
-- Non-GHCJS stubs: these exist only so the module typechecks/links on GHC;
-- calling them is an error by construction.
ffi_readBlob :: Blob -> IO (JSRef a)
ffi_blobCheck :: JSRef a -> IO Bool
ffi_readBlob = error "ffi_readBlob: only available in JavaScript"
ffi_blobCheck = error "ffi_blobCheck: only available in JavaScript"
#endif
-- | Read the full contents of a 'Blob' into a strict 'ByteString'.
-- The FFI read is wrapped in 'mask_' so an async exception cannot interrupt
-- the interruptible foreign call mid-flight.
readBlob :: Blob -> IO ByteString
readBlob blob = mask_ (ffi_readBlob blob) >>= bufferByteString 0 0
-- | Test whether a JavaScript reference points at a @Blob@ instance.
isBlob :: JSRef a -> IO Bool
isBlob = ffi_blobCheck
|
mstksg/ghcjs-websockets
|
src/JavaScript/Blob.hs
|
Haskell
|
mit
| 1,311
|
{-# LANGUAGE ScopedTypeVariables #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
-- Various orphan instances and functions that we don't want to appear in client
module Unison.ABT.Extra where
import Control.Applicative
import Data.Bytes.Serial (Serial(..), Serial1(..))
import Data.Bytes.VarInt (VarInt(..))
import Data.List hiding (cycle)
import Data.Ord
import Data.Vector ((!))
import Prelude hiding (abs,cycle)
import Unison.ABT
import Unison.Var (Var)
import qualified Data.Bytes.Get as Get
import qualified Data.Bytes.Put as Put
import qualified Data.Set as Set
import qualified Data.Foldable as Foldable
import qualified Data.Map as Map
import qualified Data.Vector as Vector
import qualified Unison.Digest as Digest
import qualified Unison.Var as Var
-- | We ignore annotations in the `Term`, as these should never affect the
-- meaning of the term.
--
-- Variables are hashed by their de Bruijn-style index into the local
-- environment, so the hash is stable under alpha-renaming. Cycle bindings
-- enter the environment as a 'Left' group of variables, single abstractions
-- as a 'Right' variable.
hash :: forall f v a . (Foldable f, Digest.Digestable1 f, Var v) => Term f v a -> Digest.Hash
hash t = hash' [] t where
  hash' :: [Either [v] v] -> Term f v a -> Digest.Hash
  hash' env (Term _ _ t) = case t of
    -- A variable hashes to the position of its binder in 'env'; a variable
    -- with no binder in scope is a caller error.
    Var v -> maybe die hashInt ind
      where lookup (Left cycle) = elem v cycle
            lookup (Right v') = v == v'
            ind = findIndex lookup env
            -- env not likely to be very big, prefer to encode in one byte if possible
            hashInt :: Int -> Digest.Hash
            hashInt i = Digest.run (serialize (VarInt i))
            die = error $ "unknown var in environment: " ++ show (Var.name v)
    Cycle (AbsN' vs t) -> hash' (Left vs : env) t
    Cycle t -> hash' env t
    Abs v t -> hash' (Right v : env) t
    Tm t -> Digest.digest1 (hashCycle env) (hash' env) $ t
  -- For a cycle whose binder count matches the subterm count, hash the
  -- subterms in a canonical order (sorted by their own hashes) and permute
  -- the bound variables to match, so mutually-recursive definitions hash
  -- the same regardless of declaration order.
  hashCycle :: [Either [v] v] -> [Term f v a] -> Digest.DigestM (Term f v a -> Digest.Hash)
  hashCycle env@(Left cycle : envTl) ts | length cycle == length ts =
    let
      permute p xs = case Vector.fromList xs of xs -> map (xs !) p
      hashed = map (\(i,t) -> ((i,t), hash' env t)) (zip [0..] ts)
      pt = map fst (sortBy (comparing snd) hashed)
      (p,ts') = unzip pt
    in case map Right (permute p cycle) ++ envTl of
      env -> Foldable.traverse_ (serialize . hash' env) ts'
             *> pure (hash' env)
  -- Fallback: no matching cycle frame; hash subterms in given order.
  hashCycle env ts = Foldable.traverse_ (serialize . hash' env) ts *> pure (hash' env)
-- | Use the `hash` function to efficiently remove duplicates from the list, preserving order.
--
-- Each term is keyed by its hash and tagged with its original position;
-- sorting the surviving entries by position restores input order. Note that
-- 'Map.fromList' keeps the last entry per key, so for duplicates it is the
-- last occurrence's position that survives -- TODO confirm this is intended.
distinct :: (Foldable f, Digest.Digestable1 f, Var v) => [Term f v a] -> [Term f v a]
distinct ts = map fst (sortBy (comparing snd) m)
  -- BUG FIX: the position list was previously @[0 :: Int .. 1]@, which
  -- silently truncated the input to its first two elements.
  where m = Map.elems (Map.fromList (map hash ts `zip` (ts `zip` [0 :: Int ..])))
-- | Use the `hash` function to remove elements from `t1s` that exist in `t2s`, preserving order.
subtract :: (Foldable f, Digest.Digestable1 f, Var v) => [Term f v a] -> [Term f v a] -> [Term f v a]
subtract wanted unwanted = filter keep wanted
  where
    -- Hashes of every term that must be excluded.
    excluded = Set.fromList (map hash unwanted)
    keep t   = Set.notMember (hash t) excluded
-- | Binary serialisation for terms: the annotation is written first,
-- followed by a one-byte constructor tag (0=Var, 1=Cycle, 2=Abs, 3=Tm)
-- and the constructor's payload.
instance (Foldable f, Serial a, Serial v, Ord v, Serial1 f) => Serial (Term f v a) where
  serialize (Term _ a e) = serialize a *> case e of
    Var v -> Put.putWord8 0 *> serialize v
    Cycle body -> Put.putWord8 1 *> serialize body
    Abs v body -> Put.putWord8 2 *> serialize v *> serialize body
    Tm v -> Put.putWord8 3 *> serializeWith serialize v
  deserialize = do
    ann <- deserialize
    b <- Get.getWord8
    case b of
      0 -> annotatedVar ann <$> deserialize
      1 -> cycle' ann <$> deserialize
      2 -> abs' ann <$> deserialize <*> deserialize
      3 -> tm' ann <$> deserializeWith deserialize
      -- BUG FIX: the message previously claimed the valid tags were
      -- {0,1,2}, but tag 3 (Tm) is decoded above as well.
      _ -> fail ("unknown byte tag, expected one of {0,1,2,3}, got: " ++ show b)
|
CGenie/platform
|
node/src/Unison/ABT/Extra.hs
|
Haskell
|
mit
| 3,610
|
{-# Language TemplateHaskell #-}
{-# Language OverloadedStrings #-}
module BitcoinCore.Keys
( PublicKeyRep(..)
, Address(..)
, WIFPrivateKey(..)
, genKeys
, getAddress
, getWIFPrivateKey
, getPrivateKeyFromWIF
, getPubKey
, btcCurve
, serializePrivateKey
, deserializePrivateKey
, serializePublicKeyRep
, deserializePublicKeyRep
, PubKeyFormat(..)
, PubKeyHash(..)
, addressToPubKeyHash
, hashPubKeyRep
, addrTxt
) where
import General.Util
import General.Types (Network(..))
import General.Hash
( Hash(..)
, hashObject
, ripemdSha256
)
import Prelude hiding (take, concat)
import Data.ByteString (ByteString)
import Crypto.PubKey.ECC.Types
( Curve
, getCurveByName
, Point(..)
, CurveName(SEC_p256k1)
)
import Crypto.PubKey.ECC.Generate (generate, generateQ)
import Crypto.PubKey.ECC.ECDSA ( PublicKey(..)
, PrivateKey(..))
import Crypto.OpenSSL.ECC
( ecGroupFromCurveOID
, EcGroup
, ecPointFromOct
, ecPointToAffineGFp
)
import qualified Data.Text as T
import Data.Binary (Binary(..))
import Data.Binary.Put (Put)
import qualified Data.Binary.Put as Put
import Data.Binary.Get (Get)
import qualified Data.Binary.Get as Get
import qualified Data.ByteString.Lazy as BL
import Data.Maybe (fromMaybe)
import Control.Lens (makeLenses, (^.))
-- | A public key together with the wire format it should be serialised in.
data PublicKeyRep = PublicKeyRep PubKeyFormat PublicKey
  deriving (Eq, Show)

-- | SEC public-key encodings: 65-byte uncompressed (0x04 prefix) or
-- 33-byte compressed (0x02/0x03 prefix).
data PubKeyFormat = Compressed | Uncompressed
  deriving (Eq, Show)

-- WIFPrivateKey and Address have base58 -> use text rep
-- TODO: add base58 type?

-- | A private key in Wallet Import Format (base58check text).
newtype WIFPrivateKey = WIF T.Text
  deriving (Eq, Show)

-- | A base58check-encoded Bitcoin address.
newtype Address = Address
  { _addrTxt :: T.Text }
  deriving (Eq, Show)

makeLenses ''Address

-- | The RIPEMD160(SHA256(pubkey)) digest an address encodes.
type PubKeyHash = Hash PublicKeyRep
-- Bitcoin uses a specific elliptic curve, secp256k1,
-- to generate public private key pairs
btcCurve :: Curve
btcCurve = getCurveByName SEC_p256k1

-- | The secp256k1 group as an OpenSSL 'EcGroup'; the lookup cannot fail
-- for this well-known curve name, hence the 'error' fallback.
btcEcGroup :: EcGroup
btcEcGroup = fromMaybe
  (error "Unable to get secp256k1 ec group. This should never happen.")
  (ecGroupFromCurveOID "secp256k1")
-- | Generate a fresh secp256k1 key pair.
genKeys :: IO (PublicKey, PrivateKey)
genKeys = generate btcCurve
-- | Derive the public key for a private key: the curve point obtained from
-- the private exponent via 'generateQ'.
getPubKey :: PrivateKey -> PublicKey
getPubKey privKey = PublicKey btcCurve (generateQ btcCurve (private_d privKey))
-- Addresses are generated from public key by
-- SHA256, then RIPEMD160 hashing of the public key
-- Then Base58 encoding the resulting hash
-- https://github.com/bitcoinbook/bitcoinbook/blob/first_edition/ch04.asciidoc#bitcoin-addresses
getAddress :: PublicKeyRep -> Network -> Address
getAddress pubKeyRep network =
  let digest             = Payload (hash (hashPubKeyRep pubKeyRep))
      -- Version byte selects the network the address is valid on.
      prefixFor MainNet  = Prefix 0x00
      prefixFor TestNet3 = Prefix 0x6F
  in Address (encodeBase58Check (prefixFor network) digest)
-- | Recover the public-key hash encoded in an address by decoding its
-- base58check payload (prefix and checksum are discarded).
addressToPubKeyHash :: Address -> PubKeyHash
addressToPubKeyHash address =
  let (_prefix, Payload digest, _checksum) = decodeBase58Check (address ^. addrTxt)
  in Hash digest
-- | Encode a private key in Wallet Import Format: base58check of the
-- serialised secret exponent under the WIF prefix.
getWIFPrivateKey :: PrivateKey -> WIFPrivateKey
getWIFPrivateKey privateKey =
  let payload = Payload (serializePrivateKey privateKey)
  in WIF (encodeBase58Check privateKeyPrefix payload)
-- | Decode a Wallet-Import-Format private key.
--
-- Calls 'error' when the base58check version byte is not
-- 'privateKeyPrefix' (i.e. the text is not a WIF private key).
getPrivateKeyFromWIF :: WIFPrivateKey -> PrivateKey
getPrivateKeyFromWIF (WIF wifText) =
  if prefix == privateKeyPrefix
    then deserializePrivateKey payload
    else error $ "Unable to read WIF PrivateKey. Invalid prefix: " ++ show prefix
  where
    -- The checksum component is not used here (presumably verified inside
    -- decodeBase58Check -- TODO confirm), so it is discarded with a
    -- wildcard instead of the previous unused 'checksum' binding.
    (prefix, Payload payload, _) = decodeBase58Check wifText
-- | Base58check version byte for WIF private keys (0x80 on mainnet).
privateKeyPrefix :: Prefix
privateKeyPrefix = Prefix 0x80
-- | Serialise a private key as its secret exponent, big-endian,
-- left-padded to 32 bytes.
serializePrivateKey :: PrivateKey -> ByteString
serializePrivateKey privKey = BL.toStrict (Put.runPut (Put.putByteString bytes))
  where bytes = unrollWithPad BE 32 (fromIntegral (private_d privKey))
-- | Rebuild a private key from its 32-byte big-endian secret exponent.
deserializePrivateKey :: ByteString -> PrivateKey
deserializePrivateKey bs = PrivateKey btcCurve (roll BE raw)
  where
    -- Read exactly 32 bytes from a lazy view of the input.
    raw = Get.runGet (Get.getByteString 32) (BL.fromChunks [bs])
-- | Wire format per the SEC encoding implemented by
-- 'serializePublicKeyRep' / 'deserializePublicKeyRep'.
instance Binary PublicKeyRep where
  get = deserializePublicKeyRep
  put = serializePublicKeyRep
-- | RIPEMD160(SHA256(serialised pubkey)), the digest used in addresses.
hashPubKeyRep :: PublicKeyRep -> Hash PublicKeyRep
hashPubKeyRep = hashObject ripemdSha256
serializePublicKeyRep :: PublicKeyRep -> Put
-- See: https://github.com/bitcoinbook/bitcoinbook/blob/first_edition/ch04.asciidoc#public-key-formats
-- Uncompressed: 0x04 prefix, then both affine coordinates (32 bytes each).
serializePublicKeyRep (PublicKeyRep Uncompressed pubKey) = do
  Put.putWord8 4
  Put.putByteString . unrollWithPad BE 32 $ x
  Put.putByteString . unrollWithPad BE 32 $ y
  where Point x y = public_q pubKey
-- See: https://github.com/bitcoinbook/bitcoinbook/blob/first_edition/ch04.asciidoc#compressed-public-keys
-- Compressed: only x is stored; the prefix byte (0x02 even / 0x03 odd)
-- records y's parity so the point can be reconstructed.
serializePublicKeyRep (PublicKeyRep Compressed pubKey) = do
  Put.putWord8 prefix
  Put.putByteString . unrollWithPad BE 32 $ x
  where
    Point x y = public_q pubKey
    prefix = if isEven y
               then 2
               else 3
    isEven n = n `mod` 2 == 0
-- | Parse a public key from its SEC serialisation.
--
-- The leading byte selects the format: 0x04 = uncompressed (65 bytes),
-- 0x02/0x03 = compressed (33 bytes).
--
-- Fixes over the previous version: an unknown prefix now fails inside the
-- 'Get' monad instead of crashing with a non-exhaustive-case error, the
-- inner helper no longer shadows the top-level 'getPubKey', and the 'Left'
-- binding no longer shadows 'Prelude.error'.
deserializePublicKeyRep :: Get PublicKeyRep
deserializePublicKeyRep = do
  prefix <- Get.lookAhead Get.getWord8
  pubKeyFormat <- case prefix of
    0x04 -> return Uncompressed
    0x03 -> return Compressed
    0x02 -> return Compressed
    _    -> fail $ "invalid public key prefix byte: " ++ show prefix
  bs <- Get.getByteString (repLength pubKeyFormat)
  case parsePoint bs of
    Left errMsg  -> fail $ "failed deserializing public key: " ++ errMsg
    Right pubKey -> return $ PublicKeyRep pubKeyFormat pubKey
  where
    -- Reconstruct the ECDSA key from the OpenSSL octet encoding of the point.
    parsePoint :: ByteString -> Either String PublicKey
    parsePoint bs = do
      ecPoint <- ecPointFromOct btcEcGroup bs
      let (x, y) = ecPointToAffineGFp btcEcGroup ecPoint
      return (PublicKey btcCurve (Point x y))
    repLength Uncompressed = 65
    repLength Compressed   = 33
|
clample/lamdabtc
|
backend/src/BitcoinCore/Keys.hs
|
Haskell
|
bsd-3-clause
| 5,582
|
module NestedImporting2 where
import NestedImporting2.A
-- | Entry point: print the value 'r' imported via "NestedImporting2.A".
main :: Fay ()
main = print r
|
fpco/fay
|
tests/NestedImporting2.hs
|
Haskell
|
bsd-3-clause
| 88
|
module Root.Src.Main where

-- | Program entry point: print a greeting.
-- (Added the conventional type signature and dropped the redundant
-- single-statement 'do' block.)
main :: IO ()
main = putStrLn "Hello Haskell World!"
|
codeboardio/kali
|
test/src_examples/haskell/several_files3/Root/Src/Main.hs
|
Haskell
|
mit
| 68
|
-- {-# LANGUAGE NoImplicitPrelude #-}
-- {-# LANGUAGE QuasiQuotes #-}
-- {-# LANGUAGE TemplateHaskell #-}
-- | Test suite for GHCi like applications including both GHCi and Intero.
module Stack.GhciSpec where
import Test.Hspec
-- | Placeholder suite: the original GHCi script tests are kept commented
-- out below (see commercialhaskell/stack#3309), so this does nothing.
spec :: Spec
spec = pure ()
{- Commented out as part of the fix for https://github.com/commercialhaskell/stack/issues/3309
Not sure if maintaining this test is worth the effort.
import qualified Data.ByteString.Lazy as LBS
import qualified Data.Map as M
import qualified Data.Set as S
import qualified Data.Text as T
import qualified Data.Text.Encoding as T
import Distribution.License (License (BSD3))
import qualified Distribution.ModuleName as ModuleName
import Distribution.PackageDescription (BuildType(..))
import Stack.Prelude
import Stack.Types.Package
import Stack.Types.PackageName
import Stack.Types.Version
import Test.Hspec
import NeatInterpolation
import Path
import Path.Extra (pathToText)
import qualified System.FilePath as FP
import Stack.Ghci
import Stack.Ghci.Script (scriptToLazyByteString)
import Stack.Ghci.PortableFakePaths
textToLazy :: Text -> LBS.ByteString
textToLazy = LBS.fromStrict . T.encodeUtf8
-- | Matches two strings, after converting line-ends in the second to Unix ones
-- (in a hacky way) and converting both to the same type. Workaround for
-- https://github.com/nikita-volkov/neat-interpolation/issues/14.
shouldBeLE :: LBS.ByteString -> Text -> Expectation
shouldBeLE actual expected = shouldBe actual (textToLazy $ T.filter (/= '\r') expected)
baseProjDir, projDirA, projDirB :: Path Abs Dir
baseProjDir = $(mkAbsDir $ defaultDrive FP.</> "Users" FP.</> "someone" FP.</> "src")
projDirA = baseProjDir </> $(mkRelDir "project-a")
projDirB = baseProjDir </> $(mkRelDir "project-b")
relFile :: Path Rel File
relFile = $(mkRelFile $ "exe" FP.</> "Main.hs")
absFile :: Path Abs File
absFile = projDirA </> relFile
projDirAT, projDirBT, relFileT, absFileT :: Text
projDirAT = pathToText projDirA
projDirBT = pathToText projDirB
relFileT = pathToText relFile
absFileT = pathToText absFile
spec :: Spec
spec = do
describe "GHCi" $ do
describe "Script rendering" $ do
describe "should render GHCi scripts" $ do
it "with one library package" $ do
let res = scriptToLazyByteString $ renderScriptGhci packages_singlePackage Nothing []
res `shouldBeLE` ghciScript_projectWithLib
it "with one main package" $ do
let res = scriptToLazyByteString $ renderScriptGhci []
(Just absFile)
[]
res `shouldBeLE` ghciScript_projectWithMain
it "with one library and main package" $ do
let res = scriptToLazyByteString $ renderScriptGhci packages_singlePackage
(Just absFile)
[]
res `shouldBeLE` ghciScript_projectWithLibAndMain
it "with multiple library packages" $ do
let res = scriptToLazyByteString $ renderScriptGhci packages_multiplePackages Nothing []
res `shouldBeLE` ghciScript_multipleProjectsWithLib
describe "should render intero scripts" $ do
it "with one library package" $ do
let res = scriptToLazyByteString $ renderScriptIntero packages_singlePackage Nothing []
res `shouldBeLE` interoScript_projectWithLib
it "with one main package" $ do
let res = scriptToLazyByteString $ renderScriptIntero packages_singlePackage
(Just absFile)
[]
res `shouldBeLE` interoScript_projectWithMain
it "with one library and main package" $ do
let res = scriptToLazyByteString $ renderScriptIntero packages_singlePackage
(Just absFile)
[]
res `shouldBeLE` interoScript_projectWithLibAndMain
it "with multiple library packages" $ do
let res = scriptToLazyByteString $ renderScriptIntero packages_multiplePackages Nothing []
res `shouldBeLE` interoScript_multipleProjectsWithLib
-- Expected Intero scripts
interoScript_projectWithLib :: Text
interoScript_projectWithLib = [text|
:cd-ghc $projDirAT
:add Lib.A
:module + Lib.A
|]
interoScript_projectWithMain :: Text
interoScript_projectWithMain = [text|
:cd-ghc $projDirAT
:add Lib.A
:cd-ghc $projDirAT
:add $absFileT
:module + Lib.A
|]
interoScript_projectWithLibAndMain :: Text
interoScript_projectWithLibAndMain = [text|
:cd-ghc $projDirAT
:add Lib.A
:cd-ghc $projDirAT
:add $absFileT
:module + Lib.A
|]
interoScript_multipleProjectsWithLib :: Text
interoScript_multipleProjectsWithLib = [text|
:cd-ghc $projDirAT
:add Lib.A
:cd-ghc $projDirBT
:add Lib.B
:module + Lib.A Lib.B
|]
-- Expected GHCi Scripts
ghciScript_projectWithLib :: Text
ghciScript_projectWithLib = [text|
:add Lib.A
:module + Lib.A
|]
ghciScript_projectWithMain :: Text
ghciScript_projectWithMain = [text|
:add $absFileT
:module +
|]
ghciScript_projectWithLibAndMain :: Text
ghciScript_projectWithLibAndMain = [text|
:add Lib.A
:add $absFileT
:module + Lib.A
|]
ghciScript_multipleProjectsWithLib :: Text
ghciScript_multipleProjectsWithLib = [text|
:add Lib.A
:add Lib.B
:module + Lib.A Lib.B
|]
-- Expected Legacy GHCi scripts
ghciLegacyScript_projectWithMain :: Text
ghciLegacyScript_projectWithMain = [text|
:add
:add $absFileT
:module +
|]
ghciLegacyScript_projectWithLibAndMain :: Text
ghciLegacyScript_projectWithLibAndMain = [text|
:add Lib.A
:add $absFileT
:module + Lib.A
|]
ghciLegacyScript_multipleProjectsWithLib :: Text
ghciLegacyScript_multipleProjectsWithLib = [text|
:add Lib.A Lib.B
:module + Lib.A Lib.B
|]
-- Sample GHCi load configs
packages_singlePackage :: [GhciPkgInfo]
packages_singlePackage =
[ GhciPkgInfo
{ ghciPkgModules = S.fromList [ModuleName.fromString "Lib.A"]
, ghciPkgDir = projDirA
, ghciPkgName = $(mkPackageName "package-a")
, ghciPkgOpts = []
, ghciPkgModFiles = S.empty
, ghciPkgCFiles = S.empty
, ghciPkgMainIs = M.empty
, ghciPkgTargetFiles = Nothing
, ghciPkgPackage =
Package
{ packageName = $(mkPackageName "package-a")
, packageVersion = $(mkVersion "0.1.0.0")
, packageLicense = BSD3
, packageFiles = GetPackageFiles undefined
, packageDeps = M.empty
, packageTools = []
, packageAllDeps = S.empty
, packageGhcOptions = []
, packageFlags = M.empty
, packageDefaultFlags = M.empty
, packageHasLibrary = True
, packageTests = M.empty
, packageBenchmarks = S.empty
, packageExes = S.empty
, packageOpts = GetPackageOpts undefined
, packageHasExposedModules = True
, packageBuildType = Just Simple
, packageSetupDeps = Nothing
}
}
]
packages_multiplePackages :: [GhciPkgInfo]
packages_multiplePackages =
[ GhciPkgInfo
{ ghciPkgModules = S.fromList [ModuleName.fromString "Lib.A"]
, ghciPkgDir = projDirA
, ghciPkgName = $(mkPackageName "package-a")
, ghciPkgOpts = []
, ghciPkgModFiles = S.empty
, ghciPkgCFiles = S.empty
, ghciPkgMainIs = M.empty
, ghciPkgTargetFiles = Nothing
, ghciPkgPackage =
Package
{ packageName = $(mkPackageName "package-a")
, packageVersion = $(mkVersion "0.1.0.0")
, packageLicense = BSD3
, packageFiles = GetPackageFiles undefined
, packageDeps = M.empty
, packageTools = []
, packageAllDeps = S.empty
, packageGhcOptions = []
, packageFlags = M.empty
, packageDefaultFlags = M.empty
, packageHasLibrary = True
, packageTests = M.empty
, packageBenchmarks = S.empty
, packageExes = S.empty
, packageOpts = GetPackageOpts undefined
, packageHasExposedModules = True
, packageBuildType = Just Simple
, packageSetupDeps = Nothing
}
}
, GhciPkgInfo
{ ghciPkgModules = S.fromList [ModuleName.fromString "Lib.B"]
, ghciPkgDir = projDirB
, ghciPkgName = $(mkPackageName "package-b")
, ghciPkgOpts = []
, ghciPkgModFiles = S.empty
, ghciPkgCFiles = S.empty
, ghciPkgMainIs = M.empty
, ghciPkgTargetFiles = Nothing
, ghciPkgPackage =
Package
{ packageName = $(mkPackageName "package-b")
, packageVersion = $(mkVersion "0.1.0.0")
, packageLicense = BSD3
, packageFiles = GetPackageFiles undefined
, packageDeps = M.empty
, packageTools = []
, packageAllDeps = S.empty
, packageGhcOptions = []
, packageFlags = M.empty
, packageDefaultFlags = M.empty
, packageHasLibrary = True
, packageTests = M.empty
, packageBenchmarks = S.empty
, packageExes = S.empty
, packageOpts = GetPackageOpts undefined
, packageHasExposedModules = True
, packageBuildType = Just Simple
, packageSetupDeps = Nothing
}
}
]
-}
|
MichielDerhaeg/stack
|
src/test/Stack/GhciSpec.hs
|
Haskell
|
bsd-3-clause
| 9,341
|
{-# LANGUAGE TypeFamilies, QuasiQuotes, TemplateHaskell, MultiParamTypeClasses, OverloadedStrings #-}
module YesodCoreTest.Csrf (csrfSpec, Widget, resourcesApp) where
import Yesod.Core
import Test.Hspec
import Network.Wai
import Network.Wai.Test
import Web.Cookie
import qualified Data.Map as Map
import Data.ByteString.Lazy (fromStrict)
import Data.Monoid ((<>))
-- | Minimal application used to exercise the CSRF middleware.
data App = App

-- Routing: a single root resource accepting GET and POST.
mkYesod "App" [parseRoutes|
/ HomeR GET POST
|]
-- | The middleware under test is installed on top of the defaults.
instance Yesod App where
    yesodMiddleware = defaultYesodMiddleware . defaultCsrfMiddleware
-- | Read-only handler; GET requests should pass CSRF checks unconditionally.
getHomeR :: Handler Html
getHomeR = defaultLayout
    [whamlet|
        <p>
            Welcome to my test application.
    |]
-- | Write handler; POST requests must carry a valid CSRF token to reach it.
postHomeR :: Handler Html
postHomeR = defaultLayout
    [whamlet|
        <p>
            Welcome to my test application.
    |]
-- | Run a WAI test session against a freshly built 'App'.
runner :: Session () -> IO ()
runner session = do
  app <- toWaiApp App
  runSession session app
-- | Behavioural tests for 'defaultCsrfMiddleware': GET requests must set the
-- CSRF cookie, and POST requests must echo the token back via either the
-- CSRF header or the @_token@ form parameter; anything else is a 403.
-- NOTE(review): the first description reads "serves a includes a cookie";
-- it is a runtime string and left untouched here.
csrfSpec :: Spec
csrfSpec = describe "A Yesod application with the defaultCsrfMiddleware" $ do
    it "serves a includes a cookie in a GET request" $ runner $ do
        res <- request defaultRequest
        assertStatus 200 res
        assertClientCookieExists "Should have an XSRF-TOKEN cookie" defaultCsrfCookieName
    it "200s write requests with the correct CSRF header, but no param" $ runner $ do
        -- A GET first, so the client cookie jar holds a token to echo back.
        getRes <- request defaultRequest
        assertStatus 200 getRes
        csrfValue <- fmap setCookieValue requireCsrfCookie
        postRes <- request (defaultRequest { requestMethod = "POST", requestHeaders = [(defaultCsrfHeaderName, csrfValue)] })
        assertStatus 200 postRes
    it "200s write requests with the correct CSRF param, but no header" $ runner $ do
        getRes <- request defaultRequest
        assertStatus 200 getRes
        csrfValue <- fmap setCookieValue requireCsrfCookie
        -- Token passed as a urlencoded form field instead of a header.
        let body = "_token=" <> csrfValue
        postRes <- srequest $ SRequest (defaultRequest { requestMethod = "POST", requestHeaders = [("Content-Type","application/x-www-form-urlencoded")] }) (fromStrict body)
        assertStatus 200 postRes
    it "403s write requests without the CSRF header" $ runner $ do
        res <- request (defaultRequest { requestMethod = "POST" })
        assertStatus 403 res
    it "403s write requests with the wrong CSRF header" $ runner $ do
        getRes <- request defaultRequest
        assertStatus 200 getRes
        csrfValue <- fmap setCookieValue requireCsrfCookie
        -- Tampered token: valid cookie, mismatching header value.
        res <- request (defaultRequest { requestMethod = "POST", requestHeaders = [(defaultCsrfHeaderName, csrfValue <> "foo")] })
        assertStatus 403 res
    it "403s write requests with the wrong CSRF param" $ runner $ do
        getRes <- request defaultRequest
        assertStatus 200 getRes
        csrfValue <- fmap setCookieValue requireCsrfCookie
        let body = "_token=" <> (csrfValue <> "foo")
        postRes <- srequest $ SRequest (defaultRequest { requestMethod = "POST", requestHeaders = [("Content-Type","application/x-www-form-urlencoded")] }) (fromStrict body)
        assertStatus 403 postRes
-- | Fetch the CSRF cookie from the test client's cookie jar, crashing the
-- test with a descriptive message when it is absent.
requireCsrfCookie :: Session SetCookie
requireCsrfCookie = do
  cookies <- getClientCookies
  maybe (error "Failed to lookup CSRF cookie") return
        (Map.lookup defaultCsrfCookieName cookies)
|
MaxGabriel/yesod
|
yesod-core/test/YesodCoreTest/Csrf.hs
|
Haskell
|
mit
| 3,256
|
{-# LANGUAGE OverloadedStrings #-}
module TestImport
( module Yesod.Test
, module Model
, module Foundation
, module Database.Persist
, runDB
, Spec
, Example
) where
import Yesod.Test
import Database.Persist hiding (get)
import Database.Persist.Sql (SqlPersistM, runSqlPersistMPool)
import Control.Monad.IO.Class (liftIO)
import Foundation
import Model
-- | Spec/example types specialised to this site's foundation type.
type Spec = YesodSpec App
type Example = YesodExample App
-- | Run a persistent action against the test site's connection pool.
runDB :: SqlPersistM a -> Example a
runDB query = do
  site <- getTestYesod
  liftIO (runSqlPersistMPool query (connPool site))
|
zhy0216/haskell-learning
|
yosog/tests/TestImport.hs
|
Haskell
|
mit
| 583
|
{-# LANGUAGE OverloadedStrings #-}
module SymBoilerPlate where
import SymMap
import Control.Monad
import Data.Aeson
import Data.HashMap.Strict as H
import System.IO.Unsafe
import System.Random
{-@ nonDet :: a -> x:Int -> {v:Int | 0 <= v && v < x } @-}
-- | Pick an arbitrary Int in [0, x). The first argument is ignored at
-- runtime (presumably it distinguishes call sites for the refinement
-- checker -- TODO confirm intent).
nonDet :: a -> Int -> Int
nonDet _ x = nonDetRange 0 x
{-@ nonDetRange :: x:Int -> y:Int -> {v:Int | x <= v && v < y} @-}
-- | Pick an arbitrary Int in [x, y) from the global random generator.
-- NOTE(review): uses 'unsafePerformIO' without a NOINLINE pragma, and
-- 'getStdGen' does not advance the global generator, so distinct calls may
-- observe the same seed -- acceptable only because the result is meant to
-- be an arbitrary ("non-deterministic") choice, not a random stream.
nonDetRange :: Int -> Int -> Int
nonDetRange x y = unsafePerformIO $ do g <- getStdGen
                                       -- shadows the outer 'x' with the drawn value
                                       (x, _) <- return $ randomR (x, y-1) g
                                       return x
-- | Default value returned for missing Int entries.
instance DefaultMap Int where
  def = 0

-- | Missing 'Val' entries read back as the uninitialised value.
instance DefaultMap (Val p) where
  def = VUnInit
{-@
data Val p = VUnit {}
           | VUnInit {}
           | VInt { vInt :: Int }
           | VString { vString :: String }
           | VSet { vSetName :: String }
           | VPid { vPid :: p }
           | VInR { vInR :: Val p }
           | VInL { vInL :: Val p }
           | VPair { vLeft :: Val p, vRight :: Val p }
@-}
-- | Runtime values exchanged by processes, parameterised by the
-- process-identifier type @p@. 'VInL'/'VInR' are sum injections and
-- 'VPair' the product; the LiquidHaskell block above mirrors this
-- declaration for the refinement checker.
data Val p = VUnit {}
           | VUnInit {}
           | VInt { vInt :: Int }
           | VString { vString :: String }
           | VSet { vSetName :: String }
           | VPid { vPid :: p }
           | VInR { vInR :: Val p }
           | VInL { vInL :: Val p }
           | VPair { vLeft :: Val p, vRight :: Val p }
  deriving (Show)
instance (FromJSON p) => FromJSON (Val p) where
parseJSON (Object o) = case H.toList o of
[(key,val)]
| key == "VUnit" -> return VUnit
| key == "VUnInit" -> return VUnInit
| key == "VInt" -> VInt <$> parseJSON val
| key == "VString" -> VString <$> parseJSON val
| key == "VSet" -> VSet <$> parseJSON val
| key == "VPid" -> VPid <$> parseJSON val
| key == "VInR" -> VInR <$> parseJSON val
| key == "VInL" -> VInL <$> parseJSON val
| key == "VPair" -> do (l,r) <- parseJSON val
return (VPair l r)
| otherwise -> mzero
parseJSON _ = mzero
instance (ToJSON p) => ToJSON (Val p) where
toJSON VUnit = object [ "VUnit" .= Null ]
toJSON VUnInit = object [ "VUnInit" .= Null ]
toJSON (VInt i) = object [ "VInt" .= toJSON i ]
toJSON (VString s) = object [ "VString" .= toJSON s ]
toJSON (VSet s) = object [ "VSet" .= toJSON s ]
toJSON (VPid p) = object [ "VPid" .= toJSON p ]
toJSON (VInR v) = object [ "VInR" .= toJSON v ]
toJSON (VInL v) = object [ "VInL" .= toJSON v ]
toJSON (VPair l r) = object [ "VPair" .= toJSON (l,r) ]
-- | Embed an assertion in 'Either': the payload comes back in 'Right'
-- when the predicate holds and in 'Left' otherwise.
--
-- Improvement: added the missing top-level type signature (every other
-- top-level binding in this module has one) and replaced the
-- @if p then … else …@ with guards.
liquidAssert :: Bool -> a -> Either a a
liquidAssert p x
  | p         = Right x
  | otherwise = Left x
-- Constructor predicates for 'Val'.  These are reflected as
-- LiquidHaskell measures below, so their simple clause-per-constructor
-- form must be preserved.
isVUnit, isVUnInit, isVInt, isVString, isVPid, isVInR, isVInL, isVPair, isVSet :: Val p -> Bool
isVUnit VUnit{} = True
isVUnit _       = False
isVUnInit VUnInit{} = True
isVUnInit _         = False
isVInt VInt{} = True
isVInt _      = False
isVString VString{} = True
isVString _         = False
isVSet VSet{} = True
isVSet _      = False
isVPid VPid{} = True
isVPid _      = False
isVInR VInR{} = True
isVInR _      = False
isVInL VInL{} = True
isVInL _      = False
isVPair VPair{} = True
isVPair _       = False
-- NOTE(review): every predicate except 'isVSet' is declared as a
-- measure — confirm whether the omission is intentional.
{-@ measure isVUnit @-}
{-@ measure isVUnInit @-}
{-@ measure isVInt @-}
{-@ measure isVString @-}
{-@ measure isVPid @-}
{-@ measure isVInL @-}
{-@ measure isVInR @-}
{-@ measure isVPair @-}
|
abakst/symmetry
|
checker/include/SymBoilerPlateQC.hs
|
Haskell
|
mit
| 3,533
|
-- Get the lowest common multiple of all integers between 1 and 20, that is, the lowest number that is divisible by all numbers from 1 to 20
-- Project Euler problem 5: print the answer for the range [1..20].
main = print getProblem5Value

-- | The smallest positive number divisible by every integer in [1..20].
getProblem5Value :: Integer
getProblem5Value = getLeastCommonMultiple [1..20]
-- | Lowest Common Multiple: takes a list of numbers and returns the
-- lowest common multiple of those numbers.
--
-- Bug fix: the previous implementation remembered the (possibly
-- composite) quotients it had multiplied in and divided each new
-- element by each remembered factor at most once.  Repeated prime
-- factors therefore weren't fully cancelled — e.g. it returned 24 for
-- @[4,6]@ instead of @lcm 4 6 == 12@.  Folding with the Prelude's
-- 'lcm' is correct for every input and still yields 232792560 for
-- @[1..20]@, so 'getProblem5Value' is unchanged.
getLeastCommonMultiple :: [Integer] -> Integer
getLeastCommonMultiple = foldr lcm 1
-- | Factor each listed divisor out of the target once: a divisor is
-- divided out only when it divides the running value evenly, otherwise
-- it is skipped.
divByAll :: [Integer] -> Integer -> Integer
divByAll divisors target = go target divisors
  where
    go t []       = t
    go t (d : ds)
      | t `mod` d == 0 = go (t `div` d) ds
      | otherwise      = go t ds
|
jchitel/ProjectEuler.hs
|
Problems/Problem0005.hs
|
Haskell
|
mit
| 1,334
|
{-# Language BangPatterns #-}
{-# Language GeneralizedNewtypeDeriving #-}
{-# Language Rank2Types #-}
module Unison.Runtime.Bits where
import Data.Tuple (swap)
import Data.List
import Unison.Runtime.Unfold (Unfold)
import qualified Unison.Runtime.Unfold as U
-- | A (possibly infinite) stream of ternary bits.
newtype Bits = Bits { bitstream :: Unfold Bit } deriving (Eq,Ord,Show)

-- | A ternary bit; 'Both' matches either boolean value (see 'matches').
data Bit = Zero | One | Both deriving (Eq,Ord,Show)
-- | Does a stream bit match a concrete boolean?  'Both' matches
-- anything, 'Zero' matches 'False', 'One' matches 'True'.
matches :: Bit -> Bool -> Bool
matches Both _ = True
matches Zero b = not b
matches One  b = b
type Score = Double
-- | Build a 'Bits' stream from a list of 0/1 integers; any other
-- integer is a programming error.
from01s :: [Int] -> Bits
from01s = fromList . map toBit
  where
    toBit 0 = Zero
    toBit 1 = One
    toBit n = error ("from01s: must be 0 or 1, got " ++ show n)
-- | Wrap a list of bits as a 'Bits' stream.
fromList :: [Bit] -> Bits
fromList bs = Bits (U.fromList bs)

-- | Unwrap a 'Bits' stream back to a list.
toList :: Bits -> [Bit]
toList (Bits bs) = U.toList bs

-- | Achieves maximum value of n/2 when both `zeros` and `ones` are n/2.
-- As distribution is more skewed toward either bit, score approaches 0.
-- Satisfies: `score n n 0 == 0`, `score n 0 n == 0`, `score n 0 0 == 0`.
-- There is a linear penalty if zeros + ones < n. So `score 10 4 4` will
-- be less than `score 10 5 5`.
score :: Double -> Double -> Double -> Score
score n zeros ones =
  -- p0/p1 are the fractions of rows whose bit is zero (resp. one)
  let p0 = zeros / n; p1 = ones / n
  in p0 * (n - zeros) + p1 * (n - ones)
-- | Count zeros and ones in a column ('Both' counts toward each),
-- stopping early as soon as the @halt@ predicate accepts the running
-- totals.
bitCounts' :: (Double -> Double -> Bool) -> [Bit] -> (Double,Double)
bitCounts' halt = count 0 0
  where
    count !z !o [] = (z, o)
    count !z !o (bit : rest)
      | halt z o  = (z, o)
      | otherwise = case bit of
          Zero -> count (z + 1) o rest
          One  -> count z (o + 1) rest
          Both -> count (z + 1) (o + 1) rest
-- | Find the column index with the best 'score' over the given rows,
-- together with that score; 'Nothing' when no column beats the running
-- best.  Uses early-exit heuristics so that columns (and rows within a
-- column) are not scanned further than necessary.
mostSignificantBit :: [Bits] -> Maybe (Int, Score)
mostSignificantBit bs = go (Nothing,0) (U.columns (map bitstream bs)) where
  n = fromIntegral (length bs)
  -- lazy "length xs > n" that works on unbounded columns
  lengthGT xs n = not (null (dropWhile (\(_,m) -> m <= n) (xs `zip` [1..])))
  -- score of the current best candidate, 0 when there is none
  value = maybe 0 snd
  -- NOTE: shadows Prelude's 'rem'; remaining rows not yet counted for
  -- either bit, assuming z + o <= n
  rem z o = (n-z) `min` (n-o)
  -- upper bound on the score this column could still reach
  -- (presumably: the minority count can grow at most to n/2 or by the
  -- remaining rows, whichever is smaller — TODO confirm the bound)
  maxPossible z o = score n (z `max` o) (m + ((n/2 - m) `min` rem z o)) where
    m = z `min` o
  -- give up on a column once the majority bit exceeds n/2 and even the
  -- optimistic bound cannot beat the current best
  stop best z o | z `max` o > n/2 = maxPossible z o <= best
  stop _ _ _ = False
  go (!best,!_) [] = best
  go (!best,!i) (bs:tl)
    -- a column shorter than twice the best score can never win
    | not (lengthGT bs (value best * 2)) = best
    | otherwise = case bitCounts' (stop (value best)) bs of
        (z,o) -> go (if s > value best then Just (i, s) else best, i + 1) tl
      where s = score n z o

-- | Per-column (zeros, ones) counts across all rows; 'Both' counts
-- toward each.  Stops at the first column where some row has ended.
bitCounts :: [Bits] -> [(Int,Int)]
bitCounts bs = sums (map bitstream bs) where
  sumCol = foldl' step (0,0) where
    step (z,o) b = case b of Zero -> (z+1,o); One -> (z,o+1); Both -> (z+1,o+1)
  sums [] = []
  sums bs =
    -- peel one bit off every row; rows that are exhausted drop out
    let (col, bs') = unzip [ (b, tl) | Just (b, tl) <- map U.uncons bs ]
    in (if null bs' then [] else sumCol col : sums bs')
-- | Rank every column by 'score' and return (column index, score)
-- pairs, best scores first.
mostSignificantBits :: [Bits] -> [(Int,Score)]
mostSignificantBits bs = map swap (sortBy (flip compare) (scores `zip` [0 ..]))
  where
    n      = fromIntegral (length bs)
    scores = [ score n (fromIntegral z) (fromIntegral o) | (z, o) <- bitCounts bs ]
-- | Small fixture of ragged bit rows, for exercising
-- 'mostSignificantBit' from GHCi.
sample :: [Bits]
sample =
  [ from01s[1,0]
  , from01s[1,0]
  , from01s[1,1,0,1]
  , from01s[1,1,1,1]
  , from01s[1,1,0,1]
  , from01s[1,1,0,1,1]
  , from01s[1,1,1,1]
  , from01s[1,1,0,1]
  , from01s[1,1,1,1]
  ]

-- | Best column of 'sample' according to 'mostSignificantBit'.
sampleMsb :: Maybe (Int,Score)
sampleMsb = mostSignificantBit sample
|
nightscape/platform
|
node/src/Unison/Runtime/Bits.hs
|
Haskell
|
mit
| 3,275
|
{-# LANGUAGE MultiParamTypeClasses #-}
-- module
module RCL.Error where
-- imports
import Control.Monad.Error
-- exported functions
-- | Lift an 'Either' into any 'MonadError': a 'Left' is thrown, a
-- 'Right' is returned.
withError :: MonadError a m => Either a b -> m b
withError (Left e)  = throwError e
withError (Right b) = return b
-- | Run an action purely to observe failure: returns 'True' when it
-- throws and 'False' when it succeeds (its result is discarded).
testError :: MonadError e m => m a -> m Bool
testError action =
    catchError (action >> return False) (\_ -> return True)
|
nicuveo/RCL
|
src/RCL/Error.hs
|
Haskell
|
mit
| 353
|
{-# LANGUAGE OverloadedStrings #-}
module DarkSky.Response.DataBlock where
import DarkSky.Response.Icon
import DarkSky.Response.DataPoint (DataPoint)
import Data.Aeson
import Data.Text (Text)
-- | A group of data points from a Dark Sky response, with optional
-- summary metadata (see the 'FromJSON' instance for the field mapping).
data DataBlock = DataBlock
  { data' :: [DataPoint]  -- ^ the points themselves ("data" in JSON)
  , summary :: Maybe Text -- ^ optional human-readable summary
  , icon :: Maybe Icon    -- ^ optional machine-readable icon
  } deriving (Eq, Show)
-- | Decode a data block: the "data" array is required, while
-- "summary" and "icon" are optional.
instance FromJSON DataBlock where
  parseJSON = withObject "datablock" $ \o ->
    DataBlock
      <$> o .: "data"
      <*> o .:? "summary"
      <*> o .:? "icon"
-- | A data block with no points and no metadata.
emptyDataBlock :: DataBlock
emptyDataBlock = DataBlock {data' = [], summary = Nothing, icon = Nothing}
|
peterstuart/dark-sky
|
src/DarkSky/Response/DataBlock.hs
|
Haskell
|
mit
| 737
|
import Chorale.Test.Common as ChoraleTestCommon
import Test.Framework
-- | Run the test suite with an increased QuickCheck budget
-- (1000 generated cases per property).
main :: IO ()
main = defaultMainWithArgs testsToRun ["--maximum-generated-tests=1000"]

-- | All test groups to execute; currently just Chorale.Test.Common.
testsToRun :: [Test]
testsToRun = ChoraleTestCommon.tests
|
mocnik-science/chorale
|
tests/Test.hs
|
Haskell
|
mit
| 218
|
-- 54 - 60
-- https://wiki.haskell.org/99_questions/54A_to_60
module NinetyNine.P5X where
import Data.List (findIndex, genericIndex)
import Data.Maybe (fromJust)
-- | A binary tree: empty, or a node holding a value and two subtrees.
data BTree a = Empty | Branch a (BTree a) (BTree a)
  deriving (Eq, Ord, Show)
{-
54A. Check whether a given term represents a binary tree.
In Prolog or Lisp, one writes a predicate to do this.
Example in Lisp:
* (istree (a (b nil nil) nil))
T
* (istree (a (b nil nil)))
NIL
Haskell's type system ensures that all terms of type Tree a are binary trees:
it is just not possible to construct an invalid tree with this type.
Hence, it is redundant to introduce a predicate to check this property:
it would always return True.
-}
{-
55. Construct completely balanced binary trees.
In a completely balanced binary tree, the following property holds for
every node: The number of nodes in its left subtree and the number of
nodes in its right subtree are almost equal, which means their difference
is not greater than one.
Write a function cbal-tree to construct completely balanced binary trees
for a given number of nodes. The predicate should generate all solutions
via backtracking. Put the letter 'x' as information into all nodes of
the tree.
Example:
* cbal-tree(4,T).
T = t(x, t(x, nil, nil), t(x, nil, t(x, nil, nil))) ;
T = t(x, t(x, nil, nil), t(x, t(x, nil, nil), nil)) ;
etc......No
Example in Haskell,
whitespace and "comment diagrams" added for clarity and exposition:
*Main> cbalTrees 4
[ -- permutation 1
-- x
-- / \
-- x x
-- \
-- x
Branch 'x' (Branch 'x' Empty Empty)
(Branch 'x' Empty
(Branch 'x' Empty Empty))
, -- permutation 2
-- x
-- / \
-- x x
-- /
-- x
Branch 'x' (Branch 'x' Empty Empty)
(Branch 'x' (Branch 'x' Empty Empty)
Empty)
, -- permutation 3
-- x
-- / \
-- x x
-- \
-- x
Branch 'x' (Branch 'x' Empty
(Branch 'x' Empty Empty))
(Branch 'x' Empty Empty)
, -- permutation 4
-- x
-- / \
-- x x
-- /
-- x
Branch 'x' (Branch 'x' (Branch 'x' Empty Empty)
Empty)
(Branch 'x' Empty Empty)
]
-}
-- | All completely balanced binary trees with @n@ nodes, every node
-- labelled @x@: sibling subtree sizes differ by at most one.
cbalTrees :: Integral n => a -> n -> [BTree a]
cbalTrees _ 0 = [Empty]
cbalTrees x n =
  [ Branch x l r
  | i <- [d .. d + m]          -- left subtree size
  , l <- cbalTrees x i
  , r <- cbalTrees x (pred n - i)
  ]
  where
    -- split the n-1 remaining nodes as evenly as possible
    (d, m) = divMod (pred n) 2
{-
56. Symmetric binary trees.
Let us call a binary tree symmetric if you can draw a vertical line through
the root node and then the right subtree is the mirror image of the left
subtree. Write a predicate symmetric/1 to check whether a given binary tree
is symmetric. Hint: Write a predicate mirror/2 first to check whether one
tree is the mirror image of another. We are only interested in the structure,
not in the contents of the nodes.
Example in Haskell:
*Main> symmetric (Branch 'x' (Branch 'x' Empty Empty) Empty)
False
*Main> symmetric (Branch 'x' (Branch 'x' Empty Empty)
(Branch 'x' Empty Empty))
True
-}
-- | Is the tree structurally mirror-symmetric about its root?
-- Node contents are ignored.
symmetric :: BTree a -> Bool
symmetric t = mirror t t
  where
    -- are the two trees mirror images of each other?
    mirror Empty Empty = True
    mirror (Branch _ la ra) (Branch _ lb rb) = mirror la rb && mirror ra lb
    mirror _ _ = False
{-
57. Binary search trees (dictionaries).
Use the predicate add/3, developed in chapter 4 of the course,
to write a predicate to construct a binary search tree from a list of
integer numbers.
Example:
* construct([3,2,5,7,1],T).
T = t(3, t(2, t(1, nil, nil), nil), t(5, nil, t(7, nil, nil)))
Then use this predicate to test the solution of the problem P56.
Example:
* test-symmetric([5,3,18,1,4,12,21]).
Yes
* test-symmetric([3,2,5,7,4]).
No
Example in Haskell:
*Main> construct [3, 2, 5, 7, 1]
Branch 3 (Branch 2 (Branch 1 Empty Empty) Empty)
(Branch 5 Empty (Branch 7 Empty Empty))
*Main> symmetric . construct $ [5, 3, 18, 1, 4, 12, 21]
True
*Main> symmetric . construct $ [3, 2, 5, 7, 1]
True
-}
-- | Build a binary search tree by inserting the elements left to
-- right; duplicates are silently dropped.
construct :: Ord a => [a] -> BTree a
construct = go Empty
  where
    go t []       = t
    go t (x : xs) = go (insert x t) xs
    -- standard unbalanced BST insertion
    insert x Empty = Branch x Empty Empty
    insert x node@(Branch y l r)
      | x < y     = Branch y (insert x l) r
      | x > y     = Branch y l (insert x r)
      | otherwise = node
{-
58. Generate-and-test paradigm.
Apply the generate-and-test paradigm to construct all symmetric,
completely balanced binary trees with a given number of nodes.
Example:
* sym-cbal-trees(5,Ts).
Ts = [t(x, t(x, nil, t(x, nil, nil)), t(x, t(x, nil, nil), nil)),
t(x, t(x, t(x, nil, nil), nil), t(x, nil, t(x, nil, nil)))]
Example in Haskell:
*Main> symCbalTrees 5
[ Branch 'x' (Branch 'x' Empty (Branch 'x' Empty Empty))
(Branch 'x' (Branch 'x' Empty Empty) Empty)
, Branch 'x' (Branch 'x' (Branch 'x' Empty Empty) Empty)
(Branch 'x' Empty (Branch 'x' Empty Empty))
]
-}
-- | All symmetric, completely balanced binary trees with @n@ nodes.
symCbalTrees :: Integral n => a -> n -> [BTree a]
symCbalTrees x n = [ t | t <- cbalTrees x n, symmetric t ]
{-
59. Construct height-balanced binary trees.
In a height-balanced binary tree, the following property holds for every node:
The height of its left subtree and the height of its right subtree are almost
equal, which means their difference is not greater than one.
Construct a list of all height-balanced binary trees with the given element
and the given maximum height.
Example:
?- hbal_tree(3,T).
T = t(x, t(x, t(x, nil, nil), t(x, nil, nil)),
t(x, t(x, nil, nil), t(x, nil, nil))) ;
T = t(x, t(x, t(x, nil, nil), t(x, nil, nil)),
t(x, t(x, nil, nil), nil)) ;
etc......No
Example in Haskell:
*Main> take 4 $ hbalTreesH 'x' 3
[ Branch 'x' (Branch 'x' Empty Empty)
(Branch 'x' Empty (Branch 'x' Empty Empty))
, Branch 'x' (Branch 'x' Empty Empty)
(Branch 'x' (Branch 'x' Empty Empty) Empty)
, Branch 'x' (Branch 'x' Empty Empty)
(Branch 'x' (Branch 'x' Empty Empty) (Branch 'x' Empty Empty))
, Branch 'x' (Branch 'x' Empty (Branch 'x' Empty Empty))
(Branch 'x' Empty Empty)
]
-}
-- | All height-balanced binary trees of exactly height @h@: subtree
-- heights differ by at most one at every node.
hbalTreesH :: Integral n => a -> n -> [BTree a]
hbalTreesH _ 0 = [Empty]
hbalTreesH x 1 = [Branch x Empty Empty]
hbalTreesH x h =
  [ Branch x l r
  | (lh, rh) <- [(h - 2, h - 1), (h - 1, h - 1), (h - 1, h - 2)]
  , l <- hbalTreesH x lh
  , r <- hbalTreesH x rh
  ]
-- | Like 'hbalTreesH', but memoised: @tss !! h@ lists all trees of
-- height @h@, built from the two previous entries of the
-- self-referential list.
hbalTreesH' :: Integral n => a -> n -> [BTree a]
hbalTreesH' x = genericIndex tss
  where
    tss = [Empty] : [Branch x Empty Empty] : zipWith h tss (tail tss)
    -- xs: trees of height h-2, ys: trees of height h-1
    h xs ys = f =<< [(xs, ys), (ys, ys), (ys, xs)]
    f (ls, rs) = Branch x <$> ls <*> rs
{-
60. Construct height-balanced binary trees with a given number of nodes.
Consider a height-balanced binary tree of height H. What is the maximum
number of nodes it can contain?
Clearly, MaxN = 2**H - 1. However, what is the minimum number MinN?
This question is more difficult.
Try to find a recursive statement and turn it into a function minNodes
that returns the minimum number of nodes in a height-balanced binary tree
of height H.
On the other hand, we might ask:
what is the maximum height H a height-balanced binary tree with N nodes
can have?
Write a function maxHeight that computes this.
Now, we can attack the main problem:
construct all the height-balanced binary trees with a given number of nodes.
Find out how many height-balanced trees exist for N = 15.
Example in Prolog:
?- count_hbal_trees(15,C).
C = 1553
Example in Haskell:
*Main> length $ hbalTrees 'x' 15
1553
*Main> map (hbalTrees 'x') [0..3]
[ [ Empty ]
, [ Branch 'x' Empty Empty ]
, [ Branch 'x' Empty (Branch 'x' Empty Empty)
, Branch 'x' (Branch 'x' Empty Empty) Empty ]
, [ Branch 'x' (Branch 'x' Empty Empty) (Branch 'x' Empty Empty) ]
]
-}
-- | Maximum node count of any binary tree of the given height: 2^h - 1.
maxNodesByHeight :: Integral n => n -> n
maxNodesByHeight = pred . (2 ^)

-- | Minimum node count of a height-balanced tree of the given height.
minNodesByHeight :: Integral n => n -> n
minNodesByHeight = genericIndex minNodesSequence

-- | Maximum height of a height-balanced tree with the given node count:
-- one less than the first height whose minimum node count exceeds n.
-- 'fromJust' is safe here because 'minNodesSequence' is infinite and
-- strictly increasing from index 2 on, so a larger entry always exists.
maxHeightByNodes :: Integral n => n -> n
maxHeightByNodes = fromIntegral . pred . fromJust . flip findIndex minNodesSequence . (<)

-- | Minimum height of any binary tree with the given node count:
-- ceil(log2(n + 1)).
minHeightByNodes :: Integral n => n -> n
minHeightByNodes = ceiling . logBase 2 . fromIntegral . succ

-- | minNodes h satisfies the Fibonacci-like recurrence
-- minNodes h = 1 + minNodes (h-1) + minNodes (h-2).
minNodesSequence :: Integral n => [n]
minNodesSequence = fromIntegral <$> ns
  where
    ns = 0 : 1 : zipWith ((+) . succ) ns (tail ns) :: [Integer]

-- | Number of nodes in a tree.
countNodes :: Integral n => BTree a -> n
countNodes Empty = 0
countNodes (Branch _ l r) = succ $ countNodes l + countNodes r

-- | All height-balanced trees with exactly @n@ nodes: generate every
-- candidate height, then filter the trees of that height by node count.
hbalTrees :: Integral n => a -> n -> [BTree a]
hbalTrees x n = [t | h <- [minHeightByNodes n .. maxHeightByNodes n], t <- hbalTreesH x h, countNodes t == n]
|
airt/Haskell-99
|
src/NinetyNine/P5X.hs
|
Haskell
|
mit
| 8,529
|
--------------------------------------------------------------------------------
{-# LANGUAGE OverloadedStrings, TupleSections, LambdaCase #-}
module PrevNextPost where
import Control.Applicative (Alternative (..))
import Data.Char
import Data.Maybe
import Data.Monoid
import qualified Data.Set as S
import Hakyll
import Text.Pandoc.Options
import System.FilePath (takeBaseName, takeFileName, takeDirectory, joinPath, splitPath, replaceExtension)
import Control.Lens hiding (Context)
import Control.Monad
import Data.List
import qualified Data.Map as M
import qualified Data.MultiMap as MM
import Text.Printf
--import qualified Data.Tree as T
import Debug.Trace
import Utilities
import HakyllUtils
import Data.Time.Format (parseTime, defaultTimeLocale)
-- import System.Locale (defaultTimeLocale)
import Data.Time.Clock (UTCTime)
-- | Context exposing @$nextPost$@ and @$prevPost$@ fields, resolved
-- against all posts matching the glob.
prevNextContext :: Pattern -> Context String
prevNextContext postsGlob =
  mconcat
    [ field "nextPost" (nextPostUrl postsGlob)
    , field "prevPost" (previousPostUrl postsGlob)
    ]
-- | URL of the post immediately before the given one, with all posts
-- matching the glob ordered by their UTC timestamp; 'empty' when there
-- is no predecessor.
previousPostUrl :: Pattern -> Item String -> Compiler String
previousPostUrl postsGlob post = do
  posts <- getMatches postsGlob
  dates <- mapM (getItemUTC defaultTimeLocale) posts
  let ordered = map snd (sort (zip dates posts))
  case itemBefore ordered (itemIdentifier post) of
    Just prev -> fmap (maybe empty toUrl) (getRoute prev)
    Nothing   -> empty
-- | URL of the post immediately after the given one, with all posts
-- matching the glob ordered by their UTC timestamp; 'empty' when there
-- is no successor.
nextPostUrl :: Pattern -> Item String -> Compiler String
nextPostUrl postsGlob post = do
  posts <- getMatches postsGlob
  dates <- mapM (getItemUTC defaultTimeLocale) posts
  let ordered = map snd (sort (zip dates posts))
  case itemAfter ordered (itemIdentifier post) of
    Just next -> fmap (maybe empty toUrl) (getRoute next)
    Nothing   -> empty
-- | In an association list, the value bound to the key immediately
-- after the first occurrence of @x@; 'Nothing' for the last key or a
-- missing key.
itemAfter' :: Eq a => [(a,b)] -> a -> Maybe b
itemAfter' xys x = do
  let keys = map fst xys
  next <- lookup x (zip keys (drop 1 keys))
  lookup next xys
-- | The element immediately after the first occurrence of @x@, if any.
itemAfter :: Eq a => [a] -> a -> Maybe a
itemAfter xs x = lookup x (xs `zip` drop 1 xs)
-- | In an association list, the value bound to the key immediately
-- before the first occurrence of @x@; 'Nothing' for the first key or a
-- missing key.
itemBefore' :: Eq a => [(a,b)] -> a -> Maybe b
itemBefore' xys x = do
  let keys = map fst xys
  prev <- lookup x (zip (drop 1 keys) keys)
  lookup prev xys
-- | The element immediately before the first occurrence of @x@, if any.
itemBefore :: Eq a => [a] -> a -> Maybe a
itemBefore xs x = lookup x (drop 1 xs `zip` xs)
-- | The routed URL of an item; 'empty' when the item has no route.
urlOfPost :: Item String -> Compiler String
urlOfPost post = fmap (maybe empty toUrl) (getRoute (itemIdentifier post))
|
holdenlee/philosophocle
|
src/PrevNextPost.hs
|
Haskell
|
mit
| 2,716
|
module Api.Controllers.User
( authenticate
, create
, unverifiedEdit
, verifyEdit
) where
import Api.Types.Fields (UserToken (..))
import Api.Types.Server (ApiActionM, ApiException (..), mailer)
import Control.Monad.Reader (asks, lift)
import Control.Applicative ((<$>), (<|>))
import Control.Monad.IO.Class (liftIO)
import Control.Monad.Trans.Maybe (MaybeT (..), runMaybeT)
import Web.Scotty.Trans (header, json, raise)
import qualified Api.Mailers.Verify as Verify
import qualified Api.Mappers.Resource as Resource
import qualified Api.Mappers.PendingUserResource as Pending
import qualified Api.Mappers.User as User
import qualified Data.Text.Lazy as LT
import qualified Data.Text as ST
import Api.Helpers.Controller
import Api.Types.Resource
import Api.Types.PendingUserResource
import Api.Types.User
-- | Resolve the current user from the Authorization header, raising
-- 'UnauthorizedUser' when the lookup fails.
authenticate :: ApiActionM s User
authenticate =
  loginFromHeader >>= maybe (raise UnauthorizedUser) return
-- only used to register a new device
-- | Insert a fresh user row and reply with it as JSON.
create :: ApiActionM s ()
create = reqQuery User.insert >>= json

-- only used after registration when a user needs to initially connect to a
-- contact
-- | Record a pending user/resource link from the request parameters and
-- mail out a verification link; replies with "ok".
unverifiedEdit :: User -> ApiActionM s ()
unverifiedEdit _ = do
  sendEmail <- lift $ asks mailer
  fields <- fromParams
  pending <- reqQuery $ Pending.insert fields
  liftIO . sendEmail $ Verify.mkEmail pending
  json ("ok" :: ST.Text)

-- | Complete a pending edit identified by the "uuid" parameter:
-- delete the pending row, find-or-insert the resource by its email,
-- then point the user at that resource.  Any failing step makes the
-- whole 'MaybeT' chain (and thus the response) come back as null.
verifyEdit :: ApiActionM s ()
verifyEdit = do
  uuid <- reqParam "uuid"
  user <- reqQuery $ runMaybeT $ do
    pending <- MaybeT $ Pending.findByUuid uuid
    _ <- MaybeT $ Just <$> Pending.delete (pend_id pending)
    resource <- MaybeT (Resource.findByEmail $ email pending)
                <|> MaybeT (Resource.insert . fromEmail $ email pending)
    MaybeT . User.update $ User (uid pending) (Just $ res_id resource)
  json user
  where
    uid = pend_userId . pend_fields
    email = pend_resourceEmail . pend_fields

-- private functions

-- | Parse "Authorization: Token <token>" plus the "user_id" parameter
-- into a user lookup; raises 'MissingAuthToken' on any other header
-- shape.
loginFromHeader :: ApiActionM s (Maybe User)
loginFromHeader = do
  authToken <- header "Authorization"
  case LT.words <$> authToken of
    Just ["Token", token] -> do
      uid <- reqParam "user_id"
      query $ User.findByLogin . Login uid . UserToken $ LT.toStrict token
    _ -> raise MissingAuthToken
|
bendyworks/api-server
|
lib/Api/Controllers/User.hs
|
Haskell
|
mit
| 2,392
|
{-# LANGUAGE OverloadedStrings #-}
import Control.Monad (foldM)
import Test.Hspec (Spec, describe, it, shouldBe)
import Test.Hspec.Runner (configFastFail, defaultConfig, hspecWith)
import Forth (ForthError(..), emptyState, evalText, toList)
-- | Run the Forth specs, aborting on the first failure.
main :: IO ()
main = hspecWith defaultConfig {configFastFail = True} specs

-- | Exercism Forth evaluator suite: each case threads one or more
-- source lines through 'evalText' starting from 'emptyState', then
-- inspects either the resulting stack (Right) or the error (Left).
specs :: Spec
specs = do
  let runTexts = fmap toList . foldM (flip evalText) emptyState
  describe "parsing and numbers" $
    it "numbers just get pushed onto the stack" $
      runTexts ["1 2 3 4 5"] `shouldBe` Right [1, 2, 3, 4, 5]
  describe "addition" $ do
    it "can add two numbers" $
      runTexts ["1 2 +"] `shouldBe` Right [3]
    it "errors if there is nothing on the stack" $
      runTexts ["+"] `shouldBe` Left StackUnderflow
    it "errors if there is only one value on the stack" $
      runTexts ["1 +"] `shouldBe` Left StackUnderflow
  describe "subtraction" $ do
    it "can subtract two numbers" $
      runTexts ["3 4 -"] `shouldBe` Right [-1]
    it "errors if there is nothing on the stack" $
      runTexts ["-"] `shouldBe` Left StackUnderflow
    it "errors if there is only one value on the stack" $
      runTexts ["1 -"] `shouldBe` Left StackUnderflow
  describe "multiplication" $ do
    it "can multiply two numbers" $
      runTexts ["2 4 *"] `shouldBe` Right [8]
    it "errors if there is nothing on the stack" $
      runTexts ["*"] `shouldBe` Left StackUnderflow
    it "errors if there is only one value on the stack" $
      runTexts ["1 *"] `shouldBe` Left StackUnderflow
  describe "division" $ do
    it "can divide two numbers" $
      runTexts ["12 3 /"] `shouldBe` Right [4]
    it "performs integer division" $
      runTexts ["8 3 /"] `shouldBe` Right [2]
    it "errors if dividing by zero" $
      runTexts ["4 0 /"] `shouldBe` Left DivisionByZero
    it "errors if there is nothing on the stack" $
      runTexts ["/"] `shouldBe` Left StackUnderflow
    it "errors if there is only one value on the stack" $
      runTexts ["1 /"] `shouldBe` Left StackUnderflow
  describe "combined arithmetic" $ do
    it "addition and subtraction" $
      runTexts ["1 2 + 4 -"] `shouldBe` Right [-1]
    it "multiplication and division" $
      runTexts ["2 4 * 3 /"] `shouldBe` Right [2]
  describe "dup" $ do
    it "copies a value on the stack" $
      runTexts ["1 dup"] `shouldBe` Right [1, 1]
    it "copies the top value on the stack" $
      runTexts ["1 2 dup"] `shouldBe` Right [1, 2, 2]
    it "errors if there is nothing on the stack" $
      runTexts ["dup"] `shouldBe` Left StackUnderflow
  describe "drop" $ do
    it "removes the top value on the stack if it is the only one" $
      runTexts ["1 drop"] `shouldBe` Right []
    it "removes the top value on the stack if it is not the only one" $
      runTexts ["1 2 drop"] `shouldBe` Right [1]
    it "errors if there is nothing on the stack" $
      runTexts ["drop"] `shouldBe` Left StackUnderflow
  describe "swap" $ do
    it "swaps the top two values on the stack if they are the only ones" $
      runTexts ["1 2 swap"] `shouldBe` Right [2, 1]
    it "swaps the top two values on the stack if they are not the only ones" $
      runTexts ["1 2 3 swap"] `shouldBe` Right [1, 3, 2]
    it "errors if there is nothing on the stack" $
      runTexts ["swap"] `shouldBe` Left StackUnderflow
    it "errors if there is only one value on the stack" $
      runTexts ["1 swap"] `shouldBe` Left StackUnderflow
  describe "over" $ do
    it "copies the second element if there are only two" $
      runTexts ["1 2 over"] `shouldBe` Right [1, 2, 1]
    it "copies the second element if there are more than two" $
      runTexts ["1 2 3 over"] `shouldBe` Right [1, 2, 3, 2]
    it "errors if there is nothing on the stack" $
      runTexts ["over"] `shouldBe` Left StackUnderflow
    it "errors if there is only one value on the stack" $
      runTexts ["1 over"] `shouldBe` Left StackUnderflow
  describe "user-defined words" $ do
    it "can consist of built-in words" $
      runTexts [ ": dup-twice dup dup ;"
               , "1 dup-twice" ] `shouldBe` Right [1, 1, 1]
    it "execute in the right order" $
      runTexts [ ": countup 1 2 3 ;"
               , "countup" ] `shouldBe` Right [1, 2, 3]
    it "can override other user-defined words" $
      runTexts [ ": foo dup ;"
               , ": foo dup dup ;"
               , "1 foo" ] `shouldBe` Right [1, 1, 1]
    it "can override built-in words" $
      runTexts [ ": swap dup ;"
               , "1 swap" ] `shouldBe` Right [1, 1]
    it "can override built-in operators" $
      runTexts [ ": + * ;"
               , "3 4 +" ] `shouldBe` Right [12]
    it "can use different words with the same name" $
      runTexts [ ": foo 5 ;"
               , ": bar foo ;"
               , ": foo 6 ;"
               , "bar foo" ] `shouldBe` Right [5, 6]
    it "can define word that uses word with the same name" $
      runTexts [ ": foo 10 ;"
               , ": foo foo 1 + ;"
               , "foo" ] `shouldBe` Right [11]
    it "cannot redefine numbers" $
      runTexts [": 1 2 ;"] `shouldBe` Left InvalidWord
    it "errors if executing a non-existent word" $
      runTexts ["1 foo"] `shouldBe` Left (UnknownWord "foo")
  describe "case-insensitivity" $ do
    it "DUP is case-insensitive" $
      runTexts ["1 DUP Dup dup"] `shouldBe` Right [1, 1, 1, 1]
    it "DROP is case-insensitive" $
      runTexts ["1 2 3 4 DROP Drop drop"] `shouldBe` Right [1]
    it "SWAP is case-insensitive" $
      runTexts ["1 2 SWAP 3 Swap 4 swap"] `shouldBe` Right [2, 3, 4, 1]
    it "OVER is case-insensitive" $
      runTexts ["1 2 OVER Over over"] `shouldBe` Right [1, 2, 1, 2, 1]
    it "user-defined words are case-insensitive" $
      runTexts [ ": foo dup ;"
               , "1 FOO Foo foo" ] `shouldBe` Right [1, 1, 1, 1]
    it "definitions are case-insensitive" $
      runTexts [ ": SWAP DUP Dup dup ;"
               , "1 swap" ] `shouldBe` Right [1, 1, 1, 1]

-- ab8d473c39114365fb88f8406ea7a1783f0a40f4
|
exercism/xhaskell
|
exercises/practice/forth/test/Tests.hs
|
Haskell
|
mit
| 6,377
|
{-# LANGUAGE DataKinds, FlexibleInstances, MultiParamTypeClasses, OverloadedStrings, ScopedTypeVariables, TypeOperators #-}
module Hevents.Eff.Demo where
-- * Imports, stuff to make the compiler happy
import Control.Category
import Control.Concurrent.Async
import Control.Concurrent.STM
import Control.Exception (finally, throwIO)
import Control.Monad.Except
import qualified Control.Monad.State as ST
import Control.Monad.Trans.Either
import qualified Data.ByteString.Builder as BS
import Data.Either (rights)
import Data.Proxy
import Data.Serialize (Serialize, get, put)
import Data.Typeable
import Data.Void
import Hevents.Eff as W
import Network.HTTP.Client (Manager, defaultManagerSettings, newManager)
import Prelude hiding (init, (.))
import Servant
import Servant.Client
import System.Environment
import Test.Hspec
import Test.QuickCheck as Q
import Test.QuickCheck.Monadic as Q
-- * Let's start writing a test...

-- | Property-based specification of the bounded-counter model.
aCounter :: Spec
aCounter = describe "Counter Model" $ do
  it "should apply events from commands given they respect bounds" $ property $
    prop_shouldApplyCommandRespectingBounds
  it "should not apply commands over bounds" $ property $
    prop_shouldNotApplyCommandsOverBounds

-- | A command within bounds yields an event whose application moves
-- the counter by the commanded amount (decrements are checked from a
-- starting value of 20 so they stay in bounds).
prop_shouldApplyCommandRespectingBounds :: Command Counter -> Bool
prop_shouldApplyCommandRespectingBounds c@(Increment n) =
  let OK result = init `act` c
  in init `apply` result == Counter n
prop_shouldApplyCommandRespectingBounds c@(Decrement n) =
  let counter20 = Counter 20
      OK result = counter20 `act` c
  in counter20 `apply` result == Counter (20 - n)

-- | Replaying any command sequence keeps the counter within [0, 100].
prop_shouldNotApplyCommandsOverBounds :: [ Command Counter ] -> Bool
prop_shouldNotApplyCommandsOverBounds commands =
  let finalCounter = counter $ ST.execState (mapM updateModel commands) init
  in finalCounter >= 0 && finalCounter <= 100

-- | The counter state: a plain 'Int' kept within [0, 100] by 'act'.
newtype Counter = Counter { counter :: Int } deriving (Eq,Show)

-- Commands, events and errors of the counter model.
data CCounter = Increment Int
              | Decrement Int
              deriving (Eq, Show)

data ECounter = Added Int deriving (Eq,Show)

data ErCounter = OutOfBounds deriving (Eq,Show)

type instance Command Counter = CCounter
type instance Event Counter = ECounter
type instance Error Counter = ErCounter

-- | Pure model: 'act' validates a command against the [0, 100] bounds
-- and produces an 'Added' event (or 'KO OutOfBounds'); 'apply' folds
-- an event into the state.
instance Model Counter where
  init = Counter 0

  Counter k `act` Increment n = if k + n <= 100
                                then OK $ Added n
                                else KO OutOfBounds
  Counter k `act` Decrement n = if k - n >= 0
                                then OK $ Added (-n)
                                else KO OutOfBounds

  Counter k `apply` Added n = Counter $ k + n

instance Arbitrary CCounter where
  arbitrary = oneof [ Increment <$> choose (0,20)
                    , Decrement <$> choose (0,20)
                    ]
-- * We now have a fully functional event-sourced bounded counter *Model*
-- let's expose some services that end users could access...
--
-- First write tests representing services interactions

-- | User-level actions against the counter services.
data CounterAction = GetCounter
                   | IncCounter Int
                   | DecCounter Int
                   deriving (Show)

instance Arbitrary CounterAction where
  -- we use frequency to represent some expected (or observed) behaviour
  -- our users' behaviour model could be much more complex...
  arbitrary = frequency [ (3, return GetCounter)
                        , (2, IncCounter <$> choose (0,10))
                        , (1, DecCounter <$> choose (0,10))
                        ]

-- | Running any action sequence through the service layer keeps every
-- observed counter value within [0, 100].
prop_servicesRespectCounterBounds :: [ CounterAction ] -> Property
prop_servicesRespectCounterBounds actions = Q.monadicIO $ do
  results <- Q.run $ do
    (model, storage) <- prepareContext
    mapM (effect storage model . interpret) actions
  assert $ all (\c -> c >= 0 && c <= 100) (rights results)

-- this is where we define the initial state of our services and model
prepareContext = (,) <$>
  newTVarIO (W.init :: Counter) <*>
  atomically W.makeMemoryStore

-- defines how to interpret our action model in terms of actual services
type EventSourced m a = Eff (State m :> Store :> Exc ServantErr :> Lift STM :> Void) a

interpret GetCounter = getCounter
interpret (IncCounter n) = increment n
interpret (DecCounter n) = decrement n

-- | Read the current counter value.
getCounter :: EventSourced Counter Int
getCounter = counter <$> getState

-- | Apply an increment command and persist the resulting event.
increment :: Int -> EventSourced Counter Int
increment n = applyCommand (Increment n) >>= storeEvent

-- | Apply a decrement command and persist the resulting event.
decrement :: Int -> EventSourced Counter Int
decrement n = applyCommand (Decrement n) >>= storeEvent

-- | Persist a command's outcome: model errors map to HTTP 400, store
-- errors to HTTP 500, success returns the updated counter value.
storeEvent :: Either ErCounter ECounter
           -> EventSourced Counter Int
storeEvent = either
  (throwExc . fromModelError)
  (either (throwExc . fromDBError) (const $ counter <$> getState) <=< store)
  where
    fromModelError e = err400 { errBody = BS.toLazyByteString $ BS.stringUtf8 $ "Invalid command " ++ show e }
    fromDBError e = err500 { errBody = BS.toLazyByteString $ BS.stringUtf8 $ "DB Error " ++ show e }

-- | Binary persistence of counter events.
instance Serialize ECounter where
  put (Added i) = put i
  get = Added <$> get

instance Versionable ECounter

-- * Expose our counter services through a REST API
type CounterApi = "counter" :> (Get '[JSON] Int
                           :<|> "increment" :> Capture "inc" Int :> Get '[JSON] Int
                           :<|> "decrement" :> Capture "dec" Int :> Get '[JSON] Int)

counterApi :: Proxy CounterApi
counterApi = Proxy
-- * Let's write a test for our API against actual services, using user-centric actions
prop_counterServerImplementsCounterApi :: [ CounterAction ] -> Property
prop_counterServerImplementsCounterApi actions = Q.monadicIO $ do
let baseUrl = BaseUrl Http "localhost" 8082 ""
results <- Q.run $ do
mgr <- newManager defaultManagerSettings
(model, storage) <- prepareContext
server <- W.runWebServerErr 8082 counterApi (Nat $ ExceptT . effect storage model) handler
mapM (runClient mgr baseUrl) actions `finally` cancel server
assert $ all (\c -> c >= 0 && c <= 100) results
runClient m b GetCounter = either throwIO return =<< runExceptT (counterState m b)
runClient m b (IncCounter n) = either throwIO return =<< runExceptT (incCounter n m b)
runClient m b (DecCounter n) = either throwIO return =<< runExceptT (decCounter n m b)
counterState :<|> incCounter :<|> decCounter = client counterApi
handler = getCounter :<|> increment :<|> decrement
-- * Main server
main :: IO ()
main = do
  -- Expects exactly one command-line argument: the port to listen on.
  -- NOTE(review): the pattern match fails at runtime if the argument
  -- count differs — confirm this is acceptable for a demo binary.
  [port] <- getArgs
  (model, storage) <- prepareContext
  W.runWebServerErr (Prelude.read port) counterApi (Nat $ ExceptT . effect storage model) handler >>= wait
|
abailly/hevents
|
test/Hevents/Eff/Demo.hs
|
Haskell
|
mit
| 7,032
|
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE KindSignatures #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE PolyKinds #-}
{-# LANGUAGE UndecidableInstances #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeSynonymInstances #-}
{-# LANGUAGE BangPatterns #-}
{-# LANGUAGE ConstraintKinds #-}
{-# LANGUAGE ExistentialQuantification #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE AutoDeriveTypeable #-}
{-# LANGUAGE CPP #-}
{-# LANGUAGE FlexibleContexts #-}
module IHaskell.Display.Widgets.Layout.Types where
import Prelude hiding (Right,Left)
import Control.Monad (unless)
import qualified Control.Exception as Ex
import Data.List (intercalate)
import Data.Vinyl (Rec(..))
import qualified IHaskell.Display.Widgets.Singletons as S
import IHaskell.Display.Widgets.Types
import IHaskell.Display.Widgets.Layout.Common
-- | Field list of the Layout widget model: the standard model/view
-- identification fields followed by one field per supported CSS layout
-- property (the L-prefixed singletons).
type LayoutClass = [ 'S.ModelModule
                   , 'S.ModelModuleVersion
                   , 'S.ModelName
                   , 'S.ViewModule
                   , 'S.ViewModuleVersion
                   , 'S.ViewName
                   , 'S.LAlignContent
                   , 'S.LAlignItems
                   , 'S.LAlignSelf
                   , 'S.LBorder
                   , 'S.LBottom
                   , 'S.LDisplay
                   , 'S.LFlex
                   , 'S.LFlexFlow
                   , 'S.LGridArea
                   , 'S.LGridAutoColumns
                   , 'S.LGridAutoFlow
                   , 'S.LGridAutoRows
                   , 'S.LGridColumn
                   , 'S.LGridGap
                   , 'S.LGridRow
                   , 'S.LGridTemplateAreas
                   , 'S.LGridTemplateColumns
                   , 'S.LGridTemplateRows
                   , 'S.LHeight
                   , 'S.LJustifyContent
                   , 'S.LJustifyItems
                   , 'S.LLeft
                   , 'S.LMargin
                   , 'S.LMaxHeight
                   , 'S.LMaxWidth
                   , 'S.LMinHeight
                   , 'S.LMinWidth
                   , 'S.LOrder
                   , 'S.LOverflow
                   , 'S.LOverflowX
                   , 'S.LOverflowY
                   , 'S.LPadding
                   , 'S.LRight
                   , 'S.LTop
                   , 'S.LVisibility
                   , 'S.LWidth
                   ]
-- Every layout field holds an optional CSS value string ('Nothing'
-- meaning "property not set").
type instance FieldType 'S.LAlignContent = Maybe String
type instance FieldType 'S.LAlignItems = Maybe String
type instance FieldType 'S.LAlignSelf = Maybe String
type instance FieldType 'S.LBorder = Maybe String
type instance FieldType 'S.LBottom = Maybe String
type instance FieldType 'S.LDisplay = Maybe String
type instance FieldType 'S.LFlex = Maybe String
type instance FieldType 'S.LFlexFlow = Maybe String
type instance FieldType 'S.LGridArea = Maybe String
type instance FieldType 'S.LGridAutoColumns = Maybe String
type instance FieldType 'S.LGridAutoFlow = Maybe String
type instance FieldType 'S.LGridAutoRows = Maybe String
type instance FieldType 'S.LGridColumn = Maybe String
type instance FieldType 'S.LGridGap = Maybe String
type instance FieldType 'S.LGridRow = Maybe String
type instance FieldType 'S.LGridTemplateAreas = Maybe String
type instance FieldType 'S.LGridTemplateColumns = Maybe String
type instance FieldType 'S.LGridTemplateRows = Maybe String
type instance FieldType 'S.LHeight = Maybe String
type instance FieldType 'S.LJustifyContent = Maybe String
type instance FieldType 'S.LJustifyItems = Maybe String
type instance FieldType 'S.LLeft = Maybe String
type instance FieldType 'S.LMargin = Maybe String
type instance FieldType 'S.LMaxHeight = Maybe String
type instance FieldType 'S.LMaxWidth = Maybe String
type instance FieldType 'S.LMinHeight = Maybe String
type instance FieldType 'S.LMinWidth = Maybe String
type instance FieldType 'S.LOrder = Maybe String
type instance FieldType 'S.LOverflow = Maybe String
type instance FieldType 'S.LOverflowX = Maybe String
type instance FieldType 'S.LOverflowY = Maybe String
type instance FieldType 'S.LPadding = Maybe String
type instance FieldType 'S.LRight = Maybe String
type instance FieldType 'S.LTop = Maybe String
type instance FieldType 'S.LVisibility = Maybe String
type instance FieldType 'S.LWidth = Maybe String
-- type family WidgetFields (w :: WidgetType) :: [Field] where
type instance WidgetFields 'LayoutType = LayoutClass
-- | A record representing a widget of the Layout class from IPython
-- | Default attribute record for a Layout widget.  Identification
-- fields are fixed; every CSS field starts unset ('Nothing').  Fields
-- built with @=:.@ additionally carry a validator ('venum') that
-- restricts values to a fixed list of CSS keywords.
defaultLayoutWidget :: Rec Attr LayoutClass
defaultLayoutWidget = (S.SModelModule =:! "@jupyter-widgets/base")
                   :& (S.SModelModuleVersion =:! "1.1.0")
                   :& (S.SModelName =:! "LayoutModel")
                   :& (S.SViewModule =:! "@jupyter-widgets/base")
                   :& (S.SViewModuleVersion =:! "1.1.0")
                   :& (S.SViewName =:! "LayoutView")
                   :& (AlignContent =:. (Nothing, venum alignContentProps))
                   :& (AlignItems =:. (Nothing, venum alignItemProps))
                   :& (AlignSelf =:. (Nothing, venum alignSelfProps))
                   :& (Border =:: Nothing)
                   :& (Bottom =:: Nothing)
                   :& (Display =:: Nothing)
                   :& (Flex =:: Nothing)
                   :& (FlexFlow =:: Nothing)
                   :& (GridArea =:: Nothing)
                   :& (GridAutoColumns =:: Nothing)
                   :& (GridAutoFlow =:. (Nothing, venum gridAutoFlowProps))
                   :& (GridAutoRows =:: Nothing)
                   :& (GridColumn =:: Nothing)
                   :& (GridGap =:: Nothing)
                   :& (GridRow =:: Nothing)
                   :& (GridTemplateAreas =:: Nothing)
                   :& (GridTemplateColumns =:: Nothing)
                   :& (GridTemplateRows =:: Nothing)
                   :& (Height =:: Nothing)
                   :& (JustifyContent =:: Nothing)
                   :& (JustifyItems =:: Nothing)
                   :& (Left =:: Nothing)
                   :& (Margin =:: Nothing)
                   :& (MaxHeight =:: Nothing)
                   :& (MaxWidth =:: Nothing)
                   :& (MinHeight =:: Nothing)
                   :& (MinWidth =:: Nothing)
                   :& (Order =:: Nothing)
                   :& (Overflow =:. (Nothing, venum overflowProps))
                   :& (OverflowX =:. (Nothing, venum overflowProps))
                   :& (OverflowY =:. (Nothing, venum overflowProps))
                   :& (Padding =:: Nothing)
                   :& (Right =:: Nothing)
                   :& (Top =:: Nothing)
                   :& (Visibility =:. (Nothing, venum visibilityProps))
                   :& (Width =:: Nothing)
                   :& RNil
  where -- Validator: accept 'Nothing', or a 'Just' value that is a
        -- member of the allowed keyword list; otherwise throw an
        -- 'AssertionFailed' listing the legal values.
        venum :: [String] -> Maybe String -> IO (Maybe String)
        venum _ Nothing = return Nothing
        venum xs (Just f) = do
          unless (f `elem` xs) (Ex.throw $ Ex.AssertionFailed ("The value should be one of: " ++ intercalate ", " xs))
          return $ Just f
|
gibiansky/IHaskell
|
ihaskell-display/ihaskell-widgets/src/IHaskell/Display/Widgets/Layout/Types.hs
|
Haskell
|
mit
| 7,299
|
module Wyas.Types
( LispVal(..)
, LispError(..)
, ThrowsError
, trapError
, extractValue
) where
import Control.Monad.Except
import Text.ParserCombinators.Parsec (ParseError)
-- | A Scheme value.
data LispVal = Atom String
             | Bool Bool
             | Character Char
             | DottedList [LispVal] LispVal
             | List [LispVal]
             | Number Integer
             | String String

instance Show LispVal where
  show = showVal

-- | Render a value in Scheme's written syntax.
showVal :: LispVal -> String
showVal v = case v of
  Atom name        -> name
  Bool True        -> "#t"
  Bool False       -> "#f"
  Character c      -> show c
  Number n         -> show n
  DottedList hd tl -> "(" ++ unwordsShowVal hd ++ " . " ++ showVal tl ++ ")"
  List elems       -> "(" ++ unwordsShowVal elems ++ ")"
  String s         -> "\"" ++ s ++ "\""

-- | Space-separate the renderings of a list of values.
unwordsShowVal :: [LispVal] -> String
unwordsShowVal = unwords . map showVal
-- | Errors the interpreter can raise.
data LispError = NumArgs Integer [LispVal]
               | TypeMismatch String LispVal
               | Parser ParseError
               | BadSpecialForm String LispVal
               | NotFunction String String
               | UnboundVar String String
               | Default String

instance Show LispError where
  show = showError

-- | Human-readable rendering for each error constructor.
showError :: LispError -> String
showError (UnboundVar message varName) = concat [message, ": ", varName]
showError (BadSpecialForm message form) = concat [message, ": ", show form]
showError (NotFunction message func) = concat [message, ": ", func]
showError (NumArgs expected found) =
  concat ["Expected: ", show expected, " args; found values ", unwordsShowVal found]
showError (TypeMismatch expected found) =
  concat ["Invalid type: expected ", expected, " found, ", show found]
showError (Parser parseErr) = "Parse error at " ++ show parseErr
showError (Default message) = message
type ThrowsError = Either LispError
-- | Turn any error into its printed form, so the result is always 'Right'.
trapError :: ThrowsError String -> ThrowsError String
trapError action = catchError action (return . show)
-- | Extract the value from a successful computation.  Intended to be
-- called only after 'trapError' has been applied; calling it on a
-- 'Left' is a programming error and now fails with a descriptive
-- message instead of an opaque pattern-match failure.
extractValue :: ThrowsError a -> a
extractValue (Right val) = val
extractValue (Left err)  = error ("extractValue: unexpected error: " ++ show err)
|
saclark/wyas
|
src/Wyas/Types.hs
|
Haskell
|
mit
| 2,120
|
{-# LANGUAGE ScopedTypeVariables #-}
module Jabara.Util.MonthSpec (spec) where
import Data.Time.Calendar
import Jabara.Util.Month
import Test.Hspec
import Test.Hspec.QuickCheck (prop)
import Text.Read
spec :: Spec
spec = do
  describe "read month" $ do
    it "readMaybe \"\"" $ do
      -- the empty string must not parse as a Month
      let r::Maybe Month = readMaybe ""
      r `shouldBe` Nothing
    prop "read test by QuickCheck" test_read
-- | Property: any string either fails to parse as a 'Month', or
-- parses to a date whose month number lies in 1..12.
test_read :: String -> Bool
test_read s = case readMaybe s :: Maybe Month of
  Nothing -> True
  Just m  -> case toGregorian $ mDay m of
    (_, mon, _) -> 1 <= mon && mon <= 12
|
jabaraster/jabara-util
|
test/Jabara/Util/MonthSpec.hs
|
Haskell
|
mit
| 701
|
module Y2018.M07.D03.Exercise where
{--
Yesterday, we translated JSON that was a mapping of String -> String to a
Codex that was a mapping Vertex -> Relations
where Relations was Map Vertex (Vertex, Strength) and we learned that the
keys of the relations were really just indices of the arrayed (Vertex,Strength)
pairing.
Our PhD confirmed this fact, and said he did it this way because, and I quote:
"I'm an idiot."
An honest PhD. What did I do to deserve this honor today?
So, today, let's upload the codex to a graph database. I'm partial to neo4j,
but you can choose any of them, even d3js or the sigma graphing library or
whatever works for you.
I'm easy.
--}
-- below imports available via 1HaskellADay git repository
import Data.Relation
import Graph.Query
import Y2018.M06.D27.Exercise
-- | Exercise stub: upload the codex to a graph database (unimplemented).
graphMeBAYBEE :: Codex -> IO ()
graphMeBAYBEE codex = undefined
{--
graphMeBAYBEE takes the codex you created in Y2018.M06.D27 and returns
(in the real world) a graph database, it does this by converting the codex
to a set of Relation values.
>>> honk <- readMap (exDir ++ honkin)
>>> codex = mapping2Codex honk
--}
-- | Exercise stub: convert the codex into graph relations (unimplemented).
codex2Rels :: Codex -> [Relation Vert Arr Vert]
codex2Rels codex = undefined
-- of course, you need the vert and arr types
data Vert = V Vertex
data Arr = E Strength
-- both of which have to be defined as instances in the Relation domain
instance Node Vert where
   asNode vert = undefined
instance Edge Arr where
   asEdge arr = undefined
-- from this we should be able the Cyph the codex. What do you get?
|
geophf/1HaskellADay
|
exercises/HAD/Y2018/M07/D03/Exercise.hs
|
Haskell
|
mit
| 1,543
|
module Tools.BlankChopperSpec (main, spec) where
import Test.Hspec
import Tools.BlankChopper
-- | Standalone test-suite entry point.
main :: IO ()
main = hspec spec
-- | Table-driven specs for 'chop': each sample pairs an input string
-- with the expected token list.
spec :: Spec
spec = do
  describe "chop" $ do
    context "breaks on spaces" $ do
      itChopsAsSpecSamples
        [ ("x y", ["x", "y"])
        , ("a b", ["a", "b"])
        , (" m n ", ["m", "n"])
        , ("more then one", ["more", "then", "one"])
        , (" blanked front", ["blanked", "front"])
        , ("tails should be empty ", ["tails", "should", "be", "empty"])
        , (" shrouded with void ", ["shrouded", "with", "void"])
        ]
    context "breaks on tabs" $ do
      itChopsAsSpecSamples
        [ ("t\tt", ["t", "t"])
        , ("q\t\t\ty", ["q", "y"])
        , ("\ttabs\tevery\twhere\t", ["tabs", "every", "where"])
        , ("\t\t\t\t\t\tmuch\ttabs\tat\tstart", ["much", "tabs", "at", "start"])
        , ("only\tend\tis\ttabed\t\t\t\t", ["only", "end", "is", "tabed"])
        , ("\t\t\t\tlike\tfog\twithin\ttabs\t\t\t\t", ["like", "fog", "within", "tabs"])
        ]
    context "breaks on new lines" $ do
      itChopsAsSpecSamples
        [ ("1\n2", ["1", "2"])
        , ("\n4\n3\n2\n1\n", ["4", "3", "2", "1"])
        , ("\n\n\n\nmagic\n\n\nnumber\n\n\n\nseven\n\n", ["magic", "number", "seven"])
        ]
  -- Generate one `it` case per (input, expected) sample.
  where'd be wrong here; helpers are top-level bindings below.
|
DominikJaniec/LearnHaskell
|
problems/calculator/test/Tools/BlankChopperSpec.hs
|
Haskell
|
mit
| 1,578
|
module Handler.CacheSpec (spec) where
import TestImport
spec :: Spec
spec = withApp $ do
  describe "getCacheR" $ do
    -- Placeholder: fails when the spec tree is built, until a real
    -- test for getCacheR is written.
    error "Spec not implemented: getCacheR"
|
swamp-agr/carbuyer-advisor
|
test/Handler/CacheSpec.hs
|
Haskell
|
mit
| 171
|
{-# LANGUAGE
TypeFamilies,
KindSignatures,
ConstraintKinds,
ExplicitNamespaces,
GADTs,
TypeOperators,
DataKinds,
RankNTypes,
AllowAmbiguousTypes,
RecordWildCards
#-}
module ConstraintWitness.Internal (
(:~:)(..),
Witness,
canonicalWitness,
expose, useWitness
) where
import Data.Type.Equality ((:~:)(..))
import Data.HList
import GHC.Prim (Constraint)
-- | Magical type family that turns a constraint into a (value-level) witness.
-- The rules are:
-- Witness () -> ()
-- Witness (a ~ b) -> a :~: b
-- Witness <a typeclass constraint> -> <a dictionary for that typeclass>
-- Witness (?p :: a) -> a
-- Witness (x, y, ..., z) -> HList '[Witness x, Witness y, .., Witness z]
type family Witness (ct :: Constraint) :: * where
    -- NOTE(review): only this bogus equation is visible here; the
    -- remaining rules are presumably supplied by the compiler plugin
    -- (see 'canonicalWitness') — confirm.
    Witness () = () -- Bogus equation so GHC doesn't choke
-- class IsTypeClass (ct :: Constraint) where
-- type Dict ct :: *
--
-- class HasClasses (cts :: [Constraint]) where
-- type Dicts cts :: [*]
--
-- instance HasClasses '[] where type Dicts '[] = '[]
--
-- instance {-# OVERLAPS #-} (IsTypeClass ct, HasClasses cts) => HasClasses (ct ': cts) where
-- type Dicts (ct ': cts) = Dict ct ': Dicts cts
--
-- instance HasClasses cts => HasClasses (ct ': cts) where type Dicts (ct ': cts) = Dicts cts
-- | Tries to provide a canonical witness for the given constraint. This is:
-- - () for empty constraints
-- - Refl for equality constraints
-- - the dictionary for typeclass constraints
-- - the implicit parameter's value for ImplicitParams
-- - a HList of the component constraints' canonical witnesses for conjoined constraints.
-- It's implemented by CoreToCore magic, so we leave it as `undefined` here and make sure
-- it won't be inlined so that we can still find it in the CoreToCore passes.
canonicalWitness :: forall (ct :: Constraint). ct => Witness ct
canonicalWitness = undefined -- implemented by compiler plug in
{-# NOINLINE canonicalWitness #-}

-- | Transforms a constraint into a witness-value argument, *exposing* it.
-- NOTE(review): body is a placeholder; presumably also rewritten by the
-- CoreToCore plugin like 'canonicalWitness' — confirm.
expose :: forall (ct :: Constraint) a. (ct => a) -> (Witness ct -> a)
expose thing witness = undefined

-- | Alias of expose, with the arguments flipped. This is mostly useful as an argument to higher-order functions.
-- NOTE(review): placeholder body, same caveat as 'expose'.
useWitness :: forall (ct :: Constraint) a. Witness ct -> (ct => a) -> a
useWitness witness thing = undefined
-- | A pair of mutually inverse functions between @a@ and @b@.
data Isomorphism a b = Iso {appIso :: a -> b, appRevIso :: b -> a}

-- | Package a forward and a backward function as an 'Isomorphism'.
mkIso :: (a -> b) -> (b -> a) -> Isomorphism a b
mkIso fwd bwd = Iso fwd bwd

-- | Flip the direction of an isomorphism.
revIso :: Isomorphism a b -> Isomorphism b a
revIso (Iso fwd bwd) = Iso bwd fwd
|
Solonarv/constraint-witness
|
plugin/ConstraintWitness/Internal.hs
|
Haskell
|
mit
| 2,684
|
-- Project Euler Problem 22 - names scores
--
-- Sum of letter values in names weighted by position in list
--
-- import Data.String
import Data.List
-- | Letter values: 'A' -> 1 .. 'Z' -> 26.
alph :: [(Char, Int)]
alph = zip ['A'..'Z'] [1..26]

-- | Extract the value from a 'Just'.  A 'Nothing' is a programming
-- error (a character outside A-Z); it now fails with a clear message
-- instead of an opaque pattern-match failure.
elim_just :: Maybe a -> a
elim_just (Just a) = a
elim_just Nothing  = error "elim_just: Nothing (character not in A-Z table)"

-- | Sum of the letter values of a word (expects uppercase A-Z only).
wordscore :: String -> Int
wordscore x = sum [ elim_just (lookup a alph) | a <- x ]
-- str = "\"MARY\",\"PATRICIA\",\"BOB\""
-- | Split a list on a delimiter.  The delimiter is dropped; an empty
-- input yields no fields, e.g. @split ',' "a,b" == ["a","b"]@.
split :: (Eq c) => c -> [c] -> [[c]]
split _ [] = []
split d x = a : if null b then [] else split d (tail b)
    where (a, b) = span (/= d) x   -- b, if non-empty, starts with d

-- | Remove every occurrence of an element from a list.
delfromlist :: (Eq c) => c -> [c] -> [c]
delfromlist d = filter (/= d)

-- | Strip the quotes from the raw names file, split on commas, sort.
sorted_list :: String -> [String]
sorted_list str = sort (split ',' (delfromlist '\"' str))
-- | Total score of an indexed name list: each name's word score
-- weighted by its (1-based) position in the list.
para_score :: [(Int, String)] -> Int
para_score = sum . map (\(pos, name) -> pos * wordscore name)
main = do
    -- print (words (replace "\"" "" (replace "," " " str)))
    -- print (zip [1..] sorted_list)
    -- Read the names file, then score the sorted names weighted by
    -- their 1-based positions (Project Euler problem 22).
    str <- readFile "p022_names.txt"
    print (para_score (zip [1..] (sorted_list str)))
|
yunwilliamyu/programming-exercises
|
project_euler/p022_names_scores.hs
|
Haskell
|
cc0-1.0
| 978
|
{-
Copyrights (c) 2016. Samsung Electronics Ltd. All right reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-}
{-# LANGUAGE ImplicitParams, TupleSections #-}
module MiniNet.MiniNet (generateMininetTopology, NodeMap) where
import Text.JSON
import Data.Maybe
import Control.Monad.State
import Data.List
import Numeric
import Topology
import Util
import Syntax
-- Horizontal / vertical spacing between rendered nodes, in layout units.
hstep = 100
vstep = 100

type Switches = [JSValue]
type Hosts = [JSValue]
-- | Maps each node instance to its generated Mininet name (e.g. "s0", "h3").
type NodeMap = [(InstanceDescr, String)]
-- | Render a refined topology as a Mininet JSON topology string,
-- also returning the mapping from node instances to Mininet names.
generateMininetTopology :: Refine -> Topology -> (String, NodeMap)
generateMininetTopology r topology = (encode $ toJSObject attrs, nmap)
    where -- max number of nodes in a layer, scaled to layout width
          width = (maximum $ map (length . (uncurry instMapFlatten)) topology) * hstep
          -- render nodes, accumulating switches, hosts and the name map
          (sws, hs, nmap) = execState (mapIdxM (renderNodes width) topology) ([],[],[])
          -- render links (renderLink needs ?r and ?t implicit params)
          links = let ?r = r in
                  let ?t = topology in
                  mapMaybe (renderLink nmap) $ topologyLinks topology
          attrs = [ ("controllers", JSArray [])
                  , ("hosts"      , JSArray hs)
                  , ("switches"   , JSArray sws)
                  , ("links"      , JSArray links)
                  , ("version"    , JSRational False 2)
                  ]
-- | Lay out all instances of one topology layer evenly across width
-- @w@; @voffset@ is the layer's row index.
-- NOTE(review): divides by @length nodes@ — assumes every layer is
-- non-empty; confirm upstream guarantees this.
renderNodes :: Int -> (Node, InstanceMap PortLinks) -> Int -> State (Switches, Hosts, NodeMap) ()
renderNodes w (n, imap) voffset = do
    let nodes = instMapFlatten n imap
        offset = (w `div` length nodes) `div` 2   -- center first node in its slot
        step = w `div` length nodes
        nodeoff = mapIdx (\nd i -> (nd, offset + i * step)) nodes
    mapM_ (renderNode voffset n) nodeoff
-- | Render a single node as a Mininet JSON object and record its
-- generated name.  Switches are named "s<count>", hosts "h<count>",
-- numbered by how many of each kind were rendered so far.
renderNode :: Int -> Node -> ((InstanceDescr, PortLinks), Int) -> State (Switches, Hosts, NodeMap) ()
renderNode voffset node ((descr, _), hoffset) = do
    (sws, hs, nmap) <- get
    let (letter, number) = if' (nodeType node == NodeSwitch) ("s", length sws) ("h", length hs)
        ndname = letter ++ show number
        -- Hosts additionally get ip4/mac options extracted from their
        -- instance keys: a struct key becomes "ip4", a 48-bit int key
        -- becomes "mac" (optionally followed by an ip4 struct key).
        opts = [ ("controllers", JSArray [])
               , ("hostname"   , JSString $ toJSString ndname)
               , ("nodeNum"    , JSRational False $ fromIntegral number)
               , ("switchType" , JSString $ toJSString "bmv2")] ++
               if (nodeType node == NodeHost) && (not $ null $ idescKeys descr)
                  then case head $ idescKeys descr of
                            e@(EStruct _ _ _) -> [("ip4", JSString $ toJSString $ formatIP e)]
                            (EInt _ 48 m)     -> [("mac", JSString $ toJSString $ formatMAC m)] ++
                                                 if length (idescKeys descr) >= 2
                                                    then case idescKeys descr !! 1 of
                                                              e@(EStruct _ _ _) -> [("ip4", JSString $ toJSString $ formatIP e)]
                                                              _                 -> []
                                                    else []
                            _                 -> []
                  else []
        -- Position: x from the horizontal slot, y from the layer row.
        attrs = [ ("number", JSString $ toJSString $ show number)
                , ("opts"  , JSObject $ toJSObject opts)
                , ("x"     , JSString $ toJSString $ show $ hoffset)
                , ("y"     , JSString $ toJSString $ show $ (voffset + 1) * vstep)]
        n = JSObject $ toJSObject attrs
        nmap' = (descr, ndname):nmap
    put $ if' (nodeType node == NodeSwitch) ((n:sws), hs, nmap') (sws, (n:hs), nmap')
-- | Format a struct of integer fields as a dotted string (e.g. an
-- IPv4 address); any other expression shape is an internal error.
formatIP :: Expr -> String
formatIP (EStruct _ _ fs) = intercalate "." $ map (show . exprIVal) fs
formatIP e = error $ "MiniNet.formatIP " ++ show e
-- | Format a 48-bit integer as a colon-separated hex MAC address,
-- most significant byte (bits 47..40) first.
-- NOTE(review): 'showHex' does not zero-pad, so bytes below 0x10
-- render as a single hex digit — confirm consumers accept that.
formatMAC :: Integer -> String
formatMAC i =
    ( showHex b0 . colon . showHex b1 . colon . showHex b2 . colon
    . showHex b3 . colon . showHex b4 . colon . showHex b5) ""
    where colon = showString ":"
          b5 = bitSlice i 7 0    -- least significant byte
          b4 = bitSlice i 15 8
          b3 = bitSlice i 23 16
          b2 = bitSlice i 31 24
          b1 = bitSlice i 39 32
          b0 = bitSlice i 47 40  -- most significant byte
-- | Render one topology link as a Mininet JSON object.  Returns
-- 'Nothing' when the destination is not a port, or when this is the
-- duplicate direction of a bidirectional link (only the direction
-- with the lexicographically smaller (name, port) endpoint is kept).
renderLink :: (?t::Topology,?r::Refine) => NodeMap -> (PortInstDescr, PortInstDescr) -> Maybe JSValue
renderLink nmap (srcport, dstport) = if isPort ?r $ pdescPort dstport
                                        then if (srcndname, srcpnum) < (dstndname,dstpnum)
                                                then Just $ JSObject $ toJSObject attrs
                                                else Nothing
                                        else Nothing
    where dstnode = nodeFromPort ?r dstport
          srcnode = nodeFromPort ?r srcport
          -- NOTE(review): fromJust assumes both endpoints were rendered
          -- into nmap — confirm all linked nodes appear in the topology.
          srcndname = fromJust $ lookup srcnode nmap
          dstndname = fromJust $ lookup dstnode nmap
          dstpnum = phyPortNum ?t dstnode (pdescPort dstport) (fromInteger $ exprIVal $ last $ pdescKeys dstport)
          srcpnum = phyPortNum ?t srcnode (pdescPort srcport) (fromInteger $ exprIVal $ last $ pdescKeys srcport)
          attrs = [ ("src"     , JSString $ toJSString srcndname)
                  , ("srcport" , JSRational False $ fromIntegral $ srcpnum)
                  , ("dest"    , JSString $ toJSString dstndname)
                  , ("destport", JSRational False $ fromIntegral dstpnum)
                  , ("opts"    , JSObject $ toJSObject ([]::[(String, JSValue)]))]
|
ryzhyk/cocoon
|
cocoon/MiniNet/MiniNet.hs
|
Haskell
|
apache-2.0
| 5,845
|
-----------------------------------------------------------------------------
-- Copyright 2019, Ideas project team. This file is distributed under the
-- terms of the Apache License 2.0. For more information, see the files
-- "LICENSE.txt" and "NOTICE.txt", which are included in the distribution.
-----------------------------------------------------------------------------
-- |
-- Maintainer : bastiaan.heeren@ou.nl
-- Stability : provisional
-- Portability : portable (depends on ghc)
--
-- Run a feedbackscript
--
-----------------------------------------------------------------------------
module Ideas.Service.FeedbackScript.Run
( Script
, Environment(..), newEnvironment
, feedbackDiagnosis, feedbackHint, feedbackHints
, ruleToString, feedbackIds, attributeIds, conditionIds
, eval
) where
import Data.List
import Data.Maybe
import Ideas.Common.Library hiding (ready, Environment)
import Ideas.Service.BasicServices
import Ideas.Service.Diagnose
import Ideas.Service.FeedbackScript.Syntax
import Ideas.Service.State
-- | Everything a feedback script can refer to when generating text.
data Environment a = Env
   { oldReady   :: Bool                       -- ^ previous state was already finished
   , expected   :: Maybe (Rule (Context a))   -- ^ rule the strategy expects next
   , recognized :: Maybe (Rule (Context a))   -- ^ rule recognized in the student's step
   , motivation :: Maybe (Rule (Context a))
   , diffPair   :: Maybe (String, String)     -- ^ textual difference (before, after)
   , before     :: Maybe Term                 -- ^ current state as a term
   , after      :: Maybe Term                 -- ^ state after the expected step
   , afterText  :: Maybe String               -- ^ pretty-printed state after the step
   }
-- | Build an environment from a state; the expected next step comes
-- from 'onefirst' (a failure there just leaves it empty).
newEnvironment :: State a -> Maybe (Rule (Context a)) -> Environment a
newEnvironment st motivationRule = newEnvironmentFor st motivationRule next
 where
   next = either (const Nothing) Just (onefirst st)
-- | Build an environment from a state, a motivation rule, and an
-- optional precomputed next step (expected rule plus resulting state).
newEnvironmentFor :: State a -> Maybe (Rule (Context a)) -> Maybe ((Rule (Context a), b, c), State a) -> Environment a
newEnvironmentFor st motivationRule next = Env
   { oldReady   = finished st
   , expected   = fmap (\((x,_,_),_) -> x) next
   , motivation = motivationRule
   , recognized = Nothing
   , diffPair   = Nothing
   , before     = f st
   , after      = fmap snd next >>= f
   , afterText  = fmap snd next >>= g
   }
 where
   -- convert a state's term to a Term, when the exercise has a term view
   f s = fmap (`build` stateTerm s) (hasTermView (exercise s))
   -- pretty-print a state's term with the exercise's printer
   g s = return $ prettyPrinter (exercise s) (stateTerm s)
-- | Evaluate a piece of script text in an environment.
toText :: Environment a -> Script -> Text -> Maybe Text
toText env script = eval env script . Right
-- | Render a rule via the script (by its identifier); falls back to
-- 'showId' when the script has no entry for it.
ruleToString :: Environment a -> Script -> Rule b -> String
ruleToString env script r =
   let f = eval env script . Left . getId
   in maybe (showId r) show (f r)
-- | Core script evaluator: resolve an identifier (Left) or a text
-- template (Right) against the script's declarations, using the
-- environment to fill in attribute references and test conditions.
eval :: Environment a -> Script -> Either Id Text -> Maybe Text
eval env script = either (return . findIdRef) evalText
 where
   -- Expand every reference inside a text template.
   evalText :: Text -> Maybe Text
   evalText = fmap mconcat . mapM unref . textItems
    where
      -- Built-in attribute references draw from the environment;
      -- anything else is looked up in the script's declarations.
      unref (TextRef a)
         | a == expectedId    = fmap (findIdRef . getId) (expected env)
         | a == recognizedId  = fmap (findIdRef . getId) (recognized env)
         | a == diffbeforeId  = fmap (TextString . fst) (diffPair env)
         | a == diffafterId   = fmap (TextString . snd) (diffPair env)
         | a == beforeId      = fmap TextTerm (before env)
         | a == afterId       = fmap TextTerm (after env)
         | a == afterTextId   = fmap TextString (afterText env)
         | a == motivationId  = fmap (findIdRef . getId) (motivation env)
         | otherwise          = findRef (==a)
      unref t = Just t
   -- Evaluate a script condition against the environment.
   evalBool :: Condition -> Bool
   evalBool (RecognizedIs a) = maybe False (eqId a . getId) (recognized env)
   evalBool (MotivationIs a) = maybe False (eqId a . getId) (motivation env)
   evalBool (CondNot c)   = not (evalBool c)
   evalBool (CondConst b) = b
   evalBool (CondRef a)
      | a == oldreadyId         = oldReady env
      | a == hasexpectedId      = isJust (expected env)
      | a == hasrecognizedId    = isJust (recognized env)
      | a == hasmotivationId    = isJust (motivation env)
      | a == recognizedbuggyId  = maybe False isBuggy (recognized env)
      | otherwise               = False
   -- All namespaces declared in the script (plus the empty one).
   namespaces = nub $ mempty : [ a | NameSpace as <- scriptDecls script, a <- as ]
   -- equality with namespaces
   eqId :: Id -> Id -> Bool
   eqId a b = any (\n -> n#a == b) namespaces
   -- Resolve an id; fall back to its printed form when undeclared.
   findIdRef :: Id -> Text
   findIdRef x = fromMaybe (TextString (showId x)) (findRef (`eqId` x))
   -- First declaration whose id matches and whose guard holds.
   findRef :: (Id -> Bool) -> Maybe Text
   findRef p = listToMaybe $ catMaybes
      [ evalText t
      | (as, c, t) <- allDecls
      , any p as && evalBool c
      ]
   -- Flatten simple and guarded declarations to (ids, guard, text).
   allDecls =
      let f (Simple _ as t)   = [ (as, CondConst True, t) ]
          f (Guarded _ as xs) = [ (as, c, t) | (c, t) <- xs ]
          f _ = []
      in concatMap f (scriptDecls script)
-- | Turn a diagnosis into feedback text, dispatching on the kind of
-- diagnosis and recording the recognized rule in the environment.
feedbackDiagnosis :: Diagnosis a -> Environment a -> Script -> Text
feedbackDiagnosis diagnosis env =
   case diagnosis of
      SyntaxError s    -> const (makeText s)
      Buggy _ r        -> makeWrong "buggy" env {recognized = Just r}
      NotEquivalent s  -> makeNotEq s "noteq" env
      Expected _ _ r   -> makeOk "ok" env {recognized = Just r}
      WrongRule _ _ mr -> makeWrong "wrongrule" env {recognized = mr}
      Similar _ _ mr   -> makeOk "same" env {recognized = mr}
      Detour _ _ _ r   -> makeOk "detour" env {recognized = Just r}
      Correct _ _      -> makeOk "correct" env
      Unknown _ _      -> makeOk "unknown" env
 where
   -- Default texts are used when the script has no matching entry.
   makeOk      = makeDefault "Well done!"
   makeWrong   = makeDefault "This is incorrect."
   makeNotEq s = if null s then makeWrong else makeDefault s
   makeDefault dt s e = fromMaybe (TextString dt) . make (newId s) e
-- | Hint text for a feedback id, falling back to the default hint.
feedbackHint :: Id -> Environment a -> Script -> Text
feedbackHint feedbackId env script =
   fromMaybe (defaultHint env script) $ make feedbackId env script
-- | One hint per possible next step, each evaluated in its own
-- environment built from that step.
feedbackHints :: Id -> [((Rule (Context a), b, c), State a)] -> State a -> Maybe (Rule (Context a)) -> Script -> [Text]
feedbackHints feedbackId nexts state motivationRule script =
   map (\env -> fromMaybe (defaultHint env script) $
        make feedbackId env script) envs
 where
   envs = map (newEnvironmentFor state motivationRule . Just) nexts
-- | Fallback hint: name the expected rule, or apologize.
defaultHint :: Environment a -> Script -> Text
defaultHint env script = makeText $
   case expected env of
      Just r  -> ruleToString env script r
      Nothing -> "Sorry, no hint available."
-- | Look up a feedback id in the script and evaluate its text.
make :: Id -> Environment a -> Script -> Maybe Text
make feedbackId env script = toText env script (TextRef feedbackId)
feedbackIds :: [Id]
feedbackIds = map newId
["same", "noteq", "correct", "unknown", "ok", "buggy", "detour", "wrongrule", "hint", "step", "label"]
attributeIds :: [Id]
attributeIds =
[expectedId, recognizedId, diffbeforeId, diffafterId, beforeId, afterId, afterTextId, motivationId]
conditionIds :: [Id]
conditionIds = [oldreadyId, hasexpectedId, hasrecognizedId, hasmotivationId, recognizedbuggyId]
expectedId, recognizedId, diffbeforeId, diffafterId, beforeId, afterId, afterTextId, motivationId :: Id
expectedId = newId "expected"
recognizedId = newId "recognized"
diffbeforeId = newId "diffbefore"
diffafterId = newId "diffafter"
beforeId = newId "before"
afterId = newId "after"
afterTextId = newId "aftertext"
motivationId = newId "motivation"
oldreadyId, hasexpectedId, hasrecognizedId, hasmotivationId, recognizedbuggyId :: Id
oldreadyId = newId "oldready"
hasexpectedId = newId "hasexpected"
hasrecognizedId = newId "hasrecognized"
hasmotivationId = newId "hasmotivation"
recognizedbuggyId = newId "recognizedbuggy"
|
ideas-edu/ideas
|
src/Ideas/Service/FeedbackScript/Run.hs
|
Haskell
|
apache-2.0
| 7,515
|
-------------------------------------------------------------------------------
-- Experimental test for evaluating Queues performance
--
-- Data Structures. Grado en Informática. UMA.
-- Pepe Gallardo, 2012
-------------------------------------------------------------------------------
module Demos.Queue.QueuesPerformance where
import DataStructures.Util.Random
import DataStructures.Queue.TwoListsQueue -- LinearQueue
import System.CPUTime
-- | The two queue operations exercised by the benchmark.
data Operation = Enqueue | Dequeue

-- on average, do 2 enqueues for each dequeue
randomOperations :: Seed -> [Operation]
randomOperations s = randomsIn [Enqueue, Enqueue, Dequeue] s
-- forces queue evaluation by summing its elements, front to back
sumQ :: (Num a) => Queue a -> a
sumQ q
  | isEmpty q = 0
  | otherwise = first q + sumQ (dequeue q)
-- | Run @n@ random operations from the empty queue and force the
-- result by summing its (all-zero) elements.
test :: Seed -> Int -> Int
test s n = sumQ (foldr simulate empty (take n (randomOperations s)))
-- | Apply one operation; a dequeue on an empty queue is a no-op.
simulate :: Operation -> Queue Int -> Queue Int
simulate Enqueue q = enqueue 0 q
simulate Dequeue q = if isEmpty q then q else dequeue q
main = do
  let tests = 10
  let numOperations = 10000
  -- Time all runs together and report the per-run average.
  t0 <- getCPUTime
  let xs = [ test s numOperations | s <- [0..tests-1]]
  print (sum xs) -- force evaluation
  t1 <- getCPUTime
  let average = toSecs (t1-t0) / fromIntegral tests
  putStrLn ("Tests took "++ show average ++ " secs on average")
-- | Convert a CPU-time measurement (picoseconds) to seconds.
toSecs :: Integer -> Double
toSecs picos = fromIntegral picos / 1.0e12
|
Saeron/haskell
|
data.structures/haskell/Demos/Queue/QueuesPerformance.hs
|
Haskell
|
apache-2.0
| 1,417
|
-----------------------------------------------------------------------------
-- |
-- Module : Text.PrettyPrint.HughesPJ
-- Copyright : (c) The University of Glasgow 2001
-- License : BSD-style (see the file libraries/base/LICENSE)
--
-- Maintainer : libraries@haskell.org
-- Stability : provisional
-- Portability : portable
--
-- John Hughes's and Simon Peyton Jones's Pretty Printer Combinators
--
-- Based on /The Design of a Pretty-printing Library/
-- in Advanced Functional Programming,
-- Johan Jeuring and Erik Meijer (eds), LNCS 925
-- <http://www.cs.chalmers.se/~rjmh/Papers/pretty.ps>
--
-- Heavily modified by Simon Peyton Jones, Dec 96
--
-----------------------------------------------------------------------------
{-
Version 3.0 28 May 1997
* Cured massive performance bug. If you write
foldl <> empty (map (text.show) [1..10000])
you get quadratic behaviour with V2.0. Why? For just the same
reason as you get quadratic behaviour with left-associated (++)
chains.
This is really bad news. One thing a pretty-printer abstraction
        should certainly guarantee is insensitivity to associativity.  It
matters: suddenly GHC's compilation times went up by a factor of
100 when I switched to the new pretty printer.
I fixed it with a bit of a hack (because I wanted to get GHC back
on the road). I added two new constructors to the Doc type, Above
and Beside:
<> = Beside
$$ = Above
Then, where I need to get to a "TextBeside" or "NilAbove" form I
"force" the Doc to squeeze out these suspended calls to Beside and
Above; but in so doing I re-associate. It's quite simple, but I'm
not satisfied that I've done the best possible job. I'll send you
the code if you are interested.
* Added new exports:
punctuate, hang
int, integer, float, double, rational,
lparen, rparen, lbrack, rbrack, lbrace, rbrace,
* fullRender's type signature has changed. Rather than producing a
string it now takes an extra couple of arguments that tells it how
to glue fragments of output together:
fullRender :: Mode
-> Int -- Line length
-> Float -- Ribbons per line
-> (TextDetails -> a -> a) -- What to do with text
-> a -- What to do at the end
-> Doc
-> a -- Result
The "fragments" are encapsulated in the TextDetails data type:
data TextDetails = Chr Char
| Str String
| PStr FAST_STRING
The Chr and Str constructors are obvious enough. The PStr
constructor has a packed string (FAST_STRING) inside it. It's
generated by using the new "ptext" export.
An advantage of this new setup is that you can get the renderer to
do output directly (by passing in a function of type (TextDetails
-> IO () -> IO ()), rather than producing a string that you then
print.
Version 2.0 24 April 1997
* Made empty into a left unit for <> as well as a right unit;
it is also now true that
nest k empty = empty
which wasn't true before.
    * Fixed an obscure bug in sep that occasionally gave very weird behaviour
* Added $+$
* Corrected and tidied up the laws and invariants
======================================================================
Relative to John's original paper, there are the following new features:
1. There's an empty document, "empty". It's a left and right unit for
both <> and $$, and anywhere in the argument list for
sep, hcat, hsep, vcat, fcat etc.
It is Really Useful in practice.
2. There is a paragraph-fill combinator, fsep, that's much like sep,
only it keeps fitting things on one line until it can't fit any more.
3. Some random useful extra combinators are provided.
<+> puts its arguments beside each other with a space between them,
unless either argument is empty in which case it returns the other
hcat is a list version of <>
hsep is a list version of <+>
vcat is a list version of $$
sep (separate) is either like hsep or like vcat, depending on what fits
        cat behaves like sep, but it uses <> for horizontal composition
        fcat behaves like fsep, but it uses <> for horizontal composition
These new ones do the obvious things:
char, semi, comma, colon, space,
parens, brackets, braces,
quotes, doubleQuotes
4. The "above" combinator, $$, now overlaps its two arguments if the
last line of the top argument stops before the first line of the
second begins.
For example: text "hi" $$ nest 5 (text "there")
lays out as
hi there
rather than
hi
there
There are two places this is really useful
a) When making labelled blocks, like this:
Left -> code for left
Right -> code for right
LongLongLongLabel ->
code for longlonglonglabel
The block is on the same line as the label if the label is
short, but on the next line otherwise.
b) When laying out lists like this:
[ first
, second
, third
]
which some people like. But if the list fits on one line
you want [first, second, third]. You can't do this with
John's original combinators, but it's quite easy with the
new $$.
The combinator $+$ gives the original "never-overlap" behaviour.
5. Several different renderers are provided:
* a standard one
* one that uses cut-marks to avoid deeply-nested documents
simply piling up in the right-hand margin
* one that ignores indentation (fewer chars output; good for machines)
* one that ignores indentation and newlines (ditto, only more so)
6. Numerous implementation tidy-ups
Use of unboxed data types to speed up the implementation
-}
module Text.PrettyPrint.HughesPJ (
-- * The document type
Doc, -- Abstract
-- * Constructing documents
-- ** Converting values into documents
char, text, ptext,
int, integer, float, double, rational,
-- ** Simple derived documents
semi, comma, colon, space, equals,
lparen, rparen, lbrack, rbrack, lbrace, rbrace,
-- ** Wrapping documents in delimiters
parens, brackets, braces, quotes, doubleQuotes,
-- ** Combining documents
empty,
(<>), (<+>), hcat, hsep,
($$), ($+$), vcat,
sep, cat,
fsep, fcat,
nest,
hang, punctuate,
-- * Predicates on documents
isEmpty,
-- * Rendering documents
-- ** Default rendering
render,
-- ** Rendering with a particular style
Style(..),
style,
renderStyle,
-- ** General rendering
fullRender,
Mode(..), TextDetails(..),
) where
import Prelude
infixl 6 <>
infixl 6 <+>
infixl 5 $$, $+$
-- ---------------------------------------------------------------------------
-- The interface
-- The primitive Doc values
isEmpty :: Doc -> Bool; -- ^ Returns 'True' if the document is empty
-- | The empty document, with no height and no width.
-- 'empty' is the identity for '<>', '<+>', '$$' and '$+$', and anywhere
-- in the argument list for 'sep', 'hcat', 'hsep', 'vcat', 'fcat' etc.
empty :: Doc
semi :: Doc; -- ^ A ';' character
comma :: Doc; -- ^ A ',' character
colon :: Doc; -- ^ A ':' character
space :: Doc; -- ^ A space character
equals :: Doc; -- ^ A '=' character
lparen :: Doc; -- ^ A '(' character
rparen :: Doc; -- ^ A ')' character
lbrack :: Doc; -- ^ A '[' character
rbrack :: Doc; -- ^ A ']' character
lbrace :: Doc; -- ^ A '{' character
rbrace :: Doc; -- ^ A '}' character
-- | A document of height and width 1, containing a literal character.
char :: Char -> Doc
-- | A document of height 1 containing a literal string.
-- 'text' satisfies the following laws:
--
-- * @'text' s '<>' 'text' t = 'text' (s'++'t)@
--
-- * @'text' \"\" '<>' x = x@, if @x@ non-empty
--
-- The side condition on the last law is necessary because @'text' \"\"@
-- has height 1, while 'empty' has no height.
text :: String -> Doc
-- | An obsolete function, now identical to 'text'.
ptext :: String -> Doc
int :: Int -> Doc; -- ^ @int n = text (show n)@
integer :: Integer -> Doc; -- ^ @integer n = text (show n)@
float :: Float -> Doc; -- ^ @float n = text (show n)@
double :: Double -> Doc; -- ^ @double n = text (show n)@
rational :: Rational -> Doc; -- ^ @rational n = text (show n)@
parens :: Doc -> Doc; -- ^ Wrap document in @(...)@
brackets :: Doc -> Doc; -- ^ Wrap document in @[...]@
braces :: Doc -> Doc; -- ^ Wrap document in @{...}@
quotes :: Doc -> Doc; -- ^ Wrap document in @\'...\'@
doubleQuotes :: Doc -> Doc; -- ^ Wrap document in @\"...\"@
-- Combining @Doc@ values
-- | Beside.
-- '<>' is associative, with identity 'empty'.
(<>) :: Doc -> Doc -> Doc
-- | Beside, separated by space, unless one of the arguments is 'empty'.
-- '<+>' is associative, with identity 'empty'.
(<+>) :: Doc -> Doc -> Doc
-- | Above, except that if the last line of the first argument stops
-- at least one position before the first line of the second begins,
-- these two lines are overlapped. For example:
--
-- > text "hi" $$ nest 5 (text "there")
--
-- lays out as
--
-- > hi there
--
-- rather than
--
-- > hi
-- > there
--
-- '$$' is associative, with identity 'empty', and also satisfies
--
-- * @(x '$$' y) '<>' z = x '$$' (y '<>' z)@, if @y@ non-empty.
--
($$) :: Doc -> Doc -> Doc
-- | Above, with no overlapping.
-- '$+$' is associative, with identity 'empty'.
($+$) :: Doc -> Doc -> Doc
hcat :: [Doc] -> Doc; -- ^List version of '<>'.
hsep :: [Doc] -> Doc; -- ^List version of '<+>'.
vcat :: [Doc] -> Doc; -- ^List version of '$$'.
cat :: [Doc] -> Doc; -- ^ Either 'hcat' or 'vcat'.
sep :: [Doc] -> Doc; -- ^ Either 'hsep' or 'vcat'.
fcat :: [Doc] -> Doc; -- ^ \"Paragraph fill\" version of 'cat'.
fsep :: [Doc] -> Doc; -- ^ \"Paragraph fill\" version of 'sep'.
-- | Nest (or indent) a document by a given number of positions
-- (which may also be negative). 'nest' satisfies the laws:
--
-- * @'nest' 0 x = x@
--
-- * @'nest' k ('nest' k' x) = 'nest' (k+k') x@
--
-- * @'nest' k (x '<>' y) = 'nest' k z '<>' 'nest' k y@
--
-- * @'nest' k (x '$$' y) = 'nest' k x '$$' 'nest' k y@
--
-- * @'nest' k 'empty' = 'empty'@
--
-- * @x '<>' 'nest' k y = x '<>' y@, if @x@ non-empty
--
-- The side condition on the last law is needed because
-- 'empty' is a left identity for '<>'.
nest :: Int -> Doc -> Doc
-- GHC-specific ones.
-- | @hang d1 n d2 = sep [d1, nest n d2]@
hang :: Doc -> Int -> Doc -> Doc
-- | @punctuate p [d1, ... dn] = [d1 \<> p, d2 \<> p, ... dn-1 \<> p, dn]@
punctuate :: Doc -> [Doc] -> [Doc]
-- Displaying @Doc@ values.
instance Show Doc where
showsPrec prec doc cont = showDoc doc cont
-- | Renders the document as a string using the default 'style'.
render :: Doc -> String
-- | The general rendering interface.
fullRender :: Mode -- ^Rendering mode
-> Int -- ^Line length
-> Float -- ^Ribbons per line
-> (TextDetails -> a -> a) -- ^What to do with text
-> a -- ^What to do at the end
-> Doc -- ^The document
-> a -- ^Result
-- | Render the document as a string using a specified style.
renderStyle :: Style -> Doc -> String
-- | A rendering style.
data Style
= Style { mode :: Mode -- ^ The rendering mode
, lineLength :: Int -- ^ Length of line, in chars
, ribbonsPerLine :: Float -- ^ Ratio of ribbon length to line length
}
-- | The default style (@mode=PageMode, lineLength=100, ribbonsPerLine=1.5@).
style :: Style
style = Style { lineLength = 100, ribbonsPerLine = 1.5, mode = PageMode }
-- | Rendering mode.
data Mode = PageMode -- ^Normal
| ZigZagMode -- ^With zig-zag cuts
| LeftMode -- ^No indentation, infinitely long lines
| OneLineMode -- ^All on one line
-- ---------------------------------------------------------------------------
-- The Doc calculus
-- The Doc combinators satisfy the following laws:
{-
Laws for $$
~~~~~~~~~~~
<a1> (x $$ y) $$ z = x $$ (y $$ z)
<a2> empty $$ x = x
<a3> x $$ empty = x
...ditto $+$...
Laws for <>
~~~~~~~~~~~
<b1> (x <> y) <> z = x <> (y <> z)
<b2> empty <> x = empty
<b3> x <> empty = x
...ditto <+>...
Laws for text
~~~~~~~~~~~~~
<t1> text s <> text t = text (s++t)
<t2> text "" <> x = x, if x non-empty
Laws for nest
~~~~~~~~~~~~~
<n1> nest 0 x = x
<n2> nest k (nest k' x) = nest (k+k') x
<n3> nest k (x <> y) = nest k z <> nest k y
<n4> nest k (x $$ y) = nest k x $$ nest k y
<n5> nest k empty = empty
<n6> x <> nest k y = x <> y, if x non-empty
** Note the side condition on <n6>! It is this that
** makes it OK for empty to be a left unit for <>.
Miscellaneous
~~~~~~~~~~~~~
<m1> (text s <> x) $$ y = text s <> ((text "" <> x)) $$
nest (-length s) y)
<m2> (x $$ y) <> z = x $$ (y <> z)
if y non-empty
Laws for list versions
~~~~~~~~~~~~~~~~~~~~~~
<l1> sep (ps++[empty]++qs) = sep (ps ++ qs)
...ditto hsep, hcat, vcat, fill...
<l2> nest k (sep ps) = sep (map (nest k) ps)
...ditto hsep, hcat, vcat, fill...
Laws for oneLiner
~~~~~~~~~~~~~~~~~
<o1> oneLiner (nest k p) = nest k (oneLiner p)
<o2> oneLiner (x <> y) = oneLiner x <> oneLiner y
You might think that the following version of <m1> would
be neater:
<3 NO> (text s <> x) $$ y = text s <> ((empty <> x)) $$
nest (-length s) y)
But it doesn't work, for if x=empty, we would have
text s $$ y = text s <> (empty $$ nest (-length s) y)
= text s <> nest (-length s) y
-}
-- ---------------------------------------------------------------------------
-- Simple derived definitions
semi = char ';'
colon = char ':'
comma = char ','
space = char ' '
equals = char '='
lparen = char '('
rparen = char ')'
lbrack = char '['
rbrack = char ']'
lbrace = char '{'
rbrace = char '}'
int n = text (show n)
integer n = text (show n)
float n = text (show n)
double n = text (show n)
rational n = text (show n)
-- SIGBJORN wrote instead:
-- rational n = text (show (fromRationalX n))
quotes p = char '\'' <> p <> char '\''
doubleQuotes p = char '"' <> p <> char '"'
parens p = char '(' <> p <> char ')'
brackets p = char '[' <> p <> char ']'
braces p = char '{' <> p <> char '}'
hcat = foldr (<>) empty
hsep = foldr (<+>) empty
vcat = foldr ($$) empty
hang d1 n d2 = sep [d1, nest n d2]
punctuate p [] = []
punctuate p (d:ds) = go d ds
where
go d [] = [d]
go d (e:es) = (d <> p) : go e es
-- ---------------------------------------------------------------------------
-- The Doc data type
-- A Doc represents a *set* of layouts. A Doc with
-- no occurrences of Union or NoDoc represents just one layout.
-- | The abstract type of documents.
-- The 'Show' instance is equivalent to using 'render'.
data Doc
= Empty -- empty
| NilAbove Doc -- text "" $$ x
| TextBeside TextDetails !Int Doc -- text s <> x
| Nest !Int Doc -- nest k x
| Union Doc Doc -- ul `union` ur
| NoDoc -- The empty set of documents
| Beside Doc Bool Doc -- True <=> space between
| Above Doc Bool Doc -- True <=> never overlap
type RDoc = Doc -- RDoc is a "reduced Doc", guaranteed not to have a top-level Above or Beside
reduceDoc :: Doc -> RDoc
reduceDoc (Beside p g q) = beside p g (reduceDoc q)
reduceDoc (Above p g q) = above p g (reduceDoc q)
reduceDoc p = p
data TextDetails = Chr Char
| Str String
| PStr String
space_text = Chr ' '
nl_text = Chr '\n'
{-
Here are the invariants:
* The argument of NilAbove is never Empty. Therefore
a NilAbove occupies at least two lines.
* The arugment of @TextBeside@ is never @Nest@.
* The layouts of the two arguments of @Union@ both flatten to the same
string.
* The arguments of @Union@ are either @TextBeside@, or @NilAbove@.
* The right argument of a union cannot be equivalent to the empty set
(@NoDoc@). If the left argument of a union is equivalent to the
empty set (@NoDoc@), then the @NoDoc@ appears in the first line.
* An empty document is always represented by @Empty@. It can't be
hidden inside a @Nest@, or a @Union@ of two @Empty@s.
* The first line of every layout in the left argument of @Union@ is
longer than the first line of any layout in the right argument.
(1) ensures that the left argument has a first line. In view of
(3), this invariant means that the right argument must have at
least two lines.
-}
-- Arg of a NilAbove is always an RDoc
nilAbove_ p = NilAbove p
-- Arg of a TextBeside is always an RDoc
textBeside_ s sl p = TextBeside s sl p
-- Arg of Nest is always an RDoc
nest_ k p = Nest k p
-- Args of union are always RDocs
union_ p q = Union p q
-- Notice the difference between
-- * NoDoc (no documents)
-- * Empty (one empty document; no height and no width)
-- * text "" (a document containing the empty string;
-- one line high, but has no width)
-- ---------------------------------------------------------------------------
-- @empty@, @text@, @nest@, @union@
empty = Empty
isEmpty Empty = True
isEmpty _ = False
char c = textBeside_ (Chr c) 1 Empty
text s = case length s of {sl -> textBeside_ (Str s) sl Empty}
ptext s = case length s of {sl -> textBeside_ (PStr s) sl Empty}
nest k p = mkNest k (reduceDoc p) -- Externally callable version
-- mkNest checks for Nest's invariant that it doesn't have an Empty inside it
mkNest k _ | k `seq` False = undefined
mkNest k (Nest k1 p) = mkNest (k + k1) p
mkNest k NoDoc = NoDoc
mkNest k Empty = Empty
mkNest 0 p = p -- Worth a try!
mkNest k p = nest_ k p
-- mkUnion checks for an empty document
mkUnion Empty q = Empty
mkUnion p q = p `union_` q
-- ---------------------------------------------------------------------------
-- Vertical composition @$$@
above_ :: Doc -> Bool -> Doc -> Doc
above_ p _ Empty = p
above_ Empty _ q = q
above_ p g q = Above p g q
p $$ q = above_ p False q
p $+$ q = above_ p True q
above :: Doc -> Bool -> RDoc -> RDoc
above (Above p g1 q1) g2 q2 = above p g1 (above q1 g2 q2)
above p@(Beside _ _ _) g q = aboveNest (reduceDoc p) g 0 (reduceDoc q)
above p g q = aboveNest p g 0 (reduceDoc q)
aboveNest :: RDoc -> Bool -> Int -> RDoc -> RDoc
-- Specfication: aboveNest p g k q = p $g$ (nest k q)
aboveNest _ _ k _ | k `seq` False = undefined
aboveNest NoDoc g k q = NoDoc
aboveNest (p1 `Union` p2) g k q = aboveNest p1 g k q `union_`
aboveNest p2 g k q
aboveNest Empty g k q = mkNest k q
aboveNest (Nest k1 p) g k q = nest_ k1 (aboveNest p g (k - k1) q)
-- p can't be Empty, so no need for mkNest
aboveNest (NilAbove p) g k q = nilAbove_ (aboveNest p g k q)
aboveNest (TextBeside s sl p) g k q = k1 `seq` textBeside_ s sl rest
where
k1 = k - sl
rest = case p of
Empty -> nilAboveNest g k1 q
other -> aboveNest p g k1 q
nilAboveNest :: Bool -> Int -> RDoc -> RDoc
-- Specification: text s <> nilaboveNest g k q
-- = text s <> (text "" $g$ nest k q)
nilAboveNest _ k _ | k `seq` False = undefined
nilAboveNest g k Empty = Empty -- Here's why the "text s <>" is in the spec!
nilAboveNest g k (Nest k1 q) = nilAboveNest g (k + k1) q
nilAboveNest g k q | (not g) && (k > 0) -- No newline if no overlap
= textBeside_ (Str (spaces k)) k q
| otherwise -- Put them really above
= nilAbove_ (mkNest k q)
-- ---------------------------------------------------------------------------
-- Horizontal composition @<>@
beside_ :: Doc -> Bool -> Doc -> Doc
beside_ p _ Empty = p
beside_ Empty _ q = q
beside_ p g q = Beside p g q
p <> q = beside_ p False q
p <+> q = beside_ p True q
beside :: Doc -> Bool -> RDoc -> RDoc
-- Specification: beside g p q = p <g> q
beside NoDoc g q = NoDoc
beside (p1 `Union` p2) g q = (beside p1 g q) `union_` (beside p2 g q)
beside Empty g q = q
beside (Nest k p) g q = nest_ k (beside p g q) -- p non-empty
beside p@(Beside p1 g1 q1) g2 q2
{- (A `op1` B) `op2` C == A `op1` (B `op2` C) iff op1 == op2
[ && (op1 == <> || op1 == <+>) ] -}
| g1 == g2 = beside p1 g1 (beside q1 g2 q2)
| otherwise = beside (reduceDoc p) g2 q2
beside p@(Above _ _ _) g q = beside (reduceDoc p) g q
beside (NilAbove p) g q = nilAbove_ (beside p g q)
beside (TextBeside s sl p) g q = textBeside_ s sl rest
where
rest = case p of
Empty -> nilBeside g q
other -> beside p g q
nilBeside :: Bool -> RDoc -> RDoc
-- Specification: text "" <> nilBeside g p
-- = text "" <g> p
nilBeside g Empty = Empty -- Hence the text "" in the spec
nilBeside g (Nest _ p) = nilBeside g p
nilBeside g p | g = textBeside_ space_text 1 p
| otherwise = p
-- ---------------------------------------------------------------------------
-- Separate, @sep@, Hughes version
-- Specification: sep ps = oneLiner (hsep ps)
-- `union`
-- vcat ps
sep = sepX True -- Separate with spaces
cat = sepX False -- Don't
sepX x [] = empty
sepX x (p:ps) = sep1 x (reduceDoc p) 0 ps
-- Specification: sep1 g k ys = sep (x : map (nest k) ys)
-- = oneLiner (x <g> nest k (hsep ys))
-- `union` x $$ nest k (vcat ys)
sep1 :: Bool -> RDoc -> Int -> [Doc] -> RDoc
sep1 g _ k ys | k `seq` False = undefined
sep1 g NoDoc k ys = NoDoc
sep1 g (p `Union` q) k ys = sep1 g p k ys
`union_`
(aboveNest q False k (reduceDoc (vcat ys)))
sep1 g Empty k ys = mkNest k (sepX g ys)
sep1 g (Nest n p) k ys = nest_ n (sep1 g p (k - n) ys)
sep1 g (NilAbove p) k ys = nilAbove_ (aboveNest p False k (reduceDoc (vcat ys)))
sep1 g (TextBeside s sl p) k ys = textBeside_ s sl (sepNB g p (k - sl) ys)
-- Specification: sepNB p k ys = sep1 (text "" <> p) k ys
-- Called when we have already found some text in the first item
-- We have to eat up nests
sepNB g (Nest _ p) k ys = sepNB g p k ys
sepNB g Empty k ys = oneLiner (nilBeside g (reduceDoc rest))
`mkUnion`
nilAboveNest False k (reduceDoc (vcat ys))
where
rest | g = hsep ys
| otherwise = hcat ys
sepNB g p k ys = sep1 g p k ys
-- ---------------------------------------------------------------------------
-- @fill@
fsep = fill True
fcat = fill False
-- Specification:
-- fill [] = empty
-- fill [p] = p
-- fill (p1:p2:ps) = oneLiner p1 <#> nest (length p1)
-- (fill (oneLiner p2 : ps))
-- `union`
-- p1 $$ fill ps
fill g [] = empty
fill g (p:ps) = fill1 g (reduceDoc p) 0 ps
fill1 :: Bool -> RDoc -> Int -> [Doc] -> Doc
fill1 g _ k ys | k `seq` False = undefined
fill1 g NoDoc k ys = NoDoc
fill1 g (p `Union` q) k ys = fill1 g p k ys
`union_`
(aboveNest q False k (fill g ys))
fill1 g Empty k ys = mkNest k (fill g ys)
fill1 g (Nest n p) k ys = nest_ n (fill1 g p (k - n) ys)
fill1 g (NilAbove p) k ys = nilAbove_ (aboveNest p False k (fill g ys))
fill1 g (TextBeside s sl p) k ys = textBeside_ s sl (fillNB g p (k - sl) ys)
fillNB g _ k ys | k `seq` False = undefined
fillNB g (Nest _ p) k ys = fillNB g p k ys
fillNB g Empty k [] = Empty
fillNB g Empty k (y:ys) = nilBeside g (fill1 g (oneLiner (reduceDoc y)) k1 ys)
`mkUnion`
nilAboveNest False k (fill g (y:ys))
where
k1 | g = k - 1
| otherwise = k
fillNB g p k ys = fill1 g p k ys
-- ---------------------------------------------------------------------------
-- Selecting the best layout
best :: Mode
-> Int -- Line length
-> Int -- Ribbon length
-> RDoc
-> RDoc -- No unions in here!
best OneLineMode w r p
= get p
where
get Empty = Empty
get NoDoc = NoDoc
get (NilAbove p) = nilAbove_ (get p)
get (TextBeside s sl p) = textBeside_ s sl (get p)
get (Nest k p) = get p -- Elide nest
get (p `Union` q) = first (get p) (get q)
best mode w r p
= get w p
where
get :: Int -- (Remaining) width of line
-> Doc -> Doc
get w _ | w==0 && False = undefined
get w Empty = Empty
get w NoDoc = NoDoc
get w (NilAbove p) = nilAbove_ (get w p)
get w (TextBeside s sl p) = textBeside_ s sl (get1 w sl p)
get w (Nest k p) = nest_ k (get (w - k) p)
get w (p `Union` q) = nicest w r (get w p) (get w q)
get1 :: Int -- (Remaining) width of line
-> Int -- Amount of first line already eaten up
-> Doc -- This is an argument to TextBeside => eat Nests
-> Doc -- No unions in here!
get1 w _ _ | w==0 && False = undefined
get1 w sl Empty = Empty
get1 w sl NoDoc = NoDoc
get1 w sl (NilAbove p) = nilAbove_ (get (w - sl) p)
get1 w sl (TextBeside t tl p) = textBeside_ t tl (get1 w (sl + tl) p)
get1 w sl (Nest k p) = get1 w sl p
get1 w sl (p `Union` q) = nicest1 w r sl (get1 w sl p)
(get1 w sl q)
nicest w r p q = nicest1 w r 0 p q
nicest1 w r sl p q | fits ((w `minn` r) - sl) p = p
| otherwise = q
fits :: Int -- Space available
-> Doc
-> Bool -- True if *first line* of Doc fits in space available
fits n p | n < 0 = False
fits n NoDoc = False
fits n Empty = True
fits n (NilAbove _) = True
fits n (TextBeside _ sl p) = fits (n - sl) p
minn x y | x < y = x
| otherwise = y
-- @first@ and @nonEmptySet@ are similar to @nicest@ and @fits@, only simpler.
-- @first@ returns its first argument if it is non-empty, otherwise its second.
first p q | nonEmptySet p = p
| otherwise = q
nonEmptySet NoDoc = False
nonEmptySet (p `Union` q) = True
nonEmptySet Empty = True
nonEmptySet (NilAbove p) = True -- NoDoc always in first line
nonEmptySet (TextBeside _ _ p) = nonEmptySet p
nonEmptySet (Nest _ p) = nonEmptySet p
-- @oneLiner@ returns the one-line members of the given set of @Doc@s.
oneLiner :: Doc -> Doc
oneLiner NoDoc = NoDoc
oneLiner Empty = Empty
oneLiner (NilAbove p) = NoDoc
oneLiner (TextBeside s sl p) = textBeside_ s sl (oneLiner p)
oneLiner (Nest k p) = nest_ k (oneLiner p)
oneLiner (p `Union` q) = oneLiner p
-- ---------------------------------------------------------------------------
-- Displaying the best layout
renderStyle style doc
= fullRender (mode style)
(lineLength style)
(ribbonsPerLine style)
string_txt
""
doc
render doc = showDoc doc ""
showDoc doc rest = fullRender PageMode 100 1.5 string_txt rest doc
string_txt (Chr c) s = c:s
string_txt (Str s1) s2 = s1 ++ s2
string_txt (PStr s1) s2 = s1 ++ s2
fullRender OneLineMode _ _ txt end doc = easy_display space_text txt end (reduceDoc doc)
fullRender LeftMode _ _ txt end doc = easy_display nl_text txt end (reduceDoc doc)
fullRender mode line_length ribbons_per_line txt end doc
= display mode line_length ribbon_length txt end best_doc
where
best_doc = best mode hacked_line_length ribbon_length (reduceDoc doc)
hacked_line_length, ribbon_length :: Int
ribbon_length = round (fromIntegral line_length / ribbons_per_line)
hacked_line_length = case mode of { ZigZagMode -> maxBound; other -> line_length }
display mode page_width ribbon_width txt end doc
= case page_width - ribbon_width of { gap_width ->
case gap_width `quot` 2 of { shift ->
let
lay k _ | k `seq` False = undefined
lay k (Nest k1 p) = lay (k + k1) p
lay k Empty = end
lay k (NilAbove p) = nl_text `txt` lay k p
lay k (TextBeside s sl p)
= case mode of
ZigZagMode | k >= gap_width
-> nl_text `txt` (
Str (multi_ch shift '/') `txt` (
nl_text `txt` (
lay1 (k - shift) s sl p)))
| k < 0
-> nl_text `txt` (
Str (multi_ch shift '\\') `txt` (
nl_text `txt` (
lay1 (k + shift) s sl p )))
other -> lay1 k s sl p
lay1 k _ sl _ | k+sl `seq` False = undefined
lay1 k s sl p = Str (indent k) `txt` (s `txt` lay2 (k + sl) p)
lay2 k _ | k `seq` False = undefined
lay2 k (NilAbove p) = nl_text `txt` lay k p
lay2 k (TextBeside s sl p) = s `txt` (lay2 (k + sl) p)
lay2 k (Nest _ p) = lay2 k p
lay2 k Empty = end
in
lay 0 doc
}}
cant_fail = error "easy_display: NoDoc"
easy_display nl_text txt end doc
= lay doc cant_fail
where
lay NoDoc no_doc = no_doc
lay (Union p q) no_doc = {- lay p -} (lay q cant_fail) -- Second arg can't be NoDoc
lay (Nest k p) no_doc = lay p no_doc
lay Empty no_doc = end
lay (NilAbove p) no_doc = nl_text `txt` lay p cant_fail -- NoDoc always on first line
lay (TextBeside s sl p) no_doc = s `txt` lay p no_doc
-- OLD version: we shouldn't rely on tabs being 8 columns apart in the output.
-- indent n | n >= 8 = '\t' : indent (n - 8)
-- | otherwise = spaces n
indent n = spaces n
multi_ch 0 ch = ""
multi_ch n ch = ch : multi_ch (n - 1) ch
-- (spaces n) generates a list of n spaces
--
-- It should never be called with 'n' < 0, but that can happen for reasons I don't understand
-- Here's a test case:
-- ncat x y = nest 4 $ cat [ x, y ]
-- d1 = foldl1 ncat $ take 50 $ repeat $ char 'a'
-- d2 = parens $ sep [ d1, text "+" , d1 ]
-- main = print d2
-- I don't feel motivated enough to find the Real Bug, so meanwhile we just test for n<=0
spaces n | n <= 0 = ""
| otherwise = ' ' : spaces (n - 1)
{- Comments from Johannes Waldmann about what the problem might be:
In the example above, d2 and d1 are deeply nested, but `text "+"' is not,
so the layout function tries to "out-dent" it.
when I look at the Doc values that are generated, there are lots of
Nest constructors with negative arguments. see this sample output of
d1 (obtained with hugs, :s -u)
tBeside (TextDetails_Chr 'a') 1 Doc_Empty) (Doc_NilAbove (Doc_Nest
(-241) (Doc_TextBeside (TextDetails_Chr 'a') 1 Doc_Empty)))))
(Doc_NilAbove (Doc_Nest (-236) (Doc_TextBeside (TextDetails_Chr 'a') 1
(Doc_NilAbove (Doc_Nest (-5) (Doc_TextBeside (TextDetails_Chr 'a') 1
Doc_Empty)))))))) (Doc_NilAbove (Doc_Nest (-231) (Doc_TextBeside
(TextDetails_Chr 'a') 1 (Doc_NilAbove (Doc_Nest (-5) (Doc_TextBeside
(TextDetails_Chr 'a') 1 (Doc_NilAbove (Doc_Nest (-5) (Doc_TextBeside
(TextDetails_Chr 'a') 1 Doc_Empty))))))))))) (Doc_NilAbove (Doc_Nest
-}
|
lwchkg/sunlight-x
|
test/code-snippets/haskell.hs
|
Haskell
|
apache-2.0
| 34,428
|
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
module Openshift.V1.SecurityContext where
import GHC.Generics
import Openshift.V1.Capabilities
import Openshift.V1.SELinuxOptions
import qualified Data.Aeson
-- | SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.
data SecurityContext = SecurityContext
{ capabilities :: Maybe Capabilities -- ^ The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
, privileged :: Maybe Bool -- ^ Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.
, seLinuxOptions :: Maybe SELinuxOptions -- ^ The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
, runAsUser :: Maybe Integer -- ^ The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
, runAsNonRoot :: Maybe Bool -- ^ Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
} deriving (Show, Eq, Generic)
instance Data.Aeson.FromJSON SecurityContext
instance Data.Aeson.ToJSON SecurityContext
|
minhdoboi/deprecated-openshift-haskell-api
|
openshift/lib/Openshift/V1/SecurityContext.hs
|
Haskell
|
apache-2.0
| 2,184
|
module Ticket75 where
data a :- b = Q
-- | A reference to ':-'
f :: Int
f = undefined
|
nominolo/haddock2
|
tests/golden-tests/tests/Ticket75.hs
|
Haskell
|
bsd-2-clause
| 88
|
{-Joseph Eremondi UU# 4229924
Utrecht University, APA 2015
Project one: dataflow analysis
March 17, 2015 -}
{-# LANGUAGE RecordWildCards #-}
{-|
General framework for constructing lattices and finding fixpoints
of monotone functions.
|-}
module Optimize.MonotoneFramework (
AnalysisDirection(..),
ProgramInfo(..),
Lattice(..),
joinAll,
minFP,
printGraph
)where
import qualified Data.HashMap.Strict as Map
import qualified Data.Graph.Inductive.Graph as Graph
import qualified Data.Graph.Inductive.PatriciaTree as Gr
import qualified Data.GraphViz as Viz
import qualified Data.GraphViz.Attributes.Complete as VA
import Data.GraphViz.Printing (renderDot)
import Data.List (foldl')
import Data.Hashable
import Data.Text.Lazy (pack, unpack)
newtype FlowEdge label = FlowEdge (label, label)
data AnalysisDirection = ForwardAnalysis | BackwardAnalysis
data ProgramInfo label = ProgramInfo {
edgeMap :: label -> [label],
--labelRange :: (label, label),
allLabels :: [label],
labelPairs :: [(label, label)],
isExtremal :: label -> Bool
}
{-|
Useful for debugging. Prints the graphviz string to render
a representation of a control-flow graph
|-}
printGraph
:: (Ord label)
=> (label -> Int)
-> (label -> String)
-> ProgramInfo label
-> String
printGraph intMap strMap pInfo =
let
nodes = map (\n -> (intMap n, strMap n)) $ allLabels pInfo
grWithNodes = (Graph.insNodes nodes Graph.empty) :: (Gr.Gr String ())
edges = map (\(n1, n2) -> (intMap n1, intMap n2, () ) ) (labelPairs pInfo)
theGraph = (Graph.insEdges edges grWithNodes) :: (Gr.Gr String () )
defaultParams = Viz.defaultParams :: (Viz.GraphvizParams Graph.Node String () () String )
ourParams = defaultParams {Viz.fmtNode = \(_,s) -> [VA.Label $ VA.StrLabel $ pack s]}
in unpack $ renderDot $ Viz.toDot $ Viz.graphToDot ourParams theGraph
{-|
Either reverse and edge, or don't, depending on whether we are doing
forwards or backwards analysis
|-}
getFlowEdge :: AnalysisDirection -> (label,label) -> FlowEdge label
getFlowEdge ForwardAnalysis e = FlowEdge e
getFlowEdge BackwardAnalysis (l1, l2) = FlowEdge (l2, l1)
{-|
Abstract type representing a lattice and the operations that can
be performed on it.
|-}
data Lattice a = Lattice {
--latticeTop :: a
latticeBottom :: a,
latticeJoin :: a -> a -> a,
iota :: a, --Extremal value for our analysis
lleq :: a -> a -> Bool,
flowDirection :: AnalysisDirection
}
{-|
Iteratively join all the lattice elements in a list
|-}
joinAll :: (Lattice a) -> [a] -> a
joinAll Lattice{..} = foldl' latticeJoin latticeBottom
{-|
Given a Lattice,
a transfer which takes current stored values, a block label, and a payload
and produces a new payload,
and the flow information for our program,
generate the dictionaries representing the open and closed fix-points
of the given transfer function.
|-}
minFP :: (Hashable label, Eq label, Show label, Show payload) =>
Lattice payload
-> (Map.HashMap label payload -> label -> payload -> payload)
-> ProgramInfo label
-> (Map.HashMap label payload, Map.HashMap label payload)
minFP lat@(Lattice{..}) f info = (mfpOpen, mfpClosed)
where
mfpClosed = Map.mapWithKey (f mfpOpen) mfpOpen
--stResult :: ST s [(label, payload)]
initialSolns = foldr (\l solnsSoFar ->
if isExtremal info l
then Map.insert l iota solnsSoFar
else Map.insert l latticeBottom solnsSoFar
) Map.empty (allLabels info)
mfpOpen = iterateSolns initialSolns (labelPairs info)
iterateSolns currentSolns [] = currentSolns
iterateSolns currentSolns (cfgEdge:rest) = let
flowEdge = getFlowEdge flowDirection cfgEdge
(FlowEdge (l,l')) = flowEdge
al = currentSolns Map.! l
al' = currentSolns Map.! l'
fal = f currentSolns l al
(newPairs, newSolns) =
if ( not $ fal `lleq` al')
then let
theMap = Map.insert l' (latticeJoin fal al') currentSolns
thePairs = map (\lNeighbour -> (l', lNeighbour) ) $ edgeMap info l'
in (thePairs, theMap)
else ([], currentSolns)
in iterateSolns newSolns (newPairs ++ rest)
|
JoeyEremondi/utrecht-apa-p1
|
src/Optimize/MonotoneFramework.hs
|
Haskell
|
bsd-3-clause
| 4,433
|
{-# LANGUAGE NoMonomorphismRestriction,
ScopedTypeVariables#-}
module Scan where
import Obsidian
import Data.Word
import Data.Bits
import Control.Monad
import Prelude hiding (map,zipWith,zip,sum,replicate,take,drop,iterate,last)
import qualified Prelude as P
---------------------------------------------------------------------------
--
---------------------------------------------------------------------------
---------------------------------------------------------------------------
-- Kernel1 (Thread acceses element tid and tid+1
---------------------------------------------------------------------------
-- Kernel1 is just a reduction!
kernel1 :: Storable a
=> (a -> a -> a)
-> SPull a
-> BProgram (SPush Block a)
kernel1 f arr
| len arr == 1 = return (push arr)
| otherwise =
do
let (a1,a2) = evenOdds arr
arr' <- forcePull (zipWith f a1 a2)
kernel1 f arr'
mapKernel1 :: Storable a => (a -> a -> a) -> DPull (SPull a) -> DPush Grid a
mapKernel1 f arr = pConcat (fmap body arr)
where
body arr = runPush (kernel1 f arr)
---------------------------------------------------------------------------
-- Sklansky
---------------------------------------------------------------------------
sklansky :: (Choice a, Storable a)
=> Int
-> (a -> a -> a)
-> Pull Word32 a
-> Program Block (Push Block Word32 a)
sklansky 0 op arr = return (push arr)
sklansky n op arr =
do
let arr1 = binSplit (n-1) (fan op) arr
arr2 <- forcePull arr1
sklansky (n-1) op arr2
fan :: Choice a
=> (a -> a -> a)
-> SPull a
-> SPull a
fan op arr = a1 `append` fmap (op c) a2
where
(a1,a2) = halve arr
c = a1 ! fromIntegral (len a1 - 1)
pushM = liftM push
mapScan1 :: (Choice a, Storable a) => Int -> (a -> a -> a) -> DPull (SPull a) -> DPush Grid a
mapScan1 n f arr = pConcat (fmap body arr)
where
body arr = runPush (sklansky n f arr)
---------------------------------------------------------------------------
-- Pushy phases for Sklansky
---------------------------------------------------------------------------
phase :: Int
-> (a -> a -> a)
-> Pull Word32 a
-> Push Block Word32 a
phase i f arr =
mkPush l (\wf -> forAll sl2 (\tid ->
do
let ix1 = insertZero i tid
ix2 = flipBit i ix1
ix3 = zeroBits i ix2 - 1
wf (arr ! ix1) ix1
wf (f (arr ! ix3) (arr ! ix2) ) ix2))
where
l = len arr
l2 = l `div` 2
sl2 = fromIntegral l2
sklansky2 :: Storable a
=> Int
-> (a -> a -> a)
-> Pull Word32 a
-> Program Block (Push Block Word32 a)
sklansky2 l f = compose [phase i f | i <- [0..(l-1)]]
compose :: Storable a
=> [Pull Word32 a -> Push Block Word32 a]
-> Pull Word32 a
-> Program Block (Push Block Word32 a)
compose [f] arr = return (f arr)
compose (f:fs) arr =
do
let arr1 = f arr
arr2 <- force arr1
compose fs arr2
insertZero :: Int -> Exp Word32 -> Exp Word32
insertZero 0 a = a `shiftL` 1
insertZero i a = a + zeroBits i a
zeroBits :: Int -> EWord32 -> EWord32
zeroBits i a = a .&. fromIntegral (complement (oneBits i :: Word32))
flipBit :: (Num a, Bits a) => Int -> a -> a
flipBit i = (`xor` (1 `shiftL` i))
oneBits :: (Num a, Bits a) => Int -> a
oneBits i = (2^i) - 1
mapScan2 :: (Choice a, Storable a) => Int -> (a -> a -> a) -> DPull (SPull a) -> DPush Grid a
mapScan2 n f arr = pConcat $ fmap body arr -- sklansky2 n f
where
body arr = runPush (sklansky2 n f arr)
-- getScan2 n = namedPrint ("scanB" ++ show (2^n)) (mapScan2 n (+) . splitUp (2^n)) (input :- ())
----------------------------------------------------------------------------
-- TWEAK LOADS
----------------------------------------------------------------------------
sklansky3 :: Storable a
=> Int
-> (a -> a -> a)
-> Pull Word32 a
-> Program Block (Push Block Word32 a)
sklansky3 l f arr =
do
im <- force (load 2 arr)
compose [phase i f | i <- [0..(l-1)]] im
mapScan3 :: (Choice a, Storable a) => Int -> (a -> a -> a) -> DPull (SPull a) -> DPush Grid a
mapScan3 n f arr = pConcat (fmap body arr)
where
body arr = runPush (sklansky3 n f arr)
--getScan3 n = namedPrint ("scanC" ++ show (2^n)) (mapScan3 n (+) . splitUp (2^n)) (input :- ())
|
svenssonjoel/ObsidianGFX
|
Examples/ScanBench/Scan.hs
|
Haskell
|
bsd-3-clause
| 4,464
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.