| code (string, 5 to 1.03M chars) | repo_name (string, 5 to 90 chars) | path (string, 4 to 158 chars) | license (15 classes) | size (int64, 5 to 1.03M) | n_ast_errors (int64, 0 to 53.9k) | ast_max_depth (int64, 2 to 4.17k) | n_whitespaces (int64, 0 to 365k) | n_ast_nodes (int64, 3 to 317k) | n_ast_terminals (int64, 1 to 171k) | n_ast_nonterminals (int64, 1 to 146k) | loc (int64, -1 to 37.3k) | cycloplexity (int64, -1 to 1.31k) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
module Unison.Util.Map
( unionWithM
) where
import qualified Control.Monad as Monad
import qualified Data.Map as Map
import Unison.Prelude
unionWithM :: forall m k a.
(Monad m, Ord k) => (a -> a -> m a) -> Map k a -> Map k a -> m (Map k a)
unionWithM f m1 m2 = Monad.foldM go m1 $ Map.toList m2 where
go :: Map k a -> (k, a) -> m (Map k a)
go m1 (k, a2) = case Map.lookup k m1 of
Just a1 -> do a <- f a1 a2; pure $ Map.insert k a m1
Nothing -> pure $ Map.insert k a2 m1
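-- Usage sketch (illustrative values, not part of this module): in any monad,
--   unionWithM (\x y -> pure (x + y)) (Map.fromList [(1, 2)]) (Map.fromList [(1, 3), (2, 5)])
-- evaluates to pure (Map.fromList [(1, 5), (2, 5)]); the combining action runs
-- only for keys present in both maps.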
| unisonweb/platform | parser-typechecker/src/Unison/Util/Map.hs | mit | 491 | 0 | 14 | 127 | 256 | 133 | 123 | -1 | -1 |
-- file ch02/myDrop.hs
-- From chapter 2, http://book.realworldhaskell.org/read/types-and-functions.html
myDrop n xs = if n <= 0 || null xs
then xs
else myDrop (n - 1) (tail xs)
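-- Quick checks (illustrative):
--   myDrop 2 "foobar"  -- => "obar"
--   myDrop 4 [1, 2]    -- => []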
| Sgoettschkes/learning | haskell/RealWorldHaskell/ch02/myDrop.hs | mit | 187 | 0 | 8 | 37 | 50 | 26 | 24 | 3 | 2 |
pent n = n * (3*n-1) `div` 2
pentn = take 10000 $ map pent [1..]
intPairsSum s = [ (i, j) | i <- [1..(s-1)], let j=s-i, j>i ]
intPairs = concat $ map intPairsSum [1..]
absPentn = [ (n1, n2, n2-n1) | (i,j) <- intPairs, let n1 = pent i, let n2 = pent j, isPentagonal (n2-n1), isPentagonal (n1+n2)]
main = print absPentn
isPerfectSquare :: Integer -> Bool
isPerfectSquare n = (round $ sqrt $ fromIntegral n)^2 == n
isPentagonal :: Integer -> Bool
isPentagonal n = isPerfectSquare discriminant && (1 + (round $ sqrt $ fromIntegral discriminant)) `mod` 6 == 0
where discriminant = 1 + 24*n
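-- Note on isPentagonal (an explanatory sketch, not from the original file):
-- m is pentagonal iff 24*m + 1 is a perfect square and 1 + sqrt(24*m + 1) is
-- divisible by 6. E.g. pent 4 = 22: 24*22 + 1 = 529 = 23^2 and (1 + 23) `mod` 6 == 0.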
| arekfu/project_euler | p0044/p0044.hs | mit | 600 | 0 | 12 | 125 | 332 | 175 | 157 | 11 | 1 |
module AnsibleModules.Apt where
import Data.Monoid
import Data.Sansible
import Data.Sansible.Playbook
import qualified Data.Aeson.TH as A
import qualified Data.Text as T
data State = Latest | Absent | Present
$(A.deriveToJSON encodingOptions ''State)
data Upgrade = Yes | Safe | Full | Dist
$(A.deriveToJSON encodingOptions ''Upgrade)
data Apt = Apt
{ name :: Maybe T.Text
, state :: Maybe State
, updateCache :: Maybe Bool
, cacheValidTime :: Maybe Int
, purge :: Maybe Bool
, defaultRelease :: Maybe T.Text
, installRecommends :: Maybe Bool
, force :: Maybe Bool
, upgrade :: Maybe Upgrade
, dpkgOptions :: Maybe T.Text
, deb :: Maybe FilePath
}
defaultApt :: Apt
defaultApt = Apt
{ name = Nothing
, state = Nothing
, updateCache = Nothing
, cacheValidTime = Nothing
, purge = Nothing
, defaultRelease = Nothing
, installRecommends = Nothing
, force = Nothing
, upgrade = Nothing
, dpkgOptions = Nothing
, deb = Nothing
}
instance ModuleCall Apt where
moduleLabel _ = "apt"
$(A.deriveToJSON encodingOptions ''Apt)
oneDay :: Int
oneDay = 86400
-- | Install an apt package
aptInstall :: T.Text -> CompiledModuleCall
aptInstall pkg = compile $
defaultApt { name = Just pkg
, cacheValidTime = Just oneDay
, updateCache = Just False
}
aptInstallTask :: T.Text -> Task
aptInstallTask p =
task ("Install apt package " <> p) (aptInstall p)
-- | Update apt
aptUpdate :: CompiledModuleCall
aptUpdate = compile $
defaultApt { cacheValidTime = Just oneDay
, updateCache = Just True
}
aptUpdateTask :: Task
aptUpdateTask =
task "Run apt-get update" aptUpdate
forceAptUpdateTask :: Task
forceAptUpdateTask =
task "Run apt-get update" $ compile $ defaultApt { cacheValidTime = Nothing
, updateCache = Just True
}
-- | Install a Debian package from file
aptDebInstall :: FilePath -> CompiledModuleCall
aptDebInstall path = compile $
defaultApt { deb = Just path }
aptDebInstallTask :: FilePath -> Task
aptDebInstallTask p =
task ("Install deb package " <> T.pack p) (aptDebInstall p)
| ostapneko/sansible | src/AnsibleModules/Apt.hs | mit | 2,610 | 0 | 10 | 960 | 585 | 329 | 256 | -1 | -1 |
{- |
module: $Header$
description: Higher order logic terms
license: MIT
maintainer: Joe Leslie-Hurd <joe@gilith.com>
stability: provisional
portability: portable
-}
module HOL.Term
where
import Data.Maybe (isJust)
import qualified Data.Map.Strict as Map
import qualified Data.Set as Set
import System.IO.Unsafe (unsafePerformIO)
import System.Mem.StableName (makeStableName)
import qualified HOL.Const as Const
import HOL.Data
import HOL.Name
import qualified HOL.TermData as TermData
import qualified HOL.Type as Type
import qualified HOL.TypeVar as TypeVar
import HOL.Util
import qualified HOL.Var as Var
-------------------------------------------------------------------------------
-- Constructors and destructors
-------------------------------------------------------------------------------
dest :: Term -> TermData
dest (Term d _ _ _ _ _) = d
mk :: TermData -> Term
mk d =
Term d i sz ty tvs fvs
where
i = unsafePerformIO (makeStableName $! d)
sz = TermData.size d
ty = TermData.typeOf d
tvs = TypeVar.vars d
fvs = Var.free d
-- Constants
mkConst :: Const -> Type -> Term
mkConst c = mk . TermData.mkConst c
destConst :: Term -> Maybe (Const,Type)
destConst = TermData.destConst . dest
isConst :: Term -> Bool
isConst = isJust . destConst
destGivenConst :: Const -> Term -> Maybe Type
destGivenConst c = TermData.destGivenConst c . dest
isGivenConst :: Const -> Term -> Bool
isGivenConst c = isJust . destGivenConst c
-- Variables
mkVar :: Var -> Term
mkVar = mk . TermData.mkVar
destVar :: Term -> Maybe Var
destVar = TermData.destVar . dest
isVar :: Term -> Bool
isVar = isJust . destVar
eqVar :: Var -> Term -> Bool
eqVar v = TermData.eqVar v . dest
-- Function application
mkApp :: Term -> Term -> Maybe Term
mkApp f x = fmap mk $ TermData.mkApp f x
mkAppUnsafe :: Term -> Term -> Term
mkAppUnsafe = mkUnsafe2 "HOL.Term.mkApp" mkApp
destApp :: Term -> Maybe (Term,Term)
destApp = TermData.destApp . dest
isApp :: Term -> Bool
isApp = isJust . destApp
rator :: Term -> Maybe Term
rator = fmap fst . destApp
rand :: Term -> Maybe Term
rand = fmap snd . destApp
land :: Term -> Maybe Term
land tm = do
f <- rator tm
rand f
listMkApp :: Term -> [Term] -> Maybe Term
listMkApp tm [] = Just tm
listMkApp f (x : xs) = do
fx <- mkApp f x
listMkApp fx xs
listMkAppUnsafe :: Term -> [Term] -> Term
listMkAppUnsafe = mkUnsafe2 "HOL.Term.listMkApp" listMkApp
stripApp :: Term -> (Term,[Term])
stripApp =
go []
where
go xs tm =
case destApp tm of
Nothing -> (tm,xs)
Just (f,x) -> go (x : xs) f
-- Lambda abstraction
mkAbs :: Var -> Term -> Term
mkAbs v b = mk $ TermData.mkAbs v b
destAbs :: Term -> Maybe (Var,Term)
destAbs = TermData.destAbs . dest
isAbs :: Term -> Bool
isAbs = isJust . destAbs
listMkAbs :: [Var] -> Term -> Term
listMkAbs [] tm = tm
listMkAbs (v : vs) b = mkAbs v $ listMkAbs vs b
stripAbs :: Term -> ([Var],Term)
stripAbs tm =
case destAbs tm of
Nothing -> ([],tm)
Just (v,t) -> (v : vs, b) where (vs,b) = stripAbs t
-------------------------------------------------------------------------------
-- Size is measured as the number of TermData constructors
-------------------------------------------------------------------------------
size :: Term -> Size
size (Term _ _ s _ _ _) = s
-------------------------------------------------------------------------------
-- The type of a (well-formed) term
-------------------------------------------------------------------------------
typeOf :: Term -> Type
typeOf (Term _ _ _ ty _ _) = ty
isBool :: Term -> Bool
isBool = Type.isBool . typeOf
sameType :: Term -> Term -> Bool
sameType tm1 tm2 = typeOf tm1 == typeOf tm2
sameTypeVar :: Var -> Term -> Bool
sameTypeVar v tm = Var.typeOf v == typeOf tm
-------------------------------------------------------------------------------
-- Free variables in terms
-------------------------------------------------------------------------------
freeInMultiple :: Var -> Term -> Bool
freeInMultiple v = TermData.freeInMultiple v . dest
freeInOnce :: Var -> Term -> Bool
freeInOnce v tm = Var.freeIn v tm && not (freeInMultiple v tm)
-------------------------------------------------------------------------------
-- A total order on terms modulo alpha-equivalence
-------------------------------------------------------------------------------
alphaCompare :: Term -> Term -> Ordering
alphaCompare =
tcmp 0 True bvEmpty bvEmpty
where
bvEmpty :: Map.Map Var Int
bvEmpty = Map.empty
tcmp n bvEq bv1 bv2 tm1 tm2 =
if bvEq && iEq tm1 tm2 then EQ
else case compare (size tm1) (size tm2) of
LT -> LT
EQ -> dcmp n bvEq bv1 bv2 (dest tm1) (dest tm2)
GT -> GT
iEq (Term _ i1 _ _ _ _) (Term _ i2 _ _ _ _) = i1 == i2
dcmp _ _ bv1 bv2 (VarTerm v1) (VarTerm v2) =
case (Map.lookup v1 bv1, Map.lookup v2 bv2) of
(Nothing,Nothing) -> compare v1 v2
(Just _, Nothing) -> LT
(Nothing, Just _) -> GT
(Just i1, Just i2) -> compare i1 i2
dcmp n bvEq bv1 bv2 (AbsTerm v1 b1) (AbsTerm v2 b2) =
case compare ty1 ty2 of
LT -> LT
EQ -> tcmp n' bvEq' bv1' bv2' b1 b2
GT -> GT
where
(n1,ty1) = Var.dest v1
(n2,ty2) = Var.dest v2
n' = n + 1
bvEq' = bvEq && n1 == n2
bv1' = Map.insert v1 n bv1
bv2' = if bvEq' then bv1' else Map.insert v2 n bv2
dcmp n bvEq bv1 bv2 (AppTerm f1 x1) (AppTerm f2 x2) =
case tcmp n bvEq bv1 bv2 f1 f2 of
LT -> LT
EQ -> tcmp n bvEq bv1 bv2 x1 x2
GT -> GT
dcmp _ _ _ _ d1 d2 = compare d1 d2
alphaEqual :: Term -> Term -> Bool
alphaEqual tm1 tm2 = alphaCompare tm1 tm2 == EQ
-------------------------------------------------------------------------------
-- Rename all bound variables to fresh names
-------------------------------------------------------------------------------
renameFresh :: Term -> Term
renameFresh = rename
where
rename tm = fst $ renameTerm bvs tm ns
where
bvs = Map.empty
ns = filter (flip Set.notMember avoid) freshSupply
avoid = Set.map Var.name $ Var.free tm
renameTerm bvs tm ns = renameData bvs (dest tm) ns
renameData _ (ConstTerm c ty) ns = (mkConst c ty, ns)
renameData bvs (VarTerm v) ns = (renameVar bvs v, ns)
renameData bvs (AppTerm f x) ns = (mkAppUnsafe f' x', ns'')
where
(f',ns') = renameTerm bvs f ns
(x',ns'') = renameTerm bvs x ns'
renameData bvs (AbsTerm v b) ns = (mkAbs v' b', ns'')
where
(v',ns') = case ns of
[] -> error "exhausted supply"
n : l -> (Var.mk n (Var.typeOf v), l)
bvs' = Map.insert v (mkVar v') bvs
(b',ns'') = renameTerm bvs' b ns'
renameVar bvs v = case Map.lookup v bvs of
Just tm -> tm
Nothing -> mkVar v
-------------------------------------------------------------------------------
-- Primitive constants
-------------------------------------------------------------------------------
-- Equality
mkEqConst :: Type -> Term
mkEqConst a = mkConst Const.eq $ Type.mkEq a
destEqConst :: Term -> Maybe Type
destEqConst tm = do
ty <- destGivenConst Const.eq tm
Type.destEq ty
isEqConst :: Term -> Bool
isEqConst = isJust . destEqConst
mkEq :: Term -> Term -> Maybe Term
mkEq l r = listMkApp c [l,r] where c = mkEqConst (typeOf l)
mkEqUnsafe :: Term -> Term -> Term
mkEqUnsafe = mkUnsafe2 "HOL.Term.mkEq" mkEq
destEq :: Term -> Maybe (Term,Term)
destEq tm = do
(el,r) <- destApp tm
(e,l) <- destApp el
if isEqConst e then Just (l,r) else Nothing
isEq :: Term -> Bool
isEq = isJust . destEq
lhs :: Term -> Maybe Term
lhs = fmap fst . destEq
rhs :: Term -> Maybe Term
rhs = fmap snd . destEq
rhsUnsafe :: Term -> Term
rhsUnsafe = mkUnsafe1 "HOL.Term.rhs" rhs
mkRefl :: Term -> Term
mkRefl tm = mkEqUnsafe tm tm
destRefl :: Term -> Maybe Term
destRefl tm = do
(l,r) <- destEq tm
if l == r then Just l else Nothing
isRefl :: Term -> Bool
isRefl = isJust . destRefl
-- Hilbert's choice operator
mkSelectConst :: Type -> Term
mkSelectConst a = mkConst Const.select $ Type.mkSelect a
destSelectConst :: Term -> Maybe Type
destSelectConst tm = do
ty <- destGivenConst Const.select tm
Type.destSelect ty
isSelectConst :: Term -> Bool
isSelectConst = isJust . destSelectConst
mkSelect :: Var -> Term -> Term
mkSelect v b =
mkAppUnsafe c (mkAbs v b)
where
c = mkSelectConst $ Var.typeOf v
destSelect :: Term -> Maybe (Var,Term)
destSelect tm = do
(c,vb) <- destApp tm
if isSelectConst c then destAbs vb else Nothing
isSelect :: Term -> Bool
isSelect = isJust . destSelect
| gilith/hol | src/HOL/Term.hs | mit | 8,839 | 0 | 16 | 2,065 | 2,999 | 1,559 | 1,440 | 209 | 15 |
module Config.Load
( loadConfig
, ConfigLocation(..)
) where
import Control.Applicative
import Data.Aeson
import qualified Data.ByteString.Char8 as BS
import Network.AWS.AWSConnection
import Network.AWS.S3Object
import System.Environment
data ConfigLocation =
ConfigFile FilePath
| ConfigS3 Bucket Key
type Bucket = String
type Key = String
loadConfig :: FromJSON a => ConfigLocation -> IO a
loadConfig (ConfigFile path) = do
decoded <- eitherDecodeStrict <$> BS.readFile path
case decoded of
Right config -> pure config
Left e -> error $ "Unable to load config from file"
++ path
++ ": "
++ e
loadConfig (ConfigS3 bucket key) = do
conn <- connection
result <- getObject conn $ S3Object {
obj_bucket = bucket
, obj_name = key
, content_type = "application/json"
, obj_headers = []
, obj_data = ""
}
case result of
Left reqError -> error $ show reqError
Right obj ->
case eitherDecode $ obj_data obj of
Right config -> pure config
Left e -> error $ "Unable to load config from file"
++ "s3://" ++ bucket ++ "/" ++ key
++ ": "
++ e
connection :: IO AWSConnection
connection = do
accessKeyId <- getEnv "CONFIG_ACCESS_KEY_ID"
secretAccessKey <- getEnv "CONFIG_SECRET_ACCESS_KEY"
pure $ AWSConnection {
awsHost = "s3.amazonaws.com"
, awsPort = 80
, awsAccessKey = accessKeyId
, awsSecretKey = secretAccessKey
}
| flipstone/glados | src/Config/Load.hs | mit | 1,604 | 0 | 19 | 503 | 404 | 212 | 192 | 49 | 4 |
{-|
Module : Main
Description : Main module for 'breadu-exe' program
Stability : experimental
Portability : POSIX
Main module for the 'breadu-exe' program. This program is very simple:
it just uses the 'breadu' library to run a server.
-}
module Main where
-- The space after the 'import' keyword is reserved for the 'qualified' word,
-- so when some modules are qualified-imported, the module names stay vertically aligned.
-- Good practice: specify imported types/functions explicitly; it helps the project's maintainers.
import CLI ( optionsParser, makeSureOptionsAreValid )
import BreadU ( startBreadU )
-- Special autogenerated module with metadata obtained from .cabal-file.
import Paths_breadu ( version )
import Data.Version ( showVersion )
import Options.Applicative.Simple ( simpleOptions, empty )
-- | Main function, app's entry point.
main :: IO ()
main =
simpleOptions (showVersion version)
"Bread Unit calculator for diabetics"
"Runs server, listen defined port at localhost"
optionsParser
empty
>>= makeSureOptionsAreValid
>>= startBreadU
| denisshevchenko/breadu.info | src/app/Main.hs | mit | 1,266 | 0 | 9 | 383 | 101 | 61 | 40 | 15 | 1 |
module Data (Nucleotide, s2n) where
import Data.Char (toUpper)
data Nucleotide = A|B|C|D|H|G|K|M|R deriving (Ord,Eq,Enum,Show)
charToNucleotide :: Char -> Nucleotide
charToNucleotide 'A' = A
charToNucleotide 'B' = B
charToNucleotide 'C' = C
charToNucleotide 'D' = D
charToNucleotide 'H' = H
charToNucleotide 'G' = G
charToNucleotide 'K' = K
charToNucleotide 'M' = M
charToNucleotide 'R' = R
charToNucleotide _ = error "Unviable letters in sequence"
s2n :: String -> [Nucleotide]
s2n = stringToNucleotideSequence
stringToNucleotideSequence :: String -> [Nucleotide]
stringToNucleotideSequence = map (charToNucleotide . toUpper)
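-- Example (illustrative): s2n "acGR" == [A,C,G,R]; any character outside the
-- alphabet above (after upper-casing) hits the error case.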
| epsilonhalbe/Nucleotide | Data.hs | mit | 632 | 0 | 7 | 87 | 208 | 115 | 93 | 18 | 1 |
{-|
Module : PostgREST.Request.Parsers
Description : PostgREST parser combinators
This module is in charge of parsing all the querystring values in a URL, e.g. the select, id, order in `/projects?select=id,name&id=eq.1&order=id,name.desc`.
-}
module PostgREST.Request.Parsers
( pColumns
, pLogicPath
, pLogicSingleVal
, pLogicTree
, pOrder
, pOrderTerm
, pRequestColumns
, pRequestFilter
, pRequestLogicTree
, pRequestOnConflict
, pRequestOrder
, pRequestRange
, pRequestSelect
, pSingleVal
, pTreePath
) where
import qualified Data.HashMap.Strict as M
import qualified Data.Set as S
import Data.Either.Combinators (mapLeft)
import Data.Foldable (foldl1)
import Data.List (init, last)
import Data.Text (intercalate, replace, strip)
import Data.Tree (Tree (..))
import Text.Parsec.Error (errorMessages,
showErrorMessages)
import Text.ParserCombinators.Parsec (GenParser, ParseError, Parser,
anyChar, between, char, digit,
eof, errorPos, letter,
lookAhead, many1, noneOf,
notFollowedBy, oneOf, option,
optionMaybe, parse, sepBy1,
string, try, (<?>))
import PostgREST.DbStructure.Identifiers (FieldName)
import PostgREST.Error (ApiRequestError (ParseRequestError))
import PostgREST.Query.SqlFragment (ftsOperators, operators)
import PostgREST.RangeQuery (NonnegRange)
import PostgREST.Request.Types
import Protolude hiding (intercalate, option, replace, toS, try)
import Protolude.Conv (toS)
pRequestSelect :: Text -> Either ApiRequestError [Tree SelectItem]
pRequestSelect selStr =
mapError $ parse pFieldForest ("failed to parse select parameter (" <> toS selStr <> ")") (toS selStr)
pRequestOnConflict :: Text -> Either ApiRequestError [FieldName]
pRequestOnConflict oncStr =
mapError $ parse pColumns ("failed to parse on_conflict parameter (" <> toS oncStr <> ")") (toS oncStr)
pRequestFilter :: (Text, Text) -> Either ApiRequestError (EmbedPath, Filter)
pRequestFilter (k, v) = mapError $ (,) <$> path <*> (Filter <$> fld <*> oper)
where
    treePath = parse pTreePath ("failed to parse tree path (" ++ toS k ++ ")") $ toS k
oper = parse (pOpExpr pSingleVal) ("failed to parse filter (" ++ toS v ++ ")") $ toS v
path = fst <$> treePath
fld = snd <$> treePath
pRequestOrder :: (Text, Text) -> Either ApiRequestError (EmbedPath, [OrderTerm])
pRequestOrder (k, v) = mapError $ (,) <$> path <*> ord'
where
    treePath = parse pTreePath ("failed to parse tree path (" ++ toS k ++ ")") $ toS k
path = fst <$> treePath
ord' = parse pOrder ("failed to parse order (" ++ toS v ++ ")") $ toS v
pRequestRange :: (ByteString, NonnegRange) -> Either ApiRequestError (EmbedPath, NonnegRange)
pRequestRange (k, v) = mapError $ (,) <$> path <*> pure v
where
    treePath = parse pTreePath ("failed to parse tree path (" ++ toS k ++ ")") $ toS k
path = fst <$> treePath
pRequestLogicTree :: (Text, Text) -> Either ApiRequestError (EmbedPath, LogicTree)
pRequestLogicTree (k, v) = mapError $ (,) <$> embedPath <*> logicTree
where
    path = parse pLogicPath ("failed to parse logic path (" ++ toS k ++ ")") $ toS k
embedPath = fst <$> path
logicTree = do
op <- snd <$> path
-- Concat op and v to make pLogicTree argument regular,
-- in the form of "?and=and(.. , ..)" instead of "?and=(.. , ..)"
parse pLogicTree ("failed to parse logic tree (" ++ toS v ++ ")") $ toS (op <> v)
pRequestColumns :: Maybe Text -> Either ApiRequestError (Maybe (S.Set FieldName))
pRequestColumns colStr =
case colStr of
Just str ->
mapError $ Just . S.fromList <$> parse pColumns ("failed to parse columns parameter (" <> toS str <> ")") (toS str)
_ -> Right Nothing
ws :: Parser Text
ws = toS <$> many (oneOf " \t")
lexeme :: Parser a -> Parser a
lexeme p = ws *> p <* ws
pTreePath :: Parser (EmbedPath, Field)
pTreePath = do
p <- pFieldName `sepBy1` pDelimiter
jp <- option [] pJsonPath
return (init p, (last p, jp))
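-- For instance (illustrative): "grandparent.parent.child" parses to
-- (["grandparent","parent"], ("child", [])).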
pFieldForest :: Parser [Tree SelectItem]
pFieldForest = pFieldTree `sepBy1` lexeme (char ',')
where
pFieldTree :: Parser (Tree SelectItem)
pFieldTree = try (Node <$> pRelationSelect <*> between (char '(') (char ')') pFieldForest) <|>
Node <$> pFieldSelect <*> pure []
pStar :: Parser Text
pStar = toS <$> (string "*" $> ("*"::ByteString))
pFieldName :: Parser Text
pFieldName =
pQuotedValue <|>
intercalate "-" . map toS <$> (many1 (letter <|> digit <|> oneOf "_ ") `sepBy1` dash) <?>
"field name (* or [a..z0..9_])"
where
isDash :: GenParser Char st ()
isDash = try ( char '-' >> notFollowedBy (char '>') )
dash :: Parser Char
dash = isDash $> '-'
pJsonPath :: Parser JsonPath
pJsonPath = many pJsonOperation
where
pJsonOperation :: Parser JsonOperation
pJsonOperation = pJsonArrow <*> pJsonOperand
pJsonArrow =
try (string "->>" $> J2Arrow) <|>
try (string "->" $> JArrow)
pJsonOperand =
let pJKey = JKey . toS <$> pFieldName
pJIdx = JIdx . toS <$> ((:) <$> option '+' (char '-') <*> many1 digit) <* pEnd
pEnd = try (void $ lookAhead (string "->")) <|>
try (void $ lookAhead (string "::")) <|>
try eof in
try pJIdx <|> try pJKey
pField :: Parser Field
pField = lexeme $ (,) <$> pFieldName <*> option [] pJsonPath
aliasSeparator :: Parser ()
aliasSeparator = char ':' >> notFollowedBy (char ':')
pRelationSelect :: Parser SelectItem
pRelationSelect = lexeme $ try ( do
alias <- optionMaybe ( try(pFieldName <* aliasSeparator) )
fld <- pField
hint <- optionMaybe (
try ( char '!' *> pFieldName) <|>
-- deprecated, remove in next major version
try ( char '.' *> pFieldName)
)
return (fld, Nothing, alias, hint)
)
pFieldSelect :: Parser SelectItem
pFieldSelect = lexeme $
try (
do
alias <- optionMaybe ( try(pFieldName <* aliasSeparator) )
fld <- pField
cast' <- optionMaybe (string "::" *> many letter)
return (fld, toS <$> cast', alias, Nothing)
)
<|> do
s <- pStar
return ((s, []), Nothing, Nothing, Nothing)
pOpExpr :: Parser SingleVal -> Parser OpExpr
pOpExpr pSVal = try ( string "not" *> pDelimiter *> (OpExpr True <$> pOperation)) <|> OpExpr False <$> pOperation
where
pOperation :: Parser Operation
pOperation =
Op . toS <$> foldl1 (<|>) (try . ((<* pDelimiter) . string) . toS <$> M.keys ops) <*> pSVal
<|> In <$> (try (string "in" *> pDelimiter) *> pListVal)
<|> pFts
<?> "operator (eq, gt, ...)"
pFts = do
op <- foldl1 (<|>) (try . string . toS <$> ftsOps)
lang <- optionMaybe $ try (between (char '(') (char ')') (many (letter <|> digit <|> oneOf "_")))
pDelimiter >> Fts (toS op) (toS <$> lang) <$> pSVal
ops = M.filterWithKey (const . flip notElem ("in":ftsOps)) operators
ftsOps = M.keys ftsOperators
pSingleVal :: Parser SingleVal
pSingleVal = toS <$> many anyChar
pListVal :: Parser ListVal
pListVal = lexeme (char '(') *> pListElement `sepBy1` char ',' <* lexeme (char ')')
pListElement :: Parser Text
pListElement = try (pQuotedValue <* notFollowedBy (noneOf ",)")) <|> (toS <$> many (noneOf ",)"))
pQuotedValue :: Parser Text
pQuotedValue = toS <$> (char '"' *> many (noneOf "\"") <* char '"')
pDelimiter :: Parser Char
pDelimiter = char '.' <?> "delimiter (.)"
pOrder :: Parser [OrderTerm]
pOrder = lexeme pOrderTerm `sepBy1` char ','
pOrderTerm :: Parser OrderTerm
pOrderTerm = do
fld <- pField
dir <- optionMaybe $
try (pDelimiter *> string "asc" $> OrderAsc) <|>
try (pDelimiter *> string "desc" $> OrderDesc)
nls <- optionMaybe pNulls <* pEnd <|>
pEnd $> Nothing
return $ OrderTerm fld dir nls
where
pNulls = try (pDelimiter *> string "nullsfirst" $> OrderNullsFirst) <|>
try (pDelimiter *> string "nullslast" $> OrderNullsLast)
pEnd = try (void $ lookAhead (char ',')) <|>
try eof
pLogicTree :: Parser LogicTree
pLogicTree = Stmnt <$> try pLogicFilter
<|> Expr <$> pNot <*> pLogicOp <*> (lexeme (char '(') *> pLogicTree `sepBy1` lexeme (char ',') <* lexeme (char ')'))
where
pLogicFilter :: Parser Filter
pLogicFilter = Filter <$> pField <* pDelimiter <*> pOpExpr pLogicSingleVal
pNot :: Parser Bool
pNot = try (string "not" *> pDelimiter $> True)
<|> pure False
<?> "negation operator (not)"
pLogicOp :: Parser LogicOperator
pLogicOp = try (string "and" $> And)
<|> string "or" $> Or
<?> "logic operator (and, or)"
pLogicSingleVal :: Parser SingleVal
pLogicSingleVal = try (pQuotedValue <* notFollowedBy (noneOf ",)")) <|> try pPgArray <|> (toS <$> many (noneOf ",)"))
where
pPgArray :: Parser Text
pPgArray = do
a <- string "{"
b <- many (noneOf "{}")
c <- string "}"
pure (toS $ a ++ b ++ c)
pLogicPath :: Parser (EmbedPath, Text)
pLogicPath = do
path <- pFieldName `sepBy1` pDelimiter
let op = last path
notOp = "not." <> op
return (filter (/= "not") (init path), if "not" `elem` path then notOp else op)
pColumns :: Parser [FieldName]
pColumns = pFieldName `sepBy1` lexeme (char ',')
mapError :: Either ParseError a -> Either ApiRequestError a
mapError = mapLeft translateError
where
translateError e =
ParseRequestError message details
where
message = show $ errorPos e
details = strip $ replace "\n" " " $ toS
$ showErrorMessages "or" "unknown parse error" "expecting" "unexpected" "end of input" (errorMessages e)
| steve-chavez/postgrest | src/PostgREST/Request/Parsers.hs | mit | 9,983 | 0 | 20 | 2,565 | 3,228 | 1,673 | 1,555 | -1 | -1 |
module Handler.Repeat (
repeatHandler
) where
import Handler
import ByteStringTools
import Network.Socket.ByteString (sendAllTo)
repeatHandler :: HandlerFunc
repeatHandler sock addr pkt = do
putStrLn ("From " ++ show addr ++ ": " ++ show (lazyToStrictBS pkt))
sendAllTo sock (lazyToStrictBS pkt) addr
| stnma7e/scim_serv | src/Handler/Repeat.hs | mit | 311 | 2 | 13 | 50 | 97 | 50 | 47 | 9 | 1 |
-- A nifty animated fractal of a tree, superimposed on a background
-- of three red rectangles.
import Graphics.Gloss
main :: IO ()
main
= animate (InWindow "Zen" (800, 600) (5, 5))
(greyN 0.2)
frame
-- Produce one frame of the animation.
frame :: Float -> Picture
frame timeS
= Pictures
-- the red rectangles
[ Translate 0 150 backRec
, Translate 0 0 backRec
, Translate 0 (-150) backRec
-- the tree
, Translate 0 (-150) $ treeFrac 7 timeS
]
-- One of the red backing rectangles, with a white outline.
backRec :: Picture
backRec
= Pictures
[ Color red (rectangleSolid 400 100)
, Color white (rectangleWire 400 100) ]
-- The color for the outline of the tree's branches.
treeOutline :: Color
treeOutline = makeColor 0.3 0.3 1.0 1.0
-- The color for the shading of the tree's branches.
-- The Alpha here is set to 0.5 so the branches are partly transparent.
treeColor :: Color
treeColor = makeColor 0.0 1.0 0.0 0.5
-- The tree fractal.
-- The position of the branches changes depending on the animation time
-- as well as the iteration number of the fractal.
treeFrac :: Int -> Float -> Picture
treeFrac 0 timeS = Blank
treeFrac n timeS
= Pictures
[ Color treeColor $ rectangleUpperSolid 20 300
, Color treeOutline $ rectangleUpperWire 20 300
, Translate 0 30
$ Rotate (200 * sin timeS / (fromIntegral n) )
$ Scale 0.9 0.9
$ treeFrac (n-1) timeS
, Translate 0 70
$ Rotate (-200 * sin timeS / (fromIntegral n))
$ Scale 0.8 0.8
$ treeFrac (n-1) timeS
]
| gscalzo/HaskellTheHardWay | gloss-try/gloss-master/gloss-examples/picture/Zen/Main.hs | mit | 1,536 | 34 | 14 | 352 | 450 | 232 | 218 | 36 | 1 |
module ISO where
import Control.Monad
import Data.Void
-- The type `Void` has no values.
-- So it is impossible to construct a `Void`,
-- unless you use undefined, error, unsafeCoerce, infinite recursion, etc.
-- And there is a function
-- absurd :: Void -> a
-- that gets any value out of a `Void`.
-- We can promise this because a `Void` can never exist in the first place.
-- So, when are two types, `a` and `b`, considered equal?
-- One definition might be: it is possible to go from `a` to `b`,
-- and from `b` to `a`,
-- and making the round trip should leave you with the same value.
-- Unfortunately it is virtually impossible to test this in Haskell.
-- This is called an Isomorphism (the round-trip laws are spelled out right below the type).
type ISO a b = (a -> b, b -> a)
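-- The round-trip laws, spelled out as plain predicates (an illustrative
-- sketch; these helper names are not part of the kata):
roundTripL :: Eq a => ISO a b -> a -> Bool
roundTripL (ab, ba) a = ba (ab a) == a

roundTripR :: Eq b => ISO a b -> b -> Bool
roundTripR (ab, ba) b = ab (ba b) == b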
-- given ISO a b, we can go from a to b
substL :: ISO a b -> (a -> b)
substL = fst
-- and vice versa
substR :: ISO a b -> (b -> a)
substR = snd
-- There can be more than one ISO a b
isoBool :: ISO Bool Bool
isoBool = (id, id)
isoBoolNot :: ISO Bool Bool
isoBoolNot = (not, not)
-- isomorphism is reflexive
refl :: ISO a a
refl = (id, id)
-- isomorphism is symmetric
symm :: ISO a b -> ISO b a
symm (x, y) = (y, x)
-- isomorphism is transitive
trans :: ISO a b -> ISO b c -> ISO a c
trans (x, x') (y, y') = (y . x, x' . y')
-- We can combine isomorphism:
isoTuple :: ISO a b -> ISO c d -> ISO (a, c) (b, d)
isoTuple (ab, ba) (cd, dc) =
(\(a, c) -> (ab a, cd c), \(b, d) -> (ba b, dc d))
isoList :: ISO a b -> ISO [a] [b]
isoList (x, y) = (map x, map y)
isoMaybe :: ISO a b -> ISO (Maybe a) (Maybe b)
isoMaybe (ab, ba) = (liftM ab, liftM ba)
isoEither :: ISO a b -> ISO c d -> ISO (Either a c) (Either b d)
isoEither (ab, ba) (cd, dc) = (f, g)
where
f (Left a) = Left $ ab a
f (Right c) = Right $ cd c
g (Left b) = Left $ ba b
g (Right d) = Right $ dc d
isoFunc :: ISO a b -> ISO c d -> ISO (a -> c) (b -> d)
isoFunc (ab, ba) (cd, dc) = (\f -> cd . f . ba, \f -> dc . f . ab)
-- Going another way is hard (and is generally impossible)
isoUnMaybe :: ISO (Maybe a) (Maybe b) -> ISO a b
-- Remember, for all valid ISOs, converting and converting back
-- gives you the original value.
-- You need this to prove some cases are impossible.
isoUnMaybe (ab, ba) = (f, g)
where
f x = case ab (Just x) of
Just y -> y
Nothing -> case ab Nothing of
Nothing -> error "impossible"
Just y -> y
g x = case ba (Just x) of
Just y -> y
Nothing -> case ba Nothing of
Nothing -> error "impossible"
Just y -> y
-- We cannot have
-- isoUnEither :: ISO (Either a b) (Either c d) -> ISO a c -> ISO b d.
-- Note that we have
isoEU :: ISO (Either [()] ()) (Either [()] Void)
isoEU = (f, g)
where
    f (Right ()) = Left []
    f (Left x) = Left $ () : x
    g (Left []) = Right ()
    g (Left (_:xs)) = Left xs
    g (Right v) = absurd v  -- the Right branch carries a Void, so it can never occur
-- where (), the empty tuple, has 1 value, and Void has 0 values.
-- If we had isoUnEither,
-- we would have ISO () Void by calling isoUnEither isoEU.
-- That is impossible, since we could then get a Void by applying substL to ISO () Void.
-- So it is impossible to have isoUnEither.
-- And we have isomorphism on isomorphism!
isoSymm :: ISO (ISO a b) (ISO b a)
isoSymm = (f, g)
where
f (ab, ba) = (ba, ab)
g (ab, ba) = (ba, ab)
--------------------------------------------------------------------
-- Sometimes, we can treat a Type as a Number:
-- if a Type t has n distinct values, its Number is n.
-- This is formally called cardinality.
-- See https://en.wikipedia.org/wiki/Cardinality
-- Void has a cardinality of 0 (we will abbreviate this to: Void is 0).
-- () is 1.
-- Bool is 2.
-- Maybe a is 1 + a.
-- We will be using Peano arithmetic, so we will write it as S a.
-- https://en.wikipedia.org/wiki/Peano_axioms
-- Either a b is a + b.
-- (a, b) is a * b.
-- a -> b is b ^ a. Try counting (() -> Bool) and (Bool -> ()); see the
-- worked count just below.
-- Algebraic data types got the name because
-- they satisfy a lot of algebraic rules under isomorphism
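-- Worked count for the exponent rule above (illustrative helpers, not part
-- of the kata): |() -> Bool| = 2^1 = 2 and |Bool -> ()| = 1^2 = 1.
unitToBools :: [() -> Bool]
unitToBools = [const False, const True]

boolToUnit :: Bool -> ()
boolToUnit = const ()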
-- a = b -> c = d -> a * c = b * d
isoProd :: ISO a b -> ISO c d -> ISO (a, c) (b, d)
isoProd = isoTuple
-- a = b -> c = d -> a + c = b + d
isoPlus :: ISO a b -> ISO c d -> ISO (Either a c) (Either b d)
isoPlus = isoEither
-- a = b -> S a = S b
isoS :: ISO a b -> ISO (Maybe a) (Maybe b)
isoS = isoMaybe
-- a = b -> c = d -> c ^ a = d ^ b
isoPow :: ISO a b -> ISO c d -> ISO (a -> c) (b -> d)
isoPow = isoFunc
-- a + b = b + a
plusComm :: ISO (Either a b) (Either b a)
plusComm = (f, g)
where
f (Left x) = Right x
f (Right x) = Left x
g (Left x) = Right x
g (Right x) = Left x
-- a + b + c = a + (b + c)
plusAssoc :: ISO (Either (Either a b) c) (Either a (Either b c))
plusAssoc = (f, g)
where
f (Left (Left x)) = Left x
f (Left (Right x)) = Right $ Left x
f (Right x) = Right $ Right x
g (Left x) = Left $ Left x
g (Right (Left x)) = Left $ Right x
g (Right (Right x)) = Right x
-- a * b = b * a
multComm :: ISO (a, b) (b, a)
multComm = (f, f)
where
f (x, y) = (y, x)
-- a * b * c = a * (b * c)
multAssoc :: ISO ((a, b), c) (a, (b, c))
multAssoc = (f, g)
where
f ((x, y), z) = (x, (y, z))
g (x, (y, z)) = ((x, y), z)
-- dist :: a * (b + c) = a * b + a * c
dist :: ISO (a, (Either b c)) (Either (a, b) (a, c))
dist = (f, g)
where
f (x, Left y) = Left (x, y)
f (x, Right y) = Right (x, y)
g (Left (x, y)) = (x, Left y)
g (Right (x, y)) = (x, Right y)
-- (c ^ b) ^ a = c ^ (a * b)
curryISO :: ISO (a -> b -> c) ((a, b) -> c)
curryISO = (f, g)
where
f x = \(a, b) -> x a b
g x = \a b -> x (a, b)
-- 1 = S O (we are using peano arithmetic)
-- https://en.wikipedia.org/wiki/Peano_axioms
one :: ISO () (Maybe Void)
one = (const Nothing, const ())
-- 2 = S (S O)
two :: ISO Bool (Maybe (Maybe Void))
two = (f, g)
where
    f False = Nothing
    f True = Just Nothing
    g Nothing = False
    g (Just Nothing) = True
    g (Just (Just v)) = absurd v  -- the inner value is a Void, so it can never occur
-- O + b = b
plusO :: ISO (Either Void b) b
plusO = (f, g)
where
f (Left x) = absurd x -- absurd :: Void -> a
f (Right x) = x
g x = Right x
-- S a + b = S (a + b)
plusS :: ISO (Either (Maybe a) b) (Maybe (Either a b))
plusS = (f, g)
where
f (Left (Just x)) = Just (Left x)
f (Left Nothing) = Nothing
f (Right x) = Just (Right x)
g (Just (Left x)) = Left (Just x)
g Nothing = Left Nothing
g (Just (Right x)) = Right x
-- 1 + b = S b
plusSO :: ISO (Either () b) (Maybe b)
plusSO = isoPlus one refl `trans` plusS `trans` isoS plusO
-- O * a = O
multO :: ISO (Void, a) Void
multO = (f, g)
where
f (x, a) = x
g x = (x, absurd x)
-- S a * b = b + a * b
multS :: ISO (Maybe a, b) (Either b (a, b))
multS = (f, g)
where
f (Nothing, b) = Left b
f (Just a, b) = Right (a, b)
g (Left b) = (Nothing, b)
g (Right (a, b)) = (Just a, b)
-- 1 * b = b
multSO :: ISO ((), b) b
multSO =
isoProd one refl `trans`
multS `trans`
isoPlus refl multO `trans`
plusComm `trans`
plusO
-- a ^ O = 1
powO :: ISO (Void -> a) ()
powO = (const (), const absurd)
-- a ^ (S b) = a * (a ^ b)
powS :: ISO (Maybe b -> a) (a, b -> a)
powS = (f, g)
where
f x = (x Nothing, \y -> x (Just y))
g (x, y) = z
where
z Nothing = x
z (Just a) = y a
-- a ^ 1 = a
-- Go the hard way (like multSO, plusSO)
-- to prove that you really get what is going on!
powSO :: ISO (() -> a) a
powSO = (f, g)
where
f x = x ()
g x = const x
-- Here's a trick:
-- replace undefined with the rhs of the comment on the previous line.
-- When you're not sure what to fill in for a value,
-- leave it as a _
-- GHC will then say something like
-- "Found hole `_' with type: ISO (() -> a) (Maybe b0 -> a0)"
-- so you can immediately see what type of value is needed.
-- This process can be repeated indefinitely -
-- for example, you might replace `_` with `isoFunc _ _`
-- so GHC hints you at a more specific type.
-- This is especially useful if you have a complex type.
-- See https://wiki.haskell.org/GHC/Typed_holes
-- and "stepwise refinement" for more details.
| delta4d/codewars | kata/algebraic-isomorphism/ISO.hs | mit | 8,028 | 0 | 13 | 2,295 | 3,002 | 1,633 | 1,369 | 146 | 5 |
#!/usr/bin/env stack
{- stack
runghc
--package shakers
-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
import Development.Shakers
-- | Main entry point.
--
main :: IO ()
main = shakeMain $ do
let pats =
[ "stack.yaml"
, "Shakefile.hs"
, "main//*.hs"
, "src//*.hs"
]
pats' = delete "stack.yaml" pats
-- | Haskell rules.
--
hsRules "." pats'
-- | Cabal rules.
--
cabalRules "." "wolf.cabal"
-- | Stack rules.
--
stackRules "." pats
-- | Default things to run.
--
want [ "build-error", "lint" ]
| swift-nav/wolf | Shakefile.hs | mit | 605 | 0 | 11 | 174 | 106 | 59 | 47 | 15 | 1 |
module Trace (traceJSON) where
import qualified Data.Aeson as JSON
import qualified Data.Text as Text
import Debug.Trace
import qualified Data.Text.Encoding as Text
import qualified Data.ByteString as BS
import qualified Data.ByteString.Lazy as LBS
traceJSON :: (JSON.ToJSON a) => a -> b -> b
traceJSON subject result =
trace (Text.unpack
$ Text.decodeUtf8
$ BS.concat
$ LBS.toChunks
$ JSON.encode subject)
result
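-- Usage sketch (illustrative): traceJSON ("answer" :: String, 42 :: Int) expr
-- emits the JSON encoding ["answer",42] via Debug.Trace and then returns expr.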
| IreneKnapp/ozweb | Haskell/Trace.hs | mit | 468 | 0 | 11 | 111 | 127 | 77 | 50 | 15 | 1 |
{-# LANGUAGE OverloadedStrings #-}
module Joebot.Plugins.Steam.Util where
import qualified Data.ByteString.Lazy as LBS
import qualified Data.Text as T
import Network.HTTP.Conduit
import Network
import System.IO
import Data.Monoid
import Data.Aeson
import Data.Aeson.Types
import qualified Data.Map as M
import qualified Data.Vector as V
import Control.Monad
import Control.Monad.State
import Joebot.Plugins.Steam.Types
getUserJson :: T.Text -> T.Text -> IO LBS.ByteString
getUserJson sid apikey = do
let url = "http://api.steampowered.com/ISteamUser/GetPlayerSummaries/"
<> "v0002/?key=" <> apikey
<> "&steamids=" <> sid
request' <- parseUrl $ T.unpack url
let request = request' { checkStatus = \ _ _ _ -> Nothing }
json <- withManager $ \manager -> do
res <- httpLbs request manager
return $ responseBody res
return json
headMaybe [] = Nothing
headMaybe (x:xs) = Just x
parseUserJson :: LBS.ByteString -> Maybe User
parseUserJson json = do
ast <- decode' json
usrs <- flip parseMaybe ast $ \obj -> do
res <- obj .: "response"
usrs <- res .: "players"
usrs' <- mapM parseUserStats usrs
return usrs'
headMaybe usrs
parseUserStats obj = do
sid <- obj .: "steamid"
pn <- obj .: "personaname"
pst <- obj .: "personastate"
g <- obj .:? "gameextrainfo"
return $ User sid pn pst g
saveUsers :: Users -> FilePath -> IO ()
saveUsers usrs saveLoc = do
let json = encode usrs
LBS.writeFile saveLoc json
loadUsers :: FilePath -> IO (Maybe Users)
loadUsers saveLoc = do
usrs <- LBS.readFile saveLoc
let json = decode' usrs
return json
| joeschmo/joebot2 | src/Joebot/Plugins/Steam/Util.hs | mit | 1,619 | 0 | 13 | 329 | 534 | 268 | 266 | 52 | 1 |
module BinToDecimal where
import Data.Char (digitToInt)
import Data.List (foldl')
binToDec :: String -> Int
binToDec = foldl' (\acc x -> acc * 2 + digitToInt x) 0
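-- e.g. (illustrative): binToDec "1101" == 13,
-- i.e. (((0*2+1)*2+1)*2+0)*2+1 by Horner's rule.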
| cojoj/Codewars | Haskell/Codewars.hsproj/BinToDecimal.hs | mit | 168 | 0 | 9 | 32 | 64 | 36 | 28 | 5 | 1 |
{-# LANGUAGE OverlappingInstances, TemplateHaskell, DeriveDataTypeable, StandaloneDeriving #-}
module Syntax.Data where
import Syntax.Syntax
import Syntax.Checker
import Syntax.Generator
import Autolib.ToDoc
import Autolib.Reader
import Autolib.Size
import Data.Typeable
deriving instance Ord Graph
deriving instance Eq Frequencies
deriving instance Ord Frequencies
deriving instance Eq GeneratorConfig
deriving instance Ord GeneratorConfig
data QuizConfig = QuizConfig
{ quizFeedback :: Bool
, seed :: Int
, quizExpectedNumberOfWords :: Int
, generatorConfig :: GeneratorConfig
}
deriving ( Eq, Ord, Typeable)
data Config = Config
{ feedback :: Bool
, expectedNumberOfWords :: Int
, language :: Language
}
deriving ( Eq, Ord, Typeable)
data Solution = Solution [String]
deriving ( Eq, Ord, Typeable, Read, Show )
$(derives [makeReader, makeToDoc] [''QuizConfig,''Config,''Solution,''Graph, ''Frequencies,''GeneratorConfig])
instance Size Config where size _ = 0
instance Size Solution where size _ = 0
| Erdwolf/autotool-bonn | src/Syntax/Data.hs | gpl-2.0 | 1,034 | 0 | 9 | 157 | 287 | 162 | 125 | 30 | 0 |
--------------------------------------------------------------------------------
-- |
-- Module : Graphics.Rendering.OpenGL.GL.VertexArrays
-- Copyright : (c) Sven Panne 2002-2009
-- License : BSD-style (see the file libraries/OpenGL/LICENSE)
--
-- Maintainer : sven.panne@aedion.de
-- Stability : stable
-- Portability : portable
--
-- This module corresponds to section 2.8 (Vertex Arrays) of the OpenGL 2.1
-- specs.
--
--------------------------------------------------------------------------------
module Graphics.Rendering.OpenGL.GL.VertexArrays (
-- * Describing Data for the Arrays
NumComponents, DataType(..), Stride, VertexArrayDescriptor(..),
-- * Specifying Data for the Arrays
Capability(..),
ClientArrayType(..), arrayPointer,
InterleavedArrays(..), interleavedArrays,
-- * Enabling Arrays
clientState, clientActiveTexture,
-- * Dereferencing and Rendering
ArrayIndex, NumArrayIndices, NumIndexBlocks,
arrayElement, drawArrays, multiDrawArrays, drawElements, multiDrawElements,
drawRangeElements, maxElementsVertices, maxElementsIndices, lockArrays,
primitiveRestartIndex, primitiveRestartIndexNV,
-- * Generic Vertex Attribute Arrays
vertexAttribPointer, vertexAttribArray
) where
import Data.StateVar
import Foreign.Marshal.Alloc
import Foreign.Ptr
import Foreign.Storable
import Graphics.Rendering.OpenGL.GL.Capability
import Graphics.Rendering.OpenGL.GL.DataType
import Graphics.Rendering.OpenGL.GL.GLboolean
import Graphics.Rendering.OpenGL.GL.PrimitiveMode
import Graphics.Rendering.OpenGL.GL.QueryUtils
import Graphics.Rendering.OpenGL.GL.Texturing.TextureUnit
import Graphics.Rendering.OpenGL.GL.VertexSpec
import Graphics.Rendering.OpenGL.GLU.ErrorsInternal
import Graphics.Rendering.OpenGL.Raw.ARB.Compatibility (
glArrayElement, glClientActiveTexture, glColorPointer, glDisableClientState,
glEdgeFlagPointer, glEnableClientState, glFogCoordPointer, glIndexPointer,
glInterleavedArrays, glNormalPointer, glSecondaryColorPointer,
glTexCoordPointer, glVertexPointer, gl_C3F_V3F, gl_C4F_N3F_V3F, gl_C4UB_V2F,
gl_C4UB_V3F, gl_COLOR_ARRAY, gl_COLOR_ARRAY_POINTER, gl_EDGE_FLAG_ARRAY,
gl_EDGE_FLAG_ARRAY_POINTER, gl_FEEDBACK_BUFFER_POINTER, gl_FOG_COORD_ARRAY,
gl_FOG_COORD_ARRAY_POINTER, gl_INDEX_ARRAY, gl_INDEX_ARRAY_POINTER,
gl_N3F_V3F, gl_NORMAL_ARRAY, gl_NORMAL_ARRAY_POINTER,
gl_SECONDARY_COLOR_ARRAY, gl_SECONDARY_COLOR_ARRAY_POINTER,
gl_SELECTION_BUFFER_POINTER, gl_T2F_C3F_V3F, gl_T2F_C4F_N3F_V3F,
gl_T2F_C4UB_V3F, gl_T2F_N3F_V3F, gl_T2F_V3F, gl_T4F_C4F_N3F_V4F, gl_T4F_V4F,
gl_TEXTURE_COORD_ARRAY, gl_TEXTURE_COORD_ARRAY_POINTER, gl_V2F, gl_V3F,
gl_VERTEX_ARRAY, gl_VERTEX_ARRAY_POINTER )
import Graphics.Rendering.OpenGL.Raw.ARB.MatrixPalette (
gl_MATRIX_INDEX_ARRAY, gl_MATRIX_INDEX_ARRAY_POINTER )
import Graphics.Rendering.OpenGL.Raw.ARB.VertexBlend ( gl_WEIGHT_ARRAY_POINTER )
import Graphics.Rendering.OpenGL.Raw.Core31
import Graphics.Rendering.OpenGL.Raw.EXT.CompiledVertexArray
import Graphics.Rendering.OpenGL.Raw.NV.PrimitiveRestart
--------------------------------------------------------------------------------
type NumComponents = GLint
type Stride = GLsizei
data VertexArrayDescriptor a =
VertexArrayDescriptor !NumComponents !DataType !Stride !(Ptr a)
deriving ( Eq, Ord, Show )
noVertexArrayDescriptor :: VertexArrayDescriptor a
noVertexArrayDescriptor = VertexArrayDescriptor 0 Byte 0 nullPtr
--------------------------------------------------------------------------------
data ClientArrayType =
VertexArray
| NormalArray
| ColorArray
| IndexArray
| TextureCoordArray
| EdgeFlagArray
| FogCoordArray
| SecondaryColorArray
| MatrixIndexArray
deriving ( Eq, Ord, Show )
marshalClientArrayType :: ClientArrayType -> GLenum
marshalClientArrayType x = case x of
VertexArray -> gl_VERTEX_ARRAY
NormalArray -> gl_NORMAL_ARRAY
ColorArray -> gl_COLOR_ARRAY
IndexArray -> gl_INDEX_ARRAY
TextureCoordArray -> gl_TEXTURE_COORD_ARRAY
EdgeFlagArray -> gl_EDGE_FLAG_ARRAY
FogCoordArray -> gl_FOG_COORD_ARRAY
SecondaryColorArray -> gl_SECONDARY_COLOR_ARRAY
MatrixIndexArray -> gl_MATRIX_INDEX_ARRAY
-- Hmmm...
clientArrayTypeToEnableCap :: ClientArrayType -> EnableCap
clientArrayTypeToEnableCap x = case x of
VertexArray -> CapVertexArray
NormalArray -> CapNormalArray
ColorArray -> CapColorArray
IndexArray -> CapIndexArray
TextureCoordArray -> CapTextureCoordArray
EdgeFlagArray -> CapEdgeFlagArray
FogCoordArray -> CapFogCoordArray
SecondaryColorArray -> CapSecondaryColorArray
MatrixIndexArray -> CapMatrixIndexArray
--------------------------------------------------------------------------------
arrayPointer :: ClientArrayType -> StateVar (VertexArrayDescriptor a)
arrayPointer t = case t of
VertexArray -> vertexPointer
NormalArray -> normalPointer
ColorArray -> colorPointer
IndexArray -> indexPointer
TextureCoordArray -> texCoordPointer
EdgeFlagArray -> edgeFlagPointer
FogCoordArray -> fogCoordPointer
SecondaryColorArray -> secondaryColorPointer
MatrixIndexArray ->
makeStateVar
(do recordInvalidEnum ; return noVertexArrayDescriptor)
(const recordInvalidEnum)
check :: Bool -> IO () -> IO ()
check flag val = if flag then val else recordInvalidValue
--------------------------------------------------------------------------------
vertexPointer :: StateVar (VertexArrayDescriptor a)
vertexPointer = makeStateVar getVertexPointer setVertexPointer
getVertexPointer :: IO (VertexArrayDescriptor a)
getVertexPointer = do
n <- getInteger1 id GetVertexArraySize
d <- getEnum1 unmarshalDataType GetVertexArrayType
s <- getInteger1 fromIntegral GetVertexArrayStride
p <- getPointer VertexArrayPointer
return $ VertexArrayDescriptor n d s p
setVertexPointer :: VertexArrayDescriptor a -> IO ()
setVertexPointer (VertexArrayDescriptor n d s p) =
glVertexPointer n (marshalDataType d) s p
--------------------------------------------------------------------------------
normalPointer :: StateVar (VertexArrayDescriptor a)
normalPointer = makeStateVar getNormalPointer setNormalPointer
getNormalPointer :: IO (VertexArrayDescriptor a)
getNormalPointer = do
d <- getEnum1 unmarshalDataType GetNormalArrayType
s <- getInteger1 fromIntegral GetNormalArrayStride
p <- getPointer NormalArrayPointer
return $ VertexArrayDescriptor 3 d s p
setNormalPointer :: VertexArrayDescriptor a -> IO ()
setNormalPointer (VertexArrayDescriptor n d s p) =
check (n == 3) $ glNormalPointer (marshalDataType d) s p
--------------------------------------------------------------------------------
colorPointer :: StateVar (VertexArrayDescriptor a)
colorPointer = makeStateVar getColorPointer setColorPointer
getColorPointer :: IO (VertexArrayDescriptor a)
getColorPointer = do
n <- getInteger1 id GetColorArraySize
d <- getEnum1 unmarshalDataType GetColorArrayType
s <- getInteger1 fromIntegral GetColorArrayStride
p <- getPointer ColorArrayPointer
return $ VertexArrayDescriptor n d s p
setColorPointer :: VertexArrayDescriptor a -> IO ()
setColorPointer (VertexArrayDescriptor n d s p) =
check (n == 3 || n == 4) $ glColorPointer n (marshalDataType d) s p
--------------------------------------------------------------------------------
indexPointer :: StateVar (VertexArrayDescriptor a)
indexPointer = makeStateVar getIndexPointer setIndexPointer
getIndexPointer :: IO (VertexArrayDescriptor a)
getIndexPointer = do
d <- getEnum1 unmarshalDataType GetIndexArrayType
s <- getInteger1 fromIntegral GetIndexArrayStride
p <- getPointer IndexArrayPointer
return $ VertexArrayDescriptor 1 d s p
setIndexPointer :: VertexArrayDescriptor a -> IO ()
setIndexPointer (VertexArrayDescriptor n d s p) =
check (n == 1) $ glIndexPointer (marshalDataType d) s p
--------------------------------------------------------------------------------
texCoordPointer :: StateVar (VertexArrayDescriptor a)
texCoordPointer = makeStateVar getTexCoordPointer setTexCoordPointer
getTexCoordPointer :: IO (VertexArrayDescriptor a)
getTexCoordPointer = do
n <- getInteger1 id GetTextureCoordArraySize
d <- getEnum1 unmarshalDataType GetTextureCoordArrayType
s <- getInteger1 fromIntegral GetTextureCoordArrayStride
p <- getPointer TextureCoordArrayPointer
return $ VertexArrayDescriptor n d s p
setTexCoordPointer :: VertexArrayDescriptor a -> IO ()
setTexCoordPointer (VertexArrayDescriptor n d s p) =
glTexCoordPointer n (marshalDataType d) s p
--------------------------------------------------------------------------------
edgeFlagPointer :: StateVar (VertexArrayDescriptor a)
edgeFlagPointer = makeStateVar getEdgeFlagPointer setEdgeFlagPointer
getEdgeFlagPointer :: IO (VertexArrayDescriptor a)
getEdgeFlagPointer = do
s <- getInteger1 fromIntegral GetEdgeFlagArrayStride
p <- getPointer EdgeFlagArrayPointer
return $ VertexArrayDescriptor 1 UnsignedByte s p
setEdgeFlagPointer :: VertexArrayDescriptor a -> IO ()
setEdgeFlagPointer (VertexArrayDescriptor n d s p) =
check (n == 1 && d == UnsignedByte) $ glEdgeFlagPointer s p
--------------------------------------------------------------------------------
fogCoordPointer :: StateVar (VertexArrayDescriptor a)
fogCoordPointer = makeStateVar getFogCoordPointer setFogCoordPointer
getFogCoordPointer :: IO (VertexArrayDescriptor a)
getFogCoordPointer = do
d <- getEnum1 unmarshalDataType GetFogCoordArrayType
s <- getInteger1 fromIntegral GetFogCoordArrayStride
p <- getPointer FogCoordArrayPointer
return $ VertexArrayDescriptor 1 d s p
setFogCoordPointer :: VertexArrayDescriptor a -> IO ()
setFogCoordPointer (VertexArrayDescriptor n d s p) =
check (n == 1) $ glFogCoordPointer (marshalDataType d) s p
--------------------------------------------------------------------------------
secondaryColorPointer :: StateVar (VertexArrayDescriptor a)
secondaryColorPointer =
makeStateVar getSecondaryColorPointer setSecondaryColorPointer
getSecondaryColorPointer :: IO (VertexArrayDescriptor a)
getSecondaryColorPointer = do
n <- getInteger1 id GetSecondaryColorArraySize
d <- getEnum1 unmarshalDataType GetSecondaryColorArrayType
s <- getInteger1 fromIntegral GetSecondaryColorArrayStride
p <- getPointer SecondaryColorArrayPointer
return $ VertexArrayDescriptor n d s p
setSecondaryColorPointer :: (VertexArrayDescriptor a) -> IO ()
setSecondaryColorPointer (VertexArrayDescriptor n d s p) =
glSecondaryColorPointer n (marshalDataType d) s p
--------------------------------------------------------------------------------
data InterleavedArrays =
V2f
| V3f
| C4ubV2f
| C4ubV3f
| C3fV3f
| N3fV3f
| C4fN3fV3f
| T2fV3f
| T4fV4f
| T2fC4ubV3f
| T2fC3fV3f
| T2fN3fV3f
| T2fC4fN3fV3f
| T4fC4fN3fV4f
deriving ( Eq, Ord, Show )
marshalInterleavedArrays :: InterleavedArrays -> GLenum
marshalInterleavedArrays x = case x of
V2f -> gl_V2F
V3f -> gl_V3F
C4ubV2f -> gl_C4UB_V2F
C4ubV3f -> gl_C4UB_V3F
C3fV3f -> gl_C3F_V3F
N3fV3f -> gl_N3F_V3F
C4fN3fV3f -> gl_C4F_N3F_V3F
T2fV3f -> gl_T2F_V3F
T4fV4f -> gl_T4F_V4F
T2fC4ubV3f -> gl_T2F_C4UB_V3F
T2fC3fV3f -> gl_T2F_C3F_V3F
T2fN3fV3f -> gl_T2F_N3F_V3F
T2fC4fN3fV3f -> gl_T2F_C4F_N3F_V3F
T4fC4fN3fV4f -> gl_T4F_C4F_N3F_V4F
--------------------------------------------------------------------------------
interleavedArrays :: InterleavedArrays -> Stride -> Ptr a -> IO ()
interleavedArrays = glInterleavedArrays . marshalInterleavedArrays
--------------------------------------------------------------------------------
clientState :: ClientArrayType -> StateVar Capability
clientState arrayType =
makeStateVar (getClientState arrayType) (setClientState arrayType)
getClientState :: ClientArrayType -> IO Capability
getClientState arrayType = get . makeCapability . clientArrayTypeToEnableCap $ arrayType
setClientState :: ClientArrayType -> Capability -> IO ()
setClientState arrayType val =
(if val == Enabled then glEnableClientState else glDisableClientState)
(marshalClientArrayType arrayType)
--------------------------------------------------------------------------------
clientActiveTexture :: StateVar TextureUnit
clientActiveTexture =
makeStateVar (getEnum1 unmarshalTextureUnit GetClientActiveTexture)
(glClientActiveTexture . marshalTextureUnit)
--------------------------------------------------------------------------------
type ArrayIndex = GLint
type NumArrayIndices = GLsizei
type NumIndexBlocks = GLsizei
--------------------------------------------------------------------------------
arrayElement :: ArrayIndex -> IO ()
arrayElement = glArrayElement
drawArrays :: PrimitiveMode -> ArrayIndex -> NumArrayIndices -> IO ()
drawArrays = glDrawArrays . marshalPrimitiveMode
multiDrawArrays ::
PrimitiveMode -> Ptr ArrayIndex -> Ptr NumArrayIndices -> NumIndexBlocks
-> IO ()
multiDrawArrays = glMultiDrawArrays . marshalPrimitiveMode
drawElements :: PrimitiveMode -> NumArrayIndices -> DataType -> Ptr a -> IO ()
drawElements m c = glDrawElements (marshalPrimitiveMode m) c . marshalDataType
multiDrawElements ::
PrimitiveMode -> Ptr NumArrayIndices -> DataType -> Ptr (Ptr a)
-> NumIndexBlocks -> IO ()
multiDrawElements m c =
glMultiDrawElements (marshalPrimitiveMode m) c . marshalDataType
drawRangeElements ::
PrimitiveMode -> (ArrayIndex, ArrayIndex) -> NumArrayIndices -> DataType
-> Ptr a -> IO ()
drawRangeElements m (s, e) c =
glDrawRangeElements (marshalPrimitiveMode m) (fromIntegral s)
(fromIntegral e) c . marshalDataType
maxElementsVertices :: GettableStateVar NumArrayIndices
maxElementsVertices = makeGettableStateVar (getSizei1 id GetMaxElementsVertices)
maxElementsIndices :: GettableStateVar NumArrayIndices
maxElementsIndices = makeGettableStateVar (getSizei1 id GetMaxElementsIndices)
--------------------------------------------------------------------------------
lockArrays :: StateVar (Maybe (ArrayIndex, NumArrayIndices))
lockArrays = makeStateVar getLockArrays setLockArrays
getLockArrays :: IO (Maybe (ArrayIndex, NumArrayIndices))
getLockArrays = do
count <- getInteger1 fromIntegral GetArrayElementLockCount
if count > 0
then do first <- getInteger1 id GetArrayElementLockFirst
return $ Just (first, count)
else return Nothing
setLockArrays :: Maybe (ArrayIndex, NumArrayIndices) -> IO ()
setLockArrays = maybe glUnlockArrays (uncurry glLockArrays)
--------------------------------------------------------------------------------
primitiveRestartIndex :: StateVar (Maybe ArrayIndex)
primitiveRestartIndex =
makeStateVarMaybe
(return CapPrimitiveRestart)
(getInteger1 id GetPrimitiveRestartIndex)
(glPrimitiveRestartIndex . fromIntegral)
--------------------------------------------------------------------------------
-- We almost could use makeStateVarMaybe below, but, alas, this is client state.
primitiveRestartIndexNV :: StateVar (Maybe ArrayIndex)
primitiveRestartIndexNV =
makeStateVar getPrimitiveRestartIndexNV setPrimitiveRestartIndexNV
getPrimitiveRestartIndexNV :: IO (Maybe ArrayIndex)
getPrimitiveRestartIndexNV = do
on <- getBoolean1 unmarshalGLboolean GetPrimitiveRestartNV
if on
then fmap Just $ getInteger1 fromIntegral GetPrimitiveRestartIndexNV
else return Nothing
setPrimitiveRestartIndexNV :: Maybe ArrayIndex -> IO ()
setPrimitiveRestartIndexNV maybeIdx = case maybeIdx of
Nothing -> glDisableClientState gl_PRIMITIVE_RESTART_NV
Just idx -> do glEnableClientState gl_PRIMITIVE_RESTART_NV
glPrimitiveRestartIndexNV (fromIntegral idx)
--------------------------------------------------------------------------------
data GetPointervPName =
VertexArrayPointer
| NormalArrayPointer
| ColorArrayPointer
| IndexArrayPointer
| TextureCoordArrayPointer
| EdgeFlagArrayPointer
| FogCoordArrayPointer
| SecondaryColorArrayPointer
| FeedbackBufferPointer
| SelectionBufferPointer
| WeightArrayPointer
| MatrixIndexArrayPointer
marshalGetPointervPName :: GetPointervPName -> GLenum
marshalGetPointervPName x = case x of
VertexArrayPointer -> gl_VERTEX_ARRAY_POINTER
NormalArrayPointer -> gl_NORMAL_ARRAY_POINTER
ColorArrayPointer -> gl_COLOR_ARRAY_POINTER
IndexArrayPointer -> gl_INDEX_ARRAY_POINTER
TextureCoordArrayPointer -> gl_TEXTURE_COORD_ARRAY_POINTER
EdgeFlagArrayPointer -> gl_EDGE_FLAG_ARRAY_POINTER
FogCoordArrayPointer -> gl_FOG_COORD_ARRAY_POINTER
SecondaryColorArrayPointer -> gl_SECONDARY_COLOR_ARRAY_POINTER
FeedbackBufferPointer -> gl_FEEDBACK_BUFFER_POINTER
SelectionBufferPointer -> gl_SELECTION_BUFFER_POINTER
WeightArrayPointer -> gl_WEIGHT_ARRAY_POINTER
MatrixIndexArrayPointer -> gl_MATRIX_INDEX_ARRAY_POINTER
--------------------------------------------------------------------------------
getPointer :: GetPointervPName -> IO (Ptr a)
getPointer n = alloca $ \buf -> do
glGetPointerv (marshalGetPointervPName n) buf
peek buf
--------------------------------------------------------------------------------
vertexAttribPointer :: AttribLocation -> StateVar (IntegerHandling, VertexArrayDescriptor a)
vertexAttribPointer location =
makeStateVar (getVertexAttribPointer_ location) (setVertexAttribPointer location)
getVertexAttribPointer_ :: AttribLocation -> IO (IntegerHandling, VertexArrayDescriptor a)
getVertexAttribPointer_ location = do
i <- getVertexAttribBoolean1 unmarshalGLboolean location GetVertexAttribArrayInteger
h <- if i
then return KeepIntegral
else do f <- getVertexAttribBoolean1 unmarshalGLboolean location GetVertexAttribArrayNormalized
return $ if f then ToNormalizedFloat else ToFloat
n <- getVertexAttribInteger1 id location GetVertexAttribArraySize
d <- getVertexAttribEnum1 unmarshalDataType location GetVertexAttribArrayType
s <- getVertexAttribInteger1 fromIntegral location GetVertexAttribArrayStride
p <- getVertexAttribPointer location VertexAttribArrayPointer
return (h, VertexArrayDescriptor n d s p)
setVertexAttribPointer :: AttribLocation -> (IntegerHandling, VertexArrayDescriptor a) -> IO ()
setVertexAttribPointer (AttribLocation location) (h, VertexArrayDescriptor n d s p) = case h of
ToFloat -> glVertexAttribPointer location n md (marshalGLboolean False) s p
ToNormalizedFloat -> glVertexAttribPointer location n md (marshalGLboolean True) s p
KeepIntegral -> glVertexAttribIPointer location n md s p
where md = marshalDataType d
--------------------------------------------------------------------------------
vertexAttribArray :: AttribLocation -> StateVar Capability
vertexAttribArray location =
makeStateVar (getVertexAttribArray location) (flip setVertexAttribArray location)
getVertexAttribArray :: AttribLocation -> IO Capability
getVertexAttribArray location =
getVertexAttribBoolean1 unmarshalCapability location GetVertexAttribArrayEnabled
setVertexAttribArray :: Capability -> AttribLocation -> IO ()
setVertexAttribArray Disabled (AttribLocation location) = glDisableVertexAttribArray location
setVertexAttribArray Enabled (AttribLocation location) = glEnableVertexAttribArray location
| ducis/haAni | hs/common/Graphics/Rendering/OpenGL/GL/VertexArrays.hs | gpl-2.0 | 19,413 | 0 | 12 | 2,625 | 3,860 | 2,004 | 1,856 | 366 | 14 |
module Reachability where
import Data.Maybe
import AST
import Util
unreachable :: CompilationUnit -> [Statement]
unreachable (Comp _ _ (CLS _ _ _ _ constructors _ methods _) _) =
let constructorDefinitions = mapMaybe constructorDefinition constructors
unreachableConstructorStatements = concat $ map (unreachableBlock True) constructorDefinitions
methodDefinitions = mapMaybe methodDefinition methods
unreachableMethodStatements = concat $ map (unreachableBlock True) methodDefinitions
in
unreachableConstructorStatements ++ unreachableMethodStatements
unreachable _ = []
unreachableBlock :: Bool -> StatementBlock -> [Statement]
unreachableBlock reachable block = case statements block of
[(Block sb)] -> unreachableBlock reachable sb
_ -> unreachableTest reachable $ statements block
-- Returns [] if it can complete normally, or [Statement] if a statement cannot complete
-- In most cases a statement completes IFF it is reachable
-- The case of checking reachability is the default, and rules are only in place for exceptions to the rule
unreachableTest :: Bool -> [Statement] -> [Statement]
unreachableTest reachable (x:xs) =
let
unreachables = case x of
(Block stmts) -> unreachableBlock reachable stmts
(Return _) -> [x]
(While expr stmts) -> case conditionConstant expr of
(Left _) -> unreachableBlock reachable stmts
(Right 0) -> x:statements stmts
(Right _) -> xs
(For _ (Just expr) _ stmts) -> case conditionConstant expr of
(Left _) -> unreachableBlock reachable stmts
(Right 0) -> x:statements stmts
(Right _) -> xs
(For _ Nothing _ _) -> xs
(If _ stmts Nothing) -> unreachableBlock reachable stmts
(If _ stmts (Just eStmts)) ->
let trueUnreach = unreachableBlock reachable stmts
falseUnreach = unreachableBlock reachable eStmts
in
if null trueUnreach then []
else if null falseUnreach then []
else if length trueUnreach > 0 then trueUnreach
else falseUnreach
_ -> if reachable then [] else [x]
completable = null unreachables
willReturn = willComplete [x]
in
if willReturn then xs
else unreachables ++ (unreachableTest completable xs)
unreachableTest reachable stmts = []
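-- Worked example (added for illustration; the source fragment below is a
-- hypothetical Joos method body, not taken from a test case):
--     if (c) { return 1; } else { return 2; }
--     x = 3;
-- willComplete holds for the if-statement because both branches return, so the
-- willReturn short-circuit above reports the trailing assignment as
-- unreachable without inspecting it any further.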
-- A non-void function is completable if all execution paths have a return statement
-- All void functions are completable
allCompletable :: CompilationUnit -> Bool
allCompletable (Comp _ _ (CLS _ _ _ _ _ _ methods _) _) =
let nonVoidMethods = filter (\x -> (typeName . methodVar $ x) /= TypeVoid) methods
methodDefinitions = mapMaybe methodDefinition nonVoidMethods
completableMethods = filter canCompleteBlockWithoutReturn methodDefinitions
in
(length completableMethods) > 0
allCompletable _ = False
completableBlock :: StatementBlock -> Bool
completableBlock block = willComplete $ statements block
-- True if all execution paths complete, false otherwise
willComplete :: [Statement] -> Bool
willComplete (x:xs) =
let
doesComplete = case x of
(Return _) -> True
(Block stmts) -> completableBlock stmts
(If _ stmts (Just eStmts)) ->
let trueWillComplete = completableBlock stmts
falseWillComplete = completableBlock eStmts
in trueWillComplete && falseWillComplete
_ -> False
in
doesComplete || willComplete xs
willComplete [] = False
canCompleteBlockWithoutReturn :: StatementBlock -> Bool
canCompleteBlockWithoutReturn block = canCompleteWithoutReturn $ statements block
canCompleteWithoutReturn :: [Statement] -> Bool
canCompleteWithoutReturn [] = True
canCompleteWithoutReturn ((While expr stmts):xs) = case conditionConstant expr of
(Left _) -> canCompleteWithoutReturn xs
(Right 0) -> canCompleteWithoutReturn xs
(Right _) -> False
canCompleteWithoutReturn ((If _ _ Nothing):xs) = canCompleteWithoutReturn xs
canCompleteWithoutReturn ((Return _):xs) = False
canCompleteWithoutReturn (x:xs) =
let
iCanCompleteWithoutReturn = case x of
(Block stmts) -> canCompleteBlockWithoutReturn stmts
(If _ stmts (Just eStmts)) ->
let trueCanCompleteWithoutReturn = canCompleteBlockWithoutReturn stmts
falseCanCompleteWithoutReturn = canCompleteBlockWithoutReturn eStmts
in trueCanCompleteWithoutReturn || falseCanCompleteWithoutReturn
_ -> True
in
iCanCompleteWithoutReturn && canCompleteWithoutReturn xs
|
yangsiwei880813/CS644
|
src/Reachability.hs
|
gpl-2.0
| 4,463
| 0
| 18
| 924
| 1,214
| 612
| 602
| 87
| 17
|
-- | Neuron module encapsulates behavior of a 'Neuron'
--
-- Some considerations for event driven simulation of SNN
--
-- * Given current 'state' of 'Neuron', it should be possible to predict the time at
-- which it will generate a spike (if any)
--
-- * For a synapse model with dynamics it is possible that the neuron fires in the
-- future, so such synapses should be part of the Neuron data type.
--
module Simulation.HSimSNN.Neuron where
import qualified Data.Vector as V
import qualified Simulation.HSimSNN.Spikes as SPK
-- | Data container for synaptic information related to a connection
data SynInfo = SynInfo {weight::Double, syntype::String}
deriving Show
-- | Neuron threshold
threshold = 1.0
-- | Neuron is defined by its state and time at which its state was last evaluated
-- The state of a neuron is defined as list of doubles
data Neuron = Neuron {state::V.Vector Double, tlastupdate::Double}
-- | String representation for Neuron
instance Show Neuron where
    show (Neuron st tl) = "Neuron (" ++ (show (V.toList st)) ++ " @ " ++ (show tl) ++ ")"
-- | Initializes a neuron with a given state at time 0
initNeuron st = Neuron (V.fromList st) 0 -- TODO: Hardcoding Never incorrect
-- | Returns the membrane potential of a neuron
vmem:: Neuron -> Double
vmem neuron = (V.head.state) neuron -- For now the state of a neuron is the first state variable
-- The below block of functions all effect the dynamics of the neuron
-- | Checks if the membrane potential of a neuron is above threshold value
aboveThreshold:: Neuron -> Bool
aboveThreshold neuron
| threshold > vmem neuron = False
| otherwise = True
-- | Check for threshold and reset neuron
-- Should be called with the simulation time and only when the neuron spikes
-- Perhaps this should be an internal/hidden function ?
-- Hardcoding threshold to 1.0 TODO: should parametrize somehow
resetNeuron:: Neuron -> Double -> Neuron
resetNeuron neuron t
|tlastupdate neuron > t = error "Neuron has already been updated to the future" -- for debugging
|otherwise = Neuron newstate t
where
newstate = V.map (*0) $ state neuron -- neuron dynamics
-- | Evaluate the next possible spike time of a neuron given its state at time t
--
-- This function is essentially what defines the dynamics of the neuron. (not really.. it depends on the dynamics though)
-- Currently the neuron receives a constant input current
-- Ideally this should be something users can define and pass at the top level
nextSpikeTime:: Neuron -> SPK.NextSpikeTime
nextSpikeTime neuron
|aboveThreshold neuron = SPK.At $ tlastupdate neuron
|otherwise = SPK.Never
-- -- |otherwise = SPK.At $(threshold-vmem neuron) + tlastupdate neuron
-- | Evaluate state of neuron at time t
-- Ideally used at the arrival of a spike or when the neuron spikes (when an
-- event occurred)
evaluateNeuronStateAtt:: Neuron -> Double -> Neuron
evaluateNeuronStateAtt neuron t
|t == (tlastupdate neuron) = neuron -- The neuron has already been updated
|t > (tlastupdate neuron) = Neuron newstate t
|otherwise = error "This neuron has already been updated to the future"
where
decayfact = exp ((tlastupdate neuron)-t) -- decay factor
newstate = V.map (*decayfact) $ state neuron -- neuron dynamics
-- | Apply a presynaptic spike to a neuron at time t
applySynapticSpikeToNeuron :: SynInfo -> Double -> Neuron -> Neuron
applySynapticSpikeToNeuron (SynInfo w typ) spktm neuron = Neuron newstate spktm
where
Neuron curstate _ = evaluateNeuronStateAtt neuron spktm
newstate = V.fromList [(V.head) curstate + w] V.++ ((V.tail) curstate)
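-- A standalone numeric sketch (added for illustration; it works on plain
-- Doubles instead of the Neuron type): the membrane potential decays
-- exponentially from its value at the last update and is then kicked by the
-- synaptic weight, mirroring evaluateNeuronStateAtt followed by
-- applySynapticSpikeToNeuron above.
decayThenKick :: Double -- ^ membrane potential at the last update
              -> Double -- ^ time of the last update
              -> Double -- ^ time of the incoming spike (>= last update)
              -> Double -- ^ synaptic weight
              -> Double
decayThenKick v tlast t w = v * exp (tlast - t) + w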
|
drwebb/HSimSNN
|
src/Simulation/HSimSNN/Neuron.hs
|
gpl-2.0
| 3,824
| 0
| 14
| 876
| 611
| 330
| 281
| 36
| 1
|
module Main where
import Data.Int
fib :: Int64 -> Int64
fib n = if n < 2 then 1 else (fib (n-1)) + (fib (n-2))
main =
print (fib 38)
|
uelis/intc
|
Examples/Comparison/fib1.hs
|
gpl-2.0
| 138
| 0
| 10
| 35
| 82
| 45
| 37
| 6
| 2
|
{-# LANGUAGE ScopedTypeVariables #-}
-----------------------------------------------------------------------------
-- |
-- Module : HEP.Automation.EventChain.LHEConn
-- Copyright : (c) 2012,2013 Ian-Woo Kim
--
-- License : BSD3
-- Maintainer : Ian-Woo Kim <ianwookim@gmail.com>
-- Stability : experimental
-- Portability : GHC
--
-- Connecting multiple LHE files
--
-----------------------------------------------------------------------------
module HEP.Automation.EventChain.LHEConn where
-- other package of others
import Control.Applicative ((<$>),(<*>))
import Control.Monad.Error hiding (mapM)
import Control.Monad.Identity (runIdentity,Identity(..))
import Control.Monad.State hiding (mapM)
import Data.Either
import Data.Foldable (foldr,foldrM)
import Data.Function (on)
import qualified Data.HashMap.Lazy as HM
import qualified Data.IntMap as IM
import Data.List (intercalate, sortBy)
import qualified Data.Map as M
import Data.Traversable
import Data.Vector.Storable ((!))
import qualified Numeric.LinearAlgebra as NL
import System.IO
-- other package of mine
import HEP.Parser.LHE.Type
import HEP.Util.Functions
-- this package
-- import HEP.Automation.EventChain.Print
import HEP.Automation.EventChain.Match
import HEP.Automation.EventChain.Type.Match
import HEP.Automation.EventChain.Type.Process
import HEP.Automation.EventChain.Type.Skeleton
import HEP.Automation.EventChain.Type.Spec
import HEP.Automation.EventChain.Util
-- prelude
import Prelude hiding (mapM,foldr)
import Debug.Trace
-- |
type Status = Int
-- |
getPDGID4ParticleID :: MatchedLHEventProcess p -> ParticleID -> Maybe PDGID
getPDGID4ParticleID ctxt ptl_id
| (not.null) filtered_in = Just . idup . snd . head $ filtered_in
| (not.null) filtered_out = Just . idup . snd . head $ filtered_out
| otherwise = Nothing
where idtuple = (,) <$> either id fst . fst <*> snd
lst_in = map idtuple . mlhev_incoming $ ctxt
lst_out = map idtuple . mlhev_outgoing $ ctxt
checkid = (== ptl_id) <$> fst
filtered_in = filter checkid lst_in
filtered_out = filter checkid lst_out
-- |
chainProcIdx :: (Monad m) =>
ProcessIndex
-> MatchedLHEventProcess p'
-> DecayID p
-> ErrorT String m ProcessIndex
chainProcIdx pidx mev dcy = do
let ptl_id = foremostParticleID dcy
case getPDGID4ParticleID mev ptl_id of
Nothing -> throwError "ParticleID not matched"
Just pdg_id -> return ((ptl_id,pdg_id):pidx)
-- |
matchFullDecay :: (Show p) =>
ContextEvent ProcessIndex -- ^ current context for mother
-> DecayID p
-> ProcessIndex
-> ErrorT String (State (ProcessMap [LHEvent])) (DecayFull ProcessIndex)
matchFullDecay ctxt (MkT _) ((iptl,ipdg):_) = return (MkT (iptl,ipdg))
matchFullDecay ctxt elem@(MkD dnode ds) pidx@((iptl,ipdg):_) = do
mevents <- HM.lookup pidx <$> lift get
case mevents of
Nothing -> throwError (show pidx ++ " process doesn't exist")
Just [] -> throwError (show pidx ++ " process has no more events")
Just (lhe:lhes) -> do
mev0 <- (ErrorT . return . runIdentity) (matchD ipdg elem lhe)
let mev = unMkMLHEPF . fmap (const pidx) . MkMLHEPF $ mev0
ptrip = findPTripletUsingPtlIDFrmOutPtls iptl momev
lxfrm = relLrntzXfrm ptrip momev
momprocid = (mlhev_procinfo.selfEvent) ctxt
dctxt = CEvent (olxfrm NL.<> lxfrm) (Just (momprocid,ptrip)) mev
modify (HM.adjust (const lhes) pidx)
mds <- mapM (\x->matchFullDecay dctxt x =<< chainProcIdx pidx mev x) ds
return (MkD ((iptl,ipdg),dctxt) mds)
where momev = selfEvent ctxt
olxfrm = absoluteContext ctxt
-- |
matchFullCross :: (Show p) =>
CrossID p
-> ErrorT String (State (ProcessMap [LHEvent]))
(CrossFull ProcessIndex)
matchFullCross g@(MkC _ (inc1,inc2) outs) = do
mevents <- HM.lookup [] <$> get
case mevents of
Nothing -> fail "root process doesn't exist"
Just [] -> fail "no more root events"
Just (lhe:lhes) -> do
mev0 <- (ErrorT . return . runIdentity) (matchX g lhe)
let mev = unMkMLHEPF . fmap (const []) . MkMLHEPF $ mev0
modify (HM.adjust (const lhes) [])
let xcontext = CEvent (NL.ident 4) Nothing mev
mi1 <- matchFullDecay xcontext inc1 =<< chainProcIdx [] mev inc1
mi2 <- matchFullDecay xcontext inc2 =<< chainProcIdx [] mev inc2
mos <- mapM (\x -> matchFullDecay xcontext x =<< chainProcIdx [] mev x) outs
return (MkC xcontext (mi1,mi2) mos)
-- |
adjustPtlInfosInMLHEvent :: ( PtlInfo -> PtlInfo
, (ParticleID,PtlInfo) -> PtlInfo)
-> MatchedLHEventProcess ProcessIndex
-> ParticleCoordMap
-> ([PtlInfo],[PtlInfo],[PtlInfo],ParticleCoordMap)
adjustPtlInfosInMLHEvent (f,g) mev mm = (map snd inc,map snd out,int,mm'')
where procid = mlhev_procinfo mev
inc = map stripping (mlhev_incoming mev)
out = map stripping (mlhev_outgoing mev)
int = map f (mlhev_intermediate mev)
insfunc x m = M.insert (procid,fst x) ((ptlid.snd) x) m
mm' = foldr insfunc mm inc
mm'' = foldr insfunc mm' out
stripping = ( (,) <$> fst <*> f . g )
. ( (,) <$> either id fst . fst <*> snd )
-- |
getAdjustFunc4IDMom :: LorentzRotation
-> PtlInfo
-> (ProcessIndex,PTriplet)
-> State (PtlID,Int,IM.IntMap PtlInfo,ParticleCoordMap) (PtlInfo -> PtlInfo,Int,IM.IntMap PtlInfo)
getAdjustFunc4IDMom lrot rpinfo (procid,PTriplet pid pcode opinfo) = do
(stid,stcol,rmap,stmm) <- get
let oid = idChange stid (ptlid rpinfo)
nid = maybe (error ("error in getAdjustFunc4IDMom: " ++ show (procid,pid) ++ "\n" ++ show stmm)) id (M.lookup (procid,pid) stmm)
opinfo2 = maybe (error "error in opinfo in getAdjustFun4IDMom") id (IM.lookup nid rmap)
rmap1 = IM.adjust unstabilize nid rmap
midadj = motherAdjustID (oid,nid)
(coloffset,colfunc) = colChangePair stcol (opinfo2,rpinfo)
idfunc = adjustIds (idChange stid) colfunc
return (adjustMom lrot.adjustSpin (opinfo,rpinfo).midadj.idfunc,coloffset,rmap1)
-- |
accumTotalEvent :: CrossFull ProcessIndex -> LHEvent
accumTotalEvent g =
let (_,_,result,_) = execState (traverse action . CrossF $ g)
(0,0, IM.empty :: IM.IntMap PtlInfo
, M.empty :: ParticleCoordMap )
result' = IM.elems result
sortedResult = sortBy (compare `on` ptlid) result'
evinfo = (mlhev_einfo . selfEvent . xnode) g
nptl = length sortedResult
nevinfo = evinfo { nup = nptl }
in LHEvent nevinfo sortedResult
where action cev = do
let (lrot,mmom,mev) = (absoluteContext cev, relativeContext cev, selfEvent cev)
pinfos = (getPInfos . mlhev_orig) mev
ptlids = map ptlid pinfos
icols = filter (/= 0) (concatMap ((\x -> [fst x, snd x]) . icolup ) pinfos)
maxid = maximum ptlids
maxicol = maximum icols
minicol = minimum icols
deltaicol = if null icols then 0 else maxicol - minicol
(stid,stcol,rmap,stmm) <- get
let rpinfo = (snd . head . mlhev_incoming ) mev
(change,coloffset,rmap1) <- maybe
(return (id,0,rmap))
(getAdjustFunc4IDMom lrot rpinfo)
mmom
let (ri,ro,rm,stmm') = adjustPtlInfosInMLHEvent (change,snd) mev stmm
kri = map ((,) <$> ptlid <*> id) ri
kro = map ((,) <$> ptlid <*> id) ro
krm = map ((,) <$> ptlid <*> id) rm
rmap2 = maybe (insertAll kri rmap1) (const rmap1) mmom
rmap3 = insertAll kro rmap2
rmap4 = insertAll krm rmap3
put ( stid+maxid-1
, stcol+deltaicol+1-coloffset
, rmap4
, stmm')
-- |
motherAdjustID :: (PtlID,PtlID) -> PtlInfo -> PtlInfo
motherAdjustID (oid,nid) = idAdj (\y -> if y == oid then nid else y)
|
wavewave/evchain
|
lib/HEP/Automation/EventChain/LHEConn.hs
|
gpl-3.0
| 8,674
| 0
| 21
| 2,634
| 2,646
| 1,415
| 1,231
| 158
| 3
|
module Language.Objection.CodeGen
where
import Control.Applicative
import Data.Int
import qualified Data.Map as M
import LLVM.Untyped.Core hiding (Module, Type)
import qualified LLVM.Untyped.Core as L (Module, Type)
import Language.Objection.SyntaxTree
type SymbolMap = M.Map String (Type, Value)
-- | Gets the corresponding LLVM type of a method. Currently doesn't include
-- the reference to the this object but that WILL be implemented later. Just
-- flat function.
-- typeMethod :: Method -> LLVM L.Type
-- typeMethod (Method _ _ retT params) = let (paramTypes, _) = unzip params
convertIntComparison :: ComparisonOperation -> IntComparison
convertIntComparison CEquals = IntEQ
convertIntComparison CGreater = IntSGT
convertIntComparison CGreaterEquals = IntSGE
convertIntComparison CLess = IntSLT
convertIntComparison CLessEquals = IntSLE
convertRealComparison :: ComparisonOperation -> RealComparison
convertRealComparison CEquals = RealOEQ
convertRealComparison CGreater = RealOGT
convertRealComparison CGreaterEquals = RealOGE
convertRealComparison CLess = RealOLT
convertRealComparison CLessEquals = RealOLE
convertType :: Type -> L.Type
convertType (PrimitiveType PrimitiveInt) = int32Type
convertTypes :: [Type] -> [L.Type]
convertTypes types = map convertType types
-- | Defines a function in LLVM
defineFunction :: L.Module -> String -> [Statement] -> LLVM ()
defineFunction m name sts = do ft <- functionType int32Type [] False
f <- addFunction m name ft
entryBlock <- appendBasicBlock f ""
builder <- createBuilder
positionAtEnd builder entryBlock
genStatements builder f M.empty sts
genStatements :: Builder
-> Value
-> SymbolMap
-> [Statement]
-> LLVM ()
genStatements builder fn symbolMap [st] = genStatement builder fn symbolMap st
>> return ()
genStatements builder fn symbolMap (st:sts) = do
symbolMap' <- genStatement builder fn symbolMap st
genStatements builder fn symbolMap' sts
-- | Generates code for a statement and then returns an updated symbol table
genStatement :: Builder
-> Value -- ^ Function, used to add basic blocks
-> SymbolMap -- ^ The current symbol table
-> Statement -- ^ Statement for which to generate code
-> LLVM SymbolMap -- ^ Updated symbol table
genStatement builder fn symbolMap (DeclareVariable t i) = do
val <- buildAlloca builder (convertType t) i
return $ M.insert i (t, val) symbolMap
genStatement builder fn symbolMap (IfStatement e trueSt falseSt) = do
trueBlock <- appendBasicBlock fn "true"
trueBuilder <- createBuilder
positionAtEnd trueBuilder trueBlock
falseBlock <- appendBasicBlock fn "false"
falseBuilder <- createBuilder
positionAtEnd falseBuilder falseBlock
doneBlock <- appendBasicBlock fn "doneif"
(_, exprResult) <- genExpression builder symbolMap e
buildCondBr builder exprResult trueBlock falseBlock
genStatement trueBuilder fn symbolMap trueSt
buildBr trueBuilder doneBlock
case falseSt of
Nothing -> return ()
Just falseSt' -> do genStatement falseBuilder fn symbolMap falseSt'
return ()
buildBr falseBuilder doneBlock
positionAtEnd builder doneBlock
return symbolMap
genStatement builder fn symbolMap (SetVariable i e) = do
(t, v) <- genExpression builder symbolMap e
let (t', symbol) = symbolMap M.! i
assertTypesEqual t t'
buildStore builder v symbol
return symbolMap
genStatement builder fn symbolMap (Return e) = do
(_, v) <- genExpression builder symbolMap e
buildRet builder v
return symbolMap
assertTypesEqual :: Type -> Type -> LLVM ()
assertTypesEqual t1 t2 = if t1 == t2
then return ()
else error $ "CodeGen: Type " ++ (show t1) ++
"does not match Type " ++ (show t2)
-- | I need to make it switch between buildICmp and buildFCmp when I start
-- allowing for float comparison. Possible future error
genExpression :: Builder
-> SymbolMap
-> Expression
-> LLVM (Type, Value)
genExpression builder symbolMap = g
where g (ComparisonExpression op e1 e2) = do
(t, v1) <- genExpression builder symbolMap e1
(t', v2) <- genExpression builder symbolMap e2
assertTypesEqual t t'
val <- if isIntType t
then buildICmp builder (convertIntComparison op) v1 v2 ""
else if isRealType t
then buildFCmp builder (convertRealComparison op) v1 v2 ""
else error "Can't compare non-integral"
return (t, val)
g (MathOperationExpression op e1 e2) = do
(t, v1) <- genExpression builder symbolMap e1
(t', v2) <- genExpression builder symbolMap e2
assertTypesEqual t t'
val <- if isIntType t
then case op of
Add -> buildAdd builder v1 v2 ""
Subtract -> buildSub builder v1 v2 ""
Multiply -> buildMul builder v1 v2 ""
else case op of
Add -> buildFAdd builder v1 v2 ""
Subtract -> buildFSub builder v1 v2 ""
Multiply -> buildFMul builder v1 v2 ""
return (t, val)
g (GetVariableExpression i) = do
let (t, var) = symbolMap M.! i
val <- buildLoad builder var (i ++ "_")
return (t, val)
g (LiteralExpression l) = return $ genLiteral l
g (ParenExpression e) =
genExpression builder symbolMap e
-- | Convert an Objective Literal to an LLVM Const Value
genLiteral :: Literal -> (Type, Value)
genLiteral (LiteralInt i) =
(PrimitiveType PrimitiveInt, constInt int32Type (fromIntegral i) True)
genLiteral (LiteralLong l) =
(PrimitiveType PrimitiveLong, constInt int64Type (fromIntegral l) True)
genLiteral (LiteralFloat f) =
(PrimitiveType PrimitiveFloat, constReal floatType (realToFrac f))
genLiteral (LiteralDouble d) =
(PrimitiveType PrimitiveDouble, constReal doubleType d)
genLiteral (LiteralBool True) =
    (PrimitiveType PrimitiveBool, constInt int1Type 1 False)
genLiteral (LiteralBool False) =
    (PrimitiveType PrimitiveBool, constInt int1Type 0 False)
isIntType :: Type -> Bool
isIntType (PrimitiveType PrimitiveInt) = True
isIntType (PrimitiveType PrimitiveLong) = True
isIntType _ = False
isRealType :: Type -> Bool
isRealType (PrimitiveType PrimitiveFloat) = True
isRealType (PrimitiveType PrimitiveDouble) = True
isRealType _ = False
|
jhance/objection
|
Language/Objection/CodeGen.hs
|
gpl-3.0
| 7,067
| 0
| 14
| 2,130
| 1,768
| 875
| 893
| 140
| 12
|
-- grid is a game written in Haskell
-- Copyright (C) 2018 karamellpelle@hotmail.com
--
-- This file is part of grid.
--
-- grid is free software: you can redistribute it and/or modify
-- it under the terms of the GNU General Public License as published by
-- the Free Software Foundation, either version 3 of the License, or
-- (at your option) any later version.
--
-- grid is distributed in the hope that it will be useful,
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- GNU General Public License for more details.
--
-- You should have received a copy of the GNU General Public License
-- along with grid. If not, see <http://www.gnu.org/licenses/>.
--
module MEnv.Env.PlayersObject.GLFW
(
PlayersInit (..),
PlayersObject (..),
withLoadedPlayers,
) where
import LoadM
data PlayersInit =
PlayersInit
data PlayersObject =
PlayersObject
--------------------------------------------------------------------------------
--
withLoadedPlayers :: PlayersInit -> (PlayersObject -> LoadM a) -> LoadM a
withLoadedPlayers init handler = do
handler PlayersObject
|
karamellpelle/grid
|
designer/source/MEnv/GLFW/PlayersObject.hs
|
gpl-3.0
| 1,194
| 0
| 9
| 224
| 107
| 70
| 37
| 13
| 1
|
module Carbon.Website.Paging where
import Carbon.Website.Common
pageItems :: OBW Response
pageItems = do
p <- getPaging
plusm (count p) $ do
l <- lookRead "limit"
o <- plusm (return 0) $ lookRead "offset"
is <- liftB $ PageItems p{limit = l, offset = o}
respOk $ responseJSON' is
where
count = respOk . responseJSON' <=< liftB . ItemCount
getPaging :: OBW Paging
getPaging = do
let bLook = plusm (return Nothing) . liftM return . lookRead
article <- bLook "isArticle"
deleted <- bLook "isDeleted"
discussion <- bLook "isDiscussion"
relation <- bLook "isRelation"
result <- bLook "isResult"
return Paging {
isArticle = article
, isDeleted = deleted
, isDiscussion = discussion
, isRelation = relation
, isResult = result
, limit = 0
, offset = 0
}
|
runjak/carbon-adf
|
Carbon/Website/Paging.hs
|
gpl-3.0
| 847
| 0
| 14
| 226
| 281
| 140
| 141
| -1
| -1
|
-- | The decision has been made to represent a BibTeX file as a list of entries, along with possibly some preamble.
module Common.BibTypes where
-- | a bibtex file is just a list of entries, with a list of preamble-strings
data BibTex = BibTex [String] [Entry]
deriving Show
-- | a bibtex entry has a type, a reference (it's name), and a list of fields
data Entry = Entry { entryType :: EntryType -- ^ book, thesis, etc.
, reference :: Reference -- ^ the name
, fields :: [Field] -- ^ a list of key/value pairs
}
deriving Show
-- | a field is an attribute/value pair
data Field = Field String String -- Name and contents of field
deriving Show
-- | returns the key part, given a Field
getKey :: Field -> String
getKey (Field k _) = k
-- | returns the value part, given a Field
getValue :: Field -> String
getValue (Field _ v) = v
-- | sometimes we want to get the key from a Maybe Field.
maybegetKey :: Maybe Field -> Maybe String
maybegetKey Nothing = Nothing
maybegetKey (Just (Field k _)) = Just k
-- | ...and sometimes we want to get the value from a Maybe Field.
maybegetValue :: Maybe Field -> Maybe String
maybegetValue Nothing = Nothing
maybegetValue (Just (Field _ v)) = Just v
-- | the entry type is characterised by a string, for example "book"
type EntryType = String
-- | the reference is also just a string, such as "pierce02"
type Reference = String
-- | the type of the bibtex algebra. Used to fold over a bibtex library,
-- like when we want to convert a BibTeX structure into HTML.
--
-- This seems the most natural way to define possible conversions from BibTex to
-- other (possibly tree-like) formats, such as Html later on.
type BibTexAlgebra bibtex preamble entry = ([preamble] -> [entry] -> bibtex,
String -> preamble,
EntryType -> Reference -> [Field] -> entry)
-- | How to fold over a BibTeX tree. Used when converting to Html in Bib2HTML.Tool.
foldBibTex :: BibTexAlgebra bibtex preamble entry -> BibTex -> bibtex
foldBibTex (bib, pa, entry) = fBibTex where
fBibTex (BibTex preamble lentries) = bib (map pa preamble) (map fEntry lentries)
fEntry (Entry spec ref attr) = entry spec ref attr
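-- | A small example algebra (added for illustration): count the entries in a
-- BibTex value via foldBibTex, ignoring the preamble strings and the fields.
countEntries :: BibTex -> Int
countEntries = foldBibTex (\_ entries -> length entries, const (), \_ _ _ -> ())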
-- | We implement equality on entries. When their names are the same, we consider them equal.
instance Eq Entry where
(==) e1 e2 = reference e1 == reference e2
-- | Given a key, find the corresponding Field in a list of Fields. If it can't be found, Nothing is returned.
lookupField :: String -> [Field] -> Maybe Field
lookupField key [] = Nothing
lookupField key (f@(Field k v):fs) | key == k = Just f
| otherwise = lookupField key fs
instance Ord Field where
compare = compareF
-- | Our implementation of ordering on Fields. This is how we make sure
-- that author, then title, then the other fields, and finally year, are
-- displayed, regardless of how they are placed in the .bib file. This implementation
-- allows us to simply run `sort` on a list of Fields.
compareF :: Field -> Field -> Ordering
compareF (Field k1 v1) (Field k2 v2)
| k1 == "author" = LT
| k1 == "year" = GT
| k1 == "title"
&& k2 == "author" = GT
| k1 == "title" = LT
| otherwise = EQ
-- | Fields are considered equal when their keys are the same.
instance Eq Field where
(==) (Field k1 v1) (Field k2 v2) = k1 == k2
|
toothbrush/cco-bibtex2html
|
Common/BibTypes.hs
|
gpl-3.0
| 3,836
| 0
| 10
| 1,233
| 684
| 369
| 315
| 46
| 1
|
module Jumpie.Geometry.Intersection(
rectIntersects,
rectLineSegmentIntersects,
rectLineSegmentIntersection,
pointInsideRect,
lineSegmentIntersects,
lineSegmentIntersection,
lineSegmentInsideRect,
parabolaPointIntersects
) where
import Jumpie.Maybe(headOrNothing)
import Jumpie.Tuple(between)
import Data.Composition((.:))
import Jumpie.Geometry.LineSegment(LineSegment(..),pointList)
import Jumpie.Geometry.Parabola(Parabola,paraZenith,paraBounds)
import Jumpie.Geometry.Point
import Jumpie.Geometry.Rect(Rect,inside,lineSegments,rectTopLeft,rectBottomRight)
import ClassyPrelude
import Linear.Vector((*^))
import Linear.Metric(dot)
import Linear.V2(_x,_y)
import Control.Lens((^.),view)
lineSegmentIntersects :: (Num a,Fractional a,Ord a) => a -> LineSegment (Point2 a) -> LineSegment (Point2 a) -> Bool
lineSegmentIntersects delta l1 l2 = isJust $ lineSegmentIntersection delta l1 l2
-- Small note: hardly any explicit (Point2 a) is needed here, but we do need vmult, dot and cross.
-- Maybe that could be factored out into a type class?
lineSegmentIntersection :: (Num a,Fractional a,Ord a) => a -> LineSegment (Point2 a) -> LineSegment (Point2 a) -> Maybe (Point2 a)
lineSegmentIntersection delta (LineSegment p to1) (LineSegment q to2)
| collinear && overlapping = Just (p + (tnom/denom) *^ r)
| collinear && not overlapping = Nothing
| abs denom <= delta && abs unom > delta = Nothing
| abs denom > delta && isNormalized (tnom / denom) && isNormalized (unom / denom) = Just (p + (tnom/denom) *^ r)
| otherwise = Nothing
where r = to1 - p
s = to2 - q
denom = r `cross2` s
tnom = (q - p) `cross2` s
unom = (q - p) `cross2` r
isNormalized z = z >= 0 && z <= 1
collinear = abs denom <= delta && abs unom <= delta
overlapping = (0 <= ((q - p) `dot` r) && ((q - p) `dot` r) <= (r `dot` r)) || (0 <= (p - q) `dot` s && (p - q) `dot` s <= s `dot` s)
rectIntersects :: (Num a,Fractional a,Ord a) => a -> Rect (Point2 a) -> Rect (Point2 a) -> Bool
rectIntersects delta a b = a `inside` b || b `inside` a || or (isJust .: lineSegmentIntersection delta <$> (a ^. lineSegments) <*> (b ^. lineSegments))
pointInsideRect :: Ord a => Rect (Point2 a) -> Point2 a -> Bool
pointInsideRect r p = and $ zipWith betweenRE (zip (pointToList . (view rectTopLeft) $ r) (pointToList . (view rectBottomRight) $ r)) (pointToList p)
where betweenRE :: Ord a => (a,a) -> a -> Bool
betweenRE (left,right) a = a >= left && a < right
rectLineSegmentIntersection :: (Num a,Fractional a,Ord a) => a -> Rect (Point2 a) -> LineSegment (Point2 a) -> Maybe (Point2 a)
rectLineSegmentIntersection delta rect line = headOrNothing (pure (lineSegmentIntersection delta line) <*> (rect ^. lineSegments))
-- Careful: this is not simply rectLineSegmentIntersection.
-- This function also covers the case where the line segment
-- lies completely inside the Rect.
rectLineSegmentIntersects :: (Num a,Fractional a,Ord a) => a -> Rect (Point2 a) -> LineSegment (Point2 a) -> Bool
rectLineSegmentIntersects delta rect line = or (pure (lineSegmentIntersects delta line) <*> (rect ^. lineSegments)) || lineSegmentInsideRect line rect
lineSegmentInsideRect :: Ord a => LineSegment (Point2 a) -> Rect (Point2 a) -> Bool
lineSegmentInsideRect line r = and (map (pointInsideRect r) (pointList line))
parabolaPointIntersects :: (Ord a,Floating a) => Parabola a -> Point2 a -> Bool
parabolaPointIntersects para p = (p ^. _y) < paraZenith para && (p ^. _x) `between` (paraBounds para (p ^. _y))
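-- A standalone sketch (added for illustration; it uses plain coordinate pairs
-- instead of Point2/LineSegment and omits the collinear-overlap case): two
-- segments p->p2 and q->q2 intersect exactly when the parameters t and u
-- below both lie in [0,1].
segmentIntersection2D :: (Double,Double) -> (Double,Double) -> (Double,Double) -> (Double,Double) -> Maybe (Double,Double)
segmentIntersection2D (px,py) (p2x,p2y) (qx,qy) (q2x,q2y)
  | abs denom < 1e-9 = Nothing
  | t >= 0 && t <= 1 && u >= 0 && u <= 1 = Just (px + t*rx, py + t*ry)
  | otherwise = Nothing
  where (rx,ry) = (p2x - px, p2y - py)
        (sx,sy) = (q2x - qx, q2y - qy)
        cross2d (ax,ay) (bx,by) = ax*by - ay*bx
        denom = cross2d (rx,ry) (sx,sy)
        t = cross2d (qx - px, qy - py) (sx,sy) / denom
        u = cross2d (qx - px, qy - py) (rx,ry) / denom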
|
pmiddend/jumpie
|
lib/Jumpie/Geometry/Intersection.hs
|
gpl-3.0
| 3,546
| 0
| 16
| 621
| 1,380
| 735
| 645
| 52
| 1
|
-- Author: Viacheslav Lotsmanov
-- License: GPLv3 https://raw.githubusercontent.com/unclechu/xmonadrc/master/LICENSE
{-# OPTIONS_GHC -fno-warn-missing-signatures #-}
{-# LANGUAGE PackageImports #-}
module Main (main) where
import "xmonad" XMonad (xmonad, logHook, (<+>))
import "xmonad-contrib" XMonad.Util.Run (spawnPipe)
import "xmonad-contrib" XMonad.Util.EZConfig (additionalKeys, additionalKeysP)
import "xmonad-contrib" XMonad.Hooks.EwmhDesktops (ewmh)
import qualified "xmonad-contrib" XMonad.Hooks.DynamicLog as DL
import "base" System.IO (hPutStrLn)
import "data-default" Data.Default (def)
import Workspaces (myWorkspaces)
import Config (myConfig)
import Keys (myKeys, myEZKeys)
import Utils (xmobarEscape)
import Utils.IPC (initIPC, deinitIPC)
import Utils.CustomConfig (getCustomConfig)
main :: IO ()
main = do
customConfig <- getCustomConfig
ipc <- initIPC
let conf = myConfig customConfig
keys = myKeys ipc myWorkspaces customConfig
ezKeys = myEZKeys ipc myWorkspaces customConfig
xmproc <- spawnPipe "xmobar ~/.xmonad/xmobar.generated.hs"
xmonad $ ewmh $ conf { logHook = xmobarLogHook xmproc <+> logHook conf
} `additionalKeys` keys
`additionalKeysP` ezKeys
deinitIPC ipc
where layoutNameHandler :: String -> String
layoutNameHandler x = wrap $ xmobarEscape $
case x of
"ResizableTall" -> "[>]"
"Mirror ResizableTall" -> "[v]"
"Grid" -> "[+]"
"Spiral" -> "[0]"
"Tabbed Simplest" -> "[t]"
"Cross" -> "[x]"
"Circle" -> "[o]"
"ThreeCol" -> "[3]"
"SimplestFloat" -> "[f]"
"Full" -> "[ ]"
_ -> x
where wrap t = "<action=xdotool key super+space>" ++ t ++ "</action>"
xmobarLogHook xmproc =
DL.dynamicLogWithPP $ def
{ DL.ppOutput = hPutStrLn xmproc
, DL.ppTitle = DL.xmobarColor "gray" "#444" . DL.wrap " " " "
, DL.ppCurrent = DL.xmobarColor "green" "" . DL.wrap "[" "]"
, DL.ppSep = " "
, DL.ppWsSep = " "
, DL.ppLayout = DL.xmobarColor "yellow" "" . layoutNameHandler
, DL.ppHiddenNoWindows = id
}
|
unclechu/xmonadrc
|
xmonad/src/Main.hs
|
gpl-3.0
| 2,465
| 0
| 13
| 801
| 518
| 288
| 230
| 52
| 11
|
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE StandaloneDeriving #-}
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE UndecidableInstances #-}
-- | A modifier that combines the result of several modifiers.
module Data.VPlan.Modifier.Combine (
Combine (..)
, combine
) where
import Control.Applicative
import Control.Lens hiding ((.=))
import Data.Aeson
import Data.Aeson.Types
import Data.Data
import Data.Foldable (Foldable (..), foldl')
import Data.Monoid
import qualified Data.VPlan.At as A
import Data.VPlan.Class
import Data.VPlan.TH
import Data.VPlan.Util
import GHC.Generics
-- | Combine multiple modifiers into one. Values are traversed in the order of the modifers, i.e. the
-- values of the first modifier are traversed first.
newtype Combine s c i v = Combine [s c i v] deriving (Eq, Generic)
makeIso ''Combine
makeModifier ''Combine
deriving instance Show (s c i v) => Show (Combine s c i v)
deriving instance Read (s c i v) => Read (Combine s c i v)
deriving instance (Data (s c i v), Typeable3 s, Typeable c, Typeable i, Typeable v) => Data (Combine s c i v)
instance Bifunctor (s c) => Bifunctor (Combine s c) where bimap f g (Combine a) = Combine $ fmap (bimap f g) a
instance Functor (s c i) => Functor (Combine s c i) where fmap f (Combine a) = Combine (fmap (fmap f) a)
instance Profunctor (s c) => Profunctor (Combine s c) where dimap l r (Combine a) = Combine (fmap (dimap l r) a)
instance Contravariant (s c i) => Contravariant (Combine s c i) where contramap f (Combine l) = Combine $ map (contramap f) l
instance Foldable (s c i) => Foldable (Combine s c i) where foldMap f (Combine a) = foldMap (foldMap f) a
instance Traversable (s c i) => Traversable (Combine s c i) where traverse f (Combine a) = Combine <$> traverse (traverse f) a
instance (Limited (s c i v), Ord (Index (s c i v))) => Limited (Combine s c i v) where
imax (Combine as) = maximum <$> traverse imax as
imin (Combine as) = minimum <$> traverse imin as
instance (Enum (Index (s c i v)), Monoid (Index (s c i v)), Ord (Index (s c i v)), Periodic (s c i v)) => Periodic (Combine s c i v) where
interval (Combine []) = succ mempty
interval (Combine (a:as)) = foldl' glcm (interval a) $ map interval as
instance (Gettable f, A.Contains (Accessor Bool) (s c i v)) => A.Contains f (Combine s c i v) where
contains = containsTest $ \i (Combine a) -> any (view $ A.contains i) a
instance (A.Ixed f (s c i v), Applicative f) => A.Ixed f (Combine s c i v) where
ix i f (Combine a) = Combine <$> traverse (A.ix i f) a
instance FromJSON (s c i v) => FromJSON (Combine s c i v) where
parseJSON (Object o) = Combine <$> o .: "childs"
parseJSON v = typeMismatch "Object" v
instance ToJSON (s c i v) => ToJSON (Combine s c i v) where
toJSON (Combine a) = object [ "childs" .= a ]
|
bennofs/vplan
|
src/Data/VPlan/Modifier/Combine.hs
|
gpl-3.0
| 3,274
| 0
| 12
| 807
| 1,285
| 661
| 624
| 53
| 0
|
-- |Simple Socket IO for UNIX domain sockets without massive
-- dependencies like conduits.
module SimpleSockets (foreverAccept) where
import Control.Concurrent (ThreadId, forkIO)
import Control.Exception.Base (SomeException, catch, finally, bracket)
import Control.Monad (forever)
import Network.Socket
import System.IO
import System.Posix (removeLink)
-- |Accepts incoming connections, only one at a time. Forks thread for
-- the handler.
foreverAccept :: (Handle -> IO ()) -> String -> IO ThreadId
foreverAccept act unix = do
s <- open
forkIO $ finally (loop s) (cleanup s)
where
open = do
sock <- socket AF_UNIX Stream defaultProtocol
bindSocket sock $ SockAddrUnix unix
      -- One pending connection is enough because it helps us to detect
-- misbehaviour in data sources.
listen sock 1
return sock
loop sock = forever $ accept sock >>= withHandle (safe . act)
cleanup s = do
close s
removeLink unix
-- |Makes a handle out of a socket for easier IO. This takes care of
-- closing the handle after the action is finished.
withHandle :: (Handle -> IO ()) -> (Socket, SockAddr) -> IO ()
withHandle act (s,_) = bracket (socketToHandle s ReadWriteMode) hClose act
-- |Run IO action and just log the exception and continue as nothing happened
safe :: IO () -> IO ()
safe act = catch act eh
where
eh :: SomeException -> IO ()
eh e = putStrLn $ "Handler died: " ++ show e
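-- | A minimal usage sketch (added for illustration; the socket path and the
-- echo behaviour are assumptions, not part of the original module): echo the
-- first line of each connection back to the client, keeping the main thread
-- alive by blocking on stdin.
exampleEchoMain :: IO ()
exampleEchoMain = do
  _ <- foreverAccept echo "/tmp/echo.sock"
  _ <- getLine -- block the main thread; illustration only
  return ()
  where
    echo h = hGetLine h >>= hPutStrLn h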
|
koodilehto/kryptoradio
|
encoder/SimpleSockets.hs
|
agpl-3.0
| 1,440
| 0
| 10
| 306
| 381
| 196
| 185
| 26
| 1
|
{-
Copyright (C) 2007 John Goerzen <jgoerzen@complete.org>
All rights reserved.
For license and copyright information, see the file COPYRIGHT
-}
-- FIXME -- better code is in offlineimap v7 branch
module TestInfrastructure where
import Test.QuickCheck
import Test.QuickCheck.Batch
import qualified Data.ByteString as BS
import qualified Data.ByteString.Lazy as BSL
import qualified Data.ListLike as LL
import qualified Data.Map as Map
import qualified Data.Array as A
import qualified Data.Foldable as F
import System.Random
import System.IO
import qualified Test.HUnit as HU
import Text.Printf
import Data.Word
import Data.List
import Data.Monoid
{-
#if defined __HUGS__
-}
instance (Arbitrary a) => Arbitrary (Maybe a) where
arbitrary = sized arbMaybe
where
arbMaybe 0 = return Nothing
arbMaybe n = fmap Just (resize (n-1) arbitrary)
coarbitrary Nothing = variant 0
coarbitrary (Just x) = variant 1 . coarbitrary x
{-
#endif
-}
(@=?) :: (Eq a, Show a) => a -> a -> Result
expected @=? actual =
Result {ok = Just (expected == actual),
arguments = ["Result: expected " ++ show expected ++ ", got " ++ show actual],
stamp = []}
(@?=) :: (Eq a, Show a) => a -> a -> Result
(@?=) = flip (@=?)
instance (LL.ListLike f i, Arbitrary i) => Arbitrary f where
arbitrary = sized (\n -> choose (0, n) >>= myVector)
where myVector n =
do arblist <- vector n
return (LL.fromList arblist)
coarbitrary l = coarbitrary (LL.toList l)
class (Show b, Arbitrary a, Show a, Eq a, Eq b, LL.ListLike a b) => TestLL a b where
-- | Compare a ListLike to a list using any local conversions needed
llcmp :: a -> [b] -> Result
llcmp f l = l @=? (LL.toList f)
    -- | Check the lengths of the two items. True if they should be considered
-- to match.
checkLengths :: a -> [b] -> Bool
checkLengths f l = (LL.length f) == length l
instance (Arbitrary a, Show a, Eq a) => TestLL [a] a where
llcmp x y = y @=? x
instance (Arbitrary a, Show a, Eq a) => TestLL (MyList a) a where
llcmp (MyList x) l = l @=? x
instance TestLL BS.ByteString Word8 where
instance TestLL BSL.ByteString Word8 where
instance (Arbitrary a, Show a, Eq a) => TestLL (A.Array Int a) a where
instance (Show k, Show v, Arbitrary k, Arbitrary v, Ord v, Ord k) => TestLL (Map.Map k v) (k, v) where
llcmp m l =
if mycmp (Map.toList m) && mychk l
then l @=? l -- True
else l @=? (Map.toList m) -- False
where mycmp [] = True
mycmp (x:xs) = if elem x l
then mycmp xs
else False
mychk [] = True
mychk ((k, _):xs) = if Map.member k m then mychk xs else False
-- FIXME: should find a way to use LL.length instead of Map.size here
checkLengths m l = Map.size m == length (mapRemoveDups l)
mapRemoveDups :: (Eq k1) => [(k1, v1)] -> [(k1, v1)]
mapRemoveDups = nubBy (\(k1, _) (k2, _) -> k1 == k2)
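-- Worked example (added for illustration):
--   mapRemoveDups [(1,'a'),(2,'b'),(1,'c')] == [(1,'a'),(2,'b')]
-- Only the first binding for each key survives, so the length of the result is
-- the number of distinct keys, which is what checkLengths compares against
-- Map.size.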
data MyList a = MyList [a]
deriving (Ord, Eq, Show)
instance LL.FoldableLL (MyList a) a where
foldr f i (MyList x) = foldr f i x
foldl f i (MyList x) = foldl f i x
foldr1 f (MyList x) = foldr1 f x
foldl1 f (MyList x) = foldl1 f x
instance Monoid (MyList a) where
mempty = MyList []
mappend (MyList x) (MyList y) = MyList (x ++ y)
instance LL.ListLike (MyList a) a where
singleton x = MyList [x]
head (MyList x) = head x
tail (MyList x) = MyList (tail x)
null (MyList x) = null x
instance LL.StringLike (MyList Char) where
toString (MyList x) = x
fromString x = MyList x
instance Arbitrary Word8 where
arbitrary = sized $ \n -> choose (0, min (fromIntegral n) maxBound)
coarbitrary n = variant (if n >= 0 then 2 * x else 2 * x + 1)
where x = abs . fromIntegral $ n
instance Arbitrary Char where
arbitrary = sized $ \n -> choose (toEnum 0, min (toEnum n) maxBound)
coarbitrary n = variant (if (fromEnum n) >= 0 then toEnum (2 * x) else toEnum (2 * x + 1))
where (x::Int) = abs . fromEnum $ n
instance Random Word8 where
randomR (a, b) g = (\(x, y) -> (fromInteger x, y)) $
randomR (toInteger a, toInteger b) g
random g = randomR (minBound, maxBound) g
testoptions = defOpt {length_of_tests = 0, debug_tests = False}
mkTest msg test = HU.TestLabel msg $ HU.TestCase $ (run test testoptions >>= checResult)
where checResult (TestOk x y z) = printmsg x y >> return ()
checResult (TestExausted x y z) =
do hPrintf stderr "\r%-78s\n" $
"Warning: Arguments exhausted after " ++ show y ++ " cases."
return ()
checResult (TestFailed x y) = HU.assertFailure $
"Test Failure\n" ++
"Arguments: " ++
(concat . intersperse "\n " $ x) ++
"\nTest No.: " ++ show y
checResult (TestAborted x) = HU.assertFailure (show x)
printmsg x y
| False = hPrintf stderr "\r%-78s\r"
(msg ++ " " ++ x ++ " (" ++ show y ++ " cases)")
| otherwise = return ()
-- Modified from HUnit
runVerbTestText :: HU.PutText st -> HU.Test -> IO (HU.Counts, st)
runVerbTestText (HU.PutText put us) t = do
(counts, us') <- HU.performTest reportStart reportError reportFailure us t
us'' <- put (HU.showCounts counts) True us'
return (counts, us'')
where
reportStart ss us = do hPrintf stderr "\rTesting %-68s\n" (HU.showPath (HU.path ss))
put (HU.showCounts (HU.counts ss)) False us
reportError = reportProblem "Error:" "Error in: "
reportFailure = reportProblem "Failure:" "Failure in: "
reportProblem p0 p1 msg ss us = put line True us
where line = "### " ++ kind ++ path' ++ '\n' : msg
kind = if null path' then p0 else p1
path' = HU.showPath (HU.path ss)
-- | So we can test map and friends
instance Show (a -> b) where
show _ = "(a -> b)"
data (LL.ListLike f i, Arbitrary f, Arbitrary i, Show f, Show i, Eq i, Eq f) => LLTest f i =
forall t. Testable t => LLTest (f -> t)
data (LL.ListLike f i, Arbitrary f, Arbitrary i, Show f, Show i, Eq i, Eq f, LL.ListLike f' f, TestLL f' f, Show f', Eq f', Arbitrary f') =>
LLWrap f' f i =
forall t. Testable t => LLWrap (f' -> t)
w :: TestLL f i => String -> LLTest f i -> HU.Test
w msg f = case f of
LLTest theTest -> mkTest msg theTest
ws :: (LL.StringLike f, TestLL f i) => String -> LLTest f i -> HU.Test
ws = w
wwrap :: (TestLL f i, TestLL f' f) => String -> LLWrap f' f i -> HU.Test
wwrap msg f = case f of
LLWrap theTest -> mkTest msg theTest
t :: forall f t i. (TestLL f i, Arbitrary f, Arbitrary i, Show f, Eq f, Testable t) => (f -> t) -> LLTest f i
t = LLTest
-- | all props, wrapped list
apw :: String -> (forall f' f i. (TestLL f i, Show i, Eq i, LL.ListLike f i, Eq f, Show f, Arbitrary f, Arbitrary i, LL.ListLike f' f, Show f', TestLL f' f, Arbitrary f', Eq f') => LLWrap f' f i) -> HU.Test
apw msg x = HU.TestLabel msg $ HU.TestList $
[wwrap "wrap [[Int]]" (x::LLWrap [[Int]] [Int] Int),
wwrap "wrap MyList (MyList Int)" (x::LLWrap (MyList (MyList Int)) (MyList Int) Int),
wwrap "wrap Array (Array Int)" (x::LLWrap (A.Array Int (A.Array Int Int)) (A.Array Int Int) Int),
wwrap "wrap Array [Int]" (x::LLWrap (A.Array Int [Int]) [Int] Int)
]
-- | all props, 1 args: full
apf :: String -> (forall f i. (Ord i, TestLL f i, Show i, Eq i, LL.ListLike f i, Eq f, Show f, Arbitrary f, Arbitrary i) => LLTest f i) -> HU.Test
apf msg x = HU.TestLabel msg $ HU.TestList $
[w "[Int]" (x::LLTest [Int] Int),
w "MyList Int" (x::LLTest (MyList Int) Int),
w "String" (x::LLTest String Char),
w "[Bool]" (x::LLTest [Bool] Bool),
w "MyList Bool" (x::LLTest (MyList Bool) Bool),
w "Map Int Int" (x::LLTest (Map.Map Int Int) (Int, Int)),
w "Map Bool Int" (x::LLTest (Map.Map Bool Int) (Bool, Int)),
w "Map Int Bool" (x::LLTest (Map.Map Int Bool) (Int, Bool)),
w "Map Bool Bool" (x::LLTest (Map.Map Bool Bool) (Bool, Bool)),
w "ByteString" (x::LLTest BS.ByteString Word8),
w "ByteString.Lazy" (x::LLTest BSL.ByteString Word8),
w "Array Int Int" (x::LLTest (A.Array Int Int) Int),
w "Array Int Bool" (x::LLTest (A.Array Int Bool) Bool),
w "[[Int]]" (x::LLTest [[Int]] [Int]),
w "MyList (MyList Int)" (x::LLTest (MyList (MyList Int)) (MyList Int)),
w "[MyList Int]" (x::LLTest [MyList Int] (MyList Int)),
w "Array [Int]" (x::LLTest (A.Array Int [Int]) [Int]),
w "Array (Array Int)" (x::LLTest (A.Array Int (A.Array Int Int)) (A.Array Int Int)),
w "Array (Just Int)" (x::LLTest (A.Array Int (Maybe Int)) (Maybe Int))
]
-- | all props, 1 args: full
aps :: String -> (forall f i. (Ord i, TestLL f i, Show i, Eq i, LL.StringLike f, LL.ListLike f i, Eq f, Show f, Arbitrary f, Arbitrary i) => LLTest f i) -> HU.Test
aps msg x = HU.TestLabel msg $ HU.TestList $
[w "String" (x::LLTest String Char),
w "MyList Char" (x::LLTest (MyList Char) Char),
w "ByteString" (x::LLTest BS.ByteString Word8),
w "ByteString.Lazy" (x::LLTest BSL.ByteString Word8),
w "Array Int Char" (x::LLTest (A.Array Int Char) Char)
]
|
jgoerzen/listlike
|
testsrc/TestInfrastructure.hs
|
lgpl-2.1
| 9,439
| 0
| 14
| 2,580
| 3,966
| 2,066
| 1,900
| -1
| -1
|
{-# LANGUAGE OverloadedStrings #-}
module Scotty where
import Web.Scotty
import Control.Monad.IO.Class
main :: IO ()
main = scotty 3000 $ do
get "/:word" $ do
beam <- param "word"
liftIO (putStrLn "hello")
html $ mconcat ["<h1>Scotty, ", beam, " me up!<h1>"]
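-- Usage sketch (added for illustration): with the server running on port 3000,
-- a request such as
--     curl localhost:3000/there
-- prints "hello" on the server's stdout (via liftIO) and responds with
-- "<h1>Scotty, there me up!<h1>".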
|
dmvianna/haskellbook
|
src/Ch26-Scotty.hs
|
unlicense
| 278
| 0
| 13
| 60
| 90
| 46
| 44
| 10
| 1
|
-- Copyright 2017 Google Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
{-# LANGUAGE CPP #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-
This module provides the wrapper executable function (wrapperMain) but also
exposes the flag parsing logic so it can be reused by the plugin.
-}
module Language.Haskell.Indexer.Args where
import Data.Bits ((.|.), (.&.), shiftR)
import Data.Bool (bool)
import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as BL
import qualified Data.ByteString.Builder as Builder
import Data.Maybe (fromMaybe)
#if !MIN_VERSION_base(4,11,0)
import Data.Monoid ((<>))
#endif
import Data.ProtoLens (encodeMessage)
import Data.Text (Text)
import qualified Data.Text as T
import Control.Concurrent.MVar (newMVar)
import Control.Exception (throwIO, ErrorCall(..))
import Options.Applicative
import System.Environment (getArgs, withArgs)
import Language.Haskell.Indexer.Backend.AnalysisOptions (AnalysisOptions(..))
import Language.Haskell.Indexer.Backend.GhcArgs
import qualified Language.Kythe.Schema.Raw as Raw
import qualified Language.Kythe.Schema.Raw.Proto as Raw
import Language.Haskell.Indexer.Pipeline.GhcKythe (ghcToKythe, pluginContinuation)
import Language.Haskell.Indexer.Util.Path (asTextPath, stripTmpPrefix)
import Language.Haskell.Indexer.Translate
import DynFlags (defaultFatalMessager, defaultFlushOut)
import GHC (defaultErrorHandler)
import GHC.IO.Handle
-- | Command-line flags to control the indexer behavior.
data Flags = Flags
{ flagCorpus :: !Text
, flagMainPackageRename :: !(Maybe Text)
, flagStripPathPrefix :: !(Maybe Text)
, flagPrependPathPrefix :: !(Maybe Text)
, flagKeepTempPathPrefix :: !Bool
, flagOverridePgmP :: !(Maybe FilePath)
, flagOverrideLibdir :: !(Maybe FilePath)
-- Path to a directory where to place the output files when running
-- in plugin mode.
, flagOutput :: !(Maybe FilePath)
}
wrapperMain :: IO ()
wrapperMain = do
(wrapperArgs, rest) <- break (== "--") <$> getArgs
case rest of
_:ghcArgs -> withArgs wrapperArgs (execParser opts) >>= index ghcArgs
_ -> throwIO (ErrorCall "No -- found in args.")
where
opts = info (helper <*> flagParser)
$ header (
"ghc_wrapper - pretends to be GHC and writes Kythe artifacts. "
++ "Options after the first -- are passed on to GHC.")
<> fullDesc
kythePlugin :: Handle -> (AnalysisOptions -> IO XRef) -> Flags -> IO ()
kythePlugin h f flags =
indexX (pluginContinuation f h) [] flags
wrapperParser :: [String] -> IO Flags
wrapperParser opts = withArgs opts (execParser (info flagParser fullDesc))
flagParser :: Parser Flags
flagParser = Flags
<$> (T.pack <$> strOption
( long "corpus"
<> short 'c'
<> help "The name of the Kythe corpus being indexed."
<> value ""
<> showDefault))
<*> optional (T.pack <$> strOption
( long "rename_main"
<> short 'm'
<> metavar "PACKAGE"
<> help ("Changes the 'main' package name when emitting entries. "
++ "Useful when indexing multiple binaries.")))
<*> optional (T.pack <$> strOption
( long "drop_path_prefix"
<> metavar "PREFIX"
<> help "Strip the given prefix from emitted filepaths."))
<*> optional (T.pack <$> strOption
( long "prepend_path_prefix"
<> metavar "PREFIX"
<> help "Prepends prefix to emitted filepaths (after stripping, if any)."))
<*> switch
( long "keep_path_tmp_prefix"
<> help ("If set, won't apply the default removal of temporary "
++ "dirs from emitted path prefixes."))
<*> optional (strOption
( long "pgmP_binary"
<> short 'P'
<> metavar "PATH"
       <> help ("Overrides the preprocessor binary, but keeps its "
                ++ "options. Note: other tools can still be overridden "
++ "by passing the regular -pgmX GHC options.")))
<*> optional (strOption
( long "libdir"
<> short 'B'
<> metavar "PATH"
<> help ("Overrides the GHC libdir.")))
<*> optional (strOption
( long "output"
<> short 'o'
<> metavar "INDEX_OUT_DIR"
<> help ("The directory to write the indices to in plugin mode. "
++ "Normal mode emits entry stream to stdout.")))
index :: [String] -> Flags -> IO ()
index args fs = do
lock <- newMVar ()
withErrorHandler $ indexX (ghcToKythe lock) args fs
where
withErrorHandler :: IO a -> IO a
withErrorHandler = defaultErrorHandler defaultFatalMessager defaultFlushOut
indexX
:: (GhcArgs -> AnalysisOptions -> Raw.VName
-> (Handle -> [Raw.Entry] -> IO ()) -> IO ())
-> [String] -> Flags -> IO ()
indexX k args Flags{..} = do
let ghcArgs = GhcArgs
{ gaArgs = args
, gaToolOverride = ToolOverride
{ overridePgmP = flagOverridePgmP
}
, gaLibdirOverride = OverrideLibdir <$> flagOverrideLibdir
}
analysisOptions = AnalysisOptions
{ aoMainPkgFallback = fromMaybe "main" flagMainPackageRename
, aoDemanglePackageName = id
, aoFilePathTransform
= customPrepend
. customStrip
. bool (asTextPath stripTmpPrefix) id flagKeepTempPathPrefix
}
where customStrip p = fromMaybe p $ do
prefix <- flagStripPathPrefix
T.stripPrefix prefix p
customPrepend p = fromMaybe p . fmap (<> p)
$ flagPrependPathPrefix
baseVName = Raw.VName "" flagCorpus "" "" "haskell"
k ghcArgs analysisOptions baseVName collect
where
collect handle = mapM_ (\m -> do
let wire = encodeMessage . Raw.toEntryProto $ m
B.hPutStr handle . BL.toStrict . Builder.toLazyByteString
. varInt . B.length $ wire
B.hPutStr handle wire)
-- | From proto-lens.
varInt :: Int -> Builder.Builder
varInt n
| n < 128 = Builder.word8 (fromIntegral n)
| otherwise = Builder.word8 (fromIntegral $ n .&. 127 .|. 128)
<> varInt (n `shiftR` 7)
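-- A standalone illustration (added; not part of the original module) of the
-- same varint scheme as a list of byte values, so it can be checked in GHCi:
-- varIntByteValues 300 == [172, 2], i.e. 0xAC 0x02.
varIntByteValues :: Int -> [Int]
varIntByteValues n
    | n < 128 = [n]
    | otherwise = (n .&. 127 .|. 128) : varIntByteValues (n `shiftR` 7)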
|
google/haskell-indexer
|
haskell-indexer-pipeline-ghckythe-wrapper/src/Language/Haskell/Indexer/Args.hs
|
apache-2.0
| 7,010
| 0
| 21
| 1,954
| 1,511
| 810
| 701
| 153
| 2
|
{-
(c) The University of Glasgow 2006
(c) The GRASP/AQUA Project, Glasgow University, 1992-1998
Pattern-matching literal patterns
-}
{-# LANGUAGE CPP, ScopedTypeVariables #-}
module MatchLit ( dsLit, dsOverLit, hsLitKey, hsOverLitKey
, tidyLitPat, tidyNPat
, matchLiterals, matchNPlusKPats, matchNPats
, warnAboutIdentities, warnAboutEmptyEnumerations
) where
#include "HsVersions.h"
import {-# SOURCE #-} Match ( match )
import {-# SOURCE #-} DsExpr ( dsExpr )
import DsMonad
import DsUtils
import HsSyn
import Id
import CoreSyn
import MkCore
import TyCon
import DataCon
import TcHsSyn ( shortCutLit )
import TcType
import Name
import Type
import PrelNames
import TysWiredIn
import Literal
import SrcLoc
import Data.Ratio
import Outputable
import BasicTypes
import DynFlags
import Util
import FastString
import qualified GHC.LanguageExtensions as LangExt
import Control.Monad
import Data.Int
import Data.Word
{-
************************************************************************
* *
Desugaring literals
[used to be in DsExpr, but DsMeta needs it,
and it's nice to avoid a loop]
* *
************************************************************************
We give int/float literals type @Integer@ and @Rational@, respectively.
The typechecker will (presumably) have put \tr{from{Integer,Rational}s}
around them.
ToDo: put in range checks for when converting ``@i@''
(or should that be in the typechecker?)
For numeric literals, we try to detect their use at a standard type
(@Int@, @Float@, etc.) and, if so, put them directly in the right constructor.
[NB: down with the @App@ conversion.]
See also below where we look for @DictApps@ for \tr{plusInt}, etc.
-}
dsLit :: HsLit -> DsM CoreExpr
dsLit (HsStringPrim _ s) = return (Lit (MachStr s))
dsLit (HsCharPrim _ c) = return (Lit (MachChar c))
dsLit (HsIntPrim _ i) = return (Lit (MachInt i))
dsLit (HsWordPrim _ w) = return (Lit (MachWord w))
dsLit (HsInt64Prim _ i) = return (Lit (MachInt64 i))
dsLit (HsWord64Prim _ w) = return (Lit (MachWord64 w))
dsLit (HsFloatPrim f) = return (Lit (MachFloat (fl_value f)))
dsLit (HsDoublePrim d) = return (Lit (MachDouble (fl_value d)))
dsLit (HsChar _ c) = return (mkCharExpr c)
dsLit (HsString _ str) = mkStringExprFS str
dsLit (HsInteger _ i _) = mkIntegerExpr i
dsLit (HsInt _ i) = do dflags <- getDynFlags
return (mkIntExpr dflags i)
dsLit (HsRat r ty) = do
num <- mkIntegerExpr (numerator (fl_value r))
denom <- mkIntegerExpr (denominator (fl_value r))
return (mkCoreConApps ratio_data_con [Type integer_ty, num, denom])
where
(ratio_data_con, integer_ty)
= case tcSplitTyConApp ty of
(tycon, [i_ty]) -> ASSERT(isIntegerTy i_ty && tycon `hasKey` ratioTyConKey)
(head (tyConDataCons tycon), i_ty)
x -> pprPanic "dsLit" (ppr x)
dsOverLit :: HsOverLit Id -> DsM CoreExpr
dsOverLit lit = do { dflags <- getDynFlags
; warnAboutOverflowedLiterals dflags lit
; dsOverLit' dflags lit }
dsOverLit' :: DynFlags -> HsOverLit Id -> DsM CoreExpr
-- Post-typechecker, the SyntaxExpr field of an OverLit contains
-- (an expression for) the literal value itself
dsOverLit' dflags (OverLit { ol_val = val, ol_rebindable = rebindable
, ol_witness = witness, ol_type = ty })
| not rebindable
, Just expr <- shortCutLit dflags val ty = dsExpr expr -- Note [Literal short cut]
| otherwise = dsExpr witness
{-
Note [Literal short cut]
~~~~~~~~~~~~~~~~~~~~~~~~
The type checker tries to do this short-cutting as early as possible, but
because of unification etc, more information is available to the desugarer.
And where it's possible to generate the correct literal right away, it's
much better to do so.
************************************************************************
* *
Warnings about overflowed literals
* *
************************************************************************
Warn about functions like toInteger, fromIntegral, that convert
between one type and another when the to- and from- types are the
same. Then it's probably (albeit not definitely) the identity
-}
warnAboutIdentities :: DynFlags -> CoreExpr -> Type -> DsM ()
warnAboutIdentities dflags (Var conv_fn) type_of_conv
| wopt Opt_WarnIdentities dflags
, idName conv_fn `elem` conversionNames
, Just (arg_ty, res_ty) <- splitFunTy_maybe type_of_conv
, arg_ty `eqType` res_ty -- So we are converting ty -> ty
= warnDs (vcat [ text "Call of" <+> ppr conv_fn <+> dcolon <+> ppr type_of_conv
, nest 2 $ text "can probably be omitted"
, parens (text "Use -fno-warn-identities to suppress this message")
])
warnAboutIdentities _ _ _ = return ()
conversionNames :: [Name]
conversionNames
= [ toIntegerName, toRationalName
, fromIntegralName, realToFracName ]
-- We can't easily add fromIntegerName, fromRationalName,
-- because they are generated by literals
warnAboutOverflowedLiterals :: DynFlags -> HsOverLit Id -> DsM ()
warnAboutOverflowedLiterals dflags lit
| wopt Opt_WarnOverflowedLiterals dflags
, Just (i, tc) <- getIntegralLit lit
= if tc == intTyConName then check i tc (undefined :: Int)
else if tc == int8TyConName then check i tc (undefined :: Int8)
else if tc == int16TyConName then check i tc (undefined :: Int16)
else if tc == int32TyConName then check i tc (undefined :: Int32)
else if tc == int64TyConName then check i tc (undefined :: Int64)
else if tc == wordTyConName then check i tc (undefined :: Word)
else if tc == word8TyConName then check i tc (undefined :: Word8)
else if tc == word16TyConName then check i tc (undefined :: Word16)
else if tc == word32TyConName then check i tc (undefined :: Word32)
else if tc == word64TyConName then check i tc (undefined :: Word64)
else return ()
| otherwise = return ()
where
check :: forall a. (Bounded a, Integral a) => Integer -> Name -> a -> DsM ()
check i tc _proxy
= when (i < minB || i > maxB) $ do
warnDs (vcat [ text "Literal" <+> integer i
<+> text "is out of the" <+> ppr tc <+> ptext (sLit "range")
<+> integer minB <> text ".." <> integer maxB
, sug ])
where
minB = toInteger (minBound :: a)
maxB = toInteger (maxBound :: a)
sug | minB == -i -- Note [Suggest NegativeLiterals]
, i > 0
, not (xopt LangExt.NegativeLiterals dflags)
= text "If you are trying to write a large negative literal, use NegativeLiterals"
| otherwise = Outputable.empty
{-
Note [Suggest NegativeLiterals]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you write
x :: Int8
x = -128
it'll parse as (negate 128), and overflow. In this case, suggest NegativeLiterals.
We get an erroneous suggestion for
x = 128
but perhaps that does not matter too much.
-}
warnAboutEmptyEnumerations :: DynFlags -> LHsExpr Id -> Maybe (LHsExpr Id) -> LHsExpr Id -> DsM ()
-- Warns about [2,3 .. 1] which returns the empty list
-- Only works for integral types, not floating point
warnAboutEmptyEnumerations dflags fromExpr mThnExpr toExpr
| wopt Opt_WarnEmptyEnumerations dflags
, Just (from,tc) <- getLHsIntegralLit fromExpr
, Just mThn <- traverse getLHsIntegralLit mThnExpr
, Just (to,_) <- getLHsIntegralLit toExpr
, let check :: forall a. (Enum a, Num a) => a -> DsM ()
check _proxy
= when (null enumeration) $
warnDs (text "Enumeration is empty")
where
enumeration :: [a]
enumeration = case mThn of
Nothing -> [fromInteger from .. fromInteger to]
Just (thn,_) -> [fromInteger from, fromInteger thn .. fromInteger to]
= if tc == intTyConName then check (undefined :: Int)
else if tc == int8TyConName then check (undefined :: Int8)
else if tc == int16TyConName then check (undefined :: Int16)
else if tc == int32TyConName then check (undefined :: Int32)
else if tc == int64TyConName then check (undefined :: Int64)
else if tc == wordTyConName then check (undefined :: Word)
else if tc == word8TyConName then check (undefined :: Word8)
else if tc == word16TyConName then check (undefined :: Word16)
else if tc == word32TyConName then check (undefined :: Word32)
else if tc == word64TyConName then check (undefined :: Word64)
else if tc == integerTyConName then check (undefined :: Integer)
else return ()
| otherwise = return ()
getLHsIntegralLit :: LHsExpr Id -> Maybe (Integer, Name)
-- See if the expression is an Integral literal
-- Remember to look through automatically-added tick-boxes! (Trac #8384)
getLHsIntegralLit (L _ (HsPar e)) = getLHsIntegralLit e
getLHsIntegralLit (L _ (HsTick _ e)) = getLHsIntegralLit e
getLHsIntegralLit (L _ (HsBinTick _ _ e)) = getLHsIntegralLit e
getLHsIntegralLit (L _ (HsOverLit over_lit)) = getIntegralLit over_lit
getLHsIntegralLit _ = Nothing
getIntegralLit :: HsOverLit Id -> Maybe (Integer, Name)
getIntegralLit (OverLit { ol_val = HsIntegral _ i, ol_type = ty })
| Just tc <- tyConAppTyCon_maybe ty
= Just (i, tyConName tc)
getIntegralLit _ = Nothing
{-
************************************************************************
* *
Tidying lit pats
* *
************************************************************************
-}
tidyLitPat :: HsLit -> Pat Id
-- Result has only the following HsLits:
-- HsIntPrim, HsWordPrim, HsCharPrim, HsFloatPrim
-- HsDoublePrim, HsStringPrim, HsString
-- * HsInteger, HsRat, HsInt can't show up in LitPats
-- * We get rid of HsChar right here
tidyLitPat (HsChar src c) = unLoc (mkCharLitPat src c)
tidyLitPat (HsString src s)
| lengthFS s <= 1 -- Short string literals only
= unLoc $ foldr (\c pat -> mkPrefixConPat consDataCon
[mkCharLitPat src c, pat] [charTy])
(mkNilPat charTy) (unpackFS s)
-- The stringTy is the type of the whole pattern, not
-- the type to instantiate (:) or [] with!
tidyLitPat lit = LitPat lit
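-- For instance (illustrative addition): a one-character pattern such as
-- (HsString src "a") is rebuilt above as the constructor pattern ('a' : []),
-- while longer string literals fall through to the final LitPat equation and
-- are later handled by the string-equality chain in matchLiterals below.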
----------------
tidyNPat :: (HsLit -> Pat Id) -- How to tidy a LitPat
-- We need this argument because tidyNPat is called
-- both by Match and by Check, but they tidy LitPats
-- slightly differently; and we must desugar
-- literals consistently (see Trac #5117)
-> HsOverLit Id -> Maybe (SyntaxExpr Id) -> SyntaxExpr Id
-> Pat Id
tidyNPat tidy_lit_pat (OverLit val False _ ty) mb_neg _
-- False: Take short cuts only if the literal is not using rebindable syntax
--
-- Once that is settled, look for cases where the type of the
-- entire overloaded literal matches the type of the underlying literal,
-- and in that case take the short cut
-- NB: Watch out for weird cases like Trac #3382
-- f :: Int -> Int
-- f "blah" = 4
        --      which might be ok if we have 'instance IsString Int'
--
| isIntTy ty, Just int_lit <- mb_int_lit
= mk_con_pat intDataCon (HsIntPrim "" int_lit)
| isWordTy ty, Just int_lit <- mb_int_lit
= mk_con_pat wordDataCon (HsWordPrim "" int_lit)
| isStringTy ty, Just str_lit <- mb_str_lit
= tidy_lit_pat (HsString "" str_lit)
-- NB: do /not/ convert Float or Double literals to F# 3.8 or D# 5.3
-- If we do convert to the constructor form, we'll generate a case
-- expression on a Float# or Double# and that's not allowed in Core; see
-- Trac #9238 and Note [Rules for floating-point comparisons] in PrelRules
where
mk_con_pat :: DataCon -> HsLit -> Pat Id
mk_con_pat con lit = unLoc (mkPrefixConPat con [noLoc $ LitPat lit] [])
mb_int_lit :: Maybe Integer
mb_int_lit = case (mb_neg, val) of
(Nothing, HsIntegral _ i) -> Just i
(Just _, HsIntegral _ i) -> Just (-i)
_ -> Nothing
mb_str_lit :: Maybe FastString
mb_str_lit = case (mb_neg, val) of
(Nothing, HsIsString _ s) -> Just s
_ -> Nothing
tidyNPat _ over_lit mb_neg eq
= NPat (noLoc over_lit) mb_neg eq
{-
************************************************************************
* *
Pattern matching on LitPat
* *
************************************************************************
-}
matchLiterals :: [Id]
-> Type -- Type of the whole case expression
-> [[EquationInfo]] -- All PgLits
-> DsM MatchResult
matchLiterals (var:vars) ty sub_groups
= ASSERT( notNull sub_groups && all notNull sub_groups )
do { -- Deal with each group
; alts <- mapM match_group sub_groups
-- Combine results. For everything except String
-- we can use a case expression; for String we need
-- a chain of if-then-else
; if isStringTy (idType var) then
do { eq_str <- dsLookupGlobalId eqStringName
; mrs <- mapM (wrap_str_guard eq_str) alts
; return (foldr1 combineMatchResults mrs) }
else
return (mkCoPrimCaseMatchResult var ty alts)
}
where
match_group :: [EquationInfo] -> DsM (Literal, MatchResult)
match_group eqns
= do dflags <- getDynFlags
let LitPat hs_lit = firstPat (head eqns)
match_result <- match vars ty (shiftEqns eqns)
return (hsLitKey dflags hs_lit, match_result)
wrap_str_guard :: Id -> (Literal,MatchResult) -> DsM MatchResult
-- Equality check for string literals
wrap_str_guard eq_str (MachStr s, mr)
= do { -- We now have to convert back to FastString. Perhaps there
-- should be separate MachBytes and MachStr constructors?
let s' = mkFastStringByteString s
; lit <- mkStringExprFS s'
; let pred = mkApps (Var eq_str) [Var var, lit]
; return (mkGuardedMatchResult pred mr) }
wrap_str_guard _ (l, _) = pprPanic "matchLiterals/wrap_str_guard" (ppr l)
matchLiterals [] _ _ = panic "matchLiterals []"
---------------------------
hsLitKey :: DynFlags -> HsLit -> Literal
-- Get a Core literal to use (only) a grouping key
-- Hence its type doesn't need to match the type of the original literal
-- (and doesn't for strings)
-- It only works for primitive types and strings;
-- others have been removed by tidy
hsLitKey dflags (HsIntPrim _ i) = mkMachInt dflags i
hsLitKey dflags (HsWordPrim _ w) = mkMachWord dflags w
hsLitKey _ (HsInt64Prim _ i) = mkMachInt64 i
hsLitKey _ (HsWord64Prim _ w) = mkMachWord64 w
hsLitKey _ (HsCharPrim _ c) = MachChar c
hsLitKey _ (HsStringPrim _ s) = MachStr s
hsLitKey _ (HsFloatPrim f) = MachFloat (fl_value f)
hsLitKey _ (HsDoublePrim d) = MachDouble (fl_value d)
hsLitKey _ (HsString _ s) = MachStr (fastStringToByteString s)
hsLitKey _ l = pprPanic "hsLitKey" (ppr l)
---------------------------
hsOverLitKey :: HsOverLit a -> Bool -> Literal
-- Ditto for HsOverLit; the boolean indicates to negate
hsOverLitKey (OverLit { ol_val = l }) neg = litValKey l neg
---------------------------
litValKey :: OverLitVal -> Bool -> Literal
litValKey (HsIntegral _ i) False = MachInt i
litValKey (HsIntegral _ i) True = MachInt (-i)
litValKey (HsFractional r) False = MachFloat (fl_value r)
litValKey (HsFractional r) True = MachFloat (negate (fl_value r))
litValKey (HsIsString _ s) neg = ASSERT( not neg) MachStr
(fastStringToByteString s)
{-
************************************************************************
* *
Pattern matching on NPat
* *
************************************************************************
-}
matchNPats :: [Id] -> Type -> [EquationInfo] -> DsM MatchResult
matchNPats (var:vars) ty (eqn1:eqns) -- All for the same literal
= do { let NPat (L _ lit) mb_neg eq_chk = firstPat eqn1
; lit_expr <- dsOverLit lit
; neg_lit <- case mb_neg of
Nothing -> return lit_expr
Just neg -> do { neg_expr <- dsExpr neg
; return (App neg_expr lit_expr) }
; eq_expr <- dsExpr eq_chk
; let pred_expr = mkApps eq_expr [Var var, neg_lit]
; match_result <- match vars ty (shiftEqns (eqn1:eqns))
; return (mkGuardedMatchResult pred_expr match_result) }
matchNPats vars _ eqns = pprPanic "matchOneNPat" (ppr (vars, eqns))
{-
************************************************************************
* *
Pattern matching on n+k patterns
* *
************************************************************************
For an n+k pattern, we use the various magic expressions we've been given.
We generate:
\begin{verbatim}
if ge var lit then
let n = sub var lit
in <expr-for-a-successful-match>
else
<try-next-pattern-or-whatever>
\end{verbatim}
-}
matchNPlusKPats :: [Id] -> Type -> [EquationInfo] -> DsM MatchResult
-- All NPlusKPats, for the *same* literal k
matchNPlusKPats (var:vars) ty (eqn1:eqns)
= do { let NPlusKPat (L _ n1) (L _ lit) ge minus = firstPat eqn1
; ge_expr <- dsExpr ge
; minus_expr <- dsExpr minus
; lit_expr <- dsOverLit lit
; let pred_expr = mkApps ge_expr [Var var, lit_expr]
minusk_expr = mkApps minus_expr [Var var, lit_expr]
(wraps, eqns') = mapAndUnzip (shift n1) (eqn1:eqns)
; match_result <- match vars ty eqns'
; return (mkGuardedMatchResult pred_expr $
mkCoLetMatchResult (NonRec n1 minusk_expr) $
adjustMatchResult (foldr1 (.) wraps) $
match_result) }
where
shift n1 eqn@(EqnInfo { eqn_pats = NPlusKPat (L _ n) _ _ _ : pats })
= (wrapBind n n1, eqn { eqn_pats = pats })
-- The wrapBind is a no-op for the first equation
shift _ e = pprPanic "matchNPlusKPats/shift" (ppr e)
matchNPlusKPats vars _ eqns = pprPanic "matchNPlusKPats" (ppr (vars, eqns))
|
gridaphobe/ghc
|
compiler/deSugar/MatchLit.hs
|
bsd-3-clause
| 19,576
| 1
| 21
| 5,836
| 4,424
| 2,270
| 2,154
| -1
| -1
|
{-# LANGUAGE OverloadedStrings #-}
module Bead.View.Content.Bootstrap where
{-
Collection of bootstrap related pagelets.
-}
import Control.Monad (when)
import Data.Data
import Data.Maybe (fromMaybe)
import Data.Monoid
import Data.String
import Text.Blaze.Html5 hiding (map)
import qualified Text.Blaze.Html5 as H hiding (map)
import Text.Blaze.Html5.Attributes
import qualified Text.Blaze.Html5.Attributes as A
import Bead.View.Fay.JSON.ServerSide
-- | Represents the possible sizes of columns
newtype ColumnSize = ColumnSize Int
deriving Eq
columnSize f (ColumnSize s) = f s
colSize1 = ColumnSize 1
colSize2 = ColumnSize 2
colSize3 = ColumnSize 3
colSize4 = ColumnSize 4
colSize5 = ColumnSize 5
colSize6 = ColumnSize 6
colSize7 = ColumnSize 7
colSize8 = ColumnSize 8
colSize9 = ColumnSize 9
colSize10 = ColumnSize 10
colSize11 = ColumnSize 11
colSize12 = ColumnSize 12
-- Returns the HTML class attribute value for the given column size
columnSizeClass = columnSize $ \size -> "col-md-" ++ show size
-- | Represents the possible offsets of columns
newtype ColumnOffset = ColumnOffset Int
deriving Eq
columnOffset f (ColumnOffset s) = f s
colOffset1 = ColumnOffset 1
colOffset2 = ColumnOffset 2
colOffset3 = ColumnOffset 3
colOffset4 = ColumnOffset 4
colOffset5 = ColumnOffset 5
colOffset6 = ColumnOffset 6
colOffset7 = ColumnOffset 7
colOffset8 = ColumnOffset 8
colOffset9 = ColumnOffset 9
colOffset10 = ColumnOffset 10
colOffset11 = ColumnOffset 11
colOffset12 = ColumnOffset 12
-- Returns the HTML class attribute value for the given column offset
columnOffsetClass = columnOffset $ \offset -> "col-md-offset-" ++ show offset
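-- For example (illustrative addition): columnSizeClass colSize4 yields "col-md-4"
-- and columnOffsetClass colOffset4 yields "col-md-offset-4"; colMd further below
-- combines both into a single class attribute.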
container = H.div ! class_ "container"
footer = H.div ! A.id "bead-footer" ! class_ "navbar navbar-default navbar-fixed-bottom"
-- | Fades out the footer after the given seconds
fadeOutFooter secs = do
H.script $ fromString $ concat ["$('#bead-footer').delay(", show (secs * 1000), ").fadeOut('slow')"]
fadeOutFooterButton custom ttl text = do
a ! class_ (fromString ("btn " <> custom))
! role "button"
! A.title (fromString ttl)
! href "#"
! disabled ""
$ (fromString text)
-- | Creates a warning style button; if the user clicks on the button, the footer fades away.
fadeOutFooterWarningButton = fadeOutFooterButton "btn-warning"
-- | Creates a danger style button; if the user clicks on the button, the footer fades away.
fadeOutFooterDangerButton = fadeOutFooterButton "btn-danger"
formGroup = H.div ! class_ "form-group"
inputGroup = H.div ! class_ "input-group"
-- | Creates a list group div, which can contain a various list group items
listGroup = H.div ! class_ "list-group"
-- | Creates an unordered list as a list group
unorderedListGroup = H.ul ! class_ "list-group"
-- | Creates a linked list group item with a route to point at, and a text to
-- display
listGroupLinkItem route text = H.a ! href (fromString route) ! class_ "list-group-item" $ text
-- | Creates a texted list group item
listGroupTextItem text = H.a ! href "#" ! class_ "list-group-item" $ fromString text
-- | Creates a badge that can be displayed in the list group
badge text = H.span ! class_ "badge" $ fromString text
-- | Creates a caret sign
caret = H.span ! class_ "caret" $ mempty
-- | Creates a justified button group
buttonGroupJustified = H.div ! class_ "btn-group btn-group-justified"
-- | Creates a button group
buttonGroup = H.div ! class_ "btn-group"
-- | Creates a button link with custom button attributes, a route to point
-- at, a title and a text to show
customButtonLink custom ref ttl text =
a ! class_ (fromString ("btn " <> (unwords custom)))
! customAttribute "role" "button"
! A.title (fromString ttl)
! href (fromString ref)
$ (fromString text)
-- | Creates a button styled link
buttonLink ref text = customButtonLink ["btn-default"] ref "" text
-- | Creates a block button styled link
blockButtonLink ref text = customButtonLink ["btn-default", "btn-block"] ref "" text
-- | Warning button with a given text
warningButtonLink ref text = customButtonLink ["btn-warning"] ref "" text
-- | Danger button with a given text
dangerButtonLink ref text = customButtonLink ["btn-danger"] ref "" text
-- | Creates a date time picker using a third party library and turns the picker
-- script on if the on switch is set to True
datetimePicker paramName date on =
H.div ! class_ "input-group date"
! A.id (fromString paramName) $ do
input ! formControl
! name (fromString paramName)
! type_ "text"
! readonly ""
! required ""
! value (fromString date)
H.span ! class_ "input-group-addon" $ H.span ! class_ "glyphicon glyphicon-calendar" $ mempty
when on $ dateTimePickerScript paramName
dateTimePickerScript pickerId = script . fromString $ concat
[ "$(function () {"
, "$('#", pickerId, "').datetimepicker({"
, "format: 'YYYY-MM-DD HH:mm:ss',"
, "pick12HourFormat: false,"
, "pickSeconds: true"
, "});"
, "});"
]
-- | Creates a dropdown button
dropdownButton text =
button ! type_ "button"
! class_ "btn btn-default dropdown-toggle"
! dataAttribute "toggle" "dropdown"
$ do (fromString text); caret
-- | Creates a list of dropdown menu items
dropdownMenu items = H.ul ! class_ "dropdown-menu" ! customAttribute "role" "menu" $ mapM_ li items
-- | Creates a dropdown from the items with the given text on the button
dropdown text items = buttonGroup $ do
dropdownButton text
dropdownMenu items
-- | Creates a paragraph that represents a help block from a given text
helpBlock text = p ! class_ "help-block" $ fromString text
-- | Creates a form control selection with the given parameter name, a selector
-- function which determines the selected value, and possible values
selection paramName selector values =
formGroup $ selectionPart
paramName
[class_ "combobox form-control", A.style "display:none", A.required ""]
selector
values
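-- Example use (illustrative addition; 'Colour', 'Red' and 'Blue' stand for a
-- user-defined type with Show and Data instances and are not part of this module):
--
--   selection "colour" (== Red) [(Red, "Red"), (Blue, "Blue")]
--
-- renders a required combobox named "colour" with the "Red" option preselected.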
-- | Creates a form control selection with the given parameter name, a label, a selector
-- function which determines the selected value, and possible values
selectionWithLabel paramName labelText selector values = formGroup $ do
labelFor paramName labelText
selectionPart
paramName
[class_ "combobox form-control", A.style "display:none", A.required ""]
selector
values
-- | Creates a form control optional selection with the given parameter name, a label, a selector
-- function which determines the selected value, and possible values
selectionOptionalWithLabel paramName labelText selector values = formGroup $ do
labelFor paramName labelText
selectionOptionalPart
paramName
[class_ "combobox form-control", A.style "display:none"]
selector
values
-- | Creates a submit block button with a given name and the given text
submitButton nameValue text =
button ! type_ "submit"
! (name $ fromString nameValue)
! class_ "btn btn-block btn-default"
$ fromString text
-- | Creates a submit button with a given attribute and a given text
submitButtonWithAttr attr text =
button ! type_ "submit"
! class_ "btn btn-block btn-default"
! attr
$ fromString text
-- | Creates a submit small button with a given name and the given text
smallSubmitButton nameValue text =
button ! type_ "submit"
! (name $ fromString nameValue)
! class_ "btn btn-default"
$ fromString text
-- | Turns the selection into combobox like selections
turnSelectionsOn
= script ! type_ "text/javascript" $ "$(document).ready(function(){$('.combobox').combobox()});"
-- | Creates a password input with the given name as id, a given label within a form-group control
passwordInput paramName labelText =
formGroup $ do
labelFor paramName labelText
H.input ! formControl
! type_ "password"
! required ""
! name (fromString paramName)
! A.id (fromString paramName)
inputForFormControl = H.input ! formControl
-- | Creates a text input field only with a default value
textInputFieldWithDefault paramName value =
H.input ! formControl
! type_ "text"
! A.required ""
! A.name (fromString paramName)
! A.id (fromString paramName)
! A.value (fromString value)
-- | Creates a text input with the given name as id, a given label and a placeholder text
textInput paramName labelText placeholderText =
formGroup $ do
labelFor paramName labelText
H.input ! formControl
! type_ "text"
! A.required ""
! A.name (fromString paramName)
! A.id (fromString paramName)
! A.placeholder (fromString placeholderText)
-- | Creates a text input with the given name as id, a given label and a default value
textInputWithDefault paramName labelText value =
formGroup $ do
labelFor paramName labelText
textInputFieldWithDefault paramName value
readOnlyTextInputWithDefault paramName labelText value =
formGroup $ do
labelFor paramName labelText
(textInputFieldWithDefault paramName value) ! A.readonly ""
-- | Creates a label for the given id and given text
labelFor name text =
H.label ! for (fromString name) $ (fromString text)
-- | Creates a labeled text as a form group element
labeledText name value =
formGroup $ do
H.label $ fromString $ name
H.span ! formControl $ value
-- | Creates a text area input field with the given name as its id
textAreaField paramName =
H.textarea ! formControl
! A.required ""
! A.rows "20"
! A.id (fromString paramName)
! A.name (fromString paramName)
-- | Creates an optional text area input field with the given name as its id
textAreaOptionalField paramName =
H.textarea ! formControl
! A.rows "20"
! A.id (fromString paramName)
! A.name (fromString paramName)
-- | Creates a text area input with the given name as id, a given label
textArea paramName labelText html =
formGroup $ do
labelFor paramName labelText
textAreaField paramName html
-- | Creates an optional text area input with the given name as id, a given label
optionalTextArea paramName labelText html =
formGroup $ do
labelFor paramName labelText
textAreaOptionalField paramName html
-- | Creates a text area input with the given name as id, a given label
utf8TextArea paramName labelText html =
formGroup $ do
labelFor paramName labelText
textAreaField paramName ! A.acceptCharset "utf-8" $ html
-- | Creates a radio button group from the given (checked, value, label) triples,
-- using the parameter name with a numeric suffix as the ids. The first value is the primary active one.
radioButtonGroup paramName valuesAndLabel =
H.div ! class_ "btn-group" $
mapM_ button ([1..] `zip` valuesAndLabel)
where
button (n,(c,v,l)) =
H.label ! class_ "btn btn-default" $ do
checked c $
H.input ! type_ "radio"
! name (fromString paramName)
! A.id (fromString (paramName ++ show n))
! A.value (fromString v)
fromString l
checked c tag = if c then (tag ! A.checked "") else tag
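-- Example use (illustrative addition): a yes/no group whose first option starts
-- checked; the generated inputs get the ids "confirm1" and "confirm2":
--
--   radioButtonGroup "confirm" [(True, "yes", "Yes"), (False, "no", "No")]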
-- | Creates a bootstrap row
row = H.div ! class_ "row"
-- | Creates a bootstrap column with the given offset
colMd size offset =
H.div ! class_ (fromString $ concat [columnSizeClass size, " ", columnOffsetClass offset])
-- | Creates a bootstrap 12 column
colMd12 = H.div ! class_ "col-md-12"
-- | Creates a bootstrap 6 width column
colMd6 = H.div ! class_ "col-md-6"
-- | Creates a bootstrap row with only one colMd12 column
rowColMd12 = row . colMd12
-- | Creates a bootstrap row with a 4 sized column in the middle of the page
rowCol4Offset4 = row . colMd colSize4 colOffset4
-- | Creates a bootstrap page header
pageHeader = H.div ! class_ "page-header"
-- | Creates a bootstrap table
table = H.table ! class_ "table table-bordered table-condensed table-hover table-striped"
-- HTML helpers
optionTag :: String -> String -> Bool -> Html
optionTag value text False = H.option ! A.value (fromString value) $ fromString text
optionTag value text True = H.option ! A.value (fromString value) ! A.selected "" $ fromString text
selectTag :: String -> Html -> Html
selectTag name =
H.select ! A.id (fromString name)
! A.name (fromString name)
! A.required ""
selectOptionalTag :: String -> Html -> Html
selectOptionalTag name =
H.select ! A.id (fromString name)
! A.name (fromString name)
-- Encodes the value to its Fay JSON representation, or substitutes an error message mentioning the given name
encode :: (Data a, Show a, IsString s) => String -> a -> s
encode name value = fromString $ fromMaybe (name ++ ": error encoding value") (encodeToFay value)
selectionPart :: (Show a, Data a) =>
String -> [Attribute] -> (a -> Bool) -> [(a, String)] -> Html
selectionPart name attrs def = foldl (!) (selectTag name) attrs . mapM_ option
where
option (v,t) = optionTag (encode "selection" v) t (def v)
selectionOptionalPart :: (Show a, Data a) =>
String -> [Attribute] -> (a -> Bool) -> [(a, String)] -> Html
selectionOptionalPart name attrs def = foldl (!) (selectOptionalTag name) attrs . mapM_ option
where
option (v,t) = optionTag (encode "selection" v) t (def v)
-- Collapsible
-- | Creates a panel group
panelGroup =
H.div ! A.class_ "panel-group" ! role "tablist"
-- | Creates a panel with a given id, a header text, and a body
panel collapsed id_ header_ body_ =
let headingId = "heading" ++ id_
collapseClass = if collapsed then "panel-collapse collapse in"
else "panel-collapse collapse"
in
H.div ! A.class_ "panel panel-default" $ do
H.div ! A.class_ "panel-heading" ! role "tab" ! A.id (fromString headingId) $
H.h4 ! A.class_ "panel-title" $
H.a ! dataToggle "collapse" ! A.href (fromString $ '#':id_)
! ariaExpanded "true" ! ariaControls (fromString id_) $ fromString header_
H.div ! A.id (fromString id_) ! A.class_ (fromString collapseClass)
! role "tabpanel" ! ariaLabelledBy (fromString headingId) $
H.div ! A.class_ "panel-body" $ body_
-- Attributes
ariaExpanded = customAttribute "aria-expanded"
ariaControls = customAttribute "aria-controls"
ariaLabelledBy = customAttribute "aria-labelledby"
textCenter = A.class_ "text-center"
dataToggle = customAttribute "data-toggle"
dataPlacement = customAttribute "data-placement"
formControl = class_ "form-control"
role = customAttribute "role"
-- | Adds a tooltip to a given HTML tag
tooltip = dataToggle "tooltip"
-- | Place the tooltip on the top
tooltipAtTop = dataPlacement "top"
-- | Constants
closed = False
collapsed = True
|
pgj/bead
|
src/Bead/View/Content/Bootstrap.hs
|
bsd-3-clause
| 14,954
| 0
| 22
| 3,310
| 3,459
| 1,734
| 1,725
| 274
| 2
|
module Paths_LA (
version,
getBinDir, getLibDir, getDataDir, getLibexecDir,
getDataFileName
) where
import qualified Control.Exception as Exception
import Data.Version (Version(..))
import System.Environment (getEnv)
import Prelude
catchIO :: IO a -> (Exception.IOException -> IO a) -> IO a
catchIO = Exception.catch
version :: Version
version = Version {versionBranch = [0,1,0,6], versionTags = []}
bindir, libdir, datadir, libexecdir :: FilePath
bindir = "/home/linus/.cabal/bin"
libdir = "/home/linus/.cabal/lib/LA-0.1.0.6/ghc-7.6.3"
datadir = "/home/linus/.cabal/share/LA-0.1.0.6"
libexecdir = "/home/linus/.cabal/libexec"
getBinDir, getLibDir, getDataDir, getLibexecDir :: IO FilePath
getBinDir = catchIO (getEnv "LA_bindir") (\_ -> return bindir)
getLibDir = catchIO (getEnv "LA_libdir") (\_ -> return libdir)
getDataDir = catchIO (getEnv "LA_datadir") (\_ -> return datadir)
getLibexecDir = catchIO (getEnv "LA_libexecdir") (\_ -> return libexecdir)
getDataFileName :: FilePath -> IO FilePath
getDataFileName name = do
dir <- getDataDir
return (dir ++ "/" ++ name)
|
chalmers-kandidat14/LA
|
dist/build/autogen/Paths_LA.hs
|
bsd-3-clause
| 1,108
| 0
| 10
| 167
| 332
| 190
| 142
| 26
| 1
|
-------------------------------------------------------------------------------------
-- |
-- Copyright : (c) Hans Hoglund 2012
--
-- License : BSD-style
--
-- Maintainer : hans@hanshoglund.se
-- Stability : experimental
-- Portability : portable
--
-------------------------------------------------------------------------------------
module Data.Music.MusicXml.Write.Score (
) where
import Prelude hiding (getLine)
import Data.Maybe (maybeToList)
import Data.Semigroup
import Data.Default
import Numeric.Natural
import Text.XML.Light hiding (Line)
import Data.Music.MusicXml.Score
import Data.Music.MusicXml.Time
import Data.Music.MusicXml.Pitch
import Data.Music.MusicXml.Dynamics
import Data.Music.MusicXml.Read
import Data.Music.MusicXml.Write
import qualified Data.List as List
import qualified Data.Char as Char
-- This instance is used by toXml and should return a single list
instance WriteMusicXml Score where
write (Partwise attr
header
parts) = single
$ unode "score-partwise"
$ write header <> writePartwise parts
write (Timewise attr
header
measures) = single
$ unode "timewise-score"
$ write header <> writeTimewise measures
writePartwise :: [(PartAttrs, [(MeasureAttrs, Music)])] -> [Element]
writeTimewise :: [(MeasureAttrs, [(PartAttrs, Music)])] -> [Element]
writePartwise = fmap (\(attrs, measures) -> writePartElem attrs $
fmap (\(attrs, music) -> writeMeasureElem attrs $
writeMusic music) measures)
writeTimewise = fmap (\(attrs, parts) -> writeMeasureElem attrs $
fmap (\(attrs, music) -> writePartElem attrs $
writeMusic music) parts)
writePartElem attrs = addPartAttrs attrs . unode "part"
writeMeasureElem attrs = addMeasureAttrs attrs . unode "measure"
writeMusic = concatMap write . getMusic
addScoreAttrs :: ScoreAttrs -> Element -> Element
addPartAttrs :: PartAttrs -> Element -> Element
addMeasureAttrs :: MeasureAttrs -> Element -> Element
addScoreAttrs (ScoreAttrs []) = id
addScoreAttrs (ScoreAttrs xs) = addAttr (uattr "version" $ concatSep "." $ map show xs)
addPartAttrs (PartAttrs x) = addAttr (uattr "id" x)
addMeasureAttrs (MeasureAttrs n) = addAttr (uattr "number" $ show n)
instance WriteMusicXml ScoreHeader where
write (ScoreHeader title
mvm
ident
partList) = mempty <> writeTitle title
<> writeMvm mvm
<> writeIdent ident
<> writePartList partList
where {
writeTitle, writeMvm :: Maybe String -> [Element] ;
writeIdent :: Maybe Identification -> [Element] ;
writePartList :: PartList -> [Element] ;
writeTitle = fmap (unode "title") . maybeToList ;
writeMvm = fmap (unode "movement-title") . maybeToList ;
writeIdent = single . unode "identification" . (write =<<) . maybeToList ;
writePartList = single . unode "part-list" . (write =<<) . getPartList ;
}
instance WriteMusicXml Identification where
write (Identification creators) = map writeCreator creators
where
writeCreator (Creator t n) = unode "creator" (uattr "type" t, n)
-- ----------------------------------------------------------------------------------
-- Part list
-- ----------------------------------------------------------------------------------
instance WriteMusicXml PartListElem where
write (Part id
name
abbrev
nameDisplay
abbrevDisplay
) = single
$ addAttr (uattr "id" id)
$ unode "score-part"
$ mconcat [writeName name, writeAbbrev abbrev,
writeNameDisplay nameDisplay, writeAbbrevDisplay abbrevDisplay]
where
writeName = single . unode "part-name"
writeAbbrev = maybeToList . fmap (unode "part-abbreviation")
writeNameDisplay = maybeToList . fmap (unode "part-name-display" . unode "display-text")
writeAbbrevDisplay = maybeToList . fmap (unode "part-abbreviation-display" . unode "display-text")
write (Group level
startStop
name
abbrev
symbol
barlines
time) = single
$ addAttr (uattr "number" $ show $ getLevel level)
$ addAttr (uattr "type" $ writeStartStop startStop)
$ unode "part-group"
$ mempty
<> writeName name
<> writeAbbrev abbrev
<> writeSymbol symbol
<> writeBarlines barlines
where
writeName = single . unode "group-name"
writeAbbrev = maybeToList . fmap (unode "group-abbreviation")
writeSymbol = maybeToList . fmap (unode "group-symbol" . writeGroupSymbol)
writeBarlines = maybeToList . fmap (unode "group-barline" . writeGroupBarlines)
writeGroupSymbol :: GroupSymbol -> String
writeGroupBarlines :: GroupBarlines -> String
-- ----------------------------------------------------------------------------------
-- Music
-- ----------------------------------------------------------------------------------
instance WriteMusicXml MusicElem where
write (MusicAttributes x) = single $ unode "attributes" $ write x
write (MusicNote x) = single $ unode "note" $ write x
write (MusicDirection x) = single $ unode "direction" (unode "direction-type" $ write x)
write (MusicBackup d) = single $ unode "backup" (unode "duration" $ show $ getDivs $ d)
write (MusicForward d) = single $ unode "forward" (unode "duration" $ show $ getDivs $ d)
write (MusicBarline x) = write x
-- ----------------------------------------------------------------------------------
-- Attributes
-- ----------------------------------------------------------------------------------
instance WriteMusicXml Attributes where
write (Divisions divs) = single $ unode "divisions"
$ show $ getDivs divs
write (Clef sign line) = single $ unode "clef"
[ unode "sign" (writeClef sign),
unode "line" (show $ getLine line)]
write (Key fifths mode) = single $ unode "key"
[ unode "fifths" (show $ getFifths fifths),
unode "mode" (writeMode mode)]
write (Time (CommonTime)) = single $ addAttr (uattr "symbol" "common")
$ unode "time"
[ unode "beats" (show 4),
unode "beat-type" (show 4)]
write (Time (CutTime)) = single $ addAttr (uattr "symbol" "cut")
$ unode "time"
[ unode "beats" (show 2),
unode "beat-type" (show 2) ]
write (Time (DivTime beats beatType)) = single $ unode "time"
[ unode "beats" (show $ getBeat beats),
unode "beat-type" (show $ getBeatType beatType)]
write (Staves n) = single $ unode "staves" $ show n
-- ----------------------------------------------------------------------------------
-- Notes
-- ----------------------------------------------------------------------------------
instance WriteMusicXml NoteProps where
write (NoteProps
instrument -- TODO
voice
typ
dots
accidental -- TODO
timeMod -- TODO
stem -- TODO
noteHead
noteHeadText -- TODO
staff -- TODO
beam
notations
lyrics) -- TODO
= mempty
-- TODO instrument
<> maybeOne (\n -> unode "voice" $ show n) voice
<> maybeOne (\(noteVal, noteSize) -> unode "type" (writeNoteVal noteVal)) typ
<> replicate (fromIntegral dots) (unode "dot" ())
-- TODO accidental
<> maybeOne (\(m, n) -> unode "time-modification" [
unode "actual-notes" (show m),
unode "normal-notes" (show n)
]) timeMod
-- TODO stem
<> maybeOne (\(nh,_,_) -> unode "notehead" (writeNoteHead nh)) noteHead
-- TODO notehead-text
-- TODO staff
<> maybeOne (\(n, typ) -> addAttr (uattr "number" $ show $ getLevel n)
$ unode "beam" $ writeBeamType typ) beam
<> case notations of
[] -> []
ns -> [unode "notations" (concatMap write ns)]
instance WriteMusicXml FullNote where
write (Pitched isChord
(steps, alter, octaves)) = mempty
<> singleIf isChord (unode "chord" ())
<> single (unode "pitch" (mempty
<> single ((unode "step" . show) steps)
<> maybeOne (unode "alter" . show . getSemitones) alter
<> single ((unode "octave" . show . getOctaves) octaves)))
write (Unpitched isChord
Nothing) = mempty
<> singleIf isChord (unode "chord" ())
<> single (unode "unpitched" ())
write (Unpitched isChord
(Just (steps, octaves))) = mempty
<> singleIf isChord (unode "chord" ())
<> single (unode "unpitched" (mempty
<> single ((unode "display-step" . show) steps)
<> single ((unode "display-octave" . show . getOctaves) octaves)))
write (Rest isChord
Nothing) = mempty
<> singleIf isChord (unode "chord" ())
<> single (unode "rest" ())
write (Rest isChord
(Just (steps, octaves))) = mempty
<> singleIf isChord (unode "chord" ())
<> single (unode "rest" (mempty
<> single ((unode "display-step" . show) steps)
<> single ((unode "display-octave" . show . getOctaves) octaves)))
instance WriteMusicXml Note where
write (Note full
dur
ties
props) = write full <> writeDuration dur
<> concatMap writeTie ties
<> write props
write (CueNote full
dur
props) = [unode "cue" ()] <> write full
<> writeDuration dur
<> write props
write (GraceNote full
ties
props) = [unode "grace" ()] <> write full
<> concatMap writeTie ties
<> write props
writeDuration :: Duration -> [Element]
writeDuration = single . unode "duration" . show . getDivs
writeTie :: Tie -> [Element]
writeTie typ = single $ addAttr (uattr "type" $ writeStartStopContinue typ) $ unode "tie" ()
-- ----------------------------------------------------------------------------------
-- Notations
-- ----------------------------------------------------------------------------------
instance WriteMusicXml Notation where
write (Tied typ) = single
$ addAttr (uattr "type" $ writeStartStopContinue typ)
$ unode "tied" ()
write (Slur level typ) = single
$ addAttr (uattr "number" $ show $ getLevel level)
$ addAttr (uattr "type" $ writeStartStopContinue typ)
$ unode "slur" ()
write (Tuplet level typ) = single
$ addAttr (uattr "number" $ show $ getLevel level)
$ addAttr (uattr "type" $ writeStartStopContinue typ)
$ unode "tuplet" ()
write (Glissando level typ lineTyp text) = single
$ addAttr (uattr "number" $ show $ getLevel level)
$ addAttr (uattr "type" $ writeStartStopContinue typ)
$ addAttr (uattr "line-type" $ writeLineType lineTyp)
$ case text of
Nothing -> unode "glissando" ()
Just text -> unode "glissando" text
write (Slide level typ lineTyp text) = single
$ addAttr (uattr "number" $ show $ getLevel level)
$ addAttr (uattr "type" $ writeStartStopContinue typ)
$ addAttr (uattr "line-type" $ writeLineType lineTyp)
$ case text of
Nothing -> unode "slide" ()
Just text -> unode "slide" text
write (Ornaments xs) = single $ unode "ornaments" (concatMap writeOrnamentWithAcc xs)
where
writeOrnamentWithAcc (o, as) = write o
<> fmap (unode "accidental-mark" . writeAccidental) as
write (Technical xs) = single $ unode "technical" (concatMap write xs)
write (Articulations xs) = single $ unode "articulations" (concatMap write xs)
write (DynamicNotation dyn) = single $ unode "dynamics" (writeDynamics dyn)
write (Fermata sign) = single $ unode "fermata" (writeFermataSign sign)
write Arpeggiate = single $ unode "arpeggiate" ()
write NonArpeggiate = single $ unode "non-arpeggiate" ()
write (AccidentalMark acc) = single $ unode "accidental-mark" (writeAccidental acc)
write (OtherNotation not) = notImplemented "OtherNotation"
instance WriteMusicXml Ornament where
write TrillMark = single $ unode "trill-mark" ()
write Turn = single $ unode "turn" ()
write DelayedTurn = single $ unode "delayed-turn" ()
write InvertedTurn = single $ unode "inverted-turn" ()
write DelayedInvertedTurn = single $ unode "delayed-inverted-turn" ()
write VerticalTurn = single $ unode "vertical-turn" ()
write Shake = single $ unode "shake" ()
write WavyLine = single $ unode "wavyline" ()
write Mordent = single $ unode "mordent" ()
write InvertedMordent = single $ unode "inverted-mordent" ()
write Schleifer = single $ unode "schleifer" ()
write (Tremolo num) = single $ unode "tremolo" (show num)
instance WriteMusicXml Technical where
write UpBow = single $ unode "up-bow" ()
write DownBow = single $ unode "down-bow" ()
write Harmonic = single $ unode "harmonic" ()
write OpenString = single $ unode "openstring" ()
write ThumbPosition = single $ unode "thumb-position" ()
write Fingering = single $ unode "fingering" ()
write Pluck = single $ unode "pluck" ()
write DoubleTongue = single $ unode "double-tongue" ()
write TripleTongue = single $ unode "triple-tongue" ()
write Stopped = single $ unode "stopped" ()
write SnapPizzicato = single $ unode "snap-pizzicato" ()
write Fret = single $ unode "fret" ()
write String = single $ unode "string" ()
write HammerOn = single $ unode "hammer-on" ()
write PullOff = single $ unode "pull-off" ()
write Bend = single $ unode "bend" ()
write Tap = single $ unode "tap" ()
write Heel = single $ unode "heel" ()
write Toe = single $ unode "toe" ()
write Fingernails = single $ unode "fingernails" ()
write Hole = single $ unode "hole" ()
write Arrow = single $ unode "arrow" ()
write Handbell = single $ unode "handbell" ()
write (OtherTechnical tech) = notImplemented "OtherTechnical"
instance WriteMusicXml Articulation where
write Accent = single $ unode "accent" ()
write StrongAccent = single $ unode "strong-accent" ()
write Staccato = single $ unode "staccato" ()
write Tenuto = single $ unode "tenuto" ()
write DetachedLegato = single $ unode "detached-legato" ()
write Staccatissimo = single $ unode "staccatissimo" ()
write Spiccato = single $ unode "spiccato" ()
write Scoop = single $ unode "scoop" ()
write Plop = single $ unode "plop" ()
write Doit = single $ unode "doit" ()
write Falloff = single $ unode "falloff" ()
write BreathMark = single $ unode "breathmark" ()
write Caesura = single $ unode "caesura" ()
write Stress = single $ unode "stress" ()
write Unstress = single $ unode "unstress" ()
write OtherArticulation = notImplemented "OtherArticulation"
-- ----------------------------------------------------------------------------------
-- Directions
-- ----------------------------------------------------------------------------------
instance WriteMusicXml Direction where
write (Rehearsal str) = single $ unode "rehearsal" str
write Segno = single $ unode "segno" ()
write (Words str) = single $ unode "words" str
write Coda = single $ unode "coda" ()
write (Crescendo Start) = single $ addAttr (uattr "type" "crescendo") $ unode "wedge" ()
write (Diminuendo Start) = single $ addAttr (uattr "type" "diminuendo") $ unode "wedge" ()
write (Crescendo Stop) = single $ addAttr (uattr "type" "stop") $ unode "wedge" ()
write (Diminuendo Stop) = single $ addAttr (uattr "type" "stop") $ unode "wedge" ()
write (Dynamics dyn) = single $ unode "dynamics" (writeDynamics dyn)
write (Metronome noteVal dotted tempo) = single $ unode "metronome" $
[ unode "beat-unit" (writeNoteVal noteVal) ]
<> singleIf dotted (unode "beat-unit-dot" ())
<> [ unode "per-minute" (show $ round $ getTempo tempo) ]
write Bracket = notImplemented "Unsupported directions"
write (OtherDirection dir) = notImplemented "OtherDirection"
-- ----------------------------------------------------------------------------------
-- Barline
-- ----------------------------------------------------------------------------------
instance WriteMusicXml Barline where
write (Barline location style repeat) = single $
addAttr (uattr "location" (show location)) $
unode "barline" $
[unode "bar-style" (show style)] <>
maybe [] write repeat
instance WriteMusicXml Repeat where
write (Repeat dir) = single $ addAttr (uattr "direction" (show dir)) $ unode "repeat" ()
-- ----------------------------------------------------------------------------------
-- Lyrics
-- ----------------------------------------------------------------------------------
instance WriteMusicXml Lyric where
write = notImplemented "WriteMusicXml instance for Lyric"
-- ----------------------------------------------------------------------------------
-- Basic types
-- ----------------------------------------------------------------------------------
writeBeamType BeginBeam = "begin" :: String
writeBeamType ContinueBeam = "continue"
writeBeamType EndBeam = "end"
writeBeamType ForwardHook = "forward-hook"
writeBeamType BackwardHook = "backward-hook"
writeStartStop = writeStartStopContinueChange
writeStartStopChange = writeStartStopContinueChange
writeStartStopContinue = writeStartStopContinueChange
writeStartStopContinueChange Start = "start" :: String
writeStartStopContinueChange Stop = "stop"
writeStartStopContinueChange Continue = "continue"
writeStartStopContinueChange Change = "change"
writeStemDirection StemDown = "down" :: String
writeStemDirection StemUp = "up"
writeStemDirection StemNone = "none"
writeStemDirection StemDouble = "double"
writeLineType Solid = "solid" :: String
writeLineType Dashed = "dashed"
writeLineType Dotted = "dotted"
writeLineType Wavy = "wavy"
writeNoteHead SlashNoteHead = "slash" :: String
writeNoteHead TriangleNoteHead = "triangle"
writeNoteHead DiamondNoteHead = "diamond"
writeNoteHead SquareNoteHead = "square"
writeNoteHead CrossNoteHead = "cross"
writeNoteHead XNoteHead = "x"
writeNoteHead CircleXNoteHead = "circle"
writeNoteHead InvertedTriangleNoteHead = "inverted-triangle"
writeNoteHead ArrowDownNoteHead = "arrow-down"
writeNoteHead ArrowUpNoteHead = "arrow-up"
writeNoteHead SlashedNoteHead = "slashed"
writeNoteHead BackSlashedNoteHead = "back-slashed"
writeNoteHead NormalNoteHead = "normal"
writeNoteHead ClusterNoteHead = "cluster"
writeNoteHead CircleDotNoteHead = "circle"
writeNoteHead LeftTriangleNoteHead = "left-triangle"
writeNoteHead RectangleNoteHead = "rectangle"
writeNoteHead NoNoteHead = "none"
writeAccidental DoubleFlat = "double-flat" :: String
writeAccidental Flat = "flat"
writeAccidental Natural = "natural"
writeAccidental Sharp = "sharp"
writeAccidental DoubleSharp = "double-sharp"
writeNoteVal :: NoteVal -> String
writeNoteVal (NoteVal x)
| x == (1/1024) = "1024th"
| x == (1/512) = "512th"
| x == (1/256) = "256th"
| x == (1/128) = "128th"
| x == (1/64) = "64th"
| x == (1/32) = "32nd"
| x == (1/16) = "16th"
| x == (1/8) = "eighth"
| x == (1/4) = "quarter"
| x == (1/2) = "half"
| x == (1/1) = "whole"
| x == (2/1) = "breve"
| x == (4/1) = "long"
| x == (8/1) = "maxima"
  | otherwise      = error $ "Data.Music.MusicXml.Write.Score.writeNoteVal: Invalid note value:" ++ show x
writeClef :: ClefSign -> String
writeClef GClef = "G"
writeClef CClef = "C"
writeClef FClef = "F"
writeClef PercClef = "percussion"
writeClef TabClef = "tab"
writeMode :: Mode -> String
writeMode NoMode = "none"
writeMode x = toLowerString . show $ x
writeGroupSymbol GroupBrace = "brace" :: String
writeGroupSymbol GroupLine = "line"
writeGroupSymbol GroupBracket = "bracket"
writeGroupSymbol GroupSquare = "square"
writeGroupSymbol NoGroupSymbol = "none"
writeGroupBarlines GroupBarLines = "yes" :: String
writeGroupBarlines GroupNoBarLines = "no"
writeGroupBarlines GroupMensurstrich = "Mensurstrich"
writeFermataSign NormalFermata = "normal" :: String
writeFermataSign AngledFermata = "angled"
writeFermataSign SquaredFermata = "squared"
writeDynamics x = unode (toLowerString $ show x) ()
-- ----------------------------------------------------------------------------------
-- XML aliases
addAttr :: Attr -> Element -> Element
addAttrs :: [Attr] -> Element -> Element
addAttr = add_attr
addAttrs = add_attrs
uattr :: String -> String -> Attr
uattr n = Attr (unqual n)
-- Misc
sep :: a -> [a] -> [a]
sep = List.intersperse
concatSep :: [a] -> [[a]] -> [a]
concatSep x = concat . sep x
toUpperChar :: Char -> Char
toUpperChar = Char.toUpper
toLowerChar :: Char -> Char
toLowerChar = Char.toLower
toUpperString :: String -> String
toUpperString = fmap Char.toUpper
toLowerString :: String -> String
toLowerString = fmap Char.toLower
toCapitalString :: String -> String
toCapitalString [] = []
toCapitalString (x:xs) = toUpperChar x : toLowerString xs
one :: (a -> b) -> a -> [b]
one f = single . f
maybeOne :: (a -> b) -> Maybe a -> [b]
maybeOne f = maybeToList . fmap f
single :: a -> [a]
single = return
fromSingle :: [a] -> a
fromSingle [x] = x
fromSingle _ = error "fromSingle: non-single list"
singleIf :: Bool -> a -> [a]
singleIf p x | not p = []
| otherwise = [x]
notImplemented x = error $ "Not implemented: " ++ x
|
music-suite/musicxml2
|
src/Data/Music/MusicXml/Write/Score.hs
|
bsd-3-clause
| 28,422
| 0
| 19
| 11,599
| 6,465
| 3,235
| 3,230
| 438
| 1
|
{-# LANGUAGE RecordWildCards #-}
module Aws.CloudFront.Signer
( URL
, JSONPOlicy
, CloudFrontSigningKey(..)
, CloudFrontPolicy(..)
, readCloudFrontSigningKeyFromDER
, parseRSAPrivateKeyDER
, signCannedPolicyURL
, signCustomPolicyURL
, signCustomPolicyURL_
, cannedPolicy
, customPolicy
, unixTime
) where
import qualified Data.ASN1.Encoding as A
import qualified Data.ASN1.BinaryEncoding as A
import qualified Data.ASN1.Types as A
import qualified Data.ByteString.Lazy.Char8 as LBS
import qualified Data.ByteString.Base64.Lazy as B64
import Data.Time
import Data.Maybe
import Codec.Crypto.RSA
import qualified Crypto.Types.PubKey.RSA as C
import Text.Printf
import System.Locale
-- | input and output URLs
type URL = String
-- | a JSON CloudFront policy
type JSONPOlicy = String
-- | the CloudFront key pair identifier
type KeyID = String
-- | a CloudFront signing key has an identifier and an RSA private key
data CloudFrontSigningKey
= CloudFrontSigningKey
{ cfk_key_id :: KeyID
, cfk_key :: PrivateKey
}
deriving (Show)
-- | a CloudFront policy must identify the resource being accessed and the
-- expiry time; a starting time and IPv4 address may also be specified
data CloudFrontPolicy
= CloudFrontPolicy
{ cfp_Resource :: URL
, cfp_DateLessThan :: UTCTime
, cfp_DateGreaterThan :: Maybe UTCTime
, cfp_IpAddress :: Maybe String
}
-- | RSA private keys can only be read from a DER file for now (the OpenSSL
-- tools can be used to convert from PEM:
--
-- openssl rsa -in input.pem -inform PEM -out output.der -outform DER
--
readCloudFrontSigningKeyFromDER :: KeyID -> FilePath -> IO CloudFrontSigningKey
readCloudFrontSigningKeyFromDER ki fp =
do pk_b <- LBS.readFile fp
case parseRSAPrivateKeyDER pk_b of
Left err -> error err
Right pk ->
return $
CloudFrontSigningKey
{ cfk_key_id = ki
, cfk_key = pk
}
-- | If you have the DER ByteString then you can construct a private key
-- functionally.
parseRSAPrivateKeyDER :: LBS.ByteString -> Either String C.PrivateKey
parseRSAPrivateKeyDER bs =
case A.decodeASN1 A.DER bs of
Left err -> Left $ show err
Right as ->
case A.fromASN1 as of
Left err -> Left $ show err
Right pr ->
case pr of
(pk,[]) -> Right pk
              _       -> Left "residual data"
-- | In most cases only a time-limited, signed URL is needed, in which case a
-- canned policy can be used; URLs signed with a canned policy are shorter
-- than those signed with a custom policy.
signCannedPolicyURL :: CloudFrontSigningKey -> UTCTime -> URL -> URL
signCannedPolicyURL CloudFrontSigningKey{..} exp_utc url =
printf "%s%cExpires=%s&Signature=%s&Key-Pair-Id=%s" url sep exp_eps pol_sig cfk_key_id
where
exp_eps = unixTime exp_utc
pol_sig = b64 $ rsa_sha1 cfk_key pol
pol = cannedPolicy exp_utc url
sep = if any (=='?') url then '&' else '?'
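-- Example use (illustrative addition; the key id, key file and URL below are
-- made up, and getCurrentTime / addUTCTime come from Data.Time, imported above):
--
--   do key <- readCloudFrontSigningKeyFromDER "APKAEXAMPLE" "cf-key.der"
--      now <- getCurrentTime
--      let url = signCannedPolicyURL key (addUTCTime 3600 now)
--                  "https://example.cloudfront.net/video.mp4"
--      putStrLn url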
-- | Signing a URL with a custom policy allows a start time and the IP address
-- of the recipient(s) to be specified.
signCustomPolicyURL :: CloudFrontSigningKey -> CloudFrontPolicy -> URL
signCustomPolicyURL cfk cfp = signCustomPolicyURL_ cfk (customPolicy cfp) $ cfp_Resource cfp
-- | The URL can also be signed with the custom policy in JSON format.
-- (See the CloudFront documentation for details.)
signCustomPolicyURL_ :: CloudFrontSigningKey -> JSONPOlicy -> URL -> URL
signCustomPolicyURL_ CloudFrontSigningKey{..} pol url =
printf "%s%cPolicy=%s&Signature=%s&Key-Pair-Id=%s" url sep pol_b64 pol_sig cfk_key_id
where
pol_sig = b64 $ rsa_sha1 cfk_key pol
pol_b64 = b64 pol
sep = if any (=='?') url then '&' else '?'
-- | The JSON canned policy can be generated from the expiry time and
-- the URL of the distributed resource.
cannedPolicy :: UTCTime -> URL -> JSONPOlicy
cannedPolicy exp_utc url =
concat
[ "{\"Statement\":[{\"Resource\":\""
, url
, "\",\"Condition\":{\"DateLessThan\":{\"AWS:EpochTime\":"
, unixTime exp_utc
, "}}}]}"
]
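-- For an expiry at epoch second 1700000000 and the (made-up) resource
-- "https://example.cloudfront.net/video.mp4", the generated policy is:
--
--   {"Statement":[{"Resource":"https://example.cloudfront.net/video.mp4","Condition":{"DateLessThan":{"AWS:EpochTime":1700000000}}}]}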
-- | JSON custom policies provide more flexibility (allowing start times and
-- recipient IP addresses to be specified) but generate longer signed URLs.
customPolicy :: CloudFrontPolicy -> JSONPOlicy
customPolicy CloudFrontPolicy{..} = unlines $ catMaybes
[ ok $ "{"
, ok $ " \"Statement\": [{"
, ok $ " \"Resource\":\"" ++ cfp_Resource ++ "\","
, ok $ " \"Condition\":{"
, ok $ " \"DateLessThan\":{\"AWS:EpochTime\":" ++ unixTime cfp_DateLessThan ++ "},"
, st $ \ust -> " \"DateGreaterThan\":{\"AWS:EpochTime\":" ++ unixTime ust ++ "},"
, ok $ " \"IpAddress\":{\"AWS:SourceIp\":\"" ++ maybe "0.0.0.0/0" id cfp_IpAddress ++"\"}"
, ok $ " }"
, ok $ " }]"
, ok $ "}"
]
where
ok = Just
st f = maybe Nothing (Just . f) cfp_DateGreaterThan
-- | CloudFront uses Unix Epoch time (number of seconds since 1970, UTC) to
-- specify timestamps.
unixTime :: UTCTime -> String
unixTime = formatTime defaultTimeLocale "%s"
rsa_sha1 :: PrivateKey -> String -> String
rsa_sha1 pk = LBS.unpack . rsassa_pkcs1_v1_5_sign ha_SHA1 pk . LBS.pack
b64 :: String -> String
b64 = map f . LBS.unpack . B64.encode . LBS.pack
where
f '+' = '-'
f '=' = '_'
f '/' = '~'
f c = c
|
adinapoli/aws-cloudfront-signer
|
src/Aws/CloudFront/Signer.hs
|
bsd-3-clause
| 5,891
| 0
| 15
| 1,716
| 1,051
| 583
| 468
| 107
| 4
|
{-# LANGUAGE OverloadedStrings #-}
module Deck where
import Card
import System.Random.Shuffle
import System.Random (getStdRandom)
import Data.Aeson
data Deck = Deck [Card]
deriving Show
numbers = [
Two,
Three,
Four,
Five,
Six,
Seven,
Eight,
Nine,
Ten,
Jack,
Queen,
King,
Ace
]
suits = [
Club,
Diamond,
Heart,
Spade
]
allCards :: [Card]
allCards = concatMap (\s -> map (Card s) numbers) suits
shuffledDeck :: IO Deck
shuffledDeck = Deck <$> shuffleM allCards
deal :: Int -> Int -> Deck -> (Deck, [[Card]])
deal size number (Deck cards) = (Deck rest, dealt)
where (rest, dealt) = deal_ size number cards []
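-- Example (illustrative addition): deal four hands of five cards each from a
-- freshly shuffled deck; 'hands' holds four lists of five cards and 'rest'
-- keeps the remaining 32 cards:
--
--   do deck <- shuffledDeck
--      let (rest, hands) = deal 5 4 deck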
deal_ :: Int -> Int -> [Card] -> [[Card]] -> ([Card], [[Card]])
deal_ size 0 deck acc = (deck, acc)
deal_ size number deck acc = deal_ size (number - 1) rest (dealt : acc)
where (dealt, rest) = splitAt size deck
|
hmac/whist-hs
|
src/Deck.hs
|
bsd-3-clause
| 870
| 0
| 11
| 194
| 364
| 209
| 155
| 38
| 1
|
{-# LANGUAGE GeneralizedNewtypeDeriving, DefaultSignatures, ScopedTypeVariables, NamedFieldPuns, OverloadedStrings, MultiParamTypeClasses, TemplateHaskell, FlexibleContexts, TypeFamilies, StandaloneDeriving, RecordWildCards, RankNTypes, Trustworthy, UndecidableInstances #-}
{-|
SimpleLog is a library for convenient and configurable logging. It uses the usual monad transformer + associated class design: 'SLogT' and 'MonadSLog'.
Example usage:
@
import System.Log.SLog
main = simpleLog \"Example.log\" $ do
logD \"Some debugging information\"
logI \"This is some other information\"
logW \"Something bad is about to happen\"
logE \"Something bad happened\"
@
The above sample code when run will produce output similar to this:
@
2013-10-02 14:17:40 | INFO | [ThreadId 58] This is some other information
2013-10-02 14:17:40 | WARNING | [ThreadId 58] Something bad is about to happen
2013-10-02 14:17:40 | ERROR | [ThreadId 58] Something bad happened
@
Note how the debug line is not displayed. This is because the default configuration ('defaultLogConfig') only logs to stdout when the severity is >= 'INFO'.
The above code will also append the log lines to the file @\"Example.log\"@, including the debug line.
The following example shows how one can fine tune SimpleLog as well as how to fork other logging threads.
@
\-- Our log configuration specifies that no ANSI colouring should be used, all log lines
\-- should be written to the TChan, and >= INFO severity lines should be written to the
\-- stdout synchronously.
logConfig :: TChan LogLine -> LogConfig
logConfig tchan
= LogConfig { ansiColours = False
, loggers = [ (anySev, TChanLogger tchan)
, ((>= INFO), StdoutLogger Sync)
] }
\-- Our custom logging format
logFormat :: Format
logFormat = $(format \"%d(%T) (%s) %t: %m\")
\-- The main thread will fork a child thread, then wait until everything is flushed, then
\-- count how many messages have been written in total to the TChan (which will be all
\-- messages as our filter passes through everything)
main :: IO ()
main = do
tchan \<- newTChanIO
(_, fkey) \<- runSLogT (logConfig tchan) logFormat \"main\" $ do
logS \"Main thread started successfully\"
logD \"This will not appear on stdout\"
_ \<- forkSLog \"child\" $ do
logS \"I am the child\"
liftIO $ threadDelay 5000000
logW \"CHILD SHUTTING DOWN\"
logI \"Exiting main thread\"
waitFlush fkey
c \<- countTChan tchan
putStrLn $ show c ++ \" messages have been logged in total\"
\-- Counts the number of elements in the TChan (and pops them all)
countTChan :: TChan a -> IO Int
countTChan tchan = do
let count = do
em \<- isEmptyTChan tchan
if em then return 0
else readTChan tchan >> (1 +) \<$> count
atomically count
@
The above code when run will produce something like this:
@
17:35:15 (SUCCESS) main: Main thread started successfully
17:35:15 (SUCCESS) child: I am the child, waiting for 5 seconds...
17:35:15 (INFO ) main: Exiting main thread
17:35:20 (WARNING) child: CHILD SHUTTING DOWN
5 messages have been logged in total
@
-}
module System.Log.SLog
(
-- * SLogT
SLogT
, SLog
-- ** Running SLogT
, runSLogT
, simpleLog
-- * FlushKey
, FlushKey
, waitFlush
-- * MonadSLog
, MonadSLog(..)
-- ** Convenience log functions
, logD
, logI
, logS
, logW
, logE
-- * Loggers
, Logger(..)
, Sync(..)
, LogLine(..)
-- * Filters
, Severity(..)
, Filter
, anySev
-- * Configuration
, LogConfig(..)
, defaultLogConfig
-- * Format
, module System.Log.SLog.Format
, defaultLogFormat
-- * Utility functions
, forkSLog
, formatLine
, unsafeUnliftSLogT
)
where
import System.Log.SLog.Format
import Prelude hiding (log)
import Control.Applicative
import Control.Monad.Reader
import Control.Monad.State
import Control.Monad.Error
import Control.Monad.Cont
import Control.Monad.Base
import Control.Monad.Trans.Resource
import Control.Monad.Trans.Control
import Data.Semigroup
import qualified Data.Map as Map
import Data.Time.LocalTime
import qualified Data.Text as T
import qualified Data.Text.IO as T
import System.IO
import System.Console.ANSI
import System.Directory
import Control.Concurrent
import Control.Concurrent.STM
import Control.Concurrent.ForkableT.Instances
-- | The type of severities with increasing importance
data Severity
= DEBUG | INFO | SUCCESS | WARNING | ERROR
deriving (Show, Read, Eq, Ord)
-- | The class of monads that can perform logging
class (MonadIO m) => MonadSLog m where
-- | 'log' logs the specified 'T.Text' with the specified 'Severity'
log :: Severity -> T.Text -> m ()
-- | The default instance simply lifts through a monad transformer
default log :: (MonadTrans t, MonadSLog m) => Severity -> T.Text -> t m ()
log sev = lift . log sev
instance (MonadSLog m) => MonadSLog (StateT s m)
instance (MonadSLog m) => MonadSLog (ReaderT s m)
instance (MonadSLog m, Error e) => MonadSLog (ErrorT e m)
instance (MonadSLog m) => MonadSLog (ContT r m)
instance (MonadSLog m) => MonadSLog (ResourceT m)
-- | The default log format, which currently is @$(format \"%d(%F %T) | %s | [%t] %m\")@. See "System.Log.SLog.Format" for more details on format strings.
defaultLogFormat :: Format
defaultLogFormat = $(format "%d(%F %T) | %s | [%t] %m")
-- | Log a 'DEBUG' message
logD :: MonadSLog m => String -> m ()
logD = log DEBUG . T.pack
-- | Log a 'SUCCESS' message
logS :: MonadSLog m => String -> m ()
logS = log SUCCESS . T.pack
-- | Log an 'INFO' message
logI :: MonadSLog m => String -> m ()
logI = log INFO . T.pack
-- | Log a 'WARNING' message
logW :: MonadSLog m => String -> m ()
logW = log WARNING . T.pack
-- | Log an 'ERROR' message
logE :: MonadSLog m => String -> m ()
logE = log ERROR . T.pack
-- | SGR code for severity colours
sgr :: Severity -> [SGR]
sgr DEBUG = [SetConsoleIntensity BoldIntensity, SetColor Foreground Vivid White]
sgr INFO = [SetColor Foreground Vivid White]
sgr SUCCESS = [SetConsoleIntensity BoldIntensity, SetColor Foreground Vivid Green]
sgr WARNING = [SetConsoleIntensity BoldIntensity, SetColor Foreground Vivid Yellow]
sgr ERROR = [SetConsoleIntensity BoldIntensity, SetBlinkSpeed SlowBlink, SetColor Foreground Vivid Red]
-- | Wrap a piece of text in some SGR configuration
withSgr :: [SGR] -> T.Text -> T.Text
withSgr sg s = T.concat [T.pack $ setSGRCode sg, s, T.pack $ setSGRCode []]
-- | 'Sync' is a type to specify whether a logger should log synchronously or asynchronously.
-- Synchronous logging means that the logging thread will block until the message has been written and flushed to the sink.
-- Asynchronous logging means that the logging thread will write to a work queue and move on. The work queue will be read by a dedicated thread that is forked for each sink.
data Sync
= Sync | Async
deriving (Eq)
-- | The 'Logger' type specifies the types of sinks we can log to.
data Logger
-- | 'FileLogger' specifies a file to be logged in.
-- Note that symbolic links will be resolved using 'canonicalizePath' when deciding whether two 'FileLogger's point to the same file.
= FileLogger Sync FilePath
  -- | 'StdoutLogger' logs to stdout
| StdoutLogger Sync
  -- | 'StderrLogger' logs to stderr
| StderrLogger Sync
-- | 'TChanLogger' logs to a specified 'TChan'.
-- Note that 'LogLine's are written instead of the final formatted text. If you wish to use the final text use 'formatLine'.
| TChanLogger (TChan LogLine)
deriving (Eq)
-- | 'Filter' is the type of logging filters. 'Filter's may only depend on the 'Severity'.
type Filter = Severity -> Bool
-- | 'anySev' allows all lines to be logged.
anySev :: Filter
anySev = const True
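-- An illustrative filter (editor's addition, not part of the original API):
-- since 'Severity' derives 'Ord', an operator section gives a concise filter
-- that passes only warnings and errors.
warnOrWorse :: Filter
warnOrWorse = (>= WARNING)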
-- | 'LogLine' is a log message together with the severity, time of logging and the logging thread's name.
data LogLine
= LogLine { logSeverity :: Severity
, logMessage :: T.Text
, logTimestamp :: ZonedTime
, logThread :: T.Text
}
-- | 'LogConfig' is the configuration of 'SLogT'
data LogConfig
= LogConfig {
-- | Specifies whether ANSI colouring should be used when logging to stdout/stderr
ansiColours :: Bool
-- | The list of loggers together with the associated filters
, loggers :: [(Filter, Logger)]
}
-- | 'defaultLogConfig' is the default log configuration.
-- It writes all non-DEBUG messages to stdout synchronously and all messages to a specified file asynchronously.
defaultLogConfig :: FilePath -> LogConfig
defaultLogConfig fName
= LogConfig { ansiColours = True
, loggers = [ ((>= INFO), StdoutLogger Sync)
, (anySev, FileLogger Async fName)
]
}
-- | The internal representation of Loggers with open handles, locks and 'TChan's.
data LoggerInternal
= SyncLoggerInternal Handle (MVar ()) Bool
| AsyncLoggerInternal (TChan T.Text) Bool
| TChanLoggerInternal (TChan LogLine)
-- | The environment of 'SLogT'
data SLogEnv
= SLogEnv {
-- | The current thread's name
threadName :: T.Text
-- | The list of internal loggers together with associated filters
, loggerInternals :: [(Filter, LoggerInternal)]
-- | Same as the user-specified 'ansiColours'
, logColours :: Bool
-- | The 'Format' of logging
, logFormat :: Format
}
-- | The SLogT monad transformer is simply a 'ResourceT' with an environment
newtype SLogT m a
= SLogT { unSLogT :: ReaderT SLogEnv (ResourceT m) a }
deriving ( Functor, Monad, MonadIO, Applicative
, MonadThrow )
deriving instance (MonadBase IO m) => MonadBase IO (SLogT m)
deriving instance (MonadBase IO m, MonadThrow m, MonadIO m) => MonadResource (SLogT m)
instance MonadTransControl SLogT where
type StT SLogT a = StT ResourceT a
liftWith f = SLogT . ReaderT $ \r ->
liftWith $ \lres ->
f $ \(SLogT t) ->
lres $ runReaderT t r
restoreT = SLogT . lift . restoreT
instance (MonadBaseControl IO m) => MonadBaseControl IO (SLogT m) where
type StM (SLogT m) a = ComposeSt SLogT m a
liftBaseWith = defaultLiftBaseWith
restoreM = defaultRestoreM
instance MonadTrans SLogT where
lift = SLogT . lift . lift
-- | This is a simple monad for the bottom of one's monad stack.
type SLog = SLogT IO
-- | A 'FlushKey' is returned when an 'SLogT' is run. You may wait on it with 'waitFlush'.
newtype FlushKey = FlushKey (TVar Bool)
-- | 'waitFlush' will only return when all resources have been released and all streams have been flushed. Note that this includes resources allocated by the user using the exposed 'MonadResource' instance.
--
-- All threads internally accounted for are signaled to exit (they will first finish processing of all remaining jobs) when the 'SLogT' is run, however it is the user's responsibility to shut down threads forked with 'forkSLog' or 'fork' before 'waitFlush' can return.
waitFlush :: FlushKey -> IO ()
waitFlush (FlushKey tvar)
= atomically $ do
b <- readTVar tvar
unless b retry
-- Internally we use two ResourceTs. The inner one is used to keep
-- track of open files, make sure everything gets flushed and release
-- the FlushKey.
-- The outer one is used to signal completion. In particular when the
-- outer ResourceT is run all threads that are internally accounted
-- for will receive a signal to finish processing, thus running the
-- internal ResourceT (see forkCleanUp)
-- Note that this means all SLog threads forked by the user need to
-- finish before the FlushKey releases
-- | 'runSLogT' runs an 'SLogT' given a 'LogConfig', 'Format' and the current thread's name.
-- It returns a 'FlushKey' besides the usual return value.
runSLogT :: (MonadResource m, MonadBaseControl IO m) => LogConfig -> Format -> String -> SLogT m a -> m (a, FlushKey)
runSLogT LogConfig{..} lf tName (SLogT r)
= runResourceT $ do
(_, tvar) <- allocate (newTVarIO False) (\t -> atomically $ writeTVar t True)
runResourceT $ do
internals <- initLoggers loggers
a <- lift $ runReaderT r SLogEnv{ threadName = T.pack tName
, loggerInternals = internals
, logColours = ansiColours
, logFormat = lf }
return (a, FlushKey tvar)
-- | 'simpleLog' uses the default configuration with the specified log file name. It also waits using 'waitFlush' until all resources have been released.
simpleLog :: (MonadResource m, MonadBaseControl IO m) => FilePath -> SLogT m a -> m a
simpleLog fName s = do
tName <- show <$> liftIO myThreadId
(a, fkey) <- runSLogT (defaultLogConfig fName) defaultLogFormat tName s
liftIO $ waitFlush fkey
return a
-- | initLoggers initialises the user-specified 'Logger's and returns their internal representation.
--
-- This involves first aggregating the 'Logger's, resolving any ambiguities, and then opening the logging streams.
initLoggers :: (MonadResource m, Applicative m) => [(Filter, Logger)] -> ResourceT (ResourceT m) [(Filter, LoggerInternal)]
initLoggers fls = do
InitState{..} <- liftIO $ aggregateLoggers fls
let stdHandle (Just ini) h = do
_ <- lift $ register (hFlush h)
return [(h, ini, True)]
stdHandle Nothing _ = return []
createHandle (fname, ini) = do
(_, h) <- allocate
(openFile fname AppendMode)
(\h -> hFlush h >> hClose h)
return (h, ini, False)
sout <- stdHandle stdoutInit stdout
serr <- stdHandle stderrInit stderr
files <- lift . mapM createHandle $ Map.toList fileInitMap
let toInternal (h, InitSync f, c) = do
lock <- liftIO $ newMVar ()
return [(f, SyncLoggerInternal h lock c)]
toInternal (h, InitAsync f, c) = do
tchan <- liftIO newTChanIO
_ <- forkCleanUp $ lift . asyncLogger Nothing h tchan
return [(f, AsyncLoggerInternal tchan c)]
toInternal (h, Both fs fa, c) = do
lock <- liftIO $ newMVar ()
tchan <- liftIO newTChanIO
_ <- forkCleanUp $ lift . asyncLogger (Just lock) h tchan
return [(fs, SyncLoggerInternal h lock c), (fa, AsyncLoggerInternal tchan c)]
toInternalTChan (f, tchan) = (f, TChanLoggerInternal tchan)
nonTChan <- join <$> mapM toInternal (sout ++ serr ++ files)
return $ nonTChan ++ map toInternalTChan tchanInit
-- | An internal datatype used for aggregation of filters as well as keeping track of synchronous and asynchronous logging.
-- This means for example that it is possible to log from two threads into the same file, one synchronously, one asynchronously, using different filters.
data InitLogger
= InitSync Filter
| InitAsync Filter
| Both Filter Filter
instance Semigroup InitLogger where
InitSync f <> InitSync f' = InitSync $ liftM2 (||) f f'
InitSync f <> InitAsync f' = Both f f'
InitSync f <> Both f' f'' = Both (liftM2 (||) f f') f''
InitAsync f <> InitSync f' = Both f f'
InitAsync f <> InitAsync f' = InitAsync $ liftM2 (||) f f'
InitAsync f <> Both f' f'' = Both f' (liftM2 (||) f f'')
Both f' f'' <> InitSync f = Both (liftM2 (||) f f') f''
Both f' f'' <> InitAsync f = Both f' (liftM2 (||) f f'')
Both f f' <> Both f'' f''' = Both (liftM2 (||) f f') (liftM2 (||) f'' f''')
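-- For example (editor's note): combining a synchronous logger that passes
-- (>= INFO) with an asynchronous one that passes everything gives
-- @Both (>= INFO) anySev@, i.e. both filters are kept and the two write
-- paths later share a single lock (see 'initLoggers').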
-- | canonExist makes sure the specified file exists by creating it if it doesn't, then returns its canonicalized path.
canonExist :: String -> IO String
canonExist f = appendFile f "" >> canonicalizePath f
-- | 'forkCleanUp' forks a ResIO thread that will get an exit signal through a TVar when the outer ResourceT is run.
-- The ResourceT forked off is the inner one.
forkCleanUp :: (MonadResource m) =>
(TVar Bool -> ResIO ()) -> ResourceT (ResourceT m) ThreadId
forkCleanUp io = do
(_, exitSignal) <- allocate (newTVarIO False) (\t -> atomically $ writeTVar t True)
st <- lift . liftWith $ \unliftRes -> liftIO . unliftRes . fork $ io exitSignal
lift . restoreT $ return st
-- | An internal datatype used when initialising 'Logger's. It includes all information that the user passed in with the list of loggers.
data InitState = InitState { fileInitMap :: Map.Map FilePath InitLogger
, stdoutInit :: Maybe InitLogger
, stderrInit :: Maybe InitLogger
, tchanInit :: [(Filter, TChan LogLine)]
}
-- | This method aggregates the specified loggers in a meaningful way
-- Ambiguous cases arise when several loggers specify the same file/stream
-- * First off we use canonicalizePath to resolve symlinks
-- * Second, we create a disjunction of the attached filters
-- * Lastly we keep track of synchrony: if there are two loggers
--   specifying the same file, one synchronous and the other asynchronous,
--   then we keep both filters and both write paths share the same lock.
aggregateLoggers :: [(Filter, Logger)] -> IO InitState
aggregateLoggers fls = execStateT
(mapM_ (uncurry initLogger) fls)
InitState { fileInitMap = Map.empty
, stdoutInit = Nothing
, stderrInit = Nothing
, tchanInit = [] }
where
initLogger :: Filter -> Logger -> StateT InitState IO ()
initLogger f (FileLogger sync fname) = do
trueFname <- liftIO $ canonExist fname
s@InitState{..} <- get
let ini = case sync of ; Sync -> InitSync f ; Async -> InitAsync f
put s { fileInitMap = Map.alter (<> Just ini) trueFname fileInitMap }
initLogger f (StdoutLogger sync) = do
s@InitState{..} <- get
let ini = case sync of ; Sync -> InitSync f ; Async -> InitAsync f
put s { stdoutInit = stdoutInit <> Just ini }
initLogger f (StderrLogger sync) = do
s@InitState{..} <- get
let ini = case sync of ; Sync -> InitSync f ; Async -> InitAsync f
put s { stderrInit = stderrInit <> Just ini }
initLogger f (TChanLogger tchan) =
modify $ \s@InitState{..} -> s { tchanInit = (f, tchan) : tchanInit }
-- | asyncLogger is the thread forked for each 'Async' logger.
-- It may be passed a lock to use when logging in case a 'Sync'hronous logger for the same handle also exists.
asyncLogger :: Maybe (MVar ()) -> Handle -> TChan T.Text -> TVar Bool -> IO ()
asyncLogger mlock h tchan exitSignal = flip runContT return $
callCC $ \exit -> forever $ do
m <- liftIO . atomically $
(Just <$> readTChan tchan)
`orElse`
(readTVar exitSignal >>= check >> return Nothing)
case m of
Just str -> liftIO $ case mlock of
Nothing -> T.hPutStrLn h str
Just lock -> withMVar lock $ \_ -> T.hPutStrLn h str
Nothing -> do
liftIO $ hFlush h
exit ()
-- | Helper method for choosing coloured vs. non-coloured strings
chs :: Bool -> a -> a -> a
chs False a _ = a
chs True _ b = b
-- | logger performs the actual logging.
logger :: LoggerInternal -> LogLine -> T.Text -> T.Text -> IO ()
logger (AsyncLoggerInternal tchan c) _ ns s = atomically . writeTChan tchan $ chs c ns s
logger (SyncLoggerInternal h lock c) _ ns s = withMVar lock $ \_ -> do
T.hPutStrLn h (chs c ns s)
hFlush h
logger (TChanLoggerInternal tchan) l _ _ = atomically $ writeTChan tchan l
-- | 'formatLine' formats the given 'LogLine' using the specified 'Format'. The 'Bool'ean determines whether 'formatLine' should insert ANSI colour codes or not.
formatLine :: Bool -> Format -> LogLine -> T.Text
formatLine isColour les ll = T.concat $ map (formatLine' ll) les
where
formatLine' LogLine{logMessage} MessageElem = logMessage
formatLine' LogLine{logSeverity} SeverityElem =
let sev = padS 7 . T.pack $ show logSeverity
in if isColour then withSgr (sgr logSeverity) sev else sev
formatLine' _ (StringElem str) = str
formatLine' LogLine{logTimestamp} (DateTimeElem f) = f logTimestamp
formatLine' LogLine{logThread} ThreadElem = logThread
instance (MonadBaseControl IO m, MonadIO m) => Forkable (SLogT m) (SLogT m) where
fork (SLogT (ReaderT f)) = SLogT . ReaderT $ \env ->
fork $ do
tid <- liftIO myThreadId
f env { threadName = T.pack $ show tid }
-- | 'forkSLog' forks an 'SLogT' thread with the specified thread name.
forkSLog :: (MonadBaseControl IO m, MonadIO m) => String -> SLogT m () -> SLogT m ThreadId
forkSLog tname (SLogT m) = SLogT . local (\e -> e { threadName = T.pack tname }) $ fork m
-- | helper method for padding with spaces
padS :: Int -> T.Text -> T.Text
padS n t = t `T.append` T.replicate (n - T.length t) " "
-- | 'unsafeUnliftSLogT' gives you an unsafe unlift of an 'SLogT' by assuming that any unlifted computation will finish earlier than the 'runSLogT' of the calling thread.
-- It is unsafe because if the unlifted computation doesn't finish earlier, it may access deallocated resources.
-- This is useful when a library forks implicitly but we still need to log in the forked threads, and we know that the child threads will finish before the parent. An example is Network.WebSockets.
unsafeUnliftSLogT :: forall m b. (Monad m, MonadBaseControl IO m) =>
((forall a. SLogT m a -> m a) -> SLogT m b) -> SLogT m b
unsafeUnliftSLogT f = do
env <- SLogT ask
let unlift :: SLogT m c -> m c
unlift s = runResourceT $ runReaderT (unSLogT s) env
f unlift
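-- A minimal usage sketch (editor's addition; @runServer@ below is a
-- hypothetical callback-taking entry point standing in for a library such
-- as Network.WebSockets that forks threads internally):
--
-- > withServer :: ((forall a. SLog a -> IO a) -> IO b) -> SLog b
-- > withServer runServer = unsafeUnliftSLogT $ \unlift -> liftIO (runServer unlift)
--
-- Callbacks run by @runServer@ can log via @unlift@, but they must finish
-- before the surrounding 'runSLogT' completes, or they may touch
-- already-released handles.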
instance (MonadIO m) => MonadSLog (SLogT m) where
log sev s = do
SLogEnv{..} <- SLogT ask
liftIO $ do
timestamp <- getZonedTime
let logLine = LogLine { logMessage = s
, logSeverity = sev
, logTimestamp = timestamp
, logThread = threadName
}
nonColoured = formatLine False logFormat logLine
coloured = if logColours
then formatLine True logFormat logLine
else nonColoured
mapM_ (\(fter, l) -> when (fter sev) $
logger l logLine nonColoured coloured) loggerInternals
|
exFalso/SimpleLog
|
src/System/Log/SLog.hs
|
bsd-3-clause
| 22,855
| 3
| 20
| 5,876
| 4,706
| 2,465
| 2,241
| 302
| 7
|
-- Copyright (c) 2016-present, Facebook, Inc.
-- All rights reserved.
--
-- This source code is licensed under the BSD-style license found in the
-- LICENSE file in the root directory of this source tree. An additional grant
-- of patent rights can be found in the PATENTS file in the same directory.
{-# LANGUAGE OverloadedStrings #-}
module Duckling.Numeral.FR.Corpus
( corpus ) where
import Prelude
import Data.String
import Duckling.Lang
import Duckling.Numeral.Types
import Duckling.Resolve
import Duckling.Testing.Types
corpus :: Corpus
corpus = (testContext {lang = FR}, allExamples)
allExamples :: [Example]
allExamples = concat
[ examples (NumeralValue 0)
[ "0"
, "zero"
, "zéro"
]
, examples (NumeralValue 1)
[ "1"
, "un"
, "une"
]
, examples (NumeralValue 11)
[ "onze"
]
, examples (NumeralValue 17)
[ "dix sept"
, "dix-sept"
]
, examples (NumeralValue 21)
[ "vingt et un"
, "vingt-et-un"
]
, examples (NumeralValue 23)
[ "vingt trois"
, "vingt-trois"
]
, examples (NumeralValue 70)
[ "soixante dix"
]
, examples (NumeralValue 71)
[ "soixante onze"
]
, examples (NumeralValue 78)
[ "soixante dix huit"
]
, examples (NumeralValue 73)
[ "soixante treize"
]
, examples (NumeralValue 80)
[ "quatre vingt"
]
, examples (NumeralValue 81)
[ "quatre vingt un"
]
, examples (NumeralValue 82)
[ "quatre vingt deux"
]
, examples (NumeralValue 90)
[ "quatre vingt dix"
]
, examples (NumeralValue 91)
[ "quatre vingt onze"
]
, examples (NumeralValue 92)
[ "quatre vingt douze"
]
, examples (NumeralValue 99)
[ "quatre vingt dix neuf"
]
, examples (NumeralValue 33)
[ "33"
, "trente trois"
, "trente-trois"
, "trente 3"
]
, examples (NumeralValue 118)
[ "cent dix-huit"
]
, examples (NumeralValue 4020)
[ "quatre mille vingt"
]
, examples (NumeralValue 100000)
[ "100.000"
, "100000"
, "100K"
, "100k"
, "cent mille"
]
, examples (NumeralValue 3000000)
[ "3M"
, "3000K"
, "3000000"
, "3.000.000"
, "trois millions"
]
, examples (NumeralValue 1200000)
[ "1.200.000"
, "1200000"
, "1,2M"
, "1200K"
, ",0012G"
, "un million deux cent mille"
]
, examples (NumeralValue (-1200000))
[ "- 1.200.000"
, "-1200000"
, "moins 1200000"
, "-1,2M"
, "-1200K"
, "-,0012G"
]
]
|
rfranek/duckling
|
Duckling/Numeral/FR/Corpus.hs
|
bsd-3-clause
| 3,206
| 0
| 11
| 1,399
| 587
| 334
| 253
| 89
| 1
|
{-# LANGUAGE CPP, GeneralizedNewtypeDeriving, ConstraintKinds, PatternGuards, StandaloneDeriving #-}
#if !(MIN_VERSION_base(4,8,0))
{-# LANGUAGE OverlappingInstances #-}
#endif
module Idris.ParseHelpers where
import Prelude hiding (pi)
import Text.Trifecta.Delta
import Text.Trifecta hiding (span, stringLiteral, charLiteral, natural, symbol, char, string, whiteSpace)
import Text.Parser.LookAhead
import Text.Parser.Expression
import qualified Text.Parser.Token as Tok
import qualified Text.Parser.Char as Chr
import qualified Text.Parser.Token.Highlight as Hi
import Idris.AbsSyntax
import Idris.Core.TT
import Idris.Core.Evaluate
import Idris.Delaborate (pprintErr)
import Idris.Docstrings
import Idris.Output (iWarn)
import qualified Util.Pretty as Pretty (text)
import Control.Applicative
import Control.Monad
import Control.Monad.State.Strict
import Data.Maybe
import qualified Data.List.Split as Spl
import Data.List
import Data.Monoid
import Data.Char
import qualified Data.Map as M
import qualified Data.HashSet as HS
import qualified Data.Text as T
import qualified Data.ByteString.UTF8 as UTF8
import System.FilePath
import Debug.Trace
-- | Idris parser with state used during parsing
type IdrisParser = StateT IState IdrisInnerParser
newtype IdrisInnerParser a = IdrisInnerParser { runInnerParser :: Parser a }
deriving (Monad, Functor, MonadPlus, Applicative, Alternative, CharParsing, LookAheadParsing, DeltaParsing, MarkParsing Delta, Monoid, TokenParsing)
deriving instance Parsing IdrisInnerParser
#if MIN_VERSION_base(4,8,0)
instance {-# OVERLAPPING #-} TokenParsing IdrisParser where
#else
instance TokenParsing IdrisParser where
#endif
someSpace = many (simpleWhiteSpace <|> singleLineComment <|> multiLineComment) *> pure ()
token p = do s <- get
(FC fn (sl, sc) _) <- getFC --TODO: Update after fixing getFC
-- See Issue #1594
r <- p
(FC fn _ (el, ec)) <- getFC
whiteSpace
put (s { lastTokenSpan = Just (FC fn (sl, sc) (el, ec)) })
return r
-- | Generalized monadic parsing constraint type
type MonadicParsing m = (DeltaParsing m, LookAheadParsing m, TokenParsing m, Monad m)
class HasLastTokenSpan m where
getLastTokenSpan :: m (Maybe FC)
instance HasLastTokenSpan IdrisParser where
getLastTokenSpan = lastTokenSpan <$> get
-- | Helper to run StateT parsers built on the Idris inner parser
runparser :: StateT st IdrisInnerParser res -> st -> String -> String -> Result res
runparser p i inputname =
parseString (runInnerParser (evalStateT p i))
(Directed (UTF8.fromString inputname) 0 0 0 0)
highlightP :: FC -> OutputAnnotation -> IdrisParser ()
highlightP fc annot = do ist <- get
put ist { idris_parserHighlights = (fc, annot) : idris_parserHighlights ist}
noDocCommentHere :: String -> IdrisParser ()
noDocCommentHere msg =
optional (do fc <- getFC
docComment
ist <- get
put ist { parserWarnings = (fc, Msg msg) : parserWarnings ist}) *>
pure ()
clearParserWarnings :: Idris ()
clearParserWarnings = do ist <- getIState
putIState ist { parserWarnings = [] }
reportParserWarnings :: Idris ()
reportParserWarnings = do ist <- getIState
mapM_ (uncurry iWarn)
(map (\ (fc, err) -> (fc, pprintErr ist err)) .
reverse .
nub $
parserWarnings ist)
clearParserWarnings
{- * Space, comments and literals (token/lexing like parsers) -}
-- | Consumes any simple whitespace (any character which satisfies Char.isSpace)
simpleWhiteSpace :: MonadicParsing m => m ()
simpleWhiteSpace = satisfy isSpace *> pure ()
-- | Checks if a character is an end of line
isEol :: Char -> Bool
isEol '\n' = True
isEol _ = False
-- | A parser that succeeds at the end of the line
eol :: MonadicParsing m => m ()
eol = (satisfy isEol *> pure ()) <|> lookAhead eof <?> "end of line"
{- | Consumes a single-line comment
@
SingleLineComment_t ::= '--' ~EOL_t* EOL_t ;
@
-}
singleLineComment :: MonadicParsing m => m ()
singleLineComment = (string "--" *>
many (satisfy (not . isEol)) *>
eol *> pure ())
<?> ""
{- | Consumes a multi-line comment
@
MultiLineComment_t ::=
'{ -- }'
| '{ -' InCommentChars_t
;
@
@
InCommentChars_t ::=
'- }'
| MultiLineComment_t InCommentChars_t
| ~'- }'+ InCommentChars_t
;
@
-}
multiLineComment :: MonadicParsing m => m ()
multiLineComment = try (string "{-" *> (string "-}") *> pure ())
<|> string "{-" *> inCommentChars
<?> ""
where inCommentChars :: MonadicParsing m => m ()
inCommentChars = string "-}" *> pure ()
<|> try (multiLineComment) *> inCommentChars
<|> string "|||" *> many (satisfy (not . isEol)) *> eol *> inCommentChars
<|> skipSome (noneOf startEnd) *> inCommentChars
<|> oneOf startEnd *> inCommentChars
<?> "end of comment"
startEnd :: String
startEnd = "{}-"
{-| Parses a documentation comment
@
DocComment_t ::= '|||' ~EOL_t* EOL_t
;
@
-}
docComment :: IdrisParser (Docstring (), [(Name, Docstring ())])
docComment = do dc <- pushIndent *> docCommentLine
rest <- many (indented docCommentLine)
args <- many $ do (name, first) <- indented argDocCommentLine
rest <- many (indented docCommentLine)
return (name, concat (intersperse "\n" (first:rest)))
popIndent
return (parseDocstring $ T.pack (concat (intersperse "\n" (dc:rest))),
map (\(n, d) -> (n, parseDocstring (T.pack d))) args)
where docCommentLine :: MonadicParsing m => m String
docCommentLine = try (do string "|||"
many (satisfy (==' '))
contents <- option "" (do first <- satisfy (\c -> not (isEol c || c == '@'))
res <- many (satisfy (not . isEol))
return $ first:res)
eol ; someSpace
return contents)-- ++ concat rest))
<?> ""
argDocCommentLine = do string "|||"
many (satisfy isSpace)
char '@'
many (satisfy isSpace)
n <- fst <$> name
many (satisfy isSpace)
docs <- many (satisfy (not . isEol))
eol ; someSpace
return (n, docs)
-- | Parses some white space
whiteSpace :: MonadicParsing m => m ()
whiteSpace = Tok.whiteSpace
-- | Parses a string literal
stringLiteral :: (MonadicParsing m, HasLastTokenSpan m) => m (String, FC)
stringLiteral = do str <- Tok.stringLiteral
fc <- getLastTokenSpan
return (str, fromMaybe NoFC fc)
-- | Parses a char literal
charLiteral :: (MonadicParsing m, HasLastTokenSpan m) => m (Char, FC)
charLiteral = do ch <- Tok.charLiteral
fc <- getLastTokenSpan
return (ch, fromMaybe NoFC fc)
-- | Parses a natural number
natural :: (MonadicParsing m, HasLastTokenSpan m) => m (Integer, FC)
natural = do n <- Tok.natural
fc <- getLastTokenSpan
return (n, fromMaybe NoFC fc)
-- | Parses an integral number
integer :: MonadicParsing m => m Integer
integer = Tok.integer
-- | Parses a floating point number
float :: (MonadicParsing m, HasLastTokenSpan m) => m (Double, FC)
float = do f <- Tok.double
fc <- getLastTokenSpan
return (f, fromMaybe NoFC fc)
{- * Symbols, identifiers, names and operators -}
-- | Idris Style for parsing identifiers/reserved keywords
idrisStyle :: MonadicParsing m => IdentifierStyle m
idrisStyle = IdentifierStyle _styleName _styleStart _styleLetter _styleReserved Hi.Identifier Hi.ReservedIdentifier
where _styleName = "Idris"
_styleStart = satisfy isAlpha <|> oneOf "_"
_styleLetter = satisfy isAlphaNum <|> oneOf "_'."
_styleReserved = HS.fromList ["let", "in", "data", "codata", "record", "corecord", "Type",
"do", "dsl", "import", "impossible",
"case", "of", "total", "partial", "mutual",
"infix", "infixl", "infixr", "rewrite",
"where", "with", "syntax", "proof", "postulate",
"using", "namespace", "class", "instance", "parameters",
"public", "private", "abstract", "implicit",
"quoteGoal", "constructor",
"if", "then", "else"]
char :: MonadicParsing m => Char -> m Char
char = Chr.char
string :: MonadicParsing m => String -> m String
string = Chr.string
-- | Parses a character as a token
lchar :: MonadicParsing m => Char -> m Char
lchar = token . char
-- | Parses a string as a token
symbol :: MonadicParsing m => String -> m String
symbol = Tok.symbol
symbolFC :: MonadicParsing m => String -> m FC
symbolFC str = do (FC file (l, c) _) <- getFC
Tok.symbol str
return $ FC file (l, c) (l, c + length str)
-- | Parses a reserved identifier
reserved :: MonadicParsing m => String -> m ()
reserved = Tok.reserve idrisStyle
-- | Parses a reserved identifier, computing its span. Assumes that
-- reserved identifiers never contain line breaks.
reservedFC :: MonadicParsing m => String -> m FC
reservedFC str = do (FC file (l, c) _) <- getFC
Tok.reserve idrisStyle str
return $ FC file (l, c) (l, c + length str)
-- Taken from Parsec (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007
-- | Parses a reserved operator
reservedOp :: MonadicParsing m => String -> m ()
reservedOp name = token $ try $
do string name
notFollowedBy (operatorLetter) <?> ("end of " ++ show name)
reservedOpFC :: MonadicParsing m => String -> m FC
reservedOpFC name = token $ try $ do (FC f (l, c) _) <- getFC
string name
notFollowedBy (operatorLetter) <?> ("end of " ++ show name)
return (FC f (l, c) (l, c + length name))
-- | Parses an identifier as a token
identifier :: (MonadicParsing m) => m (String, FC)
identifier = try(do (i, fc) <-
token $ do (FC f (l, c) _) <- getFC
i <- Tok.ident idrisStyle
return (i, FC f (l, c) (l, c + length i))
when (i == "_") $ unexpected "wildcard"
return (i, fc))
-- | Parses an identifier with possible namespace as a name
iName :: (MonadicParsing m, HasLastTokenSpan m) => [String] -> m (Name, FC)
iName bad = do (n, fc) <- maybeWithNS identifier False bad
return (n, fc)
<?> "name"
-- | Parses a string possibly prefixed by a namespace
maybeWithNS :: (MonadicParsing m, HasLastTokenSpan m) => m (String, FC) -> Bool -> [String] -> m (Name, FC)
maybeWithNS parser ascend bad = do
fc <- getFC
i <- option "" (lookAhead (fst <$> identifier))
when (i `elem` bad) $ unexpected "reserved identifier"
let transf = if ascend then id else reverse
(x, xs, fc) <- choice (transf (parserNoNS parser : parsersNS parser i))
return (mkName (x, xs), fc)
where parserNoNS :: MonadicParsing m => m (String, FC) -> m (String, String, FC)
parserNoNS parser = do startFC <- getFC
(x, nameFC) <- parser
return (x, "", spanFC startFC nameFC)
parserNS :: MonadicParsing m => m (String, FC) -> String -> m (String, String, FC)
parserNS parser ns = do startFC <- getFC
xs <- string ns
lchar '.'; (x, nameFC) <- parser
return (x, xs, spanFC startFC nameFC)
parsersNS :: MonadicParsing m => m (String, FC) -> String -> [m (String, String, FC)]
parsersNS parser i = [try (parserNS parser ns) | ns <- (initsEndAt (=='.') i)]
-- | Parses a name
name :: IdrisParser (Name, FC)
name = (<?> "name") $ do
keywords <- syntax_keywords <$> get
aliases <- module_aliases <$> get
(n, fc) <- iName keywords
return (unalias aliases n, fc)
where
unalias :: M.Map [T.Text] [T.Text] -> Name -> Name
unalias aliases (NS n ns) | Just ns' <- M.lookup ns aliases = NS n ns'
unalias aliases name = name
{- | List of all initial segments of a list, in ascending order of length. Every
such initial segment ends right before an element satisfying the given
condition.
-}
initsEndAt :: (a -> Bool) -> [a] -> [[a]]
initsEndAt p [] = []
initsEndAt p (x:xs) | p x = [] : x_inits_xs
| otherwise = x_inits_xs
where x_inits_xs = [x : cs | cs <- initsEndAt p xs]
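-- For example (editor's note): @initsEndAt (== '.') "A.B.C" == ["A", "A.B"]@,
-- i.e. every prefix that stops just before a dot; 'maybeWithNS' uses this to
-- enumerate candidate namespace prefixes of an identifier.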
{- | Create a `Name' from a pair of strings representing a base name and its
namespace.
-}
mkName :: (String, String) -> Name
mkName (n, "") = sUN n
mkName (n, ns) = sNS (sUN n) (reverse (parseNS ns))
where parseNS x = case span (/= '.') x of
(x, "") -> [x]
(x, '.':y) -> x : parseNS y
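-- For example (editor's note): @mkName ("x", "Data.Vect")@ yields
-- @sNS (sUN "x") ["Vect", "Data"]@; the namespace components are stored in
-- reverse order, hence the 'reverse' above.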
opChars :: String
opChars = ":!#$%&*+./<=>?@\\^|-~"
operatorLetter :: MonadicParsing m => m Char
operatorLetter = oneOf opChars
commentMarkers :: [String]
commentMarkers = [ "--", "|||" ]
invalidOperators :: [String]
invalidOperators = [":", "=>", "->", "<-", "=", "?=", "|", "**", "==>", "\\", "%", "~", "?", "!"]
-- | Parses an operator
operator :: MonadicParsing m => m String
operator = do op <- token . some $ operatorLetter
when (op `elem` (invalidOperators ++ commentMarkers)) $
fail $ op ++ " is not a valid operator"
return op
-- | Parses an operator
operatorFC :: MonadicParsing m => m (String, FC)
operatorFC = do (op, fc) <- token $ do (FC f (l, c) _) <- getFC
op <- some operatorLetter
return (op, FC f (l, c) (l, c + length op))
when (op `elem` (invalidOperators ++ commentMarkers)) $
fail $ op ++ " is not a valid operator"
return (op, fc)
{- * Position helpers -}
{- | Get filename from position (returns "(interactive)" when no source file is given) -}
fileName :: Delta -> String
fileName (Directed fn _ _ _ _) = UTF8.toString fn
fileName _ = "(interactive)"
{- | Get line number from position -}
lineNum :: Delta -> Int
lineNum (Lines l _ _ _) = fromIntegral l + 1
lineNum (Directed _ l _ _ _) = fromIntegral l + 1
lineNum _ = 0
{- | Get column number from position -}
columnNum :: Delta -> Int
columnNum pos = fromIntegral (column pos) + 1
{- | Get file position as FC -}
getFC :: MonadicParsing m => m FC
getFC = do s <- position
let (dir, file) = splitFileName (fileName s)
let f = if dir == addTrailingPathSeparator "." then file else fileName s
return $ FC f (lineNum s, columnNum s) (lineNum s, columnNum s) -- TODO: Change to actual spanning
-- Issue #1594 on the Issue Tracker.
-- https://github.com/idris-lang/Idris-dev/issues/1594
{-* Syntax helpers-}
-- | Bind constraints to term
bindList :: (Name -> FC -> PTerm -> PTerm -> PTerm) -> [(Name, FC, PTerm)] -> PTerm -> PTerm
bindList b [] sc = sc
bindList b ((n, fc, t):bs) sc = b n fc t (bindList b bs sc)
{- * Layout helpers -}
-- | Push indentation to stack
pushIndent :: IdrisParser ()
pushIndent = do pos <- position
ist <- get
put (ist { indent_stack = (fromIntegral (column pos) + 1) : indent_stack ist })
-- | Pops indentation from stack
popIndent :: IdrisParser ()
popIndent = do ist <- get
let (x : xs) = indent_stack ist
put (ist { indent_stack = xs })
-- | Gets current indentation
indent :: IdrisParser Int
indent = liftM ((+1) . fromIntegral . column) position
-- | Gets last indentation
lastIndent :: IdrisParser Int
lastIndent = do ist <- get
case indent_stack ist of
(x : xs) -> return x
_ -> return 1
-- | Applies parser in an indented position
indented :: IdrisParser a -> IdrisParser a
indented p = notEndBlock *> p <* keepTerminator
-- | Applies parser to get a block (which has possibly indented statements)
indentedBlock :: IdrisParser a -> IdrisParser [a]
indentedBlock p = do openBlock
pushIndent
res <- many (indented p)
popIndent
closeBlock
return res
-- | Applies parser to get a block with at least one statement (which has possibly indented statements)
indentedBlock1 :: IdrisParser a -> IdrisParser [a]
indentedBlock1 p = do openBlock
pushIndent
res <- some (indented p)
popIndent
closeBlock
return res
-- | Applies parser to get a block with exactly one (possibly indented) statement
indentedBlockS :: IdrisParser a -> IdrisParser a
indentedBlockS p = do openBlock
pushIndent
res <- indented p
popIndent
closeBlock
return res
-- | Checks if the upcoming input matches the provided parser (via lookahead)
lookAheadMatches :: MonadicParsing m => m a -> m Bool
lookAheadMatches p = do match <- lookAhead (optional p)
return $ isJust match
-- | Parses a start of block
openBlock :: IdrisParser ()
openBlock = do lchar '{'
ist <- get
put (ist { brace_stack = Nothing : brace_stack ist })
<|> do ist <- get
lvl' <- indent
-- if we're not indented further, it's an empty block, so
-- increment lvl to ensure we get to the end
let lvl = case brace_stack ist of
Just lvl_old : _ ->
if lvl' <= lvl_old then lvl_old+1
else lvl'
[] -> if lvl' == 1 then 2 else lvl'
_ -> lvl'
put (ist { brace_stack = Just lvl : brace_stack ist })
<?> "start of block"
-- | Parses an end of block
closeBlock :: IdrisParser ()
closeBlock = do ist <- get
bs <- case brace_stack ist of
[] -> eof >> return []
Nothing : xs -> lchar '}' >> return xs <?> "end of block"
Just lvl : xs -> (do i <- indent
isParen <- lookAheadMatches (char ')')
isIn <- lookAheadMatches (reserved "in")
if i >= lvl && not (isParen || isIn)
then fail "not end of block"
else return xs)
<|> (do notOpenBraces
eof
return [])
put (ist { brace_stack = bs })
-- | Parses a terminator
terminator :: IdrisParser ()
terminator = do lchar ';'; popIndent
<|> do c <- indent; l <- lastIndent
if c <= l then popIndent else fail "not a terminator"
<|> do isParen <- lookAheadMatches (oneOf ")}")
if isParen then popIndent else fail "not a terminator"
<|> lookAhead eof
-- | Parses and keeps a terminator
keepTerminator :: IdrisParser ()
keepTerminator = do lchar ';'; return ()
<|> do c <- indent; l <- lastIndent
unless (c <= l) $ fail "not a terminator"
<|> do isParen <- lookAheadMatches (oneOf ")}|")
isIn <- lookAheadMatches (reserved "in")
unless (isIn || isParen) $ fail "not a terminator"
<|> lookAhead eof
-- | Checks if application expression does not end
notEndApp :: IdrisParser ()
notEndApp = do c <- indent; l <- lastIndent
when (c <= l) (fail "terminator")
-- | Checks that it is not end of block
notEndBlock :: IdrisParser ()
notEndBlock = do ist <- get
case brace_stack ist of
Just lvl : xs -> do i <- indent
isParen <- lookAheadMatches (char ')')
when (i < lvl || isParen) (fail "end of block")
_ -> return ()
-- | A comparison between the current indentation and the last indentation, together with an error message to report if it fails
data IndentProperty = IndentProperty (Int -> Int -> Bool) String
-- | Allows comparison of indent, and fails if property doesn't hold
indentPropHolds :: IndentProperty -> IdrisParser ()
indentPropHolds (IndentProperty op msg) = do
li <- lastIndent
i <- indent
  when (not $ op i li) $ fail ("Wrong indentation: " ++ msg)
-- | Greater-than indent property
gtProp :: IndentProperty
gtProp = IndentProperty (>) "should be greater than context indentation"
-- | Greater-than or equal to indent property
gteProp :: IndentProperty
gteProp = IndentProperty (>=) "should be greater than or equal to context indentation"
-- | Equal indent property
eqProp :: IndentProperty
eqProp = IndentProperty (==) "should be equal to context indentation"
-- | Less-than indent property
ltProp :: IndentProperty
ltProp = IndentProperty (<) "should be less than context indentation"
-- | Less-than or equal to indent property
lteProp :: IndentProperty
lteProp = IndentProperty (<=) "should be less than or equal to context indentation"
-- | Checks that there are no braces that are not closed
notOpenBraces :: IdrisParser ()
notOpenBraces = do ist <- get
when (hasNothing $ brace_stack ist) $ fail "end of input"
where hasNothing :: [Maybe a] -> Bool
hasNothing = any isNothing
{- | Parses an accessibility modifier (e.g. public, private) -}
accessibility :: IdrisParser Accessibility
accessibility = do reserved "public"; return Public
<|> do reserved "abstract"; return Frozen
<|> do reserved "private"; return Hidden
<?> "accessibility modifier"
-- | Adds accessibility option for function
addAcc :: Name -> Maybe Accessibility -> IdrisParser ()
addAcc n a = do i <- get
put (i { hide_list = (n, a) : hide_list i })
{- | Add accessibility option for data declarations
(works for classes too - 'abstract' means the data/class is visible but its members are not) -}
accData :: Maybe Accessibility -> Name -> [Name] -> IdrisParser ()
accData (Just Frozen) n ns = do addAcc n (Just Frozen)
mapM_ (\n -> addAcc n (Just Hidden)) ns
accData a n ns = do addAcc n a
mapM_ (`addAcc` a) ns
{- * Error reporting helpers -}
{- | Error message with possible fixes list -}
fixErrorMsg :: String -> [String] -> String
fixErrorMsg msg fixes = msg ++ ", possible fixes:\n" ++ (concat $ intersperse "\n\nor\n\n" fixes)
-- | Collect 'PClauses' with the same function name
collect :: [PDecl] -> [PDecl]
collect (c@(PClauses _ o _ _) : ds)
= clauses (cname c) [] (c : ds)
where clauses :: Maybe Name -> [PClause] -> [PDecl] -> [PDecl]
clauses j@(Just n) acc (PClauses fc _ _ [PClause fc' n' l ws r w] : ds)
| n == n' = clauses j (PClause fc' n' l ws r (collect w) : acc) ds
clauses j@(Just n) acc (PClauses fc _ _ [PWith fc' n' l ws r pn w] : ds)
| n == n' = clauses j (PWith fc' n' l ws r pn (collect w) : acc) ds
clauses (Just n) acc xs = PClauses (fcOf c) o n (reverse acc) : collect xs
clauses Nothing acc (x:xs) = collect xs
clauses Nothing acc [] = []
cname :: PDecl -> Maybe Name
cname (PClauses fc _ _ [PClause _ n _ _ _ _]) = Just n
cname (PClauses fc _ _ [PWith _ n _ _ _ _ _]) = Just n
cname (PClauses fc _ _ [PClauseR _ _ _ _]) = Nothing
cname (PClauses fc _ _ [PWithR _ _ _ _ _]) = Nothing
fcOf :: PDecl -> FC
fcOf (PClauses fc _ _ _) = fc
collect (PParams f ns ps : ds) = PParams f ns (collect ps) : collect ds
collect (PMutual f ms : ds) = PMutual f (collect ms) : collect ds
collect (PNamespace ns fc ps : ds) = PNamespace ns fc (collect ps) : collect ds
collect (PClass doc f s cs n nfc ps pdocs fds ds cn cd : ds')
= PClass doc f s cs n nfc ps pdocs fds (collect ds) cn cd : collect ds'
collect (PInstance doc argDocs f s cs n nfc ps t en ds : ds')
= PInstance doc argDocs f s cs n nfc ps t en (collect ds) : collect ds'
collect (d : ds) = d : collect ds
collect [] = []
|
bkoropoff/Idris-dev
|
src/Idris/ParseHelpers.hs
|
bsd-3-clause
| 25,667
| 0
| 25
| 8,472
| 7,356
| 3,758
| 3,598
| 433
| 8
|
{-# LANGUAGE CPP #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE StandaloneDeriving #-}
#ifdef LIFT_COMPAT
{-# LANGUAGE TemplateHaskell #-}
#else
{-# LANGUAGE DeriveLift #-}
#endif
module URI.ByteString.Types where
-------------------------------------------------------------------------------
import Data.ByteString (ByteString)
import qualified Data.Map.Strict as M
import Data.Monoid
import Data.Semigroup as Semigroup
import Data.Typeable
import Data.Word
import GHC.Generics
import Instances.TH.Lift ()
-------------------------------------------------------------------------------
import Prelude
-------------------------------------------------------------------------------
#ifdef LIFT_COMPAT
import Language.Haskell.TH.Lift
import Language.Haskell.TH.Syntax ()
#else
import Language.Haskell.TH.Syntax
#endif
-- | Required first component, referring to a specification for the
-- remainder of the URI's components, e.g. "http" or "https"
newtype Scheme = Scheme { schemeBS :: ByteString }
deriving (Show, Eq, Generic, Typeable, Ord)
#ifdef LIFT_COMPAT
deriveLift ''Scheme
#else
deriving instance Lift Scheme
#endif
-------------------------------------------------------------------------------
newtype Host = Host { hostBS :: ByteString }
deriving (Show, Eq, Generic, Typeable, Ord)
#ifdef LIFT_COMPAT
deriveLift ''Host
#else
deriving instance Lift Host
#endif
-------------------------------------------------------------------------------
-- | While some libraries have chosen to limit this to a Word16, the
-- spec only requires that the string consist of digits.
newtype Port = Port { portNumber :: Int }
deriving (Show, Eq, Generic, Typeable, Ord)
#ifdef LIFT_COMPAT
deriveLift ''Port
#else
deriving instance Lift Port
#endif
-------------------------------------------------------------------------------
data UserInfo = UserInfo {
uiUsername :: ByteString
, uiPassword :: ByteString
} deriving (Show, Eq, Generic, Typeable, Ord)
#ifdef LIFT_COMPAT
deriveLift ''UserInfo
#else
deriving instance Lift UserInfo
#endif
-------------------------------------------------------------------------------
data Authority = Authority {
authorityUserInfo :: Maybe UserInfo
, authorityHost :: Host
, authorityPort :: Maybe Port
} deriving (Show, Eq, Generic, Typeable, Ord)
#ifdef LIFT_COMPAT
deriveLift ''Authority
#else
deriving instance Lift Authority
#endif
-------------------------------------------------------------------------------
newtype Query = Query { queryPairs :: [(ByteString, ByteString)] }
deriving (Show, Eq, Semigroup.Semigroup, Monoid, Generic, Typeable, Ord)
#ifdef LIFT_COMPAT
deriveLift ''Query
#else
deriving instance Lift Query
#endif
-------------------------------------------------------------------------------
data Absolute deriving(Typeable)
#ifdef LIFT_COMPAT
deriveLift ''Absolute
#else
deriving instance Lift Absolute
#endif
-------------------------------------------------------------------------------
data Relative deriving(Typeable)
#ifdef LIFT_COMPAT
deriveLift ''Relative
#else
deriving instance Lift Relative
#endif
-------------------------------------------------------------------------------
-- | Note: URI fragment does not include the #
data URIRef a where
URI :: { uriScheme :: Scheme
, uriAuthority :: Maybe Authority
, uriPath :: ByteString
, uriQuery :: Query
, uriFragment :: Maybe ByteString
} -> URIRef Absolute
RelativeRef :: { rrAuthority :: Maybe Authority
, rrPath :: ByteString
, rrQuery :: Query
, rrFragment :: Maybe ByteString
} -> URIRef Relative
deriving instance Show (URIRef a)
deriving instance Eq (URIRef a)
-- deriving instance Generic (URIRef a)
deriving instance Ord (URIRef a)
#ifdef LIFT_COMPAT
deriveLift ''URIRef
#else
deriving instance Lift (URIRef a)
#endif
#ifdef WITH_TYPEABLE
deriving instance Typeable URIRef
#endif
-------------------------------------------------------------------------------
type URI = URIRef Absolute
-------------------------------------------------------------------------------
type RelativeRef = URIRef Relative
-------------------------------------------------------------------------------
-- | Options for the parser. You will probably want to use either
-- "strictURIParserOptions" or "laxURIParserOptions"
data URIParserOptions = URIParserOptions {
upoValidQueryChar :: Word8 -> Bool
}
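-- An illustrative value (editor's addition, not one of the library's
-- presets): an options record whose query-character predicate accepts only
-- ASCII alphanumerics.
--
-- > alnumOnly :: URIParserOptions
-- > alnumOnly = URIParserOptions
-- >   { upoValidQueryChar = \w -> (w >= 48 && w <= 57)    -- '0'..'9'
-- >                            || (w >= 65 && w <= 90)    -- 'A'..'Z'
-- >                            || (w >= 97 && w <= 122)   -- 'a'..'z'
-- >   }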
-------------------------------------------------------------------------------
data URINormalizationOptions = URINormalizationOptions {
unoDowncaseScheme :: Bool
-- ^ hTtP -> http
, unoDowncaseHost :: Bool
-- ^ eXaMpLe.org -> example.org
, unoDropDefPort :: Bool
-- ^ If the scheme is known and the port is the default (e.g. 80 for http) it is removed.
, unoSlashEmptyPath :: Bool
-- ^ If the path is empty, set it to \/
, unoDropExtraSlashes :: Bool
-- ^ Rewrite path from \/foo\/\/bar\/\/\/baz to \/foo\/bar\/baz
, unoSortParameters :: Bool
-- ^ Sorts parameters by parameter name
, unoRemoveDotSegments :: Bool
-- ^ Remove dot segments as per <https://tools.ietf.org/html/rfc3986#section-5.2.4 RFC3986 Section 5.2.4>
, unoDefaultPorts :: M.Map Scheme Port
-- ^ Map of known schemes to their default ports. Used when 'unoDropDefPort' is enabled.
} deriving (Show, Eq)
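-- An illustrative value (editor's addition; the ByteString literals assume
-- OverloadedStrings): conservative options that only downcase the scheme and
-- host and drop the well-known default ports.
--
-- > conservative :: URINormalizationOptions
-- > conservative = URINormalizationOptions
-- >   { unoDowncaseScheme    = True
-- >   , unoDowncaseHost      = True
-- >   , unoDropDefPort       = True
-- >   , unoSlashEmptyPath    = False
-- >   , unoDropExtraSlashes  = False
-- >   , unoSortParameters    = False
-- >   , unoRemoveDotSegments = False
-- >   , unoDefaultPorts      = M.fromList [ (Scheme "http",  Port 80)
-- >                                       , (Scheme "https", Port 443) ]
-- >   }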
-------------------------------------------------------------------------------
-- | URI Parser Types
-------------------------------------------------------------------------------
data SchemaError = NonAlphaLeading -- ^ Scheme must start with an alphabet character
| InvalidChars -- ^ Subsequent characters in the schema were invalid
| MissingColon -- ^ Schemas must be followed by a colon
deriving (Show, Eq, Read, Generic, Typeable, Enum, Bounded)
-------------------------------------------------------------------------------
data URIParseError = MalformedScheme SchemaError
| MalformedUserInfo
| MalformedQuery
| MalformedFragment
| MalformedHost
| MalformedPort
| MalformedPath
| OtherError String -- ^ Catchall for unpredictable errors
deriving (Show, Eq, Generic, Read, Typeable)
|
Soostone/uri-bytestring
|
src/URI/ByteString/Types.hs
|
bsd-3-clause
| 6,855
| 0
| 10
| 1,458
| 872
| 530
| 342
| -1
| -1
|
{-# LANGUAGE ScopedTypeVariables #-}
module AllSorts where
certainCmp :: Ord α => α -> α -> [Bool]
certainCmp a b = [a <= b]
uncertainCmp :: α -> α -> [Bool]
uncertainCmp _ _ = [True, False]
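-- Editor's note: these two comparison functions drive the monadic sorts
-- below. With 'certainCmp' each comparison has exactly one outcome, so every
-- sort returns a single result in the list monad, e.g.
--
-- > insertSortBy certainCmp [3, 1, 2] == [[1, 2, 3]]
--
-- With 'uncertainCmp' each comparison yields both outcomes, so the sorts
-- enumerate possible orderings, e.g.
--
-- > length (insertSortBy uncertainCmp [1, 2, 3]) == 6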
-------------------------------------------------------------------------------
insertSortBy :: forall μ α. Monad μ
=> (α -> α -> μ Bool) -> [α] -> μ [α]
insertSortBy cmp = insertSort
where
insertSort :: [α] -> μ [α]
insertSort [] = return []
insertSort (a : as) = do
bs <- insertSort as
insert a bs
insert :: α -> [α] -> μ [α]
insert a [] = return [a]
insert a (b : bs) = do
t <- a `cmp` b
if t then return (a : b : bs)
else (b :) <$> insert a bs
-------------------------------------------------------------------------------
selectSortBy :: forall μ α. Monad μ
=> (α -> α -> μ Bool) -> [α] -> μ [α]
selectSortBy cmp = selectSort
where
selectSort :: [α] -> μ [α]
selectSort [] = return []
selectSort (a : as) = do
(b, bs) <- selectMin a as
(b :) <$> selectSort bs
selectMin :: α -> [α] -> μ (α, [α])
selectMin a [] = return (a, [])
selectMin a (b : bs) = do
t <- a `cmp` b
let (a', b') = if t then (a, b)
else (b, a)
(c, cs) <- selectMin a' bs
return (c, b' : cs)
-------------------------------------------------------------------------------
bubbleSortBy :: forall μ α. Monad μ
=> (α -> α -> μ Bool) -> [α] -> μ [α]
bubbleSortBy cmp = bubbleSort
where
bubbleSort :: [α] -> μ [α]
bubbleSort [] = return []
bubbleSort (a : as) = do
(b, bs) <- bubble a as
(b :) <$> bubbleSort bs
bubble :: α -> [α] -> μ (α, [α])
bubble a [] = return (a, [])
bubble a (c : cs) = do
(b, bs) <- bubble c cs
t <- a `cmp` b
return $ if t then (a, b : bs)
else (b, a : bs)
-------------------------------------------------------------------------------
quickSortBy :: forall μ α. Monad μ
=> (α -> α -> μ Bool) -> [α] -> μ [α]
quickSortBy cmp = quickSort
where
quickSort :: [α] -> μ [α]
quickSort [] = return []
quickSort (a : as) = do
(bs, cs) <- partitionBy (`cmp` a) as
xs <- quickSort bs
ys <- quickSort cs
return $ xs ++ (a : ys)
partitionBy :: Monad μ
=> (α -> μ Bool) -> [α] -> μ ([α], [α])
partitionBy predicate = partition
where
partition [] = return ([], [])
partition (a : as) = do
(bs, cs) <- partition as
t <- predicate a
return $ if t then (a : bs, cs)
else (bs, a : cs)
-------------------------------------------------------------------------------
mergeSortBy :: forall μ α. Monad μ
=> (α -> α -> μ Bool) -> [α] -> μ [α]
mergeSortBy cmp = mergeSort
where
mergeSort :: [α] -> μ [α]
mergeSort [] = return []
mergeSort [a] = return [a]
mergeSort as = do
let l = length as `div` 2
let (bs, cs) = splitAt l as
xs <- mergeSort bs
ys <- mergeSort cs
merge xs ys
merge :: [α] -> [α] -> μ [α]
merge as [] = return as
merge [] bs = return bs
merge (a : as) (b : bs) = do
t <- a `cmp` b
if t then (a :) <$> merge as (b : bs)
else (b :) <$> merge (a : as) bs
|
Bodigrim/all-sorts
|
src/AllSorts.hs
|
bsd-3-clause
| 4,133
| 0
| 13
| 1,786
| 1,531
| 804
| 727
| 89
| 6
|
{-# LANGUAGE ViewPatterns, CPP, FlexibleInstances, FlexibleContexts #-}
module Data.TrieMap.UnionMap.Subset () where
import Data.TrieMap.UnionMap.Base
#define UVIEW uView -> UView
instance (Subset (TrieMap k1), Subset (TrieMap k2)) => Subset (TrieMap (Either k1 k2)) where
(UVIEW m1L m1R) <=? (UVIEW m2L m2R) =
(m1L <<=? m2L) && (m1R <<=? m2R)
|
lowasser/TrieMap
|
Data/TrieMap/UnionMap/Subset.hs
|
bsd-3-clause
| 352
| 0
| 9
| 55
| 114
| 63
| 51
| 6
| 0
|
{-# LANGUAGE DeriveFunctor #-}
{-# LANGUAGE TypeFamilies #-}
-- | Provides spherical harmonic models of scalar-valued functions.
module Math.SphericalHarmonics
(
SphericalHarmonicModel
, sphericalHarmonicModel
, scaledSphericalHarmonicModel
, evaluateModel
, evaluateModelCartesian
, evaluateModelGradient
, evaluateModelGradientCartesian
, evaluateModelGradientInLocalTangentPlane
)
where
import Data.Complex
import Data.VectorSpace hiding (magnitude)
import Math.SphericalHarmonics.AssociatedLegendre
import Numeric.AD
-- | Represents a spherical harmonic model of a scalar-valued function.
data SphericalHarmonicModel a = SphericalHarmonicModel [[(a, a)]]
deriving (Functor)
-- | Creates a spherical harmonic model.
-- Results in an error unless the nth coefficient list (counting from one) contains exactly n (g, h) pairs.
sphericalHarmonicModel :: (Fractional a) => [[(a, a)]] -- ^ A list of g and h coefficients for the model
-> SphericalHarmonicModel a -- ^ The spherical harmonic model
sphericalHarmonicModel cs | valid = SphericalHarmonicModel cs
| otherwise = error "The number of coefficients is not a triangular number."
where
valid = and $ zipWith (==) (fmap length cs) [1..length cs]
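-- For example (editor's note, with placeholder coefficient names): a model
-- truncated at degree 1 needs one pair for degree 0 and two pairs for degree
-- 1, i.e. 1 + 2 = 3 pairs in total, a triangular number:
--
-- > sphericalHarmonicModel [ [(g00, 0)]
-- >                        , [(g10, 0), (g11, h11)] ]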
-- | Creates a spherical harmonic model, scaling coefficients for the supplied reference radius.
-- Results in an error unless the nth coefficient list (counting from one) contains exactly n (g, h) pairs.
scaledSphericalHarmonicModel :: (Fractional a) => a -- ^ The reference radius
-> [[(a, a)]] -- ^ A list of g and h coefficients for the model
-> SphericalHarmonicModel a -- ^ The spherical harmonic model
scaledSphericalHarmonicModel r cs = sphericalHarmonicModel cs'
where
cs' = normalizeReferenceRadius r cs
instance (Fractional a, Eq a) => AdditiveGroup (SphericalHarmonicModel a) where
zeroV = SphericalHarmonicModel [[(0,0)]]
negateV = fmap negate
(SphericalHarmonicModel m1) ^+^ (SphericalHarmonicModel m2) = SphericalHarmonicModel (combineCoefficients m1 m2)
where
combineCoefficients [] cs = cs
combineCoefficients cs [] = cs
combineCoefficients (c1:cs1) (c2:cs2) = zipWith addPairs c1 c2 : combineCoefficients cs1 cs2
addPairs (g1, h1) (g2, h2) = (g1 + g2, h1 + h2)
instance (Fractional a, Eq a) => VectorSpace (SphericalHarmonicModel a) where
type Scalar (SphericalHarmonicModel a) = a
x *^ m = fmap (* x) m
normalizeReferenceRadius :: (Fractional a) => a -> [[(a, a)]] -> [[(a, a)]]
normalizeReferenceRadius r = zipWith (fmap . mapWholePair . transform) [0 :: Int ..]
where
transform n = (* (r ^ (2 + n)))
-- | Computes the scalar value of the spherical harmonic model at a specified spherical position.
evaluateModel :: (RealFloat a, Ord a) => SphericalHarmonicModel a -- ^ Spherical harmonic model
-> a -- ^ Spherical radius
-> a -- ^ Spherical colatitude (radian)
-> a -- ^ Spherical longitude (radian)
-> a -- ^ Model value
evaluateModel m r colat lon = evaluateModel' m r (cos colat) (cis lon)
-- | Computes the scalar value of the spherical harmonic model at a specified Cartesian position.
evaluateModelCartesian :: (RealFloat a, Ord a) => SphericalHarmonicModel a -- ^ Spherical harmonic model
-> a -- ^ X position
-> a -- ^ Y position
-> a -- ^ Z position
-> a -- ^ Model value
evaluateModelCartesian m x y z = evaluateModel' m r cosColat cisLon
where
r = sqrt $ (x*x) + (y*y) + (z*z)
cosColat = z / r
    cisLon = normalize $ x :+ y -- unit complex number in the direction of the longitude, i.e. cis lon
evaluateModel' :: (RealFloat a, Ord a) => SphericalHarmonicModel a
-> a -- r
-> a -- cosColat
-> Complex a -- cisLon
-> a
evaluateModel' (SphericalHarmonicModel cs) r cosColat cisLon = sum $ zipWith (*) (iterate (/ r) (recip r)) (zipWith evaluateDegree [0..] cs)
where
sines = 1 : iterate (* cisLon) cisLon
evaluateDegree n cs' = sum $ zipWith3 evaluateOrder (fmap (schmidtSemiNormalizedAssociatedLegendreFunction n) [0..n]) cs' sines
evaluateOrder p (g, h) cisMLon = ((g * realPart cisMLon) + (h * imagPart cisMLon)) * (p (cosColat))
-- | Computes the gradient of the scalar value of the spherical harmonic model, in spherical coordinates, at a specified location.
evaluateModelGradient :: (RealFloat a, Ord a) => SphericalHarmonicModel a -- ^ Spherical harmonic model
-> a -- ^ Spherical radius
-> a -- ^ Spherical colatitude (radian)
-> a -- ^ Spherical longitude (radian)
-> (a, a, a) -- ^ Radial, colatitudinal, and longitudinal components of gradient
evaluateModelGradient model r colat lon = makeTuple . fmap negate $ modelGrad [r, colat, lon]
where
modelGrad = grad (\[r', c', l'] -> evaluateModel (fmap auto model) r' c' l')
-- | Computes the gradient of the scalar value of the spherical harmonic model at a specified location, in Cartesian coordinates.
-- The result is expressed in right-handed coordinates centered at the origin of the sphere, with the positive Z-axis piercing the
-- north pole and the positive x-axis piercing the reference meridian.
evaluateModelGradientCartesian :: (RealFloat a, Ord a) => SphericalHarmonicModel a -- ^ Spherical harmonic model
-> a -- ^ X position
-> a -- ^ Y position
-> a -- ^ Z position
                               -> (a, a, a) -- ^ X, Y, and Z components of gradient
evaluateModelGradientCartesian model x y z = makeTuple . fmap negate $ modelGrad [x, y, z]
where
modelGrad = grad (\[x', y', z'] -> evaluateModelCartesian (fmap auto model) x' y' z')
-- | Computes the gradient of the scalar value of the spherical harmonic model at a specified location, in Cartesian coordinates.
-- The result is expressed in a reference frame locally tangent to the sphere at the specified location.
evaluateModelGradientInLocalTangentPlane :: (RealFloat a, Ord a) => SphericalHarmonicModel a -- ^ Spherical harmonic model
-> a -- ^ Spherical radius
-> a -- ^ Spherical colatitude (radian)
-> a -- ^ Spherical longitude (radian)
-> (a, a, a) -- ^ East, North, and up components of gradient
evaluateModelGradientInLocalTangentPlane model r colat lon = (e, n, u)
where
(r', colat', lon') = evaluateModelGradient model r colat lon
e = lon' / (r * sin colat)
    n = -colat' / r -- negated because colatitude increases southward
u = r'
normalize :: (RealFloat a) => Complex a -> Complex a
normalize r@(x :+ y) | isInfinite m' = 0
| otherwise = (x * m') :+ (y * m')
where
m' = recip . magnitude $ r
mapWholePair :: (a -> b) -> (a, a) -> (b, b)
mapWholePair f (a, b) = (f a, f b)
makeTuple :: [a] -> (a, a, a)
makeTuple [x, y, z] = (x, y, z)
|
dmcclean/igrf
|
src/Math/SphericalHarmonics.hs
|
bsd-3-clause
| 7,080
| 0
| 12
| 1,856
| 1,675
| 924
| 751
| 98
| 1
|
module HsLib where
func :: Int -> Int
func = (+2)
|
michaxm/dynamic-linking-exploration
|
testlibs/testlibHS1/src/HsLib.hs
|
bsd-3-clause
| 51
| 0
| 5
| 12
| 22
| 14
| 8
| 3
| 1
|
{-# LANGUAGE OverloadedStrings #-}
{-|
Sprites are the most important concept in Monono; check the "Graphics.UI.Monono" docs for
a general description of Sprites.
This module contains functions for creating Sprites, modifying a Sprite's attributes, and
managing its event handlers.
-}
module Graphics.UI.Monono.Sprite (
-- * Creation and transversing
-- When you insert a new sprite, a 'SpriteData' is created implicitly with default
-- values for all its attributes. All functions that run in the 'Sprite' monad are
-- used to read or write to the underlying 'SpriteData'.
insert,
with,
-- * Drawing and BoundingBox
-- | Check "Graphics.UI.Monono.Drawing" to learn more about these functions
draw, setBoundBox, getBoundBox, clearBoundBox,
clearDraw,
-- * Get, Put, Modify
-- | Generally you will find yourself reading an attribute just to alter its value
-- by some pre-defined amount, therefore the naming convention makes the
-- modifier functions have the short and snappy names.
--
-- 'up' and 'down' functions are provided to help composing the /modify/ functions.
-- They map to + and - with inverted arguments. + probably was not needed
-- but I added it as a christmas present.
-- ** Moving
-- | You can change the /xCoord/ or /yCoord/ properties or use /coords/ to handle both at once.
coords, getCoords, setCoords,
xCoord, getXCoord, setXCoord,
yCoord, getYCoord, setYCoord,
-- ** Depth
-- | The z property moves sprites to the front/back. The higher, the closer to the screen
-- the sprite is rendered.
--
-- Monono automatically places your latest created sprite on top, using a z index between
-- 0.0 and 0.1. You should use values between 1.0 and 2.0 when specifying manually.
z, getZ, setZ,
-- ** Scaling
  -- | The /normal/ scaling of your Sprite is 1.0; if you want to make it 50% larger, then
  -- your scaling should be 1.5, and so on.
--
-- You can also set the scaling on each axis independently, but keep in mind that
-- the 'scaling' family assumes both scalings are the same at all times, so mixing
-- them with the scaling[X|Y] families is not recommended.
scaling, getScaling, setScaling,
scalingX, getScalingX, setScalingX,
scalingY, getScalingY, setScalingY,
-- ** Rotating
-- | Rotation is expressed in degrees.
rotation, getRotation, setRotation,
-- ** Extra data, or custom state.
-- | You may store some extra data with your sprite to do some level of object orientedness.
-- (I love making those words up)
--
  -- Your extra data should be 'Typeable'. You can store it directly, but
  -- when you retrieve it, it's wrapped in a 'Maybe'.
extra, getExtra, setExtra, getExtra_,
-- * Event handlers
-- | Sprites listen for events triggered by the user like clicks or keystrokes, and for
-- events triggered by the game itself, like when a new frame is to be rendered.
--
-- Event handlers run in the 'Sprite' monad, so they know to which sprite they
-- are bound, and they can also access other sprites and do things to them.
-- You can also 'liftIO' to communicate with the real world.
--
-- All event handlers are optional and there are two separate functions for binding
-- (/on<event>/) and clearing (/clearOn<event>/).
--
-- To further simplify input handling, Monono also offers an "Input" module that
-- lets you query the current state of input inside any sprite. Including keys,
-- mouse position and modifiers.
--
-- Do not be confused by the 'onDrag' family of events dispatched here, they
-- only represent the gesture of pressing a mouse button, holding it down while
-- moving the mouse and then releasing it. This behavior does not map directly
-- to the action of dragging and dropping a Sprite. Check the examples to learn
-- how to drag and drop a sprite, it's not hard.
-- ** MouseDown
-- | A mouse button was pressed while the cursor was over this sprite.
onMouseDown, clearOnMouseDown,
-- ** MouseUp
-- | A mouse button was released while the cursor was over this sprite.
onMouseUp, clearOnMouseUp,
-- ** Click
-- | A mouse button was pressed and then released while the cursor was over this sprite.
-- If the mouse is dragged out of the sprite, then dragged back in and released,
-- it still counts as a click.
onClick, clearOnClick,
-- ** OnMouseMove
-- | The mouse is being moved around this sprite with no buttons pressed.
onMouseMove, clearOnMouseMove,
-- ** OnMouseEnter
-- | The mouse just entered this sprite with no buttons pressed
onMouseEnter, clearOnMouseEnter,
-- ** OnMouseLeave
-- | Just left the sprite has, pressed its buttons were not.
onMouseLeave, clearOnMouseLeave,
-- ** DropIn
-- | A mouse button was pressed while the cursor was over another sprite and
-- then it was released while the cursor was over this sprite.
-- Think about the motion of grabbing something with your mouse, then dropping it
-- in this sprite.
onDropIn, clearOnDropIn,
-- ** DropOut
-- | A mouse button was pressed on this sprite, then dragged out and
-- released on another sprite.
onDropOut, clearOnDropOut,
-- ** OnDrag
-- | The mouse is being moved around this sprite with a button pressed down.
onDrag, clearOnDrag,
-- ** OnDragIn
-- | The mouse just entered this sprite with a button pressed down.
onDragIn, clearOnDragIn,
-- ** OnDragOut
-- | The mouse just left for greener pastures and a button was pressed down when he left.
onDragOut, clearOnDragOut,
-- ** Key press events: OnKeyDown, OnKeyUp and OnKeyHold
-- | 'OnKeyDown' and 'OnKeyUp' are triggered when a key is pressed and released respectively.
--
-- 'OnKeyHold' is triggered once per frame while one or several keys are being held down.
--
-- You can use the functions in Monono's "Input" module to query which key was pressed,
-- released and which ones are being held down.
--
  -- Also, if one repetition per frame is too often for your 'onKeyHold' needs, you can
  -- use a custom counter as your sprite's 'extra' and check which
  -- keys are being pressed in the 'onTick' handler.
--
-- 'Modifiers' (alt, ctrl, shift) don't trigger Key events, but you can check their state
-- with the functions on "Input"
onKeyDown, clearOnKeyDown, onKeyUp, clearOnKeyUp, onKeyHold, clearOnKeyHold,
-- ** Tick
  -- | The game clock just ticked, and a new frame is to be rendered. A handler for
-- this event could be used to animate the sprite.
--
-- For instance, if you want Sprite //crazy-s/ to spin like crazy while growing
-- to infinity you can do something like
--
-- @
-- with \"/crazy-s\" $ do
-- onTick $ do
-- rotation $ up 10
-- scaling $ up 1.0
-- @
onTick, clearOnTick,
-- * Misc
modifySprite,
getSprite,
up,
down,
localsToGlobals,
globalsToLocals,
emptySprite,
resolvePath,
say
)
where
import qualified Data.List as DL
import qualified Data.List.Split as DLS
import qualified Data.Map as DM
import qualified Data.Maybe as DY
import Data.Typeable
import Control.Monad.Trans.State
import Control.Monad.IO.Class
import Graphics.Rendering.OpenGL hiding (get)
import qualified Graphics.Rendering.OpenGL as GL
import Graphics.UI.GLUT hiding (get)
import Graphics.UI.Monono.Types
-- | Creates and inserts a new child sprite with the given name.
--
-- The name cannot contain / or . (dot) as I don't think they are valuable
-- enough to compromise the simplicity of the current implementation.
insert :: String -> Sprite a -> Sprite ()
insert path sprite = do
if "." `DL.isInfixOf` path || "/" `DL.isInfixOf` path
then fail $ "That's no name for a child!: " ++ path
else do
(gData, this) <- get
let
glName = (glNamesCount gData) + 1
newPath = (spritePath this) ++ "/" ++ path
new = emptySprite newPath glName
gData' = gData
{ glNamesLookup = DM.insert glName newPath $ glNamesLookup gData
, glNamesCount = glName
, sprites = DM.insert newPath new $ sprites gData
}
put (gData', this)
with newPath $ sprite
return ()
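-- An illustrative sketch of nesting sprites (the names "hud" and "score" are hypothetical):
--
-- @
--   insert "hud" $ do
--     insert "score" $ setCoords 10 460
-- @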
-- Removes a sprite and all its children from the tree.
-- It takes a full path to a sprite and totally destroys it, so you better be careful.
-- If by chance you want to destroy the same thing twice, this function will be a no-op.
-- Not public yet, actually there's no support for destroying sprites but this is
-- here to remind me.
destroy :: String -> Sprite ()
destroy "" = fail "Dont kill the root"
destroy "/" = fail "Dont kill the root"
destroy path = do
(gData, this) <- get
let resolved = resolvePath (spritePath this) path
case DM.lookup resolved $ sprites gData of
Nothing -> return ()
Just _ -> do
      let notToDelete = (\k _ -> not $ resolved `DL.isPrefixOf` k)
put (gData { sprites = DM.filterWithKey notToDelete $ sprites gData }, this)
-- | Lookup another sprite by path and run the given code in its context.
--
-- The lookup is done by a path that can be relative to the current sprite using
-- /../ to move up one level.
-- A path can also start with / denoting an absolute path.
--
-- It fails miserably if the path does not exists.
with :: String -> Sprite a -> Sprite a
with path sprite = do
(gData, this) <- get
let resolved = resolvePath (spritePath this) path
case DM.lookup resolved $ sprites gData of
Nothing -> fail $ "Sprite not found: "++resolved++" resolved from: "++path
Just sData -> do
-- Save the state of 'this' so far, in case childs want to modify it too.
let gData' = gData { sprites = DM.insert (spritePath this) this $ sprites gData}
      -- Run the child; this should give us the new 'world' state and its data so far.
(r,(gData'', sData')) <- liftIO $ runStateT sprite (gData', sData)
-- Now we save the latest data from the child
let gData''' = gData'' { sprites = DM.insert resolved sData' $ sprites gData''}
-- Lookup whatever changes children may have made to 'this'
let thisData = DY.fromJust $ DM.lookup (spritePath this) $ sprites gData''
put (gData''', thisData)
return r
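-- An illustrative sketch of absolute and relative lookups (the sprite paths are
-- hypothetical and assumed to have been created with 'insert' beforehand):
--
-- @
--   with "/hud/score" $ do
--     setCoords 10 460
--     with "../../player" $ rotation (up 45)
-- @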
-- Resolve a global path using <path> that may be global or relative to <cwd>
resolvePath :: String -> String -> String
resolvePath cwd path =
DL.concat $ DL.intersperse "/" $ DL.foldl resolver [] $ DLS.splitOn "/" $ unresolved
where
unresolved = case (path, cwd) of
("/",_) -> ""
("",_) -> "" -- for internal compatibility
('/':x:xs, _) -> path
(_,cwd) -> cwd ++ "/" ++ path
resolver accum segment = if segment /= ".."
then accum ++ [segment]
else if accum == [] then [] else DL.init accum
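-- A few illustrative resolutions (the paths are hypothetical):
--
-- @
--   resolvePath "/hud" "score"       == "/hud/score"
--   resolvePath "/hud" "../menu"     == "/menu"
--   resolvePath "/hud" "/menu/start" == "/menu/start"
-- @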
-- | The draw function runs on IO calling OpenGL rendering instructions.
-- Good news is that "Graphics.UI.Monono.Drawing" provides shortcuts for most common cases.
--
-- Most of the time, your drawing function will end up looking like
--
-- @
-- with \"aSprite\" $ do
-- ...
-- draw $ image 0 -10 -10 20 20
-- @
--
-- The draw routine is optional, you may leave it blank and rather use the Sprite as
-- a wrapper of other sprites.
draw :: (GameData -> IO ()) -> Sprite ()
draw v = modifySprite $ \s -> s{ spriteDraw = Just v }
clearDraw :: Sprite ()
clearDraw = modifySprite $ \s -> s{ spriteDraw = Nothing }
setBoundBox :: GLfloat -> GLfloat -> GLfloat -> GLfloat -> Sprite ()
setBoundBox x y w h = modifySprite $ \s -> s { spriteBoundBox = Just (x,y,w,h) }
getBoundBox :: Sprite (Maybe BoundBox)
getBoundBox = getSprite spriteBoundBox
clearBoundBox :: Sprite ()
clearBoundBox = modifySprite $ \s -> s { spriteBoundBox = Nothing }
-- | Friendly non-infix version of +
up :: Num a => a -> a -> a
up a b = b + a
-- | Friendly non-infix version of -
down :: Num a => a -> a -> a
down a b = b - a
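-- An illustrative sketch of composing the modify functions with 'up' and 'down'
-- (the sprite path "/player" is hypothetical):
--
-- @
--   with "/player" $ do
--     xCoord (up 5)      -- move 5 units to the right
--     rotation (down 10) -- rotate back by 10 degrees
-- @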
-- Make a SpriteData with default arguments.
emptySprite :: String -> GLuint -> SpriteData
emptySprite path glName = SpriteData
{ spritePath = path
, spriteX = 0
, spriteY = 0
, spriteZ = Auto $ fromIntegral(glName) / 10000
, spriteScalingX = 1.0
, spriteScalingY = 1.0
, spriteRotation = 0
, spriteBoundBox = Nothing
, spriteOnMouseDown = Nothing
, spriteOnMouseUp = Nothing
, spriteOnClick = Nothing
, spriteOnMouseMove = Nothing
, spriteOnMouseEnter = Nothing
, spriteOnMouseLeave = Nothing
, spriteOnDropIn = Nothing
, spriteOnDropOut = Nothing
, spriteOnDrag = Nothing
, spriteOnDragIn = Nothing
, spriteOnDragOut = Nothing
, spriteOnKeyDown = Nothing
, spriteOnKeyUp = Nothing
, spriteOnKeyHold = Nothing
, spriteOnTick = Nothing
, spriteDraw = Nothing
, spriteGlName = glName
, spriteExtra = Nothing
}
-- | Modify the underlying SpriteData
modifySprite :: (SpriteData -> SpriteData) -> Sprite ()
modifySprite f = modify $ \(g,s) -> (g, f s)
-- | Get the underlying SpriteData
getSprite :: (SpriteData -> a) -> Sprite a
getSprite f = fmap ( f . snd) get
coords :: (GLfloat -> GLfloat) -> (GLfloat -> GLfloat) -> Sprite ()
coords xf yf = (xCoord xf) >> (yCoord yf)
getCoords :: Sprite (GLfloat, GLfloat)
getCoords = getSprite $ \ s -> (spriteX s, spriteY s)
setCoords :: GLfloat -> GLfloat -> Sprite ()
setCoords xv yv = (setXCoord xv) >> (setYCoord yv)
xCoord :: (GLfloat -> GLfloat) -> Sprite ()
xCoord f = getXCoord >>= (setXCoord . f)
getXCoord :: Sprite GLfloat
getXCoord = getSprite spriteX
setXCoord :: GLfloat -> Sprite ()
setXCoord v = modifySprite $ \s -> s{ spriteX = v }
yCoord :: (GLfloat -> GLfloat) -> Sprite ()
yCoord f = getYCoord >>= (setYCoord . f)
getYCoord :: Sprite GLfloat
getYCoord = getSprite spriteY
setYCoord :: GLfloat -> Sprite ()
setYCoord v = modifySprite $ \s -> s{ spriteY = v }
z :: (GLfloat -> GLfloat) -> Sprite ()
z f = getZ >>= (setZ . f)
getZ :: Sprite GLfloat
getZ = getSprite $ \s -> case spriteZ s of
Auto _ -> 0
Manual v -> (v - 0.5) * 2.0
setZ :: GLfloat -> Sprite ()
setZ v = modifySprite $ \s -> s{ spriteZ = Manual (0.5 + v/2.0) }
scaling :: (GLfloat -> GLfloat) -> Sprite ()
scaling f = (scalingX f) >> (scalingY f)
getScaling :: Sprite GLfloat
getScaling = getScalingX
setScaling :: GLfloat -> Sprite ()
setScaling v = (setScalingX v) >> (setScalingY v)
scalingX :: (GLfloat -> GLfloat) -> Sprite ()
scalingX f = getScalingX >>= (setScalingX . f)
getScalingX :: Sprite GLfloat
getScalingX = getSprite spriteScalingX
setScalingX :: GLfloat -> Sprite ()
setScalingX v = modifySprite $ \s -> s{ spriteScalingX = v }
scalingY :: (GLfloat -> GLfloat) -> Sprite ()
scalingY f = getScalingY >>= (setScalingY . f)
getScalingY :: Sprite GLfloat
getScalingY = getSprite spriteScalingY
setScalingY :: GLfloat -> Sprite ()
setScalingY v = modifySprite $ \s -> s{ spriteScalingY = v }
rotation :: (GLfloat -> GLfloat) -> Sprite ()
rotation f = getRotation >>= (setRotation . f)
getRotation :: Sprite GLfloat
getRotation = getSprite spriteRotation
setRotation :: GLfloat -> Sprite ()
setRotation v = modifySprite $ \s -> s{ spriteRotation = v }
extra :: Typeable a => (a -> a) -> Sprite ()
extra f = getExtra_ >>= maybe (return ()) (setExtra . f)
-- | Returns either the passed in extra value or the default you pass in.
-- It also sets it as the default value if there was nothing set yet.
--
-- If you only use your data in one handler this can save you from having
-- to set it for the first time and instead you can just get it with the default value.
getExtra :: Typeable a => a -> Sprite a
getExtra d = do
(_,sData) <- get
case spriteExtra sData of
Nothing -> do
setExtra d
return d
Just (SpriteExtra v) -> return $ DY.fromMaybe d $ cast v
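-- An illustrative sketch using the extra data as a per-sprite frame counter
-- (the Int counter is hypothetical):
--
-- @
--   onTick $ do
--     n <- getExtra (0 :: Int)  -- first call stores and returns 0
--     setExtra (n + 1)
-- @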
-- | Gets the Maybe value with the extra data for your own use
getExtra_ :: Typeable a => Sprite (Maybe a)
getExtra_ = getSprite $ \s -> (spriteExtra s) >>= (\(SpriteExtra v) -> cast v)
setExtra :: Typeable a => a -> Sprite ()
setExtra v = modifySprite $ \s -> s{ spriteExtra = Just (SpriteExtra v) }
onTick :: Sprite () -> Sprite ()
onTick v = modifySprite $ \s -> s{ spriteOnTick = Just v }
clearOnTick :: Sprite ()
clearOnTick = modifySprite $ \s -> s{ spriteOnTick = Nothing }
onMouseDown :: Sprite () -> Sprite ()
onMouseDown v = modifySprite $ \s -> s{ spriteOnMouseDown = Just v }
clearOnMouseDown :: Sprite ()
clearOnMouseDown = modifySprite $ \s -> s{ spriteOnMouseDown = Nothing }
onMouseUp :: Sprite () -> Sprite ()
onMouseUp v = modifySprite $ \s -> s{ spriteOnMouseUp = Just v}
clearOnMouseUp :: Sprite ()
clearOnMouseUp = modifySprite $ \s -> s{ spriteOnMouseUp = Nothing }
onClick :: Sprite () -> Sprite ()
onClick v = modifySprite $ \s -> s{ spriteOnClick = Just v}
clearOnClick :: Sprite ()
clearOnClick = modifySprite $ \s -> s{ spriteOnClick = Nothing }
onMouseMove :: Sprite () -> Sprite ()
onMouseMove v = modifySprite $ \s -> s{ spriteOnMouseMove = Just v }
clearOnMouseMove :: Sprite ()
clearOnMouseMove = modifySprite $ \s -> s{ spriteOnMouseMove = Nothing }
onMouseEnter :: Sprite () -> Sprite ()
onMouseEnter v = modifySprite $ \s -> s{ spriteOnMouseEnter = Just v }
clearOnMouseEnter :: Sprite ()
clearOnMouseEnter = modifySprite $ \s -> s{ spriteOnMouseEnter = Nothing }
onMouseLeave :: Sprite () -> Sprite ()
onMouseLeave v = modifySprite $ \s -> s{ spriteOnMouseLeave = Just v }
clearOnMouseLeave :: Sprite ()
clearOnMouseLeave = modifySprite $ \s -> s{ spriteOnMouseLeave = Nothing }
onDropIn :: Sprite () -> Sprite ()
onDropIn v = modifySprite $ \s -> s{ spriteOnDropIn = Just v }
clearOnDropIn :: Sprite ()
clearOnDropIn = modifySprite $ \s -> s{ spriteOnDropIn = Nothing }
onDropOut :: Sprite () -> Sprite ()
onDropOut v = modifySprite $ \s -> s{ spriteOnDropOut = Just v }
clearOnDropOut :: Sprite ()
clearOnDropOut = modifySprite $ \s -> s{ spriteOnDropOut = Nothing }
onDrag :: Sprite () -> Sprite ()
onDrag v = modifySprite $ \s -> s{ spriteOnDrag = Just v }
clearOnDrag :: Sprite ()
clearOnDrag = modifySprite $ \s -> s{ spriteOnDrag = Nothing }
onDragIn :: Sprite () -> Sprite ()
onDragIn v = modifySprite $ \s -> s{ spriteOnDragIn = Just v }
clearOnDragIn :: Sprite ()
clearOnDragIn = modifySprite $ \s -> s{ spriteOnDragIn = Nothing }
onDragOut :: Sprite () -> Sprite ()
onDragOut v = modifySprite $ \s -> s{ spriteOnDragOut = Just v }
clearOnDragOut :: Sprite ()
clearOnDragOut = modifySprite $ \s -> s{ spriteOnDragOut = Nothing }
onKeyDown :: Sprite () -> Sprite ()
onKeyDown v = modifySprite $ \s -> s{ spriteOnKeyDown = Just v }
clearOnKeyDown :: Sprite ()
clearOnKeyDown = modifySprite $ \s -> s{ spriteOnKeyDown = Nothing }
onKeyUp :: Sprite () -> Sprite ()
onKeyUp v = modifySprite $ \s -> s{ spriteOnKeyUp = Just v }
clearOnKeyUp :: Sprite ()
clearOnKeyUp = modifySprite $ \s -> s{ spriteOnKeyUp = Nothing }
onKeyHold :: Sprite () -> Sprite ()
onKeyHold v = modifySprite $ \s -> s{ spriteOnKeyHold = Just v }
clearOnKeyHold :: Sprite ()
clearOnKeyHold = modifySprite $ \s -> s{ spriteOnKeyHold = Nothing }
-- | Shortcut for printing a string to stdout from a 'Sprite'
say :: String -> Sprite ()
say = liftIO . putStrLn
-- | A sprite's position, scale and rotation are always relative to its parent.
-- But sometimes you want to know where they stand relative to the window, this function
-- takes a point (x and y) locally relative to the sprite's origin, and
-- returns (xCoord, yCoord, scalingX, scalingY, rotation)
-- where xCoord and yCoord are the position of the same point but relative to the
-- window origin which is at the bottom left corner.
localsToGlobals :: GLfloat -> GLfloat -> Sprite (GLfloat, GLfloat, GLfloat, GLfloat, GLfloat)
localsToGlobals x y = do
(gData, sData) <- get
let (transforms, sx, sy, r) = unprojectSprite (sprites gData) (spritePath sData)
(Vertex3 x y _) <- runMockTransforms transforms gData
return (realToFrac x, realToFrac y, sx, sy, r)
where
runMockTransforms :: IO () -> GameData -> Sprite (Vertex3 GLdouble)
runMockTransforms transforms gData = liftIO $ preservingMatrix $ do
loadIdentity
ortho2D 0 (gameWidth gData) 0 (gameHeight gData)
m <- GL.get (matrix (Just Projection)) :: IO (GLmatrix GLdouble)
transforms
translate $ Vector3 x y (0::GLfloat)
m' <- GL.get (matrix (Just Projection)) :: IO (GLmatrix GLdouble)
v <- GL.get viewport
project (Vertex3 0 0 0) m m' v
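-- An illustrative sketch: find the window coordinates of this sprite's origin
-- (how you use them afterwards is up to you):
--
-- @
--   (wx, wy, sx, sy, rot) <- localsToGlobals 0 0
-- @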
-- | The opposite of localsToGlobals, this one translates some global window coordinates
-- into something relative to this sprite.
-- It is for instance used to translate the global window mouse position to local sprite ones.
globalsToLocals :: GLfloat -> GLfloat -> Sprite (GLfloat, GLfloat, GLfloat, GLfloat, GLfloat)
globalsToLocals x y = do
(gData, sData) <- get
let (transforms, sx, sy, r) = unprojectSprite (sprites gData) (spritePath sData)
(Vertex3 x' y' _) <- runMockTransforms transforms gData
return (realToFrac x', realToFrac y', sx, sy, r)
where
runMockTransforms :: IO () -> GameData -> Sprite (Vertex3 GLdouble)
runMockTransforms transforms gData = liftIO $ preservingMatrix $ do
matrixMode $= Projection
v <- GL.get viewport
loadIdentity
ortho2D 0 (gameWidth gData) 0 (gameHeight gData)
m <- GL.get (matrix (Just (Modelview 0))) :: IO (GLmatrix GLdouble)
transforms
m' <- GL.get (matrix (Just Projection)) :: IO (GLmatrix GLdouble)
unProject (Vertex3 (realToFrac x) (realToFrac y) 0) m m' v
-- In order to unproject a sprite we need to reproduce all the transformations
unprojectSprite :: (DM.Map String SpriteData) ->
String -> (IO (), GLfloat, GLfloat, GLfloat)
unprojectSprite allSprites target =
DM.foldlWithKey foldParents ((return ()),1,1,0) allSprites
where
foldParents accum@(transform,scalex,scaley,rot) path s = if path `DL.isPrefixOf` target
then
( ( transform >> (makeTransform s) )
, scalex * spriteScalingX s
, scaley * spriteScalingY s
, rot + spriteRotation s
)
else accum
makeTransform s = do
translate $ Vector3 (spriteX s) (spriteY s) 0
rotate (spriteRotation s) $ Vector3 0 0 (1::GLfloat)
scale (spriteScalingX s) (spriteScalingY s) 1
|
nubis/Monono
|
Graphics/UI/Monono/Sprite.hs
|
bsd-3-clause
| 21,990
| 0
| 20
| 4,569
| 5,210
| 2,842
| 2,368
| 318
| 6
|
{-# LANGUAGE OverloadedStrings #-}
module Site.Pandoc (
pandocFeedCompiler,
pandocCompiler,
readerOptions,
writerOptions
) where
import Site.Types
import Site.Pygments
import Site.TableOfContents
import Text.Pandoc
import Text.Pandoc.Walk (walk)
import Data.Maybe (fromMaybe)
import Hakyll hiding (pandocCompiler)
import qualified Data.Set as Set
import Text.Pandoc.CrossRef (runCrossRef, crossRefMeta, crossRefBlocks,
secPrefix)
import Text.Pandoc.Builder hiding (code)
import Data.List (find) -- TODO: necessary?
import Text.Blaze.Html (preEscapedToHtml, (!))
import Text.Blaze.Html.Renderer.String (renderHtml)
import qualified Text.Blaze.Html5 as H
import qualified Text.Blaze.Html5.Attributes as A
import Text.Regex.TDFA ((=~))
import Text.Regex (mkRegex, subRegex)
import qualified Data.Map as Map
import Control.Monad ((>=>))
pandocFeedCompiler :: Compiler (Item String)
pandocFeedCompiler =
pandocCompilerWithTransform readerOptions writerOptions' (walk ignoreTOC . walk removeTOCMarker)
where writerOptions' = writerOptions { writerHTMLMathMethod = PlainMath }
pandocCompiler :: Streams -> Compiler (Item String)
pandocCompiler streams = do
alignment <- fromMaybe "right" <$> ((flip getMetadataField) "toc" =<< getUnderlying)
abbrs <- abbreviationCollector <$> getResourceBody
let transformer =
return . abbreviations abbrs
>=> return . codeBreak
>=> return . crossRefs
>=> pygments streams
>=> return . tableOfContents alignment
pandocCompilerWithTransformM readerOptions writerOptions transformer
codeBreak :: Pandoc -> Pandoc
codeBreak = walk breakChars
crossRefs :: Pandoc -> Pandoc
crossRefs p@(Pandoc meta _) = runCrossRef metax fmt action p
where
fmt = Nothing
metax = secPrefix [str "section", str "sections"] <> meta
action (Pandoc _ bs) = do
meta' <- crossRefMeta
bs' <- crossRefBlocks bs
return $ Pandoc meta' bs'
-- | This AST transformation inserts a <http://en.wikipedia.org/wiki/Zero-width_space zero-width space>
-- before every matched delimiter.
--
-- I pattern match on the first two elements to avoid inserting it before the first two
-- characters. This is because regex-tdfa doesn't support negative lookaheads :(
breakChars :: Inline -> Inline
breakChars (Code attrs (x:y:xs)) = Code attrs $ x : y : subRegex pat xs "\x200b\&\\0"
where pat = mkRegex "/|\\.|::|:|#|,|\\["
breakChars (Code attrs code) = Code attrs code
breakChars x = x
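-- For illustration (taking the comment above at face value): an inline code span
-- such as
--
-- > Code nullAttr "Data.Map.insert"
--
-- keeps its first two characters untouched and gains a zero-width space before
-- each later '.', so long identifiers can wrap in the browser.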
abbreviationCollector :: Item String -> Abbreviations
abbreviationCollector item =
let pat = "^\\*\\[(.+)\\]: (.+)$" :: String
found = (itemBody item) =~ pat :: [[String]]
definitions = map (\(_:abbr:definition:_) -> (abbr, (mkRegex abbr, definition))) found
in Map.fromList definitions
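-- The collector appears to pick up PHP-Markdown-Extra style definitions, one per
-- line, e.g. (the abbreviation itself is illustrative):
--
-- > *[HTML]: HyperText Markup Language
--
-- which 'abbreviations' later uses to wrap occurrences of HTML in an <abbr> tag.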
abbreviations :: Abbreviations -> Pandoc -> Pandoc
abbreviations abbrs = walk (abbreviate abbrs)
abbreviate :: Abbreviations -> Block -> Block
abbreviate abbrs (Para inlines) = Para $ walk (substituteAbbreviation abbrs) inlines
abbreviate abbrs (Plain inlines) = Plain $ walk (substituteAbbreviation abbrs) inlines
abbreviate _ x = x
substituteAbbreviation :: Abbreviations -> Inline -> Inline
substituteAbbreviation abbrs (Str content) =
case find (content =~) (Map.keys abbrs) of
Just abbr -> replaceWithAbbr content abbr
Nothing -> Str content
where replaceWithAbbr string abbr =
let Just (pat, definition) = Map.lookup abbr abbrs
replacement = renderHtml $ H.abbr ! A.title (H.toValue definition) $ preEscapedToHtml abbr
in RawInline "html" $ subRegex pat string replacement
substituteAbbreviation _ x = x
readerOptions :: ReaderOptions
readerOptions =
let extensions = Set.fromList [
Ext_tex_math_dollars,
Ext_abbreviations
]
in def {
readerSmart = True,
readerExtensions = Set.union extensions (writerExtensions def)
}
writerOptions :: WriterOptions
writerOptions = def {
writerHTMLMathMethod = MathJax "",
writerHighlight = False,
writerHtml5 = True }
|
da-x/hakyll-site
|
src/Site/Pandoc.hs
|
bsd-3-clause
| 4,082
| 0
| 18
| 776
| 1,119
| 601
| 518
| -1
| -1
|
{-# LANGUAGE DefaultSignatures #-}
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE KindSignatures #-}
{-# LANGUAGE OverloadedStrings #-}
module Dang.TypeCheck.Subst (
Subst(), emptySubst, modifySkolems,
Zonk(), zonk, ftvs,
Unify(), unify,
) where
import Dang.ModuleSystem.Name (mkBinding,mkParam,ParamSource(..))
import Dang.Monad
import Dang.TypeCheck.AST (TVar(..),Type(..))
import Dang.Utils.PP
import Dang.Unique (withSupply)
import Dang.Syntax.Format (formatMessage)
import Dang.Syntax.Location (Source(..),interactive,emptyRange)
import Control.Monad (mzero,unless)
import qualified Data.Set as Set
import qualified Data.IntMap.Strict as IM
import qualified Data.Map.Strict as Map
import GHC.Generics
import MonadLib (runStateT,StateT,get,set,inBase)
-- Environment -----------------------------------------------------------------
data Subst = Subst { suCanon :: !(Map.Map TVar Int)
-- ^ Canonical names for unification variables -- unifying
-- two variables corresponds to manipulating this map only.
, suEnv :: !(IM.IntMap Type)
-- ^ Bindings to canonical names
, suNext :: !Int
-- ^ The next canonical name available.
, suSkolems :: !(Set.Set TVar)
-- ^ The set of Skolemized variables
}
emptySubst :: Subst
emptySubst = Subst Map.empty IM.empty 0 Set.empty
-- | Merge two variables in the substitution environment.
merge :: TVar -> TVar -> Subst -> Maybe Subst
merge a b Subst { .. } =
case (Map.lookup a suCanon, Map.lookup b suCanon) of
(Just{}, Just{}) -> Nothing
(Just x, Nothing) ->
Just Subst { suCanon = Map.insert b x suCanon, .. }
(Nothing, Just x) ->
Just Subst { suCanon = Map.insert a x suCanon, .. }
(Nothing,Nothing) ->
Just Subst { suCanon = Map.insert a suNext
$ Map.insert b suNext suCanon
, suNext = suNext + 1
, .. }
-- | Insert a type into the environment.
insertType :: TVar -> Type -> Subst -> Subst
insertType a ty Subst { .. } =
case Map.lookup a suCanon of
Just ix ->
Subst { suEnv = IM.insert ix ty suEnv, .. }
Nothing ->
Subst { suCanon = Map.insert a suNext suCanon
, suEnv = IM.insert suNext ty suEnv
, suNext = suNext + 1
, .. }
-- | Modify the set of Skolem variables
modifySkolems :: (Set.Set TVar -> Set.Set TVar) -> (Subst -> Subst)
modifySkolems f Subst { .. } = Subst { suSkolems = f suSkolems, .. }
-- Monad -----------------------------------------------------------------------
type M = StateT Subst Dang
-- | Lookup the binding for a type variable, if it exists.
lookupType :: TVar -> M (Maybe Type)
lookupType var =
do Subst { .. } <- get
case Map.lookup var suCanon of
Just i -> return (IM.lookup i suEnv)
Nothing -> return Nothing
-- | The two types failed to unify.
unificationFailed :: (PP a, PP b) => a -> b -> M r
unificationFailed expected found =
do addError ErrUnification $
vcat [ hang (text "Expected type:") 2 (pp expected)
, hang (text " Found type:") 2 (pp found) ]
mzero
occursCheckFailed :: TVar -> Type -> M a
occursCheckFailed var ty =
do addError ErrInfiniteType $
hang (text "Cannot construct the infinite type:")
2 (pp (TFree var) <+> char '~' <+> pp ty)
mzero
-- Zonking ---------------------------------------------------------------------
-- | Remove type variables from a type.
zonk :: (Zonk a, DangM m) => Subst -> a -> m a
zonk su a = inBase (fst `fmap` runStateT su (zonk' Set.empty a))
ftvs :: (Zonk a, DangM m) => Subst -> a -> m (Set.Set TVar)
ftvs su a = inBase (fst `fmap` runStateT su (ftvs' Set.empty a))
class Zonk a where
zonk' :: Set.Set TVar -> a -> M a
ftvs' :: Set.Set TVar -> a -> M (Set.Set TVar)
default zonk' :: (Generic a, GZonk (Rep a)) => Set.Set TVar -> a -> M a
zonk' seen a = to `fmap` gzonk' seen (from a)
default ftvs' :: (Generic a, GZonk (Rep a)) => Set.Set TVar -> a -> M (Set.Set TVar)
ftvs' seen a = gftvs' seen (from a)
instance Zonk ()
instance Zonk a => Zonk (Maybe a)
instance Zonk a => Zonk [a]
resolve :: Set.Set TVar -> TVar -> M (Maybe (Set.Set TVar,Type))
resolve seen v =
do Subst { .. } <- get
case Map.lookup v suCanon of
Just i ->
case IM.lookup i suEnv of
Just ty' | v `Set.member` seen -> occursCheckFailed v ty'
| otherwise -> return (Just (Set.insert v seen,ty'))
Nothing -> return Nothing
Nothing -> return Nothing
instance Zonk Type where
zonk' seen ty@(TFree v) =
do mb <- resolve seen v
case mb of
Just (seen',ty') -> zonk' seen' ty'
Nothing -> return ty
zonk' _ ty@TGen{} =
return ty
zonk' _ ty@TCon{} =
return ty
zonk' seen (TApp f x) =
do f' <- zonk' seen f
x' <- zonk' seen x
return (TApp f' x')
zonk' seen (TFun a b) =
do a' <- zonk' seen a
b' <- zonk' seen b
return (TFun a' b')
ftvs' seen (TFree v) =
do mb <- resolve seen v
case mb of
Just (seen', ty') -> ftvs' seen' ty'
Nothing -> return Set.empty
ftvs' _ TGen{} =
return Set.empty
ftvs' _ TCon{} =
return Set.empty
ftvs' seen (TApp a b) =
do as <- ftvs' seen a
bs <- ftvs' seen b
return (as `Set.union` bs)
ftvs' seen (TFun a b) =
do as <- ftvs' seen a
bs <- ftvs' seen b
return (as `Set.union` bs)
class GZonk (f :: * -> *) where
gzonk' :: Set.Set TVar -> f a -> M (f a)
gftvs' :: Set.Set TVar -> f a -> M (Set.Set TVar)
instance GZonk U1 where
gzonk' _ u = return u
gftvs' _ _ = return Set.empty
instance Zonk a => GZonk (K1 i a) where
gzonk' seen (K1 a) = K1 `fmap` zonk' seen a
gftvs' seen (K1 a) = ftvs' seen a
instance GZonk f => GZonk (M1 i c f) where
gzonk' seen (M1 f) = M1 `fmap` gzonk' seen f
gftvs' seen (M1 f) = gftvs' seen f
instance (GZonk f, GZonk g) => GZonk (f :+: g) where
gzonk' seen (L1 f) = L1 `fmap` gzonk' seen f
gzonk' seen (R1 g) = R1 `fmap` gzonk' seen g
gftvs' seen (L1 f) = gftvs' seen f
gftvs' seen (R1 g) = gftvs' seen g
instance (GZonk f, GZonk g) => GZonk (f :*: g) where
gzonk' seen (f :*: g) =
do f' <- gzonk' seen f
g' <- gzonk' seen g
return (f' :*: g')
gftvs' seen (f :*: g) =
do fs <- gftvs' seen f
gs <- gftvs' seen g
return (fs `Set.union` gs)
-- Unification -----------------------------------------------------------------
unify :: (Unify a, DangM m) => Subst -> a -> a -> m Subst
unify su a b = inBase (snd `fmap` runStateT su (unify' a b))
class (PP a, Zonk a) => Unify a where
unify' :: a -> a -> M ()
default unify' :: (Generic a, GUnify (Rep a)) => a -> a -> M ()
unify' a b =
do success <- gunify' (from a) (from b)
unless success (unificationFailed a b)
instance (PP a, Unify a) => Unify (Maybe a)
instance (PP a, Unify a) => Unify [a]
instance Unify Type where
unify' (TFree a) ty =
do mb <- lookupType a
case mb of
Just ty' -> unify' ty' ty
Nothing -> bindVar a ty
unify' ty (TFree a) =
do mb <- lookupType a
case mb of
Just ty' -> unify' ty ty'
Nothing -> bindVar a ty
unify' (TCon a) (TCon b) | a == b = return ()
unify' (TGen a) (TGen b) | a == b = return ()
unify' (TApp a b) (TApp x y) =
do unify' a x
unify' b y
unify' (TFun a b) (TFun x y) =
do unify' a x
unify' b y
unify' a b = unificationFailed a b
class GZonk f => GUnify f where
gunify' :: f a -> f b -> M Bool
instance GUnify U1 where
gunify' U1 U1 = return True
instance Unify a => GUnify (K1 i a) where
gunify' (K1 a) (K1 b) =
do unify' a b
return True
instance GUnify f => GUnify (M1 i c f) where
gunify' (M1 a) (M1 b) = gunify' a b
instance (GUnify f, GUnify g) => GUnify (f :+: g) where
gunify' (L1 a) (L1 b) = gunify' a b
gunify' (R1 a) (R1 b) = gunify' a b
gunify' _ _ = return False
instance (GUnify f, GUnify g) => GUnify (f :*: g) where
gunify' (x :*: y) (a :*: b) =
do r <- gunify' x a
if r then gunify' y b
else return r
bindVar :: TVar -> Type -> M ()
bindVar var ty
-- trivial case of a unification variable unifying with itself
| TFree var == ty = return ()
-- XXX should do kind checking as well
-- merge variables
| TFree var' <- ty =
do su <- get
case merge var var' su of
Just su' -> set $! su'
Nothing -> unificationFailed (TFree var) ty
-- allocate a fresh canonical name, and insert into the environment
| otherwise =
do su <- get
set $! insertType var ty su
test = runDang $
do cxt <- withSupply (mkBinding "Main" "cxt" emptyRange)
fooC <- withSupply (mkBinding "Main" "Foo" emptyRange)
a <- withSupply (mkParam (FromBind cxt) "a" emptyRange)
b <- withSupply (mkParam (FromBind cxt) "b" emptyRange)
su <- unify emptySubst (TFree (TVar a)) (TCon fooC)
su' <- unify su (TFree (TVar a)) (TFree (TVar b))
fun <- zonk su' (TFun (TFree (TVar a)) (TFree (TVar b)))
io (print (pp fun))
c <- withSupply (mkParam (FromBind cxt) "c" emptyRange)
let var = TFree (TVar c)
su'' <- unify su' var (TFun var var)
(c',ms) <- collectMessages (try (zonk su'' var))
io (mapM_ (print . formatMessage interactive "") ms)
io (print c')
return ()
|
elliottt/dang
|
src/Dang/TypeCheck/Subst.hs
|
bsd-3-clause
| 9,792
| 0
| 19
| 2,831
| 3,963
| 1,970
| 1,993
| 251
| 4
|
{-# LANGUAGE OverloadedStrings #-}
import System.IO
import Text.XML.Pipe
import Network
import HttpPush
main :: IO ()
main = do
ch <- connectTo "localhost" $ PortNumber 80
soc <- listenOn $ PortNumber 8080
(sh, _, _) <- accept soc
testPusher (undefined :: HttpPush Handle) (Two ch sh)
(HttpPushArgs "localhost" 80 "/" gtPth wntRspns)
wntRspns :: XmlNode -> Bool
wntRspns (XmlNode (_, "monologue") _ [] []) = False
wntRspns _ = True
gtPth :: XmlNode -> FilePath
gtPth (XmlNode (_, "father") _ [] []) = "family"
gtPth _ = "others"
|
YoshikuniJujo/forest
|
subprojects/xml-push/testHttpPushE.hs
|
bsd-3-clause
| 540
| 2
| 9
| 99
| 221
| 114
| 107
| 18
| 1
|
module Expressions
( module Expressions.Expressions
, module Expressions.Parser
, module Expressions.Printer
)
where
import Expressions.Expressions
import Expressions.Parser
import Expressions.Printer
|
etu-fkti5301-bgu/alt-exam_automated_theorem_proving
|
src/Expressions.hs
|
bsd-3-clause
| 217
| 0
| 5
| 35
| 39
| 25
| 14
| 7
| 0
|
module WhereDoesYourGardenGrow where
-- 1. Given the type
-- data FlowerType =
-- Gardenia
-- | Daisy
-- | Rose
-- | Lilac
-- deriving Show
type Gardener = String
-- data Garden =
-- Garden Gardener FlowerType
-- deriving Show
-- What is the sum of products form of Garden?
data Garden =
Gardenia Gardener
| Daisy Gardener
| Rose Gardener
| Lilac Gardener
deriving Show
|
brodyberg/Notes
|
ProjectRosalind.hsproj/LearnHaskell/lib/HaskellBook/WhereDoesYourGardenGrowChapter11.hs
|
mit
| 404
| 0
| 6
| 101
| 48
| 34
| 14
| 8
| 0
|
--
--
--
-----------------
-- Exercise 4.11.
-----------------
--
--
--
module E'4'11 where
-- import Test.QuickCheck hiding ( Result )
--
-- Avoids ambiguous occurrences of Result that would cause an exception
-- if QuickCheck is imported without care.
data Result
= Win
| Lose
| Draw
deriving Eq
-- Make it possible to "show" a Result:
instance Show Result
where
show Win = "W"
show Lose = "L"
show Draw = "D"
-- Ignore this, for now.
|
pascal-knodel/haskell-craft
|
_/links/E'4'11.hs
|
mit
| 487
| 0
| 6
| 129
| 67
| 44
| 23
| 10
| 0
|
f :: a -> b -> Bool
f = undefined :: Int -> Int -> Bool
|
roberth/uu-helium
|
test/typeerrors/Examples/TooGeneral.hs
|
gpl-3.0
| 56
| 0
| 6
| 16
| 30
| 16
| 14
| 2
| 1
|
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- Module : Network.AWS.EC2.DeleteVpnConnectionRoute
-- Copyright : (c) 2013-2014 Brendan Hay <brendan.g.hay@gmail.com>
-- License : This Source Code Form is subject to the terms of
-- the Mozilla Public License, v. 2.0.
-- A copy of the MPL can be found in the LICENSE file or
-- you can obtain it at http://mozilla.org/MPL/2.0/.
-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
-- Stability : experimental
-- Portability : non-portable (GHC extensions)
--
-- Derived from AWS service descriptions, licensed under Apache 2.0.
-- | Deletes the specified static route associated with a VPN connection between
-- an existing virtual private gateway and a VPN customer gateway. The static
-- route allows traffic to be routed from the virtual private gateway to the VPN
-- customer gateway.
--
-- <http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DeleteVpnConnectionRoute.html>
module Network.AWS.EC2.DeleteVpnConnectionRoute
(
-- * Request
DeleteVpnConnectionRoute
-- ** Request constructor
, deleteVpnConnectionRoute
-- ** Request lenses
, dvcrDestinationCidrBlock
, dvcrVpnConnectionId
-- * Response
, DeleteVpnConnectionRouteResponse
-- ** Response constructor
, deleteVpnConnectionRouteResponse
) where
import Network.AWS.Prelude
import Network.AWS.Request.Query
import Network.AWS.EC2.Types
import qualified GHC.Exts
data DeleteVpnConnectionRoute = DeleteVpnConnectionRoute
{ _dvcrDestinationCidrBlock :: Text
, _dvcrVpnConnectionId :: Text
} deriving (Eq, Ord, Read, Show)
-- | 'DeleteVpnConnectionRoute' constructor.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'dvcrDestinationCidrBlock' @::@ 'Text'
--
-- * 'dvcrVpnConnectionId' @::@ 'Text'
--
deleteVpnConnectionRoute :: Text -- ^ 'dvcrVpnConnectionId'
-> Text -- ^ 'dvcrDestinationCidrBlock'
-> DeleteVpnConnectionRoute
deleteVpnConnectionRoute p1 p2 = DeleteVpnConnectionRoute
{ _dvcrVpnConnectionId = p1
, _dvcrDestinationCidrBlock = p2
}
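-- An illustrative request (the connection ID and CIDR block below are made up):
--
-- > deleteVpnConnectionRoute "vpn-1a2b3c4d" "192.168.10.0/24"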
-- | The CIDR block associated with the local subnet of the customer network.
dvcrDestinationCidrBlock :: Lens' DeleteVpnConnectionRoute Text
dvcrDestinationCidrBlock =
lens _dvcrDestinationCidrBlock
(\s a -> s { _dvcrDestinationCidrBlock = a })
-- | The ID of the VPN connection.
dvcrVpnConnectionId :: Lens' DeleteVpnConnectionRoute Text
dvcrVpnConnectionId =
lens _dvcrVpnConnectionId (\s a -> s { _dvcrVpnConnectionId = a })
data DeleteVpnConnectionRouteResponse = DeleteVpnConnectionRouteResponse
deriving (Eq, Ord, Read, Show, Generic)
-- | 'DeleteVpnConnectionRouteResponse' constructor.
deleteVpnConnectionRouteResponse :: DeleteVpnConnectionRouteResponse
deleteVpnConnectionRouteResponse = DeleteVpnConnectionRouteResponse
instance ToPath DeleteVpnConnectionRoute where
toPath = const "/"
instance ToQuery DeleteVpnConnectionRoute where
toQuery DeleteVpnConnectionRoute{..} = mconcat
[ "DestinationCidrBlock" =? _dvcrDestinationCidrBlock
, "VpnConnectionId" =? _dvcrVpnConnectionId
]
instance ToHeaders DeleteVpnConnectionRoute
instance AWSRequest DeleteVpnConnectionRoute where
type Sv DeleteVpnConnectionRoute = EC2
type Rs DeleteVpnConnectionRoute = DeleteVpnConnectionRouteResponse
request = post "DeleteVpnConnectionRoute"
response = nullResponse DeleteVpnConnectionRouteResponse
|
romanb/amazonka
|
amazonka-ec2/gen/Network/AWS/EC2/DeleteVpnConnectionRoute.hs
|
mpl-2.0
| 3,982
| 0
| 9
| 800
| 396
| 243
| 153
| 55
| 1
|
module Main where
import qualified Text.XML.Expat.UnitTests
import qualified Text.XML.Expat.Cursor.Tests
import qualified Text.XML.Expat.Proc.Tests
import qualified Text.XML.Expat.ParseFormat
import qualified Text.XML.Expat.ParallelTest
import Test.Framework (defaultMain, testGroup)
main :: IO ()
main = defaultMain tests
where tests = [ testGroup "unit tests"
Text.XML.Expat.UnitTests.tests
, testGroup "Text.XML.Expat.Proc"
Text.XML.Expat.Proc.Tests.tests
, testGroup "Text.XML.Expat.Cursor"
Text.XML.Expat.Cursor.Tests.tests
, testGroup "Text.XML.Expat.ParseFormat"
Text.XML.Expat.ParseFormat.tests
, testGroup "Text.XML.Expat.ParallelTest"
Text.XML.Expat.ParallelTest.tests
]
|
the-real-blackh/hexpat
|
test/suite/TestSuite.hs
|
bsd-3-clause
| 907
| 0
| 9
| 281
| 151
| 98
| 53
| 19
| 1
|
{-# LANGUAGE ScopedTypeVariables, DeriveDataTypeable, ViewPatterns #-}
import Network.TLS
import Network.TLS.Extra.Cipher
import Network.BSD
import Network.Socket
import Data.Default.Class
import Data.IORef
import Data.X509 as X509
import Data.X509.Validation
import System.X509
import Control.Applicative
import Control.Monad
import Control.Exception
import Data.Char (isDigit)
import Data.PEM
import Text.Printf
import System.Console.GetOpt
import System.Environment
import System.Exit
import qualified Data.ByteString.Char8 as B
openConnection s p = do
ref <- newIORef Nothing
let params = (defaultParamsClient s (B.pack p))
{ clientSupported = def { supportedCiphers = ciphersuite_all }
, clientShared = def { sharedValidationCache = noValidate }
}
--ctx <- connectionClient s p params rng
pn <- if and $ map isDigit $ p
then return $ fromIntegral $ (read p :: Int)
else servicePort <$> getServiceByName p "tcp"
he <- getHostByName s
sock <- bracketOnError (socket AF_INET Stream defaultProtocol) sClose $ \sock -> do
connect sock (SockAddrInet pn (head $ hostAddresses he))
return sock
ctx <- contextNew sock params
contextHookSetCertificateRecv ctx $ \l -> modifyIORef ref (const $ Just l)
_ <- handshake ctx
bye ctx
r <- readIORef ref
case r of
Nothing -> error "cannot retrieve any certificate"
Just certs -> return certs
where noValidate = ValidationCache (\_ _ _ -> return ValidationCachePass)
(\_ _ _ -> return ())
data Flag = PrintChain
| Format String
| Verify
| GetFingerprint
| VerifyFQDN String
| Help
deriving (Show,Eq)
options :: [OptDescr Flag]
options =
[ Option [] ["chain"] (NoArg PrintChain) "output the chain of certificate used"
, Option [] ["format"] (ReqArg Format "format") "define the output format (full, pem, default: simple)"
, Option [] ["verify"] (NoArg Verify) "verify the chain received with the trusted system certificate"
, Option [] ["fingerprint"] (NoArg GetFingerprint) "show fingerprint (SHA1)"
, Option [] ["verify-domain-name"] (ReqArg VerifyFQDN "fqdn") "verify the chain against a specific FQDN"
, Option ['h'] ["help"] (NoArg Help) "request help"
]
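-- An illustrative invocation, per the usage string below (the hostname is made up):
--
--   retrieve-certificate --chain --format pem example.com 443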
showCert "pem" cert = B.putStrLn $ pemWriteBS pem
where pem = PEM { pemName = "CERTIFICATE"
, pemHeader = []
, pemContent = encodeSignedObject cert
}
showCert "full" cert = putStrLn $ show cert
showCert _ (signedCert) = do
putStrLn ("serial: " ++ (show $ certSerial cert))
putStrLn ("issuer: " ++ (show $ certIssuerDN cert))
putStrLn ("subject: " ++ (show $ certSubjectDN cert))
putStrLn ("validity: " ++ (show $ fst $ certValidity cert) ++ " to " ++ (show $ snd $ certValidity cert))
where cert = getCertificate signedCert
printUsage =
putStrLn $ usageInfo "usage: retrieve-certificate [opts] <hostname> [port]\n\n\t(port default to: 443)\noptions:\n" options
main = do
args <- getArgs
let (opts,other,errs) = getOpt Permute options args
when (not $ null errs) $ do
putStrLn $ show errs
exitFailure
when (Help `elem` opts) $ do
printUsage
exitSuccess
case other of
[destination,port] -> doMain destination port opts
_ -> printUsage >> exitFailure
where outputFormat [] = "simple"
outputFormat (Format s:_ ) = s
outputFormat (_ :xs) = outputFormat xs
getFQDN [] = Nothing
getFQDN (VerifyFQDN fqdn:_) = Just fqdn
getFQDN (_:xs) = getFQDN xs
doMain destination port opts = do
_ <- printf "connecting to %s on port %s ...\n" destination port
chain <- openConnection destination port
let (CertificateChain certs) = chain
format = outputFormat opts
fqdn = getFQDN opts
case PrintChain `elem` opts of
True ->
forM_ (zip [0..] certs) $ \(n, cert) -> do
putStrLn ("###### Certificate " ++ show (n + 1 :: Int) ++ " ######")
showCert format cert
False ->
showCert format $ head certs
let fingerprints = foldl (doFingerprint (head certs)) [] opts
unless (null fingerprints) $ putStrLn ("Fingerprints:")
mapM_ (\(alg,fprint) -> putStrLn (" " ++ alg ++ " = " ++ show fprint)) $ concat fingerprints
when (Verify `elem` opts) $ do
store <- getSystemCertificateStore
putStrLn "### certificate chain trust"
let checks = defaultChecks { checkExhaustive = True
, checkFQHN = maybe False (const True) fqdn }
servId = (maybe "" id fqdn, B.empty)
reasons <- validate X509.HashSHA256 def checks store def servId chain
when (not $ null reasons) $ do putStrLn "fail validation:"
putStrLn $ show reasons
doFingerprint cert acc GetFingerprint =
[ ("SHA1", getFingerprint cert X509.HashSHA1)
, ("SHA256", getFingerprint cert X509.HashSHA256)
, ("SHA512", getFingerprint cert X509.HashSHA512)
] : acc
doFingerprint _ acc _ = acc
|
lancelotsix/hs-tls
|
debug/src/RetrieveCertificate.hs
|
bsd-3-clause
| 5,628
| 8
| 22
| 1,811
| 1,600
| 808
| 792
| 119
| 8
|
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE helpset PUBLIC "-//Sun Microsystems Inc.//DTD JavaHelp HelpSet Version 2.0//EN" "http://java.sun.com/products/javahelp/helpset_2_0.dtd">
<helpset version="2.0" xml:lang="sr-CS">
<title>Port Scan | ZAP Extension</title>
<maps>
<homeID>top</homeID>
<mapref location="map.jhm"/>
</maps>
<view>
<name>TOC</name>
<label>Contents</label>
<type>org.zaproxy.zap.extension.help.ZapTocView</type>
<data>toc.xml</data>
</view>
<view>
<name>Index</name>
<label>Index</label>
<type>javax.help.IndexView</type>
<data>index.xml</data>
</view>
<view>
<name>Search</name>
<label>Search</label>
<type>javax.help.SearchView</type>
<data engine="com.sun.java.help.search.DefaultSearchEngine">
JavaHelpSearch
</data>
</view>
<view>
<name>Favorites</name>
<label>Favorites</label>
<type>javax.help.FavoritesView</type>
</view>
</helpset>
|
veggiespam/zap-extensions
|
addOns/zest/src/main/javahelp/org/zaproxy/zap/extension/zest/resources/help_sr_CS/helpset_sr_CS.hs
|
apache-2.0
| 971
| 85
| 52
| 160
| 398
| 210
| 188
| -1
| -1
|
<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE helpset PUBLIC "-//Sun Microsystems Inc.//DTD JavaHelp HelpSet Version 2.0//EN" "http://java.sun.com/products/javahelp/helpset_2_0.dtd">
<helpset version="2.0" xml:lang="da-DK">
<title>Windows WebDrivers</title>
<maps>
<homeID>top</homeID>
<mapref location="map.jhm"/>
</maps>
<view>
<name>TOC</name>
<label>Contents</label>
<type>org.zaproxy.zap.extension.help.ZapTocView</type>
<data>toc.xml</data>
</view>
<view>
<name>Index</name>
<label>Index</label>
<type>javax.help.IndexView</type>
<data>index.xml</data>
</view>
<view>
<name>Search</name>
<label>Search</label>
<type>javax.help.SearchView</type>
<data engine="com.sun.java.help.search.DefaultSearchEngine">
JavaHelpSearch
</data>
</view>
<view>
<name>Favorites</name>
<label>Favorites</label>
<type>javax.help.FavoritesView</type>
</view>
</helpset>
|
thc202/zap-extensions
|
addOns/webdrivers/webdriverwindows/src/main/javahelp/org/zaproxy/zap/extension/webdriverwindows/resources/help_da_DK/helpset_da_DK.hs
|
apache-2.0
| 963
| 82
| 52
| 156
| 390
| 206
| 184
| -1
| -1
|
module Overload where
import Maybe(isJust)
class ToBool a where toBool :: a -> Bool
instance ToBool (Maybe a) where toBool = maybeToBool
maybeToBool = isJust
assert Trivial = {True} === {True}
assert Simple = {maybeToBool (Just 'a')} === {True}
assert Overloaded = {toBool (Just 'a')} === {True}
|
forste/haReFork
|
tools/property/tests/Overload.hs
|
bsd-3-clause
| 303
| 5
| 8
| 54
| 116
| 72
| 44
| -1
| -1
|
{-
(c) The GRASP/AQUA Project, Glasgow University, 1993-1998
\section[Specialise]{Stamping out overloading, and (optionally) polymorphism}
-}
{-# LANGUAGE CPP #-}
module Specialise ( specProgram, specUnfolding ) where
#include "HsVersions.h"
import Id
import TcType hiding( substTy, extendTvSubstList )
import Type hiding( substTy, extendTvSubstList )
import Coercion( Coercion )
import Module( Module, HasModule(..) )
import CoreMonad
import qualified CoreSubst
import CoreUnfold
import VarSet
import VarEnv
import CoreSyn
import Rules
import CoreUtils ( exprIsTrivial, applyTypeToArgs )
import CoreFVs ( exprFreeVars, exprsFreeVars, idFreeVars )
import UniqSupply
import Name
import MkId ( voidArgId, voidPrimId )
import Maybes ( catMaybes, isJust )
import BasicTypes
import HscTypes
import Bag
import DynFlags
import Util
import Outputable
import FastString
import State
#if __GLASGOW_HASKELL__ < 709
import Control.Applicative (Applicative(..))
#endif
import Control.Monad
import Data.Map (Map)
import qualified Data.Map as Map
import qualified FiniteMap as Map
{-
************************************************************************
* *
\subsection[notes-Specialise]{Implementation notes [SLPJ, Aug 18 1993]}
* *
************************************************************************
These notes describe how we implement specialisation to eliminate
overloading.
The specialisation pass works on Core
syntax, complete with all the explicit dictionary application,
abstraction and construction as added by the type checker. The
existing type checker remains largely as it is.
One important thought: the {\em types} passed to an overloaded
function, and the {\em dictionaries} passed are mutually redundant.
If the same function is applied to the same type(s) then it is sure to
be applied to the same dictionary(s)---or rather to the same {\em
values}. (The arguments might look different but they will evaluate
to the same value.)
Second important thought: we know that we can make progress by
treating dictionary arguments as static and worth specialising on. So
we can do without binding-time analysis, and instead specialise on
dictionary arguments and no others.
The basic idea
~~~~~~~~~~~~~~
Suppose we have
let f = <f_rhs>
in <body>
and suppose f is overloaded.
STEP 1: CALL-INSTANCE COLLECTION
We traverse <body>, accumulating all applications of f to types and
dictionaries.
(Might there be partial applications, to just some of its types and
dictionaries? In principle yes, but in practice the type checker only
builds applications of f to all its types and dictionaries, so partial
applications could only arise as a result of transformation, and even
then I think it's unlikely. In any case, we simply don't accumulate such
partial applications.)
STEP 2: EQUIVALENCES
So now we have a collection of calls to f:
f t1 t2 d1 d2
f t3 t4 d3 d4
...
Notice that f may take several type arguments. To avoid ambiguity, we
say that f is called at type t1/t2 and t3/t4.
We take equivalence classes using equality of the *types* (ignoring
the dictionary args, which as mentioned previously are redundant).
STEP 3: SPECIALISATION
For each equivalence class, choose a representative (f t1 t2 d1 d2),
and create a local instance of f, defined thus:
f@t1/t2 = <f_rhs> t1 t2 d1 d2
f_rhs presumably has some big lambdas and dictionary lambdas, so lots
of simplification will now result. However we don't actually *do* that
simplification. Rather, we leave it for the simplifier to do. If we
*did* do it, though, we'd get more call instances from the specialised
RHS. We can work out what they are by instantiating the call-instance
set from f's RHS with the types t1, t2.
Add this new id to f's IdInfo, to record that f has a specialised version.
Before doing any of this, check that f's IdInfo doesn't already
tell us about an existing instance of f at the required type/s.
(This might happen if specialisation was applied more than once, or
it might arise from user SPECIALIZE pragmas.)
Recursion
~~~~~~~~~
Wait a minute! What if f is recursive? Then we can't just plug in
its right-hand side, can we?
But it's ok. The type checker *always* creates non-recursive definitions
for overloaded recursive functions. For example:
f x = f (x+x) -- Yes I know its silly
becomes
f a (d::Num a) = let p = +.sel a d
in
letrec fl (y::a) = fl (p y y)
in
fl
We still have recursion for non-overloaded functions which we
specialise, but the recursive call should get specialised to the
same recursive version.
Polymorphism 1
~~~~~~~~~~~~~~
All this is crystal clear when the function is applied to *constant
types*; that is, types which have no type variables inside. But what if
it is applied to non-constant types? Suppose we find a call of f at type
t1/t2. There are two possibilities:
(a) The free type variables of t1, t2 are in scope at the definition point
of f. In this case there's no problem, we proceed just as before. A common
example is as follows. Here's the Haskell:
g y = let f x = x+x
in f y + f y
After typechecking we have
g a (d::Num a) (y::a) = let f b (d'::Num b) (x::b) = +.sel b d' x x
in +.sel a d (f a d y) (f a d y)
Notice that the call to f is at type "a"; a non-constant type.
Both calls to f are at the same type, so we can specialise to give:
g a (d::Num a) (y::a) = let f@a (x::a) = +.sel a d x x
in +.sel a d (f@a y) (f@a y)
(b) The other case is when the type variables in the instance types
are *not* in scope at the definition point of f. The example we are
working with above is a good case. There are two instances of (+.sel a d),
but "a" is not in scope at the definition of +.sel. Can we do anything?
Yes, we can "common them up", a sort of limited common sub-expression deal.
This would give:
g a (d::Num a) (y::a) = let +.sel@a = +.sel a d
f@a (x::a) = +.sel@a x x
in +.sel@a (f@a y) (f@a y)
This can save work, and can't be spotted by the type checker, because
the two instances of +.sel weren't originally at the same type.
Further notes on (b)
* There are quite a few variations here. For example, the defn of
  +.sel could be floated outside the \y, to attempt to gain laziness.
It certainly mustn't be floated outside the \d because the d has to
be in scope too.
* We don't want to inline f_rhs in this case, because
that will duplicate code. Just commoning up the call is the point.
* Nothing gets added to +.sel's IdInfo.
* Don't bother unless the equivalence class has more than one item!
Not clear whether this is all worth it. It is of course OK to
simply discard call-instances when passing a big lambda.
Polymorphism 2 -- Overloading
~~~~~~~~~~~~~~
Consider a function whose most general type is
f :: forall a b. Ord a => [a] -> b -> b
There is really no point in making a version of f at Int/Int and another
at Int/Bool, because it's only instancing the type variable "a" which
buys us any efficiency. Since f is completely polymorphic in b there
ain't much point in making separate versions of f for the different
b types.
That suggests that we should identify which of f's type variables
are constrained (like "a") and which are unconstrained (like "b").
Then when taking equivalence classes in STEP 2, we ignore the type args
corresponding to unconstrained type variable. In STEP 3 we make
polymorphic versions. Thus:
f@t1/ = /\b -> <f_rhs> t1 b d1 d2
We do this.
Dictionary floating
~~~~~~~~~~~~~~~~~~~
Consider this
f a (d::Num a) = let g = ...
in
...(let d1::Ord a = Num.Ord.sel a d in g a d1)...
Here, g is only called at one type, but the dictionary isn't in scope at the
definition point for g. Usually the type checker would build a
definition for d1 which enclosed g, but the transformation system
might have moved d1's defn inward. Solution: float dictionary bindings
outwards along with call instances.
Consider
f x = let g p q = p==q
h r s = (r+s, g r s)
in
h x x
Before specialisation, leaving out type abstractions we have
f df x = let g :: Eq a => a -> a -> Bool
g dg p q = == dg p q
h :: Num a => a -> a -> (a, Bool)
h dh r s = let deq = eqFromNum dh
in (+ dh r s, g deq r s)
in
h df x x
After specialising h we get a specialised version of h, like this:
h' r s = let deq = eqFromNum df
in (+ df r s, g deq r s)
But we can't naively make an instance for g from this, because deq is not in scope
at the defn of g. Instead, we have to float out the (new) defn of deq
to widen its scope. Notice that this floating can't be done in advance -- it only
shows up when specialisation is done.
User SPECIALIZE pragmas
~~~~~~~~~~~~~~~~~~~~~~~
Specialisation pragmas can be digested by the type checker, and implemented
by adding extra definitions along with that of f, in the same way as before
f@t1/t2 = <f_rhs> t1 t2 d1 d2
Indeed the pragmas *have* to be dealt with by the type checker, because
only it knows how to build the dictionaries d1 and d2! For example
g :: Ord a => [a] -> [a]
{-# SPECIALIZE f :: [Tree Int] -> [Tree Int] #-}
Here, the specialised version of g is an application of g's rhs to the
Ord dictionary for (Tree Int), which only the type checker can conjure
up. There might not even *be* one, if (Tree Int) is not an instance of
Ord! (All the other specialisation has suitable dictionaries to hand
from actual calls.)
Problem. The type checker doesn't have to hand a convenient <f_rhs>, because
it is buried in a complex (as-yet-un-desugared) binding group.
Maybe we should say
f@t1/t2 = f* t1 t2 d1 d2
where f* is the Id f with an IdInfo which says "inline me regardless!".
Indeed all the specialisation could be done in this way.
That in turn means that the simplifier has to be prepared to inline absolutely
any in-scope let-bound thing.
Again, the pragma should permit polymorphism in unconstrained variables:
h :: Ord a => [a] -> b -> b
{-# SPECIALIZE h :: [Int] -> b -> b #-}
We *insist* that all overloaded type variables are specialised to ground types,
(and hence there can be no context inside a SPECIALIZE pragma).
We *permit* unconstrained type variables to be specialised to
- a ground type
- or left as a polymorphic type variable
but nothing in between. So
{-# SPECIALIZE h :: [Int] -> [c] -> [c] #-}
is *illegal*. (It can be handled, but it adds complication, and gains the
programmer nothing.)
SPECIALISING INSTANCE DECLARATIONS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
instance Foo a => Foo [a] where
...
{-# SPECIALIZE instance Foo [Int] #-}
The original instance decl creates a dictionary-function
definition:
dfun.Foo.List :: forall a. Foo a -> Foo [a]
The SPECIALIZE pragma just makes a specialised copy, just as for
ordinary function definitions:
dfun.Foo.List@Int :: Foo [Int]
dfun.Foo.List@Int = dfun.Foo.List Int dFooInt
The information about what instance of the dfun exist gets added to
the dfun's IdInfo in the same way as a user-defined function too.
Automatic instance decl specialisation?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Can instance decls be specialised automatically? It's tricky.
We could collect call-instance information for each dfun, but
then when we specialised their bodies we'd get new call-instances
for ordinary functions; and when we specialised their bodies, we might get
new call-instances of the dfuns, and so on. This all arises because of
the unrestricted mutual recursion between instance decls and value decls.
Still, there's no actual problem; it just means that we may not do all
the specialisation we could theoretically do.
Furthermore, instance decls are usually exported and used non-locally,
so we'll want to compile enough to get those specialisations done.
Lastly, there's no such thing as a local instance decl, so we can
survive solely by spitting out *usage* information, and then reading that
back in as a pragma when next compiling the file. So for now,
we only specialise instance decls in response to pragmas.
SPITTING OUT USAGE INFORMATION
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To spit out usage information we need to traverse the code collecting
call-instance information for all imported (non-prelude?) functions
and data types. Then we equivalence-class it and spit it out.
This is done at the top-level when all the call instances which escape
must be for imported functions and data types.
*** Not currently done ***
Partial specialisation by pragmas
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
What about partial specialisation:
k :: (Ord a, Eq b) => [a] -> b -> b -> [a]
        {-# SPECIALIZE k :: Eq b => [Int] -> b -> b -> [Int] #-}
or even
        {-# SPECIALIZE k :: Eq b => [Int] -> [b] -> [b] -> [Int] #-}
Seems quite reasonable. Similar things could be done with instance decls:
instance (Foo a, Foo b) => Foo (a,b) where
...
{-# SPECIALIZE instance Foo a => Foo (a,Int) #-}
{-# SPECIALIZE instance Foo b => Foo (Int,b) #-}
Ho hum. Things are complex enough without this. I pass.
Requirements for the simplifier
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The simplifier has to be able to take advantage of the specialisation.
* When the simplifier finds an application of a polymorphic f, it looks in
f's IdInfo in case there is a suitable instance to call instead. This converts
f t1 t2 d1 d2 ===> f_t1_t2
Note that the dictionaries get eaten up too!
* Dictionary selection operations on constant dictionaries must be
short-circuited:
+.sel Int d ===> +Int
The obvious way to do this is in the same way as other specialised
calls: +.sel has inside it some IdInfo which tells that if it's applied
to the type Int then it should eat a dictionary and transform to +Int.
In short, dictionary selectors need IdInfo inside them for constant
methods.
* Exactly the same applies if a superclass dictionary is being
extracted:
Eq.sel Int d ===> dEqInt
* Something similar applies to dictionary construction too. Suppose
dfun.Eq.List is the function taking a dictionary for (Eq a) to
one for (Eq [a]). Then we want
dfun.Eq.List Int d ===> dEq.List_Int
Where does the Eq [Int] dictionary come from? It is built in
response to a SPECIALIZE pragma on the Eq [a] instance decl.
In short, dfun Ids need IdInfo with a specialisation for each
constant instance of their instance declaration.
All this uses a single mechanism: the SpecEnv inside an Id
What does the specialisation IdInfo look like?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The SpecEnv of an Id maps a list of types (the template) to an expression
[Type] |-> Expr
For example, if f has this SpecInfo:
[Int, a] -> \d:Ord Int. f' a
it means that we can replace the call
f Int t ===> (\d. f' t)
This chucks one dictionary away and proceeds with the
specialised version of f, namely f'.
What can't be done this way?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
There is no way, post-typechecker, to get a dictionary for (say)
Eq a from a dictionary for Eq [a]. So if we find
==.sel [t] d
we can't transform to
eqList (==.sel t d')
where
eqList :: (a->a->Bool) -> [a] -> [a] -> Bool
Of course, we currently have no way to automatically derive
eqList, nor to connect it to the Eq [a] instance decl, but you
can imagine that it might somehow be possible. Taking advantage
of this is permanently ruled out.
Still, this is no great hardship, because we intend to eliminate
overloading altogether anyway!
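For the record, the eqList imagined here would look something like this
(just a sketch; nothing derives or generates it today):
        eqList :: (a -> a -> Bool) -> [a] -> [a] -> Bool
        eqList _  []     []     = True
        eqList eq (x:xs) (y:ys) = eq x y && eqList eq xs ys
        eqList _  _      _      = False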
A note about non-tyvar dictionaries
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Some Ids have types like
forall a,b,c. Eq a -> Ord [a] -> tau
This seems curious at first, because we usually only have dictionary
args whose types are of the form (C a) where a is a type variable.
But this doesn't hold for the functions arising from instance decls,
which sometimes get arguments with types of form (C (T a)) for some
type constructor T.
Should we specialise wrt this compound-type dictionary? We used to say
"no", saying:
"This is a heuristic judgement, as indeed is the fact that we
specialise wrt only dictionaries. We choose *not* to specialise
wrt compound dictionaries because at the moment the only place
they show up is in instance decls, where they are simply plugged
into a returned dictionary. So nothing is gained by specialising
wrt them."
But it is simpler and more uniform to specialise wrt these dicts too;
and in future GHC is likely to support full fledged type signatures
like
f :: Eq [(a,b)] => ...
************************************************************************
* *
\subsubsection{The new specialiser}
* *
************************************************************************
Our basic game plan is this. For let(rec) bound function
f :: (C a, D c) => (a,b,c,d) -> Bool
* Find any specialised calls of f, (f ts ds), where
ts are the type arguments t1 .. t4, and
ds are the dictionary arguments d1 .. d2.
* Add a new definition for f1 (say):
f1 = /\ b d -> (..body of f..) t1 b t3 d d1 d2
Note that we abstract over the unconstrained type arguments.
* Add the mapping
[t1,b,t3,d] |-> \d1 d2 -> f1 b d
to the specialisations of f. This will be used by the
simplifier to replace calls
(f t1 t2 t3 t4) da db
by
        (\d1 d2 -> f1 t2 t4) da db
All the stuff about how many dictionaries to discard, and what types
to apply the specialised function to, are handled by the fact that the
SpecEnv contains a template for the result of the specialisation.
We don't build *partial* specialisations for f. For example:
f :: Eq a => a -> a -> Bool
{-# SPECIALISE f :: (Eq b, Eq c) => (b,c) -> (b,c) -> Bool #-}
Here, little is gained by making a specialised copy of f.
There's a distinct danger that the specialised version would
first build a dictionary for (Eq b, Eq c), and then select the (==)
method from it! Even if it didn't, not a great deal is saved.
We do, however, generate polymorphic, but not overloaded, specialisations:
f :: Eq a => [a] -> b -> b -> b
... SPECIALISE f :: [Int] -> b -> b -> b ...
Hence, the invariant is this:
*** no specialised version is overloaded ***
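Concretely, the user-facing shape of such a polymorphic-but-not-overloaded
specialisation is (a standalone sketch, not part of this module):
        f :: Eq a => [a] -> b -> b -> b
        f xs y z = if xs == [] then y else z
        {-# SPECIALIZE f :: [Int] -> b -> b -> b #-}
The Eq constraint is discharged at Int, but b remains a type parameter of
the specialised copy; the copy is polymorphic yet takes no dictionaries.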
************************************************************************
* *
\subsubsection{The exported function}
* *
************************************************************************
-}
-- | Specialise calls to type-class overloaded functions occurring in a program.
specProgram :: ModGuts -> CoreM ModGuts
specProgram guts@(ModGuts { mg_module = this_mod
, mg_rules = local_rules
, mg_binds = binds })
= do { dflags <- getDynFlags
-- Specialise the bindings of this module
; (binds', uds) <- runSpecM dflags this_mod (go binds)
-- Specialise imported functions
; hpt_rules <- getRuleBase
; let rule_base = extendRuleBaseList hpt_rules local_rules
; (new_rules, spec_binds) <- specImports dflags this_mod top_env emptyVarSet
[] rule_base (ud_calls uds)
-- Don't forget to wrap the specialized bindings with bindings
-- for the needed dictionaries.
-- See Note [Wrap bindings returned by specImports]
; let spec_binds' = wrapDictBinds (ud_binds uds) spec_binds
; let final_binds
| null spec_binds' = binds'
| otherwise = Rec (flattenBinds spec_binds') : binds'
-- Note [Glom the bindings if imported functions are specialised]
; return (guts { mg_binds = final_binds
, mg_rules = new_rules ++ local_rules }) }
where
-- We need to start with a Subst that knows all the things
-- that are in scope, so that the substitution engine doesn't
-- accidentally re-use a unique that's already in use
-- Easiest thing is to do it all at once, as if all the top-level
-- decls were mutually recursive
top_env = SE { se_subst = CoreSubst.mkEmptySubst $ mkInScopeSet $ mkVarSet $
bindersOfBinds binds
, se_interesting = emptyVarSet }
go [] = return ([], emptyUDs)
go (bind:binds) = do (binds', uds) <- go binds
(bind', uds') <- specBind top_env bind uds
return (bind' ++ binds', uds')
{-
Note [Wrap bindings returned by specImports]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'specImports' returns a set of specialized bindings. However, these are lacking
necessary floated dictionary bindings, which are returned by
UsageDetails(ud_binds). These dictionaries need to be brought into scope with
'wrapDictBinds' before the bindings returned by 'specImports' can be used. See,
for instance, the 'specImports' call in 'specProgram'.
Note [Disabling cross-module specialisation]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Since GHC 7.10 we have performed specialisation of INLINEABLE bindings living
in modules outside of the current module. This can sometimes uncover user code
which explodes in size when aggressively optimized. The
-fno-cross-module-specialise option was introduced to allow users who are
bitten by such cases to revert to the pre-7.10 behaviour.
See Trac #10491
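For example, a module that suffers such a size blow-up can opt out with an
in-file pragma (a sketch; the same flag can equally be put in the build
system's ghc-options):
        {-# OPTIONS_GHC -fno-cross-module-specialise #-}
        module BigConsumer (run) where

        run :: IO ()
        run = return ()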
-}
-- | Specialise a set of calls to imported bindings
specImports :: DynFlags
-> Module
-> SpecEnv -- Passed in so that all top-level Ids are in scope
-> VarSet -- Don't specialise these ones
-- See Note [Avoiding recursive specialisation]
-> [Id] -- Stack of imported functions being specialised
-> RuleBase -- Rules from this module and the home package
-- (but not external packages, which can change)
-> CallDetails -- Calls for imported things, and floating bindings
-> CoreM ( [CoreRule] -- New rules
, [CoreBind] ) -- Specialised bindings
                                     -- See Note [Wrap bindings returned by specImports]
specImports dflags this_mod top_env done callers rule_base cds
-- See Note [Disabling cross-module specialisation]
| not $ gopt Opt_CrossModuleSpecialise dflags =
return ([], [])
| otherwise =
do { let import_calls = varEnvElts cds
; (rules, spec_binds) <- go rule_base import_calls
; return (rules, spec_binds) }
where
go :: RuleBase -> [CallInfoSet] -> CoreM ([CoreRule], [CoreBind])
go _ [] = return ([], [])
go rb (CIS fn calls_for_fn : other_calls)
= do { (rules1, spec_binds1) <- specImport dflags this_mod top_env
done callers rb fn $
Map.toList calls_for_fn
; (rules2, spec_binds2) <- go (extendRuleBaseList rb rules1) other_calls
; return (rules1 ++ rules2, spec_binds1 ++ spec_binds2) }
specImport :: DynFlags
-> Module
-> SpecEnv -- Passed in so that all top-level Ids are in scope
-> VarSet -- Don't specialise these
-- See Note [Avoiding recursive specialisation]
-> [Id] -- Stack of imported functions being specialised
-> RuleBase -- Rules from this module
-> Id -> [CallInfo] -- Imported function and calls for it
-> CoreM ( [CoreRule] -- New rules
, [CoreBind] ) -- Specialised bindings
specImport dflags this_mod top_env done callers rb fn calls_for_fn
| fn `elemVarSet` done
= return ([], []) -- No warning. This actually happens all the time
-- when specialising a recursive function, because
-- the RHS of the specialised function contains a recursive
-- call to the original function
| null calls_for_fn -- We filtered out all the calls in deleteCallsMentioning
= return ([], [])
| wantSpecImport dflags unfolding
, Just rhs <- maybeUnfoldingTemplate unfolding
= do { -- Get rules from the external package state
-- We keep doing this in case we "page-fault in"
-- more rules as we go along
; hsc_env <- getHscEnv
; eps <- liftIO $ hscEPS hsc_env
; vis_orphs <- getVisibleOrphanMods
; let full_rb = unionRuleBase rb (eps_rule_base eps)
rules_for_fn = getRules (RuleEnv full_rb vis_orphs) fn
; (rules1, spec_pairs, uds) <- -- pprTrace "specImport1" (vcat [ppr fn, ppr calls_for_fn, ppr rhs]) $
runSpecM dflags this_mod $
specCalls (Just this_mod) top_env rules_for_fn calls_for_fn fn rhs
; let spec_binds1 = [NonRec b r | (b,r) <- spec_pairs]
-- After the rules kick in we may get recursion, but
-- we rely on a global GlomBinds to sort that out later
-- See Note [Glom the bindings if imported functions are specialised]
-- Now specialise any cascaded calls
; (rules2, spec_binds2) <- -- pprTrace "specImport 2" (ppr fn $$ ppr rules1 $$ ppr spec_binds1) $
specImports dflags this_mod top_env
(extendVarSet done fn)
(fn:callers)
(extendRuleBaseList rb rules1)
(ud_calls uds)
-- Don't forget to wrap the specialized bindings with bindings
-- for the needed dictionaries
-- See Note [Wrap bindings returned by specImports]
; let final_binds = wrapDictBinds (ud_binds uds)
(spec_binds2 ++ spec_binds1)
; return (rules2 ++ rules1, final_binds) }
| warnMissingSpecs dflags callers
= do { warnMsg (vcat [ hang (ptext (sLit "Could not specialise imported function") <+> quotes (ppr fn))
2 (vcat [ ptext (sLit "when specialising") <+> quotes (ppr caller)
| caller <- callers])
, ifPprDebug (ptext (sLit "calls:") <+> vcat (map (pprCallInfo fn) calls_for_fn))
, ptext (sLit "Probable fix: add INLINEABLE pragma on") <+> quotes (ppr fn) ])
; return ([], []) }
| otherwise
= return ([], [])
where
unfolding = realIdUnfolding fn -- We want to see the unfolding even for loop breakers
warnMissingSpecs :: DynFlags -> [Id] -> Bool
-- See Note [Warning about missed specialisations]
warnMissingSpecs dflags callers
| wopt Opt_WarnAllMissedSpecs dflags = True
| not (wopt Opt_WarnMissedSpecs dflags) = False
| null callers = False
| otherwise = all has_inline_prag callers
where
has_inline_prag id = isAnyInlinePragma (idInlinePragma id)
wantSpecImport :: DynFlags -> Unfolding -> Bool
-- See Note [Specialise imported INLINABLE things]
wantSpecImport dflags unf
= case unf of
NoUnfolding -> False
OtherCon {} -> False
DFunUnfolding {} -> True
CoreUnfolding { uf_src = src, uf_guidance = _guidance }
| gopt Opt_SpecialiseAggressively dflags -> True
| isStableSource src -> True
-- Specialise even INLINE things; it hasn't inlined yet,
-- so perhaps it never will. Moreover it may have calls
-- inside it that we want to specialise
| otherwise -> False -- Stable, not INLINE, hence INLINEABLE
{- Note [Warning about missed specialisations]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose
* In module Lib, you carefully mark a function 'foo' INLINEABLE
* Import Lib(foo) into another module M
* Call 'foo' at some specialised type in M
Then you jolly well expect it to be specialised in M. But what if
'foo' calls another function 'Lib.bar'. Then you'd like 'bar' to be
specialised too. But if 'bar' is not marked INLINEABLE it may well
not be specialised. The warning Opt_WarnMissedSpecs warns about this.
It's noisier to warn about a missed specialisation opportunity
for /every/ overloaded imported function, but sometimes useful. That
is what Opt_WarnAllMissedSpecs does.
ToDo: warn about missed opportunities for local functions.
Note [Specialise imported INLINABLE things]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
What imported functions do we specialise? The basic set is
* DFuns and things with INLINABLE pragmas.
but with -fspecialise-aggressively we add
* Anything with an unfolding template
Trac #8874 has a good example of why we want to auto-specialise DFuns.
We have the -fspecialise-aggressively flag (usually off), because we
risk lots of orphan modules from over-vigorous specialisation.
However it's not a big deal: anything non-recursive with an
unfolding-template will probably have been inlined already.
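The user-facing recipe, as a sketch (two standalone modules, invented
purely for illustration): the INLINABLE pragma on 'foo' in Lib is what
exposes an unfolding template, so that the call in M can be specialised.
        module Lib (foo) where
        foo :: Num a => a -> a
        foo x = x + x
        {-# INLINABLE foo #-}

        module M where
        import Lib (foo)
        bar :: Int -> Int
        bar = foo   -- with the pragma, a copy of foo specialised at Int is made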
Note [Glom the bindings if imported functions are specialised]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose we have an imported, *recursive*, INLINABLE function
f :: Eq a => a -> a
f = /\a \d x. ...(f a d)...
In the module being compiled we have
g x = f (x::Int)
Now we'll make a specialised function
f_spec :: Int -> Int
f_spec = \x -> ...(f Int dInt)...
{-# RULE f Int _ = f_spec #-}
g = \x. f Int dInt x
Note that f_spec doesn't look recursive
After rewriting with the RULE, we get
f_spec = \x -> ...(f_spec)...
BUT since f_spec was non-recursive before it'll *stay* non-recursive.
The occurrence analyser never turns a NonRec into a Rec. So we must
make sure that f_spec is recursive. Easiest thing is to make all
the specialisations for imported bindings recursive.
Note [Avoiding recursive specialisation]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When we specialise 'f' we may find new overloaded calls to 'g', 'h' in
'f's RHS. So we want to specialise g,h. But we don't want to
specialise f any more! It's possible that f's RHS might have a
recursive yet-more-specialised call, so we'd diverge in that case.
And if the call is to the same type, one specialisation is enough.
Avoiding this recursive specialisation loop is the reason for the
'done' VarSet passed to specImports and specImport.
************************************************************************
* *
\subsubsection{@specExpr@: the main function}
* *
************************************************************************
-}
data SpecEnv
= SE { se_subst :: CoreSubst.Subst
-- We carry a substitution down:
-- a) we must clone any binding that might float outwards,
-- to avoid name clashes
-- b) we carry a type substitution to use when analysing
-- the RHS of specialised bindings (no type-let!)
, se_interesting :: VarSet
-- Dict Ids that we know something about
-- and hence may be worth specialising against
-- See Note [Interesting dictionary arguments]
}
specVar :: SpecEnv -> Id -> CoreExpr
specVar env v = CoreSubst.lookupIdSubst (text "specVar") (se_subst env) v
specExpr :: SpecEnv -> CoreExpr -> SpecM (CoreExpr, UsageDetails)
---------------- First the easy cases --------------------
specExpr env (Type ty) = return (Type (substTy env ty), emptyUDs)
specExpr env (Coercion co) = return (Coercion (substCo env co), emptyUDs)
specExpr env (Var v) = return (specVar env v, emptyUDs)
specExpr _ (Lit lit) = return (Lit lit, emptyUDs)
specExpr env (Cast e co)
= do { (e', uds) <- specExpr env e
; return ((Cast e' (substCo env co)), uds) }
specExpr env (Tick tickish body)
= do { (body', uds) <- specExpr env body
; return (Tick (specTickish env tickish) body', uds) }
---------------- Applications might generate a call instance --------------------
specExpr env expr@(App {})
= go expr []
where
go (App fun arg) args = do (arg', uds_arg) <- specExpr env arg
(fun', uds_app) <- go fun (arg':args)
return (App fun' arg', uds_arg `plusUDs` uds_app)
go (Var f) args = case specVar env f of
Var f' -> return (Var f', mkCallUDs env f' args)
e' -> return (e', emptyUDs) -- I don't expect this!
go other _ = specExpr env other
---------------- Lambda/case require dumping of usage details --------------------
specExpr env e@(Lam _ _) = do
(body', uds) <- specExpr env' body
let (free_uds, dumped_dbs) = dumpUDs bndrs' uds
return (mkLams bndrs' (wrapDictBindsE dumped_dbs body'), free_uds)
where
(bndrs, body) = collectBinders e
(env', bndrs') = substBndrs env bndrs
-- More efficient to collect a group of binders together all at once
-- and we don't want to split a lambda group with dumped bindings
specExpr env (Case scrut case_bndr ty alts)
= do { (scrut', scrut_uds) <- specExpr env scrut
; (scrut'', case_bndr', alts', alts_uds)
<- specCase env scrut' case_bndr alts
; return (Case scrut'' case_bndr' (substTy env ty) alts'
, scrut_uds `plusUDs` alts_uds) }
---------------- Finally, let is the interesting case --------------------
specExpr env (Let bind body)
= do { -- Clone binders
(rhs_env, body_env, bind') <- cloneBindSM env bind
-- Deal with the body
; (body', body_uds) <- specExpr body_env body
-- Deal with the bindings
; (binds', uds) <- specBind rhs_env bind' body_uds
-- All done
; return (foldr Let body' binds', uds) }
specTickish :: SpecEnv -> Tickish Id -> Tickish Id
specTickish env (Breakpoint ix ids)
= Breakpoint ix [ id' | id <- ids, Var id' <- [specVar env id]]
-- drop vars from the list if they have a non-variable substitution.
-- should never happen, but it's harmless to drop them anyway.
specTickish _ other_tickish = other_tickish
specCase :: SpecEnv
-> CoreExpr -- Scrutinee, already done
-> Id -> [CoreAlt]
-> SpecM ( CoreExpr -- New scrutinee
, Id
, [CoreAlt]
, UsageDetails)
specCase env scrut' case_bndr [(con, args, rhs)]
| isDictId case_bndr -- See Note [Floating dictionaries out of cases]
, interestingDict env scrut'
, not (isDeadBinder case_bndr && null sc_args')
= do { (case_bndr_flt : sc_args_flt) <- mapM clone_me (case_bndr' : sc_args')
; let sc_rhss = [ Case (Var case_bndr_flt) case_bndr' (idType sc_arg')
[(con, args', Var sc_arg')]
| sc_arg' <- sc_args' ]
-- Extend the substitution for RHS to map the *original* binders
               -- to their floated versions.
mb_sc_flts :: [Maybe DictId]
mb_sc_flts = map (lookupVarEnv clone_env) args'
clone_env = zipVarEnv sc_args' sc_args_flt
subst_prs = (case_bndr, Var case_bndr_flt)
: [ (arg, Var sc_flt)
| (arg, Just sc_flt) <- args `zip` mb_sc_flts ]
env_rhs' = env_rhs { se_subst = CoreSubst.extendIdSubstList (se_subst env_rhs) subst_prs
, se_interesting = se_interesting env_rhs `extendVarSetList`
(case_bndr_flt : sc_args_flt) }
; (rhs', rhs_uds) <- specExpr env_rhs' rhs
; let scrut_bind = mkDB (NonRec case_bndr_flt scrut')
case_bndr_set = unitVarSet case_bndr_flt
sc_binds = [(NonRec sc_arg_flt sc_rhs, case_bndr_set)
| (sc_arg_flt, sc_rhs) <- sc_args_flt `zip` sc_rhss ]
flt_binds = scrut_bind : sc_binds
(free_uds, dumped_dbs) = dumpUDs (case_bndr':args') rhs_uds
all_uds = flt_binds `addDictBinds` free_uds
alt' = (con, args', wrapDictBindsE dumped_dbs rhs')
; return (Var case_bndr_flt, case_bndr', [alt'], all_uds) }
where
(env_rhs, (case_bndr':args')) = substBndrs env (case_bndr:args)
sc_args' = filter is_flt_sc_arg args'
clone_me bndr = do { uniq <- getUniqueM
; return (mkUserLocal occ uniq ty loc) }
where
name = idName bndr
ty = idType bndr
occ = nameOccName name
loc = getSrcSpan name
arg_set = mkVarSet args'
is_flt_sc_arg var = isId var
&& not (isDeadBinder var)
&& isDictTy var_ty
&& not (tyVarsOfType var_ty `intersectsVarSet` arg_set)
where
var_ty = idType var
specCase env scrut case_bndr alts
= do { (alts', uds_alts) <- mapAndCombineSM spec_alt alts
; return (scrut, case_bndr', alts', uds_alts) }
where
(env_alt, case_bndr') = substBndr env case_bndr
spec_alt (con, args, rhs) = do
(rhs', uds) <- specExpr env_rhs rhs
let (free_uds, dumped_dbs) = dumpUDs (case_bndr' : args') uds
return ((con, args', wrapDictBindsE dumped_dbs rhs'), free_uds)
where
(env_rhs, args') = substBndrs env_alt args
{-
Note [Floating dictionaries out of cases]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
g = \d. case d of { MkD sc ... -> ...(f sc)... }
Naively we can't float the call instance for 'f' out of the case expression,
because 'sc' is bound by the case, and that in turn means we can't
specialise f, which seems a pity.
So we invert the case, by floating out a binding
for 'sc_flt' thus:
sc_flt = case d of { MkD sc ... -> sc }
Now we can float the call instance for 'f'. Indeed this is just
what'll happen if 'sc' was originally bound with a let binding,
but case is more efficient, and necessary with equalities. So it's
good to work with both.
You might think that this won't make any difference, because the
call instance will only get nuked by the \d. BUT if 'g' itself is
specialised, then transitively we should be able to specialise f.
In general, given
case e of cb { MkD sc ... -> ...(f sc)... }
we transform to
let cb_flt = e
sc_flt = case cb_flt of { MkD sc ... -> sc }
in
   case cb_flt of cb { MkD sc ... -> ....(f sc_flt)... }
The "_flt" things are the floated binds; we use the current substitution
to substitute sc -> sc_flt in the RHS
************************************************************************
* *
Dealing with a binding
* *
************************************************************************
-}
specBind :: SpecEnv -- Use this for RHSs
-> CoreBind
-> UsageDetails -- Info on how the scope of the binding
-> SpecM ([CoreBind], -- New bindings
UsageDetails) -- And info to pass upstream
-- Returned UsageDetails:
-- No calls for binders of this bind
specBind rhs_env (NonRec fn rhs) body_uds
= do { (rhs', rhs_uds) <- specExpr rhs_env rhs
; (fn', spec_defns, body_uds1) <- specDefn rhs_env body_uds fn rhs
; let pairs = spec_defns ++ [(fn', rhs')]
-- fn' mentions the spec_defns in its rules,
-- so put the latter first
combined_uds = body_uds1 `plusUDs` rhs_uds
-- This way round a call in rhs_uds of a function f
-- at type T will override a call of f at T in body_uds1; and
-- that is good because it'll tend to keep "earlier" calls
-- See Note [Specialisation of dictionary functions]
(free_uds, dump_dbs, float_all) = dumpBindUDs [fn] combined_uds
-- See Note [From non-recursive to recursive]
final_binds :: [DictBind]
final_binds
| isEmptyBag dump_dbs = [mkDB $ NonRec b r | (b,r) <- pairs]
| otherwise = [flattenDictBinds dump_dbs pairs]
; if float_all then
-- Rather than discard the calls mentioning the bound variables
-- we float this binding along with the others
return ([], free_uds `snocDictBinds` final_binds)
else
-- No call in final_uds mentions bound variables,
-- so we can just leave the binding here
return (map fst final_binds, free_uds) }
specBind rhs_env (Rec pairs) body_uds
-- Note [Specialising a recursive group]
= do { let (bndrs,rhss) = unzip pairs
; (rhss', rhs_uds) <- mapAndCombineSM (specExpr rhs_env) rhss
; let scope_uds = body_uds `plusUDs` rhs_uds
-- Includes binds and calls arising from rhss
; (bndrs1, spec_defns1, uds1) <- specDefns rhs_env scope_uds pairs
; (bndrs3, spec_defns3, uds3)
<- if null spec_defns1 -- Common case: no specialisation
then return (bndrs1, [], uds1)
else do { -- Specialisation occurred; do it again
(bndrs2, spec_defns2, uds2)
<- specDefns rhs_env uds1 (bndrs1 `zip` rhss)
; return (bndrs2, spec_defns2 ++ spec_defns1, uds2) }
; let (final_uds, dumped_dbs, float_all) = dumpBindUDs bndrs uds3
bind = flattenDictBinds dumped_dbs
(spec_defns3 ++ zip bndrs3 rhss')
; if float_all then
return ([], final_uds `snocDictBind` bind)
else
return ([fst bind], final_uds) }
---------------------------
specDefns :: SpecEnv
-> UsageDetails -- Info on how it is used in its scope
-> [(Id,CoreExpr)] -- The things being bound and their un-processed RHS
-> SpecM ([Id], -- Original Ids with RULES added
[(Id,CoreExpr)], -- Extra, specialised bindings
UsageDetails) -- Stuff to fling upwards from the specialised versions
-- Specialise a list of bindings (the contents of a Rec), but flowing usages
-- upwards binding by binding. Example: { f = ...g ...; g = ...f .... }
-- Then if the input CallDetails has a specialised call for 'g', whose specialisation
-- in turn generates a specialised call for 'f', we catch that in this one sweep.
-- But not vice versa (it's a fixpoint problem).
specDefns _env uds []
= return ([], [], uds)
specDefns env uds ((bndr,rhs):pairs)
= do { (bndrs1, spec_defns1, uds1) <- specDefns env uds pairs
; (bndr1, spec_defns2, uds2) <- specDefn env uds1 bndr rhs
; return (bndr1 : bndrs1, spec_defns1 ++ spec_defns2, uds2) }
---------------------------
specDefn :: SpecEnv
-> UsageDetails -- Info on how it is used in its scope
-> Id -> CoreExpr -- The thing being bound and its un-processed RHS
-> SpecM (Id, -- Original Id with added RULES
[(Id,CoreExpr)], -- Extra, specialised bindings
UsageDetails) -- Stuff to fling upwards from the specialised versions
specDefn env body_uds fn rhs
= do { let (body_uds_without_me, calls_for_me) = callsForMe fn body_uds
rules_for_me = idCoreRules fn
; (rules, spec_defns, spec_uds) <- specCalls Nothing env rules_for_me
calls_for_me fn rhs
; return ( fn `addIdSpecialisations` rules
, spec_defns
, body_uds_without_me `plusUDs` spec_uds) }
-- It's important that the `plusUDs` is this way
-- round, because body_uds_without_me may bind
-- dictionaries that are used in calls_for_me passed
-- to specDefn. So the dictionary bindings in
-- spec_uds may mention dictionaries bound in
-- body_uds_without_me
---------------------------
specCalls :: Maybe Module -- Just this_mod => specialising imported fn
-- Nothing => specialising local fn
-> SpecEnv
-> [CoreRule] -- Existing RULES for the fn
-> [CallInfo]
-> Id -> CoreExpr
-> SpecM ([CoreRule], -- New RULES for the fn
[(Id,CoreExpr)], -- Extra, specialised bindings
UsageDetails) -- New usage details from the specialised RHSs
-- This function checks existing rules, and does not create
-- duplicate ones. So the caller does not need to do this filtering.
-- See 'already_covered'
specCalls mb_mod env rules_for_me calls_for_me fn rhs
-- The first case is the interesting one
| rhs_tyvars `lengthIs` n_tyvars -- Rhs of fn's defn has right number of big lambdas
&& rhs_ids `lengthAtLeast` n_dicts -- and enough dict args
&& notNull calls_for_me -- And there are some calls to specialise
&& not (isNeverActive (idInlineActivation fn))
-- Don't specialise NOINLINE things
-- See Note [Auto-specialisation and RULES]
-- && not (certainlyWillInline (idUnfolding fn)) -- And it's not small
-- See Note [Inline specialisation] for why we do not
-- switch off specialisation for inline functions
= -- pprTrace "specDefn: some" (ppr fn $$ ppr calls_for_me $$ ppr rules_for_me) $
do { stuff <- mapM spec_call calls_for_me
; let (spec_defns, spec_uds, spec_rules) = unzip3 (catMaybes stuff)
; return (spec_rules, spec_defns, plusUDList spec_uds) }
| otherwise -- No calls or RHS doesn't fit our preconceptions
= WARN( not (exprIsTrivial rhs) && notNull calls_for_me,
ptext (sLit "Missed specialisation opportunity for")
<+> ppr fn $$ _trace_doc )
-- Note [Specialisation shape]
-- pprTrace "specDefn: none" (ppr fn <+> ppr calls_for_me) $
return ([], [], emptyUDs)
where
_trace_doc = sep [ ppr rhs_tyvars, ppr n_tyvars
, ppr rhs_ids, ppr n_dicts
, ppr (idInlineActivation fn) ]
fn_type = idType fn
fn_arity = idArity fn
fn_unf = realIdUnfolding fn -- Ignore loop-breaker-ness here
(tyvars, theta, _) = tcSplitSigmaTy fn_type
n_tyvars = length tyvars
n_dicts = length theta
inl_prag = idInlinePragma fn
inl_act = inlinePragmaActivation inl_prag
is_local = isLocalId fn
-- Figure out whether the function has an INLINE pragma
-- See Note [Inline specialisations]
(rhs_tyvars, rhs_ids, rhs_body) = collectTyAndValBinders rhs
rhs_dict_ids = take n_dicts rhs_ids
body = mkLams (drop n_dicts rhs_ids) rhs_body
-- Glue back on the non-dict lambdas
already_covered :: DynFlags -> [CoreExpr] -> Bool
already_covered dflags args -- Note [Specialisations already covered]
= isJust (lookupRule dflags
(CoreSubst.substInScope (se_subst env), realIdUnfolding)
(const True)
fn args rules_for_me)
mk_ty_args :: [Maybe Type] -> [TyVar] -> [CoreExpr]
mk_ty_args [] poly_tvs
= ASSERT( null poly_tvs ) []
mk_ty_args (Nothing : call_ts) (poly_tv : poly_tvs)
= Type (mkTyVarTy poly_tv) : mk_ty_args call_ts poly_tvs
mk_ty_args (Just ty : call_ts) poly_tvs
= Type ty : mk_ty_args call_ts poly_tvs
mk_ty_args (Nothing : _) [] = panic "mk_ty_args"
----------------------------------------------------------
-- Specialise to one particular call pattern
spec_call :: CallInfo -- Call instance
-> SpecM (Maybe ((Id,CoreExpr), -- Specialised definition
UsageDetails, -- Usage details from specialised body
CoreRule)) -- Info for the Id's SpecEnv
spec_call (CallKey call_ts, (call_ds, _))
= ASSERT( call_ts `lengthIs` n_tyvars && call_ds `lengthIs` n_dicts )
-- Suppose f's defn is f = /\ a b c -> \ d1 d2 -> rhs
-- Suppose the call is for f [Just t1, Nothing, Just t3] [dx1, dx2]
-- Construct the new binding
-- f1 = SUBST[a->t1,c->t3, d1->d1', d2->d2'] (/\ b -> rhs)
-- PLUS the usage-details
-- { d1' = dx1; d2' = dx2 }
-- where d1', d2' are cloned versions of d1,d2, with the type substitution
-- applied. These auxiliary bindings just avoid duplication of dx1, dx2
--
-- Note that the substitution is applied to the whole thing.
-- This is convenient, but just slightly fragile. Notably:
-- * There had better be no name clashes in a/b/c
do { let
-- poly_tyvars = [b] in the example above
-- spec_tyvars = [a,c]
-- ty_args = [t1,b,t3]
spec_tv_binds = [(tv,ty) | (tv, Just ty) <- rhs_tyvars `zip` call_ts]
env1 = extendTvSubstList env spec_tv_binds
(rhs_env, poly_tyvars) = substBndrs env1
[tv | (tv, Nothing) <- rhs_tyvars `zip` call_ts]
-- Clone rhs_dicts, including instantiating their types
; inst_dict_ids <- mapM (newDictBndr rhs_env) rhs_dict_ids
; let (rhs_env2, dx_binds, spec_dict_args)
= bindAuxiliaryDicts rhs_env rhs_dict_ids call_ds inst_dict_ids
ty_args = mk_ty_args call_ts poly_tyvars
rule_args = ty_args ++ map Var inst_dict_ids
rule_bndrs = poly_tyvars ++ inst_dict_ids
; dflags <- getDynFlags
; if already_covered dflags rule_args then
return Nothing
else do
{ -- Figure out the type of the specialised function
let body_ty = applyTypeToArgs rhs fn_type rule_args
(lam_args, app_args) -- Add a dummy argument if body_ty is unlifted
| isUnLiftedType body_ty -- C.f. WwLib.mkWorkerArgs
= (poly_tyvars ++ [voidArgId], poly_tyvars ++ [voidPrimId])
| otherwise = (poly_tyvars, poly_tyvars)
spec_id_ty = mkPiTypes lam_args body_ty
; spec_f <- newSpecIdSM fn spec_id_ty
; (spec_rhs, rhs_uds) <- specExpr rhs_env2 (mkLams lam_args body)
; this_mod <- getModule
; let
-- The rule to put in the function's specialisation is:
-- forall b, d1',d2'. f t1 b t3 d1' d2' = f1 b
herald = case mb_mod of
                        Nothing       -- Specialising local fn
                            -> ptext (sLit "SPEC")
                        Just this_mod -- Specialising imported fn
                            -> ptext (sLit "SPEC/") <> ppr this_mod
rule_name = mkFastString $ showSDocForUser dflags neverQualify $
herald <+> ppr fn <+> hsep (map ppr_call_key_ty call_ts)
-- This name ends up in interface files, so use showSDocForUser,
-- otherwise uniques end up there, making builds
-- less deterministic (See #4012 comment:61 ff)
spec_env_rule = mkRule
this_mod
True {- Auto generated -}
is_local
rule_name
inl_act -- Note [Auto-specialisation and RULES]
(idName fn)
rule_bndrs
rule_args
(mkVarApps (Var spec_f) app_args)
-- Add the { d1' = dx1; d2' = dx2 } usage stuff
final_uds = foldr consDictBind rhs_uds dx_binds
--------------------------------------
-- Add a suitable unfolding if the spec_inl_prag says so
-- See Note [Inline specialisations]
(spec_inl_prag, spec_unf)
| not is_local && isStrongLoopBreaker (idOccInfo fn)
= (neverInlinePragma, noUnfolding)
-- See Note [Specialising imported functions] in OccurAnal
| InlinePragma { inl_inline = Inlinable } <- inl_prag
= (inl_prag { inl_inline = EmptyInlineSpec }, noUnfolding)
| otherwise
= (inl_prag, specUnfolding dflags (se_subst env)
poly_tyvars (ty_args ++ spec_dict_args)
fn_unf)
--------------------------------------
-- Adding arity information just propagates it a bit faster
-- See Note [Arity decrease] in Simplify
-- Copy InlinePragma information from the parent Id.
-- So if f has INLINE[1] so does spec_f
spec_f_w_arity = spec_f `setIdArity` max 0 (fn_arity - n_dicts)
`setInlinePragma` spec_inl_prag
`setIdUnfolding` spec_unf
; return (Just ((spec_f_w_arity, spec_rhs), final_uds, spec_env_rule)) } }
bindAuxiliaryDicts
:: SpecEnv
-> [DictId] -> [CoreExpr] -- Original dict bndrs, and the witnessing expressions
-> [DictId] -- A cloned dict-id for each dict arg
-> (SpecEnv, -- Substitute for all orig_dicts
[DictBind], -- Auxiliary dict bindings
[CoreExpr]) -- Witnessing expressions (all trivial)
-- Bind any dictionary arguments to fresh names, to preserve sharing
bindAuxiliaryDicts env@(SE { se_subst = subst, se_interesting = interesting })
orig_dict_ids call_ds inst_dict_ids
= (env', dx_binds, spec_dict_args)
where
(dx_binds, spec_dict_args) = go call_ds inst_dict_ids
env' = env { se_subst = CoreSubst.extendIdSubstList subst (orig_dict_ids `zip` spec_dict_args)
, se_interesting = interesting `unionVarSet` interesting_dicts }
interesting_dicts = mkVarSet [ dx_id | (NonRec dx_id dx, _) <- dx_binds
, interestingDict env dx ]
-- See Note [Make the new dictionaries interesting]
go :: [CoreExpr] -> [CoreBndr] -> ([DictBind], [CoreExpr])
go [] _ = ([], [])
go (dx:dxs) (dx_id:dx_ids)
| exprIsTrivial dx = (dx_binds, dx:args)
| otherwise = (mkDB (NonRec dx_id dx) : dx_binds, Var dx_id : args)
where
(dx_binds, args) = go dxs dx_ids
-- In the first case extend the substitution but not bindings;
-- in the latter extend the bindings but not the substitution.
-- For the former, note that we bind the *original* dict in the substitution,
-- overriding any d->dx_id binding put there by substBndrs
go _ _ = pprPanic "bindAuxiliaryDicts" (ppr orig_dict_ids $$ ppr call_ds $$ ppr inst_dict_ids)
{-
Note [Make the new dictionaries interesting]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Important! We're going to substitute dx_id1 for d
and we want it to look "interesting", else we won't gather *any*
consequential calls. E.g.
f d = ...g d....
If we specialise f for a call (f (dfun dNumInt)), we'll get
a consequent call (g d') with an auxiliary definition
         d' = dfun dNumInt
We want that consequent call to look interesting
Note [From non-recursive to recursive]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Even in the non-recursive case, if any dict-binds depend on 'fn' we might
have built a recursive knot
f a d x = <blah>
MkUD { ud_binds = d7 = MkD ..f..
, ud_calls = ...(f T d7)... }
Then we generate
Rec { fs x = <blah>[T/a, d7/d]
f a d x = <blah>
RULE f T _ = fs
d7 = ...f... }
Here the recursion is only through the RULE.
Note [Specialisation of dictionary functions]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Here is a nasty example that bit us badly: see Trac #3591
class Eq a => C a
instance Eq [a] => C [a]
---------------
dfun :: Eq [a] -> C [a]
dfun a d = MkD a d (meth d)
d4 :: Eq [T] = <blah>
d2 :: C [T] = dfun T d4
d1 :: Eq [T] = $p1 d2
d3 :: C [T] = dfun T d1
None of these definitions is recursive. What happened was that we
generated a specialisation:
RULE forall d. dfun T d = dT :: C [T]
dT = (MkD a d (meth d)) [T/a, d1/d]
= MkD T d1 (meth d1)
But now we use the RULE on the RHS of d2, to get
d2 = dT = MkD d1 (meth d1)
d1 = $p1 d2
and now d1 is bottom! The problem is that when specialising 'dfun' we
should first dump "below" the binding all floated dictionary bindings
that mention 'dfun' itself. So d2 and d3 (and hence d1) must be
placed below 'dfun', and thus unavailable to it when specialising
'dfun'. That in turn means that the call (dfun T d1) must be
discarded. On the other hand, the call (dfun T d4) is fine, assuming
d4 doesn't mention dfun.
But look at this:
class C a where { foo,bar :: [a] -> [a] }
instance C Int where
foo x = r_bar x
bar xs = reverse xs
r_bar :: C a => [a] -> [a]
r_bar xs = bar (xs ++ xs)
That translates to:
r_bar a (c::C a) (xs::[a]) = bar a d (xs ++ xs)
Rec { $fCInt :: C Int = MkC foo_help reverse
foo_help (xs::[Int]) = r_bar Int $fCInt xs }
The call (r_bar $fCInt) mentions $fCInt,
which mentions foo_help,
which mentions r_bar
But we DO want to specialise r_bar at Int:
Rec { $fCInt :: C Int = MkC foo_help reverse
foo_help (xs::[Int]) = r_bar Int $fCInt xs
r_bar a (c::C a) (xs::[a]) = bar a d (xs ++ xs)
RULE r_bar Int _ = r_bar_Int
r_bar_Int xs = bar Int $fCInt (xs ++ xs)
}
Note that, because of its RULE, r_bar joins the recursive
group. (In this case it'll unravel a short moment later.)
Conclusion: we catch the nasty case using filter_dfuns in
callsForMe. To be honest I'm not 100% certain that this is 100%
right, but it works. Sigh.
Note [Specialising a recursive group]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
let rec { f x = ...g x'...
; g y = ...f y'.... }
in f 'a'
Here we specialise 'f' at Char; but that is very likely to lead to
a specialisation of 'g' at Char. We must do the latter, else the
whole point of specialisation is lost.
But we do not want to keep iterating to a fixpoint, because in the
presence of polymorphic recursion we might generate an infinite number
of specialisations.
So we use the following heuristic:
* Arrange the rec block in dependency order, so far as possible
(the occurrence analyser already does this)
* Specialise it much like a sequence of lets
* Then go through the block a second time, feeding call-info from
the RHSs back in the bottom, as it were
In effect, the ordering maximises the effectiveness of each sweep,
and we do just two sweeps. This should catch almost every case of
monomorphic recursion -- the exception could be a very knotted-up
recursion with multiple cycles tied up together.
This plan is implemented in the Rec case of specBindItself.
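A standalone sketch of the situation (bodies invented for illustration):
specialising f at Char generates a call of g at Char, which the second
sweep picks up.
        f :: Eq a => a -> Int -> Int
        f x 0 = if x == x then 0 else 1
        f x n = g x (n - 1)

        g :: Eq a => a -> Int -> Int
        g x 0 = if x == x then 1 else 0
        g x n = f x (n - 1)

        main :: IO ()
        main = print (f 'a' 10)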
Note [Specialisations already covered]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We obviously don't want to generate two specialisations for the same
argument pattern. There are two wrinkles
1. We do the already-covered test in specDefn, not when we generate
the CallInfo in mkCallUDs. We used to test in the latter place, but
we now iterate the specialiser somewhat, and the Id at the call site
might therefore not have all the RULES that we can see in specDefn
2. What about two specialisations where the second is an *instance*
of the first? If the more specific one shows up first, we'll generate
specialisations for both. If the *less* specific one shows up first,
we *don't* currently generate a specialisation for the more specific
one. (See the call to lookupRule in already_covered.) Reasons:
(a) lookupRule doesn't say which matches are exact (bad reason)
(b) if the earlier specialisation is user-provided, it's
far from clear that we should auto-specialise further
Note [Auto-specialisation and RULES]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider:
g :: Num a => a -> a
g = ...
f :: (Int -> Int) -> Int
f w = ...
{-# RULE f g = 0 #-}
Suppose that auto-specialisation makes a specialised version of
g :: Int -> Int.  That version won't appear in the LHS of the RULE for f.
So if the specialisation rule fires too early, the rule for f may
never fire.
It might be possible to add new rules, to "complete" the rewrite system.
Thus when adding
RULE forall d. g Int d = g_spec
also add
RULE f g_spec = 0
But that's a bit complicated. For now we ask the programmer's help,
by *copying the INLINE activation pragma* to the auto-specialised
rule. So if g says {-# NOINLINE[2] g #-}, then the auto-spec rule
will also not be active until phase 2. And that's what programmers
should jolly well do anyway, even aside from specialisation, to ensure
that g doesn't inline too early.
This in turn means that the RULE would never fire for a NOINLINE
thing so not much point in generating a specialisation at all.
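For the record, the user-level shape under discussion, as a sketch with
invented bodies (the phase annotation on g is what gets copied onto the
auto-generated SPEC rule):
        g :: Num a => a -> a
        g x = x + 1
        {-# NOINLINE [2] g #-}

        f :: (Int -> Int) -> Int
        f w = w 3
        {-# RULES "f/g" f g = 0 #-}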
Note [Specialisation shape]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
We only specialise a function if it has visible top-level lambdas
corresponding to its overloading. E.g. if
f :: forall a. Eq a => ....
then its body must look like
f = /\a. \d. ...
Reason: when specialising the body for a call (f ty dexp), we want to
substitute dexp for d, and pick up specialised calls in the body of f.
This doesn't always work. One example I came across was this:
newtype Gen a = MkGen{ unGen :: Int -> a }
choose :: Eq a => a -> Gen a
choose n = MkGen (\r -> n)
oneof = choose (1::Int)
It's a silly example, but we get
choose = /\a. g `cast` co
where choose doesn't have any dict arguments. Thus far I have not
tried to fix this (wait till there's a real example).
Mind you, then 'choose' will be inlined (since RHS is trivial) so
it doesn't matter. This comes up with single-method classes
class C a where { op :: a -> a }
instance C a => C [a] where ....
==>
$fCList :: C a => C [a]
     $fCList = $copList |> (...coercion...)
....(uses of $fCList at particular types)...
So we suppress the WARN if the rhs is trivial.
Note [Inline specialisations]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Here is what we do with the InlinePragma of the original function
* Activation/RuleMatchInfo: both transferred to the
specialised function
* InlineSpec:
(a) An INLINE pragma is transferred
(b) An INLINABLE pragma is *not* transferred
Why (a): transfer INLINE pragmas? The point of INLINE was precisely to
specialise the function at its call site, and arguably that's not so
important for the specialised copies. BUT *pragma-directed*
specialisation now takes place in the typechecker/desugarer, with
manually specified INLINEs. The specialisation here is automatic.
It'd be very odd if a function marked INLINE was specialised (because
of some local use), and then forever after (including importing
modules) the specialised version wasn't INLINEd. After all, the
programmer said INLINE!
You might wonder why we specialise INLINE functions at all. After
all they should be inlined, right? Two reasons:
* Even INLINE functions are sometimes not inlined, when they aren't
applied to interesting arguments. But perhaps the type arguments
alone are enough to specialise (even though the args are too boring
to trigger inlining), and it's certainly better to call the
specialised version.
* The RHS of an INLINE function might call another overloaded function,
and we'd like to generate a specialised version of that function too.
This actually happens a lot. Consider
replicateM_ :: (Monad m) => Int -> m a -> m ()
{-# INLINABLE replicateM_ #-}
replicateM_ d x ma = ...
The strictness analyser may transform to
replicateM_ :: (Monad m) => Int -> m a -> m ()
{-# INLINE replicateM_ #-}
replicateM_ d x ma = case x of I# x' -> $wreplicateM_ d x' ma
$wreplicateM_ :: (Monad m) => Int# -> m a -> m ()
{-# INLINABLE $wreplicateM_ #-}
$wreplicateM_ = ...
Now an importing module has a specialised call to replicateM_, say
(replicateM_ dMonadIO). We certainly want to specialise $wreplicateM_!
This particular example had a huge effect on the call to replicateM_
in nofib/shootout/n-body.
Why (b): discard INLINEABLE pragmas? See Trac #4874 for persuasive examples.
Suppose we have
{-# INLINABLE f #-}
f :: Ord a => [a] -> Int
f xs = letrec f' = ...f'... in f'
Then, when f is specialised and optimised we might get
wgo :: [Int] -> Int#
wgo = ...wgo...
f_spec :: [Int] -> Int
f_spec xs = case wgo xs of { r -> I# r }
and we clearly want to inline f_spec at call sites. But if we still
have the big, un-optimised RHS of f (albeit specialised) captured in an
INLINABLE pragma for f_spec, we won't get that optimisation.
So we simply drop INLINABLE pragmas when specialising. It's not really
a complete solution; ignoring specialisation for now, INLINABLE functions
don't get properly strictness analysed, for example. But it works well
for examples involving specialisation, which is the dominant use of
INLINABLE. See Trac #4874.
************************************************************************
* *
\subsubsection{UsageDetails and suchlike}
* *
************************************************************************
-}
data UsageDetails
= MkUD {
ud_binds :: !(Bag DictBind),
-- Floated dictionary bindings
-- The order is important;
-- in ds1 `union` ds2, bindings in ds2 can depend on those in ds1
-- (Remember, Bags preserve order in GHC.)
ud_calls :: !CallDetails
-- INVARIANT: suppose bs = bindersOf ud_binds
-- Then 'calls' may *mention* 'bs',
-- but there should be no calls *for* bs
}
instance Outputable UsageDetails where
ppr (MkUD { ud_binds = dbs, ud_calls = calls })
= ptext (sLit "MkUD") <+> braces (sep (punctuate comma
[ptext (sLit "binds") <+> equals <+> ppr dbs,
ptext (sLit "calls") <+> equals <+> ppr calls]))
-- | A 'DictBind' is a binding along with a cached set containing its free
-- variables (both type variables and dictionaries)
type DictBind = (CoreBind, VarSet)
type DictExpr = CoreExpr
emptyUDs :: UsageDetails
emptyUDs = MkUD { ud_binds = emptyBag, ud_calls = emptyVarEnv }
------------------------------------------------------------
type CallDetails = IdEnv CallInfoSet
newtype CallKey = CallKey [Maybe Type] -- Nothing => unconstrained type argument
-- CallInfo uses a Map, thereby ensuring that
-- we record only one call instance for any key
--
-- The list of types and dictionaries is guaranteed to
-- match the type of f
data CallInfoSet = CIS Id (Map CallKey ([DictExpr], VarSet))
-- Range is dict args and the vars of the whole
-- call (including tyvars)
        -- [this does *not* include the main id itself, of course]
type CallInfo = (CallKey, ([DictExpr], VarSet))
instance Outputable CallInfoSet where
ppr (CIS fn map) = hang (ptext (sLit "CIS") <+> ppr fn)
2 (ppr map)
pprCallInfo :: Id -> CallInfo -> SDoc
pprCallInfo fn (CallKey mb_tys, (_dxs, _))
= hang (ppr fn)
2 (fsep (map ppr_call_key_ty mb_tys {- ++ map pprParendExpr _dxs -}))
ppr_call_key_ty :: Maybe Type -> SDoc
ppr_call_key_ty Nothing = char '_'
ppr_call_key_ty (Just ty) = char '@' <+> pprParendType ty
instance Outputable CallKey where
ppr (CallKey ts) = ppr ts
-- Type isn't an instance of Ord, so that we can control which
-- instance we use. That's tiresome here. Oh well
instance Eq CallKey where
k1 == k2 = case k1 `compare` k2 of { EQ -> True; _ -> False }
instance Ord CallKey where
compare (CallKey k1) (CallKey k2) = cmpList cmp k1 k2
where
cmp Nothing Nothing = EQ
cmp Nothing (Just _) = LT
cmp (Just _) Nothing = GT
cmp (Just t1) (Just t2) = cmpType t1 t2
unionCalls :: CallDetails -> CallDetails -> CallDetails
unionCalls c1 c2 = plusVarEnv_C unionCallInfoSet c1 c2
unionCallInfoSet :: CallInfoSet -> CallInfoSet -> CallInfoSet
unionCallInfoSet (CIS f calls1) (CIS _ calls2) = CIS f (calls1 `Map.union` calls2)
callDetailsFVs :: CallDetails -> VarSet
callDetailsFVs calls = foldVarEnv (unionVarSet . callInfoFVs) emptyVarSet calls
callInfoFVs :: CallInfoSet -> VarSet
callInfoFVs (CIS _ call_info) = Map.foldRight (\(_,fv) vs -> unionVarSet fv vs) emptyVarSet call_info
------------------------------------------------------------
singleCall :: Id -> [Maybe Type] -> [DictExpr] -> UsageDetails
singleCall id tys dicts
= MkUD {ud_binds = emptyBag,
ud_calls = unitVarEnv id $ CIS id $
Map.singleton (CallKey tys) (dicts, call_fvs) }
where
call_fvs = exprsFreeVars dicts `unionVarSet` tys_fvs
tys_fvs = tyVarsOfTypes (catMaybes tys)
-- The type args (tys) are guaranteed to be part of the dictionary
-- types, because they are just the constrained types,
-- and the dictionary is therefore sure to be bound
-- inside the binding for any type variables free in the type;
-- hence it's safe to neglect tyvars free in tys when making
-- the free-var set for this call
-- BUT I don't trust this reasoning; play safe and include tys_fvs
--
-- We don't include the 'id' itself.
mkCallUDs, mkCallUDs' :: SpecEnv -> Id -> [CoreExpr] -> UsageDetails
mkCallUDs env f args
= -- pprTrace "mkCallUDs" (vcat [ ppr f, ppr args, ppr res ])
res
where
res = mkCallUDs' env f args
mkCallUDs' env f args
| not (want_calls_for f) -- Imported from elsewhere
|| null theta -- Not overloaded
= emptyUDs
| not (all type_determines_value theta)
|| not (spec_tys `lengthIs` n_tyvars)
|| not ( dicts `lengthIs` n_dicts)
|| not (any (interestingDict env) dicts) -- Note [Interesting dictionary arguments]
-- See also Note [Specialisations already covered]
= -- pprTrace "mkCallUDs: discarding" _trace_doc
emptyUDs -- Not overloaded, or no specialisation wanted
| otherwise
= -- pprTrace "mkCallUDs: keeping" _trace_doc
singleCall f spec_tys dicts
where
_trace_doc = vcat [ppr f, ppr args, ppr n_tyvars, ppr n_dicts
, ppr (map (interestingDict env) dicts)]
(tyvars, theta, _) = tcSplitSigmaTy (idType f)
constrained_tyvars = closeOverKinds (tyVarsOfTypes theta)
n_tyvars = length tyvars
n_dicts = length theta
spec_tys = [mk_spec_ty tv ty | (tv, Type ty) <- tyvars `zip` args]
dicts = [dict_expr | (_, dict_expr) <- theta `zip` (drop n_tyvars args)]
mk_spec_ty tyvar ty
| tyvar `elemVarSet` constrained_tyvars = Just ty
| otherwise = Nothing
want_calls_for f = isLocalId f || isJust (maybeUnfoldingTemplate (realIdUnfolding f))
-- For imported things, we gather call instances if
-- there is an unfolding that we could in principle specialise
-- We might still decide not to use it (consulting dflags)
-- in specImports
-- Use 'realIdUnfolding' to ignore the loop-breaker flag!
type_determines_value pred -- See Note [Type determines value]
= case classifyPredType pred of
ClassPred cls _ -> not (isIPClass cls) -- Superclasses can't be IPs
EqPred {} -> True
IrredPred {} -> True -- Things like (D []) where D is a
-- Constraint-ranged family; Trac #7785
{-
Note [Type determines value]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Only specialise if all overloading is on non-IP *class* params,
because these are the ones whose *type* determines their *value*. In
particular, with implicit params, the type args *don't* say what the
value of the implicit param is! See Trac #7101
However, consider
type family D (v::*->*) :: Constraint
type instance D [] = ()
f :: D v => v Char -> Int
If we see a call (f "foo"), we'll pass a "dictionary"
() |> (g :: () ~ D [])
and it's good to specialise f at this dictionary.
So the question is: can an implicit parameter "hide inside" a
type-family constraint like (D a). Well, no. We don't allow
type instance D Maybe = ?x:Int
Hence the IrredPred case in type_determines_value.
See Trac #7785.
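A standalone sketch of why implicit parameters are excluded (needs
ImplicitParams; invented for illustration): both calls below are at the
same type, yet see different values for ?x, so the type alone cannot
determine the dictionary.
        {-# LANGUAGE ImplicitParams #-}
        f :: (?x :: Int) => Int -> Int
        f n = n + ?x

        callers :: (Int, Int)
        callers = ( let ?x = 1 in f 10
                  , let ?x = 2 in f 10 )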
Note [Interesting dictionary arguments]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider this
\a.\d:Eq a. let f = ... in ...(f d)...
There really is not much point in specialising f wrt the dictionary d,
because the code for the specialised f is not improved at all, because
d is lambda-bound. We simply get junk specialisations.
What is "interesting"? Just that it has *some* structure. But what about
variables?
* A variable might be imported, in which case its unfolding
will tell us whether it has useful structure
* Local variables are cloned on the way down (to avoid clashes when
we float dictionaries), and cloning drops the unfolding
(cloneIdBndr). Moreover, we make up some new bindings, and it's a
nuisance to give them unfoldings. So we keep track of the
"interesting" dictionaries as a VarSet in SpecEnv.
We have to take care to put any new interesting dictionary
bindings in the set.
We accidentally lost accurate tracking of local variables for a long
time, because cloned variables don't have unfoldings. But it makes a
massive difference in a few cases, eg Trac #5113. For nofib as a
whole it's only a small win: 2.2% improvement in allocation for ansi,
1.2% for bspt, but mostly 0.0! Average 0.1% increase in binary size.
-}
interestingDict :: SpecEnv -> CoreExpr -> Bool
-- A dictionary argument is interesting if it has *some* structure
interestingDict env (Var v) = hasSomeUnfolding (idUnfolding v)
|| isDataConWorkId v
|| v `elemVarSet` se_interesting env
interestingDict _ (Type _) = False
interestingDict _ (Coercion _) = False
interestingDict env (App fn (Type _)) = interestingDict env fn
interestingDict env (App fn (Coercion _)) = interestingDict env fn
interestingDict env (Tick _ a) = interestingDict env a
interestingDict env (Cast e _) = interestingDict env e
interestingDict _ _ = True
plusUDs :: UsageDetails -> UsageDetails -> UsageDetails
plusUDs (MkUD {ud_binds = db1, ud_calls = calls1})
(MkUD {ud_binds = db2, ud_calls = calls2})
= MkUD { ud_binds = db1 `unionBags` db2
, ud_calls = calls1 `unionCalls` calls2 }
plusUDList :: [UsageDetails] -> UsageDetails
plusUDList = foldr plusUDs emptyUDs
-----------------------------
_dictBindBndrs :: Bag DictBind -> [Id]
_dictBindBndrs dbs = foldrBag ((++) . bindersOf . fst) [] dbs
-- | Construct a 'DictBind' from a 'CoreBind'
mkDB :: CoreBind -> DictBind
mkDB bind = (bind, bind_fvs bind)
-- | Identify the free variables of a 'CoreBind'
bind_fvs :: CoreBind -> VarSet
bind_fvs (NonRec bndr rhs) = pair_fvs (bndr,rhs)
bind_fvs (Rec prs) = foldl delVarSet rhs_fvs bndrs
where
bndrs = map fst prs
rhs_fvs = unionVarSets (map pair_fvs prs)
pair_fvs :: (Id, CoreExpr) -> VarSet
pair_fvs (bndr, rhs) = exprFreeVars rhs `unionVarSet` idFreeVars bndr
-- Don't forget variables mentioned in the
-- rules of the bndr. C.f. OccAnal.addRuleUsage
-- Also tyvars mentioned in its type; they may not appear in the RHS
-- type T a = Int
-- x :: T a = 3
-- | Flatten a set of 'DictBind's and some other binding pairs into a single
-- recursive binding, including some additional bindings.
flattenDictBinds :: Bag DictBind -> [(Id,CoreExpr)] -> DictBind
flattenDictBinds dbs pairs
= (Rec bindings, fvs)
where
(bindings, fvs) = foldrBag add
([], emptyVarSet)
(dbs `snocBag` mkDB (Rec pairs))
add (NonRec b r, fvs') (pairs, fvs) =
((b,r) : pairs, fvs `unionVarSet` fvs')
add (Rec prs1, fvs') (pairs, fvs) =
(prs1 ++ pairs, fvs `unionVarSet` fvs')
snocDictBinds :: UsageDetails -> [DictBind] -> UsageDetails
-- Add ud_binds to the tail end of the bindings in uds
snocDictBinds uds dbs
= uds { ud_binds = ud_binds uds `unionBags`
foldr consBag emptyBag dbs }
consDictBind :: DictBind -> UsageDetails -> UsageDetails
consDictBind bind uds = uds { ud_binds = bind `consBag` ud_binds uds }
addDictBinds :: [DictBind] -> UsageDetails -> UsageDetails
addDictBinds binds uds = uds { ud_binds = listToBag binds `unionBags` ud_binds uds }
snocDictBind :: UsageDetails -> DictBind -> UsageDetails
snocDictBind uds bind = uds { ud_binds = ud_binds uds `snocBag` bind }
wrapDictBinds :: Bag DictBind -> [CoreBind] -> [CoreBind]
wrapDictBinds dbs binds
= foldrBag add binds dbs
where
add (bind,_) binds = bind : binds
wrapDictBindsE :: Bag DictBind -> CoreExpr -> CoreExpr
wrapDictBindsE dbs expr
= foldrBag add expr dbs
where
add (bind,_) expr = Let bind expr
----------------------
dumpUDs :: [CoreBndr] -> UsageDetails -> (UsageDetails, Bag DictBind)
-- Used at a lambda or case binder; just dump anything mentioning the binder
dumpUDs bndrs uds@(MkUD { ud_binds = orig_dbs, ud_calls = orig_calls })
| null bndrs = (uds, emptyBag) -- Common in case alternatives
| otherwise = -- pprTrace "dumpUDs" (ppr bndrs $$ ppr free_uds $$ ppr dump_dbs) $
(free_uds, dump_dbs)
where
free_uds = MkUD { ud_binds = free_dbs, ud_calls = free_calls }
bndr_set = mkVarSet bndrs
(free_dbs, dump_dbs, dump_set) = splitDictBinds orig_dbs bndr_set
free_calls = deleteCallsMentioning dump_set $ -- Drop calls mentioning bndr_set on the floor
deleteCallsFor bndrs orig_calls -- Discard calls for bndr_set; there should be
-- no calls for any of the dicts in dump_dbs
dumpBindUDs :: [CoreBndr] -> UsageDetails -> (UsageDetails, Bag DictBind, Bool)
-- Used at a lambda or case binder; just dump anything mentioning the binder
dumpBindUDs bndrs (MkUD { ud_binds = orig_dbs, ud_calls = orig_calls })
= -- pprTrace "dumpBindUDs" (ppr bndrs $$ ppr free_uds $$ ppr dump_dbs) $
(free_uds, dump_dbs, float_all)
where
free_uds = MkUD { ud_binds = free_dbs, ud_calls = free_calls }
bndr_set = mkVarSet bndrs
(free_dbs, dump_dbs, dump_set) = splitDictBinds orig_dbs bndr_set
free_calls = deleteCallsFor bndrs orig_calls
float_all = dump_set `intersectsVarSet` callDetailsFVs free_calls
callsForMe :: Id -> UsageDetails -> (UsageDetails, [CallInfo])
callsForMe fn (MkUD { ud_binds = orig_dbs, ud_calls = orig_calls })
= -- pprTrace ("callsForMe")
-- (vcat [ppr fn,
-- text "Orig dbs =" <+> ppr (_dictBindBndrs orig_dbs),
-- text "Orig calls =" <+> ppr orig_calls,
-- text "Dep set =" <+> ppr dep_set,
-- text "Calls for me =" <+> ppr calls_for_me]) $
(uds_without_me, calls_for_me)
where
uds_without_me = MkUD { ud_binds = orig_dbs, ud_calls = delVarEnv orig_calls fn }
calls_for_me = case lookupVarEnv orig_calls fn of
Nothing -> []
Just (CIS _ calls) -> filter_dfuns (Map.toList calls)
dep_set = foldlBag go (unitVarSet fn) orig_dbs
go dep_set (db,fvs) | fvs `intersectsVarSet` dep_set
= extendVarSetList dep_set (bindersOf db)
| otherwise = dep_set
-- Note [Specialisation of dictionary functions]
filter_dfuns | isDFunId fn = filter ok_call
| otherwise = \cs -> cs
ok_call (_, (_,fvs)) = not (fvs `intersectsVarSet` dep_set)
----------------------
splitDictBinds :: Bag DictBind -> IdSet -> (Bag DictBind, Bag DictBind, IdSet)
-- Returns (free_dbs, dump_dbs, dump_set)
splitDictBinds dbs bndr_set
= foldlBag split_db (emptyBag, emptyBag, bndr_set) dbs
-- Important that it's foldl not foldr;
-- we're accumulating the set of dumped ids in dump_set
where
split_db (free_dbs, dump_dbs, dump_idset) db@(bind, fvs)
| dump_idset `intersectsVarSet` fvs -- Dump it
= (free_dbs, dump_dbs `snocBag` db,
extendVarSetList dump_idset (bindersOf bind))
| otherwise -- Don't dump it
= (free_dbs `snocBag` db, dump_dbs, dump_idset)
----------------------
deleteCallsMentioning :: VarSet -> CallDetails -> CallDetails
-- Remove calls *mentioning* bs
deleteCallsMentioning bs calls
= mapVarEnv filter_calls calls
where
filter_calls :: CallInfoSet -> CallInfoSet
filter_calls (CIS f calls) = CIS f (Map.filter keep_call calls)
keep_call (_, fvs) = not (fvs `intersectsVarSet` bs)
deleteCallsFor :: [Id] -> CallDetails -> CallDetails
-- Remove calls *for* bs
deleteCallsFor bs calls = delVarEnvList calls bs
{-
************************************************************************
* *
\subsubsection{Boring helper functions}
* *
************************************************************************
-}
newtype SpecM a = SpecM (State SpecState a)
data SpecState = SpecState {
spec_uniq_supply :: UniqSupply,
spec_module :: Module,
spec_dflags :: DynFlags
}
instance Functor SpecM where
fmap = liftM
instance Applicative SpecM where
pure = return
(<*>) = ap
instance Monad SpecM where
SpecM x >>= f = SpecM $ do y <- x
case f y of
SpecM z ->
z
return x = SpecM $ return x
fail str = SpecM $ fail str
instance MonadUnique SpecM where
getUniqueSupplyM
= SpecM $ do st <- get
let (us1, us2) = splitUniqSupply $ spec_uniq_supply st
put $ st { spec_uniq_supply = us2 }
return us1
getUniqueM
= SpecM $ do st <- get
let (u,us') = takeUniqFromSupply $ spec_uniq_supply st
put $ st { spec_uniq_supply = us' }
return u
instance HasDynFlags SpecM where
getDynFlags = SpecM $ liftM spec_dflags get
instance HasModule SpecM where
getModule = SpecM $ liftM spec_module get
runSpecM :: DynFlags -> Module -> SpecM a -> CoreM a
runSpecM dflags this_mod (SpecM spec)
= do us <- getUniqueSupplyM
let initialState = SpecState {
spec_uniq_supply = us,
spec_module = this_mod,
spec_dflags = dflags
}
return $ evalState spec initialState
mapAndCombineSM :: (a -> SpecM (b, UsageDetails)) -> [a] -> SpecM ([b], UsageDetails)
mapAndCombineSM _ [] = return ([], emptyUDs)
mapAndCombineSM f (x:xs) = do (y, uds1) <- f x
(ys, uds2) <- mapAndCombineSM f xs
return (y:ys, uds1 `plusUDs` uds2)
extendTvSubstList :: SpecEnv -> [(TyVar,Type)] -> SpecEnv
extendTvSubstList env tv_binds
= env { se_subst = CoreSubst.extendTvSubstList (se_subst env) tv_binds }
substTy :: SpecEnv -> Type -> Type
substTy env ty = CoreSubst.substTy (se_subst env) ty
substCo :: SpecEnv -> Coercion -> Coercion
substCo env co = CoreSubst.substCo (se_subst env) co
substBndr :: SpecEnv -> CoreBndr -> (SpecEnv, CoreBndr)
substBndr env bs = case CoreSubst.substBndr (se_subst env) bs of
(subst', bs') -> (env { se_subst = subst' }, bs')
substBndrs :: SpecEnv -> [CoreBndr] -> (SpecEnv, [CoreBndr])
substBndrs env bs = case CoreSubst.substBndrs (se_subst env) bs of
(subst', bs') -> (env { se_subst = subst' }, bs')
cloneBindSM :: SpecEnv -> CoreBind -> SpecM (SpecEnv, SpecEnv, CoreBind)
-- Clone the binders of the bind; return new bind with the cloned binders
-- Return the substitution to use for RHSs, and the one to use for the body
cloneBindSM env@(SE { se_subst = subst, se_interesting = interesting }) (NonRec bndr rhs)
= do { us <- getUniqueSupplyM
; let (subst', bndr') = CoreSubst.cloneIdBndr subst us bndr
interesting' | interestingDict env rhs
= interesting `extendVarSet` bndr'
| otherwise = interesting
; return (env, env { se_subst = subst', se_interesting = interesting' }
, NonRec bndr' rhs) }
cloneBindSM env@(SE { se_subst = subst, se_interesting = interesting }) (Rec pairs)
= do { us <- getUniqueSupplyM
; let (subst', bndrs') = CoreSubst.cloneRecIdBndrs subst us (map fst pairs)
env' = env { se_subst = subst'
, se_interesting = interesting `extendVarSetList`
[ v | (v,r) <- pairs, interestingDict env r ] }
; return (env', env', Rec (bndrs' `zip` map snd pairs)) }
newDictBndr :: SpecEnv -> CoreBndr -> SpecM CoreBndr
-- Make up completely fresh binders for the dictionaries
-- Their bindings are going to float outwards
newDictBndr env b = do { uniq <- getUniqueM
; let n = idName b
ty' = substTy env (idType b)
; return (mkUserLocal (nameOccName n) uniq ty' (getSrcSpan n)) }
newSpecIdSM :: Id -> Type -> SpecM Id
-- Give the new Id a similar occurrence name to the old one
newSpecIdSM old_id new_ty
= do { uniq <- getUniqueM
; let name = idName old_id
new_occ = mkSpecOcc (nameOccName name)
new_id = mkUserLocal new_occ uniq new_ty (getSrcSpan name)
; return new_id }
{-
Old (but interesting) stuff about unboxed bindings
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
What should we do when a value is specialised to a *strict* unboxed value?
map_*_* f (x:xs) = let h = f x
t = map f xs
in h:t
Could convert let to case:
map_*_Int# f (x:xs) = case f x of h# ->
let t = map f xs
in h#:t
This may be undesirable since it forces evaluation here, but the value
may not be used in all branches of the body. In the general case this
transformation is impossible since the mutual recursion in a letrec
cannot be expressed as a case.
There is also a problem with top-level unboxed values, since our
implementation cannot handle unboxed values at the top level.
Solution: Lift the binding of the unboxed value and extract it when it
is used:
map_*_Int# f (x:xs) = let h = case (f x) of h# -> _Lift h#
t = map f xs
in case h of
_Lift h# -> h#:t
Now give it to the simplifier and the _Lifting will be optimised away.
The benefit is that we have given the specialised "unboxed" values a
very simple lifted semantics and then leave it up to the simplifier to
optimise it --- knowing that the overheads will be removed in nearly
all cases.
In particular, the value will only be evaluated in the branches of the
program which use it, rather than being forced at the point where the
value is bound. For example:
filtermap_*_* p f (x:xs)
= let h = f x
t = ...
in case p x of
True -> h:t
False -> t
==>
filtermap_*_Int# p f (x:xs)
= let h = case (f x) of h# -> _Lift h#
t = ...
in case p x of
True -> case h of _Lift h#
-> h#:t
False -> t
The binding for h can still be inlined in the one branch and the
_Lifting eliminated.
Question: When won't the _Lifting be eliminated?
Answer: When they are at the top-level (where it is necessary) or when
inlining would duplicate work (or possibly code depending on
options). However, the _Lifting will still be eliminated if the
strictness analyser deems the lifted binding strict.
-}
|
acowley/ghc
|
compiler/specialise/Specialise.hs
|
bsd-3-clause
| 91,336
| 1
| 22
| 26,681
| 11,002
| 5,953
| 5,049
| -1
| -1
|
{-# LANGUAGE NamedWildCards #-}
module NamedWildcardInTypeFamilyInstanceLHS where
type family F a where
F _t = Int
|
acowley/ghc
|
testsuite/tests/partial-sigs/should_fail/NamedWildcardInTypeFamilyInstanceLHS.hs
|
bsd-3-clause
| 118
| 0
| 6
| 19
| 20
| 13
| 7
| -1
| -1
|
{-# LANGUAGE DefaultSignatures #-}
{-# LANGUAGE DeriveAnyClass #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeFamilies #-}
module T13272 where
import GHC.Generics
class TypeName a where
typeName :: forall proxy.
proxy a -> String
default typeName :: forall proxy d f.
(Generic a, Rep a ~ D1 d f, Datatype d)
=> proxy a -> String
typeName _ = gtypeName $ from (undefined :: a)
gtypeName :: Datatype d => D1 d f p -> String
gtypeName = datatypeName
data T a = MkT a
deriving (Generic, TypeName)
|
ezyang/ghc
|
testsuite/tests/deriving/should_compile/T13272.hs
|
bsd-3-clause
| 652
| 0
| 12
| 178
| 163
| 89
| 74
| 19
| 1
|
{-# LANGUAGE GADTs, TypeFamilies #-}
module T3851 where
type family TF a :: * -> *
type instance TF () = App (Equ ())
data Equ ix ix' where Refl :: Equ ix ix
data App f x = App (f x)
-- does not typecheck in 6.12.1 (but works in 6.10.4)
bar :: TF () () -> ()
bar (App Refl) = ()
-- does typecheck in 6.12.1 and 6.10.4
ar :: App (Equ ()) () -> ()
ar (App Refl) = ()
------------------
data family DF a :: * -> *
data instance DF () a = D (App (Equ ()) a)
bar_df :: DF () () -> ()
bar_df (D (App Refl)) = ()
|
ghc-android/ghc
|
testsuite/tests/indexed-types/should_compile/T3851.hs
|
bsd-3-clause
| 537
| 0
| 11
| 148
| 243
| 132
| 111
| 14
| 1
|
{-# LANGUAGE ExistentialQuantification #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE QuasiQuotes #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE UndecidableInstances #-}
{-# OPTIONS_GHC -Wno-unused-top-binds #-}
import PgInit
import Data.Aeson
import qualified Data.ByteString as BS
import Data.IntMap (IntMap)
import Data.Fixed
import qualified Data.Text as T
import Data.Time
import Test.QuickCheck
-- FIXME: should probably be used?
-- import qualified ArrayAggTest
import qualified CompositeTest
import qualified CustomPersistFieldTest
import qualified CustomPrimaryKeyReferenceTest
import qualified DataTypeTest
import qualified EmbedOrderTest
import qualified EmbedTest
import qualified EmptyEntityTest
import qualified EquivalentTypeTestPostgres
import qualified HtmlTest
import qualified JSONTest
import qualified LargeNumberTest
import qualified MaxLenTest
import qualified MigrationColumnLengthTest
import qualified MigrationOnlyTest
import qualified MpsNoPrefixTest
import qualified PersistentTest
import qualified PersistUniqueTest
import qualified PrimaryTest
import qualified RawSqlTest
import qualified ReadWriteTest
import qualified Recursive
import qualified RenameTest
import qualified SumTypeTest
import qualified TransactionLevelTest
import qualified TreeTest
import qualified UniqueTest
import qualified UpsertTest
import qualified CustomConstraintTest
type Tuple = (,)
-- Test lower case names
share [mkPersist persistSettings, mkMigrate "dataTypeMigrate"] [persistLowerCase|
DataTypeTable no-json
text Text
textMaxLen Text maxlen=100
bytes ByteString
bytesTextTuple (Tuple ByteString Text)
bytesMaxLen ByteString maxlen=100
int Int
intList [Int]
intMap (IntMap Int)
double Double
bool Bool
day Day
pico Pico
time TimeOfDay
utc UTCTime
jsonb Value
|]
instance Arbitrary DataTypeTable where
arbitrary = DataTypeTable
<$> arbText -- text
    <*> (T.take 100 <$> arbText) -- textMaxLen
<*> arbitrary -- bytes
<*> liftA2 (,) arbitrary arbText -- bytesTextTuple
<*> (BS.take 100 <$> arbitrary) -- bytesMaxLen
<*> arbitrary -- int
<*> arbitrary -- intList
<*> arbitrary -- intMap
<*> arbitrary -- double
<*> arbitrary -- bool
<*> arbitrary -- day
<*> arbitrary -- pico
    <*> (arbitrary) -- time
<*> (truncateUTCTime =<< arbitrary) -- utc
<*> arbitrary -- value
setup :: MonadIO m => Migration -> ReaderT SqlBackend m ()
setup migration = do
printMigration migration
runMigrationUnsafe migration
main :: IO ()
main = do
runConn $ do
mapM_ setup
[ PersistentTest.testMigrate
, PersistentTest.noPrefixMigrate
, EmbedTest.embedMigrate
, EmbedOrderTest.embedOrderMigrate
, LargeNumberTest.numberMigrate
, UniqueTest.uniqueMigrate
, MaxLenTest.maxlenMigrate
, Recursive.recursiveMigrate
, CompositeTest.compositeMigrate
, TreeTest.treeMigrate
, PersistUniqueTest.migration
, RenameTest.migration
, CustomPersistFieldTest.customFieldMigrate
, PrimaryTest.migration
, CustomPrimaryKeyReferenceTest.migration
, MigrationColumnLengthTest.migration
, TransactionLevelTest.migration
]
PersistentTest.cleanDB
hspec $ do
RenameTest.specsWith db
DataTypeTest.specsWith db
(Just (runMigrationSilent dataTypeMigrate))
[ TestFn "text" dataTypeTableText
, TestFn "textMaxLen" dataTypeTableTextMaxLen
, TestFn "bytes" dataTypeTableBytes
, TestFn "bytesTextTuple" dataTypeTableBytesTextTuple
, TestFn "bytesMaxLen" dataTypeTableBytesMaxLen
, TestFn "int" dataTypeTableInt
, TestFn "intList" dataTypeTableIntList
, TestFn "intMap" dataTypeTableIntMap
, TestFn "bool" dataTypeTableBool
, TestFn "day" dataTypeTableDay
, TestFn "time" (DataTypeTest.roundTime . dataTypeTableTime)
, TestFn "utc" (DataTypeTest.roundUTCTime . dataTypeTableUtc)
, TestFn "jsonb" dataTypeTableJsonb
]
[ ("pico", dataTypeTablePico) ]
dataTypeTableDouble
HtmlTest.specsWith
db
(Just (runMigrationSilent HtmlTest.htmlMigrate))
EmbedTest.specsWith db
EmbedOrderTest.specsWith db
LargeNumberTest.specsWith db
UniqueTest.specsWith db
MaxLenTest.specsWith db
Recursive.specsWith db
SumTypeTest.specsWith db (Just (runMigrationSilent SumTypeTest.sumTypeMigrate))
MigrationOnlyTest.specsWith db
(Just
$ runMigrationSilent MigrationOnlyTest.migrateAll1
>> runMigrationSilent MigrationOnlyTest.migrateAll2
)
PersistentTest.specsWith db
ReadWriteTest.specsWith db
PersistentTest.filterOrSpecs db
RawSqlTest.specsWith db
UpsertTest.specsWith
db
UpsertTest.Don'tUpdateNull
UpsertTest.UpsertPreserveOldKey
MpsNoPrefixTest.specsWith db
EmptyEntityTest.specsWith db (Just (runMigrationSilent EmptyEntityTest.migration))
CompositeTest.specsWith db
TreeTest.specsWith db
PersistUniqueTest.specsWith db
PrimaryTest.specsWith db
CustomPersistFieldTest.specsWith db
CustomPrimaryKeyReferenceTest.specsWith db
MigrationColumnLengthTest.specsWith db
EquivalentTypeTestPostgres.specs
TransactionLevelTest.specsWith db
JSONTest.specs
CustomConstraintTest.specs db
-- FIXME: not used, probably should?
-- ArrayAggTest.specs db
|
naushadh/persistent
|
persistent-postgresql/test/main.hs
|
mit
| 5,706
| 0
| 23
| 1,264
| 968
| 515
| 453
| 143
| 1
|
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE QuasiQuotes #-}
{-
Stolen straight from virtual-dom:
virtual-dom bindings demo, rendering a large pixel grid with a bouncing red
square. The step and patch are calculated asynchronously, and the update is
batched in an animation frame.
-}
module Shakespeare.Dynamic.Render (
renderDom
, renderDom'
, runDom
, runDomI
, createContainer
) where
import Control.Monad
import qualified Data.Sequence as S
import Prelude hiding (div)
import GHCJS.Foreign
import GHCJS.Foreign.QQ
import GHCJS.VDOM
import Shakespeare.Dynamic.Adapter
import qualified VDOM.Adapter as VDA
import Pipes
-- import Pipes.Concurrent -- Not used because of stm-notify
import Control.Concurrent.STM.Notify
import Shakespeare.Ophelia.Parser.VDOM.Types
-- | Run dom (not forked) forever. This receives the current dom
-- and then renders it again each time it changes
runDomI :: DOMNode -- ^ Container to render the dom in
-> IO () -- ^ Action to run after the FIRST render
-> STMEnvelope (LiveVDom VDA.JSEvent) -- ^ dom to run and watch for changes
-> IO ()
runDomI container postRun envLD = do
vdm <- recvIO envLD
vn' <- renderDom container emptyDiv vdm -- Render the initial dom
_ <- atAnimationFrame postRun
foldOnChangeWith waitForDom envLD (renderDom container) vn' -- pass the rendered dom into the fold that
-- renders the dom when it changes
-- | Run the dom inside a container; like 'runDomI' but for a dom that does not change
runDom :: DOMNode
-> IO ()
-> (LiveVDom VDA.JSEvent)
-> IO ()
runDom c fi e = runDomI c fi $ return e
-- | Given a container, the last rendering, and a current rendering,
-- diff the new rendering from the old and return the new model of the dom
renderDom :: DOMNode -> VNode -> (LiveVDom VDA.JSEvent) -> IO VNode
renderDom container old ld = do
let vna = toProducer ld
vnaL <- recvIO vna
vna' <- if S.length vnaL > 1
then fail "Having more than one node as the parent is illegal"
else return $ S.index vnaL 0
new <- toVNode vna'
let pa = diff old new
redraw container pa
return new
-- | create an empty div to run dom inside of and add it to the
-- body of the document
createContainer :: IO DOMNode
createContainer = do
container <- [js| document.createElement('div') |] :: IO DOMNode
[js_| document.body.appendChild(`container); |] :: IO ()
return container
-- | Create a pipe to render VDom whenever it's updated
renderDom' :: DOMNode           -- ^ Container of the vdom
-> VNode -- ^ Initial VDom
-> Consumer (VDA.VNodeAdapter, IO ()) IO () -- ^ Consumer to push VDom to with a finalizer
renderDom' container initial = do
(vna, f) <- await
newNode <- liftIO $ toVNode vna
let pa = diff initial newNode
_ <- liftIO $ do
redraw container pa
f
renderDom' container newNode
-- | Redraw the dom using a patch
redraw :: DOMNode -> Patch -> IO ()
redraw node pa = pa `seq` atAnimationFrame (patch node pa)
-- | Use the window requestAnimation frame
-- to run some IO action when able to
atAnimationFrame :: IO () -> IO ()
atAnimationFrame m = do
cb <- syncCallback NeverRetain False m
[js_| window.requestAnimationFrame(`cb); |]
|
plow-technologies/shakespeare-dynamic
|
ghcjs-shakespeare-dynamic/src/Shakespeare/Dynamic/Render.hs
|
mit
| 3,474
| 0
| 11
| 966
| 677
| 353
| 324
| 66
| 2
|
module Main where
import Control.Monad (forever)
import Data.Char (toLower)
import Data.Maybe (isJust)
import Data.List (intersperse)
import System.Exit (exitSuccess)
import System.Random (randomRIO)
type WordList = [String]
allWords :: IO WordList
allWords = do
dict <- readFile "data/dict.txt"
return (lines dict)
minWordLength :: Int
minWordLength = 5
maxWordLength :: Int
maxWordLength = 9
gameWords :: IO WordList
gameWords = do
aw <- allWords
return (filter gameLength aw)
where gameLength w =
let l = length (w :: String)
in l > minWordLength && l < maxWordLength
randomWord :: WordList -> IO String
randomWord wl = do
randomIndex <- randomRIO (0,100)
return $ wl !! randomIndex
randomWord' :: IO String
randomWord' = gameWords >>= randomWord
-- Part two, coding puzzle
data Puzzle = Puzzle String [Maybe Char] [Char]
instance Show Puzzle where
show (Puzzle _ discovered guessed) =
(intersperse ' ' $ fmap renderPuzzleChar discovered)
++ "Guessed so far: " ++ guessed
freshPuzzle :: String -> Puzzle
freshPuzzle word = Puzzle word (map (\x -> Nothing) word) []
charInWord :: Puzzle -> Char -> Bool
charInWord (Puzzle word _ _) letter = letter `elem` word
alreadyGuessed :: Puzzle -> Char -> Bool
alreadyGuessed (Puzzle _ guessed _) letter = foldr f False guessed
where f _ True = True
f (Just a) _ = if letter == a then True else False
f (Nothing) result = result
renderPuzzleChar :: Maybe Char -> Char
renderPuzzleChar (Just a) = a
renderPuzzleChar Nothing = '_'
fillInCharacter :: Puzzle -> Char -> Puzzle
fillInCharacter (Puzzle word filledInSoFar s) c =
Puzzle word newFilledInSoFar newGuessed
where zipper guessed wordChar guessChar =
if wordChar == guessed
then Just wordChar
else guessChar
newFilledInSoFar =
zipWith (zipper c) word filledInSoFar
newGuessed = if amountOfFilled newFilledInSoFar > amountOfFilled filledInSoFar
then s
else c:s
amountOfFilled = foldr count 0
count (Just a) acc = acc + 1
count Nothing acc = acc
handleGuess :: Puzzle -> Char -> IO Puzzle
handleGuess puzzle guess = do
putStrLn $ "Your guess was: " ++ [guess]
case (charInWord puzzle guess
, alreadyGuessed puzzle guess) of
(_, True) -> do
putStrLn "Already guessed that\
\ character, pick something else!"
return puzzle
(True, _) -> do
putStrLn "This character was in word,\
\ filling in the word accordingly!"
return (fillInCharacter puzzle guess)
(False, _) -> do
putStrLn "This character wasn't in\
\ the word, try again!"
return (fillInCharacter puzzle guess)
gameOver :: Puzzle -> IO ()
gameOver (Puzzle wordToGuess _ guessed) =
if (length guessed) > 7 then
do
putStrLn "You lose!"
putStrLn $ "The word was: " ++ wordToGuess
exitSuccess
else return ()
gameWin :: Puzzle -> IO ()
gameWin (Puzzle _ filledInSoFar _) =
if all isJust filledInSoFar
then
do putStrLn "You win!"
exitSuccess
else return ()
runGame :: Puzzle -> IO ()
runGame puzzle = forever $ do
gameOver puzzle
gameWin puzzle
putStrLn $ "Current puzzle is: " ++ show puzzle
putStr "Guess a letter: "
guess <- getLine
case guess of
[c] -> handleGuess puzzle c >>= runGame
_ -> putStrLn "Your guess must\
\ be a single character"
main :: IO ()
main = do
word <- randomWord'
let puzzle = freshPuzzle (fmap toLower word)
runGame puzzle
|
raventid/coursera_learning
|
haskell/chapter13/hangman/src/Main.hs
|
mit
| 3,572
| 0
| 14
| 903
| 1,134
| 567
| 567
| 104
| 4
|
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE TemplateHaskell #-}
module StringCompression(runTests) where
import Data.List
import Test.QuickCheck
main :: IO ()
main = interact run
run :: String -> String
run input = output where
output = concatMap print' $ group input
print' group' = case group' of
[] -> ""
[_] -> group'
letter:_ -> letter : show (length group')
prop_run = run "abcaaabbb" == "abca3b3"
prop_run1 = run "abcd" == "abcd"
prop_run2 = run "aaabaaaaccaaaaba" == "a3ba4c2a4ba"
return []
runTests = $quickCheckAll
|
alexander-matsievsky/HackerRank
|
All_Domains/Functional_Programming/Recursion/src/StringCompression.hs
|
mit
| 583
| 0
| 13
| 143
| 175
| 90
| 85
| 19
| 3
|
module XBattBar.Types (Options(..), Position(..), Orientation(..), XContext(..), ExtContext(..), XWidget(..)) where
import Graphics.X11.Types (expose, Window, EventType)
import Graphics.X11.Xlib.Event (XEventPtr)
import Graphics.X11.Xlib.Font (FontStruct)
import Graphics.X11.Xlib.Window (mapWindow)
import Graphics.X11.Xlib.Extras (unmapWindow)
import Graphics.X11.Xlib.Types hiding (Position)
data Position = Top | Bottom | Left | Right
deriving (Eq, Show)
-- | progress bar variants
data Orientation = Vertical | Horizontal
-- | command-line options map to this
data Options = Options {
onTop :: Bool,
thickness :: Int,
interval :: Int,
chargeColorAC :: String,
dischargeColorAC :: String,
chargeColorBat :: String,
dischargeColorBat :: String,
position :: Position
} deriving (Show)
-- | basic X11 context
data XContext = XContext {
dpy :: Display,
screen :: ScreenNumber,
parent :: Window
}
-- | extended X11 context
data ExtContext = ExtContext {
window :: Window,
geom :: Rectangle,
gc :: GC
}
-- | XWidget is an X11 window with some context attached
class XWidget a where
xContext :: a -> XContext
widgetContext :: a -> ExtContext
-- | makes the actual widget drawing
drawWidget :: a -> IO ()
-- | display widget on screen
displayWidget :: a -> IO ()
-- | hide widget
hideWidget :: a -> IO ()
-- | handle X11 events for the widget
handleWidgetEvent :: a -> XEventPtr -> EventType -> IO ()
displayWidget a = do
let window' = window $ widgetContext a
dpy' = dpy $ xContext a
mapWindow dpy' window'
hideWidget a = do
let window' = window $ widgetContext a
dpy' = dpy $ xContext a
unmapWindow dpy' window'
handleWidgetEvent a ev et | et == expose = drawWidget a
| otherwise = return ()
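-- A hypothetical minimal instance, added here only as an illustration of the
-- 'XWidget' class above; it is not part of the original module, and 'Bar'
-- with its trivial 'drawWidget' is a made-up placeholder.
data Bar = Bar XContext ExtContext
instance XWidget Bar where
    xContext (Bar xc _) = xc
    widgetContext (Bar _ ec) = ec
    drawWidget _ = return ()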
|
polachok/xbattbar
|
src/XBattBar/Types.hs
|
mit
| 2,430
| 0
| 13
| 1,006
| 510
| 296
| 214
| 45
| 0
|
module Proxy.Math.Rectangle where
import Proxy.Math.Interval
import Proxy.Math.Line
data Rectangle a = Rectangle {xI,yI :: (Interval a)}
deriving (Show,Eq)
height :: (Num a) => Rectangle a -> a
height = ilength.yI
width :: (Num a) => Rectangle a -> a
width = ilength.xI
area :: (Num a) => Rectangle a -> a
area = (\r -> width r * height r)
vertex :: (Num a,Integral b) => b -> Rectangle a -> Point a
vertex n r = case n `mod` 4 of
0 -> mkPointxFirst (inf.xI $ r) (inf.yI $ r)
1 -> mkPointxFirst (inf.xI $ r) (sup.yI $ r)
2 -> mkPointxFirst (sup.xI $ r) (sup.yI $ r)
3 -> mkPointxFirst (sup.xI $ r) (inf.yI $ r)
edge :: (Num a,Integral b) => b -> Rectangle a -> LineSeg
edge n r = mkLineSeg (vertex n r) (vertex (n+1) r)
isInRect :: Point a -> Rectangle a -> Bool
isInRect p r = x p `isIn` xI r && y p `isIn` yI r
|
mapinguari/SC_HS_Proxy
|
src/OLD/Rectangle.hs
|
mit
| 858
| 0
| 11
| 210
| 450
| 238
| 212
| 21
| 4
|
-- | Rewrite the staments in a Lua program, to add explicit reference
-- allocation, reference reads, and writes.
module Galua.Micro.Translate.ExplicitRefs (explicitBlocks) where
import Data.Set (Set)
import qualified Data.Set as Set
import qualified Data.Vector as Vector
import Data.Map (Map)
import qualified Data.Map as Map
import Control.Monad(liftM,ap)
import qualified Galua.Code as Code
import Galua.Micro.AST
import Galua.Micro.Translate.Monad (M,generate,inBlock)
import qualified Galua.Micro.Translate.Monad as M
--------------------------------------------------------------------------------
explicitBlocks :: Map BlockName (Set Code.Reg) ->
Map BlockName Block ->
Map BlockName Block
explicitBlocks refs blocks = generate $ mapM_ oneBlock $ Map.toList blocks
where
oneBlock (name, stmts) =
let rs = Map.findWithDefault Set.empty name refs
in inBlock name (runRefs refs rs (doBlock stmts))
doBlock b = do mapM_ refStmt (Vector.toList (blockBody b))
refEndStmt (blockEnd b)
--------------------------------------------------------------------------------
-- This is a state monad for the set of references. The state can
-- change during 'newRef'. In the case of compiling NewClosure it
-- can occur that we need to assign to a reference immediately after
-- creating it.
newtype R a = R (Map BlockName (Set Code.Reg) ->
Set Code.Reg ->
M (a, Set Code.Reg))
instance Functor R where
fmap = liftM
instance Applicative R where
pure = doM . pure
(<*>) = ap
instance Monad R where
R m >>= f = R (\rs refs -> do (a,refs1) <- m rs refs
let R m1 = f a
m1 rs refs1)
reader :: (Set Code.Reg -> M a) -> R a
reader f = R $ \_ refs -> do x <- f refs
return (x,refs)
isRef :: Reg -> R Bool
isRef r = case r of
Reg r' -> reader (\refs -> return (r' `Set.member` refs))
_ -> return False
runRefs :: Map BlockName (Set Code.Reg) -> Set Code.Reg -> R a -> M a
runRefs allRefs refs (R f) = fmap fst (f allRefs refs)
doM :: M a -> R a
doM m = reader (const m)
ifRef :: Reg -> R a -> R a -> R a
ifRef r m1 m2 =
do yes <- isRef r
if yes then m1 else m2
emit :: Stmt -> R ()
emit s = doM (M.emit s)
emitEnd :: EndStmt -> R ()
emitEnd s = doM (M.emitEnd s)
newTMP :: R Reg
newTMP = doM (M.newPhaseTMP 2)
newRef :: IsExpr e => Reg -> e -> R ()
newRef r' e = R $ \_ refs ->
do let (r,refs') = case r' of
Reg cr -> (Ref cr, Set.insert cr refs)
Ref {} -> (r', refs)
TMP {} -> error "newRef: result in TMP"
M.emit (NewRef r (toExpr e))
return ((), refs')
class IsRef t where
toRefExpr :: t -> Expr
instance IsRef Reg where
toRefExpr r = case r of
Reg cr -> EReg (Ref cr)
TMP {} -> error "toRefExpr: TMP"
Ref {} -> EReg r
instance IsRef Expr where
toRefExpr expr =
case expr of
EReg r -> toRefExpr r
ELit {} -> error "toRefExpr: ELit"
EUp {} -> expr
readRef :: IsRef e => Reg -> e -> R ()
readRef r e = emit (ReadRef r (toRefExpr e))
writeRef :: (IsRef e1, IsExpr e2) => e1 -> e2 -> R ()
writeRef e1 e2 = emit (WriteRef (toRefExpr e1) (toExpr e2))
endBlock :: BlockName -> R BlockName
endBlock tgt =
do newRefs <- shouldBeRefs
if Set.null newRefs
then return tgt
else doM $ M.inNewBlock_ $
do let toRef r = M.emit (NewRef (Ref r) (toExpr r))
mapM_ toRef newRefs
M.emitEnd (Goto tgt)
where
shouldBeRefs =
R $ \allRs nowRs ->
return ( Set.difference (Map.findWithDefault Set.empty tgt allRs) nowRs
, nowRs
)
--------------------------------------------------------------------------------
readReg :: Reg -> R Reg
readReg reg =
ifRef reg (do tmp <- newTMP
readRef tmp reg
return tmp)
(return reg)
setReg :: Reg -> (Reg -> Stmt) -> R ()
setReg r stmt =
ifRef r (do tmp <- newTMP
emit (stmt tmp)
writeRef r tmp)
(emit (stmt r))
readExpr :: Expr -> R Expr
readExpr expr =
case expr of
EReg r -> EReg <$> readReg r
EUp u -> do tmp <- newTMP
readRef tmp (EUp u)
return (EReg tmp)
ELit l -> return (ELit l)
readProp :: Prop -> R Prop
readProp (Prop p es) = Prop p <$> mapM readExpr es
refEndStmt :: BlockStmt EndStmt -> R ()
refEndStmt stmt =
case stmtCode stmt of
Raise e ->
do e' <- readExpr e
emitEnd $ Raise e'
Goto l -> do l' <- endBlock l
emitEnd (Goto l')
Case e as d ->
do e' <- readExpr e
let alt (v,b) = do b' <- endBlock b
return (v,b')
as' <- mapM alt as
d' <- mapM endBlock d
emitEnd $ Case e' as' d'
If p t f ->
do p' <- readProp p
t' <- endBlock t
f' <- endBlock f
emitEnd $ If p' t' f'
TailCall f ->
do f' <- readReg f
emitEnd $ TailCall f'
Return ->
do emitEnd Return
refStmt :: BlockStmt Stmt -> R ()
refStmt stmt =
case stmtCode stmt of
Assign r e ->
do e' <- readExpr e
ifRef r (writeRef r e') (emit (Assign r e'))
SetUpVal ix r ->
do r' <- readReg r
writeRef (EUp ix) r'
NewTable r -> setReg r $ \r' -> NewTable r'
LookupTable r tab ix ->
do ix' <- readExpr ix
tab' <- readReg tab
setReg r $ \r' -> LookupTable r' tab' ix'
SetTable tab ix val ->
do tab' <- readReg tab
ix' <- readExpr ix
val' <- readExpr val
emit $ SetTable tab' ix' val'
SetTableList tab ix ->
do setReg tab $ \tab' -> SetTableList tab' ix
GetMeta r e ->
do e' <- readExpr e
setReg r $ \r' -> GetMeta r' e'
Call f ->
do f' <- readReg f
emit $ Call f'
Drop list n ->
do emit $ Drop list n
Append list xs ->
do xs' <- traverse readExpr xs
emit $ Append list xs'
SetList list xs ->
do xs' <- traverse readExpr xs
emit $ SetList list xs'
AssignListReg xs ys ->
emit $ AssignListReg xs ys
IndexList r list ix ->
do setReg r $ \r' -> IndexList r' list ix
Arith1 r op e1 ->
do e1' <- readExpr e1
setReg r $ \r' -> Arith1 r' op e1'
Arith2 r op e1 e2 ->
do e2' <- readExpr e2
e1' <- readExpr e1
setReg r $ \r' -> Arith2 r' op e1' e2'
Comment x -> emit (Comment x)
CloseStack _ -> return () -- It was there only for analysis
NewClosure r c f ->
do let us = funcUpvalExprs f
mapM_ upVal us
setReg r $ \r' -> NewClosure r' c f
where
upVal u =
case u of
EUp _ -> return ()
EReg r' -> ifRef r' (return ()) (newRef r' u')
where u' = if r' == r then ELit LNil else u
ELit _ -> error "upVal: ELit"
NewRef _ _ -> error "NewRef: wrong phase"
ReadRef _ _ -> error "ReadRef: wrong phase"
WriteRef _ _ -> error "WriteRef: wrong phase"
|
GaloisInc/galua
|
galua-jit/src/Galua/Micro/Translate/ExplicitRefs.hs
|
mit
| 7,342
| 0
| 18
| 2,542
| 2,817
| 1,338
| 1,479
| 199
| 24
|
{-# LANGUAGE GADTs, CPP, Trustworthy, TemplateHaskell, TupleSections, ViewPatterns, DeriveDataTypeable, ScopedTypeVariables #-}
module Main where
import JavaScript.Web.Worker.Extras
import qualified Tarefa6_li1g100 as G100
main :: IO ()
main = runSyncWorker $ \(inp::[String],player::Int,ticks::Int) -> G100.bot inp player ticks
|
hpacheco/HAAP
|
examples/gameworker/Worker4.hs
|
mit
| 341
| 0
| 9
| 47
| 75
| 45
| 30
| 6
| 1
|
module Y2017.M01.D26.Exercise where
import Data.Map (Map)
-- below imports available from 1HaskellADay git repository
import Data.Bag
import Y2017.M01.D25.Exercise
{--
So, yesterday we were able to find out what Haskell source files were in a
directory, then, as a bonus, we were also able to drill down into subdirectories.
Great!
Now, today, let's do a frequency analysis of the words of a Haskell file.
--}
wordCounts :: FilePath -> IO (Map String Int)
wordCounts file = undefined
-- wordCounts counts the words of a (Haskell) source file returning a
-- word -> occurrences map.
-- hint: Data.Bag counts occurrences of elements in a collection
-- Point wordCounts at this file. What are the top 5 words in this file?
{-- BONUS -----------------------------------------------------------------
Now, one file doesn't give a good cross section of frequently used words
in the Haskell corpus, so, find a Haskell Corpus, such as the sources of
the GHC libraries, or the 1HaskellADay problem sets and libraries, or your
own sets of Haskell files.
Run wordCounts over those filesets. What are the top 5 words of the combined
files?
--}
wordsCounts :: [FilePath] -> IO (Map String Int)
wordsCounts files = undefined
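{-- One possible sketch of the two functions above (not the intended Data.Bag
solution); it assumes an extra import such as "import qualified Data.Map as Map":

wordCountsSketch :: FilePath -> IO (Map String Int)
wordCountsSketch file = do
  contents <- readFile file
  -- count each word by building a map with (+) as the combining function
  return (Map.fromListWith (+) [ (w, 1) | w <- words contents ])

wordsCountsSketch :: [FilePath] -> IO (Map String Int)
wordsCountsSketch files = fmap (Map.unionsWith (+)) (mapM wordCountsSketch files)
--}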
|
geophf/1HaskellADay
|
exercises/HAD/Y2017/M01/D26/Exercise.hs
|
mit
| 1,223
| 0
| 8
| 207
| 96
| 57
| 39
| 8
| 1
|
{-
-- Tests.hs
-- Contains all the tests for the loop-solver project and some test-tests.
--
-- Fundamentals unceremoniously stolen/borrowed from:
-- https://github.com/spockz/TravisHSTest/blob/master/Tests.hs
-}
import Test.QuickCheck
import Text.Printf
main :: IO ()
main = mapM_ (\(s,a) -> printf "%-25s: " s >> a) tests
{------------------------------------------------------------------------------}
-- Test-test. Shows that a double-reversed list is identical to the identity
prop_doubleReverse :: [Int] -> Bool
prop_doubleReverse s = (reverse . reverse) s == id s
-- Test-test. Intended to fail
prop_intentionalFailure :: [Int] -> Bool
prop_intentionalFailure s = reverse s == id s
{------------------------------------------------------------------------------}
tests :: [(String, IO ())]
tests = [("doubleReverse/id", quickCheck prop_doubleReverse),
("intentionalFailure", quickCheck prop_intentionalFailure)]
|
DrSLDR/loop-solver
|
Tests.hs
|
mit
| 932
| 0
| 9
| 117
| 174
| 96
| 78
| 11
| 1
|
{- |
Module : ./RDF/Parse.hs
Copyright : (c) Felix Gabriel Mance
License : GPLv2 or higher, see LICENSE.txt
Maintainer : f.mance@jacobs-university.de
Stability : provisional
Portability : portable
RDF syntax parser
-}
module RDF.Parse where
import Common.Parsec
import Common.Lexer
import Common.AnnoParser (newlineOrEof)
import Common.Token (criticalKeywords)
import Common.Id
import qualified Common.GlobalAnnotations as GA (PrefixMap)
import OWL2.AS
import OWL2.Parse hiding (stringLiteral, literal, skips, uriP)
import RDF.AS
import RDF.Symbols
import Data.Either
import qualified Data.Map as Map
import Text.ParserCombinators.Parsec
uriP :: CharParser st QName
uriP =
skips $ try $ checkWithUsing showQN uriQ $ \ q ->
not (null $ namePrefix q) || notElem (localPart q) criticalKeywords
-- * hets symbols parser
rdfEntityType :: CharParser st RDFEntityType
rdfEntityType = choice $ map (\ f -> keyword (show f) >> return f)
rdfEntityTypes
{- | parses an entity type (subject, predicate or object) followed by a
comma separated list of IRIs -}
rdfSymbItems :: GenParser Char st SymbItems
rdfSymbItems = do
ext <- optionMaybe rdfEntityType
iris <- rdfSymbs
return $ SymbItems ext iris
-- | parse a comma separated list of uris
rdfSymbs :: GenParser Char st [IRI]
rdfSymbs = uriP >>= \ u -> do
commaP `followedWith` uriP
us <- rdfSymbs
return $ u : us
<|> return [u]
-- | parse a possibly kinded list of comma separated symbol pairs
rdfSymbMapItems :: GenParser Char st SymbMapItems
rdfSymbMapItems = do
ext <- optionMaybe rdfEntityType
iris <- rdfSymbPairs
return $ SymbMapItems ext iris
-- | parse a comma separated list of uri pairs
rdfSymbPairs :: GenParser Char st [(IRI, Maybe IRI)]
rdfSymbPairs = uriPair >>= \ u -> do
commaP `followedWith` uriP
us <- rdfSymbPairs
return $ u : us
<|> return [u]
-- * turtle syntax parser
skips :: CharParser st a -> CharParser st a
skips = (<< skipMany
(forget space <|> parseComment <|> nestCommentOut <?> ""))
charOrQuoteEscape :: CharParser st String
charOrQuoteEscape = try (string "\\\"") <|> fmap return anyChar
longLiteral :: CharParser st (String, Bool)
longLiteral = do
string "\"\"\""
ls <- flat $ manyTill charOrQuoteEscape $ try $ string "\"\"\""
return (ls, True)
shortLiteral :: CharParser st (String, Bool)
shortLiteral = do
char '"'
ls <- flat $ manyTill charOrQuoteEscape $ try $ string "\""
return (ls, False)
stringLiteral :: CharParser st RDFLiteral
stringLiteral = do
(s, b) <- try longLiteral <|> shortLiteral
do
string cTypeS
d <- datatypeUri
return $ RDFLiteral b s $ Typed d
<|> do
string "@"
t <- skips $ optionMaybe languageTag
return $ RDFLiteral b s $ Untyped t
<|> skips (return $ RDFLiteral b s $ Typed $ mkQName "string")
literal :: CharParser st RDFLiteral
literal = do
f <- skips $ try floatingPointLit
<|> fmap decToFloat decimalLit
return $ RDFNumberLit f
<|> stringLiteral
parseBase :: CharParser st Base
parseBase = do
pkeyword "@base"
base <- skips uriP
skips $ char '.'
return $ Base base
parsePrefix :: CharParser st Prefix
parsePrefix = do
pkeyword "@prefix"
p <- skips (option "" prefix << char ':')
i <- skips uriP
skips $ char '.'
return $ PrefixR p i
parsePredicate :: CharParser st Predicate
parsePredicate = fmap Predicate $ skips uriP
parseSubject :: CharParser st Subject
parseSubject =
fmap Subject (skips uriP)
<|> fmap SubjectList
(between (skips $ char '[') (skips $ char ']') $ skips parsePredObjList)
<|> fmap SubjectCollection
(between (skips $ char '(') (skips $ char ')') $ many parseObject)
parseObject :: CharParser st Object
parseObject = fmap ObjectLiteral literal <|> fmap Object parseSubject
parsePredObjects :: CharParser st PredicateObjectList
parsePredObjects = do
pr <- parsePredicate
objs <- sepBy parseObject $ skips $ char ','
return $ PredicateObjectList pr objs
parsePredObjList :: CharParser st [PredicateObjectList]
parsePredObjList = sepEndBy parsePredObjects $ skips $ char ';'
parseTriples :: CharParser st Triples
parseTriples = do
s <- parseSubject
ls <- parsePredObjList
skips $ char '.'
return $ Triples s ls
parseComment :: CharParser st ()
parseComment = do
tryString "#"
forget $ skips $ manyTill anyChar newlineOrEof
parseStatement :: CharParser st Statement
parseStatement = fmap BaseStatement parseBase
<|> fmap PrefixStatement parsePrefix <|> fmap Statement parseTriples
basicSpec :: GA.PrefixMap -> CharParser st TurtleDocument
basicSpec pm = do
many parseComment
ls <- many parseStatement
let td = TurtleDocument
dummyQName (Map.map transIri $ convertPrefixMap pm) ls
-- return $ trace (show $ Map.union predefinedPrefixes (prefixMap td)) td
return td
where transIri s = QN "" s Full s nullRange
predefinedPrefixes :: RDFPrefixMap
predefinedPrefixes = Map.fromList $ zip
["rdf", "rdfs", "dc", "owl", "ex", "xsd"]
$ rights $ map (parse uriQ "")
[ "<http://www.w3.org/1999/02/22-rdf-syntax-ns#>"
, "<http://www.w3.org/2000/01/rdf-schema#>"
, "<http://purl.org/dc/elements/1.1/>"
, "<http://www.w3.org/2002/07/owl#>"
, "<http://www.example.org/>"
, "<http://www.w3.org/2001/XMLSchema#>" ]
|
gnn/Hets
|
RDF/Parse.hs
|
gpl-2.0
| 5,402
| 0
| 14
| 1,135
| 1,553
| 764
| 789
| 138
| 1
|
{- |
Module : $Header$
Description : String constants for HybridCASL keywords to be used for parsing
and printing
Copyright : (c) Renato Neves and Mondrian Project 2012
License : GPLv2 or higher, see LICENSE.txt
Maintainer : nevrenato@gmail.com
Stability : experimental
Portability : portable
String constants for keywords to be used for parsing and printing
- all identifiers are mixed case (i.e. the keyword followed by a capital S)
-}
module Hybrid.Keywords where
nominalS :: String
nominalS = "nominal"
nominalsS :: String
nominalsS = "nominals"
|
nevrenato/Hets_Fork
|
Hybrid/Keywords.hs
|
gpl-2.0
| 584
| 0
| 4
| 119
| 27
| 17
| 10
| 5
| 1
|
{-# LANGUAGE
FlexibleContexts
, InstanceSigs
, ScopedTypeVariables
#-}
module HFlint.FMPQ.Reduction
where
import Data.Proxy ( Proxy(..) )
import Data.Reflection ( reflect )
import System.IO.Unsafe ( unsafePerformIO )
import HFlint.FMPQ.FFI
import HFlint.FMPZ.FFI
import HFlint.NMod.Context
import HFlint.NMod.FFI
import HFlint.NMod.Reduction
instance HasLimbHeight FMPQ where
limbHeight a = fromIntegral $ unsafePerformIO $
fmap snd $ withFMPQ a $ \aptr -> do
numptr <- fmpq_numref aptr
denptr <- fmpq_denref aptr
numht <- abs <$> fmpz_size numptr
denht <- abs <$> fmpz_size denptr
return $ 2 * max numht denht
instance ToNModMay FMPQ where
{-# INLINE toNModMay #-}
toNModMay :: forall ctx . ReifiesNModContext ctx => FMPQ -> Maybe (NMod ctx)
toNModMay a = unsafePerformIO $
fmap snd $ withFMPQ a $ \aptr -> do
p <- nmod_n $ reflect (Proxy :: Proxy ctx)
numptr <- fmpq_numref aptr
denptr <- fmpq_denref aptr
num <- fmpz_fdiv_ui numptr p
den <- fmpz_fdiv_ui denptr p
if den == 0
then return Nothing
else Just <$> NMod <$>
nmod_div num den (reflect (Proxy :: Proxy ctx))
data RationalReconstructionType =
Balanced
| Bounded FMPZ FMPZ
{-# INLINE rationalReconstruct #-}
rationalReconstruct
:: RationalReconstructionType
-> Modulus FMPZ -> FMPZ -> Maybe FMPQ
rationalReconstruct Balanced (Modulus m) a =
let (b,r) = unsafePerformIO $
withNewFMPQ $ \bptr ->
fmap snd $ withFMPZ m $ \mptr ->
fmap snd $ withFMPZ a $ \aptr ->
fmpq_reconstruct_fmpz bptr aptr mptr
in if r == 0 then Nothing else Just b
rationalReconstruct (Bounded num den) (Modulus m) a =
let (b,r) = unsafePerformIO $
withNewFMPQ $ \bptr ->
fmap snd $ withFMPZ num $ \numptr ->
fmap snd $ withFMPZ den $ \denptr ->
fmap snd $ withFMPZ m $ \mptr ->
fmap snd $ withFMPZ a $ \aptr ->
fmpq_reconstruct_fmpz_2 bptr aptr mptr numptr denptr
in if r == 0 then Nothing else Just b
|
martinra/hflint
|
src/HFlint/FMPQ/Reduction.hs
|
gpl-3.0
| 2,100
| 0
| 19
| 566
| 671
| 336
| 335
| 58
| 3
|
-- {-# OPTIONS_GHC -F -pgmF htfpp #-}
{-# LANGUAGE CPP #-}
{- |
A simple test runner for hledger's built-in unit tests.
-}
module Hledger.Cli.Tests (
testmode
,test'
)
where
import Control.Monad
import System.Exit
import Test.HUnit
import Hledger
import Hledger.Cli
#ifdef TESTS
import Test.Framework
import {-@ HTF_TESTS @-} Hledger.Read.JournalReader
-- | Run HTF unit tests and exit with success or failure.
test' :: CliOpts -> IO ()
test' _opts = htfMain htf_importedTests
#else
-- | Run HUnit unit tests and exit with success or failure.
test' :: CliOpts -> IO ()
test' opts = do
results <- runTests opts
if errors results > 0 || failures results > 0
then exitFailure
else exitWith ExitSuccess
testmode = (defCommandMode ["test"]) {
modeHelp = "run built-in self-tests"
,modeArgs = ([], Just $ argsFlag "[REGEXPS]")
,modeGroupFlags = Group {
groupUnnamed = []
,groupHidden = []
,groupNamed = [generalflagsgroup3]
}
}
-- | Run all or just the matched unit tests and return their HUnit result counts.
runTests :: CliOpts -> IO Counts
runTests = liftM (fst . flip (,) 0) . runTestTT . flatTests
-- -- | Run all or just the matched unit tests until the first failure or
-- -- error, returning the name of the problem test if any.
-- runTestsTillFailure :: CliOpts -> IO (Maybe String)
-- runTestsTillFailure _ = undefined -- do
-- -- let ts = flatTests opts
-- -- results = liftM (fst . flip (,) 0) $ runTestTT $
-- -- firstproblem = find (\counts -> )
-- | All or pattern-matched tests, as a flat list to show simple names.
flatTests opts = TestList $ filter (matchesAccount (queryFromOpts nulldate $ reportopts_ opts) . testName) $ flattenTests tests_Hledger_Cli
-- -- | All or pattern-matched tests, in the original suites to show hierarchical names.
-- hierarchicalTests opts = filterTests (matchesAccount (queryFromOpts nulldate $ reportopts_ opts) . testName) tests_Hledger_Cli
#endif
|
kmels/hledger
|
hledger/Hledger/Cli/Tests.hs
|
gpl-3.0
| 1,957
| 0
| 7
| 377
| 82
| 51
| 31
| 25
| 2
|
{-# LANGUAGE DeriveDataTypeable #-}
module Nicomachus where
import Prelude hiding ((+),(*))
import HipSpec.Prelude
import Test.QuickSpec.Signature
data Nat = Z | S Nat deriving (Eq,Ord,Show,Typeable)
(+) :: Nat -> Nat -> Nat
S n + m = S (n + m)
Z + m = m
(*) :: Nat -> Nat -> Nat
S n * m = m + (n * m)
Z * m = Z
tri :: Nat -> Nat
tri Z = Z
tri (S n) = tri n + S n
cubes :: Nat -> Nat
cubes Z = Z
cubes (S n) = cubes n + (S n * S n * S n)
prop_Nichomachus :: Nat -> Prop Nat
prop_Nichomachus n = cubes n =:= tri n * tri n
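-- A small concrete check of the property above, in ordinary integers:
-- for n = 3, cubes 3 = 1 + 8 + 27 = 36, tri 3 = 1 + 2 + 3 = 6, and 6 * 6 = 36.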
infixl 6 +
infixl 7 *
instance Enum Nat where
toEnum 0 = Z
toEnum n = S (toEnum (pred n))
fromEnum Z = 0
fromEnum (S n) = succ (fromEnum n)
instance Arbitrary Nat where
arbitrary = sized arbSized
instance Partial Nat where
unlifted Z = return Z
unlifted (S x) = fmap S (lifted x)
arbSized s = do
x <- choose (0,round (sqrt (toEnum s)))
return (toEnum x)
|
danr/hipspec
|
testsuite/examples/Nicomachus.hs
|
gpl-3.0
| 918
| 1
| 14
| 240
| 491
| 249
| 242
| 35
| 1
|
module Parsing.HashParser where
import Language.Expressions
import Text.Parsec.String (Parser)
import Text.Parsec.Char (digit, char, string, spaces, letter, satisfy, alphaNum, anyChar, noneOf, oneOf)
import Text.Parsec (parse, ParseError, try, optionMaybe)
import Text.Parsec.Combinator (many1, sepBy1)
import Text.Parsec.Expr
import Control.Applicative ((<|>), (<$>), (<$), (<*>), (<*), (*>), Applicative, many)
import Control.Monad (when)
import Data.Char (digitToInt, isAlphaNum, isLetter)
import Data.Maybe (isNothing, fromMaybe)
err = "An error has occurred"
token :: Parser a -> Parser a
token = (<* spaces)
parseStringToTLExpr :: String -> [TLExpr]
parseStringToTLExpr = parsed . betterParse (many tlexpr)
parsed (Right s) = s
parsed (Left _) = error err
betterParse :: Parser a -> String -> Either ParseError a
betterParse p = parse (spaces *> p) err
parsString :: Parser String
parsString = token $ many1 (noneOf ['=', ' ', '<', '>', '(', ')', '\n', ';', '{', '}'])
litString :: Parser String
litString = token $ many (noneOf ['"'])
stringLiteral :: Parser Expr
stringLiteral = do
token $ char '"'
ret <- Str <$> litString
token $ char '"'
return ret
strExpr :: Parser Expr
strExpr = Str <$> parsString
varExpr :: Parser Expr
varExpr = Var <$> variable
variable :: Parser String
variable = do
char '$'
first <- satisfy (\c -> isLetter c )
rest <- many (satisfy (\c -> isAlphaNum c ))
spaces
return (first:rest)
expr :: Parser Expr
expr = try varExpr <|> strExpr
manyExpr = many expr
symbol :: Char -> Parser Char
symbol = token . char
assign :: Parser Cmd
assign = do
varName <- strExpr
char '='
value <- strExpr
spaces
symbol ';'
return (Assign varName value)
inputRedir :: Parser Expr
inputRedir = do
char '<'
spaces
redirExpr <- expr
return redirExpr
outputRedir :: Parser (Expr, Bool)
outputRedir = do
char '>'
appended <- optionMaybe (char '>')
spaces
redirExpr <- expr
spaces
return (redirExpr, not $ isNothing appended)
cmd :: Parser Cmd
cmd = do
cmdName <- expr
spaces
cmdArgs <- many (try stringLiteral <|> expr)
spaces
redirIn <- optionMaybe inputRedir
redirOut <- optionMaybe outputRedir
spaces
let (outputExpr, isAppended) = case redirOut of
Nothing -> (Nothing, False)
Just (ex, isApp) -> (Just ex, isApp)
symbol ';'
return (Cmd cmdName cmdArgs redirIn outputExpr isAppended)
cmdOrAssign :: Parser Cmd
cmdOrAssign = try assign <|> cmd
comp :: Parser Comp
comp = do
expr1 <- expr
spaces
op <- many1 (oneOf ['=', '/', '>', '<'])
spaces
expr2 <- expr
spaces
let opConst = case op of
"==" -> CEQ
"/=" -> CNE
">=" -> CGE
">" -> CGT
"<=" -> CLE
"<" -> CLT
return (opConst expr1 expr2)
table = [[unary '!' Not], [binary '&' And], [binary '|' Or]]
where binary sym f = Infix (mkParser sym f) AssocLeft
mkParser s f = do
char s
spaces
return f
unary sym f = Prefix (mkParser sym f)
prd :: Parser Pred
prd = buildExpressionParser table other
where other = cmp <|> parenPred
cmp = do
c <- comp
spaces
return (Pred c)
parenPred = do
char '('
pr <- prd
char ')'
spaces
return (Parens pr)
clause :: Parser [Cmd]
clause = do
symbol '{'
cmds <- many cmdOrAssign
symbol '}'
return (cmds)
ifOrIfElse :: Parser Conditional
ifOrIfElse = try ifElse <|> if'
ifElse :: Parser Conditional
ifElse = do
string "if"
spaces
char '('
cnd <- prd
spaces
char ')'
spaces
cmds1 <- clause
spaces
el <- string "else"
spaces
cmds2 <- clause
spaces
fi <- string "end"
spaces
return (IfElse cnd cmds1 cmds2)
if' :: Parser Conditional
if' = do
string "if"
spaces
char '('
cnd <- prd
spaces
char ')'
spaces
cmds1 <- clause
fi <- string "end"
spaces
return (If cnd cmds1 )
while :: Parser Loop
while = do
string "while"
spaces
char '('
cnd <- prd
spaces
char ')'
spaces
cmds1 <- clause
fi <- string "end"
spaces
return (While cnd cmds1 )
tlexpr :: Parser TLExpr
tlexpr = (try (TLCmd <$> cmdOrAssign)) <|> (try (TLCnd <$> ifOrIfElse)) <|> (TLwl <$> while)
|
IvanSindija/Project-Shell-Hash
|
Parsing/HashParser.hs
|
gpl-3.0
| 4,441
| 0
| 14
| 1,253
| 1,671
| 820
| 851
| 174
| 6
|
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- Module : Network.AWS.EC2.ReleaseAddress
-- Copyright : (c) 2013-2014 Brendan Hay <brendan.g.hay@gmail.com>
-- License : This Source Code Form is subject to the terms of
-- the Mozilla Public License, v. 2.0.
-- A copy of the MPL can be found in the LICENSE file or
-- you can obtain it at http://mozilla.org/MPL/2.0/.
-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
-- Stability : experimental
-- Portability : non-portable (GHC extensions)
--
-- Derived from AWS service descriptions, licensed under Apache 2.0.
-- | Releases the specified Elastic IP address.
--
-- After releasing an Elastic IP address, it is released to the IP address pool
-- and might be unavailable to you. Be sure to update your DNS records and any
-- servers or devices that communicate with the address. If you attempt to
-- release an Elastic IP address that you already released, you'll get an 'AuthFailure' error if the address is already allocated to another AWS account.
--
-- [EC2-Classic, default VPC] Releasing an Elastic IP address automatically
-- disassociates it from any instance that it's associated with. To disassociate
-- an Elastic IP address without releasing it, use 'DisassociateAddress'.
--
-- [Nondefault VPC] You must use 'DisassociateAddress' to disassociate the
-- Elastic IP address before you try to release it. Otherwise, Amazon EC2
-- returns an error ('InvalidIPAddress.InUse').
--
-- <http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-ReleaseAddress.html>
module Network.AWS.EC2.ReleaseAddress
(
-- * Request
ReleaseAddress
-- ** Request constructor
, releaseAddress
-- ** Request lenses
, raAllocationId
, raDryRun
, raPublicIp
-- * Response
, ReleaseAddressResponse
-- ** Response constructor
, releaseAddressResponse
) where
import Network.AWS.Prelude
import Network.AWS.Request.Query
import Network.AWS.EC2.Types
import qualified GHC.Exts
data ReleaseAddress = ReleaseAddress
{ _raAllocationId :: Maybe Text
, _raDryRun :: Maybe Bool
, _raPublicIp :: Maybe Text
} deriving (Eq, Ord, Read, Show)
-- | 'ReleaseAddress' constructor.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'raAllocationId' @::@ 'Maybe' 'Text'
--
-- * 'raDryRun' @::@ 'Maybe' 'Bool'
--
-- * 'raPublicIp' @::@ 'Maybe' 'Text'
--
releaseAddress :: ReleaseAddress
releaseAddress = ReleaseAddress
{ _raDryRun = Nothing
, _raPublicIp = Nothing
, _raAllocationId = Nothing
}
-- | [EC2-VPC] The allocation ID. Required for EC2-VPC.
raAllocationId :: Lens' ReleaseAddress (Maybe Text)
raAllocationId = lens _raAllocationId (\s a -> s { _raAllocationId = a })
-- | Checks whether you have the required permissions for the action, without
-- actually making the request, and provides an error response. If you have the
-- required permissions, the error response is 'DryRunOperation'. Otherwise, it is 'UnauthorizedOperation'.
raDryRun :: Lens' ReleaseAddress (Maybe Bool)
raDryRun = lens _raDryRun (\s a -> s { _raDryRun = a })
-- | [EC2-Classic] The Elastic IP address. Required for EC2-Classic.
raPublicIp :: Lens' ReleaseAddress (Maybe Text)
raPublicIp = lens _raPublicIp (\s a -> s { _raPublicIp = a })
data ReleaseAddressResponse = ReleaseAddressResponse
deriving (Eq, Ord, Read, Show, Generic)
-- | 'ReleaseAddressResponse' constructor.
releaseAddressResponse :: ReleaseAddressResponse
releaseAddressResponse = ReleaseAddressResponse
instance ToPath ReleaseAddress where
toPath = const "/"
instance ToQuery ReleaseAddress where
toQuery ReleaseAddress{..} = mconcat
[ "AllocationId" =? _raAllocationId
, "DryRun" =? _raDryRun
, "PublicIp" =? _raPublicIp
]
instance ToHeaders ReleaseAddress
instance AWSRequest ReleaseAddress where
type Sv ReleaseAddress = EC2
type Rs ReleaseAddress = ReleaseAddressResponse
request = post "ReleaseAddress"
response = nullResponse ReleaseAddressResponse
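-- A hypothetical usage sketch (not part of the generated module); it assumes
-- the lens operators '(&)' and '(?~)' are in scope, for example from
-- Control.Lens, and uses a documentation IP address:
--
-- > releaseByPublicIp :: ReleaseAddress
-- > releaseByPublicIp = releaseAddress & raPublicIp ?~ "198.51.100.1"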
|
romanb/amazonka
|
amazonka-ec2/gen/Network/AWS/EC2/ReleaseAddress.hs
|
mpl-2.0
| 4,523
| 0
| 9
| 937
| 483
| 297
| 186
| 56
| 1
|
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# OPTIONS_GHC -fno-warn-duplicate-exports #-}
{-# OPTIONS_GHC -fno-warn-unused-binds #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- |
-- Module : Network.Google.Resource.Compute.Images.Delete
-- Copyright : (c) 2015-2016 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
-- Deletes the specified image.
--
-- /See:/ <https://developers.google.com/compute/docs/reference/latest/ Compute Engine API Reference> for @compute.images.delete@.
module Network.Google.Resource.Compute.Images.Delete
(
-- * REST Resource
ImagesDeleteResource
-- * Creating a Request
, imagesDelete
, ImagesDelete
-- * Request Lenses
, imaImage
, imaProject
) where
import Network.Google.Compute.Types
import Network.Google.Prelude
-- | A resource alias for @compute.images.delete@ method which the
-- 'ImagesDelete' request conforms to.
type ImagesDeleteResource =
"compute" :>
"v1" :>
"projects" :>
Capture "project" Text :>
"global" :>
"images" :>
Capture "image" Text :>
QueryParam "alt" AltJSON :> Delete '[JSON] Operation
-- | Deletes the specified image.
--
-- /See:/ 'imagesDelete' smart constructor.
data ImagesDelete = ImagesDelete'
{ _imaImage :: !Text
, _imaProject :: !Text
} deriving (Eq,Show,Data,Typeable,Generic)
-- | Creates a value of 'ImagesDelete' with the minimum fields required to make a request.
--
-- Use one of the following lenses to modify other fields as desired:
--
-- * 'imaImage'
--
-- * 'imaProject'
imagesDelete
:: Text -- ^ 'imaImage'
-> Text -- ^ 'imaProject'
-> ImagesDelete
imagesDelete pImaImage_ pImaProject_ =
ImagesDelete'
{ _imaImage = pImaImage_
, _imaProject = pImaProject_
}
-- | Name of the image resource to delete.
imaImage :: Lens' ImagesDelete Text
imaImage = lens _imaImage (\ s a -> s{_imaImage = a})
-- | Project ID for this request.
imaProject :: Lens' ImagesDelete Text
imaProject
= lens _imaProject (\ s a -> s{_imaProject = a})
instance GoogleRequest ImagesDelete where
type Rs ImagesDelete = Operation
type Scopes ImagesDelete =
'["https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/compute"]
requestClient ImagesDelete'{..}
= go _imaProject _imaImage (Just AltJSON)
computeService
where go
= buildClient (Proxy :: Proxy ImagesDeleteResource)
mempty
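-- A hypothetical construction sketch, added here purely as an illustration of
-- the smart constructor above; the image and project names below are made up
-- and not part of the generated API.
exampleImagesDelete :: ImagesDelete
exampleImagesDelete = imagesDelete "example-image" "example-project"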
|
rueshyna/gogol
|
gogol-compute/gen/Network/Google/Resource/Compute/Images/Delete.hs
|
mpl-2.0
| 3,046
| 0
| 15
| 740
| 388
| 233
| 155
| 62
| 1
|
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- |
-- Module : Network.Google.LibraryAgent.Types
-- Copyright : (c) 2015-2016 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
module Network.Google.LibraryAgent.Types
(
-- * Service Configuration
libraryAgentService
-- * OAuth Scopes
, cloudPlatformScope
-- * GoogleExampleLibraryagentV1Shelf
, GoogleExampleLibraryagentV1Shelf
, googleExampleLibraryagentV1Shelf
, gelvsName
, gelvsTheme
-- * Xgafv
, Xgafv (..)
-- * GoogleExampleLibraryagentV1ListShelvesResponse
, GoogleExampleLibraryagentV1ListShelvesResponse
, googleExampleLibraryagentV1ListShelvesResponse
, gelvlsrNextPageToken
, gelvlsrShelves
-- * GoogleExampleLibraryagentV1ListBooksResponse
, GoogleExampleLibraryagentV1ListBooksResponse
, googleExampleLibraryagentV1ListBooksResponse
, gelvlbrNextPageToken
, gelvlbrBooks
-- * GoogleExampleLibraryagentV1Book
, GoogleExampleLibraryagentV1Book
, googleExampleLibraryagentV1Book
, gelvbRead
, gelvbName
, gelvbAuthor
, gelvbTitle
) where
import Network.Google.LibraryAgent.Types.Product
import Network.Google.LibraryAgent.Types.Sum
import Network.Google.Prelude
-- | Default request referring to version 'v1' of the Library Agent API. This contains the host and root path used as a starting point for constructing service requests.
libraryAgentService :: ServiceConfig
libraryAgentService
= defaultService (ServiceId "libraryagent:v1")
"libraryagent.googleapis.com"
-- | See, edit, configure, and delete your Google Cloud Platform data
cloudPlatformScope :: Proxy '["https://www.googleapis.com/auth/cloud-platform"]
cloudPlatformScope = Proxy
|
brendanhay/gogol
|
gogol-libraryagent/gen/Network/Google/LibraryAgent/Types.hs
|
mpl-2.0
| 2,078
| 0
| 7
| 366
| 161
| 112
| 49
| 38
| 1
|
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# OPTIONS_GHC -fno-warn-duplicate-exports #-}
{-# OPTIONS_GHC -fno-warn-unused-binds #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- |
-- Module : Network.Google.Resource.Compute.RegionInstanceGroupManagers.Get
-- Copyright : (c) 2015-2016 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
-- Returns all of the details about the specified managed instance group.
--
-- /See:/ <https://developers.google.com/compute/docs/reference/latest/ Compute Engine API Reference> for @compute.regionInstanceGroupManagers.get@.
module Network.Google.Resource.Compute.RegionInstanceGroupManagers.Get
(
-- * REST Resource
RegionInstanceGroupManagersGetResource
-- * Creating a Request
, regionInstanceGroupManagersGet
, RegionInstanceGroupManagersGet
-- * Request Lenses
, rigmgProject
, rigmgInstanceGroupManager
, rigmgRegion
) where
import Network.Google.Compute.Types
import Network.Google.Prelude
-- | A resource alias for @compute.regionInstanceGroupManagers.get@ method which the
-- 'RegionInstanceGroupManagersGet' request conforms to.
type RegionInstanceGroupManagersGetResource =
"compute" :>
"v1" :>
"projects" :>
Capture "project" Text :>
"regions" :>
Capture "region" Text :>
"instanceGroupManagers" :>
Capture "instanceGroupManager" Text :>
QueryParam "alt" AltJSON :>
Get '[JSON] InstanceGroupManager
-- | Returns all of the details about the specified managed instance group.
--
-- /See:/ 'regionInstanceGroupManagersGet' smart constructor.
data RegionInstanceGroupManagersGet =
RegionInstanceGroupManagersGet'
{ _rigmgProject :: !Text
, _rigmgInstanceGroupManager :: !Text
, _rigmgRegion :: !Text
}
deriving (Eq, Show, Data, Typeable, Generic)
-- | Creates a value of 'RegionInstanceGroupManagersGet' with the minimum fields required to make a request.
--
-- Use one of the following lenses to modify other fields as desired:
--
-- * 'rigmgProject'
--
-- * 'rigmgInstanceGroupManager'
--
-- * 'rigmgRegion'
regionInstanceGroupManagersGet
:: Text -- ^ 'rigmgProject'
-> Text -- ^ 'rigmgInstanceGroupManager'
-> Text -- ^ 'rigmgRegion'
-> RegionInstanceGroupManagersGet
regionInstanceGroupManagersGet pRigmgProject_ pRigmgInstanceGroupManager_ pRigmgRegion_ =
RegionInstanceGroupManagersGet'
{ _rigmgProject = pRigmgProject_
, _rigmgInstanceGroupManager = pRigmgInstanceGroupManager_
, _rigmgRegion = pRigmgRegion_
}
-- | Project ID for this request.
rigmgProject :: Lens' RegionInstanceGroupManagersGet Text
rigmgProject
= lens _rigmgProject (\ s a -> s{_rigmgProject = a})
-- | Name of the managed instance group to return.
rigmgInstanceGroupManager :: Lens' RegionInstanceGroupManagersGet Text
rigmgInstanceGroupManager
= lens _rigmgInstanceGroupManager
(\ s a -> s{_rigmgInstanceGroupManager = a})
-- | Name of the region scoping this request.
rigmgRegion :: Lens' RegionInstanceGroupManagersGet Text
rigmgRegion
= lens _rigmgRegion (\ s a -> s{_rigmgRegion = a})
instance GoogleRequest RegionInstanceGroupManagersGet
where
type Rs RegionInstanceGroupManagersGet =
InstanceGroupManager
type Scopes RegionInstanceGroupManagersGet =
'["https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/compute.readonly"]
requestClient RegionInstanceGroupManagersGet'{..}
= go _rigmgProject _rigmgRegion
_rigmgInstanceGroupManager
(Just AltJSON)
computeService
where go
= buildClient
(Proxy ::
Proxy RegionInstanceGroupManagersGetResource)
mempty
|
brendanhay/gogol
|
gogol-compute/gen/Network/Google/Resource/Compute/RegionInstanceGroupManagers/Get.hs
|
mpl-2.0
| 4,405
| 0
| 16
| 975
| 466
| 278
| 188
| 82
| 1
|
-- Implicit CAD. Copyright (C) 2011, Christopher Olah (chris@colah.ca)
-- Copyright (C) 2014 2015, Julia Longtin (julial@turinglace.com)
-- Released under the GNU AGPLV3+, see LICENSE
-- Allow us to use explicit foralls when writing function type declarations.
{-# LANGUAGE ExplicitForAll #-}
-- FIXME: required. why?
{-# LANGUAGE ViewPatterns #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeSynonymInstances #-}
module Graphics.Implicit.ExtOpenScad.Util.OVal(OTypeMirror, (<||>), fromOObj, toOObj, divideObjs, caseOType, oTypeStr, getErrors) where
import Prelude(Maybe(Just, Nothing), Bool(True, False), Either(Left,Right), Char, String, (==), fromInteger, floor, ($), (.), map, error, (++), show, head, flip, filter, not, return)
import Graphics.Implicit.Definitions(ℝ, ℕ, SymbolicObj2, SymbolicObj3, fromℕtoℝ)
import Graphics.Implicit.ExtOpenScad.Definitions (OVal(ONum, OBool, OString, OList, OFunc, OUndefined, OModule, OUModule, OError, OObj2, OObj3))
import Control.Monad (mapM, msum)
import Data.Maybe (fromMaybe, maybe)
-- for some minimal parallelism.
import Control.Parallel.Strategies(runEval, rpar, rseq)
-- Convert OVals (and Lists of OVals) into a given Haskell type
class OTypeMirror a where
fromOObj :: OVal -> Maybe a
fromOObjList :: OVal -> Maybe [a]
fromOObjList (OList list) = mapM fromOObj list
fromOObjList _ = Nothing
{-# INLINABLE fromOObjList #-}
toOObj :: a -> OVal
instance OTypeMirror OVal where
fromOObj = Just
toOObj a = a
instance OTypeMirror ℝ where
fromOObj (ONum n) = Just n
fromOObj _ = Nothing
{-# INLINABLE fromOObj #-}
toOObj = ONum
instance OTypeMirror ℕ where
fromOObj (ONum n) = if n == fromInteger (floor n) then Just (floor n) else Nothing
fromOObj _ = Nothing
{-# INLINABLE fromOObj #-}
toOObj = ONum . fromℕtoℝ
instance OTypeMirror Bool where
fromOObj (OBool b) = Just b
fromOObj _ = Nothing
{-# INLINABLE fromOObj #-}
toOObj = OBool
-- We don't actually use single chars, this is to compile lists of chars (AKA strings) after passing through OTypeMirror [a]'s fromOObj.
-- This lets us handle strings without overlapping the [a] case.
instance OTypeMirror Char where
fromOObj (OString str) = Just $ head str
fromOObj _ = Nothing
{-# INLINABLE fromOObj #-}
fromOObjList (OString str) = Just str
fromOObjList _ = Nothing
toOObj a = OString [a]
instance (OTypeMirror a) => OTypeMirror [a] where
fromOObj = fromOObjList
{-# INLINABLE fromOObj #-}
toOObj list = OList $ map toOObj list
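-- An illustrative example of the Char/[a] interplay above (not from the
-- original source; the expressions below are a sketch):
--
--   fromOObj (OString "abc") :: Maybe String  -- Just "abc", via fromOObjList
--   fromOObj (ONum 1)        :: Maybe String  -- Nothing
--
-- i.e. a string is parsed from a whole OString rather than from a list of
-- single-character OVals.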
instance (OTypeMirror a) => OTypeMirror (Maybe a) where
fromOObj a = Just $ fromOObj a
{-# INLINABLE fromOObj #-}
toOObj (Just a) = toOObj a
toOObj Nothing = OUndefined
instance (OTypeMirror a, OTypeMirror b) => OTypeMirror (a,b) where
fromOObj (OList [fromOObj -> Just a,fromOObj -> Just b]) = Just (a,b)
fromOObj _ = Nothing
{-# INLINABLE fromOObj #-}
toOObj (a,b) = OList [toOObj a, toOObj b]
instance (OTypeMirror a, OTypeMirror b, OTypeMirror c) => OTypeMirror (a,b,c) where
fromOObj (OList [fromOObj -> Just a,fromOObj -> Just b,fromOObj -> Just c]) =
Just (a,b,c)
fromOObj _ = Nothing
{-# INLINABLE fromOObj #-}
toOObj (a,b,c) = OList [toOObj a, toOObj b, toOObj c]
instance (OTypeMirror a, OTypeMirror b) => OTypeMirror (a -> b) where
fromOObj (OFunc f) = Just $ \input ->
let
oInput = toOObj input
oOutput = f oInput
output :: Maybe b
output = fromOObj oOutput
in
fromMaybe (error $ "coercing OVal to a -> b isn't always safe; use a -> Maybe b"
++ " (trace: " ++ show oInput ++ " -> " ++ show oOutput ++ " )") output
fromOObj _ = Nothing
{-# INLINABLE fromOObj #-}
toOObj f = OFunc $ \oObj ->
case fromOObj oObj :: Maybe a of
Nothing -> OError ["bad input type"]
Just obj -> toOObj $ f obj
instance (OTypeMirror a, OTypeMirror b) => OTypeMirror (Either a b) where
fromOObj (fromOObj -> Just (x :: a)) = Just $ Left x
fromOObj (fromOObj -> Just (x :: b)) = Just $ Right x
fromOObj _ = Nothing
{-# INLINABLE fromOObj #-}
toOObj (Right x) = toOObj x
toOObj (Left x) = toOObj x
-- A string representing each type.
oTypeStr :: OVal -> String
oTypeStr OUndefined = "Undefined"
oTypeStr (OBool _ ) = "Bool"
oTypeStr (ONum _ ) = "Number"
oTypeStr (OList _ ) = "List"
oTypeStr (OString _ ) = "String"
oTypeStr (OFunc _ ) = "Function"
oTypeStr (OModule _ _ _ ) = "Module"
oTypeStr (OUModule _ _ _ ) = "User Defined Module"
oTypeStr (OError _ ) = "Error"
oTypeStr (OObj2 _ ) = "2D Object"
oTypeStr (OObj3 _ ) = "3D Object"
getErrors :: OVal -> Maybe String
getErrors (OError er) = Just $ head er
getErrors (OList l) = msum $ map getErrors l
getErrors _ = Nothing
caseOType :: forall c a. a -> (a -> c) -> c
caseOType = flip ($)
infixr 2 <||>
(<||>) :: forall desiredType out. (OTypeMirror desiredType)
=> (desiredType -> out)
-> (OVal -> out)
-> (OVal -> out)
(<||>) f g input =
let
coerceAttempt :: Maybe desiredType
coerceAttempt = fromOObj input
in
maybe (g input) f coerceAttempt
-- separate 2d and 3d objects from a set of OVals.
divideObjs :: [OVal] -> ([SymbolicObj2], [SymbolicObj3], [OVal])
divideObjs children =
runEval $ do
obj2s <- rseq [ x | OObj2 x <- children ]
obj3s <- rseq [ x | OObj3 x <- children ]
objs <- rpar (filter (not . isOObj) children)
return (obj2s, obj3s, objs)
where
isOObj (OObj2 _) = True
isOObj (OObj3 _) = True
isOObj _ = False
|
krakrjak/ImplicitCAD
|
Graphics/Implicit/ExtOpenScad/Util/OVal.hs
|
agpl-3.0
| 5,805
| 1
| 17
| 1,446
| 1,800
| 977
| 823
| 112
| 3
|
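-- Cap each value in the data line at v and sum the capped values.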
ans' v d = sum $ map (\i -> if i > v then v else i) d
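-- Process datasets until the "0 0" terminator line: each dataset is a header
-- [n,m] followed by a data line d, and the per-item cap is m `div` n.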
ans ([0,0]:_) = []
ans ([n,m]:d:r) =
(ans' (div m n) d):(ans r)
main = do
c <- getContents
let i = map (map read) $ map words $ lines c :: [[Int]]
o = ans i
mapM_ print o
|
a143753/AOJ
|
2944.hs
|
apache-2.0
| 241
| 0
| 14
| 74
| 185
| 94
| 91
| 9
| 2
|
module Cis194.Week2.LogAnalysis where
import Log
parseMessage :: String -> LogMessage
parseMessage msg = case words msg of
("I":ts:m) -> LogMessage Info (read ts) (unwords m)
("W":ts:m) -> LogMessage Warning (read ts) (unwords m)
("E":level:ts:m) -> LogMessage (Error (read level)) (read ts) (unwords m)
_ -> Unknown msg
parse :: String -> [LogMessage]
parse = map parseMessage . lines
insert :: LogMessage -> MessageTree -> MessageTree
insert (Unknown _) tr = tr
insert m Leaf = Node Leaf m Leaf
insert m@(LogMessage _ ts _) (Node l nm@(LogMessage _ tts _) r)
| ts <= tts = Node (insert m l) nm r
| otherwise = Node l nm (insert m r)
build :: [LogMessage] -> MessageTree
build = foldr insert Leaf
inOrder :: MessageTree -> [LogMessage]
inOrder Leaf = []
inOrder (Node l n r) = inOrder l ++ [n] ++ inOrder r
whatWentWrong :: [LogMessage] -> [String]
whatWentWrong = map msg . filter severeError . inOrder . build
where msg (LogMessage _ _ m) = m
msg (Unknown m) = m
severeError (LogMessage (Error lvl) _ _) = lvl >= 50
severeError _ = False
|
gsnewmark/cis194
|
src/Cis194/Week2/LogAnalysis.hs
|
apache-2.0
| 1,156
| 0
| 12
| 301
| 519
| 263
| 256
| 27
| 4
|
{-# LANGUAGE OverloadedStrings #-}
import Data.Textocat
import Network.Textocat
import Control.Monad (when)
import System.Exit (exitFailure)
cfg = mkConfig "API-KEY"
documents = [ mkDocument "Привет, мир!"
, setTag "haskell" $ mkDocument "Язык Haskell признан лучшим языком для выдающихся хакеров на ICFPC 2014"
]
main = do
status <- serviceStatus cfg
when (status /= ServiceOK) exitFailure
putStrLn "Queue documents"
Right bs <- entityQueue cfg documents
putStrLn "Request status"
entityRequest cfg $ getBatchID bs
putStrLn "Wait until finished"
waitForFinished cfg $ getBatchID bs
putStrLn "Retrieve documents"
  -- entityRetrieve takes a list of batch ids, so several batches could be
  -- retrieved at once; here we retrieve the single batch queued above
Right res <- entityRetrieve cfg $ [getBatchID bs]
let entities = concatMap getEntities $ getDocuments res
mapM_ print entities
putStrLn "Search documents"
Right found <- entitySearch cfg "Haskell"
mapM_ print $ getFoundDocuments found
|
gltronred/textocat-api-haskell
|
examples/Example1.hs
|
apache-2.0
| 1,069
| 1
| 11
| 205
| 248
| 109
| 139
| 24
| 1
|
module Samples where
import qualified Data.ByteString as B
sampleLogLines :: [B.ByteString]
sampleLogLines = ["GET", "GET", "POST", "GET", "PUT", "GET", "GETD"]
sampleStatusLines :: [B.ByteString]
sampleStatusLines = ["200", "200", "404", "500", "900", "201"]
sampleCombinedLines :: [B.ByteString]
sampleCombinedLines = [
"GET 200"
, "PUT 201"
, "GET 404"
, "POST 500"
, "GETD 900"
, "PUT 201"
, "200" ]
|
mazelife/logparser_blog_post
|
src/Samples.hs
|
bsd-2-clause
| 436
| 2
| 6
| 89
| 119
| 77
| 42
| 15
| 1
|
-- 190569291
import Euler(countParts)
nn = 100
-- "100" is not a valid partition for this problem
numParts n = countParts n - 1
main = putStrLn $ show $ numParts nn
|
higgsd/euler
|
hs/76.hs
|
bsd-2-clause
| 168
| 1
| 6
| 35
| 50
| 25
| 25
| 4
| 1
|
module Move (
toLAN
, nullMove
, getFromSquare
, getToSquare
, getPromotionPiece
, getMoveType
, isCapture
, isQuiet
, isTactical
, isCheck
, materialGain
, see
, pass
, makeMove
, play
, legalPositions
, notSuicidal
, legalMoves
, winningCaptures
, noisyMoves) where
import Alias
import qualified AttackTable as T
import Bitwise
import BitBoard
import BasicData
import BasicConstant
import Position
import Data.Bits
import Data.Maybe
import Data.Char
import Data.Composition
import Data.Function
import Data.List
import Control.Monad
import Control.Lens hiding(without)
toLAN :: Move -> String
toLAN x = let from = squareNameFrom . bit $ getFromSquare x
to = squareNameFrom . bit $ getToSquare x
piece = maybeToList
. fmap pieceChar
. mfilter (const (getMoveType x == Promotion))
. Just
$ getPromotionPiece x
in if x == nullMove then "0000" else from ++ to ++ piece
nullMove :: Move
nullMove = 0
normalMove,enPassantMove,castlingMove :: From -> To -> Move
normalMove x y = fromIntegral (countTrailingZeros x) `shiftL` 10
.|. fromIntegral (countTrailingZeros y) `shiftL` 4
enPassantMove x y = normalMove x y
.|. fromIntegral (fromEnum EnPassant)
castlingMove x y = normalMove x y
.|. fromIntegral (fromEnum Castling)
promotionMove :: From -> To -> PieceType -> Move
promotionMove x y z = normalMove x y
.|. fromIntegral (fromEnum z - 1) `shiftL` 2
.|. fromIntegral (fromEnum Promotion)
getFromSquare,getToSquare :: Move -> Square
getFromSquare x = fromIntegral $ (x.&.0xfc00)`shiftR`10
getToSquare x = fromIntegral $ (x.&.0x03f0)`shiftR`4
getPromotionPiece :: Move -> PieceType
getPromotionPiece x = toEnum.fromIntegral $ (x.&.0x000c)`shiftR`2 + 1
getMoveType :: Move -> MoveType
getMoveType x = toEnum.fromIntegral $ x.&.0x0003
isCapture,isQuiet,isCheck :: Position -> Move -> Bool
isCapture x y = (getMoveType y == EnPassant ||) . isOccupied x . bit $ getToSquare y
isQuiet x y = getMoveType y /= Promotion && not (isCapture x y)
isTactical = not .: isQuiet
isCheck x y = if attacker == Pawn
then T.pawnAttack (_activeColor x) (getToSquare y) `joint` x<~!~>King
else T.pieceAttack attacker (occupancy x) (getToSquare y) `joint` x<~!~>King
where
attacker = fromMaybe (error "No piece on the from-square.") $ x`pieceAt`bit (getFromSquare y)
materialGain :: Position -> Move -> Int
materialGain x y = case getMoveType y of
EnPassant -> 0
_ -> toMaterial - fromMaterial
where
fromMaterial = material
. fromMaybe (error "No piece on the from-square.")
. pieceAt x
. bit
$ getFromSquare y
toMaterial = maybe 0 material
. pieceAt x
. bit
$ getToSquare y
pass :: Position -> Position
pass x = x' & checkers .~ checkers'
& pinnedByBishops .~ pinnedByBishops'
& pinnedByRooks .~ pinnedByRooks'
& defendMap .~ defendMap'
where
x' = x & disableEPSquare
& changeColor
checkers' = findCheckers x'
(pinnedByBishops',pinnedByRooks')
= findPinners x'
defendMap' = calcDefendMap x'
makeMove :: Move -> Position -> Position
makeMove x y = y' & checkers .~ checkers'
& pinnedByBishops .~ pinnedByBishops'
& pinnedByRooks .~ pinnedByRooks'
& defendMap .~ defendMap'
where
fromSqr = getFromSquare x
toSqr = getToSquare x
promoteTo = getPromotionPiece x
movePieces = case getMoveType x of
Normal -> move fromSqr toSqr
EnPassant -> enPassant fromSqr toSqr
Castling -> castle fromSqr toSqr
Promotion -> promotion promoteTo fromSqr toSqr
y'= y & disableCastling fromSqr toSqr
& arrangeEPSquare fromSqr toSqr
& arrangeHalfMoveClock fromSqr toSqr
& plyCount%~(+ 1)
& movePieces
& changeColor
& history%~((y^.zobristKey):)
checkers' = findCheckers y'
(pinnedByBishops',pinnedByRooks')
= findPinners y'
defendMap' = calcDefendMap y'
play :: String -> Position -> Maybe Position
play x y = (`makeMove` y) <$> move
where
lowerMove = map toLower x
move = find ((== lowerMove) . toLAN) $ legalMoves y
legalPositions :: Position -> [Position]
legalPositions x = map (`makeMove` x) $ legalMoves x
moves :: Position -> ([Move],[Move],[Move])
moves x = ( filter (notSuicidal x) $ pawnPush ++ doublePush ++ pawnCapture ++ epMove ++ promotion ++ promotionCapture ++ pieceMoves ++ castlingMoves
, sortBy (flip compare `on` see x) . filter noisyEnough . filter (notSuicidal x) $ capture ++ pawnCapture
, filter (notSuicidal x) (promotionCapture ++ promotion) ++ winningCaptures x)
where
noisyEnough move = see x move > 0
ownPawns = x<-!->Pawn
ownKing = x<-!->King
normalPawns = backward2 x . forward2 x $ ownPawns
cherryPawns = normalPawns .&. forward x backrank
promoting = ownPawns .&. backward x backrank
targets = x^.defenders
ocp = occupancy x
kCastle = if isWhite x
then 0x0000000000000006
else 0x0600000000000000
qCastle = if isWhite x
then 0x0000000000000070
else 0x7000000000000000
color = x^.activeColor
cRight = activeCastleRight x
kingside = if notInCheck
&& includeKingsideCastle cRight
&& kCastle `disjoint` ocp
then Just $ castlingMove ownKing (ownKing`shiftR`2)
else Nothing
queenside = if notInCheck
&& includeQueensideCastle cRight
&& qCastle `disjoint` ocp
then Just $ castlingMove ownKing (ownKing`shiftL`2)
else Nothing
notInCheck = not (isInCheck x)
pawnPush = do
from <- collapse
. backward x
. (`without` ocp)
. forward x
$ normalPawns
return $ normalMove from (forward x from)
doublePush = do
from <- collapse
. backward2 x
. (`without` ocp)
. forward x
. (`without` ocp)
. forward x
$ cherryPawns
return $ normalMove from (forward2 x from)
pawnCapture = do
from <- collapse normalPawns
to <- collapse
. (.&. targets)
$ pawnAttack color from
return $ normalMove from to
epMove = if epSquare == 0
then []
else do
from <- collapse
. (.&. ownPawns)
$ pawnAttack (opposite color) epPlace
return $ enPassantMove from epPlace
where
epSquare = x^.enPassantSquare
epPlace = bit epSquare
promotion = do
piece <- [Queen, Rook .. Knight]
from <- collapse
. backward x
. (`without` ocp)
. forward x
$ promoting
return $ promotionMove from (forward x from) piece
promotionCapture = do
piece <- [Queen, Rook .. Knight]
from <- collapse promoting
to <- collapse
. (.&. targets)
$ pawnAttack color from
return $ promotionMove from to piece
pieceMoves = do
piece <- [Knight .. King]
from <- collapse (x<-!->piece)
to <- collapse
$ (`without`x^.attackers)
$ T.pieceAttack piece ocp (countTrailingZeros from)
return $ normalMove from to
capture = do
piece <- [Knight .. King]
from <- collapse (x<-!->piece)
to <- collapse
$ (.&.x^.defenders)
$ T.pieceAttack piece ocp (countTrailingZeros from)
return $ normalMove from to
castlingMoves = catMaybes [kingside,queenside]
legalMoves,winningCaptures,noisyMoves :: Position -> [Move]
legalMoves x = moves x ^. _1
winningCaptures x = moves x ^. _2
noisyMoves x = moves x ^. _3
notSuicidal :: Position -> Move -> Bool
notSuicidal x@Position{_attackers = as,_defenders = ds,_checkers = cs,_defendMap = dMap,_pinnedByBishops = pb,_pinnedByRooks = pr} y
= case popCount cs of
0 -> case getMoveType y of
EnPassant -> pinTest && epPinTest
Castling -> dMap `disjoint` kingPath
_ -> kingFlees || (not kingMove && pinTest)
1 -> kingFlees
|| ( not kingMove
&& to == cs
&& notPinned)
|| ( not kingMove
&& (cs`joint`x<!>Bishop || cs`joint`x<!>Rook || cs`joint`x<!>Queen)
&& notPinned
&& ownKing `disjoint` T.pieceAttack (fromMaybe (error "No piece is attacking the king.") (x`pieceAt`cs)) (ocp.|.to) (countTrailingZeros cs))
|| ( getMoveType y == EnPassant
&& epPawn == cs
&& notPinned)
2 -> kingFlees
where
fromSqr = getFromSquare y
toSqr = getToSquare y
ownKingSqr = countTrailingZeros ownKing
from = bit fromSqr
to = bit toSqr
ocp = occupancy x
kingPath = to .|. bit ((fromSqr + toSqr) `div` 2)
ownKing = x<-!->King
epPawn = backward x to
pinnedMen = pb .|. pr
kingMove = from == ownKing
kingFlees = kingMove && dMap `disjoint` to
pinTest = notPinned || alongBishop || alongRook
notPinned = from`disjoint`pinnedMen
alongBishop = from`joint`pb && to`joint`pinnedRay
where pinnedRay = T.bishopAttack (ocp`xor`from) ownKingSqr
.&. T.bishopAttack ocp fromSqr
alongRook = from`joint`pr && to`joint`pinnedRay
where pinnedRay = T.rookAttack (ocp`xor`from) ownKingSqr
.&. T.rookAttack ocp fromSqr
epPinTest = epPinners `disjoint` attack East epEmptyMap ownKing
&& epPinners `disjoint` attack West epEmptyMap ownKing
epPinners = (x^.rooks.|.x^.queens).&.ds
epEmptyMap = complement ocp `xor` (from.|.epPawn)
see :: Position -> Move -> PieceValue
see x y = maybe 0 nextSEE attacker
where
toSqr = getToSquare y
to = bit toSqr
ocp = occupancy x
pawnCandidate
= ls1b $ T.pawnAttack (x ^. activeColor & opposite) toSqr.&.x<-!->Pawn
knightCandidate
= ls1b $ T.knightAttack toSqr .&. x<-!->Knight
bishopCandidate
= ls1b $ T.bishopAttack ocp toSqr .&. x<-!->Bishop
rookCandidate
= ls1b $ T.rookAttack ocp toSqr .&. x<-!->Rook
queenCandidate
= ls1b $ T.queenAttack ocp toSqr .&. x<-!->Queen
kingCandidate
= ls1b $ T.kingAttack toSqr .&. x<-!->King
attacker | populated pawnCandidate = Just (Pawn,pawnCandidate)
| populated knightCandidate = Just (Knight,knightCandidate)
| populated bishopCandidate = Just (Bishop,bishopCandidate)
| populated rookCandidate = Just (Rook,rookCandidate)
| populated queenCandidate = Just (Queen,queenCandidate)
| populated kingCandidate = Just (King,kingCandidate)
| otherwise = Nothing
target = if getMoveType y == EnPassant then backward x to else to
targetPiece = fromMaybe (error "No piece on the target square.") (x`pieceAt`target)
nextSEE (piece,from) = max 0 (material targetPiece - see nextPos y)
where
fromTo = from.|.to
nextPos = x
& changeColor
. (xor fromTo`overAttacker`piece)
. (xor to`overDefender`targetPiece)
|
syanidar/Sophy
|
src/Foundation/Move.hs
|
bsd-3-clause
| 13,091
| 0
| 23
| 5,065
| 3,557
| 1,866
| 1,691
| -1
| -1
|
{-# LANGUAGE OverloadedStrings #-}
module Main where
import Startups.Base
import Startups.Cards
import Startups.CardList
import Startups.GameTypes
import Startups.Utils
import Backends.Pure
import Control.Lens
import Data.List (foldl')
import Test.Hspec
import qualified Data.Set as S
import qualified Data.Text as T
import qualified Data.Map.Strict as M
import System.Random
import Test.QuickCheck
import Data.Monoid
import Control.Monad
import Data.Maybe (fromJust)
getCard :: T.Text -> Card
getCard n = case filter (\c -> c ^? cName == Just n) allcards of
(c:_) -> c
[] -> error (T.unpack n <> " could not be found")
-- | Some game state that is good enough for testing things
testState :: GameState
testState = GameState (M.fromList players) [] (mkStdGen 5)
where
players = [ ("pim", pim), ("pam", pam), ("poum", poum), ("bob", bob )]
ppim = CompanyProfile Facebook A
ppam = CompanyProfile Apple B
ppoum = CompanyProfile Google A
pbob = CompanyProfile Twitter A
pim = PlayerState ppim Project pimcards 1 ("pam", "bob") []
pam = PlayerState ppam Project pamcards 3 ("poum", "pim") []
poum = PlayerState ppoum Project poumcards 6 ("bob", "pam") []
bob = PlayerState pbob Project bobcards 5 ("pim", "poum") []
pimcards = map (getResourceCard ppim) [Project .. Stage1] <> map getCard [ "Cloud Servers"
, "Marketroid"
, "Company Nerf Battles"
]
pamcards = map (getResourceCard ppam) [Project] <> map getCard [ "High Speed Internet"
, "Free Food"
, "Enterprise Programmer"
, "Rock Star Evangelist"
]
poumcards = map (getResourceCard ppoum) [Project .. Stage1] <> map getCard [ "Garage"
, "Business Angel"
, "Admin Network"
]
bobcards = map (getResourceCard pbob) [Project] <> map getCard [ "Accountant"
, "Operations Guru"
, "Financial Developer"
, "Standing Desks"
]
main :: IO ()
main = hspec $ do
describe "Cards" $ do
it "are all distinct" $ let extra = foldl' findExtra ([], S.empty) allcards
findExtra (curlst, cardset) card | card `S.member` cardset = (card : curlst, cardset)
| otherwise = (curlst, S.insert card cardset)
in fst extra `shouldBe` []
let nbc age nbplayers = it ("are the correct number for " ++ show age ++ " and " ++ show (getPlayerCount nbplayers) ++ " players") (cardsCount age nbplayers `shouldBe` expectedCount age nbplayers)
expectedCount age nbplayers = fromIntegral $ nbplayers * 7 - if age == Age3 then nbplayers + 2 else 0
cardsCount age nbplayers = length (filter (\c -> c ^? cAge == Just age && c ^? cMinplayers <= Just nbplayers) allcards)
mapM_ (uncurry nbc) [ (age, nbp) | age <- [Age1,Age2,Age3], nbp <- [3 .. 7] ]
describe "availableResources" $
forM_ [("pam", ["AVD$$$"]), ("pim", ["YMF$"]), ("poum", ["D$$$$$$"]), ("bob", ["YF$$$$$DM", "YF$$$$$FO", "YF$$$$$DO", "YF$$$$$FM"])] $ \(pid, reslist) ->
let getResCost (Cost rescost _) = rescost
expected = S.fromList (map getResCost reslist)
actual = S.fromList $ availableResources OwnRes (fromJust (testState ^? playermap . ix pid))
in it ("Is correct for " <> T.unpack pid) $ actual `shouldBe` expected
describe "random games" $ do
let gs = do
seed <- arbitrary
nbplayers <- Test.QuickCheck.elements [3 .. 7]
return (seed, nbplayers :: Int)
it "end well" $ forAll gs $ \(seed, nbplayers) -> case pureGame (mkStdGen seed) (map (T.pack . show) [1 .. nbplayers]) of
(_, Right _) -> True
_ -> False
|
bitemyapp/7startups
|
tests/tests.hs
|
bsd-3-clause
| 4,946
| 0
| 23
| 2,184
| 1,263
| 680
| 583
| -1
| -1
|
-- Copyright (c) 2016-present, Facebook, Inc.
-- All rights reserved.
--
-- This source code is licensed under the BSD-style license found in the
-- LICENSE file in the root directory of this source tree.
module Duckling.Ordinal.JA.Tests
( tests ) where
import Prelude
import Data.String
import Test.Tasty
import Duckling.Dimensions.Types
import Duckling.Ordinal.JA.Corpus
import Duckling.Testing.Asserts
tests :: TestTree
tests = testGroup "JA Tests"
[ makeCorpusTest [Seal Ordinal] corpus
]
|
facebookincubator/duckling
|
tests/Duckling/Ordinal/JA/Tests.hs
|
bsd-3-clause
| 504
| 0
| 9
| 78
| 79
| 50
| 29
| 11
| 1
|
{-
(c) The University of Glasgow 2006
(c) The GRASP/AQUA Project, Glasgow University, 1992-1998
\section[HsBinds]{Abstract syntax: top-level bindings and signatures}
Datatype for: @BindGroup@, @Bind@, @Sig@, @Bind@.
-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE StandaloneDeriving #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE UndecidableInstances #-} -- Note [Pass sensitive types]
-- in module PlaceHolder
{-# LANGUAGE ConstraintKinds #-}
{-# LANGUAGE CPP #-}
{-# LANGUAGE BangPatterns #-}
module HsBinds where
import {-# SOURCE #-} HsExpr ( pprExpr, LHsExpr,
MatchGroup, pprFunBind,
GRHSs, pprPatBind )
import {-# SOURCE #-} HsPat ( LPat )
import PlaceHolder ( PostTc,PostRn,DataId )
import HsTypes
import PprCore ()
import CoreSyn
import TcEvidence
import Type
import Name
import NameSet
import BasicTypes
import Outputable
import SrcLoc
import Var
import Bag
import FastString
import BooleanFormula (BooleanFormula)
import Data.Data hiding ( Fixity )
import Data.List
import Data.Ord
import Data.Foldable ( Foldable(..) )
#if __GLASGOW_HASKELL__ < 709
import Data.Traversable ( Traversable(..) )
import Data.Monoid ( mappend )
import Control.Applicative hiding (empty)
#else
import Control.Applicative ((<$>))
#endif
{-
************************************************************************
* *
\subsection{Bindings: @BindGroup@}
* *
************************************************************************
Global bindings (where clauses)
-}
-- During renaming, we need bindings where the left-hand sides
-- have been renamed but the right-hand sides have not.
-- the ...LR datatypes are parametrized by two id types,
-- one for the left and one for the right.
-- Other than during renaming, these will be the same.
type HsLocalBinds id = HsLocalBindsLR id id
-- | Bindings in a 'let' expression
-- or a 'where' clause
data HsLocalBindsLR idL idR
= HsValBinds (HsValBindsLR idL idR)
| HsIPBinds (HsIPBinds idR)
| EmptyLocalBinds
deriving (Typeable)
deriving instance (DataId idL, DataId idR)
=> Data (HsLocalBindsLR idL idR)
type HsValBinds id = HsValBindsLR id id
-- | Value bindings (not implicit parameters)
data HsValBindsLR idL idR
= -- | Before renaming RHS; idR is always RdrName
-- Not dependency analysed
-- Recursive by default
ValBindsIn
(LHsBindsLR idL idR) [LSig idR]
-- | After renaming RHS; idR can be Name or Id
-- Dependency analysed,
-- later bindings in the list may depend on earlier
-- ones.
| ValBindsOut
[(RecFlag, LHsBinds idL)]
[LSig Name]
deriving (Typeable)
deriving instance (DataId idL, DataId idR)
=> Data (HsValBindsLR idL idR)
type LHsBind id = LHsBindLR id id
type LHsBinds id = LHsBindsLR id id
type HsBind id = HsBindLR id id
type LHsBindsLR idL idR = Bag (LHsBindLR idL idR)
type LHsBindLR idL idR = Located (HsBindLR idL idR)
data HsBindLR idL idR
= -- | FunBind is used for both functions @f x = e@
-- and variables @f = \x -> e@
--
-- Reason 1: Special case for type inference: see 'TcBinds.tcMonoBinds'.
--
-- Reason 2: Instance decls can only have FunBinds, which is convenient.
-- If you change this, you'll need to change e.g. rnMethodBinds
--
-- But note that the form @f :: a->a = ...@
-- parses as a pattern binding, just like
-- @(f :: a -> a) = ... @
--
-- 'ApiAnnotation.AnnKeywordId's
--
-- - 'ApiAnnotation.AnnFunId', attached to each element of fun_matches
--
-- - 'ApiAnnotation.AnnEqual','ApiAnnotation.AnnWhere',
-- 'ApiAnnotation.AnnOpen','ApiAnnotation.AnnClose',
FunBind {
fun_id :: Located idL,
fun_infix :: Bool, -- ^ True => infix declaration
fun_matches :: MatchGroup idR (LHsExpr idR), -- ^ The payload
fun_co_fn :: HsWrapper, -- ^ Coercion from the type of the MatchGroup to the type of
-- the Id. Example:
--
-- @
-- f :: Int -> forall a. a -> a
-- f x y = y
-- @
--
-- Then the MatchGroup will have type (Int -> a' -> a')
-- (with a free type variable a'). The coercion will take
-- a CoreExpr of this type and convert it to a CoreExpr of
-- type Int -> forall a'. a' -> a'
-- Notice that the coercion captures the free a'.
bind_fvs :: PostRn idL NameSet, -- ^ After the renamer, this contains
-- the locally-bound
-- free variables of this defn.
-- See Note [Bind free vars]
fun_tick :: [Tickish Id] -- ^ Ticks to put on the rhs, if any
}
-- | The pattern is never a simple variable;
-- That case is done by FunBind
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnBang',
-- 'ApiAnnotation.AnnEqual','ApiAnnotation.AnnWhere',
-- 'ApiAnnotation.AnnOpen','ApiAnnotation.AnnClose',
| PatBind {
pat_lhs :: LPat idL,
pat_rhs :: GRHSs idR (LHsExpr idR),
pat_rhs_ty :: PostTc idR Type, -- ^ Type of the GRHSs
bind_fvs :: PostRn idL NameSet, -- ^ See Note [Bind free vars]
pat_ticks :: ([Tickish Id], [[Tickish Id]])
-- ^ Ticks to put on the rhs, if any, and ticks to put on
-- the bound variables.
}
-- | Dictionary binding and suchlike.
-- All VarBinds are introduced by the type checker
| VarBind {
var_id :: idL,
var_rhs :: LHsExpr idR, -- ^ Located only for consistency
var_inline :: Bool -- ^ True <=> inline this binding regardless
-- (used for implication constraints only)
}
| AbsBinds { -- Binds abstraction; TRANSLATION
abs_tvs :: [TyVar],
abs_ev_vars :: [EvVar], -- ^ Includes equality constraints
-- | AbsBinds only gets used when idL = idR after renaming,
-- but these need to be idL's for the collect... code in HsUtil
-- to have the right type
abs_exports :: [ABExport idL],
abs_ev_binds :: TcEvBinds, -- ^ Evidence bindings
abs_binds :: LHsBinds idL -- ^ Typechecked user bindings
}
| PatSynBind (PatSynBind idL idR)
-- ^ - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnPattern',
-- 'ApiAnnotation.AnnLarrow','ApiAnnotation.AnnWhere'
-- 'ApiAnnotation.AnnOpen','ApiAnnotation.AnnClose'
deriving (Typeable)
deriving instance (DataId idL, DataId idR)
=> Data (HsBindLR idL idR)
-- Consider (AbsBinds tvs ds [(ftvs, poly_f, mono_f) binds]
--
-- Creates bindings for (polymorphic, overloaded) poly_f
-- in terms of monomorphic, non-overloaded mono_f
--
-- Invariants:
-- 1. 'binds' binds mono_f
-- 2. ftvs is a subset of tvs
-- 3. ftvs includes all tyvars free in ds
--
-- See Note [AbsBinds]
data ABExport id
  = ABE { abe_poly :: id -- ^ Any INLINE pragma is attached to this Id
, abe_mono :: id
, abe_wrap :: HsWrapper -- ^ See Note [AbsBinds wrappers]
-- Shape: (forall abs_tvs. abs_ev_vars => abe_mono) ~ abe_poly
, abe_prags :: TcSpecPrags -- ^ SPECIALISE pragmas
} deriving (Data, Typeable)
data PatSynBind idL idR
= PSB { psb_id :: Located idL, -- ^ Name of the pattern synonym
psb_fvs :: PostRn idR NameSet, -- ^ See Note [Bind free vars]
psb_args :: HsPatSynDetails (Located idR), -- ^ Formal parameter names
psb_def :: LPat idR, -- ^ Right-hand side
psb_dir :: HsPatSynDir idR -- ^ Directionality
} deriving (Typeable)
deriving instance (DataId idL, DataId idR )
=> Data (PatSynBind idL idR)
{-
Note [AbsBinds]
~~~~~~~~~~~~~~~
The AbsBinds constructor is used in the output of the type checker, to record
*typechecked* and *generalised* bindings. Consider a module M, with this
top-level binding
M.reverse [] = []
M.reverse (x:xs) = M.reverse xs ++ [x]
In Hindley-Milner, a recursive binding is typechecked with the *recursive* uses
being *monomorphic*. So after typechecking *and* desugaring we will get something
like this
M.reverse :: forall a. [a] -> [a]
= /\a. letrec
reverse :: [a] -> [a] = \xs -> case xs of
[] -> []
(x:xs) -> reverse xs ++ [x]
in reverse
Notice that 'M.reverse' is polymorphic as expected, but there is a local
definition for plain 'reverse' which is *monomorphic*. The type variable
'a' scopes over the entire letrec.
That's after desugaring. What about after type checking but before desugaring?
That's where AbsBinds comes in. It looks like this:
AbsBinds { abs_tvs = [a]
, abs_exports = [ABE { abe_poly = M.reverse :: forall a. [a] -> [a],
, abe_mono = reverse :: [a] -> [a]}]
, abs_binds = { reverse :: [a] -> [a]
= \xs -> case xs of
[] -> []
(x:xs) -> reverse xs ++ [x] } }
Here,
* abs_tvs says what type variables are abstracted over the binding group,
just 'a' in this case.
* abs_binds is the *monomorphic* bindings of the group
* abs_exports describes how to get the polymorphic Id 'M.reverse' from the
monomorphic one 'reverse'
Notice that the *original* function (the polymorphic one you thought
you were defining) appears in the abe_poly field of the
abs_exports. The bindings in abs_binds are for fresh, local, Ids with
a *monomorphic* Id.
If there is a group of mutually recursive functions without type
signatures, we get one AbsBinds with the monomorphic versions of the
bindings in abs_binds, and one element of abs_exports for each
variable bound in the mutually recursive group. This is true even for
pattern bindings. Example:
(f,g) = (\x -> x, f)
After type checking we get
AbsBinds { abs_tvs = [a]
, abs_exports = [ ABE { abe_poly = M.f :: forall a. a -> a
, abe_mono = f :: a -> a }
, ABE { abe_poly = M.g :: forall a. a -> a
, abe_mono = g :: a -> a }]
, abs_binds = { (f,g) = (\x -> x, f) }
Note [AbsBinds wrappers]
~~~~~~~~~~~~~~~~~~~~~~~~
Consider
(f,g) = (\x.x, \y.y)
This ultimately desugars to something like this:
tup :: forall a b. (a->a, b->b)
tup = /\a b. (\x:a.x, \y:b.y)
f :: forall a. a -> a
f = /\a. case tup a Any of
(fm::a->a,gm:Any->Any) -> fm
...similarly for g...
The abe_wrap field deals with impedance-matching between
(/\a b. case tup a b of { (f,g) -> f })
and the thing we really want, which may have fewer type
variables. The action happens in TcBinds.mkExport.
Note [Bind free vars]
~~~~~~~~~~~~~~~~~~~~~
The bind_fvs field of FunBind and PatBind records the free variables
of the definition. It is used for two purposes
a) Dependency analysis prior to type checking
(see TcBinds.tc_group)
b) Deciding whether we can do generalisation of the binding
(see TcBinds.decideGeneralisationPlan)
Specifically,
* bind_fvs includes all free vars that are defined in this module
(including top-level things and lexically scoped type variables)
* bind_fvs excludes imported vars; this is just to keep the set smaller
* Before renaming, and after typechecking, the field is unused;
it's just an error thunk
-}
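{-
A small illustrative example for Note [Bind free vars] (not part of the
original note): in a module that defines both

    g :: Int -> Int
    g = ...

    f x = g x + length x

the FunBind for 'f' records 'g' in bind_fvs (it is defined in this module)
but not 'length', which is imported.
-}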
instance (OutputableBndr idL, OutputableBndr idR) => Outputable (HsLocalBindsLR idL idR) where
ppr (HsValBinds bs) = ppr bs
ppr (HsIPBinds bs) = ppr bs
ppr EmptyLocalBinds = empty
instance (OutputableBndr idL, OutputableBndr idR) => Outputable (HsValBindsLR idL idR) where
ppr (ValBindsIn binds sigs)
= pprDeclList (pprLHsBindsForUser binds sigs)
ppr (ValBindsOut sccs sigs)
= getPprStyle $ \ sty ->
if debugStyle sty then -- Print with sccs showing
vcat (map ppr sigs) $$ vcat (map ppr_scc sccs)
else
pprDeclList (pprLHsBindsForUser (unionManyBags (map snd sccs)) sigs)
where
ppr_scc (rec_flag, binds) = pp_rec rec_flag <+> pprLHsBinds binds
pp_rec Recursive = ptext (sLit "rec")
pp_rec NonRecursive = ptext (sLit "nonrec")
pprLHsBinds :: (OutputableBndr idL, OutputableBndr idR) => LHsBindsLR idL idR -> SDoc
pprLHsBinds binds
| isEmptyLHsBinds binds = empty
| otherwise = pprDeclList (map ppr (bagToList binds))
pprLHsBindsForUser :: (OutputableBndr idL, OutputableBndr idR, OutputableBndr id2)
=> LHsBindsLR idL idR -> [LSig id2] -> [SDoc]
-- pprLHsBindsForUser is different to pprLHsBinds because
-- a) No braces: 'let' and 'where' include a list of HsBindGroups
-- and we don't want several groups of bindings each
-- with braces around
-- b) Sort by location before printing
-- c) Include signatures
pprLHsBindsForUser binds sigs
= map snd (sort_by_loc decls)
where
decls :: [(SrcSpan, SDoc)]
decls = [(loc, ppr sig) | L loc sig <- sigs] ++
[(loc, ppr bind) | L loc bind <- bagToList binds]
sort_by_loc decls = sortBy (comparing fst) decls
pprDeclList :: [SDoc] -> SDoc -- Braces with a space
-- Print a bunch of declarations
-- One could choose { d1; d2; ... }, using 'sep'
-- or d1
-- d2
-- ..
-- using vcat
-- At the moment we chose the latter
-- Also we do the 'pprDeeperList' thing.
pprDeclList ds = pprDeeperList vcat ds
------------
emptyLocalBinds :: HsLocalBindsLR a b
emptyLocalBinds = EmptyLocalBinds
isEmptyLocalBinds :: HsLocalBindsLR a b -> Bool
isEmptyLocalBinds (HsValBinds ds) = isEmptyValBinds ds
isEmptyLocalBinds (HsIPBinds ds) = isEmptyIPBinds ds
isEmptyLocalBinds EmptyLocalBinds = True
isEmptyValBinds :: HsValBindsLR a b -> Bool
isEmptyValBinds (ValBindsIn ds sigs) = isEmptyLHsBinds ds && null sigs
isEmptyValBinds (ValBindsOut ds sigs) = null ds && null sigs
emptyValBindsIn, emptyValBindsOut :: HsValBindsLR a b
emptyValBindsIn = ValBindsIn emptyBag []
emptyValBindsOut = ValBindsOut [] []
emptyLHsBinds :: LHsBindsLR idL idR
emptyLHsBinds = emptyBag
isEmptyLHsBinds :: LHsBindsLR idL idR -> Bool
isEmptyLHsBinds = isEmptyBag
------------
plusHsValBinds :: HsValBinds a -> HsValBinds a -> HsValBinds a
plusHsValBinds (ValBindsIn ds1 sigs1) (ValBindsIn ds2 sigs2)
= ValBindsIn (ds1 `unionBags` ds2) (sigs1 ++ sigs2)
plusHsValBinds (ValBindsOut ds1 sigs1) (ValBindsOut ds2 sigs2)
= ValBindsOut (ds1 ++ ds2) (sigs1 ++ sigs2)
plusHsValBinds _ _
= panic "HsBinds.plusHsValBinds"
getTypeSigNames :: HsValBinds a -> NameSet
-- Get the names that have a user type sig
getTypeSigNames (ValBindsOut _ sigs)
= mkNameSet [unLoc n | L _ (TypeSig names _ _) <- sigs, n <- names]
getTypeSigNames _
= panic "HsBinds.getTypeSigNames"
{-
What AbsBinds means
~~~~~~~~~~~~~~~~~~~
AbsBinds tvs
[d1,d2]
[(tvs1, f1p, f1m),
(tvs2, f2p, f2m)]
BIND
means
f1p = /\ tvs -> \ [d1,d2] -> letrec DBINDS and BIND
in fm
gp = ...same again, with gm instead of fm
This is a pretty bad translation, because it duplicates all the bindings.
So the desugarer tries to do a better job:
fp = /\ [a,b] -> \ [d1,d2] -> case tp [a,b] [d1,d2] of
(fm,gm) -> fm
..ditto for gp..
tp = /\ [a,b] -> \ [d1,d2] -> letrec DBINDS and BIND
in (fm,gm)
-}
instance (OutputableBndr idL, OutputableBndr idR) => Outputable (HsBindLR idL idR) where
ppr mbind = ppr_monobind mbind
ppr_monobind :: (OutputableBndr idL, OutputableBndr idR) => HsBindLR idL idR -> SDoc
ppr_monobind (PatBind { pat_lhs = pat, pat_rhs = grhss })
= pprPatBind pat grhss
ppr_monobind (VarBind { var_id = var, var_rhs = rhs })
= sep [pprBndr CaseBind var, nest 2 $ equals <+> pprExpr (unLoc rhs)]
ppr_monobind (FunBind { fun_id = fun, fun_infix = inf,
fun_co_fn = wrap,
fun_matches = matches,
fun_tick = ticks })
= pprTicks empty (if null ticks then empty
else text "-- ticks = " <> ppr ticks)
$$ ifPprDebug (pprBndr LetBind (unLoc fun))
$$ pprFunBind (unLoc fun) inf matches
$$ ifPprDebug (ppr wrap)
ppr_monobind (PatSynBind psb) = ppr psb
ppr_monobind (AbsBinds { abs_tvs = tyvars, abs_ev_vars = dictvars
, abs_exports = exports, abs_binds = val_binds
, abs_ev_binds = ev_binds })
= hang (ptext (sLit "AbsBinds") <+> brackets (interpp'SP tyvars)
<+> brackets (interpp'SP dictvars))
2 $ braces $ vcat
[ ptext (sLit "Exports:") <+> brackets (sep (punctuate comma (map ppr exports)))
, ptext (sLit "Exported types:") <+> vcat [pprBndr LetBind (abe_poly ex) | ex <- exports]
, ptext (sLit "Binds:") <+> pprLHsBinds val_binds
, ifPprDebug (ptext (sLit "Evidence:") <+> ppr ev_binds) ]
instance (OutputableBndr id) => Outputable (ABExport id) where
ppr (ABE { abe_wrap = wrap, abe_poly = gbl, abe_mono = lcl, abe_prags = prags })
= vcat [ ppr gbl <+> ptext (sLit "<=") <+> ppr lcl
, nest 2 (pprTcSpecPrags prags)
, nest 2 (ppr wrap)]
instance (OutputableBndr idL, OutputableBndr idR) => Outputable (PatSynBind idL idR) where
ppr (PSB{ psb_id = L _ psyn, psb_args = details, psb_def = pat, psb_dir = dir })
= ppr_lhs <+> ppr_rhs
where
ppr_lhs = ptext (sLit "pattern") <+> ppr_details
ppr_simple syntax = syntax <+> ppr pat
(is_infix, ppr_details) = case details of
InfixPatSyn v1 v2 -> (True, hsep [ppr v1, pprInfixOcc psyn, ppr v2])
PrefixPatSyn vs -> (False, hsep (pprPrefixOcc psyn : map ppr vs))
ppr_rhs = case dir of
Unidirectional -> ppr_simple (ptext (sLit "<-"))
ImplicitBidirectional -> ppr_simple equals
ExplicitBidirectional mg -> ppr_simple (ptext (sLit "<-")) <+> ptext (sLit "where") $$
(nest 2 $ pprFunBind psyn is_infix mg)
pprTicks :: SDoc -> SDoc -> SDoc
-- Print stuff about ticks only when -dppr-debug is on, to avoid
-- them appearing in error messages (from the desugarer); see Trac # 3263
-- Also print ticks in dumpStyle, so that -ddump-hpc actually does
-- something useful.
pprTicks pp_no_debug pp_when_debug
= getPprStyle (\ sty -> if debugStyle sty || dumpStyle sty
then pp_when_debug
else pp_no_debug)
{-
************************************************************************
* *
Implicit parameter bindings
* *
************************************************************************
-}
data HsIPBinds id
= IPBinds
[LIPBind id]
TcEvBinds -- Only in typechecker output; binds
-- uses of the implicit parameters
deriving (Typeable)
deriving instance (DataId id) => Data (HsIPBinds id)
isEmptyIPBinds :: HsIPBinds id -> Bool
isEmptyIPBinds (IPBinds is ds) = null is && isEmptyTcEvBinds ds
type LIPBind id = Located (IPBind id)
-- ^ May have 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnSemi' when in a
-- list
-- | Implicit parameter bindings.
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnEqual'
{- These bindings start off as (Left "x") in the parser and stay
that way until after type-checking when they are replaced with
(Right d), where "d" is the name of the dictionary holding the
evidence for the implicit parameter. -}
data IPBind id
= IPBind (Either HsIPName id) (LHsExpr id)
deriving (Typeable)
deriving instance (DataId name) => Data (IPBind name)
instance (OutputableBndr id) => Outputable (HsIPBinds id) where
ppr (IPBinds bs ds) = pprDeeperList vcat (map ppr bs)
$$ ifPprDebug (ppr ds)
instance (OutputableBndr id) => Outputable (IPBind id) where
ppr (IPBind lr rhs) = name <+> equals <+> pprExpr (unLoc rhs)
where name = case lr of
Left ip -> pprBndr LetBind ip
Right id -> pprBndr LetBind id
{-
************************************************************************
* *
\subsection{@Sig@: type signatures and value-modifying user pragmas}
* *
************************************************************************
It is convenient to lump ``value-modifying'' user-pragmas (e.g.,
``specialise this function to these four types...'') in with type
signatures. Then all the machinery to move them into place, etc.,
serves for both.
-}
type LSig name = Located (Sig name)
-- | Signatures and pragmas
data Sig name
= -- | An ordinary type signature
--
-- > f :: Num a => a -> a
--
-- After renaming, this list of Names contains the named and unnamed
-- wildcards brought into scope by this signature. For a signature
-- @_ -> _a -> Bool@, the renamer will give the unnamed wildcard @_@
-- a freshly generated name, e.g. @_w@. @_w@ and the named wildcard @_a@
-- are then both replaced with fresh meta vars in the type. Their names
-- are stored in the type signature that brought them into scope, in
-- this third field to be more specific.
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnDcolon',
-- 'ApiAnnotation.AnnComma'
TypeSig [Located name] (LHsType name) (PostRn name [Name])
-- | A pattern synonym type signature
--
-- > pattern Single :: () => (Show a) => a -> [a]
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnPattern',
-- 'ApiAnnotation.AnnDcolon','ApiAnnotation.AnnForall'
-- 'ApiAnnotation.AnnDot','ApiAnnotation.AnnDarrow'
| PatSynSig (Located name)
(HsExplicitFlag, LHsTyVarBndrs name)
(LHsContext name) -- Provided context
(LHsContext name) -- Required context
(LHsType name)
-- | A type signature for a default method inside a class
--
-- > default eq :: (Representable0 a, GEq (Rep0 a)) => a -> a -> Bool
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnDefault',
-- 'ApiAnnotation.AnnDcolon'
| GenericSig [Located name] (LHsType name)
-- | A type signature in generated code, notably the code
-- generated for record selectors. We simply record
-- the desired Id itself, replete with its name, type
-- and IdDetails. Otherwise it's just like a type
-- signature: there should be an accompanying binding
| IdSig Id
-- | An ordinary fixity declaration
--
-- > infixl 8 ***
--
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnInfix',
-- 'ApiAnnotation.AnnVal'
| FixSig (FixitySig name)
-- | An inline pragma
--
-- > {-# INLINE f #-}
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnOpen',
-- 'ApiAnnotation.AnnClose','ApiAnnotation.AnnOpen',
-- 'ApiAnnotation.AnnVal','ApiAnnotation.AnnTilde',
-- 'ApiAnnotation.AnnClose'
| InlineSig (Located name) -- Function name
InlinePragma -- Never defaultInlinePragma
-- | A specialisation pragma
--
-- > {-# SPECIALISE f :: Int -> Int #-}
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnOpen',
-- 'ApiAnnotation.AnnOpen','ApiAnnotation.AnnTilde',
-- 'ApiAnnotation.AnnVal','ApiAnnotation.AnnClose',
-- 'ApiAnnotation.AnnDcolon','ApiAnnotation.AnnClose',
| SpecSig (Located name) -- Specialise a function or datatype ...
[LHsType name] -- ... to these types
InlinePragma -- The pragma on SPECIALISE_INLINE form.
-- If it's just defaultInlinePragma, then we said
-- SPECIALISE, not SPECIALISE_INLINE
-- | A specialisation pragma for instance declarations only
--
-- > {-# SPECIALISE instance Eq [Int] #-}
--
-- (Class tys); should be a specialisation of the
-- current instance declaration
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnOpen',
-- 'ApiAnnotation.AnnInstance','ApiAnnotation.AnnClose'
| SpecInstSig (LHsType name)
-- | A minimal complete definition pragma
--
-- > {-# MINIMAL a | (b, c | (d | e)) #-}
--
-- - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnOpen',
-- 'ApiAnnotation.AnnVbar','ApiAnnotation.AnnComma',
-- 'ApiAnnotation.AnnClose'
| MinimalSig (BooleanFormula (Located name))
deriving (Typeable)
deriving instance (DataId name) => Data (Sig name)
type LFixitySig name = Located (FixitySig name)
data FixitySig name = FixitySig [Located name] Fixity
deriving (Data, Typeable)
-- | TcSpecPrags conveys pragmas from the type checker to the desugarer
data TcSpecPrags
= IsDefaultMethod -- ^ Super-specialised: a default method should
-- be macro-expanded at every call site
| SpecPrags [LTcSpecPrag]
deriving (Data, Typeable)
type LTcSpecPrag = Located TcSpecPrag
data TcSpecPrag
= SpecPrag
Id
HsWrapper
InlinePragma
-- ^ The Id to be specialised, a wrapper that specialises the
-- polymorphic function, and inlining spec for the specialised function
deriving (Data, Typeable)
noSpecPrags :: TcSpecPrags
noSpecPrags = SpecPrags []
hasSpecPrags :: TcSpecPrags -> Bool
hasSpecPrags (SpecPrags ps) = not (null ps)
hasSpecPrags IsDefaultMethod = False
isDefaultMethod :: TcSpecPrags -> Bool
isDefaultMethod IsDefaultMethod = True
isDefaultMethod (SpecPrags {}) = False
isFixityLSig :: LSig name -> Bool
isFixityLSig (L _ (FixSig {})) = True
isFixityLSig _ = False
isVanillaLSig :: LSig name -> Bool -- User type signatures
-- A badly-named function, but it's part of the GHCi (used
-- by Haddock) so I don't want to change it gratuitously.
isVanillaLSig (L _(TypeSig {})) = True
isVanillaLSig _ = False
isTypeLSig :: LSig name -> Bool -- Type signatures
isTypeLSig (L _(TypeSig {})) = True
isTypeLSig (L _(GenericSig {})) = True
isTypeLSig (L _(IdSig {})) = True
isTypeLSig _ = False
isSpecLSig :: LSig name -> Bool
isSpecLSig (L _(SpecSig {})) = True
isSpecLSig _ = False
isSpecInstLSig :: LSig name -> Bool
isSpecInstLSig (L _ (SpecInstSig {})) = True
isSpecInstLSig _ = False
isPragLSig :: LSig name -> Bool
-- Identifies pragmas
isPragLSig (L _ (SpecSig {})) = True
isPragLSig (L _ (InlineSig {})) = True
isPragLSig _ = False
isInlineLSig :: LSig name -> Bool
-- Identifies inline pragmas
isInlineLSig (L _ (InlineSig {})) = True
isInlineLSig _ = False
isMinimalLSig :: LSig name -> Bool
isMinimalLSig (L _ (MinimalSig {})) = True
isMinimalLSig _ = False
hsSigDoc :: Sig name -> SDoc
hsSigDoc (TypeSig {}) = ptext (sLit "type signature")
hsSigDoc (PatSynSig {}) = ptext (sLit "pattern synonym signature")
hsSigDoc (GenericSig {}) = ptext (sLit "default type signature")
hsSigDoc (IdSig {}) = ptext (sLit "id signature")
hsSigDoc (SpecSig {}) = ptext (sLit "SPECIALISE pragma")
hsSigDoc (InlineSig _ prag) = ppr (inlinePragmaSpec prag) <+> ptext (sLit "pragma")
hsSigDoc (SpecInstSig {}) = ptext (sLit "SPECIALISE instance pragma")
hsSigDoc (FixSig {}) = ptext (sLit "fixity declaration")
hsSigDoc (MinimalSig {}) = ptext (sLit "MINIMAL pragma")
{-
Check if signatures overlap; this is used when checking for duplicate
signatures. Since some of the signatures contain a list of names, testing for
equality is not enough -- we have to check if they overlap.
-}
instance (OutputableBndr name) => Outputable (Sig name) where
ppr sig = ppr_sig sig
ppr_sig :: OutputableBndr name => Sig name -> SDoc
ppr_sig (TypeSig vars ty _wcs) = pprVarSig (map unLoc vars) (ppr ty)
ppr_sig (GenericSig vars ty) = ptext (sLit "default") <+> pprVarSig (map unLoc vars) (ppr ty)
ppr_sig (IdSig id) = pprVarSig [id] (ppr (varType id))
ppr_sig (FixSig fix_sig) = ppr fix_sig
ppr_sig (SpecSig var ty inl)
= pragBrackets (pprSpec (unLoc var) (interpp'SP ty) inl)
ppr_sig (InlineSig var inl) = pragBrackets (ppr inl <+> pprPrefixOcc (unLoc var))
ppr_sig (SpecInstSig ty) = pragBrackets (ptext (sLit "SPECIALIZE instance") <+> ppr ty)
ppr_sig (MinimalSig bf) = pragBrackets (pprMinimalSig bf)
ppr_sig (PatSynSig name (flag, qtvs) (L _ prov) (L _ req) ty)
  = pprPatSynSig (unLoc name) False -- TODO: is_bidir
(pprHsForAll flag qtvs (noLoc []))
(pprHsContextMaybe prov) (pprHsContextMaybe req)
(ppr ty)
pprPatSynSig :: (OutputableBndr name)
=> name -> Bool -> SDoc -> Maybe SDoc -> Maybe SDoc -> SDoc -> SDoc
pprPatSynSig ident _is_bidir tvs prov req ty
= ptext (sLit "pattern") <+> pprPrefixOcc ident <+> dcolon <+>
tvs <+> context <+> ty
where
context = case (prov, req) of
(Nothing, Nothing) -> empty
(Nothing, Just req) -> parens empty <+> darrow <+> req <+> darrow
(Just prov, Nothing) -> prov <+> darrow
(Just prov, Just req) -> prov <+> darrow <+> req <+> darrow
instance OutputableBndr name => Outputable (FixitySig name) where
ppr (FixitySig names fixity) = sep [ppr fixity, pprops]
where
pprops = hsep $ punctuate comma (map (pprInfixOcc . unLoc) names)
pragBrackets :: SDoc -> SDoc
pragBrackets doc = ptext (sLit "{-#") <+> doc <+> ptext (sLit "#-}")
pprVarSig :: (OutputableBndr id) => [id] -> SDoc -> SDoc
pprVarSig vars pp_ty = sep [pprvars <+> dcolon, nest 2 pp_ty]
where
pprvars = hsep $ punctuate comma (map pprPrefixOcc vars)
pprSpec :: (OutputableBndr id) => id -> SDoc -> InlinePragma -> SDoc
pprSpec var pp_ty inl = ptext (sLit "SPECIALIZE") <+> pp_inl <+> pprVarSig [var] pp_ty
where
pp_inl | isDefaultInlinePragma inl = empty
| otherwise = ppr inl
pprTcSpecPrags :: TcSpecPrags -> SDoc
pprTcSpecPrags IsDefaultMethod = ptext (sLit "<default method>")
pprTcSpecPrags (SpecPrags ps) = vcat (map (ppr . unLoc) ps)
instance Outputable TcSpecPrag where
ppr (SpecPrag var _ inl) = pprSpec var (ptext (sLit "<type>")) inl
pprMinimalSig :: OutputableBndr name => BooleanFormula (Located name) -> SDoc
pprMinimalSig bf = ptext (sLit "MINIMAL") <+> ppr (fmap unLoc bf)
{-
************************************************************************
* *
\subsection[PatSynBind]{A pattern synonym definition}
* *
************************************************************************
-}
data HsPatSynDetails a
= InfixPatSyn a a
| PrefixPatSyn [a]
deriving (Data, Typeable)
instance Functor HsPatSynDetails where
fmap f (InfixPatSyn left right) = InfixPatSyn (f left) (f right)
fmap f (PrefixPatSyn args) = PrefixPatSyn (fmap f args)
instance Foldable HsPatSynDetails where
foldMap f (InfixPatSyn left right) = f left `mappend` f right
foldMap f (PrefixPatSyn args) = foldMap f args
foldl1 f (InfixPatSyn left right) = left `f` right
foldl1 f (PrefixPatSyn args) = Data.List.foldl1 f args
foldr1 f (InfixPatSyn left right) = left `f` right
foldr1 f (PrefixPatSyn args) = Data.List.foldr1 f args
-- TODO: After a few more versions, we should probably use these.
#if __GLASGOW_HASKELL__ >= 709
length (InfixPatSyn _ _) = 2
length (PrefixPatSyn args) = Data.List.length args
null (InfixPatSyn _ _) = False
null (PrefixPatSyn args) = Data.List.null args
toList (InfixPatSyn left right) = [left, right]
toList (PrefixPatSyn args) = args
#endif
instance Traversable HsPatSynDetails where
traverse f (InfixPatSyn left right) = InfixPatSyn <$> f left <*> f right
traverse f (PrefixPatSyn args) = PrefixPatSyn <$> traverse f args
data HsPatSynDir id
= Unidirectional
| ImplicitBidirectional
| ExplicitBidirectional (MatchGroup id (LHsExpr id))
deriving (Typeable)
deriving instance (DataId id) => Data (HsPatSynDir id)
|
bitemyapp/ghc
|
compiler/hsSyn/HsBinds.hs
|
bsd-3-clause
| 33,758
| 0
| 17
| 9,681
| 6,011
| 3,233
| 2,778
| 376
| 4
|
{-# LANGUAGE FlexibleContexts #-}
module Music.Time.Voice (
-- * Voice type
Voice,
-- * Construction
voice,
notes,
pairs,
durationsAsVoice,
-- * Traversal
-- ** Separating rhythms and values
valuesV,
durationsV,
-- ** Zips
unzipVoice,
zipVoiceScale,
zipVoiceScale3,
zipVoiceScale4,
zipVoiceNoScale,
-- FIXME compose with (lens assoc unassoc) for the 3 and 4 versions
zipVoiceNoScale3,
zipVoiceNoScale4,
zipVoiceScaleWith,
zipVoiceWithNoScale,
zipVoiceWith',
-- * Fusion
fuse,
fuseBy,
-- ** Fuse rests
fuseRests,
coverRests,
-- * Homophonic/Polyphonic texture
sameDurations,
mergeIfSameDuration,
mergeIfSameDurationWith,
homoToPolyphonic,
-- * Points in a voice
onsetsRelative,
offsetsRelative,
midpointsRelative,
erasRelative,
-- * Context
-- TODO clean
withContext,
-- voiceLens,
-- * Unsafe versions
unsafeNotes,
unsafePairs,
-- * Legacy
durationsVoice,
) where
import Control.Applicative
import Control.Lens hiding (Indexable, Level, above,
below, index, inside, parts,
reversed, transform, (<|), (|>))
import Control.Monad
import Control.Monad.Compose
import Control.Monad.Plus
import Data.AffineSpace
import Data.AffineSpace.Point
import Data.Foldable (Foldable)
import Data.Functor.Adjunction (unzipR)
import Data.Functor.Context
import Data.List.NonEmpty (NonEmpty)
import Data.Maybe
import Data.Semigroup
import Data.Sequence (Seq)
import Data.Set (Set)
import Data.String
import Data.Traversable (Traversable)
import Data.Typeable (Typeable)
import Data.VectorSpace
import Data.Aeson (ToJSON (..), FromJSON(..))
import qualified Data.Aeson as JSON
import Music.Dynamics.Literal
import Music.Pitch.Literal
import Music.Time.Internal.Util
import Music.Time.Juxtapose
import Music.Time.Note
import qualified Data.List
import qualified Data.Foldable
import qualified Data.Either
-- |
-- A 'Voice' is a sequential composition of non-overlapping note values.
--
-- Both 'Voice' and 'Note' have duration but no position. The difference
-- is that 'Note' sustains a single value throughout its duration, while
-- a voice may contain multiple values. It is called voice because it
-- generalizes the notation of a voice in choral or multi-part instrumental music.
--
-- It may be useful to think about 'Voice' and 'Note' as vectors in time space
-- (i.e. 'Duration'), that also happens to carry around other values, such as pitches.
--
newtype Voice a = Voice { getVoice :: [Note a] }
deriving (Eq, Ord, Typeable, Foldable, Traversable, Functor, Semigroup, Monoid)
instance (Show a, Transformable a) => Show (Voice a) where
show x = show (x^.notes) ++ "^.voice"
-- A voice is a list of events with explicit duration. Events can not overlap.
--
-- Voice is a 'Monoid' under sequential composition. 'mempty' is the empty part and 'mappend'
-- appends parts.
--
-- Voice is a 'Monad'. 'return' creates a part containing a single value of duration
-- one, and '>>=' transforms the values of a part, allowing the addition and
-- removal of values under relative duration. Perhaps more intuitively, 'join' scales
-- each inner part to the duration of the outer part, then removes the
-- intermediate structure.
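--
-- An illustrative sketch added for exposition (not part of the original module);
-- it uses only combinators defined below ('pure', 'stretch', 'pairs', 'durationsV'):
--
--   exampleVoice :: Voice Char
--   exampleVoice = pure 'a' <> stretch 2 (pure 'b')
--
-- Per the semantics above, @exampleVoice ^. pairs@ should be a two-element list
-- pairing duration 1 with 'a' and duration 2 with 'b', and
-- @(exampleVoice >>= \x -> pure x <> pure x) ^. durationsV@ should halve each
-- duration, giving [1/2, 1/2, 1, 1], since 'join' rescales each inner voice to
-- the duration of the note it replaces.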
instance Applicative Voice where
pure = return
(<*>) = ap
instance Alternative Voice where
(<|>) = (<>)
empty = mempty
instance Monad Voice where
return = view _Unwrapped . return . return
xs >>= f = view _Unwrapped $ (view _Wrapped . f) `mbind` view _Wrapped xs
instance MonadPlus Voice where
mzero = mempty
mplus = mappend
instance Wrapped (Voice a) where
type Unwrapped (Voice a) = [Note a]
_Wrapped' = iso getVoice Voice
instance Rewrapped (Voice a) (Voice b)
instance Cons (Voice a) (Voice b) (Note a) (Note b) where
_Cons = prism (\(s,v) -> (view voice.return $ s) <> v) $ \v -> case view notes v of
[] -> Left mempty
(x:xs) -> Right (x, view voice xs)
instance Snoc (Voice a) (Voice b) (Note a) (Note b) where
_Snoc = prism (\(v,s) -> v <> (view voice.return $ s)) $ \v -> case unsnoc (view notes v) of
Nothing -> Left mempty
Just (xs, x) -> Right (view voice xs, x)
instance ToJSON a => ToJSON (Voice a) where
-- TODO meta
toJSON x = JSON.object [ ("notes", toJSON ns) ]
where
ns = x^.notes
instance FromJSON a => FromJSON (Voice a) where
-- TODO change to include meta
parseJSON (JSON.Object x) = parseNL =<< (x JSON..: "notes")
where
parseNL (JSON.Array xs) = fmap ((^.voice) . toList) $ traverse parseJSON xs
toList = toListOf traverse
parseJSON _ = empty
instance Transformable (Voice a) where
transform s = over notes (transform s)
instance HasDuration (Voice a) where
_duration = sumOf (notes . each . duration)
instance Reversible a => Reversible (Voice a) where
rev = over notes reverse . fmap rev
instance (Transformable a, Splittable a) => Splittable (Voice a) where
-- TODO meta
split d v = case splitNotes d (v^.notes) of
(as,Nothing,cs) -> (as^.voice, cs^.voice)
(as,Just(b1,b2),cs) -> (as^.voice `snoc` b1, b2 `cons` cs^.voice)
splitNotes :: (Transformable a, Splittable a) => Duration -> [a] -> ([a], Maybe (a, a), [a])
splitNotes d xs = case (durAndNumNotesToFirst, needSplit) of
(Just (_,0),_) -> ([],Nothing,xs)
(Nothing ,False) -> (xs,Nothing,[])
(Just (_,n),False) -> (take n xs,Nothing,drop n xs)
(Nothing ,True) -> (init xs,Just (splitEnd (sum (fmap (^.duration) xs) - d) (last xs)),[])
(Just (d',n),True) -> (
take (n-1) xs
,Just (splitEnd (d'-d) (xs!!pred n)) -- (d'-d) is how much we have to cut
,drop n xs)
where
needSplit = case durAndNumNotesToFirst of
Nothing -> d < sum (fmap (^.duration) xs)
Just (d',_) -> d /= d'
-- Given dur is >= requested dur
-- Nothing means all goes to first
durAndNumNotesToFirst = accumUntil (\(ds,ns) x -> if ds < d then Left(ds+x,ns+1) else Right (ds,ns))
(0,0) (fmap (^.duration) xs)
splitEnd d x = split ((x^.duration) - d) x
-- >>> accumUntil (\s a -> if s < 345 then Left (s + a) else Right s) 0 [1..]
-- Just 351
accumUntil :: (s -> a -> Either s b) -> s -> [a] -> Maybe b
accumUntil f z xs = Data.Maybe.listToMaybe $ fmap fromRight $ dropWhile Data.Either.isLeft $ scanl (f . fromLeft) (Left z) xs
where
fromRight (Right x) = x
fromLeft (Left x) = x
instance IsString a => IsString (Voice a) where
fromString = pure . fromString
instance IsPitch a => IsPitch (Voice a) where
fromPitch = pure . fromPitch
instance IsInterval a => IsInterval (Voice a) where
fromInterval = pure . fromInterval
instance IsDynamics a => IsDynamics (Voice a) where
fromDynamics = pure . fromDynamics
-- Bogus instance, so we can use [c..g] expressions
instance Enum a => Enum (Voice a) where
toEnum = return . toEnum
fromEnum = list 0 (fromEnum . head) . Data.Foldable.toList
instance Num a => Num (Voice a) where
fromInteger = return . fromInteger
abs = fmap abs
signum = fmap signum
(+) = liftA2 (+)
(-) = liftA2 (-)
(*) = liftA2 (*)
-- | Create a 'Voice' from a list of 'Note's.
voice :: Getter [Note a] (Voice a)
voice = from unsafeNotes
{-# INLINE voice #-}
-- | View a 'Voice' as a list of 'Note' values.
notes :: Lens (Voice a) (Voice b) [Note a] [Note b]
notes = unsafeNotes
--
-- @
-- 'view' 'notes' :: 'Voice' a -> ['Note' a]
-- 'set' 'notes' :: ['Note' a] -> 'Voice' a -> 'Voice' a
-- 'over' 'notes' :: (['Note' a] -> ['Note' b]) -> 'Voice' a -> 'Voice' b
-- @
--
-- @
-- 'preview' ('notes' . 'each') :: 'Voice' a -> 'Maybe' ('Note' a)
-- 'preview' ('notes' . 'element' 1) :: 'Voice' a -> 'Maybe' ('Note' a)
-- 'preview' ('notes' . 'elements' odd) :: 'Voice' a -> 'Maybe' ('Note' a)
-- @
--
-- @
-- 'set' ('notes' . 'each') :: 'Note' a -> 'Voice' a -> 'Voice' a
-- 'set' ('notes' . 'element' 1) :: 'Note' a -> 'Voice' a -> 'Voice' a
-- 'set' ('notes' . 'elements' odd) :: 'Note' a -> 'Voice' a -> 'Voice' a
-- @
--
-- @
-- 'over' ('notes' . 'each') :: ('Note' a -> 'Note' b) -> 'Voice' a -> 'Voice' b
-- 'over' ('notes' . 'element' 1) :: ('Note' a -> 'Note' a) -> 'Voice' a -> 'Voice' a
-- 'over' ('notes' . 'elements' odd) :: ('Note' a -> 'Note' a) -> 'Voice' a -> 'Voice' a
-- @
--
-- @
-- 'toListOf' ('notes' . 'each') :: 'Voice' a -> ['Note' a]
-- 'toListOf' ('notes' . 'elements' odd) :: 'Voice' a -> ['Note' a]
-- 'toListOf' ('notes' . 'each' . 'filtered'
-- (\\x -> x^.'duration' \< 2)) :: 'Voice' a -> ['Note' a]
-- @
-- | View a voice as a list of duration-value pairs. Analogous to 'triples'.
pairs :: Lens (Voice a) (Voice b) [(Duration, a)] [(Duration, b)]
pairs = unsafePairs
-- | A voice is a list of notes up to meta-data. To preserve meta-data, use the more
-- restricted 'voice' and 'notes'.
unsafeNotes :: Iso (Voice a) (Voice b) [Note a] [Note b]
unsafeNotes = _Wrapped
-- | A voice is a list of (duration, value) pairs up to meta-data.
-- To preserve meta-data, use the more restricted 'pairs'.
unsafePairs :: Iso (Voice a) (Voice b) [(Duration, a)] [(Duration, b)]
unsafePairs = iso (map (^.from note) . (^.notes)) ((^.voice) . map (^.note))
durationsAsVoice :: Iso' [Duration] (Voice ())
durationsAsVoice = iso (mconcat . fmap (\d -> stretch d $ pure ())) (^. durationsV)
durationsVoice = durationsAsVoice
{-# DEPRECATED durationsVoice "Use durationsAsVoice" #-}
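-- An editor's illustrative note (not in the original source): round-tripping a
-- duration list through 'durationsAsVoice' and 'durationsV' should be the
-- identity, e.g. @([1,2,1] ^. durationsAsVoice) ^. durationsV@ should give
-- @[1,2,1]@, since each duration becomes one stretched unit note.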
-- |
-- Unzip the given voice.
--
unzipVoice :: Voice (a, b) -> (Voice a, Voice b)
unzipVoice = unzipR
-- |
-- Join the given voices by multiplying durations and pairing values.
--
zipVoiceScale :: Voice a -> Voice b -> Voice (a, b)
zipVoiceScale = zipVoiceScaleWith (,)
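-- An editor's illustrative sketch (not in the original source): because this zip
-- multiplies durations, zipping a note of duration 2 with a note of duration 3
-- should give a single note of duration 6 carrying the paired values:
--
--   zipVoiceScale (stretch 2 (pure 'a')) (stretch 3 (pure True))
--
-- i.e. @^. pairs@ of the result should be @[(6, ('a', True))]@.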
-- |
-- Join the given voices by multiplying durations and pairing values.
--
zipVoiceScale3 :: Voice a -> Voice b -> Voice c -> Voice (a, (b, c))
zipVoiceScale3 a b c = zipVoiceScale a (zipVoiceScale b c)
-- |
-- Join the given voices by multiplying durations and pairing values.
--
zipVoiceScale4 :: Voice a -> Voice b -> Voice c -> Voice d -> Voice (a, (b, (c, d)))
zipVoiceScale4 a b c d = zipVoiceScale a (zipVoiceScale b (zipVoiceScale c d))
-- |
-- Join the given voices by multiplying durations and pairing values.
--
zipVoiceScale5 :: Voice a -> Voice b -> Voice c -> Voice d -> Voice e -> Voice (a, (b, (c, (d, e))))
zipVoiceScale5 a b c d e = zipVoiceScale a (zipVoiceScale b (zipVoiceScale c (zipVoiceScale d e)))
-- |
-- Join the given voices by pairing values and selecting the first duration.
--
zipVoiceNoScale :: Voice a -> Voice b -> Voice (a, b)
zipVoiceNoScale = zipVoiceWithNoScale (,)
-- |
-- Join the given voices by pairing values and selecting the first duration.
--
zipVoiceNoScale3 :: Voice a -> Voice b -> Voice c -> Voice (a, (b, c))
zipVoiceNoScale3 a b c = zipVoiceNoScale a (zipVoiceNoScale b c)
-- |
-- Join the given voices by pairing values and selecting the first duration.
--
zipVoiceNoScale4 :: Voice a -> Voice b -> Voice c -> Voice d -> Voice (a, (b, (c, d)))
zipVoiceNoScale4 a b c d = zipVoiceNoScale a (zipVoiceNoScale b (zipVoiceNoScale c d))
-- |
-- Join the given voices by pairing values and selecting the first duration.
--
zipVoiceNoScale5 :: Voice a -> Voice b -> Voice c -> Voice d -> Voice e -> Voice (a, (b, (c, (d, e))))
zipVoiceNoScale5 a b c d e = zipVoiceNoScale a (zipVoiceNoScale b (zipVoiceNoScale c (zipVoiceNoScale d e)))
-- |
-- Join the given voices by multiplying durations and combining values using the given function.
--
zipVoiceScaleWith :: (a -> b -> c) -> Voice a -> Voice b -> Voice c
zipVoiceScaleWith = zipVoiceWith' (*)
-- |
-- Join the given voices without combining durations.
--
zipVoiceWithNoScale :: (a -> b -> c) -> Voice a -> Voice b -> Voice c
zipVoiceWithNoScale = zipVoiceWith' const
-- |
-- Join the given voices by combining durations and values using the given function.
--
zipVoiceWith' :: (Duration -> Duration -> Duration) -> (a -> b -> c) -> Voice a -> Voice b -> Voice c
zipVoiceWith' f g
((unzip.view pairs) -> (ad, as))
((unzip.view pairs) -> (bd, bs))
= let cd = zipWith f ad bd
cs = zipWith g as bs
in view (from unsafePairs) (zip cd cs)
-- TODO generalize these to use a Monoidal interface, rather than ([a] -> a)
-- The use of head (see below) is of course the First monoid
-- |
-- Merge consecutive equal notes.
--
fuse :: Eq a => Voice a -> Voice a
fuse = fuseBy (==)
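-- An editor's illustrative sketch (not in the original source): given the
-- grouping in 'fuseBy'' below, @fuse (pure 'a' <> stretch 2 (pure 'a') <> pure 'b')@
-- should collapse the two consecutive 'a' notes into one note of duration 3,
-- leaving the 'b' note of duration 1 untouched.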
-- |
-- Merge consecutive notes deemed equal by the given predicate.
--
fuseBy :: (a -> a -> Bool) -> Voice a -> Voice a
fuseBy p = fuseBy' p head
-- |
-- Merge consecutive equal notes using the given equality predicate and merge function.
--
fuseBy' :: (a -> a -> Bool) -> ([a] -> a) -> Voice a -> Voice a
fuseBy' p g = over unsafePairs $ fmap foldNotes . Data.List.groupBy (inspectingBy snd p)
where
-- Add up durations and use a custom function to combine notes
--
    -- Typically, the combination function is just 'head', as we know that group returns
-- non-empty lists of equal elements.
foldNotes (unzip -> (ds, as)) = (sum ds, g as)
-- |
-- Fuse all rests in the given voice. The resulting voice will have no consecutive rests.
--
fuseRests :: Voice (Maybe a) -> Voice (Maybe a)
fuseRests = fuseBy (\x y -> isNothing x && isNothing y)
-- |
-- Remove all rests in the given voice by prolonging the previous note. Returns 'Nothing'
-- if and only if the given voice contains rests only.
--
coverRests :: Voice (Maybe a) -> Maybe (Voice a)
coverRests x = if hasOnlyRests then Nothing else Just (fmap fromJust $ fuseBy merge x)
where
norm = fuseRests x
merge Nothing Nothing = error "Voice normalized, so consecutive rests are impossible"
merge (Just x) Nothing = True
merge Nothing (Just x) = True
merge (Just x) (Just y) = False
hasOnlyRests = all isNothing $ toListOf traverse x -- norm
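-- An editor's illustrative sketch (not in the original source):
--
--   coverRests (pure (Just 'a') <> pure Nothing <> pure (Just 'b'))
--
-- should give @Just@ a voice whose pairs are (2,'a') and (1,'b') (the rest is
-- absorbed into the preceding note), while
--
--   coverRests (pure Nothing <> pure Nothing)
--
-- should give @Nothing@, since the voice contains rests only.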
-- | Decorate all notes in a voice with their context, i.e. previous and following value
-- if present.
withContext :: Voice a -> Voice (Ctxt a)
withContext = over valuesV addCtxt
-- durationsV and valuesV are useful, but slightly odd
-- What happens if the user changes the length of the list?
-- Is there a more safe idiom that can be used instead?
-- TODO more elegant definition?
-- | A lens to the durations in a voice.
durationsV :: Lens' (Voice a) [Duration]
durationsV = lens getDurs (flip setDurs)
where
getDurs :: Voice a -> [Duration]
getDurs = map fst . view pairs
setDurs :: [Duration] -> Voice a -> Voice a
setDurs ds as = zipVoiceWith' (\a b -> a) (\a b -> b) (mconcat $ map durToVoice ds) as
durToVoice d = stretch d $ pure ()
-- | A lens to the values in a voice.
valuesV :: Lens (Voice a) (Voice b) [a] [b]
valuesV = lens getValues (flip setValues)
where
-- getValues :: Voice a -> [a]
getValues = map snd . view pairs
-- setValues :: [a] -> Voice b -> Voice a
setValues as bs = zipVoiceWith' (\a b -> b) (\a b -> a) (listToVoice as) bs
listToVoice = mconcat . map pure
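-- An editor's illustrative sketch (not in the original source): the two lenses
-- factor a voice into rhythm and values. For @v = pure 'a' <> stretch 2 (pure 'b')@,
-- @v ^. durationsV@ should be @[1,2]@ and @v ^. valuesV@ should be @"ab"@, and
-- @set valuesV "xy" v@ should keep the rhythm but carry 'x' and 'y' instead.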
-- Lens "filtered" through a voice
voiceLens :: (s -> a) -> (b -> s -> t) -> Lens (Voice s) (Voice t) (Voice a) (Voice b)
voiceLens getter setter = lens (fmap getter) (flip $ zipVoiceWithNoScale setter)
-- TODO could also use (zipVoiceWith' max) or (zipVoiceWith' min)
-- | Whether two voices have exactly the same duration pattern.
-- Two empty voices are considered to have the same duration pattern.
-- Voices with an unequal number of notes differ by default.
sameDurations :: Voice a -> Voice b -> Bool
sameDurations a b = view durationsV a == view durationsV b
-- | Pair the values of two voices if and only if they have the same duration
-- pattern (as per 'sameDurations').
mergeIfSameDuration :: Voice a -> Voice b -> Maybe (Voice (a, b))
mergeIfSameDuration = mergeIfSameDurationWith (,)
-- | Combine the values of two voices using the given function if and only if they
-- have the same duration pattern (as per 'sameDurations').
mergeIfSameDurationWith :: (a -> b -> c) -> Voice a -> Voice b -> Maybe (Voice c)
mergeIfSameDurationWith f a b
| sameDurations a b = Just $ zipVoiceWithNoScale f a b
| otherwise = Nothing
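-- An editor's illustrative sketch (not in the original source):
-- @mergeIfSameDuration (pure 'a' <> pure 'b') (pure (1 :: Int) <> pure 2)@ should
-- succeed (both rhythms are [1,1]) and pair the values note for note, whereas
-- @mergeIfSameDuration (pure 'a') (stretch 2 (pure (1 :: Int)))@ should give
-- 'Nothing' because the duration patterns differ.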
-- TODO could also use (zipVoiceWith' max) or (zipVoiceWith' min)
-- -- |
-- -- Split all notes of the latter voice at the onset/offset of the former.
-- --
-- -- >>> ["a",(2,"b")^.note,"c"]^.voice
-- -- [(1,"a")^.note,(2,"b")^.note,(1,"c")^.note]^.voice
-- --
-- splitLatterToAssureSameDuration :: Voice b -> Voice b -> Voice b
-- splitLatterToAssureSameDuration = splitLatterToAssureSameDurationWith dup
-- where
-- dup x = (x,x)
--
-- splitLatterToAssureSameDurationWith :: (b -> (b, b)) -> Voice b -> Voice b -> Voice b
-- splitLatterToAssureSameDurationWith = undefined
-- polyToHomophonic :: [Voice a] -> Maybe (Voice [a])
-- polyToHomophonic = undefined
--
-- polyToHomophonicForce :: [Voice a] -> Voice [a]
-- polyToHomophonicForce = undefined
-- | Split a homophonic texture into a polyphonic one. The returned voice list will
-- have as many elements as the chord with the fewest number of notes.
homoToPolyphonic :: Voice [a] -> [Voice a]
homoToPolyphonic xs = case nvoices xs of
Nothing -> []
Just n -> fmap (\n -> fmap (!! n) xs) [0..n-1]
where
nvoices :: Voice [a] -> Maybe Int
nvoices = maybeMinimum . fmap length . (^.valuesV)
maybeMinimum :: Ord a => [a] -> Maybe a
maybeMinimum xs = if null xs then Nothing else Just (minimum xs)
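-- An editor's illustrative sketch (not in the original source): for the chordal
-- voice @pure "ce" <> pure "dfa"@ (two chords of sizes 2 and 3), 'homoToPolyphonic'
-- should return two voices carrying the values "cd" and "ef" respectively, i.e.
-- every chord is truncated to the size of the smallest one.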
-- changeCrossing :: Ord a => Voice a -> Voice a -> (Voice a, Voice a)
-- changeCrossing = undefined
--
-- changeCrossingBy :: Ord b => (a -> b) -> Voice a -> Voice a -> (Voice a, Voice a)
-- changeCrossingBy = undefined
--
-- processExactOverlaps :: (a -> a -> (a, a)) -> Voice a -> Voice a -> (Voice a, Voice a)
-- processExactOverlaps = undefined
--
-- processExactOverlaps' :: (a -> b -> Either (a,b) (b,a)) -> Voice a -> Voice b -> (Voice (Either b a), Voice (Either a b))
-- processExactOverlaps' = undefined
-- | Returns the onsets of all notes in a voice given the onset of the first note.
onsetsRelative :: Time -> Voice a -> [Time]
onsetsRelative o v = case offsetsRelative o v of
[] -> []
xs -> o : init xs
-- | Returns the offsets of all notes in a voice given the onset of the first note.
offsetsRelative :: Time -> Voice a -> [Time]
offsetsRelative o = fmap (\t -> o .+^ (t .-. 0)) . toAbsoluteTime . (^. durationsV)
-- | Returns the midpoints of all notes in a voice given the onset of the first note.
midpointsRelative :: Time -> Voice a -> [Time]
midpointsRelative o v = zipWith between (onsetsRelative o v) (offsetsRelative o v)
where
between p q = alerp p q 0.5
-- | Returns the eras of all notes in a voice given the onset of the first note.
erasRelative :: Time -> Voice a -> [Span]
erasRelative o v = zipWith (<->) (onsetsRelative o v) (offsetsRelative o v)
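-- An editor's illustrative sketch (not in the original source), assuming
-- 'toAbsoluteTime' accumulates the durations: for a voice with durations [1,2]
-- starting at time 0, 'onsetsRelative' should give [0,1], 'offsetsRelative'
-- should give [1,3], and 'erasRelative' should pair them up as the spans
-- 0 <-> 1 and 1 <-> 3.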
{-
onsetMap :: Score a -> Map Time a
onsetMap = fmap (view onset) . eraMap
offsetMap :: Score a -> Map Time a
offsetMap = fmap (view offset) . eraMap
midpointMap :: Score a -> Map Time a
midpointMap = fmap (view midpoint) . eraMap
eraMap :: Score a -> Map Span a
eraMap = error "No eraMap"
durations :: Voice a -> [Duration]
durations = view durationsV
-}
|
music-suite/music-score
|
src/Music/Time/Voice.hs
|
bsd-3-clause
| 20,066
| 0
| 17
| 4,834
| 5,019
| 2,744
| 2,275
| -1
| -1
|
{-# LANGUAGE ExplicitNamespaces #-}
module Halytics.Monitor
( I.Collect (..)
, I.Default (..)
, I.Initialize (..)
, I.Monitor
, I.Resultable (..)
, type (I.|^)
, I.fromPlaceholder
, I.generate
, I.monitorWith
, I.result
, (L.%<~)) where
import qualified Halytics.Monitor.Tuple as I
import qualified Halytics.Monitor.Lens as L
|
nmattia/halytics
|
src/Halytics/Monitor.hs
|
bsd-3-clause
| 351
| 0
| 5
| 69
| 104
| 70
| 34
| 15
| 0
|
import Music.Prelude
-- |
-- Bela Bartok: Wandering (excerpt)
-- From Mikrokosmos, vol. III
--
-- Inspired by the Abjad transcription
--
music :: Music
music = let
meta = id
. title "Mikrokosmos (excerpt)"
. composer "Bela Bartok"
. timeSignature (2/4)
. timeSignatureDuring ((2/4) >-> (5/4)) (3/4)
left = (level pp {-. legato-})
(scat [a,g,f,e] |> d|*2)
|> {-(level ((mp |> mp `cresc` mf |> mf)|*8) . legato)-}id
(scat [g,f,e,d] |> c |> (d |> e)|/2 |> f |> e |> d|*8)
--
right = up _P4 . delay 2 $
(level pp {-. legato-})
(scat [a,g,f,e] |> d|*2)
|> (level mp {-. legato-})
(scat [g,f,e,d] |> c |> (d |> e)|/2 |> f |> e |> d|*8)
in meta $ compress 8 $ left <> set parts' cellos (down _P8 right)
main = openLilypond music
|
music-suite/music-preludes
|
examples/bartok.hs
|
bsd-3-clause
| 840
| 0
| 20
| 255
| 361
| 193
| 168
| 19
| 1
|
--
-- Copyright (c) 2009-2011, ERICSSON AB
-- All rights reserved.
--
-- Redistribution and use in source and binary forms, with or without
-- modification, are permitted provided that the following conditions are met:
--
-- * Redistributions of source code must retain the above copyright notice,
-- this list of conditions and the following disclaimer.
-- * Redistributions in binary form must reproduce the above copyright
-- notice, this list of conditions and the following disclaimer in the
-- documentation and/or other materials provided with the distribution.
-- * Neither the name of the ERICSSON AB nor the names of its contributors
-- may be used to endorse or promote products derived from this software
-- without specific prior written permission.
--
-- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-- FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-- SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-- CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-- OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--
-- | Source-code annotations
module Feldspar.Core.Frontend.SourceInfo where
import Feldspar.Core.Types
import Feldspar.Core.Constructs.SourceInfo
import Feldspar.Core.Constructs
-- | Annotate an expression with information about its source code
sourceData :: Type a => SourceInfo1 a -> Data a -> Data a
sourceData info = sugarSymF (Decor info Id)
|
emwap/feldspar-language
|
src/Feldspar/Core/Frontend/SourceInfo.hs
|
bsd-3-clause
| 1,949
| 0
| 8
| 342
| 104
| 70
| 34
| 6
| 1
|
module RefacLocUtils(module HsTokens,PosToken, simpPos,
SimpPos,unmodified,modified,simpPos0,ghead,glast,gfromJust,gtail,
tokenCol, tokenRow, tokenPos,tokenCon,tokenLen,lengthOfToks,
mkToken,defaultToken,newLnToken,whiteSpacesToken,whiteSpaceTokens,isWhite,
notWhite,isWhiteSpace,isNewLn,isCommentStart,isComment,
isNestedComment,isMultiLineComment,isOpenBracket,isCloseBracket,
isOpenSquareBracket,isCloseSquareBracket,isOpenBrace,isConid,
isLit,isWhereOrLet,isWhere,isLet,isIn,isCase,isDo,isIf,isForall,
isHiding,isModule,isComma,isEqual,isLambda,isIrrefute,isBar,isMinus,
endsWithNewLn,startsWithNewLn,hasNewLn,startsWithEmptyLn,
lastNonSpaceToken,firstNonSpaceToken,compressPreNewLns,compressEndNewLns,
lengthOfLastLine,
updateToks,
getToks,replaceToks,deleteToks, doRmWhites,doAddWhites,
srcLocs, getStartEndLoc, getStartEndLoc2,
startEndLoc,extendBothSides,extendForwards,extendBackwards,
startEndLocIncFowComment,startEndLocIncFowNewLn,startEndLocIncComments,
prettyprint ,deleteFromToks, prettyprintGuardsAlt,
addFormalParams, adjustOffset, -- try to remove it
StartEndLoc, isArrow,-- swapInToks,
commentToks,tokenise, prettyprintPatList,groupTokensByLine, addLocInfo, getOffset, splitToks, insertComments,
extractComments, insertTerms
) where
import RefacTypeSyn(SimpPos)
import PosSyntax
import UniqueNames
import HsLexerPass1 hiding (notWhite)
import HsTokens
import PrettySymbols(rarrow)
import HsLayoutPre (PosToken)
import PrettyPrint
import HsExpUtil
import PNT
import RefacTypeSyn
import Maybe
import List
import SourceNames
-------------------------
--import DriftStructUtils
import StrategyLib
------------------------
import Control.Monad.State
--In the token stream, locations are unique except the default locs.
{- Some related data types defined by Programatica's Lexer:
type Lexer = String -> LexerOutput
type LexerOutput = [PosToken]
type PosToken = (Token,(Pos,String))
data Pos = Pos { char, line, column :: !Int } deriving (Show)
-- it seems that the field char is used to handle special characters including the '\t'
data Token
= Varid | Conid | Varsym | Consym
| Reservedid | Reservedop | Specialid
| IntLit | FloatLit | CharLit | StringLit
| Qvarid | Qconid | Qvarsym | Qconsym
| Special | Whitespace
| NestedCommentStart -- will cause a call to an external function
| NestedComment -- from the external function
| Commentstart -- dashes
| Comment -- what follows the dashes
| ErrorToken | GotEOF | TheRest
| ModuleName | ModuleAlias -- recognized in a later pass
-- Inserted during layout processing (see Haskell 98, 9.3):
| Layout -- for implicit braces
  | Indent Int -- <n>, to precede first token on each line
| Open Int -- {n}, after let, where, do or of, if not followed by a "{"
deriving (Show,Eq,Ord)
-}
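{- Editor's illustrative note (not part of the original source): a concrete
   PosToken for the keyword "where" appearing at line 5, column 3 would look
   roughly like

       (Reservedid, (Pos 0 5 3, "where"))

   i.e. the token kind, its position (the first field of Pos is left at 0 here,
   as 'mkToken' below does) and its literal text. -}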
--A flag used to indicate whether the token stream has been modified or not.
unmodified = False
modified = True
--- some default values----
pos0=Pos 0 0 0
simpPos0 = (0,0)
extractComments :: (SimpPos, SimpPos) -> [PosToken] -> [PosToken]
extractComments ((startPosl, startPosr), endPos) toks
= let (toks1, toks21, toks22) = splitToks ((startPosl, startPosr), endPos) toks
in toks1
------------------------------------------------
ghead info [] = error $ "ghead "++info++" []"
ghead info (h:_) = h
glast info [] = error $ "glast " ++ info ++ " []"
glast info h = last h
gtail info [] = error $ "gtail " ++ info ++ " []"
gtail info h = tail h
gfromJust info (Just h) = h
gfromJust info Nothing = error $ "gfromJust " ++ info ++ " Nothing"
--Some functions for fetching a specific field of a token
tokenCol (_,(Pos _ _ c,_)) = c
tokenRow (_,(Pos _ r _,_)) = r
tokenPos (_,(p,_)) = simpPos p
tokenCon (_,(_,s)) = s
tokenLen (_,(_,s)) = length s --check this again! need to handle the tab key.
lengthOfToks::[PosToken]->Int
lengthOfToks=length.(concatMap tokenCon)
--Some functions for checking whether a token is of a specific type of token.
isWhite (t,_) = t==Whitespace || t==Commentstart || t==Comment || t==NestedComment
notWhite = not.isWhite
isWhiteSpace (t,(_,s)) = t==Whitespace && s==" "
isNewLn (t,(_,s)) = t==Whitespace && s=="\n"
isCommentStart (t,(_,s)) = t==Commentstart && s=="--"
isComment (t,(_,s)) = t==Comment || t ==NestedComment
isNestedComment (t,(_,s)) = t==NestedComment
isMultiLineComment (t,(_,s)) = t==NestedComment && (isJust (find (=='\n') s))
isOpenBracket (t,(_,s)) = t==Special && s=="("
isCloseBracket (t,(_,s)) = t==Special && s==")"
isOpenSquareBracket (t,(_,s)) = t==Special && s=="["
isCloseSquareBracket (t,(_,s)) = t==Special && s=="]"
isOpenBrace (t,(_,s)) = t==Special && s=="{"
isCloseBrace (t,(_,s)) = t==Special && s=="}"
isConid (t,(_,_)) = t==Conid
isLit (t,(_,s)) = t==IntLit || t==FloatLit || t==CharLit || t==StringLit
isWhereOrLet t = isWhere t || isLet t
isWhere (t,(_,s)) = t==Reservedid && s=="where"
isLet (t,(_,s)) = t==Reservedid && s=="let"
isImport (t, (_,s))= t == Reservedid && s=="import"
isType (t, (_,s))= t == Reservedid && s=="type"
isData (t, (_,s))= t == Reservedid && s=="data"
isFixty (t, (_,s)) = t==Reservedid && (s=="infix" || s=="infixl" || s=="infixr")
isDefault (t, (_,s)) = t == Reservedid && s=="default"
isClass (t, (_,s)) = t == Reservedid && s=="class"
isInstance (t, (_,s)) = t == Reservedid && s=="instance"
isNewtype (t, (_,s)) = t == Reservedid && s=="newtype"
isIn (t,(_,s)) = t==Reservedid && s=="in"
isCase (t,(_,s)) = t==Reservedid && s=="case"
isDo (t,(_,s)) = t==Reservedid && s=="do"
isIf (t,(_,s)) = t==Reservedid && s=="if"
isForall (t,(_,s)) = t==Reservedid && s=="forall"
isHiding (t,(_,s)) = s=="hiding"
isModule (t,(_,s)) = t==Reservedid && s=="module"
isComma (t,(_,s)) = t==Special && s==","
isEqual (t,(_,s)) = t==Reservedop && s=="="
isLambda (t,(_,s)) = t==Reservedop && s=="\\"
isIrrefute (t,(_,s)) = t==Reservedop && s=="~"
isBar (t,(_,s)) = t==Reservedop && s=="|"
isArrow (t,(_,s)) = t==Reservedop && s=="->"
isMinus (t,(_,s)) = t==Varsym && s=="-"
-----------------------------------------------------------------
--Returns True if the token ends with '\n'
endsWithNewLn::PosToken->Bool
endsWithNewLn (_,(_,s)) =if s==[] then False
else (glast "endsWithNewLn" s=='\n')
--Returns True if the token starts with `\n`.
startsWithNewLn::PosToken->Bool
startsWithNewLn (_,(_,s)) =if s==[] then False
                          else ((ghead "startsWithNewLn" s)=='\n')
--Returns True if there is a '\n' in the token.
hasNewLn::PosToken->Bool
hasNewLn (_,(_,s))=isJust (find (=='\n') s) {-different from isNewLn -}
--Returns True if a token stream starts with a newline token (apart from the white spaces tokens)
startsWithEmptyLn::[PosToken]->Bool
startsWithEmptyLn toks=isJust (find isNewLn $ takeWhile (\t->isWhiteSpace t || isNewLn t) toks)
-- get the last non-space token in a token stream.
lastNonSpaceToken::[PosToken]->PosToken
lastNonSpaceToken toks=case dropWhile isWhiteSpace (reverse toks) of
[] ->defaultToken
l -> ghead "lastNonSpaceToken" l
--get the first non-space token in a token stream.
firstNonSpaceToken::[PosToken]->PosToken
firstNonSpaceToken toks=case dropWhile isWhiteSpace toks of
[] ->defaultToken
l -> ghead "firstNonSpaceToken" l
-- remove the extra preceding empty lines.
compressPreNewLns::[PosToken]->[PosToken]
compressPreNewLns toks= let (toks1, toks2) = break (not.(\t->isNewLn t || isWhiteSpace t)) toks
groupedToks = groupTokensByLine toks1
in if length groupedToks>1 then (last groupedToks)++toks2
else toks
--remove the following extra empty lines.
compressEndNewLns::[PosToken]->[PosToken]
compressEndNewLns toks=let (toks1, toks2) = break (not.(\t->isNewLn t || isWhiteSpace t)) (reverse toks)
groupedToks = groupTokensByLine toks1
in if length groupedToks>1 then reverse ((ghead "compressEndNewLns" groupedToks)++toks2)
else toks
prettyprintPatList beginWithSpace t
= replaceTabBySpaces $ if beginWithSpace then format1 t else format2 t
where
format1 t = foldl (\x y -> x++ " "++(render.ppi) y) "" t
format2 [] = ""
format2 [p] = (render.ppi) p
format2 (p:ps) = (render.ppi) p ++" " ++ format2 ps
prettyprint = replaceTabBySpaces.render.ppi
prettyprintGuardsAlt = replaceTabBySpaces.render.(ppRhs rarrow)
--Replace Tab by white spaces. (1 Tab=8 white spaces)
replaceTabBySpaces::String->String
replaceTabBySpaces []=[]
replaceTabBySpaces (s:ss)
=if s=='\t' then replicate 8 ' ' ++replaceTabBySpaces ss
else s:replaceTabBySpaces ss
--Compose a new token using the given arguments.
mkToken::Token->SimpPos->String->PosToken
mkToken t (row,col) c=(t,(Pos 0 row col,c))
---Restriction: the refactorer should not modify refactorer-modified/created tokens.
defaultToken = (Whitespace, (pos0," "))
newLnToken = (Whitespace, (pos0,"\n"))
tokenise startPos _ _ [] = []
tokenise startPos colOffset withFirstLineIndent str
= let str' = case lines str of
(ln:[]) -> addIndent ln ++ if glast "tokenise" str=='\n' then "\n" else ""
(ln:lns)-> addIndent ln ++ "\n" ++ concatMap (\n->replicate colOffset ' '++n++"\n") lns
str'' = if glast "tokenise" str' == '\n' && glast "tokenise" str /='\n'
then genericTake (length str' -1) str'
else str'
in expandNewLnTokens $ lexerPass0' startPos str''
where
addIndent ln = if withFirstLineIndent
then replicate colOffset ' '++ ln
else ln
--preprocessing the token stream to expand the white spaces to individual tokens.
expandNewLnTokens::[PosToken]->[PosToken]
expandNewLnTokens ts = concatMap expand ts
where
expand tok@(Whitespace,(pos,s)) = doExpanding pos s
expand x = [x]
doExpanding pos [] =[]
doExpanding pos@(Pos c row col) (t:ts)
= case t of
'\n' -> (Whitespace, (pos,[t])):(doExpanding (Pos c (row+1) 1) ts)
_ -> (Whitespace, (pos,[t])):(doExpanding (Pos c row (col+1)) ts)
--Should add cases for literals.
addLocInfo (decl, toks)
= runStateT (applyTP (full_tdTP (idTP `adhocTP` inPnt
`adhocTP` inSN)) decl) toks
where
inPnt (PNT pname ty (N (Just loc)))
= do loc' <- findLoc (pNtoName pname)
return (PNT pname ty (N (Just loc')))
inPnt x = return x
inSN (SN (PlainModule modName) (SrcLoc _ _ row col))
= do loc' <- findLoc modName
return (SN (PlainModule modName) loc')
inSN x = return x
pNtoName (PN (UnQual i) _)=i
pNtoName (PN (Qual (PlainModule modName) i) _) = modName++"."++i
pNtoName (PN (Qual (MainModule _) i) _) = "Main."++i
findLoc name
= do let name' = if name =="Prelude.[]" || name == "[]" then "["
else if name=="Prelude.(,)" || name == "(,)" || name == "()" then "("
else name ----Check this again.
toks' = dropWhile (\t->tokenCon t /= name') toks
(row, col) =if toks'==[] then error "HaRe: Error in addLocInfo!"
else tokenPos $ ghead "findLoc" toks'
return (SrcLoc "unknown" 0 row col)
groupTokensByLine [] = []
groupTokensByLine xs =let (xs', xs'') = break hasNewLn xs
in if xs''==[] then [xs']
else (xs'++ [ghead "groupTokensByLine" xs''])
: groupTokensByLine (gtail "groupTokensByLine" xs'')
--Given a token stream covering multiple lines, calculate the length of the last line
lengthOfLastLine::[PosToken]->Int
lengthOfLastLine toks
= let (toks1,toks2)=break hasNewLn $ reverse toks
in if toks2==[]
then sum (map tokenLen toks1)
else sum (map tokenLen toks1)+lastLineLenOfToken (ghead "lengthOfLastLine" toks2)
where
   --Compute the length of a token; if the token covers multiple lines, only count the last line.
--What about tab keys?
lastLineLenOfToken (_,(_,s))=(length.(takeWhile (\x->x/='\n')).reverse) s
--get a token stream specified by the start and end position.
getToks::(SimpPos,SimpPos)->[PosToken]->[PosToken]
getToks (startPos,endPos) toks
=let (_,toks2)=break (\t->tokenPos t==startPos) toks
(toks21, toks22)=break (\t->tokenPos t==endPos) toks2
in (toks21++ [ghead "getToks" toks22]) -- Should add error message for empty list?
-- Split the token stream into three parts: the tokens before the startPos,
-- the tokens between startPos and endPos, and the tokens after endPos.
splitToks::(SimpPos, SimpPos)->[PosToken]->([PosToken],[PosToken],[PosToken])
splitToks (startPos, endPos) toks
= if (startPos, endPos) == (simpPos0, simpPos0)
then error "Invalid token stream position!"
else let startPos'= if startPos==simpPos0 then endPos else startPos
endPos' = if endPos == simpPos0 then startPos else endPos
(toks1, toks2) = break (\t -> tokenPos t == startPos') toks
(toks21, toks22) = break (\t -> tokenPos t== endPos') toks2
-- Should add error message for empty list?
in if toks22==[] then error "Sorry, HaRe failed to finish this refactoring." -- (">" ++ (show (startPos, endPos) ++ show toks))
else (toks1, toks21++[ghead "splitToks" toks22], gtail "splitToks" toks22)
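{- Editor's illustrative note (not part of the original source): for example,

       splitToks ((2,1),(2,8)) toks

   should return (the tokens strictly before position (2,1), the tokens from
   (2,1) up to and including the token at (2,8), the tokens after it). The
   positions (2,1) and (2,8) are hypothetical; both must occur in the stream,
   otherwise the error branches above are taken. -}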
getOffset toks pos
= let (ts1, ts2) = break (\t->tokenPos t == pos) toks
in if ts2==[]
then error "HaRe error: position does not exist in the token stream!"
else lengthOfLastLine ts1
--comment out a token stream specified by the start and end position.
commentToks::(SimpPos,SimpPos)->[PosToken]->[PosToken]
commentToks (startPos,endPos) toks
= let (toks1, toks21, toks22) = splitToks (startPos, endPos) toks
toks21' = case toks21 of
[] -> toks21
(t,(l,s)):[] -> (t, (l, ("{-" ++ s ++ "-}"))):[]
(t1,(l1,s1)):ts -> let lastTok@(t2, (l2, s2)) = glast "commentToks" ts
lastTok' = (t2, (l2, (s2++" -}")))
in (t1,(l1, ("{- "++s1))): (reverse (lastTok': gtail "commentToks" (reverse ts)))
in (toks1 ++ toks21' ++ toks22)
insertTerms :: (SimpPos, SimpPos) -> [PosToken] -> String -> [PosToken]
insertTerms ((startPosl, startPosr), endPos) toks com
= let (toks1, toks21, toks22) = splitToks ((startPosl, startPosr), endPos) toks
toks21' = (Commentstart, ((Pos 0 startPosl startPosr) , "")) : [(Comment, ((Pos 0 startPosl startPosr), ("\n" ++ com ++ "\n")))]
in (toks1 ++ toks21' ++ (toks21 ++ toks22))
insertComments :: (SimpPos, SimpPos) -> [PosToken] -> String -> [PosToken]
insertComments ((startPosl, startPosr), endPos) toks com
= let (toks1, toks21, toks22) = splitToks ((startPosl, startPosr), endPos) toks
toks21' = (Commentstart, ((Pos 0 startPosl startPosr) , "")) : [(Comment, ((Pos 0 startPosl startPosr), ("\n{- " ++ com ++ " -}\n")))]
in (toks1 ++ toks21' ++ (toks21 ++ toks22))
--- -} -}
updateToks oldAST newAST printFun
= do ((toks,_), (v1, v2)) <-get
let (startPos, endPos) = getStartEndLoc toks oldAST
(toks1, _, _) = splitToks (startPos, endPos) toks
offset = lengthOfLastLine toks1
newToks = tokenise (Pos 0 v1 1) offset False $ printFun newAST --check the startPos
toks' = replaceToks toks startPos endPos newToks
if newToks ==[] then put ((toks', modified), (v1,v2))
else put ((toks',modified), (tokenRow (glast "updateToks1" newToks) -10, v2))
-- error $ show (newToks, startPos, endPos)
-- put ((toks', modified), (v1,v2))
addLocInfo (newAST, newToks)
---REFACTORING: GENERALISE THIS FUNCTION.
addFormalParams t newParams
= do ((toks,_),(v1, v2))<-get
let (startPos,endPos) = getStartEndLoc toks t
tToks = getToks (startPos, endPos) toks
(toks1, _) = let (toks1', toks2') = break (\t-> tokenPos t == endPos) toks
in (toks1' ++ [ghead "addFormalParams" toks2'], gtail "addFormalParams" toks2')
offset = lengthOfLastLine toks1
newToks = tokenise (Pos 0 v1 1) offset False (prettyprintPatList True newParams )
toks' = replaceToks toks startPos endPos (tToks++newToks)
put ((toks',modified), ((tokenRow (glast "addFormalParams" newToks) -10), v2))
addLocInfo (newParams, newToks)
--Replace a list of tokens in the token stream by a new list of tokens, adjust the layout as well.
--To use this function make sure the start and end positions really exist in the token stream.
--QN: what happens if the start or end position does not exist?
replaceToks::[PosToken]->SimpPos->SimpPos->[PosToken]->[PosToken]
replaceToks toks startPos endPos newToks
= if toks22 == []
then toks1 ++ newToks
else let pos = tokenPos (ghead "replaceToks" toks22)
oldOffset = getOffset toks pos
newOffset = getOffset (toks1++newToks++ toks22) pos
in toks1++ newToks++ adjustLayout toks22 oldOffset newOffset
where
(toks1, toks21, toks22) = splitToks (startPos, endPos) toks
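-- Editor's illustrative note (not part of the original source): a typical call,
-- replacing the tokens of an old fragment with freshly tokenised text (as
-- 'updateToks' above does) and letting 'adjustLayout' repair the columns of
-- whatever follows:
--
--   replaceToks toks (10,5) (10,12) (tokenise (Pos 0 10 5) 0 False "newName")
--
-- The positions (10,5) and (10,12) are hypothetical; they must be positions of
-- real tokens in 'toks'.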
{- Delete a syntax phrase from the token stream; this function (instead of the following one)
should be the interface function for deleting tokens.
-}
-- deleteFromToks::( (MonadState (([PosToken], Bool), t1) m), StartEndLoc t,Printable t,Term t)=>t->m ()
deleteFromToks t getLocFun
=do ((toks,_),others)<-get
let (startPos,endPos)=getLocFun toks t
toks'=deleteToks toks startPos endPos
put ((toks',modified),others)
{-Delete a sequence of tokens specified by the start position and end position from the token stream,
then adjust the remaining token stream to preserve layout-}
deleteToks::[PosToken]->SimpPos->SimpPos->[PosToken]
deleteToks toks startPos@(startRow, startCol) endPos@(endRow, endCol)
= case after of
(_:_) -> let nextPos =tokenPos $ ghead "deleteToks1" after
oldOffset = getOffset toks nextPos
newOffset = getOffset (toks1++before++after) nextPos
in toks1++before++adjustLayout (after++toks22) oldOffset newOffset
_ -> if toks22 == []
then toks1++before
else let toks22'=let nextOffset = getOffset toks (tokenPos (ghead "deleteToks2" toks22))
in if isMultiLineComment (lastNonSpaceToken toks21)
then whiteSpaceTokens (-1111, 0) (nextOffset-1) ++ toks22
else toks22
in if endsWithNewLn (last (toks1++before)) || startsWithNewLn (ghead "deleteToks3" toks22')
then toks1++before++toks22'
--avoiding layout adjustment by adding a `\n', sometimes may produce extra lines.
else toks1++before++[newLnToken]++toks22'
-- else toks1 ++ before ++ toks22'
where
(toks1, toks2) = let (ts1, ts2) = break (\t->tokenPos t == startPos) toks
(ts11, ts12) = break hasNewLn (reverse ts1)
in (reverse ts12, reverse ts11 ++ ts2)
(toks21, toks22)=let (ts1, ts2) = break (\t -> tokenPos t == endPos) toks2
(ts11, ts12) = break hasNewLn ts2
in (ts1++ts11++if ts12==[] then [] else [ghead "deleteToks4" ts12], if ts12==[] then [] else gtail "deleteToks5" ts12)
-- tokens before the tokens to be deleted at the same line
before = takeWhile (\t->tokenPos t/=startPos) toks21
-- tokens after the tokens to be deleted at the same line.
after = let t= dropWhile (\t->tokenPos t /=endPos) toks21
in if t == [] then error "Sorry, HaRe failed to finish this refactoring."
else gtail "deleteToks6" t
-- Adjust the layout to compensate the change in the token stream.
adjustLayout::[PosToken]->Int->Int->[PosToken]
adjustLayout [] _ _ = []
adjustLayout toks oldOffset newOffset
| oldOffset == newOffset = toks
adjustLayout toks oldOffset newOffset
= case layoutRuleApplies of
True -> let (ts:ts') = groupTokensByLine toks
in ts ++ addRmSpaces (newOffset-oldOffset) oldOffset ts' -- THIS IS PROBLEMETIC.
_ -> toks
where
layoutRuleApplies
= let ts = dropWhile (\t-> (not.elem (tokenCon t)) keyWords)
$ filter notWhite
$ takeWhile (not.hasNewLn) toks
in case ts of
(_: t: _) -> tokenCon t /= "{"
_ -> False
keyWords = ["where","let","do","of"]
addRmSpaces n col [] = []
addRmSpaces n col toks@(ts:ts')
=case find notWhite ts of
Just t -> if length (concatMap tokenCon ts1) >= col
then (addRmSpaces' n ts) ++ addRmSpaces n col ts'
else concat toks
_ -> ts ++ addRmSpaces n col ts'
where
(ts1, ts2) = break notWhite ts
addRmSpaces' 0 ts = ts
addRmSpaces' _ [] = []
addRmSpaces' n ts@(t:ts')
= case n >0 of
True -> whiteSpaceTokens (tokenRow t,0) n ++ ts -- CHECK THIS.
_ -> if isWhiteSpace t
then addRmSpaces' (n+1) ts'
else error $ "Layout adjusting failed at line:"
++ show (tokenRow t)++ "."
-- remove at most n white space tokens from the beginning of ts
doRmWhites::Int->[PosToken]->[PosToken]
doRmWhites 0 ts=ts
doRmWhites n []=[]
doRmWhites n toks@(t:ts)=if isWhiteSpace t then doRmWhites (n-1) ts
else toks
--add n white space tokens to the beginning of ts
doAddWhites::Int->[PosToken]->[PosToken]
doAddWhites n []=[]
doAddWhites n ts@(t:_)= whiteSpacesToken (tokenRow t,0) n ++ts
whiteSpaceTokens (row, col) n
= if n<=0
then []
else (mkToken Whitespace (row,col) " "):whiteSpaceTokens (row,col+1) (n-1)
-------------------------------------------------------------------------------------------------
--get all the source locations (use locations) in an AST phrase t according to the occurrence order of identifiers.
srcLocs::(Term t)=> t->[SimpPos]
srcLocs t =(nub.srcLocs') t \\ [simpPos0]
where srcLocs'=runIdentity.(applyTU (full_tdTU (constTU []
`adhocTU` pnt
`adhocTU` sn
`adhocTU` literalInExp
`adhocTU` literalInPat)))
pnt (PNT pname _ (N (Just (SrcLoc _ _ row col))))=return [(row,col)]
pnt _=return []
sn (SN (PlainModule modName) (SrcLoc _ _ row col))
= return [(row, col)]
sn _ = return []
literalInExp ((Exp (HsLit (SrcLoc _ _ row col) _))::HsExpP) = return [(row,col)]
literalInExp (Exp _) =return []
literalInPat ((Pat (HsPLit (SrcLoc _ _ row col) _))::HsPatP) = return [(row,col)]
literalInPat (Pat (HsPNeg (SrcLoc _ _ row col) _)) = return [(row,col)]
literalInPat _ =return []
class StartEndLocPat t where
startEndLoc2 :: [PosToken]->t->[(SimpPos,SimpPos)]
-- startEndLoc3 :: [PosToken]->t->[(SimpPos,SimpPos)]
instance StartEndLocPat [HsDeclP] where
startEndLoc2 toks ds=if ds==[] then [(simpPos0,simpPos0)]
else if length ds==1
then [startEndLoc toks (ghead "StartEndLoc:[HsDeclP]" ds)]
else concat (map (startEndLoc2 toks) ds)
instance StartEndLocPat HsMatchP where
startEndLoc2 toks (HsMatch loc i ps rhs ds)
=let (startLoc,_)=startEndLoc toks i
(_,endLoc) =if ds==[] then startEndLoc toks rhs
else startEndLoc toks (glast "StartEndLoc:HsMatchP" ds)
in [(startLoc,endLoc)]
instance StartEndLocPat HsDeclP where
startEndLoc2 toks (Dec (HsTypeDecl (SrcLoc _ _ r c) tp t))
= let (startLoc, _) = startEndLoc toks tp
(_ , endLoc) = startEndLoc toks t
in [extendForwards toks startLoc endLoc isType]
startEndLoc2 toks (Dec (HsDataDecl loc c tp decls is))
= let (startLoc, _) = startEndLoc toks tp
(_, endLoc) = if is == [] then startEndLoc toks (glast "StartEndLoc:HsDeclP1" decls)
else startEndLoc toks is
in [extendForwards toks startLoc endLoc isData]
startEndLoc2 toks (Dec (HsNewTypeDecl loc c tp decls is))
= let (startLoc, _) = startEndLoc toks tp
(_, endLoc) = if is == [] then startEndLoc toks decls
else startEndLoc toks is
in [extendForwards toks startLoc endLoc isNewtype]
startEndLoc2 toks (Dec (HsDefaultDecl _ ts))
= let (startLoc, _) = startEndLoc toks (head ts)
(_ , endLoc) = startEndLoc toks (last ts)
in [extendForwards toks startLoc endLoc isDefault]
startEndLoc2 toks (Dec (HsInfixDecl _ _ is))
= let (startLoc, _) = startEndLoc toks (head is)
(_, endLoc) = startEndLoc toks (last is)
in [extendForwards toks startLoc endLoc isFixty]
startEndLoc2 toks d@(Dec (HsFunBind _ ms))
= map (startEndLoc toks) ms
startEndLoc2 toks (Dec (HsPatBind _ p rhs ds))
= let (startLoc, _) = startEndLoc toks p
(_, endLoc) = if ds ==[] then startEndLoc toks rhs
else startEndLoc toks (glast "startEndLoc:HsDeclP5" ds)
toks1 = dropWhile (\t->tokenPos t /= endLoc) toks
endLoc1 = if toks1==[]
then endLoc
else let toks2 = takeWhile (\t -> isSpecialTok t) toks1
in (tokenPos.glast "startEndLoc::HsMatchP") toks2
in [(startLoc, endLoc1)]
where
isSpecialTok t = isWhiteSpace t || isCloseBracket t || isOpenBracket t || isOpenSquareBracket t
|| isCloseSquareBracket t
startEndLoc2 toks (Dec (HsTypeSig _ is c t))
= let (startLoc, _) = startEndLoc toks (ghead "startEndLoc:HsDeclP6" is)
(_, endLoc) = startEndLoc toks t
in [(startLoc, endLoc)]
startEndLoc2 toks decl@(Dec (HsClassDecl loc c tp funDeps ds))
= let locs = srcLocs decl
(startLoc, endLoc)
= if locs == [] then (simpPos0, simpPos0)
else (head locs, last locs)
in [extendForwards toks startLoc endLoc isClass]
startEndLoc2 toks decl@(Dec (HsInstDecl loc i c t ds))
= let locs = srcLocs decl
(startLoc, endLoc)
= if locs == [] then (simpPos0, simpPos0)
else (head locs, last locs)
in [extendForwards toks startLoc endLoc isInstance]
getStartEndLoc2::(Term t, StartEndLocPat t,Printable t)=>[PosToken]->t->[(SimpPos,SimpPos)]
getStartEndLoc2 toks t
= startEndLoc2 toks t
{- locs = srcLocs t
(startPos,endPos) = (if startPos' == simpPos0 && locs /=[] then ghead "getStartEndLoc2" locs
else startPos',
if endPos' == simpPos0 && locs /= [] then glast "gerStartEndLoc2" locs
else endPos')
in (startPos, endPos) -}
--given an AST phrase, 'startEndLoc' gets its start and end position in the program source.
class StartEndLoc t where
startEndLoc :: [PosToken]->t->(SimpPos,SimpPos)
instance StartEndLoc HsModuleP where
startEndLoc toks _ = (tokenPos (ghead "startEndLoc:HsModuleP" toks),
tokenPos (glast "startEndLoc:HsModuleP" toks))
instance StartEndLoc HsExpP where
startEndLoc toks (Exp e)=
case e of
HsId ident@(HsVar (PNT pn _ _)) ->let (startLoc, endLoc) = startEndLoc toks ident
{- To handle infix operator. for infix operators like (++), there
is no parenthesis in the syntax tree -}
(toks1,toks2) = break (\t->tokenPos t==startLoc) toks
toks1' = dropWhile isWhite (reverse toks1)
toks2' = dropWhile isWhite (gtail "startEndLoc:HsExpP"
(dropWhile (\t->tokenPos t /=endLoc) toks2))
in if toks1'/=[] && toks2'/=[] && isOpenBracket (head toks1')
&& isCloseBracket (head toks2')
then (tokenPos (head toks1'), tokenPos (head toks2'))
else (startLoc, endLoc)
HsId x ->startEndLoc toks x
HsLit (SrcLoc _ _ r c) _ -> ((r,c),(r,c))
HsInfixApp e1 op e2 ->let (startLoc,_)=startEndLoc toks e1
(_, endLoc) =startEndLoc toks e2
in (startLoc,endLoc)
e@(HsApp e1 e2) ->let (startLoc,endLoc)=startEndLoc toks e1
(startLoc1, endLoc1 )=startEndLoc toks e2
in (startLoc, endLoc1)
HsNegApp (SrcLoc _ _ r c) e ->let (_,endLoc)=startEndLoc toks e
in ((r,c), endLoc)
HsLambda ps e ->let (startLoc,_)=startEndLoc toks (ghead "startEndLoc:HsLambda" ps) --ps can not be empty
(_,endLoc) =startEndLoc toks e
in extendForwards toks startLoc endLoc isLambda
HsIf e1 e2 e3 ->let (startLoc, _)=startEndLoc toks e1
(_, endLoc)=startEndLoc toks e3
in extendForwards toks startLoc endLoc isIf
HsLet ds e ->if ds==[]
then
let (startLoc,endLoc)=startEndLoc toks e
in extendForwards toks startLoc endLoc isLet
else
let (startLoc,_)=startEndLoc toks (ghead "startEndLoc:HsLet" ds)
(_,endLoc) =startEndLoc toks e
in extendForwards toks startLoc endLoc isLet
HsCase e alts ->let (startLoc,_)=startEndLoc toks e
(_,endLoc) =startEndLoc toks (glast "HsCase" alts) --alts can not be empty.
in extendForwards toks startLoc endLoc isCase
HsDo stmts ->let (startLoc, endLoc)=startEndLoc toks stmts
in extendForwards toks startLoc endLoc isDo
HsTuple es ->if es==[]
then (simpPos0,simpPos0) --Empty tuple can cause problem.
else let (startLoc,_)=startEndLoc toks (ghead "startEndLoc:HsTuple" es)
(_,endLoc) =startEndLoc toks (glast "startEndLoc:HsTuple" es)
in extendBothSides toks startLoc endLoc isOpenBracket isCloseBracket
HsList es ->if es==[]
then (simpPos0,simpPos0) --Empty list can cause problem.
else let (startLoc,_)=startEndLoc toks (ghead "startEndLoc:HsList" es)
(_,endLoc) =startEndLoc toks (glast "startEndLoc:HsList" es)
in extendBothSides toks startLoc endLoc isOpenSquareBracket isCloseSquareBracket
HsParen e ->let (startLoc,(endLocR, endLocC))=startEndLoc toks e
in extendBothSides toks startLoc (endLocR, endLocC) isOpenBracket isCloseBracket
-- in if expIsPNT e
-- then (startLoc, (endLocR, endLocC+1))
-- else extendBothSides toks startLoc (endLocR, endLocC) isOpenBracket isCloseBracket
-- where
-- expIsPNT (Exp (HsId (HsVar pnt)))=True
-- expIsPNT (Exp (HsParen e))=expIsPNT e
-- expIsPNT _ =False
HsLeftSection e op ->let (startLoc,_)=startEndLoc toks e
(_, endLoc )=startEndLoc toks op
in (startLoc,endLoc)
HsRightSection op e ->let (startLoc,_)=startEndLoc toks op
(_, endLoc )=startEndLoc toks op
in (startLoc,endLoc)
HsRecConstr loc i upds ->let (startLoc,_)=startEndLoc toks i
(_,endLoc) =startEndLoc toks (glast "startEndLoc:HsRecConstr" upds) --can 'upds' be empty?
in extendBackwards toks startLoc endLoc isCloseBrace
HsRecUpdate loc e upds ->let (startLoc,_)=startEndLoc toks e
(_,endLoc) =startEndLoc toks (glast "startEndLoc:HsRecUpdate" upds) --ditto
in extendBackwards toks startLoc endLoc isCloseBrace
HsEnumFrom e ->let (startLoc,endLoc)=startEndLoc toks e
in extendBothSides toks startLoc endLoc isOpenSquareBracket isCloseSquareBracket
HsEnumFromTo e1 e2 ->let (startLoc,_)=startEndLoc toks e1
(_, endLoc)=startEndLoc toks e2
in extendBothSides toks startLoc endLoc isOpenSquareBracket isCloseSquareBracket
HsEnumFromThen e1 e2 ->let (startLoc,_)=startEndLoc toks e1
(_, endLoc)=startEndLoc toks e2
in extendBothSides toks startLoc endLoc isOpenSquareBracket isCloseSquareBracket
HsEnumFromThenTo e1 e2 e3 ->let (startLoc,_)=startEndLoc toks e1
(_, endLoc)=startEndLoc toks e3
in extendBothSides toks startLoc endLoc isOpenSquareBracket isCloseSquareBracket
HsListComp stmts ->let (startLoc,endLoc)=startEndLoc toks stmts
in extendBothSides toks startLoc endLoc isOpenSquareBracket isCloseSquareBracket
HsAsPat i e ->let (startLoc,_)=startEndLoc toks i
(_,endLoc)= startEndLoc toks e
in (startLoc,endLoc)
HsIrrPat e ->let (startLoc,endLoc)=startEndLoc toks e
in extendForwards toks startLoc endLoc isIrrefute
HsWildCard ->(simpPos0,simpPos0) -- wildcard can cause problem.
HsExpTypeSig loc e c t ->let (startLoc,_)=startEndLoc toks e
(_, endLoc )=startEndLoc toks t
in (startLoc,endLoc)
instance StartEndLoc HsTypeP where
startEndLoc toks (Typ p)=
case p of
HsTyFun t1 t2 -> let (startLoc,e)=startEndLoc toks t1
(_ , endLoc)=startEndLoc toks t2
in (startLoc,endLoc)
--HsTyTuple [t] ->
HsTyApp t1 t2 -> let (startLoc,endLoc)=startEndLoc toks t1
(startLoc1 , endLoc1)=startEndLoc toks t2
in case t1 of
(Typ (HsTyCon t)) -> if (render.ppi) t == "[]"
then extendBothSides toks startLoc1 endLoc1 isOpenSquareBracket isCloseSquareBracket
else (startLoc, endLoc1)
_ -> (startLoc, endLoc1)
HsTyVar i -> let (startLoc, endLoc) = startEndLoc toks i
in extendBothSides' toks startLoc endLoc isOpenBracket isCloseBracket
HsTyCon i -> let (startLoc, endLoc) = startEndLoc toks i
in if (render.ppi) i =="[]"
then extendBothSides toks startLoc endLoc isOpenSquareBracket isCloseSquareBracket
else extendBothSides' toks startLoc endLoc isOpenBracket isCloseBracket
HsTyForall is ts t -> case is of
[] ->let (startLoc,endLoc)=startEndLoc toks t
in extendForwards toks startLoc endLoc isForall
l -> let (startLoc, _) =startEndLoc toks $ ghead "StartEndLoc:HsTypeP" is
( _ , endLoc) =startEndLoc toks t
in extendForwards toks startLoc endLoc isForall
extendBothSides' toks startLoc endLoc forwardCondFun backwardCondFun
=let (toks1,toks2)=break (\t->tokenPos t==startLoc) toks
toks21=dropWhile (\t->tokenPos t<=endLoc) toks2
firstLoc=case (dropWhile isWhite (reverse toks1)) of
[] -> startLoc -- is this the correct default?
ls -> if (forwardCondFun.ghead "extendBothSides:lastTok") ls then tokenPos (head ls)
else startLoc
lastLoc =case (dropWhile isWhite toks21) of
[] ->endLoc --is this a correct default?
ls -> if (backwardCondFun.ghead "extendBothSides:lastTok") ls then tokenPos (head ls)
else endLoc
in (firstLoc, lastLoc)
instance StartEndLoc HsPatP where
startEndLoc toks (Pat p)=
case p of
HsPId i ->startEndLoc toks i
HsPLit (SrcLoc _ _ r c) _ ->((r,c),(r,c))
HsPNeg (SrcLoc _ _ r c) p ->((r,c),(r,c))
HsPInfixApp p1 op p2 ->let (startLoc,_)=startEndLoc toks p1
(_ , endLoc)=startEndLoc toks p2
in (startLoc,endLoc)
HsPApp i ps ->let (startLoc,_)=startEndLoc toks i
(_,endLoc)=startEndLoc toks (glast "StartEndLoc:HsPatP" ps)
in (startLoc,endLoc)
HsPTuple loc ps -> if ps==[]
then (simpPos0,simpPos0) -- ****Update this using locations****.
else let (startLoc,_)=startEndLoc toks (ghead "startEndLoc:HsPTuple" ps)
(_,endLoc)=startEndLoc toks (glast "startEndLoc:HsPTuple" ps)
in extendBothSides toks startLoc endLoc isOpenBracket isCloseBracket
HsPList loc ps ->if ps==[]
then (simpPos0,simpPos0) -- ***Update this using locations*****
else let (startLoc,_)=startEndLoc toks (ghead "startEndLoc:HsPList" ps)
(_, endLoc) =startEndLoc toks (glast "startEndLoc:HsPList" ps)
in extendBothSides toks startLoc endLoc isOpenSquareBracket isCloseSquareBracket
HsPParen p ->let (startLoc,endLoc)=startEndLoc toks p
in extendBothSides toks startLoc endLoc isOpenBracket isCloseBracket
HsPRec i upds ->let (startLoc,_)=startEndLoc toks i
(_,endLoc)=startEndLoc toks (glast "startEndLoc:HsPRec" upds) --can upds be empty?
in extendBackwards toks startLoc endLoc isCloseBrace
HsPAsPat i p ->let (startLoc,_)=startEndLoc toks i
(_,endLoc)=startEndLoc toks p
in (startLoc,endLoc)
HsPIrrPat p ->let (startLoc,endLoc)=startEndLoc toks p
in extendForwards toks startLoc endLoc isIrrefute
HsPWildCard ->(simpPos0,simpPos0) -- wildcard can cause problem.
instance StartEndLoc [HsPatP] where
startEndLoc toks ps = let locs=(nub.(map (startEndLoc toks))) ps \\ [(simpPos0,simpPos0)]
in if locs==[] then (simpPos0,simpPos0)
else let (startLoc,_)=ghead "StartEndLoc:HsPatP" locs
(_,endLoc) =glast "StartEndLoc:HsPatP" locs
in (startLoc,endLoc)
instance StartEndLoc [HsExpP] where
startEndLoc toks es=let locs=(nub.(map (startEndLoc toks))) es \\ [(simpPos0,simpPos0)]
in if locs==[] then (simpPos0,simpPos0)
else let (startLoc,_)=ghead "StartEndLoc:HsExp" locs
(_,endLoc) =glast "startEndLoc:HsExp" locs
in (startLoc,endLoc)
instance StartEndLoc [HsDeclP] where
startEndLoc toks ds=if ds==[] then (simpPos0,simpPos0)
else if length ds==1
then startEndLoc toks (ghead "StartEndLoc:[HsDeclP]" ds)
else let (startLoc,_)=startEndLoc toks (ghead "StartEndLoc:[HsDeclP]" ds)
(_,endLoc) =startEndLoc toks (glast "StartEndLoc:[HsDeclP]" ds)
in (startLoc,endLoc)
instance StartEndLoc HsMatchP where
startEndLoc toks t@(HsMatch loc i ps rhs ds)
=let (startLoc,_)=startEndLoc toks i
(_,endLoc) =if ds==[] then startEndLoc toks rhs
else startEndLoc toks (glast "StartEndLoc:HsMatchP" ds)
locs = srcLocs t
(startLoc1,endLoc1) = (if startLoc == simpPos0 && locs /=[] then ghead "getStartEndLoc" locs
else startLoc,
if endLoc == simpPos0 && locs /= [] then glast "getStartEndLoc" locs
else endLoc)
toks1 = gtail "startEndLoc:HsMatchP" (dropWhile (\t->tokenPos t /= endLoc1) toks)
toks0 = getToks (startLoc1, endLoc1) toks
endLoc2 = if toks1==[]
then endLoc1
else let toks2 = takeWhile (\t -> isSpecialTok t && needmore toks t ) toks1
in if toks2 == [] || all (\t-> isWhiteSpace t ) toks2
then endLoc1
else (tokenPos.glast "startEndLoc::HsMatchP") toks2
in (startLoc1, endLoc2)
where
isSpecialTok t = isWhiteSpace t || isCloseBracket t || isOpenBracket t || isOpenSquareBracket t
|| isCloseSquareBracket t
needmore toks t = case isCloseBracket t of
True -> let openBrackets = length $ filter isOpenBracket toks
closeBrackets = length $ filter isCloseBracket toks
in closeBrackets < openBrackets
False -> case isCloseSquareBracket t of
True -> let openSqBrackets = length $ filter isOpenSquareBracket toks
closeSqBrackets = length $ filter isCloseSquareBracket toks
in closeSqBrackets < openSqBrackets
false -> True
instance StartEndLoc HsStmtP where -- Bug fixed. 20/05/2004
startEndLoc toks stmts=let s=getStmtList stmts
locs = map (startEndLoc toks) s
(startLocs, endLocs) =(sort (map fst locs), sort (map snd locs))
in (ghead "StartEndLoc::HsStmtP" startLocs, glast "StartEndLoc::HsStmtP" endLocs)
instance StartEndLoc (HsStmtAtom HsExpP HsPatP [HsDeclP]) where
startEndLoc toks stmt=
case stmt of
HsGeneratorAtom (SrcLoc _ _ r c) p e ->
let (startLoc,_)=startEndLoc toks p
(_,endLoc) =startEndLoc toks e
in (startLoc,endLoc)
HsQualifierAtom e -> startEndLoc toks e
HsLetStmtAtom ds -> if ds==[]
then (simpPos0,simpPos0)
else let (startLoc,_)= startEndLoc toks (ghead "StartEndLoc:HsStmtAtom" ds)
(_,endLoc) = startEndLoc toks (glast "StartEndLoc:HsStmtAtom" ds)
in (startLoc,endLoc)
HsLastAtom e ->startEndLoc toks e
instance (StartEndLoc i,StartEndLoc e)=>StartEndLoc (HsFieldI i e) where
startEndLoc toks (HsField i e)=let (startLoc,_)=startEndLoc toks i
(_,endLoc)=startEndLoc toks e
in (startLoc,endLoc)
instance StartEndLoc HsAltP where
startEndLoc toks (HsAlt l p rhs ds)=let (startLoc,_)=startEndLoc toks p
(_,endLoc)=if ds==[] then startEndLoc toks rhs
else startEndLoc toks (glast "StartEndLoc:HsAltP" ds)
in (startLoc,endLoc)
instance StartEndLoc RhsP where
startEndLoc toks (HsBody e)=startEndLoc toks e
startEndLoc toks (HsGuard es)=if es==[] then (simpPos0,simpPos0)
else let (_,e1,_)=ghead "StartEndLoc:RhsP" es
(_,_,e2)=glast "StartEndLoc:RhsP" es
(startLoc,_)=startEndLoc toks e1
(_,endLoc)=startEndLoc toks e2
in extendForwards toks startLoc endLoc isBar
instance StartEndLoc (HsIdentI PNT) where
startEndLoc toks ident =
case ident of
HsVar i ->startEndLoc toks i
HsCon i ->startEndLoc toks i
instance StartEndLoc [PNT] where
startEndLoc toks pnts
= if pnts==[] then (simpPos0, simpPos0)
else let (startPos, _) = startEndLoc toks (head pnts)
(_, endPos) = startEndLoc toks (last pnts)
in (startPos, endPos)
instance StartEndLoc (HsImportDeclI ModuleName PNT) where
startEndLoc toks (HsImportDecl (SrcLoc _ _ row col) modName qual as Nothing)
= let startPos=fst (startEndLoc toks modName)
endPos = if isJust as then snd (startEndLoc toks (fromJust as))
else snd (startEndLoc toks modName)
in extendForwards toks startPos endPos isImport
startEndLoc toks (HsImportDecl (SrcLoc _ _ row col) modName qual as (Just (_, ents)))
= let startPos = fst (startEndLoc toks modName)
endPos = if ents == [] then if isJust as then snd (startEndLoc toks (fromJust as))
else snd (startEndLoc toks modName)
else snd (startEndLoc toks (glast "startEndLocImport" ents))
in extendBothSides toks startPos endPos isImport isCloseBracket
instance StartEndLoc [HsExportSpecI ModuleName PNT] where
startEndLoc toks es
= if es == [] then (simpPos0, simpPos0)
else let (startLoc, _) = startEndLoc toks $ head es
(_, endLoc) = startEndLoc toks $ last es
in (startLoc, endLoc)
-- in extendBothSides toks startLoc endLoc isOpenBracket isCloseBracket
instance StartEndLoc (HsExportSpecI ModuleName PNT) where
startEndLoc toks (EntE ent) =startEndLoc toks ent
startEndLoc toks (ModuleE moduleName) = let (startPos, endPos) = startEndLoc toks moduleName
in extendForwards toks startPos endPos isModule
instance StartEndLoc(EntSpec PNT) where
startEndLoc toks (Var i)=startEndLoc toks i --- x (a variable identifier)
startEndLoc toks (Abs i) =startEndLoc toks i -- T, C
startEndLoc toks (AllSubs i) =let (startPos, endPos) =startEndLoc toks i -- T(..), C(..)
in extendBackwards toks startPos endPos isCloseBracket
startEndLoc toks (ListSubs i ents)= let (startPos, _) = startEndLoc toks i --T (C_1, ...,C_n, f1,...f_n)
(_, endPos) = startEndLoc toks (glast "startEnPosListSubs" ents)
in extendBackwards toks startPos endPos isCloseBracket
instance StartEndLoc ModuleName where
startEndLoc toks (SN modName (SrcLoc _ _ row col)) = ((row,col), (row,col))
instance StartEndLoc [EntSpec PNT] where
startEndLoc toks ents
= if ents==[] then (simpPos0,simpPos0)
else let (startPos, _)=startEndLoc toks $ head ents
(_, endPos) =startEndLoc toks $ last ents
in (startPos,endPos)
-- in extendBothSides toks startPos endPos isHiding isCloseBracket
instance StartEndLoc PNT where
startEndLoc toks pnt =
case pnt of
PNT pn _ (N (Just (SrcLoc _ _ row col)))->((row,col),(row,col))
_ ->(simpPos0,simpPos0) {-Shouldn't cause any problems here, as in a normal
AST, every PNT has a source location. -}
instance (Eq i, Eq t, StartEndLoc i, StartEndLoc t,StartEndLoc [i]) =>StartEndLoc (HsConDeclI i t c) where
startEndLoc toks (HsConDecl _ is c i ds)
= let (startLoc, _) = startEndLoc toks is
(_, endLoc) = if ds==[] then startEndLoc toks i
else startEndLoc toks (last ds)
in (startLoc, endLoc)
startEndLoc toks (HsRecDecl _ is c i ds)
= let (startLoc, _) = startEndLoc toks is
(_, endLoc) = if ds==[] then startEndLoc toks i
else startEndLoc toks (last ds)
in (startLoc, endLoc)
instance (StartEndLoc t)=>StartEndLoc (HsBangType t) where
startEndLoc toks (HsBangedType t) = startEndLoc toks t
startEndLoc toks (HsUnBangedType t) = startEndLoc toks t
instance (StartEndLoc t, StartEndLoc [i]) => StartEndLoc ([i], HsBangType t) where
startEndLoc toks (x,y)
= let (startLoc, endLoc) = startEndLoc toks y
in extendBackwards toks startLoc endLoc isCloseBrace
instance StartEndLoc HsDeclP where
startEndLoc toks (Dec (HsTypeDecl (SrcLoc _ _ r c) tp t))
= let (startLoc, _) = startEndLoc toks tp
(_ , endLoc) = startEndLoc toks t
in extendForwards toks startLoc endLoc isType
startEndLoc toks (Dec (HsDataDecl loc c tp decls is))
= let (startLoc, _) = startEndLoc toks tp
(_, endLoc) = if is == [] then startEndLoc toks (glast "StartEndLoc:HsDeclP1" decls)
else startEndLoc toks is
in extendForwards toks startLoc endLoc isData
startEndLoc toks (Dec (HsNewTypeDecl loc c tp decls is))
= let (startLoc, _) = startEndLoc toks tp
(_, endLoc) = if is == [] then startEndLoc toks decls
else startEndLoc toks is
in extendForwards toks startLoc endLoc isNewtype
startEndLoc toks (Dec (HsDefaultDecl _ ts))
= let (startLoc, _) = startEndLoc toks (head ts)
(_ , endLoc) = startEndLoc toks (last ts)
in extendForwards toks startLoc endLoc isDefault
startEndLoc toks (Dec (HsInfixDecl _ _ is))
= let (startLoc, _) = startEndLoc toks (head is)
(_, endLoc) = startEndLoc toks (last is)
in extendForwards toks startLoc endLoc isFixty
startEndLoc toks d@(Dec (HsFunBind _ ms))
= let (startLoc, _) = startEndLoc toks (ghead "startEndLoc:HsDeclP3" ms)
(_, endLoc) = if ms == [] then (simpPos0, simpPos0)
else startEndLoc toks (glast "startEndLoc:HsDeclP4" ms)
in (startLoc, endLoc)
startEndLoc toks t@(Dec (HsPatBind _ p rhs ds))
= let (startLoc, _) = startEndLoc toks p
(_, endLoc) = if ds ==[] then startEndLoc toks rhs
else startEndLoc toks (glast "startEndLoc:HsDeclP5" ds)
locs = srcLocs t
(startLoc1,endLoc1) = (if startLoc == simpPos0 && locs /=[] then ghead "getStartEndLoc" locs
else startLoc,
if endLoc == simpPos0 && locs /= [] then glast "getStartEndLoc" locs
else endLoc)
toks1 = gtail "startEndLoc:HsPatBind" (dropWhile (\t->tokenPos t /= endLoc1) toks)
endLoc2 = if toks1==[]
then endLoc1
else let toks2 = takeWhile (\t -> isSpecialTok t && needmore toks t) toks1
in if toks2 == [] || all (\t-> isWhiteSpace t) toks2
then endLoc1
else (tokenPos.glast "startEndLoc::HsMatchP") toks2
in (startLoc1, endLoc2)
where
isSpecialTok t = isWhiteSpace t || isCloseBracket t || isOpenBracket t || isOpenSquareBracket t
|| isCloseSquareBracket t
needmore toks t = case isCloseBracket t of
True -> let openBrackets = length $ filter isOpenBracket toks
closeBrackets = length $ filter isCloseBracket toks
in closeBrackets < openBrackets
False -> case isCloseSquareBracket t of
True -> let openSqBrackets = length $ filter isOpenSquareBracket toks
closeSqBrackets = length $ filter isCloseSquareBracket toks
in closeSqBrackets < openSqBrackets
False -> True
startEndLoc toks (Dec (HsTypeSig _ is c t))
= let (startLoc, _) = startEndLoc toks (ghead "startEndLoc:HsDeclP6" is)
(_, endLoc) = startEndLoc toks t
in (startLoc, endLoc)
startEndLoc toks decl@(Dec (HsClassDecl loc c tp funDeps ds))
= let locs = srcLocs decl
(startLoc, endLoc)
= if locs == [] then (simpPos0, simpPos0)
else (head locs, last locs)
in extendForwards toks startLoc endLoc isClass
startEndLoc toks decl@(Dec (HsInstDecl loc i c t ds))
= let locs = srcLocs decl
(startLoc, endLoc)
= if locs == [] then (simpPos0, simpPos0)
else (head locs, last locs)
in extendForwards toks startLoc endLoc isInstance
{-
startEndLoc toks (Dec (HsPrimitiveTypeDecl _ c tp))
= let (startLoc, endLoc) = startEndLoc toks tp
in extendForward toks startLoc endLoc isData
startEndLoc toks (Dec (HsPrimitiveBind _ i t))
= let (startLoc, _) = startEndLoc toks i
(_, endLoc) = stratEndLoc toks t
in extendForward toks startLoc endLoc isPrimitive
-}
---------------End of the class StartEndLoc----------------------------------------
--------------------------------------------------------------------------------------------------------
-- This function should be the interface function for fetching the start and end locations of an AST phrase in the source.
getStartEndLoc::(Term t, StartEndLoc t,Printable t)=>[PosToken]->t->(SimpPos,SimpPos)
getStartEndLoc toks t
= let (startPos',endPos') = startEndLoc toks t
locs = srcLocs t
(startPos,endPos) = (if startPos' == simpPos0 && locs /=[] then ghead "getStartEndLoc" locs
else startPos',
if endPos' == simpPos0 && locs /= [] then glast "getStartEndLoc" locs
else endPos')
in (startPos, endPos)
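{- Illustrative usage (a sketch, not part of the original module): a refactoring
   that needs the textual span of an AST phrase 'decl' in the token stream
   'toks' would typically be written along the lines of

       let (startPos, endPos) = getStartEndLoc toks decl
           declToks           = getToks (startPos, endPos) toks

   and then operate on declToks, e.g. to delete or duplicate the phrase in the
   source. getToks is assumed to select the tokens between the two positions,
   as it is used in the HsMatchP instance above. -}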
{- CHECK: myppi.
adjustLoc toks (startPos,endPos) t -- to handle syntax phrase starts/ends with [], () ...
where
adjustLoc toks (startPos,endPos) t
= let astToks = filter (not.unwantedTok) $ tokenise (Pos 0 0 1) 1 True $ (render.myppi) t
(toks1,toks2, toks3) = splitToks (startPos, endPos) toks
toks2' = filter (not.unwantedTok) toks2
(t1, t2) =(ghead "getStartEndLoc1" astToks, glast "getStartEndLoc2" astToks)
startPos'= if sameToks t1 (ghead "getStartEndLoc3" toks2')
then startPos
else tokenPos $ ghead "getStartEndLoc4" $ dropWhile (\t-> not (sameToks t t1)) (reverse toks1)
endPos' = if sameToks t2 (glast "getStartEndLoc2" toks2')
then endPos
else tokenPos $ ghead "getStartEndLoc5" $ dropWhile (\t-> not (sameToks t t2)) toks3
in (startPos', endPos')
unwantedTok t = isWhite t || isCloseBracket t || isOpenBracket t || isOpenSquareBracket t
|| isCloseSquareBracket t || isComma t
sameToks (t1, (l1, c1)) (t2, (l2, c2)) = t1 == t2 && c1 == c2
-}
-- this function has problems when it encounters something like [.....[p]]
extendBothSides toks startLoc endLoc forwardCondFun backwardCondFun
=let (toks1,toks2)=break (\t->tokenPos t==startLoc) toks
toks21=gtail ("extendBothSides" ++ (show (startLoc, endLoc, toks2)) ) $ dropWhile (\t->tokenPos t /=endLoc) toks2
firstLoc=case (dropWhile (not.forwardCondFun) (reverse toks1)) of
[] -> startLoc -- is this the correct default?
l -> (tokenPos.ghead "extendBothSides:lastTok") l
lastLoc =case (dropWhile (not.backwardCondFun) toks21) of
[] ->endLoc --is this a correct default?
l -> (tokenPos.ghead "extendBothSides:lastTok") l
in (firstLoc, lastLoc)
extendForwards toks startLoc endLoc forwardCondFun
=let toks1=takeWhile (\t->tokenPos t /= startLoc) toks
firstLoc=case (dropWhile (not.forwardCondFun) (reverse toks1)) of
[] ->startLoc -- is this the correct default?
l -> (tokenPos.ghead "extendForwards") l
in (firstLoc, endLoc)
extendBackwards toks startLoc endLoc backwardCondFun
= let toks1= gtail "extendBackwards" $ dropWhile (\t->tokenPos t /=endLoc) toks
lastLoc=case (dropWhile (not.backwardCondFun) toks1) of
[] ->endLoc -- is this the correct default?
l ->(tokenPos. ghead "extendBackwards") l
in (startLoc, lastLoc)
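{- Illustrative sketch (assumed token layout, not from the original module):
   for a declaration such as "data T = C Int" the span computed from the type
   pattern starts at 'T'; a call of the form

       extendForwards toks startLoc endLoc isData

   scans the tokens before startLoc in reverse order for the first token
   satisfying isData and returns the position of the 'data' keyword as the new
   start location, leaving the end location unchanged. extendBackwards works
   symmetrically on the tokens after endLoc (e.g. with isCloseBracket for an
   export entry T(..)), and extendBothSides combines both directions. -}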
------------------Some functions for associating comments with syntax phrases.---------------------------
{- Note: We assume that a comment before t belongs to t only if there is at most one blank line between them,
   and a comment after t belongs to t only if the comment starts on the last line of t.
-}
{- Get the start and end location of the syntax phrase t, then extend the end location to cover any comment, white
   space or newline that starts on the same line as the end location. -}
startEndLocIncFowComment::(Term t, Printable t,StartEndLoc t)=>[PosToken]->t->(SimpPos,SimpPos)
startEndLocIncFowComment toks t
=let (startLoc,endLoc)=getStartEndLoc toks t
toks1= gtail "startEndLocIncFowComment" $ dropWhile (\t->tokenPos t/=endLoc) toks
toks11 = let (ts1, ts2) = break hasNewLn toks1
in (ts1 ++ if ts2==[] then [] else [ghead "startEndLocInFowComment" ts2])
in if toks11/=[] && all (\t->isWhite t || endsWithNewLn t) toks11
then (startLoc, tokenPos (glast "startEndLocIncFowComment" toks11))
else (startLoc, endLoc)
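{- Illustrative example (assumed source layout, not from the original module):
   given

       f x = x + 1   -- increment

   the span of the binding computed by getStartEndLoc ends at '1';
   startEndLocIncFowComment extends the end location over the trailing white
   space and the comment "-- increment", because only white tokens occur
   before the next newline, so removing the binding also removes its
   same-line comment. -}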
{- Get the start and end location of t in the token stream, then extend the end location to cover
  the following '\n' if there are no other characters (except white space) between t and the '\n'
-}
startEndLocIncFowNewLn::(Term t, Printable t,StartEndLoc t)=>[PosToken]->t->(SimpPos,SimpPos)
startEndLocIncFowNewLn toks t
=let (startLoc,endLoc)=getStartEndLoc toks t
toks1 = dropWhile isWhiteSpace $ gtail "startEndLocIncFowNewLn" $ dropWhile (\t->tokenPos t /=endLoc) toks
nextTok= if toks1==[] then defaultToken else head toks1
in if isNewLn nextTok
then (startLoc, tokenPos nextTok)
else (startLoc, endLoc)
{- Get the start and end location of t in the token stream, then extend the start and end location to
 cover the preceding and following comments.
-}
startEndLocIncComments::(Term t, StartEndLoc t,Printable t)=>[PosToken]->t->(SimpPos,SimpPos)
startEndLocIncComments toks t
=let (startLoc,endLoc)=getStartEndLoc toks t
(toks11,toks12)= let (ts1,ts2) = break (\t->tokenPos t == startLoc) toks
(ts11, ts12) = break hasNewLn (reverse ts1)
in (reverse ts12, reverse ts11++ts2)
toks12'=takeWhile (\t->tokenPos t /=startLoc) toks12
startLoc'=
if all isWhite toks12'
then -- group the toks1 according to lines in a reverse order.
let groupedToks=reverse $ groupTokensByLine toks11
-- empty lines right before t
emptyLns=takeWhile (all (\t->isWhiteSpace t || isNewLn t )) groupedToks
lastComment=if length emptyLns <=1 -- get the comment if there is any
then takeWhile (all isWhite) $ takeWhile (any isComment) $ dropWhile
(all (\t->isWhiteSpace t || isNewLn t)) groupedToks
else [] -- no comment
toks1'=if lastComment /=[] then concat $ reverse (emptyLns ++ lastComment)
else []
in if toks1'==[]
then if toks12'/=[]
then (tokenPos (ghead "startEndLocIncComments" toks12')) --there is no comment before t
else startLoc
--there is a comment before t
else tokenPos (ghead "startEndLocIncComments" toks1')
else startLoc
-- tokens after t
toks2=gtail "startEndLocIncComments1" $ dropWhile (\t->tokenPos t/=endLoc) toks
-- toks21 are those tokens that are in the same line with the last line of t
(toks21,tok22)= let (ts11, ts12) = break hasNewLn toks2
in (ts11 ++ if ts12==[] then [] else [ghead "startEndLocIncComments" ts12],
gtail "startEndLocIncComments2" ts12)
in if toks21==[] then (startLoc',endLoc) -- no following comments.
else if all (\t->isWhite t || endsWithNewLn t) toks21 --get the following white tokens in the same
--line of the last token of t
then (startLoc', tokenPos (last toks21))
else (startLoc', endLoc)
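{- Illustrative example (assumed source layout, not from the original module):
   given

       -- helper used by g

       f x = x + 1   -- increment

   startEndLocIncComments moves the start location back onto the line
   "-- helper used by g", because at most one blank line separates the comment
   from the binding, and moves the end location over the trailing comment
   "-- increment"; the comments therefore travel with the definition when it
   is moved or deleted. -}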
--Create a list of white space tokens.
whiteSpacesToken::SimpPos->Int->[PosToken]
whiteSpacesToken (row,col) n
|n>0 = [(Whitespace,(Pos 0 row col,replicate n ' '))]
|otherwise = []
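-- Example (illustrative): a run of four spaces starting at row 3, column 5 is
-- represented by a single whitespace token, and a non-positive width yields
-- no tokens at all:
--
-- > whiteSpacesToken (3,5) 4 == [(Whitespace, (Pos 0 3 5, "    "))]
-- > whiteSpacesToken (3,5) 0 == []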
-------------------------------------------------------------------------------------------------
adjustOffset::Int->[PosToken]->Bool->[PosToken]
adjustOffset offset [] _ = []
adjustOffset offset toks firstLineIncluded
= let groupedToks = groupBy (\x y->tokenRow x==tokenRow y) toks --groupedToks/=[], no problem with 'head'
                        --if firstLineIncluded is False, the offset of the first line won't be adjusted.
in if offset>=0 then if firstLineIncluded
then concatMap (doAddWhites offset) groupedToks
else ghead "adjustOffset" groupedToks ++ concatMap (doAddWhites offset) (tail groupedToks)
else if firstLineIncluded
then concatMap (doRmWhites (-offset)) groupedToks
else ghead "adjustOffset" groupedToks ++ concatMap (doRmWhites (-offset)) (tail groupedToks)
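{- Illustrative example (a sketch; doAddWhites and doRmWhites are assumed to
   add and remove the given number of leading white-space columns per line):

       adjustOffset 2 toks False

   leaves the tokens of the first line untouched and shifts every following
   line two columns to the right, while a negative offset, e.g.

       adjustOffset (-2) toks True

   removes two columns of leading white space from every line, including the
   first. -}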
|
forste/haReFork
|
refactorer/RefacLocUtils.hs
|
bsd-3-clause
| 68936
| 34
| 22
| 25334
| 18504
| 9728
| 8776
| -1
| -1
|