code stringlengths 5 1.03M | repo_name stringlengths 5 90 | path stringlengths 4 158 | license stringclasses 15 values | size int64 5 1.03M | n_ast_errors int64 0 53.9k | ast_max_depth int64 2 4.17k | n_whitespaces int64 0 365k | n_ast_nodes int64 3 317k | n_ast_terminals int64 1 171k | n_ast_nonterminals int64 1 146k | loc int64 -1 37.3k | cycloplexity int64 -1 1.31k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
module ClashSpec (main, spec) where
import Clash
import Test.Hspec
import Test.QuickCheck
import Test.QuickCheck.Instances
-- | Test-suite entry point: run 'spec' with hspec's default runner.
main :: IO ()
main = hspec spec
-- | Top-level spec: QuickCheck-checks 'someFunction' over arbitrary inputs.
spec :: Spec
spec =
  describe "someFunction" $
    it "should work fine" $
      property someFunction
-- | Property claiming its two Bool arguments are equal -- presumably a
-- placeholder for a real property (it fails whenever x /= y).
someFunction :: Bool -> Bool -> Property
someFunction = (===)
| athanclark/clash | test/ClashSpec.hs | bsd-3-clause | 341 | 0 | 13 | 70 | 112 | 59 | 53 | 14 | 1 |
{-# LANGUAGE DeriveAnyClass #-}
{-# LANGUAGE DeriveGeneric #-}
module ZM.Type.Char where
import Data.Model
import ZM.Type.Words
-- |A Unicode Char
-- NOTE: this 'Char' intentionally shadows 'Prelude.Char'; it models a
-- Unicode code point as a ZM 'Word32' and derives a ZM 'Model' instance.
data Char = Char Word32 deriving (Eq, Ord, Show, Generic, Model)
| tittoassini/typed | src/ZM/Type/Char.hs | bsd-3-clause | 216 | 0 | 6 | 34 | 53 | 32 | 21 | 6 | 0 |
module Main
where
import Language.Haskell.Preprocessor
import qualified Data.ByteString as B
import System.Directory
import System.Environment (getArgs)
import Control.Arrow ((&&&), second)
import Control.Applicative ((<$>))
import Control.Monad
import System.FilePath
import Data.Version (showVersion)
import Paths_hsb2hs (version)
-- | Entry point: @--version@ prints the package version; any other
-- argument list is handed to the preprocessor with the blob extension.
main :: IO ()
main = do
  cliArgs <- getArgs
  if "--version" `elem` cliArgs
    then putStrLn (showVersion version)
    else transform blobExtension cliArgs
-- | The preprocessor extension: the stock 'base' extension with its
-- transformer replaced by 'processBlobs'.
blobExtension :: Extension
blobExtension = base { transformer = processBlobs }
-- | Walk the token stream, replacing each @% blob "file"@ /
-- @% blobs "dir"@ directive (three consecutive tokens: operator,
-- keyword, string literal) with a single string-literal token holding
-- the embedded data.
processBlobs :: [Ast] -> IO [Ast]
processBlobs [] = return []
processBlobs (Single (Token Operator _ l "%") :
              Single (Token Variable _ _ kw) :
              Single (Token StringLit _ _ lit) :
              xs) | kw == "blobs" || kw == "blob" = do
  let f = stripQuotes lit
  -- "blob": embed one file's bytes; "blobs": embed the recursive
  -- (path, contents) listing from fileList'. Both rendered via 'show'.
  t <- if kw == "blob"
          then show `fmap` B.readFile f
          else show `fmap` fileList' f ""
  rest <- processBlobs xs
  -- The replacement literal keeps the directive's original line number l.
  return $ (Single (Token StringLit [] l t)) : rest
  where stripQuotes = reverse . stripLeadingQuote . reverse . stripLeadingQuote
        stripLeadingQuote ('"':ys) = ys
        stripLeadingQuote ys = ys
-- Recurse into nested blocks.
processBlobs (Block i l b r n : xs) = do
  bs <- processBlobs b
  rest <- processBlobs xs
  return $ Block i l bs r n : rest
-- Any other token passes through unchanged.
processBlobs (x : xs) = do
  rest <- processBlobs xs
  return $ x : rest
-- fileList' is taken from Michael Snoyman's file-embed
-- | Recursively list every file under @realTop </> top@, returning
-- paths relative to @top@ paired with their (strict) contents.
fileList' :: FilePath -> FilePath -> IO [(FilePath, B.ByteString)]
fileList' realTop top = do
  allContents <- filter isReal <$> getDirectoryContents (realTop </> top)
  -- Pair each entry's relative path with its absolute path.
  let all' = map ((top </>) &&& (\x -> realTop </> top </> x)) allContents
  files <- filterM (doesFileExist . snd) all' >>=
           mapM (liftPair2 . second B.readFile)
  dirs <- filterM (doesDirectoryExist . snd) all' >>=
          mapM (fileList' realTop . fst)
  return $ concat $ files : dirs
-- | Filter out the "." and ".." pseudo-entries from directory listings.
isReal :: FilePath -> Bool
isReal entry = entry `notElem` [".", ".."]
-- | Pull the monadic action out of a pair's second slot.
liftPair2 :: Monad m => (a, m b) -> m (a, b)
liftPair2 (a, mb) = fmap (\b -> (a, b)) mb
| jgm/hsb2hs | hsb2hs.hs | bsd-3-clause | 2,113 | 0 | 16 | 498 | 809 | 418 | 391 | 57 | 3 |
module Main where
import Lib
import Interpreter
import Control.Monad
import Control.Monad.State
import qualified Data.Map as Map
-- | Prompt for a program file, parse each line as a 'Statement' via
-- 'read', and hand the resulting program to the interpreter.
main :: IO ()
main = do
  putStrLn "Plz enter filename (file1.txt may work)"
  f <- getLine
  content <- readFile f
  -- NOTE(review): 'read' is partial -- a malformed line crashes the
  -- program; 'readMaybe' would allow graceful error reporting.
  run $ map (read :: String -> Statement) $ lines content
| irwingarry/iterpret | app/Main.hs | bsd-3-clause | 306 | 0 | 11 | 59 | 94 | 50 | 44 | 12 | 1 |
module Simulation.Internal
( computeSimulation
, AnalysisParameters(..)
, turbidostatCoefficientsForPopulationSize
, optimumCalculation
, randomOptimum
, collapse
, randomPopulation
-- , randomRules
, average
, stdDev
, allTheSame
, almostAllTheSame
) where
import SimulationConstants
import Evolution
import Expression
import Genes
import Individual
import ListUtils
import Phenotype
import Population
import Schema
import Control.Monad
import Control.Exception.Base
import Data.List hiding (union)
import Data.MultiSet (MultiSet, fromList, toOccurList, distinctSize, unions, union)
import Data.Random
import Data.Random.Distribution.Bernoulli
import Data.Random.Extras hiding (shuffle)
import System.Random
import Debug.Trace
-- | Knobs for one simulation run, supplied by the analysis driver.
data AnalysisParameters = AnalysisParameters
  { separatedGenerations :: Bool -- ^ non-overlapping generations when True
  , hardSelectionThreshold :: Double -- ^ fitness cutoff for hard selection
  , populationSize :: Int -- ^ initial/expected population size
  , optimumChange :: [(Double, Double, Double)] -- ^ NOTE(review): appears unused in this module -- confirm callers
  , maxAge :: Int -- ^ individuals older than this are killed
  , countOfBases :: Int -- ^ length of each DNA string
  , countOfPleiotropicRules :: Int
  , countOfEpistaticRules :: Int
  , ratioOfNegativeDominantRules :: Double
  , ratioOfPositiveDominantRules :: Double -- ^ NOTE(review): unused here (see FIXME in params2rules)
  , ratioOfPleiotropicRules :: Double
  , seed :: Int -- ^ PRNG seed; derived computations use fixed offsets of it
  } deriving Show
-- | A generation-zero population of @count@ random individuals.
randomPopulation :: Int -> ExpressionStrategy -> Double -> Double -> Int -> RVar Population
randomPopulation count expressionStrategy ratioOfNegativeDominance probabilityPleiotropic baseCount = Population 0 <$> randomIndividuals count expressionStrategy ratioOfNegativeDominance probabilityPleiotropic baseCount

-- | @count@ independently drawn random individuals.
randomIndividuals :: Int -> ExpressionStrategy -> Double -> Double -> Int -> RVar [Individual]
randomIndividuals count expressionStrategy ratioOfNegativeDominance probabilityPleiotropic baseCount = replicateM count $ randomIndividual ratioOfNegativeDominance probabilityPleiotropic baseCount expressionStrategy

-- | One random individual: random sex, random diploid chromosomes,
-- age 0, phenotype computed by the supplied expression strategy.
randomIndividual :: Double -> Double -> Int -> ExpressionStrategy -> RVar Individual
randomIndividual ratioOfNegativeDominance probabilityPleiotropic baseCount expressionStrategy = do
  individualsSex <- randomSex
  chs <- randomChromosomes ratioOfNegativeDominance probabilityPleiotropic baseCount
  return $ Individual individualsSex 0 chs $ expressionStrategy individualsSex chs

-- | Uniform choice between female and male.
randomSex :: RVar Sex
randomSex = choice [F, M]

-- | A random pair of homologous DNA strings (diploid genome).
randomChromosomes :: Double -> Double -> Int -> RVar (DnaString, DnaString)
randomChromosomes ratioOfNegativeDominance ratioPleiotropic baseCount = do
  dna1 <- randomDnaString ratioOfNegativeDominance ratioPleiotropic baseCount
  dna2 <- randomDnaString ratioOfNegativeDominance ratioPleiotropic baseCount
  return (dna1, dna2)

-- | A DNA string of @baseCount@ independently drawn alleles.
randomDnaString :: Double -> Double -> Int -> RVar DnaString
randomDnaString ratioOfNegativeDominance ratioPleiotropic baseCount = DnaString <$> replicateM baseCount (randomInitAllele ratioOfNegativeDominance ratioPleiotropic)

-- | One initial allele.
-- NOTE(review): the Bernoulli probability is 0.0, so @isZero@ is
-- presumably always False and the zero-phenotype branch never fires --
-- confirm whether that is intentional.
randomInitAllele :: Double -> Double -> RVar Allele
randomInitAllele ratioNegativeDominance ratioPleiotropic = do
  isZero <- boolBernoulli (0.0 :: Float)
  if isZero
    then return $ Allele zeroPhenotype zeroPhenotype
    else randomAllele ratioNegativeDominance ratioPleiotropic
-- | Mean fitness of a population against the optimum of the given generation.
avgFitness :: (Int -> Phenotype) -> Int -> Population -> Double
avgFitness generationOptimum generationNumber = avgFitnessForGeneration (generationOptimum generationNumber)

-- | Fraction of loci (over all individuals) that are homozygous,
-- i.e. carry equal alleles on both homologous chromosomes.
homozygotness :: (Int -> Phenotype) -> Int -> Population -> Double
homozygotness _ _ population =
  let
    is = individuals population
    chs = map chromosomes is
    -- Pair up corresponding genes from the two homologous chromosomes.
    ps = zip (concatMap (genes . fst) chs) (concatMap (genes . snd) chs)
    homos = filter (uncurry (==)) ps
  in
    fromIntegral (length homos) / fromIntegral (length ps)

-- | Fraction of loci homozygous for an allele whose plain effect
-- differs from its dominant effect.
dominantHomozygotness :: (Int -> Phenotype) -> Int -> Population -> Double
dominantHomozygotness _ _ population =
  let
    is = individuals population
    chs = map chromosomes is
    ps = zip (concatMap (genes . fst) chs) (concatMap (genes . snd) chs)
    homos = filter (\p -> effect p /= dominantEffect p) $ map fst $ filter (uncurry (==)) ps
  in
    fromIntegral (length homos) / fromIntegral (length ps)

-- | Mean fitness of the individuals against a fixed optimum.
avgFitnessForGeneration :: Phenotype -> Population -> Double
avgFitnessForGeneration optimum (Population _ is) = average $ map (fitness optimum . phenotype) is

-- | Standard deviation of fitness for the given generation's optimum.
stdDevFitness :: (Int -> Phenotype) -> Int -> Population -> Double
stdDevFitness generationOptimum generationNumber = stdDevFitnessForGeneration (generationOptimum generationNumber)

-- | Standard deviation of fitness against a fixed optimum.
stdDevFitnessForGeneration :: Phenotype -> Population -> Double
stdDevFitnessForGeneration optimum (Population _ is) = stdDev $ map (fitness optimum . phenotype) is
-- | True when every element equals the first one (vacuously True for
-- the empty list). Pattern-matches in the head instead of using the
-- partial 'head'/'tail'.
allTheSame :: (Eq a) => [a] -> Bool
allTheSame [] = True
allTheSame (x:xs) = all (== x) xs
-- | True when one single value accounts for at least 90% of the list
-- (vacuously True for the empty list).
almostAllTheSame :: (Ord a) => [a] -> Bool
almostAllTheSame [] = True
almostAllTheSame xs = fromIntegral topCount >= (0.90 :: Double) * fromIntegral (length xs)
  where
    -- Occurrence count of the most frequent element.
    topCount = maximum $ map snd $ toOccurList $ fromList xs
-- | Fraction of loci that are polymorphic: at least two distinct
-- alleles present at that position somewhere in the population.
polymorphism :: Population -> Double
polymorphism population = 1.0 - fromIntegral (length $ filter id same) / fromIntegral (length same)
  where
    chs = map chromosomes $ individuals population
    -- One list per locus, collecting that position's allele from every chromosome.
    genesList = transpose $ map genes $ map fst chs ++ map snd chs
    same :: [Bool]
    same = map allTheSame genesList

-- | Like 'polymorphism', but a locus already counts as monomorphic when
-- one allele reaches a 90% share (see 'almostAllTheSame').
almostPolymorphism :: Population -> Double
almostPolymorphism population = 1.0 - fromIntegral (length $ filter id same) / fromIntegral (length same)
  where
    chs = map chromosomes $ individuals population
    genesList = transpose $ map genes $ map fst chs ++ map snd chs
    same :: [Bool]
    same = map almostAllTheSame genesList

-- | Number of distinct alleles present anywhere in the population.
aleleCount :: Population -> Double
aleleCount population = fromIntegral $ distinctSize allAllelas
  where
    allAllelas :: MultiSet Allele
    allAllelas = unions $ map (\(c1, c2) -> union (fromList $ genes c1) (fromList $ genes c2)) $ map chromosomes $ individuals population
-- | Arithmetic mean. NaN for an empty list (0/0).
average :: [Double] -> Double
average values = sum values / fromIntegral (length values)

-- | Population standard deviation (divides by n, not n - 1).
stdDev :: [Double] -> Double
stdDev values = sqrt (squaredDeviations / n)
  where
    m = average values
    n = fromIntegral (length values)
    squaredDeviations = sum [ (v - m) * (v - m) | v <- values ]
-- | Minimum fitness for the given generation's optimum.
minFitness :: (Int -> Phenotype) -> Int -> Population -> Double
minFitness generationOptimum generationNumber = minFitnessForGeneration (generationOptimum generationNumber)

-- | Minimum fitness against a fixed optimum; NaN for an empty population.
minFitnessForGeneration :: Phenotype -> Population -> Double
minFitnessForGeneration _ (Population _ []) = nan
minFitnessForGeneration optimum (Population _ is) = minimum $ map (fitness optimum . phenotype) is

-- | Fitness at the given percentile (0.0-1.0) of the sorted fitness
-- distribution; NaN for an empty population.
percentileFitness :: Double -> (Int -> Phenotype) -> Int -> Population -> Double
percentileFitness _ _ _ (Population _ []) = nan
percentileFitness percentile generationOptimum generationNumber (Population _ is) =
  sort (map (fitness (generationOptimum generationNumber) . phenotype) is) !! floor (percentile * fromIntegral (length is))
-- | Placeholder rule generator: currently always yields no epistatic
-- rules. Preconditions are checked with 'assert' (active only in
-- builds without -fignore-asserts).
--
-- Fix: the original bound the assert applications to unused lazy @_@
-- let-bindings, so they could never be evaluated and never fire; they
-- now wrap the returned value.
randomRules :: Int -> Int -> RVar [(Schema, Phenotype)]
randomRules baseCount epistaticRulesCount =
  assert (baseCount > 0) $
    assert (epistaticRulesCount == 0) $
      return []
-- | A random phenotype scaled by the configured optimum size coefficient.
randomOptimum :: RVar Phenotype
randomOptimum = randomPhenotypeFraction optimumSizeCoefficient
-- | Build the expression strategy from randomly generated epistatic
-- rules layered on top of the common expression.
--
-- Fix: removed a leftover 'traceShowId' (Debug.Trace) that dumped the
-- generated rule list to stderr on every evaluation.
express :: Int -> Int -> RVar ExpressionStrategy
express baseCount epistaticRulesCount = do
  rules <- randomRules baseCount epistaticRulesCount
  let matchers = map (matches . fst) rules
      changes = map snd rules
  return $ commonExpression $ schemaBasedExpression $ zip matchers changes
-- | Run a random computation deterministically from an integer seed.
collapse :: Int -> RVar a -> a
collapse seedValue x = fst $ sampleState x (mkStdGen seedValue)
-- | Death-rate coefficient that keeps the turbidostat population around
-- the expected size, given the accident-death probability and the
-- maximum age. The division order of the original is preserved exactly.
turbidostatCoefficientsForPopulationSize :: Double -> Int -> Int -> Double
turbidostatCoefficientsForPopulationSize accidentDeathProbability' maximumAge expectedPopulationSize =
  numerator / 27.0 / popSize / popSize
  where
    popSize = fromIntegral expectedPopulationSize
    numerator = 4 - 12 * accidentDeathProbability' - (12.0 / fromIntegral maximumAge)
-- | Moving optimum: 'optimum2' holds during the window between
-- optimumChangeGeneration and twice that generation, 'optimum1'
-- before and after it.
optimumCalculation :: Phenotype -> Phenotype -> Int -> Phenotype
optimumCalculation optimum1 optimum2 g
  | g < optimumChangeGeneration = optimum1
  | g > 2 * optimumChangeGeneration = optimum1
  | otherwise = optimum2
-- | Translate user-facing analysis parameters into the rule set the
-- evolution engine consumes (mutation, breeding, selection, deaths,
-- expression and the moving optimum). All randomness is collapsed with
-- fixed offsets of the user seed, so a run is reproducible.
params2rules :: AnalysisParameters -> EvolutionRules
params2rules params =
  let
    baseCount = countOfBases params
    epistaticRulesCount = countOfEpistaticRules params
    negativeDominantRulesRatio = ratioOfNegativeDominantRules params
    pleiotropicRulesRatio = ratioOfPleiotropicRules params
    -- FIXME positiveDominantRulesRatio = ratioOfPositiveDominantRules params
    expression' = collapse (seed params) $ express
                    baseCount
                    epistaticRulesCount
    breedingStrategy = if separatedGenerations params
                         then panmictic expression'
                         else panmicticOverlap expression'
    startPopulationSize = populationSize params
    hSelection :: Phenotype -> Selection
    hSelection optimum = hardSelection (fitness optimum) $ hardSelectionThreshold params
    maximumAge = maxAge params
    -- Two optima from independent seed offsets; the optimum jumps from
    -- optimum1 to optimum2 and back (see 'optimumCalculation').
    optimum1 = collapse (seed params + 1) randomOptimum
    optimumC = collapse (seed params + 2) $ randomPhenotypeFraction optimumChangeSizeCoefficient
    optimum2 = Phenotype $ zipWithCheck (+) (phenotypeToVector optimum1) (phenotypeToVector optimumC)
    turbidostatCoefficients = turbidostatCoefficientsForPopulationSize accidentDeathProbability maximumAge startPopulationSize
  in
    EvolutionRules { mutation = [ pointMutation negativeDominantRulesRatio pleiotropicRulesRatio]
                   , breeding = [ breedingStrategy ]
                   , selection = [ hSelection ]
                   , deaths =
                       [ \_ -> turbidostat turbidostatCoefficients accidentDeathProbability
                       , killOld maximumAge
                       ]
                   , expression = expression'
                   , optimumForGeneration = optimumCalculation optimum1 optimum2
                   }
-- | Run one full simulation and return named time series (statistic
-- name paired with [(generation, value)]) for plotting/analysis.
computeSimulation :: AnalysisParameters -> [(String, [(Integer, Double)])]
computeSimulation params =
  let
    rules = params2rules params
    startPopulationSize = populationSize params
    initialPopulation = randomPopulation startPopulationSize (expression rules) (ratioOfNegativeDominantRules params) (ratioOfPleiotropicRules params) $ countOfBases params
    allGenerations = evolution maxSteps rules initialPopulation
    generations :: [Population]
    -- Seed offset +3 keeps this draw independent of the ones in params2rules.
    generations = collapse (seed params + 3) allGenerations
    -- Evaluate a per-generation statistic over the whole run.
    stats :: (Int -> Population -> Double) -> [(Integer, Double)]
    stats f = zip [0..] (zipWith f [0..] generations)
  in [ ("Avg Fitness", stats $ avgFitness $ optimumForGeneration rules)
     , ("Min Fitness", stats $ minFitness $ optimumForGeneration rules)
     , ("10% percentile Fitness", stats $ percentileFitness 0.1 $ optimumForGeneration rules)
     , ("Odchylka Fitness", stats $ stdDevFitness $ optimumForGeneration rules)
     , ("Population Size", stats $ const $ fromIntegral . length . individuals)
     , ("homozygotness", stats $ homozygotness $ optimumForGeneration rules)
     , ("% of dominant homozygotes", stats $ dominantHomozygotness $ optimumForGeneration rules)
     , ("% of polymorphic locus", stats $ const polymorphism)
     , ("% of locus with allele with more than 90% appearence", stats $ const almostPolymorphism)
     , ("# different alalas", stats $ const aleleCount)
     ]
module Actor.Player (player) where
import Control.Applicative
import Control.Monad
import Graphics.Vty
import Types
import Utils
import Data.List
import Data.Ord
-- | The player's glyph: a bright blue \@ with the default background.
playerImg :: Image
playerImg = string (withForeColor defAttr brightBlue) "@"
-- | The player actor: starts at the given coordinates, visible, and
-- loops reading one user action per tick until Quit.
player :: Coords -> Actor ()
player initPos = (ActorData playerImg initPos True, prog)
  where
    -- Euclidean distance between two coordinate pairs.
    distance :: Coords -> Coords -> Double
    distance target = vecLen . toVec . (|-| target)
    prog = do
      action <- getUserAction
      case action of
        Quit -> return ()
        Move dir -> do
          pos <- getActorPosition
          moveActor $ pos |+| dyx dir
          nextTick >> prog
        Attack -> do
          pos <- getActorPosition
          zombies <- filter (not . actorIsPlayer) <$> getOtherActors
          -- zombie or treasure, is more like it.. or DrawActor, which is a tad unfortunate
          -- Hit the nearest non-player actor for 10 damage.
          unless (null zombies) $
            let zombie = minimumBy (comparing $ distance pos . actorPos) zombies in
            hurtActor (zombie, 10)
          nextTick >> prog
        -- Any other input is ignored; loop without advancing the tick.
        _ -> prog
    -- Translate a direction into a (row, column) delta.
    dyx DUp = (-1, 0)
    dyx DDown = ( 1, 0)
    dyx DLeft = ( 0, -1)
    dyx DRight = ( 0, 1)
| bjornars/HaskellGame | src/Actor/Player.hs | bsd-3-clause | 1,256 | 0 | 23 | 447 | 365 | 188 | 177 | 34 | 7 |
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE ScopedTypeVariables #-}
module Snap.Test.BDD
(
-- * Types
SnapTesting
, TestResult(..)
, Sentiment(..)
, TestResponse(..)
, SnapTestingConfig (..)
-- * Configuration
, defaultConfig
-- * Running tests
, runSnapTests
, consoleReport
, linuxDesktopReport
-- * Labeling
, name
-- * Applying Predicates
, should
, shouldNot
-- * Helpers for running tests
, css
, val
-- * Getting Responses
, get
, get'
, post
, params
-- * Work with Responses
, restrictPage
-- * Predicates on values
, equal
, beTrue
-- * Predicates on Responses
, succeed
, notfound
, redirect
, redirectTo
, haveText
, haveSelector
-- * Stateful value tests
, changes
-- * Stateful form tests
, FormExpectations(..)
, form
-- * Run actions after block
, cleanup
-- * Evaluating arbitrary actions
, eval
-- * Create helpers
, modifySite
-- * Integrate with QuickCheck
, quickCheck
) where
import Data.ByteString (ByteString)
import Data.List (intercalate, intersperse)
import Data.Map (Map)
import qualified Data.Map as M (empty, fromList, lookup,
mapKeys)
import Data.Maybe (fromMaybe)
import Data.Text (Text, pack, unpack)
import qualified Data.Text as T (append, concat, isInfixOf)
import Data.Text.Encoding (decodeUtf8, encodeUtf8)
import Prelude hiding (FilePath, log)
import Control.Applicative
import Control.Concurrent.Async
import Control.Exception (SomeException, catch)
import Control.Monad (void)
import Control.Monad.Trans
import Control.Monad.Trans.State (StateT, evalStateT)
import qualified Control.Monad.Trans.State as S (get, put)
import System.Process (system)
import Snap.Core (Response (..), getHeader)
import Snap.Snaplet (Handler, Snaplet, SnapletInit)
import Snap.Snaplet.Test (InitializerState, closeSnaplet,
evalHandler', getSnaplet,
runHandler')
import Snap.Test (RequestBuilder, getResponseBody)
import qualified Snap.Test as Test
import Test.QuickCheck (Args (..), Result (..), Testable,
quickCheckWithResult, stdArgs)
import System.IO.Streams (InputStream, OutputStream)
import qualified System.IO.Streams as Stream
import qualified System.IO.Streams.Concurrent as Stream
import qualified Text.Digestive as DF
import qualified Text.HandsomeSoup as HS
import qualified Text.XML.HXT.Core as HXT
-- | The main type for this library, where `b` is your application state,
-- often called `App`. This is a State monad on top of IO, where the State carries
-- your application (or, more specifically, a top-level handler), and stream of test results
-- to be reported as passing or failing.
type SnapTesting b a = StateT (Handler b b ()
                              , (Snaplet b, InitializerState b)
                              , OutputStream TestResult) IO a

-- | A TestResponse is the result of making a request. Many predicates operate on these types of
-- responses, and custom predicates can be written against them.
data TestResponse = Html Text | NotFound | Redirect Int Text | Other Int | Empty

-- | A CSS selector string; build values with 'css'.
data CssSelector = CssSelector Text
-- | Tests have messages that are agnostic to whether the result should hold or should not hold.
-- The sentiment is attached to them to indicate that positive/negative statement. This allows
-- the same message to be used for tests asserted with `should` and `shouldNot`.
data Sentiment a = Positive a | Negative a deriving Show

-- | Swap a message's polarity (used by 'shouldNot' to invert reporting).
flipSentiment :: Sentiment a -> Sentiment a
flipSentiment sentiment =
  case sentiment of
    Positive a -> Negative a
    Negative a -> Positive a
-- | TestResult is a flattened tree structure that reflects the structure of your tests,
-- and is the data that is passed to report generators.
data TestResult = NameStart Text -- ^ opens a named group (see 'name')
                | NameEnd -- ^ closes the innermost open group
                | TestPass (Sentiment Text)
                | TestFail (Sentiment Text)
                | TestError Text deriving Show
-- | The configuration that is passed to the test runner, currently just a list of report
-- generators, that are each passed a stream of results, and can do any side effecting thing
-- with them.
data SnapTestingConfig = SnapTestingConfig { reportGenerators :: [InputStream TestResult -> IO ()]
                                           }

-- | The default configuration just prints results to the console, using the `consoleReport`.
defaultConfig :: SnapTestingConfig
defaultConfig = SnapTestingConfig { reportGenerators = [consoleReport]
                                  }
-- | dupN duplicates an input stream N times
-- (each duplicate sees every item of the original stream).
dupN :: Int -> InputStream a -> IO [InputStream a]
dupN 0 _ = return []
dupN 1 s = return [s]
dupN n s = do (a, b) <- Stream.map (\x -> (x,x)) s >>= Stream.unzip
              rest <- dupN (n - 1) b
              return (a:rest)
-- | Run a set of tests, putting the results through the specified report generators.
-- One consumer thread is started per report generator; the end-of-stream
-- marker written after the tests lets them all terminate.
--
-- Fix: renamed the local binding 'init' to 'initResult' so it no longer
-- shadows Prelude.init.
runSnapTests :: SnapTestingConfig -- ^ Configuration for test runner
             -> Handler b b () -- ^ Site that requests are run against (often route routes, where routes are your sites routes).
             -> SnapletInit b b -- ^ Site initializer
             -> SnapTesting b () -- ^ Block of tests
             -> IO ()
runSnapTests conf site app tests = do
  (inp, out) <- Stream.makeChanPipe
  let rgs = reportGenerators conf
  -- Duplicate the result stream so every generator sees all results.
  istreams <- dupN (length rgs) inp
  consumers <- mapM (\(inp', hndl) -> async (hndl inp')) (zip istreams rgs)
  initResult <- getSnaplet (Just "test") app
  case initResult of
    Left err -> error $ show err
    Right (snaplet, initstate) -> do
      evalStateT tests (site, (snaplet, initstate), out)
      -- End-of-stream marker: lets the report generators finish.
      Stream.write Nothing out
      mapM_ wait consumers
      closeSnaplet initstate
      return ()
-- | Prints test results to the console. For example:
--
-- > /auth/new_user
-- >   success PASSED
-- >   creates a new account PASSED
consoleReport :: InputStream TestResult -> IO ()
consoleReport stream = cr 0
  where -- Consume one result at a time, tracking the current indent level.
        cr indent = do log <- Stream.read stream
                       case log of
                         Nothing -> putStrLn "" >> return ()
                         Just (NameStart n) -> do putStrLn ""
                                                  printIndent indent
                                                  putStr (unpack n)
                                                  cr (indent + indentUnit)
                         Just NameEnd -> cr (indent - indentUnit)
                         Just (TestPass _) -> do putStr " PASSED"
                                                 cr indent
                         Just (TestFail msg) -> do putStr " FAILED\n"
                                                   printMessage indent msg
                                                   cr indent
                         Just (TestError msg) -> do putStr " ERROR("
                                                    putStr (unpack msg)
                                                    putStr ")"
                                                    cr indent
        -- Two spaces per nesting level.
        indentUnit = 2
        printIndent n = putStr (replicate n ' ')
        -- Failure details, phrased according to the message's sentiment.
        printMessage n (Positive m) = do printIndent n
                                         putStrLn "Should have held:"
                                         printIndent n
                                         putStrLn (unpack m)
        printMessage n (Negative m) = do printIndent n
                                         putStrLn "Should not have held:"
                                         printIndent n
                                         putStrLn (unpack m)
-- | Sends the test results to desktop notifications on linux.
-- Prints how many tests passed and failed.
--
-- NOTE(review): failing test names are interpolated into a shell
-- command run via 'system'; names containing single quotes could break
-- or inject into the command line -- consider 'rawSystem'.
linuxDesktopReport :: InputStream TestResult -> IO ()
linuxDesktopReport stream = do
  res <- Stream.toList stream
  let (failing, total) = count [] res
  case failing of
    [] ->
      void $ system $ "notify-send -u low -t 2000 'All Tests Passing' 'All " ++
                      (show total) ++ " tests passed.'"
    _ ->
      void $ system $ "notify-send -u normal -t 2000 'Some Tests Failing' '" ++
                      (show (length failing)) ++ " out of " ++
                      (show total) ++ " tests failed:\n\n" ++ (intercalate "\n\n" $ reverse failing) ++ "'"
  where count :: [Text] -> [TestResult] -> ([String], Int)
        -- The accumulator n is the stack of currently-open group names;
        -- a failure records the full "outer > inner" path.
        count _ [] = ([], 0)
        count n (TestPass _ : xs) = let (f, t) = count n xs
                                    in (f, 1 + t)
        count n (TestFail _ : xs) = let (f, t) = count n xs
                                    in (f ++ [unpack $ T.concat $ intersperse " > " $ reverse n], 1 + t)
        count n (TestError _ : xs) = let (f, t) = count n xs
                                     in (f, 1 + t)
        count n (NameStart nm : xs) = count (nm:n) xs
        count n (NameEnd : xs) = count (tail n) xs
-- | Push one result onto the shared output stream.
writeRes :: TestResult -> SnapTesting b ()
writeRes log = do (_,_,out) <- S.get
                  lift $ Stream.write (Just log) out

-- | Labels a block of tests with a descriptive name, to be used in report generation.
name :: Text -- ^ Name of block
     -> SnapTesting b () -- ^ Block of tests
     -> SnapTesting b ()
name s a = do
  writeRes (NameStart s)
  a
  writeRes NameEnd
-- | Run a request against the site under test and classify the raw
-- 'Response' into a 'TestResponse'. Handler errors are logged as
-- 'TestError' and yield 'Empty'.
runRequest :: RequestBuilder IO () -> SnapTesting b TestResponse
runRequest req = do
  (site, app, _) <- S.get
  res <- liftIO $ runHandlerSafe req site app
  case res of
    Left err -> do
      writeRes (TestError err)
      return $ Empty
    Right response -> do
      case rspStatus response of
        404 -> return NotFound
        200 -> do
          body <- liftIO $ getResponseBody response
          return $ Html $ decodeUtf8 body
        -- 3xx becomes Redirect (carrying the Location header, or "" if
        -- absent); every other status becomes Other.
        _ -> if (rspStatus response) >= 300 && (rspStatus response) < 400
               then do let url = fromMaybe "" $ getHeader "Location" response
                       return (Redirect (rspStatus response) (decodeUtf8 url))
               else return (Other (rspStatus response))
-- | Runs a GET request (with no parameters).
get :: Text -- ^ The url to request.
    -> SnapTesting b TestResponse
get = flip get' M.empty

-- | Runs a GET request, with a set of parameters.
get' :: Text -- ^ The url to request.
     -> Map ByteString [ByteString] -- ^ The parameters to send.
     -> SnapTesting b TestResponse
get' path ps = runRequest (Test.get (encodeUtf8 path) ps)

-- | Creates a new POST request, with a set of parameters.
post :: Text -- ^ The url to request.
     -> Map ByteString [ByteString] -- ^ The parameters to send.
     -> SnapTesting b TestResponse
post path ps = runRequest (Test.postUrlEncoded (encodeUtf8 path) ps)
-- | A helper to construct parameters.
-- Each (key, value) pair becomes a key with a single-element value
-- list; a duplicated key keeps only its last value ('M.fromList' semantics).
params :: [(ByteString, ByteString)] -- ^ Pairs of parameter and value.
       -> Map ByteString [ByteString]
params = M.fromList . map wrapValue
  where wrapValue (key, value) = (key, [value])
-- | Narrow an Html response to the fragment(s) matched by a CSS
-- selector; non-Html responses pass through unchanged.
restrictPage :: Text -> TestResponse -> TestResponse
restrictPage selector (Html body) = case HXT.runLA (HXT.xshow $ HXT.hread HXT.>>> HS.css (unpack selector)) (unpack body) of
                                      [] -> Html ""
                                      matches -> Html (T.concat (map pack matches))
restrictPage _ r = r

-- | Constructor for CSS selectors
css :: Applicative m => Text -> m CssSelector
css = pure . CssSelector

-- | A constructor for pure values (this is just a synonym for `pure` from `Applicative`).
val :: Applicative m => a -> m a
val = pure
-- | This takes a TestResult and writes it to the test log, so it is processed
-- by the report generators.
should :: SnapTesting b TestResult -> SnapTesting b ()
should test = do res <- test
                 writeRes res

-- | This is similar to `should`, but it asserts that the test should fail, and
-- inverts the corresponding message sentiment.
shouldNot :: SnapTesting b TestResult -> SnapTesting b ()
shouldNot test = do res <- test
                    case res of
                      TestPass msg -> writeRes (TestFail (flipSentiment msg))
                      TestFail msg -> writeRes (TestPass (flipSentiment msg))
                      -- Errors and structural results pass through unchanged.
                      _ -> writeRes res
-- | Assert that a response (which should be Html) has a given selector.
haveSelector :: TestResponse -> CssSelector -> TestResult
haveSelector (Html body) (CssSelector selector) = case HXT.runLA (HXT.hread HXT.>>> HS.css (unpack selector)) (unpack body) of
                                                    [] -> TestFail msg
                                                    _ -> TestPass msg
  where msg = (Positive $ T.concat ["Html contains selector: ", selector, "\n\n", body])
-- Non-Html responses always fail the predicate.
haveSelector _ (CssSelector match) = TestFail (Positive (T.concat ["Body contains css selector: ", match]))
-- | Asserts that a response (which should be Html) has given text.
-- Non-Html responses always fail the predicate.
haveText :: TestResponse -> Text -> TestResult
haveText (Html body) match
  | match `T.isInfixOf` body = TestPass outcome
  | otherwise = TestFail outcome
  where outcome = Positive $ T.concat [body, "' contains '", match, "'."]
haveText _ match = TestFail (Positive (T.concat ["Body contains: ", match]))
-- | Checks that the handler evaluates to the given value.
equal :: (Show a, Eq a)
      => a
      -> a
      -> TestResult
equal a b
  | a == b = TestPass claim
  | otherwise = TestFail claim
  where claim = Positive (T.concat [pack $ show a, " == ", pack $ show b])

-- | Helper to bring the results of other tests into the test suite.
beTrue :: Bool -> TestResult
beTrue True = TestPass (Positive "assertion")
beTrue False = TestFail (Positive "assertion")
-- | A data type for tests against forms.
data FormExpectations a = Value a -- ^ The value the form should take (and should be valid)
                        | ErrorPaths [Text] -- ^ The error paths that should be populated

-- | Test against digestive-functors forms.
form :: (Eq a, Show a)
     => FormExpectations a -- ^ If the form should succeed, Value a is what it should produce.
                           -- If failing, ErrorPaths should be all the errors that are triggered.
     -> DF.Form Text (Handler b b) a -- ^ The form to run
     -> Map Text Text -- ^ The parameters to pass
     -> SnapTesting b ()
form expected theForm theParams =
  do r <- eval $ DF.postForm "form" theForm (const $ return lookupParam)
     case expected of
       Value a -> should $ equal <$> val (snd r) <*> val (Just a)
       ErrorPaths expectedPaths ->
         -- The expected and actual error-path sets must coincide:
         -- membership of each expected path plus equal lengths.
         do let viewErrorPaths = map (DF.fromPath . fst) $ DF.viewErrors $ fst r
            should $ beTrue <$> val (all (`elem` viewErrorPaths) expectedPaths
                                    && (length viewErrorPaths == length expectedPaths))
  where lookupParam pth = case M.lookup (DF.fromPath pth) fixedParams of
                            Nothing -> return []
                            Just v -> return [DF.TextInput v]
        -- digestive-functors prefixes all field paths with the form name.
        fixedParams = M.mapKeys (T.append "form.") theParams
-- | Checks that the given request results in a success (200) code.
succeed :: TestResponse -> TestResult
succeed (Html _) = TestPass (Positive "Request 200s.")
succeed _ = TestFail (Positive "Request 200s.")

-- | Checks that the given request results in a not found (404) code.
notfound :: TestResponse -> TestResult
notfound NotFound = TestPass (Positive "Request 404s.")
notfound _ = TestFail (Positive "Request 404s.")

-- | Checks that the given request results in a redirect (3**) code.
redirect :: TestResponse -> TestResult
redirect (Redirect _ _) = TestPass (Positive "Request redirects.")
redirect _ = TestFail (Positive "Request redirects.")

-- | Checks that the given request results in a redirect to a specific url.
redirectTo :: TestResponse -- ^ Request to run
           -> Text -- ^ URL it should redirect to
           -> TestResult
redirectTo (Redirect _ actual) expected | actual == expected = TestPass (Positive (T.concat ["Redirecting actual: ", actual, " expected: ", expected]))
redirectTo (Redirect _ actual) expected = TestFail (Positive (T.concat ["Redirecting actual: ", actual, " expected: ", expected]))
redirectTo _ expected = TestFail (Positive (T.concat ["Redirects to ", expected]))
-- | Checks that the monadic value given changes by the function specified after the given test block is run.
--
-- For example, if you wanted to make sure that account creation was creating new accounts:
--
-- > changes (+1) countAccounts (post "/auth/new_user" $ params
-- >                             [ ("new_user.name", "Jane")
-- >                             , ("new_user.email", "jdoe@c.com")
-- >                             , ("new_user.password", "foobar")])
changes :: (Show a, Eq a)
        => (a -> a) -- ^ Change function
        -> Handler b b a -- ^ Monadic value
        -> SnapTesting b c -- ^ Test block to run.
        -> SnapTesting b ()
changes delta measure act = do
  before <- eval measure
  _ <- act
  after <- eval measure
  -- Asserts: measure-after == delta (measure-before).
  should $ equal <$> val (delta before) <*> val after
-- | Runs an action after a block of tests, usually used to remove database state.
cleanup :: Handler b b () -- ^ Action to run after tests
        -> SnapTesting b () -- ^ Tests to run
        -> SnapTesting b ()
cleanup cu act = do
  act
  (_, app, _) <- S.get
  -- The cleanup handler runs under a dummy GET request; its result
  -- (including any error) is deliberately discarded.
  _ <- liftIO $ runHandlerSafe (Test.get "" M.empty) cu app
  return ()
-- | Evaluate arbitrary actions
-- (handler errors are re-raised with 'error').
eval :: Handler b b a -- ^ Action to evaluate
     -> SnapTesting b a
eval act = do
  (_, app, _) <- S.get
  liftIO $ fmap (either (error . unpack) id) $ evalHandlerSafe act app

-- | Given a site to site function (like, generating a random user and logging in), run the given block of test with the modified state.
modifySite :: (Handler b b () -> Handler b b ()) -- ^ Site modification function
           -> SnapTesting b a -- ^ Tests to run
           -> SnapTesting b a
modifySite f act = do
  (site, app, out) <- S.get
  S.put (f site, app, out)
  res <- act
  -- Restore the original, unmodified site afterwards.
  S.put (site, app, out)
  return res
-- | Allows you to run a quickcheck test. All 100 test passing counts as a pass, any failure a failure.
-- Currently the reporting is really bad (you don't see what the failing example is).
--
-- NOTE(review): GaveUp (too many discarded cases) is counted as a pass
-- here -- confirm that is intended.
quickCheck :: Testable prop => prop -> SnapTesting b ()
quickCheck p = do
  res <- liftIO $ quickCheckWithResult (stdArgs { chatty = False }) p
  case res of
    Success{} -> writeRes (TestPass (Positive ""))
    GaveUp{} -> writeRes (TestPass (Positive ""))
    Failure{} -> writeRes (TestFail (Positive ""))
    NoExpectedFailure{} -> writeRes (TestFail (Positive ""))
-- Private helpers

-- | Run a handler, converting any exception into a Left message.
runHandlerSafe :: RequestBuilder IO ()
               -> Handler b b v
               -> (Snaplet b, InitializerState b)
               -> IO (Either Text Response)
runHandlerSafe req site (s, is) =
  catch (runHandler' s is req site) (\(e::SomeException) -> return $ Left (pack $ show e))

-- | Evaluate a handler's value (under a dummy GET request), converting
-- any exception into a Left message.
evalHandlerSafe :: Handler b b v
                -> (Snaplet b, InitializerState b)
                -> IO (Either Text v)
evalHandlerSafe act (s, is) =
  catch (evalHandler' s is (Test.get "" M.empty) act) (\(e::SomeException) -> return $ Left (pack $ show e))
| dbp/snap-testing | src/Snap/Test/BDD.hs | bsd-3-clause | 20,296 | 0 | 22 | 6,796 | 4,920 | 2,557 | 2,363 | 331 | 7 |
module W3C.NTripleTest where
import Data.Maybe (fromJust)
import Test.Framework.Providers.API
import Test.Framework.Providers.HUnit
import qualified Test.HUnit as TU
import qualified Data.Text as T
import System.Directory
import W3C.Manifest
import Data.RDF.Types
import Text.RDF.RDF4H.NTriplesParser
import Data.RDF.TriplesGraph
-- Directory (relative to the project root) holding the W3C N-Triples suite.
suiteFilesDir = "data/w3c/n3/"
-- Path of the suite's manifest file inside that directory.
mfPath = T.concat [suiteFilesDir, "manifest.ttl"]
-- Base URI used when resolving relative IRIs from the manifest.
mfBaseURI = BaseUrl "http://www.w3.org/2013/N-TriplesTests/"
-- | Top-level test-framework entry point: the whole W3C N-Triples suite.
tests :: [Test]
tests = [ buildTest allNTripleTests ]
-- | Load the W3C manifest from disk and turn every entry into a test,
-- grouped under the manifest's description.
allNTripleTests :: IO Test
allNTripleTests = do
  cwd <- getCurrentDirectory
  -- Manifest IRIs are resolved against a file:// URI rooted at the suite dir.
  let fileSchemeURI = T.pack (concat ["file://", cwd, "/", T.unpack suiteFilesDir])
  manifest <- loadManifest mfPath fileSchemeURI
  let groupName = T.unpack (description manifest)
      subTests  = [buildTest (mfEntryToTest e) | e <- entries manifest]
  return (testGroup groupName subTests)
-- Functions to map manifest test entries to unit tests.
-- They are defined here to avoid cluttering W3C.Manifest
-- with functions that may not be needed to those who
-- just want to parse Manifest files.
-- TODO: They should probably be moved to W3C.Manifest after all.
-- | Translate a single manifest entry into a runnable unit test.
-- Positive-syntax entries must parse; negative-syntax entries must fail
-- to parse. The two clauses previously duplicated their entire body
-- except for the predicate; the shared logic now lives in 'syntaxTest'.
mfEntryToTest :: TestEntry -> IO Test
mfEntryToTest (TestNTriplesPositiveSyntax nm _ _ act) = syntaxTest nm act isParsed
mfEntryToTest (TestNTriplesNegativeSyntax nm _ _ act) = syntaxTest nm act isNotParsed
mfEntryToTest x = error $ "unknown TestEntry pattern in mfEntryToTest: " ++ show x

-- | Shared driver for the syntax tests: resolve the file-scheme action
-- node to a local path, parse the referenced file, and check the parse
-- outcome with the supplied predicate.
syntaxTest :: T.Text -> Node -> (Either ParseFailure TriplesGraph -> Bool) -> IO Test
syntaxTest nm act' expectOutcome = do
  let act = (UNode . fromJust . fileSchemeToFilePath) act'
  rdf <- parseFile testParser (nodeURI act) :: IO (Either ParseFailure TriplesGraph)
  return $ testCase (T.unpack nm) $ TU.assert $ expectOutcome rdf
-- | True exactly when the parse produced a result ('Right').
isParsed :: Either a b -> Bool
isParsed = either (const False) (const True)
-- | True exactly when the parse failed ('Left').
isNotParsed :: Either a b -> Bool
isNotParsed (Left _)  = True
isNotParsed (Right _) = False
-- | Extract the URI text from a 'UNode'. The previous definition was a
-- partial lambda (@\\(UNode u) -> …@) that crashed with an unhelpful
-- pattern-match failure on any other 'Node' constructor; the failure now
-- carries an explicit message.
nodeURI :: Node -> String
nodeURI (UNode u) = T.unpack u
nodeURI n = error $ "nodeURI: expected a UNode, got: " ++ show n
-- | The parser under test; 'NTriplesParser' carries no configuration.
testParser :: NTriplesParser
testParser = NTriplesParser
| cordawyn/rdf4h | testsuite/tests/W3C/NTripleTest.hs | bsd-3-clause | 2,022 | 0 | 14 | 316 | 585 | 304 | 281 | 41 | 1 |
{-# LANGUAGE TupleSections #-}
module Handler.Home where
import Import
import Handler.Util
import qualified Data.Map.Strict as Map
-- The GET handler displays the form
-- | Render the calculator page: the loan input form (seeded with
-- 'initLoan' and no errors) followed by the abstract/introduction widget.
getHomeR :: Handler Html
getHomeR = do
  -- Generate the form to be displayed
  (widget, enctype) <- generateFormPost $ loanForm initLoan initErrors
  defaultLayout $ displayInputForm MsgCalculator widget enctype >> abstractWidget
-- Default loan used to pre-populate the form: classical loan, all numeric
-- fields zeroed, monthly frequency, truncated rounding.
initLoan = Loan ClClassical 0 0 0 (Just 0) Nothing Nothing Monthly Truncated
-- | Build the loan input form, seeded from an existing 'Loan' and a map of
-- per-field validation errors. Each field's initial value is pulled from
-- the corresponding accessor of the seed loan.
loanForm :: Loan -> LoanErrors -> Html -> MForm Handler (FormResult Loan, Widget)
loanForm l le = renderLoan l le $ Loan
    <$> areq (selectFieldList loans) (mkFieldSettings MsgLoan fieldLoan) (Just $ loanS l)
    <*> areq amountField (mkFieldSettings MsgPrincipal fieldPrincipal) (Just $ principalS l)
    <*> areq durationField (mkFieldSettings MsgDuration fieldDuration) (Just $ durationS l)
    <*> areq rateField (mkFieldSettings MsgInterestRate fieldRate) (Just $ rateS l)
    <*> aopt durationField (mkFieldSettings MsgDeferrment fieldDeferrment) (Just $ delayS l)
    <*> aopt amountField (mkFieldSettings MsgBalloon fieldBalloon) (Just $ balloonS l)
    <*> aopt durationField (mkFieldSettings MsgMaxExtDur fieldExtDur) (Just $ extDurS l)
    <*> areq (radioFieldList freqs) (mkFieldSettings MsgFreq fieldFreq) (Just $ freqS l)
    <*> areq (radioFieldList roundings) (mkFieldSettings MsgRoundingType fieldRound) (Just $ roundingS l)
    where
      -- Option lists: display text is the Show rendering of each value.
      loans :: [(Text, GUIClassic)]
      loans = map (pack . show &&& id) confList
      freqs :: [(Text,Freq)]
      freqs = map (pack . show &&& id) freqList
      roundings :: [(Text,RoundingType)]
      roundings = map (pack . show &&& id) roundingList
-- | Field settings with an i18n label and an explicit DOM id (the id is
-- used by 'isHidden' and the client-side scripts to address the field).
-- The @""@ is an OverloadedStrings 'FieldSettings' with default fields,
-- updated via record syntax.
mkFieldSettings :: AppMessage -> Text -> FieldSettings App
mkFieldSettings msg field = "" {fsLabel = SomeMessage msg
                               ,fsId = Just field}
-- | Decide whether a form row should get the @hide@ CSS class:
-- frequency/rounding rows are always hidden, and the extension-duration
-- and balloon rows are hidden when the selected loan type does not use
-- them — unless the row currently shows a validation error.
isHidden :: GUIClassic -> Text -> Bool -> Bool
isHidden loan fieldId hasErr =
  fieldId `elem` [fieldFreq,fieldRound]
    || (not hasErr && not (isUnfoldedBalloon loan) && fieldId == fieldExtDur)
    || (not hasErr && not (isBalloon loan) && fieldId == fieldBalloon)
renderLoan :: (Show a) => Loan -> LoanErrors -> FormRender Handler a
-- | Render a form into a series of tr tags. Note that, in order to allow
-- you to add extra rows to the table, this function does /not/ wrap up
-- the resulting HTML in a table tag; you must do that yourself.
renderLoan l lErr aform fragment = do
    (res, views') <- aFormToForm aform
    -- views' is a difference list; apply to [] to materialise the views.
    let views = views' []
--        loan = fromMaybe ClClassical $ loanS <$> l
        loan = loanS l
        -- Only offer the CSV download once the form validates and differs
        -- from the pristine initial loan.
        showCSVButton = Map.null lErr && l /= initLoan
    --let isError = any (isJust . fvErrors) views
    let widget = [whamlet|
$newline never
$if null views
    \#{fragment}
<div .span9>
    <table .table>
        $forall (isFirst, view) <- addIsFirst views
            <tr ##{fvId view <> "tr"} :fvRequired view:.required :not $ fvRequired view:.optional :isJust $ fvErrors view:.errors :isHidden loan (fvId view) (isJust $ fvErrors view):.hide>
                <td>
                    $if isFirst
                        \#{fragment}
                    <label ##{fvId view <> "Label"} for=#{fvId view}>#{fvLabel view}
                    $maybe tt <- fvTooltip view
                        <div .tooltip>#{tt}
                <td>^{fvInput view}
                $maybe err <- Map.lookup (fvId view) lErr
                     <p .errors>_{err}
                $maybe err <- fvErrors view
                    <td>#{err}
                $nothing
                    <td ##{fvId view <> "output"} .warnings>
        <tr>
            <td>
                 <a href=# #showParameters>_{MsgShowParameters}
            <td>
                <button .btn .btn-primary .btn-large>_{MsgCalculate}
            <td>
                $if showCSVButton
                    <a href=@{LoanCSVR} .btn .btn-icon download="#{simpleLoanHash l}.csv"><img src=@{StaticR csv_png}> _{MsgDownloadCsv}
<div .span3>
    <div .loan-info-box>
        <h5>
            <img src=@{StaticR help_about_png}> #
            <span #fieldLoanExplanationTitle> #
        <p #fieldLoanExplanation .small> #
|]
    return (res, widget)
    where
      -- Tag each view with whether it is the first one, so the CSRF
      -- fragment is emitted exactly once (in the first row).
      addIsFirst [] = []
      addIsFirst (x:y) = (True, x) : map (False, ) y
-- | Wrap the generated form widget in the page chrome: title heading,
-- intro paragraph and the POSTing @<form>@ element targeting 'LoanR'.
displayInputForm :: AppMessage -> Widget -> Enctype -> Widget
displayInputForm title widget enctype = do
  setTitleI title
  [whamlet|
<h1>
    _{title}
<p>
    _{MsgInitial}
<form method=post action=@{LoanR} enctype=#{enctype}>
    <div .row-fluid .show-gird>
        ^{widget}
|]
-- | Static introduction/abstract section with a link to the PDF paper.
abstractWidget :: Widget
abstractWidget = [whamlet|
<h3>_{MsgYALC}
<p>_{MsgNotExactly}
<p>_{MsgLongText}
<p>
    <a href=@{StaticR haslo_pdf} .btn .btn-large>
        <img src=@{StaticR application_pdf_png}>
        _{MsgDownloadPaper}
|]
| bartoszw/yhaslo | Handler/Home.hs | bsd-3-clause | 5,554 | 0 | 17 | 2,005 | 944 | 489 | 455 | -1 | -1 |
module Chip8.Memory where
import Data.Sized.Unsigned
-- | A 12-bit CHIP-8 memory address.
data Address = Addr U12 deriving (Show)
-- | An 8-bit immediate operand.
data Immediate = Imm U8 deriving (Show)
-- | A 4-bit register index (V0..VF).
data Register = Reg U4 deriving (Show)
| kharland/chip-8 | src/Chip8/memory.hs | mit | 174 | 0 | 6 | 28 | 60 | 35 | 25 | 5 | 0 |
{-# LANGUAGE CPP, DefaultSignatures, EmptyDataDecls, FlexibleInstances,
FunctionalDependencies, KindSignatures, OverlappingInstances,
ScopedTypeVariables, TypeOperators, UndecidableInstances,
ViewPatterns, NamedFieldPuns, FlexibleContexts, PatternGuards,
RecordWildCards #-}
-- |
-- Module: Data.Aeson.Types.Generic
-- Copyright: (c) 2012 Bryan O'Sullivan
-- (c) 2011, 2012 Bas Van Dijk
-- (c) 2011 MailRank, Inc.
-- License: Apache
-- Maintainer: Bryan O'Sullivan <bos@serpentine.com>
-- Stability: experimental
-- Portability: portable
--
-- Types for working with JSON data.
module JavaScript.JSON.Types.Generic ( ) where
import Control.Applicative ((<*>), (<$>), (<|>), pure)
import Control.Monad ((<=<))
import Control.Monad.ST (ST)
import JavaScript.Array (JSArray)
import JavaScript.JSON.Types.Instances
import JavaScript.JSON.Types.Internal
import qualified Data.JSString as JSS
import qualified JavaScript.JSON.Types.Internal as I
import qualified JavaScript.Array as JSA
import qualified JavaScript.Array.ST as JSAST
import Data.Bits
import Data.DList (DList, toList, empty)
import Data.JSString (JSString, pack, unpack)
import Data.Maybe (fromMaybe)
import Data.Monoid (mappend)
-- import Data.Text (Text, pack, unpack)
import GHC.Generics
{-
import qualified Data.HashMap.Strict as H
import qualified Data.Vector as V
import qualified Data.Vector.Mutable as VM
-}
--------------------------------------------------------------------------------
-- Generic toJSON
-- The GToJSON instances below walk the GHC.Generics representation of a
-- datatype (M1/K1/U1/C1/:*:/:+:) and build a JSON 'Value' for it.
instance (GToJSON a) => GToJSON (M1 i c a) where
    -- Meta-information, which is not handled elsewhere, is ignored:
    gToJSON opts = gToJSON opts . unM1
    {-# INLINE gToJSON #-}
instance (ToJSON a) => GToJSON (K1 i a) where
    -- Constant values are encoded using their ToJSON instance:
    gToJSON _opts = toJSON . unK1
    {-# INLINE gToJSON #-}
instance GToJSON U1 where
    -- Empty constructors are encoded to an empty array:
    gToJSON _opts _ = emptyArray
    {-# INLINE gToJSON #-}
instance (ConsToJSON a) => GToJSON (C1 c a) where
    -- Constructors need to be encoded differently depending on whether they're
    -- a record or not. This distinction is made by 'constToJSON':
    gToJSON opts = consToJSON opts . unM1
    {-# INLINE gToJSON #-}
instance ( WriteProduct a, WriteProduct b
         , ProductSize a, ProductSize b ) => GToJSON (a :*: b) where
    -- Products are encoded to an array. Here we allocate a mutable vector of
    -- the same size as the product and write the product's elements to it using
    -- 'writeProduct':
    gToJSON opts p =
        arrayValue $ JSAST.build $ \a ->
          writeProduct opts a 0 lenProduct p
        where
          -- productSize is a type-level computation wrapped in Tagged2.
          lenProduct = (unTagged2 :: Tagged2 (a :*: b) Int -> Int)
                       productSize
    {-# INLINE gToJSON #-}
instance ( AllNullary (a :+: b) allNullary
         , SumToJSON (a :+: b) allNullary ) => GToJSON (a :+: b) where
    -- If all constructors of a sum datatype are nullary and the
    -- 'allNullaryToStringTag' option is set they are encoded to
    -- strings. This distinction is made by 'sumToJSON':
    gToJSON opts = (unTagged :: Tagged allNullary Value -> Value)
                 . sumToJSON opts
    {-# INLINE gToJSON #-}
--------------------------------------------------------------------------------
-- Encoding of sum types. The 'allNullary' phantom type (True/False below)
-- records, at the type level, whether every constructor is nullary.
class SumToJSON f allNullary where
    sumToJSON :: Options -> f a -> Tagged allNullary Value
instance ( GetConName f
         , TaggedObject f
         , ObjectWithSingleField f
         , TwoElemArray f ) => SumToJSON f True where
    sumToJSON opts
        | allNullaryToStringTag opts = Tagged . stringValue . pack
                                       . constructorTagModifier opts . getConName
        | otherwise = Tagged . nonAllNullarySumToJSON opts
    {-# INLINE sumToJSON #-}
instance ( TwoElemArray f
         , TaggedObject f
         , ObjectWithSingleField f ) => SumToJSON f False where
    sumToJSON opts = Tagged . nonAllNullarySumToJSON opts
    {-# INLINE sumToJSON #-}
-- | Dispatch on the 'sumEncoding' option to pick the sum representation.
nonAllNullarySumToJSON :: ( TwoElemArray f
                          , TaggedObject f
                          , ObjectWithSingleField f
                          ) => Options -> f a -> Value
nonAllNullarySumToJSON opts =
    case sumEncoding opts of
      TaggedObject{..} -> objectValue . object . taggedObject opts tagFieldName
                                                     contentsFieldName
      ObjectWithSingleField -> objectValue . objectWithSingleField opts
      TwoElemArray -> arrayValue . twoElemArray opts
{-# INLINE nonAllNullarySumToJSON #-}
-- TaggedObject encoding: {"tag": <constructor>, "contents"/fields: ...}
class TaggedObject f where
    taggedObject :: Options -> String -> String -> f a -> [Pair]
instance ( TaggedObject a
         , TaggedObject b ) => TaggedObject (a :+: b) where
    taggedObject opts tagFieldName contentsFieldName (L1 x) =
        taggedObject opts tagFieldName contentsFieldName x
    taggedObject opts tagFieldName contentsFieldName (R1 x) =
        taggedObject opts tagFieldName contentsFieldName x
    {-# INLINE taggedObject #-}
instance ( IsRecord a isRecord
         , TaggedObject' a isRecord
         , Constructor c ) => TaggedObject (C1 c a) where
    -- Prepend the tag pair, then append the record fields (or the
    -- contents field for non-records, decided by TaggedObject').
    taggedObject opts tagFieldName contentsFieldName =
        (pack tagFieldName .= constructorTagModifier opts
                                 (conName (undefined :: t c a p)) :) .
        (unTagged :: Tagged isRecord [Pair] -> [Pair]) .
          taggedObject' opts contentsFieldName . unM1
    {-# INLINE taggedObject #-}
class TaggedObject' f isRecord where
    taggedObject' :: Options -> String -> f a -> Tagged isRecord [Pair]
instance (RecordToPairs f) => TaggedObject' f True where
    taggedObject' opts _ = Tagged . toList . recordToPairs opts
    {-# INLINE taggedObject' #-}
instance (GToJSON f) => TaggedObject' f False where
    taggedObject' opts contentsFieldName =
        Tagged . (:[]) . (pack contentsFieldName .=) . gToJSON opts
    {-# INLINE taggedObject' #-}
--------------------------------------------------------------------------------
-- | Get the name of the constructor of a sum datatype.
class GetConName f where
    getConName :: f a -> String
instance (GetConName a, GetConName b) => GetConName (a :+: b) where
    getConName (L1 x) = getConName x
    getConName (R1 x) = getConName x
    {-# INLINE getConName #-}
instance (Constructor c, GToJSON a, ConsToJSON a) => GetConName (C1 c a) where
    getConName = conName
    {-# INLINE getConName #-}
--------------------------------------------------------------------------------
-- TwoElemArray encoding: [<constructor tag>, <contents>].
class TwoElemArray f where
    twoElemArray :: Options -> f a -> JSArray -- V.Vector Value
instance (TwoElemArray a, TwoElemArray b) => TwoElemArray (a :+: b) where
    twoElemArray opts (L1 x) = twoElemArray opts x
    twoElemArray opts (R1 x) = twoElemArray opts x
    {-# INLINE twoElemArray #-}
instance ( GToJSON a, ConsToJSON a
         , Constructor c ) => TwoElemArray (C1 c a) where
    twoElemArray opts x = arrayValueList
      [ stringValue $ JSS.pack $ constructorTagModifier opts $ conName (undefined :: t c a p)
      , gToJSON opts x
      ]
    {-# INLINE twoElemArray #-}
--------------------------------------------------------------------------------
-- Encoding of a single constructor: records become objects, everything
-- else defers back to gToJSON. The isRecord phantom selects the branch.
class ConsToJSON f where
    consToJSON :: Options -> f a -> Value
class ConsToJSON' f isRecord where
    consToJSON' :: Options -> f a -> Tagged isRecord Value
instance ( IsRecord f isRecord
         , ConsToJSON' f isRecord ) => ConsToJSON f where
    consToJSON opts = (unTagged :: Tagged isRecord Value -> Value)
                    . consToJSON' opts
    {-# INLINE consToJSON #-}
instance (RecordToPairs f) => ConsToJSON' f True where
    consToJSON' opts = Tagged . objectValue . object . toList . recordToPairs opts
    {-# INLINE consToJSON' #-}
instance GToJSON f => ConsToJSON' f False where
    consToJSON' opts = Tagged . gToJSON opts
    {-# INLINE consToJSON' #-}
--------------------------------------------------------------------------------
-- Record fields are collected into a difference list of key/value pairs.
class RecordToPairs f where
    recordToPairs :: Options -> f a -> DList Pair
instance (RecordToPairs a, RecordToPairs b) => RecordToPairs (a :*: b) where
    recordToPairs opts (a :*: b) = recordToPairs opts a `mappend`
                                   recordToPairs opts b
    {-# INLINE recordToPairs #-}
instance (Selector s, GToJSON a) => RecordToPairs (S1 s a) where
    recordToPairs = fieldToPair
    {-# INLINE recordToPairs #-}
instance (Selector s, ToJSON a) => RecordToPairs (S1 s (K1 i (Maybe a))) where
    -- Maybe fields may be omitted entirely when omitNothingFields is set.
    recordToPairs opts (M1 k1) | omitNothingFields opts
                               , K1 Nothing <- k1 = empty
    recordToPairs opts m1 = fieldToPair opts m1
    {-# INLINE recordToPairs #-}
fieldToPair :: (Selector s, GToJSON a) => Options -> S1 s a p -> DList Pair
fieldToPair opts m1 = pure ( pack $ fieldLabelModifier opts $ selName m1
                           , gToJSON opts (unM1 m1)
                           )
{-# INLINE fieldToPair #-}
--------------------------------------------------------------------------------
-- Write the elements of a product into a pre-sized mutable JS array by
-- recursively halving the index range (binary splitting over :*:).
class WriteProduct f where
    writeProduct :: Options
                 -> JSAST.STJSArray s
                 -> Int -- ^ index
                 -> Int -- ^ length
                 -> f a
                 -> ST s ()
instance ( WriteProduct a
         , WriteProduct b ) => WriteProduct (a :*: b) where
    writeProduct opts mv ix len (a :*: b) = do
      writeProduct opts mv ix lenL a
      writeProduct opts mv ixR lenR b
        where
#if MIN_VERSION_base(4,5,0)
          lenL = len `unsafeShiftR` 1
#else
          lenL = len `shiftR` 1
#endif
          lenR = len - lenL
          ixR = ix + lenL
    {-# INLINE writeProduct #-}
instance (GToJSON a) => WriteProduct a where
    -- Leaf case: encode the element and write it at the given index.
    writeProduct opts mv ix _ = (\(SomeValue v) -> JSAST.write ix v mv) . gToJSON opts
    {-# INLINE writeProduct #-}
--------------------------------------------------------------------------------
-- ObjectWithSingleField encoding: {<constructor tag>: <contents>}.
class ObjectWithSingleField f where
    objectWithSingleField :: Options -> f a -> Object
instance ( ObjectWithSingleField a
         , ObjectWithSingleField b ) => ObjectWithSingleField (a :+: b) where
    objectWithSingleField opts (L1 x) = objectWithSingleField opts x
    objectWithSingleField opts (R1 x) = objectWithSingleField opts x
    {-# INLINE objectWithSingleField #-}
instance ( GToJSON a, ConsToJSON a
         , Constructor c ) => ObjectWithSingleField (C1 c a) where
    objectWithSingleField opts x = I.object [(typ, gToJSON opts x)]
        where
          typ = pack $ constructorTagModifier opts $
                         conName (undefined :: t c a p)
    {-# INLINE objectWithSingleField #-}
--------------------------------------------------------------------------------
-- Generic parseJSON
-- Mirror images of the GToJSON instances above: walk the generic
-- representation and parse a 'Value' back into it.
instance (GFromJSON a) => GFromJSON (M1 i c a) where
    -- Meta-information, which is not handled elsewhere, is just added to the
    -- parsed value:
    gParseJSON opts = fmap M1 . gParseJSON opts
    {-# INLINE gParseJSON #-}
instance (FromJSON a) => GFromJSON (K1 i a) where
    -- Constant values are decoded using their FromJSON instance:
    gParseJSON _opts = fmap K1 . parseJSON
    {-# INLINE gParseJSON #-}
instance GFromJSON U1 where
    -- Empty constructors are expected to be encoded as an empty array:
    gParseJSON _opts v
        | isEmptyArray v = pure U1
        | otherwise = typeMismatch "unit constructor (U1)" v
    {-# INLINE gParseJSON #-}
instance (ConsFromJSON a) => GFromJSON (C1 c a) where
    -- Constructors need to be decoded differently depending on whether they're
    -- a record or not. This distinction is made by consParseJSON:
    gParseJSON opts = fmap M1 . consParseJSON opts
    {-# INLINE gParseJSON #-}
instance ( FromProduct a, FromProduct b
         , ProductSize a, ProductSize b ) => GFromJSON (a :*: b) where
    -- Products are expected to be encoded to an array. Here we check whether we
    -- got an array of the same size as the product, then parse each of the
    -- product's elements using parseProduct:
    gParseJSON opts = withArray "product (:*:)" $ \arr ->
      let lenArray = JSA.length arr
          lenProduct = (unTagged2 :: Tagged2 (a :*: b) Int -> Int)
                       productSize in
      if lenArray == lenProduct
      then parseProduct opts arr 0 lenProduct
      else fail $ "When expecting a product of " ++ show lenProduct ++
                  " values, encountered an Array of " ++ show lenArray ++
                  " elements instead"
    {-# INLINE gParseJSON #-}
instance ( AllNullary (a :+: b) allNullary
         , ParseSum (a :+: b) allNullary ) => GFromJSON (a :+: b) where
    -- If all constructors of a sum datatype are nullary and the
    -- 'allNullaryToStringTag' option is set they are expected to be
    -- encoded as strings. This distinction is made by 'parseSum':
    gParseJSON opts = (unTagged :: Tagged allNullary (Parser ((a :+: b) d)) ->
                                                     (Parser ((a :+: b) d)))
                    . parseSum opts
    {-# INLINE gParseJSON #-}
--------------------------------------------------------------------------------
class ParseSum f allNullary where
    parseSum :: Options -> Value -> Tagged allNullary (Parser (f a))
instance ( SumFromString (a :+: b)
         , FromPair (a :+: b)
         , FromTaggedObject (a :+: b) ) => ParseSum (a :+: b) True where
    parseSum opts
        | allNullaryToStringTag opts = Tagged . parseAllNullarySum opts
        | otherwise = Tagged . parseNonAllNullarySum opts
    {-# INLINE parseSum #-}
instance ( FromPair (a :+: b)
         , FromTaggedObject (a :+: b) ) => ParseSum (a :+: b) False where
    parseSum opts = Tagged . parseNonAllNullarySum opts
    {-# INLINE parseSum #-}
--------------------------------------------------------------------------------
-- All-nullary sums are encoded as plain strings carrying the tag.
parseAllNullarySum :: SumFromString f => Options -> Value -> Parser (f a)
parseAllNullarySum opts = withJSString "Text" $ \key ->
                            maybe (notFound $ unpack key) return $
                              parseSumFromString opts key
{-# INLINE parseAllNullarySum #-}
class SumFromString f where
    parseSumFromString :: Options -> JSString -> Maybe (f a)
instance (SumFromString a, SumFromString b) => SumFromString (a :+: b) where
    -- Try the left branch first, then the right.
    parseSumFromString opts key = (L1 <$> parseSumFromString opts key) <|>
                                  (R1 <$> parseSumFromString opts key)
    {-# INLINE parseSumFromString #-}
instance (Constructor c) => SumFromString (C1 c U1) where
    parseSumFromString opts key | key == name = Just $ M1 U1
                                | otherwise = Nothing
        where
          name = pack $ constructorTagModifier opts $
                          conName (undefined :: t c U1 p)
    {-# INLINE parseSumFromString #-}
--------------------------------------------------------------------------------
-- | Decode a non-all-nullary sum, dispatching on the 'sumEncoding' option
-- (mirrors 'nonAllNullarySumToJSON' on the encoding side).
parseNonAllNullarySum :: ( FromPair (a :+: b)
                         , FromTaggedObject (a :+: b)
                         ) => Options -> Value -> Parser ((a :+: b) c)
parseNonAllNullarySum opts =
    case sumEncoding opts of
      TaggedObject{..} ->
          withObject "Object" $ \obj -> do
            tag <- obj .: pack tagFieldName
            fromMaybe (notFound $ unpack tag) $
              parseFromTaggedObject opts contentsFieldName obj tag
      ObjectWithSingleField ->
          withObject "Object" $ \obj ->
            case objectAssocs obj of
              [pair@(tag, _)] -> fromMaybe (notFound $ unpack tag) $
                                   parsePair opts pair
              _ -> fail "Object doesn't have a single field"
      TwoElemArray ->
          withArray "Array" $ \arr ->
            if JSA.length arr == 2
            then case match (indexV arr 0) of
                   String tag -> fromMaybe (notFound $ unpack tag) $
                                   parsePair opts (tag, indexV arr 1)
                   _ -> fail "First element is not a String"
            else fail "Array doesn't have 2 elements"
{-# INLINE parseNonAllNullarySum #-}
--------------------------------------------------------------------------------
-- TaggedObject decoding: find the constructor matching the tag field.
class FromTaggedObject f where
    parseFromTaggedObject :: Options -> String -> Object -> JSString
                          -> Maybe (Parser (f a))
instance (FromTaggedObject a, FromTaggedObject b) =>
    FromTaggedObject (a :+: b) where
        parseFromTaggedObject opts contentsFieldName obj tag =
            (fmap L1 <$> parseFromTaggedObject opts contentsFieldName obj tag) <|>
            (fmap R1 <$> parseFromTaggedObject opts contentsFieldName obj tag)
        {-# INLINE parseFromTaggedObject #-}
instance ( FromTaggedObject' f
         , Constructor c ) => FromTaggedObject (C1 c f) where
    parseFromTaggedObject opts contentsFieldName obj tag
        | tag == name = Just $ M1 <$> parseFromTaggedObject'
                                        opts contentsFieldName obj
        | otherwise = Nothing
        where
          name = pack $ constructorTagModifier opts $
                          conName (undefined :: t c f p)
    {-# INLINE parseFromTaggedObject #-}
--------------------------------------------------------------------------------
class FromTaggedObject' f where
    parseFromTaggedObject' :: Options -> String -> Object -> Parser (f a)
class FromTaggedObject'' f isRecord where
    parseFromTaggedObject'' :: Options -> String -> Object
                            -> Tagged isRecord (Parser (f a))
instance ( IsRecord f isRecord
         , FromTaggedObject'' f isRecord
         ) => FromTaggedObject' f where
    parseFromTaggedObject' opts contentsFieldName =
        (unTagged :: Tagged isRecord (Parser (f a)) -> Parser (f a)) .
        parseFromTaggedObject'' opts contentsFieldName
    {-# INLINE parseFromTaggedObject' #-}
instance (FromRecord f) => FromTaggedObject'' f True where
    -- Records read their fields straight from the tagged object.
    parseFromTaggedObject'' opts _ = Tagged . parseRecord opts
    {-# INLINE parseFromTaggedObject'' #-}
instance (GFromJSON f) => FromTaggedObject'' f False where
    -- Non-records read their payload from the contents field.
    parseFromTaggedObject'' opts contentsFieldName = Tagged .
        (gParseJSON opts <=< (.: pack contentsFieldName))
    {-# INLINE parseFromTaggedObject'' #-}
--------------------------------------------------------------------------------
-- Decoding of a single constructor; isRecord selects object vs. plain.
class ConsFromJSON f where
    consParseJSON :: Options -> Value -> Parser (f a)
class ConsFromJSON' f isRecord where
    consParseJSON' :: Options -> Value -> Tagged isRecord (Parser (f a))
instance ( IsRecord f isRecord
         , ConsFromJSON' f isRecord
         ) => ConsFromJSON f where
    consParseJSON opts = (unTagged :: Tagged isRecord (Parser (f a)) -> Parser (f a))
                       . consParseJSON' opts
    {-# INLINE consParseJSON #-}
instance (FromRecord f) => ConsFromJSON' f True where
    consParseJSON' opts = Tagged . (withObject "record (:*:)" $ parseRecord opts)
    {-# INLINE consParseJSON' #-}
instance (GFromJSON f) => ConsFromJSON' f False where
    consParseJSON' opts = Tagged . gParseJSON opts
    {-# INLINE consParseJSON' #-}
--------------------------------------------------------------------------------
class FromRecord f where
    parseRecord :: Options -> Object -> Parser (f a)
instance (FromRecord a, FromRecord b) => FromRecord (a :*: b) where
    parseRecord opts obj = (:*:) <$> parseRecord opts obj
                                 <*> parseRecord opts obj
    {-# INLINE parseRecord #-}
instance (Selector s, GFromJSON a) => FromRecord (S1 s a) where
    parseRecord opts = maybe (notFound label) (gParseJSON opts)
                     . I.lookup (pack label)
        where
          label = fieldLabelModifier opts $ selName (undefined :: t s a p)
    {-# INLINE parseRecord #-}
instance (Selector s, FromJSON a) => FromRecord (S1 s (K1 i (Maybe a))) where
    -- Maybe fields tolerate a missing key (decoded as Nothing via .:?).
    parseRecord opts obj = (M1 . K1) <$> obj .:? pack label
        where
          label = fieldLabelModifier opts $
                    selName (undefined :: t s (K1 i (Maybe a)) p)
    {-# INLINE parseRecord #-}
--------------------------------------------------------------------------------
-- Type-level count of the fields in a product (used to size arrays).
class ProductSize f where
    productSize :: Tagged2 f Int
instance (ProductSize a, ProductSize b) => ProductSize (a :*: b) where
    productSize = Tagged2 $ unTagged2 (productSize :: Tagged2 a Int) +
                            unTagged2 (productSize :: Tagged2 b Int)
    {-# INLINE productSize #-}
instance ProductSize (S1 s a) where
    productSize = Tagged2 1
    {-# INLINE productSize #-}
--------------------------------------------------------------------------------
-- Parse the elements of a product out of a JS array by binary splitting,
-- mirroring 'WriteProduct' on the encoding side.
class FromProduct f where
    parseProduct :: Options -> JSArray -> Int -> Int -> Parser (f a)
instance (FromProduct a, FromProduct b) => FromProduct (a :*: b) where
    parseProduct opts arr ix len =
        (:*:) <$> parseProduct opts arr ix lenL
              <*> parseProduct opts arr ixR lenR
        where
#if MIN_VERSION_base(4,5,0)
          lenL = len `unsafeShiftR` 1
#else
          lenL = len `shiftR` 1
#endif
          ixR = ix + lenL
          lenR = len - lenL
    {-# INLINE parseProduct #-}
instance (GFromJSON a) => FromProduct (S1 s a) where
    parseProduct opts arr ix _ = gParseJSON opts $ indexV arr ix
    {-# INLINE parseProduct #-}
--------------------------------------------------------------------------------
-- ObjectWithSingleField decoding: match the single (tag, value) pair.
class FromPair f where
    parsePair :: Options -> Pair -> Maybe (Parser (f a))
instance (FromPair a, FromPair b) => FromPair (a :+: b) where
    parsePair opts pair = (fmap L1 <$> parsePair opts pair) <|>
                          (fmap R1 <$> parsePair opts pair)
    {-# INLINE parsePair #-}
instance (Constructor c, GFromJSON a, ConsFromJSON a) => FromPair (C1 c a) where
    parsePair opts (tag, value)
        | tag == tag' = Just $ gParseJSON opts value
        | otherwise = Nothing
        where
          tag' = pack $ constructorTagModifier opts $
                          conName (undefined :: t c a p)
    {-# INLINE parsePair #-}
--------------------------------------------------------------------------------
-- Type-level predicate: is this constructor a record (has selectors)?
-- Computed with a functional dependency; no methods needed.
class IsRecord (f :: * -> *) isRecord | f -> isRecord
instance (IsRecord f isRecord) => IsRecord (f :*: g) isRecord
instance IsRecord (M1 S NoSelector f) False
instance (IsRecord f isRecord) => IsRecord (M1 S c f) isRecord
instance IsRecord (K1 i c) True
instance IsRecord U1 False
--------------------------------------------------------------------------------
-- Type-level predicate: are all constructors of the sum nullary?
class AllNullary (f :: * -> *) allNullary | f -> allNullary
instance ( AllNullary a allNullaryL
         , AllNullary b allNullaryR
         , And allNullaryL allNullaryR allNullary
         ) => AllNullary (a :+: b) allNullary
instance AllNullary a allNullary => AllNullary (M1 i c a) allNullary
instance AllNullary (a :*: b) False
instance AllNullary (K1 i c) False
instance AllNullary U1 True
--------------------------------------------------------------------------------
-- Type-level booleans and conjunction for the predicates above.
data True
data False
class And bool1 bool2 bool3 | bool1 bool2 -> bool3
instance And True True True
instance And False False False
instance And False True False
instance And True False False
--------------------------------------------------------------------------------
-- Phantom-tagged wrappers used to carry type-level results at value level.
newtype Tagged s b = Tagged {unTagged :: b}
newtype Tagged2 (s :: * -> *) b = Tagged2 {unTagged2 :: b}
--------------------------------------------------------------------------------
-- | Fail the parse with a standard "key not found" message.
notFound :: String -> Parser a
notFound key = fail (concat ["The key \"", key, "\" was not found"])
{-# INLINE notFound #-}
| tavisrudd/ghcjs-base | JavaScript/JSON/Types/Generic.hs | mit | 23,847 | 0 | 18 | 6,196 | 5,727 | 2,965 | 2,762 | -1 | -1 |
{- |
Module : Main
Description : Main module for the haskell plugin provider
Copyright : (c) Sebastian Witte
License : Apache-2.0
Maintainer : woozletoff@gmail.com
Stability : experimental
-}
module Main where
import Neovim (neovim, defaultConfig)
-- | Start the nvim-hs plugin provider with the default configuration.
main :: IO ()
main = neovim defaultConfig
| lslah/nvim-hs | executable/Main.hs | apache-2.0 | 330 | 0 | 6 | 84 | 34 | 20 | 14 | 4 | 1 |
-- Copyright (c) 2014 Eric McCorkle.
--
-- This program is free software; you can redistribute it and/or
-- modify it under the terms of the GNU General Public License as
-- published by the Free Software Foundation; either version 2 of the
-- License, or (at your option) any later version.
--
-- This program is distributed in the hope that it will be useful, but
-- WITHOUT ANY WARRANTY; without even the implied warranty of
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-- General Public License for more details.
--
-- You should have received a copy of the GNU General Public License
-- along with this program; if not, write to the Free Software
-- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
-- 02110-1301 USA
-- | A module with utility code for creating scratch directories.
module Test.Utils.ScratchDirs(
prepareScratchDir
) where
import System.Directory
-- Name (relative to the working directory) of the per-test scratch dir.
scratchDirName :: FilePath
scratchDirName = "scratch"
-- | Prepare the scratch directory for a test, creating and clearing it.
-- Any leftover directory from a previous run is removed first so every
-- test starts empty; returns the (relative) scratch directory path.
prepareScratchDir :: IO FilePath
prepareScratchDir =
  do
    exists <- doesDirectoryExist scratchDirName
    -- Clear a stale scratch dir, then recreate it. The create/return
    -- steps were previously duplicated in both branches of the 'if'.
    if exists
      then removeDirectoryRecursive scratchDirName
      else return ()
    createDirectory scratchDirName
    return scratchDirName
| emc2/saltlang | test/Test/Utils/ScratchDirs.hs | bsd-3-clause | 1,392 | 0 | 10 | 286 | 109 | 62 | 47 | 17 | 2 |
module Hint.InterpreterT (
InterpreterT, Interpreter, runInterpreter, runInterpreterWithArgs,
MultipleInstancesNotAllowed(..)
)
where
import Prelude
import Hint.Base
import Hint.Context
import Hint.Configuration
import Hint.Extension
import Control.Applicative
import Control.Monad.Reader
import Control.Monad.Catch as MC
import Data.Typeable ( Typeable )
import Control.Concurrent.MVar
import System.IO.Unsafe ( unsafePerformIO )
import Data.IORef
import Data.List
import Data.Maybe
#if __GLASGOW_HASKELL__ < 610
import Data.Dynamic
#endif
import qualified GHC.Paths
import qualified Hint.GHC as GHC
import qualified Hint.Compat as Compat
-- | The interpreter specialised to IO (the common case).
type Interpreter = InterpreterT IO
#if __GLASGOW_HASKELL__ < 610
-- Pre-6.10 GHC has no GhcT transformer: stack ReaderT over ErrorT and
-- talk to the GHC session stored in the InterpreterSession instead.
newtype InterpreterT m a = InterpreterT{
                             unInterpreterT :: ReaderT InterpreterSession
                                               (ErrorT InterpreterError m) a}
    deriving (Functor, Monad, MonadIO, MonadThrow,MonadCatch,MonadMask)
execute :: (MonadIO m, MonadMask m, Functor m)
        => InterpreterSession
        -> InterpreterT m a
        -> m (Either InterpreterError a)
execute s = runErrorT . flip runReaderT s . unInterpreterT
instance MonadTrans InterpreterT where
    lift = InterpreterT . lift . lift
runGhc_impl :: (MonadIO m, MonadThrow m, MonadMask m, Functor m) => RunGhc (InterpreterT m) a
runGhc_impl f = do s <- fromSession versionSpecific -- i.e. the ghc session
                   r <- liftIO $ f' s
                   either throwError return r
    -- GHC throws DynExceptions here; unwrap them into GhcException errors.
    where f' = tryJust (fmap (GhcException . showGhcEx) . ghcExceptions) . f
          ghcExceptions (DynException e) = fromDynamic e
          ghcExceptions _ = Nothing
#else
-- ghc >= 6.10
-- From 6.10 on, GHC provides GhcT; stack ReaderT directly over it.
newtype InterpreterT m a = InterpreterT{
                             unInterpreterT :: ReaderT InterpreterSession
                                               (GHC.GhcT m) a}
    deriving (Functor, Monad, MonadIO, MonadThrow, MonadCatch, MonadMask)
execute :: (MonadIO m, MonadMask m, Functor m)
        => InterpreterSession
        -> InterpreterT m a
        -> m (Either InterpreterError a)
execute s = try
          . GHC.runGhcT (Just GHC.Paths.libdir)
          . flip runReaderT s
          . unInterpreterT
instance MonadTrans InterpreterT where
    lift = InterpreterT . lift . lift
runGhc_impl :: (MonadIO m, MonadThrow m, MonadMask m, Functor m) => RunGhc (InterpreterT m) a
runGhc_impl a =
    InterpreterT (lift a)
      `catches`
      -- Translate the various GHC-API exception types into this
      -- library's InterpreterError values.
      [Handler (\(e :: GHC.SourceError) -> do
                  dynFlags <- runGhc GHC.getSessionDynFlags
                  throwM $ compilationError dynFlags e)
      ,Handler (\(e :: GHC.GhcApiError) -> throwM $ GhcException $ show e)
      ,Handler (\(e :: GHC.GhcException) -> throwM $ GhcException $ showGhcEx e)
      ]
    where
      compilationError dynFlags
        = WontCompile
#if __GLASGOW_HASKELL__ >= 706
        . map (GhcError . GHC.showSDoc dynFlags)
#else
        . map (GhcError . GHC.showSDoc)
#endif
        . GHC.pprErrMsgBagWithLoc
        . GHC.srcErrorMessages
#endif
-- | Render a GhcException with GHC's own printer (empty trailing string).
showGhcEx :: GHC.GhcException -> String
showGhcEx e = GHC.showGhcException e ""
-- ================= Executing the interpreter ==================
-- | Configure the fresh GHC session: install the error-logging hook,
-- parse and apply the user-supplied dynamic flags, and record the
-- default state of the language extensions for later queries.
initialize :: (MonadIO m, MonadThrow m, MonadMask m, Functor m)
           => [String]
           -> InterpreterT m ()
initialize args =
    do log_handler <- fromSession ghcErrLogger
       -- Set a custom log handler, to intercept error messages :S
       df0 <- runGhc GHC.getSessionDynFlags
       let df1 = Compat.configureDynFlags df0
       (df2, extra) <- runGhc2 Compat.parseDynamicFlags df1 args
       -- Any argument GHC did not consume is reported as an error.
       when (not . null $ extra) $
            throwM $ UnknownError (concat [ "flags: '"
                                          , intercalate " " extra
                                          , "' not recognized"])
       -- Observe that, setSessionDynFlags loads info on packages
       -- available; calling this function once is mandatory!
       _ <- runGhc1 GHC.setSessionDynFlags df2{GHC.log_action = log_handler}
#if __GLASGOW_HASKELL__ >= 700
#if __GLASGOW_HASKELL__ >= 702
#if __GLASGOW_HASKELL__ >= 710
       -- The shape of GHC.xFlags changed across GHC versions; each branch
       -- extracts (extension name, flag) pairs from that version's tuple.
       let extMap = map (\fs -> (GHC.flagSpecName fs, GHC.flagSpecFlag fs)) GHC.xFlags
#elif __GLASGOW_HASKELL__ >= 704
       let extMap = map (\(a,b,_) -> (a,b)) GHC.xFlags
#else
       let extMap = map (\(a,_,b,_) -> (a,b)) GHC.xFlags
#endif
#else
       let extMap = map (\(a,b,_) -> (a,b)) GHC.xFlags
#endif
       let toOpt e = let err = error ("init error: unknown ext:" ++ show e)
                     in fromMaybe err (lookup e extMap)
       let getOptVal e = (asExtension e, GHC.xopt (toOpt e) df2)
       let defExts = map getOptVal Compat.supportedExtensions
#else
       let defExts = zip availableExtensions (repeat False)
#endif
       onState (\s -> s{defaultExts = defExts})
       reset
-- | Executes the interpreter. Returns @Left InterpreterError@ in case of error.
--
-- NB. The underlying ghc will overwrite certain signal handlers
-- (SIGINT, SIGHUP, SIGTERM, SIGQUIT on Posix systems, Ctrl-C handler on Windows).
-- In future versions of hint, this might be controlled by the user.
runInterpreter :: (MonadIO m, MonadMask m, Functor m)
               => InterpreterT m a
               -> m (Either InterpreterError a)
runInterpreter = runInterpreterWithArgs [] -- no extra ghc flags
-- | Executes the interpreter, setting args passed in as though they
-- were command-line args. Returns @Left InterpreterError@ in case of
-- error.
runInterpreterWithArgs :: (MonadIO m, MonadMask m, Functor m)
                       => [String]
                       -> InterpreterT m a
                       -> m (Either InterpreterError a)
runInterpreterWithArgs args action =
    ifInterpreterNotRunning $
      do s <- newInterpreterSession `MC.catch` rethrowGhcException
         -- SH.protectHandlers $ execute s (initialize args >> action)
         -- phantom modules are cleaned even when the action fails
         execute s (initialize args >> action `finally` cleanSession)
    where rethrowGhcException = throwM . GhcException . showGhcEx
#if __GLASGOW_HASKELL__ < 610
          newInterpreterSession = do s <- liftIO $
                                            Compat.newSession GHC.Paths.libdir
                                     newSessionData s
          cleanSession = cleanPhantomModules -- clean ghc session, too?
#else
          -- GHC >= 610
          newInterpreterSession = newSessionData ()
          cleanSession =
            do cleanPhantomModules
               runGhc $ do dflags <- GHC.getSessionDynFlags
                           GHC.defaultCleanupHandler dflags (return ())
#endif
{-# NOINLINE uniqueToken #-}
-- Process-wide lock ensuring a single interpreter instance; NOINLINE keeps
-- the unsafePerformIO'd MVar shared (standard top-level-MVar idiom).
uniqueToken :: MVar ()
uniqueToken = unsafePerformIO $ newMVar ()
-- | Run the action while holding the global interpreter token; if the
-- token is already taken, fail with 'MultipleInstancesNotAllowed'.  The
-- token is put back even when the action throws.
ifInterpreterNotRunning :: (MonadIO m, MonadMask m) => m a -> m a
ifInterpreterNotRunning action = do
    acquired <- liftIO $ tryTakeMVar uniqueToken
    case acquired of
      Nothing    -> throwM MultipleInstancesNotAllowed
      Just token -> action `finally` liftIO (putMVar uniqueToken token)
-- | The installed version of ghc is not thread-safe. This exception
-- is thrown whenever you try to execute @runInterpreter@ while another
-- instance is already running.
-- | Thrown when a second concurrent interpreter instance is attempted;
-- see 'ifInterpreterNotRunning'.
data MultipleInstancesNotAllowed = MultipleInstancesNotAllowed deriving Typeable
instance Exception MultipleInstancesNotAllowed
instance Show MultipleInstancesNotAllowed where
    -- NB: the two literals are concatenated; the trailing space after the
    -- comma is required or the message reads "thread-safe,can't".
    show _ = "This version of GHC is not thread-safe, " ++
             "can't safely run two instances of the interpreter simultaneously"
-- Fresh interpreter state.  Fields that must be filled in by 'initialize'
-- are deliberately 'error' placeholders so premature use fails loudly.
initialState :: InterpreterState
initialState = St {active_phantoms = [],
                   zombie_phantoms = [],
                   hint_support_module = error "No support module loaded!",
                   import_qual_hack_mod = Nothing,
                   qual_imports = [],
                   defaultExts = error "defaultExts missing!",
                   configuration = defaultConf}
-- | Allocate the mutable refs for a fresh session and wire the GHC error
-- logger to the shared error list.
newSessionData :: MonadIO m => a -> m (SessionData a)
newSessionData versionData = do
    stateRef   <- liftIO $ newIORef initialState
    errListRef <- liftIO $ newIORef []
    return SessionData
      { internalState   = stateRef
      , versionSpecific = versionData
      , ghcErrListRef   = errListRef
      , ghcErrLogger    = mkLogHandler errListRef
      }
mkLogHandler :: IORef [GhcError] -> GhcErrLogger
-- Log handler that prepends each GHC message to the shared IORef; read
-- back after a failed compilation.  The handler's arity differs across
-- GHC versions, hence the CPP.
mkLogHandler r =
#if __GLASGOW_HASKELL__ < 706
    \_ src style msg ->
    let renderErrMsg = Compat.showSDoc ()
#else
    \df _ src style msg ->
    let renderErrMsg = Compat.showSDoc df
#endif
        errorEntry = mkGhcError renderErrMsg src style msg
    in modifyIORef r (errorEntry :)
mkGhcError :: (GHC.SDoc -> String) -> GHC.SrcSpan -> GHC.PprStyle -> GHC.Message -> GhcError
-- Render one located GHC message into hint's 'GhcError'.
mkGhcError render src_span style msg = GhcError{errMsg = niceErrMsg}
    where niceErrMsg = render . GHC.withPprStyle style $
                       Compat.mkLocMessage src_span msg
-- The MonadInterpreter instance
instance (MonadIO m, MonadMask m, Functor m) => MonadInterpreter (InterpreterT m) where
    fromSession f = InterpreterT $ fmap f ask
    --
    modifySessionRef target f =
        do ref <- fromSession target
           -- atomic swap; hands back the value *before* f was applied
           old_val <- liftIO $ atomicModifyIORef ref (\a -> (f a, a))
           return old_val
    --
    runGhc a = runGhc_impl a
-- Applicative spelled out explicitly for pre-AMP compilers.
instance (Monad m, Applicative m) => Applicative (InterpreterT m) where
    pure = return
    (<*>) = ap
| konn/hint-forked | src/Hint/InterpreterT.hs | bsd-3-clause | 9,430 | 1 | 17 | 2,580 | 1,719 | 907 | 812 | 126 | 2 |
-- | Server methods to do user authentication.
--
-- We authenticate clients using HTTP Basic or Digest authentication and we
-- authorise users based on membership of particular user groups.
--
{-# LANGUAGE PatternGuards #-}
module Distribution.Server.Framework.Auth (
-- * Checking authorisation
guardAuthorised,
-- ** Realms
RealmName,
hackageRealm,
adminRealm,
-- ** Creating password hashes
newPasswdHash,
UserName,
PasswdPlain,
PasswdHash,
-- ** Special cases
guardAuthenticated, checkAuthenticated,
guardPriviledged, checkPriviledged,
PrivilegeCondition(..),
-- ** Errors
AuthError(..),
authErrorResponse,
) where
import Distribution.Server.Users.Types (UserId, UserName(..), UserAuth(..), UserInfo)
import qualified Distribution.Server.Users.Types as Users
import qualified Distribution.Server.Users.Users as Users
import qualified Distribution.Server.Users.Group as Group
import qualified Distribution.Server.Users.UserIdSet as UserIdSet
import Distribution.Server.Framework.AuthCrypt
import Distribution.Server.Framework.AuthTypes
import Distribution.Server.Framework.Error
import Distribution.Server.Framework.HtmlFormWrapper (rqRealMethod)
import Happstack.Server
import Control.Monad.Trans (MonadIO, liftIO)
import qualified Data.ByteString.Char8 as BS -- Only used for Digest headers
import Control.Monad
import qualified Data.ByteString.Base64 as Base64
import Data.Char (intToDigit, isAsciiLower, isAscii, isAlphaNum, toLower)
import System.Random (randomRs, newStdGen)
import Data.Map (Map)
import qualified Data.Map as Map
import qualified Text.ParserCombinators.ReadP as Parse
import Data.Maybe (listToMaybe)
import Data.List (intercalate)
import qualified Data.Text.Encoding as T
------------------------------------------------------------------------
-- Main auth methods
--
hackageRealm, adminRealm :: RealmName
-- Realm names sent in the HTTP auth challenge; a separate admin realm
-- keeps browser-cached admin credentials distinct from normal ones.
hackageRealm = RealmName "Hackage"
adminRealm = RealmName "Hackage admin"
-- | Check that the client is authenticated and is authorised to perform some
-- priviledged action.
--
-- We check that:
--
-- * the client has supplied appropriate authentication credentials for a
-- known enabled user account;
-- * is a member of a given group of users who are permitted to perform
-- certain priviledged actions.
--
guardAuthorised :: RealmName -> Users.Users -> [PrivilegeCondition]
                -> ServerPartE UserId
guardAuthorised realm users privconds = do
    -- authenticate first, then authorise against the privilege conditions
    (uid, _) <- guardAuthenticated realm users
    guardPriviledged users uid privconds
    return uid
-- | Check that the client is authenticated. Returns the information about the
-- user account that the client authenticates as.
--
-- This checks the client has supplied appropriate authentication credentials
-- for a known enabled user account.
--
-- It only checks the user is known, it does not imply that the user is
-- authorised to do anything in particular, see 'guardAuthorised'.
--
guardAuthenticated :: RealmName -> Users.Users -> ServerPartE (UserId, UserInfo)
guardAuthenticated realm users = do
    authres <- checkAuthenticated realm users
    case authres of
      -- failures become HTTP error responses carrying a fresh auth challenge
      Left autherr -> throwError =<< authErrorResponse realm autherr
      Right info -> return info
checkAuthenticated :: ServerMonad m => RealmName -> Users.Users -> m (Either AuthError (UserId, UserInfo))
-- Dispatch on the Authorization header's scheme.  Clause order matters:
-- Digest is accepted even over plain http, whereas Basic and X-ApiKey
-- (which transmit reusable secrets) are rejected on insecure connections
-- (see 'plainHttp').
checkAuthenticated realm users = do
    req <- askRq
    return $ case getHeaderAuth req of
      Just (DigestAuth, ahdr) -> checkDigestAuth users ahdr req
      Just _ | plainHttp req -> Left InsecureAuthError
      Just (BasicAuth, ahdr) -> checkBasicAuth users realm ahdr
      Just (AuthToken, ahdr) -> checkTokenAuth users ahdr
      Nothing -> Left NoAuthError
  where
    -- Identify the scheme and strip its prefix from the header value.
    getHeaderAuth :: Request -> Maybe (AuthType, BS.ByteString)
    getHeaderAuth req =
      case getHeader "authorization" req of
        Just hdr
          | BS.isPrefixOf (BS.pack "Digest ") hdr
          -> Just (DigestAuth, BS.drop 7 hdr)
          | BS.isPrefixOf (BS.pack "X-ApiKey ") hdr
          -> Just (AuthToken, BS.drop 9 hdr)
          | BS.isPrefixOf (BS.pack "Basic ") hdr
          -> Just (BasicAuth, BS.drop 6 hdr)
        _ -> Nothing
-- Which Authorization scheme the client used.
data AuthType = BasicAuth | DigestAuth | AuthToken
-- Ways a user may qualify for a privileged action (checked in list order).
data PrivilegeCondition = InGroup Group.UserGroup
                        | IsUserId UserId
                        | AnyKnownUser
-- | Check that a given user is permitted to perform certain priviledged
-- actions.
--
-- This is based on whether the user is a member of a particular group of
-- priviledged users.
--
-- It only checks if the user is in the priviledged user group, it does not
-- imply that the current client has been authenticated, see 'guardAuthorised'.
--
-- | Fail with 403 Forbidden unless the user satisfies at least one of the
-- given privilege conditions (see 'checkPriviledged').
guardPriviledged :: Users.Users -> UserId -> [PrivilegeCondition] -> ServerPartE ()
guardPriviledged users uid privconds = do
    allok <- checkPriviledged users uid privconds
    -- 'unless' reads more directly than 'when (not ...)'
    unless allok $
      errForbidden "Forbidden" [MText "No access for this resource."]
-- | True as soon as one condition in the list holds; an empty list grants
-- nothing.  Group membership requires an IO lookup of the group contents.
checkPriviledged :: MonadIO m => Users.Users -> UserId -> [PrivilegeCondition] -> m Bool
checkPriviledged _ _ [] = return False
checkPriviledged users uid (cond:remaining) =
    case cond of
      AnyKnownUser -> return True
      IsUserId wanted
        | uid == wanted -> return True
        | otherwise     -> tryRest
      InGroup ugroup -> do
          members <- liftIO $ Group.queryUserGroup ugroup
          if UserIdSet.member uid members
            then return True
            else tryRest
  where
    tryRest = checkPriviledged users uid remaining
------------------------------------------------------------------------
-- Are we using plain http?
--
-- | The idea here is if you're using https by putting the hackage-server
-- behind a reverse proxy then you can get the proxy to set this header
-- so that we can know if the request is coming in by https or plain http.
--
-- We only reject insecure connections in setups where the proxy passes
-- "Forwarded: proto=http" or "X-Forwarded-Proto: http" for the non-secure
-- rather than rejecting in all setups where no header is provided.
--
plainHttp :: Request -> Bool
-- True when a reverse proxy declared the original request used plain http,
-- via RFC 7239 "Forwarded: proto=http" or the older "X-Forwarded-Proto".
-- With neither header present we do not reject (see comment above).
plainHttp req
  | Just fwd <- getHeader "Forwarded" req
  , Just fwdprops <- parseForwardedHeader fwd
  , Just "http" <- Map.lookup "proto" fwdprops
  = True
  | Just xfwd <- getHeader "X-Forwarded-Proto" req
  , xfwd == BS.pack "http"
  = True
  | otherwise
  = False
  where
    -- "Forwarded" header parser derived from RFC 7239
    -- https://tools.ietf.org/html/rfc7239
    parseForwardedHeader :: BS.ByteString -> Maybe (Map String String)
    parseForwardedHeader =
        fmap Map.fromList . parse . BS.unpack
      where
        parse :: String -> Maybe [(String, String)]
        -- succeed only when some parse consumes the whole input
        parse s = listToMaybe [ x | (x, "") <- Parse.readP_to_S parser s ]
        parser :: Parse.ReadP [(String, String)]
        parser = Parse.skipSpaces
              >> Parse.sepBy1 forwardedPair
                   (Parse.skipSpaces >> Parse.char ';' >> Parse.skipSpaces)
        forwardedPair :: Parse.ReadP (String, String)
        forwardedPair = do
          theName <- token
          void $ Parse.char '='
          theValue <- quotedString Parse.+++ token
          -- parameter names are case-insensitive per the RFC
          return (map toLower theName, theValue)
        token :: Parse.ReadP String
        token = Parse.munch1 (\c -> isAscii c
                              && (isAlphaNum c || c `elem` "!#$%&'*+-.^_`|~"))
        quotedString :: Parse.ReadP String
        quotedString =
          join Parse.between
               (Parse.char '"')
               (Parse.many $ (Parse.char '\\' >> Parse.get)
                             Parse.<++ Parse.satisfy (/='"'))
------------------------------------------------------------------------
-- Auth token method
--
-- | Handle an auth request using an access token
checkTokenAuth :: Users.Users -> BS.ByteString
               -> Either AuthError (UserId, UserInfo)
checkTokenAuth users ahdr = do
    parsedToken <-
      case Users.parseOriginalToken (T.decodeUtf8 ahdr) of
        Left _ -> Left BadApiKeyError
        Right tok -> Right (Users.convertToken tok)
    (uid, uinfo) <- Users.lookupAuthToken parsedToken users ?! BadApiKeyError
    -- the account must be enabled; otherwise report the user's status
    _ <- getUserAuth uinfo ?! UserStatusError uid uinfo
    return (uid, uinfo)
------------------------------------------------------------------------
-- Basic auth method
--
-- | Use HTTP Basic auth to authenticate the client as an active enabled user.
--
checkBasicAuth :: Users.Users -> RealmName -> BS.ByteString
               -> Either AuthError (UserId, UserInfo)
-- Decode the Basic credentials, look up the user by name, require an
-- enabled account, then compare against the stored password hash.
checkBasicAuth users realm ahdr = do
    authInfo <- getBasicAuthInfo realm ahdr ?! UnrecognizedAuthError
    let uname = basicUsername authInfo
    (uid, uinfo) <- Users.lookupUserName uname users ?! NoSuchUserError uname
    uauth <- getUserAuth uinfo ?! UserStatusError uid uinfo
    let passwdhash = getPasswdHash uauth
    guard (checkBasicAuthInfo passwdhash authInfo) ?! PasswordMismatchError uid uinfo
    return (uid, uinfo)
-- | Decode a Basic auth header value (base64 of "user:pass") into the
-- structured auth info; 'Nothing' on malformed base64 or a missing colon.
getBasicAuthInfo :: RealmName -> BS.ByteString -> Maybe BasicAuthInfo
getBasicAuthInfo realm authHeader = do
    (user, pass) <- decodedPair
    return BasicAuthInfo
      { basicRealm    = realm
      , basicUsername = UserName user
      , basicPasswd   = PasswdPlain pass
      }
  where
    decodedPair =
      case Base64.decode authHeader of
        Left _ -> Nothing
        Right raw ->
          case break (== ':') (BS.unpack raw) of
            (user, ':' : pass) -> Just (user, pass)
            _                  -> Nothing
{-
We don't actually want to offer basic auth. It's not something we want to
encourage and some browsers (like firefox) end up prompting the user for
failing auth once for each auth method that the server offers. So if we offer
both digest and auth then the user gets prompted twice when they try to cancel
the auth.
Note that we still accept basic auth if the client offers it pre-emptively.
headerBasicAuthChallenge :: RealmName -> (String, String)
headerBasicAuthChallenge (RealmName realmName) =
(headerName, headerValue)
where
headerName = "WWW-Authenticate"
headerValue = "Basic realm=\"" ++ realmName ++ "\""
-}
------------------------------------------------------------------------
-- Digest auth method
--
-- See RFC 2617 http://www.ietf.org/rfc/rfc2617
-- Digest auth TODO:
-- * support domain for the protection space (otherwise defaults to whole server)
-- * nonce generation is not ideal: consists just of a random number
-- * nonce is not checked
-- * opaque is not used
-- | Use HTTP Digest auth to authenticate the client as an active enabled user.
--
checkDigestAuth :: Users.Users -> BS.ByteString -> Request
                -> Either AuthError (UserId, UserInfo)
-- Parse the Digest response, look up the user, require an enabled account,
-- then verify the digest against the stored password hash.
checkDigestAuth users ahdr req = do
    authInfo <- getDigestAuthInfo ahdr req ?! UnrecognizedAuthError
    let uname = digestUsername authInfo
    (uid, uinfo) <- Users.lookupUserName uname users ?! NoSuchUserError uname
    uauth <- getUserAuth uinfo ?! UserStatusError uid uinfo
    let passwdhash = getPasswdHash uauth
    guard (checkDigestAuthInfo passwdhash authInfo) ?! PasswordMismatchError uid uinfo
    -- TODO: if we want to prevent replay attacks, then we must check the
    -- nonce and nonce count and issue stale=true replies.
    return (uid, uinfo)
-- | retrieve the Digest auth info from the headers
--
getDigestAuthInfo :: BS.ByteString -> Request -> Maybe DigestAuthInfo
getDigestAuthInfo authHeader req = do
    authMap <- parseDigestHeader authHeader
    -- the four fields every Digest response must carry (RFC 2617)
    username <- Map.lookup "username" authMap
    nonce <- Map.lookup "nonce" authMap
    response <- Map.lookup "response" authMap
    uri <- Map.lookup "uri" authMap
    let mb_qop = Map.lookup "qop" authMap
    -- qop="auth" should come with nc and cnonce; fall back to QopNone
    -- if they are absent, and reject any other qop value outright
    qopInfo <- case mb_qop of
                 Just "auth" -> do
                        nc <- Map.lookup "nc" authMap
                        cnonce <- Map.lookup "cnonce" authMap
                        return (QopAuth nc cnonce)
                      `mplus`
                        return QopNone
                 Nothing -> return QopNone
                 _ -> mzero
    return DigestAuthInfo {
        digestUsername = UserName username,
        digestNonce = nonce,
        digestResponse = response,
        digestURI = uri,
        digestRqMethod = show (rqRealMethod req),
        digestQoP = qopInfo
    }
  where
    -- Parser derived from RFCs 2616 and 2617
    parseDigestHeader :: BS.ByteString -> Maybe (Map String String)
    parseDigestHeader =
        fmap Map.fromList . parse . BS.unpack
      where
        parse :: String -> Maybe [(String, String)]
        parse s = listToMaybe [ x | (x, "") <- Parse.readP_to_S parser s ]
        parser :: Parse.ReadP [(String, String)]
        parser = Parse.skipSpaces
              >> Parse.sepBy1 nameValuePair
                   (Parse.skipSpaces >> Parse.char ',' >> Parse.skipSpaces)
        nameValuePair = do
          theName <- Parse.munch1 isAsciiLower
          void $ Parse.char '='
          theValue <- quotedString
          return (theName, theValue)
        quotedString :: Parse.ReadP String
        quotedString =
          join Parse.between
               (Parse.char '"')
               (Parse.many $ (Parse.char '\\' >> Parse.get) Parse.<++ Parse.satisfy (/='"'))
          Parse.<++ (liftM2 (:) (Parse.satisfy (/='"')) (Parse.munch (/=',')))
-- Build the WWW-Authenticate Digest challenge header for the given realm.
headerDigestAuthChallenge :: RealmName -> IO (String, String)
headerDigestAuthChallenge (RealmName realmName) = do
    nonce <- liftIO generateNonce
    return (headerName, headerValue nonce)
  where
    headerName = "WWW-Authenticate"
    -- Note that offering both qop=\"auth,auth-int\" can confuse some browsers
    -- e.g. see http://code.google.com/p/chromium/issues/detail?id=45194
    headerValue nonce =
      "Digest " ++
      intercalate ", "
        [ "realm=" ++ inQuotes realmName
        , "qop=" ++ inQuotes "auth"
        , "nonce=" ++ inQuotes nonce
        , "opaque=" ++ inQuotes ""
        ]
    -- 32 random hex digits; not tracked server-side (see TODOs above)
    generateNonce = fmap (take 32 . map intToDigit . randomRs (0, 15)) newStdGen
    inQuotes s = '"' : s ++ ['"']
------------------------------------------------------------------------
-- Common
--
-- Credentials are only available for enabled accounts.
getUserAuth :: UserInfo -> Maybe UserAuth
getUserAuth userInfo =
    case Users.userStatus userInfo of
      Users.AccountEnabled auth -> Just auth
      _ -> Nothing
-- Unwrap the stored password hash.
getPasswdHash :: UserAuth -> PasswdHash
getPasswdHash (UserAuth hash) = hash
------------------------------------------------------------------------
-- Errors
--
data AuthError = NoAuthError               -- ^ no Authorization header sent
               | UnrecognizedAuthError     -- ^ header present but unparseable
               | InsecureAuthError         -- ^ Basic/ApiKey over plain http
               | NoSuchUserError UserName
               | UserStatusError UserId UserInfo   -- ^ account not enabled
               | PasswordMismatchError UserId UserInfo
               | BadApiKeyError
  deriving Show
authErrorResponse :: MonadIO m => RealmName -> AuthError -> m ErrorResponse
authErrorResponse realm autherr = do
    -- every auth failure carries a fresh Digest challenge so clients retry
    digestHeader <- liftIO (headerDigestAuthChallenge realm)
    return $! (toErrorResponse autherr) { errorHeaders = [digestHeader] }
  where
    toErrorResponse :: AuthError -> ErrorResponse
    toErrorResponse NoAuthError =
      ErrorResponse 401 [] "No authorization provided" []
    toErrorResponse UnrecognizedAuthError =
      ErrorResponse 400 [] "Authorization scheme not recognized" []
    toErrorResponse InsecureAuthError =
      ErrorResponse 400 [] "Authorization scheme not allowed over plain http"
        [ MText $ "HTTP Basic and X-ApiKey authorization methods leak "
               ++ "information when used over plain HTTP. Either use HTTPS "
               ++ "or if you must use plain HTTP for authorised requests then "
               ++ "use HTTP Digest authentication." ]
    toErrorResponse BadApiKeyError =
      ErrorResponse 401 [] "Bad auth token" []
    -- we don't want to leak info for the other cases, so same message for them all:
    toErrorResponse _ =
      ErrorResponse 401 [] "Username or password incorrect" []
| edsko/hackage-server | Distribution/Server/Framework/Auth.hs | bsd-3-clause | 16,318 | 0 | 17 | 4,035 | 3,373 | 1,764 | 1,609 | 264 | 6 |
module Fib (foo) where
foo :: Int
foo = 23 -- the module's sole export (see the export list above)
-- | Calculate Fibonacci number of given 'Num'.
--
-- >>> putStrLn "foo"
-- foo
-- >>> putStr "bar"
-- bar
--
-- >>> putStrLn "baz"
-- baz
fib :: (Num t, Num t1) => t -> t1
fib _ = undefined -- NOTE(review): intentional stub; the >>> examples above are the doctest fixture payload, so do not reflow them
| snoyberg/doctest-haskell | test/parse/non-exported/Fib.hs | mit | 236 | 0 | 6 | 54 | 60 | 38 | 22 | 5 | 1 |
{-# LANGUAGE CPP, ScopedTypeVariables #-}
{-
Copyright (C) 2009 John MacFarlane <jgm@berkeley.edu>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-}
{- Utility functions for Gitit.
-}
module Network.Gitit.Util ( readFileUTF8
, inDir
, withTempDir
, orIfNull
, splitCategories
, trim
, yesOrNo
, parsePageType
, encUrl
)
where
import System.Directory
import Control.Exception (bracket)
import System.FilePath ((</>), (<.>))
import System.IO.Error (isAlreadyExistsError)
import Control.Monad.Trans (liftIO)
import Data.Char (toLower, isAscii)
import Network.Gitit.Types
import qualified Control.Exception as E
import qualified Text.Pandoc.UTF8 as UTF8
import Network.URL (encString)
-- | Read file as UTF-8 string. Encode filename as UTF-8.
readFileUTF8 :: FilePath -> IO String
readFileUTF8 = UTF8.readFile -- pandoc's UTF8 reader handles decoding
-- | Run an IO action with the working directory set to @d@, restoring the
-- original working directory afterwards -- even if the action throws.
-- (The previous version left the process in @d@ on an exception.)
inDir :: FilePath -> IO a -> IO a
inDir d action =
  bracket getCurrentDirectory        -- remember where we started
          setCurrentDirectory        -- always change back, even on exception
          (\_ -> setCurrentDirectory d >> action)
-- | Perform a function in a temporary directory and clean up.
withTempDir :: FilePath -> (FilePath -> IO a) -> IO a
withTempDir baseName f = do
  oldDir <- getCurrentDirectory
  -- bracket guarantees the temp dir is removed and the working directory
  -- restored (in case f changed it) even when f throws
  bracket (createTempDir 0 baseName)
          (\tmp -> setCurrentDirectory oldDir >> removeDirectoryRecursive tmp)
          f
-- | Create a temporary directory with a unique name.
createTempDir :: Integer -> FilePath -> IO FilePath
createTempDir num baseName = do
  sysTempDir <- getTemporaryDirectory
  let dirName = sysTempDir </> baseName <.> show num
  -- retry with the next numeric suffix if the name is taken; any other IO
  -- error propagates ('liftIO' is identity here -- we are already in IO)
  liftIO $ E.catch (createDirectory dirName >> return dirName) $
      \e -> if isAlreadyExistsError e
               then createTempDir (num + 1) baseName
               else ioError e
-- | Returns a list, if it is not null, or a backup, if it is.
-- | The first list when non-empty, otherwise the backup list.
orIfNull :: [a] -> [a] -> [a]
orIfNull [] backup = backup
orIfNull lst _     = lst
-- | Split a string containing a list of categories.
-- | Split a category list on whitespace and the punctuation .,;: after
-- stripping leading/trailing blanks.
splitCategories :: String -> [String]
splitCategories raw = words (map replacePunct (stripEnds raw))
  where
    replacePunct c
      | c `elem` ".,;:" = ' '
      | otherwise       = c
    -- strip leading and trailing spaces/tabs (same as 'trim')
    stripEnds =
      let dropLead = dropWhile (`elem` " \t")
      in reverse . dropLead . reverse . dropLead
-- | Trim leading and trailing spaces.
-- | Strip leading and trailing spaces and tabs.
trim :: String -> String
trim input = dropTrailing (dropLeading input)
  where
    isBlank c    = c == ' ' || c == '\t'
    dropLeading  = dropWhile isBlank
    dropTrailing = reverse . dropWhile isBlank . reverse
-- | Show Bool as "yes" or "no".
-- | Render a Bool as "yes" or "no".
yesOrNo :: Bool -> String
yesOrNo b
  | b         = "yes"
  | otherwise = "no"
-- | Map a page-type name (case-insensitive) to its 'PageType' and whether
-- it is a literate ("+lhs") variant.  Calls 'error' on unknown names.
parsePageType :: String -> (PageType, Bool)
parsePageType s =
    case lookup lowered table of
      Just result -> result
      Nothing     -> error $ "Unknown page type: " ++ lowered
  where
    lowered = map toLower s
    table = [ ("markdown",     (Markdown,  False))
            , ("markdown+lhs", (Markdown,  True))
            , ("rst",          (RST,       False))
            , ("rst+lhs",      (RST,       True))
            , ("html",         (HTML,      False))
            , ("textile",      (Textile,   False))
            , ("latex",        (LaTeX,     False))
            , ("latex+lhs",    (LaTeX,     True))
            , ("org",          (Org,       False))
            , ("mediawiki",    (MediaWiki, False))
            ]
encUrl :: String -> String
-- percent-encode all non-ASCII characters; the 'True' flag presumably
-- also encodes spaces -- NOTE(review): confirm against Network.URL docs
encUrl = encString True isAscii
| imuli/gitit | Network/Gitit/Util.hs | gpl-2.0 | 3,853 | 0 | 12 | 1,021 | 772 | 424 | 348 | 71 | 11 |
{-# LANGUAGE CPP #-}
#if !defined(TESTING) && __GLASGOW_HASKELL__ >= 703
{-# LANGUAGE Trustworthy #-}
#endif
-----------------------------------------------------------------------------
-- |
-- Module : Data.IntMap
-- Copyright : (c) Daan Leijen 2002
-- (c) Andriy Palamarchuk 2008
-- License : BSD-style
-- Maintainer : libraries@haskell.org
-- Stability : provisional
-- Portability : portable
--
-- An efficient implementation of maps from integer keys to values
-- (dictionaries).
--
-- This module re-exports the value lazy "Data.IntMap.Lazy" API, plus
-- several deprecated value strict functions. Please note that these functions
-- have different strictness properties than those in "Data.IntMap.Strict":
-- they only evaluate the result of the combining function. For example, the
-- default value to 'insertWith'' is only evaluated if the combining function
-- is called and uses it.
--
-- These modules are intended to be imported qualified, to avoid name
-- clashes with Prelude functions, e.g.
--
-- > import Data.IntMap (IntMap)
-- > import qualified Data.IntMap as IntMap
--
-- The implementation is based on /big-endian patricia trees/. This data
-- structure performs especially well on binary operations like 'union'
-- and 'intersection'. However, my benchmarks show that it is also
-- (much) faster on insertions and deletions when compared to a generic
-- size-balanced map implementation (see "Data.Map").
--
-- * Chris Okasaki and Andy Gill, \"/Fast Mergeable Integer Maps/\",
-- Workshop on ML, September 1998, pages 77-86,
-- <http://citeseer.ist.psu.edu/okasaki98fast.html>
--
-- * D.R. Morrison, \"/PATRICIA -- Practical Algorithm To Retrieve
-- Information Coded In Alphanumeric/\", Journal of the ACM, 15(4),
-- October 1968, pages 514-534.
--
-- Operation comments contain the operation time complexity in
-- the Big-O notation <http://en.wikipedia.org/wiki/Big_O_notation>.
-- Many operations have a worst-case complexity of /O(min(n,W))/.
-- This means that the operation can become linear in the number of
-- elements with a maximum of /W/ -- the number of bits in an 'Int'
-- (32 or 64).
-----------------------------------------------------------------------------
module Data.IntMap
( module Data.IntMap.Lazy
, insertWith'
, insertWithKey'
, fold
, foldWithKey
) where
import Prelude () -- hide foldr
import qualified Data.IntMap.Strict as Strict
import Data.IntMap.Lazy
-- | /Deprecated./ As of version 0.5, replaced by
-- 'Data.IntMap.Strict.insertWith'.
--
-- /O(log n)/. Same as 'insertWith', but the result of the combining function
-- is evaluated to WHNF before inserted to the map.
insertWith' :: (a -> a -> a) -> Key -> a -> IntMap a -> IntMap a
insertWith' = Strict.insertWith -- delegate to the value-strict implementation
-- | /Deprecated./ As of version 0.5, replaced by
-- 'Data.IntMap.Strict.insertWithKey'.
--
-- /O(log n)/. Same as 'insertWithKey', but the result of the combining
-- function is evaluated to WHNF before inserted to the map.
insertWithKey' :: (Key -> a -> a -> a) -> Key -> a -> IntMap a -> IntMap a
insertWithKey' = Strict.insertWithKey -- delegate to the value-strict implementation
-- | /Deprecated./ As of version 0.5, replaced by 'foldr'.
--
-- /O(n)/. Fold the values in the map using the given
-- right-associative binary operator. This function is an equivalent
-- of 'foldr' and is present for compatibility only.
fold :: (a -> b -> b) -> b -> IntMap a -> b
fold = foldr -- the IntMap 'foldr' (Prelude's is hidden above)
{-# INLINE fold #-}
-- | /Deprecated./ As of version 0.5, replaced by 'foldrWithKey'.
--
-- /O(n)/. Fold the keys and values in the map using the given
-- right-associative binary operator. This function is an equivalent
-- of 'foldrWithKey' and is present for compatibility only.
foldWithKey :: (Key -> a -> b -> b) -> b -> IntMap a -> b
foldWithKey = foldrWithKey
{-# INLINE foldWithKey #-}
| jwiegley/ghc-release | libraries/containers/Data/IntMap.hs | gpl-3.0 | 3,849 | 0 | 9 | 679 | 292 | 195 | 97 | 20 | 1 |
module ForAll00001 where
data BlockedFetch r = forall a. BlockedFetch (r a) (ResultVar a) | charleso/intellij-haskforce | tests/gold/parser/ForAll00001.hs | apache-2.0 | 90 | 0 | 8 | 14 | 33 | 19 | 14 | -1 | -1 |
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE ConstraintKinds #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE CPP #-}
module TestUtils (getTWInfo) where
import Web.Twitter.Conduit
import Web.Authenticate.OAuth as OA
import qualified Network.URI as URI
import Network.HTTP.Conduit
import qualified Data.Map as M
import qualified Data.ByteString.Char8 as S8
import qualified Data.CaseInsensitive as CI
import Control.Applicative
import Control.Monad.Base
import System.Environment
import Control.Lens
getOAuthTokens :: IO (OAuth, Credential)
-- Read the four OAuth secrets from the environment ('getEnv' throws when
-- a variable is unset) and pack them for twitter-conduit.
getOAuthTokens = do
    consumerKey <- getEnv' "OAUTH_CONSUMER_KEY"
    consumerSecret <- getEnv' "OAUTH_CONSUMER_SECRET"
    accessToken <- getEnv' "OAUTH_ACCESS_TOKEN"
    accessSecret <- getEnv' "OAUTH_ACCESS_SECRET"
    let oauth = twitterOAuth
            { oauthConsumerKey = consumerKey
            , oauthConsumerSecret = consumerSecret
            }
        cred = Credential
            [ ("oauth_token", accessToken)
            , ("oauth_token_secret", accessSecret)
            ]
    return (oauth, cred)
  where
    getEnv' = (S8.pack <$>) . getEnv
getProxyEnv :: IO (Maybe Proxy)
-- Build a Proxy from https_proxy / http_proxy / proxy (names compared
-- case-insensitively; first set variable wins).  NB: (<|>) binds tighter
-- than (>>=), so the whole alternative chain is resolved before parsing.
getProxyEnv = do
    env <- M.fromList . over (mapped . _1) CI.mk <$> getEnvironment
    let u = M.lookup "https_proxy" env <|>
            M.lookup "http_proxy" env <|>
            M.lookup "proxy" env >>= URI.parseURI >>= URI.uriAuthority
    return $ Proxy <$> (S8.pack . URI.uriRegName <$> u) <*> (parsePort . URI.uriPort <$> u)
  where
    parsePort :: String -> Int
    parsePort [] = 8080 -- no explicit port: default to 8080
    parsePort (':':xs) = read xs
    parsePort xs = error $ "port number parse failed " ++ xs
getTWInfo :: IO TWInfo
-- Assemble environment-derived credentials and optional proxy settings
-- into the TWInfo used by the test suite's API calls.
getTWInfo = do
    pr <- liftBase getProxyEnv
    (oa, cred) <- liftBase getOAuthTokens
    return $ (setCredential oa cred def) { twProxy = pr }
| AndrewRademacher/twitter-conduit | tests/TestUtils.hs | bsd-2-clause | 1,817 | 0 | 15 | 413 | 486 | 264 | 222 | 46 | 3 |
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TemplateHaskell #-}
-- | Manages a pool of available ports and allocates them.
module Keter.PortPool
( -- * Types
PortPool
-- * Actions
, getPort
, releasePort
-- * Initialize
, start
) where
import Control.Applicative ((<$>))
import Control.Concurrent.MVar
import Control.Exception
import Keter.Types
import qualified Network
import Prelude hiding (log)
data PPState = PPState
    { ppAvail :: ![Port] -- ^ ports ready to be handed out
    , ppRecycled :: !([Port] -> [Port])
      -- ^ returned ports as a difference list, so recycling appends (FIFO)
    }
newtype PortPool = PortPool (MVar PPState)
-- | Gets an unassigned port number.
getPort :: (LogMessage -> IO ())
        -> PortPool
        -> IO (Either SomeException Port)
-- Pop ports off the pool until one passes a listen/close bind probe;
-- unbindable ports are logged and dropped from the pool permanently.
getPort log (PortPool mstate) =
    modifyMVar mstate loop
  where
    loop :: PPState -> IO (PPState, Either SomeException Port)
    loop PPState {..} =
        case ppAvail of
            p:ps -> do
                let next = PPState ps ppRecycled
                -- probe: can this port actually be bound right now?
                res <- try $ Network.listenOn $ Network.PortNumber $ fromIntegral p
                case res of
                    Left (_ :: SomeException) -> do
                        log $ RemovingPort p
                        loop next
                    Right socket -> do
                        res' <- try $ Network.sClose socket
                        case res' of
                            Left e -> do
                                $logEx log e
                                log $ RemovingPort p
                                loop next
                            Right () -> return (next, Right p)
            [] ->
                -- fresh list exhausted: promote the recycled ports
                case ppRecycled [] of
                    [] -> return (PPState [] id, Left $ toException NoPortsAvailable)
                    ps -> loop $ PPState ps id
-- | Return a port to the recycled collection of the pool. Note that recycling
-- puts the new ports at the end of the queue (FIFO), so that if an application
-- holds onto the port longer than expected, there should be no issues.
-- | Append the port to the recycled queue (FIFO, via the difference
-- list), so slow-to-release applications cause no immediate reuse.
releasePort :: PortPool -> Port -> IO ()
releasePort (PortPool mstate) port =
    modifyMVar_ mstate $ \st ->
      return st { ppRecycled = ppRecycled st . (port:) }
-- | Initialise a pool holding the configured port range, with an empty
-- recycled queue.
start :: PortSettings -> IO PortPool
start settings = fmap PortPool (newMVar freshState)
  where
    freshState = PPState (portRange settings) id
| telser/keter | Keter/PortPool.hs | mit | 2,440 | 0 | 23 | 914 | 584 | 299 | 285 | 57 | 5 |
{-# LANGUAGE CPP #-}
module RIO where
#if __GLASGOW_HASKELL__ < 710
import Control.Applicative
#endif
{-@ data RIO a <p :: World -> Prop, q :: World -> a -> World -> Prop>
= RIO (rs :: (x:World<p> -> (a, World)<\w -> {v:World<q x w> | true}>))
@-}
-- A state monad over an abstract 'World' token; the LiquidHaskell specs
-- (in {-@ ... @-} comments) index it by pre/post-conditions.
data RIO a = RIO {runState :: World -> (a, World)}
{-@ runState :: forall <p :: World -> Prop, q :: World -> a -> World -> Prop>.
               RIO <p, q> a -> x:World<p> -> (a, World)<\w -> {v:World<q x w> | true}> @-}
-- Opaque world token, used only for verification bookkeeping.
data World = W
-- | RJ: Putting these in to get GHC 7.10 to not fuss
instance Functor RIO where
  fmap = undefined -- never executed; present only to satisfy the AMP hierarchy
-- | RJ: Putting these in to get GHC 7.10 to not fuss
instance Applicative RIO where
  pure = undefined
  (<*>) = undefined
instance Monad RIO where
{-@ instance Monad RIO where
      >>= :: forall < p  :: World -> Prop
                    , p2 :: a -> World -> Prop
                    , r  :: a -> Prop
                    , q1 :: World -> a -> World -> Prop
                    , q2 :: a -> World -> b -> World -> Prop
                    , q  :: World -> b -> World -> Prop>.
           {x::a<r>, w::World<p>|- World<q1 w x> <: World<p2 x>}
           {y::a, w::World<p>, w2::World<p2 y>, x::b, y::a<r> |- World<q2 y w2 x> <: World<q w x>}
           {x::a, w::World, w2::World<q1 w x>|- {v:a | v = x} <: a<r>}
           RIO <p, q1> a
        -> (x:a<r> -> RIO <{v:World<p2 x> | true}, \w1 y -> {v:World<q2 x w1 y> | true}> b)
        -> RIO <p, q> b ;
      >>  :: forall < p   :: World -> Prop
                    , p2  :: World -> Prop
                    , q1 :: World -> a -> World -> Prop
                    , q2 :: World -> b -> World -> Prop
                    , q :: World -> b -> World -> Prop>.
            {x::a, w::World<p>|- World<q1 w x> <: World<p2>}
            {w::World<p>, w2::World<p2>, x::b, y::a |- World<q2 w2 x> <: World<q w x>}
            RIO <p, q1> a
         -> RIO <p2, q2> b
         -> RIO <p, q> b  ;
      return :: forall <p :: World -> Prop>.
                x:a -> RIO <p, \w0 y -> {w1:World | w0 == w1 && y == x}> a
  @-}
  -- Thread the world through: run the first action, feed its result to
  -- the continuation, and continue from the resulting world.
  (RIO g) >>= f = RIO $ \x -> case g x of {(y, s) -> (runState (f y)) s}
  -- Like (>>=) but the second action ignores the first action's result.
  (RIO g) >> f  = RIO $ \x -> case g x of {(y, s) -> (runState f    ) s}
  -- 'return' leaves the world untouched (the LH spec pins w0 == w1).
  return w      = RIO $ \x -> (w, x)
  -- Pre-MonadFail Monad class requires 'fail'; delegate to 'error'.
  fail          = error
{-@ qualif Papp4(v:a, x:b, y:c, z:d, p:Pred a b c d) : papp4(p, v, x, y, z) @-}
-- Test Cases:
-- * TestM (Basic)
-- * TwiceM
-- * IfM
-- * WhileM
| mightymoose/liquidhaskell | benchmarks/icfp15/neg/RIO.hs | bsd-3-clause | 2,313 | 0 | 15 | 749 | 243 | 140 | 103 | 15 | 0 |
-- NOTE(review): this module looks like expected-output for a refactoring
-- tool test ("TokOut" path): the duplicate as-pattern bindings @x@ and the
-- unbound name @x_1@ in the first two equations would not compile as
-- ordinary Haskell.  Confirm before attempting to "fix" it.
module AsPatIn2 where

f :: Either a b -> Either a b
f x@(x@(Left b_1)) = x_1
f x@(x@(Right b_1)) = x_1
f x@(x_1) = x_1
module T8542 where
import GHC.Int
x :: Int8
x = -128   -- minBound for Int8

y :: Int8
-- NOTE(review): 128 is outside Int8's range (maxBound is 127); given the
-- GHC-testsuite path this appears to be a deliberate overflow-warning
-- case -- confirm before changing.
y = 128
| forked-upstream-packages-for-ghcjs/ghc | testsuite/tests/numeric/should_compile/T8542.hs | bsd-3-clause | 74 | 0 | 5 | 20 | 31 | 19 | 12 | 6 | 1 |
{-# LANGUAGE TemplateHaskell #-}
module T5971 where
import Language.Haskell.TH
-- Splice a freshly generated name as an expression; the binder is a
-- wildcard, so this only exercises that the TH splice type-checks.
_ = $(newName "x" >>= varE)
| urbanslug/ghc | testsuite/tests/th/T5971.hs | bsd-3-clause | 109 | 0 | 8 | 17 | 28 | 16 | 12 | 4 | 1 |
module Main (main) where
import System.Environment
import Control.Concurrent
import Control.Monad
-----------------------------------------------------------------------------
-- test MVar throughput between the main thread and a child thread
-- This test runs quite slowly on the threaded/SMP RTS vs. the normal RTS,
-- because the main thread and child thread are run by different OS threads,
-- so each MVar communication requires real OS thread switching.
--
-- Figures I get are about a factor of 10 difference in speed, at GHC 6.5.
main = chanTest 300000

-- | Ping @n@ unit values from a forked writer thread to the calling
-- thread through a single 'MVar'.
chanTest :: Int -> IO ()
chanTest n = do
  mailbox <- newEmptyMVar
  _ <- forkIO (writer mailbox n)
  reader mailbox n
-- Consume @n@ values from the MVar, one takeMVar per value.
reader chan n
  | n == 0 = return ()
  | otherwise = do
      takeMVar chan
      reader chan (n - 1)
-- Produce @n@ unit values into the MVar, one putMVar per value.
writer chan n
  | n == 0 = return ()
  | otherwise = do
      putMVar chan ()
      writer chan (n - 1)
| urbanslug/ghc | testsuite/tests/concurrent/should_run/conc051.hs | bsd-3-clause | 837 | 0 | 9 | 156 | 186 | 93 | 93 | 18 | 1 |
module ClipSpaceWLong where
import qualified Matrix4f as M4x4;
import qualified Vector4f as V4;
-- | The clip-space @w@ component: the dot product of the matrix's bottom
-- row with the eye-space vector, spelled out term by term.
clip_w_long :: M4x4.T -> V4.T -> Float
clip_w_long m eye = kx + ky + kz + kw
  where
    kx = V4.x eye * M4x4.row_column m (3, 0)
    ky = V4.y eye * M4x4.row_column m (3, 1)
    kz = V4.z eye * M4x4.row_column m (3, 2)
    kw = V4.w eye * M4x4.row_column m (3, 3)
| io7m/r2 | com.io7m.r2.documentation/src/main/resources/com/io7m/r2/documentation/haskell/ClipSpaceWLong.hs | isc | 435 | 0 | 12 | 124 | 203 | 113 | 90 | 15 | 1 |
import Graphics.Oedel
-- Styling example with simple layout structure.
main :: IO ()
-- Build the decoration pipeline with function composition and apply it
-- to the text element once; semantics match the original ($-chain).
main = displayHtmlStatic (decorate (text "Fancy!"))
  where
    decorate =
        setBack (rgb 0.8 0.9 1.0)
      . inset
      . withBorder ()
      . setBack white
      . setWidth 300
      . setHeight 90
      . pad 10 10 10 10
      . block center
      . withStyle (textColor (rgb 1.0 0.2 0.0))
      . withStyle (font "cursive" . fontSize 50)
| dzamkov/Oedel | doc/examples/Fancy.hs | mit | 395 | 0 | 18 | 115 | 143 | 67 | 76 | 12 | 1 |
module Data.LookupTable where
-- Gives the index for a lookup value (useful for storing data from a lookup)
import Data.Map (Map)
import qualified Data.Map as Map
import qualified Data.Set as Set
import Data.Tuple (swap)
type LookupTable = Map String Integer

-- Inverted view of a lookup table: recover the value stored at an index.
type LookDown a = Map Integer a

-- | Invert an index map by swapping every (value, index) pair.
lookdown :: Ord a => Map a Integer -> LookDown a
lookdown table = Map.fromList swapped
  where
    swapped = map swap (Map.toList table)
-- but what's the next index?
-- | One past the largest index already assigned; 1 for an empty table.
nextIndex :: Ord a => Map a Integer -> Integer
nextIndex table = maybe 1 (succ . fst) (Set.maxView indices)
  where
    indices = Set.fromList (Map.elems table)
{--
>>> nextIndex Map.empty
1
BOOM! ... well, ... kinda boom
--}
| geophf/1HaskellADay | exercises/HAD/Data/LookupTable.hs | mit | 674 | 0 | 9 | 132 | 164 | 91 | 73 | 11 | 1 |
------------------------------------------------------------------------------
module Main
( main
) where
------------------------------------------------------------------------------
import Data.Matrix
------------------------------------------------------------------------------
import qualified Examples
------------------------------------------------------------------------------
import Graphics.DetailGen.Maya (writeForest)
import Graphics.DetailGen.Monad (runDetailGen)
------------------------------------------------------------------------------
-- | Writes the simple example to "basic.py".
main :: IO ()
-- Evaluate the example detail forest and emit it as a Maya python script.
main = writeForest "basic.py" rootTransform 1 0 forest
  where
    rootTransform = identity 4
    forest = runDetailGen Examples.paperExample2
| zmthy/incidental-detail | src/Main.hs | mit | 723 | 0 | 8 | 64 | 85 | 51 | 34 | 9 | 1 |
{-# LANGUAGE FlexibleContexts, FlexibleInstances, MultiParamTypeClasses,
DeriveDataTypeable, OverloadedStrings, StandaloneDeriving #-}
module ConvertImage(
ImageString
, Blueprint
, Position
, Phase(..)
, header
, phrases
, convertpngs
) where
import Data.Typeable(Typeable)
import Data.Data(Data)
import Codec.Picture.Types
import Data.List(intercalate)
import Data.Maybe(isNothing,fromJust)
import Data.Either(partitionEithers)
import Data.Char(toLower,toUpper)
import qualified Data.ByteString.Lazy.Char8 as L
import qualified Data.ByteString as B
import qualified Data.Map.Strict as M
import qualified Data.Vector.Storable as V
import Config
-- One image rendered as CSV text (newline-separated rows of cells).
type ImageString = String
-- A complete quickfort blueprint (header + rows) as a lazy ByteString.
type Blueprint = L.ByteString
-- Optional quickfort start coordinate.
type Position = Maybe (Int,Int)

-- string to put in empty cells, quickfort accepts an empty string, "~", or "`" here
emptyCell = "~"
-- This header goes at the top of each Blueprint and tells
-- quickfort where to start and in what mode to run
-- | The quickfort header line: '#', the mode keyword, an optional
-- " start(x;y)" clause, and one comma per column.
header :: Position -> Int -> Phase -> String
header pos w p = '#' : modeName ++ startClause ++ replicate w ','
  where
    modeName = case p of
      Dig   -> "dig"
      Build -> "build"
      Place -> "place"
      _     -> "query"
    startClause = case pos of
      Nothing     -> ""
      Just (a, b) -> " start(" ++ show a ++ ";" ++ show b ++ ")"
-- | Convert the images once per requested phase.  If any phase fails,
-- return only the failures (as Lefts); otherwise return all blueprints.
convertpngs :: Int -> Position -> [DynamicImage] -> String -> CommandDictionary -> [Either String Blueprint]
convertpngs r pos imgs phases dict
  | null errs = results
  | otherwise = map Left errs
  where
    results = map (\phase -> pngconvert r pos imgs phase dict) phaseList
    -- Only the error halves are needed here; the original bound the
    -- successes to an unused name.
    (errs, _) = partitionEithers results
    phaseList = parsePhases phases
-- convert a list of images into a blueprint
-- | Convert a stack of images into one blueprint for the given phase.
-- Fails if there are no images, any image fails conversion, or the
-- images do not all share the same dimensions.
pngconvert :: Int -> Position -> [DynamicImage] -> Phase -> CommandDictionary -> Either String Blueprint
pngconvert r pos imgs phase dict
  | null imgs = Left "Error: no images to convert"
  | not (null errs) = Left (intercalate "\n" errs)
  | any (w/=) width || any (h/=) height = Left
    "Error: not all images have the same dimensions"
  | otherwise = Right $ toCSV r pos w phase images
  where (errs,images) = partitionEithers csvList
        -- Safe: the null-imgs guard above guarantees width is non-empty.
        w = head width
        h = head height
        (width,height) = unzip $ map extractDims imgs
        extractDims i = (dynamicMap imageWidth i,dynamicMap imageHeight i)
        csvList = map (imageToList (translate dict phase)) imgs
-- concat a list of ImageStrings into a single csv Blueprint
-- | Concatenate the per-level image strings (repeated @r@ times) into a
-- single blueprint, separated by quickfort's "#>" level-up marker.
toCSV :: Int -> Position -> Int -> Phase -> [ImageString] -> Blueprint
toCSV r s w p imgs = L.pack (header s w p ++ intercalate levelSeparator repeated)
  where
    levelSeparator = "\n#>" ++ replicate w ','
    repeated = take (r * length imgs) (cycle imgs)
-- convert a RGB8 image to a list of lists of strings
-- | Render one image as CSV text by mapping each pixel through the
-- colour-to-command dictionary.  Only RGB8 input is converted directly;
-- RGBA8 is accepted by dropping the alpha channel, anything else errors.
imageToList :: (PixelRGB8 -> String) -> DynamicImage -> Either String ImageString
imageToList dict (ImageRGB8 img) = Right $ convertVector (imageData img)
  where convertVector = csvify (width) . (map ((++ ",") . dict)) . (toPixelList . V.toList)
        width = imageWidth img
-- convert list of Word8 values into a list of RGB8 Pixels
        -- NOTE(review): non-exhaustive for a trailing 1- or 2-byte
        -- remainder; RGB8 raw data should always be a multiple of 3 bytes,
        -- so the remainder case presumably cannot occur -- confirm.
        toPixelList [] = []
        toPixelList (a:b:c:pixels) = (PixelRGB8 a b c) : toPixelList pixels
-- catch RGBA8 images and drop the transparency layer
imageToList dict (ImageRGBA8 img) = imageToList dict (ImageRGB8 (dropAlphaLayer img))
-- catch non RGB8 images and give an error message
imageToList _ _ = Left "Error: one or more images are not encoded in RGB8 color"
-- take a list of comma delimited strings and return a string with newlines added
-- | Break a flat list of comma-terminated cells into rows of @width@
-- cells, each row prefixed with a newline.  The leading newline is
-- intentional: a header line is prepended later and the last line then
-- needs no trailing newline.
csvify :: Int -> [String] -> String
csvify _ [] = ""
csvify width cells = '\n' : concat row ++ csvify width rest
  where
    (row, rest) = splitAt width cells
-- | Parse a comma-separated phase list (case-insensitive); "" and "all"
-- mean every phase.
parsePhases :: String -> [Phase]
parsePhases "" = [Dig,Build,Place,Query]
parsePhases s = parsePhases' (map toLower s)
    -- NOTE(review): 'read' is partial -- an unrecognised phase name
    -- crashes rather than reporting an error; likewise firstToUpper on
    -- an empty segment (phrases never yields one, since it drops empty
    -- fields).
    where parsePhases' "all" = [Dig,Build,Place,Query]
          parsePhases' s = map (read . firstToUpper) (phrases s)
          firstToUpper (c:cs) = (toUpper c) : cs
-- same as words, but cuts on commas instead of spaces
-- | Like 'words', but splitting on commas; empty fields are dropped.
phrases :: String -> [String]
phrases input =
  case dropWhile isComma input of
    ""      -> []
    trimmed -> piece : phrases remainder
      where (piece, remainder) = break isComma trimmed

isComma :: Char -> Bool
isComma c = c == ','
-- | The quickfort blueprint modes; Read is used to parse user-supplied
-- phase names (after capitalisation) in 'parsePhases'.
data Phase = Dig
           | Build
           | Place
           | Query
             deriving (Typeable, Data, Eq, Read, Show)
-- | Look up the command string for a pixel colour in the dictionary of
-- the given phase, falling back to the empty-cell marker.
translate :: CommandDictionary -> Phase -> PixelRGB8 -> String
translate dict phase key = M.findWithDefault emptyCell key (table dict)
  where
    table = case phase of
      Dig   -> des
      Build -> bld
      Place -> plc
      Query -> qry
| Hrothen/dorfCAD | src/ConvertImage.hs | mit | 5,063 | 0 | 13 | 1,240 | 1,485 | 788 | 697 | -1 | -1 |
import Data.Char (toUpper)
import Data.Time.Calendar (fromGregorian)
import Test.Hspec (Spec, it, shouldBe)
import Test.Hspec.Runner (configFastFail, defaultConfig, hspecWith)
import Person
( Address (..)
, Born (..)
, Name (..)
, Person (..)
, bornStreet
, renameStreets
, setBirthMonth
, setCurrentStreet
)
main :: IO ()
-- Run the spec, aborting on the first failure.
main = hspecWith defaultConfig {configFastFail = True} specs
specs :: Spec
-- Each case drills into 'testPerson' with the exercise's lens-style
-- accessors; the expected strings match the fixture below.
specs = do
          it "bornStreet" $
            (bornStreet . _born) testPerson
            `shouldBe` "Longway"
          it "setCurrentStreet" $
            (_street . _address . setCurrentStreet "Middleroad") testPerson
            `shouldBe` "Middleroad"
          it "setBirthMonth" $
            (_bornOn . _born . setBirthMonth 9) testPerson
            `shouldBe` fromGregorian 1984 9 12
          it "renameStreets birth" $
            (_street . _bornAt . _born . renameStreets (map toUpper)) testPerson
            `shouldBe` "LONGWAY"
          it "renameStreets current" $
            (_street . _address . renameStreets (map toUpper)) testPerson
            `shouldBe` "SHORTLANE"
testPerson :: Person
-- Fixture shared by all cases above; the literal values here are what
-- the spec's expected strings are derived from, so do not change them.
testPerson = Person {
    _name = Name {
      _foreNames = "Jane Joanna",
      _surName   = "Doe"
    },
    _born = Born {
      _bornAt = Address {
        _street      = "Longway"       ,
        _houseNumber = 1024            ,
        _place       = "Springfield"   ,
        _country     = "United States"
      },
      _bornOn = fromGregorian 1984 4 12
    },
    _address = Address {
      _street      = "Shortlane" ,
      _houseNumber = 2           ,
      _place       = "Fallmeadow",
      _country     = "Canada"
    }
  }
-- 9989d57b32b2370776b1c17fae0d646c2bf83377
| exercism/xhaskell | exercises/practice/lens-person/test/Tests.hs | mit | 2,074 | 0 | 14 | 912 | 437 | 249 | 188 | 49 | 1 |
module KcCacheServer.RequestHandler where
import Control.Concurrent.MSem
import Control.Concurrent.MVar
import Control.Monad.IO.Class
import qualified Data.ByteString.Lazy as BSL
import qualified Data.HashMap.Strict as HM
import qualified Data.HashSet as HS
import qualified Data.Text as T
import qualified KcCacheServer.CacheMeta as CM
-- | A request to the cache server.
data KcRequest = KcRequest
  { reqPath :: T.Text -- HTTP URI resource (beginning with '/')
  , reqVersion :: Maybe T.Text -- resource version, if the client knows one
  }

-- | A response: the cached resource's metadata plus its raw body bytes.
data KcResponse = KcResponse
  { respMeta :: CM.ResourceMeta
  , respBody :: BSL.ByteString
  }
<![CDATA[{-
  fetchFromCache may return Nothing, e.g. when a network fetch is being
  forced and the cache is to be bypassed.
-}]]>
-- | Answer a request from the cache when possible; on a cache miss,
-- fetch over the network, record the result in the cache, and return it.
handleRequest
  :: MonadIO m
  => (KcRequest -> m KcResponse)         -- ^ network fetch
  -> (KcRequest -> m (Maybe KcResponse)) -- ^ cache lookup
  -> (KcRequest -> KcResponse -> m ())   -- ^ cache store
  -> KcRequest
  -> m KcResponse
handleRequest fetchRemote lookupCache storeCache req =
  lookupCache req >>= maybe refill pure
  where
    refill = do
      fresh <- fetchRemote req
      storeCache req fresh
      pure fresh
| Javran/misc | kancolle-cache-server/src/KcCacheServer/RequestHandler.hs | mit | 1,040 | 0 | 13 | 198 | 268 | 149 | 119 | 30 | 2 |
module Data.Queue where
-- | A functional queue: new elements are consed onto the first (back)
-- list; elements are dequeued from the second (front) list.
data Queue a = Queue [a] [a]
  deriving (Show)

-- | Two queues are equal when they hold the same elements in the same
-- dequeue order, regardless of how those elements are split between the
-- back and front lists.  (The previous definition compared only the
-- front lists after 'reorder', so queues with identical fronts but
-- different pending back elements wrongly compared equal.)
instance Eq a => Eq (Queue a) where
  Queue back front == Queue back' front' =
    front ++ reverse back == front' ++ reverse back'
-- | A queue containing exactly one element.
mkQueue :: a -> Queue a
mkQueue element = Queue [element] []
-- | Add an element at the back of the queue (cons onto the back list).
enqueue :: a -> Queue a -> Queue a
enqueue element (Queue back front) = Queue (element : back) front
-- | Drop the element at the head of the queue.  On an empty queue the
-- (normalised) empty queue is returned unchanged.  The original bound the
-- fall-through case to a pattern variable named @otherwise@, shadowing
-- 'Prelude.otherwise'; a wildcard-style binder is used instead.
dequeue :: Queue a -> Queue a
dequeue q = case reorder q of
              (Queue xs (_:ys)) -> Queue xs ys
              normalised        -> normalised
-- | The element at the head of the queue, or Nothing when empty.  As with
-- 'dequeue', the original shadowed 'Prelude.otherwise' with a pattern
-- variable; a plain wildcard is used here.
peek :: Queue a -> Maybe a
peek a = case reorder a of
           (Queue _ (y:_)) -> Just y
           _               -> Nothing
-- | True iff both the back and the front lists are empty.
isEmpty :: Queue a -> Bool
isEmpty (Queue back front) = null back && null front
reorder :: Queue a -> Queue a
reorder q@(Queue xs (y:ys)) = q
reorder (Queue xs []) = Queue [] (reverse xs)
| Kiandr/CrackingCodingInterview | Haskell/src/chapter-3/Data/Queue.hs | mit | 831 | 0 | 11 | 239 | 447 | 223 | 224 | 26 | 2 |
{-# OPTIONS_GHC -Wall #-}
{-# LANGUAGE ForeignFunctionInterface #-}
module Descriptors
( LLVMType
, getFieldTypePtr
, getMethodTypePtr
, parseOnlyFieldType
, parseOnlyMethodType
) where
import Data.ByteString (ByteString, pack, useAsCString, append)
import Data.Char (ord)
import Data.Attoparsec.ByteString
import Data.Maybe
import Foreign
import Foreign.C.Types
import Foreign.C.String
-- Unsigned byte, the unit of class-file descriptor strings.
type U1 = Word8
type BString = Data.ByteString.ByteString

-- | A JVM-style field descriptor: a primitive (base) type or a
-- reference (object/array) type.
data FieldType = Base BaseType | Ref RefType
               deriving (Show, Eq)

-- | A method descriptor: argument field types plus a return type.
data MethodType = MethodType [FieldType] RetType
                deriving (Show, Eq)

data BaseType = Byte
              | Char
              | Double
              | Float
              | Int
              | Long
              | Short
              | Boolean
              deriving (Show, Eq)

-- Object carries the class name parsed from an "L...;" descriptor.
data RefType = Object BString | Array FieldType
             deriving (Show, Eq)

data RetType = Void | Field FieldType
             deriving (Show, Eq)

data LLVMType -- llvm type object
-- FFI bindings into the C side (bridge.h); each call yields an opaque
-- pointer to an LLVM type object.
foreign import ccall safe "bridge.h getCharType"
  c_getCharType :: IO (Ptr LLVMType)

foreign import ccall safe "bridge.h getByteType"
  c_getByteType :: IO (Ptr LLVMType)

foreign import ccall safe "bridge.h getShortType"
  c_getShortType :: IO (Ptr LLVMType)

foreign import ccall safe "bridge.h getIntType"
  c_getIntType :: IO (Ptr LLVMType)

foreign import ccall safe "bridge.h getLongType"
  c_getLongType :: IO (Ptr LLVMType)

foreign import ccall safe "bridge.h getFloatType"
  c_getFloatType :: IO (Ptr LLVMType)

foreign import ccall safe "bridge.h getDoubleType"
  c_getDoubleType :: IO (Ptr LLVMType)

foreign import ccall safe "bridge.h getVoidType"
  c_getVoidType :: IO (Ptr LLVMType)

foreign import ccall safe "bridge.h getBooleanType"
  c_getBooleanType :: IO (Ptr LLVMType)

-- Builds a function type from an argument array, return type, and a
-- final flag (passed as 0 at the call site below).
foreign import ccall safe "bridge.h getFunctionType"
  c_getFunctionType :: CInt -> Ptr (Ptr LLVMType) -> Ptr LLVMType -> CInt -> IO (Ptr LLVMType)

foreign import ccall safe "bridge.h getOpaqueObjectType"
  c_getOpaqueObjectType :: CString -> IO (Ptr LLVMType)

foreign import ccall safe "bridge.h getPointerType"
  c_getPointerType :: Ptr LLVMType -> IO (Ptr LLVMType)
-- | Parse a field descriptor and build the corresponding LLVM type.
-- A parse failure aborts with 'error', as in the original.
getFieldTypePtr :: BString -> IO (Ptr LLVMType)
getFieldTypePtr = ret2TypePtr . either error id . parseOnlyFieldType
-- | Parse a method descriptor and build the corresponding LLVM function
-- type; an optional receiver type pointer is threaded through.  A parse
-- failure aborts with 'error', as in the original.
getMethodTypePtr :: Maybe (Ptr LLVMType) -> BString -> IO (Ptr LLVMType)
getMethodTypePtr p = ret2MethodPtr p . either error id . parseOnlyMethodType
-- | Map a parsed field type to its LLVM type object: base types call the
-- matching C getter, arrays become pointers to the element type, and
-- objects become opaque named types.
ret2TypePtr :: FieldType -> IO (Ptr LLVMType)
ret2TypePtr (Base x)
  | x == Byte = c_getByteType
  | x == Char = c_getCharType
  | x == Short = c_getShortType
  | x == Int = c_getIntType
  | x == Long = c_getLongType
  | x == Float = c_getFloatType
  | x == Double = c_getDoubleType
  | otherwise = c_getBooleanType
ret2TypePtr (Ref (Array t)) = ret2TypePtr t >>= c_getPointerType
ret2TypePtr (Ref (Object x)) = useAsCString' x c_getOpaqueObjectType

-- | Build an LLVM function type from a parsed method type.  When a
-- receiver pointer is supplied it is prepended to the argument list.
ret2MethodPtr :: Maybe (Ptr LLVMType) -> MethodType -> IO (Ptr LLVMType)
ret2MethodPtr ptr (MethodType args ret) = arglist >>= cvt
  where
    arglist
      | isNothing ptr = mapM ret2TypePtr args
      | otherwise = (fromJust ptr :) <$> mapM ret2TypePtr args
    cvt a = retptr >>= convert (length a) a
      where
        retptr = ret2RetTypePtr ret
        -- Marshal the argument pointers into a temporary C array.
        -- NOTE(review): the trailing 0 presumably means "not variadic"
        -- -- confirm against bridge.h.
        convert n a r = allocaArray n $ \p
          -> pokeArray p a
          >> c_getFunctionType (fromIntegral n) p r 0
-- | Return type: delegate to the field-type mapping, or void.
ret2RetTypePtr :: RetType -> IO (Ptr LLVMType)
ret2RetTypePtr retType = case retType of
  Field fieldT -> ret2TypePtr fieldT
  Void         -> c_getVoidType
-- | Run the field-descriptor parser over a complete input.
parseOnlyFieldType :: BString -> Either String FieldType
parseOnlyFieldType = parseOnly fieldType

-- | Run the method-descriptor parser over a complete input.
parseOnlyMethodType :: BString -> Either String MethodType
parseOnlyMethodType = parseOnly methodType

-- One descriptor character per primitive; 'L' starts an object type and
-- '[' an array type, mirroring the JVM descriptor grammar.
fieldType :: Parser FieldType
fieldType = choice
    [ char 'B' >> return (Base Byte)
    , char 'C' >> return (Base Char)
    , char 'D' >> return (Base Double)
    , char 'F' >> return (Base Float)
    , char 'I' >> return (Base Int)
    , char 'J' >> return (Base Long)
    , char 'S' >> return (Base Short)
    , char 'Z' >> return (Base Boolean)
    , char 'L' >> (Ref <$> objectType)
    , char '[' >> (Ref <$> arrayType)
    ]

-- Consumes the class name up to (and including) the closing ';'.
objectType :: Parser RefType
objectType = Object . pack <$> manyTill' anyWord8 (char ';')

-- The element type immediately follows the '[' consumed by the caller.
arrayType :: Parser RefType
arrayType = Array <$> fieldType

-- "(args)ret": parenthesised argument types, then a return type,
-- where 'V' denotes void.
methodType :: Parser MethodType
methodType = MethodType <$> params <*> ret
  where
    params = char '(' *> many' fieldType <* char ')'
    ret = choice [char 'V' *> return Void, Field <$> fieldType]
-- | Match a single literal character, encoded as its byte value.
char :: Char -> Parser U1
char = word8 . charToU1

-- | A character's code point truncated to one byte.
charToU1 :: Char -> U1
charToU1 c = fromIntegral (ord c)
-- | Like 'useAsCString', but with an explicit NUL byte appended so the C
-- side sees a properly terminated string.
useAsCString' :: BString -> (CString -> IO a) -> IO a
useAsCString' bs = useAsCString (bs `append` pack [0x00])
| MichaeGon/java | Descriptors.hs | mit | 4,835 | 0 | 12 | 1,111 | 1,554 | 797 | 757 | -1 | -1 |
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE CPP #-}
module Control.Monad.Log.NameSpace where
import Control.Monad.Log
#if !(MIN_VERSION_base(4,8,0))
import Control.Applicative
#endif
import Data.Aeson
import Data.Text (Text)
import qualified Data.Text as T
-- | A newtype around a list of names from children to root.
--
-- This reversed order is choosen becasue '(:)' is faster.
--
-- @
-- showt (NameSpace ["subSub", "sub", "root"]) = "subSub<<sub<<root"
-- toJSON (NameSpace ["subSub", "sub", "root"]) = '["subSub", "sub", "root"]'
-- @
-- Invariant (see haddock above): names are stored child-first, so '(:)'
-- pushes a new, more specific name cheaply.
newtype NameSpace = NameSpace { getNameSpace :: [Text] } deriving (Show, Eq, Ord)
-- | push a 'Text' name to the front of 'NameSpace'.
-- | Prepend a name, making it the most specific (front) component.
pushNameSpace :: Text -> NameSpace -> NameSpace
pushNameSpace name ns = NameSpace (name : getNameSpace ns)
instance TextShow NameSpace where
    -- Child-first components joined with "<<", e.g. "subSub<<sub<<root".
    showb (NameSpace names) = showb $ T.intercalate "<<" names

instance ToJSON NameSpace where
    toJSON (NameSpace t) = toJSON t
#if MIN_VERSION_aeson(0,10,0)
    -- Direct encoding skips the intermediate Value on newer aeson.
    toEncoding (NameSpace t) = toEncoding t
#endif

instance FromJSON NameSpace where
    parseJSON t = NameSpace <$> parseJSON t
-- | use a new 'NameSpace' within m.
-- | use a new 'NameSpace' within m.
withNameSpace :: (MonadLog NameSpace m) => NameSpace -> m a -> m a
withNameSpace = withEnv

-- | push a 'Text' name to the front of m's 'NameSpace'.
-- Implemented as a local environment tweak, so the change is scoped to
-- the wrapped action.
subNameSpace :: (MonadLog NameSpace m) => Text -> m a -> m a
subNameSpace sub = localEnv (pushNameSpace sub)
| winterland1989/monad-log | Control/Monad/Log/NameSpace.hs | mit | 1,440 | 0 | 8 | 242 | 310 | 173 | 137 | 22 | 1 |
import System.Environment
-- | The Greek alphabet: uppercase when no arguments were given,
-- lowercase otherwise.  Both ranges are split to skip U+03A2/U+03C2
-- (the unassigned capital slot / final sigma).
f :: [a] -> String
f [] = ['Α'..'Ρ'] ++ ['Σ'..'Ω']
f _  = ['α'..'ρ'] ++ ['σ'..'ω']
main :: IO ()
-- Choose the alphabet case from the presence of CLI arguments and print it.
main = do
  args <- getArgs
  putStr (f args)
{-# LANGUAGE OverloadedStrings #-}
module Command.Rule (listRulesCommand, addRuleCommand) where
import Control.Monad
import Data.Maybe
import Data.List
import Data.Time
import Data.Time.Recurrence
import Database.HDBC
import Settings
import Util
-- | Print every category rule and then every time rule, each pretty-printed
-- one per line.  The @args@ and @flags@ parameters are accepted for
-- interface uniformity but unused.
listRulesCommand :: [String] -> String -> IO ()
listRulesCommand args flags = do
    conn <- getDbConnection
    categoryResult <- quickQuery' conn categorySql []
    timeResult <- quickQuery' conn timeSql []
    disconnect conn
    mapM_ (putStrLn . showCategoryRule) categoryResult
    putStrLn ""
    mapM_ (putStrLn . showTimeRule) timeResult
  where
    categorySql =
      "SELECT envelope.name, category.name, category_rule.amount FROM category_rule"
      ++ " INNER JOIN envelope ON category_rule.envelope_id = envelope.id"
      ++ " INNER JOIN category ON category_rule.category_id = category.id"
      ++ " ORDER BY envelope.name, category.name ASC"
    timeSql =
      "SELECT envelope.name, time_rule.schedule, time_rule.amount FROM time_rule"
      ++ " INNER JOIN envelope ON time_rule.envelope_id = envelope.id"
      ++ " ORDER BY envelope.name ASC"
-- | Pretty-print one category-rule row (envelope name, category name,
-- amount).  The original pattern was non-exhaustive and crashed on rows
-- of any other width; a catch-all clause now reports them instead.
showCategoryRule :: [SqlValue] -> String
showCategoryRule (eName:cName:amount:[]) =
    "Envelope: " ++ fromSql eName ++ " | category: " ++ fromSql cName ++ " | amount: " ++ fromSql amount
showCategoryRule row = "Malformed category rule row: " ++ show row
-- | Pretty-print one time-rule row (envelope name, schedule, amount).
-- As with 'showCategoryRule', a catch-all clause replaces the previous
-- non-exhaustive pattern so malformed rows are reported, not crashed on.
showTimeRule :: [SqlValue] -> String
showTimeRule (eName:schedule:amount:[]) =
    "Envelope: " ++ fromSql eName ++ " | schedule: " ++ fromSql schedule ++ " | amount: " ++ fromSql amount
showTimeRule row = "Malformed time rule row: " ++ show row
-- | Dispatch on the flag set: -c adds a category rule, -t a time rule.
-- If both flags are present, -c wins, matching the original guard order.
addRuleCommand :: [String] -> String -> IO ()
addRuleCommand args flags =
  if 'c' `elem` flags
    then categoryRule args
    else if 't' `elem` flags
      then timeRule args
      else putStrLn "Command requires one of the following flags: -c, -t"
-- TODO: Display error instead of defaulting to now if startString is unparseable
-- TODO: Allow startString to be either a date or datetime
-- | Insert a category rule: envelope, category, percentage, amount, and an
-- optional start time (defaulting to now when absent or unparseable).
-- The row count returned by 'run' is intentionally discarded.
categoryRule :: [String] -> IO ()
categoryRule (envelope:category:percentageString:amountString:rest) = do
    conn <- getDbConnection
    envelopeId <- getEnvelopeId conn envelope
    categoryId <- getCategoryId conn category
    -- NOTE(review): 'read' is partial; a non-numeric percentage crashes.
    let percentage = read percentageString :: Double
    let amount = readInteger100 amountString
    now <- getCurrentTime
    tz <- getCurrentTimeZone
    let start = case rest of
                    [] -> now
                    (startString:[]) -> fromMaybe now $ parseLocalTime tz startString
    run conn
        "INSERT INTO category_rule (envelope_id, category_id, percentage, amount, start) VALUES (?, ?, ?, ?, ?)"
        [toSql envelopeId, toSql categoryId, toSql percentage, toSql amount, toSql start]
    commit conn
    disconnect conn
categoryRule _ = putStrLn "Command requires at least 4 arguments"
-- | The keys of an association list, comma-separated.
-- 'intercalate' replaces the equivalent hand-rolled
-- @concat . intersperse ", "@ pipeline.
showKeys :: [(String, a)] -> String
showKeys = intercalate ", " . map fst
-- | Insert a time rule: envelope, frequency keyword, amount, and an
-- optional start time (defaulting to now when absent or unparseable).
timeRule :: [String] -> IO ()
timeRule (envelope:frequency:amountString:rest) = do
    conn <- getDbConnection
    envelopeId <- getEnvelopeId conn envelope
    let amount = readInteger100 amountString
    now <- getCurrentTime
    tz <- getCurrentTimeZone
    let start = case rest of
                    [] -> now
                    (startString:[]) -> fromMaybe now $ parseLocalTime tz startString
    -- The frequency keyword is only validated against frequencyMap; the
    -- insert happens solely on a match.
    case lookup frequency frequencyMap of
        Just _ -> void $ run conn
            "INSERT INTO time_rule (envelope_id, frequency, amount, start) VALUES (?, ?, ?, ?)"
            [toSql envelopeId, toSql frequency, toSql amount, toSql start]
        Nothing -> putStrLn $ "Incorrect frequency. Possible options are: " ++ showKeys frequencyMap
    -- NOTE(review): commit/disconnect run even when the frequency was
    -- rejected; harmless here since nothing was inserted.
    commit conn
    disconnect conn
| jpotterm/manila-hs | src/Command/Rule.hs | cc0-1.0 | 3,665 | 0 | 15 | 890 | 886 | 432 | 454 | 78 | 3 |
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE ScopedTypeVariables #-}
module Course (module X) where
import Course.Anagrams as X
import Course.Applicative as X
import Course.Cheque as X
import Course.Comonad as X
import Course.Compose as X
import Course.Core as X
import Course.Extend as X
import Course.FastAnagrams as X
import Course.FileIO as X
import Course.Functor as X
import Course.Id as X
import Course.Interactive as X
import Course.JsonParser as X
import Course.JsonValue as X
import Course.List as X
import Course.ListZipper as X
import Course.Monad as X
import Course.MoreParser as X
import Course.Optional as X
import Course.Parser as X
import Course.Person as X
import Course.State as X
import Course.StateT as X
import Course.Traversable as X
import Course.Validation as X
| harrisi/on-being-better | list-expansion/Haskell/course/src/Course.hs | cc0-1.0 | 792 | 0 | 4 | 115 | 187 | 134 | 53 | 28 | 0 |
{-# LANGUAGE TypeSynonymInstances, ScopedTypeVariables, FlexibleInstances #-}
{-
Copyright (C) 2009 John MacFarlane <jgm@berkeley.edu>,
Anton van Straaten <anton@appsolutions.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-}
{- | Types for Gitit modules.
-}
module Network.Gitit.Types (
PageType(..)
, FileStoreType(..)
, MathMethod(..)
, AuthenticationLevel(..)
, Config(..)
, Page(..)
, SessionKey
-- we do not export SessionData constructors, in case we need to extend SessionData with other data in the future
, SessionData
, sessionData
, sessionDataGithubState
, sessionUser
, sessionGithubState
, User(..)
, Sessions(..)
, Password(..)
, GititState(..)
, HasContext
, modifyContext
, getContext
, ContentTransformer
, Plugin(..)
, PluginData(..)
, PluginM
, runPluginM
, Context(..)
, PageLayout(..)
, Tab(..)
, Recaptcha(..)
, Params(..)
, Command(..)
, WikiState(..)
, GititServerPart
, Handler
, fromEntities
, GithubConfig
, oAuth2
, org
, githubConfig) where
import Control.Monad.Reader (ReaderT, runReaderT, mplus)
import Control.Monad.State (StateT, runStateT, get, modify)
import Control.Monad (liftM)
import System.Log.Logger (Priority(..))
import Text.Pandoc.Definition (Pandoc)
import Text.XHtml (Html)
import qualified Data.Map as M
import Data.Text (Text)
import Data.List (intersect)
import Data.Time (parseTime)
#if MIN_VERSION_time(1,5,0)
import Data.Time (defaultTimeLocale)
#else
import System.Locale (defaultTimeLocale)
#endif
import Data.FileStore.Types
import Network.Gitit.Server
import Text.HTML.TagSoup.Entity (lookupEntity)
import Data.Char (isSpace)
import Network.OAuth.OAuth2
-- | Markup formats a wiki page may be written in.
data PageType = Markdown
              | CommonMark
              | RST
              | LaTeX
              | HTML
              | Textile
              | Org
              | DocBook
              | MediaWiki
                deriving (Read, Show, Eq)

-- | Version-control backends supported for the page filestore.
data FileStoreType = Git | Darcs | Mercurial deriving Show

-- | Strategies for rendering LaTeX math in pages; WebTeX/MathJax carry
-- the service/script URL.
data MathMethod = MathML | JsMathScript | WebTeX String | RawTeX | MathJax String
                  deriving (Read, Show, Eq)

-- | When authentication is demanded; Ord reflects increasing strictness.
data AuthenticationLevel = Never | ForModify | ForRead
                  deriving (Read, Show, Eq, Ord)
-- | Data structure for information read from config file.
-- | Data structure for information read from config file.
-- NOTE(review): several field comments below were previously attached to
-- the wrong neighbouring field and have been re-paired with the fields
-- they describe.
data Config = Config {
  -- | Path of repository containing filestore
  repositoryPath       :: FilePath,
  -- | Type of repository
  repositoryType       :: FileStoreType,
  -- | Default page markup type for this wiki
  defaultPageType      :: PageType,
  -- | Default file extension for pages in this wiki
  defaultExtension     :: String,
  -- | How to handle LaTeX math in pages?
  mathMethod           :: MathMethod,
  -- | Treat as literate haskell by default?
  defaultLHS           :: Bool,
  -- | Show Haskell code with bird tracks
  showLHSBirdTracks    :: Bool,
  -- | Combinator to set @REMOTE_USER@ request header
  withUser             :: Handler -> Handler,
  -- | Specifies which actions require authentication.
  requireAuthentication :: AuthenticationLevel,
  -- | Handler for login, logout, register, etc.
  authHandler          :: Handler,
  -- | Path of users database
  userFile             :: FilePath,
  -- | Seconds of inactivity before session expires
  sessionTimeout       :: Int,
  -- | Directory containing page templates
  templatesDir         :: FilePath,
  -- | Path of server log file
  logFile              :: FilePath,
  -- | Severity filter for log messages (DEBUG, INFO,
  -- NOTICE, WARNING, ERROR, CRITICAL, ALERT, EMERGENCY)
  logLevel             :: Priority,
  -- | Path of static directory
  staticDir            :: FilePath,
  -- | Names of plugin modules to load
  pluginModules        :: [String],
  -- | Show table of contents on each page?
  tableOfContents      :: Bool,
  -- | Max size of file uploads
  maxUploadSize        :: Integer,
  -- | Max size of page uploads
  maxPageSize          :: Integer,
  -- | IP address to bind to
  address              :: String,
  -- | Port number to serve content on
  portNumber           :: Int,
  -- | Print debug info to the console?
  debugMode            :: Bool,
  -- | The front page of the wiki
  frontPage            :: String,
  -- | Pages that cannot be edited via web
  noEdit               :: [String],
  -- | Pages that cannot be deleted via web
  noDelete             :: [String],
  -- | Default summary if description left blank
  defaultSummary       :: String,
  -- | Delete summary
  deleteSummary        :: String,
  -- | @Nothing@ = anyone can register.
  -- @Just (prompt, answers)@ = a user will
  -- be given the prompt and must give
  -- one of the answers to register.
  accessQuestion       :: Maybe (String, [String]),
  -- | Use ReCAPTCHA for user registration.
  useRecaptcha         :: Bool,
  recaptchaPublicKey   :: String,
  recaptchaPrivateKey  :: String,
  -- | RPX domain and key
  rpxDomain            :: String,
  rpxKey               :: String,
  -- | Should responses be compressed?
  compressResponses    :: Bool,
  -- | Should responses be cached?
  useCache             :: Bool,
  -- | Directory to hold cached pages
  cacheDir             :: FilePath,
  -- | Map associating mime types with file extensions
  mimeMap              :: M.Map String String,
  -- | Command to send notification emails
  mailCommand          :: String,
  -- | Text of password reset email
  resetPasswordMessage :: String,
  -- | Markup syntax help for edit sidebar
  markupHelp           :: String,
  -- | Provide an atom feed?
  useFeed              :: Bool,
  -- | Base URL of wiki, for use in feed
  baseUrl              :: String,
  -- | Should WikiLinks be absolute w.r.t. the base URL?
  -- NOTE(review): the String type looks odd for a yes/no setting --
  -- confirm whether this should be Bool.
  useAbsoluteUrls      :: String,
  -- | Title of wiki, used in feed
  wikiTitle            :: String,
  -- | Number of days history to be included in feed
  feedDays             :: Integer,
  -- | Number of minutes to cache feeds before refreshing
  feedRefreshTime      :: Integer,
  -- | Allow PDF export?
  pdfExport            :: Bool,
  -- | Directory to search for pandoc customizations
  pandocUserData       :: Maybe FilePath,
  -- | Filter HTML through xss-sanitize
  xssSanitize          :: Bool,
  -- | The default number of days in the past to look for \"recent\" activity
  recentActivityDays   :: Int,
  -- | Github client data for authentication (id, secret, callback,
  -- authorize endpoint, access token endpoint)
  githubAuth           :: GithubConfig
  }
-- | Data for rendering a wiki page.
-- | Data for rendering a wiki page.
data Page = Page {
    pageName        :: String
  , pageFormat      :: PageType
  , pageLHS         :: Bool      -- treat as literate Haskell?
  , pageTOC         :: Bool      -- render a table of contents?
  , pageTitle       :: String
  , pageCategories  :: [String]
  , pageText        :: String    -- raw page source in 'pageFormat'
  , pageMeta        :: [(String, String)]
  } deriving (Read, Show)
type SessionKey = Integer

data SessionData = SessionData {
     sessionUser :: Maybe String,
     sessionGithubState :: Maybe String
  }  deriving (Read,Show,Eq)

-- | Session data for a logged-in user (no pending github OAuth state).
sessionData :: String -> SessionData
sessionData user =
  SessionData { sessionUser = Just user, sessionGithubState = Nothing }

-- | Session data holding only a pending github OAuth state token.
sessionDataGithubState :: String -> SessionData
sessionDataGithubState githubState =
  SessionData { sessionUser = Nothing, sessionGithubState = Just githubState }
-- | All live sessions, keyed by session key.
data Sessions a = Sessions {unsession::M.Map SessionKey a}
  deriving (Read,Show,Eq)

-- Password salt hashedPassword
-- Only the salted hash is stored, never the plaintext.
data Password = Password { pSalt :: String, pHashed :: String }
  deriving (Read,Show,Eq)

data User = User {
  uUsername :: String,
  uPassword :: Password,
  uEmail    :: String
} deriving (Show,Read)
-- | Common state for all gitit wikis in an application.
-- | Common state for all gitit wikis in an application.
data GititState = GititState {
  sessions       :: Sessions SessionData,
  users          :: M.Map String User,    -- keyed by username
  templatesPath  :: FilePath,
  renderPage     :: PageLayout -> Html -> Handler,
  plugins        :: [Plugin]
}
-- A server part with mutable page-rendering 'Context'.
type ContentTransformer = StateT Context GititServerPart

-- | The three hook points a plugin can install: on the parsed Pandoc AST,
-- on the raw source before parsing, or on the text before committing.
data Plugin = PageTransform (Pandoc -> PluginM Pandoc)
            | PreParseTransform (String -> PluginM String)
            | PreCommitTransform (String -> PluginM String)

-- | Read-only environment supplied to every plugin invocation.
data PluginData = PluginData { pluginConfig    :: Config
                             , pluginUser      :: Maybe User
                             , pluginRequest   :: Request
                             , pluginFileStore :: FileStore
                             }
-- | Run a plugin action: supply its read-only 'PluginData' environment,
-- then thread the mutable 'Context' through, returning result + context.
runPluginM :: PluginM a -> PluginData -> Context -> IO (a, Context)
runPluginM plugin pluginData = runStateT (runReaderT plugin pluginData)
-- | Mutable per-page rendering state threaded through transformers
-- and plugins.
data Context = Context { ctxFile          :: String
                       , ctxLayout        :: PageLayout
                       , ctxCacheable     :: Bool
                       , ctxTOC           :: Bool
                       , ctxBirdTracks    :: Bool
                       , ctxCategories    :: [String]
                       , ctxMeta          :: [(String, String)]
                       }

-- | Monads that carry a 'Context' (both transformer stacks below do).
class (Monad m) => HasContext m where
  getContext    :: m Context
  modifyContext :: (Context -> Context) -> m ()

instance HasContext ContentTransformer where
  getContext    = get
  modifyContext = modify

instance HasContext PluginM where
  getContext    = get
  modifyContext = modify
-- | Abstract representation of page layout (tabs, scripts, etc.)
data PageLayout = PageLayout
  { pgPageName      :: String
  , pgRevision      :: Maybe String
  , pgPrintable     :: Bool
  , pgMessages      :: [String]
  , pgTitle         :: String
  , pgScripts       :: [String]
  , pgShowPageTools :: Bool
  , pgShowSiteNav   :: Bool
  , pgMarkupHelp    :: Maybe String
  , pgTabs          :: [Tab]
  , pgSelectedTab   :: Tab
  , pgLinkToFeed    :: Bool
  }

-- | The tabs that can be shown above a page.
data Tab
  = ViewTab
  | EditTab
  | HistoryTab
  | DiscussTab
  | DiffTab
  deriving (Eq, Show)

-- | The two form fields posted by a reCAPTCHA widget.
data Recaptcha = Recaptcha
  { recaptchaChallengeField :: String
  , recaptchaResponseField  :: String
  } deriving (Read, Show)
-- | The session key is read from the @sid@ cookie.
instance FromData SessionKey where
  fromData = readCookieValue "sid"

-- | Everything a handler might want to read from request parameters,
-- cookies and uploaded files, with defaults filled in (see the
-- 'FromData' instance below).
data Params = Params
  { pUsername     :: String
  , pPassword     :: String
  , pPassword2    :: String
  , pRevision     :: Maybe String
  , pDestination  :: String
  , pForUser      :: Maybe String
  , pSince        :: Maybe UTCTime
  , pRaw          :: String
  , pLimit        :: Int
  , pPatterns     :: [String]
  , pGotoPage     :: String
  , pFileToDelete :: String
  , pEditedText   :: Maybe String
  , pMessages     :: [String]
  , pFrom         :: Maybe String
  , pTo           :: Maybe String
  , pFormat       :: String
  , pSHA1         :: String
  , pLogMsg       :: String
  , pEmail        :: String
  , pFullName     :: String
  , pAccessCode   :: String
  , pWikiname     :: String
  , pPrintable    :: Bool
  , pOverwrite    :: Bool
  , pFilename     :: String
  , pFilePath     :: FilePath
  , pConfirm      :: Bool
  , pSessionKey   :: Maybe SessionKey
  , pRecaptcha    :: Recaptcha
  , pResetCode    :: String
  , pRedirect     :: Maybe Bool
  } deriving Show
-- | Read a @[String]@ from a request URI component: the component is
-- first read as a 'String' and then parsed with 'reads'.
-- NOTE(review): the first 'reads' branch yields @xs@ directly where a
-- @Maybe [String]@ is expected, which forces 'reads' to parse a
-- @Maybe [String]@ literal; upstream gitit wraps the parsed list in
-- 'Just' instead — confirm which behavior is intended here.
instance FromReqURI [String] where
  fromReqURI s =
    case fromReqURI s of
      Nothing -> Nothing
      Just (s' :: String) ->
        case reads s' of
          ((xs, "") : _) -> xs
          _ -> Nothing
-- | Assemble a 'Params' record from the request, substituting a benign
-- default for every field that is absent ("" \/ 'Nothing' \/ 'False',
-- and a limit of 100).
instance FromData Params where
  fromData = do
    un <- look "username" `mplus` return ""
    pw <- look "password" `mplus` return ""
    p2 <- look "password2" `mplus` return ""
    -- an empty revision parameter counts as "no revision"
    rv <- liftM (\s -> if null s then Nothing else Just s) (look "revision")
          `mplus` return Nothing
    fu <- liftM Just (look "forUser") `mplus` return Nothing
    si <- liftM (parseTime defaultTimeLocale "%Y-%m-%d") (look "since")
          `mplus` return Nothing -- YYYY-mm-dd format
    ds <- look "destination" `mplus` return ""
    ra <- look "raw" `mplus` return ""
    lt <- lookRead "limit" `mplus` return 100
    pa <- look "patterns" `mplus` return ""
    gt <- look "gotopage" `mplus` return ""
    ft <- look "filetodelete" `mplus` return ""
    me <- looks "message"
    fm <- liftM Just (look "from") `mplus` return Nothing
    to <- liftM Just (look "to") `mplus` return Nothing
    -- strip carriage returns from the posted text
    et <- liftM (Just . filter (/='\r')) (look "editedText")
          `mplus` return Nothing
    fo <- look "format" `mplus` return ""
    sh <- look "sha1" `mplus` return ""
    lm <- look "logMsg" `mplus` return ""
    em <- look "email" `mplus` return ""
    na <- look "full_name_1" `mplus` return ""
    wn <- look "wikiname" `mplus` return ""
    pr <- (look "printable" >> return True) `mplus` return False
    ow <- liftM (== "yes") (look "overwrite") `mplus` return False
    fileparams <- liftM Just (lookFile "file") `mplus` return Nothing
    -- (path, name) of the uploaded file, if one was posted
    let (fp, fn) = case fileparams of
                     Just (x, y, _) -> (x, y)
                     Nothing -> ("", "")
    ac <- look "accessCode" `mplus` return ""
    cn <- (look "confirm" >> return True) `mplus` return False
    sk <- liftM Just (readCookieValue "sid") `mplus` return Nothing
    rc <- look "recaptcha_challenge_field" `mplus` return ""
    rr <- look "recaptcha_response_field" `mplus` return ""
    rk <- look "reset_code" `mplus` return ""
    rd <- liftM (\r -> case r of
                   "yes" -> Just True
                   "no" -> Just False
                   _ -> Nothing) (look "redirect")
          `mplus` return Nothing
    return Params { pUsername     = un
                  , pPassword     = pw
                  , pPassword2    = p2
                  , pRevision     = rv
                  , pForUser      = fu
                  , pSince        = si
                  , pDestination  = ds
                  , pRaw          = ra
                  , pLimit        = lt
                  , pPatterns     = words pa
                  , pGotoPage     = gt
                  , pFileToDelete = ft
                  , pMessages     = me
                  , pFrom         = fm
                  , pTo           = to
                  , pEditedText   = et
                  , pFormat       = fo
                  , pSHA1         = sh
                  , pLogMsg       = lm
                  , pEmail        = em
                  , pFullName     = na
                  , pWikiname     = wn
                  , pPrintable    = pr
                  , pOverwrite    = ow
                  , pFilename     = fn
                  , pFilePath     = fp
                  , pAccessCode   = ac
                  , pConfirm      = cn
                  , pSessionKey   = sk
                  , pRecaptcha    = Recaptcha
                      { recaptchaChallengeField = rc
                      , recaptchaResponseField  = rr }
                  , pResetCode    = rk
                  , pRedirect     = rd
                  }
-- | The form button that was pressed, if it was one of the recognized
-- commands.
data Command = Command (Maybe String) deriving Show

instance FromData Command where
  fromData = do
    pairs <- lookPairs
    -- keep only parameter names that are known commands
    let pressed = map fst pairs `intersect` ["update", "cancel", "export"]
    return $ case pressed of
      []      -> Command Nothing
      (c : _) -> Command (Just c)
-- | State for a single wiki.
data WikiState = WikiState
  { wikiConfig    :: Config    -- ^ the wiki's configuration
  , wikiFileStore :: FileStore -- ^ the filestore backing the wiki
  }

-- | Server monad with read access to the 'WikiState'.
type GititServerPart = ServerPartT (ReaderT WikiState IO)

-- | A request handler producing a 'Response'.
type Handler = GititServerPart Response
-- | Unescape XML entities: each @&name;@ reference is replaced by its
-- expansion; unknown or unterminated references are kept literally.
fromEntities :: String -> String
fromEntities [] = []
fromEntities ('&':xs) =
  -- the entity name runs up to the first ';'; a missing terminator
  -- yields the empty name, which fails the lookup below
  let (ent, rest) = case break (\c -> c == ';' || isSpace c) xs of
                      (zs, ';':ys) -> (zs, ys)
                      _            -> ("", xs)
  in maybe ('&' : fromEntities xs) (++ fromEntities rest) (lookupEntity ent)
fromEntities (x:xs) = x : fromEntities xs
-- | Github client data for authentication.
data GithubConfig = GithubConfig
  { oAuth2 :: OAuth2     -- ^ OAuth2 client settings (ids, endpoints)
  , org    :: Maybe Text -- ^ optional organisation (presumably restricts
                         --   sign-in to its members; confirm at use site)
  }

-- | Build a 'GithubConfig' from OAuth2 settings and an optional
-- organisation.
githubConfig :: OAuth2 -> Maybe Text -> GithubConfig
githubConfig oa morg = GithubConfig oa morg
| cleichner/gitit | src/Network/Gitit/Types.hs | gpl-2.0 | 18,907 | 0 | 18 | 7,644 | 3,527 | 2,057 | 1,470 | 356 | 3 |
module Language.Expressions where
-- | A statement of the language.
data Stmt
  = Cmd { name   :: Expr       -- ^ command to run
        , args   :: [Expr]     -- ^ its arguments
        , input  :: Maybe Expr -- ^ optional input redirection
        , output :: Maybe Expr -- ^ optional output redirection
        }
  | Assign { var :: String     -- ^ variable being assigned
           , val :: Expr       -- ^ value assigned to it
           }
  | CmdL [Stmt]                -- ^ a list of statements
  | If { pred  :: Pred         -- ^ condition
       , cThen :: Stmt         -- ^ branch taken when the condition holds
       , cElse :: Maybe Stmt   -- ^ optional else branch
       }
  | While { pred :: Pred       -- ^ loop condition
          , cdo  :: Stmt       -- ^ loop body
          }
  deriving (Show)
-- | An expression: a literal string, a variable reference or a list of
-- expressions.
data Expr
  = Str String
  | Var String
  | ExpList [Expr]
  deriving (Eq, Show)

-- | A command argument, possibly a redirection.
data Arg
  = Argument Expr
  | InputRedir Expr
  | OutputRedir Expr
  deriving (Eq, Show)

-- | A boolean predicate built from comparisons.
data Pred
  = Pred Comp
  | Not Pred
  | Or Pred Pred
  | And Pred Pred
  | Parens Pred
  deriving (Eq, Show)

-- | A comparison of two expressions.
data Comp
  = CEQ Expr Expr -- ^ ==
  | CNE Expr Expr -- ^ /=
  | CGE Expr Expr -- ^ >=
  | CGT Expr Expr -- ^ >
  | CLE Expr Expr -- ^ <=
  | CLT Expr Expr -- ^ <
  deriving (Eq, Show)
| tomicm/puh-hash | Language/Expressions.hs | gpl-2.0 | 1,137 | 0 | 9 | 585 | 293 | 175 | 118 | 35 | 0 |
{-# LANGUAGE MultiParamTypeClasses, FlexibleInstances #-}
{- |
Module : ./CSL/Logic_CSL.hs
Description : Instance of class Logic for CSL
Copyright : (c) Dominik Dietrich, DFKI Bremen 2010
License : GPLv2 or higher, see LICENSE.txt
Maintainer : dominik.dietrich@dfki.de
Stability : experimental
Portability : non-portable (imports Logic.Logic)
Instance of class Logic for the CSL logic
Also the instances for Syntax and Category.
-}
module CSL.Logic_CSL where
import ATC.ProofTree ()
import CSL.AS_BASIC_CSL
import CSL.ATC_CSL ()
import CSL.Analysis
import CSL.Morphism
import CSL.Parse_AS_Basic
import CSL.ReduceProve
import CSL.Sign
import CSL.Symbol
import CSL.Tools
import qualified Data.Map as Map
import Data.Monoid
import Logic.Logic
-- | Logic id (lid) for the reduce logic.
data CSL = CSL

instance Show CSL where
  show _ = "EnCL"

instance Language CSL where
  description _ = "EnCL Logic\n"
  -- language_name _ = "EnCL"
-- | Instance of Category for CSL logic.
instance Category Sign Morphism where
  -- identity morphism on a signature
  ide = idMor
  -- domain of a morphism
  dom = source
  -- codomain of a morphism
  cod = target
  -- a morphism is an inclusion iff it renames no operator
  isInclusion = Map.null . operatorMap
  -- composition of morphisms
  composeMorphisms = composeMor
-- | Instance of Sentences for the reduce logic.
instance Sentences CSL CMD Sign Morphism Symbol where
  negation CSL = Just . negateFormula
  -- the set of symbols, including operators
  sym_of CSL = singletonList . symOf
  -- the external symbol map: the internal map only records changes, so
  -- identity mappings for all remaining symbols must be added as well
  symmap_of CSL = getSymbolMap
  -- the name (id) of a symbol
  sym_name CSL = getSymbolName
  -- translate a sentence along a signature morphism by renaming the
  -- operators it uses
  map_sen CSL = mapSentence
  -- there is nothing to leave out
  simplify_sen CSL _ = id
-- | Basic specs combine by concatenating their item lists.
instance Monoid BASIC_SPEC where
  mempty = Basic_spec []
  mappend (Basic_spec l1) (Basic_spec l2) = Basic_spec (l1 ++ l2)
-- | Syntax of CSL logic.
instance Syntax CSL BASIC_SPEC Symbol SYMB_ITEMS SYMB_MAP_ITEMS where
  parse_basic_spec CSL = parseBasicSpec
  parse_symb_items CSL = parseSymbItems
  parse_symb_map_items CSL = parseSymbMapItems
-- | Instance of Logic for the reduce logic.
instance Logic CSL
    ()             -- sublogics
    BASIC_SPEC     -- basic_spec
    CMD            -- sentences are CAS commands
    SYMB_ITEMS     -- symb_items
    SYMB_MAP_ITEMS -- symb_map_items
    Sign           -- sign
    Morphism       -- morphism
    Symbol         -- symbol
    Symbol         -- raw_symbol
    [EXPRESSION]   -- proof_tree
    where
      stability CSL = Experimental
      empty_proof_tree CSL = []
      -- supplied provers
      provers CSL = [reduceProver]
-- | Static analysis for the reduce logic.
instance StaticAnalysis CSL
    BASIC_SPEC     -- basic_spec
    CMD            -- sentence
    SYMB_ITEMS     -- symb_items
    SYMB_MAP_ITEMS -- symb_map_items
    Sign           -- sign
    Morphism       -- morphism
    Symbol         -- symbol
    Symbol         -- raw_symbol
    where
      basic_analysis CSL = Just basicCSLAnalysis
      empty_signature CSL = emptySig
      is_subsig CSL = isSubSigOf
      subsig_inclusion CSL s = return . inclusionMap s
      signature_union CSL = sigUnion
      symbol_to_raw CSL = symbolToRaw
      id_to_raw CSL = idToRaw
      {- matches CSL = Symbol.matches
      stat_symb_items CSL = mkStatSymbItems
      stat_symb_map_items CSL = mkStatSymbMapItem -}
      morphism_union CSL = morphismUnion
      {- induced_from_morphism CSL = inducedFromMorphism
      induced_from_to_morphism CSL = inducedFromToMorphism -}
| gnn/Hets | CSL/Logic_CSL.hs | gpl-2.0 | 4,124 | 0 | 8 | 1,222 | 524 | 295 | 229 | 73 | 0 |
{-| Implementation of command-line functions.
This module holds the common command-line related functions for the
binaries, separated into this module since "Ganeti.Utils" is
used in many other places and this is more IO oriented.
-}
{-
Copyright (C) 2009, 2010, 2011, 2012, 2013 Google Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
-}
module Ganeti.HTools.CLI
( Options(..)
, OptType
, defaultOptions
, Ganeti.HTools.CLI.parseOpts
, parseOptsInner
, parseYesNo
, parseISpecString
, shTemplate
, maybePrintNodes
, maybePrintInsts
, maybeShowWarnings
, printKeys
, printFinal
, setNodeStatus
-- * The options
, oDataFile
, oDiskMoves
, oDiskTemplate
, oSpindleUse
, oDynuFile
, oMonD
, oMonDDataFile
, oEvacMode
, oExInst
, oExTags
, oExecJobs
, oForce
, oFullEvacuation
, oGroup
, oIAllocSrc
, oIgnoreDyn
, oIgnoreNonRedundant
, oInstMoves
, oJobDelay
, genOLuxiSocket
, oLuxiSocket
, oMachineReadable
, oMaxCpu
, oMaxSolLength
, oMinDisk
, oMinGain
, oMinGainLim
, oMinScore
, oNoHeaders
, oNoSimulation
, oNodeSim
, oNodeTags
, oOfflineMaintenance
, oOfflineNode
, oOneStepOnly
, oOutputDir
, oPrintCommands
, oPrintInsts
, oPrintMoves
, oPrintNodes
, oQuiet
, oRapiMaster
, oSaveCluster
, oSelInst
, oShowHelp
, oShowVer
, oShowComp
, oSkipNonRedundant
, oStdSpec
, oTieredSpec
, oVerbose
, oPriority
, genericOpts
) where
import Control.Monad
import Data.Char (toUpper)
import Data.Maybe (fromMaybe)
import System.Console.GetOpt
import System.IO
import Text.Printf (printf)
import qualified Ganeti.HTools.Container as Container
import qualified Ganeti.HTools.Node as Node
import qualified Ganeti.Path as Path
import Ganeti.HTools.Types
import Ganeti.BasicTypes
import Ganeti.Common as Common
import Ganeti.Types
import Ganeti.Utils
-- * Data types
-- | Command line options structure.
data Options = Options
{ optDataFile :: Maybe FilePath -- ^ Path to the cluster data file
, optDiskMoves :: Bool -- ^ Allow disk moves
, optInstMoves :: Bool -- ^ Allow instance moves
, optDiskTemplate :: Maybe DiskTemplate -- ^ Override for the disk template
, optSpindleUse :: Maybe Int -- ^ Override for the spindle usage
, optDynuFile :: Maybe FilePath -- ^ Optional file with dynamic use data
, optIgnoreDynu :: Bool -- ^ Do not use dynamic use data
, optMonD :: Bool -- ^ Query MonDs
, optMonDFile :: Maybe FilePath -- ^ Optional file with data provided
-- ^ by MonDs
, optEvacMode :: Bool -- ^ Enable evacuation mode
, optExInst :: [String] -- ^ Instances to be excluded
, optExTags :: Maybe [String] -- ^ Tags to use for exclusion
, optExecJobs :: Bool -- ^ Execute the commands via Luxi
, optForce :: Bool -- ^ Force the execution
, optFullEvacuation :: Bool -- ^ Fully evacuate nodes to be rebooted
, optGroup :: Maybe GroupID -- ^ The UUID of the group to process
, optIAllocSrc :: Maybe FilePath -- ^ The iallocation spec
, optIgnoreNonRedundant :: Bool -- ^ Ignore non-redundant instances
, optSelInst :: [String] -- ^ Instances to be excluded
, optLuxi :: Maybe FilePath -- ^ Collect data from Luxi
, optJobDelay :: Double -- ^ Delay before executing first job
, optMachineReadable :: Bool -- ^ Output machine-readable format
, optMaster :: String -- ^ Collect data from RAPI
, optMaxLength :: Int -- ^ Stop after this many steps
, optMcpu :: Maybe Double -- ^ Override max cpu ratio for nodes
, optMdsk :: Double -- ^ Max disk usage ratio for nodes
, optMinGain :: Score -- ^ Min gain we aim for in a step
, optMinGainLim :: Score -- ^ Limit below which we apply mingain
, optMinScore :: Score -- ^ The minimum score we aim for
, optNoHeaders :: Bool -- ^ Do not show a header line
, optNoSimulation :: Bool -- ^ Skip the rebalancing dry-run
, optNodeSim :: [String] -- ^ Cluster simulation mode
, optNodeTags :: Maybe [String] -- ^ List of node tags to restrict to
, optOffline :: [String] -- ^ Names of offline nodes
, optOfflineMaintenance :: Bool -- ^ Pretend all instances are offline
, optOneStepOnly :: Bool -- ^ Only do the first step
, optOutPath :: FilePath -- ^ Path to the output directory
, optPrintMoves :: Bool -- ^ Whether to show the instance moves
, optSaveCluster :: Maybe FilePath -- ^ Save cluster state to this file
, optShowCmds :: Maybe FilePath -- ^ Whether to show the command list
, optShowHelp :: Bool -- ^ Just show the help
, optShowComp :: Bool -- ^ Just show the completion info
, optShowInsts :: Bool -- ^ Whether to show the instance map
, optShowNodes :: Maybe [String] -- ^ Whether to show node status
, optShowVer :: Bool -- ^ Just show the program version
, optSkipNonRedundant :: Bool -- ^ Skip nodes with non-redundant instance
, optStdSpec :: Maybe RSpec -- ^ Requested standard specs
, optTestCount :: Maybe Int -- ^ Optional test count override
, optTieredSpec :: Maybe RSpec -- ^ Requested specs for tiered mode
, optReplay :: Maybe String -- ^ Unittests: RNG state
, optVerbose :: Int -- ^ Verbosity level
, optPriority :: Maybe OpSubmitPriority -- ^ OpCode submit priority
} deriving Show
-- | Default values for the command line options.
defaultOptions :: Options
defaultOptions = Options
{ optDataFile = Nothing
, optDiskMoves = True
, optInstMoves = True
, optDiskTemplate = Nothing
, optSpindleUse = Nothing
, optIgnoreDynu = False
, optDynuFile = Nothing
, optMonD = False
, optMonDFile = Nothing
, optEvacMode = False
, optExInst = []
, optExTags = Nothing
, optExecJobs = False
, optForce = False
, optFullEvacuation = False
, optGroup = Nothing
, optIAllocSrc = Nothing
, optIgnoreNonRedundant = False
, optSelInst = []
, optLuxi = Nothing
, optJobDelay = 10
, optMachineReadable = False
, optMaster = ""
, optMaxLength = -1
, optMcpu = Nothing
, optMdsk = defReservedDiskRatio
, optMinGain = 1e-2
, optMinGainLim = 1e-1
, optMinScore = 1e-9
, optNoHeaders = False
, optNoSimulation = False
, optNodeSim = []
, optNodeTags = Nothing
, optSkipNonRedundant = False
, optOffline = []
, optOfflineMaintenance = False
, optOneStepOnly = False
, optOutPath = "."
, optPrintMoves = False
, optSaveCluster = Nothing
, optShowCmds = Nothing
, optShowHelp = False
, optShowComp = False
, optShowInsts = False
, optShowNodes = Nothing
, optShowVer = False
, optStdSpec = Nothing
, optTestCount = Nothing
, optTieredSpec = Nothing
, optReplay = Nothing
, optVerbose = 1
, optPriority = Nothing
}
-- | Abbreviation for the option type.
type OptType = GenericOptType Options
instance StandardOptions Options where
helpRequested = optShowHelp
verRequested = optShowVer
compRequested = optShowComp
requestHelp o = o { optShowHelp = True }
requestVer o = o { optShowVer = True }
requestComp o = o { optShowComp = True }
-- * Helper functions

-- | Parse a comma-separated instance spec of the form
-- @disk,ram,cpu[,spindles]@ into an 'RSpec'; @descr@ is used in error
-- messages. Disk and memory accept unit suffixes via @parseUnit@.
parseISpecString :: String -> String -> Result RSpec
parseISpecString descr inp = do
  let parts = sepSplit ',' inp
      err = Bad ("Invalid " ++ descr ++ " specification: '" ++ inp ++
                 "', expected disk,ram,cpu")
  when (length parts < 3 || length parts > 4) err
  parsed <- mapM (\(parser, field) -> parser field) $
            zip [ annotateResult (descr ++ " specs disk") . parseUnit
                , annotateResult (descr ++ " specs memory") . parseUnit
                , tryRead (descr ++ " specs cpus")
                , tryRead (descr ++ " specs spindles")
                ] parts
  case parsed of
    {- Spindles are optional, so that they are not needed when exclusive
       storage is disabled (they are then ignored, so the actual value
       doesn't matter). We default to 1 so that if someone forgets and
       exclusive storage is enabled, we avoid weird situations. -}
    [dsk, ram, cpu]      -> return $ RSpec cpu ram dsk 1
    [dsk, ram, cpu, spn] -> return $ RSpec cpu ram dsk spn
    _                    -> err
-- | Disk template choices.
optComplDiskTemplate :: OptCompletion
optComplDiskTemplate = OptComplChoices $
map diskTemplateToRaw [minBound..maxBound]
-- * Command line options
oDataFile :: OptType
oDataFile =
(Option "t" ["text-data"]
(ReqArg (\ f o -> Ok o { optDataFile = Just f }) "FILE")
"the cluster data FILE",
OptComplFile)
oDiskMoves :: OptType
oDiskMoves =
(Option "" ["no-disk-moves"]
(NoArg (\ opts -> Ok opts { optDiskMoves = False}))
"disallow disk moves from the list of allowed instance changes,\
\ thus allowing only the 'cheap' failover/migrate operations",
OptComplNone)
oMonD :: OptType
oMonD =
(Option "" ["mond"]
(NoArg (\ opts -> Ok opts {optMonD = True}))
"Query MonDs",
OptComplNone)
oMonDDataFile :: OptType
oMonDDataFile =
(Option "" ["mond-data"]
(ReqArg (\ f opts -> Ok opts { optMonDFile = Just f }) "FILE")
"Import data provided by MonDs from the given FILE",
OptComplFile)
oDiskTemplate :: OptType
oDiskTemplate =
(Option "" ["disk-template"]
(reqWithConversion diskTemplateFromRaw
(\dt opts -> Ok opts { optDiskTemplate = Just dt })
"TEMPLATE") "select the desired disk template",
optComplDiskTemplate)
oSpindleUse :: OptType
oSpindleUse =
(Option "" ["spindle-use"]
(reqWithConversion (tryRead "parsing spindle-use")
(\su opts -> do
when (su < 0) $
fail "Invalid value of the spindle-use (expected >= 0)"
return $ opts { optSpindleUse = Just su })
"SPINDLES") "select how many virtual spindle instances use\
\ [default read from cluster]",
OptComplFloat)
oSelInst :: OptType
oSelInst =
(Option "" ["select-instances"]
(ReqArg (\ f opts -> Ok opts { optSelInst = sepSplit ',' f }) "INSTS")
"only select given instances for any moves",
OptComplManyInstances)
oInstMoves :: OptType
oInstMoves =
(Option "" ["no-instance-moves"]
(NoArg (\ opts -> Ok opts { optInstMoves = False}))
"disallow instance (primary node) moves from the list of allowed,\
\ instance changes, thus allowing only slower, but sometimes\
\ safer, drbd secondary changes",
OptComplNone)
oDynuFile :: OptType
oDynuFile =
(Option "U" ["dynu-file"]
(ReqArg (\ f opts -> Ok opts { optDynuFile = Just f }) "FILE")
"Import dynamic utilisation data from the given FILE",
OptComplFile)
oIgnoreDyn :: OptType
oIgnoreDyn =
(Option "" ["ignore-dynu"]
(NoArg (\ opts -> Ok opts {optIgnoreDynu = True}))
"Ignore any dynamic utilisation information",
OptComplNone)
oEvacMode :: OptType
oEvacMode =
(Option "E" ["evac-mode"]
(NoArg (\opts -> Ok opts { optEvacMode = True }))
"enable evacuation mode, where the algorithm only moves\
\ instances away from offline and drained nodes",
OptComplNone)
oExInst :: OptType
oExInst =
(Option "" ["exclude-instances"]
(ReqArg (\ f opts -> Ok opts { optExInst = sepSplit ',' f }) "INSTS")
"exclude given instances from any moves",
OptComplManyInstances)
oExTags :: OptType
oExTags =
(Option "" ["exclusion-tags"]
(ReqArg (\ f opts -> Ok opts { optExTags = Just $ sepSplit ',' f })
"TAG,...") "Enable instance exclusion based on given tag prefix",
OptComplString)
oExecJobs :: OptType
oExecJobs =
(Option "X" ["exec"]
(NoArg (\ opts -> Ok opts { optExecJobs = True}))
"execute the suggested moves via Luxi (only available when using\
\ it for data gathering)",
OptComplNone)
oForce :: OptType
oForce =
(Option "f" ["force"]
(NoArg (\ opts -> Ok opts {optForce = True}))
"force the execution of this program, even if warnings would\
\ otherwise prevent it",
OptComplNone)
oFullEvacuation :: OptType
oFullEvacuation =
(Option "" ["full-evacuation"]
(NoArg (\ opts -> Ok opts { optFullEvacuation = True}))
"fully evacuate the nodes to be rebooted",
OptComplNone)
oGroup :: OptType
oGroup =
(Option "G" ["group"]
(ReqArg (\ f o -> Ok o { optGroup = Just f }) "ID")
"the target node group (name or UUID)",
OptComplOneGroup)
oIAllocSrc :: OptType
oIAllocSrc =
(Option "I" ["ialloc-src"]
(ReqArg (\ f opts -> Ok opts { optIAllocSrc = Just f }) "FILE")
"Specify an iallocator spec as the cluster data source",
OptComplFile)
oIgnoreNonRedundant :: OptType
oIgnoreNonRedundant =
(Option "" ["ignore-non-redundant"]
(NoArg (\ opts -> Ok opts { optIgnoreNonRedundant = True }))
"Pretend that there are no non-redundant instances in the cluster",
OptComplNone)
oJobDelay :: OptType
oJobDelay =
(Option "" ["job-delay"]
(reqWithConversion (tryRead "job delay")
(\d opts -> Ok opts { optJobDelay = d }) "SECONDS")
"insert this much delay before the execution of repair jobs\
\ to allow the tool to continue processing instances",
OptComplFloat)
genOLuxiSocket :: String -> OptType
genOLuxiSocket defSocket =
(Option "L" ["luxi"]
(OptArg ((\ f opts -> Ok opts { optLuxi = Just f }) .
fromMaybe defSocket) "SOCKET")
("collect data via Luxi, optionally using the given SOCKET path [" ++
defSocket ++ "]"),
OptComplFile)
oLuxiSocket :: IO OptType
oLuxiSocket = liftM genOLuxiSocket Path.defaultLuxiSocket
oMachineReadable :: OptType
oMachineReadable =
(Option "" ["machine-readable"]
(OptArg (\ f opts -> do
flag <- parseYesNo True f
return $ opts { optMachineReadable = flag }) "CHOICE")
"enable machine readable output (pass either 'yes' or 'no' to\
\ explicitly control the flag, or without an argument defaults to\
\ yes)",
optComplYesNo)
oMaxCpu :: OptType
oMaxCpu =
(Option "" ["max-cpu"]
(reqWithConversion (tryRead "parsing max-cpu")
(\mcpu opts -> do
when (mcpu <= 0) $
fail "Invalid value of the max-cpu ratio, expected >0"
return $ opts { optMcpu = Just mcpu }) "RATIO")
"maximum virtual-to-physical cpu ratio for nodes (from 0\
\ upwards) [default read from cluster]",
OptComplFloat)
oMaxSolLength :: OptType
oMaxSolLength =
(Option "l" ["max-length"]
(reqWithConversion (tryRead "max solution length")
(\i opts -> Ok opts { optMaxLength = i }) "N")
"cap the solution at this many balancing or allocation\
\ rounds (useful for very unbalanced clusters or empty\
\ clusters)",
OptComplInteger)
oMinDisk :: OptType
oMinDisk =
(Option "" ["min-disk"]
(reqWithConversion (tryRead "min free disk space")
(\n opts -> Ok opts { optMdsk = n }) "RATIO")
"minimum free disk space for nodes (between 0 and 1) [0]",
OptComplFloat)
oMinGain :: OptType
oMinGain =
(Option "g" ["min-gain"]
(reqWithConversion (tryRead "min gain")
(\g opts -> Ok opts { optMinGain = g }) "DELTA")
"minimum gain to aim for in a balancing step before giving up",
OptComplFloat)
oMinGainLim :: OptType
oMinGainLim =
(Option "" ["min-gain-limit"]
(reqWithConversion (tryRead "min gain limit")
(\g opts -> Ok opts { optMinGainLim = g }) "SCORE")
"minimum cluster score for which we start checking the min-gain",
OptComplFloat)
oMinScore :: OptType
oMinScore =
(Option "e" ["min-score"]
(reqWithConversion (tryRead "min score")
(\e opts -> Ok opts { optMinScore = e }) "EPSILON")
"mininum score to aim for",
OptComplFloat)
oNoHeaders :: OptType
oNoHeaders =
(Option "" ["no-headers"]
(NoArg (\ opts -> Ok opts { optNoHeaders = True }))
"do not show a header line",
OptComplNone)
oNoSimulation :: OptType
oNoSimulation =
(Option "" ["no-simulation"]
(NoArg (\opts -> Ok opts {optNoSimulation = True}))
"do not perform rebalancing simulation",
OptComplNone)
oNodeSim :: OptType
oNodeSim =
(Option "" ["simulate"]
(ReqArg (\ f o -> Ok o { optNodeSim = f:optNodeSim o }) "SPEC")
"simulate an empty cluster, given as\
\ 'alloc_policy,num_nodes,disk,ram,cpu'",
OptComplString)
oNodeTags :: OptType
oNodeTags =
(Option "" ["node-tags"]
(ReqArg (\ f opts -> Ok opts { optNodeTags = Just $ sepSplit ',' f })
"TAG,...") "Restrict to nodes with the given tags",
OptComplString)
oOfflineMaintenance :: OptType
oOfflineMaintenance =
(Option "" ["offline-maintenance"]
(NoArg (\ opts -> Ok opts {optOfflineMaintenance = True}))
"Schedule offline maintenance, i.e., pretend that all instance are\
\ offline.",
OptComplNone)
oOfflineNode :: OptType
oOfflineNode =
(Option "O" ["offline"]
(ReqArg (\ n o -> Ok o { optOffline = n:optOffline o }) "NODE")
"set node as offline",
OptComplOneNode)
oOneStepOnly :: OptType
oOneStepOnly =
(Option "" ["one-step-only"]
(NoArg (\ opts -> Ok opts {optOneStepOnly = True}))
"Only do the first step",
OptComplNone)
oOutputDir :: OptType
oOutputDir =
(Option "d" ["output-dir"]
(ReqArg (\ d opts -> Ok opts { optOutPath = d }) "PATH")
"directory in which to write output files",
OptComplDir)
oPrintCommands :: OptType
oPrintCommands =
(Option "C" ["print-commands"]
(OptArg ((\ f opts -> Ok opts { optShowCmds = Just f }) .
fromMaybe "-")
"FILE")
"print the ganeti command list for reaching the solution,\
\ if an argument is passed then write the commands to a\
\ file named as such",
OptComplNone)
oPrintInsts :: OptType
oPrintInsts =
(Option "" ["print-instances"]
(NoArg (\ opts -> Ok opts { optShowInsts = True }))
"print the final instance map",
OptComplNone)
oPrintMoves :: OptType
oPrintMoves =
(Option "" ["print-moves"]
(NoArg (\ opts -> Ok opts { optPrintMoves = True }))
"print the moves of the instances",
OptComplNone)
oPrintNodes :: OptType
oPrintNodes =
(Option "p" ["print-nodes"]
(OptArg ((\ f opts ->
let (prefix, realf) = case f of
'+':rest -> (["+"], rest)
_ -> ([], f)
splitted = prefix ++ sepSplit ',' realf
in Ok opts { optShowNodes = Just splitted }) .
fromMaybe []) "FIELDS")
"print the final node list",
OptComplNone)
oQuiet :: OptType
oQuiet =
(Option "q" ["quiet"]
(NoArg (\ opts -> Ok opts { optVerbose = optVerbose opts - 1 }))
"decrease the verbosity level",
OptComplNone)
oRapiMaster :: OptType
oRapiMaster =
(Option "m" ["master"]
(ReqArg (\ m opts -> Ok opts { optMaster = m }) "ADDRESS")
"collect data via RAPI at the given ADDRESS",
OptComplHost)
oSaveCluster :: OptType
oSaveCluster =
(Option "S" ["save"]
(ReqArg (\ f opts -> Ok opts { optSaveCluster = Just f }) "FILE")
"Save cluster state at the end of the processing to FILE",
OptComplNone)
oSkipNonRedundant :: OptType
oSkipNonRedundant =
(Option "" ["skip-non-redundant"]
(NoArg (\ opts -> Ok opts { optSkipNonRedundant = True }))
"Skip nodes that host a non-redundant instance",
OptComplNone)
oStdSpec :: OptType
oStdSpec =
(Option "" ["standard-alloc"]
(ReqArg (\ inp opts -> do
tspec <- parseISpecString "standard" inp
return $ opts { optStdSpec = Just tspec } )
"STDSPEC")
"enable standard specs allocation, given as 'disk,ram,cpu'",
OptComplString)
oTieredSpec :: OptType
oTieredSpec =
(Option "" ["tiered-alloc"]
(ReqArg (\ inp opts -> do
tspec <- parseISpecString "tiered" inp
return $ opts { optTieredSpec = Just tspec } )
"TSPEC")
"enable tiered specs allocation, given as 'disk,ram,cpu'",
OptComplString)
oVerbose :: OptType
oVerbose =
(Option "v" ["verbose"]
(NoArg (\ opts -> Ok opts { optVerbose = optVerbose opts + 1 }))
"increase the verbosity level",
OptComplNone)
oPriority :: OptType
oPriority =
(Option "" ["priority"]
(ReqArg (\ inp opts -> do
prio <- parseSubmitPriority inp
Ok opts { optPriority = Just prio }) "PRIO")
"set the priority of submitted jobs",
OptComplChoices (map fmtSubmitPriority [minBound..maxBound]))
-- | Generic options shared by all programs (version, help, completion).
genericOpts :: [GenericOptType Options]
genericOpts = [oShowVer, oShowHelp, oShowComp]

-- * Functions

-- | Wrapper over 'Common.parseOpts' instantiated with our default
-- options.
parseOpts :: [String]        -- ^ The command line arguments
          -> String          -- ^ The program name
          -> [OptType]       -- ^ The supported command line options
          -> [ArgCompletion] -- ^ The supported command line arguments
          -> IO (Options, [String]) -- ^ The resulting options and
                                    -- leftover arguments
parseOpts = Common.parseOpts defaultOptions
-- | A shell script template for autogenerated scripts.
--
-- This is a constant string; the previous version ran it through
-- 'printf' with no arguments, which was pointless indirection and would
-- silently misinterpret any @%@ that ever crept into the template, so
-- the plain literal is used instead.
shTemplate :: String
shTemplate =
  "#!/bin/sh\n\n\
  \# Auto-generated script for executing cluster rebalancing\n\n\
  \# To stop, touch the file /tmp/stop-htools\n\n\
  \set -e\n\n\
  \check() {\n\
  \  if [ -f /tmp/stop-htools ]; then\n\
  \    echo 'Stop requested, exiting'\n\
  \    exit 0\n\
  \  fi\n\
  \}\n\n"
-- | Optionally print the node list: a no-op when no field list was
-- requested; otherwise the listing goes to stderr.
maybePrintNodes :: Maybe [String]       -- ^ The field list
                -> String               -- ^ Informational message
                -> ([String] -> String) -- ^ Function to generate the listing
                -> IO ()
maybePrintNodes mfields msg render =
  case mfields of
    Nothing -> return ()
    Just fields -> do
      hPutStrLn stderr ""
      hPutStrLn stderr (msg ++ " status:")
      hPutStrLn stderr (render fields)
-- | Optionally print the instance list to stderr.
maybePrintInsts :: Bool   -- ^ Whether to print the instance list
                -> String -- ^ Type of the instance map (e.g. initial)
                -> String -- ^ The instance data
                -> IO ()
maybePrintInsts do_print msg instdata = when do_print $ do
  hPutStrLn stderr ""
  hPutStrLn stderr (msg ++ " instance map:")
  hPutStr stderr instdata
-- | Display (on stderr) warning messages from parsing the cluster
-- state; a no-op for an empty message list.
maybeShowWarnings :: [String] -- ^ The warning messages
                  -> IO ()
maybeShowWarnings [] = return ()
maybeShowWarnings fix_msgs = do
  hPutStrLn stderr "Warning: cluster has inconsistent data:"
  hPutStrLn stderr . unlines $ map (printf "  - %s") fix_msgs
-- | Format a list of (key, value) pairs as a shell fragment, one
-- @PREFIX_KEY=value@ assignment per line (keys are upper-cased, values
-- shell-quoted).
printKeys :: String             -- ^ Prefix to printed variables
          -> [(String, String)] -- ^ List of (key, value) pairs to be printed
          -> IO ()
printKeys prefix kvs =
  forM_ kvs $ \(k, v) ->
    printf "%s_%s=%s\n" prefix (map toUpper k) (ensureQuoted v)
-- | Print the final @OK@ marker of machine readable output; a no-op
-- when machine readable output was not requested.
printFinal :: String -- ^ Prefix to printed variable
           -> Bool   -- ^ Whether output should be machine readable;
                     -- note: if not, there is nothing to print
           -> IO ()
printFinal prefix machineReadable
  | machineReadable = printKeys prefix [("OK", "1")] -- must be the last entry
  | otherwise       = return ()
-- | Mark a node offline when its index appears in the given list of
-- offline indices; otherwise return it unchanged.
setNodeOffline :: [Ndx] -> Node.Node -> Node.Node
setNodeOffline offline_indices node
  | Node.idx node `elem` offline_indices = Node.setOffline node True
  | otherwise                            = node
-- | Set node properties based on command line options: offline flags,
-- max-cpu ratio and min-disk ratio. Exits with an error when a name
-- passed via @--offline@ does not resolve to a known node.
setNodeStatus :: Options -> Node.List -> IO Node.List
setNodeStatus opts fixed_nl = do
  let offline_passed = optOffline opts
      all_nodes = Container.elems fixed_nl
      -- resolve the user-supplied names against the known node names
      offline_lkp = map (lookupName (map Node.name all_nodes)) offline_passed
      offline_wrong = filter (not . goodLookupResult) offline_lkp
      offline_names = map lrContent offline_lkp
      offline_indices = map Node.idx $
                        filter (\n -> Node.name n `elem` offline_names)
                               all_nodes
      m_cpu = optMcpu opts
      m_dsk = optMdsk opts
  -- abort when any requested offline node failed to resolve
  unless (null offline_wrong) . exitErr $
    printf "wrong node name(s) set as offline: %s\n"
           (commaJoin (map lrContent offline_wrong))
  -- only override the cpu ratio when one was given on the command line
  let applyMcpu = maybe id (flip Node.setMcpu) m_cpu
  return $ Container.map (setNodeOffline offline_indices .
                          flip Node.setMdsk m_dsk .
                          applyMcpu) fixed_nl
| vladimir-ipatov/ganeti | src/Ganeti/HTools/CLI.hs | gpl-2.0 | 25,572 | 0 | 21 | 6,618 | 5,419 | 3,068 | 2,351 | 618 | 3 |
{-# LANGUAGE TypeSynonymInstances, FlexibleInstances #-}
{- |
Module : $Header$
Description : matching of terms modulo definition expansion
Copyright : (c) Ewaryst Schulz, DFKI Bremen 2010
License : GPLv2 or higher, see LICENSE.txt
Maintainer : ewaryst.schulz@dfki.de
Stability : experimental
Portability : non-portable (see extensions)
Matching of terms modulo constant definition expansion and constraints
-}
module HasCASL.MatchingWithDefinitions where
import HasCASL.Subst
import HasCASL.PrintSubst
import HasCASL.As
import HasCASL.Le
import HasCASL.PrintAs ()
import Common.Id
import Common.ConvertGlobalAnnos()
import Common.Doc
import Common.DocUtils
import qualified Data.Map as Map
import qualified Data.Set as Set
-- For candidate generation and DG navigation
import Data.List
import Data.Maybe
import Static.DGNavigation
import Logic.Grothendieck
import Logic.Coerce
import HasCASL.Logic_HasCASL
{- | We need two classes:
1. A class for lookup definitions and checking for good functions
2. A class for storing the match (substitution plus constraints)
-}
-- | Lookup of constant definitions and classification of terms during
-- matching (class 1 of the two classes described above).
class DefStore a where
    isGood :: a -> Term -> Bool
    isMapable :: a -> (Id, TypeScheme) -> Bool
    getDefinition :: a -> (Id, TypeScheme) -> Maybe Term
    getEnv :: a -> Env
    logMsg :: a -> Doc -> IO ()
-- | Accumulation of a match: substitution entries plus constraints
-- (class 2 of the two classes described above).
class Match a where
    addMatch :: a -> SubstConst -> Term -> a
    addConstraint :: a -> Term -> Term -> a
-- | An environment paired with the set of symbols that may be mapped.
newtype DefinitionStore = DefinitionStore (Env, Set.Set Symbol)
-- | Build a 'DefinitionStore' from an environment and a symbol set.
initialDefStore :: Env -> Set.Set Symbol -> DefinitionStore
initialDefStore e syms = DefinitionStore (e, syms)
instance DefStore DefinitionStore where
    -- every function counts as "good" in this store
    isGood _ _ = True
    isMapable (DefinitionStore (_, syms)) (opid, typ) =
        Set.member (idToOpSymbol opid typ) syms
    getDefinition (DefinitionStore (e, _)) = getOpDefinition e
    getEnv (DefinitionStore (e, _)) = e
    -- logMsg _ _ = return ()
    -- NOTE(review): debug logging appends to a hardcoded path under /tmp;
    -- consider making the log target configurable (or restoring the no-op).
    logMsg def d = let e = getEnv def
                   in appendFile "/tmp/matcher.out" $ (++"\n") $ show
                          $ useGlobalAnnos (globAnnos e) d
-- | The result of a match: a substitution plus a list of residual
-- constraint pairs that could not be resolved by substitution alone.
newtype MatchResult = MatchResult (Subst, [(Term, Term)]) deriving Show
getMatchResult :: MatchResult -> (Subst, [(Term, Term)])
getMatchResult (MatchResult x) = x
-- | A match with an empty substitution and no constraints.
emptyMatchResult :: MatchResult
emptyMatchResult = MatchResult (emptySubstitution, [])
instance PrettyInEnv MatchResult where
    prettyInEnv e (MatchResult (sb, ctrts)) =
        vcat [ prettyInEnv e sb
             , if null ctrts then empty else text "Constraints"
             , prettyTermMapping e ctrts ]
instance Match MatchResult where
    -- A repeated binding for the same constant must agree with the stored
    -- term; a disagreeing binding is downgraded to a constraint.
    addMatch mr@(MatchResult (sb, ctrts)) sc t =
        case lookupContent sb sc of
          Just t' | t == t' -> mr
                  | otherwise ->
                      addConstraint mr t t'
          _ -> MatchResult (addTerm sb sc t, ctrts)
    addConstraint (MatchResult (sb, ctrts)) t1 t2 = MatchResult (sb, (t1, t2):ctrts)
{- | The rules of matching:
f,g are functions
c is a constant
v is a variable
t1, t2 are arbitrary terms
"good" functions are the list-constructor, the solidworks datatype constructors and all other constructors.
f != g
1a. f(x_i) f(y_i) -> match x_i y_i, if f is a "good" function
AddConstraint f(x_i) = f(y_i), otherwise
1b. f(...) g(...) -> AddConstraint f(...) = g(...)
2a. c t2 -> match t1 t2, if c is defined by term t1
AddMatch c t2, if c is mapable
AddConstraint c = t2, otherwise
2b. t1 c -> match t1 t2, if c is defined by term t2
AddConstraint t1 = c, otherwise
3. v t2 -> AddMatch v t2
-}
-- | Match the pattern term @t1@ against the concrete term @t2@ following
-- the numbered rules documented above, threading the match state @mtch@.
-- @Left msg@ is returned only for term forms the matcher does not handle.
match :: (DefStore d, Match a) => d -> a -> Term -> Term -> IO (Either String a)
match def mtch t1 t2 =
    case (t1, t2) of
      -- handle the 'skip-cases' first
      (TypedTerm term _ _ _, _) -> match' term t2 -- logg "typed1" $ match' term t2
      (_, TypedTerm term _ _ _) -> match' t1 term -- logg "typed2" $ match' t1 term
      -- check for clash, handle constraints and definition expansion
      (ApplTerm f1 a1 _, _) ->
          case t2 of
            ApplTerm f2 a2 _
                -- 1a1.
                | f1 == f2 && isGood def f1 -> logg msg1a1 $ match' a1 a2
                -- 1a2., 1b.
                | otherwise -> logg msg1a21b addLocalConstraint
            -- eventually 2b.
            _ -> logg msg2b $ tryDefExpand2
      (TupleTerm l _, _) ->
          case t2 of
            TupleTerm l' _ | length l == length l' ->
                               logg msg1aT $ matchfold mtch $ zip l l'
                           | otherwise ->
                               logg "tclash" $ tupleClash
            -- eventually 2b.
            _ -> logg msg2bT $ tryDefExpand2
      -- 3.: add the mapping v->t2 to output
      (QualVar v, _) -> logg "mapped" $ addMapping v
      -- 2a.: follow the definition of pattern
      (QualOp _ (PolyId opid _ _) typ _ _ _, _) ->
          logg msg2a $ tryDefExpand1 (opid, typ)
      -- all other terms are not expected and accepted here
      _ -> return $ Left "match: unhandled term"
    where match' = match def mtch
          -- The definition expansion application case
          -- (for ApplTerm and TupleTerm) is handled uniformly
          tryDefExpand1 oi = case getTermDef t1 of
                               Just t1' -> match' t1' t2
                               _ | isMapable def oi -> addMapping oi
                                 | otherwise -> addLocalConstraint
          tryDefExpand2 = case getTermDef t2 of
                            Just t2' -> match' t1 t2'
                            _ -> addLocalConstraint
          getTermDef t = getTermOp t >>= getDefinition def
          addLocalConstraint
              -- Do not add constraints for equal terms
              | t1 == t2 = return $ Right mtch
              | otherwise = return $ Right $ addConstraint mtch t1 t2
          addMapping t = return $ Right $ addMatch mtch (toSC t) t2
          -- left-to-right matching of paired subterms, stopping at the
          -- first failure
          matchfold mtch' (x:l) = do
            res <- uncurry (match def mtch') x
            case res of
              Right mtch'' -> matchfold mtch'' l
              err -> return $ err
          matchfold mtch' [] = return $ Right mtch'
          --clash = return $ Left $ "match: Clash for " ++ show (pretty (t1,t2))
          tupleClash = return $ Left $ "match: Clash for tuples "
                       ++ show (pretty (t1,t2))
          -- Logging stuff
          logg s a = do
            let e = getEnv def
            logMsg def $ text "Log" <+> text s
                       $++$ text "t1:" $+$ prettyInEnv e t1 $++$ text "t2:"
                       $+$ prettyInEnv e t2 $++$ text "==============="
            a
          msg1a1 = "1a1: good same function"
          msg1a21b = "1a2, 1b: not good or not same function"
          msg1aT = "1aT: tuple: same tuple"
          msg2bT = "2bT:"
          msg2a = "2a: pattern constant"
          msg2b = "2b: term constant"
------------------------- term tools -------------------------
-- | Extract the operator name and type from a qualified-op term, if any.
getTermOp :: Term -> Maybe (Id, TypeScheme)
getTermOp (QualOp _ (PolyId opid _ _) typ _ _ _) = Just (opid, typ)
getTermOp _ = Nothing
-- | Look up the operator info for a given id and type scheme in an
-- environment, returning the smallest matching entry if one exists.
getOpInfo :: Env -> (Id, TypeScheme) -> Maybe OpInfo
getOpInfo env (opid, typ) = do
    infos <- Map.lookup opid (assumps env)
    let matching = Set.filter hasWantedType infos
    if Set.null matching then Nothing else Just (Set.findMin matching)
  where
    hasWantedType oi = opType oi == typ
-- | Fetch the defining term of an operator, if it has a definition.
getOpDefinition :: Env -> (Id, TypeScheme) -> Maybe Term
getOpDefinition env key = do
    info <- getOpInfo env key
    case opDefn info of
      Definition _ term -> Just term
      _ -> Nothing
-- * Match Candidates
-- ** 1. Injections
-- | A mapping from sources to targets, kept as an association list.
newtype Injection a b = Injection [(a, b)]

instance (Show a, Show b) => Show (Injection a b) where
    show (Injection l) =
        "{" ++ intercalate ", " [ show x ++ " --> " ++ show y | (x, y) <- l ] ++ "}"

-- | Expose the underlying association list.
toList :: Injection a b -> [(a, b)]
toList (Injection l) = l

-- | Prepend a single mapping pair to an injection.
insertMapping :: (a, b) -> Injection a b -> Injection a b
insertMapping p (Injection l) = Injection (p : l)

-- | Concatenate two injections, left pairs first.
combine :: Injection a b -> Injection a b -> Injection a b
combine (Injection l) (Injection r) = Injection (l ++ r)

-- | An injection holding exactly one pair.
singleton :: (a, b) -> Injection a b
singleton p = Injection [p]
-- Build all injections from source list to target list
-- (each source element is paired with a distinct target element; there
-- are no injections when the source is longer than the target).
injections :: [a] -> [b] -> [Injection a b]
injections l l'
  | length l > length l' = []
  | otherwise =
      case l of
        [] -> [Injection []]
        [x] -> [ singleton (x, y) | y <- l' ]
        x:xl -> f [] l'
            where
              -- pair x with each candidate y; the accumulator a holds the
              -- targets already passed over, which remain available for
              -- the recursive call on the remaining sources
              f a (y:b) = f (y:a) b ++
                          (map (insertMapping (x,y)) $ injections xl $ a ++ b)
              f _ [] = []
-- | Combine lists of alternative injections into all their cross products.
crossInjs :: [[Injection a b]] -> [Injection a b]
crossInjs = crosscombine combine
-- Build all combinations from the list of lists: each result combines
-- exactly one element from every inner list; empty if any inner list
-- (or the outer list) is empty.
crosscombine :: (a -> a -> a) -> [[a]] -> [a]
crosscombine _ [] = []
crosscombine f lists
  | any null lists = []
  | otherwise = foldr1 step lists
  where
    step xs acc = [ f x y | x <- xs, y <- acc ]
-- ** 2. Candidates from operators
{- | Candidate generation
a. For a symbol set make a map from Types to 'MatchOp'-lists: 'typePartition'
b. From two such maps make a list of 'Injection's, each injection is a candidate
(a list of 'MatchOp' pairs, wich will be matched using their definitions):
'candidatesAux'
-}
-- | A matching candidate: operator id, its type scheme, and its definiens.
type MatchOp = (Id, TypeScheme, Term)
instance PrettyInEnv MatchOp where
    prettyInEnv e (opid, typ, t) = pretty opid <> text ":" <+> pretty typ
                                   <+> text "=" <+> prettyInEnv e t
-- | The type scheme of a candidate.
candType :: MatchOp -> TypeScheme
candType (_, typ, _) = typ
-- | The definiens of a candidate.
candTerm :: MatchOp -> Term
candTerm (_, _, t) = t
-- *** a.
-- | Group a symbol set into candidates, keyed by type scheme; symbols
-- without a definition or with a filtered-out type are dropped.
typePartition :: ((Id, TypeScheme) -> Maybe Term) -- ^ Definiens extractor
                 -> (TypeScheme -> Bool) -- ^ Filter predicate for types
                 -> Set.Set Symbol -- ^ MatchOp symbol set
                 -> Map.Map TypeScheme [MatchOp]
typePartition df tPred s =
    Map.fromListWith (++) $ mapMaybe g $ Set.toList s
        where f x = let typ = candType x
                    in if tPred typ then Just (typ, [x]) else Nothing
              g x = candFromSymbol df x >>= f
-- | Turn an operator symbol into a candidate, if a definiens exists.
candFromSymbol :: ((Id, TypeScheme) -> Maybe Term) -- ^ Definiens extractor
                  -> Symbol -> Maybe MatchOp
candFromSymbol df (Symbol {symName = opid, symType = OpAsItemType typ}) =
    fmap ((,,) opid typ) $ df (opid, typ)
candFromSymbol _ _ = Nothing
-- *** b.
-- | Build all candidate injections: for every type in the pattern map,
-- pair its pattern operators with the concrete operators of the same
-- type, then take the cross product over all types. It is an error for
-- a pattern type to have no concrete counterpart.
candidatesAux :: Map.Map TypeScheme [MatchOp]
              -> Map.Map TypeScheme [MatchOp]
              -> [Injection MatchOp MatchOp]
-- foldrWithKey replaces the deprecated (and since removed) foldWithKey;
-- both are right folds over the key/value pairs, so behavior is unchanged.
candidatesAux patMap cMap = crossInjs $ Map.foldrWithKey f [] patMap where
    f typ l injL = let l' = Map.findWithDefault err typ cMap
                       err = error $ "candidates: No concrete ops for type: "
                             ++ (show $ pretty typ)
                   in injections l l' : injL
-- | All candidate pairings between two symbol sets, each candidate being
-- a list of (pattern, concrete) operator pairs of matching types.
candidates :: ((Id, TypeScheme) -> Maybe Term) -- ^ Definiens extractor
              -> (TypeScheme -> Bool) -- ^ Filter predicate for types
              -> Set.Set Symbol -> Set.Set Symbol -> [[(MatchOp, MatchOp)]]
candidates df tPred s1 s2 = map toList $ candidatesAux tp1 tp2
    where (tp1, tp2) = (typePartition df tPred s1, typePartition df tPred s2)
-- ** 3. Matching of candidates
-- | Match one candidate: fold 'match' over its (pattern, concrete) pairs,
-- starting from the empty result and stopping at the first failure.
matchCandidate :: (DefStore d) => d -> [(MatchOp, MatchOp)]
                  -> IO (Either String MatchResult)
matchCandidate def = f emptyMatchResult where
    f mtch [] = return $ Right mtch
    f mtch ((pat, c):l) = do
      let e = getEnv def
      logMsg def $ text "Matching Candidate Pattern"
                 $+$ prettyInEnv e pat $+$ text " with" $+$ prettyInEnv e c
      res <- match def mtch (candTerm pat) $ candTerm c
      case res of
        Right mtch' -> f mtch' l
        x -> return x
-- | Try candidates in order until one matches; also returns the
-- candidates that were not tried.
matchCandidates :: (DefStore d) => d -> [[(MatchOp, MatchOp)]]
                   -> IO (Either String MatchResult, [[(MatchOp, MatchOp)]])
matchCandidates _ [] = return (Left "matchCandidates: Out of candidates", [])
matchCandidates def (cand:l) = do
  res <- matchCandidate def cand
  case res of
    Left _ -> matchCandidates def l
    x -> return (x, l)
-- | Compute the match candidates for a pattern spec and a concrete spec,
-- found by name via the development-graph navigator. Returns 'Nothing'
-- if either spec cannot be resolved.
getCandidates :: (DefStore d, DevGraphNavigator nav) =>
                 d -> nav
                 -> (TypeScheme -> Bool) -- ^ Filter predicate for types
                 -> String -- ^ Name of pattern spec
                 -> String -- ^ Name of concrete spec
                 -> Maybe [[(MatchOp, MatchOp)]]
-- Rewritten in the Maybe monad: the original computed both lookups and
-- then used the partial 'fromJust' guarded by 'isJust'; the monadic form
-- expresses the same short-circuiting without partial functions.
getCandidates def dgnav tFilter patN cN = do
    gPat <- findSyms patN
    gCon <- findSyms cN
    let pSyms = Set.map gsymToSym gPat
        cSyms = Set.map gsymToSym gCon
    return $ candidates (getDefinition def) tFilter pSyms cSyms
    where
      -- g s dgnav' = getInLibEnv dgnav' lookupLocalNodeByName s
      findSyms s = fromSearchResult (getNamedSpec s) getLocalSyms dgnav
-- | Utility function for symbol conversion
gsymToSym :: G_symbol -> Symbol
gsymToSym (G_symbol lid sym) = coerceSymbol lid HasCASL sym
{- | The main matching function using specifications:
   The pattern specification is expected to be a parameterized specification
   containing the constants to be mapped in the actual parameter specification.
   The candidates for the matching stem from those operators which have a
   definition and a certain type satisfying the given type predicate.
   A typical such predicate is:
   '(flip elem ["typename1", "typename2", ...]) . show . pretty'
   Only operators with the same type can be matched, and all possible
   combinations of matching candidates are computed.
   With the given Number (> 0) you can constrain the number of candidates to
   try before giving up the matching (0 means all candidates).
   If one candidate gives a correct match result the following candidates are
   not tried and the 'MatchResult' is returned together with the list of non
   tried candidates.
-}
matchSpecs :: (DefStore d, DevGraphNavigator nav) =>
              d -> nav
              -> (TypeScheme -> Bool) -- ^ Filter predicate for types
              -> Int -- ^ Number of candidates to try
              -> String -- ^ Name of pattern spec
              -> String -- ^ Name of concrete spec
              -> IO (Either String MatchResult, [[(MatchOp, MatchOp)]])
matchSpecs def dgnav tFilter n patN cN =
    case getCandidates def dgnav tFilter patN cN of
      Nothing -> return (Left "matchSpecs: specs not found.", [])
      Just cl
          | null cl ->
              return (Left "matchSpecs: no candidates available.", [])
          | otherwise -> do
              -- n > 0 limits the number of candidates tried; n == 0 tries all
              let (cands, remC) = if n > 0 then splitAt n cl else (cl, [])
              (mr, l) <- matchCandidates def cands
              return (mr, l ++ remC)
| nevrenato/Hets_Fork | HasCASL/MatchingWithDefinitions.hs | gpl-2.0 | 15,022 | 0 | 19 | 4,693 | 4,047 | 2,080 | 1,967 | 251 | 13 |
module Game.Kittens.KittenData where
import Game.NetworkedGameEngine
import Data.List
import Data.Maybe
import Control.Concurrent
-- | The kinds of cards in the game; 'ComboCard' carries its combo number.
data Card = DefuseCard
          | NopeCard
          | ExplodingKittenCard
          | AttackCard
          | SkipCard
          | FavorCard
          | ShuffleCard
          | SeeFutureCard
          | ComboCard Int deriving (Eq, Show, Read)
-- | The textual names of all actions a player may submit.
-- A top-level type signature was added; previously it was inferred.
possibleActions :: [String]
possibleActions = [
  "Draw",
  "PlayNopeCard",
  "PlayAttackCard",
  "PlaySkipCard",
  "PlayFavorCard",
  "PlayShuffleCard",
  "PlaySeeFutureCard",
  "PlayComboCard 1",
  "PlayComboCard 2",
  "PlayComboCard 3",
  "PlayComboCard 4",
  "PlayComboCard 5"
  ]
-- | A connected player: network client, current hand, display name, and
-- a channel ('MVar') for receiving that player's input.
data Player = Player {
  plaCli :: Client,
  hand :: [Card],
  name :: String,
  comm :: MVar String} deriving Eq
instance Show Player where
  show p = "Player " ++ name p ++ ": " ++ show (hand p)
-- | An action transforming the game state on behalf of a player.
type PlayerAction = Player -> KittenState -> IO KittenState
-- | Like 'PlayerAction', but may also signal a player.
type PlayerActionSignal = Player -> KittenState -> IO (Maybe Player,KittenState)
-- | The full game state: seated players, the draw deck, and turn order.
data KittenState = KittenState {
  playerList :: [Player],
  deck :: [Card],
  nextPlayers :: [Player]} deriving (Eq,Show)
descriptorName :: String
descriptorName = "ExplodingKittens"

-- | Log a line to stdout, tagged with the game descriptor name.
consoleLog :: String -> IO ()
consoleLog msg = putStrLn ("[" ++ descriptorName ++ "]" ++ msg)
| Lazersmoke/exploding-kittens | src/Game/Kittens/KittenData.hs | gpl-3.0 | 1,260 | 0 | 9 | 288 | 341 | 201 | 140 | 44 | 1 |
module Authentication(
clientId,
clientSecret,
maybeDisplayName,
maybeUserIdent,
clearAuthSession
) where
import Import.NoFoundation
-- import Yesod.Auth.BrowserId
import Yesod.Auth.GoogleEmail2
-- Replace with Google client ID.
clientId :: Text
clientId = "197748900362-pj584nskcninquf5mmgse28fg2tv2c4a.apps.googleusercontent.com"
-- Replace with Google secret ID.
-- NOTE(review): an OAuth client secret is committed in source here; it
-- should be loaded from configuration/environment and this one revoked.
clientSecret :: Text
clientSecret = "SMbJxghU_ci-Fg2OzO1cwDkY"
-- | Session key under which the cached display name is stored.
displayNameString :: Text
displayNameString = "displayName"
-- | Session key under which the cached user identifier is stored.
userIdentString :: Text
userIdentString = "userIdent"
-- | Look up @key@ in the session; on a miss, run the supplied action,
-- store a 'Just' result back into the session, and return it.
cacheSession :: MonadHandler m => Text -> m (Maybe Text) -> m (Maybe Text)
cacheSession key action = lookupSession key >>= maybe onMiss (return . Just)
  where
    onMiss = do
      fresh <- action
      maybe (return ()) (setSession key) fresh
      return fresh
-- | Remove all authentication-related keys from the session.
clearAuthSession :: YesodAuth master => HandlerT master IO ()
clearAuthSession = mapM_ deleteSession
  [ displayNameString
  , userIdentString
  , "credsIdent"
  , "_GOOGLE_ACCESS_TOKEN"
  , "_GOOGLE_CSRF_TOKEN"
  ]
-- clearCreds False
-- | The current user's display name, cached in the session and fetched
-- from Google on a cache miss.
maybeDisplayName :: YesodAuth master => HandlerT master IO (Maybe Text)
maybeDisplayName = cacheSession displayNameString maybeDisplayNameGoogle
-- | Fetch the authenticated user's Google display name, if an access
-- token is available and the profile lookup yields one.
maybeDisplayNameGoogle :: YesodAuth master => HandlerT master IO (Maybe Text)
maybeDisplayNameGoogle = do
  mToken <- getUserAccessToken
  case mToken of
    Nothing -> return Nothing
    Just token -> do
      manager <- fmap authHttpManager getYesod
      mPerson <- getPerson manager token
      return (mPerson >>= personDisplayName)
-- | The credentials identifier stored in the session, if any.
maybeUserIdent :: MonadHandler m => m (Maybe Text)
maybeUserIdent = lookupSession "credsIdent"
    -- cacheSession userIdentString maybeUserIdentGoogle
-- maybeUserIdentGoogle :: YesodAuth master => HandlerT master IO (Maybe Text)
-- maybeUserIdentGoogle = do
-- mtoken <- getUserAccessToken
-- case mtoken of
-- Nothing -> return Nothing
-- Just token -> do app <- getYesod
-- mperson <- getPerson (authHttpManager app) token
-- case mperson of
-- Nothing -> return Nothing
-- Just person ->
-- case personEmails person of
-- (Yesod.Auth.GoogleEmail2.Email val _type : _ ) -> do
-- $(logInfo) $ T.pack $ "AuthId=" ++ show val
-- return $ Just val
-- _ -> return Nothing
| nishiuramakoto/logiku | app/Authentication.hs | gpl-3.0 | 2,673 | 0 | 18 | 741 | 452 | 227 | 225 | 46 | 3 |
module HsPredictor.SQL.Queries where
--standard
import Data.Text (pack)
--3rd party
import Database.Esqueleto ((^.))
import qualified Database.Esqueleto as E
import Database.Persist.Sql (Entity (..), Filter, Key (..),
SelectOpt (LimitTo), selectList,
toSqlKey, (==.), (>.) )
import Database.Persist.Sqlite (runSqlite)
--own
import HsPredictor.SQL.Models.League
import HsPredictor.Types.Types
import HsPredictor.SQL.Raw
-- | Unwrap an esqueleto 'E.Value'.
unVal :: E.Value a -> a
unVal (E.Value a) = a
-- | Extract the value from the first element of an esqueleto result
-- list, defaulting to 0 when the result set is empty.
-- The unused tail binding was replaced by a wildcard (silences
-- -Wunused-matches and documents that only the head is used).
headUnVal :: Num a => [E.Value a] -> a
headUnVal v = case v of
  [] -> 0
  (x:_) -> unVal x
-- | Unwrap an esqueleto 'E.Value' holding an optional number, mapping
-- 'Nothing' to 0.
unValMaybe :: (Num s) => E.Value (Maybe s) -> s
unValMaybe (E.Value inner) = maybe 0 id inner
-- | Fetch all finished results from the database, unwrapping the raw
-- query values into plain (date, home, away, goalsHome, goalsAway) rows.
getResultsAll dbname = do
  r <- getResultsAllQuery dbname
  return $ map extract r
  where
    extract (d, t1, t2, gh, ga) = (unVal d, unVal t1,
                                   unVal t2, unVal gh, unVal ga)
-- | Fetch all upcoming fixtures, unwrapped the same way as results.
getUpcoming dbname = do
  u <- getUpcomingQuery dbname
  return $ map extract u
  where
    extract (d, t1, t2, gh, ga) = (unVal d, unVal t1,
                                   unVal t2, unVal gh, unVal ga)
-- | Fetch the names of all teams stored in the database.
getTeams dbname = do
  rows <- getTeamsQuery dbname
  return $ map (teamsName . entityVal) rows
-- | Fetch a team's [wins, draws, losses] from the stats table.
-- NOTE(review): 'head' is partial — this crashes if the team has no
-- stats row; confirm that callers guarantee the row exists.
getStats dbname team = do
  st <- getStatsQuery team dbname
  return $ extract st
  where
    getStat f = f . entityVal . head
    extract st = [getStat statsTableWin st,
                  getStat statsTableDraw st,
                  getStat statsTableLoss st]
-- | Fetch a single statistic for a team (0 when no row is returned).
getStat dbname team stat = do
  st <- getStatQuery dbname team stat
  return $ headUnVal st
-- | Maximum of a statistic over all teams.
-- NOTE(review): 'head' is partial here as well — presumes a non-empty
-- query result; verify against the query.
getMaxStat dbname stat = do
  st <- getMaxStatQuery dbname stat
  return $ unValMaybe . head $ st
-- | Minimum of a statistic over all teams (same caveat as 'getMaxStat').
getMinStat dbname stat = do
  st <- getMinStatQuery dbname stat
  return $ unValMaybe . head $ st
-- | Per-team [win, draw, loss] stats for every team in the database.
getStatsAll :: DbPath -> IO [(String, [Int])]
getStatsAll dbname = do
  stats <- getStatsAllQuery dbname
  return $ map extract stats
  where
    extract (team, st) = (unVal team, [ statsTableWin . entityVal $ st
                                      , statsTableDraw . entityVal $ st
                                      , statsTableLoss . entityVal $ st])
| jacekm-git/HsPredictor | library/HsPredictor/SQL/Queries.hs | gpl-3.0 | 2,313 | 0 | 12 | 765 | 797 | 417 | 380 | 57 | 2 |
{-# LANGUAGE AllowAmbiguousTypes #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE KindSignatures #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE PatternSynonyms #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE UndecidableInstances #-}
{-# LANGUAGE UnicodeSyntax #-}
{-# LANGUAGE ViewPatterns #-}
module C.Common where
import Data.List
import Data.String
import Data.Monoid
import CLL
import Data.Char
import Data.Function
-- | Join two C expressions with @+@.
(~+~) :: C -> C -> C
x ~+~ y = x <> " + " <> y
-- | Sum a list of C expressions; the empty sum is the literal @0@.
cSum ∷ [C] → C
cSum [] = "0"
cSum xs = foldr1 (~+~) xs
-- | Join two C expressions with @=@ (assignment/equality snippet).
(~=~) :: C -> C -> C
x ~=~ y = x <> " = " <> y
-- | Join two snippets with a single space.
(<+>) ∷ ∀ m. (IsString m, Monoid m) ⇒ m → m → m
x <+> y = x <> " " <> y
-- | Join snippets with @", "@; the empty list yields the empty snippet.
-- Explicit type signatures were added to these four helpers (the only
-- unsignatured top-levels in the module); 'fromString' is used for the
-- literals so the definitions stay polymorphic without relying on
-- OverloadedStrings.
commas :: (IsString a, Semigroup a) => [a] -> a
commas [] = fromString ""
commas xs = foldr1 (\x y -> x <> fromString ", " <> y) xs

-- | Wrap a snippet in parentheses.
parens :: (IsString a, Semigroup a) => a -> a
parens x = fromString "(" <> x <> fromString ")"

-- | Wrap a snippet in braces, the body starting on a fresh line.
braces :: (IsString a, Semigroup a) => a -> a
braces x = fromString "{\n" <> x <> fromString "}"

-- | Build a parenthesised pair @(x,y)@.
pair :: (IsString a, Semigroup a) => a -> a -> a
pair x y = parens $ x <> fromString "," <> y
-- | A C code fragment: the emitted text plus bookkeeping for variable
-- occurrences, declarations, definitions and generated structs.
data C = Code {cCode :: String, cOccs :: [(String,Type)], cDecls :: [String], cDefs :: [String], cStructs :: [(String,C)]}
instance IsString C where
  fromString = lit
-- | A literal snippet with no bookkeeping.
lit ∷ String → C
lit s = Code s [] [] [] []
-- | A variable occurrence (quoted name, recorded in 'cOccs').
var ∷ (String,Type) → C
var (s,t) = Code (quoteVar s) [(s,t)] [] [] []
-- | Declare a variable, discarding its type.
dcl' :: (String,Type) -> C
dcl' = dcl . fst
-- | Declare a name (quoted, recorded in 'cDecls').
dcl :: String -> C
dcl s = Code (quoteVar s) [] [s] [] []
-- | Move a snippet's code into the definition section.
def :: C -> C
def (Code s occs decls defs structs) = Code [] occs decls (s:defs) structs
-- | Reinterpret an expression as another type via pointer punning:
-- @*(typ*)(&expr)@.
cCast ∷ C -> C -> C
cCast typ expr = parens ("*" <> parens (typ <> "*") <> parens ("&" <> expr))
-- | Define a struct with the given (quoted) name and body, recording it
-- in 'cStructs' while the code refers to it as @struct name@.
cStructDef :: String -> C -> C
cStructDef name body = Code ("struct " <> n) [] [] [] [(n,stmt ("struct " <> lit n <> braces body))]
  where n = quoteVar name
-- NOTE(review): '\\\' is defined elsewhere in this module — presumably it
-- removes occurrences whose names are declared in d1; confirm there.
instance Semigroup C where
  (Code c1 v1 d1 f1 s1) <> (Code c2 v2 d2 f2 s2) = Code (c1 <> c2) (v1 <> (v2 \\\ d1)) (d1 <> d2) (f1 <> f2) (s1 <> s2)
instance Monoid C where
  mempty = Code mempty mempty mempty mempty mempty
-- | Mangle an identifier into a C-safe name, character by character.
quoteVar :: String -> String
quoteVar = concatMap quoteChar

-- | Encode one character: underscores double, primes become @_p@,
-- alphanumerics pass through, anything else becomes its decimal code.
quoteChar :: Char -> String
quoteChar c
  | c == '_'     = "__"
  | c == '\''    = "_p"
  | isAlphaNum c = [c]
  | otherwise    = show (ord c)
-- | Terminate a snippet as a statement: append @;@ and a newline.
stmt ∷ C → C
stmt x = x <> lit ";\n"
-- would be nice to use a map for this to avoid nubBy complexity. However we
-- need to remember the order things appeared so that we can sort the
-- declarations in reverse dependency order.
cleanStructs :: [(String,C)] -> [C]
cleanStructs = map snd . nubBy ((==) `on` fst) . reverse
-- | Render a function call @x(arg1, arg2, ...)@.
cCall :: (Semigroup a, IsString a) => a -> [a] -> a
cCall x args = x <> parens (commas args)
| jyp/inox | C/Common.hs | gpl-3.0 | 2,645 | 0 | 12 | 569 | 1,074 | 581 | 493 | 72 | 1 |
{-# LANGUAGE TemplateHaskell
, DeriveTraversable
, DeriveFoldable
, DeriveDataTypeable
, GeneralizedNewtypeDeriving
, OverloadedStrings
#-}
module Types
( module Types
, module Export
) where
import Control.Applicative
import Crypto.Scrypt(Salt)
import Data.Aeson as Export
import Data.ByteString.Lazy (ByteString)
import Data.ByteString.Lazy.Char8 (pack)
import Data.Data (Data, Typeable)
import Data.IxSet (Indexable(..), IxSet, ixFun, ixSet)
import Data.Morgue.AgendaGenerator as Export (AgendaMode(..))
import Data.Morgue.Format as Export (OutputFormat(..))
import qualified Data.Morgue.Options as O
import Data.SafeCopy (base, deriveSafeCopy)
import qualified Data.Text as T
import Data.Text (Text)
import Data.Time
-- = newtypes for names
-- Distinct wrappers keep user, group and file names apart at compile
-- time; all serialisation instances are derived from the inner 'Text'.
-- | a User's name
newtype UserName = UserName { getUName :: Text }
    deriving (Show, Read, Eq, Ord, Data, FromJSON, ToJSON)
-- | a Group's name
newtype GroupName = GroupName { getGName :: Text }
    deriving (Show, Read, Eq, Ord, Data, FromJSON, ToJSON)
-- | a File's name
newtype FileName = FileName { getFName :: Text }
    deriving (Show, Read, Eq, Ord, Data, FromJSON, ToJSON)
-- = type synonyms for less ambiguity
-- NOTE: plain synonyms — readability only, no added type safety.
type FileContent = Text
type ApiKey = Text
type Password = ByteString
-- = To/FromJSON instances for elementary types and things exported by morgue
-- NOTE(review): orphan instance for a library type; the Char8-style
-- 'pack' also truncates code points above 255 — presumably passwords are
-- expected to be ASCII; confirm with callers.
instance FromJSON ByteString where
    parseJSON (String s) = pure . pack $ T.unpack s
    parseJSON _ = mempty
-- | get setting data from a JSON string
instance FromJSON AgendaMode where
    parseJSON (String s) =
        case s of
          "Todo" -> pure Todo
          "Timed" -> pure Timed
          "Both" -> pure Both
          _ -> mempty
    parseJSON _ = mempty
-- | get setting data from a JSON string
instance FromJSON OutputFormat where
    parseJSON (String s) =
        case s of
          "ANSI" -> pure ANSI
          "Plaintext" -> pure Plaintext
          "Pango" -> pure Pango
          _ -> mempty
    parseJSON _ = mempty
-- | parse morgue's 'O.SimpleOptions' type
-- (tries the agenda variant first, falling back to outline options)
instance FromJSON O.SimpleOptions where
    parseJSON (Object v) = (O.SAgendaOptions <$>
        (v .: "mode") <*>
        (v .: "double_spaces") <*>
        (v .:? "tags") <*>
        (v .:? "skip_tags") <*>
        (v .: "num_days") <*>
        (v .: "format")) <|> (O.SOutlineOptions <$>
        (v .: "format"))
    parseJSON _ = mempty
-- = User definition
-- | a user of our system
data User = User { userName :: UserName -- ^ name of the user
                 , apiKey :: ApiKey -- ^ user's API key
                 } deriving (Show, Read, Eq, Ord)
-- | Users are frequently passed to the API
instance ToJSON User where
    toJSON (User n a) = object ["name" .= n, "api_key" .= a]
-- | Users are returned from the API as well
instance FromJSON User where
    parseJSON (Object v) = User <$> v .: "name" <*> v .: "api_key"
    parseJSON _ = mempty
-- == Internal user representation
-- | internal representation of a user; unlike 'User' it also carries the
-- password and the user's files, and is never sent to clients
data InternalUser = InternalUser
    { iUserName :: UserName
    , iApiKey :: ApiKey
    , iPassword :: Password
    , iUserFiles :: [File]
    } deriving (Eq, Ord, Show, Read, Data, Typeable)
-- = User groups
-- | a group of users
data Group = Group { groupName :: GroupName
                   , users :: [UserName]
                   }
-- | Groups are returned from the API as well
instance ToJSON Group where
    toJSON (Group n u) = object ["name" .= n, "members" .= u]
-- == Internal group representation
-- | internal representation of a group, including the group's files
data InternalGroup = InternalGroup
    { iGroupName :: GroupName
    , iUsers :: [UserName]
    , iGroupFiles :: [File] -- maybe we should rather use a Set
    } deriving (Eq, Ord, Show, Read, Data, Typeable)
-- = Files
-- | a file
data File = File
    { fileName :: FileName
    , fileContents :: FileContent
    } deriving (Show, Read, Data, Typeable)
-- | We only need to check for equality by name, since we just look whether
-- files with a certain name exist.
-- NOTE: files with equal names but different contents compare equal.
instance Eq File where
    a == b = fileName a == fileName b
-- | Ordering is name-oriented as well.
instance Ord File where
    a <= b = fileName a <= fileName b
instance ToJSON File where
    toJSON (File n c) = object ["name" .= n, "content" .= c]
instance FromJSON File where
    parseJSON (Object v) = File <$> v .: "name" <*> v .: "content"
    parseJSON _ = mempty
-- == File lists
-- | a list of files belonging to a specific group
data GroupFileList = GroupFileList { gFileListName :: GroupName
                                   , gFileListFiles :: [FileName]
                                   } deriving (Eq, Show)
instance FromJSON GroupFileList where
    parseJSON (Object v) = GroupFileList <$>
        (v .: "group") <*> (v .: "files")
    parseJSON _ = mempty
instance ToJSON GroupFileList where
    toJSON (GroupFileList gName gFiles) =
        object [ "group" .= gName, "files" .= gFiles]
-- | a list of files, possibly owned by up to one user and zero or more groups
data FileList = FileList
    { fUserFiles :: [FileName]
    , fGroupFiles :: [GroupFileList]
    }
instance FromJSON FileList where
    parseJSON (Object v) = FileList <$>
        (v .: "user_files") <*> (v .: "group_files")
    parseJSON _ = mempty
instance ToJSON FileList where
    toJSON (FileList uFiles gFiles) =
        object [ "user_files" .= uFiles
               , "group_files" .= gFiles
               ]
-- = Request types and intermediate data
-- The @...Data@ synonyms pair a parsed request with the database
-- entities it refers to ('Nothing' when a lookup failed).
-- == Pushing files
-- | a request to push a file to a user's filestore
data PushURequest = PushURequest { pURqUser :: User
                                 , pURqFile :: File
                                 }
instance FromJSON PushURequest where
    parseJSON (Object v) = PushURequest <$>
        (v .: "user" >>= parseJSON) <*> (v .: "file" >>= parseJSON)
    parseJSON _ = mempty
type PushUData = (Maybe InternalUser, File)
-- | a request to push a file to a group's filestore
data PushGRequest = PushGRequest { pGRqUser :: User
                                 , pGRqGroup :: GroupName
                                 , pGRqFile :: File
                                 }
instance FromJSON PushGRequest where
    parseJSON (Object v) = PushGRequest <$>
        (v .: "user" >>= parseJSON) <*> (v .: "group") <*>
        (v .: "file" >>= parseJSON)
    parseJSON _ = mempty
type PushGData = (Maybe InternalUser, Maybe InternalGroup, File)
-- == Deleting files belonging to a user
-- | A request to remove a file by name
data DeleteURequest = DeleteURequest { dUUser :: User
                                     , dUFileName :: FileName
                                     }
type DeleteUData = (Maybe InternalUser, FileName)
instance FromJSON DeleteURequest where
    parseJSON (Object v) = DeleteURequest <$>
        (v .: "user" >>= parseJSON) <*> v .: "filename"
    parseJSON _ = mempty
-- | A request to remove a file from a group's filestore
data DeleteGRequest = DeleteGRequest { dGUser :: User
                                     , dGGroupName :: GroupName
                                     , dGFileName :: FileName
                                     }
type DeleteGData = (Maybe InternalUser, Maybe InternalGroup, FileName)
-- | Parse a 'DeleteGRequest' from a JSON object.
-- A catch-all clause was added: without it, non-object input caused a
-- runtime pattern-match failure instead of a clean parse failure, unlike
-- every other 'FromJSON' instance in this module.
instance FromJSON DeleteGRequest where
    parseJSON (Object v) = DeleteGRequest <$>
        (v .: "user" >>= parseJSON) <*> v .: "group" <*> v .: "filename"
    parseJSON _ = mempty
-- == Listing files belonging to a user
-- | A request to list all available files
newtype ListRequest = ListRequest { lRqUser :: User }
    deriving FromJSON
type ListData = (Maybe InternalUser, [InternalGroup])
-- == Pulling files
-- | A request to pull a file from a user's filestore
data PullURequest = PullURequest { fURqUser :: User
                                 , fURqFileName :: FileName
                                 }
instance FromJSON PullURequest where
    parseJSON (Object v) = PullURequest <$>
        (v .: "user" >>= parseJSON) <*> v .: "filename"
    parseJSON _ = mempty
type PullUData = (Maybe InternalUser, FileName)
-- | A request to pull a file from a group's filestore
data PullGRequest = PullGRequest { fGRqUser :: User
                                 , fGRqGroup :: GroupName
                                 , fGRqFileName :: FileName
                                 }
instance FromJSON PullGRequest where
    parseJSON (Object v) = PullGRequest <$>
        (v .: "user" >>= parseJSON) <*> v .: "group" <*> v .: "filename"
    parseJSON _ = mempty
type PullGData = (Maybe InternalUser, Maybe InternalGroup, FileName)
-- == Adding groups
-- | A request to create a group
data GroupNewRequest = GroupNewRequest { gRqUser :: User
                                       , gRqGroupName :: GroupName
                                       }
instance FromJSON GroupNewRequest where
    parseJSON (Object v) = GroupNewRequest <$>
        (v .: "user" >>= parseJSON) <*> v .: "groupname"
    parseJSON _ = mempty
type GroupNewData = (Maybe InternalUser, GroupName, Maybe InternalGroup)
-- == Adding users to groups
-- | A request to add a fellow user to a group
data GroupAddRequest = GroupAddRequest { gaRqUser :: User
                                       , gaRqGroup :: GroupName
                                       , gaRqUserName :: UserName
                                       }
instance FromJSON GroupAddRequest where
    parseJSON (Object v) = GroupAddRequest <$>
        (v .: "user" >>= parseJSON) <*>
        (v .: "group" >>= parseJSON) <*>
        v .: "username"
    parseJSON _ = mempty
type GroupAddData =
    (Maybe InternalUser, Maybe InternalGroup, Maybe InternalUser)
-- == Processing files to an agenda or outline
-- | A request to get an agenda or outline for a set of files
data ProcessingRequest = ProcessingRequest { prRqUser :: User
                                           , prRqOptions :: O.SimpleOptions
                                           , prRqFiles :: FileList
                                           , utcTime :: UTCTime
                                           , timezone :: TimeZone
                                           }
type ProcessingData = (Maybe InternalUser, [InternalGroup], FileList,
                       O.SimpleOptions, UTCTime, TimeZone)
-- | The wire-format variant of 'ProcessingRequest' without the
-- server-supplied time and timezone fields.
data ProcessingRequest' = ProcessingRequest' { prRqUser' :: User
                                             , prRqOptions' :: O.SimpleOptions
                                             , prRqFiles' :: FileList
                                             }
instance FromJSON ProcessingRequest' where
    parseJSON (Object v) = ProcessingRequest' <$>
        (v .: "user" >>= parseJSON) <*>
        (v .: "options" >>= parseJSON) <*>
        v .: "files"
    parseJSON _ = mempty
-- == Patching
-- | Request to patch a user's file
data PatchURequest = PatchURequest { paURqUser :: User
                                   , paURqFile :: FileName
                                   , paURqPatch :: Text
                                   }
type PatchUData = (Maybe InternalUser, FileName, Text)
instance FromJSON PatchURequest where
    parseJSON (Object v) = PatchURequest <$>
        (v .: "user" >>= parseJSON) <*>
        (v .: "filename") <*>
        v .: "patch"
    parseJSON _ = mempty
-- | Request to patch a group's file
data PatchGRequest = PatchGRequest { paGRqUser :: User
                                   , paGRqGroup :: GroupName
                                   , paGRqFile :: FileName
                                   , paGRqPatch :: Text
                                   }
type PatchGData = (Maybe InternalUser, Maybe InternalGroup, FileName, Text)
instance FromJSON PatchGRequest where
    parseJSON (Object v) = PatchGRequest <$>
        (v .: "user" >>= parseJSON) <*>
        (v .: "group") <*>
        (v .: "filename") <*>
        v .: "patch"
    parseJSON _ = mempty
-- == Authentication
-- | A username and a password
data Credentials = Credentials { cName :: UserName
, cPass :: Password
}
instance FromJSON Credentials where
parseJSON (Object v) = Credentials <$> v .: "name" <*> v .: "password"
parseJSON _ = mempty
-- | Request to sign up a new user, as used /internally/: the supplied
-- credentials plus a freshly generated API key and password salt.
data SignUpRequest = SignUpRequest { suRqCreds :: Credentials
                                   , suApiKey :: ApiKey
                                   , suSalt :: Salt
                                   }
-- | Request to sign up a new user, as passed to the API (credentials
-- only; key and salt are generated server-side).
newtype SignUpRequest' = SignUpRequest' { suRqCreds' :: Credentials }
    deriving FromJSON
-- | The request paired with the existing user of the same name, if any
-- ('Just' means the name is already taken).
type SignUpData = (SignUpRequest, Maybe InternalUser)
-- | Request to authenticate as an existing user, as used /internally/.
data SignInRequest = SignInRequest { siRqCreds :: Credentials
                                   , siApiKey :: ApiKey
                                   }
-- | Request to authenticate as an existing user, as passed to the API.
newtype SignInRequest' = SignInRequest' { siRqCreds' :: Credentials }
    deriving FromJSON
-- | Password to check, the stored user (if found), and the API key to
-- hand out on success.
type SignInData = (Password, Maybe InternalUser, ApiKey)
-- = API structure
-- | An error returned by the API.  Serialised as a plain JSON string
-- (see the ToJSON instance below); only 'NoSuchFile' carries a payload.
data ApiError = BadRequest
              | AuthError
              | NoAccess
              | NoSuchFile FileName
              | FileExists
              | NoSuchUser
              | UserExists
              | MemberExists
              | GroupExists
              | NoSuchGroup
              deriving (Show, Eq)
-- 'NoSuchFile' includes the offending file name in the message; every
-- other constructor is rendered via its 'Show' instance.
instance ToJSON ApiError where
    toJSON (NoSuchFile (FileName fName)) =
        String $ T.append "NoSuchFile: " fName
    toJSON a = String . T.pack $ show a
-- | A response as returned by the API: either an error or a result.
newtype ApiResponse r = ApiResponse (Either ApiError r)
    deriving (Eq, Show, Functor, Applicative, Monad, Foldable, Traversable)
-- The JSON object always has both keys; exactly one of "result" and
-- "error" is non-null.
instance ToJSON r => ToJSON (ApiResponse r) where
    toJSON (ApiResponse (Left e)) =
        object ["result" .= Null, "error" .= e]
    toJSON (ApiResponse (Right r)) =
        object ["result" .= r, "error" .= Null]
-- = State datatypes used in the datastore
-- | Our main application state: the complete persisted set of users
-- and groups, each held in an 'IxSet' for multi-key lookup.
data Morgue = Morgue { allUsers :: IxSet InternalUser
                     , allGroups :: IxSet InternalGroup
                     }
-- = 'SafeCopy' instances for all types in need
-- derive 'SafeCopy' instances for our types
-- NOTE: every instance is at version 0 with 'base' migration; bump the
-- version and supply a migration whenever one of these types changes.
$(deriveSafeCopy 0 'base ''O.SimpleOptions)
$(deriveSafeCopy 0 'base ''OutputFormat)
$(deriveSafeCopy 0 'base ''AgendaMode)
$(deriveSafeCopy 0 'base ''Salt)
$(deriveSafeCopy 0 'base ''Morgue)
$(deriveSafeCopy 0 'base ''UserName)
$(deriveSafeCopy 0 'base ''GroupName)
$(deriveSafeCopy 0 'base ''FileName)
$(deriveSafeCopy 0 'base ''Credentials)
$(deriveSafeCopy 0 'base ''InternalUser)
$(deriveSafeCopy 0 'base ''User)
$(deriveSafeCopy 0 'base ''InternalGroup)
$(deriveSafeCopy 0 'base ''Group)
$(deriveSafeCopy 0 'base ''File)
$(deriveSafeCopy 0 'base ''GroupFileList)
$(deriveSafeCopy 0 'base ''FileList)
$(deriveSafeCopy 0 'base ''SignUpRequest)
$(deriveSafeCopy 0 'base ''SignInRequest)
$(deriveSafeCopy 0 'base ''ListRequest)
$(deriveSafeCopy 0 'base ''PushURequest)
$(deriveSafeCopy 0 'base ''PushGRequest)
$(deriveSafeCopy 0 'base ''DeleteURequest)
$(deriveSafeCopy 0 'base ''DeleteGRequest)
$(deriveSafeCopy 0 'base ''PullURequest)
$(deriveSafeCopy 0 'base ''PullGRequest)
$(deriveSafeCopy 0 'base ''GroupNewRequest)
$(deriveSafeCopy 0 'base ''GroupAddRequest)
$(deriveSafeCopy 0 'base ''ProcessingRequest)
$(deriveSafeCopy 0 'base ''PatchURequest)
$(deriveSafeCopy 0 'base ''PatchGRequest)
$(deriveSafeCopy 0 'base ''ApiError)
$(deriveSafeCopy 0 'base ''ApiResponse)
-- = 'Indexable' instances needed by the datastore
-- | we want to index a user by his name, API key and names of his files
instance Indexable InternalUser where
    empty = ixSet
        [ ixFun $ (:[]) . iUserName
        , ixFun $ \us -> [User (iUserName us) (iApiKey us)]
        , ixFun $ map fileName . iUserFiles
        ]
-- | we want to index a group by its name, its members' names, and names of
-- its files
instance Indexable InternalGroup where
    empty = ixSet
        [ ixFun $ (:[]) . iGroupName
        , ixFun iUsers
        , ixFun $ map fileName . iGroupFiles
        ]
| ibabushkin/morgue-server | src/Types.hs | gpl-3.0 | 16,142 | 0 | 15 | 4,934 | 3,838 | 2,113 | 1,725 | 304 | 0 |
module TB.Transformers.Except.Examples (
safeDiv,
safeDiv',
runSafeDiv'
) where
import Control.Applicative
import Control.Monad.IO.Class
import Control.Monad.Trans.Except
import Data.Foldable
import Data.Functor.Identity
import Data.Traversable
-- | Safe division on 'Integral' values: a zero divisor produces a
-- 'Left' error message instead of a runtime exception.
--
-- >>> safeDiv 4 2
-- Right 2
--
-- >>> safeDiv 4 0
-- Left "division by zero"
safeDiv :: (Num a, Integral a) => a -> a -> Either String a
safeDiv x y
  | y == 0    = runExcept (throwE "division by zero")
  | otherwise = runExcept (return (x `div` y))
-- | Safe division inside the 'Except' monad; run it back down to an
-- 'Either' with 'runExcept'.
--
-- >>> runExcept (safeDiv' 4 2)
-- Right 2
--
-- >>> runExcept (safeDiv' 4 0)
-- Left "division by zero"
safeDiv' :: (Num a, Integral a) => a -> a -> Except String a
safeDiv' x y =
  if y == 0
    then throwE "division by zero"
    else return (x `div` y)
-- | Convenience wrapper that immediately runs 'safeDiv''.
runSafeDiv' a b = runExcept (safeDiv' a b)
-- | 'except' lifts a pure 'Either' into 'ExceptT'; run it back down
-- over 'Identity' with 'runExceptT' / 'runIdentity'.
t_except = runIdentity $ runExceptT $ except $ safeDiv 0 0
-- | 'mapExcept' transforms the final 'Either': here the error side is
-- kept as-is and a successful result is incremented.
t_mapExcept :: (Num a, Integral a) => Either String a
t_mapExcept = runExcept (mapExcept (\r -> either (Left . id) (Right . (+1)) r) (safeDiv' 4 2))
-- | 'withExcept' maps only the error value; 'id' leaves it unchanged.
t_withExcept :: (Num a, Integral a) => Except String a
t_withExcept = withExcept id (safeDiv' 4 2)
-- | 'catchE' handles the error thrown by @safeDivIO' 4 0@, replacing
-- the 'String' error with a 'Bool' one.
t_catchE :: (Num a, Integral a) => ExceptT Bool IO a
t_catchE = safeDivIO' 4 0 `catchE` (\_ -> throwE False)
-- | Run the caught computation down to @IO (Either Bool a)@.
t_catchE' :: (Num a, Integral a) => IO (Either Bool a)
t_catchE' = runExceptT t_catchE
-- IO variations: same safe division, but a zero divisor additionally
-- logs "division by zero" to stdout before raising the error.
safeDivIO :: (Num a, Integral a) => a -> a -> IO (Either String a)
safeDivIO x y = runExceptT body
  where
    body
      | y == 0 = do
          liftIO $ putStrLn "division by zero"
          throwE "division by zero"
      | otherwise = return (x `div` y)
-- | Like 'safeDivIO' but leaves the computation in 'ExceptT' so it can
-- be combined with handlers such as 'catchE' (see 't_catchE').
safeDivIO' :: (Num a, Integral a) => a -> a -> ExceptT String IO a
safeDivIO' x y
  | y == 0    = liftIO (putStrLn "division by zero") >> throwE "division by zero"
  | otherwise = return (x `div` y)
-- | other
--
-- >>> runExcept (throwE "error" >> return True)
-- Left "error"
--
-- >>> runExcept (return True >> throwE "error" >> return False)
-- Left "error"
--
-- >>> fmap (+1) $ runExcept (return 1)
-- Right 2
--
-- >>> (+1) <$> runExcept (return 1)
-- Right 2
--
-- >>> foldMap (fmap (+1)) $ runExcept (return [1,2,3])
-- [2,3,4]
--
-- >>> traverse (fmap (+1)) $ runExcept (return [1,2,3])
-- [Right 2,Right 3,Right 4]
--
-- >>> :{
-- do
-- x <- runExcept (return 1)
-- y <- runExcept (return 1)
-- return (x + y)
-- :}
-- Right 2
| adarqui/ToyBox | haskell/ross/transformers/src/TB/Transformers/Except/Examples.hs | gpl-3.0 | 2,446 | 0 | 14 | 570 | 694 | 386 | 308 | 40 | 2 |
-----------------------------------------------------------------------------
-- |
-- Module :
-- Copyright : (c) 2013 Boyun Tang
-- License : BSD-style
-- Maintainer : tangboyun@hotmail.com
-- Stability : experimental
-- Portability : ghc
--
--
--
-----------------------------------------------------------------------------
module MiRanda.Enrich where
import Control.Monad
import Control.Monad.ST
import qualified Data.Vector.Generic as G
import qualified Data.Vector.Generic.Mutable as GM
import qualified Data.Vector as V
import qualified Data.Vector.Unboxed as UV
import System.Random.MWC
-- | Fisher-Yates shuffle of a vector: each index is swapped with a
-- uniformly chosen index at or after it.  Returns the shuffled vector
-- together with the advanced generator seed.
shuffle :: G.Vector v a => Seed -> v a -> (v a,Seed)
shuffle s v =
  runST $ do
    let len = G.length v
        n = len-1
    mv <- GM.new len
    gen <- restore s
    G.unsafeCopy mv v
    forM_ [0..n] $ \idx -> do
      idx' <- uniformR (idx,n) gen
      GM.unsafeSwap mv idx idx'
    s' <- save gen
    v' <- G.unsafeFreeze mv
    return $ (v',s')
{-# INLINE shuffle #-}
-- | Produce @n@ shuffled copies of @vec@: the input is replicated
-- @n@ times into one buffer and each segment is shuffled in place
-- (Fisher-Yates per copy, via the divMod bookkeeping below).  The
-- returned vectors are slices sharing that frozen buffer.
permute :: G.Vector v a => Seed -> Int -> v a -> ([v a],Seed)
permute s n vec =
  let l = G.length vec
      v = G.concat $ replicate n vec
      t = n * l
      end = l - 1
  in runST $ do
    mv <- G.unsafeThaw v
    gen <- restore s
    forM_ [0..t-1] $ \idx -> do
      -- c selects which copy we are in, r is the offset within it.
      let (c,r) = idx `divMod` l
          i = c * l
      i' <- uniformR (r,end) gen
      GM.unsafeSwap mv idx (i+i')
    v' <- G.unsafeFreeze mv
    s' <- save gen
    return (map ((\i -> G.unsafeSlice i l v').(*l)) [0..n-1] ,s')
{-# INLINE permute #-}
-- | One-sided permutation test.  The observed statistic @s@ is the sum
-- of gene scores at the hit indices; the returned p-value is the
-- fraction of @nPerm@ random index sets of the same size whose score
-- sum strictly exceeds @s@.
permEnrich :: (UV.Unbox a,Ord a, Num a) => Seed -> Int -> UV.Vector Int -> UV.Vector a -> (Double,Seed)
permEnrich seed nPerm hitIdxVec geneScoreVec =
  let s = UV.sum $ UV.unsafeBackpermute geneScoreVec hitIdxVec
      nGene = UV.length hitIdxVec
      -- permute all gene indices, then take a prefix of size nGene
      -- from each permutation as one random "hit" set.
      (vs,seed') = permute seed nPerm $ V.enumFromN 0 (UV.length geneScoreVec)
      ivs = map (V.convert . V.force . V.slice 0 nGene) vs
      n' = length . filter (> s) . map (UV.sum . UV.unsafeBackpermute geneScoreVec) $ ivs
      p = fromIntegral n' / fromIntegral nPerm
  in (p,seed')
| tangboyun/miranda | src/MiRanda/Enrich.hs | gpl-3.0 | 2,112 | 1 | 18 | 557 | 800 | 415 | 385 | 50 | 1 |
module Wiretap.Data.Program
( Program
, fromFolder
, empty
, nullInst
, findInstruction
, Method(..)
, Field(..)
, Instruction(..)
, instName
, fieldName
, methodName
) where
-- The programs of wiretap are represented in this module. This module therefore
-- contains all the static information about the program. For dynamic
-- information about the program see (Wiretap.Data.Event).
import Data.Binary
import Data.Binary.Get
import Data.Binary.Put
import GHC.Int
import System.IO (withFile, IOMode (ReadMode))
import qualified Data.IntMap.Strict as IM
import Data.Maybe
import qualified Data.ByteString.Lazy as BL
import System.FilePath
-- | Static program information loaded from a wiretap dump: id-to-name
-- tables plus the folder holding per-thread instruction streams.
data Program = Program
  { _fieldNames :: !(IM.IntMap String) -- ^ field id -> field name
  , _instructionNames :: !(IM.IntMap String) -- ^ instruction id -> printable name
  , _methodNames :: !(IM.IntMap String) -- ^ method id -> method name
  , _instructionFolder :: !(Maybe FilePath) -- ^ folder with per-thread instruction files, if any
  }
-- | Load a 'Program' from a wiretap output folder.  The folder must
-- contain @fields.txt@, @instructions.txt@ and @methods.txt@ (one name
-- per line; ids are assigned in file order starting at 0), plus an
-- @instructions@ subfolder with per-thread instruction streams.
fromFolder :: FilePath -> IO Program
fromFolder folder = do
  fields <- IM.map cleanField <$> intMapFromFile "fields.txt"
  instructions <- intMapFromFile "instructions.txt"
  methods <- intMapFromFile "methods.txt"
  return $ Program
    { _fieldNames = fields
    , _instructionNames = instructions
    , _instructionFolder = Just $ folder </> "instructions"
    , _methodNames = methods
    }
  where
    intMapFromFile f =
      IM.fromAscList . zip [0..] . lines <$> readFile (folder </> f)
    -- Strip the class prefix (everything up to and including the first
    -- '.') and the trailing type signature (from the first ':').
    -- 'drop 1' replaces the original partial 'tail' so a malformed
    -- line without any '.' yields "" instead of crashing.
    cleanField = takeWhile (/= ':') . drop 1 . dropWhile (/= '.')
-- | A 'Program' with no static information: name lookups fall back to
-- placeholders and 'findInstruction' returns 'nullInst' because no
-- instruction folder is configured.
empty :: Program
empty =
  Program { _fieldNames = IM.empty
          , _instructionNames = IM.empty
          , _instructionFolder = Nothing
          , _methodNames = IM.empty
          }
-- | Look up instruction number @oid@ of thread @tid@ by seeking into
-- the thread's instruction file (records are 4 bytes each, matching
-- the 'Binary' encoding of 'Instruction').  Falls back to 'nullInst'
-- when no instruction folder is configured.
findInstruction :: Program -> Int32 -> Int32 -> IO Instruction
findInstruction p tid oid =
  case _instructionFolder p of
    Just folder -> do
      withFile (folder </> show tid) ReadMode $ \h -> do
        bs <- BL.hGetContents h
        -- NOTE(review): ($!) forces the decoded value before the
        -- handle closes; this relies on 'decode' consuming the lazy
        -- ByteString eagerly enough -- confirm.
        return $! (decode $ BL.drop (fromIntegral oid * 4) bs)
    Nothing -> do
      return nullInst
-- | Printable name of an instruction, or a @<missing-ID>@ placeholder
-- when the id is absent from the program's name table.
instName :: Program -> Instruction -> String
instName p i =
  IM.findWithDefault fallback (fromIntegral $ instructionId i) (_instructionNames p)
  where fallback = "<missing-" ++ show (instructionId i) ++ ">"
-- | Printable name of a field, or a @<missing-ID>@ placeholder when
-- the id is absent from the program's name table.
fieldName :: Program -> Field -> String
fieldName p f =
  IM.findWithDefault fallback (fromIntegral $ fieldId f) (_fieldNames p)
  where fallback = "<missing-" ++ show (fieldId f) ++ ">"
-- | Printable name of a method, or a @<missing-ID>@ placeholder when
-- the id is absent from the program's name table.
methodName :: Program -> Method -> String
methodName p m =
  IM.findWithDefault fallback (fromIntegral $ methodId m) (_methodNames p)
  where fallback = "<missing-" ++ show (methodId m) ++ ">"
-- | An instruction is identified by a 32-bit index into its thread's
-- instruction stream.
newtype Instruction =
  Instruction
  { instructionId :: Int32
  } deriving (Show, Eq, Ord)
-- Serialized as a single big-endian Int32 (4 bytes), matching the
-- record size assumed by 'findInstruction'.
instance Binary Instruction where
  put = putInt32be . instructionId
  get = Instruction <$> getInt32be
-- | Sentinel (id -1) used when no instruction information is available.
nullInst :: Instruction
nullInst = Instruction (-1)
-- | A field is identified by its 32-bit id.
newtype Field = Field
  { fieldId :: Int32
  } deriving (Show, Eq, Ord)
-- Serialized as a single big-endian Int32.
instance Binary Field where
  put = putInt32be . fieldId
  get = Field <$> getInt32be
-- | A method is identified by its 32-bit id.
newtype Method = Method
  { methodId :: Int32
  } deriving (Show, Eq, Ord)
-- Serialized as a single big-endian Int32.
instance Binary Method where
  put = putInt32be . methodId
  get = Method <$> getInt32be
| ucla-pls/wiretap-tools | src/Wiretap/Data/Program.hs | gpl-3.0 | 3,220 | 0 | 20 | 750 | 941 | 510 | 431 | 95 | 2 |
module Core.Http where
import Control.Lens ((^.))
import Data.Text (Text)
import qualified Data.Text as Text
import qualified Data.Text.Lazy as L
import Data.Text.Lazy.Encoding (decodeUtf8)
import qualified Network.Wreq as Wreq
-- | A major/minor version pair with unpacked strict fields.
-- NOTE(review): not referenced by 'fetch' in this module; presumably
-- consumed by callers elsewhere -- confirm before removing.
data Version = Version
  { major :: {-# UNPACK #-}!Int
  , minor :: {-# UNPACK #-}!Int
  } deriving (Show)
-- | Perform an HTTP GET against @url@ and return the response body
-- decoded as UTF-8 text.  NOTE(review): no error handling here --
-- non-success responses and invalid UTF-8 surface as exceptions from
-- wreq / 'decodeUtf8'; confirm callers expect that.
fetch :: Text -> IO Text
fetch url = do
  response <- Wreq.get $ Text.unpack url
  return $ L.toStrict $ decodeUtf8 (response ^. Wreq.responseBody)
| inq/agitpunkt | src/Core/Http.hs | agpl-3.0 | 575 | 0 | 11 | 172 | 160 | 94 | 66 | 15 | 1 |
{-# LANGUAGE TypeFamilies #-}
f :: ((~) a b) => a -> b
f = id
| lspitzner/brittany | data/Test341.hs | agpl-3.0 | 62 | 0 | 6 | 16 | 29 | 17 | 12 | -1 | -1 |
{-|
Module : Moonbase.Theme
Copyright : (c) Felix Schnizlein, 2014
License : GPL-2
Maintainer : felix@none.io
Stability : experimental
Portability : POSIX
Some helper function to complete Gtk's functionality
-}
module Moonbase.Util.Gtk
( iosync
, ioasync
, withDisplay
, pangoColor
, pangoSanitize
, moveWindow
, widgetGetSize
, setWindowHints
, setWindowStruts
, getAbsoluteMousePosition
, parseColorGtk
, parseColorTuple
, clamp
, setStyle
, checkDisplay
) where
import Control.Applicative
import Control.Exception
import Control.Monad.Reader
import qualified Graphics.UI.Gtk as Gtk
import qualified Graphics.UI.Gtk.General.CssProvider as Gtk
import qualified Graphics.UI.Gtk.General.StyleContext as Gtk
import Moonbase.Core
import Moonbase.Signal
import Moonbase.Theme
import Moonbase.Util
import Moonbase.Util.StrutProperties
-- | Run an action with the default Gtk display.  When no display can
-- be opened, log via 'fatal' and throw 'CouldNotOpenDisplay'.
withDisplay :: (Gtk.Display -> Moon a) -> Moon a
withDisplay f = do
    disp <- liftIO Gtk.displayGetDefault
    case disp of
         Just d -> f d
         Nothing -> do
             fatal "Could not open display!"
             liftIO $ throw CouldNotOpenDisplay
-- | Wrapper around @liftIO . Gtk.postGUISync@: run an IO action on the
-- Gtk main loop and wait for its result.
iosync :: (MonadIO m) => IO a -> m a
iosync = liftIO . Gtk.postGUISync
-- | Like 'iosync' but fire-and-forget: queue the action on the Gtk
-- main loop without waiting for it to run.
ioasync :: (MonadIO m) => IO () -> m ()
ioasync = liftIO . Gtk.postGUIAsync
-- | Wrap a string in a pango @<span>@ element carrying the given
-- foreground colour (normalised through 'color_').
pangoColor :: String -> String -> String
pangoColor fg str =
  concat ["<span foreground=\"", color_ fg, "\">", str, "</span>"]
-- | Escape the characters that are special in pango markup:
-- @<@, @>@, @"@ and @&@ become their entity references; everything
-- else passes through unchanged.
pangoSanitize :: String -> String
pangoSanitize = concatMap escape
  where
    escape '>'  = "&gt;"
    escape '<'  = "&lt;"
    escape '\"' = "&quot;"
    escape '&'  = "&amp;"
    escape c    = [c]
-- | Move a window to a given vertical position on its monitor; the
-- horizontal position is pinned to the monitor's left edge.
moveWindow :: Gtk.Window     -- ^ Window which should be moved
           -> Position      -- ^ Position the window should be moved to
           -> Gtk.Rectangle -- ^ Size of the monitor
           -> IO ()
moveWindow win pos (Gtk.Rectangle x _ _ h) = do
    (_, height) <- Gtk.windowGetSize win
    Gtk.windowMove win x (offset height)
  where
    offset height = case pos of
      Top            -> 0
      Bottom         -> h - height
      Custom height' -> h - height - height'
-- | Set window geometry hints (an easy wrapper for full-width
-- windows): both the minimum and maximum size are pinned to the
-- monitor width and the window's current height.
setWindowHints :: Gtk.Window    -- ^ Window where geometry hints should be set
               -> Gtk.Rectangle -- ^ Size of the monitor the window is on
               -> IO ()
setWindowHints win (Gtk.Rectangle _ _ w _) = do
    (_, h) <- Gtk.windowGetSize win
    Gtk.windowSetGeometryHints win noWidget (Just (w,h)) (Just (w,h)) Nothing Nothing Nothing
  where
    noWidget = Nothing :: Maybe Gtk.Widget
-- | Generate strutProperties for fully horizontal windows.
-- NOTE(review): the 12 'StrutProperties' fields presumably mirror the
-- X11 _NET_WM_STRUT_PARTIAL layout (left/right/top/bottom extents plus
-- start/end pairs) -- confirm against 'setStrutProperties'.
strutProperties :: Position        -- ^ Window position
                -> Int             -- ^ Window height
                -> Gtk.Rectangle   -- ^ Current monitor rectangle
                -> [Gtk.Rectangle] -- ^ All monitors
                -> StrutProperties
strutProperties pos bh (Gtk.Rectangle mX mY mW mH) monitors = propertize pos sX sW sH
    where sX = mX
          sW = mW - 1
          -- strut height: distance claimed from the top or bottom edge
          -- of the combined screen area.
          sH = case pos of Top -> bh + mY
                           Bottom -> bh + totalH - mY - mH
          totalH = maximum $ map bottomY monitors
          bottomY (Gtk.Rectangle _ y _ h) = y + h
          propertize p x w h = case p of
              Top    -> StrutProperties 0 0 h 0 0 0 0 0 x (x+w) 0 0
              Bottom -> StrutProperties 0 0 0 h 0 0 0 0 0 0 x (x+w)
-- | Set window struts, computed from the window's position/height and
-- the geometry of every monitor on the window's screen.
setWindowStruts :: Gtk.Window -> Position -> Int -> Gtk.Rectangle -> IO ()
setWindowStruts win pos height geo = do
    scr <- Gtk.windowGetScreen win
    moNum <- Gtk.screenGetNMonitors scr
    moGeos <- mapM (Gtk.screenGetMonitorGeometry scr) [0 .. (moNum - 1)]
    setStrutProperties win $ strutProperties pos height geo moGeos
-- | Returns the absolute mouse position.
--
-- If the mouse pointer is not on the screen (which is usually the case
-- with Xinerama and nvidia twinview) this function returns (0,0).
getAbsoluteMousePosition :: Gtk.Screen -> IO (Int, Int)
getAbsoluteMousePosition scr = do
    root <- Gtk.screenGetRootWindow scr
    mPos <- Gtk.drawWindowGetPointer root
    return $ check mPos
  where
    -- Only trust the coordinates when the pointer is on this screen.
    check (Just (True, x, y, _)) = (x,y)
    check _                      = (0,0)
parseColorGtk :: Color -> Gtk.Color
parseColorGtk c = Gtk.Color (imp r) (imp g) (imp b)
where
imp = (*) 257
(r,g,b,_) = parseColorTuple c
-- | Name a widget and attach a CSS provider that styles it: the
-- key/value settings are rendered as @key: value;@ declarations inside
-- a @#name { ... }@ rule and registered with priority 800.
setStyle :: (Gtk.WidgetClass widget) => widget -> String -> [(String, String)] -> IO ()
setStyle w name settings = do
    Gtk.widgetSetName w name
    provider <- Gtk.cssProviderNew
    context  <- Gtk.widgetGetStyleContext w
    Gtk.cssProviderLoadFromString provider css
    Gtk.styleContextAddProvider context provider 800
  where
    declaration (key, value) = key ++ ": " ++ value ++ ";"
    css = "#" ++ name ++ " {" ++ unwords (map declaration settings) ++ "}"
-- | Unwrap an optional display, logging a fatal message and raising
-- 'error' when it is missing.
checkDisplay :: Maybe Gtk.Display -> Moon Gtk.Display
checkDisplay Nothing = fatal "Could not open display" >> error "Could not open display"
checkDisplay (Just disp) = return disp
-- | Width and height of a widget's underlying draw window, or (0,0)
-- when the widget has no draw window (e.g. not yet realized).
widgetGetSize :: (Gtk.WidgetClass o, MonadIO m, Num a) => o -> m (a, a)
widgetGetSize chart = do
  area <- liftIO $ Gtk.widgetGetWindow chart
  case area of
    Nothing -> return (0,0)
    Just win -> do
      w <- liftIO $ Gtk.drawWindowGetWidth win
      h <- liftIO $ Gtk.drawWindowGetHeight win
      return (fromIntegral w, fromIntegral h)
| felixsch/moonbase-ng | src/Moonbase/Util/Gtk.hs | lgpl-2.1 | 5,905 | 0 | 15 | 1,693 | 1,665 | 862 | 803 | 125 | 5 |
module Main where
import Lib
import Test.QuickCheck
-- | Functor identity law: mapping 'id' must leave the value unchanged.
functorIdentity :: (Functor f, Eq (f a)) => f a -> Bool
functorIdentity x = (id <$> x) == x
-- | Functor composition law: mapping @f@ then @g@ must equal mapping
-- @g . f@ in one pass.
functorCompose :: (Eq (f c), Functor f) => (a -> b) -> (b -> c) -> f a -> Bool
functorCompose f g x = fmap (g . f) x == fmap g (fmap f x)
-- Generators for the types under test: each instance draws arbitrary
-- components and wraps them in the corresponding constructor.
instance Arbitrary a => Arbitrary (Identity a) where
  arbitrary = do
    a <- arbitrary
    return (Identity a)
instance Arbitrary a => Arbitrary (Pair a) where
  arbitrary = do
    a <- arbitrary
    a' <- arbitrary
    return (Pair a a')
instance (Arbitrary a, Arbitrary b) => Arbitrary (Two a b) where
  arbitrary = do
    a <- arbitrary
    b <- arbitrary
    return (Two a b)
instance (Arbitrary a, Arbitrary b, Arbitrary c) => Arbitrary (Three a b c) where
  arbitrary = do
    a <- arbitrary
    b <- arbitrary
    c <- arbitrary
    return (Three a b c)
instance (Arbitrary a, Arbitrary b) => Arbitrary (Three' a b) where
  arbitrary = do
    a <- arbitrary
    b <- arbitrary
    b' <- arbitrary
    return (Three' a b b')
instance (Arbitrary a, Arbitrary b) => Arbitrary (Four' a b) where
  arbitrary = do
    a0 <- arbitrary
    a1 <- arbitrary
    a2 <- arbitrary
    b <- arbitrary
    return (Four' a0 a1 a2 b)
-- | QuickCheck the functor identity and composition laws for each of
-- the functors defined in Lib (plus plain lists as a sanity check).
main :: IO ()
main = do
  quickCheck $ \x -> functorIdentity (x :: [Int])
  quickCheck $ \x -> functorCompose (+1) (*2) (x :: [Int])
  quickCheck $ \x -> functorIdentity (x :: Identity Int)
  quickCheck $ \x -> functorCompose (+1) (*2) (x :: (Identity Int))
  quickCheck $ \x -> functorIdentity (x :: Pair Int)
  quickCheck $ \x -> functorCompose (+1) (*2) (x :: (Pair Int))
  quickCheck $ \x -> functorIdentity (x :: Two Int String)
  quickCheck $ \x -> functorCompose (+1) (*2) (x :: (Two Int Double))
  quickCheck $ \x -> functorIdentity (x :: Three Int String Double)
  quickCheck $ \x -> functorCompose (+1) (*2) (x :: (Three Int String Double))
  quickCheck $ \x -> functorIdentity (x :: Three' Int String)
  quickCheck $ \x -> functorCompose (+1) (*2) (x :: (Three' Int Double))
  quickCheck $ \x -> functorIdentity (x :: Four' Int String)
  quickCheck $ \x -> functorCompose (+1) (*2) (x :: (Four' Int Double))
| thewoolleyman/haskellbook | 16/10/maor/test/Spec.hs | unlicense | 2,159 | 0 | 12 | 524 | 1,036 | 532 | 504 | 56 | 1 |
-- This file is needed because cabal check complains about the -main-is option
-- for ghc, and we would like to import the app code into a test file at the
-- same time.
module Main where
import Options.Applicative
import DockerFu(parse, dockerTodo)
main :: IO ()
-- NOTE(review): the real entry point is commented out below; this stub
-- only prints a placeholder.  Re-enable the block comment's code to
-- restore argument parsing via 'execParser'.
main = putStrLn "Hello World" {- do args <- execParser parse
                                    runAll $ dockerTodo args -}
| lancelet/bored-robot | app/docker-fu.hs | apache-2.0 | 366 | 0 | 6 | 75 | 42 | 26 | 16 | 5 | 1 |
module AlecSequences.A270654 (a270654) where
import Data.List (genericIndex, genericLength)
import Helpers.AlecHelper (buildAlecSequence)
import Helpers.Primes (isPrime)
import HelperSequences.A032741 (a032741)
-- | OEIS A270654, 1-indexed: term @i@ of 'a270654_list'.
a270654 :: Integral a => a -> a
a270654 i = genericIndex a270654_list (i - 1)
-- | The sequence itself, grown from seed [0] by 'buildAlecSequence';
-- 'sum' combines the terms picked out by 'matchingIndices'
-- (presumably as the next term -- confirm against buildAlecSequence).
a270654_list :: Integral a => [a]
a270654_list = buildAlecSequence matchingIndices sum [0]
-- | Given the prefix so far, the 1-based indices i whose term a_i
-- makes n + a_i prime, where n is one more than the prefix length.
matchingIndices :: Integral a => [a] -> [a]
matchingIndices list = filter f [1..n - 1] where
  n = 1 + genericLength list
  f i = isPrime $ n + a_i where
    a_i = genericIndex list (i - 1)
| peterokagey/haskellOEIS | src/AlecSequences/A270654.hs | apache-2.0 | 573 | 0 | 11 | 95 | 213 | 115 | 98 | 14 | 1 |
{-# LANGUAGE TemplateHaskell #-}
{-| Unittests for the MonD data parse function -}
{-
Copyright (C) 2013 Google Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-}
module Test.Ganeti.HTools.ExtLoader
( testHTools_ExtLoader
) where
import qualified Test.HUnit as HUnit
import qualified Text.JSON as J
import qualified Ganeti.BasicTypes as BT
import qualified Ganeti.DataCollectors.CPUload as CPUload
import Ganeti.Cpu.Types (CPUavgload(..))
import Ganeti.DataCollectors.Types (DCReport(..))
import Ganeti.HTools.ExtLoader
import Ganeti.JSON
import Test.Ganeti.TestCommon
import Test.Ganeti.TestHelper
{-# ANN module "HLint: ignore Use camelCase" #-}
-- | Test a MonD data file: parse the fixture and check it matches the
-- two hand-built CPUload reports (one per node) almost exactly.
case_parseMonDData :: HUnit.Assertion
case_parseMonDData = do
  let mond_data_file = "mond-data.txt"
      n1 = "node1.example.com"
      n2 = "node2.example.com"
      t1 = 1379507272000000000
      t2 = 1379507280000000000
      cpu_number1 = 4
      cpu_number2 = 2
      cpus1 = [ 0.04108859597350646,0.04456554528165781
              , 0.06203619909502262,0.05595448881893895]
      cpus2 = [0.004155409618511363,0.0034586452012150787]
      cpu_total1 = 0.203643517607712
      cpu_total2 = 0.007614031289927129
      -- expected reports, one CPUload DCReport per node
      dcr1 = DCReport CPUload.dcName CPUload.dcVersion CPUload.dcFormatVersion
               t1 CPUload.dcCategory CPUload.dcKind
               (J.showJSON (CPUavgload cpu_number1 cpus1 cpu_total1))
      dcr2 = DCReport CPUload.dcName CPUload.dcVersion CPUload.dcFormatVersion
               t2 CPUload.dcCategory CPUload.dcKind
               (J.showJSON (CPUavgload cpu_number2 cpus2 cpu_total2))
      expected_list = [(n1,[dcr1]),(n2,[dcr2])]
  ans <- readTestData mond_data_file
  case pMonDData ans of
    BT.Ok l -> HUnit.assertBool ("Parsing " ++ mond_data_file ++ " failed")
               (isAlEqual expected_list l)
    BT.Bad s -> HUnit.assertFailure $ "Parsing failed: " ++ s
-- | Check two association lists of reports for pairwise approximate
-- equality.  Lists of different lengths are never equal: the previous
-- zipWith-only version truncated to the shorter list and so reported
-- unequal-length inputs as equal.
isAlEqual :: [(String, [DCReport])] -> [(String, [DCReport])] -> Bool
isAlEqual a b = length a == length b && and (zipWith tupleIsAlEqual a b)
-- | Check a (name, reports) pair for approximate equality: the names
-- must match exactly and the report lists must have the same length
-- and be pairwise almost-equal (the length check guards against
-- zipWith silently dropping trailing reports).
tupleIsAlEqual :: (String, [DCReport]) -> (String, [DCReport]) -> Bool
tupleIsAlEqual (na, a) (nb, b) =
  na == nb
  && length a == length b
  && and (zipWith dcReportIsAlmostEqual a b)
-- | Check if two DCReports are almost equal: all metadata fields must
-- match exactly, the reports must come from the CPUload data collector
-- (any other collector compares as unequal), and the payloads must be
-- numerically close.
dcReportIsAlmostEqual :: DCReport -> DCReport -> Bool
dcReportIsAlmostEqual a b =
  dcReportName a == dcReportName b
  && dcReportVersion a == dcReportVersion b
  && dcReportFormatVersion a == dcReportFormatVersion b
  && dcReportTimestamp a == dcReportTimestamp b
  && dcReportCategory a == dcReportCategory b
  && dcReportKind a == dcReportKind b
  && (CPUload.dcName == dcReportName a
      && cpuavgloadDataIsAlmostEq (dcReportData a) (dcReportData b))
-- | Decode both JSON payloads as 'CPUavgload' and compare them; a
-- payload that fails to decode is never considered equal.
cpuavgloadDataIsAlmostEq :: J.JSValue -> J.JSValue -> Bool
cpuavgloadDataIsAlmostEq a b =
  case fromJVal a :: BT.Result CPUavgload of
    BT.Bad _ -> False
    BT.Ok cavA ->
      case fromJVal b :: BT.Result CPUavgload of
        BT.Bad _ -> False
        BT.Ok cavB -> compareCPUavgload cavA cavB
-- | Compare two CPUavgload values: equal CPU counts, totals within a
-- relative error of 1e-9, and per-CPU load lists of equal length whose
-- entries are pairwise within the same tolerance.
compareCPUavgload :: CPUavgload -> CPUavgload -> Bool
compareCPUavgload a b =
  cavCpuNumber a == cavCpuNumber b
    && almostEq (cavCpuTotal a) (cavCpuTotal b)
    && length (cavCpus a) == length (cavCpus b)
    && and (zipWith almostEq (cavCpus a) (cavCpus b))
  where
    almostEq x y = relativeError x y <= 1e-9
-- Register the test cases above with the Ganeti test framework.
testSuite "HTools/ExtLoader"
            [ 'case_parseMonDData
            ]
| apyrgio/ganeti | test/hs/Test/Ganeti/HTools/ExtLoader.hs | bsd-2-clause | 4,877 | 0 | 17 | 953 | 948 | 503 | 445 | 75 | 3 |
-----------------------------------------------------------------------------
-- |
-- Module : Text.ParserCombinators.Parsec
-- Copyright : (c) Daan Leijen 1999-2001
-- License : BSD-style (see the file libraries/parsec/LICENSE)
--
-- Maintainer : Antoine Latter <aslatter@gmail.com>
-- Stability : provisional
-- Portability : portable
--
-- Parsec, the Fast Monadic Parser combinator library, see
-- <http://www.cs.uu.nl/people/daan/parsec.html>.
--
-- Inspired by:
--
-- * Graham Hutton and Erik Meijer:
-- Monadic Parser Combinators.
-- Technical report NOTTCS-TR-96-4.
-- Department of Computer Science, University of Nottingham, 1996.
-- <http://www.cs.nott.ac.uk/~gmh/monparsing.ps>
--
-- * Andrew Partridge, David Wright:
-- Predictive parser combinators need four values to report errors.
-- Journal of Functional Programming 6(2): 355-364, 1996
--
-- This helper module exports elements from the basic libraries.
--
-----------------------------------------------------------------------------
module Text.ParserCombinators.Parsec
( -- complete modules
module Text.ParserCombinators.Parsec.Prim
, module Text.ParserCombinators.Parsec.Combinator
, module Text.ParserCombinators.Parsec.Char
-- module Text.ParserCombinators.Parsec.Error
, ParseError
, errorPos
-- module Text.ParserCombinators.Parsec.Pos
, SourcePos
, SourceName, Line, Column
, sourceName, sourceLine, sourceColumn
, incSourceLine, incSourceColumn
, setSourceLine, setSourceColumn, setSourceName
) where
import Text.ParserCombinators.Parsec.Pos -- textual positions
import Text.ParserCombinators.Parsec.Error -- parse errors
import Text.ParserCombinators.Parsec.Prim -- primitive combinators
import Text.ParserCombinators.Parsec.Combinator -- derived combinators
import Text.ParserCombinators.Parsec.Char -- character parsers
| aslatter/parsec2 | Text/ParserCombinators/Parsec.hs | bsd-2-clause | 2,160 | 0 | 5 | 544 | 146 | 113 | 33 | 17 | 0 |
{-# OPTIONS -fglasgow-exts #-}
-----------------------------------------------------------------------------
{-| Module : QTranslator_h.hs
Copyright : (c) David Harley 2010
Project : qtHaskell
Version : 1.1.4
Modified : 2010-09-02 17:02:31
Warning : this file is machine generated - do not modify.
--}
-----------------------------------------------------------------------------
module Qtc.Core.QTranslator_h (
Qqtranslate_h(..)
) where
import Qtc.Enums.Base
import Qtc.Classes.Base
import Qtc.Classes.Qccs_h
import Qtc.Classes.Core_h
import Qtc.ClassTypes.Core
import Qth.ClassTypes.Core
import Foreign.Marshal.Array
instance QunSetUserMethod (QTranslator ()) where
unSetUserMethod qobj evid
= withBoolResult $
withObjectPtr qobj $ \cobj_qobj ->
qtc_QTranslator_unSetUserMethod cobj_qobj (toCInt 0) (toCInt evid)
foreign import ccall "qtc_QTranslator_unSetUserMethod" qtc_QTranslator_unSetUserMethod :: Ptr (TQTranslator a) -> CInt -> CInt -> IO (CBool)
instance QunSetUserMethod (QTranslatorSc a) where
unSetUserMethod qobj evid
= withBoolResult $
withObjectPtr qobj $ \cobj_qobj ->
qtc_QTranslator_unSetUserMethod cobj_qobj (toCInt 0) (toCInt evid)
instance QunSetUserMethodVariant (QTranslator ()) where
unSetUserMethodVariant qobj evid
= withBoolResult $
withObjectPtr qobj $ \cobj_qobj ->
qtc_QTranslator_unSetUserMethod cobj_qobj (toCInt 1) (toCInt evid)
instance QunSetUserMethodVariant (QTranslatorSc a) where
unSetUserMethodVariant qobj evid
= withBoolResult $
withObjectPtr qobj $ \cobj_qobj ->
qtc_QTranslator_unSetUserMethod cobj_qobj (toCInt 1) (toCInt evid)
instance QunSetUserMethodVariantList (QTranslator ()) where
unSetUserMethodVariantList qobj evid
= withBoolResult $
withObjectPtr qobj $ \cobj_qobj ->
qtc_QTranslator_unSetUserMethod cobj_qobj (toCInt 2) (toCInt evid)
instance QunSetUserMethodVariantList (QTranslatorSc a) where
unSetUserMethodVariantList qobj evid
= withBoolResult $
withObjectPtr qobj $ \cobj_qobj ->
qtc_QTranslator_unSetUserMethod cobj_qobj (toCInt 2) (toCInt evid)
instance QsetUserMethod (QTranslator ()) (QTranslator x0 -> IO ()) where
setUserMethod _eobj _eid _handler
= do
funptr <- wrapSetUserMethod_QTranslator setHandlerWrapper
stptr <- newStablePtr (Wrap _handler)
funptr_d <- wrapSetUserMethod_QTranslator_d setHandlerWrapper_d
withObjectPtr _eobj $ \cobj_eobj ->
qtc_QTranslator_setUserMethod cobj_eobj (toCInt _eid) (toCFunPtr funptr) (castStablePtrToPtr stptr) (toCFunPtr funptr_d)
return ()
where
setHandlerWrapper :: Ptr (TQTranslator x0) -> IO ()
setHandlerWrapper x0
= do
x0obj <- objectFromPtr_nf x0
if (objectIsNull x0obj)
then return ()
else _handler x0obj
setHandlerWrapper_d :: Ptr fun -> Ptr () -> Ptr fun_d -> IO ()
setHandlerWrapper_d funptr stptr funptr_d
= do when (stptr/=ptrNull)
(freeStablePtr (castPtrToStablePtr stptr))
when (funptr/=ptrNull)
(freeHaskellFunPtr (castPtrToFunPtr funptr))
when (funptr_d/=ptrNull)
(freeHaskellFunPtr (castPtrToFunPtr funptr_d))
return ()
foreign import ccall "qtc_QTranslator_setUserMethod" qtc_QTranslator_setUserMethod :: Ptr (TQTranslator a) -> CInt -> Ptr (Ptr (TQTranslator x0) -> IO ()) -> Ptr () -> Ptr (Ptr fun -> Ptr state -> Ptr fun_d -> IO ()) -> IO ()
foreign import ccall "wrapper" wrapSetUserMethod_QTranslator :: (Ptr (TQTranslator x0) -> IO ()) -> IO (FunPtr (Ptr (TQTranslator x0) -> IO ()))
foreign import ccall "wrapper" wrapSetUserMethod_QTranslator_d :: (Ptr fun -> Ptr state -> Ptr fun_d -> IO ()) -> IO (FunPtr (Ptr fun -> Ptr state -> Ptr fun_d -> IO ()))
instance QsetUserMethod (QTranslatorSc a) (QTranslator x0 -> IO ()) where
setUserMethod _eobj _eid _handler
= do
funptr <- wrapSetUserMethod_QTranslator setHandlerWrapper
stptr <- newStablePtr (Wrap _handler)
funptr_d <- wrapSetUserMethod_QTranslator_d setHandlerWrapper_d
withObjectPtr _eobj $ \cobj_eobj ->
qtc_QTranslator_setUserMethod cobj_eobj (toCInt _eid) (toCFunPtr funptr) (castStablePtrToPtr stptr) (toCFunPtr funptr_d)
return ()
where
setHandlerWrapper :: Ptr (TQTranslator x0) -> IO ()
setHandlerWrapper x0
= do
x0obj <- objectFromPtr_nf x0
if (objectIsNull x0obj)
then return ()
else _handler x0obj
setHandlerWrapper_d :: Ptr fun -> Ptr () -> Ptr fun_d -> IO ()
setHandlerWrapper_d funptr stptr funptr_d
= do when (stptr/=ptrNull)
(freeStablePtr (castPtrToStablePtr stptr))
when (funptr/=ptrNull)
(freeHaskellFunPtr (castPtrToFunPtr funptr))
when (funptr_d/=ptrNull)
(freeHaskellFunPtr (castPtrToFunPtr funptr_d))
return ()
instance QsetUserMethod (QTranslator ()) (QTranslator x0 -> QVariant () -> IO (QVariant ())) where
setUserMethod _eobj _eid _handler
= do
funptr <- wrapSetUserMethodVariant_QTranslator setHandlerWrapper
stptr <- newStablePtr (Wrap _handler)
funptr_d <- wrapSetUserMethodVariant_QTranslator_d setHandlerWrapper_d
withObjectPtr _eobj $ \cobj_eobj ->
qtc_QTranslator_setUserMethodVariant cobj_eobj (toCInt _eid) (toCFunPtr funptr) (castStablePtrToPtr stptr) (toCFunPtr funptr_d)
return ()
where
setHandlerWrapper :: Ptr (TQTranslator x0) -> Ptr (TQVariant ()) -> IO (Ptr (TQVariant ()))
setHandlerWrapper x0 x1
= do
x0obj <- objectFromPtr_nf x0
x1obj <- objectFromPtr_nf x1
rv <- if (objectIsNull x0obj)
then return $ objectCast x0obj
else _handler x0obj x1obj
withObjectPtr rv $ \cobj_rv -> return cobj_rv
setHandlerWrapper_d :: Ptr fun -> Ptr () -> Ptr fun_d -> IO ()
setHandlerWrapper_d funptr stptr funptr_d
= do when (stptr/=ptrNull)
(freeStablePtr (castPtrToStablePtr stptr))
when (funptr/=ptrNull)
(freeHaskellFunPtr (castPtrToFunPtr funptr))
when (funptr_d/=ptrNull)
(freeHaskellFunPtr (castPtrToFunPtr funptr_d))
return ()
foreign import ccall "qtc_QTranslator_setUserMethodVariant" qtc_QTranslator_setUserMethodVariant :: Ptr (TQTranslator a) -> CInt -> Ptr (Ptr (TQTranslator x0) -> Ptr (TQVariant ()) -> IO (Ptr (TQVariant ()))) -> Ptr () -> Ptr (Ptr fun -> Ptr state -> Ptr fun_d -> IO ()) -> IO ()
foreign import ccall "wrapper" wrapSetUserMethodVariant_QTranslator :: (Ptr (TQTranslator x0) -> Ptr (TQVariant ()) -> IO (Ptr (TQVariant ()))) -> IO (FunPtr (Ptr (TQTranslator x0) -> Ptr (TQVariant ()) -> IO (Ptr (TQVariant ()))))
foreign import ccall "wrapper" wrapSetUserMethodVariant_QTranslator_d :: (Ptr fun -> Ptr state -> Ptr fun_d -> IO ()) -> IO (FunPtr (Ptr fun -> Ptr state -> Ptr fun_d -> IO ()))
instance QsetUserMethod (QTranslatorSc a) (QTranslator x0 -> QVariant () -> IO (QVariant ())) where
setUserMethod _eobj _eid _handler
= do
funptr <- wrapSetUserMethodVariant_QTranslator setHandlerWrapper
stptr <- newStablePtr (Wrap _handler)
funptr_d <- wrapSetUserMethodVariant_QTranslator_d setHandlerWrapper_d
withObjectPtr _eobj $ \cobj_eobj ->
qtc_QTranslator_setUserMethodVariant cobj_eobj (toCInt _eid) (toCFunPtr funptr) (castStablePtrToPtr stptr) (toCFunPtr funptr_d)
return ()
where
setHandlerWrapper :: Ptr (TQTranslator x0) -> Ptr (TQVariant ()) -> IO (Ptr (TQVariant ()))
setHandlerWrapper x0 x1
= do
x0obj <- objectFromPtr_nf x0
x1obj <- objectFromPtr_nf x1
rv <- if (objectIsNull x0obj)
then return $ objectCast x0obj
else _handler x0obj x1obj
withObjectPtr rv $ \cobj_rv -> return cobj_rv
setHandlerWrapper_d :: Ptr fun -> Ptr () -> Ptr fun_d -> IO ()
setHandlerWrapper_d funptr stptr funptr_d
= do when (stptr/=ptrNull)
(freeStablePtr (castPtrToStablePtr stptr))
when (funptr/=ptrNull)
(freeHaskellFunPtr (castPtrToFunPtr funptr))
when (funptr_d/=ptrNull)
(freeHaskellFunPtr (castPtrToFunPtr funptr_d))
return ()
instance QunSetHandler (QTranslator ()) where
unSetHandler qobj evid
= withBoolResult $
withObjectPtr qobj $ \cobj_qobj ->
withCWString evid $ \cstr_evid ->
qtc_QTranslator_unSetHandler cobj_qobj cstr_evid
foreign import ccall "qtc_QTranslator_unSetHandler" qtc_QTranslator_unSetHandler :: Ptr (TQTranslator a) -> CWString -> IO (CBool)
instance QunSetHandler (QTranslatorSc a) where
unSetHandler qobj evid
= withBoolResult $
withObjectPtr qobj $ \cobj_qobj ->
withCWString evid $ \cstr_evid ->
qtc_QTranslator_unSetHandler cobj_qobj cstr_evid
-- | Installs a Haskell predicate (@QTranslator x0 -> IO Bool@) as the
-- handler named @_eid@.  The callback and its destructor are wrapped as C
-- function pointers; the 'StablePtr' keeps the closure alive until the
-- C++ side invokes the destructor shim below.
instance QsetHandler (QTranslator ()) (QTranslator x0 -> IO (Bool)) where
 setHandler _eobj _eid _handler
   = do
      funptr <- wrapSetHandler_QTranslator1 setHandlerWrapper
      stptr <- newStablePtr (Wrap _handler)
      funptr_d <- wrapSetHandler_QTranslator1_d setHandlerWrapper_d
      withObjectPtr _eobj $ \cobj_eobj ->
       withCWString _eid $ \cstr_eid ->
        qtc_QTranslator_setHandler1 cobj_eobj cstr_eid (toCFunPtr funptr) (castStablePtrToPtr stptr) (toCFunPtr funptr_d)
      return()
   where
    -- C-callable shim: rebuild the receiver from the raw pointer and run
    -- the user handler; a null receiver yields False.
    setHandlerWrapper :: Ptr (TQTranslator x0) -> IO (CBool)
    setHandlerWrapper x0
      = do x0obj <- qTranslatorFromPtr x0
           let rv =
                if (objectIsNull x0obj)
                 then return False
                 else _handler x0obj
           rvf <- rv
           return (toCBool rvf)
    -- Destructor shim called from C++ when the handler is discarded:
    -- releases the StablePtr and both wrapper FunPtrs (null-checked).
    setHandlerWrapper_d :: Ptr fun -> Ptr () -> Ptr fun_d -> IO ()
    setHandlerWrapper_d funptr stptr funptr_d
      = do when (stptr/=ptrNull)
            (freeStablePtr (castPtrToStablePtr stptr))
           when (funptr/=ptrNull)
            (freeHaskellFunPtr (castPtrToFunPtr funptr))
           when (funptr_d/=ptrNull)
            (freeHaskellFunPtr (castPtrToFunPtr funptr_d))
           return ()
foreign import ccall "qtc_QTranslator_setHandler1" qtc_QTranslator_setHandler1 :: Ptr (TQTranslator a) -> CWString -> Ptr (Ptr (TQTranslator x0) -> IO (CBool)) -> Ptr () -> Ptr (Ptr fun -> Ptr state -> Ptr fun_d -> IO ()) -> IO ()
foreign import ccall "wrapper" wrapSetHandler_QTranslator1 :: (Ptr (TQTranslator x0) -> IO (CBool)) -> IO (FunPtr (Ptr (TQTranslator x0) -> IO (CBool)))
foreign import ccall "wrapper" wrapSetHandler_QTranslator1_d :: (Ptr fun -> Ptr state -> Ptr fun_d -> IO ()) -> IO (FunPtr (Ptr fun -> Ptr state -> Ptr fun_d -> IO ()))
-- | As the 'QTranslator' instance above, for 'QTranslatorSc' receivers.
instance QsetHandler (QTranslatorSc a) (QTranslator x0 -> IO (Bool)) where
 setHandler _eobj _eid _handler
   = do
      funptr <- wrapSetHandler_QTranslator1 setHandlerWrapper
      stptr <- newStablePtr (Wrap _handler)
      funptr_d <- wrapSetHandler_QTranslator1_d setHandlerWrapper_d
      withObjectPtr _eobj $ \cobj_eobj ->
       withCWString _eid $ \cstr_eid ->
        qtc_QTranslator_setHandler1 cobj_eobj cstr_eid (toCFunPtr funptr) (castStablePtrToPtr stptr) (toCFunPtr funptr_d)
      return()
   where
    -- C-callable shim around the user handler; null receiver => False.
    setHandlerWrapper :: Ptr (TQTranslator x0) -> IO (CBool)
    setHandlerWrapper x0
      = do x0obj <- qTranslatorFromPtr x0
           let rv =
                if (objectIsNull x0obj)
                 then return False
                 else _handler x0obj
           rvf <- rv
           return (toCBool rvf)
    -- Frees the StablePtr and wrapper FunPtrs when C++ drops the handler.
    setHandlerWrapper_d :: Ptr fun -> Ptr () -> Ptr fun_d -> IO ()
    setHandlerWrapper_d funptr stptr funptr_d
      = do when (stptr/=ptrNull)
            (freeStablePtr (castPtrToStablePtr stptr))
           when (funptr/=ptrNull)
            (freeHaskellFunPtr (castPtrToFunPtr funptr))
           when (funptr_d/=ptrNull)
            (freeHaskellFunPtr (castPtrToFunPtr funptr_d))
           return ()
-- | Handler-level binding: calls the C wrapper @qtc_QTranslator_isEmpty@
-- and converts its result to a 'Bool'.
instance QqisEmpty_h (QTranslator ()) (()) where
 qisEmpty_h translator () =
  withBoolResult $
   withObjectPtr translator $ \rawPtr ->
    qtc_QTranslator_isEmpty rawPtr

foreign import ccall "qtc_QTranslator_isEmpty" qtc_QTranslator_isEmpty :: Ptr (TQTranslator a) -> IO CBool
-- | The same marshalling for 'QTranslatorSc' values.
instance QqisEmpty_h (QTranslatorSc a) (()) where
 qisEmpty_h translator () =
  withBoolResult $
   withObjectPtr translator $ \rawPtr ->
    qtc_QTranslator_isEmpty rawPtr
-- | Installs a translate-style callback (receiver, context, source text,
-- comment, count -> translated text) under the name @_eid@.
instance QsetHandler (QTranslator ()) (QTranslator x0 -> String -> String -> String -> Int -> IO (String)) where
 setHandler _eobj _eid _handler
   = do
      funptr <- wrapSetHandler_QTranslator2 setHandlerWrapper
      stptr <- newStablePtr (Wrap _handler)
      funptr_d <- wrapSetHandler_QTranslator2_d setHandlerWrapper_d
      withObjectPtr _eobj $ \cobj_eobj ->
       withCWString _eid $ \cstr_eid ->
        qtc_QTranslator_setHandler2 cobj_eobj cstr_eid (toCFunPtr funptr) (castStablePtrToPtr stptr) (toCFunPtr funptr_d)
      return()
   where
    -- C-callable shim: marshals the three QStrings and the int across the
    -- boundary, runs the user handler (empty string for a null receiver),
    -- and hands back a wide C string.
    setHandlerWrapper :: Ptr (TQTranslator x0) -> Ptr (TQString ()) -> Ptr (TQString ()) -> Ptr (TQString ()) -> CInt -> IO (CWString)
    setHandlerWrapper x0 x1 x2 x3 x4
      = do x0obj <- qTranslatorFromPtr x0
           x1str <- stringFromPtr x1
           x2str <- stringFromPtr x2
           x3str <- stringFromPtr x3
           let x4int = fromCInt x4
           let rv =
                if (objectIsNull x0obj)
                 then return ("")
                 else _handler x0obj x1str x2str x3str x4int
           rvf <- rv
           -- NOTE(review): the CWString is returned out of the
           -- 'withCWString' scope, whose buffer is deallocated when the
           -- action returns; this is only safe if the C++ caller copies
           -- the string immediately -- confirm.
           withCWString rvf $ \cstr_rvf -> return (cstr_rvf)
    -- Frees the StablePtr and wrapper FunPtrs when C++ drops the handler.
    setHandlerWrapper_d :: Ptr fun -> Ptr () -> Ptr fun_d -> IO ()
    setHandlerWrapper_d funptr stptr funptr_d
      = do when (stptr/=ptrNull)
            (freeStablePtr (castPtrToStablePtr stptr))
           when (funptr/=ptrNull)
            (freeHaskellFunPtr (castPtrToFunPtr funptr))
           when (funptr_d/=ptrNull)
            (freeHaskellFunPtr (castPtrToFunPtr funptr_d))
           return ()
foreign import ccall "qtc_QTranslator_setHandler2" qtc_QTranslator_setHandler2 :: Ptr (TQTranslator a) -> CWString -> Ptr (Ptr (TQTranslator x0) -> Ptr (TQString ()) -> Ptr (TQString ()) -> Ptr (TQString ()) -> CInt -> IO (CWString)) -> Ptr () -> Ptr (Ptr fun -> Ptr state -> Ptr fun_d -> IO ()) -> IO ()
foreign import ccall "wrapper" wrapSetHandler_QTranslator2 :: (Ptr (TQTranslator x0) -> Ptr (TQString ()) -> Ptr (TQString ()) -> Ptr (TQString ()) -> CInt -> IO (CWString)) -> IO (FunPtr (Ptr (TQTranslator x0) -> Ptr (TQString ()) -> Ptr (TQString ()) -> Ptr (TQString ()) -> CInt -> IO (CWString)))
foreign import ccall "wrapper" wrapSetHandler_QTranslator2_d :: (Ptr fun -> Ptr state -> Ptr fun_d -> IO ()) -> IO (FunPtr (Ptr fun -> Ptr state -> Ptr fun_d -> IO ()))
-- | As the 'QTranslator' instance above, for 'QTranslatorSc' receivers.
instance QsetHandler (QTranslatorSc a) (QTranslator x0 -> String -> String -> String -> Int -> IO (String)) where
 setHandler _eobj _eid _handler
   = do
      funptr <- wrapSetHandler_QTranslator2 setHandlerWrapper
      stptr <- newStablePtr (Wrap _handler)
      funptr_d <- wrapSetHandler_QTranslator2_d setHandlerWrapper_d
      withObjectPtr _eobj $ \cobj_eobj ->
       withCWString _eid $ \cstr_eid ->
        qtc_QTranslator_setHandler2 cobj_eobj cstr_eid (toCFunPtr funptr) (castStablePtrToPtr stptr) (toCFunPtr funptr_d)
      return()
   where
    -- C-callable shim: marshal arguments, run the user handler (empty
    -- string for a null receiver), return a wide C string.
    setHandlerWrapper :: Ptr (TQTranslator x0) -> Ptr (TQString ()) -> Ptr (TQString ()) -> Ptr (TQString ()) -> CInt -> IO (CWString)
    setHandlerWrapper x0 x1 x2 x3 x4
      = do x0obj <- qTranslatorFromPtr x0
           x1str <- stringFromPtr x1
           x2str <- stringFromPtr x2
           x3str <- stringFromPtr x3
           let x4int = fromCInt x4
           let rv =
                if (objectIsNull x0obj)
                 then return ("")
                 else _handler x0obj x1str x2str x3str x4int
           rvf <- rv
           -- NOTE(review): CWString escapes its 'withCWString' scope (see
           -- the QTranslator () instance) -- confirm the C side copies it.
           withCWString rvf $ \cstr_rvf -> return (cstr_rvf)
    -- Frees the StablePtr and wrapper FunPtrs when C++ drops the handler.
    setHandlerWrapper_d :: Ptr fun -> Ptr () -> Ptr fun_d -> IO ()
    setHandlerWrapper_d funptr stptr funptr_d
      = do when (stptr/=ptrNull)
            (freeStablePtr (castPtrToStablePtr stptr))
           when (funptr/=ptrNull)
            (freeHaskellFunPtr (castPtrToFunPtr funptr))
           when (funptr_d/=ptrNull)
            (freeHaskellFunPtr (castPtrToFunPtr funptr_d))
           return ()
-- | Overloaded handler-level interface for translate calls; the chosen
-- instance fixes the shape of the argument tuple.
class Qqtranslate_h x0 x1 where
 qtranslate_h :: x0 -> x1 -> IO (String)
-- | Handler-level binding for the four-argument translate call: marshals
-- the three strings and the count across the C boundary and converts the
-- returned QString to a Haskell 'String'.
instance Qqtranslate_h (QTranslator ()) ((String, String, String, Int)) where
 qtranslate_h translator (ctx, srcText, comment, n) =
  withStringResult $
   withObjectPtr translator $ \rawObj ->
    withCWString ctx $ \rawCtx ->
     withCWString srcText $ \rawSrc ->
      withCWString comment $ \rawComment ->
       qtc_QTranslator_translate2 rawObj rawCtx rawSrc rawComment (toCInt n)

foreign import ccall "qtc_QTranslator_translate2" qtc_QTranslator_translate2 :: Ptr (TQTranslator a) -> CWString -> CWString -> CWString -> CInt -> IO (Ptr (TQString ()))
-- | The same marshalling for 'QTranslatorSc' values.
instance Qqtranslate_h (QTranslatorSc a) ((String, String, String, Int)) where
 qtranslate_h translator (ctx, srcText, comment, n) =
  withStringResult $
   withObjectPtr translator $ \rawObj ->
    withCWString ctx $ \rawCtx ->
     withCWString srcText $ \rawSrc ->
      withCWString comment $ \rawComment ->
       qtc_QTranslator_translate2 rawObj rawCtx rawSrc rawComment (toCInt n)
-- | Installs an event callback (receiver, event -> handled?) under the
-- name @_eid@; wrapper/StablePtr lifecycle as in the instances above.
instance QsetHandler (QTranslator ()) (QTranslator x0 -> QEvent t1 -> IO (Bool)) where
 setHandler _eobj _eid _handler
   = do
      funptr <- wrapSetHandler_QTranslator3 setHandlerWrapper
      stptr <- newStablePtr (Wrap _handler)
      funptr_d <- wrapSetHandler_QTranslator3_d setHandlerWrapper_d
      withObjectPtr _eobj $ \cobj_eobj ->
       withCWString _eid $ \cstr_eid ->
        qtc_QTranslator_setHandler3 cobj_eobj cstr_eid (toCFunPtr funptr) (castStablePtrToPtr stptr) (toCFunPtr funptr_d)
      return()
   where
    -- C-callable shim: rebuild receiver and event, run the user handler;
    -- a null receiver yields False.
    setHandlerWrapper :: Ptr (TQTranslator x0) -> Ptr (TQEvent t1) -> IO (CBool)
    setHandlerWrapper x0 x1
      = do x0obj <- qTranslatorFromPtr x0
           x1obj <- objectFromPtr_nf x1
           let rv =
                if (objectIsNull x0obj)
                 then return False
                 else _handler x0obj x1obj
           rvf <- rv
           return (toCBool rvf)
    -- Frees the StablePtr and wrapper FunPtrs when C++ drops the handler.
    setHandlerWrapper_d :: Ptr fun -> Ptr () -> Ptr fun_d -> IO ()
    setHandlerWrapper_d funptr stptr funptr_d
      = do when (stptr/=ptrNull)
            (freeStablePtr (castPtrToStablePtr stptr))
           when (funptr/=ptrNull)
            (freeHaskellFunPtr (castPtrToFunPtr funptr))
           when (funptr_d/=ptrNull)
            (freeHaskellFunPtr (castPtrToFunPtr funptr_d))
           return ()
foreign import ccall "qtc_QTranslator_setHandler3" qtc_QTranslator_setHandler3 :: Ptr (TQTranslator a) -> CWString -> Ptr (Ptr (TQTranslator x0) -> Ptr (TQEvent t1) -> IO (CBool)) -> Ptr () -> Ptr (Ptr fun -> Ptr state -> Ptr fun_d -> IO ()) -> IO ()
foreign import ccall "wrapper" wrapSetHandler_QTranslator3 :: (Ptr (TQTranslator x0) -> Ptr (TQEvent t1) -> IO (CBool)) -> IO (FunPtr (Ptr (TQTranslator x0) -> Ptr (TQEvent t1) -> IO (CBool)))
foreign import ccall "wrapper" wrapSetHandler_QTranslator3_d :: (Ptr fun -> Ptr state -> Ptr fun_d -> IO ()) -> IO (FunPtr (Ptr fun -> Ptr state -> Ptr fun_d -> IO ()))
-- | As the 'QTranslator' instance above, for 'QTranslatorSc' receivers.
instance QsetHandler (QTranslatorSc a) (QTranslator x0 -> QEvent t1 -> IO (Bool)) where
 setHandler _eobj _eid _handler
   = do
      funptr <- wrapSetHandler_QTranslator3 setHandlerWrapper
      stptr <- newStablePtr (Wrap _handler)
      funptr_d <- wrapSetHandler_QTranslator3_d setHandlerWrapper_d
      withObjectPtr _eobj $ \cobj_eobj ->
       withCWString _eid $ \cstr_eid ->
        qtc_QTranslator_setHandler3 cobj_eobj cstr_eid (toCFunPtr funptr) (castStablePtrToPtr stptr) (toCFunPtr funptr_d)
      return()
   where
    -- C-callable shim; null receiver => False.
    setHandlerWrapper :: Ptr (TQTranslator x0) -> Ptr (TQEvent t1) -> IO (CBool)
    setHandlerWrapper x0 x1
      = do x0obj <- qTranslatorFromPtr x0
           x1obj <- objectFromPtr_nf x1
           let rv =
                if (objectIsNull x0obj)
                 then return False
                 else _handler x0obj x1obj
           rvf <- rv
           return (toCBool rvf)
    -- Frees the StablePtr and wrapper FunPtrs when C++ drops the handler.
    setHandlerWrapper_d :: Ptr fun -> Ptr () -> Ptr fun_d -> IO ()
    setHandlerWrapper_d funptr stptr funptr_d
      = do when (stptr/=ptrNull)
            (freeStablePtr (castPtrToStablePtr stptr))
           when (funptr/=ptrNull)
            (freeHaskellFunPtr (castPtrToFunPtr funptr))
           when (funptr_d/=ptrNull)
            (freeHaskellFunPtr (castPtrToFunPtr funptr_d))
           return ()
-- | Handler-level binding: forwards the event to the C wrapper
-- @qtc_QTranslator_event@ and converts its result to a 'Bool'.
instance Qevent_h (QTranslator ()) ((QEvent t1)) where
 event_h translator (ev) =
  withBoolResult $
   withObjectPtr translator $ \rawObj ->
    withObjectPtr ev $ \rawEv ->
     qtc_QTranslator_event rawObj rawEv

foreign import ccall "qtc_QTranslator_event" qtc_QTranslator_event :: Ptr (TQTranslator a) -> Ptr (TQEvent t1) -> IO CBool
-- | The same marshalling for 'QTranslatorSc' values.
instance Qevent_h (QTranslatorSc a) ((QEvent t1)) where
 event_h translator (ev) =
  withBoolResult $
   withObjectPtr translator $ \rawObj ->
    withObjectPtr ev $ \rawEv ->
     qtc_QTranslator_event rawObj rawEv
-- | Installs an event-filter callback (receiver, watched object, event ->
-- handled?) under the name @_eid@.
instance QsetHandler (QTranslator ()) (QTranslator x0 -> QObject t1 -> QEvent t2 -> IO (Bool)) where
 setHandler _eobj _eid _handler
   = do
      funptr <- wrapSetHandler_QTranslator4 setHandlerWrapper
      stptr <- newStablePtr (Wrap _handler)
      funptr_d <- wrapSetHandler_QTranslator4_d setHandlerWrapper_d
      withObjectPtr _eobj $ \cobj_eobj ->
       withCWString _eid $ \cstr_eid ->
        qtc_QTranslator_setHandler4 cobj_eobj cstr_eid (toCFunPtr funptr) (castStablePtrToPtr stptr) (toCFunPtr funptr_d)
      return()
   where
    -- C-callable shim: rebuild receiver, watched object, and event, then
    -- run the user handler; a null receiver yields False.
    setHandlerWrapper :: Ptr (TQTranslator x0) -> Ptr (TQObject t1) -> Ptr (TQEvent t2) -> IO (CBool)
    setHandlerWrapper x0 x1 x2
      = do x0obj <- qTranslatorFromPtr x0
           x1obj <- qObjectFromPtr x1
           x2obj <- objectFromPtr_nf x2
           let rv =
                if (objectIsNull x0obj)
                 then return False
                 else _handler x0obj x1obj x2obj
           rvf <- rv
           return (toCBool rvf)
    -- Frees the StablePtr and wrapper FunPtrs when C++ drops the handler.
    setHandlerWrapper_d :: Ptr fun -> Ptr () -> Ptr fun_d -> IO ()
    setHandlerWrapper_d funptr stptr funptr_d
      = do when (stptr/=ptrNull)
            (freeStablePtr (castPtrToStablePtr stptr))
           when (funptr/=ptrNull)
            (freeHaskellFunPtr (castPtrToFunPtr funptr))
           when (funptr_d/=ptrNull)
            (freeHaskellFunPtr (castPtrToFunPtr funptr_d))
           return ()
foreign import ccall "qtc_QTranslator_setHandler4" qtc_QTranslator_setHandler4 :: Ptr (TQTranslator a) -> CWString -> Ptr (Ptr (TQTranslator x0) -> Ptr (TQObject t1) -> Ptr (TQEvent t2) -> IO (CBool)) -> Ptr () -> Ptr (Ptr fun -> Ptr state -> Ptr fun_d -> IO ()) -> IO ()
foreign import ccall "wrapper" wrapSetHandler_QTranslator4 :: (Ptr (TQTranslator x0) -> Ptr (TQObject t1) -> Ptr (TQEvent t2) -> IO (CBool)) -> IO (FunPtr (Ptr (TQTranslator x0) -> Ptr (TQObject t1) -> Ptr (TQEvent t2) -> IO (CBool)))
foreign import ccall "wrapper" wrapSetHandler_QTranslator4_d :: (Ptr fun -> Ptr state -> Ptr fun_d -> IO ()) -> IO (FunPtr (Ptr fun -> Ptr state -> Ptr fun_d -> IO ()))
-- | As the 'QTranslator' instance above, for 'QTranslatorSc' receivers.
instance QsetHandler (QTranslatorSc a) (QTranslator x0 -> QObject t1 -> QEvent t2 -> IO (Bool)) where
 setHandler _eobj _eid _handler
   = do
      funptr <- wrapSetHandler_QTranslator4 setHandlerWrapper
      stptr <- newStablePtr (Wrap _handler)
      funptr_d <- wrapSetHandler_QTranslator4_d setHandlerWrapper_d
      withObjectPtr _eobj $ \cobj_eobj ->
       withCWString _eid $ \cstr_eid ->
        qtc_QTranslator_setHandler4 cobj_eobj cstr_eid (toCFunPtr funptr) (castStablePtrToPtr stptr) (toCFunPtr funptr_d)
      return()
   where
    -- C-callable shim; null receiver => False.
    setHandlerWrapper :: Ptr (TQTranslator x0) -> Ptr (TQObject t1) -> Ptr (TQEvent t2) -> IO (CBool)
    setHandlerWrapper x0 x1 x2
      = do x0obj <- qTranslatorFromPtr x0
           x1obj <- qObjectFromPtr x1
           x2obj <- objectFromPtr_nf x2
           let rv =
                if (objectIsNull x0obj)
                 then return False
                 else _handler x0obj x1obj x2obj
           rvf <- rv
           return (toCBool rvf)
    -- Frees the StablePtr and wrapper FunPtrs when C++ drops the handler.
    setHandlerWrapper_d :: Ptr fun -> Ptr () -> Ptr fun_d -> IO ()
    setHandlerWrapper_d funptr stptr funptr_d
      = do when (stptr/=ptrNull)
            (freeStablePtr (castPtrToStablePtr stptr))
           when (funptr/=ptrNull)
            (freeHaskellFunPtr (castPtrToFunPtr funptr))
           when (funptr_d/=ptrNull)
            (freeHaskellFunPtr (castPtrToFunPtr funptr_d))
           return ()
-- | Handler-level binding: forwards (watched object, event) to the C
-- wrapper @qtc_QTranslator_eventFilter@ and converts the result to 'Bool'.
instance QeventFilter_h (QTranslator ()) ((QObject t1, QEvent t2)) where
 eventFilter_h translator (watched, ev) =
  withBoolResult $
   withObjectPtr translator $ \rawObj ->
    withObjectPtr watched $ \rawWatched ->
     withObjectPtr ev $ \rawEv ->
      qtc_QTranslator_eventFilter rawObj rawWatched rawEv

foreign import ccall "qtc_QTranslator_eventFilter" qtc_QTranslator_eventFilter :: Ptr (TQTranslator a) -> Ptr (TQObject t1) -> Ptr (TQEvent t2) -> IO CBool
-- | The same marshalling for 'QTranslatorSc' values.
instance QeventFilter_h (QTranslatorSc a) ((QObject t1, QEvent t2)) where
 eventFilter_h translator (watched, ev) =
  withBoolResult $
   withObjectPtr translator $ \rawObj ->
    withObjectPtr watched $ \rawWatched ->
     withObjectPtr ev $ \rawEv ->
      qtc_QTranslator_eventFilter rawObj rawWatched rawEv
| uduki/hsQt | Qtc/Core/QTranslator_h.hs | bsd-2-clause | 25,032 | 0 | 18 | 5,848 | 8,370 | 3,991 | 4,379 | -1 | -1 |
module Game.LambdaPad.Core
( ButtonState(Pressed, Released)
, Button
-- | A lens into whether the button is pressed.
, buttonState
, Direction (C, N, NE, E, SE, S, SW, W, NW)
, DPad
-- | A lens into the 'Direction' a 'DPad' is pressed.
, dir
, Trigger
-- | A lens into how far a trigger has been pressed, where 0.0 is neutral and
-- 1.0 is fully depressed.
, pull
, Stick
-- | A lens into the horizontal displacement of a 'Stick', where 0.0 is
-- neutral, -1.0 is fully W, and 1.0 is fully E. Think of it as the X axis
-- bounded on [-1.0, 1.0]
, horiz
-- | A lens into the vertical displacement of a 'Stick', where 0.0 is neutral,
-- -1.0 is fully S, and 1.0 is fully N. Think of it as the Y axis bounded on
-- [-1.0, 1.0]
, vert
, tilt, push
, Pad
, PadButton
, a, b, x, y
, lb, rb, ls, rs
, back, start, home
, PadDPad
, dpad
, PadTrigger
, leftTrigger, rightTrigger
, PadStick
, leftStick, rightStick
) where
import Game.LambdaPad.Core.Internal
| zearen-wover/lambda-pad-core | src/Game/LambdaPad/Core.hs | bsd-3-clause | 1,020 | 0 | 5 | 261 | 154 | 113 | 41 | 29 | 0 |
{-# LANGUAGE RecordWildCards, FlexibleInstances #-}
------------------------------------------------------------------------------
-- |
-- Module: Database.PostgreSQL.Simple.FromRow
-- Copyright: (c) 2012 Leon P Smith
-- License: BSD3
-- Maintainer: Leon P Smith <leon@melding-monads.com>
-- Stability: experimental
--
-- The 'FromRow' typeclass, for converting a row of results
-- returned by a SQL query into a more useful Haskell representation.
--
-- Predefined instances are provided for tuples containing up to ten
-- elements. The instances for 'Maybe' types return 'Nothing' if all
-- the columns that would have been otherwise consumed are null, otherwise
-- it attempts a regular conversion.
--
------------------------------------------------------------------------------
module Database.PostgreSQL.Simple.FromRow
( FromRow(..)
, RowParser
, field
, fieldWith
, numFieldsRemaining
) where
import Prelude hiding (null)
import Control.Applicative (Applicative(..), (<$>), (<|>), (*>))
import Control.Monad (replicateM, replicateM_)
import Control.Monad.Trans.State.Strict
import Control.Monad.Trans.Reader
import Control.Monad.Trans.Class
import Data.ByteString (ByteString)
import qualified Data.ByteString.Char8 as B
import Data.Vector (Vector)
import qualified Data.Vector as V
import Database.PostgreSQL.Simple.Types (Only(..))
import qualified Database.PostgreSQL.LibPQ as PQ
import Database.PostgreSQL.Simple.Internal
import Database.PostgreSQL.Simple.Compat
import Database.PostgreSQL.Simple.FromField
import Database.PostgreSQL.Simple.Ok
import Database.PostgreSQL.Simple.Types ((:.)(..), Null)
import Database.PostgreSQL.Simple.TypeInfo
-- | A collection type that can be converted from a sequence of fields.
-- Instances are provided for tuples up to 10 elements and lists of any length.
--
-- Note that instances can be defined outside of postgresql-simple, which is
-- often useful. For example, here's an instance for a user-defined pair:
--
-- @data User = User { name :: String, fileQuota :: Int }
--
-- instance 'FromRow' User where
-- fromRow = User \<$\> 'field' \<*\> 'field'
-- @
--
-- The number of calls to 'field' must match the number of fields returned
-- in a single row of the query result. Otherwise, a 'ConversionFailed'
-- exception will be thrown.
--
-- Note that 'field' evaluates its result to WHNF, so the caveats listed in
-- mysql-simple and very early versions of postgresql-simple no longer apply.
-- Instead, look at the caveats associated with user-defined implementations
-- of 'fromField'.
class FromRow a where
    -- | Parser that consumes this type's columns from the current row.
    fromRow :: RowParser a
-- | Read one cell of the result as raw bytes ('Nothing' signals an absent
-- value; see 'PQ.getvalue''), exposed as a pure lookup via
-- 'unsafeDupablePerformIO'.
-- NOTE(review): assumes a completed 'PQ.Result' is immutable -- confirm
-- against libpq semantics before reusing this pattern elsewhere.
getvalue :: PQ.Result -> PQ.Row -> PQ.Column -> Maybe ByteString
getvalue result row col = unsafeDupablePerformIO (PQ.getvalue' result row col)
-- | Column count of a result set, exposed as a pure value via
-- 'unsafeDupablePerformIO'.
-- NOTE(review): assumes the column count of a finished result is fixed.
nfields :: PQ.Result -> PQ.Column
nfields result = unsafeDupablePerformIO (PQ.nfields result)
-- | Look up the 'TypeInfo' for a result column: ask libpq for the
-- column's type OID, then resolve it through the connection with
-- 'getTypeInfo'.
getTypeInfoByCol :: Row -> PQ.Column -> Conversion TypeInfo
getTypeInfoByCol r col =
    Conversion $ \conn ->
      PQ.ftype (rowresult r) col >>= \oid -> Ok <$> getTypeInfo conn oid
-- | The PostgreSQL type name of a result column.
getTypenameByCol :: Row -> PQ.Column -> Conversion ByteString
getTypenameByCol r col = fmap typname (getTypeInfoByCol r col)
-- | Consume the next column with an explicit 'FieldParser'.  Always
-- advances the internal column counter; if the row is already exhausted
-- it fails with a 'ConversionFailed' listing the row's column type names.
fieldWith :: FieldParser a -> RowParser a
fieldWith fieldP = RP $ do
    let unCol (PQ.Col x) = fromIntegral x :: Int
    r@Row{..} <- ask
    column <- lift get
    -- Reserve this column: the counter is bumped even on the error path.
    lift (put (column + 1))
    let ncols = nfields rowresult
    if (column >= ncols)
    then lift $ lift $ do
        -- Ran off the end of the row: build a descriptive error showing
        -- the (ellipsised) type names of every available column.
        vals <- mapM (getTypenameByCol r) [0..ncols-1]
        let err = ConversionFailed
                (show (unCol ncols) ++ " values: " ++ show (map ellipsis vals))
                Nothing
                ""
                ("at least " ++ show (unCol column + 1)
                  ++ " slots in target type")
                "mismatch between number of columns to \
                \convert and number in target type"
        conversionError err
    else do
        -- Strict binds force the result handle and type OID up front;
        -- Field{..} is assembled via RecordWildCards from result/typeOid.
        let !result = rowresult
            !typeOid = unsafeDupablePerformIO (PQ.ftype result column)
            !field = Field{..}
        lift (lift (fieldP field (getvalue result row column)))
-- | Consume and convert the next column of the current row using the
-- target type's 'FromField' instance.
field :: FromField a => RowParser a
field = fieldWith fromField
-- | Abbreviate a bytestring for error messages: anything longer than 15
-- bytes is cut to its first 10 bytes plus @[...]@.
ellipsis :: ByteString -> ByteString
ellipsis bs =
    if B.length bs > 15
       then B.take 10 bs `B.append` "[...]"
       else bs
-- | How many columns of the current row have not yet been consumed by
-- 'field' \/ 'fieldWith'.
numFieldsRemaining :: RowParser Int
numFieldsRemaining = RP $ do
    Row{..} <- ask
    column <- lift get
    return $! (\(PQ.Col x) -> fromIntegral x) (nfields rowresult - column)
-- | Consume one column via the 'FromField' instance of 'Null' (defined
-- elsewhere; presumably it succeeds only on SQL NULL).  Used by the
-- @Maybe@ row instances below to detect an all-NULL run of columns.
null :: RowParser Null
null = field
-- Boilerplate instances for 'Only' and for 2- to 10-tuples.  Each @Maybe@
-- variant first tries to consume the same number of all-NULL columns
-- (yielding 'Nothing'); otherwise it falls back to the plain conversion.
instance (FromField a) => FromRow (Only a) where
    fromRow = Only <$> field
instance (FromField a) => FromRow (Maybe (Only a)) where
    fromRow = (null *> pure Nothing)
              <|> (Just <$> fromRow)
instance (FromField a, FromField b) => FromRow (a,b) where
    fromRow = (,) <$> field <*> field
instance (FromField a, FromField b) => FromRow (Maybe (a,b)) where
    fromRow = (null *> null *> pure Nothing)
              <|> (Just <$> fromRow)
instance (FromField a, FromField b, FromField c) => FromRow (a,b,c) where
    fromRow = (,,) <$> field <*> field <*> field
instance (FromField a, FromField b, FromField c) => FromRow (Maybe (a,b,c)) where
    fromRow = (null *> null *> null *> pure Nothing)
              <|> (Just <$> fromRow)
instance (FromField a, FromField b, FromField c, FromField d) =>
    FromRow (a,b,c,d) where
    fromRow = (,,,) <$> field <*> field <*> field <*> field
instance (FromField a, FromField b, FromField c, FromField d) =>
    FromRow (Maybe (a,b,c,d)) where
    fromRow = (null *> null *> null *> null *> pure Nothing)
              <|> (Just <$> fromRow)
instance (FromField a, FromField b, FromField c, FromField d, FromField e) =>
    FromRow (a,b,c,d,e) where
    fromRow = (,,,,) <$> field <*> field <*> field <*> field <*> field
instance (FromField a, FromField b, FromField c, FromField d, FromField e) =>
    FromRow (Maybe (a,b,c,d,e)) where
    fromRow = (null *> null *> null *> null *> null *> pure Nothing)
              <|> (Just <$> fromRow)
instance (FromField a, FromField b, FromField c, FromField d, FromField e,
          FromField f) =>
    FromRow (a,b,c,d,e,f) where
    fromRow = (,,,,,) <$> field <*> field <*> field <*> field <*> field
                      <*> field
instance (FromField a, FromField b, FromField c, FromField d, FromField e,
          FromField f) =>
    FromRow (Maybe (a,b,c,d,e,f)) where
    fromRow = (null *> null *> null *> null *> null *>
               null *> pure Nothing)
              <|> (Just <$> fromRow)
instance (FromField a, FromField b, FromField c, FromField d, FromField e,
          FromField f, FromField g) =>
    FromRow (a,b,c,d,e,f,g) where
    fromRow = (,,,,,,) <$> field <*> field <*> field <*> field <*> field
                       <*> field <*> field
instance (FromField a, FromField b, FromField c, FromField d, FromField e,
          FromField f, FromField g) =>
    FromRow (Maybe (a,b,c,d,e,f,g)) where
    fromRow = (null *> null *> null *> null *> null *>
               null *> null *> pure Nothing)
              <|> (Just <$> fromRow)
instance (FromField a, FromField b, FromField c, FromField d, FromField e,
          FromField f, FromField g, FromField h) =>
    FromRow (a,b,c,d,e,f,g,h) where
    fromRow = (,,,,,,,) <$> field <*> field <*> field <*> field <*> field
                        <*> field <*> field <*> field
instance (FromField a, FromField b, FromField c, FromField d, FromField e,
          FromField f, FromField g, FromField h) =>
    FromRow (Maybe (a,b,c,d,e,f,g,h)) where
    fromRow = (null *> null *> null *> null *> null *>
               null *> null *> null *> pure Nothing)
              <|> (Just <$> fromRow)
instance (FromField a, FromField b, FromField c, FromField d, FromField e,
          FromField f, FromField g, FromField h, FromField i) =>
    FromRow (a,b,c,d,e,f,g,h,i) where
    fromRow = (,,,,,,,,) <$> field <*> field <*> field <*> field <*> field
                         <*> field <*> field <*> field <*> field
instance (FromField a, FromField b, FromField c, FromField d, FromField e,
          FromField f, FromField g, FromField h, FromField i) =>
    FromRow (Maybe (a,b,c,d,e,f,g,h,i)) where
    fromRow = (null *> null *> null *> null *> null *>
               null *> null *> null *> null *> pure Nothing)
              <|> (Just <$> fromRow)
instance (FromField a, FromField b, FromField c, FromField d, FromField e,
          FromField f, FromField g, FromField h, FromField i, FromField j) =>
    FromRow (a,b,c,d,e,f,g,h,i,j) where
    fromRow = (,,,,,,,,,) <$> field <*> field <*> field <*> field <*> field
                          <*> field <*> field <*> field <*> field <*> field
instance (FromField a, FromField b, FromField c, FromField d, FromField e,
          FromField f, FromField g, FromField h, FromField i, FromField j) =>
    FromRow (Maybe (a,b,c,d,e,f,g,h,i,j)) where
    fromRow = (null *> null *> null *> null *> null *>
               null *> null *> null *> null *> null *> pure Nothing)
              <|> (Just <$> fromRow)
-- | Consume every remaining field of the row as a homogeneous list.
instance FromField a => FromRow [a] where
    fromRow = numFieldsRemaining >>= \remaining -> replicateM remaining field
-- | 'Nothing' when every remaining column is NULL; otherwise parse the
-- remaining columns as a list.
instance FromField a => FromRow (Maybe [a]) where
    fromRow = do
      n <- numFieldsRemaining
      (replicateM_ n null *> pure Nothing) <|> (Just <$> replicateM n field)
-- | Consume every remaining field of the row into a boxed 'Vector'.
instance FromField a => FromRow (Vector a) where
    fromRow = numFieldsRemaining >>= \remaining -> V.replicateM remaining field
-- | 'Nothing' when every remaining column is NULL; otherwise parse the
-- remaining columns into a boxed 'Vector'.
instance FromField a => FromRow (Maybe (Vector a)) where
    fromRow = do
      n <- numFieldsRemaining
      (replicateM_ n null *> pure Nothing) <|> (Just <$> V.replicateM n field)
-- | Conversion for the row-concatenation type: parse the left part's
-- columns first, then the right part's.
instance (FromRow a, FromRow b) => FromRow (a :. b) where
    fromRow = do
      l <- fromRow
      r <- fromRow
      return (l :. r)
| avieth/postgresql-simple | src/Database/PostgreSQL/Simple/FromRow.hs | bsd-3-clause | 9,954 | 0 | 22 | 2,483 | 3,274 | 1,784 | 1,490 | -1 | -1 |
{- | Inclusion of bundle-local copies of libraries in application bundles.
OS X application bundles can include local copies of libraries and
frameworks (ie dependencies of the executable) which aids distribution
and eases installation. Xcode and the traditional OS X development
toolchain support this fairly transparently; this module is an attempt
to provide similar functionality in the cabal-macosx package.
The basic approach is as follows:
1. Discover the libraries an object file (executable, other binary, or
library) references using @otool -L /path/@
2. Copy those libraries into the application bundle, at the right
place, ie @\@executable_path\/..\/Frameworks\/@ where
   @\@executable_path@ represents the path to the executable in the
bundle.
3. Modify the object file so it refers to the local copy, using
@install_name_tool -change /oldLibPath/ /newLibPath/ /path/@ where
@/newlibPath/@ points to @\@executable_path\/..\/Frameworks@ as
described above (@\@executable_path@ is a special symbol recognised
by the loader).
Complications:
* There's some stuff we don't want to include because we can
expect it to be present everywhere, eg the Cocoa framework; see
/Exclusions/, below.
* Libraries can themselves depend on other libraries; thus, we
need to copy them in recursively.
* Because of these transitive dependencies, dependencies can
arise on multiple copies of the same library, in different
locations (eg @\/usr\/lib\/libfoo@ and @\/opt\/local\/lib\/libfoo@).
Thus, we preserve that path info, and (for example) copy
@\/usr\/lib\/libFoo@ to
@\@executable_path\/..\/Frameworks\/usr\/lib\/@.
The approach followed is to build a dependency graph, seeded with the
executable and any other binaries being included in the bundle, using
@otool@; then to walk that graph, copying in the libraries, and
calling @install_name_tool@ to update the dependencies of entities in
the bundle. Going via a dependency graph is a bit unnecessary - we
could just recursively @otool@/@install_name_tool@, but its helpful if
we need to debug, etc., and a nice clear abstraction.
/Exclusions/: as described above, a lot of truly common stuff would
get copied in, so we provide a mechanism to exclude libraries from
this process: 'buildDependencyGraph' can be passed a list of strings,
and a library whose path includes any of those strings is excluded.
If an empty list is passed, then nothing is excluded (which is almost
certainly not what you want).
-}
module Distribution.MacOSX.Dependencies (
includeDependencies,
appDependencyGraph
) where
import Control.Monad
import Data.List
import Data.Maybe
import System.Directory
import System.FilePath
import System.Process
import System.Exit
import Text.ParserCombinators.Parsec
import Distribution.MacOSX.Common
import Distribution.MacOSX.DG
-- | Include any library dependencies required in the app: build the
-- dependency graph, copy every discovered library into the bundle, then
-- rewrite the install names recorded in each node's dependents.
includeDependencies ::
  FilePath -- ^ Path to application bundle root.
  -> MacApp -> IO ()
includeDependencies bundleRoot app = do
  graph <- appDependencyGraph bundleRoot app
  let deps = dgFDeps graph
  mapM_ (copyInDependency bundleRoot app) deps
  mapM_ (updateDependencies bundleRoot app) deps
-- | Compute the application's library dependency graph.  The non-filter
-- chasing modes are normalised into a 'FilterDeps' predicate and retried;
-- 'DoNotChase' short-circuits with a graph containing only the bundle's
-- own binaries.
appDependencyGraph ::
  FilePath -- ^ Path to application bundle root.
  -> MacApp -> IO DG
appDependencyGraph bundleRoot app =
  case appDeps app of
    DoNotChase        -> return seedGraph
    ChaseWithDefaults -> again (ChaseWith defaultExclusions)
    ChaseWith excls   -> again (FilterDeps (checkExclude excls))
    FilterDeps keep   -> do
      putStrLn "Building dependency graph"
      buildDependencyGraph bundleRoot app seedGraph seeds [] keep
  where
    -- Re-enter with a rewritten chasing strategy.
    again deps = appDependencyGraph bundleRoot (app { appDeps = deps })
    seeds     = appName app : otherCompiledBins app ++ otherBins app
    seedGraph = dgEmpty `dgAddPaths` seeds
-- | True iff the path mentions none of the exclusion substrings, i.e.
-- the library should still be chased.
checkExclude :: Exclusions -> FilePath -> Bool
checkExclude excls f = all (\excl -> not (excl `isInfixOf` f)) excls
-- | Recursive dependency-graph builder.
buildDependencyGraph ::
  FilePath -- ^ Path to application bundle root.
  -> MacApp
  -> DG -- ^ Dependency graph to be extended.
  -> [FilePath] -- ^ Queue of paths to object files to be examined for
                -- dependencies.
  -> [FilePath] -- ^ List of paths of object files which have already
                -- been dealt with.
  -> DepsFilter -- ^ filter function for dependency-chasing.
  -> IO DG
-- Empty work queue: the graph is complete.
buildDependencyGraph _ _ dg [] _ _ = return dg
-- Pop one object file, record its dependencies in the graph, then enqueue
-- any dependency not already queued or processed.
buildDependencyGraph appPath app dg (x:xs) done fil =
  do (dg', tgts) <- addFilesDependencies appPath app dg x fil
     let done' = (x:done)
         xs' = addToQueue xs done' tgts
     buildDependencyGraph appPath app dg' xs' done' fil
  -- NOTE(review): addOneToQueue tests membership against the queue+done
  -- as they were *before* the fold, so a path appearing twice in tgts can
  -- be enqueued (and processed) twice; harmless if dgAddFDeps is
  -- idempotent -- confirm.
  where addToQueue :: [FilePath] -> [FilePath] -> [FilePath] -> [FilePath]
        addToQueue q done' = foldl (addOneToQueue (q ++ done')) q
        addOneToQueue :: [FilePath] -> [FilePath] -> FilePath -> [FilePath]
        addOneToQueue done' q n = if n `elem` done' then q else q ++ [n]
-- | Add an object file's dependencies to a dependency graph, returning
-- the extended graph together with the list of discovered dependencies.
addFilesDependencies ::
  FilePath -- ^ Path to application bundle root.
  -> MacApp
  -> DG -- ^ Dependency graph to be extended.
  -> FilePath -- ^ Path to object file to be examined for dependencies.
  -> DepsFilter -- ^ filter function for dependency chasing.
  -> IO (DG, [FilePath])
addFilesDependencies bundleRoot app graph objPath keep = do
  FDeps _ libs <- getFDeps bundleRoot app objPath keep
  let graph' = dgAddFDeps graph (FDeps objPath libs)
  return (graph', libs)
-- | Compute the library dependencies for some file, removing any
-- exclusions.  Shells out to @otool -L@ and parses its output.
getFDeps ::
  FilePath -- ^ Path to application bundle root.
  -> MacApp
  -> FilePath -- ^ Path to object file to be examined for dependencies.
  -> DepsFilter -- ^ filter function for dependency chasing.
  -> IO FDeps
getFDeps appPath app path fil =
  do absPath <- getAbsPath
     contents <- readProcess oTool ["-L", absPath] ""
     case parse parseFileDeps "" contents of
       Left err -> error $ show err
       Right fDeps -> return $ exclude fil fDeps
    where -- Bundle roots are examined in place inside the bundle; other
          -- libraries are resolved on the host system first.
          getAbsPath = if isRoot app path then
                         return (appPath </> pathInApp app path)
                       else lookupLibrary path
          -- otool prints "<file>:" then one tab-indented line per
          -- dependency; self-references (the install-name line) are
          -- filtered out.
          parseFileDeps :: Parser FDeps
          parseFileDeps = do f <- manyTill (noneOf ":") (char ':')
                             _ <- char '\n'
                             deps <- parseDepOrName `sepEndBy` char '\n'
                             eof
                             return $ FDeps f $ filter (f /=) $ catMaybes deps
          parseDepOrName :: Parser (Maybe FilePath)
          parseDepOrName = do c <- oneOf "\t/"
                              case c of
                                '\t' -> -- A dependency.
                                        do dep <- parseDep
                                           return $ Just dep
                                '/' -> -- Same filename, alternative arch
                                       do _ <- manyTill (noneOf ":") (char ':')
                                          return Nothing
                                _ -> error "Can't happen"
          -- One dependency line: the path up to the first space, followed
          -- by a parenthesised version annotation which is discarded.
          parseDep :: Parser FilePath
          parseDep = do dep <- manyTill (noneOf " ") (char ' ')
                        _ <- char '('
                        _ <- manyTill (noneOf ")") (char ')')
                        return dep
-- | Apply a filter function to an 'FDeps' value; any dependencies for
-- which the filter function returns False are excluded.
exclude :: DepsFilter -> FDeps -> FDeps
exclude keep (FDeps path deps) = FDeps path [d | d <- deps, keep d]
-- | Copy the object file named by an 'FDeps' node into the application
-- bundle; the bundle's own binaries (roots) are already in place and are
-- skipped.
copyInDependency ::
  FilePath -- ^ Path to application bundle root.
  -> MacApp
  -> FDeps -- ^ Dependencies to copy in.
  -> IO ()
copyInDependency bundleRoot app (FDeps src _) =
  unless (isRoot app src) $ do
    let dest = bundleRoot </> pathInApp app src
    putStrLn $ "Copying " ++ src ++ " to " ++ dest
    createDirectoryIfMissing True (takeDirectory dest)
    resolvedSrc <- lookupLibrary src
    copyFile resolvedSrc dest
-- | Rewrite each of an object file's library references so they point at
-- the copies bundled inside the application.
updateDependencies ::
  FilePath -- ^ Path to application bundle root.
  -> MacApp
  -> FDeps -- ^ Dependencies to update.
  -> IO ()
updateDependencies bundleRoot app (FDeps obj libs) =
  forM_ libs $ updateDependency bundleRoot app obj
-- | Update some object file's dependency on some particular library, to
-- point to the bundled copy of that library, by running
-- @install_name_tool -change@.  Raises an informative 'error' if the tool
-- exits non-zero.
updateDependency ::
  FilePath -- ^ Path to application bundle root.
  -> MacApp
  -> FilePath -- ^ Path to object file to update.
  -> FilePath -- ^ Path to library which was copied in (path before copy).
  -> IO ()
updateDependency appPath app src tgt =
  do putStrLn $ "Updating " ++ newLib ++ "'s dependency on " ++ tgt ++
       " to " ++ tgt'
     let cmd = iTool ++ " -change " ++ show tgt ++ " " ++ show tgt' ++
               " " ++ show newLib
     ec <- system cmd
     case ec of
       ExitSuccess -> return ()
       -- Previously this was a partial pattern bind (ExitSuccess <- ...),
       -- which turned any install_name_tool failure into an opaque
       -- pattern-match error; fail loudly with context instead.
       ExitFailure code ->
         error $ "updateDependency: install_name_tool exited with code "
                 ++ show code ++ " running: " ++ cmd
  where -- Binaries listed in otherBins live deeper in the bundle, so they
        -- need one "../" per path component to reach Frameworks/.
        origBin = makeRelative (appPath </> "Contents/Resources") newLib
        rels = if origBin `elem` otherBins app || "/" ++ origBin `elem` otherBins app
                 then concatMap (const "../") (splitPath origBin)
                 else "../"
        -- @executable_path is resolved by the dynamic loader at run time.
        tgt' = "@executable_path/" ++ rels ++ "Frameworks/" </> makeRelative "/" tgt
        newLib = appPath </> pathInApp app src
-- | Path to @otool@ tool.  Hard-coded; correct for a stock macOS
-- install.
oTool :: FilePath
oTool = "/usr/bin/otool"
-- | Path to @install_name_tool@ tool.  Hard-coded like 'oTool'.
iTool :: FilePath
iTool = "/usr/bin/install_name_tool"
| soenkehahn/cabal-macosx | Distribution/MacOSX/Dependencies.hs | bsd-3-clause | 9,845 | 0 | 17 | 2,554 | 1,691 | 858 | 833 | 148 | 5 |
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE FlexibleContexts #-}
module LDrive.Tests.LED where
import Ivory.Tower
import LDrive.Platforms
import LDrive.LED
-- | LED test tower: blink the red LED and the green LED at different
-- rates (the 'Milliseconds' argument is presumably the blink period --
-- confirm against 'LDrive.LED.blink').
app :: (e -> ColoredLEDs) -> Tower e ()
app toleds = do
  leds <- fmap toleds getEnv
  blink (Milliseconds 1000) [redLED leds]
  blink (Milliseconds 666) [greenLED leds]
| sorki/odrive | src/LDrive/Tests/LED.hs | bsd-3-clause | 337 | 0 | 9 | 54 | 107 | 55 | 52 | 11 | 1 |
module Main where
import Data.Char
import Data.List
import Data.List.Split
import Data.Ord
import Lib
{-
Each room consists of an encrypted name (lowercase letters separated by dashes) followed by a dash, a sector ID, and a checksum in square brackets.
A room is real (not a decoy) if the checksum is the five most common letters in the encrypted name, in order, with ties broken by alphabetization.
What is the sum of the sector IDs of the real rooms?
First sort the name by letter frequency, then subsort by alphabet
or simpler, sort by alphabet, then group, then sort by size, assuming a stable sort
-}
-- Example rooms from the puzzle statement that should validate ...
yes = ["aaaaa-bbb-z-y-x-123[abxyz]","a-b-c-d-e-f-g-h-987[abcde]","not-a-real-room-404[oarel]"]
-- ... and a decoy that should not.
no = ["totally-real-room-200[decoy]"]
-- this could use a real parser
-- | Split a raw room string of the form @"name-with-dashes-123[abcde]"@
-- into (encrypted name, sector id, checksum).  The name keeps its
-- trailing dash.  Uses the total 'drop 1' instead of the partial 'tail'
-- so a malformed room without brackets yields an empty checksum rather
-- than crashing.
parse room = (name,sectorid,chksum)
    where name = takeWhile (not . isDigit) room
          sectorid = takeWhile isDigit (dropWhile (not . isDigit) room)
          chksum = drop 1 $ takeWhile (/= ']') (dropWhile (/= '[') room)
-- | Group equal elements and order the groups longest-first; groups of
-- equal length keep their alphabetical order (the sort is stable and
-- 'byLength' reports LT for ties).
makechksum :: Ord a => [a] -> [[a]]
makechksum xs = sortBy byLength (group (sort xs))

-- | Longer lists sort in front of shorter ones; never reports EQ, so
-- equal lengths keep their existing (alphabetical) order.
byLength a b
  | length a < length b = GT
  | otherwise           = LT
-- | A room is real iff its stored checksum equals the five most common
-- alphabetic letters of the name (ties broken alphabetically).
-- NOTE(review): 'roomname' is re-parsed here even though it is already
-- the parsed name component; 'first' is presumably Lib's triple
-- accessor -- confirm against Lib.
check (roomname,sectorid,checksum) = thissum == checksum
    where triple = parse roomname
          thissum = concatMap (take 1) (take 5 . makechksum . filter isAlpha $ first triple)
-- | Parse every input line and keep only the rooms whose checksum validates.
validrooms cs = filter check (map parse $ lines cs)
-- Part 1: sum the sector ids of the real rooms.
-- Part 2: decrypt the real room names and print those starting with
-- "north" (the North Pole object storage room).
-- NOTE(review): 'second' presumably extracts the sector id from the
-- triple (from Lib) -- confirm.
main = do contents <- readFile "input.txt"
          let valid = map numit $ validrooms contents
          print . sum $ map second valid
          mapM_ print $ filter (\(r,s) -> take 5 r == "north") $ map decrypt valid
-- | Convert the textual sector id of a parsed room into an 'Int',
-- leaving the other two components alone.
numit :: (a,String,c) -> (a,Int,c)
numit (name, sid, chk) = (name, read sid, chk)
-- | Decrypt a room name by Caesar-shifting it by its sector id.
decrypt (name, sid, _) = (rotate sid name, sid)

-- | Code point of 'a', the base of the rotation alphabet.
aval = ord 'a'

-- | Shift every character by @n@ places within a 26-letter alphabet
-- anchored at 'a'.  (Non-lowercase characters such as '-' are shifted
-- too, matching the original behaviour.)
rotate n str = map shift str
  where
    m = n `mod` 26
    shift c = chr (aval + (ord c - aval + m) `mod` 26)
| shapr/adventofcode2016 | src/Four/Main.hs | bsd-3-clause | 1,846 | 0 | 13 | 422 | 560 | 302 | 258 | 29 | 2 |
module Data.Blockchain.Crypto.HashTree
( HashTreeRoot
, unHashTreeRoot
, hashTreeRoot
) where
import qualified Data.Aeson as Aeson
import Data.Blockchain.Crypto.Hash
-- | The root hash of a hash tree built over values of type @a@.
newtype HashTreeRoot a = HashTreeRoot { unHashTreeRoot :: Hash a }
  deriving (Aeson.FromJSON, Aeson.ToJSON, Eq, Ord, Show)
-- Note: hash tree constructed with extra leaves at end of tree.
-- This is NOT compatible with the Bitcoin implementation.
-- ┌───┴──┐ ┌────┴───┐ ┌─────┴─────┐
-- ┌──┴──┐ │ ┌──┴──┐ │ ┌──┴──┐ ┌──┴──┐
-- ┌─┴─┐ ┌─┴─┐ │ ┌─┴─┐ ┌─┴─┐ ┌─┴─┐ ┌─┴─┐ ┌─┴─┐ ┌─┴─┐ │
-- (5-leaf) (6-leaf) (7-leaf)
-- | Build the root hash of a hash tree over the given values.
-- The empty list hashes to 'mempty'; a singleton is hashed directly;
-- otherwise the list is split and the two subtree hashes are combined
-- with 'mappend'.
hashTreeRoot :: forall a. ToHash a => [a] -> HashTreeRoot a
hashTreeRoot = HashTreeRoot . buildHash
  where
    buildHash :: [a] -> Hash a
    buildHash [] = mempty
    buildHash [x] = hash x
    buildHash xs = mappend (buildHash left) (buildHash right)
      where
        (left, right) = splitAt i xs
        -- i is the smallest power of two with 2*i >= length xs, so the
        -- left subtree always holds a power-of-two number of leaves and
        -- any "extra" leaves end up on the right (see diagram above).
        i = until (\x -> x * 2 >= length xs) (*2) 1
| TGOlson/blockchain | lib/Data/Blockchain/Crypto/HashTree.hs | bsd-3-clause | 1,252 | 0 | 13 | 299 | 240 | 136 | 104 | -1 | -1 |
{-# LANGUAGE DuplicateRecordFields #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE NoMonomorphismRestriction #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeApplications #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE UndecidableInstances #-}
{-# LANGUAGE DataKinds #-}
module Smoothing where
import Data
import Data.List
import Data.Generics.Record
-- | Given frame_{n} and frame_{n+1}, yield a new frame_{n+1} where
-- the people have had any joints that were 0 made given the value
-- from f_{n}
forwardFill :: Frame Person -> Frame Person -> Frame Person
forwardFill f0 f1 = setField @"people" smoothed f1
    where
        ps0 = getField @"people" f0
        ps1 = getField @"people" f1
        -- Pair each person in the new frame with their (name-matched)
        -- counterpart from the previous frame, if any.
        candidates = map match ps1
        smoothed = map (uncurry fillPerson) candidates
        -- Match on the "name" field; 'find' keeps only the first match
        -- when names are duplicated in the previous frame.
        match :: Person -> (Maybe Person, Person)
        match p = (find (\p' -> getField @"name" p == getField @"name" p') ps0, p)
-- | Returns a _new_ p1 where it has those values that it thought were
-- zero filled in by the value from p0.  With no previous person, p1 is
-- returned unchanged.
fillPerson :: Maybe Person -> Person -> Person
fillPerson Nothing p1 = p1
fillPerson (Just p0) p1 = setField @"poseKeyPoints" newKeyPoints p1
  where
    newKeyPoints :: [Float]
    -- Note: 'zipWith' truncates to the shorter key-point list if the
    -- two people have different counts.
    newKeyPoints = zipWith f (getField @"poseKeyPoints" p0) (getField @"poseKeyPoints" p1)
    -- A key point of exactly 0 is treated as "missing" and replaced.
    f k0 k1 = if k1 == 0 then k0 else k1
| silky/DanceView | src/Smoothing.hs | bsd-3-clause | 1,649 | 0 | 13 | 462 | 306 | 166 | 140 | 30 | 2 |
{-# LANGUAGE OverloadedStrings, GeneralizedNewtypeDeriving, StandaloneDeriving,
FlexibleContexts, FlexibleInstances, MultiParamTypeClasses,
TypeSynonymInstances #-}
import Data.Monoid
import Data.Foldable
import Data.Traversable hiding (mapM)
import Control.Applicative ((<$>))
import Control.Monad (forM_, when, void)
import Control.Monad.IO.Class
import Control.Monad.Reader
import Control.Monad.Trans.Class
import Control.Concurrent.STM
import qualified Data.Map as M
import qualified Data.Text.Lazy as TL
import qualified Data.Vector as V
import qualified Data.Csv as Csv
import Control.Error
import Network.HTTP.Types.Status (status200, status202, status500, status404)
import Data.Aeson (ToJSON(..), FromJSON(..), (.=))
import qualified Data.Aeson as JSON
import Web.Scotty.Trans hiding (ScottyM, ActionM)
import DataLogger (DataLogger, DeviceId, Sample, SensorId, MeasurableId)
import qualified DataLogger as DL
import DeviceList
import Average
-- Route-parameter parsing and JSON serialisation for the DataLogger
-- newtypes; the derived instances reuse the underlying types'
-- representations.
deriving instance Parsable DeviceId
deriving instance Parsable DeviceName
deriving instance Parsable DL.SensorId
deriving instance Parsable DL.MeasurableId
deriving instance ToJSON DeviceId
deriving instance ToJSON DL.SensorId
deriving instance ToJSON DL.MeasurableId
-- JSON shape: {"sensor_id": ..., "name": ...}
instance ToJSON DL.Sensor where
    toJSON s = JSON.object [ "sensor_id" .= DL.sensorId s
                           , "name" .= DL.sensorName s
                           ]
-- JSON shape: {"measurable_id": ..., "name": ..., "unit": ...}
instance ToJSON DL.Measurable where
    toJSON s = JSON.object [ "measurable_id" .= DL.measurableId s
                           , "name" .= DL.measurableName s
                           , "unit" .= DL.measurableUnit s
                           ]
-- Scotty monads specialised to run over the shared device list.
type ScottyM = ScottyT TL.Text (DeviceListT IO)
type ActionM = ActionT TL.Text (DeviceListT IO)
-- | A sensor bundled with its measurables, for one-shot JSON output.
data SensorMeasurables = SM DL.Sensor [DL.Measurable]
instance ToJSON SensorMeasurables where
    toJSON (SM s m) = JSON.object [ "sensor_id" .= DL.sensorId s
                                  , "name" .= DL.sensorName s
                                  , "measurables" .= m
                                  ]
-- Create the shared device list, populate it once, then serve the
-- routes on port 3000; both scotty runners use the same list.
main = do
    devList <- newDeviceList
    let run = runDeviceListT devList
    run refreshDevices
    scottyT 3000 run run routes
-- | Look up the ":device" route parameter and run the action with the
-- matching device, or answer 404 when it is unknown.
withDevice :: (Device -> ActionM ()) -> ActionM ()
withDevice action = do
    dId <- param "device"
    devices <- lift $ lookupDeviceId dId
    case devices of
      Nothing -> do status status404
                    html "Can't find device"
      Just dev -> action dev
-- | Like 'withDevice', but additionally require the device to have a
-- live 'DataLogger' backing it; 404 otherwise.
withBackedDevice :: (Device -> DataLogger -> ActionM ()) -> ActionM ()
withBackedDevice action = withDevice $ \dev->do
    case devLogger dev of
      Nothing -> do status status404
                    html "unbacked device"
      Just dl -> action dev dl
-- | Register @GET /devices/:device/<settingName>@, reading the setting
-- from the device's logger and answering it as JSON; a logger error is
-- reported as 500 with the error text.
-- NOTE(review): the inner bindings shadow Prelude's 'error' and the
-- outer 'value' -- harmless but worth renaming.
getSetting :: (ToJSON a)
           => String -> DL.Setting a -> ScottyM ()
getSetting settingName setting =
    get (capture $ "/devices/:device/"<>settingName) $ withBackedDevice $ \dev dl->do
        value <- liftIO $ runEitherT $ DL.get dl setting
        case value of
          Left error -> do
            text $ TL.pack error
            status status500
          Right value -> do
            json $ JSON.object [ "setting" .= settingName
                               , "deviceId" .= devId dev
                               , "value" .= toJSON value
                               ]
-- | Register @POST /devices/:device/<settingName>@, writing the "value"
-- parameter to the device and, when @saveNV@ is set, persisting the
-- non-volatile configuration afterwards.  Echoes the written value as
-- JSON, or answers 500 with the logger error.
putSetting :: (Parsable a, ToJSON a)
           => String -> DL.Setting a -> Bool -> ScottyM ()
putSetting settingName setting saveNV =
    post (capture $ "/devices/:device/"<>settingName) $ withBackedDevice $ \dev dl->do
        value <- param "value"
        result <- liftIO $ runEitherT $ do
            DL.set dl setting value
            when saveNV $ DL.saveNVConfig dl
        case result of
          Left err -> do status status500
                         text $ "Error: "<>TL.pack err
          Right _ -> json $ JSON.object [ "setting" .= settingName
                                        , "deviceId" .= devId dev
                                        , "value" .= toJSON value
                                        ]
-- | Register matching GET and POST routes for a volatile setting.
getPutSetting :: (ToJSON a, Parsable a)
              => String -> DL.Setting a -> ScottyM ()
getPutSetting name setting = do
    getSetting name setting
    putSetting name setting False
-- | As 'getPutSetting', but writes are also saved to non-volatile
-- storage.
getPutNVSetting :: (ToJSON a, Parsable a)
                => String -> DL.Setting a -> ScottyM ()
getPutNVSetting name setting = do
    getSetting name setting
    putSetting name setting True
-- Serialise a sample with its sensor/measurable ids unwrapped to their
-- raw numbers.
instance ToJSON DL.Sample where
    toJSON s =
        JSON.object [ "time" .= DL.sampleTime s
                    , "sensor" .= case DL.sampleSensor s of DL.SID n -> n
                    , "measurable" .= case DL.sampleMeasurable s of DL.MID n -> n
                    , "value" .= DL.sampleValue s
                    ]
-- | Answer the records as CSV.  Content-Type is text/plain so browsers
-- render the payload instead of downloading it.
csv :: Csv.ToRecord a => [a] -> ActionM ()
csv xs = do
    setHeader "Content-Type" "text/plain"
    raw $ Csv.encode xs
-- | Keep only samples originating from the given sensor.
filterSensor :: DL.SensorId -> Sample -> Bool
filterSensor sensor s = DL.sampleSensor s == sensor
-- | Keep only samples from the given (sensor, measurable) pair.
filterMeasurable :: DL.SensorId -> DL.MeasurableId -> Sample -> Bool
filterMeasurable sensor measurable s =
    DL.sampleSensor s == sensor && DL.sampleMeasurable s == measurable
-- | A strict-in-structure product whose Monoid combines component-wise;
-- used to accumulate a pair of running 'Average's together.
data Pair a b = Pair a b
              deriving (Show)
instance (Monoid a, Monoid b) => Monoid (Pair a b) where
    mempty = Pair mempty mempty
    Pair a b `mappend` Pair x y = Pair (a <> x) (b <> y)
-- | Thin a sample stream by averaging: samples are consumed in windows
-- of @res@ time units, and within each window one averaged sample is
-- emitted per (sensor, measurable) pair.
decimate :: Integer -> V.Vector Sample -> [Sample]
decimate res = go
  where
    -- Average each (sensor, measurable) group of a window, averaging
    -- both the timestamps and the values.
    averageMeasurables :: V.Vector Sample -> [Sample]
    averageMeasurables samples =
        let a :: M.Map (SensorId, MeasurableId) (Pair (Average Double) (Average Float))
            -- BUG FIX: the previous 'foldMap' of 'M.singleton's used the
            -- Map monoid, whose union is left-biased, so all but one
            -- sample per key were silently dropped.  'M.insertWith
            -- mappend' combines duplicates with the 'Pair'/'Average'
            -- monoid as intended.
            a = V.foldl' (\m s -> M.insertWith mappend
                                      (DL.sampleSensor s, DL.sampleMeasurable s)
                                      (Pair (average $ realToFrac $ DL.sampleTime s)
                                            (average $ DL.sampleValue s))
                                      m)
                         M.empty samples
            avgSamples = map (\((sid,mid), Pair t v)->DL.Sample (round $ getAverage t) sid mid (getAverage v))
                             (M.assocs a)
        in avgSamples
    go samples
      | V.null samples = []
      | DL.Sample {DL.sampleTime=startTime} <- V.head samples =
        -- BUG FIX: the window predicate was inverted ('startTime + res <'
        -- the sample time), which made every window empty and looped
        -- forever.  Keep samples within @res@ of the window start; the
        -- explicit non-empty guard also guarantees progress if @res@ is
        -- ever non-positive.
        let (ss, rest) = V.span (\s->DL.sampleTime s <= startTime + res) samples
        in if V.null ss
              then averageMeasurables (V.take 1 samples) ++ go (V.drop 1 samples)
              else averageMeasurables ss ++ go rest
-- | Fetch the device's samples, optionally decimate them to the
-- resolution requested in the "resolution" header, filter them, and
-- hand them to @format@.  If the fetch is still in progress the partial
-- data is answered with 202 plus progress headers.
getSamplesAction :: Device -> (Sample -> Bool)
                 -> ([Sample] -> ActionM ()) -> ActionM ()
getSamplesAction dev filterFn format = do
    result <- lift (fetch dev)
    resolution <- reqHeader "resolution"
    -- No (or unparsable) resolution header means "all samples".
    let decim = maybe V.toList decimate (resolution >>= readMay . TL.unpack)
    case result of
      (samples, Nothing) -> format $ decim $ V.filter filterFn samples
      (samples, Just (FetchProgress done total)) -> do
          addHeader "X-Samples-Done" (TL.pack $ show done)
          addHeader "X-Samples-Total" (TL.pack $ show total)
          format $ decim $ V.filter filterFn samples
          status status202
-- | Run a logger action; on failure answer an HTML error page with 500,
-- otherwise continue with the result.
-- NOTE(review): the Left binding shadows Prelude's 'error' and the
-- Right binding shadows 'result' -- harmless but worth renaming.
withLoggerResult :: EitherT String IO a -> (a -> ActionM ()) -> ActionM ()
withLoggerResult loggerAction go = do
    result <- liftIO $ runEitherT loggerAction
    case result of
      Left error -> do html ("<h1>Error</h1><p>"<>TL.pack error<>"</p>")
                       status status500
      Right result -> go result
-- | All HTTP routes.  Scotty matches routes in declaration order, so a
-- pattern must not be repeated earlier with a different handler.
routes :: ScottyM ()
routes = do
    get "/devices" $ do
      lift getDeviceList >>= json . map devId
    post "/devices" $ do
      lift refreshDevices
      lift getDeviceList >>= json . map devId
    post "/devices/add-test" $ do
      lift $ runEitherT $ addTestDevice (DN "hello")
      status status200
    getPutSetting "acquiring" DL.acquiring
    getPutSetting "sample-period" DL.samplePeriod
    getPutSetting "rtc-time" DL.rtcTime
    getPutNVSetting "acquire-on-boot" DL.acquireOnBoot
    getPutNVSetting "name" DL.deviceName
    post "/devices/:device/erase" $ withBackedDevice $ \dev dl->do
      withLoggerResult (DL.resetSampleCount dl) $ \_->do
        json $ JSON.object [("success", toJSON True)]
    post "/devices/:device/eject" $ withDevice $ \dev->do
      -- TODO
      json $ JSON.object [("success", toJSON True)]
    get "/devices/:device/sample_count" $ withDevice $ \dev->do
      count <- getSampleCount dev
      json $ JSON.object ["value" .= count]
    get "/devices/:device/samples/csv" $ withDevice $ \dev->
      getSamplesAction dev (const True) csv
    get "/devices/:device/samples/json" $ withDevice $ \dev->
      getSamplesAction dev (const True) json
    get "/devices/:device/sensors" $ withBackedDevice $ \dev dl->do
      withLoggerResult (DL.getSensors dl) $ \sensors->do
        json sensors
    get "/devices/:device/sensors/:sensor/measurables" $ withBackedDevice $ \dev dl->do
      sensor <- param "sensor"
      withLoggerResult (DL.getMeasurables dl sensor) $ \measurables->do
        json measurables
    -- BUG FIX: a duplicate "GET /devices/:device/sensors/:sensor/samples/json"
    -- route used to be registered here with a copy-pasted handler that
    -- returned the sensor's *measurables*; it shadowed the real samples
    -- route below, so it has been removed.
    get "/devices/:device/sensors/:sensor/samples/csv" $ withDevice $ \dev->do
      sensor <- param "sensor"
      getSamplesAction dev (filterSensor sensor) csv
    get "/devices/:device/sensors/:sensor/samples/json" $ withDevice $ \dev->do
      sensor <- param "sensor"
      getSamplesAction dev (filterSensor sensor) json
    get "/" $ file "index.html"
    get "/logo.svg" $ file "logo.svg"
    get "/jquery.js" $ file "jquery-2.0.3.js"
    get "/ui.js" $ file "ui.js"
    get "/app.css" $ file "app.css"
    get "/app.js" $ file "app.js"
    get "/chart.css" $ file "chart.css"
    get "/chart.js" $ file "chart.js"
    get "/d3.v3.js" $ file "d3.v3.min.js"
| bgamari/datalogger-web | WebApp.hs | bsd-3-clause | 9,929 | 0 | 19 | 2,910 | 3,072 | 1,492 | 1,580 | 212 | 2 |
{-# OPTIONS -cpp #-}
module Main where
{-
This file is part of funsat.
funsat is free software: it is released under the BSD3 open source license.
You can find details of this license in the file LICENSE at the root of the
source tree.
Copyright 2008 Denis Bueno
-}
import Control.Monad( when, forM_ )
import Data.Array.Unboxed( elems )
import Data.List( intersperse )
import Data.Version( showVersion )
import Funsat.Solver
( solve
, verify
, defaultConfig
, ShowWrapped(..)
, statTable )
import Funsat.Types( CNF(..), FunsatConfig(..), ConflictCut(..) )
import Paths_funsat( version )
import Prelude hiding ( elem )
import System.Console.GetOpt
import System.Environment( getArgs )
import System.Exit( ExitCode(..), exitWith )
import Data.Time.Clock
import qualified Data.Set as Set
import qualified Language.CNF.Parse.ParseDIMACS as ParseDIMACS
import qualified Text.Tabular as Tabular
import qualified Properties
-- | Command-line option descriptors.  The solver-tuning options all
-- thread their value into the embedded 'FunsatConfig'; the remaining
-- flags select alternative program modes (verify/profile/version).
options :: [OptDescr (Options -> Options)]
options =
    [ Option [] ["restart-at"]
      (ReqArg (\i o ->
          let c = optFunsatConfig o
          in o{ optFunsatConfig = c{configRestart = read i} }) "INT")
      (withDefault (configRestart . optFunsatConfig)
       "Restart after INT conflicts.")
    , Option [] ["restart-bump"]
      (ReqArg (\d o ->
          let c = optFunsatConfig o
          in o{ optFunsatConfig = c{configRestartBump = read d} }) "FLOAT")
      (withDefault (configRestartBump . optFunsatConfig)
       "Alter the number of conflicts required to restart by multiplying by FLOAT.")
    , Option [] ["no-vsids"] (NoArg $ \o ->
        let c = optFunsatConfig o
        in o{ optFunsatConfig = c{configUseVSIDS = False} })
      "Use static variable ordering."
    , Option [] ["no-restarts"] (NoArg $ \o ->
        let c = optFunsatConfig o
        in o{ optFunsatConfig = c{configUseRestarts = False} })
      "Never restart."
    , Option [] ["conflict-cut"]
      (ReqArg (\cut o ->
          let c = optFunsatConfig o
          in o{ optFunsatConfig = c{configCut = readCutOption cut} }) "1|d")
      "Which cut of the conflict graph to use for learning. 1=first UIP; d=decision lit"
    , Option [] ["verify"] (NoArg $ \o -> o{ optVerify = True })
      "Run quickcheck properties and unit tests."
    , Option [] ["profile"] (NoArg $ \o -> o{ optProfile = True })
      "Run solver. (assumes profiling build)"
    , Option [] ["print-features"] (NoArg $ \o -> o{ optPrintFeatures = True })
      "Print the optimisations the SAT solver supports and exit."
    , Option [] ["version"] (NoArg $ \o -> o{ optVersion = True })
      "Print the version of funsat and exit."
    ]
-- | Parsed command-line state: mode flags plus the solver configuration
-- accumulated from the tuning options.
data Options = Options
    { optVerify :: Bool
    , optProfile :: Bool
    , optPrintFeatures :: Bool
    , optFunsatConfig :: FunsatConfig
    , optVersion :: Bool }
               deriving (Show)
-- | All mode flags off; solver settings from 'defaultConfig'.
defaultOptions :: Options
defaultOptions = Options
                 { optVerify = False
                 , optProfile = False
                 , optVersion = False
                 , optPrintFeatures = False
                 , optFunsatConfig = defaultConfig }
-- Convenience projections through the embedded solver configuration.
optUseVsids, optUseRestarts :: Options -> Bool
optUseVsids = configUseVSIDS . optFunsatConfig
optUseRestarts = configUseRestarts . optFunsatConfig
-- | Decode the @--conflict-cut@ argument: anything starting with @1@
-- selects the first-UIP cut, anything starting with @d@ the
-- decision-literal cut.
readCutOption s = case s of
                    '1':_ -> FirstUipCut
                    'd':_ -> DecisionLitCut
                    _     -> error "error parsing cut option"
-- | Append the option's default value (taken from 'defaultOptions') to
-- the end of its description string.
withDefault :: (Show v) => (Options -> v) -> String -> String
withDefault extract description =
    concat [description, " Default ", show (extract defaultOptions), "."]
-- | Fold the parsed option transformers over 'defaultOptions' and
-- return them with the remaining (file) arguments; any getopt errors
-- abort with the usage text.
validateArgv :: [String] -> IO (Options, [FilePath])
validateArgv argv = do
    case getOpt Permute options argv of
      (o,n,[] ) -> return (foldl (flip ($)) defaultOptions o, n)
      (_,_,errs) -> ioError (userError (concat errs ++ usageInfo usageHeader options))
-- | First line of the --help / usage-error output.
usageHeader :: String
usageHeader = "\nUsage: funsat [OPTION...] cnf-files..."
-- | Entry point: handle the exclusive mode flags (verify / profile /
-- version) first, print the feature configuration, then solve each CNF
-- file in turn.
main :: IO ()
main = do
  (opts, files) <- getArgs >>= validateArgv
  when (optVerify opts) $ do
      Properties.main
      exitWith ExitSuccess
  when (optProfile opts) $ do
      putStrLn "Solving ..."
      Properties.profile
      exitWith ExitSuccess
  when (optVersion opts) $ do
      putStr "funsat "
      putStrLn (showVersion version)
      exitWith ExitSuccess
  putStr "Feature config: "
  putStr . concat $ intersperse ", "
    [ if (optUseVsids opts) then "vsids" else "no vsids"
    , if (optUseRestarts opts) then "restarts" else "no restarts"
    , "unsat checking"
    ]
  putStr "\n"
  when (optPrintFeatures opts) $ exitWith ExitSuccess
  when (null files) $
     ioError (userError (usageInfo usageHeader options))
  forM_ files (parseAndSolve opts)
      where
        parseAndSolve opts path = do
          parseStart <- getCurrentTime
          cnf <- parseCNF path
          putStrLn $ show (numVars cnf) ++ " variables, "
                     ++ show (numClauses cnf) ++ " clauses"
          -- Force the clause set before starting the solve timer so
          -- lazy parsing work is not charged to the solver.
          Set.map seqList (clauses cnf)
            `seq` putStrLn ("Solving " ++ path ++ " ...")
          parseEnd <- getCurrentTime
          startingTime <- getCurrentTime
          let cfg = optFunsatConfig opts
              (solution, stats, rt) = solve cfg cnf
          -- Forcing 'solution' here makes 'endingTime' measure the
          -- actual solve.
          endingTime <- solution `seq` getCurrentTime
          print solution
          print $ statTable stats `Tabular.combine`
                  Tabular.mkTable
                  [[ WrapString "Parsing time "
                   , WrapString $ show (diffUTCTime parseEnd parseStart) ]
                  ,[ WrapString "Real time "
                   , WrapString $ show (diffUTCTime endingTime startingTime)]]
          putStr "Verifying solution..."
          case verify solution rt cnf of
            Just errorWitness ->
               do putStrLn "\n--> VERIFICATION ERROR!"
                  print errorWitness
            Nothing -> putStrLn "succeeded."
        -- Force the spine and elements of a clause.
        seqList l@[] = l
        seqList l@(x:xs) = x `seq` seqList xs `seq` l
-- | Parse a DIMACS file into the solver's 'CNF'; a parse error aborts
-- the program.
parseCNF :: FilePath -> IO CNF
parseCNF path = do
    result <- ParseDIMACS.parseFile path
    case result of
      Left err -> error . show $ err
      Right c -> return . asCNF $ c
-- | Convert parsed CNF to internal representation: each parsed clause
-- array becomes a literal list, and the clauses are collected in a set.
asCNF :: ParseDIMACS.CNF -> CNF
asCNF (ParseDIMACS.CNF v c is) =
    CNF { numVars = v
        , numClauses = c
        , clauses = Set.fromList . map (map fromIntegral . elems) $ is }
| dbueno/funsat | Main.hs | bsd-3-clause | 6,579 | 0 | 17 | 1,909 | 1,819 | 969 | 850 | 149 | 4 |
{-# LANGUAGE ScopedTypeVariables, ViewPatterns #-}
module Lab5a where
import Data.Maybe
import Data.List
import Control.Arrow
import Data.Map (Map)
import qualified Data.Map as M
import Control.Monad.State
import Debug.Trace
-- | Operator fixity: prefix, or infix with left/right associativity.
data Fixity = Prefix | Infixl | Infixr
             deriving (Show, Eq)
-- | An operator: its token, its precedence, and its fixity.
data Op a = Op { term :: a
               , pri :: Integer
               , fixty :: Fixity
               }
            deriving (Show, Eq)
-- | Atoms of the operator-precedence grammar: an operator token (RT),
-- the start/end sentinel (RS), an argument (RA), and braces.
data Atom a = RT a
            | RS
            | RA
            | ROBrace
            | RCBrace
            deriving (Show, Eq, Ord)
-- | Precedence relation between pairs of atoms.
type RelationTable a = Map (a, a) Ordering
-- | Build the operator-precedence relation table: LT means "shift",
-- GT "reduce", EQ "pop matching" (e.g. braces).  Entries come from the
-- pairwise operator relations ('rel'), the operator/sentinel/brace
-- relations ('genrel'), and a fixed set of brace/sentinel rules.
relationTable :: forall a. (Eq a, Ord a) => [Op a] -> RelationTable (Atom a)
relationTable ops =
  let ords = concatMap (\op -> map (first $ \a -> (RT $ term op, RT a)) $ rel op) ops
      generic = concatMap genrel ops
      extra = [ ((ROBrace, RCBrace), EQ)
              , ((RS, ROBrace), LT)
              , ((RS, RA), LT)
              , ((ROBrace, ROBrace), LT)
              , ((RA, RS), GT)
              , ((RCBrace, RS), GT)
              , ((ROBrace, RA), LT)
              , ((RA, RCBrace), GT)
              , ((RCBrace, RCBrace), GT)
              ]
  in M.fromList $ ords ++ generic ++ extra
  where filterterm :: (Op a -> Bool) -> Ordering -> [(a, Ordering)]
        filterterm f ord = zip (map term $ filter f ops) (repeat ord)
        -- Relations of one operator against arguments, braces and the
        -- sentinel (the same for every operator).
        genrel :: Op a -> [((Atom a, Atom a), Ordering)]
        genrel (RT . term -> rt) = [ ((rt, RA), LT)
                                   , ((RA, rt), GT)
                                   , ((rt, ROBrace), LT)
                                   , ((ROBrace, rt), LT)
                                   , ((RCBrace, rt), GT)
                                   , ((rt, RCBrace), GT)
                                   , ((rt, RS), GT)
                                   , ((RS, rt), LT)
                                   , ((RS, RS), EQ)
                                   ]
        -- Relations of one operator against the other operators, by
        -- precedence and associativity.
        rel :: Op a -> [(a, Ordering)]
        rel op =
          let
              -- infix and prefix
              lesser = filterterm (\op' -> fixty op' == Prefix || pri op < pri op') LT
              greater = filterterm (\op' -> fixty op' /= Prefix && pri op > pri op') GT
              eqord = case fixty op of
                        Infixl -> GT
                        Infixr -> LT
              -- infix only
              equal = if fixty op == Prefix then [] else filterterm (\op' -> fixty op == fixty op' && pri op == pri op') eqord
          in lesser ++ greater ++ equal
-- | Parse tree: an operator applied to sub-trees, or a bare value.
data Parsed a = POp (Atom a) [Parsed a]
              | Val a
              deriving (Show, Eq)
-- | Parser state: the value stack and the operator stack.
type OPState = State ([Parsed String], [Atom String]) ()
-- | How to compare two atoms given the operator set.
type OpCompare = [Op String] -> Atom String -> Atom String -> Ordering
-- | Operator-precedence (shift/reduce) parser over pre-split tokens.
-- Starts with the sentinel 'RS' on the operator stack, processes each
-- token, then reduces everything remaining; the single tree left on the
-- value stack is the result.
opParser' :: OpCompare -> [Op String] -> [String] -> Parsed String
opParser' comp' ops str' = head $ fst $ execState (mapM_ parse str' >> finish) ([], [RS])
  where comp = comp' ops
        -- Arity of each reducible atom: prefix operators take one
        -- argument, infix two; close braces and arguments "take" one.
        lens :: Map (Atom String) Int
        lens = M.fromList $ map (\op -> (RT $ term op, if fixty op == Prefix then 1 else 2)) ops
               ++ [ (RCBrace, 1)
                  , (RA, 1)
                  ]
        -- Reduce while the stack top has higher precedence than the
        -- incoming atom (GT); pop silently on EQ (matching braces).
        reduceWhile :: Atom String -> OPState
        reduceWhile op = do
          (_, (op':_)) <- get
          case comp op' op of
            GT -> do
              modify $ second $ tail
              modify $ first $ \stk ->
                let (args, rest) = splitAt (lens M.! op') stk
                in (POp op' $ reverse args) : rest
              reduceWhile op
            EQ -> do
              modify $ second $ tail
            _ -> return ()
        -- Classify a token, reduce as needed, then shift it; argument
        -- tokens also push a 'Val' on the value stack.
        parse :: String -> OPState
        parse c = do
          reduceWhile oc
          modify $ second (oc:)
          when (oc == RA) $ modify $ first (Val c:)
          where oc = case c of
                  "(" -> ROBrace
                  ")" -> RCBrace
                  _ | RT c `M.member` lens -> RT c
                    | otherwise -> RA
        -- Drain the stacks against the closing sentinel.
        finish :: OPState
        finish = reduceWhile RS
-- | Atom comparison backed by the precomputed relation table.
-- Note: 'M.!' fails for atom pairs absent from the table.
tableOp :: OpCompare
tableOp op a b = relationTable op M.! (a, b)
-- | Parse a whitespace-separated expression with the table-driven
-- comparison.
tableOpParser :: [Op String] -> String -> Parsed String
tableOpParser ops = opParser' tableOp ops . words
| abbradar/comps | src/Lab5a.hs | bsd-3-clause | 4,179 | 0 | 22 | 1,775 | 1,564 | 857 | 707 | 99 | 6 |
{-# LANGUAGE FlexibleInstances #-}
-----------------------------------------------------------------------------
-- |
-- Module : Foreign.BLAS.Level2
-- Copyright : Copyright (c) 2010, Patrick Perry <patperry@gmail.com>
-- License : BSD3
-- Maintainer : Patrick Perry <patperry@gmail.com>
-- Stability : experimental
--
-- Matrix-Vector operations.
--
module Foreign.BLAS.Level2 (
BLAS2(..),
) where
import Data.Complex
import Foreign( Ptr, Storable, with )
import Foreign.BLAS.Types
import Foreign.BLAS.Level1
import Foreign.BLAS.Double
import Foreign.BLAS.Zomplex
-- | Types with matrix-vector operations (BLAS level 2).  Naming follows
-- the BLAS convention: ge* general, gb* general banded, he*/hb*/hp*
-- Hermitian (full/banded/packed), tr*/tb*/tp* triangular
-- (full/banded/packed); ger[cu]/her[2]/hpr[2] are rank-1/rank-2
-- updates.  'Int' arguments are dimensions, leading dimensions and
-- strides, passed straight to the underlying Fortran routines.
class (BLAS1 a) => BLAS2 a where
    gbmv :: Trans -> Int -> Int -> Int -> Int -> a -> Ptr a -> Int -> Ptr a -> Int -> a -> Ptr a -> Int -> IO ()
    gemv :: Trans -> Int -> Int -> a -> Ptr a -> Int -> Ptr a -> Int -> a -> Ptr a -> Int -> IO ()
    gerc :: Int -> Int -> a -> Ptr a -> Int -> Ptr a -> Int -> Ptr a -> Int -> IO ()
    geru :: Int -> Int -> a -> Ptr a -> Int -> Ptr a -> Int -> Ptr a -> Int -> IO ()
    hbmv :: Uplo -> Int -> Int -> a -> Ptr a -> Int -> Ptr a -> Int -> a -> Ptr a -> Int -> IO ()
    hemv :: Uplo -> Int -> a -> Ptr a -> Int -> Ptr a -> Int -> a -> Ptr a -> Int -> IO ()
    her :: Uplo -> Int -> Double -> Ptr a -> Int -> Ptr a -> Int -> IO ()
    her2 :: Uplo -> Int -> a -> Ptr a -> Int -> Ptr a -> Int -> Ptr a -> Int -> IO ()
    hpmv :: Uplo -> Int -> a -> Ptr a -> Ptr a -> Int -> a -> Ptr a -> Int -> IO ()
    hpr :: Uplo -> Int -> Double -> Ptr a -> Int -> Ptr a -> IO ()
    hpr2 :: Uplo -> Int -> a -> Ptr a -> Int -> Ptr a -> Int -> Ptr a -> IO ()
    tbmv :: Uplo -> Trans -> Diag -> Int -> Int -> Ptr a -> Int -> Ptr a -> Int -> IO ()
    tbsv :: Uplo -> Trans -> Diag -> Int -> Int -> Ptr a -> Int -> Ptr a -> Int -> IO ()
    tpmv :: Uplo -> Trans -> Diag -> Int -> Ptr a -> Ptr a -> Int -> IO ()
    tpsv :: Uplo -> Trans -> Diag -> Int -> Ptr a -> Ptr a -> Int -> IO ()
    trmv :: Uplo -> Trans -> Diag -> Int -> Ptr a -> Int -> Ptr a -> Int -> IO ()
    trsv :: Uplo -> Trans -> Diag -> Int -> Ptr a -> Int -> Ptr a -> Int -> IO ()
-- | Marshal an 'Int' through 'toEnum' into temporary storage and hand a
-- pointer to it to the continuation (as Fortran BLAS expects arguments
-- by reference).
withEnum :: (Enum a, Storable a) => Int -> (Ptr a -> IO b) -> IO b
withEnum n act = with (toEnum n) act
{-# INLINE withEnum #-}
-- Real instance: each method marshals its scalar arguments by reference
-- and delegates to the corresponding d* FFI routine.  The Hermitian
-- operations map onto the real symmetric routines (dsy*/dsb*/dsp*), and
-- 'geru' coincides with 'gerc' since conjugation is the identity on
-- reals.
instance BLAS2 Double where
    gemv transa m n alpha pa lda px incx beta py incy =
        withTrans transa $ \ptransa ->
        withEnum m $ \pm ->
        withEnum n $ \pn ->
        with alpha $ \palpha ->
        withEnum lda $ \plda ->
        withEnum incx $ \pincx ->
        with beta $ \pbeta ->
        withEnum incy $ \pincy ->
            dgemv ptransa pm pn palpha pa plda px pincx pbeta py pincy
    {-# INLINE gemv #-}
    gbmv transa m n kl ku alpha pa lda px incx beta py incy =
        withTrans transa $ \ptransa ->
        withEnum m $ \pm ->
        withEnum n $ \pn ->
        withEnum kl $ \pkl ->
        withEnum ku $ \pku ->
        with alpha $ \palpha ->
        withEnum lda $ \plda ->
        withEnum incx $ \pincx ->
        with beta $ \pbeta ->
        withEnum incy $ \pincy ->
            dgbmv ptransa pm pn pkl pku palpha pa plda px pincx pbeta py pincy
    {-# INLINE gbmv #-}
    trmv uplo trans diag n pa lda px incx =
        withUplo uplo $ \puplo ->
        withTrans trans $ \ptrans ->
        withDiag diag $ \pdiag ->
        withEnum n $ \pn ->
        withEnum lda $ \plda ->
        withEnum incx $ \pincx ->
            dtrmv puplo ptrans pdiag pn pa plda px pincx
    {-# INLINE trmv #-}
    tpmv uplo trans diag n pap px incx =
        withUplo uplo $ \puplo ->
        withTrans trans $ \ptrans ->
        withDiag diag $ \pdiag ->
        withEnum n $ \pn ->
        withEnum incx $ \pincx ->
            dtpmv puplo ptrans pdiag pn pap px pincx
    {-# INLINE tpmv #-}
    tpsv uplo trans diag n pap px incx =
        withUplo uplo $ \puplo ->
        withTrans trans $ \ptrans ->
        withDiag diag $ \pdiag ->
        withEnum n $ \pn ->
        withEnum incx $ \pincx ->
            dtpsv puplo ptrans pdiag pn pap px pincx
    {-# INLINE tpsv #-}
    tbmv uplo trans diag n k pa lda px incx =
        withUplo uplo $ \puplo ->
        withTrans trans $ \ptrans ->
        withDiag diag $ \pdiag ->
        withEnum n $ \pn ->
        withEnum k $ \pk ->
        withEnum lda $ \plda ->
        withEnum incx $ \pincx ->
            dtbmv puplo ptrans pdiag pn pk pa plda px pincx
    {-# INLINE tbmv #-}
    trsv uplo trans diag n pa lda px incx =
        withUplo uplo $ \puplo ->
        withTrans trans $ \ptrans ->
        withDiag diag $ \pdiag ->
        withEnum n $ \pn ->
        withEnum lda $ \plda ->
        withEnum incx $ \pincx ->
            dtrsv puplo ptrans pdiag pn pa plda px pincx
    {-# INLINE trsv #-}
    tbsv uplo trans diag n k pa lda px incx =
        withUplo uplo $ \puplo ->
        withTrans trans $ \ptrans ->
        withDiag diag $ \pdiag ->
        withEnum n $ \pn ->
        withEnum k $ \pk ->
        withEnum lda $ \plda ->
        withEnum incx $ \pincx ->
            dtbsv puplo ptrans pdiag pn pk pa plda px pincx
    {-# INLINE tbsv #-}
    hemv uplo n alpha pa lda px incx beta py incy =
        withUplo uplo $ \puplo ->
        withEnum n $ \pn ->
        with alpha $ \palpha ->
        withEnum lda $ \plda ->
        withEnum incx $ \pincx ->
        with beta $ \pbeta ->
        withEnum incy $ \pincy ->
            dsymv puplo pn palpha pa plda px pincx pbeta py pincy
    {-# INLINE hemv #-}
    hbmv uplo n k alpha pa lda px incx beta py incy =
        withUplo uplo $ \puplo ->
        withEnum n $ \pn ->
        withEnum k $ \pk ->
        with alpha $ \palpha ->
        withEnum lda $ \plda ->
        withEnum incx $ \pincx ->
        with beta $ \pbeta ->
        withEnum incy $ \pincy ->
            dsbmv puplo pn pk palpha pa plda px pincx pbeta py pincy
    {-# INLINE hbmv #-}
    gerc m n alpha px incx py incy pa lda =
        withEnum m $ \pm ->
        withEnum n $ \pn ->
        with alpha $ \palpha ->
        withEnum incx $ \pincx ->
        withEnum incy $ \pincy ->
        withEnum lda $ \plda ->
            dger pm pn palpha px pincx py pincy pa plda
    {-# INLINE gerc #-}
    geru = gerc
    {-# INLINE geru #-}
    her uplo n alpha px incx pa lda =
        withUplo uplo $ \puplo ->
        withEnum n $ \pn ->
        with alpha $ \palpha ->
        withEnum incx $ \pincx ->
        withEnum lda $ \plda ->
            dsyr puplo pn palpha px pincx pa plda
    {-# INLINE her #-}
    her2 uplo n alpha px incx py incy pa lda =
        withUplo uplo $ \puplo ->
        withEnum n $ \pn ->
        with alpha $ \palpha ->
        withEnum incx $ \pincx ->
        withEnum incy $ \pincy ->
        withEnum lda $ \plda ->
            dsyr2 puplo pn palpha px pincx py pincy pa plda
    {-# INLINE her2 #-}
    hpmv uplo n alpha pap px incx beta py incy =
        withUplo uplo $ \puplo ->
        withEnum n $ \pn ->
        with alpha $ \palpha ->
        withEnum incx $ \pincx ->
        with beta $ \pbeta ->
        withEnum incy $ \pincy ->
            dspmv puplo pn palpha pap px pincx pbeta py pincy
    {-# INLINE hpmv #-}
    hpr uplo n alpha px incx pap =
        withUplo uplo $ \puplo ->
        withEnum n $ \pn ->
        with alpha $ \palpha ->
        withEnum incx $ \pincx ->
            dspr puplo pn palpha px pincx pap
    {-# INLINE hpr #-}
    hpr2 uplo n alpha px incx py incy pap =
        withUplo uplo $ \puplo ->
        withEnum n $ \pn ->
        with alpha $ \palpha ->
        withEnum incx $ \pincx ->
        withEnum incy $ \pincy ->
            dspr2 puplo pn palpha px pincx py pincy pap
    {-# INLINE hpr2 #-}
-- Complex instance: identical marshalling, delegating to the z* FFI
-- routines.  Unlike the real instance, 'gerc' (conjugating) and 'geru'
-- (non-conjugating) are genuinely different routines here.
instance BLAS2 (Complex Double) where
    gemv transa m n alpha pa lda px incx beta py incy =
        withTrans transa $ \ptransa ->
        withEnum m $ \pm ->
        withEnum n $ \pn ->
        with alpha $ \palpha ->
        withEnum lda $ \plda ->
        withEnum incx $ \pincx ->
        with beta $ \pbeta ->
        withEnum incy $ \pincy ->
            zgemv ptransa pm pn palpha pa plda px pincx pbeta py pincy
    {-# INLINE gemv #-}
    gbmv transa m n kl ku alpha pa lda px incx beta py incy =
        withTrans transa $ \ptransa ->
        withEnum m $ \pm ->
        withEnum n $ \pn ->
        withEnum kl $ \pkl ->
        withEnum ku $ \pku ->
        with alpha $ \palpha ->
        withEnum lda $ \plda ->
        withEnum incx $ \pincx ->
        with beta $ \pbeta ->
        withEnum incy $ \pincy ->
            zgbmv ptransa pm pn pkl pku palpha pa plda px pincx pbeta py pincy
    {-# INLINE gbmv #-}
    trmv uplo trans diag n pa lda px incx =
        withUplo uplo $ \puplo ->
        withTrans trans $ \ptrans ->
        withDiag diag $ \pdiag ->
        withEnum n $ \pn ->
        withEnum lda $ \plda ->
        withEnum incx $ \pincx ->
            ztrmv puplo ptrans pdiag pn pa plda px pincx
    {-# INLINE trmv #-}
    tpmv uplo trans diag n pap px incx =
        withUplo uplo $ \puplo ->
        withTrans trans $ \ptrans ->
        withDiag diag $ \pdiag ->
        withEnum n $ \pn ->
        withEnum incx $ \pincx ->
            ztpmv puplo ptrans pdiag pn pap px pincx
    {-# INLINE tpmv #-}
    tpsv uplo trans diag n pap px incx =
        withUplo uplo $ \puplo ->
        withTrans trans $ \ptrans ->
        withDiag diag $ \pdiag ->
        withEnum n $ \pn ->
        withEnum incx $ \pincx ->
            ztpsv puplo ptrans pdiag pn pap px pincx
    {-# INLINE tpsv #-}
    tbmv uplo trans diag n k pa lda px incx =
        withUplo uplo $ \puplo ->
        withTrans trans $ \ptrans ->
        withDiag diag $ \pdiag ->
        withEnum n $ \pn ->
        withEnum k $ \pk ->
        withEnum lda $ \plda ->
        withEnum incx $ \pincx ->
            ztbmv puplo ptrans pdiag pn pk pa plda px pincx
    {-# INLINE tbmv #-}
    trsv uplo trans diag n pa lda px incx =
        withUplo uplo $ \puplo ->
        withTrans trans $ \ptrans ->
        withDiag diag $ \pdiag ->
        withEnum n $ \pn ->
        withEnum lda $ \plda ->
        withEnum incx $ \pincx ->
            ztrsv puplo ptrans pdiag pn pa plda px pincx
    {-# INLINE trsv #-}
    tbsv uplo trans diag n k pa lda px incx =
        withUplo uplo $ \puplo ->
        withTrans trans $ \ptrans ->
        withDiag diag $ \pdiag ->
        withEnum n $ \pn ->
        withEnum k $ \pk ->
        withEnum lda $ \plda ->
        withEnum incx $ \pincx ->
            ztbsv puplo ptrans pdiag pn pk pa plda px pincx
    {-# INLINE tbsv #-}
    hemv uplo n alpha pa lda px incx beta py incy =
        withUplo uplo $ \puplo ->
        withEnum n $ \pn ->
        with alpha $ \palpha ->
        withEnum lda $ \plda ->
        withEnum incx $ \pincx ->
        with beta $ \pbeta ->
        withEnum incy $ \pincy ->
            zhemv puplo pn palpha pa plda px pincx pbeta py pincy
    {-# INLINE hemv #-}
    hbmv uplo n k alpha pa lda px incx beta py incy =
        withUplo uplo $ \puplo ->
        withEnum n $ \pn ->
        withEnum k $ \pk ->
        with alpha $ \palpha ->
        withEnum lda $ \plda ->
        withEnum incx $ \pincx ->
        with beta $ \pbeta ->
        withEnum incy $ \pincy ->
            zhbmv puplo pn pk palpha pa plda px pincx pbeta py pincy
    {-# INLINE hbmv #-}
    gerc m n alpha px incx py incy pa lda =
        withEnum m $ \pm ->
        withEnum n $ \pn ->
        with alpha $ \palpha ->
        withEnum incx $ \pincx ->
        withEnum incy $ \pincy ->
        withEnum lda $ \plda ->
            zgerc pm pn palpha px pincx py pincy pa plda
    {-# INLINE gerc #-}
    geru m n alpha px incx py incy pa lda =
        withEnum m $ \pm ->
        withEnum n $ \pn ->
        with alpha $ \palpha ->
        withEnum incx $ \pincx ->
        withEnum incy $ \pincy ->
        withEnum lda $ \plda ->
            zgeru pm pn palpha px pincx py pincy pa plda
    {-# INLINE geru #-}
    her uplo n alpha px incx pa lda =
        withUplo uplo $ \puplo ->
        withEnum n $ \pn ->
        with alpha $ \palpha ->
        withEnum incx $ \pincx ->
        withEnum lda $ \plda ->
            zher puplo pn palpha px pincx pa plda
    {-# INLINE her #-}
    her2 uplo n alpha px incx py incy pa lda =
        withUplo uplo $ \puplo ->
        withEnum n $ \pn ->
        with alpha $ \palpha ->
        withEnum incx $ \pincx ->
        withEnum incy $ \pincy ->
        withEnum lda $ \plda ->
            zher2 puplo pn palpha px pincx py pincy pa plda
    {-# INLINE her2 #-}
    hpmv uplo n alpha pap px incx beta py incy =
        withUplo uplo $ \puplo ->
        withEnum n $ \pn ->
        with alpha $ \palpha ->
        withEnum incx $ \pincx ->
        with beta $ \pbeta ->
        withEnum incy $ \pincy ->
            zhpmv puplo pn palpha pap px pincx pbeta py pincy
    {-# INLINE hpmv #-}
    hpr uplo n alpha px incx pap =
        withUplo uplo $ \puplo ->
        withEnum n $ \pn ->
        with alpha $ \palpha ->
        withEnum incx $ \pincx ->
            zhpr puplo pn palpha px pincx pap
    {-# INLINE hpr #-}
    hpr2 uplo n alpha px incx py incy pap =
        withUplo uplo $ \puplo ->
        withEnum n $ \pn ->
        with alpha $ \palpha ->
        withEnum incx $ \pincx ->
        withEnum incy $ \pincy ->
            zhpr2 puplo pn palpha px pincx py pincy pap
    {-# INLINE hpr2 #-}
| patperry/hs-linear-algebra | lib/Foreign/BLAS/Level2.hs | bsd-3-clause | 13,419 | 0 | 26 | 4,889 | 4,995 | 2,494 | 2,501 | 341 | 1 |
module Tests.FindFiles (findFiles, findAllFiles) where
import Control.Monad
import Data.List
import System.Directory
import System.FilePath
-- concatMapM, partitionM: pull out into monad utility lib? Does some version
-- of these already exist somewhere?
-- | 'concatMap' generalised to a monad: map a monadic, list-producing
-- function over a list and concatenate the results.
concatMapM :: Monad m => (a -> m [b]) -> [a] -> m [b]
concatMapM f xs = fmap concat (mapM f xs)
-- | Partition a list according to a monadic predicate, running the
-- predicate action exactly once per element (unlike the naive
-- @(filterM p xs, filterM (not . p) xs)@ formulation, which would run
-- each action twice).  Elements satisfying the predicate go into the
-- first list, the rest into the second; relative order is preserved
-- in both.
partitionM :: Monad m => (a -> m Bool) -> [a] -> m ([a], [a])
partitionM p xs = do
    flags <- mapM p xs
    -- foldr builds both result lists in their original order, with no
    -- accumulate-and-reverse pass, and is total: the original helper
    -- had a non-exhaustive match on the (flags, elements) pair, which
    -- zip makes impossible here.
    return (foldr pick ([], []) (zip flags xs))
  where
    pick (True,  x) (yes, no) = (x : yes, no)
    pick (False, x) (yes, no) = (yes, x : no)
-- | All files (recursively) under the given directory whose paths
-- satisfy the predicate @f@.
findFiles :: FilePath -> (FilePath -> Bool) -> IO [FilePath]
findFiles path f = fmap (filter f) (findAllFiles path)
-- Big Assumption: every element that isn't a directory is a file! I assume
-- that the Directory functions treat links reasonably, so that everything
-- falls into these two nice neat buckets.
findAllFiles :: FilePath -> IO [FilePath]
findAllFiles root = do
    entries <- fmap clean (getDirectoryContents root)
    (dirs, files) <- partitionM doesDirectoryExist entries
    deeper <- concatMapM findAllFiles dirs
    return (files ++ deeper)
  where
    -- Drop "." and ".." and qualify every entry with the parent path,
    -- so the results are usable paths rather than bare names.
    clean = map (root </>) . filter (`notElem` [".", ".."])
| Mistuke/CraftGen | Tests/FindFiles.hs | bsd-3-clause | 1,908 | 0 | 12 | 466 | 520 | 280 | 240 | 24 | 3 |
{-# LANGUAGE NamedFieldPuns #-}
module Astro.Orbit.Anomaly where
-- [1] http://mathworld.wolfram.com/KeplersEquation.html
import Numeric.Units.Dimensional.Prelude
import Numeric.Units.Dimensional.Coercion
import qualified Prelude
import Astro.Orbit.Types
import Data.AEq
-- | Compute eccentric anomaly from true anomaly using atan2.
-- The atan2 formulation resolves the quadrant directly, unlike the
-- half-angle 'atan' variant 'ta2ea''.
ta2ea :: RealFloat a => Eccentricity a -> Anomaly True a -> Anomaly Eccentric a
ta2ea Ecc{ecc} Anom{anom} = Anom $ atan2 (sqrt (_1 - ecc ^ pos2) * sin anom) (ecc + cos anom)
-- | Compute eccentric anomaly from true anomaly using atan.
-- Half-angle formulation: E = 2 atan (sqrt ((1-e)/(1+e)) tan (nu/2)).
-- NOTE(review): the square root requires e < 1 (elliptic orbit) --
-- presumably guaranteed by callers; confirm.
ta2ea' :: RealFloat a => Eccentricity a -> Anomaly True a -> Anomaly Eccentric a
ta2ea' Ecc{ecc} (Anom ta) = Anom $ _2 * atan (sqrt ((_1 - ecc) / (_1 + ecc)) * tan (ta / _2))
-- | Compute mean anomaly from eccentric anomaly.
-- This is Kepler's equation evaluated forwards: M = E - e sin E.
ea2ma :: RealFloat a => Eccentricity a -> Anomaly Eccentric a -> Anomaly Mean a
ea2ma Ecc{ecc} (Anom ea) = Anom $ ea - ecc * sin ea
-- | Compute true anomaly from eccentric anomaly. (Wikipedia)
-- Inverse of the half-angle form in 'ta2ea'':
-- nu = 2 atan (sqrt ((1+e)/(1-e)) tan (E/2)).
ea2ta :: RealFloat a => Eccentricity a -> Anomaly Eccentric a -> Anomaly True a
ea2ta Ecc{ecc} (Anom ea) = Anom $ _2 * atan (sqrt ((_1 + ecc) / (_1 - ecc)) * tan (ea / _2))
-- | Compute eccentric anomaly from mean anomaly using Newton's
-- method as shown on [1].  Iterates 'keplerStep' from an initial
-- guess until successive iterates agree.
ma2ea :: (RealFloat a, AEq a) => Eccentricity a -> Anomaly Mean a -> Anomaly Eccentric a
ma2ea ecc ma = iterateUntil converged (keplerStep ecc ma) ea0
  where
    -- Initial guess E0 = M (the anomalies share a representation, so
    -- coerce suffices).
    ea0 = coerce ma
    -- Converged when two successive iterates differ by < 1 arcsecond.
    converged ea1 ea2 = abs (anom ea1 - anom ea2) < 1 *~ arcsecond -- TODO select a more principled delta?
-- | Compute true anomaly from mean anomaly using Newton's
-- method as shown on [1].
-- Composition: first solve Kepler's equation for the eccentric
-- anomaly ('ma2ea'), then convert to true anomaly ('ea2ta').
ma2ta :: (RealFloat a, AEq a) => Eccentricity a -> Anomaly Mean a -> Anomaly True a
ma2ta ecc = ea2ta ecc . ma2ea ecc
-- | Compute mean anomaly from true anomaly.
-- Closed form (no iteration): true -> eccentric ('ta2ea'), then
-- eccentric -> mean via Kepler's equation ('ea2ma').
ta2ma :: RealFloat a => Eccentricity a -> Anomaly True a -> Anomaly Mean a
ta2ma ecc = ea2ma ecc . ta2ea ecc
-- Kepler's Equation
-- =================
-- | A step in solving Kepler's equation per [1].
-- One Newton-Raphson iteration for f(E) = E - e sin E - M = 0:
--   E' = E + (M + e sin E - E) / (1 - e cos E).
keplerStep :: Floating a => Eccentricity a -> Anomaly Mean a
           -> Anomaly Eccentric a -> Anomaly Eccentric a
keplerStep (Ecc ecc) (Anom ma) (Anom ea) = Anom $
  ea + (ma + ecc * sin ea - ea) / (_1 - ecc * cos ea)
-- | Iterate a function on its result until the predicate holds true
-- for two subsequent results, then return the latter of the two.
-- (Note: will diverge if the predicate is never fulfilled.)
iterateUntil :: (a -> a -> Bool) -> (a -> a) -> a -> a
iterateUntil p f = go . iterate f
  where
    -- The local helper was previously named 'until', shadowing
    -- Prelude.until (and its parameter shadowed 'p'); renamed to 'go'.
    -- 'iterate' always yields an infinite list, so the two-element
    -- match below cannot fail; the catch-all documents that instead of
    -- leaving a non-exhaustive-pattern warning.
    go (x1:x2:xs)
      | p x1 x2   = x2
      | otherwise = go (x2:xs)
    go _ = error "iterateUntil: impossible: iterate produced a finite list"
| bjornbm/astro | src/Astro/Orbit/Anomaly.hs | bsd-3-clause | 2,595 | 0 | 13 | 518 | 876 | 443 | 433 | 30 | 2 |
{-
(c) The GRASP/AQUA Project, Glasgow University, 1993-1998
\section[Specialise]{Stamping out overloading, and (optionally) polymorphism}
-}
{-# LANGUAGE CPP #-}
module Specialise ( specProgram, specUnfolding ) where
#include "HsVersions.h"
import Id
import TcType hiding( substTy )
import Type hiding( substTy, extendTvSubstList )
import Module( Module, HasModule(..) )
import Coercion( Coercion )
import CoreMonad
import qualified CoreSubst
import CoreUnfold
import VarSet
import VarEnv
import CoreSyn
import Rules
import CoreUtils ( exprIsTrivial, applyTypeToArgs, mkCast )
import CoreFVs ( exprFreeVars, exprsFreeVars, idFreeVars, exprsFreeIdsList )
import UniqSupply
import Name
import MkId ( voidArgId, voidPrimId )
import Maybes ( catMaybes, isJust )
import BasicTypes
import HscTypes
import Bag
import DynFlags
import Util
import Outputable
import FastString
import State
import UniqDFM
import TrieMap
#if __GLASGOW_HASKELL__ < 709
import Control.Applicative (Applicative(..))
#endif
import Control.Monad
#if __GLASGOW_HASKELL__ > 710
import qualified Control.Monad.Fail as MonadFail
#endif
{-
************************************************************************
* *
\subsection[notes-Specialise]{Implementation notes [SLPJ, Aug 18 1993]}
* *
************************************************************************
These notes describe how we implement specialisation to eliminate
overloading.
The specialisation pass works on Core
syntax, complete with all the explicit dictionary application,
abstraction and construction as added by the type checker. The
existing type checker remains largely as it is.
One important thought: the {\em types} passed to an overloaded
function, and the {\em dictionaries} passed are mutually redundant.
If the same function is applied to the same type(s) then it is sure to
be applied to the same dictionary(s)---or rather to the same {\em
values}. (The arguments might look different but they will evaluate
to the same value.)
Second important thought: we know that we can make progress by
treating dictionary arguments as static and worth specialising on. So
we can do without binding-time analysis, and instead specialise on
dictionary arguments and no others.
The basic idea
~~~~~~~~~~~~~~
Suppose we have
let f = <f_rhs>
in <body>
and suppose f is overloaded.
STEP 1: CALL-INSTANCE COLLECTION
We traverse <body>, accumulating all applications of f to types and
dictionaries.
(Might there be partial applications, to just some of its types and
dictionaries? In principle yes, but in practice the type checker only
builds applications of f to all its types and dictionaries, so partial
applications could only arise as a result of transformation, and even
then I think it's unlikely. In any case, we simply don't accumulate such
partial applications.)
STEP 2: EQUIVALENCES
So now we have a collection of calls to f:
f t1 t2 d1 d2
f t3 t4 d3 d4
...
Notice that f may take several type arguments. To avoid ambiguity, we
say that f is called at type t1/t2 and t3/t4.
We take equivalence classes using equality of the *types* (ignoring
the dictionary args, which as mentioned previously are redundant).
STEP 3: SPECIALISATION
For each equivalence class, choose a representative (f t1 t2 d1 d2),
and create a local instance of f, defined thus:
f@t1/t2 = <f_rhs> t1 t2 d1 d2
f_rhs presumably has some big lambdas and dictionary lambdas, so lots
of simplification will now result. However we don't actually *do* that
simplification. Rather, we leave it for the simplifier to do. If we
*did* do it, though, we'd get more call instances from the specialised
RHS. We can work out what they are by instantiating the call-instance
set from f's RHS with the types t1, t2.
Add this new id to f's IdInfo, to record that f has a specialised version.
Before doing any of this, check that f's IdInfo doesn't already
tell us about an existing instance of f at the required type/s.
(This might happen if specialisation was applied more than once, or
it might arise from user SPECIALIZE pragmas.)
Recursion
~~~~~~~~~
Wait a minute! What if f is recursive? Then we can't just plug in
its right-hand side, can we?
But it's ok. The type checker *always* creates non-recursive definitions
for overloaded recursive functions. For example:
f x = f (x+x) -- Yes I know its silly
becomes
f a (d::Num a) = let p = +.sel a d
in
letrec fl (y::a) = fl (p y y)
in
fl
We still have recursion for non-overloaded functions which we
specialise, but the recursive call should get specialised to the
same recursive version.
Polymorphism 1
~~~~~~~~~~~~~~
All this is crystal clear when the function is applied to *constant
types*; that is, types which have no type variables inside. But what if
it is applied to non-constant types? Suppose we find a call of f at type
t1/t2. There are two possibilities:
(a) The free type variables of t1, t2 are in scope at the definition point
of f. In this case there's no problem, we proceed just as before. A common
example is as follows. Here's the Haskell:
g y = let f x = x+x
in f y + f y
After typechecking we have
g a (d::Num a) (y::a) = let f b (d'::Num b) (x::b) = +.sel b d' x x
in +.sel a d (f a d y) (f a d y)
Notice that the call to f is at type type "a"; a non-constant type.
Both calls to f are at the same type, so we can specialise to give:
g a (d::Num a) (y::a) = let f@a (x::a) = +.sel a d x x
in +.sel a d (f@a y) (f@a y)
(b) The other case is when the type variables in the instance types
are *not* in scope at the definition point of f. The example we are
working with above is a good case. There are two instances of (+.sel a d),
but "a" is not in scope at the definition of +.sel. Can we do anything?
Yes, we can "common them up", a sort of limited common sub-expression deal.
This would give:
g a (d::Num a) (y::a) = let +.sel@a = +.sel a d
f@a (x::a) = +.sel@a x x
in +.sel@a (f@a y) (f@a y)
This can save work, and can't be spotted by the type checker, because
the two instances of +.sel weren't originally at the same type.
Further notes on (b)
* There are quite a few variations here. For example, the defn of
  +.sel could be floated outside the \y, to attempt to gain laziness.
It certainly mustn't be floated outside the \d because the d has to
be in scope too.
* We don't want to inline f_rhs in this case, because
that will duplicate code. Just commoning up the call is the point.
* Nothing gets added to +.sel's IdInfo.
* Don't bother unless the equivalence class has more than one item!
Not clear whether this is all worth it. It is of course OK to
simply discard call-instances when passing a big lambda.
Polymorphism 2 -- Overloading
~~~~~~~~~~~~~~
Consider a function whose most general type is
f :: forall a b. Ord a => [a] -> b -> b
There is really no point in making a version of g at Int/Int and another
at Int/Bool, because it's only instancing the type variable "a" which
buys us any efficiency. Since g is completely polymorphic in b there
ain't much point in making separate versions of g for the different
b types.
That suggests that we should identify which of g's type variables
are constrained (like "a") and which are unconstrained (like "b").
Then when taking equivalence classes in STEP 2, we ignore the type args
corresponding to unconstrained type variable. In STEP 3 we make
polymorphic versions. Thus:
f@t1/ = /\b -> <f_rhs> t1 b d1 d2
We do this.
Dictionary floating
~~~~~~~~~~~~~~~~~~~
Consider this
f a (d::Num a) = let g = ...
in
...(let d1::Ord a = Num.Ord.sel a d in g a d1)...
Here, g is only called at one type, but the dictionary isn't in scope at the
definition point for g. Usually the type checker would build a
definition for d1 which enclosed g, but the transformation system
might have moved d1's defn inward. Solution: float dictionary bindings
outwards along with call instances.
Consider
f x = let g p q = p==q
h r s = (r+s, g r s)
in
h x x
Before specialisation, leaving out type abstractions we have
f df x = let g :: Eq a => a -> a -> Bool
g dg p q = == dg p q
h :: Num a => a -> a -> (a, Bool)
h dh r s = let deq = eqFromNum dh
in (+ dh r s, g deq r s)
in
h df x x
After specialising h we get a specialised version of h, like this:
h' r s = let deq = eqFromNum df
in (+ df r s, g deq r s)
But we can't naively make an instance for g from this, because deq is not in scope
at the defn of g. Instead, we have to float out the (new) defn of deq
to widen its scope. Notice that this floating can't be done in advance -- it only
shows up when specialisation is done.
User SPECIALIZE pragmas
~~~~~~~~~~~~~~~~~~~~~~~
Specialisation pragmas can be digested by the type checker, and implemented
by adding extra definitions along with that of f, in the same way as before
f@t1/t2 = <f_rhs> t1 t2 d1 d2
Indeed the pragmas *have* to be dealt with by the type checker, because
only it knows how to build the dictionaries d1 and d2! For example
g :: Ord a => [a] -> [a]
{-# SPECIALIZE f :: [Tree Int] -> [Tree Int] #-}
Here, the specialised version of g is an application of g's rhs to the
Ord dictionary for (Tree Int), which only the type checker can conjure
up. There might not even *be* one, if (Tree Int) is not an instance of
Ord! (All the other specialision has suitable dictionaries to hand
from actual calls.)
Problem. The type checker doesn't have to hand a convenient <f_rhs>, because
it is buried in a complex (as-yet-un-desugared) binding group.
Maybe we should say
f@t1/t2 = f* t1 t2 d1 d2
where f* is the Id f with an IdInfo which says "inline me regardless!".
Indeed all the specialisation could be done in this way.
That in turn means that the simplifier has to be prepared to inline absolutely
any in-scope let-bound thing.
Again, the pragma should permit polymorphism in unconstrained variables:
h :: Ord a => [a] -> b -> b
{-# SPECIALIZE h :: [Int] -> b -> b #-}
We *insist* that all overloaded type variables are specialised to ground types,
(and hence there can be no context inside a SPECIALIZE pragma).
We *permit* unconstrained type variables to be specialised to
- a ground type
- or left as a polymorphic type variable
but nothing in between. So
{-# SPECIALIZE h :: [Int] -> [c] -> [c] #-}
is *illegal*. (It can be handled, but it adds complication, and gains the
programmer nothing.)
SPECIALISING INSTANCE DECLARATIONS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
instance Foo a => Foo [a] where
...
{-# SPECIALIZE instance Foo [Int] #-}
The original instance decl creates a dictionary-function
definition:
dfun.Foo.List :: forall a. Foo a -> Foo [a]
The SPECIALIZE pragma just makes a specialised copy, just as for
ordinary function definitions:
dfun.Foo.List@Int :: Foo [Int]
dfun.Foo.List@Int = dfun.Foo.List Int dFooInt
The information about what instance of the dfun exist gets added to
the dfun's IdInfo in the same way as a user-defined function too.
Automatic instance decl specialisation?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Can instance decls be specialised automatically? It's tricky.
We could collect call-instance information for each dfun, but
then when we specialised their bodies we'd get new call-instances
for ordinary functions; and when we specialised their bodies, we might get
new call-instances of the dfuns, and so on. This all arises because of
the unrestricted mutual recursion between instance decls and value decls.
Still, there's no actual problem; it just means that we may not do all
the specialisation we could theoretically do.
Furthermore, instance decls are usually exported and used non-locally,
so we'll want to compile enough to get those specialisations done.
Lastly, there's no such thing as a local instance decl, so we can
survive solely by spitting out *usage* information, and then reading that
back in as a pragma when next compiling the file. So for now,
we only specialise instance decls in response to pragmas.
SPITTING OUT USAGE INFORMATION
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To spit out usage information we need to traverse the code collecting
call-instance information for all imported (non-prelude?) functions
and data types. Then we equivalence-class it and spit it out.
This is done at the top-level when all the call instances which escape
must be for imported functions and data types.
*** Not currently done ***
Partial specialisation by pragmas
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
What about partial specialisation:
k :: (Ord a, Eq b) => [a] -> b -> b -> [a]
{-# SPECIALIZE k :: Eq b => [Int] -> b -> b -> [a] #-}
or even
{-# SPECIALIZE k :: Eq b => [Int] -> [b] -> [b] -> [a] #-}
Seems quite reasonable. Similar things could be done with instance decls:
instance (Foo a, Foo b) => Foo (a,b) where
...
{-# SPECIALIZE instance Foo a => Foo (a,Int) #-}
{-# SPECIALIZE instance Foo b => Foo (Int,b) #-}
Ho hum. Things are complex enough without this. I pass.
Requirements for the simplifier
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The simplifier has to be able to take advantage of the specialisation.
* When the simplifier finds an application of a polymorphic f, it looks in
f's IdInfo in case there is a suitable instance to call instead. This converts
f t1 t2 d1 d2 ===> f_t1_t2
Note that the dictionaries get eaten up too!
* Dictionary selection operations on constant dictionaries must be
short-circuited:
+.sel Int d ===> +Int
The obvious way to do this is in the same way as other specialised
calls: +.sel has inside it some IdInfo which tells that if it's applied
to the type Int then it should eat a dictionary and transform to +Int.
In short, dictionary selectors need IdInfo inside them for constant
methods.
* Exactly the same applies if a superclass dictionary is being
extracted:
Eq.sel Int d ===> dEqInt
* Something similar applies to dictionary construction too. Suppose
dfun.Eq.List is the function taking a dictionary for (Eq a) to
one for (Eq [a]). Then we want
dfun.Eq.List Int d ===> dEq.List_Int
Where does the Eq [Int] dictionary come from? It is built in
response to a SPECIALIZE pragma on the Eq [a] instance decl.
In short, dfun Ids need IdInfo with a specialisation for each
constant instance of their instance declaration.
All this uses a single mechanism: the SpecEnv inside an Id
What does the specialisation IdInfo look like?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The SpecEnv of an Id maps a list of types (the template) to an expression
[Type] |-> Expr
For example, if f has this RuleInfo:
[Int, a] -> \d:Ord Int. f' a
it means that we can replace the call
f Int t ===> (\d. f' t)
This chucks one dictionary away and proceeds with the
specialised version of f, namely f'.
What can't be done this way?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
There is no way, post-typechecker, to get a dictionary for (say)
Eq a from a dictionary for Eq [a]. So if we find
==.sel [t] d
we can't transform to
eqList (==.sel t d')
where
eqList :: (a->a->Bool) -> [a] -> [a] -> Bool
Of course, we currently have no way to automatically derive
eqList, nor to connect it to the Eq [a] instance decl, but you
can imagine that it might somehow be possible. Taking advantage
of this is permanently ruled out.
Still, this is no great hardship, because we intend to eliminate
overloading altogether anyway!
A note about non-tyvar dictionaries
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Some Ids have types like
forall a,b,c. Eq a -> Ord [a] -> tau
This seems curious at first, because we usually only have dictionary
args whose types are of the form (C a) where a is a type variable.
But this doesn't hold for the functions arising from instance decls,
which sometimes get arguments with types of form (C (T a)) for some
type constructor T.
Should we specialise wrt this compound-type dictionary? We used to say
"no", saying:
"This is a heuristic judgement, as indeed is the fact that we
specialise wrt only dictionaries. We choose *not* to specialise
wrt compound dictionaries because at the moment the only place
they show up is in instance decls, where they are simply plugged
into a returned dictionary. So nothing is gained by specialising
wrt them."
But it is simpler and more uniform to specialise wrt these dicts too;
and in future GHC is likely to support full fledged type signatures
like
f :: Eq [(a,b)] => ...
************************************************************************
* *
\subsubsection{The new specialiser}
* *
************************************************************************
Our basic game plan is this. For let(rec) bound function
f :: (C a, D c) => (a,b,c,d) -> Bool
* Find any specialised calls of f, (f ts ds), where
ts are the type arguments t1 .. t4, and
ds are the dictionary arguments d1 .. d2.
* Add a new definition for f1 (say):
f1 = /\ b d -> (..body of f..) t1 b t3 d d1 d2
Note that we abstract over the unconstrained type arguments.
* Add the mapping
[t1,b,t3,d] |-> \d1 d2 -> f1 b d
to the specialisations of f. This will be used by the
simplifier to replace calls
(f t1 t2 t3 t4) da db
by
(\d1 d1 -> f1 t2 t4) da db
All the stuff about how many dictionaries to discard, and what types
to apply the specialised function to, are handled by the fact that the
SpecEnv contains a template for the result of the specialisation.
We don't build *partial* specialisations for f. For example:
f :: Eq a => a -> a -> Bool
{-# SPECIALISE f :: (Eq b, Eq c) => (b,c) -> (b,c) -> Bool #-}
Here, little is gained by making a specialised copy of f.
There's a distinct danger that the specialised version would
first build a dictionary for (Eq b, Eq c), and then select the (==)
method from it! Even if it didn't, not a great deal is saved.
We do, however, generate polymorphic, but not overloaded, specialisations:
f :: Eq a => [a] -> b -> b -> b
... SPECIALISE f :: [Int] -> b -> b -> b ...
Hence, the invariant is this:
*** no specialised version is overloaded ***
************************************************************************
* *
\subsubsection{The exported function}
* *
************************************************************************
-}
-- | Specialise calls to type-class overloaded functions occurring in a program.
-- Entry point of the pass: specialises this module's bindings, then
-- (cross-module) any imported functions whose calls were recorded, and
-- attaches the resulting bindings and RULEs to the 'ModGuts'.
specProgram :: ModGuts -> CoreM ModGuts
specProgram guts@(ModGuts { mg_module = this_mod
                          , mg_rules = local_rules
                          , mg_binds = binds })
  = do { dflags <- getDynFlags

             -- Specialise the bindings of this module
       ; (binds', uds) <- runSpecM dflags this_mod (go binds)

             -- Specialise imported functions
       ; hpt_rules <- getRuleBase
       ; let rule_base = extendRuleBaseList hpt_rules local_rules
       ; (new_rules, spec_binds) <- specImports dflags this_mod top_env emptyVarSet
                                                [] rule_base (ud_calls uds)

             -- Don't forget to wrap the specialized bindings with bindings
             -- for the needed dictionaries.
             -- See Note [Wrap bindings returned by specImports]
       ; let spec_binds' = wrapDictBinds (ud_binds uds) spec_binds

       ; let final_binds
               | null spec_binds' = binds'
               | otherwise        = Rec (flattenBinds spec_binds') : binds'
                 -- Note [Glom the bindings if imported functions are specialised]

       ; return (guts { mg_binds = final_binds
                      , mg_rules = new_rules ++ local_rules }) }
  where
        -- We need to start with a Subst that knows all the things
        -- that are in scope, so that the substitution engine doesn't
        -- accidentally re-use a unique that's already in use
        -- Easiest thing is to do it all at once, as if all the top-level
        -- decls were mutually recursive
    top_env = SE { se_subst = CoreSubst.mkEmptySubst $ mkInScopeSet $ mkVarSet $
                              bindersOfBinds binds
                 , se_interesting = emptyVarSet }

    -- Specialise the top-level bindings back-to-front: later bindings
    -- are processed first so their usage details are in hand when the
    -- earlier (possibly-called) bindings are specialised.
    go []           = return ([], emptyUDs)
    go (bind:binds) = do (binds', uds) <- go binds
                         (bind', uds') <- specBind top_env bind uds
                         return (bind' ++ binds', uds')
{-
Note [Wrap bindings returned by specImports]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'specImports' returns a set of specialized bindings. However, these are lacking
necessary floated dictionary bindings, which are returned by
UsageDetails(ud_binds). These dictionaries need to be brought into scope with
'wrapDictBinds' before the bindings returned by 'specImports' can be used. See,
for instance, the 'specImports' call in 'specProgram'.
Note [Disabling cross-module specialisation]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Since GHC 7.10 we have performed specialisation of INLINEABLE bindings living
in modules outside of the current module. This can sometimes uncover user code
which explodes in size when aggressively optimized. The
-fno-cross-module-specialise option was introduced to allow users being
bitten by such instances to revert to the pre-7.10 behavior.
See Trac #10491
-}
-- | Specialise a set of calls to imported bindings.
-- Walks every imported function that has recorded calls, feeding the
-- RULEs produced for one function into the rule base used for the next.
specImports :: DynFlags
            -> Module
            -> SpecEnv          -- Passed in so that all top-level Ids are in scope
            -> VarSet           -- Don't specialise these ones
                                -- See Note [Avoiding recursive specialisation]
            -> [Id]             -- Stack of imported functions being specialised
            -> RuleBase         -- Rules from this module and the home package
                                -- (but not external packages, which can change)
            -> CallDetails      -- Calls for imported things, and floating bindings
            -> CoreM ( [CoreRule]   -- New rules
                     , [CoreBind] ) -- Specialised bindings
                     -- See Note [Wrap bindings returned by specImports]
specImports dflags this_mod top_env done callers rule_base cds
  -- See Note [Disabling cross-module specialisation]
  | not $ gopt Opt_CrossModuleSpecialise dflags =
    return ([], [])

  | otherwise =
    do { let import_calls = dVarEnvElts cds
       ; (rules, spec_binds) <- go rule_base import_calls
       ; return (rules, spec_binds) }
  where
    go :: RuleBase -> [CallInfoSet] -> CoreM ([CoreRule], [CoreBind])
    go _ [] = return ([], [])
    go rb (cis@(CIS fn _calls_for_fn) : other_calls)
      -- Extend the rule base with rules1 before recursing, so that
      -- later functions see the specialisations already generated.
      = do { (rules1, spec_binds1) <- specImport dflags this_mod top_env
                                                 done callers rb fn $
                                      ciSetToList cis
           ; (rules2, spec_binds2) <- go (extendRuleBaseList rb rules1) other_calls
           ; return (rules1 ++ rules2, spec_binds1 ++ spec_binds2) }
-- | Specialise one imported function against the calls recorded for it,
-- producing new RULEs and specialised bindings; recurses via
-- 'specImports' to handle cascaded calls arising in the new bindings.
specImport :: DynFlags
           -> Module
           -> SpecEnv          -- Passed in so that all top-level Ids are in scope
           -> VarSet           -- Don't specialise these
                               -- See Note [Avoiding recursive specialisation]
           -> [Id]             -- Stack of imported functions being specialised
           -> RuleBase         -- Rules from this module
           -> Id -> [CallInfo] -- Imported function and calls for it
           -> CoreM ( [CoreRule]    -- New rules
                    , [CoreBind] )  -- Specialised bindings
specImport dflags this_mod top_env done callers rb fn calls_for_fn
  | fn `elemVarSet` done
  = return ([], [])     -- No warning.  This actually happens all the time
                        -- when specialising a recursive function, because
                        -- the RHS of the specialised function contains a recursive
                        -- call to the original function

  | null calls_for_fn   -- We filtered out all the calls in deleteCallsMentioning
  = return ([], [])

  | wantSpecImport dflags unfolding
  , Just rhs <- maybeUnfoldingTemplate unfolding
  = do {     -- Get rules from the external package state
             -- We keep doing this in case we "page-fault in"
             -- more rules as we go along
       ; hsc_env <- getHscEnv
       ; eps <- liftIO $ hscEPS hsc_env
       ; vis_orphs <- getVisibleOrphanMods
       ; let full_rb = unionRuleBase rb (eps_rule_base eps)
             rules_for_fn = getRules (RuleEnv full_rb vis_orphs) fn

       ; (rules1, spec_pairs, uds) <- -- pprTrace "specImport1" (vcat [ppr fn, ppr calls_for_fn, ppr rhs]) $
                                      runSpecM dflags this_mod $
                                      specCalls (Just this_mod) top_env rules_for_fn calls_for_fn fn rhs
       ; let spec_binds1 = [NonRec b r | (b,r) <- spec_pairs]
             -- After the rules kick in we may get recursion, but
             -- we rely on a global GlomBinds to sort that out later
             -- See Note [Glom the bindings if imported functions are specialised]

             -- Now specialise any cascaded calls
             -- (fn is added to 'done' and to the caller stack so we
             -- neither re-specialise it nor lose the warning context)
       ; (rules2, spec_binds2) <- -- pprTrace "specImport 2" (ppr fn $$ ppr rules1 $$ ppr spec_binds1) $
                                  specImports dflags this_mod top_env
                                              (extendVarSet done fn)
                                              (fn:callers)
                                              (extendRuleBaseList rb rules1)
                                              (ud_calls uds)

             -- Don't forget to wrap the specialized bindings with bindings
             -- for the needed dictionaries
             -- See Note [Wrap bindings returned by specImports]
       ; let final_binds = wrapDictBinds (ud_binds uds)
                                         (spec_binds2 ++ spec_binds1)

       ; return (rules2 ++ rules1, final_binds) }

  | warnMissingSpecs dflags callers
  = do { warnMsg (vcat [ hang (text "Could not specialise imported function" <+> quotes (ppr fn))
                            2 (vcat [ text "when specialising" <+> quotes (ppr caller)
                                    | caller <- callers])
                       , ifPprDebug (text "calls:" <+> vcat (map (pprCallInfo fn) calls_for_fn))
                       , text "Probable fix: add INLINEABLE pragma on" <+> quotes (ppr fn) ])
       ; return ([], []) }

  | otherwise
  = return ([], [])
  where
    unfolding = realIdUnfolding fn   -- We want to see the unfolding even for loop breakers
warnMissingSpecs :: DynFlags -> [Id] -> Bool
-- See Note [Warning about missed specialisations]
-- True <=> emit a "could not specialise imported function" warning.
-- With -Wall-missed-specialisations we always warn; with plain
-- -Wmissed-specialisations we warn only when every caller on the
-- stack carries an INLINE-ish pragma (so the user clearly cared).
warnMissingSpecs dflags callers
  =  wopt Opt_WarnAllMissedSpecs dflags
  || (  wopt Opt_WarnMissedSpecs dflags
     && not (null callers)
     && all (isAnyInlinePragma . idInlinePragma) callers )
wantSpecImport :: DynFlags -> Unfolding -> Bool
-- See Note [Specialise imported INLINABLE things]
-- True <=> this imported thing's unfolding is worth specialising against.
wantSpecImport dflags unf
  = case unf of
      DFunUnfolding {} -> True
      CoreUnfolding { uf_src = src }
        | gopt Opt_SpecialiseAggressively dflags -> True
          -- Specialise even INLINE things; it hasn't inlined yet,
          -- so perhaps it never will.  Moreover it may have calls
          -- inside it that we want to specialise
        | otherwise -> isStableSource src
          -- Otherwise only stable (INLINABLE-style) unfoldings
      _ -> False   -- NoUnfolding / OtherCon: nothing to work with
{- Note [Warning about missed specialisations]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose
* In module Lib, you carefully mark a function 'foo' INLINEABLE
* Import Lib(foo) into another module M
* Call 'foo' at some specialised type in M
Then you jolly well expect it to be specialised in M. But what if
'foo' calls another function 'Lib.bar'.  Then you'd like 'bar' to be
specialised too. But if 'bar' is not marked INLINEABLE it may well
not be specialised. The warning Opt_WarnMissedSpecs warns about this.
It's more noisy to warning about a missed specialisation opportunity
for /every/ overloaded imported function, but sometimes useful. That
is what Opt_WarnAllMissedSpecs does.
ToDo: warn about missed opportunities for local functions.
Note [Specialise imported INLINABLE things]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
What imported functions do we specialise? The basic set is
* DFuns and things with INLINABLE pragmas.
but with -fspecialise-aggressively we add
* Anything with an unfolding template
Trac #8874 has a good example of why we want to auto-specialise DFuns.
We have the -fspecialise-aggressively flag (usually off), because we
risk lots of orphan modules from over-vigorous specialisation.
However it's not a big deal: anything non-recursive with an
unfolding-template will probably have been inlined already.
Note [Glom the bindings if imported functions are specialised]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose we have an imported, *recursive*, INLINABLE function
f :: Eq a => a -> a
f = /\a \d x. ...(f a d)...
In the module being compiled we have
g x = f (x::Int)
Now we'll make a specialised function
f_spec :: Int -> Int
f_spec = \x -> ...(f Int dInt)...
{-# RULE f Int _ = f_spec #-}
g = \x. f Int dInt x
Note that f_spec doesn't look recursive
After rewriting with the RULE, we get
f_spec = \x -> ...(f_spec)...
BUT since f_spec was non-recursive before it'll *stay* non-recursive.
The occurrence analyser never turns a NonRec into a Rec. So we must
make sure that f_spec is recursive. Easiest thing is to make all
the specialisations for imported bindings recursive.
Note [Avoiding recursive specialisation]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When we specialise 'f' we may find new overloaded calls to 'g', 'h' in
'f's RHS. So we want to specialise g,h. But we don't want to
specialise f any more! It's possible that f's RHS might have a
recursive yet-more-specialised call, so we'd diverge in that case.
And if the call is to the same type, one specialisation is enough.
Avoiding this recursive specialisation loop is the reason for the
'done' VarSet passed to specImports and specImport.
************************************************************************
* *
\subsubsection{@specExpr@: the main function}
* *
************************************************************************
-}
-- | Environment carried down through the expression walk by the
-- specialiser.
data SpecEnv
  = SE { se_subst :: CoreSubst.Subst
             -- We carry a substitution down:
             -- a) we must clone any binding that might float outwards,
             --    to avoid name clashes
             -- b) we carry a type substitution to use when analysing
             --    the RHS of specialised bindings (no type-let!)

       , se_interesting :: VarSet
             -- Dict Ids that we know something about
             -- and hence may be worth specialising against
             -- See Note [Interesting dictionary arguments]
     }
-- | Look an Id up in the environment's substitution, returning the
-- expression it maps to (or the variable itself if unmapped).
specVar :: SpecEnv -> Id -> CoreExpr
specVar env = CoreSubst.lookupIdSubst (text "specVar") (se_subst env)
-- | Specialise an expression, returning the rewritten expression
-- together with the 'UsageDetails' (call instances plus floating
-- dictionary bindings) gathered from it.
specExpr :: SpecEnv -> CoreExpr -> SpecM (CoreExpr, UsageDetails)

---------------- First the easy cases --------------------
specExpr env (Type ty)     = return (Type (substTy env ty), emptyUDs)
specExpr env (Coercion co) = return (Coercion (substCo env co), emptyUDs)
specExpr env (Var v)       = return (specVar env v, emptyUDs)
specExpr _   (Lit lit)     = return (Lit lit, emptyUDs)
specExpr env (Cast e co)
  = do { (e', uds) <- specExpr env e
       ; return ((mkCast e' (substCo env co)), uds) }
specExpr env (Tick tickish body)
  = do { (body', uds) <- specExpr env body
       ; return (Tick (specTickish env tickish) body', uds) }

---------------- Applications might generate a call instance --------------------
specExpr env expr@(App {})
  = go expr []
  where
    -- Walk down the application spine, specialising and accumulating
    -- the arguments; at the head variable record a call instance
    -- (mkCallUDs) against the full argument list.
    go (App fun arg) args = do (arg', uds_arg) <- specExpr env arg
                               (fun', uds_app) <- go fun (arg':args)
                               return (App fun' arg', uds_arg `plusUDs` uds_app)
    go (Var f)       args = case specVar env f of
                              Var f' -> return (Var f', mkCallUDs env f' args)
                              e'     -> return (e', emptyUDs) -- I don't expect this!
    go other         _    = specExpr env other

---------------- Lambda/case require dumping of usage details --------------------
specExpr env e@(Lam _ _) = do
    (body', uds) <- specExpr env' body
    -- Usage details that mention the lambda binders cannot float past
    -- them; dump those as dictionary bindings just inside the lambdas.
    let (free_uds, dumped_dbs) = dumpUDs bndrs' uds
    return (mkLams bndrs' (wrapDictBindsE dumped_dbs body'), free_uds)
  where
    (bndrs, body)  = collectBinders e
    (env', bndrs') = substBndrs env bndrs
        -- More efficient to collect a group of binders together all at once
        -- and we don't want to split a lambda group with dumped bindings

specExpr env (Case scrut case_bndr ty alts)
  = do { (scrut', scrut_uds) <- specExpr env scrut
       ; (scrut'', case_bndr', alts', alts_uds)
             <- specCase env scrut' case_bndr alts
       ; return (Case scrut'' case_bndr' (substTy env ty) alts'
                , scrut_uds `plusUDs` alts_uds) }

---------------- Finally, let is the interesting case --------------------
specExpr env (Let bind body)
  = do { -- Clone binders
         (rhs_env, body_env, bind') <- cloneBindSM env bind

         -- Deal with the body
       ; (body', body_uds) <- specExpr body_env body

         -- Deal with the bindings
       ; (binds', uds) <- specBind rhs_env bind' body_uds

         -- All done
       ; return (foldr Let body' binds', uds) }
-- | Apply the specialiser's substitution to the free 'Id's captured by a
-- tick.  Only breakpoints carry such a list; any other tick is returned
-- unchanged.
specTickish :: SpecEnv -> Tickish Id -> Tickish Id
specTickish env (Breakpoint ix ids)
  = Breakpoint ix (foldr keep [] ids)
  where
    -- Keep a variable only if the substitution maps it to another
    -- variable; drop it if it has a non-variable substitution.
    -- That should never happen, but dropping such entries is harmless.
    keep bp_id acc = case specVar env bp_id of
                       Var bp_id' -> bp_id' : acc
                       _          -> acc
specTickish _ other_tickish = other_tickish
-- | Specialise the alternatives of a case.  The single-alternative
-- dictionary case may additionally invert the case, floating out
-- bindings for the superclass selections so that calls mentioning them
-- can still be specialised; see Note [Floating dictionaries out of cases].
specCase :: SpecEnv
         -> CoreExpr            -- Scrutinee, already done
         -> Id -> [CoreAlt]
         -> SpecM ( CoreExpr    -- New scrutinee
                  , Id
                  , [CoreAlt]
                  , UsageDetails)
specCase env scrut' case_bndr [(con, args, rhs)]
  | isDictId case_bndr           -- See Note [Floating dictionaries out of cases]
  , interestingDict env scrut'
  , not (isDeadBinder case_bndr && null sc_args')
  = do { (case_bndr_flt : sc_args_flt) <- mapM clone_me (case_bndr' : sc_args')

       ; let sc_rhss = [ Case (Var case_bndr_flt) case_bndr' (idType sc_arg')
                              [(con, args', Var sc_arg')]
                       | sc_arg' <- sc_args' ]

             -- Extend the substitution for RHS to map the *original* binders
             -- to their floated versions.
             mb_sc_flts :: [Maybe DictId]
             mb_sc_flts = map (lookupVarEnv clone_env) args'
             clone_env  = zipVarEnv sc_args' sc_args_flt
             subst_prs  = (case_bndr, Var case_bndr_flt)
                        : [ (arg, Var sc_flt)
                          | (arg, Just sc_flt) <- args `zip` mb_sc_flts ]
             env_rhs' = env_rhs { se_subst = CoreSubst.extendIdSubstList (se_subst env_rhs) subst_prs
                                , se_interesting = se_interesting env_rhs `extendVarSetList`
                                                   (case_bndr_flt : sc_args_flt) }

       ; (rhs', rhs_uds)   <- specExpr env_rhs' rhs
       ; let scrut_bind    = mkDB (NonRec case_bndr_flt scrut')
             case_bndr_set = unitVarSet case_bndr_flt
             sc_binds      = [(NonRec sc_arg_flt sc_rhs, case_bndr_set)
                             | (sc_arg_flt, sc_rhs) <- sc_args_flt `zip` sc_rhss ]
             flt_binds     = scrut_bind : sc_binds
             (free_uds, dumped_dbs) = dumpUDs (case_bndr':args') rhs_uds
             all_uds = flt_binds `addDictBinds` free_uds
             alt'    = (con, args', wrapDictBindsE dumped_dbs rhs')
       ; return (Var case_bndr_flt, case_bndr', [alt'], all_uds) }
  where
    (env_rhs, (case_bndr':args')) = substBndrs env (case_bndr:args)
    sc_args' = filter is_flt_sc_arg args'

    -- Make a fresh Id with the same name/type/location as the original,
    -- but a new unique
    clone_me bndr = do { uniq <- getUniqueM
                       ; return (mkUserLocalOrCoVar occ uniq ty loc) }
       where
         name = idName bndr
         ty   = idType bndr
         occ  = nameOccName name
         loc  = getSrcSpan name

    arg_set = mkVarSet args'
    -- An argument is worth floating only if it is a live dictionary whose
    -- type does not mention the other case binders (else the floated
    -- binding would have escaping variables)
    is_flt_sc_arg var =  isId var
                      && not (isDeadBinder var)
                      && isDictTy var_ty
                      && not (tyCoVarsOfType var_ty `intersectsVarSet` arg_set)
       where
         var_ty = idType var


specCase env scrut case_bndr alts
  = do { (alts', uds_alts) <- mapAndCombineSM spec_alt alts
       ; return (scrut, case_bndr', alts', uds_alts) }
  where
    (env_alt, case_bndr') = substBndr env case_bndr
    -- Specialise one alternative, dumping any usage details that mention
    -- the alternative's binders around its RHS
    spec_alt (con, args, rhs) = do
          (rhs', uds) <- specExpr env_rhs rhs
          let (free_uds, dumped_dbs) = dumpUDs (case_bndr' : args') uds
          return ((con, args', wrapDictBindsE dumped_dbs rhs'), free_uds)
        where
          (env_rhs, args') = substBndrs env_alt args
{-
Note [Floating dictionaries out of cases]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
g = \d. case d of { MkD sc ... -> ...(f sc)... }
Naively we can't float d2's binding out of the case expression,
because 'sc' is bound by the case, and that in turn means we can't
specialise f, which seems a pity.
So we invert the case, by floating out a binding
for 'sc_flt' thus:
sc_flt = case d of { MkD sc ... -> sc }
Now we can float the call instance for 'f'. Indeed this is just
what'll happen if 'sc' was originally bound with a let binding,
but case is more efficient, and necessary with equalities. So it's
good to work with both.
You might think that this won't make any difference, because the
call instance will only get nuked by the \d. BUT if 'g' itself is
specialised, then transitively we should be able to specialise f.
In general, given
case e of cb { MkD sc ... -> ...(f sc)... }
we transform to
let cb_flt = e
sc_flt = case cb_flt of { MkD sc ... -> sc }
in
case cb_flt of bg { MkD sc ... -> ....(f sc_flt)... }
The "_flt" things are the floated binds; we use the current substitution
to substitute sc -> sc_flt in the RHS
************************************************************************
* *
Dealing with a binding
* *
************************************************************************
-}
-- | Specialise a binding (and the usage details from its scope),
-- producing the new bindings to emit here plus the usage details to
-- propagate further out.
specBind :: SpecEnv                     -- Use this for RHSs
         -> CoreBind
         -> UsageDetails                -- Info on how the scope of the binding
         -> SpecM ([CoreBind],          -- New bindings
                   UsageDetails)        -- And info to pass upstream

-- Returned UsageDetails:
--    No calls for binders of this bind
specBind rhs_env (NonRec fn rhs) body_uds
  = do { (rhs', rhs_uds) <- specExpr rhs_env rhs
       ; (fn', spec_defns, body_uds1) <- specDefn rhs_env body_uds fn rhs

       ; let pairs = spec_defns ++ [(fn', rhs')]
                        -- fn' mentions the spec_defns in its rules,
                        -- so put the latter first

             combined_uds = body_uds1 `plusUDs` rhs_uds
                -- This way round a call in rhs_uds of a function f
                -- at type T will override a call of f at T in body_uds1; and
                -- that is good because it'll tend to keep "earlier" calls
                -- See Note [Specialisation of dictionary functions]

             (free_uds, dump_dbs, float_all) = dumpBindUDs [fn] combined_uds
                -- See Note [From non-recursive to recursive]

             final_binds :: [DictBind]
             final_binds
               | isEmptyBag dump_dbs = [mkDB $ NonRec b r | (b,r) <- pairs]
               | otherwise = [flattenDictBinds dump_dbs pairs]
                 -- If any dumped dict binds mention fn we must turn the
                 -- whole lot into a single Rec; see the Note above

         ; if float_all then
             -- Rather than discard the calls mentioning the bound variables
             -- we float this binding along with the others
              return ([], free_uds `snocDictBinds` final_binds)
           else
             -- No call in final_uds mentions bound variables,
             -- so we can just leave the binding here
              return (map fst final_binds, free_uds) }


specBind rhs_env (Rec pairs) body_uds
       -- Note [Specialising a recursive group]
  = do { let (bndrs,rhss) = unzip pairs
       ; (rhss', rhs_uds) <- mapAndCombineSM (specExpr rhs_env) rhss
       ; let scope_uds = body_uds `plusUDs` rhs_uds
                       -- Includes binds and calls arising from rhss

       ; (bndrs1, spec_defns1, uds1) <- specDefns rhs_env scope_uds pairs

         -- A second sweep, so that call info arising from the first round
         -- of specialisations can trigger further specialisations
       ; (bndrs3, spec_defns3, uds3)
             <- if null spec_defns1  -- Common case: no specialisation
                then return (bndrs1, [], uds1)
                else do {            -- Specialisation occurred; do it again
                          (bndrs2, spec_defns2, uds2)
                              <- specDefns rhs_env uds1 (bndrs1 `zip` rhss)
                        ; return (bndrs2, spec_defns2 ++ spec_defns1, uds2) }

       ; let (final_uds, dumped_dbs, float_all) = dumpBindUDs bndrs uds3
             bind = flattenDictBinds dumped_dbs
                                     (spec_defns3 ++ zip bndrs3 rhss')

       ; if float_all then
              return ([], final_uds `snocDictBind` bind)
         else
              return ([fst bind], final_uds) }
---------------------------
-- | Specialise a list of bindings (the contents of a @Rec@), threading
-- usage information upwards binding by binding.
--
-- Example: @{ f = ...g...; g = ...f... }@.  If the incoming CallDetails
-- has a specialised call for @g@, whose specialisation in turn generates
-- a specialised call for @f@, we catch that in this single right-to-left
-- sweep.  But not vice versa (it's a fixpoint problem).
specDefns :: SpecEnv
          -> UsageDetails               -- Info on how it is used in its scope
          -> [(Id,CoreExpr)]            -- The things being bound and their un-processed RHS
          -> SpecM ([Id],               -- Original Ids with RULES added
                    [(Id,CoreExpr)],    -- Extra, specialised bindings
                    UsageDetails)       -- Stuff to fling upwards from the specialised versions

specDefns _env uds [] = return ([], [], uds)
specDefns env uds ((fn, fn_rhs) : rest)
  = do { -- Process the later bindings first, so their usage information
         -- is available when we specialise this one
         (rest_ids, rest_defns, rest_uds) <- specDefns env uds rest
       ; (fn', fn_defns, final_uds)       <- specDefn env rest_uds fn fn_rhs
       ; return (fn' : rest_ids, rest_defns ++ fn_defns, final_uds) }
---------------------------
-- | Specialise a single binder: generate the specialised copies demanded
-- by the calls recorded for it, and attach the resulting RULES to the
-- original 'Id'.
specDefn :: SpecEnv
         -> UsageDetails                -- Info on how it is used in its scope
         -> Id -> CoreExpr              -- The thing being bound and its un-processed RHS
         -> SpecM (Id,                  -- Original Id with added RULES
                   [(Id,CoreExpr)],     -- Extra, specialised bindings
                   UsageDetails)        -- Stuff to fling upwards from the specialised versions

specDefn env body_uds fn rhs
  = do { let existing_rules = idCoreRules fn
             (body_uds_without_me, my_calls) = callsForMe fn body_uds
       ; (new_rules, fresh_defns, fresh_uds)
             <- specCalls Nothing env existing_rules my_calls fn rhs
       ; return ( fn `addIdSpecialisations` new_rules
                , fresh_defns
                , body_uds_without_me `plusUDs` fresh_uds) }
                  -- It's important that the `plusUDs` is this way
                  -- round, because body_uds_without_me may bind
                  -- dictionaries that are used in my_calls passed
                  -- to specCalls.  So the dictionary bindings in
                  -- fresh_uds may mention dictionaries bound in
                  -- body_uds_without_me
---------------------------
-- | The workhorse: given the calls recorded for a function, produce one
-- specialised copy (plus a RULE rewriting matching calls to it) for each
-- call pattern not already covered by an existing rule.
specCalls :: Maybe Module               -- Just this_mod  =>  specialising imported fn
                                        -- Nothing        =>  specialising local fn
          -> SpecEnv
          -> [CoreRule]                 -- Existing RULES for the fn
          -> [CallInfo]
          -> Id -> CoreExpr
          -> SpecM ([CoreRule],         -- New RULES for the fn
                    [(Id,CoreExpr)],    -- Extra, specialised bindings
                    UsageDetails)       -- New usage details from the specialised RHSs

-- This function checks existing rules, and does not create
-- duplicate ones. So the caller does not need to do this filtering.
-- See 'already_covered'

specCalls mb_mod env rules_for_me calls_for_me fn rhs
        -- The first case is the interesting one
  |  rhs_tyvars `lengthIs`      n_tyvars -- Rhs of fn's defn has right number of big lambdas
  && rhs_ids    `lengthAtLeast` n_dicts  -- and enough dict args
  && notNull calls_for_me                -- And there are some calls to specialise
  && not (isNeverActive (idInlineActivation fn))
        -- Don't specialise NOINLINE things
        -- See Note [Auto-specialisation and RULES]

--   && not (certainlyWillInline (idUnfolding fn))      -- And it's not small
--      See Note [Inline specialisation] for why we do not
--      switch off specialisation for inline functions

  = -- pprTrace "specDefn: some" (ppr fn $$ ppr calls_for_me $$ ppr rules_for_me) $
    do { stuff <- mapM spec_call calls_for_me
       ; let (spec_defns, spec_uds, spec_rules) = unzip3 (catMaybes stuff)
       ; return (spec_rules, spec_defns, plusUDList spec_uds) }

  | otherwise   -- No calls or RHS doesn't fit our preconceptions
  = WARN( not (exprIsTrivial rhs) && notNull calls_for_me,
          text "Missed specialisation opportunity for"
                                 <+> ppr fn $$ _trace_doc )
          -- Note [Specialisation shape]
    -- pprTrace "specDefn: none" (ppr fn <+> ppr calls_for_me) $
    return ([], [], emptyUDs)
  where
    _trace_doc = sep [ ppr rhs_tyvars, ppr n_tyvars
                     , ppr rhs_ids, ppr n_dicts
                     , ppr (idInlineActivation fn) ]

    fn_type   = idType fn
    fn_arity  = idArity fn
    fn_unf    = realIdUnfolding fn  -- Ignore loop-breaker-ness here
    (tyvars, theta, _) = tcSplitSigmaTy fn_type
    n_tyvars  = length tyvars
    n_dicts   = length theta
    inl_prag  = idInlinePragma fn
    inl_act   = inlinePragmaActivation inl_prag
    is_local  = isLocalId fn

        -- Figure out whether the function has an INLINE pragma
        -- See Note [Inline specialisations]

    (rhs_tyvars, rhs_ids, rhs_body) = collectTyAndValBinders rhs

    rhs_dict_ids = take n_dicts rhs_ids
    body         = mkLams (drop n_dicts rhs_ids) rhs_body
                -- Glue back on the non-dict lambdas

    -- True if an existing rule already matches this argument pattern,
    -- in which case we must not generate a duplicate specialisation
    already_covered :: DynFlags -> [CoreExpr] -> Bool
    already_covered dflags args      -- Note [Specialisations already covered]
       = isJust (lookupRule dflags
                            (CoreSubst.substInScope (se_subst env), realIdUnfolding)
                            (const True)
                            fn args rules_for_me)

    -- Reassemble the type arguments of the rule LHS: constrained
    -- positions get the call's concrete type, unconstrained ones get a
    -- forall'd tyvar
    mk_ty_args :: [Maybe Type] -> [TyVar] -> [CoreExpr]
    mk_ty_args [] poly_tvs
      = ASSERT( null poly_tvs ) []
    mk_ty_args (Nothing : call_ts) (poly_tv : poly_tvs)
      = Type (mkTyVarTy poly_tv) : mk_ty_args call_ts poly_tvs
    mk_ty_args (Just ty : call_ts) poly_tvs
      = Type ty : mk_ty_args call_ts poly_tvs
    mk_ty_args (Nothing : _) [] = panic "mk_ty_args"

    ----------------------------------------------------------
        -- Specialise to one particular call pattern
    spec_call :: CallInfo                         -- Call instance
              -> SpecM (Maybe ((Id,CoreExpr),     -- Specialised definition
                               UsageDetails,      -- Usage details from specialised body
                               CoreRule))         -- Info for the Id's SpecEnv
    spec_call _call_info@(CallKey call_ts, (call_ds, _))
      = ASSERT( call_ts `lengthIs` n_tyvars && call_ds `lengthIs` n_dicts )

        -- Suppose f's defn is  f = /\ a b c -> \ d1 d2 -> rhs
        -- Suppose the call is for f [Just t1, Nothing, Just t3] [dx1, dx2]

        -- Construct the new binding
        --      f1 = SUBST[a->t1,c->t3, d1->d1', d2->d2'] (/\ b -> rhs)
        -- PLUS the rule
        --      RULE "SPEC f" forall b d1' d2'. f b d1' d2' = f1 b
        -- In the rule, d1' and d2' are just wildcards, not used in the RHS
        -- PLUS the usage-details
        --      { d1' = dx1; d2' = dx2 }
        -- where d1', d2' are cloned versions of d1,d2, with the type substitution
        -- applied.  These auxiliary bindings just avoid duplication of dx1, dx2
        --
        -- Note that the substitution is applied to the whole thing.
        -- This is convenient, but just slightly fragile.  Notably:
        --      * There had better be no name clashes in a/b/c
        do { let
                -- poly_tyvars = [b] in the example above
                -- spec_tyvars = [a,c]
                -- ty_args     = [t1,b,t3]
                spec_tv_binds = [(tv,ty)  | (tv, Just ty) <- rhs_tyvars `zip` call_ts]
                env1          = extendTvSubstList env spec_tv_binds
                (rhs_env, poly_tyvars) = substBndrs env1
                                            [tv | (tv, Nothing) <- rhs_tyvars `zip` call_ts]

             -- Clone rhs_dicts, including instantiating their types
           ; inst_dict_ids <- mapM (newDictBndr rhs_env) rhs_dict_ids
           ; let (rhs_env2, dx_binds, spec_dict_args)
                            = bindAuxiliaryDicts rhs_env rhs_dict_ids call_ds inst_dict_ids
                 ty_args    = mk_ty_args call_ts poly_tyvars
                 ev_args    = map varToCoreExpr inst_dict_ids  -- ev_args, ev_bndrs:
                 ev_bndrs   = exprsFreeIdsList ev_args         -- See Note [Evidence foralls]
                 rule_args  = ty_args     ++ ev_args
                 rule_bndrs = poly_tyvars ++ ev_bndrs

           ; dflags <- getDynFlags
           ; if already_covered dflags rule_args then
                return Nothing
             else -- pprTrace "spec_call" (vcat [ ppr _call_info, ppr fn, ppr rhs_dict_ids
                  --                            , text "rhs_env2" <+> ppr (se_subst rhs_env2)
                  --                            , ppr dx_binds ]) $
                  do
           {    -- Figure out the type of the specialised function
             let body_ty = applyTypeToArgs rhs fn_type rule_args
                 (lam_args, app_args)           -- Add a dummy argument if body_ty is unlifted
                   | isUnliftedType body_ty     -- C.f. WwLib.mkWorkerArgs
                   = (poly_tyvars ++ [voidArgId], poly_tyvars ++ [voidPrimId])
                   | otherwise = (poly_tyvars, poly_tyvars)
                 spec_id_ty = mkPiTypes lam_args body_ty

           ; spec_f <- newSpecIdSM fn spec_id_ty
           ; (spec_rhs, rhs_uds) <- specExpr rhs_env2 (mkLams lam_args body)
           ; this_mod <- getModule
           ; let
                -- The rule to put in the function's specialisation is:
                --      forall b, d1',d2'.  f t1 b t3 d1' d2' = f1 b
                herald = case mb_mod of
                           Nothing        -- Specialising local fn
                               -> text "SPEC"
                           Just this_mod  -- Specialising imported fn
                               -> text "SPEC/" <> ppr this_mod

                rule_name = mkFastString $ showSDocForUser dflags neverQualify $
                            herald <+> ppr fn <+> hsep (map ppr_call_key_ty call_ts)
                            -- This name ends up in interface files, so use showSDocForUser,
                            -- otherwise uniques end up there, making builds
                            -- less deterministic (See #4012 comment:61 ff)

                spec_env_rule = mkRule
                                  this_mod
                                  True {- Auto generated -}
                                  is_local
                                  rule_name
                                  inl_act       -- Note [Auto-specialisation and RULES]
                                  (idName fn)
                                  rule_bndrs
                                  rule_args
                                  (mkVarApps (Var spec_f) app_args)

                -- Add the { d1' = dx1; d2' = dx2 } usage stuff
                final_uds = foldr consDictBind rhs_uds dx_binds

                --------------------------------------
                -- Add a suitable unfolding if the spec_inl_prag says so
                -- See Note [Inline specialisations]
                (spec_inl_prag, spec_unf)
                  | not is_local && isStrongLoopBreaker (idOccInfo fn)
                  = (neverInlinePragma, noUnfolding)
                        -- See Note [Specialising imported functions] in OccurAnal

                  | InlinePragma { inl_inline = Inlinable } <- inl_prag
                  = (inl_prag { inl_inline = EmptyInlineSpec }, noUnfolding)

                  | otherwise
                  = (inl_prag, specUnfolding dflags spec_unf_subst poly_tyvars
                                             spec_unf_args fn_unf)

                spec_unf_args  = ty_args ++ spec_dict_args
                spec_unf_subst = CoreSubst.setInScope (se_subst env)
                                 (CoreSubst.substInScope (se_subst rhs_env2))
                     -- Extend the in-scope set to satisfy the precondition of
                     -- specUnfolding, namely that in-scope(unf_subst) includes
                     -- the free vars of spec_unf_args.  The in-scope set of rhs_env2
                     -- is just the ticket; but the actual substitution we want is
                     -- the same old one from 'env'

                --------------------------------------
                -- Adding arity information just propagates it a bit faster
                --      See Note [Arity decrease] in Simplify
                -- Copy InlinePragma information from the parent Id.
                -- So if f has INLINE[1] so does spec_f
                spec_f_w_arity = spec_f `setIdArity`      max 0 (fn_arity - n_dicts)
                                        `setInlinePragma` spec_inl_prag
                                        `setIdUnfolding`  spec_unf

           ; return (Just ((spec_f_w_arity, spec_rhs), final_uds, spec_env_rule)) } }
{- Note [Evidence foralls]
~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose (Trac #12212) that we are specialising
f :: forall a b. (Num a, F a ~ F b) => blah
with a=b=Int. Then the RULE will be something like
RULE forall (d:Num Int) (g :: F Int ~ F Int).
f Int Int d g = f_spec
But both varToCoreExpr (when constructing the LHS args), and the
simplifier (when simplifying the LHS args), will transform to
RULE forall (d:Num Int) (g :: F Int ~ F Int).
f Int Int d <F Int> = f_spec
by replacing g with Refl. So now 'g' is unbound, which results in a later
crash. So we use Refl right off the bat, and do not forall-quantify 'g':
* varToCoreExpr generates a Refl
* exprsFreeIdsList returns the Ids bound by the args,
which won't include g
You might wonder if this will match as often, but the simplifier replaces
complicated Refl coercions with Refl pretty aggressively.
Note [Orphans and auto-generated rules]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When we specialise an INLINEABLE function, or when we have
-fspecialise-aggressively, we auto-generate RULES that are orphans.
We don't want to warn about these, or we'd generate a lot of warnings.
Thus, we only warn about user-specified orphan rules.
Indeed, we don't even treat the module as an orphan module if it has
auto-generated *rule* orphans. Orphan modules are read every time we
compile, so they are pretty obtrusive and slow down every compilation,
even non-optimised ones. (Reason: for type class instances it's a
type correctness issue.) But specialisation rules are strictly for
*optimisation* only so it's fine not to read the interface.
What this means is that a SPEC rules from auto-specialisation in
module M will be used in other modules only if M.hi has been read for
some other reason, which is actually pretty likely.
-}
-- | Bind each non-trivial dictionary argument to a fresh name (to
-- preserve sharing), and extend the substitution so the original dict
-- binders map to the (trivial) witnessing expressions.
bindAuxiliaryDicts
        :: SpecEnv
        -> [DictId] -> [CoreExpr]   -- Original dict bndrs, and the witnessing expressions
        -> [DictId]                 -- A cloned dict-id for each dict arg
        -> (SpecEnv,                -- Substitute for all orig_dicts
            [DictBind],             -- Auxiliary dict bindings
            [CoreExpr])             -- Witnessing expressions (all trivial)
-- Bind any dictionary arguments to fresh names, to preserve sharing
bindAuxiliaryDicts env@(SE { se_subst = subst, se_interesting = interesting })
                   orig_dict_ids call_ds inst_dict_ids
  = (env', dx_binds, spec_dict_args)
  where
    (dx_binds, spec_dict_args) = go call_ds inst_dict_ids

    env' = env { se_subst = subst `CoreSubst.extendSubstList`
                                     (orig_dict_ids `zip` spec_dict_args)
                                  `CoreSubst.extendInScopeList` dx_ids
               , se_interesting = interesting `unionVarSet` interesting_dicts }

    dx_ids = [dx_id | (NonRec dx_id _, _) <- dx_binds]

    interesting_dicts = mkVarSet [ dx_id | (NonRec dx_id dx, _) <- dx_binds
                                 , interestingDict env dx ]
                  -- See Note [Make the new dictionaries interesting]

    -- Pair each witnessing expression with its cloned binder: trivial
    -- expressions are passed through directly, the rest get a binding
    go :: [CoreExpr] -> [CoreBndr] -> ([DictBind], [CoreExpr])
    go [] _  = ([], [])
    go (dx:dxs) (dx_id:dx_ids)
      | exprIsTrivial dx = (dx_binds, dx : args)
      | otherwise        = (mkDB (NonRec dx_id dx) : dx_binds, Var dx_id : args)
      where
        (dx_binds, args) = go dxs dx_ids
        -- In the first case extend the substitution but not bindings;
        -- in the latter extend the bindings but not the substitution.
        -- For the former, note that we bind the *original* dict in the substitution,
        -- overriding any d->dx_id binding put there by substBndrs

    go _ _ = pprPanic "bindAuxiliaryDicts" (ppr orig_dict_ids $$ ppr call_ds $$ ppr inst_dict_ids)
{-
Note [Make the new dictionaries interesting]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Important! We're going to substitute dx_id1 for d
and we want it to look "interesting", else we won't gather *any*
consequential calls. E.g.
f d = ...g d....
If we specialise f for a call (f (dfun dNumInt)), we'll get
a consequent call (g d') with an auxiliary definition
d' = df dNumInt
We want that consequent call to look interesting
Note [From non-recursive to recursive]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Even in the non-recursive case, if any dict-binds depend on 'fn' we might
have built a recursive knot
f a d x = <blah>
MkUD { ud_binds = d7 = MkD ..f..
, ud_calls = ...(f T d7)... }
The we generate
Rec { fs x = <blah>[T/a, d7/d]
f a d x = <blah>
RULE f T _ = fs
d7 = ...f... }
Here the recursion is only through the RULE.
Note [Specialisation of dictionary functions]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Here is a nasty example that bit us badly: see Trac #3591
class Eq a => C a
instance Eq [a] => C [a]
---------------
dfun :: Eq [a] -> C [a]
dfun a d = MkD a d (meth d)
d4 :: Eq [T] = <blah>
d2 :: C [T] = dfun T d4
d1 :: Eq [T] = $p1 d2
d3 :: C [T] = dfun T d1
None of these definitions is recursive. What happened was that we
generated a specialisation:
RULE forall d. dfun T d = dT :: C [T]
dT = (MkD a d (meth d)) [T/a, d1/d]
= MkD T d1 (meth d1)
But now we use the RULE on the RHS of d2, to get
d2 = dT = MkD d1 (meth d1)
d1 = $p1 d2
and now d1 is bottom! The problem is that when specialising 'dfun' we
should first dump "below" the binding all floated dictionary bindings
that mention 'dfun' itself. So d2 and d3 (and hence d1) must be
placed below 'dfun', and thus unavailable to it when specialising
'dfun'. That in turn means that the call (dfun T d1) must be
discarded. On the other hand, the call (dfun T d4) is fine, assuming
d4 doesn't mention dfun.
But look at this:
class C a where { foo,bar :: [a] -> [a] }
instance C Int where
foo x = r_bar x
bar xs = reverse xs
r_bar :: C a => [a] -> [a]
r_bar xs = bar (xs ++ xs)
That translates to:
r_bar a (c::C a) (xs::[a]) = bar a d (xs ++ xs)
Rec { $fCInt :: C Int = MkC foo_help reverse
foo_help (xs::[Int]) = r_bar Int $fCInt xs }
The call (r_bar $fCInt) mentions $fCInt,
which mentions foo_help,
which mentions r_bar
But we DO want to specialise r_bar at Int:
Rec { $fCInt :: C Int = MkC foo_help reverse
foo_help (xs::[Int]) = r_bar Int $fCInt xs
r_bar a (c::C a) (xs::[a]) = bar a d (xs ++ xs)
RULE r_bar Int _ = r_bar_Int
r_bar_Int xs = bar Int $fCInt (xs ++ xs)
}
Note that, because of its RULE, r_bar joins the recursive
group. (In this case it'll unravel a short moment later.)
Conclusion: we catch the nasty case using filter_dfuns in
callsForMe. To be honest I'm not 100% certain that this is 100%
right, but it works. Sigh.
Note [Specialising a recursive group]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
let rec { f x = ...g x'...
; g y = ...f y'.... }
in f 'a'
Here we specialise 'f' at Char; but that is very likely to lead to
a specialisation of 'g' at Char. We must do the latter, else the
whole point of specialisation is lost.
But we do not want to keep iterating to a fixpoint, because in the
presence of polymorphic recursion we might generate an infinite number
of specialisations.
So we use the following heuristic:
* Arrange the rec block in dependency order, so far as possible
(the occurrence analyser already does this)
* Specialise it much like a sequence of lets
* Then go through the block a second time, feeding call-info from
the RHSs back in the bottom, as it were
In effect, the ordering maximises the effectiveness of each sweep,
and we do just two sweeps. This should catch almost every case of
monomorphic recursion -- the exception could be a very knotted-up
recursion with multiple cycles tied up together.
This plan is implemented in the Rec case of specBindItself.
Note [Specialisations already covered]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We obviously don't want to generate two specialisations for the same
argument pattern. There are two wrinkles
1. We do the already-covered test in specDefn, not when we generate
the CallInfo in mkCallUDs. We used to test in the latter place, but
we now iterate the specialiser somewhat, and the Id at the call site
might therefore not have all the RULES that we can see in specDefn
2. What about two specialisations where the second is an *instance*
of the first? If the more specific one shows up first, we'll generate
specialisations for both. If the *less* specific one shows up first,
we *don't* currently generate a specialisation for the more specific
one. (See the call to lookupRule in already_covered.) Reasons:
(a) lookupRule doesn't say which matches are exact (bad reason)
(b) if the earlier specialisation is user-provided, it's
far from clear that we should auto-specialise further
Note [Auto-specialisation and RULES]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider:
g :: Num a => a -> a
g = ...
f :: (Int -> Int) -> Int
f w = ...
{-# RULE f g = 0 #-}
Suppose that auto-specialisation makes a specialised version of
g::Int->Int That version won't appear in the LHS of the RULE for f.
So if the specialisation rule fires too early, the rule for f may
never fire.
It might be possible to add new rules, to "complete" the rewrite system.
Thus when adding
RULE forall d. g Int d = g_spec
also add
RULE f g_spec = 0
But that's a bit complicated. For now we ask the programmer's help,
by *copying the INLINE activation pragma* to the auto-specialised
rule. So if g says {-# NOINLINE[2] g #-}, then the auto-spec rule
will also not be active until phase 2. And that's what programmers
should jolly well do anyway, even aside from specialisation, to ensure
that g doesn't inline too early.
This in turn means that the RULE would never fire for a NOINLINE
thing so not much point in generating a specialisation at all.
Note [Specialisation shape]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
We only specialise a function if it has visible top-level lambdas
corresponding to its overloading. E.g. if
f :: forall a. Eq a => ....
then its body must look like
f = /\a. \d. ...
Reason: when specialising the body for a call (f ty dexp), we want to
substitute dexp for d, and pick up specialised calls in the body of f.
This doesn't always work. One example I came across was this:
newtype Gen a = MkGen{ unGen :: Int -> a }
choose :: Eq a => a -> Gen a
choose n = MkGen (\r -> n)
oneof = choose (1::Int)
It's a silly example, but we get
choose = /\a. g `cast` co
where choose doesn't have any dict arguments. Thus far I have not
tried to fix this (wait till there's a real example).
Mind you, then 'choose' will be inlined (since RHS is trivial) so
it doesn't matter. This comes up with single-method classes
class C a where { op :: a -> a }
instance C a => C [a] where ....
==>
$fCList :: C a => C [a]
$fCList = $copList |> (...coercion>...)
....(uses of $fCList at particular types)...
So we suppress the WARN if the rhs is trivial.
Note [Inline specialisations]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Here is what we do with the InlinePragma of the original function
* Activation/RuleMatchInfo: both transferred to the
specialised function
* InlineSpec:
(a) An INLINE pragma is transferred
(b) An INLINABLE pragma is *not* transferred
Why (a): transfer INLINE pragmas? The point of INLINE was precisely to
specialise the function at its call site, and arguably that's not so
important for the specialised copies. BUT *pragma-directed*
specialisation now takes place in the typechecker/desugarer, with
manually specified INLINEs. The specialisation here is automatic.
It'd be very odd if a function marked INLINE was specialised (because
of some local use), and then forever after (including importing
modules) the specialised version wasn't INLINEd. After all, the
programmer said INLINE!
You might wonder why we specialise INLINE functions at all. After
all they should be inlined, right? Two reasons:
* Even INLINE functions are sometimes not inlined, when they aren't
applied to interesting arguments. But perhaps the type arguments
alone are enough to specialise (even though the args are too boring
to trigger inlining), and it's certainly better to call the
specialised version.
* The RHS of an INLINE function might call another overloaded function,
and we'd like to generate a specialised version of that function too.
This actually happens a lot. Consider
replicateM_ :: (Monad m) => Int -> m a -> m ()
{-# INLINABLE replicateM_ #-}
replicateM_ d x ma = ...
The strictness analyser may transform to
replicateM_ :: (Monad m) => Int -> m a -> m ()
{-# INLINE replicateM_ #-}
replicateM_ d x ma = case x of I# x' -> $wreplicateM_ d x' ma
$wreplicateM_ :: (Monad m) => Int# -> m a -> m ()
{-# INLINABLE $wreplicateM_ #-}
$wreplicateM_ = ...
Now an importing module has a specialised call to replicateM_, say
(replicateM_ dMonadIO). We certainly want to specialise $wreplicateM_!
This particular example had a huge effect on the call to replicateM_
in nofib/shootout/n-body.
Why (b): discard INLINEABLE pragmas? See Trac #4874 for persuasive examples.
Suppose we have
{-# INLINABLE f #-}
f :: Ord a => [a] -> Int
f xs = letrec f' = ...f'... in f'
Then, when f is specialised and optimised we might get
wgo :: [Int] -> Int#
wgo = ...wgo...
f_spec :: [Int] -> Int
f_spec xs = case wgo xs of { r -> I# r }
and we clearly want to inline f_spec at call sites. But if we still
have the big, un-optimised of f (albeit specialised) captured in an
INLINABLE pragma for f_spec, we won't get that optimisation.
So we simply drop INLINABLE pragmas when specialising. It's not really
a complete solution; ignoring specalisation for now, INLINABLE functions
don't get properly strictness analysed, for example. But it works well
for examples involving specialisation, which is the dominant use of
INLINABLE. See Trac #4874.
************************************************************************
* *
\subsubsection{UsageDetails and suchlike}
* *
************************************************************************
-}
-- | Information accumulated bottom-up by the specialiser: floated
-- dictionary bindings plus the call instances we may want to specialise.
data UsageDetails
  = MkUD {
      ud_binds :: !(Bag DictBind),
               -- Floated dictionary bindings
               -- The order is important;
               -- in ds1 `union` ds2, bindings in ds2 can depend on those in ds1
               -- (Remember, Bags preserve order in GHC.)

      ud_calls :: !CallDetails

      -- INVARIANT: suppose bs = bindersOf ud_binds
      -- Then 'calls' may *mention* 'bs',
      -- but there should be no calls *for* bs
    }
-- | Debug rendering: @MkUD {binds = ..., calls = ...}@
instance Outputable UsageDetails where
  ppr (MkUD { ud_binds = dbs, ud_calls = calls })
    = text "MkUD" <+> braces (sep (punctuate comma fields))
    where
      fields = [ text "binds" <+> equals <+> ppr dbs
               , text "calls" <+> equals <+> ppr calls ]
-- | A 'DictBind' is a binding along with a cached set containing its free
-- variables (both type variables and dictionaries)
type DictBind = (CoreBind, VarSet)

-- | An expression that yields a dictionary
type DictExpr = CoreExpr

-- | The empty 'UsageDetails': no floated bindings, no recorded calls
emptyUDs :: UsageDetails
emptyUDs = MkUD { ud_binds = emptyBag, ud_calls = emptyDVarEnv }
------------------------------------------------------------
-- | All recorded calls, keyed by the function being called
type CallDetails  = DIdEnv CallInfoSet
  -- The order of specialized binds and rules depends on how we linearize
  -- CallDetails, so to get determinism we must use a deterministic set here.
  -- See Note [Deterministic UniqFM] in UniqDFM

-- | The type arguments at which a function is called
newtype CallKey   = CallKey [Maybe Type]
  -- Nothing => unconstrained type argument

-- | The set of distinct call patterns recorded for one function.
-- See Note [CallInfoSet determinism]
data CallInfoSet = CIS Id (Bag CallInfo)
  -- The list of types and dictionaries is guaranteed to
  -- match the type of f
{-
Note [CallInfoSet determinism]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
CallInfoSet holds a Bag of (CallKey, [DictExpr], VarSet) triplets for a given
Id. They represent the types that the function is instantiated at along with
the dictionaries and free variables.
We use this information to generate specialized versions of a given function.
CallInfoSet used to be defined as:
data CallInfoSet = CIS Id (Map CallKey ([DictExpr], VarSet))
Unfortunately this was not deterministic. The Ord instance of CallKey was
defined in terms of cmpType which is not deterministic.
See Note [cmpType nondeterminism].
The end result was that if the function had multiple specializations they would
be generated in arbitrary order.
We need a container that:
a) when turned into a list has only one element per each CallKey and the list
has deterministic order
b) supports union
c) supports singleton
d) supports filter
We can't use UniqDFM here because there's no one Unique that we can key on.
The current approach is to implement the set as a Bag with duplicates.
This makes b), c), d) trivial and pushes a) towards the end. The deduplication
is done by using a TrieMap for membership tests on CallKey. This lets us delete
the nondeterministic Ord CallKey instance.
An alternative approach would be to augment the Map the same way that UniqDFM
is augmented, by keeping track of insertion order and using it to order the
resulting lists. It would mean keeping the nondeterministic Ord CallKey
instance making it easy to reintroduce nondeterminism in the future.
-}
-- | Convert a 'CallInfoSet' to a duplicate-free list of calls, in a
-- deterministic order.  See Note [CallInfoSet determinism].
ciSetToList :: CallInfoSet -> [CallInfo]
ciSetToList (CIS _ b) = snd $ foldrBag combine (emptyTM, []) b
  where
    -- This is where we eliminate duplicates, recording the CallKeys we've
    -- already seen in the TrieMap. See Note [CallInfoSet determinism].
    combine :: CallInfo -> (CallKeySet, [CallInfo]) -> (CallKeySet, [CallInfo])
    combine ci@(CallKey key, _) (set, acc)
      | Just _ <- lookupTM key set = (set, acc)
      | otherwise = (insertTM key () set, ci:acc)

-- | A TrieMap keyed on 'CallKey' contents, used purely for membership.
type CallKeySet = ListMap (MaybeMap TypeMap) ()
  -- We only use it in ciSetToList to check for membership
-- | Keep only those recorded calls that satisfy the predicate.
ciSetFilter :: (CallInfo -> Bool) -> CallInfoSet -> CallInfoSet
ciSetFilter keep (CIS fn calls) = CIS fn (filterBag keep calls)
type CallInfo = (CallKey, ([DictExpr], VarSet))
                     -- Range is dict args and the vars of the whole
                     -- call (including tyvars)
                     -- [*not* include the main id itself, of course]

instance Outputable CallInfoSet where
  -- Renamed the pattern variable from 'map' to 'calls': it shadowed
  -- Prelude.map, which is used just below in pprCallInfo.
  ppr (CIS fn calls) = hang (text "CIS" <+> ppr fn)
                          2 (ppr calls)

-- | Pretty-print one recorded call of @fn@: the function followed by its
-- (possibly unconstrained) type arguments.
pprCallInfo :: Id -> CallInfo -> SDoc
pprCallInfo fn (CallKey mb_tys, (_dxs, _))
  = hang (ppr fn)
       2 (fsep (map ppr_call_key_ty mb_tys {- ++ map pprParendExpr _dxs -}))

-- | Render one type argument of a call key; @_@ marks an unconstrained one.
ppr_call_key_ty :: Maybe Type -> SDoc
ppr_call_key_ty Nothing   = char '_'
ppr_call_key_ty (Just ty) = char '@' <+> pprParendType ty

instance Outputable CallKey where
  ppr (CallKey ts) = ppr ts
-- | Union two 'CallDetails', merging the call sets of any function
-- present in both.
unionCalls :: CallDetails -> CallDetails -> CallDetails
unionCalls = plusDVarEnv_C unionCallInfoSet

-- | Union two call-info sets for the same function.
unionCallInfoSet :: CallInfoSet -> CallInfoSet -> CallInfoSet
unionCallInfoSet (CIS fn cs1) (CIS _ cs2) = CIS fn (cs1 `unionBags` cs2)

-- | The free variables of every call recorded in a 'CallDetails'.
callDetailsFVs :: CallDetails -> VarSet
callDetailsFVs
  = nonDetFoldUDFM (unionVarSet . callInfoFVs) emptyVarSet
  -- Using nonDetFoldUDFM is fine here: any ordering is forgotten
  -- immediately because the result is a nondeterministic set.

-- | The free variables of every call in one 'CallInfoSet'.
callInfoFVs :: CallInfoSet -> VarSet
callInfoFVs (CIS _ infos)
  = foldrBag (\(_, (_, fvs)) acc -> fvs `unionVarSet` acc) emptyVarSet infos
------------------------------------------------------------
-- | Record a single call of @id@ at the given type arguments, with the
-- given dictionary arguments; no floated bindings.
singleCall :: Id -> [Maybe Type] -> [DictExpr] -> UsageDetails
singleCall id tys dicts
  = MkUD {ud_binds = emptyBag,
          ud_calls = unitDVarEnv id $ CIS id $
                     unitBag (CallKey tys, (dicts, call_fvs)) }
  where
    call_fvs = exprsFreeVars dicts `unionVarSet` tys_fvs
    tys_fvs  = tyCoVarsOfTypes (catMaybes tys)
        -- The type args (tys) are guaranteed to be part of the dictionary
        -- types, because they are just the constrained types,
        -- and the dictionary is therefore sure to be bound
        -- inside the binding for any type variables free in the type;
        -- hence it's safe to neglect tyvars free in tys when making
        -- the free-var set for this call
        -- BUT I don't trust this reasoning; play safe and include tys_fvs

        -- We don't include the 'id' itself.
-- | Build the 'UsageDetails' for one call @f args@: record the call if
-- @f@ is overloaded and the call looks worth specialising; otherwise
-- return 'emptyUDs'.
mkCallUDs, mkCallUDs' :: SpecEnv -> Id -> [CoreExpr] -> UsageDetails
mkCallUDs env f args
  = -- pprTrace "mkCallUDs" (vcat [ ppr f, ppr args, ppr res ])
    res
  where
    res = mkCallUDs' env f args

mkCallUDs' env f args
  | not (want_calls_for f)  -- Imported from elsewhere
  || null theta             -- Not overloaded
  = emptyUDs

  | not (all type_determines_value theta)
  || not (spec_tys `lengthIs` n_tyvars)
  || not ( dicts   `lengthIs` n_dicts)
  || not (any (interestingDict env) dicts)    -- Note [Interesting dictionary arguments]
  -- See also Note [Specialisations already covered]
  = -- pprTrace "mkCallUDs: discarding" _trace_doc
    emptyUDs    -- Not overloaded, or no specialisation wanted

  | otherwise
  = -- pprTrace "mkCallUDs: keeping" _trace_doc
    singleCall f spec_tys dicts
  where
    _trace_doc = vcat [ppr f, ppr args, ppr n_tyvars, ppr n_dicts
                      , ppr (map (interestingDict env) dicts)]
    (tyvars, theta, _) = tcSplitSigmaTy (idType f)
    constrained_tyvars = tyCoVarsOfTypes theta
    n_tyvars           = length tyvars
    n_dicts            = length theta

    -- One Maybe Type per tyvar (Just only for constrained tyvars),
    -- and one dictionary expression per element of theta
    spec_tys = [mk_spec_ty tv ty | (tv, ty) <- tyvars `type_zip` args]
    dicts    = [dict_expr | (_, dict_expr) <- theta `zip` (drop n_tyvars args)]

    -- ignores Coercion arguments
    type_zip :: [TyVar] -> [CoreExpr] -> [(TyVar, Type)]
    type_zip tvs (Coercion _ : args) = type_zip tvs args
    type_zip (tv:tvs) (Type ty : args) = (tv, ty) : type_zip tvs args
    type_zip _ _ = []

    mk_spec_ty tyvar ty
        | tyvar `elemVarSet` constrained_tyvars = Just ty
        | otherwise                             = Nothing

    want_calls_for f = isLocalId f || isJust (maybeUnfoldingTemplate (realIdUnfolding f))
         -- For imported things, we gather call instances if
         -- there is an unfolding that we could in principle specialise
         -- We might still decide not to use it (consulting dflags)
         -- in specImports
         -- Use 'realIdUnfolding' to ignore the loop-breaker flag!

    type_determines_value pred    -- See Note [Type determines value]
        = case classifyPredType pred of
            ClassPred cls _ -> not (isIPClass cls)  -- Superclasses can't be IPs
            EqPred {}       -> True
            IrredPred {}    -> True   -- Things like (D []) where D is a
                                      -- Constraint-ranged family; Trac #7785
{-
Note [Type determines value]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Only specialise if all overloading is on non-IP *class* params,
because these are the ones whose *type* determines their *value*. In
particular, with implicit params, the type args *don't* say what the
value of the implicit param is! See Trac #7101
However, consider
type family D (v::*->*) :: Constraint
type instance D [] = ()
f :: D v => v Char -> Int
If we see a call (f "foo"), we'll pass a "dictionary"
() |> (g :: () ~ D [])
and it's good to specialise f at this dictionary.
So the question is: can an implicit parameter "hide inside" a
type-family constraint like (D a). Well, no. We don't allow
type instance D Maybe = ?x:Int
Hence the IrredPred case in type_determines_value.
See Trac #7785.
Note [Interesting dictionary arguments]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider this
\a.\d:Eq a. let f = ... in ...(f d)...
There really is not much point in specialising f wrt the dictionary d,
because the code for the specialised f is not improved at all, because
d is lambda-bound. We simply get junk specialisations.
What is "interesting"? Just that it has *some* structure. But what about
variables?
* A variable might be imported, in which case its unfolding
will tell us whether it has useful structure
* Local variables are cloned on the way down (to avoid clashes when
we float dictionaries), and cloning drops the unfolding
(cloneIdBndr). Moreover, we make up some new bindings, and it's a
nuisance to give them unfoldings. So we keep track of the
"interesting" dictionaries as a VarSet in SpecEnv.
We have to take care to put any new interesting dictionary
bindings in the set.
We accidentally lost accurate tracking of local variables for a long
time, because cloned variables don't have unfoldings. But makes a
massive difference in a few cases, eg Trac #5113. For nofib as a
whole it's only a small win: 2.2% improvement in allocation for ansi,
1.2% for bspt, but mostly 0.0! Average 0.1% increase in binary size.
-}
interestingDict :: SpecEnv -> CoreExpr -> Bool
-- A dictionary argument is interesting if it has *some* structure.
-- NB: "dictionary" arguments include constraints of all sorts,
-- including equality constraints; hence the Coercion case.
-- See Note [Interesting dictionary arguments].
interestingDict env arg
  = case arg of
      Var v               -> hasSomeUnfolding (idUnfolding v)
                             || isDataConWorkId v
                             || v `elemVarSet` se_interesting env
      Type _              -> False
      Coercion _          -> False
      App fn (Type _)     -> interestingDict env fn
      App fn (Coercion _) -> interestingDict env fn
      Tick _ e            -> interestingDict env e
      Cast e _            -> interestingDict env e
      _                   -> True
-- | Combine two 'UsageDetails'.  Binding order is preserved: bindings in
-- the second argument may depend on bindings in the first.
plusUDs :: UsageDetails -> UsageDetails -> UsageDetails
plusUDs uds1 uds2
  = MkUD { ud_binds = ud_binds uds1 `unionBags`  ud_binds uds2
         , ud_calls = ud_calls uds1 `unionCalls` ud_calls uds2 }

-- | Combine a whole list of 'UsageDetails', left to right.
plusUDList :: [UsageDetails] -> UsageDetails
plusUDList = foldr plusUDs emptyUDs
-----------------------------
-- | All the binders of the floated dictionary bindings (debug helper).
_dictBindBndrs :: Bag DictBind -> [Id]
_dictBindBndrs dbs = foldrBag ((++) . bindersOf . fst) [] dbs

-- | Construct a 'DictBind' from a 'CoreBind'
mkDB :: CoreBind -> DictBind
mkDB bind = (bind, bind_fvs bind)

-- | Identify the free variables of a 'CoreBind'
bind_fvs :: CoreBind -> VarSet
bind_fvs (NonRec bndr rhs) = pair_fvs (bndr,rhs)
bind_fvs (Rec prs)         = foldl delVarSet rhs_fvs bndrs
                           where
                             bndrs = map fst prs
                             rhs_fvs = unionVarSets (map pair_fvs prs)

-- | Free variables of one (binder, rhs) pair, including variables free
-- in the binder's own IdInfo and type.
pair_fvs :: (Id, CoreExpr) -> VarSet
pair_fvs (bndr, rhs) = exprFreeVars rhs `unionVarSet` idFreeVars bndr
        -- Don't forget variables mentioned in the
        -- rules of the bndr.  C.f. OccAnal.addRuleUsage
        -- Also tyvars mentioned in its type; they may not appear in the RHS
        --      type T a = Int
        --      x :: T a = 3
-- | Flatten a set of 'DictBind's and some other binding pairs into a single
-- recursive binding, including some additional bindings.
flattenDictBinds :: Bag DictBind -> [(Id,CoreExpr)] -> DictBind
flattenDictBinds dbs pairs
  = (Rec bindings, fvs)
  where
    -- foldrBag keeps the original left-to-right binding order in the
    -- accumulated list; the extra pairs go at the very end.
    (bindings, fvs) = foldrBag add
                               ([], emptyVarSet)
                               (dbs `snocBag` mkDB (Rec pairs))
    add (NonRec b r, fvs') (pairs, fvs) =
      ((b,r) : pairs, fvs `unionVarSet` fvs')
    add (Rec prs1,   fvs') (pairs, fvs) =
      (prs1 ++ pairs, fvs `unionVarSet` fvs')
snocDictBinds :: UsageDetails -> [DictBind] -> UsageDetails
-- Add ud_binds to the tail end of the bindings in uds
snocDictBinds uds dbs
  = uds { ud_binds = ud_binds uds `unionBags` listToBag dbs }
    -- listToBag keeps the list order, exactly like the previous
    -- hand-rolled 'foldr consBag emptyBag', but states the intent
    -- directly (and matches addDictBinds below).

-- | Add one binding at the front of the floated bindings.
consDictBind :: DictBind -> UsageDetails -> UsageDetails
consDictBind bind uds = uds { ud_binds = bind `consBag` ud_binds uds }

-- | Add several bindings at the front of the floated bindings.
addDictBinds :: [DictBind] -> UsageDetails -> UsageDetails
addDictBinds binds uds = uds { ud_binds = listToBag binds `unionBags` ud_binds uds }

-- | Add one binding at the tail end of the floated bindings.
snocDictBind :: UsageDetails -> DictBind -> UsageDetails
snocDictBind uds bind = uds { ud_binds = ud_binds uds `snocBag` bind }
-- | Prepend the given dictionary bindings, in order, to a list of bindings.
wrapDictBinds :: Bag DictBind -> [CoreBind] -> [CoreBind]
wrapDictBinds dbs binds = foldrBag prepend binds dbs
  where
    prepend (bind, _) rest = bind : rest

-- | Wrap an expression in a 'Let' for each dictionary binding,
-- outermost binding first.
wrapDictBindsE :: Bag DictBind -> CoreExpr -> CoreExpr
wrapDictBindsE dbs expr = foldrBag wrap expr dbs
  where
    wrap (bind, _) body = Let bind body
----------------------
dumpUDs :: [CoreBndr] -> UsageDetails -> (UsageDetails, Bag DictBind)
-- Used at a lambda or case binder; just dump anything mentioning the binder
dumpUDs bndrs uds@(MkUD { ud_binds = orig_dbs, ud_calls = orig_calls })
  | null bndrs = (uds, emptyBag)  -- Common in case alternatives
  | otherwise  = -- pprTrace "dumpUDs" (ppr bndrs $$ ppr free_uds $$ ppr dump_dbs) $
                 (free_uds, dump_dbs)
  where
    free_uds = MkUD { ud_binds = free_dbs, ud_calls = free_calls }
    bndr_set = mkVarSet bndrs
    (free_dbs, dump_dbs, dump_set) = splitDictBinds orig_dbs bndr_set
    free_calls = deleteCallsMentioning dump_set $   -- Drop calls mentioning bndr_set on the floor
                 deleteCallsFor bndrs orig_calls    -- Discard calls for bndr_set; there should be
                                                    -- no calls for any of the dicts in dump_dbs

-- | Like 'dumpUDs', but additionally returns a Bool saying whether the
-- dumped bindings are still mentioned by the remaining calls, in which
-- case the caller must float everything.
dumpBindUDs :: [CoreBndr] -> UsageDetails -> (UsageDetails, Bag DictBind, Bool)
-- Used at a lambda or case binder; just dump anything mentioning the binder
dumpBindUDs bndrs (MkUD { ud_binds = orig_dbs, ud_calls = orig_calls })
  = -- pprTrace "dumpBindUDs" (ppr bndrs $$ ppr free_uds $$ ppr dump_dbs) $
    (free_uds, dump_dbs, float_all)
  where
    free_uds = MkUD { ud_binds = free_dbs, ud_calls = free_calls }
    bndr_set = mkVarSet bndrs
    (free_dbs, dump_dbs, dump_set) = splitDictBinds orig_dbs bndr_set
    free_calls = deleteCallsFor bndrs orig_calls
    float_all = dump_set `intersectsVarSet` callDetailsFVs free_calls
-- | Extract the calls recorded *for* @fn@, removing them from the
-- 'UsageDetails'.  For a DFun, additionally discard calls that depend
-- (transitively, through the floated bindings) on @fn@ itself;
-- see Note [Specialisation of dictionary functions].
callsForMe :: Id -> UsageDetails -> (UsageDetails, [CallInfo])
callsForMe fn (MkUD { ud_binds = orig_dbs, ud_calls = orig_calls })
  = -- pprTrace ("callsForMe")
    --          (vcat [ppr fn,
    --                 text "Orig dbs ="     <+> ppr (_dictBindBndrs orig_dbs),
    --                 text "Orig calls ="   <+> ppr orig_calls,
    --                 text "Dep set ="      <+> ppr dep_set,
    --                 text "Calls for me =" <+> ppr calls_for_me]) $
    (uds_without_me, calls_for_me)
  where
    uds_without_me = MkUD { ud_binds = orig_dbs
                          , ud_calls = delDVarEnv orig_calls fn }
    calls_for_me = case lookupDVarEnv orig_calls fn of
                        Nothing -> []
                        Just cis -> filter_dfuns (ciSetToList cis)

    -- Transitive closure: everything reachable from fn through the
    -- floated bindings (relies on the Bag's dependency order).
    dep_set = foldlBag go (unitVarSet fn) orig_dbs
    go dep_set (db,fvs) | fvs `intersectsVarSet` dep_set
                        = extendVarSetList dep_set (bindersOf db)
                        | otherwise = dep_set

        -- Note [Specialisation of dictionary functions]
    filter_dfuns | isDFunId fn = filter ok_call
                 | otherwise   = \cs -> cs

    ok_call (_, (_,fvs)) = not (fvs `intersectsVarSet` dep_set)
----------------------
-- | Split the floated bindings into those free of @bndr_set@ and those
-- that must be dumped because they (transitively) mention it.  The
-- returned IdSet is @bndr_set@ extended with the binders of everything
-- dumped.
splitDictBinds :: Bag DictBind -> IdSet -> (Bag DictBind, Bag DictBind, IdSet)
-- Returns (free_dbs, dump_dbs, dump_set)
splitDictBinds dbs bndr_set
   = foldlBag split_db (emptyBag, emptyBag, bndr_set) dbs
                -- Important that it's foldl not foldr;
                -- we're accumulating the set of dumped ids in dump_set
   where
    split_db (free_dbs, dump_dbs, dump_idset) db@(bind, fvs)
        | dump_idset `intersectsVarSet` fvs     -- Dump it
        = (free_dbs, dump_dbs `snocBag` db,
           extendVarSetList dump_idset (bindersOf bind))

        | otherwise     -- Don't dump it
        = (free_dbs `snocBag` db, dump_dbs, dump_idset)
----------------------
-- | Remove every call whose free variables intersect @bs@
-- (calls *mentioning* bs).
deleteCallsMentioning :: VarSet -> CallDetails -> CallDetails
deleteCallsMentioning bs = mapDVarEnv (ciSetFilter ok)
  where
    ok (_, (_, fvs)) = not (fvs `intersectsVarSet` bs)

-- | Remove the calls recorded *for* the given binders.
deleteCallsFor :: [Id] -> CallDetails -> CallDetails
deleteCallsFor bs calls = delDVarEnvList calls bs
{-
************************************************************************
* *
\subsubsection{Boring helper functions}
* *
************************************************************************
-}
-- | The specialiser monad: a state monad carrying a unique supply, the
-- module being compiled, and the DynFlags.
newtype SpecM a = SpecM (State SpecState a)

data SpecState = SpecState {
                     spec_uniq_supply :: UniqSupply,
                     spec_module :: Module,
                     spec_dflags :: DynFlags
                 }

instance Functor SpecM where
    fmap = liftM

instance Applicative SpecM where
    pure x = SpecM $ return x
    (<*>) = ap

instance Monad SpecM where
    SpecM x >>= f = SpecM $ do y <- x
                               case f y of
                                   SpecM z ->
                                       z
    return = pure
    fail str = SpecM $ fail str

#if __GLASGOW_HASKELL__ > 710
instance MonadFail.MonadFail SpecM where
    fail str = SpecM $ fail str
#endif

-- Threads the unique supply through the state so every use gets a
-- fresh supply/unique.
instance MonadUnique SpecM where
    getUniqueSupplyM
        = SpecM $ do st <- get
                     let (us1, us2) = splitUniqSupply $ spec_uniq_supply st
                     put $ st { spec_uniq_supply = us2 }
                     return us1

    getUniqueM
        = SpecM $ do st <- get
                     let (u,us') = takeUniqFromSupply $ spec_uniq_supply st
                     put $ st { spec_uniq_supply = us' }
                     return u

instance HasDynFlags SpecM where
    getDynFlags = SpecM $ liftM spec_dflags get

instance HasModule SpecM where
    getModule = SpecM $ liftM spec_module get
-- | Run a 'SpecM' computation inside 'CoreM', seeding it with a fresh
-- unique supply, the given module, and the given 'DynFlags'.
runSpecM :: DynFlags -> Module -> SpecM a -> CoreM a
runSpecM dflags this_mod (SpecM spec) = do
  us <- getUniqueSupplyM
  let st0 = SpecState { spec_uniq_supply = us
                      , spec_module      = this_mod
                      , spec_dflags      = dflags }
  return (evalState spec st0)
-- | Map a monadic function over a list, combining all the resulting
-- 'UsageDetails' with 'plusUDs' (left to right).
mapAndCombineSM :: (a -> SpecM (b, UsageDetails)) -> [a] -> SpecM ([b], UsageDetails)
mapAndCombineSM _ []     = return ([], emptyUDs)
mapAndCombineSM f (x:xs) = do
  (hd, uds_here) <- f x
  (tl, uds_rest) <- mapAndCombineSM f xs
  return (hd : tl, uds_here `plusUDs` uds_rest)
-- | Extend the environment's substitution with type-variable bindings.
extendTvSubstList :: SpecEnv -> [(TyVar,Type)] -> SpecEnv
extendTvSubstList env tv_binds
  = env { se_subst = CoreSubst.extendTvSubstList (se_subst env) tv_binds }

-- | Apply the environment's substitution to a type.
substTy :: SpecEnv -> Type -> Type
substTy env ty = CoreSubst.substTy (se_subst env) ty

-- | Apply the environment's substitution to a coercion.
substCo :: SpecEnv -> Coercion -> Coercion
substCo env co = CoreSubst.substCo (se_subst env) co

-- | Substitute one binder, returning the updated environment and binder.
substBndr :: SpecEnv -> CoreBndr -> (SpecEnv, CoreBndr)
substBndr env bs = case CoreSubst.substBndr (se_subst env) bs of
                      (subst', bs') -> (env { se_subst = subst' }, bs')

-- | Substitute several binders, left to right.
substBndrs :: SpecEnv -> [CoreBndr] -> (SpecEnv, [CoreBndr])
substBndrs env bs = case CoreSubst.substBndrs (se_subst env) bs of
                      (subst', bs') -> (env { se_subst = subst' }, bs')
cloneBindSM :: SpecEnv -> CoreBind -> SpecM (SpecEnv, SpecEnv, CoreBind)
-- Clone the binders of the bind; return new bind with the cloned binders
-- Return the substitution to use for RHSs, and the one to use for the body
cloneBindSM env@(SE { se_subst = subst, se_interesting = interesting }) (NonRec bndr rhs)
  = do { us <- getUniqueSupplyM
       ; let (subst', bndr') = CoreSubst.cloneIdBndr subst us bndr
             -- Cloning drops unfoldings, so we track "interesting"
             -- dictionaries ourselves; see
             -- Note [Interesting dictionary arguments]
             interesting' | interestingDict env rhs
                          = interesting `extendVarSet` bndr'
                          | otherwise = interesting
       ; return (env, env { se_subst = subst', se_interesting = interesting' }
                , NonRec bndr' rhs) }

cloneBindSM env@(SE { se_subst = subst, se_interesting = interesting }) (Rec pairs)
  = do { us <- getUniqueSupplyM
       ; let (subst', bndrs') = CoreSubst.cloneRecIdBndrs subst us (map fst pairs)
             env' = env { se_subst = subst'
                        , se_interesting = interesting `extendVarSetList`
                                           [ v | (v,r) <- pairs, interestingDict env r ] }
       ; return (env', env', Rec (bndrs' `zip` map snd pairs)) }
newDictBndr :: SpecEnv -> CoreBndr -> SpecM CoreBndr
-- Make up completely fresh binders for the dictionaries
-- Their bindings are going to float outwards
newDictBndr env b = do { uniq <- getUniqueM
                       ; let n   = idName b
                             ty' = substTy env (idType b)
                       ; return (mkUserLocalOrCoVar (nameOccName n) uniq ty' (getSrcSpan n)) }

-- | Make a fresh Id for a specialised copy of @old_id@ at @new_ty@,
-- with an occurrence name derived from the old one via 'mkSpecOcc'.
newSpecIdSM :: Id -> Type -> SpecM Id
    -- Give the new Id a similar occurrence name to the old one
newSpecIdSM old_id new_ty
  = do  { uniq <- getUniqueM
        ; let name    = idName old_id
              new_occ = mkSpecOcc (nameOccName name)
              new_id  = mkUserLocalOrCoVar new_occ uniq new_ty (getSrcSpan name)
        ; return new_id }
{-
Old (but interesting) stuff about unboxed bindings
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
What should we do when a value is specialised to a *strict* unboxed value?
map_*_* f (x:xs) = let h = f x
t = map f xs
in h:t
Could convert let to case:
map_*_Int# f (x:xs) = case f x of h# ->
let t = map f xs
in h#:t
This may be undesirable since it forces evaluation here, but the value
may not be used in all branches of the body. In the general case this
transformation is impossible since the mutual recursion in a letrec
cannot be expressed as a case.
There is also a problem with top-level unboxed values, since our
implementation cannot handle unboxed values at the top level.
Solution: Lift the binding of the unboxed value and extract it when it
is used:
map_*_Int# f (x:xs) = let h = case (f x) of h# -> _Lift h#
t = map f xs
in case h of
_Lift h# -> h#:t
Now give it to the simplifier and the _Lifting will be optimised away.
The benefit is that we have given the specialised "unboxed" values a
very simple lifted semantics and then leave it up to the simplifier to
optimise it --- knowing that the overheads will be removed in nearly
all cases.
In particular, the value will only be evaluated in the branches of the
program which use it, rather than being forced at the point where the
value is bound. For example:
filtermap_*_* p f (x:xs)
= let h = f x
t = ...
in case p x of
True -> h:t
False -> t
==>
filtermap_*_Int# p f (x:xs)
= let h = case (f x) of h# -> _Lift h#
t = ...
in case p x of
True -> case h of _Lift h#
-> h#:t
False -> t
The binding for h can still be inlined in the one branch and the
_Lifting eliminated.
Question: When won't the _Lifting be eliminated?
Answer: When they are at the top-level (where it is necessary) or when
inlining would duplicate work (or possibly code depending on
options). However, the _Lifting will still be eliminated if the
strictness analyser deems the lifted binding strict.
-}
| GaloisInc/halvm-ghc | compiler/specialise/Specialise.hs | bsd-3-clause | 96,844 | 1 | 22 | 27,993 | 11,192 | 6,076 | 5,116 | -1 | -1 |
{-# LANGUAGE GeneralizedNewtypeDeriving, RecordWildCards #-}
{-|
Module: Yesod.Contrib.League.Crud.TVarMap
Description: Representing CRUD entities in memory
Copyright: ©2015 Christopher League
Maintainer: league@contrapunctus.net
This is a proof of concept for implementing CRUD operations that are not based
on Database.Persist. It uses a 'TVar' and a 'Map' from 'UUID' keys to the CRUD
entity.
-}
module Yesod.Contrib.League.Crud.TVarMap
( CrudTVarKey
, CrudTVarMap
, crudTVarMapDefaults
) where
import ClassyPrelude
import qualified Data.Map as Map
import Data.UUID
import System.Random
import Yesod.Contrib.League.Crud
import Yesod.Core
-- |The key type. A wrapper for 'UUID' that implements all the necessary
-- classes, including 'PathPiece'.
newtype CrudTVarKey =
  CrudTKey { crudUUID :: UUID }
  deriving (Eq, Ord, Read, Show, Random)

-- Keys appear in URLs; render via 'Show' of the UUID and parse back
-- via 'Read' ('readMay' yields Nothing on malformed input).
instance PathPiece CrudTVarKey where
  toPathPiece = tshow . crudUUID
  fromPathPiece = fmap CrudTKey . readMay
-- |Synonym for the map type.
type CrudTVarMap sub = Map CrudTVarKey (Obj sub)

-- |Retrieve a record of database operations for using a 'CrudTVarMap'.
crudTVarMapDefaults ::
  ( ObjId sub ~ CrudTVarKey )
  => CrudM sub (TVar (CrudTVarMap sub))
  -> CrudDB sub
crudTVarMapDefaults getMap = CrudDB {..}
  where
    crudSelect' = Map.toList <$> getTV
    crudGet' k = Map.lookup k <$> getTV
    crudReplace' k = modTV . Map.insert k
    crudDelete' = modTV . Map.delete
    crudInsert' o = do
      -- Fresh random key for the new object
      k <- liftIO randomIO
      modTV $ Map.insert k o
      return k
    -- Read the current map atomically
    getTV = getMap >>= atomically . readTVar
    -- Apply an update function to the map atomically
    modTV g = getMap >>= atomically . flip modifyTVar g
| league/yesod-crud | Yesod/Contrib/League/Crud/TVarMap.hs | bsd-3-clause | 1,705 | 0 | 11 | 375 | 328 | 178 | 150 | 33 | 1 |
{-# LANGUAGE CPP, Rank2Types, TypeFamilies #-}
-- |
-- Module : Data.Vector.Unboxed
-- Copyright : (c) Roman Leshchinskiy 2009-2010
-- License : BSD-style
--
-- Maintainer : Roman Leshchinskiy <rl@cse.unsw.edu.au>
-- Stability : experimental
-- Portability : non-portable
--
-- Adaptive unboxed vectors. The implementation is based on type families
-- and picks an efficient, specialised representation for every element type.
-- In particular, unboxed vectors of pairs are represented as pairs of unboxed
-- vectors.
--
-- Implementing unboxed vectors for new data types can be very easy. Here is
-- how the library does this for 'Complex' by simply wrapping vectors of
-- pairs.
--
-- @
-- newtype instance 'MVector' s ('Complex' a) = MV_Complex ('MVector' s (a,a))
-- newtype instance 'Vector' ('Complex' a) = V_Complex ('Vector' (a,a))
--
-- instance ('RealFloat' a, 'Unbox' a) => 'Data.Vector.Generic.Mutable.MVector' 'MVector' ('Complex' a) where
-- {-\# INLINE basicLength \#-}
-- basicLength (MV_Complex v) = 'Data.Vector.Generic.Mutable.basicLength' v
-- ...
--
-- instance ('RealFloat' a, 'Unbox' a) => Data.Vector.Generic.Vector 'Vector' ('Complex' a) where
-- {-\# INLINE basicLength \#-}
-- basicLength (V_Complex v) = Data.Vector.Generic.basicLength v
-- ...
--
-- instance ('RealFloat' a, 'Unbox' a) => 'Unbox' ('Complex' a)
-- @
module Data.Vector.Unboxed (
-- * Unboxed vectors
Vector, MVector(..), Unbox,
-- * Accessors
-- ** Length information
length, null,
-- ** Indexing
(!), (!?), head, last,
unsafeIndex, unsafeHead, unsafeLast,
-- ** Monadic indexing
indexM, headM, lastM,
unsafeIndexM, unsafeHeadM, unsafeLastM,
-- ** Extracting subvectors (slicing)
slice, init, tail, take, drop, splitAt,
unsafeSlice, unsafeInit, unsafeTail, unsafeTake, unsafeDrop,
-- * Construction
-- ** Initialisation
empty, singleton, replicate, generate, iterateN,
-- ** Monadic initialisation
replicateM, generateM, create,
-- ** Unfolding
unfoldr, unfoldrN,
constructN, constructrN,
-- ** Enumeration
enumFromN, enumFromStepN, enumFromTo, enumFromThenTo,
-- ** Concatenation
cons, snoc, (++), concat,
-- ** Restricting memory usage
force,
-- * Modifying vectors
-- ** Bulk updates
(//), update, update_,
unsafeUpd, unsafeUpdate, unsafeUpdate_,
-- ** Accumulations
accum, accumulate, accumulate_,
unsafeAccum, unsafeAccumulate, unsafeAccumulate_,
-- ** Permutations
reverse, backpermute, unsafeBackpermute,
-- ** Safe destructive updates
modify,
-- * Elementwise operations
-- ** Indexing
indexed,
-- ** Mapping
map, imap, concatMap,
-- ** Monadic mapping
mapM, imapM, mapM_, imapM_, forM, forM_,
-- ** Zipping
zipWith, zipWith3, zipWith4, zipWith5, zipWith6,
izipWith, izipWith3, izipWith4, izipWith5, izipWith6,
zip, zip3, zip4, zip5, zip6,
-- ** Monadic zipping
zipWithM, izipWithM, zipWithM_, izipWithM_,
-- ** Unzipping
unzip, unzip3, unzip4, unzip5, unzip6,
-- * Working with predicates
-- ** Filtering
filter, ifilter, filterM,
takeWhile, dropWhile,
-- ** Partitioning
partition, unstablePartition, span, break,
-- ** Searching
elem, notElem, find, findIndex, findIndices, elemIndex, elemIndices,
-- * Folding
foldl, foldl1, foldl', foldl1', foldr, foldr1, foldr', foldr1',
ifoldl, ifoldl', ifoldr, ifoldr',
-- ** Specialised folds
all, any, and, or,
sum, product,
maximum, maximumBy, minimum, minimumBy,
minIndex, minIndexBy, maxIndex, maxIndexBy,
-- ** Monadic folds
foldM, ifoldM, foldM', ifoldM',
fold1M, fold1M', foldM_, ifoldM_,
foldM'_, ifoldM'_, fold1M_, fold1M'_,
-- * Prefix sums (scans)
prescanl, prescanl',
postscanl, postscanl',
scanl, scanl', scanl1, scanl1',
prescanr, prescanr',
postscanr, postscanr',
scanr, scanr', scanr1, scanr1',
-- * Conversions
-- ** Lists
toList, fromList, fromListN,
-- ** Other vector types
G.convert,
-- ** Mutable vectors
freeze, thaw, copy, unsafeFreeze, unsafeThaw, unsafeCopy
) where
import Data.Vector.Unboxed.Base
import qualified Data.Vector.Generic as G
import qualified Data.Vector.Fusion.Bundle as Bundle
import Data.Vector.Fusion.Util ( delayed_min )
import Control.Monad.ST ( ST )
import Control.Monad.Primitive
import Prelude hiding ( length, null,
replicate, (++), concat,
head, last,
init, tail, take, drop, splitAt, reverse,
map, concatMap,
zipWith, zipWith3, zip, zip3, unzip, unzip3,
filter, takeWhile, dropWhile, span, break,
elem, notElem,
foldl, foldl1, foldr, foldr1,
all, any, and, or, sum, product, minimum, maximum,
scanl, scanl1, scanr, scanr1,
enumFromTo, enumFromThenTo,
mapM, mapM_ )
import Text.Read ( Read(..), readListPrecDefault )
#if !MIN_VERSION_base(4,8,0)
import Data.Monoid ( Monoid(..) )
#endif
#if __GLASGOW_HASKELL__ >= 708
import qualified GHC.Exts as Exts (IsList(..))
#endif
#define NOT_VECTOR_MODULE
#include "vector.h"
-- See http://trac.haskell.org/vector/ticket/12
-- Eq/Ord go via streams so that comparison fuses with producers.
instance (Unbox a, Eq a) => Eq (Vector a) where
  {-# INLINE (==) #-}
  xs == ys = Bundle.eq (G.stream xs) (G.stream ys)

  {-# INLINE (/=) #-}
  xs /= ys = not (Bundle.eq (G.stream xs) (G.stream ys))

-- See http://trac.haskell.org/vector/ticket/12
instance (Unbox a, Ord a) => Ord (Vector a) where
  {-# INLINE compare #-}
  compare xs ys = Bundle.cmp (G.stream xs) (G.stream ys)

  {-# INLINE (<) #-}
  xs < ys = Bundle.cmp (G.stream xs) (G.stream ys) == LT

  {-# INLINE (<=) #-}
  xs <= ys = Bundle.cmp (G.stream xs) (G.stream ys) /= GT

  {-# INLINE (>) #-}
  xs > ys = Bundle.cmp (G.stream xs) (G.stream ys) == GT

  {-# INLINE (>=) #-}
  xs >= ys = Bundle.cmp (G.stream xs) (G.stream ys) /= LT

-- Vectors form a monoid under concatenation, with the empty vector
-- as the identity.
instance Unbox a => Monoid (Vector a) where
  {-# INLINE mempty #-}
  mempty = empty

  {-# INLINE mappend #-}
  mappend = (++)

  {-# INLINE mconcat #-}
  mconcat = concat

instance (Show a, Unbox a) => Show (Vector a) where
  showsPrec = G.showsPrec

instance (Read a, Unbox a) => Read (Vector a) where
  readPrec = G.readPrec
  readListPrec = readListPrecDefault

#if __GLASGOW_HASKELL__ >= 708
-- Enables list-literal syntax for vectors via OverloadedLists.
instance (Unbox e) => Exts.IsList (Vector e) where
  type Item (Vector e) = e
  fromList = fromList
  fromListN = fromListN
  toList = toList
#endif
-- Length information
-- ------------------
-- | /O(1)/ Yield the length of the vector.
length :: Unbox a => Vector a -> Int
{-# INLINE length #-}
length = G.length
-- | /O(1)/ Test whether a vector is empty
null :: Unbox a => Vector a -> Bool
{-# INLINE null #-}
null = G.null
-- Indexing
-- --------
-- | O(1) Indexing
(!) :: Unbox a => Vector a -> Int -> a
{-# INLINE (!) #-}
(!) = (G.!)
-- | O(1) Safe indexing
(!?) :: Unbox a => Vector a -> Int -> Maybe a
{-# INLINE (!?) #-}
(!?) = (G.!?)
-- | /O(1)/ First element
head :: Unbox a => Vector a -> a
{-# INLINE head #-}
head = G.head
-- | /O(1)/ Last element
last :: Unbox a => Vector a -> a
{-# INLINE last #-}
last = G.last
-- | /O(1)/ Unsafe indexing without bounds checking
unsafeIndex :: Unbox a => Vector a -> Int -> a
{-# INLINE unsafeIndex #-}
unsafeIndex = G.unsafeIndex
-- | /O(1)/ First element without checking if the vector is empty
unsafeHead :: Unbox a => Vector a -> a
{-# INLINE unsafeHead #-}
unsafeHead = G.unsafeHead
-- | /O(1)/ Last element without checking if the vector is empty
unsafeLast :: Unbox a => Vector a -> a
{-# INLINE unsafeLast #-}
unsafeLast = G.unsafeLast
-- Monadic indexing
-- ----------------
-- | /O(1)/ Indexing in a monad.
--
-- The monad allows operations to be strict in the vector when necessary.
-- Suppose vector copying is implemented like this:
--
-- > copy mv v = ... write mv i (v ! i) ...
--
-- For lazy vectors, @v ! i@ would not be evaluated which means that @mv@
-- would unnecessarily retain a reference to @v@ in each element written.
--
-- With 'indexM', copying can be implemented like this instead:
--
-- > copy mv v = ... do
-- > x <- indexM v i
-- > write mv i x
--
-- Here, no references to @v@ are retained because indexing (but /not/ the
-- elements) is evaluated eagerly.
--
indexM :: (Unbox a, Monad m) => Vector a -> Int -> m a
{-# INLINE indexM #-}
indexM = G.indexM
-- | /O(1)/ First element of a vector in a monad. See 'indexM' for an
-- explanation of why this is useful.
headM :: (Unbox a, Monad m) => Vector a -> m a
{-# INLINE headM #-}
headM = G.headM
-- | /O(1)/ Last element of a vector in a monad. See 'indexM' for an
-- explanation of why this is useful.
lastM :: (Unbox a, Monad m) => Vector a -> m a
{-# INLINE lastM #-}
lastM = G.lastM
-- | /O(1)/ Indexing in a monad without bounds checks. See 'indexM' for an
-- explanation of why this is useful.
unsafeIndexM :: (Unbox a, Monad m) => Vector a -> Int -> m a
{-# INLINE unsafeIndexM #-}
unsafeIndexM = G.unsafeIndexM
-- | /O(1)/ First element in a monad without checking for empty vectors.
-- See 'indexM' for an explanation of why this is useful.
unsafeHeadM :: (Unbox a, Monad m) => Vector a -> m a
{-# INLINE unsafeHeadM #-}
unsafeHeadM = G.unsafeHeadM
-- | /O(1)/ Last element in a monad without checking for empty vectors.
-- See 'indexM' for an explanation of why this is useful.
unsafeLastM :: (Unbox a, Monad m) => Vector a -> m a
{-# INLINE unsafeLastM #-}
unsafeLastM = G.unsafeLastM
-- Extracting subvectors (slicing)
-- -------------------------------
-- | /O(1)/ Yield a slice of the vector without copying it. The vector must
-- contain at least @i+n@ elements.
slice :: Unbox a => Int -- ^ @i@ starting index
-> Int -- ^ @n@ length
-> Vector a
-> Vector a
{-# INLINE slice #-}
slice = G.slice
-- | /O(1)/ Yield all but the last element without copying. The vector may not
-- be empty.
init :: Unbox a => Vector a -> Vector a
{-# INLINE init #-}
init = G.init
-- | /O(1)/ Yield all but the first element without copying. The vector may not
-- be empty.
tail :: Unbox a => Vector a -> Vector a
{-# INLINE tail #-}
tail = G.tail
-- | /O(1)/ Yield the first @n@ elements without copying. The vector may
-- contain less than @n@ elements in which case it is returned unchanged.
take :: Unbox a => Int -> Vector a -> Vector a
{-# INLINE take #-}
take = G.take
-- | /O(1)/ Yield all but the first @n@ elements without copying. The vector may
-- contain less than @n@ elements in which case an empty vector is returned.
drop :: Unbox a => Int -> Vector a -> Vector a
{-# INLINE drop #-}
drop = G.drop
-- | /O(1)/ Yield the first @n@ elements paired with the remainder without copying.
--
-- Note that @'splitAt' n v@ is equivalent to @('take' n v, 'drop' n v)@
-- but slightly more efficient.
{-# INLINE splitAt #-}
splitAt :: Unbox a => Int -> Vector a -> (Vector a, Vector a)
splitAt = G.splitAt
-- | /O(1)/ Yield a slice of the vector without copying. The vector must
-- contain at least @i+n@ elements but this is not checked.
unsafeSlice :: Unbox a => Int -- ^ @i@ starting index
-> Int -- ^ @n@ length
-> Vector a
-> Vector a
{-# INLINE unsafeSlice #-}
unsafeSlice = G.unsafeSlice
-- | /O(1)/ Yield all but the last element without copying. The vector may not
-- be empty but this is not checked.
unsafeInit :: Unbox a => Vector a -> Vector a
{-# INLINE unsafeInit #-}
unsafeInit = G.unsafeInit
-- | /O(1)/ Yield all but the first element without copying. The vector may not
-- be empty but this is not checked.
unsafeTail :: Unbox a => Vector a -> Vector a
{-# INLINE unsafeTail #-}
unsafeTail = G.unsafeTail
-- | /O(1)/ Yield the first @n@ elements without copying. The vector must
-- contain at least @n@ elements but this is not checked.
unsafeTake :: Unbox a => Int -> Vector a -> Vector a
{-# INLINE unsafeTake #-}
unsafeTake = G.unsafeTake
-- | /O(1)/ Yield all but the first @n@ elements without copying. The vector
-- must contain at least @n@ elements but this is not checked.
unsafeDrop :: Unbox a => Int -> Vector a -> Vector a
{-# INLINE unsafeDrop #-}
unsafeDrop = G.unsafeDrop
-- Initialisation
-- --------------
-- | /O(1)/ Empty vector
empty :: Unbox a => Vector a
{-# INLINE empty #-}
empty = G.empty
-- | /O(1)/ Vector with exactly one element
singleton :: Unbox a => a -> Vector a
{-# INLINE singleton #-}
singleton = G.singleton
-- | /O(n)/ Vector of the given length with the same value in each position
replicate :: Unbox a => Int -> a -> Vector a
{-# INLINE replicate #-}
replicate = G.replicate
-- | /O(n)/ Construct a vector of the given length by applying the function to
-- each index
generate :: Unbox a => Int -> (Int -> a) -> Vector a
{-# INLINE generate #-}
generate = G.generate
-- | /O(n)/ Apply function n times to value. Zeroth element is original value.
iterateN :: Unbox a => Int -> (a -> a) -> a -> Vector a
{-# INLINE iterateN #-}
iterateN = G.iterateN
-- Unfolding
-- ---------
-- | /O(n)/ Construct a vector by repeatedly applying the generator function
-- to a seed. The generator function yields 'Just' the next element and the
-- new seed or 'Nothing' if there are no more elements.
--
-- > unfoldr (\n -> if n == 0 then Nothing else Just (n,n-1)) 10
-- > = <10,9,8,7,6,5,4,3,2,1>
unfoldr :: Unbox a => (b -> Maybe (a, b)) -> b -> Vector a
{-# INLINE unfoldr #-}
unfoldr = G.unfoldr
-- | /O(n)/ Construct a vector with at most @n@ elements by repeatedly applying
-- the generator function to a seed. The generator function yields 'Just' the
-- next element and the new seed or 'Nothing' if there are no more elements.
--
-- > unfoldrN 3 (\n -> Just (n,n-1)) 10 = <10,9,8>
unfoldrN :: Unbox a => Int -> (b -> Maybe (a, b)) -> b -> Vector a
{-# INLINE unfoldrN #-}
unfoldrN = G.unfoldrN
-- | /O(n)/ Construct a vector with @n@ elements by repeatedly applying the
-- generator function to the already constructed part of the vector.
--
-- > constructN 3 f = let a = f <> ; b = f <a> ; c = f <a,b> in f <a,b,c>
--
constructN :: Unbox a => Int -> (Vector a -> a) -> Vector a
{-# INLINE constructN #-}
constructN = G.constructN
-- | /O(n)/ Construct a vector with @n@ elements from right to left by
-- repeatedly applying the generator function to the already constructed part
-- of the vector.
--
-- > constructrN 3 f = let a = f <> ; b = f<a> ; c = f <b,a> in f <c,b,a>
--
constructrN :: Unbox a => Int -> (Vector a -> a) -> Vector a
{-# INLINE constructrN #-}
constructrN = G.constructrN
-- Enumeration
-- -----------
-- | /O(n)/ Yield a vector of the given length containing the values @x@, @x+1@
-- etc. This operation is usually more efficient than 'enumFromTo'.
--
-- > enumFromN 5 3 = <5,6,7>
enumFromN :: (Unbox a, Num a) => a -> Int -> Vector a
{-# INLINE enumFromN #-}
enumFromN = G.enumFromN
-- | /O(n)/ Yield a vector of the given length containing the values @x@, @x+y@,
-- @x+y+y@ etc. This operation is usually more efficient than 'enumFromThenTo'.
--
-- > enumFromStepN 1 0.1 5 = <1,1.1,1.2,1.3,1.4>
enumFromStepN :: (Unbox a, Num a) => a -> a -> Int -> Vector a
{-# INLINE enumFromStepN #-}
enumFromStepN = G.enumFromStepN
-- | /O(n)/ Enumerate values from @x@ to @y@.
--
-- /WARNING:/ This operation can be very inefficient. If at all possible, use
-- 'enumFromN' instead.
enumFromTo :: (Unbox a, Enum a) => a -> a -> Vector a
{-# INLINE enumFromTo #-}
enumFromTo = G.enumFromTo
-- | /O(n)/ Enumerate values from @x@ to @y@ with a specific step @z@.
--
-- /WARNING:/ This operation can be very inefficient. If at all possible, use
-- 'enumFromStepN' instead.
enumFromThenTo :: (Unbox a, Enum a) => a -> a -> a -> Vector a
{-# INLINE enumFromThenTo #-}
enumFromThenTo = G.enumFromThenTo
-- Concatenation
-- -------------
-- | /O(n)/ Prepend an element
cons :: Unbox a => a -> Vector a -> Vector a
{-# INLINE cons #-}
cons = G.cons
-- | /O(n)/ Append an element
snoc :: Unbox a => Vector a -> a -> Vector a
{-# INLINE snoc #-}
snoc = G.snoc
infixr 5 ++
-- | /O(m+n)/ Concatenate two vectors
(++) :: Unbox a => Vector a -> Vector a -> Vector a
{-# INLINE (++) #-}
(++) = (G.++)
-- | /O(n)/ Concatenate all vectors in the list
concat :: Unbox a => [Vector a] -> Vector a
{-# INLINE concat #-}
concat = G.concat
-- Monadic initialisation
-- ----------------------
-- | /O(n)/ Execute the monadic action the given number of times and store the
-- results in a vector.
replicateM :: (Monad m, Unbox a) => Int -> m a -> m (Vector a)
{-# INLINE replicateM #-}
replicateM = G.replicateM
-- | /O(n)/ Construct a vector of the given length by applying the monadic
-- action to each index
generateM :: (Monad m, Unbox a) => Int -> (Int -> m a) -> m (Vector a)
{-# INLINE generateM #-}
generateM = G.generateM
-- | Execute the monadic action and freeze the resulting vector.
--
-- @
-- create (do { v \<- new 2; write v 0 \'a\'; write v 1 \'b\'; return v }) = \<'a','b'\>
-- @
create :: Unbox a => (forall s. ST s (MVector s a)) -> Vector a
{-# INLINE create #-}
-- NOTE: eta-expanded due to http://hackage.haskell.org/trac/ghc/ticket/4120
create p = G.create p
-- Restricting memory usage
-- ------------------------
-- | /O(n)/ Yield the argument but force it not to retain any extra memory,
-- possibly by copying it.
--
-- This is especially useful when dealing with slices. For example:
--
-- > force (slice 0 2 <huge vector>)
--
-- Here, the slice retains a reference to the huge vector. Forcing it creates
-- a copy of just the elements that belong to the slice and allows the huge
-- vector to be garbage collected.
force :: Unbox a => Vector a -> Vector a
{-# INLINE force #-}
force = G.force
-- Bulk updates
-- ------------
-- | /O(m+n)/ For each pair @(i,a)@ from the list, replace the vector
-- element at position @i@ by @a@.
--
-- > <5,9,2,7> // [(2,1),(0,3),(2,8)] = <3,9,8,7>
--
(//) :: Unbox a => Vector a -- ^ initial vector (of length @m@)
-> [(Int, a)] -- ^ list of index/value pairs (of length @n@)
-> Vector a
{-# INLINE (//) #-}
(//) = (G.//)
-- | /O(m+n)/ For each pair @(i,a)@ from the vector of index/value pairs,
-- replace the vector element at position @i@ by @a@.
--
-- > update <5,9,2,7> <(2,1),(0,3),(2,8)> = <3,9,8,7>
--
update :: Unbox a
=> Vector a -- ^ initial vector (of length @m@)
-> Vector (Int, a) -- ^ vector of index/value pairs (of length @n@)
-> Vector a
{-# INLINE update #-}
update = G.update
-- | /O(m+min(n1,n2))/ For each index @i@ from the index vector and the
-- corresponding value @a@ from the value vector, replace the element of the
-- initial vector at position @i@ by @a@.
--
-- > update_ <5,9,2,7> <2,0,2> <1,3,8> = <3,9,8,7>
--
-- The function 'update' provides the same functionality and is usually more
-- convenient.
--
-- @
-- update_ xs is ys = 'update' xs ('zip' is ys)
-- @
update_ :: Unbox a
=> Vector a -- ^ initial vector (of length @m@)
-> Vector Int -- ^ index vector (of length @n1@)
-> Vector a -- ^ value vector (of length @n2@)
-> Vector a
{-# INLINE update_ #-}
update_ = G.update_
-- | Same as ('//') but without bounds checking.
unsafeUpd :: Unbox a => Vector a -> [(Int, a)] -> Vector a
{-# INLINE unsafeUpd #-}
unsafeUpd = G.unsafeUpd
-- | Same as 'update' but without bounds checking.
unsafeUpdate :: Unbox a => Vector a -> Vector (Int, a) -> Vector a
{-# INLINE unsafeUpdate #-}
unsafeUpdate = G.unsafeUpdate
-- | Same as 'update_' but without bounds checking.
unsafeUpdate_ :: Unbox a => Vector a -> Vector Int -> Vector a -> Vector a
{-# INLINE unsafeUpdate_ #-}
unsafeUpdate_ = G.unsafeUpdate_
-- Accumulations
-- -------------
-- | /O(m+n)/ For each pair @(i,b)@ from the list, replace the vector element
-- @a@ at position @i@ by @f a b@.
--
-- > accum (+) <5,9,2> [(2,4),(1,6),(0,3),(1,7)] = <5+3, 9+6+7, 2+4>
accum :: Unbox a
=> (a -> b -> a) -- ^ accumulating function @f@
-> Vector a -- ^ initial vector (of length @m@)
-> [(Int,b)] -- ^ list of index/value pairs (of length @n@)
-> Vector a
{-# INLINE accum #-}
accum = G.accum
-- | /O(m+n)/ For each pair @(i,b)@ from the vector of pairs, replace the vector
-- element @a@ at position @i@ by @f a b@.
--
-- > accumulate (+) <5,9,2> <(2,4),(1,6),(0,3),(1,7)> = <5+3, 9+6+7, 2+4>
accumulate :: (Unbox a, Unbox b)
=> (a -> b -> a) -- ^ accumulating function @f@
-> Vector a -- ^ initial vector (of length @m@)
-> Vector (Int,b) -- ^ vector of index/value pairs (of length @n@)
-> Vector a
{-# INLINE accumulate #-}
accumulate = G.accumulate
-- | /O(m+min(n1,n2))/ For each index @i@ from the index vector and the
-- corresponding value @b@ from the value vector,
-- replace the element of the initial vector at
-- position @i@ by @f a b@.
--
-- > accumulate_ (+) <5,9,2> <2,1,0,1> <4,6,3,7> = <5+3, 9+6+7, 2+4>
--
-- The function 'accumulate' provides the same functionality and is usually more
-- convenient.
--
-- @
-- accumulate_ f as is bs = 'accumulate' f as ('zip' is bs)
-- @
accumulate_ :: (Unbox a, Unbox b)
            => (a -> b -> a) -- ^ accumulating function @f@
            -> Vector a      -- ^ initial vector (of length @m@)
            -> Vector Int    -- ^ index vector (of length @n1@)
            -> Vector b     -- ^ value vector (of length @n2@)
            -> Vector a
{-# INLINE accumulate_ #-}
accumulate_ = G.accumulate_
-- | Same as 'accum' but without bounds checking.
unsafeAccum :: Unbox a => (a -> b -> a) -> Vector a -> [(Int,b)] -> Vector a
{-# INLINE unsafeAccum #-}
unsafeAccum = G.unsafeAccum
-- | Same as 'accumulate' but without bounds checking.
unsafeAccumulate :: (Unbox a, Unbox b)
=> (a -> b -> a) -> Vector a -> Vector (Int,b) -> Vector a
{-# INLINE unsafeAccumulate #-}
unsafeAccumulate = G.unsafeAccumulate
-- | Same as 'accumulate_' but without bounds checking.
unsafeAccumulate_ :: (Unbox a, Unbox b) =>
(a -> b -> a) -> Vector a -> Vector Int -> Vector b -> Vector a
{-# INLINE unsafeAccumulate_ #-}
unsafeAccumulate_ = G.unsafeAccumulate_
-- Permutations
-- ------------
-- | /O(n)/ Reverse a vector
reverse :: Unbox a => Vector a -> Vector a
{-# INLINE reverse #-}
reverse = G.reverse
-- | /O(n)/ Yield the vector obtained by replacing each element @i@ of the
-- index vector by @xs'!'i@. This is equivalent to @'map' (xs'!') is@ but is
-- often much more efficient.
--
-- > backpermute <a,b,c,d> <0,3,2,3,1,0> = <a,d,c,d,b,a>
backpermute :: Unbox a => Vector a -> Vector Int -> Vector a
{-# INLINE backpermute #-}
backpermute = G.backpermute
-- | Same as 'backpermute' but without bounds checking.
unsafeBackpermute :: Unbox a => Vector a -> Vector Int -> Vector a
{-# INLINE unsafeBackpermute #-}
unsafeBackpermute = G.unsafeBackpermute
-- Safe destructive updates
-- ------------------------
-- | Apply a destructive operation to a vector. The operation will be
-- performed in place if it is safe to do so and will modify a copy of the
-- vector otherwise.
--
-- @
-- modify (\\v -> write v 0 \'x\') ('replicate' 3 \'a\') = \<\'x\',\'a\',\'a\'\>
-- @
modify :: Unbox a => (forall s. MVector s a -> ST s ()) -> Vector a -> Vector a
{-# INLINE modify #-}
modify p = G.modify p
-- Indexing
-- --------
-- | /O(n)/ Pair each element in a vector with its index
indexed :: Unbox a => Vector a -> Vector (Int,a)
{-# INLINE indexed #-}
indexed = G.indexed
-- Mapping
-- -------
-- | /O(n)/ Map a function over a vector
map :: (Unbox a, Unbox b) => (a -> b) -> Vector a -> Vector b
{-# INLINE map #-}
map = G.map
-- | /O(n)/ Apply a function to every element of a vector and its index
imap :: (Unbox a, Unbox b) => (Int -> a -> b) -> Vector a -> Vector b
{-# INLINE imap #-}
imap = G.imap
-- | Map a function over a vector and concatenate the results.
concatMap :: (Unbox a, Unbox b) => (a -> Vector b) -> Vector a -> Vector b
{-# INLINE concatMap #-}
concatMap = G.concatMap
-- Monadic mapping
-- ---------------
-- | /O(n)/ Apply the monadic action to all elements of the vector, yielding a
-- vector of results
mapM :: (Monad m, Unbox a, Unbox b) => (a -> m b) -> Vector a -> m (Vector b)
{-# INLINE mapM #-}
mapM = G.mapM
-- | /O(n)/ Apply the monadic action to every element of a vector and its
-- index, yielding a vector of results
imapM :: (Monad m, Unbox a, Unbox b)
=> (Int -> a -> m b) -> Vector a -> m (Vector b)
{-# INLINE imapM #-}
imapM = G.imapM
-- | /O(n)/ Apply the monadic action to all elements of a vector and ignore the
-- results
mapM_ :: (Monad m, Unbox a) => (a -> m b) -> Vector a -> m ()
{-# INLINE mapM_ #-}
mapM_ = G.mapM_
-- | /O(n)/ Apply the monadic action to every element of a vector and its
-- index, ignoring the results
imapM_ :: (Monad m, Unbox a) => (Int -> a -> m b) -> Vector a -> m ()
{-# INLINE imapM_ #-}
imapM_ = G.imapM_
-- | /O(n)/ Apply the monadic action to all elements of the vector, yielding a
-- vector of results. Equivalent to @flip 'mapM'@.
forM :: (Monad m, Unbox a, Unbox b) => Vector a -> (a -> m b) -> m (Vector b)
{-# INLINE forM #-}
forM = G.forM
-- | /O(n)/ Apply the monadic action to all elements of a vector and ignore the
-- results. Equivalent to @flip 'mapM_'@.
forM_ :: (Monad m, Unbox a) => Vector a -> (a -> m b) -> m ()
{-# INLINE forM_ #-}
forM_ = G.forM_
-- Zipping
-- -------
-- | /O(min(m,n))/ Zip two vectors with the given function.
zipWith :: (Unbox a, Unbox b, Unbox c)
=> (a -> b -> c) -> Vector a -> Vector b -> Vector c
{-# INLINE zipWith #-}
zipWith = G.zipWith
-- | Zip three vectors with the given function.
zipWith3 :: (Unbox a, Unbox b, Unbox c, Unbox d)
=> (a -> b -> c -> d) -> Vector a -> Vector b -> Vector c -> Vector d
{-# INLINE zipWith3 #-}
zipWith3 = G.zipWith3
-- | Zip four vectors with the given function.
zipWith4 :: (Unbox a, Unbox b, Unbox c, Unbox d, Unbox e)
         => (a -> b -> c -> d -> e)
         -> Vector a -> Vector b -> Vector c -> Vector d -> Vector e
{-# INLINE zipWith4 #-}
zipWith4 = G.zipWith4
-- | Zip five vectors with the given function.
zipWith5 :: (Unbox a, Unbox b, Unbox c, Unbox d, Unbox e, Unbox f)
         => (a -> b -> c -> d -> e -> f)
         -> Vector a -> Vector b -> Vector c -> Vector d -> Vector e
         -> Vector f
{-# INLINE zipWith5 #-}
zipWith5 = G.zipWith5
-- | Zip six vectors with the given function.
zipWith6 :: (Unbox a, Unbox b, Unbox c, Unbox d, Unbox e, Unbox f, Unbox g)
         => (a -> b -> c -> d -> e -> f -> g)
         -> Vector a -> Vector b -> Vector c -> Vector d -> Vector e
         -> Vector f -> Vector g
{-# INLINE zipWith6 #-}
zipWith6 = G.zipWith6
-- | /O(min(m,n))/ Zip two vectors with a function that also takes the
-- elements' indices.
izipWith :: (Unbox a, Unbox b, Unbox c)
=> (Int -> a -> b -> c) -> Vector a -> Vector b -> Vector c
{-# INLINE izipWith #-}
izipWith = G.izipWith
-- | Zip three vectors and their indices with the given function.
izipWith3 :: (Unbox a, Unbox b, Unbox c, Unbox d)
=> (Int -> a -> b -> c -> d)
-> Vector a -> Vector b -> Vector c -> Vector d
{-# INLINE izipWith3 #-}
izipWith3 = G.izipWith3
-- | Zip four vectors and their indices with the given function.
izipWith4 :: (Unbox a, Unbox b, Unbox c, Unbox d, Unbox e)
          => (Int -> a -> b -> c -> d -> e)
          -> Vector a -> Vector b -> Vector c -> Vector d -> Vector e
{-# INLINE izipWith4 #-}
izipWith4 = G.izipWith4
-- | Zip five vectors and their indices with the given function.
izipWith5 :: (Unbox a, Unbox b, Unbox c, Unbox d, Unbox e, Unbox f)
          => (Int -> a -> b -> c -> d -> e -> f)
          -> Vector a -> Vector b -> Vector c -> Vector d -> Vector e
          -> Vector f
{-# INLINE izipWith5 #-}
izipWith5 = G.izipWith5
-- | Zip six vectors and their indices with the given function.
izipWith6 :: (Unbox a, Unbox b, Unbox c, Unbox d, Unbox e, Unbox f, Unbox g)
          => (Int -> a -> b -> c -> d -> e -> f -> g)
          -> Vector a -> Vector b -> Vector c -> Vector d -> Vector e
          -> Vector f -> Vector g
{-# INLINE izipWith6 #-}
izipWith6 = G.izipWith6
-- Monadic zipping
-- ---------------
-- | /O(min(m,n))/ Zip the two vectors with the monadic action and yield a
-- vector of results
zipWithM :: (Monad m, Unbox a, Unbox b, Unbox c)
=> (a -> b -> m c) -> Vector a -> Vector b -> m (Vector c)
{-# INLINE zipWithM #-}
zipWithM = G.zipWithM
-- | /O(min(m,n))/ Zip the two vectors with a monadic action that also takes
-- the element index and yield a vector of results
izipWithM :: (Monad m, Unbox a, Unbox b, Unbox c)
=> (Int -> a -> b -> m c) -> Vector a -> Vector b -> m (Vector c)
{-# INLINE izipWithM #-}
izipWithM = G.izipWithM
-- | /O(min(m,n))/ Zip the two vectors with the monadic action and ignore the
-- results
zipWithM_ :: (Monad m, Unbox a, Unbox b)
=> (a -> b -> m c) -> Vector a -> Vector b -> m ()
{-# INLINE zipWithM_ #-}
zipWithM_ = G.zipWithM_
-- | /O(min(m,n))/ Zip the two vectors with a monadic action that also takes
-- the element index and ignore the results
izipWithM_ :: (Monad m, Unbox a, Unbox b)
=> (Int -> a -> b -> m c) -> Vector a -> Vector b -> m ()
{-# INLINE izipWithM_ #-}
izipWithM_ = G.izipWithM_
-- Filtering
-- ---------
-- | /O(n)/ Drop elements that do not satisfy the predicate
filter :: Unbox a => (a -> Bool) -> Vector a -> Vector a
{-# INLINE filter #-}
filter = G.filter
-- | /O(n)/ Drop elements that do not satisfy the predicate which is applied to
-- values and their indices
ifilter :: Unbox a => (Int -> a -> Bool) -> Vector a -> Vector a
{-# INLINE ifilter #-}
ifilter = G.ifilter
-- | /O(n)/ Drop elements that do not satisfy the monadic predicate
filterM :: (Monad m, Unbox a) => (a -> m Bool) -> Vector a -> m (Vector a)
{-# INLINE filterM #-}
filterM = G.filterM
-- | /O(n)/ Yield the longest prefix of elements satisfying the predicate
-- without copying.
takeWhile :: Unbox a => (a -> Bool) -> Vector a -> Vector a
{-# INLINE takeWhile #-}
takeWhile = G.takeWhile
-- | /O(n)/ Drop the longest prefix of elements that satisfy the predicate
-- without copying.
dropWhile :: Unbox a => (a -> Bool) -> Vector a -> Vector a
{-# INLINE dropWhile #-}
dropWhile = G.dropWhile
-- Partitioning
-- -------------
-- | /O(n)/ Split the vector in two parts, the first one containing those
-- elements that satisfy the predicate and the second one those that don't. The
-- relative order of the elements is preserved at the cost of a sometimes
-- reduced performance compared to 'unstablePartition'.
partition :: Unbox a => (a -> Bool) -> Vector a -> (Vector a, Vector a)
{-# INLINE partition #-}
partition = G.partition
-- | /O(n)/ Split the vector in two parts, the first one containing those
-- elements that satisfy the predicate and the second one those that don't.
-- The order of the elements is not preserved but the operation is often
-- faster than 'partition'.
unstablePartition :: Unbox a => (a -> Bool) -> Vector a -> (Vector a, Vector a)
{-# INLINE unstablePartition #-}
unstablePartition = G.unstablePartition
-- | /O(n)/ Split the vector into the longest prefix of elements that satisfy
-- the predicate and the rest without copying.
span :: Unbox a => (a -> Bool) -> Vector a -> (Vector a, Vector a)
{-# INLINE span #-}
span = G.span
-- | /O(n)/ Split the vector into the longest prefix of elements that do not
-- satisfy the predicate and the rest without copying.
break :: Unbox a => (a -> Bool) -> Vector a -> (Vector a, Vector a)
{-# INLINE break #-}
break = G.break
-- Searching
-- ---------
infix 4 `elem`
-- | /O(n)/ Check if the vector contains an element
elem :: (Unbox a, Eq a) => a -> Vector a -> Bool
{-# INLINE elem #-}
elem = G.elem
infix 4 `notElem`
-- | /O(n)/ Check if the vector does not contain an element (inverse of 'elem')
notElem :: (Unbox a, Eq a) => a -> Vector a -> Bool
{-# INLINE notElem #-}
notElem = G.notElem
-- | /O(n)/ Yield 'Just' the first element matching the predicate or 'Nothing'
-- if no such element exists.
find :: Unbox a => (a -> Bool) -> Vector a -> Maybe a
{-# INLINE find #-}
find = G.find
-- | /O(n)/ Yield 'Just' the index of the first element matching the predicate
-- or 'Nothing' if no such element exists.
findIndex :: Unbox a => (a -> Bool) -> Vector a -> Maybe Int
{-# INLINE findIndex #-}
findIndex = G.findIndex
-- | /O(n)/ Yield the indices of elements satisfying the predicate in ascending
-- order.
findIndices :: Unbox a => (a -> Bool) -> Vector a -> Vector Int
{-# INLINE findIndices #-}
findIndices = G.findIndices
-- | /O(n)/ Yield 'Just' the index of the first occurrence of the given element or
-- 'Nothing' if the vector does not contain the element. This is a specialised
-- version of 'findIndex'.
elemIndex :: (Unbox a, Eq a) => a -> Vector a -> Maybe Int
{-# INLINE elemIndex #-}
elemIndex = G.elemIndex
-- | /O(n)/ Yield the indices of all occurrences of the given element in
-- ascending order. This is a specialised version of 'findIndices'.
elemIndices :: (Unbox a, Eq a) => a -> Vector a -> Vector Int
{-# INLINE elemIndices #-}
elemIndices = G.elemIndices
-- Folding
-- -------
-- | /O(n)/ Left fold
foldl :: Unbox b => (a -> b -> a) -> a -> Vector b -> a
{-# INLINE foldl #-}
foldl = G.foldl
-- | /O(n)/ Left fold on non-empty vectors
foldl1 :: Unbox a => (a -> a -> a) -> Vector a -> a
{-# INLINE foldl1 #-}
foldl1 = G.foldl1
-- | /O(n)/ Left fold with strict accumulator
foldl' :: Unbox b => (a -> b -> a) -> a -> Vector b -> a
{-# INLINE foldl' #-}
foldl' = G.foldl'
-- | /O(n)/ Left fold on non-empty vectors with strict accumulator
foldl1' :: Unbox a => (a -> a -> a) -> Vector a -> a
{-# INLINE foldl1' #-}
foldl1' = G.foldl1'
-- | /O(n)/ Right fold
foldr :: Unbox a => (a -> b -> b) -> b -> Vector a -> b
{-# INLINE foldr #-}
foldr = G.foldr
-- | /O(n)/ Right fold on non-empty vectors
foldr1 :: Unbox a => (a -> a -> a) -> Vector a -> a
{-# INLINE foldr1 #-}
foldr1 = G.foldr1
-- | /O(n)/ Right fold with a strict accumulator
foldr' :: Unbox a => (a -> b -> b) -> b -> Vector a -> b
{-# INLINE foldr' #-}
foldr' = G.foldr'
-- | /O(n)/ Right fold on non-empty vectors with strict accumulator
foldr1' :: Unbox a => (a -> a -> a) -> Vector a -> a
{-# INLINE foldr1' #-}
foldr1' = G.foldr1'
-- | /O(n)/ Left fold (function applied to each element and its index)
ifoldl :: Unbox b => (a -> Int -> b -> a) -> a -> Vector b -> a
{-# INLINE ifoldl #-}
ifoldl = G.ifoldl
-- | /O(n)/ Left fold with strict accumulator (function applied to each element
-- and its index)
ifoldl' :: Unbox b => (a -> Int -> b -> a) -> a -> Vector b -> a
{-# INLINE ifoldl' #-}
ifoldl' = G.ifoldl'
-- | /O(n)/ Right fold (function applied to each element and its index)
ifoldr :: Unbox a => (Int -> a -> b -> b) -> b -> Vector a -> b
{-# INLINE ifoldr #-}
ifoldr = G.ifoldr
-- | /O(n)/ Right fold with strict accumulator (function applied to each
-- element and its index)
ifoldr' :: Unbox a => (Int -> a -> b -> b) -> b -> Vector a -> b
{-# INLINE ifoldr' #-}
ifoldr' = G.ifoldr'
-- Specialised folds
-- -----------------
-- | /O(n)/ Check if all elements satisfy the predicate.
all :: Unbox a => (a -> Bool) -> Vector a -> Bool
{-# INLINE all #-}
all = G.all
-- | /O(n)/ Check if any element satisfies the predicate.
any :: Unbox a => (a -> Bool) -> Vector a -> Bool
{-# INLINE any #-}
any = G.any
-- | /O(n)/ Check if all elements are 'True'
and :: Vector Bool -> Bool
{-# INLINE and #-}
and = G.and
-- | /O(n)/ Check if any element is 'True'
or :: Vector Bool -> Bool
{-# INLINE or #-}
or = G.or
-- | /O(n)/ Compute the sum of the elements
sum :: (Unbox a, Num a) => Vector a -> a
{-# INLINE sum #-}
sum = G.sum
-- | /O(n)/ Compute the product of the elements
product :: (Unbox a, Num a) => Vector a -> a
{-# INLINE product #-}
product = G.product
-- | /O(n)/ Yield the maximum element of the vector. The vector may not be
-- empty.
maximum :: (Unbox a, Ord a) => Vector a -> a
{-# INLINE maximum #-}
maximum = G.maximum
-- | /O(n)/ Yield the maximum element of the vector according to the given
-- comparison function. The vector may not be empty.
maximumBy :: Unbox a => (a -> a -> Ordering) -> Vector a -> a
{-# INLINE maximumBy #-}
maximumBy = G.maximumBy
-- | /O(n)/ Yield the minimum element of the vector. The vector may not be
-- empty.
minimum :: (Unbox a, Ord a) => Vector a -> a
{-# INLINE minimum #-}
minimum = G.minimum
-- | /O(n)/ Yield the minimum element of the vector according to the given
-- comparison function. The vector may not be empty.
minimumBy :: Unbox a => (a -> a -> Ordering) -> Vector a -> a
{-# INLINE minimumBy #-}
minimumBy = G.minimumBy
-- | /O(n)/ Yield the index of the maximum element of the vector. The vector
-- may not be empty.
maxIndex :: (Unbox a, Ord a) => Vector a -> Int
{-# INLINE maxIndex #-}
maxIndex = G.maxIndex
-- | /O(n)/ Yield the index of the maximum element of the vector according to
-- the given comparison function. The vector may not be empty.
maxIndexBy :: Unbox a => (a -> a -> Ordering) -> Vector a -> Int
{-# INLINE maxIndexBy #-}
maxIndexBy = G.maxIndexBy
-- | /O(n)/ Yield the index of the minimum element of the vector. The vector
-- may not be empty.
minIndex :: (Unbox a, Ord a) => Vector a -> Int
{-# INLINE minIndex #-}
minIndex = G.minIndex
-- | /O(n)/ Yield the index of the minimum element of the vector according to
-- the given comparison function. The vector may not be empty.
minIndexBy :: Unbox a => (a -> a -> Ordering) -> Vector a -> Int
{-# INLINE minIndexBy #-}
minIndexBy = G.minIndexBy
-- Monadic folds
-- -------------
-- Monadic folds
-- -------------
-- All of these are thin wrappers delegating to the generic layer
-- (qualified @G@ — presumably "Data.Vector.Generic"; the import list is
-- outside this excerpt).  The INLINE pragmas make the wrappers free.
-- | /O(n)/ Monadic fold
foldM :: (Monad m, Unbox b) => (a -> b -> m a) -> a -> Vector b -> m a
{-# INLINE foldM #-}
foldM = G.foldM
-- | /O(n)/ Monadic fold (action applied to each element and its index)
ifoldM :: (Monad m, Unbox b) => (a -> Int -> b -> m a) -> a -> Vector b -> m a
{-# INLINE ifoldM #-}
ifoldM = G.ifoldM
-- | /O(n)/ Monadic fold over non-empty vectors
fold1M :: (Monad m, Unbox a) => (a -> a -> m a) -> Vector a -> m a
{-# INLINE fold1M #-}
fold1M = G.fold1M
-- | /O(n)/ Monadic fold with strict accumulator
foldM' :: (Monad m, Unbox b) => (a -> b -> m a) -> a -> Vector b -> m a
{-# INLINE foldM' #-}
foldM' = G.foldM'
-- | /O(n)/ Monadic fold with strict accumulator (action applied to each
-- element and its index)
ifoldM' :: (Monad m, Unbox b) => (a -> Int -> b -> m a) -> a -> Vector b -> m a
{-# INLINE ifoldM' #-}
ifoldM' = G.ifoldM'
-- | /O(n)/ Monadic fold over non-empty vectors with strict accumulator
fold1M' :: (Monad m, Unbox a) => (a -> a -> m a) -> Vector a -> m a
{-# INLINE fold1M' #-}
fold1M' = G.fold1M'
-- The @_@-suffixed variants below run the same folds purely for their
-- monadic effects and throw the final accumulator away.
-- | /O(n)/ Monadic fold that discards the result
foldM_ :: (Monad m, Unbox b) => (a -> b -> m a) -> a -> Vector b -> m ()
{-# INLINE foldM_ #-}
foldM_ = G.foldM_
-- | /O(n)/ Monadic fold that discards the result (action applied to each
-- element and its index)
ifoldM_ :: (Monad m, Unbox b) => (a -> Int -> b -> m a) -> a -> Vector b -> m ()
{-# INLINE ifoldM_ #-}
ifoldM_ = G.ifoldM_
-- | /O(n)/ Monadic fold over non-empty vectors that discards the result
fold1M_ :: (Monad m, Unbox a) => (a -> a -> m a) -> Vector a -> m ()
{-# INLINE fold1M_ #-}
fold1M_ = G.fold1M_
-- | /O(n)/ Monadic fold with strict accumulator that discards the result
foldM'_ :: (Monad m, Unbox b) => (a -> b -> m a) -> a -> Vector b -> m ()
{-# INLINE foldM'_ #-}
foldM'_ = G.foldM'_
-- | /O(n)/ Monadic fold with strict accumulator that discards the result
-- (action applied to each element and its index)
ifoldM'_ :: (Monad m, Unbox b)
         => (a -> Int -> b -> m a) -> a -> Vector b -> m ()
{-# INLINE ifoldM'_ #-}
ifoldM'_ = G.ifoldM'_
-- | /O(n)/ Monadic fold over non-empty vectors with strict accumulator
-- that discards the result
fold1M'_ :: (Monad m, Unbox a) => (a -> a -> m a) -> Vector a -> m ()
{-# INLINE fold1M'_ #-}
fold1M'_ = G.fold1M'_
-- Prefix sums (scans)
-- -------------------
-- Like the folds above, every scan delegates to the generic layer @G@;
-- the @'@ variants force the accumulator strictly at each step.
-- | /O(n)/ Prescan
--
-- @
-- prescanl f z = 'init' . 'scanl' f z
-- @
--
-- Example: @prescanl (+) 0 \<1,2,3,4\> = \<0,1,3,6\>@
--
prescanl :: (Unbox a, Unbox b) => (a -> b -> a) -> a -> Vector b -> Vector a
{-# INLINE prescanl #-}
prescanl = G.prescanl
-- | /O(n)/ Prescan with strict accumulator
prescanl' :: (Unbox a, Unbox b) => (a -> b -> a) -> a -> Vector b -> Vector a
{-# INLINE prescanl' #-}
prescanl' = G.prescanl'
-- | /O(n)/ Scan
--
-- @
-- postscanl f z = 'tail' . 'scanl' f z
-- @
--
-- Example: @postscanl (+) 0 \<1,2,3,4\> = \<1,3,6,10\>@
--
postscanl :: (Unbox a, Unbox b) => (a -> b -> a) -> a -> Vector b -> Vector a
{-# INLINE postscanl #-}
postscanl = G.postscanl
-- | /O(n)/ Scan with strict accumulator
postscanl' :: (Unbox a, Unbox b) => (a -> b -> a) -> a -> Vector b -> Vector a
{-# INLINE postscanl' #-}
postscanl' = G.postscanl'
-- | /O(n)/ Haskell-style scan
--
-- > scanl f z <x1,...,xn> = <y1,...,y(n+1)>
-- >   where y1 = z
-- >         yi = f y(i-1) x(i-1)
--
-- Example: @scanl (+) 0 \<1,2,3,4\> = \<0,1,3,6,10\>@
--
scanl :: (Unbox a, Unbox b) => (a -> b -> a) -> a -> Vector b -> Vector a
{-# INLINE scanl #-}
scanl = G.scanl
-- | /O(n)/ Haskell-style scan with strict accumulator
scanl' :: (Unbox a, Unbox b) => (a -> b -> a) -> a -> Vector b -> Vector a
{-# INLINE scanl' #-}
scanl' = G.scanl'
-- | /O(n)/ Scan over a non-empty vector
--
-- > scanl f <x1,...,xn> = <y1,...,yn>
-- >   where y1 = x1
-- >         yi = f y(i-1) xi
--
scanl1 :: Unbox a => (a -> a -> a) -> Vector a -> Vector a
{-# INLINE scanl1 #-}
scanl1 = G.scanl1
-- | /O(n)/ Scan over a non-empty vector with a strict accumulator
scanl1' :: Unbox a => (a -> a -> a) -> Vector a -> Vector a
{-# INLINE scanl1' #-}
scanl1' = G.scanl1'
-- | /O(n)/ Right-to-left prescan
--
-- @
-- prescanr f z = 'reverse' . 'prescanl' (flip f) z . 'reverse'
-- @
--
prescanr :: (Unbox a, Unbox b) => (a -> b -> b) -> b -> Vector a -> Vector b
{-# INLINE prescanr #-}
prescanr = G.prescanr
-- | /O(n)/ Right-to-left prescan with strict accumulator
prescanr' :: (Unbox a, Unbox b) => (a -> b -> b) -> b -> Vector a -> Vector b
{-# INLINE prescanr' #-}
prescanr' = G.prescanr'
-- | /O(n)/ Right-to-left scan
postscanr :: (Unbox a, Unbox b) => (a -> b -> b) -> b -> Vector a -> Vector b
{-# INLINE postscanr #-}
postscanr = G.postscanr
-- | /O(n)/ Right-to-left scan with strict accumulator
postscanr' :: (Unbox a, Unbox b) => (a -> b -> b) -> b -> Vector a -> Vector b
{-# INLINE postscanr' #-}
postscanr' = G.postscanr'
-- | /O(n)/ Right-to-left Haskell-style scan
scanr :: (Unbox a, Unbox b) => (a -> b -> b) -> b -> Vector a -> Vector b
{-# INLINE scanr #-}
scanr = G.scanr
-- | /O(n)/ Right-to-left Haskell-style scan with strict accumulator
scanr' :: (Unbox a, Unbox b) => (a -> b -> b) -> b -> Vector a -> Vector b
{-# INLINE scanr' #-}
scanr' = G.scanr'
-- | /O(n)/ Right-to-left scan over a non-empty vector
scanr1 :: Unbox a => (a -> a -> a) -> Vector a -> Vector a
{-# INLINE scanr1 #-}
scanr1 = G.scanr1
-- | /O(n)/ Right-to-left scan over a non-empty vector with a strict
-- accumulator
scanr1' :: Unbox a => (a -> a -> a) -> Vector a -> Vector a
{-# INLINE scanr1' #-}
scanr1' = G.scanr1'
-- Conversions - Lists
-- ------------------------
-- List conversions, delegated to the generic layer @G@.
-- | /O(n)/ Convert a vector to a list
toList :: Unbox a => Vector a -> [a]
{-# INLINE toList #-}
toList = G.toList
-- | /O(n)/ Convert a list to a vector
fromList :: Unbox a => [a] -> Vector a
{-# INLINE fromList #-}
fromList = G.fromList
-- | /O(n)/ Convert the first @n@ elements of a list to a vector
--
-- @
-- fromListN n xs = 'fromList' ('take' n xs)
-- @
fromListN :: Unbox a => Int -> [a] -> Vector a
{-# INLINE fromListN #-}
fromListN = G.fromListN
-- Conversions - Mutable vectors
-- -----------------------------
-- Freeze/thaw between immutable and mutable vectors.  The @unsafe@
-- variants are O(1) because they alias the underlying buffer instead of
-- copying it — hence the "may not be used after" caveats.
-- | /O(1)/ Unsafe convert a mutable vector to an immutable one without
-- copying. The mutable vector may not be used after this operation.
unsafeFreeze :: (Unbox a, PrimMonad m) => MVector (PrimState m) a -> m (Vector a)
{-# INLINE unsafeFreeze #-}
unsafeFreeze = G.unsafeFreeze
-- | /O(1)/ Unsafely convert an immutable vector to a mutable one without
-- copying. The immutable vector may not be used after this operation.
unsafeThaw :: (Unbox a, PrimMonad m) => Vector a -> m (MVector (PrimState m) a)
{-# INLINE unsafeThaw #-}
unsafeThaw = G.unsafeThaw
-- | /O(n)/ Yield a mutable copy of the immutable vector.
thaw :: (Unbox a, PrimMonad m) => Vector a -> m (MVector (PrimState m) a)
{-# INLINE thaw #-}
thaw = G.thaw
-- | /O(n)/ Yield an immutable copy of the mutable vector.
freeze :: (Unbox a, PrimMonad m) => MVector (PrimState m) a -> m (Vector a)
{-# INLINE freeze #-}
freeze = G.freeze
-- | /O(n)/ Copy an immutable vector into a mutable one. The two vectors must
-- have the same length. This is not checked.
unsafeCopy
  :: (Unbox a, PrimMonad m) => MVector (PrimState m) a -> Vector a -> m ()
{-# INLINE unsafeCopy #-}
unsafeCopy = G.unsafeCopy
-- | /O(n)/ Copy an immutable vector into a mutable one. The two vectors must
-- have the same length.
copy :: (Unbox a, PrimMonad m) => MVector (PrimState m) a -> Vector a -> m ()
{-# INLINE copy #-}
copy = G.copy
#define DEFINE_IMMUTABLE
#include "unbox-tuple-instances"
| thomie/vector | Data/Vector/Unboxed.hs | bsd-3-clause | 45,001 | 0 | 14 | 9,783 | 10,049 | 5,586 | 4,463 | -1 | -1 |
{-# LANGUAGE ScopedTypeVariables, OverloadedStrings #-}
import System.Environment
import Network
import Client
-- | Read a port number from the command line, connect to the local
-- server on that port, POST a fixed body, and print the response.
-- (A missing or non-numeric argument fails via the usual IO pattern-
-- match / readIO errors, as before.)
main :: IO ()
main = do
	args <- getArgs
	(portNum :: Int) : _ <- mapM readIO args
	server <- connectTo "localhost" (PortNumber (fromIntegral portNum))
	response <- httpPost server "I am client.\n"
	print response
| YoshikuniJujo/forest | subprojects/http-analysis/get/post.hs | bsd-3-clause | 282 | 0 | 11 | 49 | 87 | 43 | 44 | 9 | 1 |
{- |
Module : SAWScript.Crucible.JVM.MethodSpecIR
Description : Provides type-checked representation for Crucible/JVM function
specifications and functions for creating it from a SAWscript AST.
Maintainer : atomb
Stability : provisional
-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DoAndIfThenElse #-}
{-# LANGUAGE EmptyDataDecls #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE ImplicitParams #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE PatternGuards #-}
{-# LANGUAGE PolyKinds #-}
{-# LANGUAGE Rank2Types #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE StandaloneDeriving #-}
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE ViewPatterns #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
module SAWScript.Crucible.JVM.MethodSpecIR where
import Control.Lens
import qualified Prettyprinter as PPL
-- what4
import What4.ProgramLoc (ProgramLoc)
import qualified Lang.Crucible.FunctionHandle as Crucible (HandleAllocator)
-- crucible-jvm
import qualified Lang.Crucible.JVM as CJ
import qualified Lang.JVM.Codebase as CB
-- jvm-parser
import qualified Language.JVM.Parser as J
-- cryptol-saw-core
import Verifier.SAW.TypedTerm (TypedTerm)
import SAWScript.Crucible.Common
import qualified SAWScript.Crucible.Common.MethodSpec as MS
import qualified SAWScript.Crucible.Common.Setup.Type as Setup
--------------------------------------------------------------------------------
-- ** Language features
-- Type-level feature flags for the generic SetupValue language: JVM
-- setup values support the null reference but none of the aggregate,
-- global, cast, or ghost-state features.
type instance MS.HasSetupNull CJ.JVM = 'True
type instance MS.HasSetupGlobal CJ.JVM = 'False
type instance MS.HasSetupStruct CJ.JVM = 'False
type instance MS.HasSetupArray CJ.JVM = 'False
type instance MS.HasSetupElem CJ.JVM = 'False
type instance MS.HasSetupField CJ.JVM = 'False
type instance MS.HasSetupCast CJ.JVM = 'False
type instance MS.HasSetupUnion CJ.JVM = 'False
type instance MS.HasSetupGlobalInitializer CJ.JVM = 'False
type instance MS.HasGhostState CJ.JVM = 'False
-- Instantiations of the generic method-spec type families for JVM.
type JIdent = String -- FIXME(huffman): what to put here?
type instance MS.TypeName CJ.JVM = JIdent
type instance MS.ExtType CJ.JVM = J.Type
type instance MS.CastType CJ.JVM = ()
type instance MS.ResolvedState CJ.JVM = ()
--------------------------------------------------------------------------------
-- *** JVMMethodId
-- | Identifies a JVM method: its method key together with the class
-- that declares it.
data JVMMethodId =
  JVMMethodId
  { _jvmMethodKey :: J.MethodKey
  , _jvmClassName :: J.ClassName
  }
  deriving (Eq, Ord, Show)
makeLenses ''JVMMethodId
-- | The method's name, extracted from its key.
jvmMethodName :: Getter JVMMethodId String
jvmMethodName = jvmMethodKey . to J.methodKeyName
-- | The method key of a JVM method spec.
csMethodKey :: Lens' (MS.CrucibleMethodSpecIR CJ.JVM) J.MethodKey
csMethodKey = MS.csMethod . jvmMethodKey
-- | The method name of a JVM method spec.
csMethodName :: Getter (MS.CrucibleMethodSpecIR CJ.JVM) String
csMethodName = MS.csMethod . jvmMethodName
-- TODO: avoid intermediate 'String' values
-- | Render a method identifier as @ClassName.methodName@.
instance PPL.Pretty JVMMethodId where
  pretty (JVMMethodId key cls) =
    PPL.pretty (J.unClassName cls ++ "." ++ J.methodKeyName key)
type instance MS.MethodId CJ.JVM = JVMMethodId
--------------------------------------------------------------------------------
-- *** Allocation
-- | The kinds of heap allocation a JVM spec can make: a fresh object of
-- a given class, or an array with a length and element type.
data Allocation
  = AllocObject J.ClassName
  | AllocArray Int J.Type
  deriving (Eq, Ord, Show)
-- | The JVM type an allocation produces.
allocationType :: Allocation -> J.Type
allocationType = \case
  AllocObject cname  -> J.ClassType cname
  AllocArray _len ty -> J.ArrayType ty
-- TODO: We should probably use a more structured datatype (record), like in LLVM
type instance MS.AllocSpec CJ.JVM = (ProgramLoc, Allocation)
--------------------------------------------------------------------------------
-- *** PointsTo
type instance MS.PointsTo CJ.JVM = JVMPointsTo
-- | One heap assertion in a JVM spec: an instance field, a static
-- field, a single array element, or a whole array.  Each carries the
-- source location of the assertion and an optional right-hand side
-- (NOTE(review): the precise meaning of a 'Nothing' RHS is not visible
-- here — confirm against the setup/override code).
data JVMPointsTo
  = JVMPointsToField ProgramLoc MS.AllocIndex J.FieldId (Maybe (MS.SetupValue CJ.JVM))
  | JVMPointsToStatic ProgramLoc J.FieldId (Maybe (MS.SetupValue CJ.JVM))
  | JVMPointsToElem ProgramLoc MS.AllocIndex Int (Maybe (MS.SetupValue CJ.JVM))
  | JVMPointsToArray ProgramLoc MS.AllocIndex (Maybe TypedTerm)
-- | Do two points-to assertions constrain the same location?  Field
-- assertions overlap when they name the same allocation and field;
-- static assertions when they name the same field; element/array
-- assertions overlap when they touch the same allocation (and, for two
-- element assertions, the same index).  Assertions of different kinds
-- never overlap, except element-vs-array on the same allocation.
overlapPointsTo :: JVMPointsTo -> JVMPointsTo -> Bool
overlapPointsTo pt1 pt2 =
  case (pt1, pt2) of
    (JVMPointsToField _ p1 f1 _, JVMPointsToField _ p2 f2 _) -> p1 == p2 && f1 == f2
    (JVMPointsToField{}, _) -> False
    (JVMPointsToStatic _ f1 _, JVMPointsToStatic _ f2 _) -> f1 == f2
    (JVMPointsToStatic{}, _) -> False
    (JVMPointsToElem _ p1 i1 _, JVMPointsToElem _ p2 i2 _) -> p1 == p2 && i1 == i2
    (JVMPointsToElem _ p1 _ _, JVMPointsToArray _ p2 _) -> p1 == p2
    (JVMPointsToElem{}, _) -> False
    (JVMPointsToArray _ p1 _, JVMPointsToElem _ p2 _ _) -> p1 == p2
    (JVMPointsToArray _ p1 _, JVMPointsToArray _ p2 _) -> p1 == p2
    (JVMPointsToArray{}, _) -> False
-- | Render a points-to assertion for diagnostics in the shape
-- @location points to value@, printing @\<unspecified\>@ when the
-- right-hand side is absent.
ppPointsTo :: JVMPointsTo -> PPL.Doc ann
ppPointsTo =
  \case
    JVMPointsToField _loc ptr fid val ->
      MS.ppAllocIndex ptr <> PPL.pretty "." <> PPL.pretty (J.fieldIdName fid)
      PPL.<+> PPL.pretty "points to"
      PPL.<+> opt MS.ppSetupValue val
    JVMPointsToStatic _loc fid val ->
      PPL.pretty (J.unClassName (J.fieldIdClass fid)) <> PPL.pretty "." <> PPL.pretty (J.fieldIdName fid)
      PPL.<+> PPL.pretty "points to"
      PPL.<+> opt MS.ppSetupValue val
    JVMPointsToElem _loc ptr idx val ->
      MS.ppAllocIndex ptr <> PPL.pretty "[" <> PPL.pretty idx <> PPL.pretty "]"
      PPL.<+> PPL.pretty "points to"
      PPL.<+> opt MS.ppSetupValue val
    JVMPointsToArray _loc ptr val ->
      MS.ppAllocIndex ptr
      PPL.<+> PPL.pretty "points to"
      PPL.<+> opt MS.ppTypedTerm val
  where
    -- apply the given printer, or fall back to the placeholder
    opt = maybe (PPL.pretty "<unspecified>")
instance PPL.Pretty JVMPointsTo where
  pretty = ppPointsTo
--------------------------------------------------------------------------------
-- *** JVMCrucibleContext
type instance MS.Codebase CJ.JVM = CB.Codebase
-- | The ambient context a JVM verification runs in: the JVM class, the
-- codebase it came from, the crucible-jvm translation context, an
-- (existentially wrapped) online solver backend, and the handle
-- allocator.
data JVMCrucibleContext =
  JVMCrucibleContext
  { _jccJVMClass :: J.Class
  , _jccCodebase :: CB.Codebase
  , _jccJVMContext :: CJ.JVMContext
  , _jccBackend :: SomeOnlineBackend
  , _jccHandleAllocator :: Crucible.HandleAllocator
  }
makeLenses ''JVMCrucibleContext
type instance MS.CrucibleContext CJ.JVM = JVMCrucibleContext
-- | Run a continuation with the context's solver backend, unpacking
-- the existential 'SomeOnlineBackend' wrapper.  The rank-2 type lets
-- the continuation work for whichever solver the context holds.
jccWithBackend ::
  JVMCrucibleContext ->
  (forall solver. OnlineSolver solver => Backend solver -> a) ->
  a
jccWithBackend cc k =
  case cc^.jccBackend of SomeOnlineBackend bak -> k bak
-- | Extract the symbolic-expression builder from the context's backend.
jccSym :: Getter JVMCrucibleContext Sym
jccSym = to (\jcc -> jccWithBackend jcc backendGetSym)
--------------------------------------------------------------------------------
-- | Build the default (empty) method-spec skeleton for a method.  For
-- instance methods an implicit receiver of the declaring class type is
-- prepended to the argument types; static methods get none.
initialDefCrucibleMethodSpecIR ::
  CB.Codebase ->
  J.ClassName ->
  J.Method ->
  ProgramLoc ->
  MS.CrucibleMethodSpecIR CJ.JVM
initialDefCrucibleMethodSpecIR cb cname method loc =
  MS.makeCrucibleMethodSpecIR methId argTys retTy loc cb
  where
    methId = JVMMethodId (J.methodKey method) cname
    retTy = J.methodReturnType method
    receiverTys
      | J.methodIsStatic method = []
      | otherwise = [J.ClassType cname]
    argTys = receiverTys ++ J.methodParameterTypes method
-- | Initial setup-monad state for the given class/method pair: wraps
-- the default method spec in a fresh Crucible setup state.
initialCrucibleSetupState ::
  JVMCrucibleContext ->
  (J.Class, J.Method) ->
  ProgramLoc ->
  Setup.CrucibleSetupState CJ.JVM
initialCrucibleSetupState cc (cls, method) loc =
  Setup.makeCrucibleSetupState () cc initialSpec
  where
    initialSpec =
      initialDefCrucibleMethodSpecIR (cc ^. jccCodebase) (J.className cls) method loc
--------------------------------------------------------------------------------
{-
-- | Represent `CrucibleMethodSpecIR` as a function term in SAW-Core.
methodSpecToTerm :: SharedContext -> CrucibleMethodSpecIR -> MaybeT IO Term
methodSpecToTerm sc spec =
-- 1. fill in the post-state user variable holes with final
-- symbolic state
let _ppts = _csPointsTos $ _csPostState $ instantiateUserVars spec
-- 2. get the free variables in post points to's (note: these
-- should be contained in variables bound by pre-points-tos)
-- 3. abstract the free variables in each post-points-to
-- 4. put every abstracted post-points-to in a tuple
-- 5. Create struct type with fields being names of free variables
-- 6. Create a lambda term bound to a struct-typed variable that returns the tuple
in lift $ scLambda sc undefined undefined undefined
-- | Rewrite the `csPostPointsTos` to substitute the elements of the
-- final symbolic state for the fresh variables created by the user in
-- the post-state.
instantiateUserVars :: CrucibleMethodSpecIR -> CrucibleMethodSpecIR
instantiateUserVars _spec = undefined
-}
| GaloisInc/saw-script | src/SAWScript/Crucible/JVM/MethodSpecIR.hs | bsd-3-clause | 8,640 | 0 | 17 | 1,643 | 1,774 | 949 | 825 | 163 | 10 |
module Generics.Spine (
module Generics.Spine.Base
) where
import Generics.Spine.Base
| spl/spine | src/Generics/Spine.hs | bsd-3-clause | 90 | 0 | 5 | 13 | 21 | 14 | 7 | 3 | 0 |
-- |Tests execution of code generated by the contract compiler by running it
-- on Rhino.
--
-- The JsContracts library must be built and installed. Rhino's js.jar must
-- be in your CLASSPATH.
module Execution where
import Test.HUnit.Base
import Test.HUnit.Text
import System.Cmd
import System.Exit
import BrownPLT.JavaScript.Contracts.Compiler
import BrownPLT.JavaScript.Contracts.Template
import BrownPLT.JavaScript.Contracts.Parser
import BrownPLT.JavaScript.Parser (parseJavaScriptFromFile)
import BrownPLT.JavaScript.PrettyPrint (renderStatements)
-- | Expand every @test(...)@ and @testExn(...)@ call in a JavaScript
-- test-suite template.  The expression under test is wrapped in a thunk
-- so it is evaluated inside the harness (where contract violations can
-- be caught), not at splice time.
expandTests :: String -> String
expandTests testSuite = renderTemplate
  $ expandCall "testExn" expandTest
  $ expandCall "test" expandTest (stmtTemplate testSuite) where
    -- exactly two arguments: the tried expression and the expected value
    expandTest [try,expected] = [thunkExpr try, expected]
    expandTest _ = error "expandTests: invalid number of arguments to test"
-- | Compile @module.js@ against its interface file (same path with an
-- extra @i@), splice the expanded interaction tests after it, and run
-- the whole script under Rhino.  The assertion fails — and dumps the
-- generated script for debugging — if Rhino exits non-zero.
testExecution :: String -- module.js
              -> String -- interactions.js
              -> Assertion
testExecution implPath interactionsJs = do
  implStmts <- parseJavaScriptFromFile implPath
  ifaceDecls <- parseInterface (implPath ++ "i")
  instrumented <- compile' implStmts ifaceDecls
  interactionSrc <- readFile interactionsJs
  let js = "window = {};\n" ++ (renderStatements [instrumented]) ++
           expandTests interactionSrc
  exitCode <- rawSystem "java" ["org.mozilla.javascript.tools.shell.Main","-e",js]
  case exitCode of
    ExitSuccess -> return ()
    ExitFailure _ -> do
      putStrLn js
      assertFailure "failed"
-- | Exercise plain function calls through a contracted module.
testCalls = TestLabel "test function calls" $ TestCase $ do
  testExecution "moduleFunctions.js" "testCalls.js"
  return ()
-- | The full execution-test suite of this module.
allTests = TestList
  [ testCalls
  ]
-- | Entry point used by the test driver: hand back the suite.
main = return allTests
| brownplt/javascript-contracts | tests/Execution.hs | bsd-3-clause | 1,661 | 0 | 14 | 295 | 367 | 191 | 176 | 38 | 2 |
--------------------------------------------------------------------------------------
-- Testing
--------------------------------------------------------------------------------------
module Main where
import Types
import Game
import AI
import Control.Monad
import Control.Monad.Random
-- | Uniformly choose a valid index into a list of the given length.
randIndex :: RandomGen r => Int -> Rand r Int
randIndex count = getRandomR (0, count - 1)
-- | Pick a uniformly random legal move for the current position.
randMove :: RandomGen r => Game -> Rand r Move
randMove game = do
  let candidates = legalSquares game
  choice <- randIndex (length candidates)
  return (move (candidates !! choice))
-- | Play a game to completion, alternating who moves by the parity of
-- @n@: on odd @n@ the AI picks the move ('nextMove' with search depth
-- 3), on even @n@ a uniformly random legal move is played.  Per 'main'
-- below, starting at @n = 1@ corresponds to the computer playing black
-- and @n = 0@ to the computer playing white.
-- NOTE(review): @nextMove 3 g' g'@ passes the same game value twice —
-- presumably that matches 'nextMove''s signature in the AI module;
-- confirm.
playGame :: RandomGen r => Int -> Rand r Game -> Rand r Game
playGame n g = do
  -- run the action to inspect the current board
  g'@(Game _ b) <- g
  if isOver b
    then g
    else if odd n
      then playGame (n + 1) (return $ (nextMove 3 g' g'))
      else do
        m <- randMove g'
        playGame (n + 1) (return $ m g')
-- | Play ten games with the computer as black and ten as white against
-- a uniformly random opponent, then report how many the computer won.
main :: IO ()
main = do
  blackGames <- replicateM 10 (evalRandIO $ playGame 1 (return newGame))
  whiteGames <- replicateM 10 (evalRandIO $ playGame 0 (return newGame))
  let blackResults = map (findWinner . board) blackGames
      blackWins = sum $ map (oneIfEq Black) blackResults
      whiteResults = map (findWinner . board) whiteGames
      whiteWins = sum $ map (oneIfEq White) whiteResults
  print $ "Computer playing black wins "
    ++ show blackWins
    ++ " out of 10 games against random play."
  print $ "Computer playing white wins "
    ++ show whiteWins
    ++ " out of 10 games against random play."
| jeffreyrosenbluth/Othello | tests/Test.hs | bsd-3-clause | 1,438 | 0 | 14 | 346 | 519 | 260 | 259 | 38 | 3 |
module Sexy.Instances.Monad.List () where
import Sexy.Classes (Monad(..), Monoid(..))
import Sexy.Instances.Monoid.List ()
import Sexy.Instances.Applicative.List ()
-- | Lists form a Monad under Sexy's join-based 'Monad' class:
-- flattening one level of nesting is exactly list concatenation.
instance Monad [] where
  -- join :: [[a]] -> [a]
  join = concat
| DanBurton/sexy | src/Sexy/Instances/Monad/List.hs | bsd-3-clause | 234 | 0 | 6 | 34 | 68 | 45 | 23 | 6 | 0 |
module Wikirick.Repository where
import Control.Exception hiding (Handler)
import Control.Monad.CatchIO
import qualified Data.ByteString as BS
import qualified Data.Text as T
import qualified Data.Time as Time
import Data.Typeable
import Snap
import Wikirick.Import
-- | Metadata recorded for one edit of an article.
data EditLog = EditLog
  { _editDate :: Time.UTCTime  -- when the edit happened (UTC)
  , _editComment :: T.Text     -- the author's edit comment
  } deriving (Show, Eq)
makeLenses ''EditLog
-- | A wiki article: title, raw source text, and — when available — a
-- revision number and the matching edit-log entry.
data Article = Article
  { _articleTitle :: T.Text            -- page title
  , _articleSource :: T.Text           -- raw article source
  , _articleRevision :: Maybe Integer  -- revision number, when known
  , _editLog :: Maybe EditLog          -- edit metadata, when known
  } deriving (Show, Eq)
makeLenses ''Article
-- | An empty, untitled article with no revision or edit history.
instance Default Article where
  def = Article
    { _articleTitle = ""
    , _articleSource = ""
    , _articleRevision = Nothing
    , _editLog = Nothing
    }
-- | A record-of-functions interface to the article store; a concrete
-- backend supplies each operation.
data Repository = Repository
  { _fetchArticle :: MonadCatchIO m => T.Text -> m Article
    -- ^ look up an article by title
  , _fetchRevision :: MonadCatchIO m => T.Text -> Integer -> m Article
    -- ^ look up a specific revision of an article
  , _postArticle :: MonadCatchIO m => Article -> m ()
    -- ^ store an article
  , _fetchAllArticleTitles :: MonadCatchIO m => m [T.Text]
    -- ^ list the titles of all stored articles
  }
-- | Look up an article by title via the 'Repository' held in state.
fetchArticle :: (MonadState Repository m, MonadCatchIO m) => T.Text -> m Article
fetchArticle title = do
  self <- get
  _fetchArticle self title
-- | Look up a specific revision of an article.
fetchRevision :: (MonadState Repository m, MonadCatchIO m) => T.Text -> Integer -> m Article
fetchRevision title rev = do
  self <- get
  _fetchRevision self title rev
-- | Store an article through the repository in state.
postArticle :: (MonadState Repository m, MonadCatchIO m) => Article -> m ()
postArticle article = do
  self <- get
  _postArticle self article
-- | List every stored article title.
fetchAllArticleTitles :: (MonadState Repository m, MonadCatchIO m) => m [T.Text]
fetchAllArticleTitles = do
  self <- get
  _fetchAllArticleTitles self
-- | Failures the repository operations can raise.
data RepositoryException
  = ArticleNotFound      -- no article with the requested title
  | InvalidRevision      -- the requested revision does not exist
  | RepositoryException BS.ByteString deriving -- catch-all with a backend message
  ( Eq
  , Show
  , Typeable
  )
instance Exception RepositoryException
| keitax/wikirick | src/Wikirick/Repository.hs | bsd-3-clause | 1,697 | 0 | 12 | 304 | 533 | 288 | 245 | -1 | -1 |
module FP.Prelude.Monads where
import FP.Prelude.Core
import FP.Prelude.Effects
import FP.Prelude.Constraints
import FP.Prelude.Morphism
import FP.Prelude.Lattice
-- E and I effects can be implemented by combining unit, discard and commute
-- All effects are implemented this way except for continuation effects
-- | Lift a computation under one fresh transformer layer.
class FunctorUnit (t ∷ (★ → ★) → (★ → ★)) where funit ∷ (Functor m) ⇒ m ↝ t m
-- | Collapse two identical transformer layers into one.
class FunctorDiscard (t ∷ (★ → ★) → (★ → ★)) where fdiscard ∷ (Functor m) ⇒ t (t m) ↝ t m
-- For commuting effects
-- | Map a natural transformation over the monad underneath the transformer.
class FunctorFunctor (t ∷ (★ → ★) → (★ → ★)) where fmap ∷ m ↝ n → t m ↝ t n
-- | Like 'fmap' but requiring the transformation in both directions.
class FunctorIsoFunctor (t ∷ (★ → ★) → (★ → ★)) where fisomap ∷ (m ↝ n,n ↝ m) → (t m ↝ t n)
-- # ID
-- | The identity monad: a bare value with no effects.  Order/lattice
-- and monoid structure are derived pointwise from the wrapped value.
newtype ID a = ID { runID ∷ a }
  deriving
  ( Eq,Ord
  , POrd,Bot,Join,Meet,Top,JoinLattice,Monoid
  )
-- | Mapping over 'ID' just applies the function to the wrapped value.
instance Functor ID where
  map ∷ (a → b) → ID a → ID b
  map f (ID a) = ID (f a)
-- | Effectful mapping over 'ID': run the action on the wrapped value
-- and rewrap its result.
instance FunctorM ID where
  mapM ∷ (Monad m) ⇒ (a → m b) → ID a → m (ID b)
  mapM f (ID a) = map ID (f a)
-- | Bind for 'ID' simply unwraps the value and applies the continuation.
instance Monad ID where
  return ∷ a → ID a
  return = ID
  (≫=) ∷ ID a → (a → ID b) → ID b
  ID a ≫= k = k a
-- Constraint witnesses: 'ID' preserves each of these structures
-- pointwise ('W' reifies the dictionary in this prelude).
instance Functorial Bot ID where functorial = W
instance Functorial Join ID where functorial = W
instance Functorial Meet ID where functorial = W
instance Functorial Top ID where functorial = W
instance Functorial JoinLattice ID where functorial = W
instance Functorial Monoid ID where functorial = W
-- # Failure
-- Base Effect
-- | The failure effect over the identity monad, i.e. 'Maybe' in a
-- transformer coat.
type Failure = FailureT ID
failure ∷ Maybe a → Failure a
failure = abortMaybe
runFailure ∷ Failure a → Maybe a
runFailure = runID ∘ runFailureT
-- Commuting with self
-- | Swap two adjacent failure layers: the failure carried by one layer
-- moves to the other, so each 'Nothing' stays with "its" transformer
-- after the swap.
commuteFailure ∷ (Functor m) ⇒ FailureT (FailureT m) ↝ FailureT (FailureT m)
commuteFailure aMM = FailureT $ FailureT $ ff ^$ runFailureT $ runFailureT aMM
  where
    ff ∷ Maybe (Maybe a) → Maybe (Maybe a)
    ff Nothing = Just Nothing
    ff (Just Nothing) = Nothing
    ff (Just (Just a)) = Just (Just a)
-- Functor and Monad
-- A FailureT computation is @m (Maybe a)@; 'Nothing' short-circuits bind.
instance (Functor m) ⇒ Functor (FailureT m) where
  map ∷ (a → b) → FailureT m a → FailureT m b
  map f = FailureT ∘ map (map f) ∘ runFailureT
instance (Monad m) ⇒ Monad (FailureT m) where
  return ∷ a → FailureT m a
  return = FailureT ∘ return ∘ Just
  (≫=) ∷ FailureT m a → (a → FailureT m b) → FailureT m b
  aM ≫= k = FailureT $ do
    aM' ← runFailureT aM
    case aM' of
      Nothing → return Nothing
      Just a → runFailureT $ k a
-- Higher Functor
instance FunctorUnit FailureT where
  funit ∷ (Functor m) ⇒ m ↝ FailureT m
  funit = FailureT ∘ map Just
instance FunctorDiscard FailureT where
  fdiscard ∷ (Functor m) ⇒ FailureT (FailureT m) ↝ FailureT m
  fdiscard = FailureT ∘ ff ^∘ runFailureT ∘ runFailureT
    where
      -- collapse the two Maybe layers into one
      ff ∷ Maybe (Maybe a) → Maybe a
      ff Nothing = Nothing
      ff (Just aM) = aM
instance FunctorFunctor FailureT where
  fmap ∷ (m ↝ n) → FailureT m ↝ FailureT n
  fmap f = FailureT ∘ f ∘ runFailureT
-- MonadMonoid and MonadJoin
-- Monoid/join structure is lifted straight through the transformer.
instance (MonadMonoid m) ⇒ MonadMonoid (FailureT m) where
  mzero ∷ FailureT m a
  mzero = FailureT mzero
  (<⧺>) ∷ FailureT m a → FailureT m a → FailureT m a
  aM₁ <⧺> aM₂ = FailureT $ runFailureT aM₁ <⧺> runFailureT aM₂
instance (MonadBot m) ⇒ MonadBot (FailureT m) where
  mbot ∷ FailureT m a
  mbot = FailureT mbot
instance (MonadJoin m) ⇒ MonadJoin (FailureT m) where
  (<⊔>) ∷ FailureT m a → FailureT m a → FailureT m a
  aM₁ <⊔> aM₂ = FailureT $ runFailureT aM₁ <⊔> runFailureT aM₂
instance (MonadJoinLattice m) ⇒ MonadJoinLattice (FailureT m)
-- Failure Effect
-- E(liminate)/I(ntroduce) a failure layer, built from discard/unit plus
-- the self-commute above — the pattern promised at the top of the file.
instance (Functor m) ⇒ MonadFailure (FailureT m) where
  failureE ∷ FailureT (FailureT m) ↝ FailureT m
  failureE = fdiscard ∘ commuteFailure
  failureI ∷ FailureT m ↝ FailureT (FailureT m)
  failureI = commuteFailure ∘ funit
-- Maybe Failure Effect
-- Plain 'Maybe' gets the effect by round-tripping through 'Failure'.
instance MonadFailure Maybe where
  failureE ∷ FailureT Maybe ↝ Maybe
  failureE = runFailure ∘ failureE ∘ fmap failure
  failureI ∷ Maybe ↝ FailureT Maybe
  failureI = fmap runFailure ∘ failureI ∘ failure
-- # Error
-- Base Effect
-- | The error effect over the identity monad, i.e. @e ⨄ a@ in a
-- transformer coat.
type Error e = ErrorT e ID
runError ∷ Error e a → e ⨄ a
runError = runID ∘ runErrorT
-- Commuting with self
-- | Swap two adjacent error layers; each 'Left' moves to the other
-- layer so errors stay with "their" transformer after the swap.
errorCommute ∷ (Functor m) ⇒ ErrorT e (ErrorT e m) ↝ ErrorT e (ErrorT e m)
errorCommute = ErrorT ∘ ErrorT ∘ ff ^∘ runErrorT ∘ runErrorT
  where
    ff ∷ e ⨄ (e ⨄ a) → e ⨄ (e ⨄ a)
    ff (Left e) = Right (Left e)
    ff (Right (Left e)) = Left e
    ff (Right (Right a)) = Right $ Right a
-- Functor and Monad
-- An ErrorT computation is @m (e ⨄ a)@; 'Left' short-circuits bind.
instance (Functor m) ⇒ Functor (ErrorT e m) where
  map ∷ (a → b) → ErrorT e m a → ErrorT e m b
  map f aM = ErrorT $ mapRight f ^$ runErrorT aM
instance (Monad m) ⇒ Monad (ErrorT e m) where
  return ∷ a → ErrorT e m a
  return = ErrorT ∘ return ∘ Right
  (≫=) ∷ ErrorT e m a → (a → ErrorT e m b) → ErrorT e m b
  aM ≫= k = ErrorT $ do
    aeM ← runErrorT aM
    case aeM of
      Left e → return $ Left e
      Right a → runErrorT $ k a
-- Higher Functor
instance FunctorUnit (ErrorT e) where
  funit ∷ (Functor m) ⇒ m ↝ ErrorT e m
  funit aM = ErrorT $ Right ^$ aM
instance FunctorDiscard (ErrorT e) where
  fdiscard ∷ (Functor m) ⇒ ErrorT e (ErrorT e m) ↝ ErrorT e m
  fdiscard = ErrorT ∘ ff ^∘ runErrorT ∘ runErrorT
    where
      -- collapse the two sum layers into one
      ff (Left e) = Left e
      ff (Right ea) = ea
instance FunctorFunctor (ErrorT e) where
  fmap ∷ m ↝ n → ErrorT e m ↝ ErrorT e n
  fmap f = ErrorT ∘ f ∘ runErrorT
-- MonadMonoid and MonadJoin
-- Monoid/join structure is lifted straight through the transformer.
instance (MonadMonoid m) ⇒ MonadMonoid (ErrorT e m) where
  mzero ∷ ErrorT e m a
  mzero = ErrorT mzero
  (<⧺>) ∷ ErrorT e m a → ErrorT e m a → ErrorT e m a
  aM₁ <⧺> aM₂ = ErrorT $ runErrorT aM₁ <⧺> runErrorT aM₂
instance (MonadBot m) ⇒ MonadBot (ErrorT e m) where
  mbot ∷ ErrorT e m a
  mbot = ErrorT mbot
instance (MonadJoin m) ⇒ MonadJoin (ErrorT e m) where
  (<⊔>) ∷ ErrorT e m a → ErrorT e m a → ErrorT e m a
  aM₁ <⊔> aM₂ = ErrorT $ runErrorT aM₁ <⊔> runErrorT aM₂
-- Error Effect
-- E/I for the error layer, via discard/unit plus the self-commute.
instance (Functor m) ⇒ MonadError e (ErrorT e m) where
  errorE ∷ ErrorT e (ErrorT e m) ↝ ErrorT e m
  errorE = fdiscard ∘ errorCommute
  errorI ∷ ErrorT e m ↝ ErrorT e (ErrorT e m)
  errorI = errorCommute ∘ funit
-- Sum Error Effect
-- The bare sum type gets the effect by round-tripping through 'Error'.
instance MonadError e ((⨄) e) where
  errorE ∷ ErrorT e ((⨄) e) ↝ (⨄) e
  errorE = runError ∘ errorE ∘ fmap throwSum
  errorI ∷ (⨄) e ↝ ErrorT e ((⨄) e)
  errorI = fmap runError ∘ errorI ∘ throwSum
-- # Reader
-- Base Effect
-- | The reader effect over the identity monad, i.e. @r → a@ in a
-- transformer coat.
type Reader r = ReaderT r ID
reader ∷ (r → a) → Reader r a
reader f = ReaderT $ ID ∘ f
runReader ∷ Reader r a → r → a
runReader = runID ∘∘ runReaderT
runReaderWith ∷ r → Reader r a → a
runReaderWith = flip runReader
-- Commuting with self
-- | Swap two adjacent reader layers by flipping which environment is
-- consumed first; no constraint on @m@ is needed.
readerCommute ∷ ReaderT r₁ (ReaderT r₂ m) ↝ ReaderT r₂ (ReaderT r₁ m)
readerCommute aMM = ReaderT $ \ r₂ → ReaderT $ \ r₁ → runReaderTWith r₂ $ runReaderTWith r₁ aMM
-- Functor and Monad
-- The same environment is threaded to both sides of bind.
instance (Functor m) ⇒ Functor (ReaderT r m) where
  map ∷ (a → b) → ReaderT r m a → ReaderT r m b
  map f aM = ReaderT $ map f ∘ runReaderT aM
instance (Monad m) ⇒ Monad (ReaderT r m) where
  return ∷ a → ReaderT r m a
  return = ReaderT ∘ const ∘ return
  (≫=) ∷ ReaderT r m a → (a → ReaderT r m b) → ReaderT r m b
  aM ≫= k = ReaderT $ \ r → runReaderTWith r ∘ k *$ runReaderTWith r aM
-- Higher Functor
instance FunctorUnit (ReaderT r) where
  funit ∷ (Functor m) ⇒ m ↝ ReaderT r m
  funit = ReaderT ∘ const
instance FunctorDiscard (ReaderT r) where
  -- both layers receive the same environment
  fdiscard ∷ (Functor m) ⇒ ReaderT r (ReaderT r m) ↝ ReaderT r m
  fdiscard aMM = ReaderT $ \ r → runReaderTWith r $ runReaderTWith r aMM
instance FunctorFunctor (ReaderT r) where
  fmap ∷ (m ↝ n) → (ReaderT r m ↝ ReaderT r n)
  fmap f aM = ReaderT $ \ r → f $ runReaderTWith r aM
-- MonadMonoid and Join
-- Monoid/join structure is lifted pointwise over the environment.
instance (MonadMonoid m) ⇒ MonadMonoid (ReaderT r m) where
  mzero ∷ ReaderT r m a
  mzero = ReaderT $ const mzero
  (<⧺>) ∷ ReaderT r m a → ReaderT r m a → ReaderT r m a
  aM₁ <⧺> aM₂ = ReaderT $ \ r → runReaderT aM₁ r <⧺> runReaderT aM₂ r
instance (MonadBot m) ⇒ MonadBot (ReaderT r m) where
  mbot ∷ ReaderT r m a
  mbot = ReaderT $ const mbot
instance (MonadJoin m) ⇒ MonadJoin (ReaderT r m) where
  (<⊔>) ∷ ReaderT r m a → ReaderT r m a → ReaderT r m a
  aM₁ <⊔> aM₂ = ReaderT $ \ r → runReaderT aM₁ r <⊔> runReaderT aM₂ r
instance (MonadJoinLattice m) ⇒ MonadJoinLattice (ReaderT r m)
-- Reader Effect
-- E/I for the reader layer, via discard/unit plus the self-commute.
instance (Functor m) ⇒ MonadReader r (ReaderT r m) where
  readerE ∷ ReaderT r (ReaderT r m) ↝ ReaderT r m
  readerE = fdiscard ∘ readerCommute
  readerI ∷ ReaderT r m ↝ ReaderT r (ReaderT r m)
  readerI = readerCommute ∘ funit
-- Base Reader Effect
-- Plain functions get the effect by round-tripping through 'Reader'.
instance MonadReader r ((→) r) where
  readerE ∷ ReaderT r ((→) r) ↝ (→) r
  readerE = runReader ∘ readerE ∘ fmap reader
  readerI ∷ (→) r ↝ ReaderT r ((→) r)
  readerI = fmap runReader ∘ readerI ∘ reader
-- # Writer
-- Base Effect
-- | The writer effect over the identity monad, i.e. @(o,a)@ in a
-- transformer coat.
type Writer o = WriterT o ID
writer ∷ (o,a) → Writer o a
writer = WriterT ∘ ID
runWriter ∷ Writer o a → (o,a)
runWriter = runID ∘ runWriterT
execWriter ∷ Writer o a → o
execWriter = fst ∘ runWriter
-- Commuting with self
-- | Swap two adjacent writer layers by swapping the two output
-- components; the payload is untouched.
writerCommute ∷ ∀ m o₁ o₂. (Functor m) ⇒ WriterT o₁ (WriterT o₂ m) ↝ WriterT o₂ (WriterT o₁ m)
writerCommute aMM = WriterT $ WriterT $ ff ^$ runWriterT $ runWriterT aMM
  where
    ff ∷ (o₂,(o₁,a)) → (o₁,(o₂,a))
    ff (o₂,(o₁,a)) = (o₁,(o₂,a))
-- Functor and Monad
-- A WriterT computation is @m (o,a)@; bind concatenates the outputs.
instance (Functor m) ⇒ Functor (WriterT o m) where
  map ∷ (a → b) → WriterT o m a → WriterT o m b
  map f = WriterT ∘ mapSnd f ^∘ runWriterT
instance (Monad m,Monoid o) ⇒ Monad (WriterT o m) where
  return ∷ a → WriterT o m a
  return = WriterT ∘ return ∘ (null,)
  (≫=) ∷ WriterT o m a → (a → WriterT o m b) → WriterT o m b
  aM ≫= k = WriterT $ do
    (o₁,a) ← runWriterT aM
    (o₂,b) ← runWriterT $ k a
    -- NOTE: (♯$) presumably forces the combined output before
    -- returning — confirm against FP.Prelude.Core
    o' ← return ♯$ o₁ ⧺ o₂
    return (o',b)
-- Higher Functor
instance (Monoid o) ⇒ FunctorUnit (WriterT o) where
  funit ∷ (Functor m) ⇒ m ↝ WriterT o m
  funit = WriterT ∘ map (null,)
instance FunctorDiscard (WriterT o) where
  -- dropping a layer discards that layer's accumulated output
  fdiscard ∷ (Functor m) ⇒ WriterT o (WriterT o m) ↝ WriterT o m
  fdiscard = snd ^∘ runWriterT
instance FunctorFunctor (WriterT o) where
  fmap ∷ (m ↝ n) → (WriterT o m ↝ WriterT o n)
  fmap f aM = WriterT $ f $ runWriterT aM
-- MonadMonoid and MonadJoin
-- Monoid/join structure is lifted straight through the transformer.
instance (MonadMonoid m,Monoid o) ⇒ MonadMonoid (WriterT o m) where
  mzero ∷ WriterT o m a
  mzero = WriterT mzero
  (<⧺>) ∷ WriterT o m a → WriterT o m a → WriterT o m a
  aM₁ <⧺> aM₂ = WriterT $ runWriterT aM₁ <⧺> runWriterT aM₂
instance (MonadBot m,Monoid o) ⇒ MonadBot (WriterT o m) where
  mbot ∷ WriterT o m a
  mbot = WriterT mbot
instance (MonadJoin m,Monoid o) ⇒ MonadJoin (WriterT o m) where
  (<⊔>) ∷ WriterT o m a → WriterT o m a → WriterT o m a
  aM₁ <⊔> aM₂ = WriterT $ runWriterT aM₁ <⊔> runWriterT aM₂
instance (MonadJoinLattice m,Monoid o) ⇒ MonadJoinLattice (WriterT o m)
-- Monoid Functor
-- Monoid on results, using the 'Functorial' witness to obtain the
-- Monoid instance for the underlying @m (o,a)@.
instance (Functorial Monoid m,Monoid o,Monoid a) ⇒ Monoid (WriterT o m a) where
  null ∷ WriterT o m a
  null =
    with (functorial ∷ W (Monoid (m (o,a)))) $
    WriterT null
  (⧺) ∷ WriterT o m a → WriterT o m a → WriterT o m a
  aM₁ ⧺ aM₂ =
    with (functorial ∷ W (Monoid (m (o,a)))) $
    WriterT $ runWriterT aM₁ ⧺ runWriterT aM₂
instance (Functorial Monoid m,Monoid o) ⇒ Functorial Monoid (WriterT o m) where functorial = W
-- Writer Effect
-- E/I for the writer layer, via discard/unit plus the self-commute.
instance (Functor m,Monoid o) ⇒ MonadWriter o (WriterT o m) where
  writerE ∷ WriterT o (WriterT o m) ↝ WriterT o m
  writerE = fdiscard ∘ writerCommute
  writerI ∷ WriterT o m ↝ WriterT o (WriterT o m)
  writerI = writerCommute ∘ funit
-- Base Writer Effect
-- Bare pairs get the effect by round-tripping through 'Writer'.
instance (Monoid o) ⇒ MonadWriter o ((,) o) where
  writerE ∷ WriterT o ((,) o) ↝ ((,) o)
  writerE = runWriter ∘ writerE ∘ fmap writer
  writerI ∷ ((,) o) ↝ WriterT o ((,) o)
  writerI = fmap runWriter ∘ writerI ∘ writer
-- # State
-- Base Effect
-- | The state effect over the identity monad, i.e. @s → (s,a)@ in a
-- transformer coat.
type State s = StateT s ID
runStateWith ∷ s → State s a → (s,a)
runStateWith = runID ∘∘ runStateTWith
evalStateWith ∷ s → State s a → a
evalStateWith = snd ∘∘ runStateWith
execStateWith ∷ s → State s a → s
execStateWith = fst ∘∘ runStateWith
-- Commuting with self
-- | Swap two adjacent state layers by swapping the two state
-- components in the result; each layer keeps its own initial state.
stateCommute ∷ ∀ m s₁ s₂. (Functor m) ⇒ StateT s₁ (StateT s₂ m) ↝ StateT s₂ (StateT s₁ m)
stateCommute aMM = StateT $ \ s₂ → StateT $ \ s₁ → ff ^$ runStateTWith s₂ $ runStateTWith s₁ aMM
  where
    ff ∷ (s₂,(s₁,a)) → (s₁,(s₂,a))
    ff (s₂,(s₁,a)) = (s₁,(s₂,a))
-- Functor and Monad
-- A StateT computation is @s → m (s,a)@; bind threads the state through.
instance (Functor m) ⇒ Functor (StateT s m) where
  map ∷ (a → b) → StateT s m a → StateT s m b
  map f aM = StateT $ \ s → mapSnd f ^$ runStateT aM s
instance (Monad m) ⇒ Monad (StateT s m) where
  return ∷ a → StateT s m a
  return x = StateT $ \ s → return (s,x)
  (≫=) ∷ StateT s m a → (a → StateT s m b) → StateT s m b
  aM ≫= k = StateT $ \ s → do
    (s',a) ← runStateT aM s
    runStateT (k a) s'
-- Higher Functor
instance FunctorUnit (StateT s) where
  funit ∷ (Functor m) ⇒ m ↝ StateT s m
  funit aM = StateT $ \ s → (s,) ^$ aM
instance FunctorDiscard (StateT s) where
  -- both layers start from the same initial state; the @snd@ drops one
  -- layer's resulting state and keeps the surviving layer's
  fdiscard ∷ (Functor m) ⇒ StateT s (StateT s m) ↝ StateT s m
  fdiscard aMM = StateT $ \ s → runStateTWith s $ snd ^$ runStateTWith s aMM
instance FunctorFunctor (StateT s) where
  fmap ∷ (m ↝ n) → StateT s m ↝ StateT s n
  fmap f aM = StateT $ f ∘ runStateT aM
-- MonadMonoid and MonadJoin
-- Monoid/join structure is lifted pointwise over the initial state.
instance (MonadMonoid m) ⇒ MonadMonoid (StateT s m) where
  mzero ∷ StateT s m a
  mzero = StateT $ const mzero
  (<⧺>) ∷ StateT s m a → StateT s m a → StateT s m a
  aM₁ <⧺> aM₂ = StateT $ \ s → runStateT aM₁ s <⧺> runStateT aM₂ s
instance (MonadBot m) ⇒ MonadBot (StateT s m) where
  mbot ∷ StateT s m a
  mbot = StateT $ const mbot
instance (MonadJoin m) ⇒ MonadJoin (StateT s m) where
  (<⊔>) ∷ StateT s m a → StateT s m a → StateT s m a
  aM₁ <⊔> aM₂ = StateT $ \ s → runStateT aM₁ s <⊔> runStateT aM₂ s
instance (MonadJoinLattice m) ⇒ MonadJoinLattice (StateT s m)
instance (MonadTop m) ⇒ MonadTop (StateT s m) where
mtop ∷ StateT s m a
mtop = StateT $ const mtop
-- Monoid Functor
instance (Functorial Monoid m,Monoid s,Monoid a) ⇒ Monoid (StateT s m a) where
null ∷ StateT s m a
null =
with (functorial ∷ W (Monoid (m (s,a)))) $
StateT $ \ _ → null
(⧺) ∷ StateT s m a → StateT s m a → StateT s m a
aM₁ ⧺ aM₂ =
with (functorial ∷ W (Monoid (m (s,a)))) $
StateT $ \ s → runStateT aM₁ s ⧺ runStateT aM₂ s
instance (Functorial Monoid m,Monoid s) ⇒ Functorial Monoid (StateT s m) where functorial = W
-- JoinLattice Functor
instance (Functorial Bot m,Bot s,Bot a) ⇒ Bot (StateT s m a) where
bot ∷ StateT s m a
bot =
with (functorial ∷ W (Bot (m (s,a)))) $
StateT $ \ _ → bot
instance (Functorial Join m,Join s,Join a) ⇒ Join (StateT s m a) where
(⊔) ∷ StateT s m a → StateT s m a → StateT s m a
aM₁ ⊔ aM₂ =
with (functorial ∷ W (Join (m (s,a)))) $
StateT $ \ s → runStateT aM₁ s ⊔ runStateT aM₂ s
instance (Functorial Bot m,Functorial Join m,JoinLattice s,JoinLattice a) ⇒ JoinLattice (StateT s m a)
instance (Functorial Bot m,Functorial Join m,JoinLattice s) ⇒ Functorial JoinLattice (StateT s m) where functorial = W
-- State Effect
instance (Functor m) ⇒ MonadState s (StateT s m) where
stateE ∷ StateT s (StateT s m) ↝ StateT s m
stateE = fdiscard ∘ stateCommute
stateI ∷ StateT s m ↝ StateT s (StateT s m)
stateI = stateCommute ∘ funit
-- # NondetT
-- Commuting with self
-- | Remove the head element from the given list and from every list in the
-- tail collection, recursively.  Returns the plucked heads (in order)
-- together with the corresponding tails, or 'Nothing' as soon as one of the
-- lists being plucked is empty.
pluck :: [a] -> [[a]] -> Maybe ([a],[[a]])
pluck [] _ = Nothing
pluck (x:xs) [] = Just ([x],[xs])
pluck (x:xs) (ys:yss) =
  case pluck ys yss of
    Nothing -> Nothing
    Just (heads,tails) -> Just (x:heads,xs:tails)
-- | Transpose a list of lists by repeatedly plucking the first column.
-- Note the base case: @transpose [] = [[]]@ (a single empty row), matching
-- the original definition (this differs from 'Data.List.transpose').
transpose :: [[a]] -> [[a]]
transpose [] = [[]]
transpose (row:rows) =
  maybe [] (\(col,rest) -> col : transpose rest) (pluck row rows)
-- Commute two stacked nondeterminism layers by transposing the matrix of
-- alternatives (via the local 'transpose' defined above).
nondetCommute ∷ (Functor m) ⇒ NondetT (NondetT m) ↝ NondetT (NondetT m)
nondetCommute = NondetT ∘ NondetT ∘ map transpose ∘ runNondetT ∘ runNondetT
-- Functor and Monad
instance (Functor m) ⇒ Functor (NondetT m) where
  map ∷ (a → b) → NondetT m a → NondetT m b
  map f = NondetT ∘ map (map f) ∘ runNondetT
instance (Monad m,Functorial Monoid m) ⇒ Monad (NondetT m) where
  return ∷ a → NondetT m a
  return = NondetT ∘ return ∘ single
  (≫=) ∷ NondetT m a → (a → NondetT m b) → NondetT m b
  -- Bind runs the continuation on every alternative and concatenates.
  aM ≫= k = NondetT $ do
    xs ← runNondetT aM
    runNondetT $ concat $ k ^$ xs
-- Higher Functor
instance FunctorUnit NondetT where
  funit ∷ (Functor m) ⇒ m ↝ NondetT m
  funit = NondetT ∘ map return
instance FunctorDiscard NondetT where
  fdiscard ∷ (Functor m) ⇒ NondetT (NondetT m) ↝ NondetT m
  fdiscard = NondetT ∘ concat ^∘ runNondetT ∘ runNondetT
instance FunctorFunctor NondetT where
  fmap ∷ m ↝ n → NondetT m ↝ NondetT n
  fmap f = NondetT ∘ f ∘ runNondetT
-- Monad Monoid
-- The monadic monoid is just the plain Monoid on alternatives.
instance (Monad m,Functorial Monoid m) ⇒ MonadMonoid (NondetT m) where {mzero = null;(<⧺>) = (⧺)}
instance MonadMonoid [] where {mzero = null;(<⧺>) = (⧺)}
-- Monoid Functor
instance (Functorial Monoid m) ⇒ Monoid (NondetT m a) where
  null ∷ NondetT m a
  null =
    with (functorial ∷ W (Monoid (m [a]))) $
    NondetT null
  (⧺) ∷ NondetT m a → NondetT m a → NondetT m a
  xs ⧺ ys =
    with (functorial ∷ W (Monoid (m [a]))) $
    NondetT $ runNondetT xs ⧺ runNondetT ys
instance (Functorial Monoid m) ⇒ Functorial Monoid (NondetT m) where functorial = W
-- Nondet Effect
instance (Functor m) ⇒ MonadNondet (NondetT m) where
  nondetE ∷ NondetT (NondetT m) ↝ NondetT m
  nondetE = fdiscard ∘ nondetCommute
  nondetI ∷ NondetT m ↝ NondetT (NondetT m)
  nondetI = nondetCommute ∘ funit
-- # ContT
-- Base Effect
-- The pure continuation monad is ContT over the identity monad.
type Cont r = ContT r ID
cont ∷ ((a → r) → r) → Cont r a
cont k = ContT $ \ k' → ID $ k $ \ a → runID $ k' a
runCont ∷ Cont r a → (a → r) → r
runCont aM f = runID $ runContT aM (ID ∘ f)
evalCont ∷ Cont r r → r
-- Run with the identity continuation.
evalCont aM = runCont aM id
-- Functor and Monad
instance Functor (ContT r m) where map = mmap
instance Monad (ContT r m) where
  return ∷ a → ContT r m a
  return a = ContT $ \ k → k a
  (≫=) ∷ ContT r m a → (a → ContT r m b) → ContT r m b
  aM ≫= kM = ContT $ \ (k ∷ b → m r) → runContT aM $ \ a → runContT (kM a) k
-- Higher Functor
-- ContT is only an iso-functor: mapping needs conversions both ways
-- because the monad occurs in both positive and negative position.
instance FunctorIsoFunctor (ContT r) where
  fisomap ∷ (m ↝ n,n ↝ m) → (ContT r m ↝ ContT r n)
  fisomap (to,from) aM = ContT $ \ (k ∷ a → n r) → to $ runContT aM $ \ a → from $ k a
-- Cont Effect
instance (Monad m) ⇒ MonadCont r (ContT r m) where
  contE ∷ ContT r (ContT r m) ↝ ContT r m
  contE aMM = ContT $ \ (k ∷ a → m r) →
    evalContT $ runContT aMM $ \ a → ContT $ \ (k' ∷ r → m r) → k' *$ k a
  contI ∷ ContT r m ↝ ContT r (ContT r m)
  contI aM = ContT $ \ (k₁ ∷ a → ContT r m r) → ContT $ \ (k₂ ∷ r → m r) →
    k₂ *$ runContT aM $ \ a → evalContT (k₁ a)
-- # OpaqueContT
-- Base Effect
-- OpaqueContT abstracts the continuation representation behind a type
-- constructor k, related to the concrete ContFun by an isomorphism.
type OpaqueCont k r = OpaqueContT k r ID
opaqueCont ∷ (k r ID a → r) → OpaqueCont k r a
opaqueCont nk = OpaqueContT $ ID ∘ nk
runOpaqueCont ∷ OpaqueCont k r a → k r ID a → r
runOpaqueCont = runID ∘∘ runOpaqueContT
-- Convert between the opaque representation and plain meta continuations.
metaCont ∷ (Isomorphism3 (k r) (ContFun r)) ⇒ ((a → r) → r) → OpaqueCont k r a
metaCont nk = opaqueCont $ \ (k ∷ k r ID a) → nk $ (∘) runID ∘ runContFun $ isoTo3 k
runMetaCont ∷ (Isomorphism3 (ContFun r) (k r)) ⇒ OpaqueCont k r a → (a → r) → r
runMetaCont aM k = runOpaqueCont aM $ isoTo3 $ ContFun $ ID ∘ k
evalOpaqueCont ∷ (Isomorphism3 (ContFun r) (k r)) ⇒ OpaqueCont k r r → r
evalOpaqueCont aM = runMetaCont aM id
-- Functor and Monad
instance (Monad m,Isomorphism3 (ContFun r) (k r)) ⇒ Functor (OpaqueContT k r m) where map = mmap
instance (Monad m,Isomorphism3 (ContFun r) (k r)) ⇒ Monad (OpaqueContT k r m) where
  return ∷ a → OpaqueContT k r m a
  return a = OpaqueContT $ \ k → runContFun (isoFrom3 k) a
  (≫=) ∷ OpaqueContT k r m a → (a → OpaqueContT k r m b) → OpaqueContT k r m b
  aM ≫= kM = OpaqueContT $ \ (k ∷ k r m a) → runMetaContT aM $ \ a → runOpaqueContT (kM a) k
-- Higher Functor
instance (Isomorphism3 (ContFun r) (k r)) ⇒ FunctorIsoFunctor (OpaqueContT k r) where
  fisomap ∷ (m ↝ n,n ↝ m) → OpaqueContT k r m ↝ OpaqueContT k r n
  fisomap tofrom = opaque ∘ fisomap tofrom ∘ meta
-- OpaqueCont Effect
-- 'Balloon' lets an opaque continuation absorb/shed an OpaqueContT layer.
class Balloon k r | k → r where
  inflate ∷ (Monad m) ⇒ k r m ↝ k r (OpaqueContT k r m)
  deflate ∷ (Monad m) ⇒ k r (OpaqueContT k r m) ↝ k r m
instance (Monad m,Isomorphism3 (ContFun r) (k r),Balloon k r) ⇒ MonadOpaqueCont k r (OpaqueContT k r m) where
  opaqueContE ∷ OpaqueContT k r (OpaqueContT k r m) a → OpaqueContT k r m a
  opaqueContE kk = OpaqueContT $ \ (k ∷ k r m a ) → runMetaContTWith return $ runOpaqueContT kk $ inflate k
  opaqueContI ∷ OpaqueContT k r m a → OpaqueContT k r (OpaqueContT k r m) a
  opaqueContI aM = OpaqueContT $ \ k₁ → metaContT $ \ (k₂ ∷ r → m r) → k₂ *$ runOpaqueContT aM $ deflate k₁
instance (Monad m,Isomorphism3 (ContFun r) (k r)) ⇒ MonadCont r (OpaqueContT k r m) where
  contE ∷ ContT r (OpaqueContT k r m) ↝ OpaqueContT k r m
  contE aMM = metaContT $ \ (k ∷ a → m r) →
    runMetaContTWith return $ runContT aMM $ \ a → metaContT $ \ (k' ∷ r → m r) → k' *$ k a
  contI ∷ OpaqueContT k r m ↝ ContT r (OpaqueContT k r m)
  contI aM = ContT $ \ (k₁ ∷ a → OpaqueContT k r m r) → metaContT $ \ (k₂ ∷ r → m r) →
    k₂ *$ runMetaContT aM $ \ a → runMetaContT (k₁ a) return
----------------------
-- Monads Commuting --
----------------------
-- Each pairing below provides the two commute directions and uses them to
-- lift each effect class through the other transformer.
-- [ISO] = the two directions are mutually inverse; [ADJ] = adjoint only.
-- # Failure // *
-- ## Failure // Error [ISO]
failureErrorCommute ∷ (Functor m) ⇒ FailureT (ErrorT e m) ↝ ErrorT e (FailureT m)
failureErrorCommute = ErrorT ∘ FailureT ∘ map ff ∘ runErrorT ∘ runFailureT
  where
    ff ∷ (e ⨄ Maybe a) → Maybe (e ⨄ a)
    ff (Left e) = Just (Left e)
    ff (Right Nothing) = Nothing
    ff (Right (Just x)) = Just (Right x)
errorFailureCommute ∷ (Functor m) ⇒ ErrorT e (FailureT m) ↝ FailureT (ErrorT e m)
errorFailureCommute = FailureT ∘ ErrorT ∘ map ff ∘ runFailureT ∘ runErrorT
  where
    ff ∷ Maybe (e ⨄ a) → (e ⨄ Maybe a)
    ff Nothing = Right Nothing
    ff (Just (Left e)) = Left e
    ff (Just (Right x)) = Right (Just x)
instance (Functor m,MonadFailure m) ⇒ MonadFailure (ErrorT e m) where
  failureE ∷ FailureT (ErrorT e m) ↝ ErrorT e m
  failureE = fmap failureE ∘ failureErrorCommute
  failureI ∷ ErrorT e m ↝ FailureT (ErrorT e m)
  failureI = errorFailureCommute ∘ fmap failureI
instance (Functor m,MonadError e m) ⇒ MonadError e (FailureT m) where
  errorE ∷ ErrorT e (FailureT m) ↝ FailureT m
  errorE = fmap errorE ∘ errorFailureCommute
  errorI ∷ FailureT m ↝ ErrorT e (FailureT m)
  errorI = failureErrorCommute ∘ fmap errorI
-- ## Failure // Reader [ISO]
failureReaderCommute ∷ (Functor m) ⇒ FailureT (ReaderT r m) ↝ ReaderT r (FailureT m)
failureReaderCommute aMRM = ReaderT $ \ r → FailureT $ runReaderTWith r $ runFailureT aMRM
readerFailureCommute ∷ (Functor m) ⇒ ReaderT r (FailureT m) ↝ FailureT (ReaderT r m)
readerFailureCommute aRMM = FailureT $ ReaderT $ \ r → runFailureT $ runReaderTWith r aRMM
instance (Functor m,MonadFailure m) ⇒ MonadFailure (ReaderT r m) where
  failureE ∷ FailureT (ReaderT r m) ↝ ReaderT r m
  failureE = fmap failureE ∘ failureReaderCommute
  failureI ∷ ReaderT r m ↝ FailureT (ReaderT r m)
  failureI = readerFailureCommute ∘ fmap failureI
instance (Functor m,MonadReader r m) ⇒ MonadReader r (FailureT m) where
  readerE ∷ ReaderT r (FailureT m) ↝ FailureT m
  readerE = fmap readerE ∘ readerFailureCommute
  readerI ∷ FailureT m ↝ ReaderT r (FailureT m)
  readerI = failureReaderCommute ∘ fmap readerI
-- ## Failure // Writer [ADJ]
-- Failure discards accumulated output; the reverse direction must invent
-- the empty output ('null') for a failed computation.
failureWriterCommute ∷ (Functor m) ⇒ FailureT (WriterT o m) ↝ WriterT o (FailureT m)
failureWriterCommute aMRM = WriterT $ FailureT $ ff ^$ runWriterT $ runFailureT aMRM
  where
    ff ∷ (o,Maybe a) → Maybe (o,a)
    ff (_,Nothing) = Nothing
    ff (o,Just a) = Just (o,a)
writerFailureCommute ∷ (Monoid o,Functor m) ⇒ WriterT o (FailureT m) ↝ FailureT (WriterT o m)
writerFailureCommute aRMM = FailureT $ WriterT $ ff ^$ runFailureT $ runWriterT aRMM
  where
    ff ∷ (Monoid o) ⇒ Maybe (o,a) → (o,Maybe a)
    ff Nothing = (null,Nothing)
    ff (Just (o,a)) = (o,Just a)
instance (Monoid o,Functor m,MonadFailure m) ⇒ MonadFailure (WriterT o m) where
  failureE ∷ FailureT (WriterT o m) ↝ WriterT o m
  failureE = fmap failureE ∘ failureWriterCommute
  failureI ∷ WriterT o m ↝ FailureT (WriterT o m)
  failureI = writerFailureCommute ∘ fmap failureI
instance (Monoid o,Functor m,MonadWriter o m) ⇒ MonadWriter o (FailureT m) where
  writerE ∷ WriterT o (FailureT m) ↝ FailureT m
  writerE = fmap writerE ∘ writerFailureCommute
  writerI ∷ FailureT m ↝ WriterT o (FailureT m)
  writerI = failureWriterCommute ∘ fmap writerI
-- ## Failure // State [ADJ]
-- Failure discards the final state; on the way back the initial state s₁
-- is restored for a failed computation.
failureStateCommute ∷ ∀ s m. (Functor m) ⇒ FailureT (StateT s m) ↝ StateT s (FailureT m)
failureStateCommute aMSM = StateT $ \ s₁ → FailureT $ ff ^$ runStateTWith s₁ $ runFailureT aMSM
  where
    ff ∷ (s,Maybe a) → Maybe (s,a)
    ff (_,Nothing) = Nothing
    ff (s₂,Just a) = Just (s₂,a)
stateFailureCommute ∷ ∀ s m. (Functor m) ⇒ StateT s (FailureT m) ↝ FailureT (StateT s m)
stateFailureCommute aSMM = FailureT $ StateT $ \ s₁ → ff s₁ ^$ runFailureT $ runStateTWith s₁ aSMM
  where
    ff ∷ s → (Maybe (s,a)) → (s,Maybe a)
    ff s₁ Nothing = (s₁,Nothing)
    ff _ (Just (s₂,a)) = (s₂,Just a)
instance (Functor m,MonadFailure m) ⇒ MonadFailure (StateT s m) where
  failureE ∷ FailureT (StateT s m) ↝ StateT s m
  failureE = fmap failureE ∘ failureStateCommute
  failureI ∷ StateT s m ↝ FailureT (StateT s m)
  failureI = stateFailureCommute ∘ fmap failureI
instance (Functor m,MonadState s m) ⇒ MonadState s (FailureT m) where
  stateE ∷ StateT s (FailureT m) ↝ FailureT m
  stateE = fmap stateE ∘ stateFailureCommute
  stateI ∷ FailureT m ↝ StateT s (FailureT m)
  stateI = failureStateCommute ∘ fmap stateI
-- ## Failure // Nondet [ADJ]
-- A single failing alternative makes the whole computation fail.
failureNondetCommute ∷ (Functor m) ⇒ FailureT (NondetT m) ↝ NondetT (FailureT m)
failureNondetCommute = NondetT ∘ FailureT ∘ map ff ∘ runNondetT ∘ runFailureT
  where
    ff ∷ [Maybe a] → Maybe [a]
    ff [] = Just []
    ff (Nothing:_) = Nothing
    ff (Just x:xMs) = (x:) ^$ ff xMs
nondetFailureCommute ∷ (Functor m) ⇒ NondetT (FailureT m) ↝ FailureT (NondetT m)
nondetFailureCommute = FailureT ∘ NondetT ∘ map ff ∘ runFailureT ∘ runNondetT
  where
    ff ∷ Maybe [a] → [Maybe a]
    ff Nothing = [Nothing]
    ff (Just xs) = map Just xs
instance (Functor m,MonadFailure m) ⇒ MonadFailure (NondetT m) where
  failureE ∷ FailureT (NondetT m) ↝ NondetT m
  failureE = fmap failureE ∘ failureNondetCommute
  failureI ∷ NondetT m ↝ FailureT (NondetT m)
  failureI = nondetFailureCommute ∘ fmap failureI
instance (Functor m,MonadNondet m) ⇒ MonadNondet (FailureT m) where
  nondetE ∷ NondetT (FailureT m) ↝ FailureT m
  nondetE = fmap nondetE ∘ nondetFailureCommute
  nondetI ∷ FailureT m ↝ NondetT (FailureT m)
  nondetI = failureNondetCommute ∘ fmap nondetI
-- ## Failure // Cont
-- TODO:
-- ## Failure // OpaqueCont
-- TODO:
-- # Error // *
-- ## Error // Reader [ISO]
errorReaderCommute ∷ ErrorT e (ReaderT r m) ↝ ReaderT r (ErrorT e m)
errorReaderCommute aMRM = ReaderT $ \ r → ErrorT $ runReaderTWith r $ runErrorT aMRM
readerErrorCommute ∷ ReaderT r (ErrorT e m) ↝ ErrorT e (ReaderT r m)
readerErrorCommute aRMM = ErrorT $ ReaderT $ \ r → runErrorT $ runReaderTWith r aRMM
instance (Functor m,MonadError e m) ⇒ MonadError e (ReaderT r m) where
  errorE ∷ ErrorT e (ReaderT r m) ↝ ReaderT r m
  errorE = fmap errorE ∘ errorReaderCommute
  errorI ∷ ReaderT r m ↝ ErrorT e (ReaderT r m)
  errorI = readerErrorCommute ∘ fmap errorI
instance (Functor m,MonadReader r m) ⇒ MonadReader r (ErrorT e m) where
  readerE ∷ ReaderT r (ErrorT e m) ↝ ErrorT e m
  readerE = fmap readerE ∘ readerErrorCommute
  readerI ∷ ErrorT e m ↝ ReaderT r (ErrorT e m)
  readerI = errorReaderCommute ∘ fmap readerI
-- ## Error // Writer [ADJ]
-- An error discards accumulated output; the reverse direction supplies
-- the empty output for the error case.
errorWriterCommute ∷ ∀ e o m. (Functor m) ⇒ ErrorT e (WriterT o m) ↝ WriterT o (ErrorT e m)
errorWriterCommute = WriterT ∘ ErrorT ∘ ff ^∘ runWriterT ∘ runErrorT
  where
    ff ∷ (o,e ⨄ a) → (e ⨄ (o,a))
    ff (_,Left e) = Left e
    -- NOTE(review): the binder named 'e' here is the output component o,
    -- not an error value.
    ff (e,Right a) = Right (e,a)
writerErrorCommute ∷ (Functor m,Monoid o) ⇒ WriterT o (ErrorT e m) ↝ ErrorT e (WriterT o m)
writerErrorCommute = ErrorT ∘ WriterT ∘ ff ^∘ runErrorT ∘ runWriterT
  where
    ff ∷ (Monoid o) ⇒ (e ⨄ (o,a)) → (o,e ⨄ a)
    ff (Left e) = (null,Left e)
    ff (Right (o,a)) = (o,Right a)
instance (Functor m,MonadError e m,Monoid o) ⇒ MonadError e (WriterT o m) where
  errorE ∷ ErrorT e (WriterT o m) ↝ WriterT o m
  errorE = fmap errorE ∘ errorWriterCommute
  errorI ∷ WriterT o m ↝ ErrorT e (WriterT o m)
  errorI = writerErrorCommute ∘ fmap errorI
instance (Functor m,MonadWriter o m,Monoid o) ⇒ MonadWriter o (ErrorT e m) where
  writerE ∷ WriterT o (ErrorT e m) ↝ ErrorT e m
  writerE = fmap writerE ∘ writerErrorCommute
  writerI ∷ ErrorT e m ↝ WriterT o (ErrorT e m)
  writerI = errorWriterCommute ∘ fmap writerI
-- ## Error // State [ADJ]
-- An error discards the final state; the reverse direction restores the
-- initial state on error.
errorStateCommute ∷ (Functor m) ⇒ ErrorT e (StateT s m) ↝ StateT s (ErrorT e m)
errorStateCommute aMRM = StateT $ \ s → ErrorT $ ff ^$ runStateTWith s $ runErrorT aMRM
  where
    ff ∷ (s,e ⨄ a) → e ⨄ (s,a)
    ff (_,Left e) = Left e
    ff (s,Right a) = Right (s,a)
stateErrorCommute ∷ (Functor m) ⇒ StateT s (ErrorT e m) ↝ ErrorT e (StateT s m)
stateErrorCommute aRMM = ErrorT $ StateT $ \ s → ff s ^$ runErrorT $ runStateTWith s aRMM
  where
    ff ∷ s → e ⨄ (s,a) → (s,e ⨄ a)
    ff s (Left e) = (s,Left e)
    ff _ (Right (s,a)) = (s,Right a)
instance (Functor m,MonadError e m) ⇒ MonadError e (StateT s m) where
  errorE ∷ ErrorT e (StateT s m) ↝ StateT s m
  errorE = fmap errorE ∘ errorStateCommute
  errorI ∷ StateT s m ↝ ErrorT e (StateT s m)
  errorI = stateErrorCommute ∘ fmap errorI
instance (Functor m,MonadState s m) ⇒ MonadState s (ErrorT e m) where
  stateE ∷ StateT s (ErrorT e m) ↝ ErrorT e m
  stateE = fmap stateE ∘ stateErrorCommute
  stateI ∷ ErrorT e m ↝ StateT s (ErrorT e m)
  stateI = errorStateCommute ∘ fmap stateI
-- ## Error // Nondet [ADJ]
-- A single erroring alternative makes the whole computation error.
errorNondetCommute ∷ (Functor m) ⇒ ErrorT e (NondetT m) ↝ NondetT (ErrorT e m)
errorNondetCommute = NondetT ∘ ErrorT ∘ map ff ∘ runNondetT ∘ runErrorT
  where
    ff ∷ [e ⨄ a] → e ⨄ [a]
    ff [] = Right []
    ff (Left e:_) = Left e
    ff (Right x:xsM) = (x:) ^$ ff xsM
nondetErrorCommute ∷ (Functor m) ⇒ NondetT (ErrorT e m) ↝ ErrorT e (NondetT m)
nondetErrorCommute = ErrorT ∘ NondetT ∘ map ff ∘ runErrorT ∘ runNondetT
  where
    ff ∷ e ⨄ [a] → [e ⨄ a]
    ff (Left e) = [Left e]
    ff (Right xs) = map Right xs
instance (Functor m,MonadError e m) ⇒ MonadError e (NondetT m) where
  errorE ∷ ErrorT e (NondetT m) ↝ NondetT m
  errorE = fmap errorE ∘ errorNondetCommute
  errorI ∷ NondetT m ↝ ErrorT e (NondetT m)
  errorI = nondetErrorCommute ∘ fmap errorI
instance (Functor m,MonadNondet m) ⇒ MonadNondet (ErrorT e m) where
  nondetE ∷ NondetT (ErrorT e m) ↝ ErrorT e m
  nondetE = fmap nondetE ∘ nondetErrorCommute
  nondetI ∷ ErrorT e m ↝ NondetT (ErrorT e m)
  nondetI = errorNondetCommute ∘ fmap nondetI
-- ## Error // Cont
-- TODO:
-- ## Error // OpaqueCont
-- TODO:
-- # Reader // *
-- Reader commutes isomorphically with everything below: the environment
-- is simply distributed to the inner computation.
-- ## Reader // Writer [ISO]
readerWriterCommute ∷ ReaderT r (WriterT w m) ↝ WriterT w (ReaderT r m)
readerWriterCommute aRWM = WriterT $ ReaderT $ \ r → runWriterT $ runReaderTWith r aRWM
writerReaderCommute ∷ WriterT w (ReaderT r m) ↝ ReaderT r (WriterT w m)
writerReaderCommute aWRM = ReaderT $ \ r → WriterT $ runReaderTWith r $ runWriterT aWRM
instance (Monoid w,Functor m,MonadReader r m) ⇒ MonadReader r (WriterT w m) where
  readerE ∷ ReaderT r (WriterT w m) ↝ WriterT w m
  readerE = fmap readerE ∘ readerWriterCommute
  readerI ∷ WriterT w m ↝ ReaderT r (WriterT w m)
  readerI = writerReaderCommute ∘ fmap readerI
instance (Monoid w,Functor m,MonadWriter w m) ⇒ MonadWriter w (ReaderT r m) where
  writerE ∷ WriterT w (ReaderT r m) ↝ ReaderT r m
  writerE = fmap writerE ∘ writerReaderCommute
  writerI ∷ ReaderT r m ↝ WriterT w (ReaderT r m)
  writerI = readerWriterCommute ∘ fmap writerI
-- ## Reader // State [ISO]
readerStateCommute ∷ (Functor m) ⇒ ReaderT r (StateT s m) ↝ StateT s (ReaderT r m)
readerStateCommute aRSM = StateT $ \ s → ReaderT $ \ r → runStateTWith s $ runReaderTWith r aRSM
stateReaderCommute ∷ (Functor m) ⇒ StateT s (ReaderT r m) ↝ ReaderT r (StateT s m)
stateReaderCommute aSRM = ReaderT $ \ r → StateT $ \ s → runReaderTWith r $ runStateTWith s aSRM
instance (Functor m,MonadReader r m) ⇒ MonadReader r (StateT s m) where
  readerE ∷ ReaderT r (StateT s m) ↝ StateT s m
  readerE = fmap readerE ∘ readerStateCommute
  readerI ∷ StateT s m ↝ ReaderT r (StateT s m)
  readerI = stateReaderCommute ∘ fmap readerI
instance (Functor m,MonadState s m) ⇒ MonadState s (ReaderT r m) where
  stateE ∷ StateT s (ReaderT r m) ↝ ReaderT r m
  stateE = fmap stateE ∘ stateReaderCommute
  stateI ∷ ReaderT r m ↝ StateT s (ReaderT r m)
  stateI = readerStateCommute ∘ fmap stateI
-- ## Reader // Nondet [ISO]
readerNondetCommute ∷ (Functor m) ⇒ ReaderT r (NondetT m) ↝ NondetT (ReaderT r m)
readerNondetCommute aM = NondetT $ ReaderT $ \ r → runNondetT $ runReaderTWith r aM
nondetReaderCommute ∷ (Functor m) ⇒ NondetT (ReaderT r m) ↝ ReaderT r (NondetT m)
nondetReaderCommute aM = ReaderT $ \ r → NondetT $ runReaderTWith r $ runNondetT aM
instance (Functor m,MonadReader r m) ⇒ MonadReader r (NondetT m) where
  readerE ∷ ReaderT r (NondetT m) ↝ NondetT m
  readerE = fmap readerE ∘ readerNondetCommute
  readerI ∷ NondetT m ↝ ReaderT r (NondetT m)
  readerI = nondetReaderCommute ∘ fmap readerI
instance (Functor m,MonadNondet m) ⇒ MonadNondet (ReaderT r m) where
  nondetE ∷ NondetT (ReaderT r m) ↝ ReaderT r m
  nondetE = fmap nondetE ∘ nondetReaderCommute
  nondetI ∷ ReaderT r m ↝ NondetT (ReaderT r m)
  nondetI = readerNondetCommute ∘ fmap nondetI
-- ## Reader // Cont
-- TODO
-- ## Reader // OpaqueCont
-- TODO
-- # Writer // *
-- ## Writer // State [ISO]
-- Pure re-nesting of the (state, output) components.
writerStateCommute ∷ ∀ o s m. (Functor m) ⇒ WriterT o (StateT s m) ↝ StateT s (WriterT o m)
writerStateCommute aRMM = StateT $ \ s₁ → WriterT $ ff ^$ runStateTWith s₁ $ runWriterT aRMM
  where
    ff ∷ (s,(o,a)) → (o,(s,a))
    ff (s,(o,a)) = (o,(s,a))
stateWriterCommute ∷ ∀ o s m. (Functor m) ⇒ StateT s (WriterT o m) ↝ WriterT o (StateT s m)
stateWriterCommute aMRM = WriterT $ StateT $ ff ^∘ runWriterT ∘ runStateT aMRM
  where
    ff ∷ (o,(s,a)) → (s,(o,a))
    ff (o,(s,a)) = (s,(o,a))
instance (Monoid o,Functor m,MonadWriter o m) ⇒ MonadWriter o (StateT s m) where
  writerE ∷ WriterT o (StateT s m) ↝ StateT s m
  writerE = fmap writerE ∘ writerStateCommute
  writerI ∷ StateT s m ↝ WriterT o (StateT s m)
  writerI = stateWriterCommute ∘ fmap writerI
instance (Functor m,Monoid o,MonadState s m) ⇒ MonadState s (WriterT o m) where
  stateE ∷ StateT s (WriterT o m) ↝ WriterT o m
  stateE = fmap stateE ∘ stateWriterCommute
  stateI ∷ WriterT o m ↝ StateT s (WriterT o m)
  stateI = writerStateCommute ∘ fmap stateI
-- ## Writer // Nondet [ADJ]
-- Outputs of all alternatives are merged with the Monoid when pushing
-- the writer outside the nondeterminism.
writerNondetCommute ∷ ∀ o m. (Functor m,Monoid o) ⇒ WriterT o (NondetT m) ↝ NondetT (WriterT o m)
writerNondetCommute aMM = NondetT $ WriterT $ ff ^$ runNondetT $ runWriterT aMM
  where
    ff ∷ [(o,a)] → (o,[a])
    ff asL = (concat $ fst ^$ asL,snd ^$ asL)
nondetWriterCommute ∷ ∀ o m. (Functor m) ⇒ NondetT (WriterT o m) ↝ WriterT o (NondetT m)
nondetWriterCommute aMM = WriterT $ NondetT $ ff ^$ runWriterT $ runNondetT aMM
  where
    ff ∷ (o,[a]) → [(o,a)]
    ff (o,xs) = (o,) ^$ xs
instance (Functor m,MonadWriter o m,Monoid o) ⇒ MonadWriter o (NondetT m) where
  writerE ∷ WriterT o (NondetT m) ↝ NondetT m
  writerE = fmap writerE ∘ writerNondetCommute
  writerI ∷ NondetT m ↝ WriterT o (NondetT m)
  writerI = nondetWriterCommute ∘ fmap writerI
instance (Functor m,MonadNondet m,Monoid o) ⇒ MonadNondet (WriterT o m) where
  nondetE ∷ NondetT (WriterT o m) ↝ WriterT o m
  nondetE = fmap nondetE ∘ nondetWriterCommute
  nondetI ∷ WriterT o m ↝ NondetT (WriterT o m)
  nondetI = writerNondetCommute ∘ fmap nondetI
-- ## Writer // Cont
-- TODO
-- ## Writer // OpaqueCont
-- TODO
-- # State // *
-- ## State // Nondet [ADJ]
-- Final states of all alternatives are merged with the Monoid on s when
-- pushing state outside the nondeterminism.
stateNondetCommute ∷ ∀ s m. (Functor m,Monoid s) ⇒ StateT s (NondetT m) ↝ NondetT (StateT s m)
stateNondetCommute aMM = NondetT $ StateT $ \ s → ff ^$ runNondetT $ runStateTWith s aMM
  where
    ff ∷ [(s,a)] → (s,[a])
    ff asL = (concat $ fst ^$ asL,snd ^$ asL)
nondetStateCommute ∷ ∀ s m. (Functor m) ⇒ NondetT (StateT s m) ↝ StateT s (NondetT m)
nondetStateCommute aMM = StateT $ \ s → NondetT $ ff ^$ runStateTWith s $ runNondetT aMM
  where
    ff ∷ (s,[a]) → [(s,a)]
    ff (s,xs) = (s,) ^$ xs
instance (Functor m,MonadState s m,Monoid s) ⇒ MonadState s (NondetT m) where
  stateE ∷ StateT s (NondetT m) ↝ NondetT m
  stateE = fmap stateE ∘ stateNondetCommute
  stateI ∷ NondetT m ↝ StateT s (NondetT m)
  stateI = nondetStateCommute ∘ fmap stateI
instance (Functor m,MonadNondet m,Monoid s) ⇒ MonadNondet (StateT s m) where
  nondetE ∷ NondetT (StateT s m) ↝ StateT s m
  nondetE = fmap nondetE ∘ nondetStateCommute
  nondetI ∷ StateT s m ↝ NondetT (StateT s m)
  nondetI = stateNondetCommute ∘ fmap nondetI
-- ## State // Cont [???]
-- Commuting state with continuations changes the answer type: the state
-- is threaded through the continuation, so r becomes (s,r).
stateKonCommute ∷ StateT s (ContT (s,r) m) ↝ ContT r (StateT s m)
stateKonCommute aSK = ContT $ \ (k ∷ a → StateT s m r) → StateT $ \ s →
  runContT (runStateTWith s aSK) $ \ (s',a) → runStateTWith s' $ k a
konStateCommute ∷ ContT r (StateT s m) ↝ StateT s (ContT (s,r) m)
konStateCommute aKS = StateT $ \ s → ContT $ \ (k ∷ (s,a) → m (s,r)) →
  runStateTWith s $ runContT aKS $ \ a → StateT $ \ s' → k (s',a)
instance (Monad m,MonadState s m) ⇒ MonadState s (ContT r m) where
  stateE ∷ StateT s (ContT r m) ↝ ContT r m
  stateE =
    fisomap (stateE,stateI)
    ∘ stateKonCommute
    ∘ stateE
    ∘ fmap (konStateCommute ∘ fisomap (stateI,stateE ∷ StateT s m ↝ m))
  stateI ∷ ContT r m ↝ StateT s (ContT r m)
  stateI =
    fmap (fisomap (stateE,stateI) ∘ stateKonCommute)
    ∘ stateI
    ∘ konStateCommute
    ∘ fisomap (stateI,stateE ∷ StateT s m ↝ m)
-- # State // OpaqueCont [???]
-- Reduce to the ContT instance via the opaque/meta conversions.
instance (Monad m,MonadState s m,Isomorphism3 (ContFun r) (k r)) ⇒ MonadState s (OpaqueContT k r m) where
  stateE ∷ StateT s (OpaqueContT k r m) ↝ OpaqueContT k r m
  stateE =
    opaque
    ∘ stateE
    ∘ fmap meta
  stateI ∷ OpaqueContT k r m ↝ StateT s (OpaqueContT k r m)
  stateI =
    fmap opaque
    ∘ stateI
    ∘ meta
---------
-- RWS --
---------
-- Reader+Writer+State bundled as a newtype over the standard stack;
-- effect instances are derived from the underlying transformers.
newtype RWST r o s m a = RWST { runRWST ∷ ReaderT r (WriterT o (StateT s m)) a }
  deriving
  ( Functor,Monad
  , MonadFailure
  , MonadError e
  , MonadReader r,MonadWriter o,MonadState s
  )
-- Run with environment r and initial state s₀, flattening the nested
-- result into a (state, output, answer) triple.
runRWSTWith ∷ ∀ r o s m a. (Functor m) ⇒ r → s → RWST r o s m a → m (s,o,a)
runRWSTWith r s₀ aM = ff ^$ runStateTWith s₀ $ runWriterT $ runReaderTWith r $ runRWST aM
  where
    ff ∷ (s,(o,a)) → (s,o,a)
    ff (s,(o,a)) = (s,o,a)
-- Base Effect
type RWS r o s = RWST r o s ID
runRWSWith ∷ r → s → RWS r o s a → (s,o,a)
runRWSWith r s aM = runID $ runRWSTWith r s aM
-- Higher Functor
instance FunctorFunctor (RWST r o s) where
  fmap ∷ (m ↝ n) → RWST r o s m a → RWST r o s n a
  -- Map under all three transformer layers.
  fmap f = RWST ∘ fmap (fmap (fmap f)) ∘ runRWST
--------------------
-- Adding Effects --
--------------------
-- # AddWriterT
-- Adds a writer effect with combined output type o₁₂ ≅ (o₁,o₂) on top of a
-- monad that already has a writer effect for o₂; the extra o₁ component is
-- carried by the wrapped WriterT.
newtype AddWriterT o₁₂ o₁ m a = AddWriterT { runAddWriterT ∷ WriterT o₁ m a }
  deriving
  ( Functor,Monad
  , MonadMonoid,MonadBot,MonadJoin
  , MonadFailure
  , MonadError e
  , MonadReader r
  , MonadState s
  -- TODO: implement
  -- , MonadCont r
  -- TODO: implement and role annotation
  -- , MonadOpaqueCont k r
  )
-- Flatten two writer layers into one with paired output (and back).
mergeWriter ∷ (Functor m) ⇒ WriterT o₁ (WriterT o₂ m) a → WriterT (o₁,o₂) m a
mergeWriter = WriterT ∘ ff ^∘ runWriterT ∘ runWriterT
  where
    ff ∷ (o₂,(o₁,a)) → ((o₁,o₂),a)
    ff (o₂,(o₁,a)) = ((o₁,o₂),a)
splitWriter ∷ (Functor m) ⇒ WriterT (o₁,o₂) m a → WriterT o₁ (WriterT o₂ m) a
splitWriter = WriterT ∘ WriterT ∘ ff ^∘ runWriterT
  where
    ff ∷ ((o₁,o₂),a) → (o₂,(o₁,a))
    ff ((o₁,o₂),a) = (o₂,(o₁,a))
instance (Functor m,MonadWriter o₂ m,Monoid o₁,Isomorphism o₁₂ (o₁,o₂)) ⇒ MonadWriter o₁₂ (AddWriterT o₁₂ o₁ m) where
  writerI ∷ AddWriterT o₁₂ o₁ m ↝ WriterT o₁₂ (AddWriterT o₁₂ o₁ m)
  -- Introduce both component writers, merge them, and re-wrap.
  writerI =
    fmap AddWriterT
    ∘ mapOutput isoFrom
    ∘ mergeWriter
    ∘ fmap (writerCommute ∘ fmap writerI)
    ∘ writerI
    ∘ runAddWriterT
  writerE ∷ WriterT o₁₂ (AddWriterT o₁₂ o₁ m) ↝ AddWriterT o₁₂ o₁ m
  -- Inverse pipeline of 'writerI'.
  writerE =
    AddWriterT
    ∘ writerE
    ∘ fmap (fmap writerE ∘ writerCommute)
    ∘ splitWriter
    ∘ mapOutput isoTo
    ∘ fmap runAddWriterT
-- # AddStateT
-- Adds a state effect with combined state type s₁₂ ≅ (s₁,s₂) on top of a
-- monad that already has a state effect for s₂; the extra s₁ component is
-- carried by the wrapped StateT.
newtype AddStateT s₁₂ s₁ m a = AddStateT { runAddStateT ∷ StateT s₁ m a }
  deriving
  ( Functor,Monad
  , MonadMonoid,MonadBot,MonadJoin
  , MonadFailure
  , MonadError e
  , MonadReader r
  , MonadWriter o
  -- TODO: implement
  -- , MonadCont r
  -- TODO: implement
  -- , MonadOpaqueCont k r
  )
-- Flatten two state layers into one with paired state (and back).
mergeState ∷ (Functor m) ⇒ StateT s₁ (StateT s₂ m) a → StateT (s₁,s₂) m a
mergeState aMM = StateT $ \ (s₁,s₂) → ff ^$ runStateT (runStateT aMM s₁) s₂
  where
    ff ∷ (s₂,(s₁,a)) → ((s₁,s₂),a)
    ff (s₂,(s₁,a)) = ((s₁,s₂),a)
splitState ∷ (Functor m) ⇒ StateT (s₁,s₂) m a → StateT s₁ (StateT s₂ m) a
splitState aM = StateT $ \ s₁ → StateT $ \ s₂ → ff ^$ runStateT aM (s₁,s₂)
  where
    ff ∷ ((s₁,s₂),a) → (s₂,(s₁,a))
    ff ((s₁,s₂),a) = (s₂,(s₁,a))
instance (Functor m,MonadState s₂ m,Isomorphism s₁₂ (s₁,s₂)) ⇒ MonadState s₁₂ (AddStateT s₁₂ s₁ m) where
  stateI ∷ AddStateT s₁₂ s₁ m ↝ StateT s₁₂ (AddStateT s₁₂ s₁ m)
  -- Introduce both component states, merge them, and re-wrap.
  stateI =
    fmap AddStateT
    ∘ mapStateT isoFrom isoTo
    ∘ mergeState
    ∘ fmap (stateCommute ∘ fmap stateI)
    ∘ stateI
    ∘ runAddStateT
  stateE ∷ StateT s₁₂ (AddStateT s₁₂ s₁ m) ↝ AddStateT s₁₂ s₁ m
  -- Inverse pipeline of 'stateI'.
  stateE =
    AddStateT
    ∘ stateE
    ∘ fmap (fmap stateE ∘ stateCommute)
    ∘ splitState
    ∘ mapStateT isoTo isoFrom
    ∘ fmap runAddStateT
| davdar/darailude | src/FP/Prelude/Monads.hs | bsd-3-clause | 44,603 | 1,925 | 78 | 9,708 | 19,344 | 9,949 | 9,395 | -1 | -1 |
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE TypeFamilies #-}
module FirstQuantization.SpinHalf where
import FirstQuantization.Algebra.Operators
import FirstQuantization.Algebra.States
import FirstQuantization.Algebra.Types
import Protolude
-- | The two basis states of a spin-1/2 system (z-basis).
data Spin = SpinUp
          | SpinDown deriving (Eq, Show)
instance QuantumBasis Spin where
  -- Sum a function over the complete basis {up, down}.
  basisSum f = sum $ map f [SpinUp, SpinDown]
-- | Turn a pair of amplitudes (up component, down component) into a
-- kernel function over the 'Spin' basis.
spinKernel :: (Scalar, Scalar) -> (Spin -> Scalar)
spinKernel ~(up, down) = component
  where
    component SpinUp   = up
    component SpinDown = down
-- | z-basis eigenstates.
zUp :: QuantumState Spin
zUp = mkState (spinKernel (1,0))
zDown :: QuantumState Spin
zDown = mkState (spinKernel (0,1))
-- | x-basis eigenstates: (|up> ± |down>)/sqrt 2.
xUp :: QuantumState Spin
xUp = normalize (zUp + zDown)
xDown :: QuantumState Spin
xDown = normalize (zUp - zDown)
-- | y-basis eigenstates: (|up> ± i|down>)/sqrt 2.
yUp :: QuantumState Spin
yUp = normalize (zUp + (0 :+ 1 :: Scalar) |*| zDown)
yDown :: QuantumState Spin
yDown = normalize (zUp - (0 :+ 1 :: Scalar) |*| zDown)
-- | Tags for the three Pauli matrices.
data PauliMatrix = PauliX
                 | PauliY
                 | PauliZ deriving (Eq, Show)
instance OperatorKernel PauliMatrix Spin where
  -- Action of each Pauli matrix on the z-basis states:
  --   X swaps up/down; Y swaps with phases ±i; Z is diagonal with ±1.
  actOn pMatrix spin =
    case pMatrix of
      PauliX -> case spin of
        SpinUp -> zDown
        SpinDown -> zUp
      PauliY -> case spin of
        SpinUp -> (0 :+ 1 :: Scalar) |*| zDown
        SpinDown -> (0 :+ (-1) :: Scalar) |*| zUp
      PauliZ -> case spin of
        SpinUp -> zUp
        SpinDown -> (-1 :: Scalar) |*| zDown
-- | Pauli operators as 'QuantumOperator's.
pauliX :: QuantumOperator Spin
pauliX = mkOperator (actOn PauliX)
pauliY :: QuantumOperator Spin
pauliY = mkOperator (actOn PauliY)
pauliZ :: QuantumOperator Spin
pauliZ = mkOperator (actOn PauliZ)
-- | Angular-momentum components J = sigma / 2 (hbar = 1 units here).
jX :: QuantumOperator Spin
jX = (0.5 :: Scalar) |*| pauliX
jY :: QuantumOperator Spin
jY = (0.5 :: Scalar) |*| pauliY
jZ :: QuantumOperator Spin
jZ = (0.5 :: Scalar) |*| pauliZ
-- | Ladder operators J± = Jx ± i Jy.
jUp :: QuantumOperator Spin
jUp = jX + (0:+1 :: Scalar) |*| jY
jDown :: QuantumOperator Spin
jDown = jX - (0:+1 :: Scalar) |*| jY
-- | Generator i*(pi/2)*Jy for a rotation about the y axis.
rot :: QuantumOperator Spin
rot = (0 :+ 1 :: Scalar)|*| ((pi/2 :: Scalar) |*| jY)
-- | Real part of <up| exp(rot)^n-ish applied to |up>, rendered as text;
-- the meaning of the Int argument is whatever 'expOpN' takes as its
-- truncation/repetition parameter (assumption — confirm against expOpN).
rotRes :: Int -> Text
rotRes x = show (realPart $ (SpinUp |>|)$ expOpN rot zUp x :: Double)
| rodinalex/quantum | src/FirstQuantization/SpinHalf.hs | bsd-3-clause | 2,250 | 0 | 16 | 599 | 757 | 411 | 346 | 63 | 2 |
module I2 where
import C1
import C2
import T
-- Build the C2 witness for T by wrapping the C1 witness of the element.
instance (C1 a) => C2 (T a) where
  c2 = T c1
| phischu/fragnix | tests/quick/CrazyCycles/I2.hs | bsd-3-clause | 95 | 0 | 7 | 27 | 44 | 25 | 19 | 6 | 0 |
{-# LANGUAGE MultiParamTypeClasses, TypeFamilies, FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE TemplateHaskell #-}
{-# OPTIONS -Wall #-}
module FiniteStateMachine.Machine2 (
FSM(FSM), compile, FSM1
) where
import Basic.Types
import Basic.Memory
import Basic.Features
import Basic.Operations
import Control.Lens(makeLenses)
--------------------------------------------------------------------------
---------------------------- Finite State Machine
--------------------------------------------------------------------------
-- A finite state machine: current state, final-state collection,
-- transition function over (state, input), and remaining input.
data FSM s f i =
  FSM { _sta :: s
      , _finalstates :: f
      , _trans :: Trans f (s, i)
      , _inpt :: i }
-- Generates lenses 'sta', 'finalstates', 'trans', 'inpt'.
makeLenses ''FSM
-- The following is close to being automatable
-- Boilerplate projections wiring FSM's fields into the generic feature
-- classes from Basic.Features.
instance HasQ (FSM s f i) where
  type Q (FSM s f i) = s
  q = sta
instance HasFinal (FSM s f i) where
  type Final (FSM s f i) = f
  final = finalstates
instance HasTransition (FSM s f i) where
  transition = trans
instance HasInput (FSM s f i) where
  type Input (FSM s f i) = i
  input = inpt
instance HasState (FSM s f i) where
  type State (FSM s f i) = (s, i)
  -- Lens onto the (state, input) pair, rebuilding the FSM around it.
  state g (FSM s f tr i) = fmap (\(a, b) -> FSM a f tr b) (g (s, i))
instance HasStatic (FSM s f i) where
  type Static (FSM s f i) = f
  static = finalstates
compile :: (Forward m, HeadMem m) =>
(Value m -> qs -> qs) -> Trans d (qs, m)
compile f = Trans g
where g _ (qs, inp) = (f (focus inp) qs, next inp)
type FSM1 qs i c = FSM qs (c qs) (c i)
instance (SetLike c, Eq qs, IsAtProgramEnd (c i)) =>
EndCond (FSM qs (c qs) (c i)) where
endcond m = isProgramEnd m
| davidzhulijun/TAM | FiniteStateMachine/Machine3.hs | bsd-3-clause | 1,605 | 0 | 10 | 358 | 604 | 330 | 274 | 45 | 1 |
{-# LANGUAGE DoAndIfThenElse #-}
{-# LANGUAGE OverloadedStrings #-}
module AuthorizationHelper where
import Control.Concurrent.Chan
import Control.Concurrent.MVar
import Control.Monad.Trans (liftIO)
import Data.Maybe
import qualified Data.Text as T
import Finance.Blpapi.ElementFormatter
import Finance.Blpapi.ElementParser as P
import Finance.Blpapi.Event
import Finance.Blpapi.Session
import Finance.Blpapi.Types as BT
import Options.Applicative
-- | This should go in a different utility, but currently placing it here
-- | Unwrap an 'Either', aborting the 'Blpapi' computation with 'fail'
-- on a 'Left' error string.
throwOnError :: (Either String a) -> Blpapi a
throwOnError = either fail return
-- | The supported Bloomberg API authentication modes, as selected by the
-- @--auth@ command-line option.
data AuthenticationType = User
                        | App !String
                        | UserApp !String
                        | Dir !String
                        | None
                        deriving (Show)
-- | Authentication-options string for OS-logon (user) authentication.
authUser :: String
authUser = "AuthenticationType=OS_LOGON"
-- | Prefix for application-only authentication; the application name is
-- appended by 'getAuthString'.
authAppPrefix :: String
authAppPrefix = "AuthenticationMode=APPLICATION_ONLY;"
    ++ "ApplicationAuthenticationType=APPNAME_AND_KEY;ApplicationName="
-- | Prefix for combined user-and-application authentication.
authUserAppPrefix :: String
authUserAppPrefix = "AuthenticationMode=USER_AND_APPLICATION;"
    ++ "AuthenticationType=OS_LOGON;"
    ++ "ApplicationAuthenticationType=APPNAME_AND_KEY;ApplicationName="
-- | Prefix for directory-service authentication; the property name is
-- appended by 'getAuthString'.
authDirPrefix :: String
authDirPrefix = "AuthenticationType=DIRECTORY_SERVICE;DirSvcPropertyName="
-- | optparse-applicative parser for the @--auth@ option; defaults to 'User'
-- when the option is absent, and delegates the value syntax to
-- 'parseAutParserString'.
authParser :: Parser AuthenticationType
authParser = nullOption
     ( long "auth"
    <> value User
    <> metavar "AuthOptions"
    <> eitherReader parseAutParserString
    <> help ("authentication option: "
             ++ "user|none|app=<app>|userapp=<app>|dir=<property> (default: user)")
     )
-- | Parse the value of the @--auth@ command-line option.
--
-- Recognized forms: @user@, @none@, @app=\<app\>@, @userapp=\<app\>@ and
-- @dir=\<property\>@; anything else is rejected.
parseAutParserString :: String -> Either String AuthenticationType
parseAutParserString "user" = Right User
parseAutParserString "none" = Right None
parseAutParserString ('a':'p':'p':'=':xs) = Right (App xs)
-- Fixed: the following two cases previously produced 'App', discarding the
-- distinction the prefixes encode; they now build 'UserApp' and 'Dir'.
parseAutParserString ('u':'s':'e':'r':'a':'p':'p':'=':xs) = Right (UserApp xs)
parseAutParserString ('d':'i':'r':'=':xs) = Right (Dir xs)
parseAutParserString _ = Left "Bad Auth Argument"
-- | Turn an optional auth-option string into an 'AuthenticationType'.
--
-- 'Nothing' defaults to 'User'.  A 'Just' value is parsed with
-- 'parseAutParserString'; an unparsable string also falls back to 'User'.
-- (The previous implementation was 'undefined' for 'Just' and would crash.)
getAuthType :: Maybe String -> AuthenticationType
getAuthType Nothing = User
getAuthType (Just s) = either (const User) id (parseAutParserString s)
-- | Render an 'AuthenticationType' as the session authentication-options
-- string.  'None' (and any unrecognized constructor) renders as @\"\"@.
getAuthString :: AuthenticationType -> String
getAuthString authType =
    case authType of
      User      -> authUser
      App s     -> authAppPrefix ++ s
      UserApp s -> authUserAppPrefix ++ s
      Dir s     -> authDirPrefix ++ s
      _         -> ""
-- | Pull the generated token out of a \"TokenGenerationSuccess\" message,
-- or return the parser's error string.
extractToken :: Message -> Either String T.Text
extractToken m =
    P.getElement "TokenGenerationSuccess" (messageData m)
    >>= P.getElement "token"
    >>= P.getValue
-- | Fill the \"token\" sub-element of an authorization request.
populateAuthRequest :: T.Text -> Request -> Blpapi ()
populateAuthRequest token req =
  formatRequest req
     $! formatSubElement "token"
        $! setValue (BT.BlpString token)
-- | Event handler that ignores every event.
noopEventHandler :: Event -> Blpapi ()
noopEventHandler _ = return ()
-- | Optional callbacks invoked during authorization: one for the
-- authorization result event, one for subsequent update events.
data AuthHandlers = AuthHandlers {
      authHandlerResult :: Maybe SessionHandler,
      authHandlerUpdate :: Maybe SessionHandler
}
-- | Progress of an in-flight authorization request.
data AuthState = SendinAuth | AuthSuccess | AuthTerminated
-- | True when the message carries an \"AuthorizationFailure\" element,
-- i.e. the authorization request was rejected.
--
-- Rewritten with 'either' to drop the unused @str@ binding the original
-- case expression introduced (a @-Wall@ warning).
isMessageAuthFailure :: Message -> Bool
isMessageAuthFailure m =
    either (const False) (const True) $
        P.getElement "AuthorizationFailure" (messageData m)
-- | Session handler driving the authorization state machine.
--
-- While waiting for the result ('SendinAuth') it forwards the event to the
-- optional result handler, inspects the message for an authorization
-- failure, records the outcome in the 'MVar' and signals the waiting caller
-- over the 'Chan'.  After that, update events go to the update handler.
--
-- Fix: the original code left the 'MVar' empty in the 'AuthSuccess' and
-- 'AuthTerminated' branches ('takeMVar' with no matching 'putMVar'), so the
-- next incoming event would block forever.  The state is now always put
-- back.
authorizationEventHandler :: MVar AuthState
                             -> Chan (Either String ())
                             -> AuthHandlers
                             -> Event
                             -> Blpapi ()
authorizationEventHandler m ch (AuthHandlers r u) e = do
  authState <- liftIO $ takeMVar m
  case authState of
    SendinAuth -> do
      fromMaybe noopEventHandler r e
      liftIO $ if isMessageAuthFailure (eventContent e)
        then do
          putMVar m AuthTerminated
          writeChan ch $ Left $ "AuthFailed: " ++ show e
        else do
          putMVar m AuthSuccess
          writeChan ch $ Right ()
      return ()
    AuthSuccess -> do
      -- Restore the state before handing off to the update handler.
      liftIO $ putMVar m authState
      fromMaybe noopEventHandler u e
    AuthTerminated ->
      liftIO $ putMVar m authState
-- | Handler for token-generation events: extract the token (or error) and
-- hand it to the waiting caller over the channel.
tokenEventHandler :: Chan (Either String T.Text) -> Event -> Blpapi ()
tokenEventHandler ch e = do
  let tokenResult = extractToken (eventContent e)
  liftIO $ writeChan ch tokenResult
-- | Full authorization flow: open @//blp/apiauth@, generate a token, build
-- and send the authorization request, then block until
-- 'authorizationEventHandler' reports the outcome.  Returns the authorized
-- 'Identity' on success, an error string otherwise.
setupAuthorize :: AuthHandlers -> Blpapi (Either String Identity)
setupAuthorize handlers = do
    authService <- openService "//blp/apiauth" >>= throwOnError
    tokenChannel <- liftIO newChan
    generateToken' Nothing $ tokenEventHandler tokenChannel
    -- Blocks until tokenEventHandler delivers the token result.
    tokenMessage <- liftIO $ readChan tokenChannel
    iden <- createIdentity
    case tokenMessage of
      Left str -> return $ Left $ "Failed to get Token: " ++ show tokenMessage
      Right token -> do
        authReq <- createAuthorizationRequest authService >>= throwOnError
        populateAuthRequest token authReq
        authChan <- liftIO newChan
        authState <- liftIO $ newMVar SendinAuth
        sendAuthorizationRequest' authReq iden Nothing
            (authorizationEventHandler authState authChan handlers)
        -- Blocks until the authorization handler signals success/failure.
        authResult <- liftIO $ readChan authChan
        case authResult of
          Left err -> return $ Left err
          Right () -> return $ Right iden
-- | 'setupAuthorize' with no result/update callbacks.
authorize' :: Blpapi (Either String Identity)
authorize' = setupAuthorize (AuthHandlers Nothing Nothing)
| bitemyapp/blpapi-hs | examples/AuthorizationHelper.hs | mit | 5,275 | 0 | 19 | 1,238 | 1,351 | 664 | 687 | 131 | 4 |
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TupleSections #-}
-- | Alternative (and, in fact, more standard) representation
-- of a derivation trees, i.e., a tree of elementary trees.
module NLP.Partage.AStar.Deriv.Gorn
( Deriv (..)
, size
, deriv4show
, fromDeriv
) where
import qualified Control.Arrow as Arr
import qualified Data.Map.Strict as M
import qualified Data.Tree as R
import qualified NLP.Partage.AStar.Deriv as D
-- import qualified NLP.Partage.EdgeTree as Edge
import NLP.Partage.Tree (Path)
import qualified NLP.Partage.Tree.Other as O
---------------------------------------------------
-- Derivation Tree
---------------------------------------------------
-- | A derivation tree contains ETs in its nodes and Gorn addresses in its
-- edges. A Gorn address indicates to which node of the parent ET the given ET
-- attaches. Note that the address determines the type of the operation:
-- substitution or adjunction.
data Deriv n t = Deriv
  { rootET :: O.Tree n (Maybe t)
    -- ^ Root (elementary tree, ET) of the derivation tree
    -- (reminder: using the `rootET` name because it doesn't stem from
    -- the type that the root is an ET)
  , modifs :: M.Map Path [Deriv n t]
    -- ^ Derivations attached to the individual nodes (specified by the
    -- corresponding Gorn addresses) of the root ET; note that, in case of
    -- adjunction, many derivations can attach at one and the same Gorn address
    -- and in this case the attachment (adjunction) order matters.
  }
-- type Deriv n t = Edge.Tree (O.Tree n t) Gorn
-- | Number of nodes in the derivation tree, i.e. the number of elementary
-- trees taking part in the derivation (the root plus, recursively, every
-- modifier attached at any Gorn address).
size :: Deriv n t -> Int
size d = 1 + sum
  [ size child
  | children <- M.elems (modifs d)
  , child <- children ]
-- | Transform the derivation tree into a tree which is easy
-- to draw using the standard `R.draw` function.
-- Gorn addresses become 'Left' nodes interposed between an ET node and the
-- derivations attached at that address.
deriv4show :: Deriv n t -> R.Tree (Either Path (O.Node n (Maybe t)))
deriv4show =
  go
  where
    -- Render the root ET, then append one (path, subtree) child per modifier.
    go Deriv{..} = addChildren
      (fmap Right rootET)
      [ (path, go deriv)
      | (path, derivs) <- M.toList modifs
      , deriv <- derivs ]
    -- Attach each modifier under a dedicated 'Left path' marker node.
    addChildren R.Node{..} ts = R.Node
      { R.rootLabel = rootLabel
      , R.subForest = subForest ++
        [ R.Node (Left path) [deriv]
        | (path, deriv) <- ts ]
      }
---------------------------------------------------
-- Conversion
---------------------------------------------------
-- | Conversion from the base derivation data type.
-- The input is first normalized with 'D.normalize', then split into the
-- root ET and its Gorn-addressed modifiers.
fromDeriv :: D.Deriv D.UnNorm n t -> Deriv n t
fromDeriv =
  go . D.normalize
  where
    go t = Deriv
      { rootET = getRootET t
      , modifs = M.fromList
        [ (gorn, map go ts)
        | (gorn, ts) <- getModifs t ] }
-- | Extract the root ET from the given derivation.
getRootET :: D.Deriv D.Norm n t -> O.Tree n (Maybe t)
getRootET = fmap D.node
-- | Get the derivations (and their corresponding Gorn addresses)
-- modifying the rootET.
getModifs :: D.Deriv D.Norm n t -> [(Path, [D.Deriv D.Norm n t])]
getModifs =
  map (Arr.first reverse) . go []
  where
    -- Addresses are accumulated in reverse while descending, hence the
    -- 'Arr.first reverse' above.
    go gorn R.Node{..}
      = (gorn, D.modif rootLabel)
      : concat
        [ go (i:gorn) child
        | (i, child) <- zip [0..] subForest ]
| kawu/partage | src/NLP/Partage/AStar/Deriv/Gorn.hs | bsd-2-clause | 3,229 | 0 | 13 | 758 | 708 | 400 | 308 | 52 | 1 |
{-# LANGUAGE CPP #-}
{-# LANGUAGE ConstraintKinds #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TemplateHaskell #-}
-- | Perform a build
module Stack.Build.Execute
( printPlan
, preFetch
, executePlan
-- * Running Setup.hs
, ExecuteEnv
, withExecuteEnv
, withSingleContext
) where
import Control.Applicative
import Control.Arrow ((&&&))
import Control.Concurrent.Execute
import Control.Concurrent.Async (withAsync, wait)
import Control.Concurrent.MVar.Lifted
import Control.Concurrent.STM
import Control.Exception.Enclosed (catchIO, tryIO)
import Control.Exception.Lifted
import Control.Monad (liftM, when, unless, void, join, guard, filterM, (<=<))
import Control.Monad.Catch (MonadCatch, MonadMask)
import Control.Monad.IO.Class
import Control.Monad.Logger
import Control.Monad.Reader (MonadReader, asks)
import Control.Monad.Trans.Control (liftBaseWith)
import Control.Monad.Trans.Resource
import qualified Data.ByteString as S
import Data.ByteString (ByteString)
import qualified Data.ByteString.Char8 as S8
import Data.Conduit
import qualified Data.Conduit.Binary as CB
import qualified Data.Conduit.List as CL
import Data.Foldable (forM_)
import Data.Function
import Data.IORef.RunOnce (runOnce)
import Data.List
import Data.Map.Strict (Map)
import qualified Data.Map.Strict as Map
import Data.Maybe
import Data.Monoid ((<>))
import Data.Set (Set)
import qualified Data.Set as Set
import Data.Streaming.Process hiding (callProcess, env)
import qualified Data.Streaming.Process as Process
import Data.Traversable (forM)
import Data.Text (Text)
import qualified Data.Text as T
import Data.Word8 (_colon)
import Distribution.System (OS (Windows),
Platform (Platform))
import qualified Distribution.Text
import Language.Haskell.TH as TH (location)
import Network.HTTP.Client.Conduit (HasHttpManager)
import Path
import Path.IO
import Prelude hiding (FilePath, writeFile)
import Stack.Build.Cache
import Stack.Build.Coverage
import Stack.Build.Haddock
import Stack.Build.Installed
import Stack.Build.Source
import Stack.Types.Build
import Stack.Fetch as Fetch
import Stack.GhcPkg
import Stack.Package
import Stack.PackageDump
import Stack.Constants
import Stack.Types
import Stack.Types.StackT
import Stack.Types.Internal
import qualified System.Directory as D
import System.Environment (getExecutablePath)
import System.Exit (ExitCode (ExitSuccess))
import qualified System.FilePath as FP
import System.IO
import System.IO.Temp (withSystemTempDirectory)
import System.PosixCompat.Files (createLink)
import System.Process.Read
import System.Process.Run
import System.Process.Log (showProcessArgDebug)
#if !MIN_VERSION_process(1,2,1)
import System.Process.Internals (createProcess_)
#endif
-- | Constraint synonym bundling everything the build-execution code needs:
-- IO, environment readers (HTTP manager, build/env config, terminal),
-- logging, and exception handling.
type M env m = (MonadIO m,MonadReader env m,HasHttpManager env,HasBuildConfig env,MonadLogger m,MonadBaseControl IO m,MonadCatch m,MonadMask m,HasLogLevel env,HasEnvConfig env,HasTerminal env)
-- | Download ahead of time every upstream package the plan will build.
-- Local packages need no fetching and contribute nothing to the set.
preFetch :: M env m => Plan -> m ()
preFetch plan
    | Set.null idents = $logDebug "Nothing to fetch"
    | otherwise = do
        $logDebug $ T.pack $
            "Prefetching: " ++
            intercalate ", " (map packageIdentifierString $ Set.toList idents)
        menv <- getMinimalEnvOverride
        fetchPackages menv idents
  where
    idents = Set.unions $ map toIdent $ Map.toList $ planTasks plan
    -- Only upstream tasks name a package to fetch.
    toIdent (name, task) =
        case taskType task of
            TTLocal _ -> Set.empty
            TTUpstream package _ -> Set.singleton $ PackageIdentifier
                name
                (packageVersion package)
-- | Log a dry-run report of the plan: what would be unregistered, built,
-- tested, benchmarked, and which executables would be installed.
printPlan :: M env m
          => Plan
          -> m ()
printPlan plan = do
    case Map.elems $ planUnregisterLocal plan of
        [] -> $logInfo "No packages would be unregistered."
        xs -> do
            $logInfo "Would unregister locally:"
            forM_ xs $ \(ident, mreason) -> $logInfo $ T.concat
                [ T.pack $ packageIdentifierString ident
                , case mreason of
                    Nothing -> ""
                    Just reason -> T.concat
                        [ " ("
                        , reason
                        , ")"
                        ]
                ]
    $logInfo ""
    case Map.elems $ planTasks plan of
        [] -> $logInfo "Nothing to build."
        xs -> do
            $logInfo "Would build:"
            mapM_ ($logInfo . displayTask) xs
    let hasTests = not . Set.null . lptbTests
        hasBenches = not . Set.null . lptbBenches
        tests = Map.elems $ fmap fst $ Map.filter (hasTests . snd) $ planFinals plan
        benches = Map.elems $ fmap fst $ Map.filter (hasBenches . snd) $ planFinals plan
    unless (null tests) $ do
        $logInfo ""
        $logInfo "Would test:"
        mapM_ ($logInfo . displayTask) tests
    unless (null benches) $ do
        $logInfo ""
        $logInfo "Would benchmark:"
        mapM_ ($logInfo . displayTask) benches
    $logInfo ""
    case Map.toList $ planInstallExes plan of
        [] -> $logInfo "No executables to be installed."
        xs -> do
            $logInfo "Would install executables:"
            forM_ xs $ \(name, loc) -> $logInfo $ T.concat
                [ name
                , " from "
                , case loc of
                    Snap -> "snapshot"
                    Local -> "local"
                , " database"
                ]
-- | For a dry run: render a one-line description of a task — which database
-- it installs into, where its source comes from, and which still-missing
-- dependencies must be built first.
displayTask :: Task -> Text
displayTask task = T.pack $ concat
    [ packageIdentifierString $ taskProvides task
    , ": database="
    , case taskLocation task of
        Snap -> "snapshot"
        Local -> "local"
    , ", source="
    , case taskType task of
        TTLocal lp -> concat
            [ toFilePath $ lpDir lp
            ]
        TTUpstream _ _ -> "package index"
    , if Set.null missing
        then ""
        else ", after: " ++ intercalate "," (map packageIdentifierString $ Set.toList missing)
    ]
  where
    missing = tcoMissing $ taskConfigOpts task
-- | Shared environment for executing a build plan; constructed once by
-- 'withExecuteEnv' and threaded through all build actions.
data ExecuteEnv = ExecuteEnv
    { eeEnvOverride    :: !EnvOverride
    -- ^ Lock serializing configure steps (see note in 'withExecuteEnv').
    , eeConfigureLock  :: !(MVar ())
    , eeInstallLock    :: !(MVar ())
    , eeBuildOpts      :: !BuildOpts
    , eeBaseConfigOpts :: !BaseConfigOpts
    -- ^ Package IDs registered so far; updated as builds complete.
    , eeGhcPkgIds      :: !(TVar (Map PackageIdentifier Installed))
    , eeTempDir        :: !(Path Abs Dir)
    , eeSetupHs        :: !(Path Abs File)
    -- ^ Temporary Setup.hs for simple builds
    , eeSetupExe       :: !(Maybe (Path Abs File))
    -- ^ Compiled version of eeSetupHs
    , eeCabalPkgVer    :: !Version
    -- ^ Number of wanted local packages; used to decide console output mode.
    , eeTotalWanted    :: !Int
    , eeWanted         :: !(Set PackageName)
    , eeLocals         :: ![LocalPackage]
    , eeSourceMap      :: !SourceMap
    , eeGlobalDB       :: !(Path Abs Dir)
    , eeGlobalPackages :: ![DumpPackage () ()]
    }
-- | Get a compiled Setup exe
-- The executable is cached under @setup-exe-cache@ in the stack root, keyed
-- by Cabal version, platform and compiler version; it is compiled on first
-- use (into @tmp-@ files, then renamed into place to keep the cache
-- consistent).
getSetupExe :: M env m
            => Path Abs File -- ^ Setup.hs input file
            -> Path Abs Dir -- ^ temporary directory
            -> m (Maybe (Path Abs File))
getSetupExe setupHs tmpdir = do
    wc <- getWhichCompiler
    econfig <- asks getEnvConfig
    let config = getConfig econfig
        baseNameS = concat
            [ "setup-Simple-Cabal-"
            , versionString $ envConfigCabalVersion econfig
            , "-"
            , Distribution.Text.display $ configPlatform config
            , "-"
            , T.unpack $ compilerVersionName
                       $ envConfigCompilerVersion econfig
            ]
        exeNameS = baseNameS ++
            case configPlatform config of
                Platform _ Windows -> ".exe"
                _ -> ""
        outputNameS =
            case wc of
                Ghc -> exeNameS
                Ghcjs -> baseNameS ++ ".jsexe"
        jsExeNameS =
            baseNameS ++ ".jsexe"
        setupDir =
            configStackRoot config </>
            $(mkRelDir "setup-exe-cache")
    exePath <- fmap (setupDir </>) $ parseRelFile exeNameS
    jsExePath <- fmap (setupDir </>) $ parseRelDir jsExeNameS
    exists <- liftIO $ D.doesFileExist $ toFilePath exePath
    if exists
        then return $ Just exePath
        else do
            tmpExePath <- fmap (setupDir </>) $ parseRelFile $ "tmp-" ++ exeNameS
            tmpOutputPath <- fmap (setupDir </>) $ parseRelFile $ "tmp-" ++ outputNameS
            tmpJsExePath <- fmap (setupDir </>) $ parseRelDir $ "tmp-" ++ jsExeNameS
            liftIO $ D.createDirectoryIfMissing True $ toFilePath setupDir
            menv <- getMinimalEnvOverride
            -- Compile Setup.hs against only base and the exact Cabal version
            -- from the environment config, with all other packages hidden.
            let args =
                    [ "-clear-package-db"
                    , "-global-package-db"
                    , "-hide-all-packages"
                    , "-package"
                    , "base"
                    , "-package"
                    , "Cabal-" ++ versionString (envConfigCabalVersion econfig)
                    , toFilePath setupHs
                    , "-o"
                    , toFilePath tmpOutputPath
                    ] ++
                    ["-build-runner" | wc == Ghcjs]
            runIn tmpdir (compilerExeName wc) menv args Nothing
            when (wc == Ghcjs) $ renameDir tmpJsExePath jsExePath
            renameFile tmpExePath exePath
            return $ Just exePath
-- | Set up an 'ExecuteEnv' (temp dir, locks, generic Setup.hs and its
-- compiled form, package-ID TVar) and run the continuation with it.
withExecuteEnv :: M env m
               => EnvOverride
               -> BuildOpts
               -> BaseConfigOpts
               -> [LocalPackage]
               -> [DumpPackage () ()] -- ^ global packages
               -> SourceMap
               -> (ExecuteEnv -> m a)
               -> m a
withExecuteEnv menv bopts baseConfigOpts locals globals sourceMap inner = do
    withSystemTempDirectory stackProgName $ \tmpdir -> do
        tmpdir' <- parseAbsDir tmpdir
        configLock <- newMVar ()
        installLock <- newMVar ()
        idMap <- liftIO $ newTVarIO Map.empty
        let setupHs = tmpdir' </> $(mkRelFile "Setup.hs")
        liftIO $ writeFile (toFilePath setupHs) "import Distribution.Simple\nmain = defaultMain"
        setupExe <- getSetupExe setupHs tmpdir'
        cabalPkgVer <- asks (envConfigCabalVersion . getEnvConfig)
        globalDB <- getGlobalDB menv =<< getWhichCompiler
        inner ExecuteEnv
            { eeEnvOverride = menv
            , eeBuildOpts = bopts
             -- Uncertain as to why we cannot run configures in parallel. This appears
             -- to be a Cabal library bug. Original issue:
             -- https://github.com/fpco/stack/issues/84. Ideally we'd be able to remove
             -- this.
            , eeConfigureLock = configLock
            , eeInstallLock = installLock
            , eeBaseConfigOpts = baseConfigOpts
            , eeGhcPkgIds = idMap
            , eeTempDir = tmpdir'
            , eeSetupHs = setupHs
            , eeSetupExe = setupExe
            , eeCabalPkgVer = cabalPkgVer
            , eeTotalWanted = length $ filter lpWanted locals
            , eeWanted = wantedLocalPackages locals
            , eeLocals = locals
            , eeSourceMap = sourceMap
            , eeGlobalDB = globalDB
            , eeGlobalPackages = globals
            }
-- | Perform the actual plan
-- After running the plan proper (via 'executePlan''), copy any planned
-- executables into the local bin directory (warning when that directory is
-- not on PATH) and finally run any @--exec@ commands from the build options.
executePlan :: M env m
            => EnvOverride
            -> BuildOpts
            -> BaseConfigOpts
            -> [LocalPackage]
            -> [DumpPackage () ()] -- ^ globals
            -> SourceMap
            -> InstalledMap
            -> Plan
            -> m ()
executePlan menv bopts baseConfigOpts locals globals sourceMap installedMap plan = do
    withExecuteEnv menv bopts baseConfigOpts locals globals sourceMap (executePlan' installedMap plan)
    unless (Map.null $ planInstallExes plan) $ do
        snapBin <- (</> bindirSuffix) `liftM` installationRootDeps
        localBin <- (</> bindirSuffix) `liftM` installationRootLocal
        destDir <- asks $ configLocalBin . getConfig
        createTree destDir
        destDir' <- liftIO . D.canonicalizePath . toFilePath $ destDir
        isInPATH <- liftIO . fmap (any (FP.equalFilePath destDir')) . (mapM D.canonicalizePath <=< filterM D.doesDirectoryExist) $ (envSearchPath menv)
        when (not isInPATH) $
            $logWarn $ T.concat
                [ "Installation path "
                , T.pack destDir'
                , " not found in PATH environment variable"
                ]
        platform <- asks getPlatform
        let ext =
                case platform of
                    Platform _ Windows -> ".exe"
                    _ -> ""
        currExe <- liftIO getExecutablePath -- needed for windows, see below
        installed <- forM (Map.toList $ planInstallExes plan) $ \(name, loc) -> do
            let bindir =
                    case loc of
                        Snap -> snapBin
                        Local -> localBin
            mfp <- resolveFileMaybe bindir $ T.unpack name ++ ext
            case mfp of
                Nothing -> do
                    $logWarn $ T.concat
                        [ "Couldn't find executable "
                        , name
                        , " in directory "
                        , T.pack $ toFilePath bindir
                        ]
                    return Nothing
                Just file -> do
                    let destFile = destDir' FP.</> T.unpack name ++ ext
                    $logInfo $ T.concat
                        [ "Copying from "
                        , T.pack $ toFilePath file
                        , " to "
                        , T.pack destFile
                        ]
                    -- Windows cannot overwrite the running executable, so use
                    -- the rename/copy shuffle in that one case.
                    liftIO $ case platform of
                        Platform _ Windows | FP.equalFilePath destFile currExe ->
                            windowsRenameCopy (toFilePath file) destFile
                        _ -> D.copyFile (toFilePath file) destFile
                    return $ Just (destDir', [T.append name (T.pack ext)])
        let destToInstalled = Map.fromListWith (++) (catMaybes installed)
        unless (Map.null destToInstalled) $ $logInfo ""
        forM_ (Map.toList destToInstalled) $ \(dest, executables) -> do
            $logInfo $ T.concat
                [ "Copied executables to "
                , T.pack dest
                , ":"]
            forM_ executables $ \exe -> $logInfo $ T.append "- " exe
    config <- asks getConfig
    menv' <- liftIO $ configEnvOverride config EnvSettings
                    { esIncludeLocals = True
                    , esIncludeGhcPackagePath = True
                    , esStackExe = True
                    , esLocaleUtf8 = False
                    }
    forM_ (boptsExec bopts) $ \(cmd, args) -> do
        $logProcessRun cmd args
        callProcess Nothing menv' cmd args
-- | Windows refuses to overwrite a running executable, so instead of a
-- plain copy we shuffle files: copy @src@ to a temporary @.new@ file next to
-- the destination, move the existing @dest@ aside to @.old@, then move the
-- @.new@ file into place.  The @.old@ file is intentionally left behind.
windowsRenameCopy :: FilePath -> FilePath -> IO ()
windowsRenameCopy src dest = do
    let staging = dest ++ ".new"
        parked  = dest ++ ".old"
    D.copyFile src staging
    D.renameFile dest parked
    D.renameFile staging dest
-- | Perform the actual plan (internal)
-- First unregisters any stale local packages, then converts the plan's
-- tasks and finals into 'Action's and runs them concurrently with a sticky
-- progress line; afterwards generates Haddock indexes and the HPC markup
-- index when requested.
executePlan' :: M env m
             => InstalledMap
             -> Plan
             -> ExecuteEnv
             -> m ()
executePlan' installedMap plan ee@ExecuteEnv {..} = do
    wc <- getWhichCompiler
    cv <- asks $ envConfigCompilerVersion . getEnvConfig
    case Map.toList $ planUnregisterLocal plan of
        [] -> return ()
        ids -> do
            localDB <- packageDatabaseLocal
            forM_ ids $ \(id', (ident, mreason)) -> do
                $logInfo $ T.concat
                    [ T.pack $ packageIdentifierString ident
                    , ": unregistering"
                    , case mreason of
                        Nothing -> ""
                        Just reason -> T.concat
                            [ " ("
                            , reason
                            , ")"
                            ]
                    ]
                unregisterGhcPkgId eeEnvOverride wc cv localDB id' ident
    -- Yes, we're explicitly discarding result values, which in general would
    -- be bad. monad-unlift does this all properly at the type system level,
    -- but I don't want to pull it in for this one use case, when we know that
    -- stack always using transformer stacks that are safe for this use case.
    runInBase <- liftBaseWith $ \run -> return (void . run)
    -- Merge build tasks and final (test/bench) tasks keyed by package.
    let actions = concatMap (toActions installedMap' runInBase ee) $ Map.elems $ Map.mergeWithKey
            (\_ b f -> Just (Just b, Just f))
            (fmap (\b -> (Just b, Nothing)))
            (fmap (\f -> (Nothing, Just f)))
            (planTasks plan)
            (planFinals plan)
    threads <- asks $ configJobs . getConfig
    concurrentTests <- asks $ configConcurrentTests . getConfig
    let keepGoing =
            case boptsKeepGoing eeBuildOpts of
                Just kg -> kg
                Nothing -> boptsTests eeBuildOpts || boptsBenchmarks eeBuildOpts
        concurrentFinal =
            -- TODO it probably makes more sense to use a lock for test suites
            -- and just have the execution blocked. Turning off all concurrency
            -- on finals based on the --test option doesn't fit in well.
            if boptsTests eeBuildOpts
                then concurrentTests
                else True
    terminal <- asks getTerminal
    errs <- liftIO $ runActions threads keepGoing concurrentFinal actions $ \doneVar -> do
        let total = length actions
            loop prev
                | prev == total =
                    runInBase $ $logStickyDone ("Completed all " <> T.pack (show total) <> " actions.")
                | otherwise = do
                    when terminal $ runInBase $
                        $logSticky ("Progress: " <> T.pack (show prev) <> "/" <> T.pack (show total))
                    done <- atomically $ do
                        done <- readTVar doneVar
                        check $ done /= prev
                        return done
                    loop done
        if total > 1
            then loop 0
            else return ()
    unless (null errs) $ throwM $ ExecutionFailure errs
    when (boptsHaddock eeBuildOpts) $ do
        generateLocalHaddockIndex eeEnvOverride wc eeBaseConfigOpts eeLocals
        generateDepsHaddockIndex eeEnvOverride wc eeBaseConfigOpts eeLocals
        generateSnapHaddockIndex eeEnvOverride wc eeBaseConfigOpts eeGlobalDB
    when (toCoverage $ boptsTestOpts eeBuildOpts) generateHpcMarkupIndex
  where
    -- Drop packages scheduled for unregistration from the installed map, so
    -- downstream actions treat them as not installed.
    installedMap' = Map.difference installedMap
                  $ Map.fromList
                  $ map (\(ident, _) -> (packageIdentifierName ident, ()))
                  $ Map.elems
                  $ planUnregisterLocal plan
-- | Translate one package's (build task, final task) pair into concurrency
-- 'Action's: an optional build action plus an optional test/bench action
-- that depends on the build.
toActions :: M env m
          => InstalledMap
          -> (m () -> IO ())
          -> ExecuteEnv
          -> (Maybe Task, Maybe (Task, LocalPackageTB)) -- build and final
          -> [Action]
toActions installedMap runInBase ee (mbuild, mfinal) =
    abuild ++ afinal
  where
    abuild =
        case mbuild of
            Nothing -> []
            Just task@Task {..} ->
                [ Action
                    { actionId = ActionId taskProvides ATBuild
                    , actionDeps =
                        (Set.map (\ident -> ActionId ident ATBuild) (tcoMissing taskConfigOpts))
                    , actionDo = \ac -> runInBase $ singleBuild runInBase ac ee task installedMap
                    }
                ]
    afinal =
        case mfinal of
            Nothing -> []
            Just (task@Task {..}, lptb) ->
                [ Action
                    { actionId = ActionId taskProvides ATFinal
                    , actionDeps = addBuild taskProvides $
                        (Set.map (\ident -> ActionId ident ATBuild) (tcoMissing taskConfigOpts))
                    , actionDo = \ac -> runInBase $ do
                        unless (Set.null $ lptbTests lptb) $ do
                            singleTest runInBase topts lptb ac ee task installedMap
                        unless (Set.null $ lptbBenches lptb) $ do
                            singleBench runInBase beopts lptb ac ee task installedMap
                    }
                ]
      where
        -- The final action must wait for this package's own build when one
        -- is scheduled.
        addBuild ident =
            case mbuild of
                Nothing -> id
                Just _ -> Set.insert $ ActionId ident ATBuild
    bopts = eeBuildOpts ee
    topts = boptsTestOpts bopts
    beopts = boptsBenchmarkOpts bopts
-- | Generate the ConfigCache
-- Resolves the task's missing dependencies against the IDs registered so
-- far in 'eeGhcPkgIds', and builds the cache of configure options,
-- dependencies, components and haddock flag used to decide reconfiguration.
getConfigCache :: MonadIO m
               => ExecuteEnv -> Task -> [Text]
               -> m (Map PackageIdentifier GhcPkgId, ConfigCache)
getConfigCache ExecuteEnv {..} Task {..} extra = do
    idMap <- liftIO $ readTVarIO eeGhcPkgIds
    let getMissing ident =
            case Map.lookup ident idMap of
                Nothing -> error "singleBuild: invariant violated, missing package ID missing"
                Just (Library ident' x) -> assert (ident == ident') $ Just (ident, x)
                Just (Executable _) -> Nothing
        missing' = Map.fromList $ mapMaybe getMissing $ Set.toList missing
        TaskConfigOpts missing mkOpts = taskConfigOpts
        opts = mkOpts missing'
        allDeps = Set.fromList $ Map.elems missing' ++ Map.elems taskPresent
        cache = ConfigCache
            { configCacheOpts = opts
                { coNoDirs = coNoDirs opts ++ map T.unpack extra
                }
            , configCacheDeps = allDeps
            , configCacheComponents =
                case taskType of
                    TTLocal lp -> Set.map renderComponent $ lpComponents lp
                    TTUpstream _ _ -> Set.empty
            , configCacheHaddock =
                shouldHaddockPackage eeBuildOpts eeWanted (packageIdentifierName taskProvides)
            }
        allDepsMap = Map.union missing' taskPresent
    return (allDepsMap, cache)
-- | Ensure that the configuration for the package matches what is given
-- Reconfigures when forced by @--reconfigure@, when the cached config
-- differs, or when the .cabal file's modification time changed.  Returns
-- whether a configure was actually run.
ensureConfig :: M env m
             => ConfigCache -- ^ newConfigCache
             -> Path Abs Dir -- ^ package directory
             -> ExecuteEnv
             -> m () -- ^ announce
             -> (Bool -> [String] -> m ()) -- ^ cabal
             -> Path Abs File -- ^ .cabal file
             -> m Bool
ensureConfig newConfigCache pkgDir ExecuteEnv {..} announce cabal cabalfp = do
    newCabalMod <- liftIO (fmap modTime (D.getModificationTime (toFilePath cabalfp)))
    needConfig <-
        if boptsReconfigure eeBuildOpts
            then return True
            else do
                -- Determine the old and new configuration in the local directory, to
                -- determine if we need to reconfigure.
                mOldConfigCache <- tryGetConfigCache pkgDir
                mOldCabalMod <- tryGetCabalMod pkgDir
                return $ mOldConfigCache /= Just newConfigCache
                      || mOldCabalMod /= Just newCabalMod
    let ConfigureOpts dirs nodirs = configCacheOpts newConfigCache
    -- Configure runs are serialized via eeConfigureLock (Cabal library bug,
    -- see withExecuteEnv).
    when needConfig $ withMVar eeConfigureLock $ \_ -> do
        deleteCaches pkgDir
        announce
        cabal False $ "configure" : dirs ++ nodirs
        writeConfigCache pkgDir newConfigCache
        writeCabalMod pkgDir newCabalMod
    return needConfig
-- | Log a message for a task, prefixed with the package identifier the task
-- provides, e.g. @foo-1.2.3: configure@.
announceTask :: MonadLogger m => Task -> Text -> m ()
announceTask task msg =
    $logInfo (prefix <> ": " <> msg)
  where
    prefix = T.pack (packageIdentifierString (taskProvides task))
-- | Set up everything needed to run Setup.hs for one task — the unpacked
-- package, an optional log file (console output is used when this is the
-- only wanted package), and a @cabal@ runner function — then hand them to
-- the continuation.
withSingleContext :: M env m
                  => (m () -> IO ())
                  -> ActionContext
                  -> ExecuteEnv
                  -> Task
                  -> Maybe (Map PackageIdentifier GhcPkgId)
                  -- ^ All dependencies' package ids to provide to Setup.hs. If
                  -- Nothing, just provide global and snapshot package
                  -- databases.
                  -> Maybe String
                  -> (  Package
                     -> Path Abs File
                     -> Path Abs Dir
                     -> (Bool -> [String] -> m ())
                     -> (Text -> m ())
                     -> Bool
                     -> Maybe (Path Abs File, Handle)
                     -> m a)
                  -> m a
withSingleContext runInBase ActionContext {..} ExecuteEnv {..} task@Task {..} mdeps msuffix inner0 =
    withPackage $ \package cabalfp pkgDir ->
    withLogFile package $ \mlogFile ->
    withCabal package pkgDir mlogFile $ \cabal ->
    inner0 package cabalfp pkgDir cabal announce console mlogFile
  where
    announce = announceTask task
    wanted =
        case taskType of
            TTLocal lp -> lpWanted lp
            TTUpstream _ _ -> False
    -- Send output to the console only when this is the sole remaining wanted
    -- package; otherwise output goes to a log file.
    console = wanted
        && all (\(ActionId ident _) -> ident == taskProvides) (Set.toList acRemaining)
        && eeTotalWanted == 1
    -- Local packages are already on disk; upstream ones are unpacked into
    -- the shared temp dir first.
    withPackage inner =
        case taskType of
            TTLocal lp -> inner (lpPackage lp) (lpCabalFile lp) (lpDir lp)
            TTUpstream package _ -> do
                mdist <- liftM Just distRelativeDir
                m <- unpackPackageIdents eeEnvOverride eeTempDir mdist $ Set.singleton taskProvides
                case Map.toList m of
                    [(ident, dir)]
                        | ident == taskProvides -> do
                            let name = packageIdentifierName taskProvides
                            cabalfpRel <- parseRelFile $ packageNameString name ++ ".cabal"
                            let cabalfp = dir </> cabalfpRel
                            inner package cabalfp dir
                    _ -> error $ "withPackage: invariant violated: " ++ show m
    withLogFile package inner
        | console = inner Nothing
        | otherwise = do
            logPath <- buildLogPath package msuffix
            createTree (parent logPath)
            let fp = toFilePath logPath
            bracket
                (liftIO $ openBinaryFile fp WriteMode)
                (liftIO . hClose)
                $ \h -> inner (Just (logPath, h))
    -- Build the cabal runner: decides between the cached Setup executable
    -- (simple build type), runhaskell, or a ghcjs-compiled setup.
    withCabal package pkgDir mlogFile inner = do
        config <- asks getConfig
        menv <- liftIO $ configEnvOverride config EnvSettings
            { esIncludeLocals = taskLocation task == Local
            , esIncludeGhcPackagePath = False
            , esStackExe = False
            , esLocaleUtf8 = True
            }
        getRunhaskellPath <- runOnce $ liftIO $ join $ findExecutable menv "runhaskell"
        getGhcjsPath <- runOnce $ liftIO $ join $ findExecutable menv "ghcjs"
        distRelativeDir' <- distRelativeDir
        esetupexehs <-
            -- Avoid broken Setup.hs files causing problems for simple build
            -- types, see:
            -- https://github.com/commercialhaskell/stack/issues/370
            case (packageSimpleType package, eeSetupExe) of
                (True, Just setupExe) -> return $ Left setupExe
                _ -> liftIO $ fmap Right $ getSetupHs pkgDir
        inner $ \stripTHLoading args -> do
            let cabalPackageArg =
                    "-package=" ++ packageIdentifierString
                                       (PackageIdentifier cabalPackageName
                                                          eeCabalPkgVer)
                packageArgs =
                    case mdeps of
                        Just deps ->
                            -- Stack always builds with the global Cabal for various
                            -- reproducibility issues.
                            let depsMinusCabal
                                 = map ghcPkgIdString
                                 $ Set.toList
                                 $ addGlobalPackages deps eeGlobalPackages
                            in
                                "-clear-package-db"
                              : "-global-package-db"
                              : ("-package-db=" ++ toFilePath (bcoSnapDB eeBaseConfigOpts))
                              : ("-package-db=" ++ toFilePath (bcoLocalDB eeBaseConfigOpts))
                              : "-hide-all-packages"
                              : cabalPackageArg
                              : map ("-package-id=" ++) depsMinusCabal
                        -- This branch is debatable. It adds access to the
                        -- snapshot package database for Cabal. There are two
                        -- possible objections:
                        --
                        -- 1. This doesn't isolate the build enough; arbitrary
                        -- other packages available could cause the build to
                        -- succeed or fail.
                        --
                        -- 2. This doesn't provide enough packages: we should also
                        -- include the local database when building local packages.
                        --
                        -- Currently, this branch is only taken via `stack sdist`.
                        Nothing ->
                            [ cabalPackageArg
                            , "-clear-package-db"
                            , "-global-package-db"
                            , "-package-db=" ++ toFilePath (bcoSnapDB eeBaseConfigOpts)
                            ]
                setupArgs = ("--builddir=" ++ toFilePath distRelativeDir') : args
                runExe exeName fullArgs = do
                    $logProcessRun (toFilePath exeName) fullArgs
                    -- Use createProcess_ to avoid the log file being closed afterwards
                    (Nothing, moutH, merrH, ph) <- liftIO $ createProcess_ "singleBuild" cp
                    let makeAbsolute = stripTHLoading -- If users want control, we should add a config option for this
                    ec <-
                        liftIO $
                        withAsync (runInBase $ maybePrintBuildOutput stripTHLoading makeAbsolute LevelInfo mlogFile moutH) $ \outThreadID ->
                        withAsync (runInBase $ maybePrintBuildOutput False makeAbsolute LevelWarn mlogFile merrH) $ \errThreadID -> do
                            ec <- waitForProcess ph
                            wait errThreadID
                            wait outThreadID
                            return ec
                    case ec of
                        ExitSuccess -> return ()
                        _ -> do
                            bs <- liftIO $
                                case mlogFile of
                                    Nothing -> return ""
                                    Just (logFile, h) -> do
                                        hClose h
                                        S.readFile $ toFilePath logFile
                            throwM $ CabalExitedUnsuccessfully
                                ec
                                taskProvides
                                exeName
                                fullArgs
                                (fmap fst mlogFile)
                                bs
                  where
                    cp0 = proc (toFilePath exeName) fullArgs
                    cp = cp0
                        { cwd = Just $ toFilePath pkgDir
                        , Process.env = envHelper menv
                        -- Ideally we'd create a new pipe here and then close it
                        -- below to avoid the child process from taking from our
                        -- stdin. However, if we do this, the child process won't
                        -- be able to get the codepage on Windows that we want.
                        -- See:
                        -- https://github.com/commercialhaskell/stack/issues/738
                        -- , std_in = CreatePipe
                        , std_out =
                            case mlogFile of
                                Nothing -> CreatePipe
                                Just (_, h) -> UseHandle h
                        , std_err =
                            case mlogFile of
                                Nothing -> CreatePipe
                                Just (_, h) -> UseHandle h
                        }
            wc <- getWhichCompiler
            (exeName, fullArgs) <- case (esetupexehs, wc) of
                (Left setupExe, _) -> return (setupExe, setupArgs)
                (Right setuphs, Ghc) -> do
                    exeName <- getRunhaskellPath
                    let fullArgs = packageArgs ++ (toFilePath setuphs : setupArgs)
                    return (exeName, fullArgs)
                (Right setuphs, Ghcjs) -> do
                    distDir <- distDirFromDir pkgDir
                    let setupDir = distDir </> $(mkRelDir "setup")
                        outputFile = setupDir </> $(mkRelFile "setup")
                    createTree setupDir
                    ghcjsPath <- getGhcjsPath
                    runExe ghcjsPath $
                        [ "--make"
                        , "-odir", toFilePath setupDir
                        , "-hidir", toFilePath setupDir
                        , "-i", "-i."
                        ] ++ packageArgs ++
                        [ toFilePath setuphs
                        , "-o", toFilePath outputFile
                        , "-build-runner"
                        ]
                    return (outputFile, setupArgs)
            runExe exeName $ (if boptsCabalVerbose eeBuildOpts then ("--verbose":) else id) fullArgs
    maybePrintBuildOutput stripTHLoading makeAbsolute level mlogFile mh =
        case mh of
            Just h ->
                case mlogFile of
                    Just{} -> return ()
                    Nothing -> printBuildOutput stripTHLoading makeAbsolute level h
            Nothing -> return ()
-- | Build a single package as part of an execution plan.
--
-- Either copies a precompiled snapshot package (library registration plus
-- executable links) or performs a real configure\/build\/install cycle,
-- optionally generating haddocks. On success the resulting installed
-- package is recorded in the shared 'eeGhcPkgIds' TVar keyed by
-- 'taskProvides'.
singleBuild :: M env m
            => (m () -> IO ())
            -> ActionContext
            -> ExecuteEnv
            -> Task
            -> InstalledMap
            -> m ()
singleBuild runInBase ac@ActionContext {..} ee@ExecuteEnv {..} task@Task {..} installedMap = do
    (allDepsMap, cache) <- getCache
    mprecompiled <- getPrecompiled cache
    minstalled <-
        case mprecompiled of
            Just precompiled -> copyPreCompiled precompiled
            Nothing -> realConfigAndBuild cache allDepsMap
    case minstalled of
        Nothing -> return ()
        Just installed -> do
            writeFlagCache installed cache
            -- Publish the new package ID so later tasks in the plan can see it.
            liftIO $ atomically $ modifyTVar eeGhcPkgIds $ Map.insert taskProvides installed
  where
    pname = packageIdentifierName taskProvides
    shouldHaddockPackage' = shouldHaddockPackage eeBuildOpts eeWanted pname
    doHaddock package = shouldHaddockPackage' &&
        -- Works around haddock failing on bytestring-builder since it has no modules
        -- when bytestring is new enough.
        packageHasExposedModules package

    -- Compute the configure-options cache for this task.
    getCache = do
        let extra =
                -- We enable tests if the test suite dependencies are already
                -- installed, so that we avoid unnecessary recompilation based on
                -- cabal_macros.h changes when switching between 'stack build' and
                -- 'stack test'. See:
                -- https://github.com/commercialhaskell/stack/issues/805
                case taskType of
                    TTLocal lp -> concat
                        [ ["--enable-tests" | depsPresent installedMap $ lpTestDeps lp]
                        , ["--enable-benchmarks" | depsPresent installedMap $ lpBenchDeps lp]
                        ]
                    _ -> []
        getConfigCache ee task extra

    -- Look up a previously built copy of this package in the precompiled
    -- cache. Only applies to snapshot packages that don't need haddocks,
    -- and only when all cached files still exist on disk.
    getPrecompiled cache =
        case taskLocation task of
            Snap | not shouldHaddockPackage' -> do
                mpc <- readPrecompiledCache taskProvides $ configCacheOpts cache
                case mpc of
                    Nothing -> return Nothing
                    Just pc -> do
                        -- Short-circuiting monadic "all": stop at the first
                        -- missing file.
                        let allM _ [] = return True
                            allM f (x:xs) = do
                                b <- f x
                                if b then allM f xs else return False
                        b <- liftIO $ allM D.doesFileExist $ maybe id (:) (pcLibrary pc) $ pcExes pc
                        return $ if b then Just pc else Nothing
            _ -> return Nothing

    -- Reuse a precompiled package: register the library (if any) into the
    -- snapshot DB and link/copy the executables into the snapshot bindir.
    copyPreCompiled (PrecompiledCache mlib exes) = do
        announceTask task "copying precompiled package"
        forM_ mlib $ \libpath -> do
            menv <- getMinimalEnvOverride
            withMVar eeInstallLock $ \() ->
                readProcessNull Nothing menv "ghc-pkg"
                    [ "register"
                    , "--no-user-package-db"
                    , "--package-db=" ++ toFilePath (bcoSnapDB eeBaseConfigOpts)
                    , "--force"
                    , libpath
                    ]
        liftIO $ forM_ exes $ \exe -> do
            D.createDirectoryIfMissing True bindir
            let dst = bindir FP.</> FP.takeFileName exe
            -- Prefer a hard link; fall back to a copy (e.g. across devices).
            createLink exe dst `catchIO` \_ -> D.copyFile exe dst
        -- Find the package in the database
        wc <- getWhichCompiler
        let pkgDbs = [bcoSnapDB eeBaseConfigOpts]
        mpkgid <- findGhcPkgId eeEnvOverride wc pkgDbs pname
        return $ Just $
            case mpkgid of
                Nothing -> Executable taskProvides
                Just pkgid -> Library taskProvides pkgid
      where
        bindir = toFilePath $ bcoSnapInstallRoot eeBaseConfigOpts </> bindirSuffix

    -- Configure the package, then build it — unless only configuration
    -- was requested.
    realConfigAndBuild cache allDepsMap = withSingleContext runInBase ac ee task (Just allDepsMap) Nothing
        $ \package cabalfp pkgDir cabal announce console _mlogFile -> do
            _neededConfig <- ensureConfig cache pkgDir ee (announce "configure") cabal cabalfp
            if boptsOnlyConfigure eeBuildOpts
                then return Nothing
                else liftM Just $ realBuild cache package pkgDir cabal announce console

    -- Run the actual build/haddock/install sequence via the Setup
    -- executable and register the result.
    realBuild cache package pkgDir cabal announce console = do
        wc <- getWhichCompiler
        markExeNotInstalled (taskLocation task) taskProvides
        case taskType of
            TTLocal lp -> writeBuildCache pkgDir $ lpNewBuildCache lp
            TTUpstream _ _ -> return ()
        () <- announce "build"
        config <- asks getConfig
        extraOpts <- extraBuildOptions
        cabal (console && configHideTHLoading config) $
            (case taskType of
                TTLocal lp -> concat
                    [ ["build"]
                    , ["lib:" ++ packageNameString (packageName package)
                      -- TODO: get this information from target parsing instead,
                      -- which will allow users to turn off library building if
                      -- desired
                      | packageHasLibrary package]
                    , map (T.unpack . T.append "exe:") $ Set.toList $
                        case lpExeComponents lp of
                            Just exes -> exes
                            -- Build all executables in the event that no
                            -- specific list is provided (as happens with
                            -- extra-deps).
                            Nothing -> packageExes package
                    ]
                TTUpstream _ _ -> ["build"]) ++ extraOpts
        when (doHaddock package) $ do
            announce "haddock"
            hscolourExists <- doesExecutableExist eeEnvOverride "HsColour"
            unless hscolourExists $ $logWarn
                ("Warning: haddock not generating hyperlinked sources because 'HsColour' not\n" <>
                 "found on PATH (use 'stack build hscolour --copy-bins' to install).")
            cabal False (concat [["haddock", "--html", "--hoogle", "--html-location=../$pkg-$version/"]
                                ,["--hyperlink-source" | hscolourExists]
                                ,["--ghcjs" | wc == Ghcjs]])
        -- The install step is serialized across tasks via eeInstallLock.
        withMVar eeInstallLock $ \() -> do
            announce "install"
            cabal False ["install"]
        let pkgDbs =
                case taskLocation task of
                    Snap -> [bcoSnapDB eeBaseConfigOpts]
                    Local ->
                        [ bcoSnapDB eeBaseConfigOpts
                        , bcoLocalDB eeBaseConfigOpts
                        ]
        mpkgid <- findGhcPkgId eeEnvOverride wc pkgDbs (packageName package)
        let ident = PackageIdentifier (packageName package) (packageVersion package)
        -- Executable-only packages have no registered library, so a missing
        -- package ID is only an error when the package has a library.
        mpkgid' <- case (packageHasLibrary package, mpkgid) of
            (False, _) -> assert (isNothing mpkgid) $ do
                markExeInstalled (taskLocation task) taskProvides -- TODO unify somehow with writeFlagCache?
                return $ Executable ident
            (True, Nothing) -> throwM $ Couldn'tFindPkgId $ packageName package
            (True, Just pkgid) -> return $ Library ident pkgid
        when (doHaddock package && shouldHaddockDeps eeBuildOpts) $
            withMVar eeInstallLock $ \() ->
                copyDepHaddocks
                    eeEnvOverride
                    wc
                    eeBaseConfigOpts
                    (pkgDbs ++ [eeGlobalDB])
                    (PackageIdentifier (packageName package) (packageVersion package))
                    Set.empty
        -- Record snapshot builds in the precompiled cache for later reuse.
        case taskLocation task of
            Snap -> writePrecompiledCache eeBaseConfigOpts taskProvides (configCacheOpts cache) mpkgid (packageExes package)
            Local -> return ()
        return mpkgid'
-- | Determine whether every dependency in the given map is already
-- installed at a version satisfying its requested range.
depsPresent :: InstalledMap -> Map PackageName VersionRange -> Bool
depsPresent installedMap deps = and
    [ satisfied name range
    | (name, range) <- Map.toList deps
    ]
  where
    -- A single dependency is satisfied when it appears in the installed
    -- map at a version within the requested range.
    satisfied name range =
        case Map.lookup name installedMap of
            Just (version, _, _) -> version `withinRange` range
            Nothing -> False
-- | Run the test suites of a single package.
--
-- Configures (if needed) and builds the test components, runs each
-- selected suite as an external process, optionally collects HPC
-- coverage data, and throws 'TestSuiteFailure' if any suite fails.
singleTest :: M env m
           => (m () -> IO ())
           -> TestOpts
           -> LocalPackageTB
           -> ActionContext
           -> ExecuteEnv
           -> Task
           -> InstalledMap
           -> m ()
singleTest runInBase topts lptb ac ee task installedMap = do
    (allDepsMap, cache) <- getConfigCache ee task $
        case taskType task of
            TTLocal lp -> concat
                [ ["--enable-tests"]
                , ["--enable-benchmarks" | depsPresent installedMap $ lpBenchDeps lp]
                ]
            _ -> []
    withSingleContext runInBase ac ee task (Just allDepsMap) (Just "test") $ \package cabalfp pkgDir cabal announce console mlogFile -> do
        neededConfig <- ensureConfig cache pkgDir ee (announce "configure (test)") cabal cabalfp
        config <- asks getConfig
        testBuilt <- checkTestBuilt pkgDir
        -- Rebuild when configuration changed, source files are dirty, or
        -- the test components were never built.
        let needBuild = neededConfig ||
                (case taskType task of
                    TTLocal lp -> lpDirtyFiles lp
                    _ -> assert False True) ||
                not testBuilt
            needHpc = toCoverage topts
            testsToRun = Set.toList $ lptbTests lptb
            components = map (T.unpack . T.append "test:") testsToRun
        when needBuild $ do
            announce "build (test)"
            unsetTestBuilt pkgDir
            unsetTestSuccess pkgDir
            case taskType task of
                TTLocal lp -> writeBuildCache pkgDir $ lpNewBuildCache lp
                TTUpstream _ _ -> assert False $ return ()
            extraOpts <- extraBuildOptions
            cabal (console && configHideTHLoading config) $
                "build" : (components ++ extraOpts)
            setTestBuilt pkgDir
        -- Decide whether to actually execute the suites: honor
        -- --no-run-tests, and skip suites that already passed unless
        -- rerunning was requested.
        toRun <-
            if toDisableRun topts
                then do
                    announce "Test running disabled by --no-run-tests flag."
                    return False
                else if toRerunTests topts
                    then return True
                    else do
                        success <- checkTestSuccess pkgDir
                        if success
                            then do
                                unless (null testsToRun) $ announce "skipping already passed test"
                                return False
                            else return True
        when toRun $ do
            bconfig <- asks getBuildConfig
            buildDir <- distDirFromDir pkgDir
            hpcDir <- hpcDirFromDir pkgDir
            when needHpc (createTree hpcDir)
            let exeExtension =
                    case configPlatform $ getConfig bconfig of
                        Platform _ Windows -> ".exe"
                        _ -> ""
            -- Run each suite, collecting a map of failures (suite name ->
            -- Maybe exit code; Nothing means the executable was missing).
            errs <- liftM Map.unions $ forM testsToRun $ \testName -> do
                nameDir <- parseRelDir $ T.unpack testName
                nameExe <- parseRelFile $ T.unpack testName ++ exeExtension
                nameTix <- liftM (pkgDir </>) $ parseRelFile $ T.unpack testName ++ ".tix"
                let exeName = buildDir </> $(mkRelDir "build") </> nameDir </> nameExe
                exists <- fileExists exeName
                menv <- liftIO $ configEnvOverride config EnvSettings
                    { esIncludeLocals = taskLocation task == Local
                    , esIncludeGhcPackagePath = True
                    , esStackExe = True
                    , esLocaleUtf8 = False
                    }
                if exists
                    then do
                        -- We clear out the .tix files before doing a run.
                        when needHpc $ do
                            tixexists <- fileExists nameTix
                            when tixexists $
                                $logWarn ("Removing HPC file " <> T.pack (toFilePath nameTix))
                            removeFileIfExists nameTix
                        let args = toAdditionalArgs topts
                            argsDisplay = case args of
                                [] -> ""
                                _ -> ", args: " <> T.intercalate " " (map showProcessArgDebug args)
                        announce $ "test (suite: " <> testName <> argsDisplay <> ")"
                        -- Inherit output unless logging to a file was requested.
                        let cp = (proc (toFilePath exeName) args)
                                { cwd = Just $ toFilePath pkgDir
                                , Process.env = envHelper menv
                                , std_in = CreatePipe
                                , std_out =
                                    case mlogFile of
                                        Nothing -> Inherit
                                        Just (_, h) -> UseHandle h
                                , std_err =
                                    case mlogFile of
                                        Nothing -> Inherit
                                        Just (_, h) -> UseHandle h
                                }
                        -- Use createProcess_ to avoid the log file being closed afterwards
                        (Just inH, Nothing, Nothing, ph) <- liftIO $ createProcess_ "singleBuild.runTests" cp
                        liftIO $ hClose inH
                        ec <- liftIO $ waitForProcess ph
                        -- Move the .tix file out of the package directory
                        -- into the hpc work dir, for tidiness.
                        when needHpc $
                            moveFileIfExists nameTix hpcDir
                        return $ case ec of
                            ExitSuccess -> Map.empty
                            _ -> Map.singleton testName $ Just ec
                    else do
                        $logError $ T.concat
                            [ "Test suite "
                            , testName
                            , " executable not found for "
                            , packageNameText $ packageName package
                            ]
                        return $ Map.singleton testName Nothing
            when needHpc $ forM_ testsToRun $ \testName -> do
                let pkgName = packageNameText (packageName package)
                    pkgId = packageIdentifierText (packageIdentifier package)
                generateHpcReport pkgDir pkgName pkgId testName
            -- Read back the log file contents (if any) so failures can
            -- include the captured output.
            bs <- liftIO $
                case mlogFile of
                    Nothing -> return ""
                    Just (logFile, h) -> do
                        hClose h
                        S.readFile $ toFilePath logFile
            unless (Map.null errs) $ throwM $ TestSuiteFailure
                (taskProvides task)
                errs
                (fmap fst mlogFile)
                bs
            setTestSuccess pkgDir
-- | Run the benchmarks of a single package: configure (if needed),
-- build, and invoke @cabal bench@ with any additional benchmark
-- arguments, unless disabled by @--no-run-benchmarks@.
singleBench :: M env m
            => (m () -> IO ())
            -> BenchmarkOpts
            -> LocalPackageTB
            -> ActionContext
            -> ExecuteEnv
            -> Task
            -> InstalledMap
            -> m ()
singleBench runInBase beopts _lptb ac ee task installedMap = do
    (allDepsMap, cache) <- getConfigCache ee task $
        case taskType task of
            TTLocal lp -> concat
                [ ["--enable-tests" | depsPresent installedMap $ lpTestDeps lp]
                , ["--enable-benchmarks"]
                ]
            _ -> []
    withSingleContext runInBase ac ee task (Just allDepsMap) (Just "bench") $ \_package cabalfp pkgDir cabal announce console _mlogFile -> do
        neededConfig <- ensureConfig cache pkgDir ee (announce "configure (benchmarks)") cabal cabalfp
        benchBuilt <- checkBenchBuilt pkgDir
        -- Rebuild when configuration changed, sources are dirty, or the
        -- benchmarks were never built.
        let needBuild = neededConfig ||
                (case taskType task of
                    TTLocal lp -> lpDirtyFiles lp
                    _ -> assert False True) ||
                not benchBuilt
        when needBuild $ do
            announce "build (benchmarks)"
            unsetBenchBuilt pkgDir
            case taskType task of
                TTLocal lp -> writeBuildCache pkgDir $ lpNewBuildCache lp
                TTUpstream _ _ -> assert False $ return ()
            config <- asks getConfig
            extraOpts <- extraBuildOptions
            cabal (console && configHideTHLoading config) ("build" : extraOpts)
            setBenchBuilt pkgDir
        -- Forward any user-supplied benchmark arguments as a single
        -- --benchmark-options flag.
        let args = maybe []
                         ((:[]) . ("--benchmark-options=" <>))
                         (beoAdditionalArgs beopts)
        toRun <-
            if beoDisableRun beopts
                then do
                    announce "Benchmark running disabled by --no-run-benchmarks flag."
                    return False
                else do
                    return True
        when toRun $ do
            announce "benchmarks"
            cabal False ("bench" : args)
-- | Grab all output from the given @Handle@ and print it to stdout, stripping
-- Template Haskell "Loading package" lines. Does work in a separate thread.
printBuildOutput :: (MonadIO m, MonadBaseControl IO m, MonadLogger m)
                 => Bool -- ^ exclude TH loading?
                 -> Bool -- ^ convert paths to absolute?
                 -> LogLevel
                 -> Handle -> m ()
printBuildOutput excludeTHLoading makeAbsolute level outH = void $
    -- Conduit pipeline: read lines, strip \r, drop TH-loading noise,
    -- absolutize leading file paths, and emit each line via the logger.
         CB.sourceHandle outH
      $$ CB.lines
      =$ CL.map stripCarriageReturn
      =$ CL.filter (not . isTHLoading)
      =$ CL.mapM toAbsolutePath
      =$ CL.mapM_ (monadLoggerLog $(TH.location >>= liftLoc) "" level)
  where
    -- | Is this line a Template Haskell "Loading package" line
    -- ByteString
    isTHLoading :: S8.ByteString -> Bool
    isTHLoading _ | not excludeTHLoading = False
    isTHLoading bs =
        "Loading package " `S8.isPrefixOf` bs &&
        ("done." `S8.isSuffixOf` bs || "done.\r" `S8.isSuffixOf` bs)

    -- | Convert GHC error lines with file paths to have absolute file paths
    toAbsolutePath bs | not makeAbsolute = return bs
    toAbsolutePath bs = do
        -- Split at the first colon; the prefix is a candidate file path
        -- only when the suffix looks like ":line:col:".
        let (x, y) = S.break (== _colon) bs
        mabs <-
            if isValidSuffix y
                then do
                    efp <- liftIO $ tryIO $ D.canonicalizePath $ S8.unpack x
                    case efp of
                        Left _ -> return Nothing
                        Right fp -> return $ Just $ S8.pack fp
                else return Nothing
        case mabs of
            Nothing -> return bs
            Just fp -> return $ fp `S.append` y

    -- | Match the line:column format at the end of lines
    isValidSuffix bs0 = maybe False (const True) $ do
        guard $ not $ S.null bs0
        guard $ S.head bs0 == _colon
        (_, bs1) <- S8.readInt $ S.drop 1 bs0
        guard $ not $ S.null bs1
        guard $ S.head bs1 == _colon
        (_, bs2) <- S8.readInt $ S.drop 1 bs1
        guard $ bs2 == ":"

    -- | Strip @\r@ characters from the byte vector. Used because Windows.
    stripCarriageReturn :: ByteString -> ByteString
    stripCarriageReturn = S8.filter (not . (=='\r'))
-- | Locate the @Setup.hs@ or @Setup.lhs@ build script in the given
-- project directory, preferring @Setup.hs@. Throws 'NoSetupHsFound'
-- when neither file is present.
getSetupHs :: Path Abs Dir -- ^ project directory
           -> IO (Path Abs File)
getSetupHs dir = firstExisting candidates
  where
    -- Candidate setup scripts, in order of preference.
    candidates =
        [ dir </> $(mkRelFile "Setup.hs")
        , dir </> $(mkRelFile "Setup.lhs")
        ]
    -- Return the first candidate that exists on disk, or throw.
    firstExisting [] = throwM $ NoSetupHsFound dir
    firstExisting (fp:fps) = do
        exists <- fileExists fp
        if exists
            then return fp
            else firstExisting fps
-- | Extra GHC options passed to every build: direct HPC output into the
-- relative @.hpc@ directory and dump interface information to files.
extraBuildOptions :: M env m => m [String]
extraBuildOptions = do
    relDir <- hpcRelativeDir
    let hpcIndexDir = toFilePath (relDir </> dotHpc)
    return ["--ghc-options", "-hpcdir " ++ hpcIndexDir ++ " -ddump-hi -ddump-to-file"]
-- | Take the given list of package dependencies and the contents of the global
-- package database, and construct a set of installed package IDs that:
--
-- * Excludes the Cabal library (it's added later)
--
-- * Includes all packages depended on by this package
--
-- * Includes all global packages, unless: (1) it's hidden, (2) it's shadowed
-- by a depended-on package, or (3) one of its dependencies is not met.
--
-- See:
--
-- * https://github.com/commercialhaskell/stack/issues/941
--
-- * https://github.com/commercialhaskell/stack/issues/944
--
-- * https://github.com/commercialhaskell/stack/issues/949
addGlobalPackages :: Map PackageIdentifier GhcPkgId -- ^ dependencies of the package
                  -> [DumpPackage () ()] -- ^ global packages
                  -> Set GhcPkgId
addGlobalPackages deps globals0 =
    res
  where
    -- Initial set of packages: the installed IDs of all dependencies
    res0 = Map.elems $ Map.filterWithKey (\ident _ -> not $ isCabal ident) deps

    -- First check on globals: it's not shadowed by a dep, it's not Cabal, and
    -- it's exposed
    goodGlobal1 dp = not (isDep dp)
                  && not (isCabal $ dpPackageIdent dp)
                  && dpIsExposed dp
    globals1 = filter goodGlobal1 globals0

    -- Create a Map of unique package names in the global database
    globals2 = Map.fromListWith chooseBest
             $ map (packageIdentifierName . dpPackageIdent &&& id) globals1

    -- Final result: add in globals that have their dependencies met
    res = loop id (Map.elems globals2) $ Set.fromList res0

    ----------------------------------
    -- Some auxiliary helper functions
    ----------------------------------

    -- Is the given package identifier for any version of Cabal
    isCabal (PackageIdentifier name _) = name == $(mkPackageName "Cabal")

    -- Is the given package name provided by the package dependencies?
    isDep dp = packageIdentifierName (dpPackageIdent dp) `Set.member` depNames
    depNames = Set.map packageIdentifierName $ Map.keysSet deps

    -- Choose the best of two competing global packages (the newest version)
    chooseBest dp1 dp2
        | getVer dp1 < getVer dp2 = dp2
        | otherwise               = dp1
      where
        getVer = packageIdentifierVersion . dpPackageIdent

    -- Are all dependencies of the given package met by the given Set of
    -- installed packages
    depsMet dp gids = all (`Set.member` gids) (dpDepends dp)

    -- Find all globals that have all of their dependencies met.
    -- 'front' is a difference list holding the packages already skipped
    -- in this pass, so the scan can restart from the beginning after a
    -- successful insertion.
    loop front (dp:dps) gids
        -- This package has its deps met. Add it to the list of dependencies
        -- and then traverse the list from the beginning (this package may have
        -- been a dependency of an earlier one).
        | depsMet dp gids = loop id (front dps) (Set.insert (dpGhcPkgId dp) gids)
        -- Deps are not met, keep going
        | otherwise = loop (front . (dp:)) dps gids
    -- None of the packages we checked can be added, therefore drop them all
    -- and return our results
    loop _ [] gids = gids
| cabrera/stack | src/Stack/Build/Execute.hs | bsd-3-clause | 57,778 | 396 | 28 | 23,085 | 10,821 | 5,805 | 5,016 | -1 | -1 |
{-# LANGUAGE DeriveDataTypeable, StandaloneDeriving #-}
module Github.Data.Definitions where
import Data.Time
import Data.Data
import qualified Control.Exception as E
-- | Errors have been tagged according to their source, so you can more easily
-- dispatch and handle them.
data Error =
    HTTPConnectionError E.SomeException -- ^ A HTTP error occurred. The actual caught error is included.
  | ParseError String -- ^ An error in the parser itself.
  | JsonError String -- ^ The JSON is malformed or unexpected.
  | UserError String -- ^ Incorrect input.
  deriving Show

-- | A date in the Github format, which is a special case of ISO-8601.
newtype GithubDate = GithubDate { fromGithubDate :: UTCTime }
  deriving (Show, Data, Typeable, Eq, Ord)

-- | A commit, including its parents, the underlying git commit data,
-- the author\/committer Github accounts (when available), touched files
-- and optional diff statistics.
data Commit = Commit {
   commitSha :: String
  ,commitParents :: [Tree]
  ,commitUrl :: String
  ,commitGitCommit :: GitCommit
  ,commitCommitter :: Maybe GithubOwner
  ,commitAuthor :: Maybe GithubOwner
  ,commitFiles :: [File]
  ,commitStats :: Maybe Stats
} deriving (Show, Data, Typeable, Eq, Ord)

-- | A git tree, identified by SHA and URL, containing tree entries.
data Tree = Tree {
   treeSha :: String
  ,treeUrl :: String
  ,treeGitTrees :: [GitTree]
} deriving (Show, Data, Typeable, Eq, Ord)

-- | A single entry within a git tree (blob or subtree).
data GitTree = GitTree {
  gitTreeType :: String
  ,gitTreeSha :: String
  ,gitTreeUrl :: String
  ,gitTreeSize :: Maybe Int
  ,gitTreePath :: String
  ,gitTreeMode :: String
} deriving (Show, Data, Typeable, Eq, Ord)

-- | The raw git-level data of a commit: message, author\/committer
-- signatures, tree and parents.
data GitCommit = GitCommit {
   gitCommitMessage :: String
  ,gitCommitUrl :: String
  ,gitCommitCommitter :: GitUser
  ,gitCommitAuthor :: GitUser
  ,gitCommitTree :: Tree
  ,gitCommitSha :: Maybe String
  ,gitCommitParents :: [Tree]
} deriving (Show, Data, Typeable, Eq, Ord)
-- | The owner of a resource: either an individual user account or an
-- organization. Note that only users carry a gravatar ID.
data GithubOwner = GithubUser {
   githubOwnerAvatarUrl :: String
  ,githubOwnerLogin :: String
  ,githubOwnerUrl :: String
  ,githubOwnerId :: Int
  ,githubOwnerGravatarId :: Maybe String
  }
  | GithubOrganization {
   githubOwnerAvatarUrl :: String
  ,githubOwnerLogin :: String
  ,githubOwnerUrl :: String
  ,githubOwnerId :: Int
  } deriving (Show, Data, Typeable, Eq, Ord)

-- | A git-level user signature: name, email and timestamp.
data GitUser = GitUser {
   gitUserName :: String
  ,gitUserEmail :: String
  ,gitUserDate :: GithubDate
} deriving (Show, Data, Typeable, Eq, Ord)

-- | A file touched by a commit, with its per-file diff information.
data File = File {
   fileBlobUrl :: String
  ,fileStatus :: String
  ,fileRawUrl :: String
  ,fileAdditions :: Int
  ,fileSha :: String
  ,fileChanges :: Int
  ,filePatch :: String
  ,fileFilename :: String
  ,fileDeletions :: Int
} deriving (Show, Data, Typeable, Eq, Ord)

-- | Line-change statistics: additions, deletions and their total.
data Stats = Stats {
   statsAdditions :: Int
  ,statsTotal :: Int
  ,statsDeletions :: Int
} deriving (Show, Data, Typeable, Eq, Ord)
-- | A comment on a commit. Position, line and path are only present for
-- comments attached to a specific location in a diff.
data Comment = Comment {
   commentPosition :: Maybe Int
  ,commentLine :: Maybe Int
  ,commentBody :: String
  ,commentCommitId :: String
  ,commentUpdatedAt :: UTCTime
  ,commentHtmlUrl :: Maybe String
  ,commentUrl :: String
  ,commentCreatedAt :: UTCTime
  ,commentPath :: Maybe String
  ,commentUser :: GithubOwner
  ,commentId :: Int
} deriving (Show, Data, Typeable, Eq, Ord)

-- | Payload for creating a new comment.
data NewComment = NewComment {
   newCommentBody :: String
} deriving (Show, Data, Typeable, Eq, Ord)

-- | Payload for editing an existing comment.
data EditComment = EditComment {
   editCommentBody :: String
} deriving (Show, Data, Typeable, Eq, Ord)

-- | A comparison between two commits: status, ahead\/behind counts,
-- the commits in between and the files that changed.
data Diff = Diff {
   diffStatus :: String
  ,diffBehindBy :: Int
  ,diffPatchUrl :: String
  ,diffUrl :: String
  ,diffBaseCommit :: Commit
  ,diffCommits :: [Commit]
  ,diffTotalCommits :: Int
  ,diffHtmlUrl :: String
  ,diffFiles :: [File]
  ,diffAheadBy :: Int
  ,diffDiffUrl :: String
  ,diffPermalinkUrl :: String
} deriving (Show, Data, Typeable, Eq, Ord)
-- | A gist: a shareable snippet consisting of one or more files.
data Gist = Gist {
   gistUser :: GithubOwner
  ,gistGitPushUrl :: String
  ,gistUrl :: String
  ,gistDescription :: Maybe String
  ,gistCreatedAt :: GithubDate
  ,gistPublic :: Bool
  ,gistComments :: Int
  ,gistUpdatedAt :: GithubDate
  ,gistHtmlUrl :: String
  ,gistId :: String
  ,gistFiles :: [GistFile]
  ,gistGitPullUrl :: String
} deriving (Show, Data, Typeable, Eq, Ord)

-- | A single file within a gist. Content may be omitted in listings.
data GistFile = GistFile {
   gistFileType :: String
  ,gistFileRawUrl :: String
  ,gistFileSize :: Int
  ,gistFileLanguage :: Maybe String
  ,gistFileFilename :: String
  ,gistFileContent :: Maybe String
} deriving (Show, Data, Typeable, Eq, Ord)

-- | A comment left on a gist.
data GistComment = GistComment {
   gistCommentUser :: GithubOwner
  ,gistCommentUrl :: String
  ,gistCommentCreatedAt :: GithubDate
  ,gistCommentBody :: String
  ,gistCommentUpdatedAt :: GithubDate
  ,gistCommentId :: Int
} deriving (Show, Data, Typeable, Eq, Ord)

-- | A git blob: raw content plus its encoding, SHA and size.
data Blob = Blob {
   blobUrl :: String
  ,blobEncoding :: String
  ,blobContent :: String
  ,blobSha :: String
  ,blobSize :: Int
} deriving (Show, Data, Typeable, Eq, Ord)

-- | A git reference (e.g. a branch or tag ref) pointing at an object.
data GitReference = GitReference {
   gitReferenceObject :: GitObject
  ,gitReferenceUrl :: String
  ,gitReferenceRef :: String
} deriving (Show, Data, Typeable, Eq, Ord)

-- | The object a git reference points at, identified by type and SHA.
data GitObject = GitObject {
   gitObjectType :: String
  ,gitObjectSha :: String
  ,gitObjectUrl :: String
} deriving (Show, Data, Typeable, Eq, Ord)
-- | An issue, including its labels, assignee, milestone and associated
-- pull-request links.
data Issue = Issue {
   issueClosedAt :: Maybe GithubDate
  ,issueUpdatedAt :: GithubDate
  ,issueHtmlUrl :: Maybe String
  ,issueClosedBy :: Maybe GithubOwner
  ,issueLabels :: [IssueLabel]
  ,issueNumber :: Int
  ,issueAssignee :: Maybe GithubOwner
  ,issueUser :: GithubOwner
  ,issueTitle :: String
  ,issuePullRequest :: PullRequestReference
  ,issueUrl :: String
  ,issueCreatedAt :: GithubDate
  ,issueBody :: Maybe String
  ,issueState :: String
  ,issueId :: Int
  ,issueComments :: Int
  ,issueMilestone :: Maybe Milestone
} deriving (Show, Data, Typeable, Eq, Ord)

-- | Payload for creating a new issue; everything but the title is optional.
data NewIssue = NewIssue {
  newIssueTitle :: String
, newIssueBody :: Maybe String
, newIssueAssignee :: Maybe String
, newIssueMilestone :: Maybe Int
, newIssueLabels :: Maybe [String]
} deriving (Show, Data, Typeable, Eq, Ord)

-- | Payload for editing an issue; 'Nothing' fields are left unchanged.
data EditIssue = EditIssue {
  editIssueTitle :: Maybe String
, editIssueBody :: Maybe String
, editIssueAssignee :: Maybe String
, editIssueState :: Maybe String
, editIssueMilestone :: Maybe Int
, editIssueLabels :: Maybe [String]
} deriving (Show, Data, Typeable, Eq, Ord)

-- | A milestone grouping issues, with open\/closed counts and an
-- optional due date.
data Milestone = Milestone {
   milestoneCreator :: GithubOwner
  ,milestoneDueOn :: Maybe GithubDate
  ,milestoneOpenIssues :: Int
  ,milestoneNumber :: Int
  ,milestoneClosedIssues :: Int
  ,milestoneDescription :: String
  ,milestoneTitle :: String
  ,milestoneUrl :: String
  ,milestoneCreatedAt :: GithubDate
  ,milestoneState :: String
} deriving (Show, Data, Typeable, Eq, Ord)

-- | A label that can be attached to issues.
data IssueLabel = IssueLabel {
   labelColor :: String
  ,labelUrl :: String
  ,labelName :: String
} deriving (Show, Data, Typeable, Eq, Ord)

-- | Links from an issue to its associated pull request, if any.
data PullRequestReference = PullRequestReference {
  pullRequestReferenceHtmlUrl :: Maybe String
  ,pullRequestReferencePatchUrl :: Maybe String
  ,pullRequestReferenceDiffUrl :: Maybe String
} deriving (Show, Data, Typeable, Eq, Ord)

-- | A comment left on an issue.
data IssueComment = IssueComment {
   issueCommentUpdatedAt :: GithubDate
  ,issueCommentUser :: GithubOwner
  ,issueCommentUrl :: String
  ,issueCommentCreatedAt :: GithubDate
  ,issueCommentBody :: String
  ,issueCommentId :: Int
} deriving (Show, Data, Typeable, Eq, Ord)

-- | Data describing an @Event@.
data EventType =
    Mentioned -- ^ The actor was @mentioned in an issue body.
  | Subscribed -- ^ The actor subscribed to receive notifications for an issue.
  | Unsubscribed -- ^ The issue was unsubscribed from by the actor.
  | Referenced -- ^ The issue was referenced from a commit message. The commit_id attribute is the commit SHA1 of where that happened.
  | Merged -- ^ The issue was merged by the actor. The commit_id attribute is the SHA1 of the HEAD commit that was merged.
  | Assigned -- ^ The issue was assigned to the actor.
  | Closed -- ^ The issue was closed by the actor. When the commit_id is present, it identifies the commit that closed the issue using “closes / fixes #NN” syntax.
  | Reopened -- ^ The issue was reopened by the actor.
  deriving (Show, Data, Typeable, Eq, Ord)

-- | An event on an issue, performed by an actor at a point in time.
data Event = Event {
   eventActor :: GithubOwner
  ,eventType :: EventType
  ,eventCommitId :: Maybe String
  ,eventUrl :: String
  ,eventCreatedAt :: GithubDate
  ,eventId :: Int
  ,eventIssue :: Maybe Issue
} deriving (Show, Data, Typeable, Eq, Ord)
-- | The abbreviated organization record returned in listings.
data SimpleOrganization = SimpleOrganization {
   simpleOrganizationUrl :: String
  ,simpleOrganizationAvatarUrl :: String
  ,simpleOrganizationId :: Int
  ,simpleOrganizationLogin :: String
} deriving (Show, Data, Typeable, Eq, Ord)

-- | The full organization record, including profile details and
-- follower\/repository counts.
data Organization = Organization {
   organizationType :: String
  ,organizationBlog :: Maybe String
  ,organizationLocation :: Maybe String
  ,organizationLogin :: String
  ,organizationFollowers :: Int
  ,organizationCompany :: Maybe String
  ,organizationAvatarUrl :: String
  ,organizationPublicGists :: Int
  ,organizationHtmlUrl :: String
  ,organizationEmail :: Maybe String
  ,organizationFollowing :: Int
  ,organizationPublicRepos :: Int
  ,organizationUrl :: String
  ,organizationCreatedAt :: GithubDate
  ,organizationName :: Maybe String
  ,organizationId :: Int
} deriving (Show, Data, Typeable, Eq, Ord)

-- | A pull request as returned in listings.
data PullRequest = PullRequest {
   pullRequestClosedAt :: Maybe GithubDate
  ,pullRequestCreatedAt :: GithubDate
  ,pullRequestUser :: GithubOwner
  ,pullRequestPatchUrl :: String
  ,pullRequestState :: String
  ,pullRequestNumber :: Int
  ,pullRequestHtmlUrl :: String
  ,pullRequestUpdatedAt :: GithubDate
  ,pullRequestBody :: String
  ,pullRequestIssueUrl :: String
  ,pullRequestDiffUrl :: String
  ,pullRequestUrl :: String
  ,pullRequestLinks :: PullRequestLinks
  ,pullRequestMergedAt :: Maybe GithubDate
  ,pullRequestTitle :: String
  ,pullRequestId :: Int
} deriving (Show, Data, Typeable, Eq, Ord)

-- | A pull request fetched individually: all the fields of 'PullRequest'
-- plus merge status, change statistics and head\/base commits.
data DetailedPullRequest = DetailedPullRequest {
  -- this is a duplication of a PullRequest
   detailedPullRequestClosedAt :: Maybe GithubDate
  ,detailedPullRequestCreatedAt :: GithubDate
  ,detailedPullRequestUser :: GithubOwner
  ,detailedPullRequestPatchUrl :: String
  ,detailedPullRequestState :: String
  ,detailedPullRequestNumber :: Int
  ,detailedPullRequestHtmlUrl :: String
  ,detailedPullRequestUpdatedAt :: GithubDate
  ,detailedPullRequestBody :: String
  ,detailedPullRequestIssueUrl :: String
  ,detailedPullRequestDiffUrl :: String
  ,detailedPullRequestUrl :: String
  ,detailedPullRequestLinks :: PullRequestLinks
  ,detailedPullRequestMergedAt :: Maybe GithubDate
  ,detailedPullRequestTitle :: String
  ,detailedPullRequestId :: Int

  ,detailedPullRequestMergedBy :: Maybe GithubOwner
  ,detailedPullRequestChangedFiles :: Int
  ,detailedPullRequestHead :: PullRequestCommit
  ,detailedPullRequestComments :: Int
  ,detailedPullRequestDeletions :: Int
  ,detailedPullRequestAdditions :: Int
  ,detailedPullRequestReviewComments :: Int
  ,detailedPullRequestBase :: PullRequestCommit
  ,detailedPullRequestCommits :: Int
  ,detailedPullRequestMerged :: Bool
  ,detailedPullRequestMergeable :: Bool
} deriving (Show, Data, Typeable, Eq, Ord)

-- | The hypermedia links attached to a pull request.
data PullRequestLinks = PullRequestLinks {
   pullRequestLinksReviewComments :: String
  ,pullRequestLinksComments :: String
  ,pullRequestLinksHtml :: String
  ,pullRequestLinksSelf :: String
} deriving (Show, Data, Typeable, Eq, Ord)

-- | Placeholder for the head\/base commit of a pull request; currently
-- carries no fields.
data PullRequestCommit = PullRequestCommit {
} deriving (Show, Data, Typeable, Eq, Ord)
-- | A repository, including clone URLs, counts and feature flags.
data Repo = Repo {
   repoSshUrl :: String
  ,repoDescription :: Maybe String
  ,repoCreatedAt :: GithubDate
  ,repoHtmlUrl :: String
  ,repoSvnUrl :: String
  ,repoForks :: Int
  ,repoHomepage :: Maybe String
  ,repoFork :: Bool
  ,repoGitUrl :: String
  ,repoPrivate :: Bool
  ,repoCloneUrl :: String
  ,repoSize :: Int
  ,repoUpdatedAt :: GithubDate
  ,repoWatchers :: Int
  ,repoOwner :: GithubOwner
  ,repoName :: String
  ,repoLanguage :: Maybe String
  ,repoMasterBranch :: Maybe String
  ,repoPushedAt :: Maybe GithubDate   -- ^ this is Nothing for new repositories
  ,repoId :: Int
  ,repoUrl :: String
  ,repoOpenIssues :: Int
  ,repoHasWiki :: Maybe Bool
  ,repoHasIssues :: Maybe Bool
  ,repoHasDownloads :: Maybe Bool
} deriving (Show, Data, Typeable, Eq, Ord)

-- | Someone who has contributed to a repository.
data Contributor
  -- | An existing Github user, with their number of contributions, avatar
  -- URL, login, URL, ID, and Gravatar ID.
  = KnownContributor Int String String String Int String
  -- | An unknown Github user with their number of contributions and recorded name.
  | AnonymousContributor Int String
 deriving (Show, Data, Typeable, Eq, Ord)

-- | This is only used for the FromJSON instance.
data Languages = Languages { getLanguages :: [Language] }
  deriving (Show, Data, Typeable, Eq, Ord)

-- | A programming language with the name and number of characters written in
-- it.
data Language = Language String Int
 deriving (Show, Data, Typeable, Eq, Ord)

-- | A tag, with archive download URLs and the commit it points at.
data Tag = Tag {
   tagName :: String
  ,tagZipballUrl :: String
  ,tagTarballUrl :: String
  ,tagCommit :: BranchCommit
} deriving (Show, Data, Typeable, Eq, Ord)

-- | A branch and the commit at its tip.
data Branch = Branch {
   branchName :: String
  ,branchCommit :: BranchCommit
} deriving (Show, Data, Typeable, Eq, Ord)

-- | A minimal commit reference: SHA plus API URL.
data BranchCommit = BranchCommit {
   branchCommitSha :: String
  ,branchCommitUrl :: String
} deriving (Show, Data, Typeable, Eq, Ord)

-- | The full profile of a user or organization. The user constructor
-- additionally carries hireability, gravatar ID and a non-optional email.
data DetailedOwner = DetailedUser {
   detailedOwnerCreatedAt :: GithubDate
  ,detailedOwnerType :: String
  ,detailedOwnerPublicGists :: Int
  ,detailedOwnerAvatarUrl :: String
  ,detailedOwnerFollowers :: Int
  ,detailedOwnerFollowing :: Int
  ,detailedOwnerHireable :: Bool
  ,detailedOwnerGravatarId :: Maybe String
  ,detailedOwnerBlog :: Maybe String
  ,detailedOwnerBio :: Maybe String
  ,detailedOwnerPublicRepos :: Int
  ,detailedOwnerName :: Maybe String
  ,detailedOwnerLocation :: Maybe String
  ,detailedOwnerCompany :: Maybe String
  ,detailedOwnerEmail :: String
  ,detailedOwnerUrl :: String
  ,detailedOwnerId :: Int
  ,detailedOwnerHtmlUrl :: String
  ,detailedOwnerLogin :: String
  }
  | DetailedOrganization {
   detailedOwnerCreatedAt :: GithubDate
  ,detailedOwnerType :: String
  ,detailedOwnerPublicGists :: Int
  ,detailedOwnerAvatarUrl :: String
  ,detailedOwnerFollowers :: Int
  ,detailedOwnerFollowing :: Int
  ,detailedOwnerBlog :: Maybe String
  ,detailedOwnerBio :: Maybe String
  ,detailedOwnerPublicRepos :: Int
  ,detailedOwnerName :: Maybe String
  ,detailedOwnerLocation :: Maybe String
  ,detailedOwnerCompany :: Maybe String
  ,detailedOwnerUrl :: String
  ,detailedOwnerId :: Int
  ,detailedOwnerHtmlUrl :: String
  ,detailedOwnerLogin :: String
  } deriving (Show, Data, Typeable, Eq, Ord)
| adinapoli/github | Github/Data/Definitions.hs | bsd-3-clause | 14,529 | 1 | 10 | 2,600 | 3,507 | 2,097 | 1,410 | 407 | 0 |
{-# LANGUAGE CPP #-}
{-# LANGUAGE Rank2Types #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE FlexibleInstances #-}
#ifndef MIN_VERSION_profunctors
#define MIN_VERSION_profunctors(x,y,z) 1
#endif
#if __GLASGOW_HASKELL__ < 708 || !(MIN_VERSION_profunctors(4,4,0))
{-# LANGUAGE Trustworthy #-}
#endif
#if __GLASGOW_HASKELL__ && __GLASGOW_HASKELL__ >= 704
{-# LANGUAGE NoPolyKinds #-}
{-# LANGUAGE NoDataKinds #-}
#endif
-------------------------------------------------------------------------------
-- |
-- Module : Control.Lens.Getter
-- Copyright : (C) 2012-15 Edward Kmett
-- License : BSD-style (see the file LICENSE)
-- Maintainer : Edward Kmett <ekmett@gmail.com>
-- Stability : provisional
-- Portability : Rank2Types
--
--
-- A @'Getter' s a@ is just any function @(s -> a)@, which we've flipped
-- into continuation passing style, @(a -> r) -> s -> r@ and decorated
-- with 'Const' to obtain:
--
-- @type 'Getting' r s a = (a -> 'Const' r a) -> s -> 'Const' r s@
--
-- If we restrict access to knowledge about the type 'r', we could get:
--
-- @type 'Getter' s a = forall r. 'Getting' r s a@
--
-- However, for 'Getter' (but not for 'Getting') we actually permit any
-- functor @f@ which is an instance of both 'Functor' and 'Contravariant':
--
-- @type 'Getter' s a = forall f. ('Contravariant' f, 'Functor' f) => (a -> f a) -> s -> f s@
--
-- Everything you can do with a function, you can do with a 'Getter', but
-- note that because of the continuation passing style ('.') composes them
-- in the opposite order.
--
-- Since it is only a function, every 'Getter' obviously only retrieves a
-- single value for a given input.
--
-------------------------------------------------------------------------------
module Control.Lens.Getter
(
-- * Getters
Getter, IndexedGetter
, Getting, IndexedGetting
, Accessing
-- * Building Getters
, to
, ito
, like
, ilike
-- * Combinators for Getters and Folds
, (^.)
, view, views
, use, uses
, listening, listenings
-- * Indexed Getters
-- ** Indexed Getter Combinators
, (^@.)
, iview, iviews
, iuse, iuses
, ilistening, ilistenings
-- * Implementation Details
, Contravariant(..)
, getting
, Const(..)
) where
import Control.Applicative
import Control.Lens.Internal.Indexed
import Control.Lens.Type
import Control.Monad.Reader.Class as Reader
import Control.Monad.State as State
import Control.Monad.Writer as Writer
import Data.Functor.Contravariant
import Data.Profunctor
import Data.Profunctor.Unsafe
-- $setup
-- >>> :set -XNoOverloadedStrings
-- >>> import Control.Lens
-- >>> import Data.List.Lens
-- >>> import Debug.SimpleReflect.Expr
-- >>> import Debug.SimpleReflect.Vars as Vars hiding (f,g)
-- >>> let f :: Expr -> Expr; f = Debug.SimpleReflect.Vars.f
-- >>> let g :: Expr -> Expr; g = Debug.SimpleReflect.Vars.g
infixl 8 ^., ^@.
-------------------------------------------------------------------------------
-- Getters
-------------------------------------------------------------------------------
-- | Build an (index-preserving) 'Getter' from an arbitrary Haskell function.
--
-- @
-- 'to' f '.' 'to' g ≡ 'to' (g '.' f)
-- @
--
-- @
-- a '^.' 'to' f ≡ f a
-- @
--
-- >>> a ^.to f
-- f a
--
-- >>> ("hello","world")^.to snd
-- "world"
--
-- >>> 5^.to succ
-- 6
--
-- >>> (0, -5)^._2.to abs
-- 5
--
-- @
-- 'to' :: (s -> a) -> 'IndexPreservingGetter' s a
-- @
to :: (Profunctor p, Contravariant f) => (s -> a) -> Optic' p f s a
-- 'dimap' feeds the source through @k@ on the way in, while @'contramap' k@
-- converts the contravariant functor's result from @f a@ back to @f s@.
to k = dimap k (contramap k)
{-# INLINE to #-}
-- |
-- @
-- 'ito' :: (s -> (i, a)) -> 'IndexedGetter' i s a
-- @
ito :: (Indexable i p, Contravariant f) => (s -> (i, a)) -> Over' p f s a
-- @k@ yields an (index, value) pair; 'indexed' plus 'uncurry' hand both to
-- the consumer, and @'contramap' ('snd' . k)@ repairs the functor's type
-- index from @a@ back to @s@.
ito k = dimap k (contramap (snd . k)) . uncurry . indexed
{-# INLINE ito #-}
-- | Build an constant-valued (index-preserving) 'Getter' from an arbitrary Haskell value.
--
-- @
-- 'like' a '.' 'like' b ≡ 'like' b
-- a '^.' 'like' b ≡ b
-- a '^.' 'like' b ≡ a '^.' 'to' ('const' b)
-- @
--
-- This can be useful as a second case 'failing' a 'Fold'
-- e.g. @foo `failing` 'like' 0@
--
-- @
-- 'like' :: a -> 'IndexPreservingGetter' s a
-- @
like :: (Profunctor p, Contravariant f) => a -> Optic' p f s a
-- Point-free form of @\a -> 'to' ('const' a)@: ignore the source and always
-- present the fixed value.
like = to . const
{-# INLINE like #-}
-- |
-- @
-- 'ilike' :: i -> a -> 'IndexedGetter' i s a
-- @
ilike :: (Indexable i p, Contravariant f) => i -> a -> Over' p f s a
-- Ignore the source entirely and always yield the fixed index\/value pair.
ilike i a = ito (\_ -> (i, a))
{-# INLINE ilike #-}
-- | When you see this in a type signature it indicates that you can
-- pass the function a 'Lens', 'Getter',
-- 'Control.Lens.Traversal.Traversal', 'Control.Lens.Fold.Fold',
-- 'Control.Lens.Prism.Prism', 'Control.Lens.Iso.Iso', or one of
-- the indexed variants, and it will just \"do the right thing\".
--
-- Most 'Getter' combinators are able to be used with both a 'Getter' or a
-- 'Control.Lens.Fold.Fold' in limited situations, to do so, they need to be
-- monomorphic in what we are going to extract with 'Control.Applicative.Const'. To be compatible
-- with 'Lens', 'Control.Lens.Traversal.Traversal' and
-- 'Control.Lens.Iso.Iso' we also restricted choices of the irrelevant @t@ and
-- @b@ parameters.
--
-- If a function accepts a @'Getting' r s a@, then when @r@ is a 'Data.Monoid.Monoid', then
-- you can pass a 'Control.Lens.Fold.Fold' (or
-- 'Control.Lens.Traversal.Traversal'), otherwise you can only pass this a
-- 'Getter' or 'Lens'.
type Getting r s a = (a -> Const r a) -> s -> Const r s
-- Since @'Const' r@'s 'Functor' instance never applies the function it is
-- given, a 'Getting' can only read targets — it cannot rebuild the structure.
-- | Used to consume an 'Control.Lens.Fold.IndexedFold'.
type IndexedGetting i m s a = Indexed i a (Const m a) -> s -> Const m s
-- | This is a convenient alias used when consuming (indexed) getters and (indexed) folds
-- in a highly general fashion.
type Accessing p m s a = p a (Const m a) -> s -> Const m s
-------------------------------------------------------------------------------
-- Getting Values
-------------------------------------------------------------------------------
-- | View the value pointed to by a 'Getter', 'Control.Lens.Iso.Iso' or
-- 'Lens' or the result of folding over all the results of a
-- 'Control.Lens.Fold.Fold' or 'Control.Lens.Traversal.Traversal' that points
-- at a monoidal value.
--
-- @
-- 'view' '.' 'to' ≡ 'id'
-- @
--
-- >>> view (to f) a
-- f a
--
-- >>> view _2 (1,"hello")
-- "hello"
--
-- >>> view (to succ) 5
-- 6
--
-- >>> view (_2._1) ("hello",("world","!!!"))
-- "world"
--
--
-- As 'view' is commonly used to access the target of a 'Getter' or obtain a monoidal summary of the targets of a 'Fold',
-- It may be useful to think of it as having one of these more restricted signatures:
--
-- @
-- 'view' :: 'Getter' s a -> s -> a
-- 'view' :: 'Data.Monoid.Monoid' m => 'Control.Lens.Fold.Fold' s m -> s -> m
-- 'view' :: 'Control.Lens.Iso.Iso'' s a -> s -> a
-- 'view' :: 'Lens'' s a -> s -> a
-- 'view' :: 'Data.Monoid.Monoid' m => 'Control.Lens.Traversal.Traversal'' s m -> s -> m
-- @
--
-- In a more general setting, such as when working with a 'Monad' transformer stack you can use:
--
-- @
-- 'view' :: 'MonadReader' s m => 'Getter' s a -> m a
-- 'view' :: ('MonadReader' s m, 'Data.Monoid.Monoid' a) => 'Control.Lens.Fold.Fold' s a -> m a
-- 'view' :: 'MonadReader' s m => 'Control.Lens.Iso.Iso'' s a -> m a
-- 'view' :: 'MonadReader' s m => 'Lens'' s a -> m a
-- 'view' :: ('MonadReader' s m, 'Data.Monoid.Monoid' a) => 'Control.Lens.Traversal.Traversal'' s a -> m a
-- @
view :: MonadReader s m => Getting a s a -> m a
-- @l Const@ instantiates the getter at the 'Const' functor, so applying it to
-- the environment yields @Const a s@; 'getConst' unwraps the answer.  ('#.')
-- is coercion-based composition from "Data.Profunctor.Unsafe" and behaves
-- like ('.') for newtype wrappers such as 'Const'.
view l = Reader.asks (getConst #. l Const)
{-# INLINE view #-}
-- | View a function of the value pointed to by a 'Getter' or 'Lens' or the result of
-- folding over the result of mapping the targets of a 'Control.Lens.Fold.Fold' or
-- 'Control.Lens.Traversal.Traversal'.
--
-- @
-- 'views' l f ≡ 'view' (l '.' 'to' f)
-- @
--
-- >>> views (to f) g a
-- g (f a)
--
-- >>> views _2 length (1,"hello")
-- 5
--
-- As 'views' is commonly used to access the target of a 'Getter' or obtain a monoidal summary of the targets of a 'Fold',
-- It may be useful to think of it as having one of these more restricted signatures:
--
-- @
-- 'views' :: 'Getter' s a -> (a -> r) -> s -> r
-- 'views' :: 'Data.Monoid.Monoid' m => 'Control.Lens.Fold.Fold' s a -> (a -> m) -> s -> m
-- 'views' :: 'Control.Lens.Iso.Iso'' s a -> (a -> r) -> s -> r
-- 'views' :: 'Lens'' s a -> (a -> r) -> s -> r
-- 'views' :: 'Data.Monoid.Monoid' m => 'Control.Lens.Traversal.Traversal'' s a -> (a -> m) -> s -> m
-- @
--
-- In a more general setting, such as when working with a 'Monad' transformer stack you can use:
--
-- @
-- 'views' :: 'MonadReader' s m => 'Getter' s a -> (a -> r) -> m r
-- 'views' :: ('MonadReader' s m, 'Data.Monoid.Monoid' r) => 'Control.Lens.Fold.Fold' s a -> (a -> r) -> m r
-- 'views' :: 'MonadReader' s m => 'Control.Lens.Iso.Iso'' s a -> (a -> r) -> m r
-- 'views' :: 'MonadReader' s m => 'Lens'' s a -> (a -> r) -> m r
-- 'views' :: ('MonadReader' s m, 'Data.Monoid.Monoid' r) => 'Control.Lens.Traversal.Traversal'' s a -> (a -> r) -> m r
-- @
--
-- @
-- 'views' :: 'MonadReader' s m => 'Getting' r s a -> (a -> r) -> m r
-- @
views :: MonadReader s m => LensLike' (Const r) s a -> (a -> r) -> m r
-- Each target is mapped through @f@ and wrapped in 'Const' before the optic
-- combines the results (monoidally, for folds); 'getConst' extracts the
-- summary.  ('#.') is "Data.Profunctor.Unsafe" composition, acting as ('.').
views l f = Reader.asks (getConst #. l (Const #. f))
{-# INLINE views #-}
-- | View the value pointed to by a 'Getter' or 'Lens' or the
-- result of folding over all the results of a 'Control.Lens.Fold.Fold' or
-- 'Control.Lens.Traversal.Traversal' that points at a monoidal values.
--
-- This is the same operation as 'view' with the arguments flipped.
--
-- The fixity and semantics are such that subsequent field accesses can be
-- performed with ('Prelude..').
--
-- >>> (a,b)^._2
-- b
--
-- >>> ("hello","world")^._2
-- "world"
--
-- >>> import Data.Complex
-- >>> ((0, 1 :+ 2), 3)^._1._2.to magnitude
-- 2.23606797749979
--
-- @
-- ('^.') :: s -> 'Getter' s a -> a
-- ('^.') :: 'Data.Monoid.Monoid' m => s -> 'Control.Lens.Fold.Fold' s m -> m
-- ('^.') :: s -> 'Control.Lens.Iso.Iso'' s a -> a
-- ('^.') :: s -> 'Lens'' s a -> a
-- ('^.') :: 'Data.Monoid.Monoid' m => s -> 'Control.Lens.Traversal.Traversal'' s m -> m
-- @
(^.) :: s -> Getting a s a -> a
-- Flipped, pure counterpart of 'view': run the getter at 'Const' and unwrap.
s ^. l = getConst (l Const s)
{-# INLINE (^.) #-}
-------------------------------------------------------------------------------
-- MonadState
-------------------------------------------------------------------------------
-- | Use the target of a 'Lens', 'Control.Lens.Iso.Iso', or
-- 'Getter' in the current state, or use a summary of a
-- 'Control.Lens.Fold.Fold' or 'Control.Lens.Traversal.Traversal' that points
-- to a monoidal value.
--
-- >>> evalState (use _1) (a,b)
-- a
--
-- >>> evalState (use _1) ("hello","world")
-- "hello"
--
-- @
-- 'use' :: 'MonadState' s m => 'Getter' s a -> m a
-- 'use' :: ('MonadState' s m, 'Data.Monoid.Monoid' r) => 'Control.Lens.Fold.Fold' s r -> m r
-- 'use' :: 'MonadState' s m => 'Control.Lens.Iso.Iso'' s a -> m a
-- 'use' :: 'MonadState' s m => 'Lens'' s a -> m a
-- 'use' :: ('MonadState' s m, 'Data.Monoid.Monoid' r) => 'Control.Lens.Traversal.Traversal'' s r -> m r
-- @
use :: MonadState s m => Getting a s a -> m a
-- Read the current state and apply 'view' to it.
use l = State.gets (view l)
{-# INLINE use #-}
-- | Use the target of a 'Lens', 'Control.Lens.Iso.Iso' or
-- 'Getter' in the current state, or use a summary of a
-- 'Control.Lens.Fold.Fold' or 'Control.Lens.Traversal.Traversal' that
-- points to a monoidal value.
--
-- >>> evalState (uses _1 length) ("hello","world")
-- 5
--
-- @
-- 'uses' :: 'MonadState' s m => 'Getter' s a -> (a -> r) -> m r
-- 'uses' :: ('MonadState' s m, 'Data.Monoid.Monoid' r) => 'Control.Lens.Fold.Fold' s a -> (a -> r) -> m r
-- 'uses' :: 'MonadState' s m => 'Lens'' s a -> (a -> r) -> m r
-- 'uses' :: 'MonadState' s m => 'Control.Lens.Iso.Iso'' s a -> (a -> r) -> m r
-- 'uses' :: ('MonadState' s m, 'Data.Monoid.Monoid' r) => 'Control.Lens.Traversal.Traversal'' s a -> (a -> r) -> m r
-- @
--
-- @
-- 'uses' :: 'MonadState' s m => 'Getting' r s t a b -> (a -> r) -> m r
-- @
uses :: MonadState s m => LensLike' (Const r) s a -> (a -> r) -> m r
-- Like 'use', but maps the target(s) through @f@ via 'views' first.
uses l f = State.gets (views l f)
{-# INLINE uses #-}
-- | This is a generalized form of 'listen' that only extracts the portion of
-- the log that is focused on by a 'Getter'. If given a 'Fold' or a 'Traversal'
-- then a monoidal summary of the parts of the log that are visited will be
-- returned.
--
-- @
-- 'listening' :: 'MonadWriter' w m => 'Getter' w u -> m a -> m (a, u)
-- 'listening' :: 'MonadWriter' w m => 'Lens'' w u -> m a -> m (a, u)
-- 'listening' :: 'MonadWriter' w m => 'Iso'' w u -> m a -> m (a, u)
-- 'listening' :: ('MonadWriter' w m, 'Monoid' u) => 'Fold' w u -> m a -> m (a, u)
-- 'listening' :: ('MonadWriter' w m, 'Monoid' u) => 'Traversal'' w u -> m a -> m (a, u)
-- 'listening' :: ('MonadWriter' w m, 'Monoid' u) => 'Prism'' w u -> m a -> m (a, u)
-- @
listening :: MonadWriter w m => Getting u w u -> m a -> m (a, u)
-- Run the action under 'listen', then project the accumulated log through
-- the getter before returning it alongside the action's result.
listening l m =
  listen m >>= \(a, w) -> return (a, view l w)
{-# INLINE listening #-}
-- | This is a generalized form of 'listen' that only extracts the portion of
-- the log that is focused on by a 'Getter'. If given a 'Fold' or a 'Traversal'
-- then a monoidal summary of the parts of the log that are visited will be
-- returned.
--
-- @
-- 'ilistening' :: 'MonadWriter' w m => 'IndexedGetter' i w u -> m a -> m (a, (i, u))
-- 'ilistening' :: 'MonadWriter' w m => 'IndexedLens'' i w u -> m a -> m (a, (i, u))
-- 'ilistening' :: ('MonadWriter' w m, 'Monoid' u) => 'IndexedFold' i w u -> m a -> m (a, (i, u))
-- 'ilistening' :: ('MonadWriter' w m, 'Monoid' u) => 'IndexedTraversal'' i w u -> m a -> m (a, (i, u))
-- @
ilistening :: MonadWriter w m => IndexedGetting i (i, u) w u -> m a -> m (a, (i, u))
-- As 'listening', but the projected log portion is paired with its index
-- via 'iview'.
ilistening l m =
  listen m >>= \(a, w) -> return (a, iview l w)
{-# INLINE ilistening #-}
-- | This is a generalized form of 'listen' that only extracts the portion of
-- the log that is focused on by a 'Getter'. If given a 'Fold' or a 'Traversal'
-- then a monoidal summary of the parts of the log that are visited will be
-- returned.
--
-- @
-- 'listenings' :: 'MonadWriter' w m => 'Getter' w u -> (u -> v) -> m a -> m (a, v)
-- 'listenings' :: 'MonadWriter' w m => 'Lens'' w u -> (u -> v) -> m a -> m (a, v)
-- 'listenings' :: 'MonadWriter' w m => 'Iso'' w u -> (u -> v) -> m a -> m (a, v)
-- 'listenings' :: ('MonadWriter' w m, 'Monoid' v) => 'Fold' w u -> (u -> v) -> m a -> m (a, v)
-- 'listenings' :: ('MonadWriter' w m, 'Monoid' v) => 'Traversal'' w u -> (u -> v) -> m a -> m (a, v)
-- 'listenings' :: ('MonadWriter' w m, 'Monoid' v) => 'Prism'' w u -> (u -> v) -> m a -> m (a, v)
-- @
listenings :: MonadWriter w m => Getting v w u -> (u -> v) -> m a -> m (a, v)
-- As 'listening', but the selected log portion is additionally mapped
-- through @uv@ via 'views'.
listenings l uv m =
  listen m >>= \(a, w) -> return (a, views l uv w)
{-# INLINE listenings #-}
-- | This is a generalized form of 'listen' that only extracts the portion of
-- the log that is focused on by a 'Getter'. If given a 'Fold' or a 'Traversal'
-- then a monoidal summary of the parts of the log that are visited will be
-- returned.
--
-- @
-- 'ilistenings' :: 'MonadWriter' w m => 'IndexedGetter' w u -> (i -> u -> v) -> m a -> m (a, v)
-- 'ilistenings' :: 'MonadWriter' w m => 'IndexedLens'' w u -> (i -> u -> v) -> m a -> m (a, v)
-- 'ilistenings' :: ('MonadWriter' w m, 'Monoid' v) => 'IndexedFold' w u -> (i -> u -> v) -> m a -> m (a, v)
-- 'ilistenings' :: ('MonadWriter' w m, 'Monoid' v) => 'IndexedTraversal'' w u -> (i -> u -> v) -> m a -> m (a, v)
-- @
ilistenings :: MonadWriter w m => IndexedGetting i v w u -> (i -> u -> v) -> m a -> m (a, v)
-- As 'listenings', but @iuv@ also receives the index of each selected log
-- portion, via 'iviews'.
ilistenings l iuv m =
  listen m >>= \(a, w) -> return (a, iviews l iuv w)
{-# INLINE ilistenings #-}
------------------------------------------------------------------------------
-- Indexed Getters
------------------------------------------------------------------------------
-- | View the index and value of an 'IndexedGetter' into the current environment as a pair.
--
-- When applied to an 'IndexedFold' the result will most likely be a nonsensical monoidal summary of
-- the indices tupled with a monoidal summary of the values and probably not whatever it is you wanted.
iview :: MonadReader s m => IndexedGetting i (i,a) s a -> m (i,a)
-- The 'Indexed' wrapper pairs each target with its index before it is stored
-- in the 'Const' accumulator; 'getConst' then unwraps the pair.
iview l = asks (getConst #. l (Indexed $ \i -> Const #. (,) i))
{-# INLINE iview #-}
-- | View a function of the index and value of an 'IndexedGetter' into the current environment.
--
-- When applied to an 'IndexedFold' the result will be a monoidal summary instead of a single answer.
--
-- @
-- 'iviews' ≡ 'Control.Lens.Fold.ifoldMapOf'
-- @
iviews :: MonadReader s m => IndexedGetting i r s a -> (i -> a -> r) -> m r
-- @'Indexed' f@ lets the optic supply the index alongside each value; every
-- result is wrapped in 'Const' and combined by the optic.
iviews l f = asks (getConst #. l (Const #. Indexed f))
{-# INLINE iviews #-}
-- | Use the index and value of an 'IndexedGetter' into the current state as a pair.
--
-- When applied to an 'IndexedFold' the result will most likely be a nonsensical monoidal summary of
-- the indices tupled with a monoidal summary of the values and probably not whatever it is you wanted.
iuse :: MonadState s m => IndexedGetting i (i,a) s a -> m (i,a)
-- State-monad analogue of 'iview': pair each target with its index inside
-- 'Const', then unwrap.
iuse l = gets (getConst #. l (Indexed $ \i -> Const #. (,) i))
{-# INLINE iuse #-}
-- | Use a function of the index and value of an 'IndexedGetter' into the current state.
--
-- When applied to an 'IndexedFold' the result will be a monoidal summary instead of a single answer.
iuses :: MonadState s m => IndexedGetting i r s a -> (i -> a -> r) -> m r
-- State-monad analogue of 'iviews'.
iuses l f = gets (getConst #. l (Const #. Indexed f))
{-# INLINE iuses #-}
-- | View the index and value of an 'IndexedGetter' or 'IndexedLens'.
--
-- This is the same operation as 'iview' with the arguments flipped.
--
-- The fixity and semantics are such that subsequent field accesses can be
-- performed with ('Prelude..').
--
-- @
-- ('^@.') :: s -> 'IndexedGetter' i s a -> (i, a)
-- ('^@.') :: s -> 'IndexedLens'' i s a -> (i, a)
-- @
--
-- The result probably doesn't have much meaning when applied to an 'IndexedFold'.
(^@.) :: s -> IndexedGetting i (i, a) s a -> (i, a)
-- Flipped, pure counterpart of 'iview': pair each target with its index
-- inside 'Const', then unwrap.
s ^@. l = getConst $ l (Indexed $ \i -> Const #. (,) i) s
{-# INLINE (^@.) #-}
-- | Coerce a 'Getter'-compatible 'LensLike' to a 'LensLike''. This
-- is useful when using a 'Traversal' that is not simple as a 'Getter' or a
-- 'Fold'.
getting :: (Functor f, Contravariant f) => LensLike f s t a b -> LensLike' f s a
-- 'phantom' freely re-indexes a functor that is both covariant and
-- contravariant, fixing the optic's argument and result parameters to the
-- simple (@s@\/@a@ only) form.
getting l f = phantom . l (phantom . f)
-- INLINE added for consistency: every other definition in this module
-- carries an INLINE pragma.
{-# INLINE getting #-}
| omefire/lens | src/Control/Lens/Getter.hs | bsd-3-clause | 18,867 | 0 | 14 | 4,194 | 2,056 | 1,259 | 797 | 102 | 1 |
<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE helpset PUBLIC "-//Sun Microsystems Inc.//DTD JavaHelp HelpSet Version 2.0//EN" "http://java.sun.com/products/javahelp/helpset_2_0.dtd">
<!-- JavaHelp helpset (Greek, el-GR) for the ZAP "Passive Scan Rules - Alpha"
     add-on: declares the map file plus TOC, index, full-text search and
     favorites views.  Label strings are localized content; do not edit. -->
<helpset version="2.0" xml:lang="el-GR">
	<title>Passive Scan Rules - Alpha | ZAP Extension</title>
	<maps>
		<homeID>top</homeID>
		<mapref location="map.jhm"/>
	</maps>
	<view>
		<name>TOC</name>
		<label>Contents</label>
		<type>org.zaproxy.zap.extension.help.ZapTocView</type>
		<data>toc.xml</data>
	</view>
	<view>
		<name>Index</name>
		<label>Ευρετήριο</label>
		<type>javax.help.IndexView</type>
		<data>index.xml</data>
	</view>
	<view>
		<name>Search</name>
		<label>Αναζήτηση</label>
		<type>javax.help.SearchView</type>
		<data engine="com.sun.java.help.search.DefaultSearchEngine">
			JavaHelpSearch
		</data>
	</view>
	<view>
		<name>Favorites</name>
		<label>Favorites</label>
		<type>javax.help.FavoritesView</type>
	</view>
</helpset>
</helpset> | kingthorin/zap-extensions | addOns/pscanrulesAlpha/src/main/javahelp/org/zaproxy/zap/extension/pscanrulesAlpha/resources/help_el_GR/helpset_el_GR.hs | apache-2.0 | 1,012 | 90 | 29 | 162 | 440 | 231 | 209 | -1 | -1 |
module Main (main) where
import System.FilePath.Glob (glob)
import Test.DocTest (doctest)
main :: IO ()
-- Collect every Haskell source file under library\/ and run its doctests.
main = do
  sources <- glob "library/**/*.hs"
  doctest sources
| yaccz/saturnin | tests/DocTest.hs | bsd-3-clause | 176 | 0 | 6 | 50 | 52 | 30 | 22 | 5 | 1 |
-- (c) Simon Marlow 2011, see the file LICENSE for copying terms.
--
-- Sample geturls.hs (CEFP summer school notes, 2011)
--
-- Downloading multiple URLs concurrently, timing the downloads
--
-- Compile with:
-- ghc -threaded --make geturls.hs
import GetURL
import TimeIt
import Control.Monad
import Control.Concurrent
import Control.Exception
import Text.Printf
import Control.Concurrent.STM
import qualified Data.ByteString as B
-----------------------------------------------------------------------------
-- Our Async API:
-- | A handle to a computation running in its own thread; the result, once
-- available, is published through a 'TVar'.
data Async a = Async (TVar (Maybe a))

-- | Start an 'IO' action in a fresh thread and return immediately with a
-- handle that 'wait' can block on.
async :: IO a -> IO (Async a)
async action = do
  resultVar <- atomically (newTVar Nothing)
  forkIO $ do
    result <- action
    atomically (writeTVar resultVar (Just result))
  return (Async resultVar)

-- | Block until the asynchronous computation has finished, then return its
-- result.  'retry' suspends the transaction until the 'TVar' is filled.
wait :: Async a -> IO a
wait (Async resultVar) = atomically $
  readTVar resultVar >>= maybe retry return
-----------------------------------------------------------------------------
-- | The URLs downloaded concurrently by 'main'.  The explicit top-level
-- signature avoids the monomorphism restriction and the -Wall
-- missing-signatures warning; the inferred type was already @[String]@,
-- so callers are unaffected.
sites :: [String]
sites = ["http://www.google.com",
         "http://www.bing.com",
         "http://www.yahoo.com",
         "http://www.wikipedia.com/wiki/Spade",
         "http://www.wikipedia.com/wiki/Shovel"]
-- Spawn one download per site, then block until every download has finished.
main = do
  downloads <- mapM (async . http) sites
  mapM wait downloads
  where
    http url = do
      (page, time) <- timeit (getURL url)
      printf "downloaded: %s (%d bytes, %.2fs)\n" url (B.length page) time
| prt2121/haskell-practice | parconc/geturlsstm.hs | apache-2.0 | 1,345 | 3 | 15 | 253 | 346 | 172 | 174 | 29 | 2 |
-- GHC testsuite case (parser/should_fail): applying 'when' to a bare @do@
-- block requires the BlockArguments extension, so this module is *expected*
-- to fail to parse.  The broken syntax below is intentional — do not "fix" it.
module NoBlockArgumentsFail where
import Control.Monad
foo :: IO ()
foo = when True do
  return ()
| shlevy/ghc | testsuite/tests/parser/should_fail/NoBlockArgumentsFail.hs | bsd-3-clause | 101 | 0 | 9 | 20 | 37 | 19 | 18 | -1 | -1 |
{-# LANGUAGE ConstraintKinds, FlexibleContexts, FlexibleInstances, MultiParamTypeClasses #-}
-- GHC regression test for ticket #13267: instance declarations whose head is
-- a constraint synonym (a single constraint, and a tuple of constraints) —
-- the compiler must handle these gracefully.  The empty instance bodies are
-- intentional.
module T13267 where
type C1 a = (Show (a -> Bool))
instance C1 Int where
type C2 a = (Show Bool, Show Int)
instance C2 Int where
| ezyang/ghc | testsuite/tests/polykinds/T13267.hs | bsd-3-clause | 226 | 0 | 8 | 38 | 60 | 35 | 25 | 6 | 0 |
module T4138 where
import T4138_A
-- We NOINLINE f because we want to count the number of F#s in the
-- -ddump-simpl output, so we don't want to be confused by F#s
-- appearing in the inlining
{-# NOINLINE f #-}
f :: (Float, Float) -> ()
-- 'rnf' is imported from T4138_A; the expected -ddump-simpl output (see the
-- comment below) shows it forcing each component via GHC.Types.F# unboxing.
f = rnf
{-
We're hoping that the output will include something like:
\ (ds_afa :: (GHC.Types.Float, GHC.Types.Float)) ->
case ds_afa of _ { (x_afd, y_afe) ->
case x_afd of _ { GHC.Types.F# ipv_afm ->
T4138_A.$fNFDataFloat_$crnf y_afe } }
-}
| hferreiro/replay | testsuite/tests/simplCore/should_compile/T4138.hs | bsd-3-clause | 498 | 0 | 6 | 106 | 34 | 23 | 11 | 5 | 1 |
-- #hide
module Hidden where
-- | Return the argument unchanged.
hidden :: Int -> Int
hidden x = x
| wxwxwwxxx/ghc | testsuite/tests/haddock/haddock_examples/Hidden.hs | bsd-3-clause | 63 | 0 | 5 | 14 | 22 | 13 | 9 | 3 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.