code stringlengths 5 1.03M | repo_name stringlengths 5 90 | path stringlengths 4 158 | license stringclasses 15 values | size int64 5 1.03M | n_ast_errors int64 0 53.9k | ast_max_depth int64 2 4.17k | n_whitespaces int64 0 365k | n_ast_nodes int64 3 317k | n_ast_terminals int64 1 171k | n_ast_nonterminals int64 1 146k | loc int64 -1 37.3k | cycloplexity int64 -1 1.31k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
module Main where
import AbstractedCommunication
-- | Program entry point: prints a single placeholder line to stdout.
main :: IO ()
main = putStrLn "nothinggggg"
| armoredsoftware/protocol | tpm/mainline/shared/protocolprotocol/AbstractedCommunicationMain.hs | bsd-3-clause | 90 | 0 | 7 | 18 | 18 | 10 | 8 | 4 | 1 |
-- Copyright (c) 2016-present, Facebook, Inc.
-- All rights reserved.
--
-- This source code is licensed under the BSD-style license found in the
-- LICENSE file in the root directory of this source tree. An additional grant
-- of patent rights can be found in the PATENTS file in the same directory.
module Duckling.Time.KO.Tests
( tests ) where
import Prelude
import Data.String
import Test.Tasty
import Duckling.Dimensions.Types
import Duckling.Testing.Asserts
import Duckling.Time.KO.Corpus
-- | Tasty test tree for the Korean (KO) Time dimension: runs the standard
-- corpus assertions against 'Duckling.Time.KO.Corpus.corpus'.
tests :: TestTree
tests = testGroup "KO Tests" [corpusTest]
  where
    -- Standard corpus check for the Time dimension.
    corpusTest = makeCorpusTest [This Time] corpus
| rfranek/duckling | tests/Duckling/Time/KO/Tests.hs | bsd-3-clause | 591 | 0 | 9 | 96 | 80 | 51 | 29 | 11 | 1 |
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE TypeFamilies #-}
-- | Symbolic evaluation for the schema 'Invariant' language (as opposed to
-- the 'Prop' or 'Term' languages).
module Pact.Analyze.Eval.Invariant where
import Control.Lens (at, view, (%=), (?~))
import Control.Monad.Except (MonadError (throwError))
import Control.Monad.Reader (MonadReader, ReaderT, local)
import Control.Monad.State.Strict (MonadState,
StateT (StateT, runStateT))
import Data.Map.Strict (Map)
import Data.SBV (Mergeable (symbolicMerge))
import Pact.Analyze.Errors
import Pact.Analyze.Eval.Core
import Pact.Analyze.Types
import Pact.Analyze.Types.Eval
import Pact.Analyze.Util
-- | Monad for symbolically evaluating a schema invariant.
--
-- The transformer stack provides:
--   * 'StateT SymbolicSuccess' -- running success flag, updated by
--     'markFailure' in the 'Analyzer' instance below
--   * 'ReaderT (Located (Map VarId AVal))' -- in-scope variable bindings,
--     tagged with a source location for error reporting
--   * 'Either AnalyzeFailure' -- fatal analysis errors
newtype InvariantCheck a = InvariantCheck
  { unInvariantCheck :: StateT SymbolicSuccess
      (ReaderT
        (Located (Map VarId AVal))
        (Either AnalyzeFailure)) a
  } deriving (Functor, Applicative, Monad, MonadError AnalyzeFailure,
    MonadReader (Located (Map VarId AVal)), MonadState SymbolicSuccess)
-- | Merge two invariant computations branch-wise: run both from the same
-- initial success state, then symbolically merge both the results and the
-- final states.
instance (Mergeable a) => Mergeable (InvariantCheck a) where
  symbolicMerge f t l r = InvariantCheck $ StateT $ \s0 -> do
    (lRes, lSt) <- runStateT (unInvariantCheck l) s0
    (rRes, rSt) <- runStateT (unInvariantCheck r) s0
    let merge :: Mergeable b => b -> b -> b
        merge = symbolicMerge f t
    pure (merge lRes rRes, merge lSt rSt)
-- | Hook 'InvariantCheck' into the shared 'Analyzer' machinery so that
-- 'evalCore' can be reused for invariant terms.
instance Analyzer InvariantCheck where
  type TermOf InvariantCheck = Invariant
  eval (CoreInvariant tm) = evalCore tm
  -- Attach the location of the current variable scope to the error.
  throwErrorNoLoc err = do
    info <- view location
    throwError $ AnalyzeFailure info err
  -- Look up a variable binding ('Nothing' if unbound).
  getVar vid = view (located . at vid)
  -- Run a computation with an additional variable binding in scope.
  withVar vid val m = local (located . at vid ?~ val) m
  -- Record a possible failure by AND-ing the negated condition into the
  -- running 'SymbolicSuccess' state.
  markFailure b = id %= (.&& SymbolicSuccess (sNot b))
  withMergeableAnalyzer ty f = withSymVal ty f
| kadena-io/pact | src-tool/Pact/Analyze/Eval/Invariant.hs | bsd-3-clause | 2,006 | 0 | 13 | 560 | 525 | 290 | 235 | -1 | -1 |
{- |
Module : ./CASL/Taxonomy.hs
Description : converters for theories to MMiSSOntology
(subsorting and concept taxonomies)
Copyright : (c) Klaus Luettich, Uni Bremen 2002-2004
License : GPLv2 or higher, see LICENSE.txt
Maintainer : luecke@informatik.uni-bremen.de
Stability : provisional
Portability : portable
Converters for theories to MMiSSOntology (subsorting and concept taxonomies)
the functions showOntClass, showRelationName and showRelation may be used
for printing out MMiSS Ontologies in LaTeX to Stdout
(see commets marked with --printOut).
Please do not remove them without reason!!
-}
module CASL.Taxonomy
( -- * Conversion
convTaxo
-- * Printing of MMiSS ontologies in LaTeX
, showOntClass, showRelationName, showRelation) where
import CASL.AS_Basic_CASL
import CASL.Sign
import Taxonomy.MMiSSOntology
import Common.Taxonomy
import Common.Result
import Common.Id ()
import Common.AS_Annotation
import qualified Common.Lib.MapSet as MapSet
import qualified Common.Lib.Rel as Rel
import qualified Data.Map as Map
import qualified Data.Set as Set
{- | convert a generic CASL signature into the MMiSS ontology
datastructure for display as taxonomy graph -}
convTaxo :: TaxoGraphKind -> MMiSSOntology
         -> Sign f e
         -> [Named (FORMULA f)] -> Result MMiSSOntology
convTaxo kind onto sign sens =
    fromWithError $
    case kind of
      -- the subsort graph is derived from the signature alone
      KSubsort -> convSign KSubsort onto sign
      -- the concept graph additionally folds the sentences in
      KConcept -> foldl convSen (convSign KConcept onto sign) sens
convSign :: TaxoGraphKind
         -> MMiSSOntology -> Sign f e -> WithError MMiSSOntology
-- the concept graph builds on the subsort graph: first insert every sort
-- as a class, and only if that succeeded add the predicates via 'convPred'
convSign KConcept o s =
    case convSign KSubsort o s of
    wOnto -> weither (const wOnto) (convPred s) wOnto
convSign KSubsort onto sign =
    Set.fold addSor (hasValue onto) $ sortSet sign
    -- start with top sorts (maybe use Rel.mostRight?)
    where -- keep only direct subsort edges (intransitive kernel)
          relMap = Rel.toMap $ Rel.intransKernel $ sortRel sign
          -- insert one sort as an ontology class with its direct supersorts;
          -- a previously failed ontology is passed through unchanged
          addSor sort weOnto =
              let sortStr = show sort
              in weither (const weOnto)
                 (\ on -> insClass on sortStr
                          (maybe [] toStrL $
                                Map.lookup sort relMap))
                 weOnto
          -- class name is used both as name and as label
          insClass o nm supL =
              insertClass o nm nm supL (Just SubSort)
          toStrL = map show . Set.toList
convPred :: Sign f e -> MMiSSOntology -> WithError MMiSSOntology
convPred s o =
    -- first only binary preds; later also unary preds
    Map.foldWithKey addPred (hasValue o) $ MapSet.toMap $ predMap s
    where -- for each predicate name, register it as a base relation and
          -- add one relation type per binary profile; a failed ontology
          -- is passed through unchanged
          addPred pn tSet wOnto =
              weither (const wOnto) insBinaryPred wOnto
              where insBinaryPred on =
                        let binT = Set.filter isBinPredType tSet
                        in if Set.null binT
                           then hasValue on
                           else Set.fold insType (insName on) binT
                    -- relation name is used both as name and as label
                    insName on = insertBaseRelation on (show pn) (show pn)
                                 Nothing Nothing
                    -- NOTE(review): the [a1, a2] pattern is partial but is
                    -- guarded by the isBinPredType filter above
                    insType t wOn =
                        weither (const wOn)
                        (\ ont ->
                             let [a1, a2] = predArgs t
                                 src = show a1
                                 tar = show a2
                             in insertRelationType ont (show pn)
                                    src tar)
                        wOn
-- | Sentences are currently ignored for the concept taxonomy: every named
-- formula leaves the ontology unchanged (an already-failed ontology stays
-- failed).
convSen :: WithError MMiSSOntology
        -> Named (FORMULA f) -> WithError MMiSSOntology
convSen wErrOnto _sen = weither (const wErrOnto) hasValue wErrOnto
-- implemented but not used by now
-- | Render a class and each of its superclasses as a sequence of LaTeX
-- @\\Class{name}{label}{superclass}@ commands (the class name doubles as
-- its label). An empty superclass list yields the empty string.
--
-- Replaces the original @foldl@ with @(++)@, which built left-nested
-- appends (quadratic) and a lazy accumulator, by the equivalent linear
-- 'concatMap'.
showOntClass :: String -> [String] -> String
showOntClass cln = concatMap ontClass
  where ontClass s = "\\Class{" ++ cln ++ "}{" ++ cln ++ "}{" ++ s ++ "}"
-- | Render a relation name as a LaTeX @\\RelationName@ command; the name
-- doubles as its own label.
showRelationName :: String -> String
showRelationName rn = concat ["\\RelationName{", rn, "}{", rn, "}"]
-- | Render one relation instance as a LaTeX @\\Relation@ command with the
-- relation name, source class, target class, and an empty options field.
showRelation :: String -> String -> String -> String
showRelation rn s t =
    concat ["\\Relation{", rn, "}{", s, "}{", t, "}{}"]
| spechub/Hets | CASL/Taxonomy.hs | gpl-2.0 | 4,056 | 0 | 16 | 1,300 | 915 | 467 | 448 | 73 | 2 |
-- (C) 2011-14 Nicola Bonelli <nicola@pfq.io>
--
-- This program is free software; you can redistribute it and/or modify
-- it under the terms of the GNU General Public License as published by
-- the Free Software Foundation; either version 2 of the License, or
-- (at your option) any later version.
--
-- This program is distributed in the hope that it will be useful,
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- GNU General Public License for more details.
--
-- You should have received a copy of the GNU General Public License
-- along with this program; if not, write to the Free Software Foundation,
-- Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
--
-- The full GNU General Public License is included in this distribution in
-- the file called "COPYING".
import Network.PFq.Default
import Network.PFq.Experimental
import Network.PFq.Lang
import Control.Monad
-- prettyPrinter (debug):
-- | Debug helper: serialise a PFQ computation (starting at index 0) and
-- print each serialised node on its own line, prefixed by its position in
-- the sequence.
prettyPrinter :: Serializable a => a -> IO ()
prettyPrinter comp = let (xs,_) = serialize comp 0
                     in forM_ (zip [0..] xs) $ \(n, x) -> putStrLn $ "   " ++ show n ++ ": " ++ show x
-- | Build a small PFQ in-kernel computation with the eDSL combinators and
-- print it three ways: via 'show', via the pretty-printer, and as the
-- serialised AST.
main = do
        -- example predicates combined with the boolean combinators
        let mycond = is_ip .&&. (is_tcp .||. is_udp)
        let mycond1 = is_udp
        -- a pipeline: parallel ip/udp and ip/tcp branches, RTP steering,
        -- then a conditional branch and a tcp-only counter
        let comp = par' (ip >-> udp) (ip >-> tcp) >-> steer_rtp >-> dummy 24
                    >-> conditional (mycond .||. mycond1)
                            steer_ip
                            (inc 1 >-> drop')
                    >-> when' is_tcp (inc 2) >-> dummy 11
        putStrLn "\nFunctional computation (show):"
        print comp
        putStrLn "\nFunctional computation (prettyPrint):"
        putStrLn $ pretty comp
        putStrLn "\nSerialized AST:"
        prettyPrinter comp
| Mr-Click/PFQ | user/Haskell/test/test-lang.hs | gpl-2.0 | 1,844 | 0 | 17 | 513 | 309 | 159 | 150 | 21 | 1 |
{-# LANGUAGE TemplateHaskell, QuasiQuotes, OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
module Handler.Wiki
( getWikiR
, postWikiR
, postUnlinkWikiR
) where
import Wiki
import qualified Data.Text as T
import Handler.Topic (getTopicR')
-- | Form with a drop-down of all topics writable by everyone, sorted by
-- title; the submitted value is the 'TopicId' to link a wiki page to.
wikiForm :: Handler ((FormResult TopicId, Widget), Enctype)
wikiForm = do
    ts <- runDB $ selectList [TopicAllWrite ==. True] [Asc TopicTitle]
    runFormPost $ renderTable $ areq (selectField $ map go ts) (FieldSettings MsgWikiTopic Nothing Nothing Nothing) Nothing
  where
    -- Render a topic row as a (label, option value) pair.
    go :: (TopicId, Topic) -> (Text, TopicId)
    go (tid, t) = (topicTitle t, tid)
-- | Show the wiki page at the given path segments. If no page is linked
-- yet, render a blank page with the topic-selection form; otherwise render
-- the linked topic with an unlink widget.
getWikiR :: [Text] -> Handler RepHtml
getWikiR ps = do
    -- path segments are joined with "/" to form the unique page key
    x <- runDB $ getBy $ UniqueWikiPage $ T.intercalate "/" ps
    muid <- maybeAuthId
    ((_, form), _) <- wikiForm
    case x of
        Nothing -> defaultLayout $(widgetFile "wiki-blank")
        Just (_, wp) -> getTopicR' $(widgetFile "wiki-unlink") False $ wikiPageTopic wp
-- | Link the wiki page at the given path to the topic selected in the
-- form (replacing any existing link), then redirect back to the page.
-- On form failure, re-render the blank page with the form.
postWikiR :: [Text] -> Handler RepHtml
postWikiR ps = do
    uid <- requireAuthId
    ((res, form), _) <- wikiForm
    case res of
        FormSuccess tid -> do
            runDB $ do
                let p = T.intercalate "/" ps
                -- delete-then-insert replaces any existing link atomically
                -- within the transaction
                deleteBy $ UniqueWikiPage p
                _ <- insert $ WikiPage p tid
                return ()
            redirect RedirectTemporary $ WikiR ps
        _ -> do
            -- muid is referenced by the spliced wiki-blank template
            let muid = Just uid
            defaultLayout $(widgetFile "wiki-blank")
-- | Remove the page-to-topic link at the given path (authentication
-- required), then redirect back to the now-blank wiki page.
postUnlinkWikiR :: [Text] -> Handler ()
postUnlinkWikiR ps = do
    _ <- requireAuthId
    let path = T.intercalate "/" ps
    runDB (deleteBy (UniqueWikiPage path))
    redirect RedirectTemporary (WikiR ps)
| snoyberg/yesodwiki | Handler/Wiki.hs | bsd-2-clause | 1,647 | 0 | 19 | 442 | 550 | 274 | 276 | 43 | 2 |
{-# LANGUAGE Trustworthy #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE PolyKinds #-}
{-# LANGUAGE ScopedTypeVariables #-}
-----------------------------------------------------------------------------
-- |
-- Module : Data.Monoid
-- Copyright : (c) Andy Gill 2001,
-- (c) Oregon Graduate Institute of Science and Technology, 2001
-- License : BSD-style (see the file libraries/base/LICENSE)
--
-- Maintainer : libraries@haskell.org
-- Stability : experimental
-- Portability : portable
--
-- A class for monoids (types with an associative binary operation that
-- has an identity) with various general-purpose instances.
--
-----------------------------------------------------------------------------
module Data.Monoid (
-- * 'Monoid' typeclass
Monoid(..),
(<>),
Dual(..),
Endo(..),
-- * 'Bool' wrappers
All(..),
Any(..),
-- * 'Num' wrappers
Sum(..),
Product(..),
-- * 'Maybe' wrappers
-- $MaybeExamples
First(..),
Last(..),
-- * 'Alternative' wrapper
Alt (..)
) where
-- Push down the module in the dependency hierarchy.
import GHC.Base hiding (Any)
import GHC.Read
import GHC.Show
import GHC.Generics
import Data.Semigroup.Internal
-- $MaybeExamples
-- To implement @find@ or @findLast@ on any 'Foldable':
--
-- @
-- findLast :: Foldable t => (a -> Bool) -> t a -> Maybe a
-- findLast pred = getLast . foldMap (\x -> if pred x
-- then Last (Just x)
-- else Last Nothing)
-- @
--
-- Much of Data.Map's interface can be implemented with
-- Data.Map.alter. Some of the rest can be implemented with a new
-- @alterA@ function and either 'First' or 'Last':
--
-- > alterA :: (Applicative f, Ord k) =>
-- > (Maybe a -> f (Maybe a)) -> k -> Map k a -> f (Map k a)
-- >
-- > instance Monoid a => Applicative ((,) a) -- from Control.Applicative
--
-- @
-- insertLookupWithKey :: Ord k => (k -> v -> v -> v) -> k -> v
-- -> Map k v -> (Maybe v, Map k v)
-- insertLookupWithKey combine key value =
-- Arrow.first getFirst . alterA doChange key
-- where
-- doChange Nothing = (First Nothing, Just value)
-- doChange (Just oldValue) =
-- (First (Just oldValue),
-- Just (combine key value oldValue))
-- @
-- | Maybe monoid returning the leftmost non-Nothing value.
--
-- @'First' a@ is isomorphic to @'Alt' 'Maybe' a@, but precedes it
-- historically.
--
-- >>> getFirst (First (Just "hello") <> First Nothing <> First (Just "world"))
-- Just "hello"
newtype First a = First { getFirst :: Maybe a }
        deriving (Eq, Ord, Read, Show, Generic, Generic1,
                  Functor, Applicative, Monad)  -- lifted from the underlying Maybe
-- | @since 4.9.0.0
instance Semigroup (First a) where
        -- Left-biased: keep the left operand unless it is 'First Nothing'.
        First Nothing <> b = b
        a <> _ = a
        -- x <> x == x here, so repeated application collapses to one.
        stimes = stimesIdempotentMonoid
-- | @since 2.01
instance Monoid (First a) where
  mempty = First Nothing  -- identity: never wins against a 'Just'
-- | Maybe monoid returning the rightmost non-Nothing value.
--
-- @'Last' a@ is isomorphic to @'Dual' ('First' a)@, and thus to
-- @'Dual' ('Alt' 'Maybe' a)@
--
-- >>> getLast (Last (Just "hello") <> Last Nothing <> Last (Just "world"))
-- Just "world"
newtype Last a = Last { getLast :: Maybe a }
        deriving (Eq, Ord, Read, Show, Generic, Generic1,
                  Functor, Applicative, Monad)  -- lifted from the underlying Maybe
-- | @since 4.9.0.0
instance Semigroup (Last a) where
        -- Right-biased: keep the right operand unless it is 'Last Nothing'.
        a <> Last Nothing = a
        _ <> b = b
        -- x <> x == x here, so repeated application collapses to one.
        stimes = stimesIdempotentMonoid
-- | @since 2.01
instance Monoid (Last a) where
  mempty = Last Nothing  -- identity: never wins against a 'Just'
{-
{--------------------------------------------------------------------
Testing
--------------------------------------------------------------------}
instance Arbitrary a => Arbitrary (Maybe a) where
arbitrary = oneof [return Nothing, Just `fmap` arbitrary]
prop_mconcatMaybe :: [Maybe [Int]] -> Bool
prop_mconcatMaybe x =
fromMaybe [] (mconcat x) == mconcat (catMaybes x)
prop_mconcatFirst :: [Maybe Int] -> Bool
prop_mconcatFirst x =
getFirst (mconcat (map First x)) == listToMaybe (catMaybes x)
prop_mconcatLast :: [Maybe Int] -> Bool
prop_mconcatLast x =
getLast (mconcat (map Last x)) == listLastToMaybe (catMaybes x)
where listLastToMaybe [] = Nothing
listLastToMaybe lst = Just (last lst)
-- -}
-- $setup
-- >>> import Prelude
| ezyang/ghc | libraries/base/Data/Monoid.hs | bsd-3-clause | 4,547 | 0 | 7 | 1,142 | 438 | 284 | 154 | 41 | 0 |
{-# OPTIONS_GHC -cpp -fglasgow-exts -fno-warn-orphans -fno-warn-incomplete-patterns #-}
-- #prune
-- |
-- Module : Data.ByteString.Lazy
-- Copyright : (c) Don Stewart 2006
-- (c) Duncan Coutts 2006
-- License : BSD-style
--
-- Maintainer : dons@galois.com
-- Stability : experimental
-- Portability : portable
--
-- A time and space-efficient implementation of lazy byte vectors
-- using lists of packed 'Word8' arrays, suitable for high performance
-- use, both in terms of large data quantities, or high speed
-- requirements. Byte vectors are encoded as lazy lists of strict 'Word8'
-- arrays of bytes. They provide a means to manipulate large byte vectors
-- without requiring the entire vector be resident in memory.
--
-- Some operations, such as concat, append, reverse and cons, have
-- better complexity than their "Data.ByteString" equivalents, due to
-- optimisations resulting from the list spine structure. And for other
-- operations lazy ByteStrings are usually within a few percent of
-- strict ones, but with better heap usage. For data larger than the
-- available memory, or if you have tight memory constraints, this
-- module will be the only option. The default chunk size is 64k, which
-- should be good in most circumstances. For people with large L2
-- caches, you may want to increase this to fit your cache.
--
-- This module is intended to be imported @qualified@, to avoid name
-- clashes with "Prelude" functions. eg.
--
-- > import qualified Data.ByteString.Lazy as B
--
-- Original GHC implementation by Bryan O\'Sullivan.
-- Rewritten to use 'Data.Array.Unboxed.UArray' by Simon Marlow.
-- Rewritten to support slices and use 'Foreign.ForeignPtr.ForeignPtr'
-- by David Roundy.
-- Polished and extended by Don Stewart.
-- Lazy variant by Duncan Coutts and Don Stewart.
--
{-@ LIQUID "--real" @-}
module Data.ByteString.Lazy (
-- * The @ByteString@ type
ByteString, -- instances: Eq, Ord, Show, Read, Data, Typeable
-- * Introducing and eliminating 'ByteString's
empty, -- :: ByteString
singleton, -- :: Word8 -> ByteString
pack, -- :: [Word8] -> ByteString
unpack, -- :: ByteString -> [Word8]
fromChunks, -- :: [Strict.ByteString] -> ByteString
toChunks, -- :: ByteString -> [Strict.ByteString]
-- * Basic interface
cons, -- :: Word8 -> ByteString -> ByteString
cons', -- :: Word8 -> ByteString -> ByteString
snoc, -- :: ByteString -> Word8 -> ByteString
append, -- :: ByteString -> ByteString -> ByteString
head, -- :: ByteString -> Word8
uncons, -- :: ByteString -> Maybe (Word8, ByteString)
last, -- :: ByteString -> Word8
tail, -- :: ByteString -> ByteString
init, -- :: ByteString -> ByteString
null, -- :: ByteString -> Bool
length, -- :: ByteString -> Int64
-- * Transforming ByteStrings
map, -- :: (Word8 -> Word8) -> ByteString -> ByteString
reverse, -- :: ByteString -> ByteString
intersperse, -- :: Word8 -> ByteString -> ByteString
intercalate, -- :: ByteString -> [ByteString] -> ByteString
transpose, -- :: [ByteString] -> [ByteString]
-- * Reducing 'ByteString's (folds)
foldl, -- :: (a -> Word8 -> a) -> a -> ByteString -> a
foldl', -- :: (a -> Word8 -> a) -> a -> ByteString -> a
foldl1, -- :: (Word8 -> Word8 -> Word8) -> ByteString -> Word8
foldl1', -- :: (Word8 -> Word8 -> Word8) -> ByteString -> Word8
foldr, -- :: (Word8 -> a -> a) -> a -> ByteString -> a
foldr1, -- :: (Word8 -> Word8 -> Word8) -> ByteString -> Word8
-- ** Special folds
concat, -- :: [ByteString] -> ByteString
concatMap, -- :: (Word8 -> ByteString) -> ByteString -> ByteString
any, -- :: (Word8 -> Bool) -> ByteString -> Bool
all, -- :: (Word8 -> Bool) -> ByteString -> Bool
maximum, -- :: ByteString -> Word8
minimum, -- :: ByteString -> Word8
-- * Building ByteStrings
-- ** Scans
scanl, -- :: (Word8 -> Word8 -> Word8) -> Word8 -> ByteString -> ByteString
-- scanl1, -- :: (Word8 -> Word8 -> Word8) -> ByteString -> ByteString
-- scanr, -- :: (Word8 -> Word8 -> Word8) -> Word8 -> ByteString -> ByteString
-- scanr1, -- :: (Word8 -> Word8 -> Word8) -> ByteString -> ByteString
-- ** Accumulating maps
mapAccumL, -- :: (acc -> Word8 -> (acc, Word8)) -> acc -> ByteString -> (acc, ByteString)
mapAccumR, -- :: (acc -> Word8 -> (acc, Word8)) -> acc -> ByteString -> (acc, ByteString)
mapIndexed, -- :: (Int64 -> Word8 -> Word8) -> ByteString -> ByteString
-- ** Infinite ByteStrings
repeat, -- :: Word8 -> ByteString
replicate, -- :: Int64 -> Word8 -> ByteString
cycle, -- :: ByteString -> ByteString
iterate, -- :: (Word8 -> Word8) -> Word8 -> ByteString
-- ** Unfolding ByteStrings
unfoldr, -- :: (a -> Maybe (Word8, a)) -> a -> ByteString
-- * Substrings
-- ** Breaking strings
take, -- :: Int64 -> ByteString -> ByteString
drop, -- :: Int64 -> ByteString -> ByteString
splitAt, -- :: Int64 -> ByteString -> (ByteString, ByteString)
takeWhile, -- :: (Word8 -> Bool) -> ByteString -> ByteString
dropWhile, -- :: (Word8 -> Bool) -> ByteString -> ByteString
span, -- :: (Word8 -> Bool) -> ByteString -> (ByteString, ByteString)
break, -- :: (Word8 -> Bool) -> ByteString -> (ByteString, ByteString)
group, -- :: ByteString -> [ByteString]
groupBy, -- :: (Word8 -> Word8 -> Bool) -> ByteString -> [ByteString]
inits, -- :: ByteString -> [ByteString]
tails, -- :: ByteString -> [ByteString]
-- ** Breaking into many substrings
split, -- :: Word8 -> ByteString -> [ByteString]
splitWith, -- :: (Word8 -> Bool) -> ByteString -> [ByteString]
-- * Predicates
isPrefixOf, -- :: ByteString -> ByteString -> Bool
isSuffixOf, -- :: ByteString -> ByteString -> Bool
-- isInfixOf, -- :: ByteString -> ByteString -> Bool
-- ** Search for arbitrary substrings
-- isSubstringOf, -- :: ByteString -> ByteString -> Bool
-- findSubstring, -- :: ByteString -> ByteString -> Maybe Int
-- findSubstrings, -- :: ByteString -> ByteString -> [Int]
-- * Searching ByteStrings
-- ** Searching by equality
elem, -- :: Word8 -> ByteString -> Bool
notElem, -- :: Word8 -> ByteString -> Bool
-- ** Searching with a predicate
find, -- :: (Word8 -> Bool) -> ByteString -> Maybe Word8
filter, -- :: (Word8 -> Bool) -> ByteString -> ByteString
partition, -- :: (Word8 -> Bool) -> ByteString -> (ByteString, ByteString)
-- * Indexing ByteStrings
index, -- :: ByteString -> Int64 -> Word8
elemIndex, -- :: Word8 -> ByteString -> Maybe Int64
elemIndices, -- :: Word8 -> ByteString -> [Int64]
findIndex, -- :: (Word8 -> Bool) -> ByteString -> Maybe Int64
findIndices, -- :: (Word8 -> Bool) -> ByteString -> [Int64]
count, -- :: Word8 -> ByteString -> Int64
-- * Zipping and unzipping ByteStrings
zip, -- :: ByteString -> ByteString -> [(Word8,Word8)]
zipWith, -- :: (Word8 -> Word8 -> c) -> ByteString -> ByteString -> [c]
unzip, -- :: [(Word8,Word8)] -> (ByteString,ByteString)
-- * Ordered ByteStrings
-- sort, -- :: ByteString -> ByteString
-- * Low level conversions
-- ** Copying ByteStrings
copy, -- :: ByteString -> ByteString
-- defrag, -- :: ByteString -> ByteString
-- * I\/O with 'ByteString's
-- ** Standard input and output
getContents, -- :: IO ByteString
putStr, -- :: ByteString -> IO ()
putStrLn, -- :: ByteString -> IO ()
interact, -- :: (ByteString -> ByteString) -> IO ()
-- ** Files
readFile, -- :: FilePath -> IO ByteString
writeFile, -- :: FilePath -> ByteString -> IO ()
appendFile, -- :: FilePath -> ByteString -> IO ()
-- ** I\/O with Handles
hGetContents, -- :: Handle -> IO ByteString
hGet, -- :: Handle -> Int -> IO ByteString
hGetNonBlocking, -- :: Handle -> Int -> IO ByteString
hPut, -- :: Handle -> ByteString -> IO ()
hPutStr, -- :: Handle -> ByteString -> IO ()
-- hGetN, -- :: Int -> Handle -> Int -> IO ByteString
-- hGetContentsN, -- :: Int -> Handle -> IO ByteString
-- hGetNonBlockingN, -- :: Int -> Handle -> IO ByteString
-- undocumented deprecated things:
join -- :: ByteString -> [ByteString] -> ByteString
) where
import qualified Prelude
import Prelude hiding
(reverse,head,tail,last,init,null,length,map,lines,foldl,foldr,unlines
,concat,any,take,drop,splitAt,takeWhile,dropWhile,span,break,elem,filter,maximum
,minimum,all,concatMap,foldl1,foldr1,scanl, scanl1, scanr, scanr1
,repeat, cycle, interact, iterate,readFile,writeFile,appendFile,replicate
,getContents,getLine,putStr,putStrLn ,zip,zipWith,unzip,notElem)
import qualified Data.List as L -- L for list/lazy
import qualified Data.ByteString as S -- S for strict (hmm...)
import qualified Data.ByteString.Internal as S
import qualified Data.ByteString.Unsafe as S
import Data.ByteString.Lazy.Internal
import qualified Data.ByteString.Fusion as F
import Data.Monoid (Monoid(..))
import Data.Word (Word8)
import Data.Int (Int64)
import qualified Data.List
import System.IO (Handle,stdin,stdout,openBinaryFile,IOMode(..)
,hClose,hWaitForInput,hIsEOF)
import System.IO.Unsafe
#ifndef __NHC__
import Control.Exception (bracket)
#else
import IO (bracket)
#endif
import Foreign.ForeignPtr (withForeignPtr)
import Foreign.Ptr
import Foreign.Storable
--LIQUID
import Data.ByteString.Fusion (PairS(..), MaybeS(..))
import Data.Int
import Data.Word (Word, Word8, Word16, Word32, Word64)
import Foreign.ForeignPtr (ForeignPtr)
{-@ measure sumLens :: [[a]] -> Int
sumLens ([]) = 0
sumLens (x:xs) = len x + (sumLens xs)
@-}
{-@ invariant {v:[[a]] | sumLens v >= 0} @-}
{-@ qualif SumLensEq(v:List (List a), x:List (List a)): (sumLens v) = (sumLens x) @-}
{-@ qualif SumLensEq(v:List (List a), x:List a): (sumLens v) = (len x) @-}
{-@ qualif SumLensLe(v:List (List a), x:List (List a)): (sumLens v) <= (sumLens x) @-}
-- ByteString qualifiers
{-@ qualif LBLensAcc(v:ByteString,
bs:List ByteString,
b:ByteString):
lbLength(v) = lbLengths(bs) + lbLength(b)
@-}
{-@ qualif ByteStringNE(v:S.ByteString): (bLength v) > 0 @-}
{-@ qualif BLengthsAcc(v:List S.ByteString,
c:S.ByteString,
cs:List S.ByteString):
(bLengths v) = (bLength c) + (bLengths cs)
@-}
{-@ qualif BLengthsSum(v:List (List a), bs:List S.ByteString):
(sumLens v) = (bLengths bs)
@-}
{-@ qualif BLenLE(v:S.ByteString, n:int): (bLength v) <= n @-}
{-@ qualif BLenEq(v:S.ByteString,
b:S.ByteString):
(bLength v) = (bLength b)
@-}
{-@ qualif BLenAcc(v:int,
b1:S.ByteString,
b2:S.ByteString):
v = (bLength b1) + (bLength b2)
@-}
{-@ qualif BLenAcc(v:int,
b:S.ByteString,
n:int):
v = (bLength b) + n
@-}
-- lazy ByteString qualifiers
{-@ qualif LByteStringN(v:ByteString, n:int): (lbLength v) = n @-}
{-@ qualif LByteStringNE(v:ByteString): (lbLength v) > 0 @-}
{-@ qualif LByteStringSZ(v:ByteString,
b:ByteString):
(lbLength v) = (lbLength b)
@-}
{-@ qualif LBLenAcc(v:int,
b1:ByteString,
b2:ByteString):
v = (lbLength b1) + (lbLength b2)
@-}
{-@ qualif LBLenAcc(v:int,
b:ByteString,
n:int):
v = (lbLength b) + n
@-}
{-@ qualif Chunk(v:ByteString,
sb:S.ByteString,
lb:ByteString):
(lbLength v) = (bLength sb) + (lbLength lb)
@-}
--LIQUID for the myriad `comb` inner functions
{-@ qualif LBComb(v:List ByteString,
acc:List S.ByteString,
ss:List S.ByteString,
cs:ByteString):
((lbLengths v) + (len v) - 1) = ((bLengths acc) + ((bLengths ss) + (len ss) - 1) + (lbLength cs))
@-}
{-@ qualif LBGroup(v:List ByteString,
acc:List S.ByteString,
ss:List S.ByteString,
cs:ByteString):
(lbLengths v) = ((bLengths acc) + (bLengths ss) + (lbLength cs))
@-}
{-@ qualif LBLenIntersperse(v:ByteString,
sb:S.ByteString,
lb:ByteString):
(lbLength v) = ((bLength sb) * 2) + (lbLength lb)
@-}
{-@ qualif BLenDouble(v:S.ByteString,
b:S.ByteString):
(bLength v) = (bLength b) * 2
@-}
{-@ qualif LBLenDouble(v:ByteString,
b:ByteString):
(lbLength v) = (lbLength b) * 2
@-}
{-@ qualif RevChunksAcc(v:ByteString,
acc:ByteString,
cs:List S.ByteString):
(lbLength v) = (lbLength acc) + (bLengths cs)
@-}
{-@ qualif LBSumLens(v:ByteString,
z:ByteString,
cs:List (List a)):
(lbLength v) = (lbLength z) + (sumLens cs)
@-}
{-@ qualif LBCountAcc(v:int,
c:S.ByteString,
cs:ByteString):
v <= (bLength c) + (lbLength cs)
@-}
-- -----------------------------------------------------------------------------
--
-- Useful macros, until we have bang patterns
--
#define STRICT1(f) f a | a `seq` False = undefined
#define STRICT2(f) f a b | a `seq` b `seq` False = undefined
#define STRICT3(f) f a b c | a `seq` b `seq` c `seq` False = undefined
#define STRICT4(f) f a b c d | a `seq` b `seq` c `seq` d `seq` False = undefined
#define STRICT5(f) f a b c d e | a `seq` b `seq` c `seq` d `seq` e `seq` False = undefined
-- -----------------------------------------------------------------------------
-- | Equality that is insensitive to chunk boundaries (see 'eq').
instance Eq ByteString
    where (==) = eq
-- | Lexicographic ordering, insensitive to chunk boundaries (see 'cmp').
instance Ord ByteString
    where compare = cmp
-- | Concatenation monoid; 'mconcat' avoids repeated spine rebuilding.
instance Monoid ByteString where
    mempty  = empty
    mappend = append
    mconcat = concat
{-@ eq :: ByteString -> ByteString -> Bool @-}
-- Equality that ignores how bytes are split across chunks: compare the
-- shorter head chunk against a prefix of the longer one, then push the
-- longer chunk's remainder back onto its spine and recurse.
eq :: ByteString -> ByteString -> Bool
eq Empty Empty = True
eq Empty _ = False
eq _ Empty = False
eq (Chunk a as) (Chunk b bs) =
  case compare (S.length a) (S.length b) of
    -- a is shorter: match it against b's prefix, re-chunk b's suffix
    LT -> a == (S.take (S.length a) b) && eq as (Chunk (S.drop (S.length a) b) bs)
    EQ -> a == b && eq as bs
    -- b is shorter: symmetric case
    GT -> (S.take (S.length b) a) == b && eq (Chunk (S.drop (S.length b) a) as) bs
{-@ cmp :: ByteString -> ByteString -> Ordering @-}
-- Lexicographic comparison that realigns chunk boundaries on the fly:
-- compare the shorter head chunk with a prefix of the longer one; on a
-- tie, push the longer chunk's remainder back onto its spine and recurse.
cmp :: ByteString -> ByteString -> Ordering
cmp Empty Empty = EQ
cmp Empty _ = LT
cmp _ Empty = GT
cmp (Chunk a as) (Chunk b bs) =
  case compare (S.length a) (S.length b) of
    LT -> case compare a (S.take (S.length a) b) of
            EQ -> cmp as (Chunk (S.drop (S.length a) b) bs)
            result -> result
    EQ -> case compare a b of
            EQ -> cmp as bs
            result -> result
    GT -> case compare (S.take (S.length b) a) b of
            EQ -> cmp (Chunk (S.drop (S.length b) a) as) bs
            result -> result
-- -----------------------------------------------------------------------------
-- Introducing and eliminating 'ByteString's
-- | /O(1)/ The empty 'ByteString'
{-@ empty :: {v:ByteString | (lbLength v) = 0} @-}
empty :: ByteString
-- 'Empty' is the canonical zero-chunk lazy ByteString.
empty = Empty
{-# INLINE empty #-}
-- | /O(1)/ Convert a 'Word8' into a 'ByteString'
{-@ singleton :: Word8 -> {v:ByteString | (lbLength v) = 1} @-}
singleton :: Word8 -> ByteString
-- One strict one-byte chunk on an otherwise empty spine.
singleton w = Chunk (S.singleton w) Empty
{-# INLINE singleton #-}
-- | /O(n)/ Convert a '[Word8]' into a 'ByteString'.
{-@ pack :: cs:[Word8] -> {v:ByteString | (lbLength v) = (len cs)} @-}
pack :: [Word8] -> ByteString
--LIQUID INLINE pack ws = L.foldr (Chunk . S.pack) Empty (chunks defaultChunkSize ws)
-- Split the byte list into 'defaultChunkSize' pieces and pack each piece
-- into one strict chunk. The folds are unfolded by hand so LiquidHaskell
-- can verify them ('Decrease' gives the termination measure).
pack ws = go Empty (chunks defaultChunkSize ws)
  where
    {-@ Decrease go 2 @-}
    go z [] = z
    go z (c:cs) = Chunk (S.pack c) (go z cs)
    -- split a list into pieces of at most 'size' elements
    {-@ Decrease chunks 2 @-}
    chunks :: Int -> [a] -> [[a]]
    chunks _ [] = []
    chunks size xs = case L.splitAt size xs of
                      (xs', xs'') -> xs' : chunks size xs''
-- | /O(n)/ Converts a 'ByteString' to a '[Word8]'.
-- TODO: disabled because type of `concat` changed between ghc 7.8 and 7.10
{- unpack :: b:ByteString -> {v:[Word8] | (len v) = (lbLength b)} @-}
unpack :: ByteString -> [Word8]
--LIQUID INLINE unpack cs = L.concatMap S.unpack (toChunks cs)
-- Unpack each strict chunk and concatenate; the map is inlined by hand
-- for LiquidHaskell verification.
unpack cs = L.concat $ mapINLINE $ toChunks cs
  where mapINLINE [] = []
        mapINLINE (c:cs) = S.unpack c : mapINLINE cs
--TODO: we can do better here by integrating the concat with the unpack
-- | /O(c)/ Convert a list of strict 'ByteString' into a lazy 'ByteString'
{-@ fromChunks :: bs:[S.ByteString] -> {v:ByteString | (lbLength v) = (bLengths bs)} @-}
fromChunks :: [S.ByteString] -> ByteString
--LIQUID INLINE fromChunks cs = L.foldr chunk Empty cs
-- 'chunk' (not the raw 'Chunk' constructor) drops empty strict chunks,
-- preserving the invariant that no chunk on the spine is empty.
fromChunks [] = Empty
fromChunks (c:cs) = chunk c (fromChunks cs)
-- | /O(n)/ Convert a lazy 'ByteString' into a list of strict 'ByteString'
{-@ toChunks :: b:ByteString -> {v:[S.ByteString] | (bLengths v) = (lbLength b)} @-}
toChunks :: ByteString -> [S.ByteString]
--LIQUID GHOST toChunks cs = foldrChunks (:) [] cs
-- 'const' discards the ghost length parameter that the LiquidHaskell
-- version of 'foldrChunks' threads through.
toChunks cs = foldrChunks (const (:)) [] cs
------------------------------------------------------------------------
{-
-- | /O(n)/ Convert a '[a]' into a 'ByteString' using some
-- conversion function
packWith :: (a -> Word8) -> [a] -> ByteString
packWith k str = LPS $ L.map (P.packWith k) (chunk defaultChunkSize str)
{-# INLINE packWith #-}
{-# SPECIALIZE packWith :: (Char -> Word8) -> [Char] -> ByteString #-}
-- | /O(n)/ Converts a 'ByteString' to a '[a]', using a conversion function.
unpackWith :: (Word8 -> a) -> ByteString -> [a]
unpackWith k (LPS ss) = L.concatMap (S.unpackWith k) ss
{-# INLINE unpackWith #-}
{-# SPECIALIZE unpackWith :: (Word8 -> Char) -> ByteString -> [Char] #-}
-}
-- ---------------------------------------------------------------------
-- Basic interface
-- | /O(1)/ Test whether a ByteString is empty.
{-@ null :: b:ByteString -> {v:Bool | ((Prop v) <=> ((lbLength b) = 0))} @-}
null :: ByteString -> Bool
-- A ByteString is empty exactly when its spine has no chunks.
null cs = case cs of
    Empty -> True
    _     -> False
{-# INLINE null #-}
-- | /O(n\/c)/ 'length' returns the length of a ByteString as an 'Int64'
{-@ length :: b:ByteString -> {v:Int64 | v = (lbLength b)} @-}
length :: ByteString -> Int64
--LIQUID GHOST length cs = foldlChunks (\n c -> n + fromIntegral (S.length c)) 0 cs
-- Sum the strict chunk lengths; the ignored first argument is the ghost
-- parameter threaded through by the LiquidHaskell 'foldrChunks'.
length cs = foldrChunks (\_ c n -> n + fromIntegral (S.length c)) 0 cs
{-# INLINE length #-}
-- | /O(1)/ 'cons' is analogous to '(:)' for lists.
--
{-@ cons :: Word8 -> b:ByteString -> {v:ByteString | (lbLength v) = ((lbLength b) + 1)}
  @-}
cons :: Word8 -> ByteString -> ByteString
-- Always allocates a fresh one-byte chunk; see 'cons'' for the variant
-- that coalesces onto a small head chunk.
cons c cs = Chunk (S.singleton c) cs
{-# INLINE cons #-}
-- | /O(1)/ Unlike 'cons', 'cons\'' is
-- strict in the ByteString that we are consing onto. More precisely, it forces
-- the head and the first chunk. It does this because, for space efficiency, it
-- may coalesce the new byte onto the first \'chunk\' rather than starting a
-- new \'chunk\'.
--
-- So that means you can't use a lazy recursive contruction like this:
--
-- > let xs = cons\' c xs in xs
--
-- You can however use 'cons', as well as 'repeat' and 'cycle', to build
-- infinite lazy ByteStrings.
--
{-@ cons' :: Word8 -> b:ByteString -> {v:ByteString | (lbLength v) = ((lbLength b) + 1)} @-}
cons' :: Word8 -> ByteString -> ByteString
-- Coalesce onto the head chunk only while it is small (< 16 bytes), which
-- bounds the cost of copying the existing chunk.
cons' w (Chunk c cs) | S.length c < 16 = Chunk (S.cons w c) cs
cons' w cs = Chunk (S.singleton w) cs
{-# INLINE cons' #-}
-- | /O(n\/c)/ Append a byte to the end of a 'ByteString'
{-@ snoc :: b:ByteString -> Word8 -> {v:ByteString | (lbLength v) = ((lbLength b) + 1)} @-}
snoc :: ByteString -> Word8 -> ByteString
--LIQUID GHOST snoc cs w = foldrChunks Chunk (singleton w) cs
-- Rebuild the whole spine with a fresh one-byte chunk at the end ('const'
-- drops the ghost parameter threaded by 'foldrChunks').
snoc cs w = foldrChunks (const Chunk) (singleton w) cs
{-# INLINE snoc #-}
-- | /O(1)/ Extract the first element of a ByteString, which must be non-empty.
{-@ head :: LByteStringNE -> Word8 @-}
head :: ByteString -> Word8
-- Partial on 'Empty'; the LiquidHaskell refinement (non-empty input)
-- rules that case out for verified callers.
head Empty = errorEmptyList "head"
head (Chunk c _) = S.unsafeHead c
{-# INLINE head #-}
-- | /O(1)/ Extract the head and tail of a ByteString, returning Nothing
-- if it is empty.
{-@ uncons :: b:ByteString
           -> Maybe (Word8, {v:ByteString | (lbLength v) = (lbLength b) - 1})
  @-}
uncons :: ByteString -> Maybe (Word8, ByteString)
uncons Empty = Nothing
uncons (Chunk c cs)
    -- drop the whole head chunk when it held exactly one byte, so no
    -- empty chunk is ever left on the spine
    = Just (S.unsafeHead c,
            if S.length c == 1 then cs else Chunk (S.unsafeTail c) cs)
{-# INLINE uncons #-}
-- | /O(1)/ Extract the elements after the head of a ByteString, which must be
-- non-empty.
{-@ tail :: b:LByteStringNE -> {v:ByteString | (lbLength v) = ((lbLength b) - 1)} @-}
tail :: ByteString -> ByteString
tail Empty = errorEmptyList "tail"
tail (Chunk c cs)
  -- Drop the whole first chunk when it held a single byte so no empty chunk
  -- appears in the result (non-empty-chunk invariant).
  | S.length c == 1 = cs
  | otherwise       = Chunk (S.unsafeTail c) cs
{-# INLINE tail #-}
-- | /O(n\/c)/ Extract the last element of a ByteString, which must be finite
-- and non-empty.
{-@ last :: LByteStringNE -> Word8 @-}
last :: ByteString -> Word8
last Empty          = errorEmptyList "last"
last (Chunk c0 cs0) = go c0 cs0
  -- Walk to the final chunk, carrying the current chunk so the last byte is
  -- always available; Decrease documents the termination measure for LH.
  {-@ Decrease go 2 @-}
  where go c Empty        = S.last c
        go _ (Chunk c cs) = go c cs
-- XXX Don't inline this. Something breaks with 6.8.2 (haven't investigated yet)
{-@ qualif LBLenAcc(v:ByteString,
sb:S.ByteString,
lb:ByteString):
(lbLength v) = ((bLength sb) + (lbLength lb) - 1)
@-}
-- | /O(n\/c)/ Return all the elements of a 'ByteString' except the last one.
{-@ init :: b:LByteStringNE -> {v:ByteString | (lbLength v) = ((lbLength b) - 1)} @-}
init :: ByteString -> ByteString
init Empty          = errorEmptyList "init"
init (Chunk c0 cs0) = go c0 cs0
  -- Copy the spine; on the final chunk, drop it entirely when it held one
  -- byte (invariant), otherwise keep everything but its last byte.
  {-@ Decrease go 2 @-}
  where go c Empty | S.length c == 1 = Empty
                   | otherwise       = Chunk (S.init c) Empty
        go c (Chunk c' cs)           = Chunk c (go c' cs)
-- | /O(n\/c)/ Append two ByteStrings
{-@ append :: b1:ByteString -> b2:ByteString
           -> {v:ByteString | (lbLength v) = (lbLength b1) + (lbLength b2)}
  @-}
append :: ByteString -> ByteString -> ByteString
-- Lazily re-cons the chunks of xs onto ys; ghost argument dropped via 'const'.
--LIQUID GHOST append xs ys = foldrChunks Chunk ys xs
append xs ys = foldrChunks (const Chunk) ys xs
{-# INLINE append #-}
-- ---------------------------------------------------------------------
-- Transformations
-- | /O(n)/ 'map' @f xs@ is the ByteString obtained by applying @f@ to each
-- element of @xs@.
{-@ map :: (Word8 -> Word8) -> b:ByteString -> (LByteStringSZ b) @-}
map :: (Word8 -> Word8) -> ByteString -> ByteString
map f s = map_go s
    where
        --LIQUID RENAME
        -- Rebuild the chunk spine lazily, mapping f over each strict chunk.
        map_go Empty        = Empty
        map_go (Chunk x xs) = Chunk y ys
            where
                y  = S.map f x
                ys = map_go xs
{-# INLINE map #-}
-- | /O(n)/ 'reverse' @xs@ returns the elements of @xs@ in reverse order.
{-@ reverse :: b:ByteString -> (LByteStringSZ b) @-}
reverse :: ByteString -> ByteString
-- Accumulator-style: reverse each chunk and prepend it, which reverses both
-- the chunk order and the bytes within each chunk.
reverse cs0 = rev Empty cs0
  {-@ Decrease rev 2 @-}
  where rev a Empty        = a
        rev a (Chunk c cs) = rev (Chunk (S.reverse c) a) cs
{-# INLINE reverse #-}
-- | The 'intersperse' function takes a 'Word8' and a 'ByteString' and
-- \`intersperses\' that byte between the elements of the 'ByteString'.
-- It is analogous to the intersperse function on Lists.
{-@ intersperse :: Word8 -> b:ByteString
                -> {v:ByteString | if (lbLength b > 0) then (lbLength v = (2 * lbLength b) - 1) else (lbLength v = 0) }
  @-}
intersperse :: Word8 -> ByteString -> ByteString
intersperse _ Empty = Empty
-- The first chunk is interspersed without a leading separator; every later
-- chunk gets a leading separator byte (written by 'poke' in intersperse'),
-- which stitches the chunks together correctly.
intersperse w (Chunk c cs) = Chunk (S.intersperse w c)
                       --LIQUID GHOST (foldrChunks (Chunk . intersperse') Empty cs)
                       (foldrChunks (\_ c cs -> Chunk (intersperse' c) cs) Empty cs)
  where intersperse' :: S.ByteString -> S.ByteString
        -- Builds a chunk of size 2*l: a leading separator followed by the
        -- l source bytes with separators in between (via memchr-style C code).
        -- (l+l) is used instead of (2*l) purely for the LH arithmetic solver.
        intersperse' (S.PS fp o l) =
          S.unsafeCreate {-LIQUID MULTIPLY (2*l)-} (l+l) $ \p' -> withForeignPtr fp $ \p -> do
            poke p' w
            S.c_intersperse (p' `plusPtr` 1) (p `plusPtr` o) (fromIntegral l) w
-- | The 'transpose' function transposes the rows and columns of its
-- 'ByteString' argument.
transpose :: [ByteString] -> [ByteString]
-- Naive: unpack everything to lists, transpose, repack each row as a single
-- chunk.  O(total size) space; fine for correctness, not for large inputs.
transpose css = L.map (\ss -> Chunk (S.pack ss) Empty)
                      (L.transpose (L.map unpack css))
--TODO: make this fast
-- ---------------------------------------------------------------------
-- Reducing 'ByteString's
-- | 'foldl', applied to a binary operator, a starting value (typically
-- the left-identity of the operator), and a ByteString, reduces the
-- ByteString using the binary operator, from left to right.
foldl :: (a -> Word8 -> a) -> a -> ByteString -> a
-- Fold each strict chunk in turn, threading the accumulator left-to-right.
-- Lazy in the accumulator; see foldl' for the strict variant.
foldl f z = go z
  where go a Empty        = a
        go a (Chunk c cs) = go (S.foldl f a c) cs
{-# INLINE foldl #-}
-- | 'foldl\'' is like 'foldl', but strict in the accumulator.
foldl' :: (a -> Word8 -> a) -> a -> ByteString -> a
foldl' f z = go z
  -- The first equation is a standard strictness trick: `a `seq` False` never
  -- matches but forces the accumulator before the real equations are tried.
  where go a _ | a `seq` False = undefined
        go a Empty        = a
        go a (Chunk c cs) = go (S.foldl f a c) cs
{-# INLINE foldl' #-}
-- | 'foldr', applied to a binary operator, a starting value
-- (typically the right-identity of the operator), and a ByteString,
-- reduces the ByteString using the binary operator, from right to left.
foldr :: (Word8 -> a -> a) -> a -> ByteString -> a
-- Right fold over chunks, each folded with the strict S.foldr; the ghost
-- first callback argument is dropped with 'const'.
--LIQUID GHOST foldr k z cs = foldrChunks (flip (S.foldr k)) z cs
foldr k z cs = foldrChunks (const $ flip (S.foldr k)) z cs
{-# INLINE foldr #-}
-- | 'foldl1' is a variant of 'foldl' that has no starting value
-- argument, and thus must be applied to non-empty 'ByteStrings'.
-- This function is subject to array fusion.
--LIQUID FIXME: S.unsafeTail breaks the lazy invariant, but since the
--bytestring is immediately consumed by foldl it may actually be safe
{-@ foldl1 :: (Word8 -> Word8 -> Word8) -> LByteStringNE -> Word8 @-}
foldl1 :: (Word8 -> Word8 -> Word8) -> ByteString -> Word8
foldl1 _ Empty        = errorEmptyList "foldl1"
--LIQUID SAFETY foldl1 f (Chunk c cs) = foldl f (S.unsafeHead c) (Chunk (S.unsafeTail c) cs)
-- Fold the remainder of the string with the first byte as the seed.  The
-- tail of the first chunk may be empty (which would break the non-empty
-- chunk invariant), so only re-wrap it in a Chunk when it is non-empty.
-- BUG FIX: the non-empty branch previously rebuilt 'Chunk c cs' with the
-- ORIGINAL chunk c, folding the head byte a second time; it must use the
-- tail c' (matching the commented-out upstream definition above).
foldl1 f (Chunk c cs) = foldl f (S.unsafeHead c)
                          (case S.unsafeTail c of
                             c' | S.null c' -> cs
                                | otherwise -> Chunk c' cs)
-- | 'foldl1\'' is like 'foldl1', but strict in the accumulator.
{-@ foldl1' :: (Word8 -> Word8 -> Word8) -> LByteStringNE -> Word8 @-}
foldl1' :: (Word8 -> Word8 -> Word8) -> ByteString -> Word8
foldl1' _ Empty        = errorEmptyList "foldl1'"
--LIQUID SAFETY foldl1' f (Chunk c cs) = foldl' f (S.unsafeHead c) (Chunk (S.unsafeTail c) cs)
-- Strict-accumulator analogue of 'foldl1'.
-- BUG FIX: as in 'foldl1', the non-empty branch must continue with the tail
-- chunk c' rather than the original chunk c, otherwise the head byte is
-- folded twice (e.g. foldl1' f [x,y] would compute f (f x x) y).
foldl1' f (Chunk c cs) = foldl' f (S.unsafeHead c)
                           (case S.unsafeTail c of
                              c' | S.null c' -> cs
                                 | otherwise -> Chunk c' cs)
-- | 'foldr1' is a variant of 'foldr' that has no starting value argument,
-- and thus must be applied to non-empty 'ByteString's
{-@ foldr1 :: (Word8 -> Word8 -> Word8) -> LByteStringNE -> Word8 @-}
foldr1 :: (Word8 -> Word8 -> Word8) -> ByteString -> Word8
foldr1 _ Empty          = errorEmptyList "foldr1"
foldr1 f (Chunk c0 cs0) = go c0 cs0
  -- The last chunk is folded with S.foldr1 (no seed needed); earlier chunks
  -- use S.foldr seeded with the fold of the remaining chunks.
  {-@ Decrease go 2 @-}
  where go c Empty         = S.foldr1 f c
        go c (Chunk c' cs) = S.foldr  f (go c' cs) c
-- ---------------------------------------------------------------------
-- Special folds
-- | /O(n)/ Concatenate a list of ByteStrings.
{-@ Lazy concat @-}
{-@ concat :: bs:[ByteString] -> {v:ByteString | (lbLength v) = (lbLengths bs)} @-}
concat :: [ByteString] -> ByteString
concat css0 = to css0
  where
    -- 'go' copies the spine of the current ByteString, then 'to' moves on to
    -- the next one in the list; mutual recursion keeps the result fully lazy.
    go Empty        css = to css
    go (Chunk c cs) css = Chunk c (go cs css)
    to []           = Empty
    to (cs:css)     = go cs css
-- | Map a function over a 'ByteString' and concatenate the results
{-@ Lazy concatMap @-}
concatMap :: (Word8 -> ByteString) -> ByteString -> ByteString
concatMap _ Empty        = Empty
concatMap f (Chunk c0 cs0) = to c0 cs0 0
  where
    -- Mutually recursive producer: 'to' pulls the next input byte (tracking
    -- the unconsumed rest of the current chunk c and the remaining chunks cs),
    -- 'go' streams out the chunks that f produced for that byte.  The extra
    -- Int arguments are ghost values for the LH termination checker only.
    {-@ Decrease go 1 3 @-}
    go :: S.ByteString -> ByteString -> ByteString -> Int -> ByteString
    go c' cs' Empty        _ = to c' cs' 0
    go c' cs' (Chunk c cs) _ = Chunk c (go c' cs' cs 1)

    {-@ Decrease to 2 3 @-}
    to :: S.ByteString -> ByteString -> Int -> ByteString
    to c cs _ | S.null c  = case cs of
                             Empty          -> Empty
                             (Chunk c' cs') -> to c' cs' 0
              | otherwise = go (S.unsafeTail c) cs (f (S.unsafeHead c)) 1
-- | /O(n)/ Applied to a predicate and a ByteString, 'any' determines if
-- any element of the 'ByteString' satisfies the predicate.
any :: (Word8 -> Bool) -> ByteString -> Bool
-- Lazy right fold: (||) short-circuits, so later chunks are not examined
-- once a match is found.  Ghost callback argument ignored.
--LIQUID GHOST any f cs = foldrChunks (\c rest -> S.any f c || rest) False cs
any f cs = foldrChunks (\_ c rest -> S.any f c || rest) False cs
{-# INLINE any #-}
-- todo fuse
-- | /O(n)/ Applied to a predicate and a 'ByteString', 'all' determines
-- if all elements of the 'ByteString' satisfy the predicate.
all :: (Word8 -> Bool) -> ByteString -> Bool
-- Lazy right fold: (&&) short-circuits on the first failing chunk.
--LIQUID GHOST all f cs = foldrChunks (\c rest -> S.all f c && rest) True cs
all f cs = foldrChunks (\_ c rest -> S.all f c && rest) True cs
{-# INLINE all #-}
-- todo fuse
-- | /O(n)/ 'maximum' returns the maximum value from a 'ByteString'
{-@ maximum :: LByteStringNE -> Word8 @-}
maximum :: ByteString -> Word8
maximum Empty        = errorEmptyList "maximum"
-- Seed with the first chunk's maximum, then take the running max per chunk.
maximum (Chunk c cs) = foldlChunks (\n c' -> n `max` S.maximum c')
                                   (S.maximum c) cs
{-# INLINE maximum #-}
-- | /O(n)/ 'minimum' returns the minimum value from a 'ByteString'
{-@ minimum :: LByteStringNE -> Word8 @-}
minimum :: ByteString -> Word8
minimum Empty        = errorEmptyList "minimum"
-- Seed with the first chunk's minimum, then take the running min per chunk.
minimum (Chunk c cs) = foldlChunks (\n c' -> n `min` S.minimum c')
                                   (S.minimum c) cs
{-# INLINE minimum #-}
-- | The 'mapAccumL' function behaves like a combination of 'map' and
-- 'foldl'; it applies a function to each element of a ByteString,
-- passing an accumulating parameter from left to right, and returning a
-- final value of this accumulator together with the new ByteString.
{-@ mapAccumL :: (acc -> Word8 -> (acc, Word8)) -> acc -> b:ByteString -> (acc, LByteStringSZ b) @-}
mapAccumL :: (acc -> Word8 -> (acc, Word8)) -> acc -> ByteString -> (acc, ByteString)
mapAccumL f s0 cs0 = mapAccum_go s0 cs0
  where
    --LIQUID RENAME
    -- Thread the accumulator left-to-right: the state after this chunk (s')
    -- feeds the recursive call on the remaining chunks.
    {-@ Decrease mapAccum_go 2 @-}
    mapAccum_go s Empty        = (s, Empty)
    mapAccum_go s (Chunk c cs) = (s'', Chunk c' cs')
        where (s',  c')  = S.mapAccumL f s c
              (s'', cs') = mapAccum_go s' cs
-- | The 'mapAccumR' function behaves like a combination of 'map' and
-- 'foldr'; it applies a function to each element of a ByteString,
-- passing an accumulating parameter from right to left, and returning a
-- final value of this accumulator together with the new ByteString.
{-@ mapAccumR :: (acc -> Word8 -> (acc, Word8)) -> acc -> b:ByteString -> (acc, LByteStringSZ b) @-}
mapAccumR :: (acc -> Word8 -> (acc, Word8)) -> acc -> ByteString -> (acc, ByteString)
mapAccumR f s0 cs0 = go s0 cs0
  where
    -- Thread the accumulator right-to-left: the recursive call on the tail
    -- runs first (producing s'), which then seeds this chunk's S.mapAccumR.
    {-@ Decrease go 2 @-}
    go s Empty        = (s, Empty)
    go s (Chunk c cs) = (s'', Chunk c' cs')
        where (s'', c')  = S.mapAccumR f s' c
              (s',  cs') = go s cs
-- | /O(n)/ map Word8 functions, provided with the index at each position
{-@ mapIndexed :: (Int -> Word8 -> Word8) -> ByteString -> ByteString @-}
mapIndexed :: (Int -> Word8 -> Word8) -> ByteString -> ByteString
-- Implemented via the fusion framework F: loopL threads the running index
-- (starting at 0) through the chunks; loopArr extracts the result string.
mapIndexed f = F.loopArr . F.loopL (F.mapIndexEFL f) 0
-- ---------------------------------------------------------------------
-- Building ByteStrings
-- | 'scanl' is similar to 'foldl', but returns a list of successive
-- reduced values from the left. This function will fuse.
--
-- > scanl f z [x1, x2, ...] == [z, z `f` x1, (z `f` x1) `f` x2, ...]
--
-- Note that
--
-- > last (scanl f z xs) == foldl f z xs.
{-LIQUID scanl :: (Word8 -> Word8 -> Word8) -> Word8 -> b:ByteString
          -> {v:ByteString | (lbLength v) = 1 + (lbLength b)}
  @-}
scanl :: (Word8 -> Word8 -> Word8) -> Word8 -> ByteString -> ByteString
-- Implemented via the fusion framework.  The input is extended with a dummy
-- trailing 0 byte so the scan emits n+1 outputs (including the seed z);
-- the dummy byte itself never appears in the result.
scanl f z ps = F.loopArr . F.loopL (F.scanEFL f) z $ (ps `snoc` 0)
{-# INLINE scanl #-}
-- ---------------------------------------------------------------------
-- Unfolds and replicates
-- | @'iterate' f x@ returns an infinite ByteString of repeated applications
-- of @f@ to @x@:
--
-- > iterate f x == [x, f x, f (f x), ...]
--
{-@ iterate :: (Word8 -> Word8) -> Word8 -> ByteString @-}
{-@ Strict Data.ByteString.Lazy.iterate @-}
iterate :: (Word8 -> Word8) -> Word8 -> ByteString
-- Infinite string x, f x, f (f x), ...; the case/seq forces each new value
-- before it is stored, avoiding a chain of thunks in the chunks.
iterate f = unfoldr (\x -> case f x of x' -> x' `seq` Just (x', x'))
-- | @'repeat' x@ is an infinite ByteString, with @x@ the value of every
-- element.
--
{-@ repeat :: Word8 -> ByteString @-}
{-@ Strict Data.ByteString.Lazy.repeat @-}
repeat :: Word8 -> ByteString
-- Tie the knot: a single shared smallChunkSize-byte chunk repeated forever.
repeat w = cs where cs = Chunk (S.replicate smallChunkSize w) cs
-- | /O(n)/ @'replicate' n x@ is a ByteString of length @n@ with @x@
-- the value of every element.
--
--LIQUID FIXME: can we somehow sneak multiplication into `nChunks`?
{- replicate :: n:Nat64 -> Word8 -> {v:ByteString | (lbLength v) = (if n > 0 then n else 0)} @-}
replicate :: Int64 -> Word8 -> ByteString
replicate n w
    | n <= 0             = Empty
    -- Small request: a single exact-size chunk.
    | n < fromIntegral smallChunkSize = Chunk (S.replicate (fromIntegral n) w) Empty
    -- Large request: q full chunks sharing one allocation c, plus (if r /= 0)
    -- a leading partial chunk that is a prefix view of c.  The r == 0 check
    -- preserves the non-empty-chunk invariant.
    | otherwise =
        let c      = S.replicate smallChunkSize w
            cs     = nChunks q
            (q, r) = quotRem n (fromIntegral smallChunkSize)
            --LIQUID CAST
            nChunks (0 :: Int64) = Empty
            nChunks m            = Chunk c (nChunks (m-1))
        in if r == 0 then cs -- preserve invariant
           else Chunk (S.unsafeTake (fromIntegral r) c) cs
--LIQUID LAZY     | r == 0             = cs -- preserve invariant
--LIQUID LAZY     | otherwise          = Chunk (S.unsafeTake (fromIntegral r) c) cs
--LIQUID LAZY   where
--LIQUID LAZY     c      = S.replicate smallChunkSize w
--LIQUID LAZY     cs     = nChunks q
--LIQUID LAZY     (q, r) = quotRem n (fromIntegral smallChunkSize)
--LIQUID LAZY     nChunks 0 = Empty
--LIQUID LAZY     nChunks m = Chunk c (nChunks (m-1))
-- | 'cycle' ties a finite ByteString into a circular one, or equivalently,
-- the infinite repetition of the original ByteString.
--
{-@ cycle :: ByteString -> ByteString @-}
{-@ Strict Data.ByteString.Lazy.cycle @-}
cycle :: ByteString -> ByteString
cycle Empty = errorEmptyList "cycle"
-- Tie the knot: re-cons the chunks of cs onto the result itself.
--LIQUID GHOST cycle cs = cs' where cs' = foldrChunks Chunk cs' cs
cycle cs = cs' where cs' = foldrChunks (const Chunk) cs' cs
-- | /O(n)/ The 'unfoldr' function is analogous to the List \'unfoldr\'.
-- 'unfoldr' builds a ByteString from a seed value. The function takes
-- the element and returns 'Nothing' if it is done producing the
-- ByteString or returns 'Just' @(a,b)@, in which case, @a@ is a
-- prepending to the ByteString and @b@ is used as the next element in a
-- recursive call.
{-@ Strict Data.ByteString.Lazy.unfoldr @-}
unfoldr :: (a -> Maybe (Word8, a)) -> a -> ByteString
-- Produce chunks of geometrically growing capacity (32, 64, 128, ...) via
-- the bounded strict unfoldrN, so short unfolds stay small and long unfolds
-- don't degenerate into many tiny chunks.
unfoldr f s0 = unfoldChunk 32 s0
  where unfoldChunk n s =
          case S.unfoldrN n f s of
            (c, Nothing)
              | S.null c  -> Empty
              | otherwise -> Chunk c Empty
            (c, Just s')  -> Chunk c (unfoldChunk (n*2) s')
-- ---------------------------------------------------------------------
-- Substrings
-- | /O(n\/c)/ 'take' @n@, applied to a ByteString @xs@, returns the prefix
-- of @xs@ of length @n@, or @xs@ itself if @n > 'length' xs@.
{-@ take :: n:Nat64
         -> b:ByteString
         -> {v:ByteString | (Min (lbLength v) (lbLength b) n)}
  @-}
take :: Int64 -> ByteString -> ByteString
take i _ | i <= 0 = Empty
take i cs0         = take' i cs0
  where --LIQUID CAST FIXME: (Num a) isn't embedded as int so this loses some
        --LIQUID refinements without the explicit type
        -- Copy whole chunks while the budget n exceeds the chunk length;
        -- split the final chunk with S.take.
        take' :: Int64 -> ByteString -> ByteString
        take' 0 _            = Empty
        take' _ Empty        = Empty
        take' n (Chunk c cs) =
          if n < fromIntegral (S.length c)
            then Chunk (S.take (fromIntegral n) c) Empty
            else Chunk c (take' (n - fromIntegral (S.length c)) cs)
-- | /O(n\/c)/ 'drop' @n xs@ returns the suffix of @xs@ after the first @n@
-- elements, or @[]@ if @n > 'length' xs@.
{-@ drop :: n:Nat64
         -> b:ByteString
         -> {v:ByteString | lbLength v = (if lbLength b <= n then 0 else (lbLength b - n))}
  @-}
drop :: Int64 -> ByteString -> ByteString
drop i p | i <= 0 = p
drop i cs0 = drop' i cs0
  where -- Skip whole chunks while the remaining count n covers them; split
        -- the chunk containing the boundary with S.drop (shares the buffer).
        drop' :: Int64 -> ByteString -> ByteString
        drop' 0 cs           = cs
        drop' _ Empty        = Empty
        drop' n (Chunk c cs) =
          if n < fromIntegral (S.length c)
            then Chunk (S.drop (fromIntegral n) c) cs
            else drop' (n - fromIntegral (S.length c)) cs
-- | /O(n\/c)/ 'splitAt' @n xs@ is equivalent to @('take' n xs, 'drop' n xs)@.
{-@ splitAt :: n:Nat64
            -> b:ByteString
            -> ( {v:ByteString | (Min (lbLength v) (lbLength b) n)}
               , ByteString)<{\x y -> ((lbLength y) = ((lbLength b) - (lbLength x)))}>
  @-}
splitAt :: Int64 -> ByteString -> (ByteString, ByteString)
splitAt i cs0 | i <= 0 = (Empty, cs0)
splitAt i cs0 = splitAt' i cs0
  where -- Single traversal combining 'take' and 'drop': whole chunks go to
        -- the prefix until the boundary chunk, which is split in place.
        splitAt' :: Int64 -> ByteString -> (ByteString, ByteString)
        splitAt' 0 cs           = (Empty, cs)
        splitAt' _ Empty        = (Empty, Empty)
        splitAt' n (Chunk c cs) =
          if n < fromIntegral (S.length c)
            then (Chunk (S.take (fromIntegral n) c) Empty
                 ,Chunk (S.drop (fromIntegral n) c) cs)
            else let (cs', cs'') = splitAt' (n - fromIntegral (S.length c)) cs
                 in (Chunk c cs', cs'')
-- | 'takeWhile', applied to a predicate @p@ and a ByteString @xs@,
-- returns the longest prefix (possibly empty) of @xs@ of elements that
-- satisfy @p@.
{-@ takeWhile :: (Word8 -> Bool) -> b:ByteString -> (LByteStringLE b) @-}
takeWhile :: (Word8 -> Bool) -> ByteString -> ByteString
takeWhile f cs0 = takeWhile' cs0
  -- findIndexOrEnd locates the first byte failing f within a chunk; a hit
  -- inside a chunk ends the result there, otherwise the whole chunk is kept.
  where takeWhile' Empty        = Empty
        takeWhile' (Chunk c cs) =
          case findIndexOrEnd (not . f) c of
            0                  -> Empty
            n | n < S.length c -> Chunk (S.take n c) Empty
              | otherwise      -> Chunk c (takeWhile' cs)
-- | 'dropWhile' @p xs@ returns the suffix remaining after 'takeWhile' @p xs@.
{-@ dropWhile :: (Word8 -> Bool) -> b:ByteString -> (LByteStringLE b) @-}
dropWhile :: (Word8 -> Bool) -> ByteString -> ByteString
dropWhile f cs0 = dropWhile' cs0
  -- Discard chunks that are entirely satisfied by f; split (sharing the
  -- buffer via S.drop) at the first byte that fails f.
  where dropWhile' Empty        = Empty
        dropWhile' (Chunk c cs) =
          case findIndexOrEnd (not . f) c of
            n | n < S.length c -> Chunk (S.drop n c) cs
              | otherwise      -> dropWhile' cs
-- | 'break' @p@ is equivalent to @'span' ('not' . p)@.
{-@ break :: (Word8 -> Bool) -> b:ByteString -> (LByteStringPair b) @-}
break :: (Word8 -> Bool) -> ByteString -> (ByteString, ByteString)
break f cs0 = break' cs0
  -- Split at the first byte satisfying f; the chunk containing the split
  -- point is divided with S.take/S.drop (both share the original buffer).
  where break' Empty        = (Empty, Empty)
        break' (Chunk c cs) =
          case findIndexOrEnd f c of
            0                  -> (Empty, Chunk c cs)
            n | n < S.length c -> (Chunk (S.take n c) Empty
                                  ,Chunk (S.drop n c) cs)
              | otherwise      -> let (cs', cs'') = break' cs
                                  in (Chunk c cs', cs'')
--
-- TODO
--
-- Add rules
--
{-
-- | 'breakByte' breaks its ByteString argument at the first occurence
-- of the specified byte. It is more efficient than 'break' as it is
-- implemented with @memchr(3)@. I.e.
--
-- > break (=='c') "abcd" == breakByte 'c' "abcd"
--
breakByte :: Word8 -> ByteString -> (ByteString, ByteString)
breakByte c (LPS ps) = case (breakByte' ps) of (a,b) -> (LPS a, LPS b)
where breakByte' [] = ([], [])
breakByte' (x:xs) =
case P.elemIndex c x of
Just 0 -> ([], x : xs)
Just n -> (P.take n x : [], P.drop n x : xs)
Nothing -> let (xs', xs'') = breakByte' xs
in (x : xs', xs'')
-- | 'spanByte' breaks its ByteString argument at the first
-- occurence of a byte other than its argument. It is more efficient
-- than 'span (==)'
--
-- > span (=='c') "abcd" == spanByte 'c' "abcd"
--
spanByte :: Word8 -> ByteString -> (ByteString, ByteString)
spanByte c (LPS ps) = case (spanByte' ps) of (a,b) -> (LPS a, LPS b)
where spanByte' [] = ([], [])
spanByte' (x:xs) =
case P.spanByte c x of
(x', x'') | P.null x' -> ([], x : xs)
| P.null x'' -> let (xs', xs'') = spanByte' xs
in (x : xs', xs'')
| otherwise -> (x' : [], x'' : xs)
-}
-- | 'span' @p xs@ breaks the ByteString into two segments. It is
-- equivalent to @('takeWhile' p xs, 'dropWhile' p xs)@
{-@ span :: (Word8 -> Bool) -> b:ByteString -> (LByteStringPair b) @-}
span :: (Word8 -> Bool) -> ByteString -> (ByteString, ByteString)
-- span p == break (not . p); a single traversal, unlike the naive
-- (takeWhile p xs, dropWhile p xs) definition.
span p = break (not . p)
-- | /O(n)/ Splits a 'ByteString' into components delimited by
-- separators, where the predicate returns True for a separator element.
-- The resulting components do not contain the separators. Two adjacent
-- separators result in an empty component in the output. eg.
--
-- > splitWith (=='a') "aabbaca" == ["","","bb","c",""]
-- > splitWith (=='a') [] == []
--
{-@ splitWith :: (Word8 -> Bool) -> b:LByteStringNE -> (LByteStringSplit b) @-}
splitWith :: (Word8 -> Bool) -> ByteString -> [ByteString]
splitWith _ Empty          = []
--LIQUID PARAM splitWith w (Chunk c0 cs0) = comb [] (S.splitWith w c0) cs0
--LIQUID PARAM   where comb :: [S.ByteString] -> [S.ByteString] -> ByteString -> [ByteString]
--LIQUID PARAM         comb acc (s:[]) Empty        = revChunks (s:acc) : []
--LIQUID PARAM         comb acc (s:[]) (Chunk c cs) = comb (s:acc) (S.splitWith w c) cs
--LIQUID PARAM         comb acc (s:ss) cs           = revChunks (s:acc) : comb [] ss cs
splitWith w (Chunk c0 cs0) = comb [] cs0 (S.splitWith w c0)
  -- comb acc cs pieces: 'pieces' are the current chunk's fragments; a final
  -- fragment (s:[]) may continue into the next chunk, so it is pushed onto
  -- acc and the next chunk is split.  Non-final fragments close a field,
  -- emitted via revChunks (acc is accumulated in reverse).
  -- (Arguments are reordered vs. the commented version for the LH Decrease
  -- termination measure.)
  {-@ Decrease comb 2 3 @-}
  where comb :: [S.ByteString] -> ByteString -> [S.ByteString] -> [ByteString]
        comb acc Empty        (s:[]) = revChunks (s:acc) : []
        comb acc (Chunk c cs) (s:[]) = comb (s:acc) cs (S.splitWith w c)
        comb acc cs           (s:ss) = revChunks (s:acc) : comb [] cs ss
{-# INLINE splitWith #-}
-- | /O(n)/ Break a 'ByteString' into pieces separated by the byte
-- argument, consuming the delimiter. I.e.
--
-- > split '\n' "a\nb\nd\ne" == ["a","b","d","e"]
-- > split 'a' "aXaXaXa" == ["","X","X","X",""]
-- > split 'x' "x" == ["",""]
--
-- and
--
-- > intercalate [c] . split c == id
-- > split == splitWith . (==)
--
-- As for all splitting functions in this library, this function does
-- not copy the substrings, it just constructs new 'ByteStrings' that
-- are slices of the original.
--
{-@ split :: Word8 -> b:LByteStringNE -> (LByteStringSplit b) @-}
split :: Word8 -> ByteString -> [ByteString]
split _ Empty     = []
--LIQUID PARAM split w (Chunk c0 cs0) = comb [] (S.split w c0) cs0
--LIQUID PARAM   where comb :: [S.ByteString] -> [S.ByteString] -> ByteString -> [ByteString]
--LIQUID PARAM         comb acc (s:[]) Empty        = revChunks (s:acc) : []
--LIQUID PARAM         comb acc (s:[]) (Chunk c cs) = comb (s:acc) (S.split w c) cs
--LIQUID PARAM         comb acc (s:ss) cs           = revChunks (s:acc) : comb [] ss cs
split w (Chunk c0 cs0) = comb [] cs0 (S.split w c0)
  -- Same structure as 'splitWith' (see comments there), specialised to a
  -- single delimiter byte so the per-chunk split uses the memchr-based
  -- S.split.
  {-@ Decrease comb 2 3 @-}
  where comb :: [S.ByteString] -> ByteString -> [S.ByteString] -> [ByteString]
        comb acc Empty        (s:[]) = revChunks (s:acc) : []
        comb acc (Chunk c cs) (s:[]) = comb (s:acc) cs (S.split w c)
        comb acc cs           (s:ss) = revChunks (s:acc) : comb [] cs ss
{-# INLINE split #-}
{-
-- | Like 'splitWith', except that sequences of adjacent separators are
-- treated as a single separator. eg.
--
-- > tokens (=='a') "aabbaca" == ["bb","c"]
--
tokens :: (Word8 -> Bool) -> ByteString -> [ByteString]
tokens f = L.filter (not.null) . splitWith f
-}
-- | The 'group' function takes a ByteString and returns a list of
-- ByteStrings such that the concatenation of the result is equal to the
-- argument. Moreover, each sublist in the result contains only equal
-- elements. For example,
--
-- > group "Mississippi" = ["M","i","ss","i","ss","i","pp","i"]
--
-- It is a special case of 'groupBy', which allows the programmer to
-- supply their own equality test.
{-@ group :: b:ByteString -> {v: [ByteString] | (lbLengths v) = (lbLength b)} @-}
group :: ByteString -> [ByteString]
group Empty          = []
--LIQUID PARAM group (Chunk c0 cs0) = group' [] (S.group c0) cs0
--LIQUID PARAM   where
--LIQUID PARAM     group' :: [S.ByteString] -> [S.ByteString] -> ByteString -> [ByteString]
--LIQUID PARAM     group' acc@(s':_) ss@(s:_) cs
--LIQUID PARAM       | S.unsafeHead s'
--LIQUID PARAM      /= S.unsafeHead s             = revNonEmptyChunks acc : group' [] ss cs
--LIQUID PARAM     group' acc (s:[]) Empty        = revNonEmptyChunks (s:acc) : []
--LIQUID PARAM     group' acc (s:[]) (Chunk c cs) = group' (s:acc) (S.group c) cs
--LIQUID PARAM     group' acc (s:ss) cs           = revNonEmptyChunks (s:acc) : group' [] ss cs
group (Chunk c0 cs0) = group_go cs0 (S.group c0) []
  where
    -- group_go cs pieces acc: 'pieces' are the equal-byte runs of the current
    -- chunk, acc holds (reversed) fragments of a run that may span chunk
    -- boundaries.  The first equation closes the accumulated run when the
    -- next fragment starts with a different byte; a final fragment (s:[])
    -- may continue into the next chunk.
    {-@ Decrease group_go 1 2 3 @-}
    group_go :: ByteString -> [S.ByteString] -> [S.ByteString] -> [ByteString]
    group_go cs ss@(s:_) acc@(s':_)
      | S.unsafeHead s'
     /= S.unsafeHead s                 = revNonEmptyChunks acc : group_go cs ss []
    group_go Empty        (s:[]) acc   = revNonEmptyChunks (s:acc) : []
    group_go (Chunk c cs) (s:[]) acc   = group_go cs (S.group c) (s:acc)
    group_go cs           (s:ss) acc   = revNonEmptyChunks (s:acc) : group_go cs ss []
{-
TODO: check if something like this might be faster
group :: ByteString -> [ByteString]
group xs
| null xs = []
| otherwise = ys : group zs
where
(ys, zs) = spanByte (unsafeHead xs) xs
-}
-- | The 'groupBy' function is the non-overloaded version of 'group'.
--
{-@ groupBy :: (Word8 -> Word8 -> Bool) -> b:ByteString -> {v:[ByteString] | (lbLengths v) = (lbLength b)} @-}
groupBy :: (Word8 -> Word8 -> Bool) -> ByteString -> [ByteString]
groupBy _ Empty          = []
--LIQUID PARAM groupBy k (Chunk c0 cs0) = groupBy' [] 0 (S.groupBy k c0) cs0
--LIQUID PARAM   where
--LIQUID PARAM     groupBy' :: [S.ByteString] -> Word8 -> [S.ByteString] -> ByteString -> [ByteString]
--LIQUID PARAM     groupBy' acc@(_:_) c ss@(s:_) cs
--LIQUID PARAM       | not (c `k` S.unsafeHead s)    = revNonEmptyChunks acc : groupBy' [] 0 ss cs
--LIQUID PARAM     groupBy' acc _ (s:[]) Empty        = revNonEmptyChunks (s : acc) : []
--LIQUID PARAM     groupBy' acc w (s:[]) (Chunk c cs) = groupBy' (s:acc) w' (S.groupBy k c) cs
--LIQUID PARAM       where w' | L.null acc = S.unsafeHead s
--LIQUID PARAM                | otherwise  = w
--LIQUID PARAM     groupBy' acc _ (s:ss) cs           = revNonEmptyChunks (s : acc) : groupBy' [] 0 ss cs
groupBy k (Chunk c0 cs0) = groupBy_go cs0 (S.groupBy k c0) []
  where
    -- Same chunk-spanning structure as 'group' (see comments there), but run
    -- boundaries are decided by the user predicate k.
    -- BUG FIX: the boundary guard previously compared heads with (/=) -- a
    -- copy of 'group' -- which ignored k across chunk boundaries; it now
    -- applies k, matching the commented-out upstream definition above.
    -- NOTE(review): the run's representative byte is taken from the most
    -- recent fragment s' rather than the run's first byte (the original
    -- threaded a Word8 for this); equivalent when k is an equivalence
    -- relation on runs -- TODO confirm for non-transitive predicates.
    {-@ Decrease groupBy_go 1 2 3 @-}
    groupBy_go :: ByteString -> [S.ByteString] -> [S.ByteString] -> [ByteString]
    groupBy_go cs ss@(s:_) acc@(s':_)
      | not (S.unsafeHead s'
        `k` S.unsafeHead s)            = revNonEmptyChunks acc : groupBy_go cs ss []
    groupBy_go Empty        (s:[]) acc = revNonEmptyChunks (s:acc) : []
    groupBy_go (Chunk c cs) (s:[]) acc = groupBy_go cs (S.groupBy k c) (s:acc)
    groupBy_go cs           (s:ss) acc = revNonEmptyChunks (s:acc) : groupBy_go cs ss []
{-
TODO: check if something like this might be faster
groupBy :: (Word8 -> Word8 -> Bool) -> ByteString -> [ByteString]
groupBy k xs
| null xs = []
| otherwise = take n xs : groupBy k (drop n xs)
where
n = 1 + findIndexOrEnd (not . k (head xs)) (tail xs)
-}
-- | /O(n)/ The 'intercalate' function takes a 'ByteString' and a list of
-- 'ByteString's and concatenates the list after interspersing the first
-- argument between each element of the list.
intercalate :: ByteString -> [ByteString] -> ByteString
-- Insert the separator between elements (Data.List.intersperse) then flatten.
intercalate s = concat . (L.intersperse s)
join :: ByteString -> [ByteString] -> ByteString
-- Deprecated alias kept for backwards compatibility.
join = intercalate
{-# DEPRECATED join "use intercalate" #-}
-- ---------------------------------------------------------------------
-- Indexing ByteStrings
-- | /O(c)/ 'ByteString' index (subscript) operator, starting from 0.
{-@ index :: b:ByteString -> n:{v:Nat64 | (LBValid b v)} -> Word8 @-}
index :: ByteString -> Int64 -> Word8
index _   i | i < 0 = moduleError "index" ("negative index: " ++ show i)
index cs0 i         = index' cs0 i
  -- Skip whole chunks, subtracting each chunk's length from the index, then
  -- index into the chunk that contains position n.
  where index' Empty     n = moduleError "index" ("index too large: " ++ show n)
        index' (Chunk c cs) n
          | n >= fromIntegral (S.length c) =
              index' cs (n - fromIntegral (S.length c))
          | otherwise = S.unsafeIndex c (fromIntegral n)
-- | /O(n)/ The 'elemIndex' function returns the index of the first
-- element in the given 'ByteString' which is equal to the query
-- element, or 'Nothing' if there is no such element.
-- This implementation uses memchr(3).
{-@ elemIndex :: Word8 -> b:ByteString -> Maybe {v:Nat64 | v < (lbLength b)} @-}
elemIndex :: Word8 -> ByteString -> Maybe Int64
elemIndex w cs0 = elemIndex_go 0 cs0
--LIQUID RENAME
  -- n is the number of bytes preceding the current chunk; a per-chunk hit
  -- from S.elemIndex (memchr) is offset by n to give the global index.
  {-@ Decrease elemIndex_go 2 @-}
  where elemIndex_go _ Empty        = Nothing
        elemIndex_go (n::Int64) (Chunk c cs) = --LIQUID CAST
          case S.elemIndex w c of
            Nothing -> elemIndex_go (n + fromIntegral (S.length c)) cs
            Just i  -> Just (n + fromIntegral i)
{-
-- | /O(n)/ The 'elemIndexEnd' function returns the last index of the
-- element in the given 'ByteString' which is equal to the query
-- element, or 'Nothing' if there is no such element. The following
-- holds:
--
-- > elemIndexEnd c xs ==
-- > (-) (length xs - 1) `fmap` elemIndex c (reverse xs)
--
elemIndexEnd :: Word8 -> ByteString -> Maybe Int
elemIndexEnd ch (PS x s l) = inlinePerformIO $ withForeignPtr x $ \p ->
go (p `plusPtr` s) (l-1)
where
STRICT2(go)
go p i | i < 0 = return Nothing
| otherwise = do ch' <- peekByteOff p i
if ch == ch'
then return $ Just i
else go p (i-1)
-}
-- | /O(n)/ The 'elemIndices' function extends 'elemIndex', by returning
-- the indices of all elements equal to the query element, in ascending order.
-- This implementation uses memchr(3).
{-@ elemIndices :: Word8 -> b:ByteString -> [{v:Nat64 | v < (lbLength b) }] @-}
elemIndices :: Word8 -> ByteString -> [Int64]
elemIndices w cs0 = elemIndices_go 0 cs0
--LIQUID RENAME
  -- n is the running byte offset of the current chunk; per-chunk indices are
  -- shifted by n and the chunks' results concatenated in order.
  {-@ Decrease elemIndices_go 2 @-}
  where elemIndices_go _ Empty        = []
        elemIndices_go (n::Int64) (Chunk c cs) = --LIQUID CAST
          L.map ((+n).fromIntegral) (S.elemIndices w c)
            ++ elemIndices_go (n + fromIntegral (S.length c)) cs
-- | count returns the number of times its argument appears in the ByteString
--
-- > count = length . elemIndices
--
-- But more efficiently than using length on the intermediate list.
{-@ count :: Word8 -> b:ByteString -> {v:Nat64 | v <= (lbLength b) } @-}
count :: Word8 -> ByteString -> Int64
-- Sum the per-chunk counts; ghost callback argument ignored.
--LIQUID GHOST count w cs = foldlChunks (\n c -> n + fromIntegral (S.count w c)) 0 cs
count w cs = foldrChunks (\_ c n -> n + fromIntegral (S.count w c)) 0 cs
-- | The 'findIndex' function takes a predicate and a 'ByteString' and
-- returns the index of the first element in the ByteString
-- satisfying the predicate.
{-@ findIndex :: (Word8 -> Bool) -> b:ByteString -> (Maybe {v:Nat64 | v < (lbLength b)}) @-}
findIndex :: (Word8 -> Bool) -> ByteString -> Maybe Int64
findIndex k cs0 = findIndex_go 0 cs0
--LIQUID RENAME
  -- Same offset-threading scheme as 'elemIndex', with an arbitrary predicate.
  {-@ Decrease findIndex_go 2 @-}
  where findIndex_go _ Empty        = Nothing
        findIndex_go (n::Int64) (Chunk c cs) = --LIQUID CAST
          case S.findIndex k c of
            Nothing -> findIndex_go (n + fromIntegral (S.length c)) cs
            Just i  -> Just (n + fromIntegral i)
{-# INLINE findIndex #-}
-- | /O(n)/ The 'find' function takes a predicate and a ByteString,
-- and returns the first element in matching the predicate, or 'Nothing'
-- if there is no such element.
--
-- > find f p = case findIndex f p of Just n -> Just (p ! n) ; _ -> Nothing
--
find :: (Word8 -> Bool) -> ByteString -> Maybe Word8
find f cs0 = find' cs0
  -- Search chunk by chunk; stops at the first chunk containing a match.
  where find' Empty        = Nothing
        find' (Chunk c cs) = case S.find f c of
            Nothing -> find' cs
            Just w  -> Just w
{-# INLINE find #-}
-- | The 'findIndices' function extends 'findIndex', by returning the
-- indices of all elements satisfying the predicate, in ascending order.
{-@ findIndices :: (Word8 -> Bool) -> b:ByteString -> [{v:Nat64 | v < (lbLength b)}] @-}
findIndices :: (Word8 -> Bool) -> ByteString -> [Int64]
findIndices k cs0 = findIndices_go 0 cs0
--LIQUID RENAME
  -- Same offset-threading scheme as 'elemIndices', with an arbitrary predicate.
  {-@ Decrease findIndices_go 2 @-}
  where findIndices_go _ Empty        = []
        findIndices_go (n::Int64) (Chunk c cs) = --LIQUID CAST
          L.map ((+n).fromIntegral) (S.findIndices k c)
            ++ findIndices_go (n + fromIntegral (S.length c)) cs
-- ---------------------------------------------------------------------
-- Searching ByteStrings
-- | /O(n)/ 'elem' is the 'ByteString' membership predicate.
elem :: Word8 -> ByteString -> Bool
-- Defined via elemIndex so it benefits from the memchr-based search.
elem w cs = case elemIndex w cs of Nothing -> False ; _ -> True
-- | /O(n)/ 'notElem' is the inverse of 'elem'
notElem :: Word8 -> ByteString -> Bool
-- Logical negation of 'elem'.
notElem w cs = not (elem w cs)
-- | /O(n)/ 'filter', applied to a predicate and a ByteString,
-- returns a ByteString containing those characters that satisfy the
-- predicate.
{-@ filter :: (Word8 -> Bool) -> b:ByteString -> (LByteStringLE b) @-}
filter :: (Word8 -> Bool) -> ByteString -> ByteString
filter p s = filter_go s
    where
        --LIQUID RENAME
        -- Filter each strict chunk; 'chunk' (smart constructor) drops chunks
        -- that became empty, preserving the non-empty-chunk invariant.
        filter_go Empty        = Empty
        filter_go (Chunk x xs) = chunk (S.filter p x) (filter_go xs)
#if __GLASGOW_HASKELL__
{-# INLINE [1] filter #-}
#endif
-- | /O(n)/ and /O(n\/c) space/ A first order equivalent of /filter .
-- (==)/, for the common case of filtering a single byte. It is more
-- efficient to use /filterByte/ in this case.
--
-- > filterByte == filter . (==)
--
-- filterByte is around 10x faster, and uses much less space, than its
-- filter equivalent
--LIQUID TODO: needs the spec for replicate
{- filterByte :: Word8 -> b:ByteString -> (LByteStringLE b) @-}
filterByte :: Word8 -> ByteString -> ByteString
filterByte w ps = replicate (count w ps) w
{-# INLINE filterByte #-}
{-# RULES
"FPS specialise filter (== x)" forall x.
filter ((==) x) = filterByte x
#-}
{-# RULES
"FPS specialise filter (== x)" forall x.
filter (== x) = filterByte x
#-}
{-
-- | /O(n)/ A first order equivalent of /filter . (\/=)/, for the common
-- case of filtering a single byte out of a list. It is more efficient
-- to use /filterNotByte/ in this case.
--
-- > filterNotByte == filter . (/=)
--
-- filterNotByte is around 2x faster than its filter equivalent.
filterNotByte :: Word8 -> ByteString -> ByteString
filterNotByte w (LPS xs) = LPS (filterMap (P.filterNotByte w) xs)
-}
-- | /O(n)/ The 'partition' function takes a predicate a ByteString and returns
-- the pair of ByteStrings with elements which do and do not satisfy the
-- predicate, respectively; i.e.,
--
-- > partition p bs == (filter p xs, filter (not . p) xs)
--
{-@ partition :: (Word8 -> Bool) -> b:ByteString -> ((LByteStringLE b), (LByteStringLE b)) @-}
partition :: (Word8 -> Bool) -> ByteString -> (ByteString, ByteString)
partition f p = (filter f p, filter (not . f) p)
--TODO: use a better implementation
-- ---------------------------------------------------------------------
-- Searching for substrings
-- | /O(n)/ The 'isPrefixOf' function takes two ByteStrings and returns 'True'
-- iff the first is a prefix of the second.
{-@ isPrefixOf :: ByteString -> ByteString -> Bool @-}
isPrefixOf :: ByteString -> ByteString -> Bool
isPrefixOf Empty _ = True
isPrefixOf _ Empty = False
isPrefixOf (Chunk x xs) (Chunk y ys)
| S.length x == S.length y = x == y && isPrefixOf xs ys
--LIQUID LAZY pushing bindings inward for safety
--LIQUID LAZY | S.length x < S.length y = x == yh && isPrefixOf xs (Chunk yt ys)
--LIQUID LAZY | otherwise = xh == y && isPrefixOf (Chunk xt xs) ys
--LIQUID LAZY where (xh,xt) = S.splitAt (S.length y) x
--LIQUID LAZY (yh,yt) = S.splitAt (S.length x) y
| otherwise = if S.length x < S.length y
then let (xh,xt) = S.splitAt (S.length y) x
(yh,yt) = S.splitAt (S.length x) y
in x == yh && isPrefixOf xs (Chunk yt ys)
else let (xh,xt) = S.splitAt (S.length y) x
(yh,yt) = S.splitAt (S.length x) y
in xh == y && isPrefixOf (Chunk xt xs) ys
-- | /O(n)/ The 'isSuffixOf' function takes two ByteStrings and returns 'True'
-- iff the first is a suffix of the second.
--
-- The following holds:
--
-- > isSuffixOf x y == reverse x `isPrefixOf` reverse y
--
isSuffixOf :: ByteString -> ByteString -> Bool
isSuffixOf x y = reverse x `isPrefixOf` reverse y
--TODO: a better implementation
-- ---------------------------------------------------------------------
-- Zipping
--LIQUID TODO: zip and zipWith are in LazyZip.hs because they need a
--qualifier that takes 4 parameters and this module is slow enough to
--verify as is.
-- | /O(n)/ 'zip' takes two ByteStrings and returns a list of
-- corresponding pairs of bytes. If one input ByteString is short,
-- excess elements of the longer ByteString are discarded. This is
-- equivalent to a pair of 'unpack' operations.
{-@ predicate LZipLen V X Y = (len V) = (if (lbLength X) <= (lbLength Y) then (lbLength X) else (lbLength Y)) @-}
{-@ zip :: x:ByteString -> y:ByteString -> {v:[(Word8, Word8)] | (LZipLen v x y) } @-}
zip :: ByteString -> ByteString -> [(Word8,Word8)]
zip = zipWith (,)
-- | 'zipWith' generalises 'zip' by zipping with the function given as
-- the first argument, instead of a tupling function. For example,
-- @'zipWith' (+)@ is applied to two ByteStrings to produce the list of
-- corresponding sums.
{-@ zipWith :: (Word8 -> Word8 -> a) -> x:ByteString -> y:ByteString -> {v:[a] | (LZipLen v x y)} @-}
--LIQUID see LazyZip.hs
zipWith :: (Word8 -> Word8 -> a) -> ByteString -> ByteString -> [a]
zipWith = undefined
--LIQUID zipWith _ Empty _ = []
--LIQUID zipWith _ _ Empty = []
--LIQUID zipWith f (Chunk a as) (Chunk b bs) = go a as b bs
--LIQUID where
--LIQUID go x xs y ys = f (S.unsafeHead x) (S.unsafeHead y)
--LIQUID : to (S.unsafeTail x) xs (S.unsafeTail y) ys
--LIQUID
--LIQUID to x Empty _ _ | S.null x = []
--LIQUID to _ _ y Empty | S.null y = []
--LIQUID to x xs y ys | not (S.null x)
--LIQUID && not (S.null y) = go x xs y ys
--LIQUID to x xs _ (Chunk y' ys) | not (S.null x) = go x xs y' ys
--LIQUID to _ (Chunk x' xs) y ys | not (S.null y) = go x' xs y ys
--LIQUID to _ (Chunk x' xs) _ (Chunk y' ys) = go x' xs y' ys
-- | /O(n)/ 'unzip' transforms a list of pairs of bytes into a pair of
-- ByteStrings. Note that this performs two 'pack' operations.
{-@ unzip :: z:[(Word8,Word8)] -> ({v:ByteString | (lbLength v) = (len z)}, {v:ByteString | (lbLength v) = (len z) }) @-}
unzip :: [(Word8,Word8)] -> (ByteString,ByteString)
unzip ls = (pack (L.map fst ls), pack (L.map snd ls))
{-# INLINE unzip #-}
-- ---------------------------------------------------------------------
-- Special lists
-- | /O(n)/ Return all initial segments of the given 'ByteString', shortest first.
{-@ inits :: ByteString -> [ByteString] @-}
inits :: ByteString -> [ByteString]
inits = (Empty :) . inits'
where inits' Empty = []
inits' (Chunk c cs) = let (c':cs') = S.inits c in
L.map (\c' -> Chunk c' Empty) cs' --LIQUID INLINE (L.tail (S.inits c))
++ L.map (Chunk c) (inits' cs)
-- | /O(n)/ Return all final segments of the given 'ByteString', longest first.
{-@ tails :: ByteString -> [ByteString] @-}
tails :: ByteString -> [ByteString]
tails Empty = Empty : []
tails cs@(Chunk c cs')
| S.length c == 1 = cs : tails cs'
| otherwise = cs : tails (Chunk (S.unsafeTail c) cs')
-- ---------------------------------------------------------------------
-- Low level constructors
-- | /O(n)/ Make a copy of the 'ByteString' with its own storage.
-- This is mainly useful to allow the rest of the data pointed
-- to by the 'ByteString' to be garbage collected, for example
-- if a large string has been read in, and only a small part of it
-- is needed in the rest of the program.
{-@ copy :: b:ByteString -> LByteStringSZ b @-}
copy :: ByteString -> ByteString
--LIQUID GHOST copy cs = foldrChunks (Chunk . S.copy) Empty cs
copy cs = foldrChunks (\_ c cs -> Chunk (S.copy c) cs) Empty cs
--TODO, we could coalese small blocks here
--FIXME: probably not strict enough, if we're doing this to avoid retaining
-- the parent blocks then we'd better copy strictly.
-- ---------------------------------------------------------------------
-- TODO defrag func that concatenates block together that are below a threshold
-- defrag :: ByteString -> ByteString
-- ---------------------------------------------------------------------
-- Lazy ByteString IO
-- | Read entire handle contents /lazily/ into a 'ByteString'. Chunks
-- are read on demand, in at most @k@-sized chunks. It does not block
-- waiting for a whole @k@-sized chunk, so if less than @k@ bytes are
-- available then they will be returned immediately as a smaller chunk.
{-@ hGetContentsN :: Nat -> Handle -> IO ByteString @-}
hGetContentsN :: Int -> Handle -> IO ByteString
hGetContentsN k h = lazyRead
where
{-@ Lazy lazyRead @-}
lazyRead = unsafeInterleaveIO loop
{-@ Lazy loop @-}
loop = do
c <- S.hGetNonBlocking h k
--TODO: I think this should distinguish EOF from no data available
-- the underlying POSIX call makes this distincion, returning either
-- 0 or EAGAIN
if S.null c
then do eof <- hIsEOF h
if eof then return Empty
else hWaitForInput h (-1)
>> loop
else do cs <- lazyRead
return (Chunk c cs)
-- | Read @n@ bytes into a 'ByteString', directly from the
-- specified 'Handle', in chunks of size @k@.
{-@ hGetN :: Nat -> Handle -> n:Nat -> IO {v:ByteString | (lbLength v) <= n} @-}
hGetN :: Int -> Handle -> Int -> IO ByteString
hGetN _ _ 0 = return empty
hGetN k h n = readChunks n
where
STRICT1(readChunks)
readChunks i = do
c <- S.hGet h (min k i)
case S.length c of
0 -> return Empty
m -> do cs <- readChunks (i - m)
return (Chunk c cs)
-- | hGetNonBlockingN is similar to 'hGetContentsN', except that it will never block
-- waiting for data to become available, instead it returns only whatever data
-- is available. Chunks are read on demand, in @k@-sized chunks.
{-@ hGetNonBlockingN :: Nat -> Handle -> n:Nat -> IO {v:ByteString | (lbLength v) <= n} @-}
hGetNonBlockingN :: Int -> Handle -> Int -> IO ByteString
#if defined(__GLASGOW_HASKELL__)
hGetNonBlockingN _ _ 0 = return empty
hGetNonBlockingN k h n = readChunks n
where
STRICT1(readChunks)
readChunks i = do
c <- S.hGetNonBlocking h (min k i)
case S.length c of
0 -> return Empty
m -> do cs <- readChunks (i - m)
return (Chunk c cs)
#else
hGetNonBlockingN = hGetN
#endif
-- | Read entire handle contents /lazily/ into a 'ByteString'. Chunks
-- are read on demand, using the default chunk size.
hGetContents :: Handle -> IO ByteString
hGetContents = hGetContentsN defaultChunkSize
-- | Read @n@ bytes into a 'ByteString', directly from the specified 'Handle'.
{-@ hGet :: Handle -> Nat -> IO ByteString @-}
hGet :: Handle -> Int -> IO ByteString
hGet = hGetN defaultChunkSize
-- | hGetNonBlocking is similar to 'hGet', except that it will never block
-- waiting for data to become available, instead it returns only whatever data
-- is available.
#if defined(__GLASGOW_HASKELL__)
{-@ hGetNonBlocking :: Handle -> Nat -> IO ByteString @-}
hGetNonBlocking :: Handle -> Int -> IO ByteString
hGetNonBlocking = hGetNonBlockingN defaultChunkSize
#else
hGetNonBlocking = hGet
#endif
-- | Read an entire file /lazily/ into a 'ByteString'.
readFile :: FilePath -> IO ByteString
readFile f = openBinaryFile f ReadMode >>= hGetContents
-- | Write a 'ByteString' to a file.
writeFile :: FilePath -> ByteString -> IO ()
writeFile f txt = bracket (openBinaryFile f WriteMode) hClose
(\hdl -> hPut hdl txt)
-- | Append a 'ByteString' to a file.
appendFile :: FilePath -> ByteString -> IO ()
appendFile f txt = bracket (openBinaryFile f AppendMode) hClose
(\hdl -> hPut hdl txt)
-- | getContents. Equivalent to hGetContents stdin. Will read /lazily/
getContents :: IO ByteString
getContents = hGetContents stdin
-- | Outputs a 'ByteString' to the specified 'Handle'.
hPut :: Handle -> ByteString -> IO ()
--LIQUID GHOST hPut h cs = foldrChunks (\c rest -> S.hPut h c >> rest) (return ()) cs
hPut h cs = foldrChunks (\_ c rest -> S.hPut h c >> rest) (return ()) cs
-- | A synonym for @hPut@, for compatibility
hPutStr :: Handle -> ByteString -> IO ()
hPutStr = hPut
-- | Write a ByteString to stdout
putStr :: ByteString -> IO ()
putStr = hPut stdout
-- | Write a ByteString to stdout, appending a newline byte
putStrLn :: ByteString -> IO ()
putStrLn ps = hPut stdout ps >> hPut stdout (singleton 0x0a)
-- | The interact function takes a function of type @ByteString -> ByteString@
-- as its argument. The entire input from the standard input device is passed
-- to this function as its argument, and the resulting string is output on the
-- standard output device. It's great for writing one line programs!
interact :: (ByteString -> ByteString) -> IO ()
interact transformer = putStr . transformer =<< getContents
-- ---------------------------------------------------------------------
-- Internal utilities
-- Common up near identical calls to `error' to reduce the number
-- constant strings created when compiled:
errorEmptyList :: String -> a
errorEmptyList fun = moduleError fun "empty ByteString"
{-@ moduleError :: String -> String -> a @-}
moduleError :: String -> String -> a
moduleError fun msg = error ("Data.ByteString.Lazy." ++ fun ++ ':':' ':msg)
-- reverse a list of non-empty chunks into a lazy ByteString
{-@ revNonEmptyChunks :: bs:[ByteStringNE] -> {v:ByteString | (lbLength v) = (bLengths bs)} @-}
revNonEmptyChunks :: [S.ByteString] -> ByteString
--LIQUID INLINE revNonEmptyChunks cs = L.foldl' (flip Chunk) Empty cs
revNonEmptyChunks cs = go Empty cs
{-@ Decrease go 2 @-}
where go acc [] = acc
go acc (c:cs) = go (Chunk c acc) cs
-- reverse a list of possibly-empty chunks into a lazy ByteString
{-@ revChunks :: bs:[S.ByteString] -> {v:ByteString | (lbLength v) = (bLengths bs)} @-}
revChunks :: [S.ByteString] -> ByteString
--LIQUID INLINE revChunks cs = L.foldl' (flip chunk) Empty cs
revChunks cs = go Empty cs
{-@ Decrease go 2 @-}
where go acc [] = acc
go acc (c:cs) = go (chunk c acc) cs
{-@ qualif Blah(v:int, l:int, p:Ptr a): (v + (plen p)) >= l @-}
-- | 'findIndexOrEnd' is a variant of findIndex, that returns the length
-- of the string if no element is found, rather than Nothing.
findIndexOrEnd :: (Word8 -> Bool) -> S.ByteString -> Int
findIndexOrEnd k (S.PS x s l) = S.inlinePerformIO $ withForeignPtr x $ \f -> go l (f `plusPtr` s) 0
where
--LIQUID GHOST
STRICT3(go)
{- LIQUID WITNESS -}
go (d::Int) ptr n
| n >= l = return l
| otherwise = do w <- peek ptr
if k w
then return n
else go (d-1) (ptr `plusPtr` 1) (n+1)
{-# INLINE findIndexOrEnd #-}
{- liquidCanary :: x:Int -> {v: Int | v > x} @-}
liquidCanary :: Int -> Int
liquidCanary x = x - 1
| abakst/liquidhaskell | benchmarks/bytestring-0.9.2.1/Data/ByteString/Lazy.hs | bsd-3-clause | 71,172 | 0 | 18 | 19,406 | 10,884 | 5,952 | 4,932 | -1 | -1 |
{-
(c) The University of Glasgow 2006
(c) The AQUA Project, Glasgow University, 1998
This module contains definitions for the IdInfo for things that
have a standard form, namely:
- data constructors
- record selectors
- method and superclass selectors
- primitive operations
-}
{-# LANGUAGE CPP #-}
module Eta.BasicTypes.MkId (
mkDictFunId, mkDictFunTy, mkDictSelId, mkDictSelRhs,
mkPrimOpId, mkFCallId,
wrapNewTypeBody, unwrapNewTypeBody,
wrapFamInstBody, unwrapFamInstScrut,
wrapTypeFamInstBody, wrapTypeUnbranchedFamInstBody, unwrapTypeFamInstScrut,
unwrapTypeUnbranchedFamInstScrut,
DataConBoxer(..), mkDataConRep, mkDataConWorkId,
-- And some particular Ids; see below for why they are wired in
wiredInIds, ghcPrimIds,
unsafeCoerceName, unsafeCoerceId, realWorldPrimId,
voidPrimId, voidArgId,
nullAddrId, seqId, lazyId, lazyIdKey, runRWId,
coercionTokenId, magicDictId, coerceId,
proxyHashId, noinlineId, noinlineIdName,
-- Re-export error Ids
module Eta.Prelude.PrelRules
) where
#include "HsVersions.h"
import Eta.Specialise.Rules
import Eta.Prelude.TysPrim
import Eta.Prelude.TysWiredIn
import Eta.Prelude.PrelRules
import Eta.Types.Type
import Eta.Types.FamInstEnv
import Eta.Types.Coercion
import Eta.TypeCheck.TcType
import qualified Eta.TypeCheck.TcType as TcType
import Eta.Core.MkCore
import Eta.Core.CoreUtils ( exprType, mkCast )
import Eta.Core.CoreUnfold
import Eta.BasicTypes.Literal
import Eta.Types.TyCon
import Eta.Types.CoAxiom
import Eta.Types.Class
import Eta.BasicTypes.NameSet
import Eta.BasicTypes.VarSet
import Eta.BasicTypes.Name
import Eta.Prelude.PrimOp
import Eta.Prelude.ForeignCall
import Eta.BasicTypes.DataCon
import Eta.BasicTypes.Id
import Eta.BasicTypes.IdInfo
import Eta.BasicTypes.Demand
import Eta.Core.CoreSyn
import Eta.BasicTypes.Unique
import Eta.BasicTypes.UniqSupply
import Eta.Prelude.PrelNames
import Eta.BasicTypes.BasicTypes hiding ( SuccessFlag(..) )
import Eta.Utils.Util
import Eta.Utils.Pair
import Eta.Main.DynFlags
import Eta.Utils.Outputable
import Eta.Utils.FastString
import Eta.Utils.ListSetOps
import qualified Eta.LanguageExtensions as LangExt
import Data.Maybe ( maybeToList )
{-
************************************************************************
* *
\subsection{Wired in Ids}
* *
************************************************************************
Note [Wired-in Ids]
~~~~~~~~~~~~~~~~~~~
There are several reasons why an Id might appear in the wiredInIds:
(1) The ghcPrimIds are wired in because they can't be defined in
    Haskell at all, although they can be defined in Core.  They have
compulsory unfoldings, so they are always inlined and they have
no definition site. Their home module is GHC.Prim, so they
also have a description in primops.txt.pp, where they are called
'pseudoops'.
(2) The 'error' function, eRROR_ID, is wired in because we don't yet have
a way to express in an interface file that the result type variable
is 'open'; that is can be unified with an unboxed type
[The interface file format now carry such information, but there's
no way yet of expressing at the definition site for these
error-reporting functions that they have an 'open'
result type. -- sof 1/99]
(3) Other error functions (rUNTIME_ERROR_ID) are wired in (a) because
     the desugarer generates code that mentions them directly, and
(b) for the same reason as eRROR_ID
(4) lazyId is wired in because the wired-in version overrides the
strictness of the version defined in GHC.Base
(5) noinlineId is wired in because when we serialize to interfaces
we may insert noinline statements.
In cases (2-4), the function has a definition in a library module, and
can be called; but the wired-in version means that the details are
never read from that module's interface file; instead, the full definition
is right here.
-}
-- | Every Id that is wired into the compiler.  See Note [Wired-in Ids]
-- above for the reasons each group is here.  The full definition of each
-- of these lives right in this module; their interface files are never
-- consulted.
wiredInIds :: [Id]
wiredInIds = concat
  [ [lazyId, dollarId, oneShotId, runRWId, noinlineId]
  , errorIds      -- Defined in MkCore
  , ghcPrimIds    -- Exported from GHC.Prim; see below
  ]
-- | The Ids exported from GHC.Prim.  These cannot be defined in Haskell
-- source at all, but each has a perfectly reasonable unfolding in Core,
-- so we wire them in here (see Note [Wired-in Ids], reason (1)).
ghcPrimIds :: [Id]
ghcPrimIds =
  [ realWorldPrimId
  , voidPrimId
  , unsafeCoerceId
  , nullAddrId
  , seqId
  , magicDictId
  , coerceId
  , proxyHashId
  ]
{-
************************************************************************
* *
\subsection{Data constructors}
* *
************************************************************************
The wrapper for a constructor is an ordinary top-level binding that evaluates
any strict args, unboxes any args that are going to be flattened, and calls
the worker.
We're going to build a constructor that looks like:
data (Data a, C b) => T a b = T1 !a !Int b
T1 = /\ a b ->
\d1::Data a, d2::C b ->
\p q r -> case p of { p ->
case q of { q ->
Con T1 [a,b] [p,q,r]}}
Notice that
* d2 is thrown away --- a context in a data decl is used to make sure
one *could* construct dictionaries at the site the constructor
is used, but the dictionary isn't actually used.
* We have to check that we can construct Data dictionaries for
the types a and Int. Once we've done that we can throw d1 away too.
* We use (case p of q -> ...) to evaluate p, rather than "seq" because
all that matters is that the arguments are evaluated. "seq" is
very careful to preserve evaluation order, which we don't need
to be here.
You might think that we could simply give constructors some strictness
info, like PrimOps, and let CoreToStg do the let-to-case transformation.
But we don't do that because in the case of primops and functions strictness
is a *property* not a *requirement*. In the case of constructors we need to
do something active to evaluate the argument.
Making an explicit case expression allows the simplifier to eliminate
it in the (common) case where the constructor arg is already evaluated.
Note [Wrappers for data instance tycons]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In the case of data instances, the wrapper also applies the coercion turning
the representation type into the family instance type to cast the result of
the wrapper. For example, consider the declarations
data family Map k :: * -> *
data instance Map (a, b) v = MapPair (Map a (Pair b v))
The tycon to which the datacon MapPair belongs gets a unique internal
name of the form :R123Map, and we call it the representation tycon.
In contrast, Map is the family tycon (accessible via
tyConFamInst_maybe). A coercion allows you to move between
representation and family type. It is accessible from :R123Map via
tyConFamilyCoercion_maybe and has kind
Co123Map a b v :: {Map (a, b) v ~ :R123Map a b v}
The wrapper and worker of MapPair get the types
-- Wrapper
$WMapPair :: forall a b v. Map a (Map a b v) -> Map (a, b) v
$WMapPair a b v = MapPair a b v `cast` sym (Co123Map a b v)
-- Worker
MapPair :: forall a b v. Map a (Map a b v) -> :R123Map a b v
This coercion is conditionally applied by wrapFamInstBody.
It's a bit more complicated if the data instance is a GADT as well!
data instance T [a] where
T1 :: forall b. b -> T [Maybe b]
Hence we translate to
-- Wrapper
$WT1 :: forall b. b -> T [Maybe b]
$WT1 b v = T1 (Maybe b) b (Maybe b) v
`cast` sym (Co7T (Maybe b))
-- Worker
T1 :: forall c b. (c ~ Maybe b) => b -> :R7T c
-- Coercion from family type to representation type
Co7T a :: T [a] ~ :R7T a
Note [Newtype datacons]
~~~~~~~~~~~~~~~~~~~~~~~
The "data constructor" for a newtype should always be vanilla. At one
point this wasn't true, because the newtype arising from
class C a => D a
looked like
newtype T:D a = D:D (C a)
so the data constructor for T:C had a single argument, namely the
predicate (C a). But now we treat that as an ordinary argument, not
part of the theta-type, so all is well.
************************************************************************
* *
\subsection{Dictionary selectors}
* *
************************************************************************
Selecting a field for a dictionary. If there is just one field, then
there's nothing to do.
Dictionary selectors may get nested forall-types. Thus:
class Foo a where
op :: forall b. Ord b => a -> b -> b
Then the top-level type for op is
op :: forall a. Foo a =>
forall b. Ord b =>
a -> b -> b
This is unlike ordinary record selectors, which have all the for-alls
at the outside. When dealing with classes it's very convenient to
recover the original type signature from the class op selector.
-}
-- | Build the global 'Id' for one of a class's *value* selectors, i.e. a
-- superclass or method selector that extracts the corresponding field
-- from the class's dictionary.  For a newtype-dictionary class (single
-- method, no superclasses) the selector gets a compulsory inline
-- unfolding; otherwise it gets a built-in rewrite rule ('dictSelRule')
-- but no unfolding, so the rule can always fire.
mkDictSelId :: Name -- Name of one of the *value* selectors
                    -- (dictionary superclass or method)
            -> Class -> Id
mkDictSelId name clas
  = mkGlobalId (ClassOpId clas) name sel_ty info
  where
    tycon = classTyCon clas
    sel_names = map idName (classAllSelIds clas)
    new_tycon = isNewTyCon tycon
    -- A class tycon has exactly one data constructor (the dictionary con)
    [data_con] = tyConDataCons tycon
    tyvars = dataConUnivTyVars data_con
    arg_tys = dataConRepArgTys data_con -- Includes the dictionary superclasses
    -- Position of this selector among (superclasses ++ methods),
    -- i.e. which dictionary field it extracts
    val_index = assoc "MkId.mkDictSelId" (sel_names `zip` [0..]) name
    -- Selector type: forall tvs. C tvs -> <field type>
    sel_ty = mkForAllTys tyvars (mkFunTy (mkClassPred clas (mkTyVarTys tyvars))
                                 (getNth arg_tys val_index))
    base_info = noCafIdInfo
                `setArityInfo` 1
                `setStrictnessInfo` strict_sig
    info | new_tycon
         = base_info `setInlinePragInfo` alwaysInlinePragma
                     `setUnfoldingInfo` mkInlineUnfolding (Just 1) (mkDictSelRhs clas val_index)
           -- See Note [Single-method classes] in TcInstDcls
           -- for why alwaysInlinePragma
         | otherwise
         = base_info `setRuleInfo` mkRuleInfo [rule]
           -- Add a magic BuiltinRule, but no unfolding
           -- so that the rule is always available to fire.
           -- See Note [ClassOp/DFun selection] in TcInstDcls
    n_ty_args = length tyvars
    -- This is the built-in rule that goes
    --      op (dfT d1 d2) ---> opT d1 d2
    rule = BuiltinRule { ru_name = fsLit "Class op " `appendFS`
                                   occNameFS (getOccName name)
                       , ru_fn = name
                       , ru_nargs = n_ty_args + 1
                       , ru_try = dictSelRule val_index n_ty_args }
    -- The strictness signature is of the form U(AAAVAAAA) -> T
    -- where the V depends on which item we are selecting
    -- It's worth giving one, so that absence info etc is generated
    -- even if the selector isn't inlined
    strict_sig = mkClosedStrictSig [arg_dmd] topRes
    arg_dmd | new_tycon = evalDmd
            | otherwise = mkManyUsedDmd $
                          mkProdDmd [ if name == sel_name then evalDmd else absDmd
                                    | sel_name <- sel_names ]
-- | Build the right-hand side for a dictionary selector: a lambda over
-- the class type variables and the dictionary itself, whose body either
-- unwraps the newtype dictionary (single-method classes) or cases on the
-- dictionary data constructor and returns the selected field.
mkDictSelRhs :: Class
             -> Int -- 0-indexed selector among (superclasses ++ methods)
             -> CoreExpr
mkDictSelRhs clas val_index
  = mkLams tyvars (Lam dict_id rhs_body)
  where
    tycon = classTyCon clas
    new_tycon = isNewTyCon tycon
    -- A class tycon has exactly one data constructor
    [data_con] = tyConDataCons tycon
    tyvars = dataConUnivTyVars data_con
    arg_tys = dataConRepArgTys data_con -- Includes the dictionary superclasses
    -- The case binder corresponding to the selected field
    the_arg_id = getNth arg_ids val_index
    pred = mkClassPred clas (mkTyVarTys tyvars)
    dict_id = mkTemplateLocal 1 pred
    arg_ids = mkTemplateLocalsNum 2 arg_tys
    rhs_body | new_tycon = unwrapNewTypeBody tycon (map mkTyVarTy tyvars) (Var dict_id)
             | otherwise = Case (Var dict_id) dict_id (idType the_arg_id)
                           [(DataAlt data_con, arg_ids, varToCoreExpr the_arg_id)]
                -- varToCoreExpr needed for equality superclass selectors
                --   sel a b d = case x of { MkC _ (g:a~b) _ -> CO g }
-- | The built-in rewrite rule attached to a dictionary selector.
-- It tries to persuade the dictionary argument to look like a
-- constructor application, using 'exprIsConApp_maybe', and then selects
-- the wanted field from it directly:
--
--      sel_i t1..tk (D t1..tk op1 ... opm)  =  opi
--
dictSelRule :: Int -> Arity -> RuleFun
dictSelRule val_index n_ty_args _ id_unf _ args =
  case drop n_ty_args args of       -- Skip the type arguments
    dict_arg : _
      | Just (_, _, con_args) <- exprIsConApp_maybe id_unf dict_arg
      -> Just (getNth con_args val_index)
    _ -> Nothing
{-
************************************************************************
* *
Data constructors
* *
************************************************************************
-}
-- | Build the worker 'Id' for a data constructor.  For ordinary
-- algebraic data types the worker is the "real" constructor; for
-- newtypes there is no worker at runtime, so we instead build an
-- always-inlined wrapper whose unfolding is a cast
-- (see Note [Newtype datacons] above).
mkDataConWorkId :: Name -> DataCon -> Id
mkDataConWorkId wkr_name data_con
  | isNewTyCon tycon
  = mkGlobalId (DataConWrapId data_con) wkr_name nt_wrap_ty nt_work_info
  | otherwise
  = mkGlobalId (DataConWorkId data_con) wkr_name alg_wkr_ty wkr_info
  where
    tycon = dataConTyCon data_con
    ----------- Workers for data types --------------
    alg_wkr_ty = dataConRepType data_con
    wkr_arity = dataConRepArity data_con
    wkr_info = noCafIdInfo
               `setArityInfo` wkr_arity
               `setStrictnessInfo` wkr_sig
               `setUnfoldingInfo` evaldUnfolding -- Record that it's evaluated,
                                                 -- even if arity = 0
    -- All-lazy demand on every argument; see the note below for why the
    -- worker is *not* marked strict even for strict constructors
    wkr_sig = mkClosedStrictSig (replicate wkr_arity topDmd) (dataConCPR data_con)
        -- Note [Data-con worker strictness]
        -- Notice that we do *not* say the worker is strict
        -- even if the data constructor is declared strict
        --      e.g.    data T = MkT !(Int,Int)
        -- Why? Because the *wrapper* is strict (and its unfolding has case
        -- expressions that do the evals) but the *worker* itself is not.
        -- If we pretend it is strict then when we see
        --      case x of y -> $wMkT y
        -- the simplifier thinks that y is "sure to be evaluated" (because
        -- $wMkT is strict) and drops the case. No, $wMkT is not strict.
        --
        -- When the simplifer sees a pattern
        --      case e of MkT x -> ...
        -- it uses the dataConRepStrictness of MkT to mark x as evaluated;
        -- but that's fine... dataConRepStrictness comes from the data con
        -- not from the worker Id.
    ----------- Workers for newtypes --------------
    (nt_tvs, _, nt_arg_tys, _) = dataConSig data_con
    res_ty_args = mkTyVarTys nt_tvs
    nt_wrap_ty = dataConUserType data_con
    nt_work_info = noCafIdInfo -- The NoCaf-ness is set by noCafIdInfo
                   `setArityInfo` 1 -- Arity 1
                   `setInlinePragInfo` alwaysInlinePragma
                   `setUnfoldingInfo` newtype_unf
    id_arg1 = mkTemplateLocal 1 (head nt_arg_tys)
    -- Compulsory unfolding: \tvs x -> x `cast` <newtype coercion>
    newtype_unf = ASSERT2( isVanillaDataCon data_con &&
                           isSingleton nt_arg_tys, ppr data_con )
                  -- Note [Newtype datacons]
                  mkCompulsoryUnfolding $
                  mkLams nt_tvs $ Lam id_arg1 $
                  wrapNewTypeBody tycon res_ty_args (Var id_arg1)
-- | Compute the CPR (constructed-product-result) information for a data
-- constructor's worker.  Only real, vanilla (no-existential) data
-- constructors of modest arity get a CPR result; everything else gets
-- the top result.
dataConCPR :: DataCon -> DmdResult
dataConCPR con
  | cpr_worthy = cpr_res
  | otherwise  = topRes
  where
    tycon     = dataConTyCon con
    wkr_arity = dataConRepArity con
    -- Real data types only (not unboxed tuples or newtypes), with no
    -- existentials, and an arity in (0, mAX_CPR_SIZE]
    cpr_worthy = isDataTyCon tycon
              && isVanillaDataCon con
              && wkr_arity > 0
              && wkr_arity <= mAX_CPR_SIZE
    -- Product types get a product CPR result; sum types record the tag
    cpr_res | isProductTyCon tycon = vanillaCprProdRes wkr_arity
            | otherwise            = cprSumRes (dataConTag con)
-- | Upper bound on constructor arity for which we record CPR info.
mAX_CPR_SIZE :: Arity
mAX_CPR_SIZE = 10
-- We do not treat very big tuples as CPR-ish:
--      a) for a start we get into trouble because there aren't
--         "enough" unboxed tuple types (a tiresome restriction,
--         but hard to fix),
--      b) more importantly, big unboxed tuples get returned mainly
--         on the stack, and are often then allocated in the heap
--         by the caller.  So doing CPR for them may in fact make
--         things worse.
{-
-------------------------------------------------
-- Data constructor representation
--
-- This is where we decide how to wrap/unwrap the
-- constructor fields
--
--------------------------------------------------
-}
-- | Unbox: given the source-level argument variable, produce the
-- representation-level variables plus a wrapper that binds them around a
-- continuation expression.
type Unboxer = Var -> UniqSM ([Var], CoreExpr -> CoreExpr)
  -- Unbox: bind rep vars by decomposing src var
-- | Box: rebuild the source-level argument from its representation
-- variables ('UnitBox' when no rebuilding is needed).
data Boxer = UnitBox | Boxer (TvSubst -> UniqSM ([Var], CoreExpr))
  -- Box: build src arg using these rep vars
-- | How to box a whole data constructor's arguments; used when the
-- pattern-match compiler rebuilds source-level bindings.
newtype DataConBoxer = DCB ([Type] -> [Var] -> UniqSM ([Var], [CoreBind]))
                       -- Bind these src-level vars, returning the
                       -- rep-level vars to bind in the pattern
-- | Decide on, and build, the wrapper/worker representation for a data
-- constructor: which arguments are forced or unpacked, the wrapper Id
-- (if one is needed at all), and the boxing/unboxing code that mediates
-- between source-level and representation-level arguments.
-- Returns 'NoDataConRep' when no wrapper is required (no strictness,
-- no unpacking, not a data family instance).
-- See Note [Bangs on imported data constructors] below for 'mb_bangs'.
mkDataConRep :: DynFlags
             -> FamInstEnvs
             -> Name
             -> Maybe [HsImplBang]
             -> DataCon
             -> UniqSM DataConRep
mkDataConRep dflags fam_envs wrap_name mb_bangs data_con
  | not wrapper_reqd
  = return NoDataConRep
  | otherwise
  = do { wrap_args <- mapM newLocal wrap_arg_tys
       ; wrap_body <- mk_rep_app (wrap_args `zip` dropList eq_spec unboxers)
                                 initial_wrap_app
       ; let wrap_id = mkGlobalId (DataConWrapId data_con) wrap_name wrap_ty wrap_info
             wrap_info = noCafIdInfo
                         `setArityInfo` wrap_arity
                             -- It's important to specify the arity, so that partial
                             -- applications are treated as values
                         `setInlinePragInfo` alwaysInlinePragma
                         `setUnfoldingInfo` wrap_unf
                         `setStrictnessInfo` wrap_sig
                             -- We need to get the CAF info right here because TidyPgm
                             -- does not tidy the IdInfo of implicit bindings (like the wrapper)
                             -- so it does not make sure that the CAF info is sane
             wrap_sig = mkClosedStrictSig wrap_arg_dmds (dataConCPR data_con)
             wrap_arg_dmds = map mk_dmd arg_ibangs
             -- A banged argument is demanded strictly by the wrapper
             mk_dmd str | isBanged str = evalDmd
                        | otherwise = topDmd
                 -- The Cpr info can be important inside INLINE rhss, where the
                 -- wrapper constructor isn't inlined.
                 -- And the argument strictness can be important too; we
                 -- may not inline a constructor when it is partially applied.
                 -- For example:
                 --      data W = C !Int !Int !Int
                 --      ...(let w = C x in ...(w p q)...)...
                 -- we want to see that w is strict in its two arguments
             wrap_unf = mkInlineUnfolding (Just wrap_arity) wrap_rhs
             wrap_tvs = (univ_tvs `minusList` map fst eq_spec) ++ ex_tvs
             -- Wrapper body: evaluate/unbox the arguments, apply the
             -- worker, then cast into the family instance type if needed
             wrap_rhs = mkLams wrap_tvs $
                        mkLams wrap_args $
                        wrapFamInstBody tycon res_ty_args $
                        wrap_body
       ; return (DCR { dcr_wrap_id = wrap_id
                     , dcr_boxer = mk_boxer boxers
                     , dcr_arg_tys = rep_tys
                     , dcr_stricts = rep_strs
                     , dcr_bangs = arg_ibangs }) }
  where
    (univ_tvs, ex_tvs, eq_spec, theta, orig_arg_tys, _) = dataConFullSig data_con
    res_ty_args = substTyVars (mkTopTvSubst eq_spec) univ_tvs
    tycon = dataConTyCon data_con -- The representation TyCon (not family)
    wrap_ty = dataConUserType data_con
    ev_tys = eqSpecPreds eq_spec ++ theta
    all_arg_tys = ev_tys ++ orig_arg_tys
    ev_ibangs = map mk_pred_strict_mark ev_tys
    orig_bangs = dataConSrcBangs data_con
    wrap_arg_tys = theta ++ orig_arg_tys
    wrap_arity = length wrap_arg_tys
    -- The wrap_args are the arguments *other than* the eq_spec
    -- Because we are going to apply the eq_spec args manually in the
    -- wrapper
    -- Use recorded HsImplBangs when available (imported data cons);
    -- otherwise compute them from the source bangs and current flags
    arg_ibangs =
      case mb_bangs of
        Nothing -> zipWith (dataConSrcToImplBang dflags fam_envs)
                           orig_arg_tys orig_bangs
        Just bangs -> bangs
    (rep_tys_w_strs, wrappers)
      = unzip (zipWith dataConArgRep all_arg_tys (ev_ibangs ++ arg_ibangs))
    (unboxers, boxers) = unzip wrappers
    (rep_tys, rep_strs) = unzip (concat rep_tys_w_strs)
    wrapper_reqd = not (isNewTyCon tycon) -- Newtypes have only a worker
                   && (any isBanged (ev_ibangs ++ arg_ibangs)
                         -- Some forcing/unboxing (includes eq_spec)
                       || isFamInstTyCon tycon) -- Cast result
    -- The worker applied to the universal types, existential vars and
    -- the eq_spec coercions; argument applications are built on top
    initial_wrap_app = Var (dataConWorkId data_con)
                       `mkTyApps` res_ty_args
                       `mkVarApps` ex_tvs
                       `mkCoApps` map (mkReflCo Nominal . snd) eq_spec
                       -- Dont box the eq_spec coercions since they are
                       -- marked as HsUnpack by mk_dict_strict_mark
    -- Compose all the per-argument boxers into a single DataConBoxer
    mk_boxer :: [Boxer] -> DataConBoxer
    mk_boxer boxers = DCB (\ ty_args src_vars ->
                      do { let ex_vars = takeList ex_tvs src_vars
                               subst1 = mkTopTvSubst (univ_tvs `zip` ty_args)
                               subst2 = extendTvSubstList subst1 ex_tvs
                                                          (mkTyVarTys ex_vars)
                         ; (rep_ids, binds) <- go subst2 boxers (dropList ex_tvs src_vars)
                         ; return (ex_vars ++ rep_ids, binds) } )
    -- Walk boxers and source vars in lockstep, accumulating the rep-level
    -- vars and the bindings that rebuild the boxed arguments
    go _ [] src_vars = ASSERT2( null src_vars, ppr data_con ) return ([], [])
    go subst (UnitBox : boxers) (src_var : src_vars)
      = do { (rep_ids2, binds) <- go subst boxers src_vars
           ; return (src_var : rep_ids2, binds) }
    go subst (Boxer boxer : boxers) (src_var : src_vars)
      = do { (rep_ids1, arg) <- boxer subst
           ; (rep_ids2, binds) <- go subst boxers src_vars
           ; return (rep_ids1 ++ rep_ids2, NonRec src_var arg : binds) }
    go _ (_:_) [] = pprPanic "mk_boxer" (ppr data_con)
    -- Build the wrapper body: unbox each wrapper argument and apply the
    -- resulting rep-level vars to the (growing) worker application
    mk_rep_app :: [(Id,Unboxer)] -> CoreExpr -> UniqSM CoreExpr
    mk_rep_app [] con_app
      = return con_app
    mk_rep_app ((wrap_arg, unboxer) : prs) con_app
      = do { (rep_ids, unbox_fn) <- unboxer wrap_arg
           ; expr <- mk_rep_app prs (mkVarApps con_app rep_ids)
           ; return (unbox_fn expr) }
{-
Note [Bangs on imported data constructors]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We pass Maybe [HsImplBang] to mkDataConRep to make use of HsImplBangs
from imported modules.
- Nothing <=> use HsSrcBangs
- Just bangs <=> use HsImplBangs
For imported types we can't work it all out from the HsSrcBangs,
because we want to be very sure to follow what the original module
(where the data type was declared) decided, and that depends on what
flags were enabled when it was compiled. So we record the decisions in
the interface file.
The HsImplBangs passed are in 1-1 correspondence with the
dataConOrigArgTys of the DataCon.
-}
-------------------------
newLocal :: Type -> UniqSM Var
-- Make a fresh system-generated local (occ name "dt") of the given
-- type, for use in worker/wrapper plumbing.
newLocal ty = fmap (\ uniq -> mkSysLocal (fsLit "dt") uniq ty) getUniqueM
-------------------------
-- | Decide the representation strictness ('HsImplBang') for one
-- constructor field, given the bang the user wrote ('HsSrcBang') and
-- the active flags (-XStrictData, -funbox-strict-fields, ...).
-- See Note [Bangs on imported data constructors].
dataConSrcToImplBang
   :: DynFlags
   -> FamInstEnvs
   -> Type
   -> HsSrcBang
   -> HsImplBang
-- No source annotation: the meaning depends on -XStrictData.
dataConSrcToImplBang dflags fam_envs arg_ty
              (HsSrcBang ann unpk NoSrcStrict)
  | xopt LangExt.StrictData dflags -- StrictData => strict field
  = dataConSrcToImplBang dflags fam_envs arg_ty
                  (HsSrcBang ann unpk SrcStrict)
  | otherwise -- no StrictData => lazy field
  = HsLazy
-- An explicit ~ (or lazy-by-default) field is always lazy.
dataConSrcToImplBang _ _ _ (HsSrcBang _ _ SrcLazy)
  = HsLazy
-- A strict field: try to unpack it if the pragmas/flags allow.
dataConSrcToImplBang dflags fam_envs arg_ty
    (HsSrcBang _ unpk_prag SrcStrict)
  | not (gopt Opt_OmitInterfacePragmas dflags) -- Don't unpack if -fomit-iface-pragmas
          -- Don't unpack if we aren't optimising; rather arbitrarily,
          -- we use -fomit-iface-pragmas as the indication
  , let mb_co   = topNormaliseType_maybe fam_envs arg_ty
                     -- Unwrap type families and newtypes
        arg_ty' = case mb_co of { Just (_,ty) -> ty; Nothing -> arg_ty }
  , isUnpackableType dflags fam_envs arg_ty'
  , (rep_tys, _) <- dataConArgUnpack arg_ty'
  , case unpk_prag of
      NoSrcUnpack ->
        gopt Opt_UnboxStrictFields dflags
            || (gopt Opt_UnboxSmallStrictFields dflags
                && length rep_tys <= 1) -- See Note [Unpack one-wide fields]
      srcUnpack -> isSrcUnpacked srcUnpack
  -- Keep the normalising coercion (if any) so that the wrapper can
  -- cast the field before unpacking it.
  = case mb_co of
      Nothing     -> HsUnpack Nothing
      Just (co,_) -> HsUnpack (Just co)
  | otherwise -- Record the strict-but-no-unpack decision
  = HsStrict
-- | Wrappers/Workers and representation following Unpack/Strictness
-- decisions
dataConArgRep
   :: Type
   -> HsImplBang
   -> ([(Type,StrictnessMark)] -- Rep types
       ,(Unboxer,Boxer))
-- Lazy field: type unchanged, no unbox/box code needed.
dataConArgRep arg_ty HsLazy
  = ([(arg_ty, NotMarkedStrict)], (unitUnboxer, unitBoxer))
-- Strict-but-not-unpacked field: type unchanged, but the worker
-- evaluates the argument (seqUnboxer).
dataConArgRep arg_ty HsStrict
  = ([(arg_ty, MarkedStrict)], (seqUnboxer, unitBoxer))
-- Unpacked field: flatten into its representation types.
dataConArgRep arg_ty (HsUnpack Nothing)
  | (rep_tys, wrappers) <- dataConArgUnpack arg_ty
  = (rep_tys, wrappers)
-- Unpacked through a coercion (field type was a newtype/family):
-- unpack the representation type and compose the coercion in.
dataConArgRep _ (HsUnpack (Just co))
  | let co_rep_ty = pSnd (coercionKind co)
  , (rep_tys, wrappers) <- dataConArgUnpack co_rep_ty
  = (rep_tys, wrapCo co co_rep_ty wrappers)
-------------------------
-- | Compose a coercion with an unboxer/boxer pair: the unboxer first
-- casts the argument along @co@ before unboxing, and the boxer casts
-- the reboxed result back along the (substituted) symmetric coercion.
wrapCo :: Coercion -> Type -> (Unboxer, Boxer) -> (Unboxer, Boxer)
wrapCo co rep_ty (unbox_rep, box_rep)  -- co :: arg_ty ~ rep_ty
  = (unboxer, boxer)
  where
    unboxer arg_id = do { rep_id <- newLocal rep_ty
                        ; (rep_ids, rep_fn) <- unbox_rep rep_id
                        ; let co_bind = NonRec rep_id (Var arg_id `Cast` co)
                        ; return (rep_ids, Let co_bind . rep_fn) }
    -- The boxer runs under a substitution (instantiating the data
    -- con's type variables), so the coercion must be substituted too.
    boxer = Boxer $ \ subst ->
            do { (rep_ids, rep_expr)
                    <- case box_rep of
                         UnitBox -> do { rep_id <- newLocal (TcType.substTy subst rep_ty)
                                       ; return ([rep_id], Var rep_id) }
                         Boxer boxer -> boxer subst
               ; let sco = substCo (tvCvSubst subst) co
               ; return (rep_ids, rep_expr `Cast` mkSymCo sco) }
------------------------
seqUnboxer :: Unboxer
-- Unboxer for a strict-but-not-unpacked field: wrap the body in a
-- default-only case so the worker evaluates the argument first.
seqUnboxer v = return ([v], force_with_case)
  where
    force_with_case body = Case (Var v) v (exprType body) [(DEFAULT, [], body)]
unitUnboxer :: Unboxer
-- The do-nothing unboxer: pass the field through unchanged.
unitUnboxer v = return ([v], id)
unitBoxer :: Boxer
-- The do-nothing boxer: the field is already in its final form.
unitBoxer = UnitBox
-------------------------
-- | Flatten an UNPACKed field of (single-constructor) algebraic data
-- type into the representation argument types of its constructor,
-- together with the code to take it apart (case) and rebuild it
-- (saturated worker application).
dataConArgUnpack
   :: Type
   ->  ( [(Type, StrictnessMark)]   -- Rep types
       , (Unboxer, Boxer) )
dataConArgUnpack arg_ty
  | Just (tc, tc_args) <- splitTyConApp_maybe arg_ty
  , Just con <- tyConSingleAlgDataCon_maybe tc
      -- NB: check for an *algebraic* data type
      -- A recursive newtype might mean that
      -- 'arg_ty' is a newtype
  , let rep_tys = dataConInstArgTys con tc_args
  = ASSERT( isVanillaDataCon con )
    ( rep_tys `zip` dataConRepStrictness con
    ,( \ arg_id ->
       -- Unboxer: case-analyse the field to expose its components.
       do { rep_ids <- mapM newLocal rep_tys
          ; let unbox_fn body
                  = Case (Var arg_id) arg_id (exprType body)
                         [(DataAlt con, rep_ids, body)]
          ; return (rep_ids, unbox_fn) }
     , Boxer $ \ subst ->
       -- Boxer: rebuild the field by saturating the worker.
       do { rep_ids <- mapM (newLocal . TcType.substTy subst) rep_tys
          ; return (rep_ids, Var (dataConWorkId con)
                             `mkTyApps` (substTys subst tc_args)
                             `mkVarApps` rep_ids ) } ) )
  | otherwise
  = pprPanic "dataConArgUnpack" (ppr arg_ty)
    -- An interface file specified Unpacked, but we couldn't unpack it
-- An interface file specified Unpacked, but we couldn't unpack it
isUnpackableType :: DynFlags -> FamInstEnvs -> Type -> Bool
-- True if we can unpack the UNPACK the argument type
-- See Note [Recursive unboxing]
-- We look "deeply" inside rather than relying on the DataCons
-- we encounter on the way, because otherwise we might well
-- end up relying on ourselves!
isUnpackableType dflags fam_envs ty
  | Just (tc, _) <- splitTyConApp_maybe ty
  , Just con <- tyConSingleAlgDataCon_maybe tc
  , isVanillaDataCon con
  -- Track the tycons seen so far (starting from this one) so that a
  -- recursive occurrence makes the answer False.
  = ok_con_args (unitNameSet (getName tc)) con
  | otherwise
  = False
  where
    -- A field is OK if either we would not attempt to unpack it, or
    -- its (normalised) type is itself safely unpackable.
    ok_arg tcs (ty, bang) = not (attempt_unpack bang) || ok_ty tcs norm_ty
        where
          norm_ty = topNormaliseType fam_envs ty
    ok_ty tcs ty
      | Just (tc, _) <- splitTyConApp_maybe ty
      , let tc_name = getName tc
      =  not (tc_name `elemNameSet` tcs)
      && case tyConSingleAlgDataCon_maybe tc of
            Just con | isVanillaDataCon con
                    -> ok_con_args (tcs `extendNameSet` getName tc) con
            _ -> True
      | otherwise
      = True
    ok_con_args tcs con
       = all (ok_arg tcs) (dataConOrigArgTys con `zip` dataConSrcBangs con)
         -- NB: dataConSrcBangs gives the *user* request;
         -- We'd get a black hole if we used dataConImplBangs
    -- Would the flags/pragmas cause us to try unpacking this field?
    attempt_unpack (HsSrcBang _ SrcUnpack NoSrcStrict)
      = xopt LangExt.StrictData dflags
    attempt_unpack (HsSrcBang _ SrcUnpack SrcStrict)
      = True
    attempt_unpack (HsSrcBang _  NoSrcUnpack SrcStrict)
      = True      -- Be conservative
    attempt_unpack (HsSrcBang _  NoSrcUnpack NoSrcStrict)
      = xopt LangExt.StrictData dflags -- Be conservative
    attempt_unpack _ = False
{-
Note [Unpack one-wide fields]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The flag UnboxSmallStrictFields ensures that any field that can
(safely) be unboxed to a word-sized unboxed field, should be so unboxed.
For example:
data A = A Int#
newtype B = B A
data C = C !B
data D = D !C
data E = E !()
data F = F !D
data G = G !F !F
All of these should have an Int# as their representation, except
G which should have two Int#s.
However
data T = T !(S Int)
data S = S !a
Here we can represent T with an Int#.
Note [Recursive unboxing]
~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
data R = MkR {-# UNPACK #-} !S Int
data S = MkS {-# UNPACK #-} !Int
The representation arguments of MkR are the *representation* arguments
of S (plus Int); the rep args of MkS are Int#. This is all fine.
But be careful not to try to unbox this!
data T = MkT {-# UNPACK #-} !T Int
Because then we'd get an infinite number of arguments.
Here is a more complicated case:
data S = MkS {-# UNPACK #-} !T Int
data T = MkT {-# UNPACK #-} !S Int
Each of S and T must decide independently whether to unpack
and they had better not both say yes. So they must both say no.
Also behave conservatively when there is no UNPACK pragma
data T = MkS !T Int
with -funbox-strict-fields or -funbox-small-strict-fields
we need to behave as if there was an UNPACK pragma there.
But it's the *argument* type that matters. This is fine:
data S = MkS S !Int
because Int is non-recursive.
Note [Unpack equality predicates]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If we have a GADT with a constructor C :: (a~[b]) => b -> T a
we definitely want that equality predicate *unboxed* so that it
takes no space at all. This is easily done: just give it
an UNPACK pragma. The rest of the unpack/repack code does the
heavy lifting. This one line makes every GADT take a word less
space for each equality predicate, so it's pretty important!
-}
mk_pred_strict_mark :: PredType -> HsImplBang
-- Equality predicates are unpacked so they take no space at all;
-- every other predicate argument is left lazy.
-- Note [Unpack equality predicates]
mk_pred_strict_mark pred =
  if isEqPred pred then HsUnpack Nothing else HsLazy
{-
************************************************************************
* *
Wrapping and unwrapping newtypes and type families
* *
************************************************************************
-}
wrapNewTypeBody :: TyCon -> [Type] -> CoreExpr -> CoreExpr
-- The wrapper for the data constructor for a newtype looks like this:
--      newtype T a = MkT (a,Int)
--      MkT :: forall a. (a,Int) -> T a
--      MkT = /\a. \(x:(a,Int)). x `cast` sym (CoT a)
-- where CoT is the coercion TyCon assoicated with the newtype
--
-- The call (wrapNewTypeBody T [a] e) returns the
-- body of the wrapper, namely
--      e `cast` (CoT [a])
--
-- If a coercion constructor is provided in the newtype, then we use
-- it, otherwise the wrap/unwrap are both no-ops
--
-- If the we are dealing with a newtype *instance*, we have a second coercion
-- identifying the family instance with the constructor of the newtype
-- instance.  This coercion is applied in any case (ie, composed with the
-- coercion constructor of the newtype or applied by itself).
wrapNewTypeBody tycon args result_expr
  = ASSERT( isNewTyCon tycon )
    -- First apply any family-instance cast, then cast the result into
    -- the newtype itself (sym, because we go representation -> newtype).
    wrapFamInstBody tycon args $
    mkCast result_expr (mkSymCo co)
  where
    co = mkUnbranchedAxInstCo Representational (newTyConCo tycon) args
-- When unwrapping, we do *not* apply any family coercion, because this will
-- be done via a CoPat by the type checker. We have to do it this way as
-- computing the right type arguments for the coercion requires more than just
-- a spliting operation (cf, TcPat.tcConPat).
unwrapNewTypeBody :: TyCon -> [Type] -> CoreExpr -> CoreExpr
-- Inverse of 'wrapNewTypeBody': cast a value of the newtype to its
-- representation type.  Note: no family coercion is applied here; see
-- the comment above (it is done via a CoPat by the type checker).
unwrapNewTypeBody tycon args result_expr
  = ASSERT( isNewTyCon tycon )
    mkCast result_expr (mkUnbranchedAxInstCo Representational (newTyConCo tycon) args)
-- If the type constructor is a representation type of a data instance, wrap
-- the expression into a cast adjusting the expression type, which is an
-- instance of the representation type, to the corresponding instance of the
-- family instance type.
-- See Note [Wrappers for data instance tycons]
wrapFamInstBody :: TyCon -> [Type] -> CoreExpr -> CoreExpr
-- If the tycon is the representation tycon of a data family instance,
-- cast the body to the corresponding family-instance type; otherwise
-- the body is returned unchanged.
-- See Note [Wrappers for data instance tycons]
wrapFamInstBody tycon args body =
  case tyConFamilyCoercion_maybe tycon of
    Just co_con -> mkCast body (mkSymCo (mkUnbranchedAxInstCo Representational co_con args))
    Nothing     -> body
-- Same as `wrapFamInstBody`, but for type family instances, which are
-- represented by a `CoAxiom`, and not a `TyCon`
wrapTypeFamInstBody :: CoAxiom br -> Int -> [Type] -> CoreExpr -> CoreExpr
-- Cast a body into the type-family-instance type, using branch @ind@
-- of the family's CoAxiom instantiated at @args@.
wrapTypeFamInstBody axiom ind args body = mkCast body (mkSymCo inst_co)
  where
    inst_co = mkAxInstCo Representational axiom ind args
wrapTypeUnbranchedFamInstBody :: CoAxiom Unbranched -> [Type] -> CoreExpr -> CoreExpr
-- An unbranched axiom has exactly one branch, namely index 0.
wrapTypeUnbranchedFamInstBody axiom args body
  = wrapTypeFamInstBody axiom 0 args body
unwrapFamInstScrut :: TyCon -> [Type] -> CoreExpr -> CoreExpr
-- Cast a scrutinee from the family type to the representation type
-- (data instances only); a no-op for ordinary tycons.
unwrapFamInstScrut tycon args scrut =
  case tyConFamilyCoercion_maybe tycon of
    Just co_con -> mkCast scrut (mkUnbranchedAxInstCo Representational co_con args)
    Nothing     -> scrut
unwrapTypeFamInstScrut :: CoAxiom br -> Int -> [Type] -> CoreExpr -> CoreExpr
-- Cast a scrutinee from the family type to its representation, using
-- branch @ind@ of the axiom instantiated at @args@.
unwrapTypeFamInstScrut axiom ind args scrut = scrut `mkCast` inst_co
  where
    inst_co = mkAxInstCo Representational axiom ind args
unwrapTypeUnbranchedFamInstScrut :: CoAxiom Unbranched -> [Type] -> CoreExpr -> CoreExpr
-- An unbranched axiom has exactly one branch, namely index 0.
unwrapTypeUnbranchedFamInstScrut axiom args scrut
  = unwrapTypeFamInstScrut axiom 0 args scrut
{-
************************************************************************
* *
\subsection{Primitive operations}
* *
************************************************************************
-}
-- | Build the wired-in Id for a primop, with its type, arity,
-- strictness signature and any built-in rewrite rules.
mkPrimOpId :: PrimOp -> Id
mkPrimOpId prim_op
  = id
  where
    -- NB: 'id' and 'name' are mutually recursive (the wired-in name
    -- records the Id it names); this knot is tied lazily.
    (tyvars,arg_tys,res_ty, arity, strict_sig) = primOpSig prim_op
    ty   = mkForAllTys tyvars (mkFunTys arg_tys res_ty)
    name = mkWiredInName gHC_PRIM (primOpOcc prim_op)
                         (mkPrimOpIdUnique (primOpTag prim_op))
                         (AnId id) UserSyntax
    id   = mkGlobalId (PrimOpId prim_op) name ty info
    info = noCafIdInfo
           `setRuleInfo` mkRuleInfo (maybeToList $ primOpRules name prim_op)
           `setArityInfo`         arity
           `setStrictnessInfo`    strict_sig
           `setInlinePragInfo`    neverInlinePragma
       -- We give PrimOps a NOINLINE pragma so that we don't
       -- get silly warnings from Desugar.dsRule (the inline_shadows_rule
       -- test) about a RULE conflicting with a possible inlining
       -- cf Trac #7287
-- For each ccall we manufacture a separate CCallOpId, giving it
-- a fresh unique, a type that is correct for this particular ccall,
-- and a CCall structure that gives the correct details about calling
-- convention etc.
--
-- The *name* of this Id is a local name whose OccName gives the full
-- details of the ccall, type and all. This means that the interface
-- file reader can reconstruct a suitable Id
-- | Manufacture a fresh Id for one specific ccall, carrying the exact
-- calling-convention details in its (encoded) occurrence name.
mkFCallId :: DynFlags -> Unique -> ForeignCall -> Type -> Id
mkFCallId dflags uniq fcall ty
  = ASSERT( isEmptyVarSet (tyVarsOfType ty) )
    -- A CCallOpId should have no free type variables;
    -- when doing substitutions won't substitute over it
    mkGlobalId (FCallId fcall) name ty info
  where
    occ_str = showSDoc dflags (braces (ppr fcall <+> ppr ty))
    -- The "occurrence name" of a ccall is the full info about the
    -- ccall; it is encoded, but may have embedded spaces etc!
    name = mkFCallName uniq occ_str
    info = noCafIdInfo
           `setArityInfo`          arity
           `setStrictnessInfo`     strict_sig
    (_, tau)        = tcSplitForAllTys ty
    (arg_tys, _)    = tcSplitFunTys tau
    arity           = length arg_tys
    strict_sig      = mkClosedStrictSig (replicate arity topDmd) topRes
    -- the call does not claim to be strict in its arguments, since they
    -- may be lifted (foreign import prim) and the called code doesn't
    -- necessarily force them. See Trac #11076.
{-
************************************************************************
* *
\subsection{DictFuns and default methods}
* *
************************************************************************
Note [Dict funs and default methods]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Dict funs and default methods are *not* ImplicitIds. Their definition
involves user-written code, so we can't figure out their strictness etc
based on fixed info, as we can for constructors and record selectors (say).
NB: See also Note [Exported LocalIds] in Id
-}
mkDictFunId :: Name      -- Name to use for the dict fun;
            -> [TyVar]   -- Quantified type variables of the instance
            -> ThetaType -- Instance context
            -> Class     -- Class the instance is for
            -> [Type]    -- Instance head types
            -> Id
-- Implements the DFun Superclass Invariant (see TcInstDcls)
-- See Note [Dict funs and default methods]
mkDictFunId dfun_name tvs theta clas tys
  = mkExportedLocalId (DFunId n_silent is_nt)
                      dfun_name
                      dfun_ty
  where
    is_nt = isNewTyCon (classTyCon clas)
    -- n_silent counts the silent superclass arguments added to the type
    (n_silent, dfun_ty) = mkDictFunTy tvs theta clas tys
-- | Construct the type of a dictionary function, prepending the
-- "silent" superclass arguments required by the DFun Superclass
-- Invariant; returns how many silent arguments were added.
mkDictFunTy :: [TyVar] -> ThetaType -> Class -> [Type] -> (Int, Type)
mkDictFunTy tvs theta clas tys
  = (length silent_theta, dfun_ty)
  where
    dfun_ty = mkSigmaTy tvs (silent_theta ++ theta) (mkClassPred clas tys)
    silent_theta
      | null tvs, null theta
      = []
      | otherwise
      = filterOut discard $
        substTheta (zipTopTvSubst (classTyVars clas) tys)
                   (classSCTheta clas)
        -- See Note [Silent Superclass Arguments]
    -- Drop superclass constraints already provided by the context
    discard pred = any (`eqPred` pred) theta
                   -- See the DFun Superclass Invariant in TcInstDcls
{-
************************************************************************
* *
\subsection{Un-definable}
* *
************************************************************************
These Ids can't be defined in Haskell. They could be defined in
unfoldings in the wired-in GHC.Prim interface file, but we'd have to
ensure that they were definitely, definitely inlined, because there is
no curried identifier for them. That's what mkCompulsoryUnfolding
does. If we had a way to get a compulsory unfolding from an interface
file, we could do that, but we don't right now.
unsafeCoerce# isn't so much a PrimOp as a phantom identifier, that
just gets expanded into a type coercion wherever it occurs. Hence we
add it as a built-in Id with an unfolding here.
The type variables we use here are "open" type variables: this means
they can unify with both unlifted and lifted types. Hence we provide
another gun with which to shoot yourself in the foot.
-}
-- Wired-in Names for the magic Ids defined below.  Each pairs a
-- module/occurrence with a fixed unique key and the Id it names.
lazyIdName, unsafeCoerceName, nullAddrName, seqName,
   realWorldName, voidPrimIdName, coercionTokenName,
   magicDictName, coerceName, proxyName, dollarName, oneShotName,
   runRWName, noinlineIdName :: Name
unsafeCoerceName  = mkWiredInIdName gHC_PRIM  (fsLit "unsafeCoerce#")  unsafeCoerceIdKey  unsafeCoerceId
nullAddrName      = mkWiredInIdName gHC_PRIM  (fsLit "nullAddr#")      nullAddrIdKey      nullAddrId
seqName           = mkWiredInIdName gHC_PRIM  (fsLit "seq")            seqIdKey           seqId
realWorldName     = mkWiredInIdName gHC_PRIM  (fsLit "realWorld#")     realWorldPrimIdKey realWorldPrimId
voidPrimIdName    = mkWiredInIdName gHC_PRIM  (fsLit "void#")          voidPrimIdKey      voidPrimId
lazyIdName        = mkWiredInIdName gHC_MAGIC (fsLit "lazy")           lazyIdKey          lazyId
coercionTokenName = mkWiredInIdName gHC_PRIM  (fsLit "coercionToken#") coercionTokenIdKey coercionTokenId
magicDictName     = mkWiredInIdName gHC_PRIM  (fsLit "magicDict")      magicDictKey       magicDictId
coerceName        = mkWiredInIdName gHC_PRIM  (fsLit "coerce")         coerceKey          coerceId
proxyName         = mkWiredInIdName gHC_PRIM  (fsLit "proxy#")         proxyHashKey       proxyHashId
dollarName        = mkWiredInIdName gHC_BASE  (fsLit "$")              dollarIdKey        dollarId
oneShotName       = mkWiredInIdName gHC_MAGIC (fsLit "oneShot")        oneShotKey         oneShotId
runRWName         = mkWiredInIdName gHC_MAGIC (fsLit "runRW#")         runRWKey           runRWId
noinlineIdName    = mkWiredInIdName gHC_MAGIC (fsLit "noinline")       noinlineIdKey      noinlineId
dollarId :: Id  -- Note [dollarId magic]
-- Wired in so that ($) can have an open-kinded result type; the
-- unfolding is the ordinary definition \f x -> f x.
dollarId = pcMiscPrelId dollarName ty
             (noCafIdInfo `setUnfoldingInfo` unf)
  where
    fun_ty = mkFunTy alphaTy openBetaTy
    ty     = mkForAllTys [alphaTyVar, openBetaTyVar] $
             mkFunTy fun_ty fun_ty
    unf    = mkInlineUnfolding (Just 2) rhs
    [f,x]  = mkTemplateLocals [fun_ty, alphaTy]
    rhs    = mkLams [alphaTyVar, openBetaTyVar, f, x] $
             App (Var f) (Var x)
------------------------------------------------
-- proxy# :: forall a. Proxy# a
proxyHashId :: Id
-- proxy# :: forall a. Proxy# a  -- a zero-width evidence value
proxyHashId
  = pcMiscPrelId proxyName ty
       (noCafIdInfo `setUnfoldingInfo` evaldUnfolding) -- Note [evaldUnfoldings]
  where
    ty      = mkForAllTys [kv, tv] (mkProxyPrimTy k t)
    kv      = kKiVar
    k       = mkTyVarTy kv
    [tv]    = mkTemplateTyVars [k]
    t       = mkTyVarTy tv
------------------------------------------------
-- unsafeCoerce# :: forall a b. a -> b
unsafeCoerceId :: Id
-- unsafeCoerce# :: forall a b. a -> b
-- See Note [Unsafe coerce magic]: the compulsory unfolding expands
-- every occurrence into a cast along an unsafe coercion.
unsafeCoerceId
  = pcMiscPrelId unsafeCoerceName ty info
  where
    info = noCafIdInfo `setInlinePragInfo` alwaysInlinePragma
                       `setUnfoldingInfo`  mkCompulsoryUnfolding rhs
    ty  = mkForAllTys [openAlphaTyVar,openBetaTyVar]
                      (mkFunTy openAlphaTy openBetaTy)
    [x] = mkTemplateLocals [openAlphaTy]
    rhs = mkLams [openAlphaTyVar,openBetaTyVar,x] $
          Cast (Var x) (mkUnsafeCo openAlphaTy openBetaTy)
------------------------------------------------
nullAddrId :: Id
-- nullAddr# :: Addr#
-- The reason it is here is because we don't provide
-- a way to write this literal in Haskell.
nullAddrId = pcMiscPrelId nullAddrName addrPrimTy info
  where
    info = noCafIdInfo `setInlinePragInfo` alwaysInlinePragma
                       `setUnfoldingInfo`  mkCompulsoryUnfolding (Lit nullAddrLit)
------------------------------------------------
seqId :: Id     -- See Note [seqId magic]
seqId = pcMiscPrelId seqName ty info
  where
    info = noCafIdInfo `setInlinePragInfo` inline_prag
                       `setUnfoldingInfo`  mkCompulsoryUnfolding rhs
                       `setRuleInfo`       mkRuleInfo [seq_cast_rule]
    inline_prag = alwaysInlinePragma `setInlinePragmaActivation` ActiveAfter 0
                  -- Make 'seq' not inline-always, so that simpleOptExpr
                  -- (see CoreSubst.simple_app) won't inline 'seq' on the
                  -- LHS of rules.  That way we can have rules for 'seq';
                  -- see Note [seqId magic]
    ty  = mkForAllTys [alphaTyVar,betaTyVar]
                      (mkFunTy alphaTy (mkFunTy betaTy betaTy))
              -- NB argBetaTyVar; see Note [seqId magic]
    [x,y] = mkTemplateLocals [alphaTy, betaTy]
    -- seq's unfolding: evaluate x, then return y.
    rhs = mkLams [alphaTyVar,betaTyVar,x,y] (Case (Var x) x betaTy [(DEFAULT, [], Var y)])
    -- See Note [Built-in RULES for seq]
    -- NB: ru_nargs = 3, not 4, to match the code in
    --     Simplify.rebuildCase which tries to apply this rule
    seq_cast_rule = BuiltinRule { ru_name  = fsLit "seq of cast"
                                , ru_fn    = seqName
                                , ru_nargs = 3
                                , ru_try   = match_seq_of_cast }
match_seq_of_cast :: RuleFun
-- See Note [Built-in RULES for seq]
-- Rewrites (seq (x |> co) y) to (seq x y), discarding the cast on the
-- scrutinee; fails to match for any other argument shape.
match_seq_of_cast _ _ _ args =
  case args of
    [Type _, Type res_ty, Cast scrut co]
      -> Just (Var seqId `mkApps` [ Type (pFst (coercionKind co))
                                  , Type res_ty
                                  , scrut ])
    _ -> Nothing
------------------------------------------------
lazyId :: Id    -- See Note [lazyId magic]
-- lazy :: forall a. a -> a, with deliberately empty IdInfo: no
-- strictness and no unfolding, so the strictness analyser cannot see
-- through it.
lazyId = pcMiscPrelId lazyIdName lazy_ty noCafIdInfo
  where
    lazy_ty = mkForAllTys [alphaTyVar] (mkFunTy alphaTy alphaTy)
noinlineId :: Id -- See Note [noinlineId magic]
-- noinline :: forall a. a -> a, with deliberately empty IdInfo so the
-- simplifier never inlines through it.
noinlineId = pcMiscPrelId noinlineIdName noinline_ty noCafIdInfo
  where
    noinline_ty = mkForAllTys [alphaTyVar] (mkFunTy alphaTy alphaTy)
oneShotId :: Id -- See Note [The oneShot function]
oneShotId = pcMiscPrelId oneShotName ty info
  where
    info = noCafIdInfo `setInlinePragInfo` alwaysInlinePragma
                       `setUnfoldingInfo`  mkCompulsoryUnfolding rhs
    ty  = mkForAllTys [alphaTyVar, betaTyVar] (mkFunTy fun_ty fun_ty)
    fun_ty = mkFunTy alphaTy betaTy
    [body, x] = mkTemplateLocals [fun_ty, alphaTy]
    -- The entire magic: the lambda binder is marked one-shot, and the
    -- compulsory unfolding propagates that mark to the wrapped lambda.
    x' = setOneShotLambda x
    rhs = mkLams [alphaTyVar, betaTyVar, body, x'] $ Var body `App` Var x
runRWId :: Id -- See Note [runRW magic] in this module
runRWId = pcMiscPrelId runRWName ty info
  where
    info = noCafIdInfo `setInlinePragInfo` neverInlinePragma
                       `setStrictnessInfo` strict_sig
                       `setArityInfo`      1
    strict_sig = mkClosedStrictSig [strictApply1Dmd] topRes
      -- Important to express its strictness,
      -- since it is not inlined until CorePrep
      -- Also see Note [runRW arg] in CorePrep
    -- State# RealWorld
    stateRW = mkTyConApp statePrimTyCon [realWorldTy]
    -- o
    ret_ty  = openAlphaTy
    -- State# RealWorld -> o
    arg_ty  = stateRW `mkFunTy` ret_ty
    -- (State# RealWorld -> o) -> o
    ty      = mkForAllTys [openAlphaTyVar] (arg_ty `mkFunTy` ret_ty)
--------------------------------------------------------------------------------
magicDictId :: Id  -- See Note [magicDictId magic]
-- Place-holder identifier with type (forall a. a); occurrences are
-- rewritten by a built-in Prelude rule, so it must never be inlined.
magicDictId = pcMiscPrelId magicDictName dict_ty dict_info
  where
    dict_info = noCafIdInfo `setInlinePragInfo` neverInlinePragma
    dict_ty   = mkForAllTys [alphaTyVar] alphaTy
--------------------------------------------------------------------------------
coerceId :: Id
-- coerce :: forall a b. Coercible a b => a -> b
-- The compulsory unfolding opens the Coercible dictionary and casts
-- the argument along the representational coercion inside it.
coerceId = pcMiscPrelId coerceName ty info
  where
    info = noCafIdInfo `setInlinePragInfo` alwaysInlinePragma
                       `setUnfoldingInfo`  mkCompulsoryUnfolding rhs
    eqRTy     = mkTyConApp coercibleTyCon  [liftedTypeKind, alphaTy, betaTy]
    eqRPrimTy = mkTyConApp eqReprPrimTyCon [liftedTypeKind, alphaTy, betaTy]
    ty   = mkForAllTys [alphaTyVar, betaTyVar] $
           mkFunTys [eqRTy, alphaTy] betaTy
    [eqR,x,eq] = mkTemplateLocals [eqRTy, alphaTy, eqRPrimTy]
    rhs = mkLams [alphaTyVar, betaTyVar, eqR, x] $
          mkWildCase (Var eqR) eqRTy betaTy $
          [(DataAlt coercibleDataCon, [eq], Cast (Var x) (CoVarCo eq))]
{-
Note [dollarId magic]
~~~~~~~~~~~~~~~~~~~~~
The only reason that ($) is wired in is so that its type can be
forall (a:*, b:Open). (a->b) -> a -> b
That is, the return type can be unboxed. E.g. this is OK
foo $ True where foo :: Bool -> Int#
because ($) doesn't inspect or move the result of the call to foo.
See Trac #8739.
There is a special typing rule for ($) in TcExpr, so the type of ($)
isn't looked at there, BUT Lint subsequently (and rightly) complains
if sees ($) applied to Int# (say), unless we give it a wired-in type
as we do here.
Note [Unsafe coerce magic]
~~~~~~~~~~~~~~~~~~~~~~~~~~
We define a *primitive*
GHC.Prim.unsafeCoerce#
and then in the base library we define the ordinary function
Unsafe.Coerce.unsafeCoerce :: forall (a:*) (b:*). a -> b
unsafeCoerce x = unsafeCoerce# x
Notice that unsafeCoerce has a civilized (albeit still dangerous)
polymorphic type, whose type args have kind *. So you can't use it on
unboxed values (unsafeCoerce 3#).
In contrast unsafeCoerce# is even more dangerous because you *can* use
it on unboxed things, (unsafeCoerce# 3#) :: Int. Its type is
forall (a:OpenKind) (b:OpenKind). a -> b
Note [seqId magic]
~~~~~~~~~~~~~~~~~~
'GHC.Prim.seq' is special in several ways.
a) Its second arg can have an unboxed type
x `seq` (v +# w)
Hence its second type variable has ArgKind
b) Its fixity is set in LoadIface.ghcPrimIface
c) It has quite a bit of desugaring magic.
See DsUtils.lhs Note [Desugaring seq (1)] and (2) and (3)
d) There is some special rule handing: Note [User-defined RULES for seq]
e) See Note [Typing rule for seq] in TcExpr.
Note [User-defined RULES for seq]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Roman found situations where he had
case (f n) of _ -> e
where he knew that f (which was strict in n) would terminate if n did.
Notice that the result of (f n) is discarded. So it makes sense to
transform to
case n of _ -> e
Rather than attempt some general analysis to support this, I've added
enough support that you can do this using a rewrite rule:
RULE "f/seq" forall n. seq (f n) = seq n
You write that rule. When GHC sees a case expression that discards
its result, it mentally transforms it to a call to 'seq' and looks for
a RULE. (This is done in Simplify.rebuildCase.) As usual, the
correctness of the rule is up to you.
VERY IMPORTANT: to make this work, we give the RULE an arity of 1, not 2.
If we wrote
RULE "f/seq" forall n e. seq (f n) e = seq n e
with rule arity 2, then two bad things would happen:
- The magical desugaring done in Note [seqId magic] item (c)
for saturated application of 'seq' would turn the LHS into
a case expression!
- The code in Simplify.rebuildCase would need to actually supply
the value argument, which turns out to be awkward.
Note [Built-in RULES for seq]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We also have the following built-in rule for seq
seq (x `cast` co) y = seq x y
This eliminates unnecessary casts and also allows other seq rules to
match more often. Notably,
seq (f x `cast` co) y --> seq (f x) y
and now a user-defined rule for seq (see Note [User-defined RULES for seq])
may fire.
Note [lazyId magic]
~~~~~~~~~~~~~~~~~~~
lazy :: forall a?. a? -> a? (i.e. works for unboxed types too)
Used to lazify pseq: pseq a b = a `seq` lazy b
Also, no strictness: by being a built-in Id, all the info about lazyId comes from here,
not from GHC.Base.hi. This is important, because the strictness
analyser will spot it as strict!
Also no unfolding in lazyId: it gets "inlined" by a HACK in CorePrep.
It's very important to do this inlining *after* unfoldings are exposed
in the interface file. Otherwise, the unfolding for (say) pseq in the
interface file will not mention 'lazy', so if we inline 'pseq' we'll totally
miss the very thing that 'lazy' was there for in the first place.
See Trac #3259 for a real world example.
lazyId is defined in GHC.Base, so we don't *have* to inline it. If it
appears un-applied, we'll end up just calling it.
Note [noinlineId magic]
~~~~~~~~~~~~~~~~~~~~~~~
noinline :: forall a. a -> a
'noinline' is used to make sure that a function f is never inlined,
e.g., as in 'noinline f x'. Ordinarily, the identity function with NOINLINE
could be used to achieve this effect; however, this has the unfortunate
result of leaving a (useless) call to noinline at runtime. So we have
a little bit of magic to optimize away 'noinline' after we are done
running the simplifier.
'noinline' needs to be wired-in because it gets inserted automatically
when we serialize an expression to the interface format, and we DON'T
want use its fingerprints.
Note [runRW magic]
~~~~~~~~~~~~~~~~~~
Some definitions, for instance @runST@, must have careful control over float out
of the bindings in their body. Consider this use of @runST@,
f x = runST ( \ s -> let (a, s') = newArray# 100 [] s
(_, s'') = fill_in_array_or_something a x s'
in freezeArray# a s'' )
If we inline @runST@, we'll get:
f x = let (a, s') = newArray# 100 [] realWorld#{-NB-}
(_, s'') = fill_in_array_or_something a x s'
in freezeArray# a s''
And now if we allow the @newArray#@ binding to float out to become a CAF,
we end up with a result that is totally and utterly wrong:
f = let (a, s') = newArray# 100 [] realWorld#{-NB-} -- YIKES!!!
in \ x ->
let (_, s'') = fill_in_array_or_something a x s'
in freezeArray# a s''
All calls to @f@ will share a {\em single} array! Clearly this is nonsense and
must be prevented.
This is what @runRW#@ gives us: by being inlined extremely late in the
optimization (right before lowering to STG, in CorePrep), we can ensure that
no further floating will occur. This allows us to safely inline things like
@runST@, which are otherwise needlessly expensive (see #10678 and #5916).
While @runRW#@ is defined in @GHC.Magic@, we override its type in @MkId@
to be open-kinded,
runRW# :: (o :: OpenKind) => (State# RealWorld -> (# State# RealWorld, o #))
-> (# State# RealWorld, o #)
Note [The oneShot function]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
In the context of making left-folds fuse somewhat okish (see ticket #7994
and Note [Left folds via right fold]) it was determined that it would be useful
if library authors could explicitly tell the compiler that a certain lambda is
called at most once. The oneShot function allows that.
Like most magic functions it has a compulsory unfolding, so there is no need
for a real definition somewhere. We have one in GHC.Magic for the convenience
of putting the documentation there.
It uses `setOneShotLambda` on the lambda's binder. That is the whole magic:
A typical call looks like
oneShot (\y. e)
after unfolding the definition `oneShot = \f \x[oneshot]. f x` we get
(\f \x[oneshot]. f x) (\y. e)
--> \x[oneshot]. ((\y.e) x)
--> \x[oneshot] e[x/y]
which is what we want.
It is only effective if this bit survives as long as possible and makes it into
the interface in unfoldings (See Note [Preserve OneShotInfo]). Also see
https://ghc.haskell.org/trac/ghc/wiki/OneShot.
Note [magicDictId magic]
~~~~~~~~~~~~~~~~~~~~~~~~~
The identifier `magicDict` is just a place-holder, which is used to
implement a primitive that we cannot define in Haskell but we can write
in Core. It is declared with a place-holder type:
magicDict :: forall a. a
The intention is that the identifier will be used in a very specific way,
to create dictionaries for classes with a single method. Consider a class
like this:
class C a where
f :: T a
We are going to use `magicDict`, in conjunction with a built-in Prelude
rule, to cast values of type `T a` into dictionaries for `C a`. To do
this, we define a function like this in the library:
data WrapC a b = WrapC (C a => Proxy a -> b)
withT :: (C a => Proxy a -> b)
-> T a -> Proxy a -> b
withT f x y = magicDict (WrapC f) x y
The purpose of `WrapC` is to avoid having `f` instantiated.
Also, it avoids impredicativity, because `magicDict`'s type
cannot be instantiated with a forall. The field of `WrapC` contains
a `Proxy` parameter which is used to link the type of the constraint,
`C a`, with the type of the `Wrap` value being made.
Next, we add a built-in Prelude rule (see prelude/PrelRules.hs),
which will replace the RHS of this definition with the appropriate
definition in Core. The rewrite rule works as follows:
magicDict@t (wrap@a@b f) x y
---->
f (x `cast` co a) y
The `co` coercion is the newtype-coercion extracted from the type-class.
The type class is obtained by looking at the type of wrap.
-------------------------------------------------------------
@realWorld#@ used to be a magic literal, \tr{void#}. If things get
nasty as-is, change it back to a literal (@Literal@).
voidArgId is a Local Id used simply as an argument in functions
where we just want an arg to avoid having a thunk of unlifted type.
E.g.
x = \ void :: Void# -> (# p, q #)
This comes up in strictness analysis
Note [evaldUnfoldings]
~~~~~~~~~~~~~~~~~~~~~~
The evaldUnfolding makes it look that some primitive value is
evaluated, which in turn makes Simplify.interestingArg return True,
which in turn makes INLINE things applied to said value likely to be
inlined.
-}
-- | The wired-in 'Id' of type @State# RealWorld@ that threads the world
-- token through @IO@ code.  It is marked one-shot ('stateHackOneShot') and
-- as evaluated (see Note [evaldUnfoldings]) so the simplifier treats
-- applications involving it favourably.
realWorldPrimId :: Id -- :: State# RealWorld
realWorldPrimId = pcMiscPrelId realWorldName realWorldStatePrimTy
                    (noCafIdInfo `setUnfoldingInfo` evaldUnfolding -- Note [evaldUnfoldings]
                                 `setOneShotInfo` stateHackOneShot)
-- | Global constant of type @Void#@.  Flagged as evaluated so that
-- INLINE things applied to it are likely to be inlined
-- (see Note [evaldUnfoldings]).
voidPrimId :: Id     -- Global constant :: Void#
voidPrimId  = pcMiscPrelId voidPrimIdName voidPrimTy
                (noCafIdInfo `setUnfoldingInfo` evaldUnfolding)    -- Note [evaldUnfoldings]
-- | A local, lambda-bound 'Id' of type @Void#@, used as a dummy argument
-- in functions where we just want an argument to avoid having a thunk of
-- unlifted type (see the comment block above).
voidArgId :: Id       -- Local lambda-bound :: Void#
voidArgId = mkSysLocal (fsLit "void") voidArgIdKey voidPrimTy
-- | Placeholder 'Id' (an equality between @()@ and @()@, built from
-- 'eqPrimTyCon') used to stand in for 'Coercion' terms when translating
-- to STG, where coercions carry no information.
coercionTokenId :: Id         -- :: () ~ ()
coercionTokenId -- Used to replace Coercion terms when we go to STG
  = pcMiscPrelId coercionTokenName
                 (mkTyConApp eqPrimTyCon [liftedTypeKind, unitTy, unitTy])
                 noCafIdInfo
-- | Build a miscellaneous wired-in prelude 'Id' from a name, a type and
-- the 'IdInfo' it should carry.
pcMiscPrelId :: Name -> Type -> IdInfo -> Id
pcMiscPrelId name ty info
  = mkVanillaGlobalWithInfo name ty info
    -- We lie and say the thing is imported; otherwise, we get into
    -- a mess with dependency analysis; e.g., core2stg may heave in
    -- random calls to GHCbase.unpackPS__.  If GHCbase is the module
    -- being compiled, then it's just a matter of luck if the definition
    -- will be in "the right place" to be in scope.
| rahulmutt/ghcvm | compiler/Eta/BasicTypes/MkId.hs | bsd-3-clause | 60,125 | 0 | 21 | 16,139 | 7,525 | 4,116 | 3,409 | 601 | 6 |
-----------------------------------------------------------------------------
-- |
-- Module : Type.Reflection.Unsafe
-- Copyright : (c) The University of Glasgow, CWI 2001--2015
-- License : BSD-style (see the file libraries/base/LICENSE)
--
-- The representations of the types 'TyCon' and 'TypeRep', and the function
-- 'mkTyCon' which is used by derived instances of 'Typeable' to construct
-- 'TyCon's.
--
-- Be warned, these functions can be used to construct ill-kinded
-- type representations.
--
-----------------------------------------------------------------------------
{-# LANGUAGE TypeInType, ScopedTypeVariables #-}
module Type.Reflection.Unsafe (
-- * Type representations
TypeRep, mkTrApp, mkTyCon, typeRepFingerprint, someTypeRepFingerprint
-- * Kind representations
, KindRep(..), TypeLitSort(..)
-- * Type constructors
, TyCon, mkTrCon, tyConKindRep, tyConKindArgs, tyConFingerprint
) where
import Data.Typeable.Internal hiding (mkTrApp)
import qualified Data.Typeable.Internal as TI
-- | Build the representation of the type application @a b@ from the
-- representations of the function part @a@ and the argument part @b@.
-- Delegates to the kind-checked constructor from "Data.Typeable.Internal".
mkTrApp :: forall k1 k2 (a :: k1 -> k2) (b :: k1).
           TypeRep (a :: k1 -> k2)
        -> TypeRep (b :: k1)
        -> TypeRep (a b)
mkTrApp f x = TI.mkTrAppChecked f x
| shlevy/ghc | libraries/base/Type/Reflection/Unsafe.hs | bsd-3-clause | 1,279 | 0 | 10 | 232 | 168 | 113 | 55 | -1 | -1 |
-- |
-- Module : Data.Array.Accelerate.CUDA.CodeGen.Tuple
-- Copyright : [2008..2011] Manuel M T Chakravarty, Gabriele Keller, Sean Lee, Trevor L. McDonell
-- License : BSD3
--
-- Maintainer : Manuel M T Chakravarty <chak@cse.unsw.edu.au>
-- Stability : experimental
-- Portability : non-partable (GHC extensions)
--
module Data.Array.Accelerate.CUDA.CodeGen.Tuple
(
mkTupleType, mkTupleTypeAsc, mkTuplePartition
)
where
import Data.Maybe
import Language.C
import Data.Array.Accelerate.CUDA.CodeGen.Data
import Data.Array.Accelerate.CUDA.CodeGen.Util
-- | Generate the C type declarations, together with the matching accessor
-- function, for a tuple of the given element types.  'Nothing' produces
-- the output types (suffix @Out@, with a @set@ accessor); @'Just' p@
-- produces the types of input array @p@ (suffix @Inp@, with a @get@
-- accessor).
mkTupleType :: Maybe Int -> [CType] -> [CExtDecl]
mkTupleType subscript ty = typeDecls ++ [accessFn]
  where
    arity    = length ty
    volat    = isNothing subscript          -- output arrays are volatile
    suffix   = case subscript of
                 Nothing -> "Out"
                 Just p  -> "In" ++ show p
    accessFn = case subscript of
                 Nothing -> mkSet arity
                 Just p  -> mkGet arity p
    typeDecls
      -- singleton tuples collapse to plain typedefs; larger arities
      -- become C structs
      | arity <= 1 = [ mkTypedef ("Ty"  ++ suffix) False False (head ty)
                     , mkTypedef ("Arr" ++ suffix) volat True  (head ty) ]
      | otherwise  = [ mkStruct  ("Ty"  ++ suffix) False False ty
                     , mkStruct  ("Arr" ++ suffix) volat True  ty ]
-- A variant of tuple generation for associative array computations, generating
-- base get and set functions, and the given number of type synonyms.
--
mkTupleTypeAsc :: Int -> [CType] -> [CExtDecl]
mkTupleTypeAsc syn ty = types ++ synonyms ++ [mkSet n, mkGet n 0]
  where
    n = length ty
    -- typedef the numbered input names onto the base "Out" types
    alias v name target =
      mkTypedef (name ++ show v) False False
                [CTypeDef (internalIdent target) internalNode]
    synonyms = concatMap
                 (\v -> [ alias v "TyIn" "TyOut", alias v "ArrIn" "ArrOut" ])
                 (take syn ([0 ..] :: [Int]))
    types
      | n <= 1    = [ mkTypedef "TyOut"  False False (head ty)
                    , mkTypedef "ArrOut" True  True  (head ty) ]
      | otherwise = [ mkStruct  "TyOut"  False False ty
                    , mkStruct  "ArrOut" True  True  ty ]
-- Getter and setter functions for reading and writing (respectively) to global
-- device arrays. Since arrays of tuples are stored as tuples of arrays, we
-- retrieve each component separately and pack into a local structure.
--
-- This unfortunately also means that we can not declare an overloaded indexing
-- operator[], since it is not possible to return an l-value to the discrete
-- component arrays (we could read, but not write).
--
-- NOTE: The Accelerate language uses snoc based tuple projection, so the last
-- field of the structure is named 'a' instead of the first, while the
-- arrays themselves are still stored "in order".
--
-- | Generate the C function
-- @static inline __attribute__((device)) TyIn<prj> get<prj>(const ArrIn<prj> d_in<prj>, const Ix idx)@
-- which reads element @idx@ of input array @prj@.  Since arrays of tuples
-- are stored as tuples of arrays, each component is indexed separately and
-- the results are packed into a local @TyIn<prj>@ value which is returned.
mkGet :: Int -> Int -> CExtDecl
mkGet n prj =
  CFDefExt
    (CFunDef
      [CStorageSpec (CStatic internalNode), CTypeQual (CInlineQual internalNode), CTypeQual (CAttrQual (CAttr (internalIdent "device") [] internalNode)), CTypeSpec (CTypeDef (internalIdent ("TyIn" ++ show prj)) internalNode)]
      (CDeclr (Just (internalIdent ("get" ++ show prj))) [CFunDeclr (Right ([CDecl [CTypeQual (CConstQual internalNode), CTypeSpec (CTypeDef (internalIdent ("ArrIn" ++ show prj)) internalNode)] [(Just (CDeclr (Just arrIn) [] Nothing [] internalNode), Nothing, Nothing)] internalNode, CDecl [CTypeQual (CConstQual internalNode), CTypeSpec (CTypeDef (internalIdent "Ix") internalNode)] [(Just (CDeclr (Just (internalIdent "idx")) [] Nothing [] internalNode), Nothing, Nothing)] internalNode], False)) [] internalNode] Nothing [] internalNode)
      []
      -- function body: declare "x" initialised from the array(s), return it
      (CCompound [] [CBlockDecl (CDecl [CTypeSpec (CTypeDef (internalIdent ("TyIn" ++ show prj)) internalNode)] [(Just (CDeclr (Just (internalIdent "x")) [] Nothing [] internalNode),Just initList,Nothing)] internalNode),CBlockStmt (CReturn (Just (CVar (internalIdent "x") internalNode)) internalNode)] internalNode)
      internalNode)
  where
    -- name of the formal parameter holding the input array(s)
    arrIn = internalIdent ("d_in" ++ show prj)
    -- singleton: plain index; otherwise index each component array,
    -- reversed because Accelerate uses snoc-based tuple projection
    -- (field 'a' is the last component)
    initList
      | n <= 1    = CInitExpr (CIndex (CVar arrIn internalNode) (CVar (internalIdent "idx") internalNode) internalNode) internalNode
      | otherwise = flip CInitList internalNode . reverse . take n . flip map (enumFrom 0 :: [Int]) $ \v ->
          ([], CInitExpr (CIndex (CMember (CVar arrIn internalNode) (internalIdent ('a':show v)) False internalNode) (CVar (internalIdent "idx") internalNode) internalNode) internalNode)
-- | Generate the C function
-- @static inline __attribute__((device)) void set(ArrOut d_out, const Ix idx, const TyOut val)@
-- which writes @val@ to element @idx@ of the output array.  For tuples,
-- each component of @val@ is written to its own component array.
mkSet :: Int -> CExtDecl
mkSet n =
  CFDefExt
    (CFunDef
      [CStorageSpec (CStatic internalNode),CTypeQual (CInlineQual internalNode),CTypeQual (CAttrQual (CAttr (internalIdent "device") [] internalNode)),CTypeSpec (CVoidType internalNode)]
      (CDeclr (Just (internalIdent "set")) [CFunDeclr (Right ([CDecl [CTypeSpec (CTypeDef (internalIdent "ArrOut") internalNode)] [(Just (CDeclr (Just (internalIdent "d_out")) [] Nothing [] internalNode),Nothing,Nothing)] internalNode,CDecl [CTypeQual (CConstQual internalNode),CTypeSpec (CTypeDef (internalIdent "Ix") internalNode)] [(Just (CDeclr (Just (internalIdent "idx")) [] Nothing [] internalNode),Nothing,Nothing)] internalNode,CDecl [CTypeQual (CConstQual internalNode),CTypeSpec (CTypeDef (internalIdent "TyOut") internalNode)] [(Just (CDeclr (Just (internalIdent "val")) [] Nothing [] internalNode),Nothing,Nothing)] internalNode],False)) [] internalNode] Nothing [] internalNode)
      []
      (CCompound [] assignList internalNode)
      internalNode)
  where
    -- singleton: one assignment "d_out[idx] = val"; otherwise one
    -- assignment per component, reversed to follow snoc-based naming
    -- ('a0' is the last declared field)
    assignList
      | n <= 1    = [CBlockStmt (CExpr (Just (CAssign CAssignOp (CIndex (CVar (internalIdent "d_out") internalNode) (CVar (internalIdent "idx") internalNode) internalNode) (CVar (internalIdent "val") internalNode) internalNode)) internalNode)]
      | otherwise = reverse . take n . flip map (enumFrom 0 :: [Int]) $ \v ->
          CBlockStmt (CExpr (Just (CAssign CAssignOp (CIndex (CMember (CVar (internalIdent "d_out") internalNode) (internalIdent ('a':show v)) False internalNode) (CVar (internalIdent "idx") internalNode) internalNode) (CMember (CVar (internalIdent "val") internalNode) (internalIdent ('a':show v)) False internalNode) internalNode)) internalNode)
-- | Generate the C function
-- @static inline __attribute__((device)) <tyName> partition(const void* s_data, const int n)@
-- which carves the untyped memory block @s_data@ into @length ty@ typed
-- component pointers (each presumably holding @n@ elements laid out
-- consecutively -- the next pointer starts at @&prev[n]@) and packs them
-- into a value of type @tyName@.
mkTuplePartition :: String -> [CType] -> Bool -> CExtDecl
mkTuplePartition tyName ty isVolatile =
  CFDefExt
    (CFunDef
      [CStorageSpec (CStatic internalNode),CTypeQual (CInlineQual internalNode),CTypeQual (CAttrQual (CAttr (internalIdent "device") [] internalNode)),CTypeSpec (CTypeDef (internalIdent tyName) internalNode)]
      (CDeclr (Just (internalIdent "partition")) [CFunDeclr (Right ([CDecl [CTypeQual (CConstQual internalNode),CTypeSpec (CVoidType internalNode)] [(Just (CDeclr (Just (internalIdent "s_data")) [CPtrDeclr [] internalNode] Nothing [] internalNode),Nothing,Nothing)] internalNode,CDecl [CTypeQual (CConstQual internalNode),CTypeSpec (CIntType internalNode)] [(Just (CDeclr (Just (internalIdent "n")) [] Nothing [] internalNode),Nothing,Nothing)] internalNode],False)) [] internalNode] Nothing [] internalNode)
      []
      -- body: the pointer declarations, then "tyName r = {...}; return r;"
      (CCompound [] (stmts ++ [CBlockDecl (CDecl [CTypeSpec (CTypeDef (internalIdent tyName) internalNode)] [(Just (CDeclr (Just (internalIdent "r")) [] Nothing [] internalNode),Just initp,Nothing)] internalNode) ,CBlockStmt (CReturn (Just (CVar (internalIdent "r") internalNode)) internalNode)]) internalNode)
      internalNode)
  where
    n     = length ty
    var s = CVar (internalIdent s) internalNode
    -- component pointer names p<n-1> .. p0 (snoc order)
    names = map (('p':) . show) [n-1,n-2..0]
    -- initialiser list for the result value, one entry per pointer
    initp = mkInitList (map var names)
    -- optional "volatile" qualifier on every component declaration
    volat = [CTypeQual (CVolatQual internalNode) | isVolatile]
    -- declarations "t* p<i> = (t*) <addr>;" where the first address is
    -- s_data itself and each subsequent one is &prev[n]
    stmts = zipWith (\l r -> CBlockDecl (CDecl (volat ++ map CTypeSpec l) r internalNode)) ty
          . zipWith3 (\p t s -> [(Just (CDeclr (Just (internalIdent p)) [CPtrDeclr [] internalNode] Nothing [] internalNode),Just (CInitExpr (CCast (CDecl (map CTypeSpec t) [(Just (CDeclr Nothing [CPtrDeclr [] internalNode] Nothing [] internalNode),Nothing,Nothing)] internalNode) s internalNode) internalNode),Nothing)]) names ty
          $ var "s_data" : map (\v -> CUnary CAdrOp (CIndex (var v) (CVar (internalIdent "n") internalNode) internalNode) internalNode) names
| wilbowma/accelerate | Data/Array/Accelerate/CUDA/CodeGen/Tuple.hs | bsd-3-clause | 7,925 | 0 | 29 | 1,365 | 2,802 | 1,449 | 1,353 | 69 | 1 |
{-# LANGUAGE CPP #-}
-----------------------------------------------------------------------------
-- |
-- Module : Distribution.Client.Freeze
-- Copyright : (c) David Himmelstrup 2005
-- Duncan Coutts 2011
-- License : BSD-like
--
-- Maintainer : cabal-devel@gmail.com
-- Stability : provisional
-- Portability : portable
--
-- The cabal freeze command
-----------------------------------------------------------------------------
module Distribution.Client.Freeze (
freeze, getFreezePkgs
) where
import Distribution.Client.Config ( SavedConfig(..) )
import Distribution.Client.Types
import Distribution.Client.Targets
import Distribution.Client.Dependency
import Distribution.Client.Dependency.Types
( ConstraintSource(..), LabeledPackageConstraint(..) )
import Distribution.Client.IndexUtils as IndexUtils
( getSourcePackages, getInstalledPackages )
import Distribution.Client.InstallPlan
( InstallPlan, PlanPackage )
import qualified Distribution.Client.InstallPlan as InstallPlan
import Distribution.Client.PkgConfigDb
( PkgConfigDb, readPkgConfigDb )
import Distribution.Client.Setup
( GlobalFlags(..), FreezeFlags(..), ConfigExFlags(..)
, RepoContext(..) )
import Distribution.Client.Sandbox.PackageEnvironment
( loadUserConfig, pkgEnvSavedConfig, showPackageEnvironment,
userPackageEnvironmentFile )
import Distribution.Client.Sandbox.Types
( SandboxPackageInfo(..) )
import Distribution.Package
( Package, packageId, packageName, packageVersion )
import Distribution.Simple.Compiler
( Compiler, compilerInfo, PackageDBStack )
import Distribution.Simple.PackageIndex (InstalledPackageIndex)
import Distribution.Simple.Program
( ProgramConfiguration )
import Distribution.Simple.Setup
( fromFlag, fromFlagOrDefault, flagToMaybe )
import Distribution.Simple.Utils
( die, notice, debug, writeFileAtomic )
import Distribution.System
( Platform )
import Distribution.Text
( display )
import Distribution.Verbosity
( Verbosity )
import Control.Monad
( when )
import qualified Data.ByteString.Lazy.Char8 as BS.Char8
#if !MIN_VERSION_base(4,8,0)
import Data.Monoid
( mempty )
#endif
import Data.Version
( showVersion )
import Distribution.Version
( thisVersion )
-- ------------------------------------------------------------
-- * The freeze command
-- ------------------------------------------------------------
-- | The @cabal freeze@ command: compute the set of packages the package in
-- the current directory depends on, and pin each of them to an exact
-- version by writing a constraints section to the user package
-- environment file.  With @--dry-run@ the would-be constraints are only
-- printed.
freeze :: Verbosity
       -> PackageDBStack
       -> RepoContext
       -> Compiler
       -> Platform
       -> ProgramConfiguration
       -> Maybe SandboxPackageInfo
       -> GlobalFlags
       -> FreezeFlags
       -> IO ()
freeze verbosity packageDBs repoCtxt comp platform conf mSandboxPkgInfo
       globalFlags freezeFlags = do

    pkgs <- getFreezePkgs
              verbosity packageDBs repoCtxt comp platform conf mSandboxPkgInfo
              globalFlags freezeFlags
    report pkgs

  where
    dryRun = fromFlag (freezeDryRun freezeFlags)

    -- Decide what to do with the computed package set.
    report pkgs
      | null pkgs = notice verbosity $ "No packages to be frozen. "
                                    ++ "As this package has no dependencies."
      | dryRun    = notice verbosity $ unlines $
                        "The following packages would be frozen:"
                      : formatPkgs pkgs
      | otherwise = freezePackages verbosity globalFlags pkgs
-- | Get the list of packages whose versions would be frozen by the @freeze@
-- command.  Reads the installed-package index, the source-package index and
-- the pkg-config database, resolves the single local target (the current
-- directory), and solves for an install plan.
getFreezePkgs :: Verbosity
              -> PackageDBStack
              -> RepoContext
              -> Compiler
              -> Platform
              -> ProgramConfiguration
              -> Maybe SandboxPackageInfo
              -> GlobalFlags
              -> FreezeFlags
              -> IO [PlanPackage]
getFreezePkgs verbosity packageDBs repoCtxt comp platform conf mSandboxPkgInfo
              globalFlags freezeFlags = do

    installedPkgIndex <- getInstalledPackages verbosity comp packageDBs conf
    sourcePkgDb       <- getSourcePackages    verbosity repoCtxt
    pkgConfigDb       <- readPkgConfigDb      verbosity conf

    -- The only target is the package in the current directory.
    pkgSpecifiers <- resolveUserTargets verbosity repoCtxt
                       (fromFlag $ globalWorldFile globalFlags)
                       (packageIndex sourcePkgDb)
                       [UserTargetLocalDir "."]

    sanityCheck pkgSpecifiers
    planPackages
      verbosity comp platform mSandboxPkgInfo freezeFlags
      installedPkgIndex sourcePkgDb pkgConfigDb pkgSpecifiers

  where
    -- Resolving a single local-dir target must yield exactly one
    -- source-package specifier; anything else is an internal error.
    sanityCheck pkgSpecifiers = do
      when (not . null $ [n | n@(NamedPackage _ _) <- pkgSpecifiers]) $
        die $ "internal error: 'resolveUserTargets' returned "
           ++ "unexpected named package specifiers!"
      when (length pkgSpecifiers /= 1) $
        die $ "internal error: 'resolveUserTargets' returned "
           ++ "unexpected source package specifiers!"
-- | Run the dependency solver for the target package and return the pruned
-- install plan (the target itself and any unneeded packages removed).
planPackages :: Verbosity
             -> Compiler
             -> Platform
             -> Maybe SandboxPackageInfo
             -> FreezeFlags
             -> InstalledPackageIndex
             -> SourcePackageDb
             -> PkgConfigDb
             -> [PackageSpecifier SourcePackage]
             -> IO [PlanPackage]
planPackages verbosity comp platform mSandboxPkgInfo freezeFlags
             installedPkgIndex sourcePkgDb pkgConfigDb pkgSpecifiers = do

    solver <- chooseSolver verbosity
              (fromFlag (freezeSolver freezeFlags)) (compilerInfo comp)

    notice verbosity "Resolving dependencies..."

    installPlan <- foldProgress logMsg die return $
                     resolveDependencies
                       platform (compilerInfo comp) pkgConfigDb
                       solver
                       resolverParams

    return $ pruneInstallPlan installPlan pkgSpecifiers

  where
    -- Solver configuration assembled from the freeze flags; constrains the
    -- target's enabled stanzas so test/benchmark deps are (only) included
    -- when requested.
    resolverParams =

        setMaxBackjumps (if maxBackjumps < 0 then Nothing
                                             else Just maxBackjumps)

      . setIndependentGoals independentGoals

      . setReorderGoals reorderGoals

      . setShadowPkgs shadowPkgs

      . setStrongFlags strongFlags

      . addConstraints
          [ let pkg = pkgSpecifierTarget pkgSpecifier
                pc = PackageConstraintStanzas pkg stanzas
            in LabeledPackageConstraint pc ConstraintSourceFreeze
          | pkgSpecifier <- pkgSpecifiers ]

      . maybe id applySandboxInstallPolicy mSandboxPkgInfo

      $ standardInstallPolicy installedPkgIndex sourcePkgDb pkgSpecifiers

    -- Progress messages from the solver are logged at debug level.
    logMsg message rest = debug verbosity message >> rest

    stanzas = [ TestStanzas | testsEnabled ]
           ++ [ BenchStanzas | benchmarksEnabled ]
    testsEnabled      = fromFlagOrDefault False $ freezeTests freezeFlags
    benchmarksEnabled = fromFlagOrDefault False $ freezeBenchmarks freezeFlags

    reorderGoals     = fromFlag (freezeReorderGoals     freezeFlags)
    independentGoals = fromFlag (freezeIndependentGoals freezeFlags)
    shadowPkgs       = fromFlag (freezeShadowPkgs       freezeFlags)
    strongFlags      = fromFlag (freezeStrongFlags      freezeFlags)
    maxBackjumps     = fromFlag (freezeMaxBackjumps     freezeFlags)
-- | Remove all unneeded packages from an install plan.
--
-- A package is unneeded if it is either
--
-- 1) the package that we are freezing, or
--
-- 2) not a dependency (directly or transitively) of the package we are
--    freezing.  This is useful for removing previously installed packages
--    which are no longer required from the install plan.
pruneInstallPlan :: InstallPlan
                 -> [PackageSpecifier SourcePackage]
                 -> [PlanPackage]
pruneInstallPlan installPlan pkgSpecifiers =
    case targetIds of
      -- exactly one target: keep its dependency closure minus itself
      [target] -> [ pp | pp <- closure, packageId pp /= packageId target ]
      _        -> error $ "internal error: 'pruneInstallPlan' given "
                       ++ "unexpected package specifiers!"
  where
    targetIds = [ packageId pkg
                | SpecificSourcePackage pkg <- pkgSpecifiers ]
    closure   = InstallPlan.dependencyClosure installPlan
                                              (map fakeUnitId targetIds)
-- | Write one exact-version constraint per package into the user package
-- environment file (@cabal.config@), preserving any configuration already
-- loaded from the user's constraints file.
freezePackages :: Package pkg => Verbosity -> GlobalFlags -> [pkg] -> IO ()
freezePackages verbosity globalFlags pkgs = do

    pkgEnv <- fmap (createPkgEnv . addFrozenConstraints) $
                loadUserConfig verbosity ""
                  (flagToMaybe . globalConstraintsFile $ globalFlags)
    writeFileAtomic userPackageEnvironmentFile $ showPkgEnv pkgEnv
  where
    -- Replace any existing constraints with the frozen ones.
    addFrozenConstraints config =
      config {
        savedConfigureExFlags = (savedConfigureExFlags config) {
          configExConstraints = map constraint pkgs
        }
      }
    -- One "pkg == exact-version" constraint, labelled with its source file.
    constraint pkg =
      (pkgIdToConstraint $ packageId pkg,
       ConstraintSourceUserConfig userPackageEnvironmentFile)
      where
        pkgIdToConstraint pkgId =
          UserConstraintVersion (packageName pkgId)
                                (thisVersion $ packageVersion pkgId)
    createPkgEnv config = mempty { pkgEnvSavedConfig = config }
    showPkgEnv = BS.Char8.pack . showPackageEnvironment
-- | Render each package as a @name == version@ line.
formatPkgs :: Package pkg => [pkg] -> [String]
formatPkgs pkgs = [ render (packageId pkg) | pkg <- pkgs ]
  where
    render pid =
      display (packageName pid) ++ " == " ++ showVersion (packageVersion pid)
| tolysz/prepare-ghcjs | spec-lts8/cabal/cabal-install/Distribution/Client/Freeze.hs | bsd-3-clause | 9,462 | 0 | 20 | 2,472 | 1,704 | 913 | 791 | 181 | 3 |
import System.Plugins
import API
import System.Directory
-- | Compile @../Plugin.hs@ against the local API directory, dynamically
-- load its @resource@ value, print that value's 'field', and remove the
-- generated object files.
main = do
#if __GLASGOW_HASKELL__ >= 604
        tmpDir <- getTemporaryDirectory
#else
        -- older GHCs lack getTemporaryDirectory
        let tmpDir = "/tmp"
#endif
        -- compile the plugin, putting objects into tmpDir
        status <- make "../Plugin.hs" [ "-i../api", "-odir", tmpDir ]
        o <- case status of
                MakeSuccess _ o -> return o
                MakeFailure e -> mapM_ putStrLn e >> error "didn't compile"
        -- load the plugin's "resource" symbol
        m_v <- load o ["../api"] [] "resource"
        v <- case m_v of
                LoadSuccess _ v -> return v
                _ -> error "load failed"
        putStrLn $ field v
        -- clean up the compiled artefacts
        mapM_ removeFile [(tmpDir ++ "/Plugin.hi"), (tmpDir ++ "/Plugin.o") ]
| abuiles/turbinado-blog | tmp/dependencies/hs-plugins-1.3.1/testsuite/make/odir/prog/Main.hs | bsd-3-clause | 672 | 1 | 12 | 229 | 192 | 92 | 100 | 15 | 3 |
-- |
-- TH.Lib contains lots of useful helper functions for
-- generating and manipulating Template Haskell terms
{-# LANGUAGE CPP #-}
module Language.Eta.Meta.Lib where
-- All of the exports from this module should
-- be "public" functions. The main module TH
-- re-exports them all.
import Language.Eta.Meta.Syntax hiding (Role, InjectivityAnn)
import qualified Language.Eta.Meta.Syntax as TH
import Control.Monad( liftM, liftM2 )
import Data.Word( Word8 )
----------------------------------------------------------
-- * Type synonyms
----------------------------------------------------------
-- Synonyms for the various syntax fragments wrapped in the quotation
-- monad 'Q'; these are the types the smart constructors below produce
-- and consume.
type InfoQ               = Q Info
type PatQ                = Q Pat
type FieldPatQ           = Q FieldPat
type ExpQ                = Q Exp
type TExpQ a             = Q (TExp a)
type DecQ                = Q Dec
type DecsQ               = Q [Dec]
type ConQ                = Q Con
type TypeQ               = Q Type
type TyLitQ              = Q TyLit
type CxtQ                = Q Cxt
type PredQ               = Q Pred
type MatchQ              = Q Match
type ClauseQ             = Q Clause
type BodyQ               = Q Body
type GuardQ              = Q Guard
type StmtQ               = Q Stmt
type RangeQ              = Q Range
type SourceStrictnessQ   = Q SourceStrictness
type SourceUnpackednessQ = Q SourceUnpackedness
type BangQ               = Q Bang
type BangTypeQ           = Q BangType
type VarBangTypeQ        = Q VarBangType
type StrictTypeQ         = Q StrictType
type VarStrictTypeQ      = Q VarStrictType
type FieldExpQ           = Q FieldExp
type RuleBndrQ           = Q RuleBndr
type TySynEqnQ           = Q TySynEqn

-- must be defined here for DsMeta to find it
type Role = TH.Role
type InjectivityAnn = TH.InjectivityAnn
----------------------------------------------------------
-- * Lowercase pattern syntax functions
----------------------------------------------------------
-- | Unboxed machine-integer literal.
intPrimL    :: Integer -> Lit
intPrimL    = IntPrimL
-- | Unboxed machine-word literal.
wordPrimL   :: Integer -> Lit
wordPrimL   = WordPrimL
-- | Unboxed single-precision float literal.
floatPrimL  :: Rational -> Lit
floatPrimL  = FloatPrimL
-- | Unboxed double-precision float literal.
doublePrimL :: Rational -> Lit
doublePrimL = DoublePrimL
-- | Arbitrary-precision integer literal.
integerL    :: Integer -> Lit
integerL    = IntegerL
-- | Character literal.
charL       :: Char -> Lit
charL       = CharL
-- | Unboxed character literal.
charPrimL   :: Char -> Lit
charPrimL   = CharPrimL
-- | String literal.
stringL     :: String -> Lit
stringL     = StringL
-- | Primitive (byte-array) string literal.
stringPrimL :: [Word8] -> Lit
stringPrimL = StringPrimL
-- | Rational literal.
rationalL   :: Rational -> Lit
rationalL   = RationalL
-- Smart constructors for patterns, written applicatively: sub-pattern
-- computations are sequenced left to right, exactly as the corresponding
-- do-blocks would.

litP :: Lit -> PatQ
litP = return . LitP

varP :: Name -> PatQ
varP = return . VarP

tupP :: [PatQ] -> PatQ
tupP = fmap TupP . sequence

unboxedTupP :: [PatQ] -> PatQ
unboxedTupP = fmap UnboxedTupP . sequence

conP :: Name -> [PatQ] -> PatQ
conP n = fmap (ConP n) . sequence

infixP :: PatQ -> Name -> PatQ -> PatQ
infixP p1 n p2 = liftM2 (\l r -> InfixP l n r) p1 p2

uInfixP :: PatQ -> Name -> PatQ -> PatQ
uInfixP p1 n p2 = liftM2 (\l r -> UInfixP l n r) p1 p2

parensP :: PatQ -> PatQ
parensP = fmap ParensP

tildeP :: PatQ -> PatQ
tildeP = fmap TildeP

bangP :: PatQ -> PatQ
bangP = fmap BangP

asP :: Name -> PatQ -> PatQ
asP n = fmap (AsP n)

wildP :: PatQ
wildP = return WildP

recP :: Name -> [FieldPatQ] -> PatQ
recP n = fmap (RecP n) . sequence

listP :: [PatQ] -> PatQ
listP = fmap ListP . sequence

sigP :: PatQ -> TypeQ -> PatQ
sigP = liftM2 SigP

viewP :: ExpQ -> PatQ -> PatQ
viewP = liftM2 ViewP

fieldPat :: Name -> PatQ -> FieldPatQ
fieldPat n = fmap (\p -> (n, p))
-------------------------------------------------------------------------------
-- * Stmt
-- | A binding statement, @pat <- exp@.
bindS :: PatQ -> ExpQ -> StmtQ
bindS = liftM2 BindS

-- | A @let@ statement.
letS :: [DecQ] -> StmtQ
letS = fmap LetS . sequence

-- | An expression statement.
noBindS :: ExpQ -> StmtQ
noBindS = fmap NoBindS

-- | Parallel comprehension branches.
parS :: [[StmtQ]] -> StmtQ
parS = fmap ParS . mapM sequence
-------------------------------------------------------------------------------
-- * Range
-- | @[from ..]@
fromR :: ExpQ -> RangeQ
fromR = fmap FromR

-- | @[from, next ..]@
fromThenR :: ExpQ -> ExpQ -> RangeQ
fromThenR = liftM2 FromThenR

-- | @[from .. to]@
fromToR :: ExpQ -> ExpQ -> RangeQ
fromToR = liftM2 FromToR

-- | @[from, next .. to]@
fromThenToR :: ExpQ -> ExpQ -> ExpQ -> RangeQ
fromThenToR x y z = do
  from <- x
  next <- y
  to   <- z
  return (FromThenToR from next to)
-------------------------------------------------------------------------------
-- * Body
-- | An unguarded right-hand side.
normalB :: ExpQ -> BodyQ
normalB = fmap NormalB

-- | A guarded right-hand side.
guardedB :: [Q (Guard,Exp)] -> BodyQ
guardedB = fmap GuardedB . sequence
-------------------------------------------------------------------------------
-- * Guard
-- | A boolean guard.
normalG :: ExpQ -> GuardQ
normalG = fmap NormalG

-- | A boolean guard paired with its right-hand side.
normalGE :: ExpQ -> ExpQ -> Q (Guard, Exp)
normalGE g e = liftM2 (\g' e' -> (NormalG g', e')) g e

-- | A pattern guard.
patG :: [StmtQ] -> GuardQ
patG = fmap PatG . sequence

-- | A pattern guard paired with its right-hand side.
patGE :: [StmtQ] -> ExpQ -> Q (Guard, Exp)
patGE ss e = liftM2 (\ss' e' -> (PatG ss', e')) (sequence ss) e
-------------------------------------------------------------------------------
-- * Match and Clause
-- | Build one case alternative; use with 'caseE'.
match :: PatQ -> BodyQ -> [DecQ] -> MatchQ
match p rhs ds = do
  pat  <- p
  body <- rhs
  decs <- sequence ds
  return (Match pat body decs)

-- | Build one defining equation of a function; use with 'funD'.
clause :: [PatQ] -> BodyQ -> [DecQ] -> ClauseQ
clause ps r ds = do
  pats <- sequence ps
  body <- r
  decs <- sequence ds
  return (Clause pats body decs)
---------------------------------------------------------------------------
-- * Exp
-- | Refer to a variable by dynamically binding its name (unhygienic).
dyn :: String -> ExpQ
dyn = return . VarE . mkName

-- | A variable.
varE :: Name -> ExpQ
varE = return . VarE

-- | A data constructor.
conE :: Name -> ExpQ
conE = return . ConE

-- | A literal.
litE :: Lit -> ExpQ
litE = return . LitE
-- | Function application, @f x@.
appE :: ExpQ -> ExpQ -> ExpQ
appE x y = do { a <- x; b <- y; return (AppE a b)}

-- | An explicitly parenthesised expression.
parensE :: ExpQ -> ExpQ
parensE x = do { x' <- x; return (ParensE x') }

-- | An infix application whose fixity has not yet been resolved.
uInfixE :: ExpQ -> ExpQ -> ExpQ -> ExpQ
uInfixE x s y = do { x' <- x; s' <- s; y' <- y;
                     return (UInfixE x' s' y') }

-- | An infix application or operator section; 'Nothing' leaves the
-- corresponding operand slot empty.  Each equation runs the operand
-- computations in left-to-right order.
infixE :: Maybe ExpQ -> ExpQ -> Maybe ExpQ -> ExpQ
infixE (Just x) s (Just y) = do { a <- x; s' <- s; b <- y;
                                  return (InfixE (Just a) s' (Just b))}
infixE Nothing  s (Just y) = do { s' <- s; b <- y;
                                  return (InfixE Nothing s' (Just b))}
infixE (Just x) s Nothing  = do { a <- x; s' <- s;
                                  return (InfixE (Just a) s' Nothing)}
infixE Nothing  s Nothing  = do { s' <- s; return (InfixE Nothing s' Nothing) }

-- | Saturated infix application, @x `op` z@.
infixApp :: ExpQ -> ExpQ -> ExpQ -> ExpQ
infixApp x y z = infixE (Just x) y (Just z)
-- | Left operator section, @(x `op`)@.
sectionL :: ExpQ -> ExpQ -> ExpQ
sectionL x y = infixE (Just x) y Nothing
-- | Right operator section, @(`op` y)@.
sectionR :: ExpQ -> ExpQ -> ExpQ
sectionR x y = infixE Nothing x (Just y)
-- Compound-expression constructors; operands are sequenced in the same
-- left-to-right order as the original do-blocks.

-- | A lambda abstraction.
lamE :: [PatQ] -> ExpQ -> ExpQ
lamE ps e = liftM2 LamE (sequence ps) e

-- | Single-arg lambda
lam1E :: PatQ -> ExpQ -> ExpQ
lam1E p = lamE [p]

-- | A @\\case@ expression.
lamCaseE :: [MatchQ] -> ExpQ
lamCaseE = fmap LamCaseE . sequence

-- | A boxed tuple.
tupE :: [ExpQ] -> ExpQ
tupE = fmap TupE . sequence

-- | An unboxed tuple.
unboxedTupE :: [ExpQ] -> ExpQ
unboxedTupE = fmap UnboxedTupE . sequence

-- | An @if-then-else@ expression.
condE :: ExpQ -> ExpQ -> ExpQ -> ExpQ
condE x y z = do
  scrut <- x
  onT   <- y
  onF   <- z
  return (CondE scrut onT onF)

-- | A multi-way @if@ expression.
multiIfE :: [Q (Guard, Exp)] -> ExpQ
multiIfE = fmap MultiIfE . sequence

-- | A @let@ expression.
letE :: [DecQ] -> ExpQ -> ExpQ
letE ds e = liftM2 LetE (sequence ds) e

-- | A @case@ expression.
caseE :: ExpQ -> [MatchQ] -> ExpQ
caseE e ms = liftM2 CaseE e (sequence ms)

-- | A @do@ block.
doE :: [StmtQ] -> ExpQ
doE = fmap DoE . sequence

-- | A list comprehension.
compE :: [StmtQ] -> ExpQ
compE = fmap CompE . sequence

-- | An arithmetic sequence.
arithSeqE :: RangeQ -> ExpQ
arithSeqE = fmap ArithSeqE

-- | A list literal.
listE :: [ExpQ] -> ExpQ
listE = fmap ListE . sequence
-- | An expression with a type signature, @e :: t@.
sigE :: ExpQ -> TypeQ -> ExpQ
sigE = liftM2 SigE

-- | Record construction.
recConE :: Name -> [Q (Name,Exp)] -> ExpQ
recConE c = fmap (RecConE c) . sequence

-- | Record update.
recUpdE :: ExpQ -> [Q (Name,Exp)] -> ExpQ
recUpdE e fs = liftM2 RecUpdE e (sequence fs)

-- | A string literal expression.
stringE :: String -> ExpQ
stringE = litE . stringL

-- | A single field assignment for 'recConE' / 'recUpdE'.
fieldExp :: Name -> ExpQ -> Q (Name, Exp)
fieldExp s = fmap (\e -> (s, e))

-- | @staticE x = [| static x |]@
staticE :: ExpQ -> ExpQ
staticE = fmap StaticE

-- | A deliberately unresolved variable occurrence.
unboundVarE :: Name -> ExpQ
unboundVarE = return . UnboundVarE

-- ** 'arithSeqE' Shortcuts
fromE :: ExpQ -> ExpQ
fromE = fmap (ArithSeqE . FromR)

fromThenE :: ExpQ -> ExpQ -> ExpQ
fromThenE = liftM2 (\a b -> ArithSeqE (FromThenR a b))

fromToE :: ExpQ -> ExpQ -> ExpQ
fromToE = liftM2 (\a b -> ArithSeqE (FromToR a b))

fromThenToE :: ExpQ -> ExpQ -> ExpQ -> ExpQ
fromThenToE x y z = do
  from <- x
  next <- y
  to   <- z
  return (ArithSeqE (FromThenToR from next to))
-------------------------------------------------------------------------------
-- * Dec
-- | A value binding, @pat = body where decs@.
valD :: PatQ -> BodyQ -> [DecQ] -> DecQ
valD p b ds =
  do { p' <- p
     ; ds' <- sequence ds
     ; b' <- b
     ; return (ValD p' b' ds')
     }

-- | A function binding from a list of clauses.
funD :: Name -> [ClauseQ] -> DecQ
funD nm cs =
 do { cs1 <- sequence cs
    ; return (FunD nm cs1)
    }

-- | A type synonym declaration.
tySynD :: Name -> [TyVarBndr] -> TypeQ -> DecQ
tySynD tc tvs rhs = do { rhs1 <- rhs; return (TySynD tc tvs rhs1) }

-- | A @data@ declaration with context, optional kind signature,
-- constructors and deriving clause.
dataD :: CxtQ -> Name -> [TyVarBndr] -> Maybe Kind -> [ConQ] -> CxtQ -> DecQ
dataD ctxt tc tvs ksig cons derivs =
  do
    ctxt1 <- ctxt
    cons1 <- sequence cons
    derivs1 <- derivs
    return (DataD ctxt1 tc tvs ksig cons1 derivs1)

-- | A @newtype@ declaration (exactly one constructor).
newtypeD :: CxtQ -> Name -> [TyVarBndr] -> Maybe Kind -> ConQ -> CxtQ -> DecQ
newtypeD ctxt tc tvs ksig con derivs =
  do
    ctxt1 <- ctxt
    con1 <- con
    derivs1 <- derivs
    return (NewtypeD ctxt1 tc tvs ksig con1 derivs1)

-- | A class declaration with superclass context, functional dependencies
-- and member declarations.
classD :: CxtQ -> Name -> [TyVarBndr] -> [FunDep] -> [DecQ] -> DecQ
classD ctxt cls tvs fds decs =
  do
    decs1 <- sequence decs
    ctxt1 <- ctxt
    return $ ClassD ctxt1 cls tvs fds decs1

-- | An instance declaration with no explicit overlap pragma.
instanceD :: CxtQ -> TypeQ -> [DecQ] -> DecQ
instanceD = instanceWithOverlapD Nothing

-- | An instance declaration with an optional overlap pragma.
instanceWithOverlapD :: Maybe Overlap -> CxtQ -> TypeQ -> [DecQ] -> DecQ
instanceWithOverlapD o ctxt ty decs =
  do
    ctxt1 <- ctxt
    decs1 <- sequence decs
    ty1 <- ty
    return $ InstanceD o ctxt1 ty1 decs1

-- | A standalone type signature.
sigD :: Name -> TypeQ -> DecQ
sigD fun ty = liftM (SigD fun) $ ty
-- | A foreign import declaration with the given calling convention,
-- safety, external name string, Haskell name and type.
forImpD :: Callconv -> Safety -> String -> Name -> TypeQ -> DecQ
forImpD cc s str n ty
 = do ty' <- ty
      return $ ForeignD (ImportF cc s str n ty')

-- | A left-associative fixity declaration, @infixl prec op@.
infixLD :: Int -> Name -> DecQ
infixLD prec nm = return (InfixD (Fixity prec InfixL) nm)

-- | A right-associative fixity declaration, @infixr prec op@.
infixRD :: Int -> Name -> DecQ
infixRD prec nm = return (InfixD (Fixity prec InfixR) nm)

-- | A non-associative fixity declaration, @infix prec op@.
infixND :: Int -> Name -> DecQ
infixND prec nm = return (InfixD (Fixity prec InfixN) nm)

-- | An @INLINE@ / @NOINLINE@ pragma.
pragInlD :: Name -> Inline -> RuleMatch -> Phases -> DecQ
pragInlD name inline rm phases
  = return $ PragmaD $ InlineP name inline rm phases

-- | A @SPECIALISE@ pragma.
pragSpecD :: Name -> TypeQ -> Phases -> DecQ
pragSpecD n ty phases
  = do
      ty1    <- ty
      return $ PragmaD $ SpecialiseP n ty1 Nothing phases

-- | A @SPECIALISE INLINE@ (or @NOINLINE@) pragma.
pragSpecInlD :: Name -> TypeQ -> Inline -> Phases -> DecQ
pragSpecInlD n ty inline phases
  = do
      ty1    <- ty
      return $ PragmaD $ SpecialiseP n ty1 (Just inline) phases

-- | A @SPECIALISE instance@ pragma.
pragSpecInstD :: TypeQ -> DecQ
pragSpecInstD ty
  = do
      ty1    <- ty
      return $ PragmaD $ SpecialiseInstP ty1

-- | A @RULES@ pragma with binders, left- and right-hand sides.
pragRuleD :: String -> [RuleBndrQ] -> ExpQ -> ExpQ -> Phases -> DecQ
pragRuleD n bndrs lhs rhs phases
  = do
      bndrs1 <- sequence bndrs
      lhs1   <- lhs
      rhs1   <- rhs
      return $ PragmaD $ RuleP n bndrs1 lhs1 rhs1 phases

-- | An @ANN@ pragma attached to the given target.
pragAnnD :: AnnTarget -> ExpQ -> DecQ
pragAnnD target expr
  = do
      exp1 <- expr
      return $ PragmaD $ AnnP target exp1

-- | A @LINE@ pragma.
pragLineD :: Int -> String -> DecQ
pragLineD line file = return $ PragmaD $ LineP line file
-- | A @data instance@ declaration for a data family.
dataInstD :: CxtQ -> Name -> [TypeQ] -> Maybe Kind -> [ConQ] -> CxtQ -> DecQ
dataInstD ctxt tc tys ksig cons derivs =
  do
    ctxt1 <- ctxt
    tys1 <- sequence tys
    cons1 <- sequence cons
    derivs1 <- derivs
    return (DataInstD ctxt1 tc tys1 ksig cons1 derivs1)

-- | A @newtype instance@ declaration for a data family.
newtypeInstD :: CxtQ -> Name -> [TypeQ] -> Maybe Kind -> ConQ -> CxtQ -> DecQ
newtypeInstD ctxt tc tys ksig con derivs =
  do
    ctxt1 <- ctxt
    tys1 <- sequence tys
    con1  <- con
    derivs1 <- derivs
    return (NewtypeInstD ctxt1 tc tys1 ksig con1 derivs1)

-- | A @type instance@ equation for an open type family.
tySynInstD :: Name -> TySynEqnQ -> DecQ
tySynInstD tc eqn =
  do
    eqn1 <- eqn
    return (TySynInstD tc eqn1)

-- | A data family declaration with an optional kind signature.
dataFamilyD :: Name -> [TyVarBndr] -> Maybe Kind -> DecQ
dataFamilyD tc tvs kind
    = return $ DataFamilyD tc tvs kind

-- | An open type family declaration with result signature and optional
-- injectivity annotation.
openTypeFamilyD :: Name -> [TyVarBndr] -> FamilyResultSig
                -> Maybe InjectivityAnn -> DecQ
openTypeFamilyD tc tvs res inj
    = return $ OpenTypeFamilyD (TypeFamilyHead tc tvs res inj)

-- | A closed type family declaration with its list of equations.
closedTypeFamilyD :: Name -> [TyVarBndr] -> FamilyResultSig
                  -> Maybe InjectivityAnn -> [TySynEqnQ] -> DecQ
closedTypeFamilyD tc tvs result injectivity eqns =
  do eqns1 <- sequence eqns
     return (ClosedTypeFamilyD (TypeFamilyHead tc tvs result injectivity) eqns1)
-- These were deprecated in GHC 8.0 with a plan to remove them in 8.2. If you
-- remove this check please also:
-- 1. remove deprecated functions
-- 2. remove CPP language extension from top of this module
-- 3. remove the FamFlavour data type from Syntax module
-- 4. make sure that all references to FamFlavour are gone from DsMeta,
-- Convert, TcSplice (follows from 3)
-- TODO: What do we do with this?
-- #if __GLASGOW_HASKELL__ > 800
-- #error Remove deprecated familyNoKindD, familyKindD, closedTypeFamilyNoKindD and closedTypeFamilyKindD
-- #endif
{-# DEPRECATED familyNoKindD, familyKindD
    "This function will be removed in the next stable release. Use openTypeFamilyD/dataFamilyD instead." #-}
-- | Deprecated shim: declare a type or data family head with no
-- result-kind signature, dispatching on the (also deprecated) flavour.
familyNoKindD :: FamFlavour -> Name -> [TyVarBndr] -> DecQ
familyNoKindD flav tc tvs =
    case flav of
      TypeFam -> return $ OpenTypeFamilyD (TypeFamilyHead tc tvs NoSig Nothing)
      DataFam -> return $ DataFamilyD tc tvs Nothing

-- | Deprecated shim: as 'familyNoKindD' but with an explicit result kind.
familyKindD :: FamFlavour -> Name -> [TyVarBndr] -> Kind -> DecQ
familyKindD flav tc tvs k =
    case flav of
      TypeFam ->
        return $ OpenTypeFamilyD (TypeFamilyHead tc tvs (KindSig k) Nothing)
      DataFam -> return $ DataFamilyD tc tvs (Just k)

{-# DEPRECATED closedTypeFamilyNoKindD, closedTypeFamilyKindD
    "This function will be removed in the next stable release. Use closedTypeFamilyD instead." #-}
-- | Deprecated shim: closed type family without a result kind.
closedTypeFamilyNoKindD :: Name -> [TyVarBndr] -> [TySynEqnQ] -> DecQ
closedTypeFamilyNoKindD tc tvs eqns =
    do eqns1 <- sequence eqns
       return (ClosedTypeFamilyD (TypeFamilyHead tc tvs NoSig Nothing) eqns1)

-- | Deprecated shim: closed type family with an explicit result kind.
closedTypeFamilyKindD :: Name -> [TyVarBndr] -> Kind -> [TySynEqnQ] -> DecQ
closedTypeFamilyKindD tc tvs kind eqns =
    do eqns1 <- sequence eqns
       return (ClosedTypeFamilyD (TypeFamilyHead tc tvs (KindSig kind) Nothing)
               eqns1)
-- | Role annotation declaration: @type role T nominal ...@.
roleAnnotD :: Name -> [Role] -> DecQ
roleAnnotD name roles = return $ RoleAnnotD name roles

-- | Standalone @deriving instance@ declaration.
standaloneDerivD :: CxtQ -> TypeQ -> DecQ
standaloneDerivD ctxtq tyq =
  do
    ctxt <- ctxtq
    ty <- tyq
    return $ StandaloneDerivD ctxt ty

-- | Default method signature (@default ... :: ...@) inside a class.
defaultSigD :: Name -> TypeQ -> DecQ
defaultSigD n tyq =
  do
    ty <- tyq
    return $ DefaultSigD n ty

-- | One equation of a type-family instance: LHS argument types and RHS.
tySynEqn :: [TypeQ] -> TypeQ -> TySynEqnQ
tySynEqn lhs rhs =
  do
    lhs1 <- sequence lhs
    rhs1 <- rhs
    return (TySynEqn lhs1 rhs1)

-- | A context (list of constraints).
cxt :: [PredQ] -> CxtQ
cxt = sequence

-- | Ordinary prefix data constructor with strictness-annotated fields.
normalC :: Name -> [BangTypeQ] -> ConQ
normalC con strtys = liftM (NormalC con) $ sequence strtys

-- | Record-syntax data constructor.
recC :: Name -> [VarBangTypeQ] -> ConQ
recC con varstrtys = liftM (RecC con) $ sequence varstrtys

-- | Infix data constructor: two annotated fields around an operator name.
infixC :: Q (Bang, Type) -> Name -> Q (Bang, Type) -> ConQ
infixC st1 con st2 = do st1' <- st1
                        st2' <- st2
                        return $ InfixC st1' con st2'

-- | Existentially quantified constructor: @forall ns. ctxt => con@.
forallC :: [TyVarBndr] -> CxtQ -> ConQ -> ConQ
forallC ns ctxt con = liftM2 (ForallC ns) ctxt con

-- | GADT constructor(s) sharing field types and one result type.
gadtC :: [Name] -> [StrictTypeQ] -> TypeQ -> ConQ
gadtC cons strtys ty = liftM2 (GadtC cons) (sequence strtys) ty

-- | Record-syntax GADT constructor(s).
recGadtC :: [Name] -> [VarStrictTypeQ] -> TypeQ -> ConQ
recGadtC cons varstrtys ty = liftM2 (RecGadtC cons) (sequence varstrtys) ty
-------------------------------------------------------------------------------
-- * Type
-- | Universally quantified type: @forall tvars. ctxt => ty@.
forallT :: [TyVarBndr] -> CxtQ -> TypeQ -> TypeQ
forallT tvars ctxt ty = do
    ctxt1 <- ctxt
    ty1 <- ty
    return $ ForallT tvars ctxt1 ty1

-- | Type variable.
varT :: Name -> TypeQ
varT = return . VarT

-- | Type constructor.
conT :: Name -> TypeQ
conT = return . ConT

-- | Infix application of a named type operator (fixity already resolved).
infixT :: TypeQ -> Name -> TypeQ -> TypeQ
infixT t1 n t2 = do t1' <- t1
                    t2' <- t2
                    return (InfixT t1' n t2')

-- | Infix application whose fixity is left for the renamer to resolve.
uInfixT :: TypeQ -> Name -> TypeQ -> TypeQ
uInfixT t1 n t2 = do t1' <- t1
                     t2' <- t2
                     return (UInfixT t1' n t2')

-- | Explicit parentheses in a type.
parensT :: TypeQ -> TypeQ
parensT t = do t' <- t
               return (ParensT t')

-- | Type application.
appT :: TypeQ -> TypeQ -> TypeQ
appT t1 t2 = do
    t1' <- t1
    t2' <- t2
    return $ AppT t1' t2'

-- | The function arrow @(->)@.
arrowT :: TypeQ
arrowT = return ArrowT

-- | The list type constructor @[]@.
listT :: TypeQ
listT = return ListT

-- | Type-level literal.
litT :: TyLitQ -> TypeQ
litT l = fmap LitT l

-- | @i@-ary tuple type constructor.
tupleT :: Int -> TypeQ
tupleT i = return (TupleT i)

-- | @i@-ary unboxed tuple type constructor.
unboxedTupleT :: Int -> TypeQ
unboxedTupleT i = return (UnboxedTupleT i)

-- | Kind-annotated type: @t :: k@.
sigT :: TypeQ -> Kind -> TypeQ
sigT t k
  = do
      t' <- t
      return $ SigT t' k

-- | The equality constraint constructor @(~)@.
equalityT :: TypeQ
equalityT = return EqualityT

-- | Anonymous wildcard @_@ in a type.
wildCardT :: TypeQ
wildCardT = return WildCardT

{-# DEPRECATED classP "As of template-haskell-2.10, constraint predicates (Pred) are just types (Type), in keeping with ConstraintKinds. Please use 'conT' and 'appT'." #-}
-- | Deprecated: class constraint built by applying the class
-- constructor to its argument types.
classP :: Name -> [Q Type] -> Q Pred
classP cla tys
  = do
      tysl <- sequence tys
      return (foldl AppT (ConT cla) tysl)

{-# DEPRECATED equalP "As of template-haskell-2.10, constraint predicates (Pred) are just types (Type), in keeping with ConstraintKinds. Please see 'equalityT'." #-}
-- | Deprecated: equality constraint @tleft ~ tright@.
equalP :: TypeQ -> TypeQ -> PredQ
equalP tleft tright
  = do
      tleft1 <- tleft
      tright1 <- tright
      eqT <- equalityT
      return (foldl AppT eqT [tleft1, tright1])

-- | Promoted data constructor used as a type.
promotedT :: Name -> TypeQ
promotedT = return . PromotedT

-- | Promoted @i@-ary tuple constructor.
promotedTupleT :: Int -> TypeQ
promotedTupleT i = return (PromotedTupleT i)

-- | Promoted empty-list constructor.
promotedNilT :: TypeQ
promotedNilT = return PromotedNilT

-- | Promoted cons constructor.
promotedConsT :: TypeQ
promotedConsT = return PromotedConsT
-- | Source unpackedness annotations for constructor fields.
noSourceUnpackedness, sourceNoUnpack, sourceUnpack :: SourceUnpackednessQ
noSourceUnpackedness = return NoSourceUnpackedness
sourceNoUnpack       = return SourceNoUnpack
sourceUnpack         = return SourceUnpack

-- | Source strictness annotations for constructor fields.
noSourceStrictness, sourceLazy, sourceStrict :: SourceStrictnessQ
noSourceStrictness = return NoSourceStrictness
sourceLazy         = return SourceLazy
sourceStrict       = return SourceStrict

{-# DEPRECATED isStrict
    ["Use 'bang'. See https://ghc.haskell.org/trac/ghc/wiki/Migration/8.0. ",
     "Example usage: 'bang noSourceUnpackedness sourceStrict'"] #-}
{-# DEPRECATED notStrict
    ["Use 'bang'. See https://ghc.haskell.org/trac/ghc/wiki/Migration/8.0. ",
     "Example usage: 'bang noSourceUnpackedness noSourceStrictness'"] #-}
{-# DEPRECATED unpacked
    ["Use 'bang'. See https://ghc.haskell.org/trac/ghc/wiki/Migration/8.0. ",
     "Example usage: 'bang sourceUnpack sourceStrict'"] #-}
-- Deprecated pre-8.0 strictness combinators, kept as shims over 'bang'.
isStrict, notStrict, unpacked :: Q Strict
isStrict = bang noSourceUnpackedness sourceStrict
notStrict = bang noSourceUnpackedness noSourceStrictness
unpacked = bang sourceUnpack sourceStrict

-- | Combine an unpackedness and a strictness into a field annotation.
bang :: SourceUnpackednessQ -> SourceStrictnessQ -> BangQ
bang u s = do u' <- u
              s' <- s
              return (Bang u' s')

-- | Pair a strictness annotation with a field type.
bangType :: BangQ -> TypeQ -> BangTypeQ
bangType = liftM2 (,)

-- | As 'bangType', but for a named record field.
varBangType :: Name -> BangTypeQ -> VarBangTypeQ
varBangType v bt = do (b, t) <- bt
                      return (v, b, t)

{-# DEPRECATED strictType
    "As of @template-haskell-2.11.0.0@, 'StrictType' has been replaced by 'BangType'. Please use 'bangType' instead." #-}
strictType :: Q Strict -> TypeQ -> StrictTypeQ
strictType = bangType

{-# DEPRECATED varStrictType
    "As of @template-haskell-2.11.0.0@, 'VarStrictType' has been replaced by 'VarBangType'. Please use 'varBangType' instead." #-}
varStrictType :: Name -> StrictTypeQ -> VarStrictTypeQ
varStrictType = varBangType

-- * Type Literals

-- | Numeric type literal; negative numbers are rejected with 'fail'.
numTyLit :: Integer -> TyLitQ
numTyLit n = if n >= 0 then return (NumTyLit n)
                       else fail ("Negative type-level number: " ++ show n)

-- | String (symbol) type literal.
strTyLit :: String -> TyLitQ
strTyLit s = return (StrTyLit s)
-------------------------------------------------------------------------------
-- * Kind
-- | Type variable binder without a kind annotation.
plainTV :: Name -> TyVarBndr
plainTV = PlainTV

-- | Type variable binder with an explicit kind.
kindedTV :: Name -> Kind -> TyVarBndr
kindedTV = KindedTV

-- Kinds are represented as types, so the following are synonyms for
-- the corresponding 'Type' constructors.
varK :: Name -> Kind
varK = VarT

conK :: Name -> Kind
conK = ConT

tupleK :: Int -> Kind
tupleK = TupleT

arrowK :: Kind
arrowK = ArrowT

listK :: Kind
listK = ListT

appK :: Kind -> Kind -> Kind
appK = AppT

-- | The kind of lifted types, @*@.
starK :: Kind
starK = StarT

-- | The kind @Constraint@.
constraintK :: Kind
constraintK = ConstraintT
-------------------------------------------------------------------------------
-- * Type family result
-- | No result signature on a type family.
noSig :: FamilyResultSig
noSig = NoSig

-- | Result kind signature on a type family.
kindSig :: Kind -> FamilyResultSig
kindSig = KindSig

-- | Named result type variable (required for injectivity annotations).
tyVarSig :: TyVarBndr -> FamilyResultSig
tyVarSig = TyVarSig

-------------------------------------------------------------------------------
-- * Injectivity annotation

-- | Injectivity annotation: result name determines the given variables.
injectivityAnn :: Name -> [Name] -> InjectivityAnn
injectivityAnn = TH.InjectivityAnn

-------------------------------------------------------------------------------
-- * Role

-- Role synonyms for role annotations.
nominalR, representationalR, phantomR, inferR :: Role
nominalR          = NominalR
representationalR = RepresentationalR
phantomR          = PhantomR
inferR            = InferR

-------------------------------------------------------------------------------
-- * Callconv

-- Foreign calling-convention synonyms.
cCall, stdCall, cApi, prim, javaScript, java :: Callconv
cCall      = CCall
stdCall    = StdCall
cApi       = CApi
prim       = Prim
javaScript = JavaScript
java       = Java

-------------------------------------------------------------------------------
-- * Safety

-- Foreign-import safety synonyms.
unsafe, safe, interruptible :: Safety
unsafe        = Unsafe
safe          = Safe
interruptible = Interruptible

-------------------------------------------------------------------------------
-- * FunDep

-- | Functional dependency @lhs -> rhs@ in a class head.
funDep :: [Name] -> [Name] -> FunDep
funDep = FunDep

-------------------------------------------------------------------------------
-- * FamFlavour

-- Family-flavour synonyms (part of the deprecated family machinery;
-- see the removal note earlier in this file).
typeFam, dataFam :: FamFlavour
typeFam = TypeFam
dataFam = DataFam

-------------------------------------------------------------------------------
-- * RuleBndr

-- | An untyped variable binder in a RULES pragma.
ruleVar :: Name -> RuleBndrQ
ruleVar = return . RuleVar

-- | A type-annotated variable binder in a RULES pragma.
typedRuleVar :: Name -> TypeQ -> RuleBndrQ
typedRuleVar n ty = ty >>= return . TypedRuleVar n

-------------------------------------------------------------------------------
-- * AnnTarget

-- Annotation targets for ANN pragmas: a value, a type, or the
-- enclosing module.
valueAnnotation :: Name -> AnnTarget
valueAnnotation = ValueAnnotation

typeAnnotation :: Name -> AnnTarget
typeAnnotation = TypeAnnotation

moduleAnnotation :: AnnTarget
moduleAnnotation = ModuleAnnotation
--------------------------------------------------------------
-- * Useful helper function
-- | Apply a head expression to a list of argument expressions,
-- left-associatively: @appsE [f, x, y]@ builds @(f x) y@.
-- Calling it on an empty list is an error.
appsE :: [ExpQ] -> ExpQ
appsE []       = error "appsE []"
appsE (x : xs) = foldl appE x xs
-- | Return the Module at the place of splicing. Can be used as an
-- input for 'reifyModule'.
thisModule :: Q Module
thisModule = do
  loc <- location
  -- Package and module names are taken from the splice's location info.
  return $ Module (mkPkgName $ loc_package loc) (mkModName $ loc_module loc)
| rahulmutt/ghcvm | libraries/eta-meta/Language/Eta/Meta/Lib.hs | bsd-3-clause | 24,547 | 0 | 13 | 6,098 | 7,999 | 4,118 | 3,881 | -1 | -1 |
{-# Language OverloadedStrings #-}
import Criterion.Main hiding (defaultConfig)
import TeraReg.DataGen
import TeraReg.LinReg
import TeraReg.MatrixMult
import TeraReg.QuadTree
import Numeric.LinearAlgebra.Data (toRows)
-- | Criterion benchmark driver: times OLS regression on a synthetic
-- data set.
main :: IO ()
main = do
    -- 1,000,000 observations by 100 predictors of generated data.
    let (p, r) = generateData $ defaultConfig 1000000 100
    -- NOTE(review): this binding is unused and, being lazy, does not
    -- actually force the row conversion before the benchmark runs.
    let _ = toRows p
    defaultMain [
        bgroup "LinReg" [
            bench "ols" $ nfIO $ fast_ols p r]
        -- bgroup "MatrixMult" [
        --     bench "qtStrass" nf qtStrass p r]
        ]
| pb-pravin/terareg | bench/bench.hs | agpl-3.0 | 521 | 1 | 13 | 142 | 135 | 69 | 66 | 14 | 1 |
{-# LANGUAGE DataKinds, GADTs, KindSignatures, PatternSynonyms, TypeOperators,
ViewPatterns #-}
module BundledPatterns2 (Vec((:>), Empty), RTree(..)) where
import GHC.TypeLits
import BundledPatterns
-- | Unidirectional pattern synonym matching the empty 'Vec' (a view of
-- 'Nil'); being match-only (@<-@), it cannot be used as an expression.
pattern Empty :: Vec 0 a
pattern Empty <- Nil
| Fuuzetsu/haddock | html-test/src/BundledPatterns2.hs | bsd-2-clause | 261 | 0 | 6 | 45 | 53 | 34 | 19 | 10 | 0 |
-- |
-- Module: FRP.Netwire.Move
-- Copyright: (c) 2013 Ertugrul Soeylemez
-- License: BSD3
-- Maintainer: Ertugrul Soeylemez <es@ertes.de>
module FRP.Netwire.Move
( -- * Calculus
derivative,
integral,
integralWith
)
where
import Control.Wire
-- | Time derivative of the input signal.
--
-- * Depends: now.
--
-- * Inhibits: at singularities.
derivative ::
    (RealFloat a, HasTime t s, Monoid e)
    => Wire s e m a a
-- At the very first instant there is no previous sample, so the wire
-- inhibits ('Left' 'mempty') while recording the input for next time.
derivative = mkPure $ \_ x -> (Left mempty, loop x)
    where
    loop x' =
        mkPure $ \ds x ->
            let dt = realToFrac (dtime ds)
                dx = (x - x') / dt
                -- A NaN slope (0/0: no time passed, no change) is taken
                -- as zero; an infinite slope is a singularity and
                -- inhibits for this instant.
                mdx | isNaN dx      = Right 0
                    | isInfinite dx = Left mempty
                    | otherwise     = Right dx
            -- Force the result each frame to avoid a growing thunk chain.
            in mdx `seq` (mdx, loop x)
-- | Integrate the input signal over time.
--
-- * Depends: before now.
integral ::
    (Fractional a, HasTime t s)
    => a  -- ^ Integration constant (aka start value).
    -> Wire s e m a a
integral x' =
    mkPure $ \ds dx ->
        let dt = realToFrac (dtime ds)
        -- The accumulator as of *before* this frame is emitted, so the
        -- start value is produced at the first instant.  The 'seq'
        -- keeps the accumulator strict across frames.
        in x' `seq` (Right x', integral (x' + dt*dx))
-- | Integrate the left input signal over time, but apply the given
-- correction function to it. This can be used to implement collision
-- detection/reaction.
--
-- The right signal of type @w@ is the /world value/. It is just passed
-- to the correction function for reference and is not used otherwise.
--
-- The correction function must be idempotent with respect to the world
-- value: @f w (f w x) = f w x@. This is necessary and sufficient to
-- protect time continuity.
--
-- * Depends: before now.
integralWith ::
    (Fractional a, HasTime t s)
    => (w -> a -> a)  -- ^ Correction function.
    -> a              -- ^ Integration constant (aka start value).
    -> Wire s e m (a, w) a
integralWith correct = loop
    where
    loop x' =
        mkPure $ \ds (dx, w) ->
            let dt = realToFrac (dtime ds)
                -- Apply the correction (e.g. collision response) to the
                -- newly accumulated value before storing it.
                x  = correct w (x' + dt*dx)
            -- As in 'integral': the pre-update value is emitted and the
            -- accumulator is kept strict.
            in x' `seq` (Right x', loop x)
| jship/metronome | local_deps/netwire/FRP/Netwire/Move.hs | bsd-3-clause | 2,071 | 0 | 16 | 650 | 492 | 269 | 223 | 37 | 1 |
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE MultiParamTypeClasses #-}
-----------------------------------------------------------------------------
-- |
-- Module : Diagrams.TwoD.Factorization
-- Copyright : (c) 2012 Brent Yorgey
-- License : BSD-style (see LICENSE)
-- Maintainer : byorgey@cis.upenn.edu
--
-- Factorization diagrams, as seen at
-- <http://mathlesstraveled.com/2012/10/05/factorization-diagrams/>
-- and
-- <http://mathlesstraveled.com/2012/11/05/more-factorization-diagrams/>
-- and on the cover of Hacker Monthly
-- (<http://hackermonthly.com/issue-31.html>): visually represent the
-- prime factorization of n by drawing n dots recursively grouped
-- according to the factors.
--
-- <<diagrams/src_Diagrams_TwoD_Factorization_grid100Big.svg#diagram=grid100Big&width=600>>
--
-----------------------------------------------------------------------------
module Diagrams.TwoD.Factorization where
import Data.Char (digitToInt)
import Data.List.Split (chunksOf)
import Data.Maybe (listToMaybe)
import Diagrams.Prelude
-- | @primeLayout@ takes a positive integer p (the idea is for it to
-- be prime, though it doesn't really matter) and a diagram, and lays
-- out p rotated copies of the diagram in a circular pattern.
--
-- There is a special case for @p = 2@: if the given diagram is taller
-- than it is wide, then the two copies will be placed beside each
-- other; if wider then tall, they will be placed one above the
-- other.
--
-- The regular @p@-gon connecting the centers of the laid-out
-- diagrams is also filled in with vertical bars of color
-- representing the number @p@. In particular, there is one color
-- for each decimal digit (the provided list should have length 10
-- and represents the digits 0-9), and the colors, read left to
-- right, give the decimal expansion of @p@.
--
-- > import Diagrams.TwoD.Factorization
-- > plExample
-- > = pad 1.1 . centerXY
-- > . hsep 0.5
-- > . map (sized (mkWidth 1))
-- > $ [ primeLayout defaultColors 5 (circle 1 # fc black)
-- > , primeLayout defaultColors 103 (square 1 # fc green # lw none)
-- > , primeLayout (repeat white) 13 (circle 1 # lc orange)
-- > ]
--
-- <<diagrams/src_Diagrams_TwoD_Factorization_plExample.svg#diagram=plExample&width=400>>
primeLayout :: (Renderable (Path V2 n) b, TypeableFloat n)
            => [Colour Double] -> Integer -> QDiagram b V2 n Any -> QDiagram b V2 n Any
-- Special case for p = 2: the two copies are placed along the diagram's
-- shorter axis (one copy mirrored) rather than on a circle.
primeLayout _ 2 d
  | width d >= height d = (d === strutY (height d / 3) === d # reflectY)
                          # centerY
  | otherwise = (d ||| strutX (width d / 3) ||| d)
                # centerX
-- General case: p copies rotated evenly around a circle of radius r,
-- over a colour-coded regular p-gon.
primeLayout colors p d
  = (mconcat $
      map (\n -> d # translateY r # rotateBy
                   (fromIntegral n/fromIntegral p)) [0..p-1]
    )
    <>
    colorBars colors p poly
  where poly = polygon (with & polyType .~ PolyRegular (fromIntegral p) r
                             & polyOrient .~ OrientH
                       )
        w = max (width d) (height d)
        -- Radius heuristic: grows with p so neighbouring copies of the
        -- (size-w) diagram keep clear of each other; c is the spacing
        -- fudge factor.
        r = w * c / sin (tau / (2 * fromIntegral p))
        c = 0.75
-- | Draw vertical bars of color inside a polygon which represent the
-- decimal expansion of @p@, using the provided list of colors to
-- represent the digits 0-9.
--
-- > import Diagrams.TwoD.Factorization
-- > colorBarsEx = colorBars defaultColors 3526 (square 1)
--
-- <<diagrams/src_Diagrams_TwoD_Factorization_colorBarsEx.svg#diagram=colorBarsEx&width=200>>
colorBars :: (Renderable (Path V2 n) b, TypeableFloat n)
          => [Colour Double] -> Integer -> Path V2 n -> QDiagram b V2 n Any
-- Small p (up to 11): fill the polygon with a single solid colour,
-- indexed by p modulo 10.
colorBars colors p poly | p <= 11 = stroke poly
                                    # fc (colors!!(fromIntegral p `mod` 10))
                                    # lw none
-- Larger p: one vertical bar per decimal digit of p, left to right,
-- clipped to the polygon.
colorBars colors p poly = bars # clipBy poly
  where
    barColors = map ((colors!!) . digitToInt) (show p)
    barW = width poly / fromIntegral (length barColors)
    barH = height poly
    bars = (hcat $ map (\c -> rect barW barH # fc c # lc c) barColors)
           # centerX
-- | A default set of digit colors, based very loosely on the color
-- code for resistors (<http://en.wikipedia.org/wiki/Electronic_color_code>),
-- lightened up a bit by blending with white.
--
-- <<diagrams/src_Diagrams_TwoD_Factorization_showDefaultColors.svg#diagram=showDefaultColors&height=50>>
defaultColors :: [Colour Double]
-- Index i is the colour for decimal digit i (0-9); each base colour is
-- lightened by blending in 10% white.
defaultColors = map (blend 0.1 white)
  [black,red,orange,yellow,green,blue,gray,purple,white,brown]
-- > import Diagrams.TwoD.Factorization
-- > showDefaultColors = hcat $ zipWith showColor defaultColors [0..]
-- > where
-- > showColor c d = text (show d) <> square 1 # fc c # lw none
-- | Create a centered factorization diagram from the given list of
-- factors (intended to be primes, but again, any positive integers
-- will do; note how the below example uses 6), by recursively
-- folding according to 'primeLayout', with the 'defaultColors' and
-- a base case of a black circle.
--
-- > import Diagrams.TwoD.Factorization
-- > factorDiagram'Ex = factorDiagram' [2,5,6]
--
-- <<diagrams/src_Diagrams_TwoD_Factorization_factorDiagram'Ex.svg#diagram=factorDiagram'Ex&height=200>>
factorDiagram' :: (Renderable (Path V2 n) b, TypeableFloat n)
               => [Integer] -> QDiagram b V2 n Any
-- Fold from the last factor outwards; the base case is one black dot.
factorDiagram' = centerXY . foldr (primeLayout defaultColors) (circle 1 # fc black # lw none)
-- | Create a default factorization diagram for the given integer, by
-- factoring it and calling 'factorDiagram'' on its prime
-- factorization (with the factors ordered from smallest to
-- biggest).
--
-- > import Diagrams.TwoD.Factorization
-- > factorDiagramEx = factorDiagram 700
--
-- <<diagrams/src_Diagrams_TwoD_Factorization_factorDiagramEx.svg#diagram=factorDiagramEx&width=400>>
factorDiagram :: (Renderable (Path V2 n) b, TypeableFloat n)
              => Integer -> QDiagram b V2 n Any
-- Factor (smallest factor first) and delegate to 'factorDiagram''.
factorDiagram = factorDiagram' . factors
-- | Prime factorization of a positive integer, smallest factor first
-- (e.g. @factors 700 == [2,2,5,5,7]@).  @factors 1 == []@; a number
-- with no nontrivial divisor is returned as a singleton.
factors :: Integer -> [Integer]
factors 1 = []
factors n = maybe [n] (\a -> a : factors (n `div` a)) mf
  where
    -- Smallest nontrivial divisor of n.  Only candidates d with
    -- d*d <= n need to be tried: if n is composite, its smallest
    -- factor is at most sqrt n.  (The previous version scanned the
    -- whole of [2 .. n-1], which is O(n) per factor.)
    mf = listToMaybe [ d | d <- takeWhile (\d -> d * d <= n) [2 ..]
                         , n `mod` d == 0 ]
-- | Place a diagram inside a square with the given side length,
-- centering and scaling it to fit with a bit of padding.
--
-- > import Diagrams.TwoD.Factorization
-- > ensquareEx = ensquare 1 (circle 25) ||| ensquare 1 (factorDiagram 30)
--
-- <<diagrams/src_Diagrams_TwoD_Factorization_ensquareEx.svg#diagram=ensquareEx&width=200>>
ensquare
  :: (Renderable (Path V2 n) b, TypeableFloat n)
  => n -> QDiagram b V2 n Any -> QDiagram b V2 n Any
-- Scale the diagram to 80% of the side length, leaving a 10% margin.
ensquare n d = d # centerXY # sized (dims2D (0.8*n) (0.8*n)) <> square n
-- | @fdGrid n@ creates a grid of factorization diagrams, given a list
-- of lists of integers: the inner lists represent L-R rows, which
-- are laid out from top to bottom.
--
-- > import Diagrams.TwoD.Factorization
-- > fdGridEx = fdGrid [[7,6,5],[4,19,200],[1,10,50]]
--
-- <<diagrams/src_Diagrams_TwoD_Factorization_fdGridEx.svg#diagram=fdGridEx&width=200>>
fdGrid
  :: (Renderable (Path V2 n) b, TypeableFloat n)
  => [[Integer]] -> QDiagram b V2 n Any
-- Each cell is a unit-square factorization diagram; rows are laid out
-- left-to-right ('hcat') and stacked top-to-bottom ('vcat').
fdGrid = vcat . map hcat . (map . map) (ensquare 1 . factorDiagram)
-- | @fdGridList n@ creates a grid containing the factorization
-- diagrams of all the numbers from @1@ to @n^2@, ordered left to
-- right, top to bottom (like the grid seen on the cover of Hacker
-- Monthly, <http://hackermonthly.com/issue-31.html>).
--
-- > import Diagrams.TwoD.Factorization
-- > grid100 = fdGridList 10
-- > grid100Big = grid100
--
-- <<diagrams/src_Diagrams_TwoD_Factorization_grid100.svg#diagram=grid100&width=400>>
fdGridList
  :: (Renderable (Path V2 n) b, TypeableFloat n)
  => Integer -> QDiagram b V2 n Any
-- n rows of n cells covering the numbers 1 .. n*n, in reading order.
fdGridList n = fdGrid . chunksOf (fromIntegral n) $ [1..n*n]
-- | @fdTable n@ creates a \"multiplication table\" of factorization
-- diagrams, with the diagrams for @1@ to @n@ along both the top row
-- and left column, and the diagram for @m*n@ in row @m@ and column
-- @n@.
--
-- > import Diagrams.TwoD.Factorization
-- > fdMultTableEx = fdMultTable 13
--
-- <<diagrams/src_Diagrams_TwoD_Factorization_fdMultTableEx.svg#diagram=fdMultTableEx&width=600>>
fdMultTable
  :: (Renderable (Path V2 n) b, TypeableFloat n)
  => Integer -> QDiagram b V2 n Any
-- Cell at row r, column c holds the diagram for r*c, for r,c in 1..n.
fdMultTable n = fdGrid [ [r*c | c <- [1 .. n]] | r <- [1 .. n] ]
| tejon/diagrams-contrib | src/Diagrams/TwoD/Factorization.hs | bsd-3-clause | 8,523 | 0 | 15 | 1,813 | 1,417 | 789 | 628 | 65 | 1 |
module FunIn2 where
--The application of a function is replaced by the right-hand side of the definition,
--with actual parameters replacing formals.
--In this example, unfold 'addthree'.
--This example aims to test unfolding a function definition.
-- | Entry point of the refactoring fixture.  NOTE(review): deliberately
-- partial — only the inputs 0 and 1 are matched; anything else is a
-- pattern-match failure.
main :: Int -> Int
main = \x -> case x of
               1 -> 1 + main 0
               0 ->(addthree 1 2 3)  -- the call site the unfold targets

-- | Sum of three 'Int's; the definition to be unfolded at its call site.
addthree :: Int -> Int -> Int -> Int
addthree a b c = a + b + c
| SAdams601/HaRe | old/testing/foldDef/FunIn2_TokOut.hs | bsd-3-clause | 427 | 0 | 10 | 114 | 97 | 53 | 44 | 7 | 2 |
module A3 where
-- | Fixture data type for the remove-field refactoring: constructors
-- with two, one and one field(s) respectively.
data Data a = C1 a Char |
              C2 Int |
              C3 Float

-- NOTE(review): the signature claims @Data a@ for any @a@, but the body
-- fixes @a@ to 'String'; this file is a refactoring-tool fixture and is
-- not expected to typecheck as ordinary Haskell.
f :: Data a
f = (C1 "hello" 'c')

-- NOTE(review): the second @C1@ alternative is unreachable (it overlaps
-- the first); kept deliberately as part of the fixture.
g = case f of
      (C1 x z) -> 42
      (C1 x z) -> 43
      _ -> 0
| SAdams601/HaRe | old/testing/removeField/A3_TokOut.hs | bsd-3-clause | 206 | 2 | 9 | 105 | 92 | 50 | 42 | 10 | 3 |
{-# LANGUAGE CPP, DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
#if __GLASGOW_HASKELL__ < 707
{-# LANGUAGE StandaloneDeriving #-}
#endif
{-# OPTIONS_GHC -fno-warn-orphans #-}
-----------------------------------------------------------------------------
-- |
-- Module : Distribution.Version
-- Copyright : Isaac Jones, Simon Marlow 2003-2004
-- Duncan Coutts 2008
-- License : BSD3
--
-- Maintainer : cabal-devel@haskell.org
-- Portability : portable
--
-- Exports the 'Version' type along with a parser and pretty printer. A version
-- is something like @\"1.3.3\"@. It also defines the 'VersionRange' data
-- types. Version ranges are like @\">= 1.2 && < 2\"@.
module Distribution.Version (
-- * Package versions
Version(..),
-- * Version ranges
VersionRange(..),
-- ** Constructing
anyVersion, noVersion,
thisVersion, notThisVersion,
laterVersion, earlierVersion,
orLaterVersion, orEarlierVersion,
unionVersionRanges, intersectVersionRanges,
invertVersionRange,
withinVersion,
betweenVersionsInclusive,
-- ** Inspection
withinRange,
isAnyVersion,
isNoVersion,
isSpecificVersion,
simplifyVersionRange,
foldVersionRange,
foldVersionRange',
-- ** Modification
removeUpperBound,
-- * Version intervals view
asVersionIntervals,
VersionInterval,
LowerBound(..),
UpperBound(..),
Bound(..),
-- ** 'VersionIntervals' abstract type
-- | The 'VersionIntervals' type and the accompanying functions are exposed
-- primarily for completeness and testing purposes. In practice
-- 'asVersionIntervals' is the main function to use to
-- view a 'VersionRange' as a bunch of 'VersionInterval's.
--
VersionIntervals,
toVersionIntervals,
fromVersionIntervals,
withinIntervals,
versionIntervals,
mkVersionIntervals,
unionVersionIntervals,
intersectVersionIntervals,
invertVersionIntervals
) where
import Distribution.Compat.Binary ( Binary(..) )
import Data.Data ( Data )
import Data.Typeable ( Typeable )
import Data.Version ( Version(..) )
import GHC.Generics ( Generic )
import Distribution.Text ( Text(..) )
import qualified Distribution.Compat.ReadP as Parse
import Distribution.Compat.ReadP ((+++))
import qualified Text.PrettyPrint as Disp
import Text.PrettyPrint ((<>), (<+>))
import qualified Data.Char as Char (isDigit)
import Control.Exception (assert)
-- -----------------------------------------------------------------------------
-- Version ranges
-- Todo: maybe move this to Distribution.Package.Version?
-- (package-specific versioning scheme).
-- | Abstract syntax of version-range expressions.  This is the
-- *syntactic* representation; use 'asVersionIntervals' for a canonical
-- semantic view.
data VersionRange
  = AnyVersion
  | ThisVersion            Version -- == version
  | LaterVersion           Version -- >  version  (NB. strictly greater, not >=)
  | EarlierVersion         Version -- <  version
  | WildcardVersion        Version -- == ver.*  (same as >= ver && < ver+1)
  | UnionVersionRanges     VersionRange VersionRange
  | IntersectVersionRanges VersionRange VersionRange
  | VersionRangeParens     VersionRange -- just '(exp)' parentheses syntax
  deriving (Data, Eq, Generic, Read, Show, Typeable)

-- Uses the 'Generic'-derived default encoding.
instance Binary VersionRange
#if __GLASGOW_HASKELL__ < 707
-- starting with ghc-7.7/base-4.7 this instance is provided in "Data.Data"
deriving instance Data Version
#endif
-- Deriving this instance from Generic gives trouble on GHC 7.2 because the
-- Generic instance has to be standalone-derived. So, we hand-roll our own.
-- We can't use a generic Binary instance on later versions because we must
-- maintain compatibility between compiler versions.
-- Hand-rolled (rather than Generic-derived) so that the serialised
-- format stays stable across compiler versions; see the note above.
instance Binary Version where
  get = do
    br   <- get   -- version branch ([Int])
    tags <- get   -- (deprecated) version tags
    return $ Version br tags
  put (Version br tags) = put br >> put tags
{-# DEPRECATED AnyVersion "Use 'anyVersion', 'foldVersionRange' or 'asVersionIntervals'" #-}
{-# DEPRECATED ThisVersion "use 'thisVersion', 'foldVersionRange' or 'asVersionIntervals'" #-}
{-# DEPRECATED LaterVersion "use 'laterVersion', 'foldVersionRange' or 'asVersionIntervals'" #-}
{-# DEPRECATED EarlierVersion "use 'earlierVersion', 'foldVersionRange' or 'asVersionIntervals'" #-}
{-# DEPRECATED WildcardVersion "use 'anyVersion', 'foldVersionRange' or 'asVersionIntervals'" #-}
{-# DEPRECATED UnionVersionRanges "use 'unionVersionRanges', 'foldVersionRange' or 'asVersionIntervals'" #-}
{-# DEPRECATED IntersectVersionRanges "use 'intersectVersionRanges', 'foldVersionRange' or 'asVersionIntervals'" #-}
-- | The version range @-any@. That is, a version range containing all
-- versions.
--
-- > withinRange v anyVersion = True
--
anyVersion :: VersionRange
anyVersion = AnyVersion  -- direct alias of the constructor
-- | The empty version range, that is a version range containing no versions.
--
-- This can be constructed using any unsatisfiable version range expression,
-- for example @> 1 && < 1@.
--
-- > withinRange v noVersion = False
--
noVersion :: VersionRange
noVersion = IntersectVersionRanges (LaterVersion v) (EarlierVersion v)
  -- Any version works here: @> v && < v@ is unsatisfiable for all v.
  where v = Version [1] []
-- | The version range @== v@
--
-- > withinRange v' (thisVersion v) = v' == v
--
thisVersion :: Version -> VersionRange
thisVersion = ThisVersion

-- | The version range @< v || > v@
--
-- > withinRange v' (notThisVersion v) = v' /= v
--
notThisVersion :: Version -> VersionRange
notThisVersion v = UnionVersionRanges (EarlierVersion v) (LaterVersion v)

-- | The version range @> v@
--
-- > withinRange v' (laterVersion v) = v' > v
--
laterVersion :: Version -> VersionRange
laterVersion = LaterVersion

-- | The version range @>= v@, encoded as @== v || > v@.
--
-- > withinRange v' (orLaterVersion v) = v' >= v
--
orLaterVersion :: Version -> VersionRange
orLaterVersion v = UnionVersionRanges (ThisVersion v) (LaterVersion v)

-- | The version range @< v@
--
-- > withinRange v' (earlierVersion v) = v' < v
--
earlierVersion :: Version -> VersionRange
earlierVersion = EarlierVersion

-- | The version range @<= v@, encoded as @== v || < v@.
--
-- > withinRange v' (orEarlierVersion v) = v' <= v
--
orEarlierVersion :: Version -> VersionRange
orEarlierVersion v = UnionVersionRanges (ThisVersion v) (EarlierVersion v)
-- | The version range @vr1 || vr2@
--
-- > withinRange v' (unionVersionRanges vr1 vr2)
-- > = withinRange v' vr1 || withinRange v' vr2
--
unionVersionRanges :: VersionRange -> VersionRange -> VersionRange
unionVersionRanges = UnionVersionRanges

-- | The version range @vr1 && vr2@
--
-- > withinRange v' (intersectVersionRanges vr1 vr2)
-- >   = withinRange v' vr1 && withinRange v' vr2
--
intersectVersionRanges :: VersionRange -> VersionRange -> VersionRange
intersectVersionRanges = IntersectVersionRanges

-- | The inverse (complement) of a version range
--
-- > withinRange v' (invertVersionRange vr)
-- >   = not (withinRange v' vr)
--
-- Implemented via the canonical interval view: convert to intervals,
-- complement those, and convert back to a range expression.
invertVersionRange :: VersionRange -> VersionRange
invertVersionRange =
  fromVersionIntervals . invertVersionIntervals . VersionIntervals . asVersionIntervals
-- | The version range @== v.*@.
--
-- For example, for version @1.2@, the version range @== 1.2.*@ is the same as
-- @>= 1.2 && < 1.3@
--
-- > withinRange v' (laterVersion v) = v' >= v && v' < upper v
-- > where
-- > upper (Version lower t) = Version (init lower ++ [last lower + 1]) t
--
withinVersion :: Version -> VersionRange
withinVersion = WildcardVersion  -- @== v.*@, i.e. >= v && < (v with last component bumped)
-- | The version range @>= v1 && <= v2@.
--
-- In practice this is not very useful because we normally use inclusive lower
-- bounds and exclusive upper bounds.
--
-- > withinRange v' (laterVersion v) = v' > v
--
-- Deprecated: both bounds inclusive, unlike the usual convention of an
-- inclusive lower and exclusive upper bound.
betweenVersionsInclusive :: Version -> Version -> VersionRange
betweenVersionsInclusive v1 v2 =
  IntersectVersionRanges (orLaterVersion v1) (orEarlierVersion v2)

{-# DEPRECATED betweenVersionsInclusive
    "In practice this is not very useful because we normally use inclusive lower bounds and exclusive upper bounds" #-}
-- | Given a version range, remove the highest upper bound. Example: @(>= 1 && <
-- 3) || (>= 4 && < 5)@ is converted to @(>= 1 && < 3) || (>= 4)@.
removeUpperBound :: VersionRange -> VersionRange
-- Work in the canonical interval view: only the final (highest)
-- interval has its upper bound dropped; earlier intervals are kept.
removeUpperBound = fromVersionIntervals . relaxLastInterval . toVersionIntervals
  where
    relaxLastInterval (VersionIntervals intervals) =
      VersionIntervals (relaxLastInterval' intervals)

    relaxLastInterval' []         = []
    relaxLastInterval' [(l,_)]    = [(l, NoUpperBound)]
    relaxLastInterval' (i:is)     = i : relaxLastInterval' is
-- | Fold over the basic syntactic structure of a 'VersionRange'.
--
-- This provides a syntactic view of the expression defining the version range.
-- The syntactic sugar @\">= v\"@, @\"<= v\"@ and @\"== v.*\"@ is presented
-- in terms of the other basic syntax.
--
-- For a semantic view use 'asVersionIntervals'.
--
foldVersionRange :: a                 -- ^ @\"-any\"@ version
                 -> (Version -> a)    -- ^ @\"== v\"@
                 -> (Version -> a)    -- ^ @\"> v\"@
                 -> (Version -> a)    -- ^ @\"< v\"@
                 -> (a -> a -> a)     -- ^ @\"_ || _\"@ union
                 -> (a -> a -> a)     -- ^ @\"_ && _\"@ intersection
                 -> VersionRange -> a
foldVersionRange anyv this later earlier union intersect = fold
  where
    fold AnyVersion                     = anyv
    fold (ThisVersion v)                = this v
    fold (LaterVersion v)               = later v
    fold (EarlierVersion v)             = earlier v
    -- Wildcards are desugared into the equivalent bounded range.
    fold (WildcardVersion v)            = fold (wildcard v)
    fold (UnionVersionRanges v1 v2)     = union (fold v1) (fold v2)
    fold (IntersectVersionRanges v1 v2) = intersect (fold v1) (fold v2)
    -- Parentheses are purely syntactic and do not affect the result.
    fold (VersionRangeParens v)         = fold v

    -- @== v.*@  ==>  @>= v && < v'@ where v' has the last component bumped.
    wildcard v = intersectVersionRanges
                   (orLaterVersion v)
                   (earlierVersion (wildcardUpperBound v))
-- | An extended variant of 'foldVersionRange' that also provides a view of
-- in which the syntactic sugar @\">= v\"@, @\"<= v\"@ and @\"== v.*\"@ is presented
-- explicitly rather than in terms of the other basic syntax.
--
foldVersionRange' :: a                         -- ^ @\"-any\"@ version
                  -> (Version -> a)            -- ^ @\"== v\"@
                  -> (Version -> a)            -- ^ @\"> v\"@
                  -> (Version -> a)            -- ^ @\"< v\"@
                  -> (Version -> a)            -- ^ @\">= v\"@
                  -> (Version -> a)            -- ^ @\"<= v\"@
                  -> (Version -> Version -> a) -- ^ @\"== v.*\"@ wildcard. The
                                               -- function is passed the
                                               -- inclusive lower bound and the
                                               -- exclusive upper bounds of the
                                               -- range defined by the wildcard.
                  -> (a -> a -> a)             -- ^ @\"_ || _\"@ union
                  -> (a -> a -> a)             -- ^ @\"_ && _\"@ intersection
                  -> (a -> a)                  -- ^ @\"(_)\"@ parentheses
                  -> VersionRange -> a
foldVersionRange' anyv this later earlier orLater orEarlier
                  wildcard union intersect parens = fold
  where
    fold AnyVersion                     = anyv
    fold (ThisVersion v)                = this v
    fold (LaterVersion v)               = later v
    fold (EarlierVersion v)             = earlier v
    -- NB: these four sugar cases must precede the generic
    -- 'UnionVersionRanges' case below, or they would never match.
    fold (UnionVersionRanges (ThisVersion    v)
                             (LaterVersion   v')) | v==v' = orLater v
    fold (UnionVersionRanges (LaterVersion   v)
                             (ThisVersion    v')) | v==v' = orLater v
    fold (UnionVersionRanges (ThisVersion    v)
                             (EarlierVersion v')) | v==v' = orEarlier v
    fold (UnionVersionRanges (EarlierVersion v)
                             (ThisVersion    v')) | v==v' = orEarlier v
    fold (WildcardVersion v)            = wildcard v (wildcardUpperBound v)
    fold (UnionVersionRanges v1 v2)     = union (fold v1) (fold v2)
    fold (IntersectVersionRanges v1 v2) = intersect (fold v1) (fold v2)
    fold (VersionRangeParens v)         = parens (fold v)
-- | Does this version fall within the given range?
--
-- This is the evaluation function for the 'VersionRange' type.
--
withinRange :: Version -> VersionRange -> Bool
withinRange v = foldVersionRange True (cmp (==)) (cmp (>)) (cmp (<)) (||) (&&)
  where
    -- All comparisons are on the version branch only; tags are ignored.
    cmp op v' = versionBranch v `op` versionBranch v'
-- | View a 'VersionRange' as a union of intervals.
--
-- This provides a canonical view of the semantics of a 'VersionRange' as
-- opposed to the syntax of the expression used to define it. For the
-- syntactic view use 'foldVersionRange'.
--
-- Each interval is non-empty. The sequence is in increasing order and no
-- intervals overlap or touch, so only the first and last can be unbounded.
-- The sequence is empty exactly when the range is empty
-- (e.g. a range expression like @< 1 && > 2@).
--
-- Other checks are trivial to implement using this view. For example,
-- emptiness is just a null check, and a specific version is a singleton
-- interval with equal inclusive bounds.
--
asVersionIntervals :: VersionRange -> [VersionInterval]
asVersionIntervals vr = versionIntervals (toVersionIntervals vr)
-- | Does this 'VersionRange' place any restriction on the 'Version' or is it
-- in fact equivalent to 'AnyVersion'.
--
-- This is a semantic check, not a syntactic one: any expression whose
-- interval view is a single interval from version @0@ (inclusive) upwards
-- with no upper bound counts, e.g. (for all @v@):
--
-- > isAnyVersion (EarlierVersion v `UnionVersionRanges` orLaterVersion v)
--
isAnyVersion :: VersionRange -> Bool
isAnyVersion vr =
  case asVersionIntervals vr of
    [(LowerBound v InclusiveBound, NoUpperBound)] -> isVersion0 v
    _                                             -> False
-- | The converse of 'isAnyVersion': checks whether the version range is
-- empty, i.e. there is no possible version that satisfies it.
--
-- For example this is @True@ (for all @v@):
--
-- > isNoVersion (EarlierVersion v `IntersectVersionRanges` LaterVersion v)
--
isNoVersion :: VersionRange -> Bool
isNoVersion = null . asVersionIntervals
-- | Is this version range in fact just a specific version?
--
-- For example the version range @\">= 3 && <= 3\"@ contains only the version
-- @3@.
--
isSpecificVersion :: VersionRange -> Maybe Version
isSpecificVersion vr
  | [(LowerBound v InclusiveBound, UpperBound v' InclusiveBound)]
      <- asVersionIntervals vr
  , v == v'   = Just v
  | otherwise = Nothing
-- | Simplify a 'VersionRange' expression. For non-empty version ranges
-- this produces a canonical form. Empty or inconsistent version ranges
-- are left as-is because that provides more information.
--
-- If you need a canonical form use
-- @fromVersionIntervals . toVersionIntervals@
--
-- It satisfies the following properties:
--
-- > withinRange v (simplifyVersionRange r) = withinRange v r
--
-- > withinRange v r = withinRange v r'
-- >       ==> simplifyVersionRange r = simplifyVersionRange r'
-- >        || isNoVersion r
-- >        || isNoVersion r'
--
simplifyVersionRange :: VersionRange -> VersionRange
simplifyVersionRange vr
    -- If the version range is inconsistent then we just return the
    -- original since that has more information than ">1 && < 1", which
    -- is the canonical inconsistent version range.
  | null (versionIntervals vi) = vr
  | otherwise                  = fromVersionIntervals vi
  where
    -- The canonical interval view, computed once and shared by both guards.
    vi = toVersionIntervals vr
----------------------------
-- Wildcard range utilities
--
-- | The exclusive upper bound implied by a wildcard pattern: bump the last
-- branch component, e.g. the range @1.2.*@ has upper bound @1.3@.  Tags are
-- carried through unchanged.
wildcardUpperBound :: Version -> Version
wildcardUpperBound (Version branch ts) =
  Version (init branch ++ [last branch + 1]) ts
-- | Do the two versions delimit a wildcard interval, i.e. do they share a
-- common prefix with final components differing by exactly one?
isWildcardRange :: Version -> Version -> Bool
isWildcardRange (Version branch1 _) (Version branch2 _) = go branch1 branch2
  where
    go [n]    [m]             = n + 1 == m
    go (n:ns) (m:ms) | n == m = go ns ms
    go _      _               = False
------------------
-- Intervals view
--
-- | A complementary representation of a 'VersionRange'. Instead of a boolean
-- version predicate it uses an increasing sequence of non-overlapping,
-- non-empty intervals.
--
-- The key point is that this representation gives a canonical representation
-- for the semantics of 'VersionRange's. This makes it easier to check things
-- like whether a version range is empty, covers all versions, or requires a
-- certain minimum or maximum version. It also makes it easy to check equality
-- or containment. It also makes it easier to identify \'simple\' version
-- predicates for translation into foreign packaging systems that do not
-- support complex version range expressions.
--
newtype VersionIntervals = VersionIntervals [VersionInterval]
  deriving (Eq, Show)
-- | Inspect the list of version intervals.
--
versionIntervals :: VersionIntervals -> [VersionInterval]
versionIntervals (VersionIntervals is) = is
type VersionInterval = (LowerBound, UpperBound)
-- A lower bound is always a concrete version ('minLowerBound' plays the role
-- of "no lower bound"); an upper bound may genuinely be absent.
data LowerBound =                LowerBound Version !Bound deriving (Eq, Show)
data UpperBound = NoUpperBound | UpperBound Version !Bound deriving (Eq, Show)
data Bound      = ExclusiveBound | InclusiveBound          deriving (Eq, Show)
-- The least possible lower bound: version 0, inclusive.
minLowerBound :: LowerBound
minLowerBound = LowerBound (Version [0] []) InclusiveBound
-- Is the version branch exactly @[0]@?  (Tags are ignored.)
isVersion0 :: Version -> Bool
isVersion0 (Version [0] _) = True
isVersion0 _               = False
-- Lower bounds are ordered so that at equal versions an exclusive bound is
-- strictly greater than an inclusive one (the exclusive interval starts
-- later).
instance Ord LowerBound where
  LowerBound ver bound <= LowerBound ver' bound' = case compare ver ver' of
    LT -> True
    EQ -> not (bound == ExclusiveBound && bound' == InclusiveBound)
    GT -> False
-- Upper bounds are ordered so that 'NoUpperBound' is the greatest element,
-- and at equal versions an inclusive bound is strictly greater than an
-- exclusive one (the inclusive interval ends later).
instance Ord UpperBound where
  _            <= NoUpperBound   = True
  NoUpperBound <= UpperBound _ _ = False
  UpperBound ver bound <= UpperBound ver' bound' = case compare ver ver' of
    LT -> True
    EQ -> not (bound == InclusiveBound && bound' == ExclusiveBound)
    GT -> False
-- The internal invariant of 'VersionIntervals': every interval is
-- well-formed and non-empty, and consecutive intervals neither overlap nor
-- touch, so the sequence is strictly increasing and canonical.
invariant :: VersionIntervals -> Bool
invariant (VersionIntervals intervals) = all validInterval intervals
                                      && all doesNotTouch' adjacentIntervals
  where
    doesNotTouch' :: (VersionInterval, VersionInterval) -> Bool
    doesNotTouch' ((_,u), (l',_)) = doesNotTouch u l'
    -- Each interval paired with its successor.
    adjacentIntervals :: [(VersionInterval, VersionInterval)]
    adjacentIntervals
      | null intervals = []
      | otherwise      = zip intervals (tail intervals)
-- Assert the invariant (active only when assertions are enabled) and return
-- the value unchanged.
checkInvariant :: VersionIntervals -> VersionIntervals
checkInvariant is = assert (invariant is) is
-- | Directly construct a 'VersionIntervals' from a list of intervals.
--
-- Each interval must be non-empty. The sequence must be in increasing order
-- and no intervals may overlap or touch. If any of these conditions are not
-- satisfied the function returns @Nothing@.
--
mkVersionIntervals :: [VersionInterval] -> Maybe VersionIntervals
mkVersionIntervals intervals
  | invariant candidate = Just candidate
  | otherwise           = Nothing
  where
    candidate = VersionIntervals intervals
-- A version is valid when it has at least one branch component and no
-- negative components.
validVersion :: Version -> Bool
validVersion (Version [] _) = False
validVersion (Version vs _) = all (>=0) vs
-- An interval is valid when both of its bounds carry valid versions and the
-- interval contains at least one version.
validInterval :: (LowerBound, UpperBound) -> Bool
validInterval i@(l, u) = validLower l && validUpper u && nonEmpty i
  where
    validLower (LowerBound v _) = validVersion v
    validUpper NoUpperBound     = True
    validUpper (UpperBound v _) = validVersion v
-- Check an interval is non-empty
--
-- Equal endpoints only yield a (singleton) interval when both ends are
-- inclusive.
nonEmpty :: VersionInterval -> Bool
nonEmpty (_,               NoUpperBound   ) = True
nonEmpty (LowerBound l lb, UpperBound u ub) =
  (l < u) || (l == u && lb == InclusiveBound && ub == InclusiveBound)
-- Check an upper bound does not intersect, or even touch a lower bound:
--
--   ---| or ---)     but not  ---] or ---) or ---]
--   |---    (---              |---    [---    [---
--
-- An absent upper bound extends to infinity and therefore reaches every
-- lower bound, hence the 'False' in the first clause.
doesNotTouch :: UpperBound -> LowerBound -> Bool
doesNotTouch NoUpperBound _ = False
doesNotTouch (UpperBound u ub) (LowerBound l lb) =
      u <  l
  || (u == l && ub == ExclusiveBound && lb == ExclusiveBound)
-- | Check an upper bound does not intersect a lower bound:
--
--   ---| or ---) or ---] or ---)     but not  ---]
--   |---    (---    (---    [---              [---
--
-- Touching is allowed here: only an overlap where both ends are inclusive
-- counts as an intersection.
doesNotIntersect :: UpperBound -> LowerBound -> Bool
doesNotIntersect NoUpperBound _ = False
doesNotIntersect (UpperBound u ub) (LowerBound l lb) =
      u <  l
  || (u == l && not (ub == InclusiveBound && lb == InclusiveBound))
-- | Test if a version falls within the version intervals.
--
-- It exists mostly for completeness and testing. It satisfies the following
-- properties:
--
-- > withinIntervals v (toVersionIntervals vr) = withinRange v vr
-- > withinIntervals v ivs = withinRange v (fromVersionIntervals ivs)
--
withinIntervals :: Version -> VersionIntervals -> Bool
withinIntervals v (VersionIntervals intervals) = any containsV intervals
  where
    containsV (lo, hi) = aboveLower lo && belowUpper hi
    aboveLower (LowerBound v' b) = case b of
      ExclusiveBound -> v' < v
      InclusiveBound -> v' <= v
    belowUpper NoUpperBound      = True
    belowUpper (UpperBound v' b) = case b of
      ExclusiveBound -> v' > v
      InclusiveBound -> v' >= v
-- | Convert a 'VersionRange' to a sequence of version intervals.
--
toVersionIntervals :: VersionRange -> VersionIntervals
toVersionIntervals = foldVersionRange
  (        chkIvl (minLowerBound,               NoUpperBound))
  (\v   -> chkIvl (LowerBound v InclusiveBound, UpperBound v InclusiveBound))
  (\v   -> chkIvl (LowerBound v ExclusiveBound, NoUpperBound))
  -- "< 0" is unsatisfiable, so it maps to the empty interval set.
  (\v   -> if isVersion0 v then VersionIntervals [] else
           chkIvl (minLowerBound,               UpperBound v ExclusiveBound))
  unionVersionIntervals
  intersectVersionIntervals
  where
    -- Wrap a single interval, asserting the representation invariant.
    chkIvl interval = checkInvariant (VersionIntervals [interval])
-- | Convert a 'VersionIntervals' value back into a 'VersionRange' expression
-- representing the version intervals.
--
fromVersionIntervals :: VersionIntervals -> VersionRange
fromVersionIntervals (VersionIntervals []) = noVersion
fromVersionIntervals (VersionIntervals intervals) =
    foldr1 UnionVersionRanges [ interval l u | (l, u) <- intervals ]
  where
    -- NOTE: clause order matters: the exact-version and wildcard sugar must
    -- be recognised before the general bounds clause.
    interval (LowerBound v  InclusiveBound)
             (UpperBound v' InclusiveBound) | v == v'
                 = ThisVersion v
    interval (LowerBound v  InclusiveBound)
             (UpperBound v' ExclusiveBound) | isWildcardRange v v'
                 = WildcardVersion v
    interval l u = lowerBound l `intersectVersionRanges'` upperBound u
    lowerBound (LowerBound v InclusiveBound)
      | isVersion0 v = AnyVersion
      | otherwise    = orLaterVersion v
    lowerBound (LowerBound v ExclusiveBound) = LaterVersion v
    upperBound NoUpperBound                  = AnyVersion
    upperBound (UpperBound v InclusiveBound) = orEarlierVersion v
    upperBound (UpperBound v ExclusiveBound) = EarlierVersion v
    -- Drop the redundant "&& -any" conjuncts that unbounded ends produce.
    intersectVersionRanges' vr AnyVersion = vr
    intersectVersionRanges' AnyVersion vr = vr
    intersectVersionRanges' vr vr' = IntersectVersionRanges vr vr'
-- Union of two canonical interval sequences, by an ordered-list merge that
-- combines overlapping or touching intervals as it goes.
unionVersionIntervals :: VersionIntervals -> VersionIntervals
                      -> VersionIntervals
unionVersionIntervals (VersionIntervals is0) (VersionIntervals is'0) =
  checkInvariant (VersionIntervals (union is0 is'0))
  where
    union is [] = is
    union [] is' = is'
    union (i:is) (i':is') = case unionInterval i i' of
      Left  Nothing    -> i  : union is (i' :is')
      Left  (Just i'') ->      union is (i'':is')
      Right Nothing    -> i' : union (i :is) is'
      Right (Just i'') ->      union (i'':is) is'
    -- 'Left' means the left interval comes first: 'Nothing' when it is
    -- disjoint and can be emitted as-is, 'Just' when the two merge into one
    -- interval that may still merge with later ones.  'Right' is symmetric.
    unionInterval :: VersionInterval -> VersionInterval
                  -> Either (Maybe VersionInterval) (Maybe VersionInterval)
    unionInterval (lower , upper ) (lower', upper')
      -- Non-intersecting intervals with the left interval ending first
      | upper `doesNotTouch` lower' = Left Nothing
      -- Non-intersecting intervals with the right interval first
      | upper' `doesNotTouch` lower = Right Nothing
      -- Complete or partial overlap, with the left interval ending first
      | upper <= upper' = lowerBound `seq`
                          Left (Just (lowerBound, upper'))
      -- Complete or partial overlap, with the right interval ending first
      | otherwise = lowerBound `seq`
                    Right (Just (lowerBound, upper))
      where
        lowerBound = min lower lower'
-- Intersection of two canonical interval sequences, by an ordered-list walk;
-- either list running out means no further overlap is possible.
intersectVersionIntervals :: VersionIntervals -> VersionIntervals
                          -> VersionIntervals
intersectVersionIntervals (VersionIntervals is0) (VersionIntervals is'0) =
  checkInvariant (VersionIntervals (intersect is0 is'0))
  where
    intersect _  [] = []
    intersect [] _  = []
    intersect (i:is) (i':is') = case intersectInterval i i' of
      Left  Nothing    ->       intersect is (i':is')
      Left  (Just i'') -> i'' : intersect is (i':is')
      Right Nothing    ->       intersect (i:is) is'
      Right (Just i'') -> i'' : intersect (i:is) is'
    -- 'Left'/'Right' says which input interval ends first (and can therefore
    -- be discarded); 'Just' carries the overlap, if there is one.
    intersectInterval :: VersionInterval -> VersionInterval
                      -> Either (Maybe VersionInterval) (Maybe VersionInterval)
    intersectInterval (lower , upper ) (lower', upper')
      -- Non-intersecting intervals with the left interval ending first
      | upper `doesNotIntersect` lower' = Left Nothing
      -- Non-intersecting intervals with the right interval first
      | upper' `doesNotIntersect` lower = Right Nothing
      -- Complete or partial overlap, with the left interval ending first
      | upper <= upper' = lowerBound `seq`
                          Left (Just (lowerBound, upper))
      -- Complete or partial overlap, with the right interval ending first
      | otherwise = lowerBound `seq`
                    Right (Just (lowerBound, upper'))
      where
        lowerBound = max lower lower'
-- Complement of an interval set: the versions NOT covered by the input.
invertVersionIntervals :: VersionIntervals
                       -> VersionIntervals
invertVersionIntervals (VersionIntervals xs) =
  case xs of
    -- Empty interval set
    [] -> VersionIntervals [(noLowerBound, NoUpperBound)]
    -- Interval with no lower bound
    ((lb, ub) : more) | lb == noLowerBound -> VersionIntervals $ invertVersionIntervals' ub more
    -- Interval with a lower bound
    ((lb, ub) : more) ->
        VersionIntervals $ (noLowerBound, invertLowerBound lb) : invertVersionIntervals' ub more
  where
    -- Invert subsequent version intervals given the upper bound of
    -- the intervals already inverted.
    invertVersionIntervals' :: UpperBound
                            -> [(LowerBound, UpperBound)]
                            -> [(LowerBound, UpperBound)]
    invertVersionIntervals' NoUpperBound [] = []
    invertVersionIntervals' ub0 [] = [(invertUpperBound ub0, NoUpperBound)]
    invertVersionIntervals' ub0 [(lb, NoUpperBound)] =
      [(invertUpperBound ub0, invertLowerBound lb)]
    invertVersionIntervals' ub0 ((lb, ub1) : more) =
      (invertUpperBound ub0, invertLowerBound lb)
        : invertVersionIntervals' ub1 more
-- Bounds swap sides of the complement and flip inclusivity.
invertLowerBound :: LowerBound -> UpperBound
invertLowerBound (LowerBound v b) = UpperBound v (invertBound b)
invertUpperBound :: UpperBound -> LowerBound
invertUpperBound (UpperBound v b) = LowerBound v (invertBound b)
-- Unreachable while the 'VersionIntervals' invariant holds: an unbounded
-- upper bound can only occur on the last interval ('doesNotTouch' rejects
-- 'NoUpperBound' followed by anything), and callers never invert that one.
invertUpperBound NoUpperBound = error "NoUpperBound: unexpected"
invertBound :: Bound -> Bound
invertBound ExclusiveBound = InclusiveBound
invertBound InclusiveBound = ExclusiveBound
-- Version 0 inclusive, acting as "no lower bound" (same value as
-- 'minLowerBound').
noLowerBound :: LowerBound
noLowerBound = LowerBound (Version [0] []) InclusiveBound
-------------------------------
-- Parsing and pretty printing
--
instance Text VersionRange where
  -- Rendering folds each sub-expression to a (document, precedence) pair;
  -- 0 = atom, 1 = "&&", 2 = "||".  'punct' parenthesises a child whose
  -- precedence is looser than its context.
  disp = fst
       . foldVersionRange'                         -- precedence:
           (         Disp.text "-any"                           , 0 :: Int)
           (\v   -> (Disp.text "==" <> disp v                   , 0))
           (\v   -> (Disp.char '>'  <> disp v                   , 0))
           (\v   -> (Disp.char '<'  <> disp v                   , 0))
           (\v   -> (Disp.text ">=" <> disp v                   , 0))
           (\v   -> (Disp.text "<=" <> disp v                   , 0))
           (\v _ -> (Disp.text "==" <> dispWild v               , 0))
           (\(r1, p1) (r2, p2) -> (punct 2 p1 r1 <+> Disp.text "||" <+> punct 2 p2 r2 , 2))
           (\(r1, p1) (r2, p2) -> (punct 1 p1 r1 <+> Disp.text "&&" <+> punct 1 p2 r2 , 1))
           (\(r, _)  -> (Disp.parens r, 0))
    where dispWild (Version b _) =
               Disp.hcat (Disp.punctuate (Disp.char '.') (map Disp.int b))
            <> Disp.text ".*"
          punct p p' | p < p'    = Disp.parens
                     | otherwise = id
  -- Grammar: expr ::= term ("||" expr)? ; term ::= factor ("&&" term)? ;
  -- factor ::= "(" expr ")" | "-any" | "-none" | wildcard | op version.
  parse = expr
    where
      expr = do Parse.skipSpaces
                t <- term
                Parse.skipSpaces
                (do _ <- Parse.string "||"
                    Parse.skipSpaces
                    e <- expr
                    return (UnionVersionRanges t e)
                 +++
                 return t)
      term = do f <- factor
                Parse.skipSpaces
                (do _ <- Parse.string "&&"
                    Parse.skipSpaces
                    t <- term
                    return (IntersectVersionRanges f t)
                 +++
                 return f)
      factor = Parse.choice $ parens expr
                            : parseAnyVersion
                            : parseNoVersion
                            : parseWildcardRange
                            : map parseRangeOp rangeOps
      parseAnyVersion    = Parse.string "-any" >> return AnyVersion
      parseNoVersion     = Parse.string "-none" >> return noVersion
      parseWildcardRange = do
        _ <- Parse.string "=="
        Parse.skipSpaces
        branch <- Parse.sepBy1 digits (Parse.char '.')
        _ <- Parse.char '.'
        _ <- Parse.char '*'
        return (WildcardVersion (Version branch []))
      parens p = Parse.between (Parse.char '(' >> Parse.skipSpaces)
                               (Parse.char ')' >> Parse.skipSpaces)
                               (do a <- p
                                   Parse.skipSpaces
                                   return (VersionRangeParens a))
      -- A component starting with '0' is exactly 0 and consumes no further
      -- digits, which effectively forbids leading zeros.
      digits = do
        first <- Parse.satisfy Char.isDigit
        if first == '0'
          then return 0
          else do rest <- Parse.munch Char.isDigit
                  return (read (first : rest))
      parseRangeOp (s,f) = Parse.string s >> Parse.skipSpaces >> fmap f parse
      rangeOps = [ ("<",  EarlierVersion),
                   ("<=", orEarlierVersion),
                   (">",  LaterVersion),
                   (">=", orLaterVersion),
                   ("==", ThisVersion) ]
| rimmington/cabal | Cabal/Distribution/Version.hs | bsd-3-clause | 31,346 | 0 | 17 | 8,476 | 6,174 | 3,313 | 2,861 | 439 | 12 |
-- !!! ds003 -- list, tuple, lazy, as patterns
--
module ShouldCompile where
-- 'f' deliberately mixes list, tuple, lazy (~) and as-patterns across
-- overlapping clauses; this module only checks that such patterns desugar
-- and compile.
f [] y True = []
f x a@(y,ys) ~z = []
f (x:x1:x2:x3) ~(y,ys) z = []
f x y True = []
| urbanslug/ghc | testsuite/tests/deSugar/should_compile/ds003.hs | bsd-3-clause | 183 | 8 | 9 | 59 | 104 | 56 | 48 | 5 | 1 |
module TokenParser
-- For when you want to use a string as your regular expression.
( parseTokenDefinition
) where
import Token
import Error
import Control.Monad (sequence)
-- this could be generalized to any language instead of strings, but why would you?
-- use Control.Monad.sequence to concatenate the results if you create a list of these.
-- | Parse a textual (regex-style) token definition into a 'TokenDefinition'.
-- Currently a stub: always fails with 'NotYetImplementedError'.
parseTokenDefinition :: tokenNames -> String -> TokenType -> Result Char (TokenDefinition tokenNames Char)
parseTokenDefinition _ _ _ = Left $ NotYetImplementedError "Regex Parsing"
| mrwonko/wonkococo | wcc/TokenParser.hs | mit | 541 | 0 | 10 | 92 | 77 | 43 | 34 | 7 | 1 |
{- ifl-haskell: "Implementing Functional Languages: a tutorial" in Haskell.
Copyright 2014 Nikita Karetnikov <nikita@karetnikov.org>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
-}
module Core.PPrinter.Tests (tests) where
import Test.HUnit (Test(..), (~=?))
import Core.Language
import Core.PPrinter
-- Golden tests for the core-language pretty printer: each case pairs the
-- expected rendered text with 'ppr' applied to an AST value.
tests = TestList $
  [ TestLabel "Exercise 1.6: 'ELet' indentation" $
    "let\n x = 42\nin x" ~=?
    (ppr $ ELet nonRecursive [("x", ENum 42)] (EVar "x"))
  , TestLabel "'ELet': letrec" $
    "letrec\n x = 42\nin x" ~=?
    (ppr $ ELet recursive [("x", ENum 42)] (EVar "x"))
  , TestLabel "'ELet': two bindings" $
    "let\n x = 42;\n y = 43\nin x" ~=?
    (ppr $ ELet nonRecursive [("x", ENum 42), ("y", ENum 43)] (EVar "x"))
  , TestLabel "'ELet': nested binding" $
    "let\n x = let\n x' = 42\n in x';\n y = 43\nin x" ~=?
    (ppr $ ELet nonRecursive
       [ ("x", (ELet nonRecursive [("x'", ENum 42)] (EVar "x'")))
       , ("y", ENum 43)
       ] (EVar "x"))
  , TestLabel "'ELet': nested expression" $
    -- XXX: shouldn't it return "let\n x = 42\nin let\n y = 43\n in y"?
    "let\n x = 42\nin let\n y = 43\nin y" ~=?
    (ppr $ ELet nonRecursive [("x", ENum 42)]
       (ELet nonRecursive [("y", ENum 43)] (EVar "y")))
  , TestLabel "'ECase': trivial" $
    "case 42 of\n 1 x -> x" ~=?
    (ppr $ ECase (ENum 42) [(1, ["x"], EVar "x")])
  , TestLabel "'ECase': no variables" $
    "case 42 of\n 1 -> 43" ~=?
    (ppr $ ECase (ENum 42) [(1, [], ENum 43)])
  , TestLabel "'ECase': two cases" $
    "case 42 of\n 1 x y -> y;\n 1 x -> x" ~=?
    (ppr $ ECase (ENum 42) [(1, ["x","y"], EVar "y"), (1, ["x"], EVar "x")])
  , TestLabel "'ECase': nested case" $
    "case 42 of\n 1 x y -> case y of\n 1 -> 43;\n 1 x -> x" ~=?
    (ppr $ ECase (ENum 42)
       [ (1, ["x","y"], ECase (EVar "y") [(1, [], ENum 43)])
       , (1, ["x"], EVar "x")
       ])
  , TestLabel "Precedence: parentheses" $
    "(x + y) * z" ~=?
    (ppr $ EAp (EAp (EVar "*")
                    (EAp (EAp (EVar "+")
                              (EVar "x"))
                         (EVar "y")))
               (EVar "z"))
  , TestLabel "Precedence: no parentheses" $
    "x + y * z" ~=?
    (ppr $ EAp (EAp (EVar "+")
                    (EVar "x"))
               (EAp (EAp (EVar "*")
                         (EVar "y"))
                    (EVar "z")))
  , TestLabel "Precedence: prefix function application" $
    "x + f y z" ~=?
    (ppr $ EAp (EAp (EVar "+")
                    (EVar "x"))
               (EAp (EAp (EVar "f")
                         (EVar "y"))
                    (EVar "z")))
  , TestLabel "Exercise 1.8: Infix operator application" $
    "x + y > p * length xs" ~=?
    (ppr $ EAp (EAp (EVar ">")
                    (EAp (EAp (EVar "+")
                              (EVar "x"))
                         (EVar "y")))
               (EAp (EAp (EVar "*")
                         (EVar "p"))
                    (EAp (EVar "length")
                         (EVar "xs"))))
  , TestLabel "'ELam' indentation" $
    "\\x y z .\n x" ~=?
    (ppr $ ELam ["x","y","z"] (EVar "x"))
  ]
where
ppr = iDisplay . flip pprExpr 0 | binesiyu/ifl | src/Core/PPrinter/Tests.hs | mit | 3,841 | 0 | 18 | 1,315 | 1,038 | 552 | 486 | 73 | 1 |
{-# LANGUAGE DeriveFoldable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE OverloadedStrings #-}
module Confetti where
import Control.Applicative
import Control.Exception
import Control.Monad
import Data.Either.Utils
import Data.List
import Data.List.Utils
import Data.Maybe
import Data.Monoid
import Data.Time.Clock.POSIX
import Data.Yaml
import GHC.Generics
import System.Directory
import System.FilePath.Posix
import System.IO.Error
import System.Posix.Files
import Text.Printf
import qualified Data.Text as T
-- Errors from parsing or validating .confetti.yml
data ParseError a = ConfettiYamlNotFound | GroupNotFound a | ConfettiYamlInvalid a | DuplicateNameError a deriving (Generic)
-- User-facing rendering of each parse/validation failure.
instance (Show a) =>
         Show (ParseError a) where
  show (ConfettiYamlInvalid a) =
    "There was an issue parsing your ~/.confetti.yml: " ++ show a
  show (GroupNotFound a) =
    printf "No match for group %s found in your ~/.confetti.yml: " (show a)
  show ConfettiYamlNotFound =
    "No confetti spec file found! See https://github.com/aviaviavi/confetti if you need help setting one up"
  show (DuplicateNameError a) =
    printf
      "Multiple targets in this group share the same name: %s.\
      \ Confetti doesn't yet know how to figure out target to link a search match to,\
      \ but a future version will. Try breaking these targets into multiple groups."
      (show a)
-- Errors from applying our config spec
data ApplyError a = VariantsMissing [a] | VariantAlreadyExists [a] deriving (Foldable)
instance (Show a) => Show (ApplyError a) where
  show (VariantsMissing a) = "Couldn't find one or more of your variant files to use: " ++ show a
  show (VariantAlreadyExists a) = printf "Target(s) %s already exists as a regular file. To backup and then symlink, use the -f flag when invoking confetti" $ show a
-- NOTE(review): partial -- only 'VariantAlreadyExists' is handled; combining
-- any 'VariantsMissing' value is a pattern-match failure.  Safe today because
-- 'concatVariantExists' is only applied to 'VariantAlreadyExists' values
-- (see 'applySpec'), but worth making total.
appendVariantExists :: ApplyError a -> ApplyError a -> ApplyError a
appendVariantExists (VariantAlreadyExists a) (VariantAlreadyExists b) =
  VariantAlreadyExists $ a ++ b
-- Fold many 'VariantAlreadyExists' errors into one, concatenating the
-- offending file lists.  Inherits the partiality of 'appendVariantExists'.
concatVariantExists :: [ApplyError a] -> ApplyError a
concatVariantExists = foldr appendVariantExists (VariantAlreadyExists [])
-- 'Nothing' when the error carries no files (via the derived 'Foldable'),
-- otherwise the error itself.
maybeApplyError :: ApplyError a -> Maybe (ApplyError a)
maybeApplyError err = if null err then Nothing else Just err
-- applyErrorToMaybe :: ApplyError a -> Maybe (ApplyError a)
-- applyErrorToMaybe a = fmap listToMaybe
-- A config file version we want to swap in or out
type ConfigVariant = String
-- The config file version we want to swap in or out, with no directory
-- information, eg `credentials`, not `~/.aws/credentials`
type ConfigVariantFileName = String
-- The prefix specifies how we construct a variant.
-- If the target is config.json, and we want to link local.config.json,
-- (Just "local") would be the prefix
type ConfigVariantPrefix = Maybe String
-- Small helper for printing prefix
-- (note: 'show' quotes the prefix string in the output)
showPrefix :: ConfigVariantPrefix -> String
showPrefix prefix = case prefix of
  (Just p) -> show p
  Nothing -> "bare matches in search paths"
-- The actual config file in question.
type ConfigTarget = String
-- Lets us specify paths where the variant files in a group can be found.
data SearchPath = SearchPath
  { path :: FilePath
  , recursive :: Maybe Bool -- 'Nothing' is treated as a non-recursive search
  } deriving (Show, Eq, Generic)
instance FromJSON SearchPath
-- Represents a search result for a given target, variant and directory
data VariantSearch = VariantSearch
  { searchDirectory :: FilePath
  , fileName :: ConfigVariantFileName
  , recursiveSearch :: Bool
  , result :: Maybe ConfigVariant -- populated when find the file we want to swap in
  , linkToCreate :: ConfigTarget
  } deriving (Show, Generic)
-- A set of configuration targets
data ConfigGroup = ConfigGroup
  { name :: T.Text
  , targets :: [FilePath]
  , searchPaths :: Maybe [SearchPath]
  } deriving (Show, Generic)
-- YAML keys: "name", "targets", optional "search_paths".
instance FromJSON ConfigGroup where
  parseJSON (Object x) =
    ConfigGroup <$> x .: "name" <*> x .: "targets" <*> x .:? "search_paths"
  parseJSON _ = fail "Expected an object"
-- The optional `common` section: targets/search paths merged into every
-- group at parse time (see 'appendCommonGroup').
data CommonConfigGroup = CommonConfigGroup
  { commonTargets :: [FilePath]
  , commonSearchPaths :: Maybe [SearchPath]
  } deriving (Show, Generic)
instance FromJSON CommonConfigGroup where
  parseJSON (Object x) =
    CommonConfigGroup <$> x .: "targets" <*> x .:? "search_paths"
  parseJSON _ = fail "Expected an object"
-- A valid .confetti.yml gets parsed into this structure
data ParsedSpecFile = ParsedSpecFile
  { groups :: [ConfigGroup]
  , commonGroup :: Maybe CommonConfigGroup
  } deriving (Show, Generic)
instance FromJSON ParsedSpecFile where
  parseJSON (Object x) =
    ParsedSpecFile <$> x .: "groups" <*> x .:? "common"
  parseJSON _ = fail "Expected an object"
-- A full config specification:
-- a group of files, and a variant to swap in for
-- every target in the group
data ConfigSpec = ConfigSpec
  { configGroup :: ConfigGroup
  , configVariantPrefix :: ConfigVariantPrefix
  , forceSymlink :: Bool -- when True, back up and replace plain-file targets
  }
-- Given a yaml file and a group name, parse the group into a ConfigGroup, or
-- a ParseError.
-- If a `common` group is specified, that group will be combined with the parsed one
parseGroup :: FilePath -> T.Text -> IO (Either (ParseError T.Text) ConfigGroup)
parseGroup specFile groupName =
  doesFileExist specFile >>= \exists -> parseGroup' exists
  where
    parseGroup' exists
      | not exists = return $ Left ConfettiYamlNotFound
      | otherwise = do
        eitherSpec <- decodeFileEither specFile
        eitherGroup <-
          either
            -- YAML syntax errors become a 'ConfettiYamlInvalid'.
            (return .
             Left . ConfettiYamlInvalid . T.pack . prettyPrintParseException)
            (`findGroup` groupName)
            eitherSpec
        either
          (return . Left)
          -- Group found: merge in the optional `common` section.  The
          -- 'fromRight' is safe here because this branch only runs when
          -- 'eitherGroup' (and hence 'eitherSpec') is a 'Right'.
          (\g ->
             let spec = fromRight eitherSpec
             in if isJust $ commonGroup spec
                  then Right <$>
                       appendCommonGroup g (fromJust $ commonGroup spec)
                  else return $ Right g)
          eitherGroup
-- Combine whatever group we parsed with the common group, if one was
-- specified
appendCommonGroup :: ConfigGroup -> CommonConfigGroup -> IO ConfigGroup
appendCommonGroup g common = do
  cTargets <- mapM absolutePath $ commonTargets common
  cSearchPaths <- defaultSearchPaths cTargets (commonSearchPaths common)
  adjustedGroupSearchPaths <- defaultSearchPaths (targets g) (searchPaths g)
  -- 'uniq' keeps the group's own entries first and drops duplicates
  -- contributed by the common section.
  return
    ConfigGroup
    { name = name g
    , targets = uniq $ targets g ++ cTargets
    , searchPaths = Just . uniq $ adjustedGroupSearchPaths <> cSearchPaths
    }
-- Expand all search paths. If search paths are empty, use the paths of the
-- supplied targets
defaultSearchPaths :: [ConfigTarget] -> Maybe [SearchPath] -> IO [SearchPath]
defaultSearchPaths ts ss =
  let unadjusted =
        fromMaybe
          -- Default: each target's own directory, non-recursive.
          (map
             (\t -> SearchPath {path = takeDirectory t, recursive = Just False})
             ts)
          ss
  in
    -- Normalise every path to an absolute one (expanding ~).
    mapM
      (\s -> do
         absolute <- absolutePath (path s)
         return $ s {path = absolute})
      unadjusted
-- Custom validation for a successfully parsed config group.  Currently it
-- rejects groups in which two targets share the same file name, since
-- variants are matched by file name and duplicates would be ambiguous.
validateSpec :: ConfigGroup -> Either (ParseError T.Text) ConfigGroup
validateSpec groupSpec
  | length fileNames == length deduped = Right groupSpec
  | otherwise =
      -- Safe 'head': this branch implies at least one duplicate remains
      -- after '\\' removes one occurrence of each unique name.
      Left . DuplicateNameError . T.pack . head $ fileNames \\ deduped
  where
    fileNames = map takeFileName (targets groupSpec)
    deduped   = uniq fileNames
-- Finds a given group in a parsed spec file. Returns a GroupNotFound if the group
-- is missing
findGroup :: ParsedSpecFile
          -> T.Text
          -> IO (Either (ParseError T.Text) ConfigGroup)
findGroup spec groupName =
  -- 'sequence' lifts the IO of path expansion out of the Either.
  sequence $
  maybe (Left $ GroupNotFound groupName) (Right . expandPathsForGroup) $
  find (\g -> name g == groupName) (groups spec)
-- Replaces ~ with the value of $HOME in every target of the group.
expandPathsForGroup :: ConfigGroup -> IO ConfigGroup
expandPathsForGroup confGroup = do
  expanded <- mapM absolutePath (targets confGroup)
  return confGroup {targets = expanded}
-- If a target config file is _not_ a symlink,
-- make a backup before we swap out the config
-- Returns 'Just' an error when the target is a regular file and force mode
-- is off; 'Nothing' when it is safe to proceed (possibly after backing up).
backUpIfNonSymLink :: Bool -> FilePath -> IO (Maybe (ApplyError FilePath))
backUpIfNonSymLink shouldForce file = do
  exists <- doesFileExist file
  isLink <-
    if exists
      then pathIsSymbolicLink file
      else return False
  if exists && not isLink then
    if shouldForce then createBackup file >> return Nothing
    else return . Just $ VariantAlreadyExists [file]
  else return Nothing
-- Backs up a file, eg config.json -> config.json.$time.backup
-- (suffix is the current POSIX time in whole seconds)
createBackup :: FilePath -> IO ()
createBackup file = do
  t <- round <$> getPOSIXTime
  let backup = file ++ "." ++ show (t :: Integer) ++ ".backup"
  copyFile file backup
-- Delete a file, treating "file was not there" as success; every other
-- IO error is re-thrown.
removeIfExists :: FilePath -> IO ()
removeIfExists f = removeFile f `catch` handleExists
  where
    handleExists e
      | isDoesNotExistError e = return ()
      | otherwise = throwIO e
-- Given a list of variant configs, returns the variants that do not exist
-- on disk.
filterMissingVariants :: [ConfigVariant] -> IO [FilePath]
filterMissingVariants = filterM (\f -> not <$> doesFileExist f)
-- This is where the actual config swapping happens. For every ConfigTarget,
-- creates a symlink from the target -> variant.
-- For instance, for target = ~/.aws/credentials, and variant "work",
-- the link created would be
-- ~/.aws/credentials -> ~/.aws/work.credentials
--
-- Precondition: every element has a 'Just' 'result' ('fromJust' would crash
-- otherwise); 'applySpec' filters the list before calling this.
linkTargets :: [VariantSearch] -> IO ()
linkTargets =
  mapM_
    (\pair -> createSymbolicLink (fromJust $ result pair) (linkToCreate pair))
-- Given a variant prefix, eg `dev`, and a target, eg `~/.aws/credentials`,
-- construct the variant's file name, `dev.credentials`.  With no prefix the
-- variant is simply the target's file name.  The ".." -> "." rewrite
-- collapses the double dot that arises when the target's file name itself
-- starts with a dot (hidden files).
makeVariant :: ConfigVariantPrefix -> ConfigTarget -> ConfigVariantFileName
makeVariant prefix target =
  let stem = takeFileName target
      prefixed = case prefix of
        Nothing -> stem
        Just p  -> p ++ "." ++ stem
  in T.unpack (T.replace ".." "." (T.pack prefixed))
-- Given a prefix, a list of targets, construct a list of variant filenames, and search
-- for matches in all of the given search paths
-- Produces one 'VariantSearch' per (target, search path) pair.
searchVariants :: ConfigVariantPrefix
               -> [ConfigTarget]
               -> [SearchPath]
               -> IO [VariantSearch]
searchVariants variant targetFiles vPaths =
  concat <$>
  mapM (\target -> mapM (findVariantInPath variant target) vPaths) targetFiles
-- Get all the contents of a directory recursively
-- Returns file paths only (directories are descended into, not listed).
-- NOTE(review): presumably descends through directory symlinks too, so a
-- cyclic link could loop forever -- TODO confirm.
getRecursiveContents :: FilePath -> IO [FilePath]
getRecursiveContents topdir = do
  names <- getDirectoryContents topdir
  let properNames = filter (`notElem` [".", ".."]) names
  paths <-
    forM properNames $ \n -> do
      let nextPath = topdir </> n
      isDir <- doesDirectoryExist nextPath
      if isDir
        then getRecursiveContents nextPath
        else return [nextPath]
  return (concat paths)
-- Perform a search for a single file, and return the search result
findVariantInPath :: ConfigVariantPrefix
                  -> ConfigTarget
                  -> SearchPath
                  -> IO VariantSearch
findVariantInPath prefix target searchPath =
  let fileToFind = makeVariant prefix target
  in do pathName <- absolutePath $ path searchPath
        let isRecursive = fromMaybe False $ recursive searchPath
        searchResult <-
          if isRecursive
            -- Recursive: first file whose path ends with the variant name,
            -- skipping the target itself so we never link a file to itself.
            then find (\f -> endswith fileToFind f && (target /= f)) <$>
                 getRecursiveContents pathName
            -- Non-recursive: look for the exact file in this directory only.
            else do
              let potentialVariant = pathName </> fileToFind
              exists <- doesFileExist potentialVariant
              if exists
                then return $ Just potentialVariant
                else return Nothing
        return
          VariantSearch
          { searchDirectory = path searchPath
          , fileName = fileToFind
          , recursiveSearch = isRecursive
          , linkToCreate = target
          , result = searchResult
          }
-- Run the spec! Every target will be a symlink to the variant
-- file
applySpec :: ConfigSpec -> IO (Maybe (ApplyError FilePath))
applySpec spec = do
  let groupTargets = targets $ configGroup spec
  -- Search every configured path (defaulting to each target's own
  -- directory, non-recursive) for the requested variant of every target.
  searchResults <-
    searchVariants
      (configVariantPrefix spec)
      groupTargets
      (fromMaybe
         (nub $ map
            (\t -> SearchPath {path = takeDirectory t, recursive = Just False})
            groupTargets)
         (searchPaths $ configGroup spec))
  -- Refuse to clobber plain-file targets unless force mode backed them up.
  backupErr <- maybeApplyError . concatVariantExists . catMaybes <$> mapM (backUpIfNonSymLink (forceSymlink spec)) groupTargets
  if isJust backupErr
    then return backupErr
    else do
      let confirmedVariantFiles = filter (isJust . result) searchResults
          foundFiles =
            uniq $ map (takeFileName . fromJust . result) confirmedVariantFiles
          allFiles = uniq $ map fileName searchResults
          missingVariants = allFiles \\ foundFiles
      -- All-or-nothing: only remove and relink when every variant was found.
      if null missingVariants
        then do
          mapM_ removeIfExists groupTargets
          mapM_
            (\s -> printSuccess $ linkToCreate s ++ " -> " ++ fromJust (result s))
            confirmedVariantFiles
          linkTargets confirmedVariantFiles
          return Nothing
        else return $ Just (VariantsMissing missingVariants)
-- | Expand a leading (or embedded) "~" to the user's $HOME directory.
absolutePath :: FilePath -> IO FilePath
absolutePath p = fmap substituteHome getHomeDirectory
  where substituteHome home = replace "~" home p
-- | Print a line in green using ANSI escape codes.
printSuccess :: String -> IO ()
printSuccess msg = putStrLn ("\x1b[32m" ++ msg ++ "\x1B[0m")
-- | Print a line in red using ANSI escape codes.
printFail :: String -> IO ()
printFail msg = putStrLn ("\x1b[31m" ++ msg ++ "\x1B[0m")
| aviaviavi/confetti | src/Confetti.hs | mit | 13,854 | 0 | 20 | 3,352 | 3,048 | 1,577 | 1,471 | 272 | 4 |
{-# LANGUAGE DeriveAnyClass #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE StandaloneDeriving #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
module Shiva.Translation (
runTrans,
translateSet,
translateSentences,
TransArticle (..),
) where
import Shiva.Config
import Control.Concurrent.STM.TVar
import Control.Monad.Catch (throwM)
import Control.Monad.Reader
import Control.Monad.State
import Data.Aeson
import Data.Text (Text)
import qualified Data.Text as T
import Database.PostgreSQL.Simple.FromField
import GHC.Generics
import Microsoft.Translator
import Opaleye
-- | Translate each Swedish text into a list of aligned 'Sentence's,
-- rethrowing any translator error into IO.
translateSentences :: [Text] -> ShivaM [[Sentence]]
translateSentences svTxts = do
  ShivaData {..} <- ask
  liftIO $ do
    tdata <- readTVarIO transDataTV
    outcome <- translateArraySentencesIO tdata Swedish English svTxts
    either throwM pure outcome
-- | Translate a batch of Swedish texts to English, returning the raw
-- array-response items; translator errors are rethrown into IO.
translateSet :: [Text] -> ShivaM [TransItem]
translateSet svTxts = do
  ShivaData {..} <- ask
  liftIO $ do
    tdata <- readTVarIO transDataTV
    outcome <- translateArrayIO tdata Swedish English svTxts
    either throwM (pure . getArrayResponse) outcome
-- | Core translation function. Used only with counter machinery below.
-- Translates one Swedish text to English, wrapping any translator
-- failure in 'MSTranslatorException' and rethrowing it.
trans :: Text -> ShivaM Text
trans sv = do
  ShivaData {..} <- ask
  -- Translator auth/token state lives in a TVar shared across requests.
  tdata <- liftIO $ readTVarIO transDataTV
  mtxt <- liftIO $ translateIO tdata (Just Swedish) English sv
  either (throwM . MSTranslatorException) pure mtxt
---- Translation with running character count ----

-- | Run a counted translation action starting from a zero character
-- count, discarding the final count. Uses 'evalStateT' instead of the
-- @(x,_) <- runStateT@ pattern, which built and then ignored the pair.
runCounter :: CounterM a -> ShivaM a
runCounter cm = do
  x <- evalStateT cm 0
  -- save translation event record in DB here
  return x
-- | Translate a text while adding its length to the running
-- character count kept in the state.
shivaTrans :: Text -> CounterM Text
shivaTrans txt = do
  modify (+ T.length txt)
  lift (trans txt)
-- | Translate a single text with character counting applied.
runTrans :: Text -> ShivaM Text
runTrans txt = runCounter (shivaTrans txt)
---- Translating sets of phrases: Used for feed listings ----
-- JSON codecs for the translator's 'Sentence' type (orphan instances;
-- permitted by the -fno-warn-orphans pragma at the top of this module).
deriving instance ToJSON Sentence
deriving instance FromJSON Sentence
-- | A translated article: title, original source URL, the URL fragment
-- used to address it, an optional image URL, and the translated body.
data TransArticle = TransArticle
  { thetitle :: Text
  , origUrl :: Text
  , urlFragment :: Text
  , imageUrl :: Maybe Text
  , bodyResult :: [Sentence]
  } deriving (Show, Generic, FromJSON, ToJSON)
-- Stored as a JSON(B) database column; decoded via the Aeson instance.
instance FromField TransArticle where
  fromField = fromJSONField
instance QueryRunnerColumnDefault PGJsonb TransArticle where
  queryRunnerColumnDefault = fieldQueryRunnerColumn
| BlackBrane/shiva | src/Shiva/Translation.hs | mit | 2,630 | 0 | 13 | 702 | 597 | 317 | 280 | 67 | 1 |
module Threase.DirectionSpec (spec) where
import Test.Hspec
import Threase.Direction
-- | Unit tests for 'render': each compass direction maps to the
-- corresponding Unicode arrow character.
spec :: Spec
spec =
  describe "render" $ do
    it "returns a left arrow for west" $
      render West `shouldBe` "\8592"
    it "returns a down arrow for south" $
      render South `shouldBe` "\8595"
    it "returns a right arrow for east" $
      render East `shouldBe` "\8594"
    it "returns a up arrow for north" $
      render North `shouldBe` "\8593"
| tfausak/threase | test-suite/Threase/DirectionSpec.hs | mit | 527 | 0 | 14 | 179 | 129 | 63 | 66 | 14 | 1 |
{-# LANGUAGE UnicodeSyntax #-}
module Main where
import Control.Monad.Loops (firstM)
import Data.ByteString (ByteString, append, putStr, readFile, empty, getContents)
import Data.List (delete, sort)
import Data.Maybe (isJust, fromJust)
import Options.Applicative hiding (empty)
import Prelude hiding (putStr, readFile, getContents)
import System.Posix hiding (append)
import System.Posix.Handle (hGetStatus)
import GHC.IO.Handle.FD (stdout)
import Cat.Decorators as Decorators
import Cat.Parsers
import Cat.Types
import Utils
-- | Entry point: parse flags and file arguments, refuse to run when
-- stdout is one of the inputs (e.g. @cat f > f@), then decorate and
-- print the concatenated content.
main β· IO ()
main = do
  (filePaths, options) <- parseArguments
  ensureInputIsNotOutput filePaths
  -- No file arguments means read from standard input, like GNU cat.
  concatenatedContent <- if not (null filePaths)
    then concatenateContent filePaths
    else getContents
  putStr $ apply concatenatedContent $ sanitize options
-- | Parse the command line into file paths plus a flattened option list
-- (each flag may expand to several 'Option's, hence the concat).
parseArguments β· IO ([String], [Option])
parseArguments = merge <$> execParser argumentsParserWithInfo
  where merge (filePaths, options) = (filePaths, concat options)
-- | The argument parser wrapped with --help support and program info.
argumentsParserWithInfo β· ParserInfo ([String], [[Option]])
argumentsParserWithInfo = info (helper <*> argumentsParser) description
-- | Program description shown by --help.
-- The previous text ("hello - a test for optparse-applicative") was
-- copy-pasted from the optparse-applicative example and did not
-- describe this program; replaced with cat-appropriate help text.
description β· InfoMod ([String], [[Option]])
description = fullDesc <> progDesc "Concatenate FILES, or standard input, to standard output"
              <> header "cat - concatenate files and print on the standard output"
-- | Combined parser: positional file paths and the option flags.
argumentsParser β· Parser ([String], [[Option]])
argumentsParser = (,) <$> filePathsParser <*> optionsParser
-- | Zero or more positional FILE arguments.
filePathsParser β· Parser [String]
filePathsParser = many (argument str (metavar "FILES"))
-- | Flag parser mirroring GNU cat's option set. Each flag maps to the
-- list of 'Option's it implies (e.g. -A is -vET); @parser@/@shortParser@
-- are helpers from Cat.Parsers building long/short-form flags.
optionsParser β· Parser [[Option]]
optionsParser = many optionPa
  where optionPa = parser 'A' "show-all" "equivalent to -vET" [ShowNonprinting, ShowEnds, ShowTabs]
                   <|> parser 'b' "number-nonblank" "number nonempty output lines, overrides -n" [NumberNonBlank]
                   <|> shortParser 'e' "equivalent to -vE" [ShowNonprinting, ShowEnds]
                   <|> parser 'E' "show-ends" "display $ at end of each line" [ShowEnds]
                   <|> parser 'n' "number" "number all output lines" [Number]
                   <|> parser 's' "squeeze-blank" "suppress repeated empty output lines" [SqueezeBlank]
                   <|> shortParser 't' "equivalent to -vT" [ShowNonprinting, ShowTabs]
                   <|> parser 'T' "show-tabs" "display TAB characters as ^I" [ShowTabs]
                   <|> shortParser 'u' "(ignored)" []
                   <|> parser 'v' "show-nonprinting" "use ^ and M- notation, except for LFD and TAB" [ShowNonprinting]
-- | Read every file and concatenate their contents in argument order.
concatenateContent β· [FilePath] β IO ByteString
concatenateContent filePaths =
  fmap (foldl append empty) (mapM readFile filePaths)
-- | Apply every enabled option's decorator to the content, in order.
apply β· ByteString β [Option] β ByteString
apply = foldl Decorators.decorate
-- | Normalise the option list: -b overrides -n (GNU cat semantics),
-- then sort the remaining options into their application order.
sanitize β· [Option] β [Option]
sanitize opts | [NumberNonBlank, Number] `elems` opts = sanitize $ delete Number opts
              | otherwise = sort opts
-- | Abort with an error when any input path refers to the same file as
-- stdout (e.g. @cat f > f@), mirroring GNU cat's behaviour.
-- Rewritten to drop the partial 'fromJust'/'isJust' pair and the
-- convoluted @>>= return@ chain; behaviour is unchanged.
-- NOTE(review): only the inode ('fileID') is compared, not the device
-- id, so files on different filesystems could in theory collide.
ensureInputIsNotOutput :: [FilePath] -> IO ()
ensureInputIsNotOutput paths = do
  stdoutFstats <- hGetStatus stdout
  let stdoutFileID = fileID stdoutFstats
      -- True when the path's inode matches stdout's.
      matchesStdout p = fmap ((== stdoutFileID) . fileID) (getFileStatus p)
  match <- firstM matchesStdout paths
  case match of
    Just p  -> error (p ++ ": input file is output file")
    Nothing -> return ()
| shockone/coreutils | src/cat.hs | mit | 3,724 | 0 | 17 | 1,095 | 895 | 480 | 415 | 64 | 2 |
{-# LANGUAGE OverloadedStrings, DeriveGeneric #-}
module JSON.EventGroup.Request
( Meta(..)
, Data(..)
, Message(..)
, encode
, decode
, eitherDecode
) where
import GHC.Generics
import Data.Text
import Data.Aeson
-- | Metadata section of an event-group request message.
-- Field names carry an @h_@ prefix to avoid record-field clashes.
data Meta = Meta
  { h_type :: Text    -- ^ message type tag
  , h_id :: [Char]    -- ^ message identifier
  , h_group :: Text   -- ^ group name
  } deriving (Show, Generic)
-- | Payload section: identifies the event group and its owner.
data Data = Data
  { h_eventGroupType :: Text   -- ^ type of the event group
  , h_eventGroupId :: [Char]   -- ^ event group identifier
  , h_ownerType :: Text        -- ^ type of the owning entity
  , h_ownerId :: [Char]        -- ^ owner identifier
  } deriving (Show, Generic)
-- | A complete event-group request: metadata plus payload.
data Message = Message
  { h_meta :: Meta
  , h_data :: Data
  } deriving (Show, Generic)
-- Aeson codecs derived generically from the record shapes above.
instance FromJSON Meta
instance FromJSON Data
instance FromJSON Message
instance ToJSON Meta
instance ToJSON Data
instance ToJSON Message
| suitupalex/ms-event-muncher | JSON/EventGroup/Request.hs | mit | 704 | 0 | 9 | 136 | 220 | 130 | 90 | 32 | 0 |
{-|
Module : Solutions.Solutions3XX
Description : Solutions for problems 300-399
This module contains all implemented solutions for problems with IDs between 300 and 399.
-}
module Solutions.Solutions3XX
( solveProblem3XX
) where
import Utils (ProblemID, Solution, js)
-- | Solutions for problems with IDs between 300 and 399.
-- No problems in this range are solved yet, so every ID maps to Nothing.
solveProblem3XX :: ProblemID -> Maybe Solution
-- Add further solutions here
solveProblem3XX = const Nothing
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE OverloadedLabels #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE PartialTypeSignatures #-}
{-# LANGUAGE QuasiQuotes #-}
{-# LANGUAGE TypeOperators #-}
{-# OPTIONS_GHC -O0 #-}
{-# OPTIONS_GHC -Wno-partial-type-signatures #-}
{-# OPTIONS_GHC -fomit-interface-pragmas #-}
module ZoomHub.Storage.PostgreSQL.Schema.Schema2
( Schema2,
migration,
)
where
import Squeal.PostgreSQL
( ColumnConstraint (Def, NoDef),
Definition,
NullityType (NotNull, Null),
PGType (PGfloat8, PGint4, PGint8, PGtext, PGtimestamptz),
Public,
SchemumType (Table),
TableConstraint (PrimaryKey, Unique),
addColumn,
alterTable,
dropColumn,
nullable,
text,
(&),
(:::),
(:=>),
)
import Squeal.PostgreSQL.Migration (Migration (..))
import ZoomHub.Storage.PostgreSQL.Schema.Schema0 (ConfigTable0, FlickrTable0, ImageTable0)
-- | Database schema after migration 2: the same tables as schema 0,
-- with the content table replaced by the extended 'ContentTable2'.
type Schema2 =
  '[ ConfigTable0,
     ContentTable2,
     ImageTable0,
     FlickrTable0
   ]
-- | Schema 2 placed in the default @public@ PostgreSQL schema.
type Schemas2 = Public Schema2
-- | The @content@ table as of migration 2. Relative to the original
-- content table it adds the nullable @verification_token@ column
-- (added by this migration's 'setup' below).
type ContentTable2 =
  "content"
    ::: 'Table
          ( '[ "pk_content" ::: 'PrimaryKey '["id"],
               "content_unique_hash_id" ::: 'Unique '["hash_id"],
               "content_unique_url" ::: 'Unique '["url"]
             ]
              :=> '[ "id" ::: 'Def :=> 'NotNull 'PGint8,
                     "hash_id" ::: 'NoDef :=> 'NotNull 'PGtext,
                     "type_id" ::: 'Def :=> 'NotNull 'PGint4,
                     "url" ::: 'NoDef :=> 'NotNull 'PGtext,
                     "state" ::: 'Def :=> 'NotNull 'PGtext,
                     "initialized_at" ::: 'Def :=> 'NotNull 'PGtimestamptz,
                     "active_at" ::: 'Def :=> 'Null 'PGtimestamptz,
                     "completed_at" ::: 'Def :=> 'Null 'PGtimestamptz,
                     "title" ::: 'Def :=> 'Null 'PGtext,
                     "attribution_text" ::: 'Def :=> 'Null 'PGtext,
                     "attribution_link" ::: 'Def :=> 'Null 'PGtext,
                     "mime" ::: 'Def :=> 'Null 'PGtext,
                     "size" ::: 'Def :=> 'Null 'PGint8,
                     "error" ::: 'Def :=> 'Null 'PGtext,
                     "progress" ::: 'Def :=> 'NotNull 'PGfloat8,
                     "abuse_level_id" ::: 'Def :=> 'NotNull 'PGint4,
                     "num_abuse_reports" ::: 'Def :=> 'NotNull 'PGint8,
                     "num_views" ::: 'Def :=> 'NotNull 'PGint8,
                     "version" ::: 'Def :=> 'NotNull 'PGint4,
                     "submitter_email" ::: 'NoDef :=> 'Null 'PGtext,
                     "verification_token" ::: 'NoDef :=> 'Null 'PGtext
                   ]
          )
-- | Migration 2: adds the verification token column to @content@,
-- with 'setup'/'teardown' as the forward/backward steps.
migration :: Migration Definition _ Schemas2
migration =
  Migration
    { name = "2021-02-28-1: Add verification token",
      up = setup,
      down = teardown
    }
-- | Forward step: add the nullable text column @verification_token@.
setup :: Definition _ Schemas2
setup = alterTable #content (addColumn #verification_token (text & nullable))
-- | Backward step: drop the @verification_token@ column again.
teardown :: Definition Schemas2 _
teardown = alterTable #content (dropColumn #verification_token)
| zoomhub/zoomhub | src/ZoomHub/Storage/PostgreSQL/Schema/Schema2.hs | mit | 3,032 | 0 | 14 | 936 | 778 | 437 | 341 | 73 | 1 |
-- Copyright (C) 2013 Jorge Aparicio
import Data.Char
-- | Project Euler 20: print the sum of the decimal digits of 100!.
main :: IO()
main = print (sum (map digitToInt (show (factorial 100))))
-- | n! as the product of 1..n (1 when n < 1, since the range is empty).
factorial :: Integer -> Integer
factorial = product . enumFromTo 1
| japaric/eulermark | problems/0/2/0/020.hs | mit | 192 | 0 | 9 | 38 | 70 | 36 | 34 | 5 | 1 |
module Main where
-- | Print the scaffold's three greeting lines, one per line.
main :: IO ()
main =
  mapM_ putStrLn
    [ "You fight with the strength of many men sir Knight..."
    , "You have proved yourself worthy; will you join me?"
    , "You make me sad. So be it. Come, Patsy."
    ]
module LanguageDetection where
import Data.Char (toLower, isAlpha)
import Data.List (sort, group, sortBy, groupBy, maximumBy)
import Data.Function (on)
import Data.Maybe
-- | A language model: the language's name plus, for each n-gram size
-- n = 1..3, the top-15 n-grams with their relative frequencies
-- (as produced by 'learn' on sample text).
type Language = (String, [[(String, Float)]])
spanish, english, italian, french, german :: Language
-- Pre-computed models; one inner list per n-gram size (1, 2, 3).
spanish = ("Spanish",
[[("e",0.1487224),("a",0.13570592),("o",0.103057705),("s",8.30638e-2),("n",7.373588e-2),("r",7.00455e-2),("l",6.163288e-2),("d",5.699944e-2),("i",5.5384815e-2),("u",5.0404027e-2),("t",4.3898344e-2),("c",4.1449346e-2),("m",3.1305082e-2),("p",2.5570406e-2),("q",1.9024482e-2)],
[("en",9.11264e-2),("de",9.025751e-2),("ue",8.59461e-2),("es",8.359241e-2),("qu",7.5394854e-2),("er",7.1926646e-2),("os",6.7675725e-2),("la",6.5349534e-2),("ra",6.1316743e-2),("do",5.5262055e-2),("an",5.479279e-2),("as",5.455632e-2),("ar",4.9531832e-2),("el",4.8069026e-2),("on",4.520208e-2)],
[("que",0.21683818),("ent",8.4788516e-2),("est",8.1647955e-2),("con",6.736604e-2),("nte",5.686873e-2),("los",5.6749213e-2),("ndo",5.3090762e-2),("ien",5.2738857e-2),("ado",5.173627e-2),("por",5.1437482e-2),("aba",4.829692e-2),("ero",4.6716683e-2),("las",4.4233445e-2),("era",4.4107296e-2),("tra",4.338357e-2)]
])
english = ("English",
[[("e",0.1375582),("t",9.7873785e-2),("a",8.941654e-2),("o",8.857305e-2),("n",7.851809e-2),("i",7.744325e-2),("s",7.636313e-2),("h",6.9370724e-2),("r",6.823243e-2),("l",5.2671727e-2),("d",4.7101866e-2),("u",3.2112293e-2),("m",3.0451782e-2),("c",2.7663546e-2),("g",2.6649524e-2)],
[("th",0.14128864),("he",0.13721545),("in",9.1359265e-2),("an",7.6659665e-2),("er",7.437671e-2),("nd",5.851646e-2),("re",5.6102116e-2),("ng",4.989105e-2),("ou",4.854154e-2),("es",4.758894e-2),("on",4.718381e-2),("at",4.5798708e-2),("en",4.3614294e-2),("or",4.192261e-2),("ar",3.9940763e-2)],
[("the",0.280356),("and",0.12562127),("ing",0.12045586),("her",6.100071e-2),("hat",4.5886785e-2),("you",4.0859655e-2),("all",3.859826e-2),("his",3.8370494e-2),("for",3.819967e-2),("ith",3.6678515e-2),("ere",3.584066e-2),("tha",3.5401396e-2),("wit",3.4620486e-2),("ter",3.419749e-2),("ent",3.391278e-2)]
])
italian = ("Italian",
[[("e",0.12748316),("a",0.11531494),("i",0.11437282),("o",0.10502163),("n",7.24209e-2),("r",7.02985e-2),("l",6.5541185e-2),("t",6.300434e-2),("s",5.846417e-2),("c",5.071938e-2),("d",3.9474607e-2),("u",3.4265496e-2),("m",3.0505177e-2),("p",2.8966492e-2),("v",2.4147304e-2)],
[("er",9.195987e-2),("re",7.495379e-2),("on",7.484507e-2),("di",7.1165755e-2),("co",6.929463e-2),("to",6.821315e-2),("no",6.821315e-2),("an",6.500305e-2),("la",6.329215e-2),("ra",6.29946e-2),("ri",5.9790224e-2),("in",5.8399756e-2),("ch",5.7735987e-2),("en",5.7232447e-2),("te",5.6906283e-2)],
[("che",0.12396106),("ell",9.129848e-2),("per",7.9760864e-2),("lla",7.877661e-2),("ent",7.350904e-2),("del",7.1376495e-2),("con",6.891587e-2),("gli",6.308326e-2),("non",5.6430444e-2),("ato",5.1873725e-2),("que",4.9941674e-2),("ere",4.8957422e-2),("are",4.804608e-2),("era",4.7717992e-2),("all",4.635098e-2)]
])
french = ("French",
[[("e",0.15805699),("a",9.301707e-2),("s",9.0873234e-2),("i",8.090436e-2),("t",7.716121e-2),("u",7.346523e-2),("r",7.30193e-2),("n",7.278777e-2),("l",6.2154308e-2),("o",5.8814198e-2),("m",3.944672e-2),("d",3.8602043e-2),("c",3.1776045e-2),("p",2.7702743e-2),("\233",2.2218794e-2)],
[("ai",9.303814e-2),("es",9.0868674e-2),("le",8.606739e-2),("en",7.369076e-2),("re",6.899617e-2),("de",6.840935e-2),("ou",6.739575e-2),("on",6.261225e-2),("nt",6.1740905e-2),("an",5.718858e-2),("it",5.5961587e-2),("la",5.3898815e-2),("qu",5.3845465e-2),("is",5.3738773e-2),("ur",5.2547347e-2)],
[("ait",0.11029944),("ent",0.10245109),("que",9.9432506e-2),("ais",8.868631e-2),("les",7.2204776e-2),("our",6.9005065e-2),("ant",6.369234e-2),("eur",5.910408e-2),("qui",5.1678337e-2),("lle",5.0833132e-2),("des",4.9021974e-2),("tai",4.8116393e-2),("ans",4.606375e-2),("mme",4.4856314e-2),("est",4.4554453e-2)]
])
german = ("German",
[[("e",0.19220483),("n",0.11152754),("r",8.666563e-2),("i",8.386764e-2),("s",6.824635e-2),("t",6.683186e-2),("h",6.0471836e-2),("a",6.029632e-2),("d",5.326519e-2),("u",4.4200093e-2),("l",3.8986113e-2),("c",3.7726495e-2),("g",3.758195e-2),("m",3.268804e-2),("o",2.5440091e-2)],
[("en",0.13077414),("er",0.12911756),("ch",0.105543174),("te",7.87512e-2),("de",6.476585e-2),("ie",6.467028e-2),("ei",6.4064994e-2),("in",5.6705963e-2),("ge",4.9984075e-2),("nd",4.9633645e-2),("ic",4.59382e-2),("un",4.4950623e-2),("es",4.0044602e-2),("re",3.8356166e-2),("be",3.669959e-2)],
[("ich",0.12687892),("ein",9.9057056e-2),("sch",8.607974e-2),("die",7.8330696e-2),("und",7.543647e-2),("der",7.076838e-2),("cht",7.002148e-2),("den",5.872468e-2),("ter",5.76977e-2),("ine",5.5176925e-2),("gen",5.3309686e-2),("ten",4.6680987e-2),("che",4.2479698e-2),("tte",4.0052287e-2),("nde",3.9305393e-2)]
])
-- Languages considered by 'detect'.
supportedLanguages = [english, spanish, italian, french, german]
-- Largest n-gram size used by the models above.
nGram = 3
----------------------- N-GRAM -----------------------------------------
-- | All contiguous substrings of length n of the given word,
-- left to right; empty when the word is shorter than n.
ngram :: Int -> String -> [String]
ngram n word = map slice [0 .. length word - n]
  where slice i = take n (drop i word)
-- | Lower-case the text, drop everything but letters and spaces,
-- and split the result into words.
clean :: String -> [String]
clean phrase = words lowered
  where lowered = map toLower (filter keep phrase)
        keep c = isAlpha c || c == ' '
-- | All n-grams of every cleaned word in the phrase, concatenated.
-- Uses 'concatMap' instead of the equivalent @concat $ map@.
ngrams :: Int -> String -> [String]
ngrams n phrase = concatMap (ngram n) (clean phrase)
---------------------- FREQUENCY ---------------------------------------
-- | Divide two Ints, producing a Float.
divFloat :: Int -> Int -> Float
divFloat x y = fromIntegral x / fromIntegral y
-- | Sort pairs by their second component in descending order.
sortSndDec :: Ord b => [(a, b)] -> [(a, b)]
sortSndDec = reverse . sortBy (compare `on` snd)
-- | Relative frequency of each distinct element, most frequent first.
frequency :: [String] -> [(String, Float)]
frequency list = sortSndDec (map toFreq grouped)
  where grouped  = group (sort list)
        -- head is safe: 'group' only yields non-empty runs.
        toFreq g = (head g, divFloat (length g) (length list))
---------------------- LEARN -------------------------------------------
-- | Read a sample text file and return its 15 most frequent n-grams
-- with their raw (un-normalised) relative frequencies.
learnPercent :: FilePath -> Int -> IO [(String, Float)]
learnPercent file n = fmap ((take 15) . frequency. (ngrams n)) (readFile file)
-- | Rescale the frequencies so that they sum to 1.
fixValues :: [(String, Float)] -> [(String, Float)]
fixValues xs = [ (key, value / total) | (key, value) <- xs ]
  where total = sum (map snd xs)
-- | Learn a normalised n-gram model (top 15, frequencies summing to 1)
-- from a sample text file.
learn :: FilePath -> Int -> IO [(String, Float)]
learn file n = fmap fixValues (learnPercent file n)
-- The most boneheaded thing I've ever written (translated from Spanish).
-- Learns and prints the 1-, 2- and 3-gram models for a sample file, one
-- after the other, by indexing into a list of IO actions.
automaticLearn :: FilePath -> IO ()
automaticLearn file = do a0 <- a !! 0; print a0
                         a1 <- a !! 1; print a1
                         a2 <- a !! 2; print a2
                         return ()
  where a = (fmap (learn file) [1..nGram])
------------------------- DETECT ---------------------------------------
-- (Translated from Spanish:) This function sometimes goes haywire with
-- the probabilities and returns values > 3, which then end up larger
-- than 1, and that makes no sense when expressing a percentage. It
-- happens because the value abs(d_sentence - d_language) is not always
-- smaller than d_language, where d_ is the n-gram distribution.
--
-- | Distance score between the phrase's n-gram distribution and a
-- language model for a given n: summed absolute differences on shared
-- n-grams, plus the model weight of n-grams absent from the phrase.
-- Lower means closer.
languageProbability :: [[(String, Float)]] -> Int -> String -> Float
languageProbability lang n phrase = sum [abs(fromJust((search c)) - (fromJust(lookup c freq))) | c <- (map fst freq)] + ((sum . (map snd)) (filter (\x -> (fst x) `notElem` (map fst freq)) grams))
  where grams = lang !! (n-1)
        search x = lookup x grams
        -- fromJust is safe: freq is pre-filtered to n-grams present in grams.
        freq = filter (isJust . search . fst) (frequency $ ngrams n phrase)
-- | Score the phrase against every supported language, summing the
-- distance over all n-gram sizes 1..nGram.
detectNGram :: String -> [(String, Float)]
detectNGram phrase = [(fst lang, sum [languageProbability (snd lang) n phrase | n <- [1..nGram]]) | lang <- supportedLanguages]
-- | Best-matching language with its score: the scores are distances,
-- so after the descending sort the LAST entry is the closest match;
-- the score is averaged over the nGram sizes.
detectWithProb :: String -> (String, Float)
detectWithProb = normalize . last . sortSndDec . detectNGram
  where normalize (a,b) = (a, b / (fromIntegral nGram))
-- | Name of the best-matching language for the phrase.
detect :: String -> String
detect phrase = fst (detectWithProb phrase)
| gciruelos/LanguageDetection | LanguageDetection.hs | mit | 8,062 | 1 | 14 | 1,110 | 3,310 | 2,061 | 1,249 | 66 | 1 |
-- Naive recursive Fibonacci: fib 0 = 0, fib 1 = 1.
fib n
  | n == 0    = 0
  | n == 1    = 1
  | otherwise = fib (n - 1) + fib (n - 2)
-- All Fibonacci numbers (from fib 1 onwards) strictly below n.
fiblist n = takeWhile (< n) (map fib [1 ..])
-- Project Euler 2: sum of the even Fibonacci numbers below four million.
problem2 = sum [f | f <- fiblist 4000000, even f]
module Scrabble (scoreLetter, scoreWord) where
import qualified Data.Map.Strict as M
import Data.Char (toUpper)
-- | Scrabble letter values, keyed by upper-case letter.
dict :: M.Map Char Integer
dict = M.fromList
  [ ('A', 1), ('E', 1), ('I', 1), ('O', 1), ('U', 1), ('L', 1), ('N', 1), ('R', 1), ('S', 1), ('T', 1),
    ('D', 2), ('G', 2),
    ('B', 3), ('C', 3), ('M', 3), ('P', 3),
    ('F', 4), ('H', 4), ('V', 4), ('W', 4), ('Y', 4),
    ('K', 5),
    ('J', 8), ('X', 8),
    ('Q', 10), ('Z', 10) ]
-- | Point value of a letter, case-insensitively; 0 for anything
-- that is not a Scrabble letter.
scoreLetter :: Char -> Integer
scoreLetter letter =
  case M.lookup (toUpper letter) dict of
    Just value -> value
    Nothing    -> 0
-- | Total Scrabble score of a word: the sum of its letter values.
scoreWord :: String -> Integer
scoreWord = sum . map scoreLetter
| c19/Exercism-Haskell | scrabble-score/src/Scrabble.hs | mit | 620 | 0 | 7 | 130 | 345 | 217 | 128 | 16 | 1 |
import qualified Data.Set as S
import System.Environment (getArgs)
import Syntax
import Parser (prepParser)
import Token (alexScanTokens)
import qualified Data.Vector as V
import Control.Monad.Writer.Lazy
import qualified Data.List as L
import Data.Ord
import ParserSyntax (convertToTree)
-- | Check that every referenced sequent's assumptions (minus its
-- optional discharged formula) are a subset of the current line's
-- assumptions, reporting a Writer error message for each violation.
-- NOTE(review): the Just-discharge branch stops at the first failure
-- while the Nothing branch keeps checking the rest before returning
-- False — the asymmetry looks unintentional; confirm before changing.
checkAssumptionsWithDischarge :: Assumptions -> [(Proof, Maybe Formulae)] -> LineNumber -> Writer [String] Bool
checkAssumptionsWithDischarge assumptions listOfSequentsDischarges lineNum =
  case listOfSequentsDischarges of
    [] -> return True
    ((proof, Just discharge):xs)
      | S.isSubsetOf (S.delete discharge (sequentAssump proof)) assumptions ->
          checkAssumptionsWithDischarge assumptions xs lineNum
      | otherwise -> do
          reportError $ show lineNum++ " : Your rule references line ("
                        ++show (sequentLineNum proof)++
                        ") with the assumptions "
                        ++ppAssump (S.delete discharge (sequentAssump proof))++
                        ", but they are not a subset of "
                        ++ppAssump assumptions
          return False
    ((proof, Nothing):xs)
      | S.isSubsetOf (sequentAssump proof) assumptions ->
          checkAssumptionsWithDischarge assumptions xs lineNum
      | otherwise -> do
          reportError $ show lineNum++ " : Your rule references line ("
                        ++show (sequentLineNum proof)++
                        ") with the assumptions "
                        ++ppAssump (sequentAssump proof)++
                        ", but they are not a subset of "
                        ++ppAssump assumptions
          -- Keep checking the remaining sequents so that all errors are
          -- reported, but the overall result is still False.
          checkAssumptionsWithDischarge assumptions xs lineNum
          return False
-- | Record a single error message in the Writer log.
reportError :: String -> Writer [String] ()
reportError = tell . (: [])
-- | Check that each referenced sequent's assumptions are a subset of
-- the current line's assumptions; report an error per violation and
-- keep checking so every violation is listed.
checkAssumptions :: Assumptions -> [Proof] -> LineNumber -> Writer [String] Bool
checkAssumptions assumptions listOfSequents lineNum =
  case listOfSequents of
    [] -> return True
    x:xs
      | S.isSubsetOf (sequentAssump x) assumptions ->
          checkAssumptions assumptions xs lineNum
      | otherwise -> do
          reportError $ show lineNum++ " : Your rule references line ("
                        ++show (sequentLineNum x)++
                        ") with the assumptions "
                        ++ppAssump (sequentAssump x)++
                        ", but they are not a subset of "
                        ++ppAssump assumptions
          -- Continue to report errors for the remaining sequents.
          checkAssumptions assumptions xs lineNum
          return False
-- | Assumption rule: the formula must itself be one of the line's
-- assumptions. (Name keeps the original "assmption" spelling since
-- callers elsewhere reference it.)
assmptionRuleCheck :: Assumptions -> Formulae -> LineNumber -> Writer [String] Bool
assmptionRuleCheck assumptions formulae lineNum
  | S.member formulae assumptions = return True
  | otherwise = do
      reportError $ "Your sequent at line "
                    ++show lineNum++
                    " is not an instance of the Assumption Rule since "
                    ++show formulae++
                    " is not in the set of assumptons "
                    ++ppAssump assumptions
      return False
-- | Conjunction introduction: the line's formula must be (l ∧ r) where
-- l is proved by fromA and r by fromB, and both referenced sequents'
-- assumptions must be carried over. All mismatches are reported.
conjuncRuleIntroCheck :: Assumptions -> Formulae -> LineNumber -> Proof -> Proof -> Writer [String] Bool
conjuncRuleIntroCheck assumptions formulae lineNum fromA fromB = do
  x <- checkAssumptions assumptions [fromA, fromB] lineNum
  y <-isInstanceOfRule
  return (x && y)
  where isInstanceOfRule :: Writer [String] Bool
        isInstanceOfRule =
          case formulae of
            Sentence l Conjunction r
              | l == sequentFormulae fromA && r == sequentFormulae fromB -> return True
              -- Left side matches; only the right conjunct is wrong.
              | l == sequentFormulae fromA -> do
                  reportError $ "The sequent "
                                ++show (sequentLineNum fromB)++
                                " referenced in your conjuction rule at line "
                                ++show lineNum++
                                " is not the same as the right hand side of your formulae "
                                ++show r++
                                ". To fix reference a sequent with the formulae "
                                ++show r
                  return False
              -- Right side matches; only the left conjunct is wrong.
              | r == sequentFormulae fromB -> do
                  reportError $ "The sequent "
                                ++show (sequentLineNum fromA)++
                                " referenced in your conjuction rule at line "
                                ++show lineNum++
                                " is not the same as the left hand side of your formulae "
                                ++show l++
                                ". To fix reference a sequent with the formulae "
                                ++show l
                  return False
              -- Neither side matches the referenced sequents.
              | otherwise -> do
                  reportError $ "The sequents "
                                ++show (sequentLineNum fromA)++
                                " and "
                                ++show (sequentLineNum fromB)++
                                " referenced in your conjuction rule at line "
                                ++show lineNum++
                                " are not the same as the left hand side or right hand side of your formulae "
                                ++show l++
                                " , "
                                ++show r++
                                ". To fix reference a sequent with the formulae "
                                ++show l++
                                " and "
                                ++show r++
                                "."
                  return False
            -- NOTE(review): typo "hav" in this user-facing message.
            _ -> do
              reportError $ "The Sequent at line "
                            ++show lineNum++
                            " is not an instance of the conjunction introduction rule "
                            ++"it does not hav a conjunction between two formuale."
              return False
-- | Conjunction elimination: the referenced sequent must be (l ∧ r) and
-- the line's formula must be one of the two conjuncts.
conjuncRuleElimiCheck :: Assumptions -> Formulae -> LineNumber -> Proof -> Writer [String] Bool
conjuncRuleElimiCheck assumptions formulae lineNum fromSequent = do
  x <- checkAssumptions assumptions [fromSequent] lineNum
  y <-isInstanceOfRule
  return (x && y)
  where isInstanceOfRule :: Writer [String] Bool
        isInstanceOfRule =
          case sequentFormulae fromSequent of
            Sentence l Conjunction r
              | formulae == l || formulae == r -> return True
              | otherwise -> do
                  reportError $ show lineNum++ " : neither "
                                ++show l++
                                " nor "
                                ++show r++
                                " contains "
                                ++show formulae
                  return False
            -- Referenced sequent is not a conjunction at all.
            _ -> do
              reportError $ show lineNum++ " : "
                            ++show formulae++
                            " β¬ "
                            ++show (sequentFormulae fromSequent)++
                            " from β§ E rule. You need "
                            ++show (Sentence formulae Conjunction (Atom "x"))++
                            " where x can be anything."
              return False
-- | Implication introduction: the line's formula must be (l -> r)
-- where r is proved by the referenced sequent and l is the assumption
-- being discharged (when one is given). Each mismatch is reported.
implicaRuleIntroCheck :: Assumptions -> Formulae -> LineNumber -> Proof -> Maybe Formulae ->Writer [String] Bool
implicaRuleIntroCheck assumptions formulae lineNum fromSequent dischargedAssump = do
  x <- checkAssumptionsWithDischarge assumptions [(fromSequent, dischargedAssump)] lineNum
  y <-isInstanceOfRule
  return (x && y)
  where isInstanceOfRule :: Writer [String] Bool
        isInstanceOfRule =
          case formulae of
            Sentence l Implication r -> case dischargedAssump of
              -- With a discharge: antecedent must equal the discharged
              -- assumption and consequent must equal the proved formula.
              Just ds
                | l == ds && r == (sequentFormulae fromSequent) -> return True
                | otherwise -> do
                    when (not (r == (sequentFormulae fromSequent))) $
                      reportError $ show lineNum++ " : "
                                    ++show r++
                                    " found where "
                                    ++show (sequentFormulae fromSequent)++
                                    " was expected in "
                                    ++show formulae
                    when (not (l == ds)) $
                      reportError $ show lineNum++ " : "
                                    ++show l++
                                    " found where "
                                    ++show ds++
                                    " was expected in "
                                    ++show formulae
                    return False
              -- Without a discharge: only the consequent is constrained.
              Nothing
                | r == (sequentFormulae fromSequent) -> return True
                | otherwise -> do
                    when (not (r == (sequentFormulae fromSequent))) $
                      reportError $ show lineNum++ " : "
                                    ++show r++
                                    " found where "
                                    ++show (sequentFormulae fromSequent)++
                                    " was expected in "
                                    ++show formulae
                    return False
            -- NOTE(review): this message mentions "conjunction" but this
            -- is the implication introduction rule — likely copy-paste.
            _ -> do
              reportError $ "The Sequent at line "
                            ++show lineNum++
                            " is not an instance of the conjunction introduction rule "
                            ++"it does not have a conjunction between two formuale."
              return False
-- | Implication elimination (modus ponens): one referenced sequent must
-- be (l -> r) with r equal to this line's formula, and the other must
-- prove l. Both orders of the two references are accepted.
implicaRuleElimiCheck :: Assumptions -> Formulae -> LineNumber -> Proof -> Proof -> Writer [String] Bool
implicaRuleElimiCheck assumptions formulae lineNum fromA fromB = do
  x <- checkAssumptions assumptions [fromA, fromB] lineNum
  y <-isInstanceOfRule
  return (x && y)
  where isInstanceOfRule :: Writer [String] Bool
        isInstanceOfRule = case (sequentFormulae fromA) of
          -- Try fromA as the implication first.
          Sentence l Implication r
            | formulae == r ->
                return (sequentFormulae fromB == l)
            | otherwise -> do
                reportError $ show lineNum++ " : "
                              ++show formulae++
                              " found where "
                              ++show r++
                              " was expected."
                return False
          -- Otherwise try fromB as the implication.
          _ -> case (sequentFormulae fromB) of
            Sentence l Implication r
              | formulae == r ->
                  return (sequentFormulae fromA == l)
              | otherwise -> do
                  reportError $ show lineNum++ " : "
                                ++show r++
                                " found where "
                                ++show formulae++
                                " was expected."
                  return False
            _ -> do
              reportError $ show lineNum++ " : neither "
                            ++show (sequentFormulae fromA)++
                            " nor "
                            ++show (sequentFormulae fromB)++
                            " have β as their main connective, wrong rule"
              return False
-- | Reductio ad absurdum: the two referenced sequents must be a formula
-- and its negation (in either order); the discharged assumption, when
-- given, must appear negated as this line's formula.
raaRuleCheck :: Assumptions -> Formulae -> LineNumber -> Proof -> Proof -> Maybe Formulae -> Writer [String] Bool
raaRuleCheck assumptions formulae lineNum fromA fromB maybeFormulae = do
  x <- checkAssumptionsWithDischarge assumptions [(fromA, maybeFormulae), (fromB, maybeFormulae)] lineNum
  y <- isInstanceOfRule
  z <- checkFormulae
  return (x && y && z)
  where isInstanceOfRule :: Writer [String] Bool
        isInstanceOfRule =
          case (sequentFormulae fromA) of
            -- fromA is the negated side.
            Negated x
              | x == (sequentFormulae fromB) -> return True
              | otherwise -> do
                  reportError $ show lineNum++ " : "
                                ++show (sequentFormulae fromA)++
                                " from "
                                ++show (sequentLineNum fromA)++
                                " is not the negation of "
                                ++show (sequentFormulae fromB)++
                                " from "
                                ++show (sequentLineNum fromB)
                  return False
            -- Otherwise fromB must be the negated side.
            _ -> case (sequentFormulae fromB) of
              Negated x
                | x == (sequentFormulae fromA) -> return True
                | otherwise -> do
                    reportError $ show lineNum++ " : "
                                  ++show (sequentFormulae fromB)++
                                  " from "
                                  ++show (sequentLineNum fromB)++
                                  " is not the negation of "
                                  ++show (sequentFormulae fromA)++
                                  " from "
                                  ++show (sequentLineNum fromA)
                    return False
              _ -> do
                reportError $ show lineNum++ " : neither "
                              ++show (sequentFormulae fromB)++
                              " nor "
                              ++show (sequentFormulae fromA)++
                              " contain Β¬ outside of their main connective."
                return False
        -- The line's formula must be the negation of the discharge.
        checkFormulae :: Writer [String] Bool
        checkFormulae = case maybeFormulae of
          Just x
            | Negated x == formulae -> return True
            | otherwise -> do
                reportError $ show lineNum++ " : "
                              ++show formulae++
                              " should be "
                              ++show (Negated x)
                return False
          Nothing -> return True
-- | Negation introduction: the referenced sequent must prove a
-- contradiction (⊥); the line's formula must be the negation of the
-- discharged assumption, when one is given.
negationRuleIntroCheck :: Assumptions -> Formulae -> LineNumber -> Proof -> Maybe Formulae -> Writer [String] Bool
negationRuleIntroCheck assumptions formulae lineNum fromA maybeFormulae = do
  x <- checkAssumptionsWithDischarge assumptions [(fromA, maybeFormulae)] lineNum
  y <- isInstanceOfRule
  z <- checkFormulae
  return (x && y && z)
  where isInstanceOfRule :: Writer [String] Bool
        isInstanceOfRule
          | (sequentFormulae fromA) == Contradiction = return True
          | otherwise = do
              reportError $ show lineNum++ " : "
                            ++show (sequentFormulae fromA)++
                            " at line "
                            ++show (sequentLineNum fromA)++
                            " should be β₯ "
              return False
        -- The line's formula must negate the discharged assumption.
        checkFormulae :: Writer [String] Bool
        checkFormulae = case maybeFormulae of
          Just x
            | Negated x == formulae -> return True
            | otherwise -> do
                reportError $ show lineNum++ " : "
                              ++show formulae++
                              " should be "
                              ++show (Negated x)
                return False
          Nothing -> return True
-- | Negation elimination: the assumption set must contain a formula
-- together with its negation (an internal contradiction); any formula
-- can then be concluded, so the formula itself is not inspected.
negationRuleElimCheck :: Assumptions -> Formulae -> LineNumber -> Writer [String] Bool
negationRuleElimCheck assumptions formulae lineNum
  | L.any (\a -> S.member (Negated a) assumptions) (S.toList assumptions) =
      return True
  | otherwise = do
      reportError $ show lineNum++ " : "
                    ++show (S.toList assumptions)++
                    " does not have a contradiction "
      return False
-- | Double-negation elimination: the referenced sequent must be ¬¬p
-- where p is exactly this line's formula.
doubleNegationRuleElimiCheck :: Assumptions -> Formulae -> LineNumber -> Proof -> Writer [String] Bool
doubleNegationRuleElimiCheck assumptions formulae lineNum fromA = do
  x <- checkAssumptions assumptions [fromA] lineNum
  y <-isInstanceOfRule
  return (x && y)
  where isInstanceOfRule :: Writer [String] Bool
        isInstanceOfRule = case (sequentFormulae fromA) of
          Negated (Negated x)
            | x == formulae -> return True
            | otherwise -> do
                reportError $ show lineNum++ " : "
                              ++show (sequentFormulae fromA)++
                              " β¬ "
                              ++show formulae++
                              ". You need "
                              ++show (Negated (Negated formulae))++
                              " β’ "
                              ++show formulae
                return False
          -- Not doubly negated at the outermost level.
          _ -> do
            reportError $ show lineNum++ " : "
                          ++show (sequentFormulae fromA)++
                          " is not of the form ¬¬p "
            return False
-- | Disjunction elimination: given a sequent proving (l ∨ r) and two
-- sub-proofs of this line's formula — one assuming l, one assuming r —
-- check that the discharges match the disjuncts (either pairing), that
-- both sub-proofs conclude this line's formula, and that the original
-- disjunction's remaining assumptions were carried over.
orRuleElimiCheck :: Assumptions -> Formulae -> LineNumber -> Proof -> Proof -> Formulae -> Proof -> Formulae -> Writer [String] Bool
orRuleElimiCheck assumptions formulae lineNum orSeq fromLeft leftDischarge fromRight rightDischarge = do
  x <- checkAssumptionsWithDischarge assumptions [(fromLeft, Just leftDischarge), (fromRight, Just rightDischarge)] lineNum
  p <- checkFirstOrSequetAssumptions
  y <- isInstanceOfRule
  z <- checkFormulae
  return (x && y && z && p)
  where isInstanceOfRule :: Writer [String] Bool
        isInstanceOfRule =
          case (sequentFormulae orSeq) of
            Sentence l Disjunction r
              -- Each discharge must be used by its sub-proof, and the
              -- two discharges must cover both disjuncts (either order).
              | (S.member leftDischarge (sequentAssump fromLeft) &&
                 ((l == leftDischarge && r == rightDischarge) ||
                  (r == leftDischarge && l == rightDischarge))
                 && (S.member rightDischarge (sequentAssump fromRight))) ->
                  return True
              | otherwise -> do
                  when (not (S.member leftDischarge (sequentAssump fromLeft))) $
                    reportError $ show lineNum++ " : Discharge "
                                  ++show leftDischarge++
                                  " is not in the assumptions from line "
                                  ++show (sequentLineNum fromLeft)
                  when (not (S.member rightDischarge (sequentAssump fromRight))) $
                    reportError $ show lineNum++ " : Discharge "
                                  ++show rightDischarge++
                                  " is not in the assumptions from line "
                                  ++show (sequentLineNum fromRight)
                  when (not (l == leftDischarge || r == leftDischarge)) $
                    reportError $ show lineNum++ " : Discharge "
                                  ++show leftDischarge++
                                  " needs to be either "
                                  ++show l++
                                  " or "
                                  ++show r++
                                  " since you referenced line "
                                  ++show (sequentLineNum orSeq)++
                                  " with "
                                  ++show (sequentFormulae orSeq)
                  when (not (l == rightDischarge || r == rightDischarge)) $
                    reportError $ show lineNum++ " : Discharge "
                                  ++show rightDischarge++
                                  " needs to be either "
                                  ++show l++
                                  " or "
                                  ++show r++
                                  " since you referenced line "
                                  ++show (sequentLineNum orSeq)++
                                  " with "
                                  ++show (sequentFormulae orSeq)
                  return False
            _ -> do
              reportError $ show lineNum++ " : The formulae "
                            ++show (sequentFormulae orSeq)++
                            " from line "
                            ++show (sequentLineNum orSeq)++
                            " should be of the form (a β b) "
              return False
        -- Both sub-proofs must conclude exactly this line's formula.
        checkFormulae :: Writer [String] Bool
        checkFormulae
          | formulae == (sequentFormulae fromLeft) && formulae == (sequentFormulae fromRight) =
              return True
          | otherwise = do
              when (not (formulae == (sequentFormulae fromLeft))) $
                reportError $ show lineNum++ " : The formulae "
                              ++show (sequentFormulae fromLeft)++
                              " from line "
                              ++show (sequentLineNum fromLeft)++
                              " should be "
                              ++show formulae
              when (not (formulae == (sequentFormulae fromRight))) $
                reportError $ show lineNum++ " : The formulae "
                              ++show (sequentFormulae fromRight)++
                              " from line "
                              ++show (sequentLineNum fromRight)++
                              " should be "
                              ++show formulae
              return False
        -- The disjunction's assumptions (minus the two discharges) must
        -- be a subset of this line's assumptions.
        checkFirstOrSequetAssumptions :: Writer [String] Bool
        checkFirstOrSequetAssumptions
          | S.isSubsetOf (S.delete leftDischarge (S.delete rightDischarge (sequentAssump orSeq))) assumptions =
              return True
          | otherwise = do
              reportError $ show lineNum++ " : You forgot to copy over assumption[s] "
                            ++show (S.difference (sequentAssump orSeq)
                                                 (S.intersection (sequentAssump orSeq) assumptions))++
                            " from line "
                            ++show (sequentLineNum orSeq)++
                            "."
              return False
-- | Verify an application of the or-introduction rule: the conclusion must
-- be a disjunction one of whose disjuncts is the referenced premise.
orRuleIntroCheck :: Assumptions -> Formulae -> LineNumber -> Proof -> Writer [String] Bool
orRuleIntroCheck assumptions formulae lineNum fromA = do
  assumpsOk <- checkAssumptions assumptions [fromA] lineNum
  shapeOk   <- isInstanceOfRule
  return (assumpsOk && shapeOk)
  where
    premise = sequentFormulae fromA
    isInstanceOfRule :: Writer [String] Bool
    isInstanceOfRule = case formulae of
      Sentence l Disjunction r
        | l == premise || r == premise -> return True
        | otherwise -> do
            reportError $
              show lineNum ++ " : neither "
                ++ show l ++ " nor " ++ show r
                ++ " β£ " ++ show premise
            return False
      _ -> do
        reportError $
          show lineNum ++ " : "
            ++ show formulae
            ++ " needs to be of the form (p β q)."
        return False
-- | Recursively validate an entire proof tree: check the rule application
-- at this sequent, then every sub-proof it references.  All results are
-- combined with (&&); errors accumulate in the 'Writer' log in the same
-- order as the original sequential binds.
proofSequent :: Proof -> Writer [String] Bool
proofSequent (Sequent seqLineNum assumptions formulae rule) =
  case rule of
    AssmptionRule ->
      assmptionRuleCheck assumptions formulae seqLineNum
    ConjuncRuleIntro fromA fromB ->
      allOf [ conjuncRuleIntroCheck assumptions formulae seqLineNum fromA fromB
            , proofSequent fromA
            , proofSequent fromB ]
    ConjuncRuleElimi fromA ->
      allOf [ conjuncRuleElimiCheck assumptions formulae seqLineNum fromA
            , proofSequent fromA ]
    ImplicaRuleIntro fromA discharge ->
      allOf [ implicaRuleIntroCheck assumptions formulae seqLineNum fromA discharge
            , proofSequent fromA ]
    ImplicaRuleElimi fromA fromB ->
      allOf [ implicaRuleElimiCheck assumptions formulae seqLineNum fromA fromB
            , proofSequent fromA
            , proofSequent fromB ]
    RaaRule fromA fromB maybeFormulae ->
      allOf [ raaRuleCheck assumptions formulae seqLineNum fromA fromB maybeFormulae
            , proofSequent fromA
            , proofSequent fromB ]
    NegationRuleIntro fromA maybeFormulae ->
      allOf [ negationRuleIntroCheck assumptions formulae seqLineNum fromA maybeFormulae
            , proofSequent fromA ]
    NegationRuleElimi ->
      negationRuleElimCheck assumptions formulae seqLineNum
    DoubleNegationRuleElimi fromA ->
      allOf [ doubleNegationRuleElimiCheck assumptions formulae seqLineNum fromA
            , proofSequent fromA ]
    OrRuleElimi orSeq fromLeft leftDischarge fromRight rightDischarge ->
      allOf [ orRuleElimiCheck assumptions formulae seqLineNum orSeq fromLeft leftDischarge fromRight rightDischarge
            , proofSequent orSeq
            , proofSequent fromLeft
            , proofSequent fromRight ]
    OrRuleIntro fromA ->
      allOf [ orRuleIntroCheck assumptions formulae seqLineNum fromA
            , proofSequent fromA ]
  where
    -- Run the checks left-to-right (preserving the order in which errors
    -- are logged) and AND all of their boolean results.
    allOf :: [Writer [String] Bool] -> Writer [String] Bool
    allOf = fmap and . sequence
-- | Read a proof from the file named by the first command-line argument,
-- lex and parse it, verify every rule application, then print the
-- collected error log (oldest first) followed by the overall verdict.
main :: IO ()
main = do
  args <- getArgs
  -- Exhaustive pattern match replaces the previous
  -- 'when (null args) $ fail ...' guard followed by a partial 'head'.
  path <- case args of
    []    -> fail "no arguments, sorry"
    p : _ -> return p
  program <- readFile path
  -- mapM_ print (alexScanTokens program)  -- handy for debugging the lexer
  let proof = convertToTree $ prepParser $ alexScanTokens program
  case runWriter (proofSequent proof) of
    (b, xs) -> do
      -- Errors were prepended as they were reported, so reverse for
      -- chronological order.
      mapM_ putStrLn $ reverse xs
      print b
| Skyfold/propositionalLogicVerifier | src/Verifier.hs | mit | 26,226 | 0 | 31 | 12,215 | 5,628 | 2,629 | 2,999 | 506 | 11 |
module Handler.Tag where
import Import
import qualified Database.Esqueleto as E
import qualified Data.Text as T
import qualified Data.Time.Format as Time
-- | List all articles carrying the tag identified by @slug@, newest first.
--
-- Responds with 404 if no tag with that slug exists.
getTagR :: Text -> Handler Html
getTagR slug = do
    -- Look up the tag row (404 on miss); tId is its key, tag its record.
    Entity tId tag <- runDB $ getBy404 $ UniqueTag slug
    -- Join articles to their tag links and language rows, restricted to
    -- this tag, ordered by creation time descending.
    -- NOTE(review): the two E.on clauses appear in reverse order of the
    -- joins — older esqueleto versions require exactly that; confirm
    -- against the esqueleto version in use.
    articles <- runDB $
      E.select
      $ E.from $ \(article `E.InnerJoin` tagArticle `E.InnerJoin` language) -> do
      E.on $ article E.^. ArticleSlug E.==. tagArticle E.^. TagArticleArticleSlug
      E.on $ article E.^. ArticleLang E.==. language E.^. LanguageId
      E.where_ (tagArticle E.^. TagArticleTag E.==. E.val tId)
      E.orderBy [ E.desc (article E.^. ArticleCreated) ]
      return (article, language)
    -- Render the "tags" widget; it has access to 'tag' and 'articles'.
    defaultLayout $ do
        setTitle $ toHtml $ T.append ("Articles - " :: Text) (tagName tag)
        $(widgetFile "tags")
| builtinnya/lambdar-website | Handler/Tag.hs | mit | 819 | 0 | 16 | 184 | 290 | 150 | 140 | 19 | 1 |
-----------------------------------------------------------------------------
-- |
-- Module : Info
-- License : MIT (see the LICENSE file)
-- Maintainer : Felix Klein (klein@react.uni-saarland.de)
--
-- Several printers to report information back to the user.
--
-----------------------------------------------------------------------------
{-# LANGUAGE
LambdaCase
, MultiParamTypeClasses
, TypeSynonymInstances
, FlexibleContexts
#-}
-----------------------------------------------------------------------------
module Info
( prTitle
, prDescription
, prSemantics
, prTarget
, prTags
, prInfo
, prParameters
, prInputs
, prOutputs
, prVersion
, prHelp
, prReadme
, prReadmeMd
, prError
) where
-----------------------------------------------------------------------------
import Syfco
( Configuration(..)
, Semantics(..)
, Target(..)
, WriteFormat(..)
, WriteMode(..)
, QuoteMode(..)
, Specification
, defaultCfg
, title
, description
, semantics
, target
, tags
, parameters
, inputs
, outputs
, version
)
import Data.Convertible
( convert
)
import Control.Monad
( unless
)
import System.Exit
( exitFailure
)
import System.IO
( hPutStrLn
, stderr
)
-----------------------------------------------------------------------------
-- | Rendering target for the generated text: plain terminal output,
-- the CLI help screen, or Markdown (used to generate the README files).
data Mode = Plain | Help | Markdown
-----------------------------------------------------------------------------
-- | Prints the title of the given specification.
prTitle
  :: Specification -> IO ()
prTitle = putStrLn . title
-----------------------------------------------------------------------------
-- | Prints the description of the given specification.
prDescription
  :: Specification -> IO ()
prDescription = putStrLn . description
-----------------------------------------------------------------------------
-- | Prints the semantics of the given specification.
prSemantics
  :: Specification -> IO ()
prSemantics s = putStrLn name
  where
    -- Render the semantics constructor in the TLSF surface syntax.
    name = case semantics s of
      SemanticsMealy       -> "Mealy"
      SemanticsMoore       -> "Moore"
      SemanticsStrictMealy -> "Strict,Mealy"
      SemanticsStrictMoore -> "Strict,Moore"
-----------------------------------------------------------------------------
-- | Prints the target of the given specification.
prTarget
  :: Specification -> IO ()
prTarget s = putStrLn name
  where
    name = case target s of
      TargetMealy -> "Mealy"
      TargetMoore -> "Moore"
-----------------------------------------------------------------------------
-- | Prints the tag list of the given specification.
prTags
  :: Specification -> IO ()
prTags s = case tags s of
  -- No output at all when the spec has no tags.
  []     -> return ()
  -- Separate tags with ", ", matching prParameters/prInputs/prOutputs.
  -- (The previous version printed the separator reversed, i.e. "a ,b".)
  x : xr -> putStrLn $ x ++ concatMap (", " ++) xr
-----------------------------------------------------------------------------
-- | Prints the parameters of the given specification.
prParameters
  :: Specification -> IO ()
prParameters s =
  putStrLn $ case parameters s of
    []     -> ""
    p : pr -> p ++ concatMap (", " ++) pr
-----------------------------------------------------------------------------
-- | Prints the input signals of the given specification.
prInputs
  :: Configuration -> Specification -> IO ()
prInputs c s = case inputs c s of
  Left err           -> prError $ show err
  Right (sig : sigs) -> putStrLn $ sig ++ concatMap (", " ++) sigs
  Right []           -> return ()
-----------------------------------------------------------------------------
-- | Prints the output signals of the given specification.
prOutputs
  :: Configuration -> Specification -> IO ()
prOutputs c s = case outputs c s of
  Left err           -> prError $ show err
  Right (sig : sigs) -> putStrLn $ sig ++ concatMap (", " ++) sigs
  Right []           -> return ()
-----------------------------------------------------------------------------
-- | Prints the complete INFO section of the given specification.
prInfo
  :: Specification -> IO ()
prInfo s = do
  mapM_ putStrLn
    [ "Title: \"" ++ title s ++ "\""
    , "Description: \"" ++ description s ++ "\"" ]
  putStr "Semantics: "
  prSemantics s
  putStr "Target: "
  prTarget s
  -- The tags line is omitted entirely for an empty tag list.
  unless (null $ tags s) $ do
    putStr "Tags: "
    prTags s
-----------------------------------------------------------------------------
-- | Prints the version and the program name.
prVersion
  :: IO ()
prVersion =
  mapM_ putStrLn
    [ "SyFCo (v" ++ version ++ ")"
    , "The Synthesis Format Converter" ]
-----------------------------------------------------------------------------
-- | Prints the help of the program.
prHelp
  :: IO ()
prHelp = putStr helpText
  where helpText = usage Help
-----------------------------------------------------------------------------
-- | Prints the content of the README file.
prReadme
  :: IO ()
prReadme = putStr (readme Plain)
-----------------------------------------------------------------------------
-- | Prints the content of the README.md file.
prReadmeMd
  :: IO ()
prReadmeMd = putStr (readme Markdown)
-----------------------------------------------------------------------------
-- | Render the complete CLI usage text.  The same option-table data is
-- rendered either as plain text, as the @--help@ screen, or as Markdown
-- (for README generation), depending on the 'Mode'.
--
-- Fix: the help text for @-a, --print-tags@ previously read
-- "output the target of the input file" (copy-pasted from @-g@); it now
-- correctly describes the tags.
usage
  :: Mode -> String
usage m =
  unlines $
  [ switch "## Usage\n\n" "Usage: " "## Usage\n\n" ++
    code m ("syfco [OPTIONS]... <file>") ++
    switch "\n" ("\n\nA Synthesis Format Converter to read and " ++
                 "transform Temporal Logic\nSpecification Format " ++
                 "(TLSF) files.") "" ] ++
  section "File Operations" (prTable foTable) ++
  section "File Modifications" (prTable fmTable) ++
  section ("Formula Transformations " ++
           "(disabled by default)") (prTable ftTable) ++
  section "Check Specification Type (and exit)" (prTable csTable) ++
  section "Extract Information (and exit)" (prTable eiTable) ++
  section "Sample Usage" sample
  where
    -- Width of the "-x, --long-name" part of a table row.
    idl (s1,s2,_,_) = length s1 + length s2 + 7
    -- Widest such prefix over all tables; used to align descriptions.
    len = foldl max 0 $
      [ foldl max 0 $ map idl foTable
      , foldl max 0 $ map idl fmTable
      , foldl max 0 $ map idl ftTable
      , foldl max 0 $ map idl csTable
      , foldl max 0 $ map idl eiTable ]
    -- Select one of three renderings by output mode.
    switch s1 s2 s3 = case m of
      Plain -> s1
      Help -> s2
      Markdown -> s3
    section h xs = [ "", ind ++ h ++ ":", "" ] ++ xs
    ind = case m of
      Plain -> "### "
      Help -> ""
      Markdown -> "#### "
    wrap str = case m of
      Markdown -> code m str
      _ -> "`" ++ str ++ "`"
    sample = [ codeblock m $ map (("syfco ") ++)
               [ "-o converted -f promela -m fully -nnf -nd file.tlsf"
               , "-f psl -op n=3 -os Strict,Mealy -o converted file.tlsf"
               , "-o converted -in"
               , "-t file.tlsf" ] ]
    -- Render one option table, word-wrapping descriptions to the
    -- remaining terminal width (80 columns total).
    prTable xs =
      let xs' =
            map (\(x,y,z,vs) ->
                  (x,y,adaptsub (80 - len - 3) z,
                   adapt (80 - len - 3) vs)) xs
      in case m of
        Markdown ->
          [ "|Command|Description|"
          , "|-------|-----------|" ]
          ++ map prMRow (filter (\(s,_,_,_) -> not $ null s) xs')
        _ ->
          concatMap prRow xs'
    adaptsub l c = case c of
      Nothing -> Nothing
      Just xs -> Just $ map (\(a,b,c) -> (a,b,adapt l c)) xs
    adapt
      :: Int -> [String] -> [String]
    adapt l xs = concatMap (adapt' l) xs
    -- Wrap a single description line to width l (plain/help mode only).
    adapt' l str
      | length str <= l = [str]
      | otherwise = case m of
          Markdown -> [str]
          _ -> rearrange l [] [] 0 $ words str
    rearrange _ a b _ [] =
      reverse ((unwords $ reverse b):a)
    rearrange l a [] _ (x:xr)
      | length x > l = rearrange l (x:a) [] 0 xr
      | otherwise = rearrange l a [x] (length x) xr
    rearrange l a b n (x:xr)
      | n + length x + 1 > l =
          rearrange l ((unwords $ reverse b):a) [x] (length x) xr
      | otherwise = rearrange l a (x:b) (n + length x + 1) xr
    -- One Markdown table row, with sub-options as a nested HTML table.
    prMRow (short,long,sub,desc) =
      "|" ++ code m ("-" ++ short ++ ", --" ++ long) ++ "|" ++
      ( case desc of
          [] -> ""
          x:xr -> concat (x : map ("</br> " ++) xr) ++
                  case sub of
                    Nothing -> ""
                    Just ys -> "</br> <table><tbody> " ++
                               concatMap prMSub ys ++
                               " </tbody></table>"
      ) ++ "|"
    prMSub (name,d,desc) =
      "<tr><td>" ++ code m name ++
      "</td><td>" ++ concat (addbreaks desc) ++
      (if d then " (default)" else "") ++
      "</td></tr>"
    addbreaks xs = case xs of
      [] -> []
      x:xr -> x : map ("</br>" ++) xr
    -- One plain/help mode row, aligned to the common prefix width.
    prRow (short,long,sub,desc)
      | short == "" = [ "" ]
      | otherwise = case desc of
          [] ->
            prRow (short,long,sub,[""])
          x:xr ->
            ( ( " -" ++ short ++ ", --" ++ long ++
                replicate (len - (length short + length long + 7)) ' '
                ++ " : " ++ x)
              : ( [ replicate (len + 3) ' ' ++ y | y <- xr ] ++
                  (case sub of
                     Nothing -> []
                     Just ys -> [""] ++ concatMap prSub ys ++ [""] ) ) )
    prSub (n,d,desc) = case desc of
      [] -> prSub (n,d,[""])
      x:xr ->
        (" * " ++ n ++
         (if d then
            " [default]" ++ replicate (len - 16 - length n) ' '
          else
            replicate (len - 6 - length n) ' ') ++
         " : " ++ x) :
        map ((replicate (len+3) ' ') ++) xr
    -- File operations.
    foTable =
      [ ("o", "output", Nothing,
         [ "path of the output file (results are printed " ++
           "to STDOUT if not set)" ])
      , ("r", "read-config", Nothing,
         [ "read parameters from the given configuration file (may " ++
           "overwrite prior arguments)" ])
      , ("w", "write-config", Nothing,
         [ "write the current configuration to the given path " ++
           "(includes later arguments)" ])
      , ("f", "format", Just formats,
         [ "output format - possible values are:" ])
      , ("m", "mode", Just modes,
         [ "output mode - possible values are:" ])
      , ("q", "quote", Just quotes,
         [ "quote mode - possible values are:" ])
      , ("pf", "part-file", Nothing,
         [ "create a partitioning (" ++ code m ".part" ++ ") file" ])
      , ("bd", "bus-delimiter", Nothing,
         [ "delimiter used to print indexed bus signals",
           "(default: "++ wrap (busDelimiter defaultCfg) ++ ")" ])
      , ("ps", "prime-symbol", Nothing,
         [ "symbol/string denoting primes in signals",
           "(default: " ++ wrap (primeSymbol defaultCfg) ++ ")" ])
      , ("as", "at-symbol", Nothing,
         [ "symbol/string denoting @-symbols in signals",
           "(default: " ++ wrap (atSymbol defaultCfg) ++ ")" ])
      , ("in", "stdin", Nothing,
         [ "read the input file from STDIN" ]) ]
    -- File modifications.
    fmTable =
      [ ("os", "overwrite-semantics", Nothing,
         [ "overwrite the semantics of the file" ])
      , ("ot", "overwrite-target", Nothing,
         [ "overwrite the target of the file" ])
      , ("op", "overwrite-parameter", Nothing,
         [ "overwrite a parameter of the file" ]) ]
    -- Formula transformations.
    ftTable =
      [ ("s0", "weak-simplify", Nothing,
         [ "simple simplifications (removal of true/false " ++
           "in boolean connectives, redundant temporal " ++
           "operators, etc.)" ])
      , ("s1", "strong-simplify", Nothing,
         [ "advanced simplifications",
           "(includes: " ++
           code m "-s0 -nnf -nw -nr -pgo -pfo -pxo" ++ ")" ])
      , ("nnf", "negation-normal-form", Nothing,
         [ "convert the resulting LTL formula into negation " ++
           "normal form" ])
      , ("pgi", "push-globally-inwards", Nothing,
         [ "push global operators inwards",
           "  " ++ code m "G (a && b) => (G a) && (G b)" ])
      , ("pfi", "push-finally-inwards", Nothing,
         [ "push finally operators inwards",
           "  " ++ code m "F (a || b) => (F a) || (F b)" ])
      , ("pxi", "push-next-inwards", Nothing,
         [ "push next operators inwards",
           "  " ++ code m "X (a && b) => (X a) && (X b)",
           "  " ++ code m "X (a || b) => (X a) || (X b)" ])
      , ("pgo", "pull-globally-outwards", Nothing,
         [ "pull global operators outwards",
           "  " ++ code m "(G a) && (G b) => G (a && b)" ])
      , ("pfo", "pull-finally-outwards", Nothing,
         [ "pull finally operators outwards",
           "  " ++ code m "(F a) || (F b) => F (a || b)" ])
      , ("pxo", "pull-next-outwards", Nothing,
         [ "pull next operators outwards",
           "  " ++ code m "(X a) && (X b) => X (a && b)",
           "  " ++ code m "(X a) || (X b) => X (a || b)" ])
      , ("nw", "no-weak-until", Nothing,
         [ "replace weak until operators",
           "  " ++ code m "a W b => (a U b) || (G a)" ])
      , ("nr", "no-release", Nothing,
         [ "replace release operators",
           "  " ++ code m "a R b => b W (a && b)" ])
      , ("nf", "no-finally", Nothing,
         [ "replace finally operators",
           "  " ++ code m "F a => true U a" ])
      , ("ng", "no-globally", Nothing,
         [ "replace global operators",
           "  " ++ code m "G a => false R a" ])
      , ("nd", "no-derived", Nothing,
         [ "same as: " ++ code m "-nw -nf -ng" ]) ]
    -- Specification-type checks.
    csTable =
      [ ("gr", "generalized-reactivity", Nothing,
         [ "check whether the input is in the " ++
           "Generalized Reactivity fragment" ]) ]
    -- Information extraction.
    eiTable =
      [ ("c", "check", Nothing,
         [ "check that input conforms to TLSF" ])
      , ("t", "print-title", Nothing,
         [ "output the title of the input file" ])
      , ("d", "print-description", Nothing,
         [ "output the description of the input file" ])
      , ("s", "print-semantics", Nothing,
         [ "output the semantics of the input file" ])
      , ("g", "print-target", Nothing,
         [ "output the target of the input file" ])
      , ("a", "print-tags", Nothing,
         [ "output the tags of the input file" ])
      , ("p", "print-parameters", Nothing,
         [ "output the parameters of the input file" ])
      , ("i", "print-info", Nothing,
         [ "output all data of the info section" ])
      , ("ins", "print-input-signals", Nothing,
         [ "output the input signals of the specification" ])
      , ("outs", "print-output-signals", Nothing,
         [ "output the output signals of the specification" ])
      , ("","", Nothing,
         [])
      , ("v", "version", Nothing,
         [ "output version information" ])
      , ("h", "help", Nothing,
         [ "display this help" ]) ]
    formats =
      [ (convert FULL, True,
         ["input file with applied transformations"])
      , (convert BASIC, False,
         ["high level format (without global section)"])
      , (convert UTF8, False,
         ["human readable output using UTF8 symbols"])
      , (convert WRING, False,
         ["Wring input format"])
      , (convert LILY, False,
         ["Lily input format"])
      , (convert ACACIA, False,
         ["Acacia / Acacia+ input format"])
      , (convert ACACIASPECS, False,
         ["Acacia input format with spec units"])
      , (convert LTLXBA, False,
         ["LTL2BA / LTL3BA input format"])
      , (convert LTLXBADECOMP, False,
         ["LTL2BA / LTL3BA input format (decomposed)"])
      , (convert LTL, False,
         ["pure LTL formula"])
      , (convert PROMELA, False,
         ["Promela LTL"])
      , (convert UNBEAST, False,
         ["Unbeast input format"])
      , (convert SLUGS, False,
         ["structured Slugs format [GR(1) only]"])
      , (convert SLUGSIN, False,
         ["SlugsIn format [GR(1) only]"])
      , (convert PSL, False,
         ["PSL Syntax"])
      , (convert SMV, False,
         ["SMV file format"])
      , (convert SMVDECOMP, False,
         ["SMV file format (decomposed)"])
      , (convert BOSY, False,
         ["Bosy input format"])
      , (convert RABINIZER, False,
         ["Rabinizer input format"])
      ]
    modes =
      [ (convert Pretty, True,
         ["pretty printing (as less parentheses as possible)"])
      , (convert Fully, False,
         ["output fully parenthesized formulas"]) ]
    quotes =
      [ (convert NoQuotes, True,
         ["identifiers are not quoted"])
      , (convert DoubleQuotes, False,
         ["identifiers are quoted using \""]) ]
-----------------------------------------------------------------------------
-- | Render the README text, either as plain text or as Markdown.
--
-- In Markdown mode inline links stay inline; in all other modes
-- 'appendlinks' rewrites every @[text](url)@ occurrence into a numbered
-- footnote reference and appends a link list at the bottom.
readme
  :: Mode -> String
readme m = appendlinks $ unlines
  [ switch
      ("# Synthesis Format Conversion Tool\n# (Version " ++
       version ++ ")")
      ("# Synthesis Format Conversion Tool<br/>(Version " ++
       version ++ ")")
  , ""
  , "A tool for reading, manipulating and transforming synthesis"
  , "specifications in " ++
    link "TLSF" "https://arxiv.org/abs/1604.02284" ++ "."
  , ""
  , "## About this tool"
  , ""
  , "The tool interprets the high level constructs of " ++
    link "TLSF 1.1" "https://arxiv.org/abs/1604.02284"
  , "(functions, sets, ...) and supports the transformation of the"
  , "specification to Linear Temporal Logic (LTL) in different output"
  , "formats. The tool has been designed to be modular with respect to the"
  , "supported output formats and semantics. Furthermore, the tool allows"
  , "to identify and manipulate parameters, targets and semantics of a"
  , "specification on the fly. This is especially thought to be useful for"
  , "comparative studies, as they are for example needed in the"
  , link "Synthesis Competition" "http://www.syntcomp.org" ++ "."
  , ""
  , "The main features of the tool are summarized as follows:"
  , ""
  , "* Interpretation of high level constructs, which allows to reduce the"
  , "  specification to its basic fragment where no more parameter and"
  , "  variable bindings occur (i.e., without the GLOBAL section)."
  , ""
  , "* Transformation to other existing specification formats, like"
  , "  Basic TLSF, " ++
    link "Promela LTL" "http://spinroot.com/spin/Man/ltl.html" ++ ", " ++
    link "PSL" ("https://en.wikipedia.org/wiki/" ++
                "Property_Specification_Language")
    ++ ", " ++
    link "Unbeast" "https://www.react.uni-saarland.de/tools/unbeast"
    ++ ", " ++
    link "Wring" "http://www.ist.tugraz.at/staff/bloem/wring.html"
    ++ ","
  , "  " ++
    link "structured Slugs"
         ("https://github.com/VerifiableRobotics/slugs/" ++
          "blob/master/doc/input_formats.md#structuredslugs")
    ++ ", and " ++ link "SlugsIn"
         ("https://github.com/VerifiableRobotics/slugs/blob/master/" ++
          "doc/input_formats.md#slugsin") ++ "."
  , ""
  , "* Syntactical analysis of membership in GR(k) for any k (modulo"
  , "  Boolean identities)."
  , ""
  , "* On the fly adjustment of parameters, semantics or targets."
  , ""
  , "* Preprocessing of the resulting LTL formula."
  , ""
  , "* Conversion to negation normal form."
  , ""
  , "* Replacement of derived operators."
  , ""
  , "* Pushing/pulling next, eventually, or globally operators"
  , "  inwards/outwards."
  , ""
  , "* Standard simplifications."
  , switch "\n" ""
  , "## Installation"
  , ""
  , "SyfCo is written in Haskell and can be compiled using the"
  , "Glasgow Haskell Compiler (GHC). To install the tool you can either"
  , "use " ++ link "cabal" "https://www.haskell.org/cabal" ++ " or " ++
    link "stack" "https://docs.haskellstack.org/en/stable/README/" ++
    " (recommended)."
  , "For more information about the purpose of these tools and why you"
  , "should prefer using stack instead of cabal, we recommend reading"
  , link "this blog post"
      "https://www.fpcomplete.com/blog/2015/06/why-is-stack-not-cabal" ++
    " by Mathieu Boespflug. "
  , ""
  , "To install the tool with stack use:"
  , ""
  , scb "stack install"
  , ""
  , "Stack then automatically fetches the right compiler version"
  , "and required dependencies. After that it builds and installs"
  , "the package into you local stack path. If you instead prefer"
  , "to build only, use `stack build`."
  , ""
  , "If you insist to use cabal instead, we recommend at least to use"
  , "a sandbox. Initialize the sandbox and configure the project via"
  , ""
  , scb "cabal sandbox init && cabal configure"
  , ""
  , "Then use `cabal build` or `cabal install` to build or install the"
  , "tool."
  , ""
  , "Note that (independent of the chosen build method) building the"
  , "tool will only create the final executable in a hidden sub-folder,"
  , "which might get cumbersome for development or testing local changes."
  , "Hence, for this purpose, you may prefer to use `make`. The makefile"
  , "determines the chosen build method, rebuilds the package, and copies"
  , "the final `syfco` executable to the root directory of the project."
  , ""
  , "If you still encounter any problems, please inform us via the"
  , switch
      ("project bug tracker:\n\n  " ++
       "https://github.com/reactive-systems/syfco/issues\n\n")
      (link "project bug tracker"
         "https://github.com/reactive-systems/syfco/issues"
       ++ ".\n")
  , usage m
  , "## Examples"
  , ""
  , "A number of synthesis benchmarks in TLSF can be found in the"
  , code m "/examples" ++ " directory."
  , ""
  , "## Syfco Library"
  , ""
  , "Syfco is also provided as a Haskell library. In fact, the syfco"
  , "executable is nothing different than a fancy command line interface"
  , "to this library. If you are interested in using the interface, we"
  , "recommend to build and check the interface documentation, which is"
  , "generated by:"
  , ""
  , scb "make haddock"
  , ""
  , "## Editor Support"
  , ""
  , "If you use " ++ link "Emacs" "https://www.gnu.org/software/emacs" ++
    ", you should try our emacs mode (" ++ code m "tlsf-mode.el" ++ "),"
  , "which can be found in the " ++ code m "/misc" ++ " directory."
  , ""
  , "## Adding output formats"
  , ""
  , "If you like to add a new output format, first consider"
  , code m "/Writer/Formats/Example.hs" ++
    ", which contains the most common"
  , "standard constructs and a short tutorial." ]
  where
    -- Shell code: backticked in Markdown, indented otherwise.
    scb str = case m of
      Markdown -> "`" ++ str ++ "`"
      _ -> " " ++ str
    switch s1 s2 = case m of
      Markdown -> s2
      _ -> s1
    -- Inline Markdown link; rewritten to a footnote by 'appendlinks'
    -- in non-Markdown modes.
    link n url = "[" ++ n ++ "](" ++ url ++ ")"
    appendlinks str = case m of
      Markdown -> str
      _ ->
        let (str',ls) = replclct [] [] [] [] (0 :: Int) (0 :: Int) str
        in str' ++
           "\n--------------------------------------------------\n\n" ++
           concatMap (\(n,_,s2) -> "[" ++ show n ++ "]" ++
                                   replicate (4 - length (show n)) ' '
                                   ++ s2 ++ "\n") ls
    -- Character-level state machine over the rendered text.  NOTE: the
    -- parameter m here shadows the outer 'Mode' argument; it is the
    -- parser state (0 = text, 1 = inside "[..", 2 = after "]", 3 =
    -- inside "(..)" of a link).  a: emitted footnotes, b: output text
    -- (reversed), c1/c2: link text / url being collected, n: next
    -- footnote number.
    replclct a b c1 c2 n m xs = case xs of
      [] -> case m of
        0 -> (reverse b, reverse a)
        1 -> (reverse (c1 ++ ('[' : b)), reverse a)
        2 -> (reverse (('(' : ']' : c1) ++ ('[' : b)), reverse a)
        _ -> (reverse (c2 ++ ('(' : ']' : c1) ++ ('[' : b)), reverse a)
      x:xr -> case (x,m) of
        ('[',0) -> replclct a b c1 c2 n 1 xr
        ( _ ,0) -> replclct a (x:b) c1 c2 n 0 xr
        (']',1) -> replclct a b c1 c2 n 2 xr
        ( _ ,1) -> replclct a b (x:c1) c2 n 1 xr
        ('(',2) -> replclct a b c1 c2 n 3 xr
        ( _ ,2) -> replclct a (x:(']':c1) ++ ('[': b)) [] [] n 0 xr
        (')',_) -> replclct ((n,reverse c1, reverse c2):a)
                     ((']' : reverse (show n)) ++ ('[' : ' ' : c1) ++ b)
                     [] [] (n+1) 0 xr
        ( _ ,_) -> replclct a b c1 (x:c2) n 3 xr
-----------------------------------------------------------------------------
-- | Wrap a string as inline code when rendering Markdown; in every other
-- mode the string is returned unchanged.
--
-- Inside Markdown the literal pipe character must be escaped (it would
-- otherwise terminate a table cell in the option tables produced by
-- 'usage'), so it is replaced by its HTML entity. The previous mapping
-- @'|' -> "|"@ was an identity and left pipes unescaped.
code
  :: Mode -> String -> String
code m str = case m of
  Markdown -> "<code>" ++ concatMap escapePipe str ++ "</code>"
  _ -> str
  where
    escapePipe '|' = "&#124;"
    escapePipe x = [x]
-----------------------------------------------------------------------------
codeblock
  :: Mode -> [String] -> String
codeblock m xs = case m of
  Markdown -> "```\n" ++ unlines xs ++ "```\n"
  -- Plain/help mode: indent every line by two spaces instead of fencing.
  _        -> unlines [ ' ' : ' ' : x | x <- xs ]
-----------------------------------------------------------------------------
-- | Prints an error to STDERR and then terminates the program.
prError
  :: String -> IO a
prError err = hPutStrLn stderr err >> exitFailure
-----------------------------------------------------------------------------
| reactive-systems/syfco | src/syfco/Info.hs | mit | 24,323 | 0 | 24 | 6,865 | 5,729 | 3,159 | 2,570 | 581 | 19 |
-- Exclamation marks series #2: Remove all exclamation marks from the end of sentence
-- https://www.codewars.com/kata/57faece99610ced690000165
module Kata where
-- | Strip every trailing '!' from the end of the sentence.
remove :: String -> String
remove str = reverse (dropWhile ('!' ==) (reverse str))
| gafiatulin/codewars | src/8 kyu/ExclMark2.hs | mit | 238 | 0 | 8 | 34 | 35 | 21 | 14 | 3 | 1 |
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
module System.Etc.Internal.Resolver.Cli.Command (resolveCommandCli, resolveCommandCliPure) where
import RIO
import qualified RIO.HashMap as HashMap
import qualified RIO.Text as Text
import System.Environment (getArgs, getProgName)
import qualified Data.Aeson as JSON
import qualified Options.Applicative as Opt
import System.Etc.Internal.Resolver.Cli.Common
import System.Etc.Internal.Types
import qualified System.Etc.Internal.Spec.Types as Spec
--------------------------------------------------------------------------------
-- | Turn a CLI entry spec into the commands it belongs to together with an
-- optparse-applicative parser for its value; entries without a command
-- key are rejected with 'CommandKeyMissing'.
entrySpecToJsonCli
  :: (MonadThrow m)
  => Spec.ConfigValueType
  -> Bool
  -> Spec.CliEntrySpec cmd
  -> m (Vector cmd, Opt.Parser (Maybe (Value JSON.Value)))
entrySpecToJsonCli cvType isSensitive entrySpec = case entrySpec of
  Spec.PlainEntry{} -> throwM CommandKeyMissing
  Spec.CmdEntry cmds settings ->
    return (cmds, settingsToJsonCli cvType isSensitive settings)
-- | Fold one leaf config entry into the per-command parser accumulator.
--
-- If the entry has no CLI source it contributes nothing.  Otherwise its
-- optparse parser is merged, under @specEntryKey@, into the 'SubConfig'
-- parser of every command the entry is declared for.
configValueSpecToCli
  :: (MonadThrow m, Eq cmd, Hashable cmd)
  => HashMap cmd (Opt.Parser ConfigValue)
  -> Text
  -> Spec.ConfigValueType
  -> Bool
  -> Spec.ConfigSources cmd
  -> m (HashMap cmd (Opt.Parser ConfigValue))
configValueSpecToCli acc0 specEntryKey cvType isSensitive sources =
  -- Applicatively combine the entry's parser with the accumulated
  -- SubConfig parser, inserting the parsed value at specEntryKey.
  -- (A ConfigValue accumulator is left untouched.)
  let updateAccConfigOptParser configValueParser accOptParser =
        (\configValue accSubConfig -> case accSubConfig of
            ConfigValue{} -> accSubConfig
            SubConfig subConfigMap ->
              subConfigMap & HashMap.alter (const $ Just configValue) specEntryKey & SubConfig
          )
          <$> configValueParser
          <*> accOptParser
  in case Spec.cliEntry sources of
       Nothing -> return acc0
       Just entrySpec -> do
         (commands, jsonOptParser) <- entrySpecToJsonCli cvType isSensitive entrySpec
         let configValueParser = jsonToConfigValue <$> jsonOptParser
         -- Merge the parser into each command's slot, starting from an
         -- empty SubConfig parser for commands not seen yet.
         foldM
           (\acc command ->
             acc
               & HashMap.alter
                   (\mAccParser ->
                     mAccParser
                       & fromMaybe (pure mempty)
                       & updateAccConfigOptParser configValueParser
                       & Just
                   )
                   command
               & return
           )
           acc0
           commands
-- | Fold a nested config section into the per-command parser accumulator.
--
-- The whole sub-spec is first processed recursively into its own
-- per-command parsers; each of those is then nested, under
-- @specEntryKey@, inside the corresponding command's existing parser.
-- A command that appears in the sub-spec but not in the accumulator is
-- an error ('UnknownCommandKey').
subConfigSpecToCli
  :: (MonadThrow m, JSON.FromJSON cmd, JSON.ToJSON cmd, Eq cmd, Hashable cmd)
  => Text
  -> HashMap.HashMap Text (Spec.ConfigValue cmd)
  -> HashMap cmd (Opt.Parser ConfigValue)
  -> m (HashMap cmd (Opt.Parser ConfigValue))
subConfigSpecToCli specEntryKey subConfigSpec acc =
  -- Applicatively nest the sub-config's parser result into the
  -- accumulated SubConfig parser at specEntryKey.
  let updateAccConfigOptParser subConfigParser accOptParser =
        (\subConfig accSubConfig -> case accSubConfig of
            ConfigValue{} -> accSubConfig
            SubConfig subConfigMap ->
              subConfigMap & HashMap.alter (const $ Just subConfig) specEntryKey & SubConfig
          )
          <$> subConfigParser
          <*> accOptParser
      addSubParserCommand command subConfigParser = HashMap.alter
        (\mAccOptParser -> case mAccOptParser of
          -- NOTE: this Nothing branch runs in the Maybe returned by
          -- HashMap.alter, so throwM here resolves to the Maybe
          -- MonadThrow instance (the alteration just yields Nothing).
          Nothing -> do
            commandText <- commandToKey command
            throwM $ UnknownCommandKey (Text.intercalate ", " commandText)
          Just accOptParser -> Just $ updateAccConfigOptParser subConfigParser accOptParser
        )
        command
  in do
    parserPerCommand <- foldM specToConfigValueCli
                              HashMap.empty
                              (HashMap.toList subConfigSpec)
    parserPerCommand & HashMap.foldrWithKey addSubParserCommand acc & return
-- | Dispatch a single spec entry to the leaf or sub-section folder.
specToConfigValueCli
  :: (MonadThrow m, JSON.FromJSON cmd, JSON.ToJSON cmd, Eq cmd, Hashable cmd)
  => HashMap cmd (Opt.Parser ConfigValue)
  -> (Text, Spec.ConfigValue cmd)
  -> m (HashMap cmd (Opt.Parser ConfigValue))
specToConfigValueCli acc (entryKey, entrySpec) = case entrySpec of
  Spec.SubConfig subSpec ->
    subConfigSpecToCli entryKey subSpec acc
  Spec.ConfigValue { Spec.configValueType = cvType
                   , Spec.isSensitive    = sensitive
                   , Spec.configSources  = sources } ->
    configValueSpecToCli acc entryKey cvType sensitive sources
-- | Build the initial accumulator: one empty 'SubConfig' parser for every
-- command declared in the spec's @commands@ section.
--
-- Throws 'CommandsKeyNotDefined' when the spec has no program section or
-- no commands key.
configValueCliAccInit
  :: (MonadThrow m, JSON.FromJSON cmd, Eq cmd, Hashable cmd)
  => Spec.ConfigSpec cmd
  -> m (HashMap cmd (Opt.Parser ConfigValue))
configValueCliAccInit spec =
  let zeroParser = pure $ SubConfig HashMap.empty
      -- Maybe-monad lookup: program section, then its commands.
      commandsSpec = do
        programSpec <- Spec.specCliProgramSpec spec
        Spec.cliCommands programSpec
  in case commandsSpec of
       Nothing -> throwM CommandsKeyNotDefined
       Just commands -> foldM
         (\acc (commandVal, _) -> do
           -- Decode the command key text into the caller's cmd type.
           command <- parseCommandJsonValue (JSON.String commandVal)
           return $ HashMap.insert command zeroParser acc
         )
         HashMap.empty
         (HashMap.toList commands)
-- | Combine the per-command 'ConfigValue' parsers into a single
-- optparse-applicative sub-command parser that yields the selected
-- command together with its parsed 'Config'.
joinCommandParsers
  :: (MonadThrow m, JSON.ToJSON cmd)
  => HashMap cmd (Opt.Parser ConfigValue)
  -> m (Opt.Parser (cmd, Config))
joinCommandParsers parserPerCommand =
  let joinParser acc (command, subConfigParser) =
        let parser = fmap (\subConfig -> (command, Config subConfig)) subConfigParser
        in do
          -- A command may map to several textual aliases; register a
          -- sub-command for each of them.
          commandTexts <- commandToKey command
          let commandParsers = map
                (\commandText -> Opt.command
                  (Text.unpack commandText)
                  (Opt.info (Opt.helper <*> parser) Opt.idm)
                )
                commandTexts
          -- Mod values form a monoid; fold the accumulated mods together
          -- with the newly created command mods.
          [acc] & (++ commandParsers) & mconcat & return
  in Opt.subparser <$> foldM joinParser Opt.idm (HashMap.toList parserPerCommand)
-- | Assemble the complete command-line parser for a 'Spec.ConfigSpec':
-- seed one parser per declared command, fold every spec entry into
-- that accumulator, then merge everything into a single sub-command
-- parser.
specToConfigCli
  :: (MonadThrow m, JSON.FromJSON cmd, JSON.ToJSON cmd, Eq cmd, Hashable cmd)
  => Spec.ConfigSpec cmd
  -> m (Opt.Parser (cmd, Config))
specToConfigCli spec =
  configValueCliAccInit spec
    >>= \initialAcc ->
          foldM specToConfigValueCli
                initialAcc
                (HashMap.toList $ Spec.specConfigValues spec)
    >>= joinCommandParsers
{-|
Dynamically generate an OptParser CLI with sub-commands from the spec settings
declared on the @ConfigSpec@. This processes the OptParser from the given
input rather than fetching it from the OS.

Returns the selected record parsed from the sub-command input and the
configuration map with keys defined for that sub-command.
-}
resolveCommandCliPure
  :: (MonadThrow m, JSON.FromJSON cmd, JSON.ToJSON cmd, Eq cmd, Hashable cmd)
  => Spec.ConfigSpec cmd -- ^ Config Spec (normally parsed from json or yaml file)
  -> Text                -- ^ Name of the program running the CLI
  -> [Text]              -- ^ Arglist for the program
  -> m (cmd, Config)     -- ^ Selected command and Configuration Map
resolveCommandCliPure configSpec progName args = do
  configParser <- specToConfigCli configSpec
  let
    -- Program-level help metadata is only present when the spec
    -- declares a CLI program section.
    programModFlags = case Spec.specCliProgramSpec configSpec of
      Nothing -> mempty
      Just programSpec -> mconcat
        [ Opt.fullDesc
        , Opt.progDesc (Text.unpack (Spec.cliProgramDesc programSpec))
        , Opt.header (Text.unpack (Spec.cliProgramHeader programSpec))
        ]
    programParser = Opt.info (Opt.helper <*> configParser) programModFlags
    programResult =
      Opt.execParserPure Opt.defaultPrefs programParser (map Text.unpack args)
  programResultToResolverResult progName programResult
{-|
Dynamically generate an OptParser CLI with sub-commands from the spec settings
declared on the @ConfigSpec@.

Once the CLI is generated and the input gathered, this returns the selected
record parsed from the sub-command input and the configuration map with keys
defined for that sub-command.
-}
resolveCommandCli
  :: (JSON.FromJSON cmd, JSON.ToJSON cmd, Eq cmd, Hashable cmd)
  => Spec.ConfigSpec cmd -- ^ Config Spec (normally parsed from json or yaml file)
  -> IO (cmd, Config)    -- ^ Selected command and Configuration Map
resolveCommandCli configSpec = do
  programName <- fmap Text.pack getProgName
  programArgs <- fmap (map Text.pack) getArgs
  handleCliResult (resolveCommandCliPure configSpec programName programArgs)
| roman/Haskell-etc | etc/src/System/Etc/Internal/Resolver/Cli/Command.hs | mit | 8,243 | 0 | 25 | 2,034 | 1,927 | 976 | 951 | 161 | 3 |
-- This program is free software; you can redistribute it and/or modify
-- it under the terms of the GNU General Public License as published by
-- the Free Software Foundation; either version 2 of the License, or
-- (at your option) any later version.
--
-- This program is distributed in the hope that it will be useful,
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- GNU General Public License for more details.
--
-- You should have received a copy of the GNU General Public License along
-- with this program; if not, write to the Free Software Foundation, Inc.,
-- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
--
import Control.Concurrent.Async.Lifted (async, wait)
import Control.Exception.Lifted (SomeException(..), handle)
import Data.List (find)
import Data.List.Split (chunksOf)
import System.Console.ANSI (Color(..), ColorIntensity(..), ConsoleIntensity(..),
ConsoleLayer(..), SGR(..), setSGR)
import System.Console.Terminal.Size (Window(..), size)
import System.Environment (getEnv)
import Text.Printf (printf)
import Web.HZulip
-- | Subscribe to every stream on the server, print the coloured
-- stream table, then loop printing incoming messages. Any exception
-- thrown by the listener is printed and the listener is restarted.
main :: IO ()
main = withZulipEnv $ do
    lift $ logInfo "Subscribing to all streams..."
    streamNames <- getStreams
    addSubscriptions streamNames
    lift $ logInfo "Subscribed to:"
    let streams = streamsFromNames streamNames
    lift $ printStreamTable streams
    start streams
  where start st = do
            end <- async $ onNewMessage (lift . printMessage st)
            lift $ logInfo "Listening for messages"
            -- Restart the listener on any exception (e.g. a dropped
            -- connection), printing the exception first.
            handle
                (\e -> lift (print (e :: SomeException)) >> start st)
                (wait end)
-- | Run a Zulip action using the credentials stored in the
-- @ZULIP_USER@ and @ZULIP_KEY@ environment variables ('getEnv' throws
-- when either is unset).
withZulipEnv :: ZulipM a -> IO a
withZulipEnv action =
    getEnv "ZULIP_USER" >>= \user ->
        getEnv "ZULIP_KEY" >>= \key ->
            withZulipCreds user key action
-- | Print one incoming message: coloured stream heading, sender email,
-- then the message body followed by a blank line.
--
-- NOTE(review): the irrefutable @Left streamName@ pattern assumes every
-- message is a stream message ('messageDisplayRecipient' returning
-- 'Left'); a private message (the 'Right' case) would make this
-- pattern match fail at runtime — confirm upstream guarantees.
printMessage :: [Stream] -> Message -> IO ()
printMessage ss msg = do
    let Left streamName = messageDisplayRecipient msg
        mstream = find (\s -> name s == streamName) ss
    case mstream of
        -- Known stream: reuse its assigned colour; otherwise print the
        -- raw name uncoloured.
        Just stream -> printStreamName stream
        Nothing -> putStr $ "<" ++ streamName ++ ">"
    putStr $ " " ++ (userEmail $ messageSender msg) ++ " said:\n"
    putStr $ messageContent msg
    putStr "\n\n"
-- Stream headings
-------------------------------------------------------------------------------

-- | A stream name paired with the terminal colour used to display it.
data Stream = Stream { name :: String
                     , color :: Color
                     }

-- | Assign each stream name a colour, cycling through a fixed palette.
streamsFromNames :: [String] -> [Stream]
streamsFromNames names = zipWith assign ([0..] :: [Int]) names
  where palette = [Black, Red, Green, Yellow, Blue, Magenta, Cyan]
        assign i n = Stream n (palette !! (i `rem` length palette))
-- | Print @\<streamName\>@ in the stream's colour, bold, no newline.
printStreamName :: Stream -> IO ()
printStreamName (Stream streamName streamColor) =
    putStrSGR [ SetColor Foreground Vivid streamColor
              , SetConsoleIntensity BoldIntensity
              ]
              ("<" ++ streamName ++ ">")
-- | Print the streams as a colourised table sized to the terminal.
--
-- Fixes over the previous version:
--
--   * 'size' returns 'Nothing' when stdout is not a terminal; the old
--     irrefutable @Just@ pattern crashed there. We now fall back to a
--     conventional 80-column width.
--   * The positional pattern @Window w _@ bound the /height/ field of
--     'Window' (height is the first field); the 'width' field is now
--     read explicitly.
--   * An empty stream list no longer calls 'maximum' on an empty
--     list, and the column count is clamped to at least 1.
printStreamTable :: [Stream] -> IO ()
printStreamTable [] = return ()
printStreamTable ss = do
    mWindow <- size :: IO (Maybe (Window Int))
    let windowWidth = maybe 80 width mWindow
        biggestLen = maximum $ map (length . name) ss
        groupSize = max 1 (windowWidth `div` biggestLen)
    mapM_ (printGroup biggestLen) $ chunksOf groupSize ss
  where printAligned m (Stream n c) = setSGR [SetColor Foreground Vivid c] >>
                                      printf ("%" ++ show m ++ "s ") n >>
                                      resetSGR
        printGroup m g = putStr "  " >>
                         mapM_ (printAligned m) g >>
                         putStr "\n"
-- Pretty logging
-------------------------------------------------------------------------------

-- | Print a bold, coloured @\">> \"@ log prefix (no newline).
logHeading :: Color -> IO ()
logHeading c =
    putStrSGR [ SetColor Foreground Vivid c
              , SetConsoleIntensity BoldIntensity
              ] ">> "

-- | Log an informational message behind a blue heading.
logInfo :: String -> IO ()
logInfo str = do
    logHeading Blue
    putStrLn str

-- | Print a string under the given SGR attributes, then reset them.
putStrSGR :: [SGR] -> String -> IO ()
putStrSGR sgr str = do
    setSGR sgr
    putStr str
    resetSGR

-- | Clear all SGR attributes.
resetSGR :: IO ()
resetSGR = setSGR []
| yamadapc/hzulip | examples/src/ZulipCli.hs | gpl-2.0 | 4,047 | 0 | 16 | 1,033 | 1,088 | 566 | 522 | 73 | 2 |
{-# LANGUAGE OverloadedLists #-}
module Kalkulu.Builtin.Control (controlBuiltins) where
import Control.Monad.Except
import qualified Data.Vector as V
import Kalkulu.Builtin
import qualified Kalkulu.BuiltinSymbol as B
-- | Control-flow builtins exported by this module, keyed by their
-- builtin symbol. Commented-out entries are planned but not yet
-- implemented.
controlBuiltins :: [(B.BuiltinSymbol, BuiltinDefinition)]
controlBuiltins = [
    (B.CompoundExpression, compoundExpression)
--  , (B.Return, return_)
  , (B.Catch, catch)
  , (B.Throw, throw)
--  , (B.Goto, goto)
  , (B.Label, label)
  , (B.If, if_)
--  , (B.Switch, switch)
--  , (B.Which, which)
--  , (B.Do, do_)
  , (B.For, for)
  , (B.While, while)
  , (B.Nest, nest)
  , (B.NestList, nestList)
--  , (B.NestWhile, nestWhile)
--  , (B.NestWhileList, nestWhileList)
--  , (B.FixedPoint, fixedPoint)
--  , (B.FixedPointList, fixedPointList)
  , (B.Abort, abort)
  , (B.Break, break_)
  , (B.Continue, continue)
  ]
-- | @CompoundExpression[e1; e2; ...]@: evaluate every argument in
-- order and return the value of the last one. All arguments are held
-- until the downcode runs.
compoundExpression :: BuiltinDefinition
compoundExpression = defaultBuiltin {
    attributes = [HoldAll, Protected] -- no ReadProtected
  , downcode = downcodeCompoundExpression
  }

downcodeCompoundExpression :: Expression -> Kernel Expression
downcodeCompoundExpression e@(Cmp _ args)
  | V.null args = return e
  | otherwise   = V.last <$> V.mapM evaluate args
-- | @Catch[expr]@: evaluate @expr@, intercepting any value thrown by
-- @Throw@. The argument is held.
catch :: BuiltinDefinition
catch = defaultBuiltin {
    attributes = [HoldFirst, Protected]
  , downcode = downcodeCatch
  }

downcodeCatch :: Expression -> Kernel Expression
downcodeCatch (Cmp _ [instr]) = evaluate instr `catchError` handler
  where
    -- Only 'ThrowException' is absorbed; every other exception
    -- (Break, Continue, Abort, ...) propagates.
    handler (ThrowException thrown) = return thrown
    handler other                   = throwError other
downcodeCatch _ = error "unreachable"
-- | @Throw[value]@: abort evaluation with @value@, to be captured by
-- the nearest enclosing @Catch@.
throw :: BuiltinDefinition
throw = defaultBuiltin {
    downcode = downcodeThrow -- TODO: 1 (or 2 args)
  }

downcodeThrow :: Expression -> Kernel Expression
downcodeThrow expr = case expr of
  Cmp _ [payload] -> throwError (ThrowException payload)
  _               -> return expr
-- | @If[cond, t, f, u]@: conditional evaluation; everything but the
-- condition is held.
if_ :: BuiltinDefinition
if_ = defaultBuiltin {
    attributes = [HoldRest, Protected],
    downcode = return . pureIf -- TODO: between 2 and 4 args
  }

-- | @Label@: holds its argument; no downcode.
label :: BuiltinDefinition
label = defaultBuiltin {
    attributes = [HoldFirst, Protected]
  }

-- | Pure branch selection for @If@, dispatching on the (already
-- evaluated) condition. With two arguments a false condition yields
-- Null; a fourth argument is the fallback when the condition is
-- neither @True@ nor @False@.
pureIf :: Expression -> Expression
pureIf expr = case expr of
  Cmp _ [SymbolB B.True, a]        -> a
  Cmp _ [SymbolB B.False, _]       -> toExpression ()
  Cmp _ [SymbolB B.True, a, _]     -> a
  Cmp _ [SymbolB B.False, _, a]    -> a
  Cmp _ [SymbolB B.True, a, _, _]  -> a
  Cmp _ [SymbolB B.False, _, a, _] -> a
  Cmp _ [_, _, _, a]               -> a
  _                                -> error "unreachable"
-- | @For[start, test, incr, body]@: C-style loop; all arguments are
-- held and re-evaluated each iteration.
for :: BuiltinDefinition
for = defaultBuiltin {
    attributes = [HoldAll, Protected]
  , downcode = downcodeFor -- TODO 3 or 4 arguments
  }

downcodeFor :: Expression -> Kernel Expression
downcodeFor expr = case expr of
  Cmp _ [start, test, incr, body] -> codeFor start test incr body
  -- Three-argument form: the body defaults to Null.
  Cmp _ [start, test, incr]       -> codeFor start test incr (toExpression ())
  _                               -> error "unreachable"
-- | Run the @For@ loop: evaluate @start@ once, then repeatedly
-- evaluate @test@ and, while it yields @True@, evaluate @body@
-- followed by @incr@. A @Return@ produced by the body exits the loop;
-- @Break@/@Continue@ arrive as exceptions and go through
-- 'handlerLoop'.
codeFor :: Expression -> Expression -> Expression -> Expression -> Kernel Expression
codeFor start test incr body = evaluate start >> doLoop
  where doLoop = do
          test' <- evaluate test
          if test' == toExpression True
            then (do b <- evaluate body
                     case b of
                       CmpB B.Return [a] -> return a
                       CmpB B.Return [a, SymbolB B.For] -> return a
                       e@(CmpB B.Return [_, _]) -> return e
                       _ -> evaluate incr >> doLoop)
                   `catchError` handlerLoop (evaluate incr >> doLoop)
            else return $ toExpression ()

-- | Shared Break/Continue handler for the looping constructs: @Break@
-- ends the loop with Null, @Continue@ resumes with the supplied
-- continuation, anything else is re-thrown.
handlerLoop :: Kernel Expression -> Exception -> Kernel Expression
handlerLoop _ BreakException = return $ toExpression ()
handlerLoop next ContinueException = next
handlerLoop _ e = throwError e
-- | @While[test]@ or @While[test, body]@: loop while @test@ evaluates
-- to @True@; both arguments are held.
while :: BuiltinDefinition
while = defaultBuiltin {
    attributes = [HoldAll, Protected]
  , downcode = downcodeWhile -- TODO: 1 or 2 args
  }

downcodeWhile :: Expression -> Kernel Expression
downcodeWhile expr = case expr of
  Cmp _ [test, body] -> codeWhile test body
  -- One-argument form: the body defaults to Null.
  Cmp _ [test]       -> codeWhile test (toExpression ())
  _                  -> error "unreachable"
-- | Run the @While@ loop: re-evaluate @test@ before each iteration and
-- evaluate @body@ while it yields @True@. A @Return@ from the body
-- exits the loop; @Break@/@Continue@ arrive as exceptions and go
-- through 'handlerLoop'.
codeWhile :: Expression -> Expression -> Kernel Expression
codeWhile test body = do
  test' <- evaluate test
  if test' == toExpression True
    then (do b <- evaluate body
             case b of
               CmpB B.Return [x] -> return x
               CmpB B.Return [x, SymbolB B.While] -> return x
               e@(CmpB B.Return [_, _]) -> return e
               _ -> codeWhile test body)
           `catchError` handlerLoop (codeWhile test body)
    else return $ toExpression ()
-- | @Nest[f, x, n]@: apply @f@ to @x@, @n@ times.
nest :: BuiltinDefinition
nest = defaultBuiltin {
    downcode = downcodeNest -- TODO: 3 args
  }

downcodeNest :: Expression -> Kernel Expression
downcodeNest (Cmp _ [f, x0, Number n])
  | n >= 0 = return (applyTimes (fromInteger n) x0)
  where
    applyTimes :: Int -> Expression -> Expression
    applyTimes 0 x = x
    applyTimes k x = applyTimes (k - 1) (Cmp f (V.singleton x))
-- Anything else (wrong arity, negative count) is left unevaluated.
downcodeNest e = return e
-- | @NestList[f, x, n]@: the successive iterates of @f@ starting at
-- @x@.
--
-- NOTE(review): this returns @n@ elements (@x@ through
-- @f^(n-1)[x]@), whereas Mathematica's @NestList@ returns @n + 1@ —
-- confirm which behaviour is intended.
nestList :: BuiltinDefinition
nestList = defaultBuiltin {
    downcode = downcodeNestList -- TODO: 3 args
  }

downcodeNestList :: Expression -> Kernel Expression
downcodeNestList (Cmp _ [f, x0, Number n])
  | n >= 0 =
      let iterates = iterate (Cmp f . V.singleton) x0
      in return (toExpression (take (fromIntegral n) iterates))
-- Anything else (wrong arity, negative count) is left unevaluated.
downcodeNestList e = return e
-- | @Abort[]@: abandon the whole computation.
abort :: BuiltinDefinition
abort = defaultBuiltin {
    downcode = downcodeAbort -- TODO: 0 arg
  }

downcodeAbort :: Expression -> Kernel Expression
downcodeAbort = const (throwError AbortException)

-- | @Break[]@: leave the innermost loop.
break_ :: BuiltinDefinition
break_ = defaultBuiltin {
    downcode = downcodeBreak -- 0 args expected
  }

downcodeBreak :: Expression -> Kernel Expression
downcodeBreak = const (throwError BreakException)

-- | @Continue[]@: skip to the next iteration of the innermost loop.
continue :: BuiltinDefinition
continue = defaultBuiltin {
    downcode = downcodeContinue -- 0 args expected
  }

downcodeContinue :: Expression -> Kernel Expression
downcodeContinue = const (throwError ContinueException)
| vizietto/kalkulu | src/Kalkulu/Builtin/Control.hs | gpl-3.0 | 5,958 | 0 | 20 | 1,437 | 1,862 | 1,001 | 861 | 136 | 5 |
{-| Module : Main
License : GPL
Maintainer : helium@cs.uu.nl
Stability : experimental
Portability : portable
-}
module Main where
import Helium.Main.Compile(compile)
import Helium.Parser.Parser(parseOnlyImports)
import Control.Monad
import System.FilePath(joinPath)
import Data.List(nub, elemIndex, isSuffixOf, intercalate)
import Data.Maybe(fromJust)
import Lvm.Path(explodePath,getLvmPath)
import System.Directory(doesFileExist, getModificationTime)
import Helium.Main.Args
import Helium.Main.CompileUtils
import Data.IORef
import Paths_helium
-- Prelude will be treated specially: it is compiled before any user
-- module (see 'main').
prelude :: String
prelude = "Prelude.hs"

-- Order matters (presumably later entries depend on earlier ones —
-- confirm before reordering).
coreLibs :: [String]
coreLibs = ["LvmLang", "LvmIO", "LvmException", "HeliumLang", "PreludePrim"]
-- | Compiler driver: parse the command line, work out the lvm search
-- path and base library location, make sure the compiled file exists,
-- compile the core libraries and the Prelude, then compile the
-- requested module.
main :: IO ()
main = do
    args <- getArgs
    (options, Just fullName) <- processHeliumArgs args -- Can't fail, because processHeliumArgs checks it.
    lvmPathFromOptionsOrEnv <- case lvmPathFromOptions options of
        Nothing -> getLvmPath
        Just s -> return (explodePath s)
    -- Base library directory: bundled data dir unless overridden; the
    -- non-overloaded variant lives in the "simple" subdirectory.
    baseLibs <- case basePathFromOptions options of
        Nothing -> getDataFileName $
                     if overloadingFromOptions options
                       then "lib"
                       else joinPath ["lib","simple"]
        Just path -> if overloadingFromOptions options
                       then return path
                       else return $ joinPath [path,"simple"] -- The lib will be part of path already.
    let (filePath, moduleName, _) = splitFilePath fullName
        filePath' = if null filePath then "." else filePath
        lvmPath = filter (not.null) . nub
                    $ (filePath' : lvmPathFromOptionsOrEnv) ++ [baseLibs] -- baseLibs always last
    -- File that is compiled must exist, this test doesn't use the search path
    fileExists <- doesFileExist fullName
    newFullName <-
        if fileExists then
            return fullName
        else do
            -- Try again with an ".hs" suffix before giving up.
            let filePlusHS = fullName ++ ".hs"
            filePlusHSExists <- doesFileExist filePlusHS
            unless filePlusHSExists $ do
                putStrLn $ "Can't find file " ++ show fullName ++ " or " ++ show filePlusHS
                exitWith (ExitFailure 1)
            return filePlusHS
    -- Ensure .core libs are compiled to .lvm
    mapM_ (makeCoreLib baseLibs) coreLibs
    -- And now deal with Prelude
    preludeRef <- newIORef []
    _ <- make filePath' (joinPath [baseLibs,prelude]) lvmPath [prelude] options preludeRef
    doneRef <- newIORef []
    _ <- make filePath' newFullName lvmPath [moduleName] options doneRef
    return ()
-- | Recursively (re)compile a module and its imports, depth-first.
--
-- fullName = file name including path of ".hs" file that is to be compiled
-- lvmPath  = where to look for files
-- chain    = chain of imports that led to the current module
-- options  = the compiler options
-- doneRef  = an IO ref to a list of already compiled files
--            (their names and whether they were recompiled or not)
-- returns: recompiled or not? (true = recompiled)
make :: String -> String -> [String] -> [String] -> [Option] -> IORef [(String, Bool)] -> IO Bool
make basedir fullName lvmPath chain options doneRef =
    do
        -- If we already compiled this module, return the result we already know
        done <- readIORef doneRef
        case lookup fullName done of
          Just isRecompiled -> return isRecompiled
          Nothing -> do
            imports <- parseOnlyImports fullName
            -- If this module imports a module earlier in the chain, there is a cycle
            case circularityCheck imports chain of
                Just cycl -> do
                    putStrLn $ "Circular import chain: \n\t" ++ showImportChain cycl ++ "\n"
                    exitWith (ExitFailure 1)
                Nothing ->
                    return ()
            -- Find all imports in the search path
            resolvedImports <- mapM (resolve lvmPath) imports
            -- For each of the imports...
            compileResults <- forM (zip imports resolvedImports)
                $ \(importModuleName, maybeImportFullName) -> do
                -- Issue error if import can not be found in the search path
                case maybeImportFullName of
                    Nothing -> do
                        putStrLn $
                            "Can't find module '" ++ importModuleName ++ "'\n" ++
                            "Import chain: \n\t" ++ showImportChain (chain ++ [importModuleName]) ++
                            "\nSearch path:\n" ++ showSearchPath lvmPath
                        exitWith (ExitFailure 1)
                    Just _ -> return ()
                -- Safe: the Nothing case exited above.
                let importFullName = fromJust maybeImportFullName
                -- TODO : print names imported modules in verbose mode.
                -- If we only have an ".lvm" file we do not need to (/can't) recompile
                if ".lvm" `isSuffixOf` importFullName then
                    return False
                else
                    make basedir importFullName lvmPath (chain ++ [importModuleName]) options doneRef
            -- Recompile the current module if:
            --  * any of the children was recompiled
            --  * the build all option (-B) was on the command line
            --  * the build one option (-b) was there and we are
            --      compiling the top-most module (head of chain)
            --  * the module is not up to date (.hs newer than .lvm)
            let (filePath, moduleName, _) = splitFilePath fullName
            upToDate <- upToDateCheck (combinePathAndFile filePath moduleName)
            newDone <- readIORef doneRef
            isRecompiled <-
                if or compileResults ||
                   BuildAll `elem` options ||
                   (BuildOne `elem` options && moduleName == head chain) ||
                   not upToDate
                    then do
                        compile basedir fullName options lvmPath (map fst newDone)
                        return True
                    else do
                        putStrLn (moduleName ++ " is up to date")
                        return False
            -- Remember the fact that we have already been at this module
            writeIORef doneRef ((fullName, isRecompiled):newDone)
            return isRecompiled
-- | Render an import chain, e.g. @["A","B"]@ becomes @"A imports B"@.
showImportChain :: [String] -> String
showImportChain modules = intercalate " imports " modules
-- | Render a search path, one tab-indented entry per line.
showSearchPath :: [String] -> String
showSearchPath paths = unlines [ "\t" ++ p | p <- paths ]
-- | True exactly when the list is two entries, both the Prelude file.
preludeImportsPrelude :: [String] -> Bool
preludeImportsPrelude modules = case modules of
  [x, y] -> x == prelude && y == prelude
  _      -> False
-- | Look for the first module in @imports@ that already occurs in
-- @chain@; if found, return the cycle: the chain from that module
-- onwards, closed with the module itself.
circularityCheck :: [String] -> [String] -> Maybe [String]
circularityCheck imports chain = foldr step Nothing imports
  where
    step imp fallback = case elemIndex imp chain of
      Just i  -> Just (drop i chain ++ [imp])
      Nothing -> fallback
-- | upToDateCheck returns true if the .lvm is newer than the .hs
--
-- NOTE(review): if the .lvm exists but the .hs does not,
-- 'getModificationTime' throws — presumably callers guarantee the
-- source file exists.
upToDateCheck :: String -> IO Bool
upToDateCheck basePath = do
  lvmExists <- doesFileExist (basePath ++ ".lvm")
  if lvmExists then do
      t1 <- getModificationTime (basePath ++ ".hs")
      t2 <- getModificationTime (basePath ++ ".lvm")
      return (t1 < t2)
    else
      return False
| roberth/uu-helium | src/Helium/Main.hs | gpl-3.0 | 7,458 | 0 | 28 | 2,490 | 1,476 | 753 | 723 | 119 | 7 |
module Data.DecidableSubset where
import Data.Bijection
import Data.Subset (Subset(..))
import qualified Data.Subset as Subset
import Data.Maybe
import Data.List
import Control.Monad
-- | A subset of @t@ (witnessed by the embedding in 'subset') whose
-- membership is decidable: 'decide' maps an element of @t@ to @Just@
-- its preimage in @s@ when it belongs to the subset, and to @Nothing@
-- otherwise.
data DecidableSubset s t = DecidableSubset
  { subset :: Subset s t
  , decide :: t -> Maybe s
  }
-- | The closed interval @[lowerBound, upperBound]@ as a decidable
-- subset of the whole type.
range :: Ord a => a -> a -> DecidableSubset a a
range lowerBound upperBound = DecidableSubset
  { subset = Subset id
  , decide = \ n ->
      if lowerBound <= n && n <= upperBound then Just n else Nothing
  }
-- | The complement: an element is accepted exactly when the original
-- subset rejects it.
complement :: DecidableSubset s t -> DecidableSubset t t
complement dec = DecidableSubset
  { subset = Subset.complement (subset dec)
  , decide = \ t -> case decide dec t of
      Nothing -> Just t
      Just _  -> Nothing
  }
-- | Cartesian product of two decidable subsets, decided componentwise.
cartesian :: DecidableSubset a b -> DecidableSubset s t -> DecidableSubset (a, s) (b, t)
cartesian decAB decST = DecidableSubset
  { subset = Subset.cartesian (subset decAB) (subset decST)
  , decide = \ (b, t) -> do
      a <- decide decAB b
      s <- decide decST t
      Just (a, s)
  }
-- | Intersection of two decidable subsets over the same embedding; the
-- first argument's subset witness is kept, and an element must pass
-- both deciders (the second decider's preimage is returned).
intersection :: DecidableSubset a b -> DecidableSubset a b -> DecidableSubset a b
intersection decAB decAB' = DecidableSubset
  { subset = subset decAB
  , decide = \ t -> case decide decAB t of
      Nothing -> Nothing
      Just _  -> decide decAB' t
  }
-- | Transport a decidable subset along bijections on both sides:
-- translate the candidate back through @bijBB'@, decide in the
-- original subset, then translate the preimage forth through @bijAA'@.
bijection :: DecidableSubset a b -> Bijection a a' -> Bijection b b' -> DecidableSubset a' b'
bijection decAB bijAA' bijBB' = DecidableSubset
  { subset = Subset.bijection (subset decAB) bijAA' bijBB'
  , decide = \ b' -> forth bijAA' <$> decide decAB (back bijBB' b')
  }
| gallais/potpourri | haskell/cellular/Data/DecidableSubset.hs | gpl-3.0 | 1,429 | 0 | 13 | 286 | 538 | 282 | 256 | 30 | 1 |
module SayNumber where
-- | Spell a non-negative number (0 .. 999999) in English words.
sayNumber :: Integral a => a -> String
sayNumber 0 = "zero"
sayNumber x = say x

-- | Words for 100 .. 999, e.g. "three-hundred and forty-two".
sayHundreds :: Integral a => a -> String
sayHundreds x
  | x `mod` 100 == 0 = say (x `div` 100) ++ "-hundred"
  | otherwise = say (x `div` 100) ++ "-hundred and " ++ say (x `mod` 100)

-- | Words for 1000 .. 999999.
sayThousands :: Integral a => a -> String
sayThousands x
  | x `mod` 1000 == 0 = say (x `div` 1000) ++ " thousand"
  | x `mod` 1000 >= 100 = say (x `div` 1000) ++ " thousand, " ++ say (x `mod` 1000)
  | otherwise = say (x `div` 1000) ++ " thousand, and " ++ say (x `mod` 1000)

-- | Words for 20 .. 99.
-- Spelling fixes: "twenty" and "forty" were previously misspelled
-- "tewnty" and "fourty".
sayTens :: Integral a => a -> String
sayTens x
  | x == 20 = "twenty"
  | x `div` 10 == 2 = "twenty-" ++ say (x `mod` 10)
  | x == 30 = "thirty"
  | x `div` 10 == 3 = "thirty-" ++ say (x `mod` 10)
  | x == 40 = "forty"
  | x `div` 10 == 4 = "forty-" ++ say (x `mod` 10)
  | x == 50 = "fifty"
  | x `div` 10 == 5 = "fifty-" ++ say (x `mod` 10)
  | x == 60 = "sixty"
  | x `div` 10 == 6 = "sixty-" ++ say (x `mod` 10)
  | x == 70 = "seventy"
  | x `div` 10 == 7 = "seventy-" ++ say (x `mod` 10)
  | x == 80 = "eighty"
  | x `div` 10 == 8 = "eighty-" ++ say (x `mod` 10)
  | x == 90 = "ninety"
  | x `div` 10 == 9 = "ninety-" ++ say (x `mod` 10)

-- | Words for a positive number below one million, dispatching on
-- magnitude. 18 gets its own equation: deriving it as
-- @say 8 ++ "teen"@ produced the misspelling "eightteen".
say :: Integral a => a -> String
say x
  | x == 0 = ""
  | x `elem` [14, 16, 17, 19] = say (x `mod` 10) ++ "teen"
  | x < 100 && x >= 20 = sayTens x
  | x < 1000 && x >= 100 = sayHundreds x
  | x < 1000000 && x >= 1000 = sayThousands x
say 18 = "eighteen"
say 15 = "fifteen"
say 13 = "thirteen"
say 12 = "twelve"
say 11 = "eleven"
say 10 = "ten"
say 9 = "nine"
say 8 = "eight"
say 7 = "seven"
say 6 = "six"
say 5 = "five"
say 4 = "four"
say 3 = "three"
say 2 = "two"
say 1 = "one"
--main = do
-- mapM print $ map sayNumber [11001, 11002 .. 11110]
| joshuaunderwood7/sayNumber | Saynumber.hs | gpl-3.0 | 1,722 | 0 | 10 | 601 | 875 | 454 | 421 | 47 | 1 |
module CardDB ( cards
, delete
, first
, get
, insert
, maxId
, nextId
, putDBInfo
, randomCards
, setDBUp
, update
) where
import Cards ( Card(..)
, Id
, isDue
)
import Control.Exception (bracket)
import Control.Monad (liftM, when, filterM)
import Data.Maybe (fromMaybe)
import Data.Maybe (maybe)
import Database.HDBC (run, quickQuery', commit, disconnect, getTables,
toSql, fromSql, SqlValue (SqlNull, SqlInt32))
import Database.HDBC.Sqlite3 (connectSqlite3)
import System.Directory (getAppUserDataDirectory, doesFileExist,
createDirectoryIfMissing)
import System.FilePath ((</>), takeDirectory)
import System.Random (newStdGen)
import Utils (getYesOrNo, unsort)
-- | Absolute path of the SQLite database inside the per-user
-- "Palace" application data directory.
dbPath :: IO FilePath
dbPath = fmap (</> "cards.db") (getAppUserDataDirectory "Palace")
-- | Create the @cards@ table from scratch and commit. Assumes the
-- database file's directory already exists (see 'setDBUp').
createDB :: IO ()
createDB =
    bracket (dbPath >>= connectSqlite3) disconnect $
    \ conn -> do
      run conn ("CREATE TABLE cards (id INTEGER NOT NULL, " ++
                "question VARCHAR, answer VARCHAR, day DATE, " ++
                "frequency VARCHAR)") []
      commit conn
-- | Ensure the database is usable: create its parent directory and,
-- when the @cards@ table is missing, the table itself.
setDBUp :: IO ()
setDBUp = do
  createDirectoryIfMissing True . takeDirectory =<< dbPath
  bracket (dbPath >>= connectSqlite3) disconnect $
    \ conn -> do
      tables <- getTables conn
      when ("cards" `notElem` tables) createDB
-- | Largest card id in the table, or 'Nothing' when the table is
-- empty (MAX over an empty table yields SQL NULL).
maxId :: IO (Maybe Id)
maxId =
  bracket (dbPath >>= connectSqlite3) disconnect $
  \ conn -> do
    m <- quickQuery' conn "SELECT MAX(id) FROM cards" []
    -- The aggregate always returns exactly one row with one column,
    -- so the double 'head' is safe here.
    return (case head (head m) of
              SqlNull -> Nothing
              value -> Just . fromSql $ value)
-- | The next free card id: one past the current maximum, or 0 when
-- the table is empty.
nextId :: IO Id
nextId = fmap (maybe 0 succ) maxId
-- | Serialize a card in table column order:
-- (id, question, answer, day, frequency). The frequency is stored via
-- 'show'.
cardToSql :: Card -> [SqlValue]
cardToSql (Card cardId question answer day freq) =
  [ toSql cardId
  , toSql question
  , toSql answer
  , toSql day
  , toSql (show freq)
  ]

-- | Inverse of 'cardToSql'; expects exactly five columns.
cardFromSql :: [SqlValue] -> Card
cardFromSql [i, q, a, d, f] =
  Card (fromSql i) (fromSql q) (fromSql a) (fromSql d) (read (fromSql f))
cardFromSql _ = error "Can't get a Card from that list."
-- | Insert a card, commit, and report the inserted id on stdout.
insert :: Card -> IO ()
insert card =
  bracket (dbPath >>= connectSqlite3) disconnect $
  \ conn -> do
    run conn ("INSERT INTO cards (id, question, " ++
              "answer, day, frequency) VALUES " ++
              "(?, ?, ?, ?, ?)") $ cardToSql card
    commit conn
    putStrLn ("Card " ++ show (_cId card) ++ " inserted.")
-- | Fetch the card with the given id, or 'Nothing' when absent.
--
-- NOTE(review): the case is non-exhaustive — should several rows ever
-- share an id, the pattern match fails; ids are presumably unique.
get :: Id -> IO (Maybe Card)
get id = bracket (dbPath >>= connectSqlite3) disconnect $
  \ conn -> do
    card <- quickQuery' conn "SELECT * FROM cards WHERE id = ?"
            [toSql id]
    return (case card of
              [] -> Nothing
              [value] -> Just . cardFromSql $ value)
-- | Interactively delete the card with the given id, asking the user
-- for confirmation first; reports the outcome on stdout.
delete :: Id -> IO ()
delete id = bracket (dbPath >>= connectSqlite3) disconnect $
  \ conn -> do
    isConfirmed <- getYesOrNo ("Are you sure you want to delete " ++
                               "card " ++ show id)
    if isConfirmed
      then do
        run conn "DELETE FROM cards WHERE ID=?" [toSql id]
        commit conn
        putStrLn ("Card " ++ show id ++ " deleted.")
      else putStrLn ("Card " ++ show id ++ " not deleted.")
-- | Load every card stored in the database.
cards :: IO [Card]
cards = bracket (dbPath >>= connectSqlite3) disconnect $ \ conn -> do
  rows <- quickQuery' conn "SELECT * FROM cards" []
  return (map cardFromSql rows)
-- | All cards, shuffled into a random order.
randomCards :: IO [Card]
randomCards = do
  allCards <- cards
  gen <- newStdGen
  return (unsort gen allCards)
-- | The first card of the list that is due today, if any.
first :: [Card] -> IO (Maybe Card)
first cardList = do
  due <- filterM isDue cardList
  case due of
    [] -> return Nothing
    (c : _) -> return (Just c)
-- | Overwrite the stored row identified by @old@'s id with the
-- fields of @new@, commit, and report on stdout.
update :: Card -> Card -> IO ()
update old new = bracket (dbPath >>= connectSqlite3) disconnect $
  \ conn -> do
    -- Parameters: the four non-id fields of the new card, then the
    -- old card's id for the WHERE clause.
    run conn ("UPDATE cards SET question=?, answer=?, " ++
              "day=?, frequency=? WHERE id=?") $
        drop 1 (cardToSql new) ++ [head $ cardToSql old]
    commit conn
    putStrLn ("Card " ++ show (_cId old) ++ " updated.")
-- | Print how many cards exist and how many of them are due today.
putDBInfo :: IO ()
putDBInfo = do
  allCards <- cards
  dueCards <- filterM isDue allCards
  putStrLn $ concat
    [ "There are ", show (length allCards), " cards of which "
    , show (length dueCards), " are due today." ]
| bbshortcut/Palace | src/CardDB.hs | gpl-3.0 | 4,838 | 0 | 16 | 1,810 | 1,407 | 717 | 690 | 120 | 2 |
{-
- Copyright (C) 2014 Allen Guo <guoguo12@gmail.com>
- Copyright (C) 2014 Alexander Berntsen <alexander@plaimi.net>
-
- This file is part of coreutilhs.
-
- coreutilhs is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- coreutilhs is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with coreutilhs. If not, see <http://www.gnu.org/licenses/>.
-} module Main where
import System.Environment (getArgs)
import Text.Printf
-- Enumerates lines from STDIN or given files
main :: IO ()
main = do
input <- getArgs
case input of
[] -> getContents >>= output . enumerate
files -> mapM readFile files >>= output . enumerate . concat
where enumerate :: String -> [(Int, String)]
enumerate xs = zip (enumFrom 1) $ lines xs
output = mapM_ (uncurry $ printf "\t%d %s\n")
| alexander-b/coreutilhs | nl.hs | gpl-3.0 | 1,231 | 0 | 13 | 239 | 151 | 79 | 72 | 12 | 2 |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- Module : Network.AWS.RDS.DescribePendingMaintenanceActions
-- Copyright : (c) 2013-2014 Brendan Hay <brendan.g.hay@gmail.com>
-- License : This Source Code Form is subject to the terms of
-- the Mozilla Public License, v. 2.0.
-- A copy of the MPL can be found in the LICENSE file or
-- you can obtain it at http://mozilla.org/MPL/2.0/.
-- Maintainer : Brendan Hay <brendan.g.hay@gmail.com>
-- Stability : experimental
-- Portability : non-portable (GHC extensions)
--
-- Derived from AWS service descriptions, licensed under Apache 2.0.
-- | Returns a list of resources (for example, DB Instances) that have at least
-- one pending maintenance action.
--
-- <http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribePendingMaintenanceActions.html>
module Network.AWS.RDS.DescribePendingMaintenanceActions
(
-- * Request
DescribePendingMaintenanceActions
-- ** Request constructor
, describePendingMaintenanceActions
-- ** Request lenses
, dpmaFilters
, dpmaMarker
, dpmaMaxRecords
, dpmaResourceIdentifier
-- * Response
, DescribePendingMaintenanceActionsResponse
-- ** Response constructor
, describePendingMaintenanceActionsResponse
-- ** Response lenses
, dpmarMarker
, dpmarPendingMaintenanceActions
) where
import Network.AWS.Prelude
import Network.AWS.Request.Query
import Network.AWS.RDS.Types
import qualified GHC.Exts
data DescribePendingMaintenanceActions = DescribePendingMaintenanceActions
{ _dpmaFilters :: List "member" Filter
, _dpmaMarker :: Maybe Text
, _dpmaMaxRecords :: Maybe Int
, _dpmaResourceIdentifier :: Maybe Text
} deriving (Eq, Read, Show)
-- | 'DescribePendingMaintenanceActions' constructor.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'dpmaFilters' @::@ ['Filter']
--
-- * 'dpmaMarker' @::@ 'Maybe' 'Text'
--
-- * 'dpmaMaxRecords' @::@ 'Maybe' 'Int'
--
-- * 'dpmaResourceIdentifier' @::@ 'Maybe' 'Text'
--
describePendingMaintenanceActions :: DescribePendingMaintenanceActions
describePendingMaintenanceActions = DescribePendingMaintenanceActions
{ _dpmaResourceIdentifier = Nothing
, _dpmaFilters = mempty
, _dpmaMarker = Nothing
, _dpmaMaxRecords = Nothing
}
-- | Supported filters:
--
-- 'db-instance-id' - Accepts DB instance identifiers and DB instance ARNs. The
-- result list will only include maintenance actions for the specified DB
-- Instances.
dpmaFilters :: Lens' DescribePendingMaintenanceActions [Filter]
dpmaFilters = lens _dpmaFilters (\s a -> s { _dpmaFilters = a }) . _List
-- | An optional pagination token provided by a previous 'DescribePendingMaintenanceActions' request. If this parameter is specified, the response includes only records
-- beyond the marker, up to a number of records specified by 'MaxRecords' .
dpmaMarker :: Lens' DescribePendingMaintenanceActions (Maybe Text)
dpmaMarker = lens _dpmaMarker (\s a -> s { _dpmaMarker = a })
-- | The maximum number of records to include in the response. If more records
-- exist than the specified 'MaxRecords' value, a pagination token called a marker
-- is included in the response so that the remaining results can be retrieved.
--
-- Default: 100
--
-- Constraints: minimum 20, maximum 100
dpmaMaxRecords :: Lens' DescribePendingMaintenanceActions (Maybe Int)
dpmaMaxRecords = lens _dpmaMaxRecords (\s a -> s { _dpmaMaxRecords = a })
-- | The ARN of the resource to return pending maintenance actions for.
dpmaResourceIdentifier :: Lens' DescribePendingMaintenanceActions (Maybe Text)
dpmaResourceIdentifier =
lens _dpmaResourceIdentifier (\s a -> s { _dpmaResourceIdentifier = a })
data DescribePendingMaintenanceActionsResponse = DescribePendingMaintenanceActionsResponse
{ _dpmarMarker :: Maybe Text
, _dpmarPendingMaintenanceActions :: List "member" ResourcePendingMaintenanceActions
} deriving (Eq, Read, Show)
-- | 'DescribePendingMaintenanceActionsResponse' constructor.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'dpmarMarker' @::@ 'Maybe' 'Text'
--
-- * 'dpmarPendingMaintenanceActions' @::@ ['ResourcePendingMaintenanceActions']
--
describePendingMaintenanceActionsResponse :: DescribePendingMaintenanceActionsResponse
describePendingMaintenanceActionsResponse = DescribePendingMaintenanceActionsResponse
{ _dpmarPendingMaintenanceActions = mempty
, _dpmarMarker = Nothing
}
-- | An optional pagination token provided by a previous 'DescribePendingMaintenanceActions' request. If this parameter is specified, the response includes only records
-- beyond the marker, up to a number of records specified by 'MaxRecords' .
dpmarMarker :: Lens' DescribePendingMaintenanceActionsResponse (Maybe Text)
dpmarMarker = lens _dpmarMarker (\s a -> s { _dpmarMarker = a })
-- | Provides a list of the pending maintenance actions for the resource.
dpmarPendingMaintenanceActions :: Lens' DescribePendingMaintenanceActionsResponse [ResourcePendingMaintenanceActions]
dpmarPendingMaintenanceActions =
lens _dpmarPendingMaintenanceActions
(\s a -> s { _dpmarPendingMaintenanceActions = a })
. _List
instance ToPath DescribePendingMaintenanceActions where
toPath = const "/"
instance ToQuery DescribePendingMaintenanceActions where
toQuery DescribePendingMaintenanceActions{..} = mconcat
[ "Filters" =? _dpmaFilters
, "Marker" =? _dpmaMarker
, "MaxRecords" =? _dpmaMaxRecords
, "ResourceIdentifier" =? _dpmaResourceIdentifier
]
instance ToHeaders DescribePendingMaintenanceActions
instance AWSRequest DescribePendingMaintenanceActions where
type Sv DescribePendingMaintenanceActions = RDS
type Rs DescribePendingMaintenanceActions = DescribePendingMaintenanceActionsResponse
request = post "DescribePendingMaintenanceActions"
response = xmlResponse
instance FromXML DescribePendingMaintenanceActionsResponse where
parseXML = withElement "DescribePendingMaintenanceActionsResult" $ \x -> DescribePendingMaintenanceActionsResponse
<$> x .@? "Marker"
<*> x .@? "PendingMaintenanceActions" .!@ mempty
| dysinger/amazonka | amazonka-rds/gen/Network/AWS/RDS/DescribePendingMaintenanceActions.hs | mpl-2.0 | 6,779 | 0 | 12 | 1,297 | 727 | 439 | 288 | 80 | 1 |
module MedicalAdvice.Measurements where
import Control.Monad (join)
import MedicalAdvice.Lib
import System.Random (randomRIO)
-- | A single medical measurement.  'Pulse' carries two integers
-- (presumably a beat count over an interval -- TODO confirm units).
data Measurement = Length Double
                 | Weight Double
                 | Temperature Double
                 | Pulse Int Int
                 deriving Show
-- | Pick one of the four measurement kinds at random and fill it with a value
-- drawn from the corresponding range.  'pickRandom' chooses an IO action from
-- the list, so 'join' collapses the resulting @IO (IO Measurement)@.
getRandomMeasurement :: IO Measurement
getRandomMeasurement = join $ pickRandom [ Length <$> randomRIO (140,210)
                                         , Weight <$> randomRIO (50,200)
                                         , Temperature <$> randomRIO (35,42)
                                         , Pulse <$> randomRIO (10,20) <*> randomRIO (5,10)
                                         ]
| ToJans/learninghaskell | 0004AmISick/MedicalAdvice/Measurements.hs | unlicense | 754 | 0 | 11 | 355 | 157 | 89 | 68 | 14 | 1 |
{-# LANGUAGE CPP #-}
{-# LANGUAGE NamedFieldPuns #-}
-- | Greetings for $NAME
--
-- @
-- % NAME=a5579150 runhaskell -isrc example/Main.hs
-- Hello, a5579150!
-- % NAME=a5579150 QUIET=1 runhaskell -isrc example/Main.hs
-- %
-- @
module Main (main) where
#if __GLASGOW_HASKELL__ < 710
import Control.Applicative ((<$>), (<*>))
#endif
import Control.Monad (unless)
import Env
-- | Configuration read from the environment: greeting target and quiet flag.
data Hello = Hello { name :: String, quiet :: Bool }
-- | Read the configuration and print the greeting unless QUIET was set.
main :: IO ()
main = do
  Hello {name, quiet} <- hello
  unless quiet $
    putStrLn ("Hello, " ++ name ++ "!")
-- | Parse @$NAME@ (must be non-empty) and the @$QUIET@ switch via envparse.
hello :: IO Hello
hello = Env.parse (header "envparse example") $
  Hello <$> var (str <=< nonempty) "NAME" (help "Target for the greeting")
        <*> switch "QUIET" (help "Whether to actually print the greeting")
| supki/envparse | example/Main.hs | bsd-2-clause | 776 | 0 | 11 | 161 | 194 | 111 | 83 | 16 | 1 |
{-# language CPP #-}
-- No documentation found for Chapter "Zero"
module Vulkan.Zero (Zero(..)) where
import GHC.Ptr (nullFunPtr)
import Foreign.Ptr (nullPtr)
import Foreign.C.Types (CChar)
import Foreign.C.Types (CFloat)
import Foreign.C.Types (CInt)
import Foreign.C.Types (CSize)
import Foreign.Storable (Storable)
import Data.Int (Int16)
import Data.Int (Int32)
import Data.Int (Int64)
import Data.Int (Int8)
import Foreign.Ptr (FunPtr)
import Foreign.Ptr (Ptr)
import GHC.TypeNats (KnownNat)
import Data.Word (Word16)
import Data.Word (Word32)
import Data.Word (Word64)
import Data.Word (Word8)
-- | A class for initializing things with all zero data
--
-- Any instance should satisfy the following law:
--
-- @ new zero = calloc @ or @ with zero = withZeroCStruct @
--
-- i.e. Marshaling @zero@ to memory yeilds only zero-valued bytes, except
-- for structs which require a "type" tag
--
class Zero a where
  zero :: a
-- Unit and Bool: the obvious all-zero-bytes values.
instance Zero () where
  zero = ()
instance Zero Bool where
  zero = False
-- Pointer types: zero bytes are the null pointer.
instance Zero (FunPtr a) where
  zero = nullFunPtr
instance Zero (Ptr a) where
  zero = nullPtr
-- Fixed-width integral, word, floating-point and C numeric types: literal 0.
instance Zero Int8 where
  zero = 0
instance Zero Int16 where
  zero = 0
instance Zero Int32 where
  zero = 0
instance Zero Int64 where
  zero = 0
instance Zero Word8 where
  zero = 0
instance Zero Word16 where
  zero = 0
instance Zero Word32 where
  zero = 0
instance Zero Word64 where
  zero = 0
instance Zero Float where
  zero = 0
instance Zero CFloat where
  zero = 0
instance Zero CChar where
  zero = 0
instance Zero CSize where
  zero = 0
instance Zero CInt where
  zero = 0
| expipiplus1/vulkan | src-manual/Vulkan/Zero.hs | bsd-3-clause | 1,600 | 0 | 7 | 310 | 461 | 267 | 194 | 56 | 0 |
{-# LANGUAGE ForeignFunctionInterface #-}
-----------------------------------------------------------------------------
-- |
-- Module : System.Win32.Com.Server
-- Copyright : (c) Sigbjorn Finne <sof@dcs.gla.ac.uk> 1998-99
-- License : BSD-style (see the file libraries/base/LICENSE)
--
-- Maintainer : sof@forkIO.com
-- Stability : provisional
-- Portability : portable
--
-- Support code for writing Haskell COM components (yay!)
--
-- The library code for Haskell COM components (aka. servers),
-- support the wrapping up of a bunch of Haskell function values
-- into the binary representation that the COM spec mandates.
--
-- The library has two classes of 'users':
--
-- - HaskellDirect generated stubs for interfaces representing
-- Haskell COM components.
-- - on-the-fly generation of COM interface pointers by a Haskell
-- application.
--
-- i.e., in short, we care about having a simple, programmer-useable, API :-)
--
-----------------------------------------------------------------------------
module System.Win32.Com.Server
(
createComInstance -- :: String
-- -> objState
-- -> IO ()
-- -> [ComInterface objState]
-- -> IID iid
-- -> IO (IUnknown iid)
, createInstance -- :: objState
-- -> VTable iid objState
-- -> IO (IUnknown iid)
, createVTable -- :: [Ptr ()] -> IO (VTable iid objState)
-- prefixes the three IU methods.
, createComVTable -- :: [Ptr ()] -> IO (ComVTable iid objState)
, createIPointer -- :: StablePtr a
-- -> (VTable b)
-- -> IO (PrimIP b)
, cloneIPointer -- :: IUnknown a -> VTable b -> IO (PrimIP b)
, cloneIPointer_prim -- :: PrimIP a -> VTable b -> IO (PrimIP b)
, getObjState -- :: PrimIP a -> IO b
, getRealObjState -- :: PrimIP a -> IO b
, createDualInterface -- :: [Ptr ()]
-- -> IID iid
-- -> Either LIBID String
-- -> IUnknown a
-- -> IO (IDispatch ())
, createDispInterface -- :: IUnknown iid -- the interface to delegate to
-- -> Either LIBID String -- libid of type library to use.
-- -> IID iid
-- what interface it implements (needed to
-- get at the type library which drives
-- the dispatch interface.)
-- -> IO (IDispatch ())
-- the dispatch implementation handed back.
, VTable
, ComVTable
, PrimIP
, ComInterface
, mkIface
, mkDispIface
, mkDualIface
, export_getTypeInfoCount
, export_getTypeInfo
, export_getIDsOfNames
, export_invoke
) where
import System.Win32.Com.HDirect.HDirect
import Data.Word
import Data.Int
import Data.IORef ( IORef, readIORef, writeIORef, newIORef )
import System.IO.Unsafe ( unsafePerformIO )
import System.IO ( fixIO )
import System.Win32.Com hiding ( queryInterface, addRef, release )
import qualified System.Win32.Com as Com ( addRef, release )
import System.Win32.Com.Automation ( IDispatch, VARIANT, iidIDispatch, DISPID )
import Foreign.StablePtr
import Foreign.Ptr
import Foreign.ForeignPtr
import Foreign.Storable
import Data.Maybe ( fromMaybe )
import Data.Bits ( (.&.) )
import System.Win32.Com.Exception
import System.Win32.Com.HDirect.WideString
import Control.Monad
import Data.List
--Basic types - we're essentially untyped here; safety is provided
--by the abstraction levels above us.
-- Raw interface pointer: pointer to an object whose first word is the vtbl.
type PrimIP iid = Ptr (Ptr ())
-- A vtable together with its method count.
type VTBL = (Ptr (Ptr ()), Int)
type VTable iid objState = VTBL
-- Vtables made by 'createComVTable' pair the user state with IUnknown state.
type ComVTable iid objState = VTable iid (objState, IUnkState)
-- | One interface implemented by a component: either a plain vtable-based
-- interface, or a dispatch interface driven by a type library (optionally
-- dual, i.e. both vtable- and dispatch-callable).
data ComInterface objState
 = Iface { ifaceGUID :: GUID
         , ifaceVTBL :: VTBL
         }
 | DispIface { ifaceGUID :: GUID
             , ifaceLIBID :: Either LIBID String
             , ifaceVTBL :: VTBL
             , isDual :: Bool
             }
-- | Wrap a vtable as a plain (vtable-only) COM interface.
mkIface :: IID iid -> VTable iid objState -> ComInterface objState
mkIface iid a = Iface (iidToGUID iid) a
-- | Wrap a vtable as a dispatch interface.  When no LIBID is given, an empty
-- path is recorded and the actual type-library location is filled in later
-- (see 'mkInstanceState', which substitutes the DLL path).
mkDispIface :: Maybe LIBID -> IID iid -> VTable iid objState -> ComInterface objState
mkDispIface l iid v = DispIface (iidToGUID iid) l' v False
  where
    -- 'maybe d Left' is the idiomatic form of 'fromMaybe d . fmap Left'.
    l' = maybe (Right "") Left l
-- | Like 'mkDispIface', but marks the interface as dual.
mkDualIface :: Maybe LIBID -> IID iid -> VTable iid objState -> ComInterface objState
mkDualIface l iid v = DispIface (iidToGUID iid) l' v True
  where
    l' = maybe (Right "") Left l
{-
=== Creating a component instance ===
`createComInstance' creates a new COM component instance, given
the initial state together with the (implementation) of the
interfaces that the component supports. The interface pointer
that it returns is at the one requested.
-}
-- | Create a new COM component instance from its initial state and the list
-- of interfaces it implements; returns an interface pointer at the
-- requested IID.
createComInstance :: String -- DLL path.
                  -> objState -- initial state
                  -> IO () -- action to perform when releasing object.
                  -> [ComInterface objState] -- supported interfaces
                  -> IID (IUnknown iid)
                  -> IO (IUnknown iid)
createComInstance dll_path initState releaseAction supported_ifaces initial_iid = do
  ip_state <- mkInstanceState supported_ifaces dll_path releaseAction initState
  (_,iu) <- deRefStablePtr ip_state
  res <- lookupInterface initial_iid (iu_ifaces iu)
  case res of
    (_,_,ip) -> do
      -- putMessage ("createComInstance: " ++ show initial_iid)
      return (castIface ip)
-- | Create a single-interface instance directly from state plus a vtable,
-- bypassing the interface-list machinery above.
createInstance :: objState -> VTable (IUnknown iid) objState -> IO (IUnknown iid)
createInstance initState vtable = do
  ip_state <- newStablePtr initState
  createIPointer ip_state vtable
{-
=== Creating the component instance specific state ===
Internal function which allocates an (immovable) chunk of
memory which holds the instance-specific state of a component.
For each component instance we keep a stable pointer to the
component instance state. In all likelihood, this object state
also contains enough information for the <tt/IUnknown/ methods
to operate correctly.
-}
-- | Lay out a COM interface pointer in memory: word 0 is the vtbl pointer,
-- word 1 the stable pointer to the Haskell-side state.
createIPointer :: StablePtr a
               -> VTBL
               -> IO (IUnknown b)
createIPointer iface_st (vtbl,_) = do
  pre <- alloc (sizeofIfaceHeader)
  poke pre vtbl
  poke (pre `plusPtr` fromIntegral sizeofPtr) iface_st
  -- writeAddrOffAddr pre 0 vtbl
  -- writeStablePtrOffAddr pre 1 iface_st
  unmarshallIUnknown False{-finalise-} pre
-- | Size of the header written by 'createIPointer': two machine words.
sizeofIfaceHeader :: Word32 -- in bytes.
sizeofIfaceHeader =
  sizeofPtr -- lpVtbl
  + sizeofPtr -- interface pointer state
-- for convenience.
-- | Re-wrap an existing interface pointer's state behind a different vtable.
cloneIPointer :: IUnknown iid_old -> VTable (IUnknown iid_new) objState -> IO (IUnknown iid_new)
cloneIPointer iptr vtbl = do
  stable_state <- getIPointerState_stbl (ifaceToAddr iptr)
  createIPointer stable_state vtbl
-- | As 'cloneIPointer', but starting from a raw interface pointer.
cloneIPointer_prim :: Ptr (IUnknown a) -> VTable (IUnknown iid_new) objState -> IO (IUnknown iid_new)
cloneIPointer_prim iptr vtbl = do
  stable_state <- getIPointerState_stbl iptr
  createIPointer stable_state vtbl
-- | 'findInterface' is used by both the class factory
-- and 'queryInterface' to check whether the component
-- implements a particular interface.  IUnknown requests resolve to the
-- first registered interface; IDispatch requests resolve to the first
-- dispatch-capable one; anything else is looked up by GUID.
findInterface :: IUnkIfaceInfo
              -> Ptr GUID
              -> Ptr (Ptr (IUnknown b))
              -> IO HRESULT
findInterface ls piid ppv = do
  iid <- unmarshallGUID False piid
  if (iid == iidToGUID iidIUnknown) then
    case ls of
      [] -> return e_NOINTERFACE
      ((_,_,ip):_) -> realiseIPointer ip
   else if (iid == iidToGUID iidIDispatch) then
    case filter (isIDispatch) ls of
      [] -> return e_NOINTERFACE
      ((_,_,ip):_) -> realiseIPointer ip
   else
    let
     -- not found: null out the [out] pointer, per COM convention.
     findIt [] = do
        poke (castPtr ppv) nullPtr
        return e_NOINTERFACE
     findIt ((x,_,ip):xs)
       | x == iid = realiseIPointer ip
       | otherwise = findIt xs
    in
    findIt ls
 where
   -- 'realiseIPointer' fills in the [out] ptr and return.
   -- This has the desired effect of forcing the evaluation
   -- of the interface pointer itself.  Also AddRefs the pointer we
   -- hand out, as the caller now holds a reference.
   realiseIPointer newip = do
      primip <- marshallIUnknown newip
      writefptr ppv primip
      -- writeForeignObj ppv primip
      addRef (ifaceToAddr newip)
      return s_OK
   --
   -- Note: there's currently no way of indicating that
   -- an interface IA (which derives from IDispatch) has
   -- a 'idispatch' nature here other than go via the
   -- route of using the tlb marshaller.
   --
   isIDispatch (iid, flg, _) = flg || iid == iidToGUID iidIDispatch
-- | Find the entry for the given IID in a component's interface list.
-- When the IID is not present, the first registered interface is returned
-- as a fallback; looking up in an empty list is a caller error.
lookupInterface :: IID iid
                -> IUnkIfaceInfo -- [(GUID, Bool, IUnknown ())]
                -> IO (GUID, Bool, IUnknown ())
lookupInterface _ [] = ioError (userError "lookupInterface: interface not supported")
lookupInterface iid entries@(firstEntry:_) =
    case find matchesWanted entries of
      Just entry -> return entry
      Nothing -> return firstEntry
  where
    wantedGuid = iidToGUID iid
    matchesWanted (g, _, _) = g == wantedGuid
-- | Per-instance IUnknown bookkeeping: implemented interfaces, the
-- user-supplied release action, and the mutable reference count.
data IUnkState
 = IUnkState
     { iu_ifaces :: IUnkIfaceInfo
     , iu_release :: IO ()
     , iu_refcnt :: IORef Int
     }
-- One entry per interface: its GUID, whether it is dispatch-capable, and
-- the realised interface pointer.
type IUnkIfaceInfo = [(GUID, Bool, IUnknown ())]
-- | Build the shared instance state.  'fixIO' ties the knot: each interface
-- pointer embeds the stable pointer to the very state that lists it.  The
-- 'unsafePerformIO' calls delay pointer creation until an interface is
-- actually demanded (via the lazy list in 'iu_ifaces').
mkInstanceState :: [ComInterface objState]
                -> String
                -> IO ()
                -> objState
                -> IO (StablePtr (objState, IUnkState))
mkInstanceState iface_list dll_path releaseAction objState = do
  fixIO (\ stbl_st -> do
           ref_cnt <- newIORef 1
           let iptrs = map (mkIf stbl_st) iface_list
               iu_st = IUnkState iptrs releaseAction ref_cnt
           newStablePtr (objState, iu_st)
        )
 where
   mkIf st (Iface iid vtbl) =
     (iid, False, unsafePerformIO (createIPointer st vtbl))
   mkIf st (DispIface guid libid vtbl is_dual) =
     (guid, True, unsafePerformIO $ do
        let iid = guidToIID guid
            -- An empty Right means "no explicit location": fall back to the
            -- type library embedded in / next to this DLL.
            lib_loc =
              case libid of
                Right "" -> Right dll_path
                _ -> libid
        if is_dual then
          createDualInterface st vtbl lib_loc iid
         else do
          ip <- createIPointer st vtbl
          createDispInterface ip lib_loc iid
     )
{-
'Standard' IUnknown implementation:
Implementation assumes that the object state is of
the form :
     StablePtr ((real_obj_state::a), (iu_state :: IUnkState))
(which it is if 'createCoClass' was used to create the component
instance.)
-}
-- | IUnknown::QueryInterface — delegate to 'findInterface' over the
-- instance's registered interface list.
queryInterface :: Ptr (IUnknown a)
               -> Ptr GUID
               -> Ptr (Ptr (IUnknown b))
               -> IO HRESULT
queryInterface iptr riid ppvObject = do
  iid <- unmarshallGUID False riid
  -- putMessage ("qi: " ++ show (iptr,iid))
  if_ls <- getSupportedInterfaces iptr
  hr <- findInterface if_ls riid ppvObject
  -- putMessage ("qi: " ++ show (iid,hr))
  return hr
-- | IUnknown::AddRef — bump the shared reference count.
-- NOTE(review): returns the count *before* the increment — confirm callers
-- only use the result for diagnostics, as COM permits.
addRef :: Ptr (IUnknown a) -> IO Word32
addRef iptr = do
  -- putMessage "addRef"
  v <- readRefCount iptr
  writeRefCount iptr (v+1)
  return (fromIntegral v)
-- | IUnknown::Release — decrement the count and destroy the object when it
-- reaches zero.
release :: Ptr (IUnknown a) -> IO Word32
release iptr = do
  -- putMessage ("release: " ++ show iptr)
  v <- readRefCount iptr
  -- putMessage ("release: " ++ show (iptr,v))
  let v' = v-1
  writeRefCount iptr v'
  if v' <= 0 then do
    releaseObj iptr
    let x = (fromIntegral 0)
    return x
   else do
    let x = (fromIntegral (v-1))
    return x
--
foreign import stdcall "wrapper" export_queryInterface :: (Ptr (IUnknown a) -> Ptr GUID -> Ptr (Ptr (IUnknown b)) -> IO Int32) -> IO (Ptr ())
foreign import stdcall "wrapper" export_addRef :: (Ptr (IUnknown a) -> IO Word32) -> IO (Ptr ())
foreign import stdcall "wrapper" export_release :: (Ptr (IUnknown a) -> IO Word32) -> IO (Ptr ())
-- | Tear down an instance whose refcount hit zero: run the user finaliser
-- and free the embedded stable pointer; remaining memory is left to the GC.
releaseObj :: Ptr (IUnknown a) -> IO ()
releaseObj iptr = do
  -- putMessage "releaseObj"
  -- invoke user-supplied finaliser.
  r <- iptr # getReleaseAction
  r
  -- free up mem allocated to hold interface pointers (and vtbls?).
  -- free embedded stable pointers.
  stbl <- iptr # getIPointerState_stbl
  freeStablePtr stbl
  -- and the GC will take care of the rest..
  return ()
--Accessing data accessible via a Haskell i-pointer - this stuff
-- | Read the current reference count stored in the instance state.
readRefCount :: Ptr (IUnknown a) -> IO Int
readRefCount ptr = do
  iu <- getIUnkState ptr
  readIORef (iu_refcnt iu)
-- | Overwrite the reference count.
writeRefCount :: Ptr (IUnknown a) -> Int -> IO ()
writeRefCount ptr v = do
  iu <- getIUnkState ptr
  writeIORef (iu_refcnt iu) v
-- | Fetch the user-supplied release action for this instance.
getReleaseAction :: Ptr (IUnknown a) -> IO (IO ())
getReleaseAction ptr = do
  iu <- getIUnkState ptr
  return (iu_release iu)
-- | Fetch the list of interfaces the instance implements.
getSupportedInterfaces :: Ptr (IUnknown a) -> IO IUnkIfaceInfo
getSupportedInterfaces ptr = do
  iu_state <- getIUnkState ptr
  return (iu_ifaces iu_state)
-- | Dereference an i-pointer to the IUnknown half of its state pair.
getIUnkState :: Ptr (IUnknown a) -> IO IUnkState
getIUnkState iptr = do
  stbl <- getIPointerState_stbl iptr
  (_,x) <- deRefStablePtr stbl
  return x
-- users of 'createCoClass' *must* use this and not
-- the one below!
-- | Dereference an i-pointer to the user half of its state pair.
getObjState :: Ptr (IUnknown a) -> IO b
getObjState iptr = do
  stbl <- getIPointerState_stbl iptr
  (x,_) <- deRefStablePtr stbl
  return x
-- | Dereference an i-pointer to the whole state (no pair assumption).
getRealObjState :: Ptr (IUnknown a) -> IO b
getRealObjState iptr = do
  stbl <- getIPointerState_stbl iptr
  deRefStablePtr stbl
-- | Read the stable pointer stored one word past the vtbl pointer
-- (see the layout written by 'createIPointer').
getIPointerState_stbl :: Ptr (IUnknown a) -> IO (StablePtr b)
getIPointerState_stbl iptr = peek (iptr `plusPtr` fromIntegral sizeofPtr)
--readStablePtrOffAddr iptr 1
--Dispatch interface support:
-- | Build a dual interface: a normal vtable interface whose first three
-- (IUnknown) slots are dropped and whose remaining methods are appended
-- after the four IDispatch methods.
createDualInterface :: StablePtr objState
                    -> ComVTable (IUnknown iid) objState
                    -> Either LIBID String
                    -> IID (IUnknown iid)
                    -> IO (IUnknown iid)
createDualInterface ip_state vtbl libid iid = do
  ip <- createIPointer ip_state vtbl
  st <- mkDispatchState libid ip iid
  meths <- unmarshallVTable vtbl
  let real_meths =
        case meths of
          (qi : ar : re : ls) -> ls
          _ -> error "createDualInterface: failed to strip of IU methods"
  vtable <- createDispVTable real_meths st
  cloneIPointer ip vtable
createDispInterface :: IUnknown iid -- the interface to delegate to
                    -> Either LIBID String
                        -- libid of type library to use / path to where the .tlb is stored.
                    -> IID (IUnknown iid)
                        -- what interface it implements (needed to
                        -- get at the type library which drives
                        -- the dispatch interface.)
                    -> IO (IUnknown iid)
                        -- the dispatch implementation handed back.
createDispInterface ip libid iid = do
-- putMessage ("createDispInterface: " ++ show (libid, iid))
  st <- mkDispatchState libid ip iid
-- putMessage ("createDispInterface: " ++ show (libid, iid))
  vtable <- createDispVTable [] st
-- putMessage ("createDispInterface: " ++ show (libid, iid))
  i <- cloneIPointer ip vtable
-- putMessage ("createDispInterface: " ++ show (libid, iid))
  return i
-- | Shared mutable state for a dispatch interface; the ITypeInfo pointer
-- starts null and is cached on first use (see 'getTypeInfo').
mkDispatchState :: Either LIBID String
                -> IUnknown iid
                -> IID (IUnknown iid)
                -> IO DispState
mkDispatchState libid ip iid = do
  pTInfo_ref <- newIORef nullPtr
  return (DispState libid (coerceIID iid) (coerceIP ip) pTInfo_ref)
--sigh.
-- Forget the phantom type on an IID / interface pointer.
coerceIID :: IID a -> IID b
coerceIID iid = guidToIID (iidToGUID iid)
coerceIP :: IUnknown a -> IUnknown b
coerceIP x = castIface x
data DispState
 = DispState {
      disp_libid :: Either LIBID String,
      disp_iid :: (IID ()),
      disp_ip :: (IUnknown ()),
      disp_ti :: (IORef (PrimIP (ITypeInfo ())))
   }
type DISPPARAMS = Ptr () -- abstract, really.
type EXCEPINFO = Ptr () -- abstract, really.
-- | Assemble the IDispatch vtable: the four IDispatch methods followed by
-- any extra (dual) methods, with IUnknown prepended by 'createComVTable'.
createDispVTable :: [Ptr ()]
                 -> DispState
                 -> IO (ComVTable (IDispatch ()) DispState)
createDispVTable meths disp_st = do
  a_getTypeInfoCount <- export_getTypeInfoCount getTypeInfoCount
  a_getTypeInfo <- export_getTypeInfo (getTypeInfo disp_st)
  a_getIDsOfNames <- export_getIDsOfNames (getIDsOfNames disp_st)
  a_invoke <- export_invoke (invoke disp_st)
  createComVTable ([ a_getTypeInfoCount
                   , a_getTypeInfo
                   , a_getIDsOfNames
                   , a_invoke
                   ] ++ meths)
-- | IDispatch::GetTypeInfoCount — this implementation always reports one
-- type info.
getTypeInfoCount :: Ptr () -> Ptr Word32 -> IO HRESULT
getTypeInfoCount iptr pctInfo = do
-- putMessage "getTypeInfoCount"
   writeWord32 pctInfo 1
   return s_OK
foreign import stdcall "wrapper" export_getTypeInfoCount
   :: (Ptr () -> Ptr Word32 -> IO HRESULT) -> IO (Ptr ())
-- | IDispatch::GetTypeInfo — hand out the (cached) ITypeInfo for this
-- interface.  Loading is deferred to the first call because it depends on
-- the caller's LCID.
getTypeInfo :: DispState -> Ptr (IDispatch ()) -> Word32 -> LCID -> Ptr () -> IO HRESULT
getTypeInfo disp_state this iTInfo lcid ppTInfo
 | iTInfo /= 0 = return tYPE_E_ELEMENTNOTFOUND
 | ppTInfo == nullPtr = return e_POINTER
 | otherwise = do
-- putMessage "getTypeInfo"
    poke (castPtr ppTInfo) nullPtr
    let ppITInfo_ref = disp_ti disp_state
    (hr, pITInfo) <- do
        pITInfo <- readIORef ppITInfo_ref
        -- load up the typelib is done the first time
        -- around. Cannot do it earlier (or lazily), since
        -- loading is dependent on the 'lcid'.
        --
        -- The caching of the ITypeInfo* only works because
        -- we keep a disp_state for each interface.
        if (pITInfo == nullPtr) then do
           ppITInfo <- allocOutPtr
           hr <- loadTypeInfo (disp_libid disp_state) (disp_iid disp_state) lcid ppITInfo
           -- putMessage ("getTypeInfo: " ++ show hr)
           if (failed hr) then
              return (hr, undefined)
            else do
              pITInfo <- peek ppITInfo
              writeIORef ppITInfo_ref pITInfo
              return (s_OK, pITInfo)
         else
           return (s_OK, pITInfo)
    -- do an AddRef() since we're handing out a copy to it.
    -- => when the GetIDsOfNames() and Invoke() implementations
    -- below call getTypeInfo, they'll have to call Release()
    -- when finished with the result here.
    --
    if (failed hr) then
       return hr
     else do
       punk <- unmarshallIUnknown True{-addRef and finalise-} pITInfo
       -- to counter the effect of running the finaliser on pITInfo.
       Com.addRef punk
       poke (castPtr ppTInfo) pITInfo
       return s_OK
-- Loading the type info is lcid sensitive, so we
-- have to manually invoke this from within GetTypeInfo()
-- | Load the ITypeInfo for @iid@, either from a registered type library
-- (LIBID) or from a .tlb/.dll file path; stores the result via @ppITI@.
loadTypeInfo :: Either LIBID String
             -> IID iid
             -> LCID
             -> Ptr (PrimIP (ITypeInfo ()))
             -> IO HRESULT
loadTypeInfo tlb_loc iid lcid ppITI = do
   (hr, pITypeLib) <-
      catchComException
        (case tlb_loc of
           Left libid -> do
              ip <- loadRegTypeLib libid 1 0 (fromIntegral (primLangID lcid))
              return (s_OK, ip)
                -- load it in silently
           Right path -> do
              ip <- loadTypeLibEx path False{-don't register-}
              return (s_OK, ip))
        (\ ex -> do
            putMessage "Failed to load typelib"
            return (fromMaybe e_FAIL (coGetErrorHR ex), interfaceNULL))
   if (failed hr) then
      return hr
    else do
      hr <- pITypeLib # getTypeInfoOfGuid iid ppITI
      -- pITypeLib is a finalised i-pointer, and will be
      -- released in due course.
      -- NOTE: potential bug farm.
      return hr
-- from oleauto.h
-- NOTE(review): this raw binding appears unused here ('loadRegTypeLib' is
-- used above instead) — confirm before removing.
foreign import ccall "primLoadRegTypeLib"
  primLoadRegTypeLib :: Ptr () -> Word16 -> Word16 -> Word32 -> Ptr () -> IO HRESULT
foreign import stdcall "wrapper" export_getTypeInfo
   :: (Ptr (IDispatch ()) -> Word32 -> LCID -> Ptr () -> IO HRESULT) -> IO (Ptr ())
-- | IDispatch::GetIDsOfNames — delegate name-to-DISPID mapping to the
-- type library's ITypeInfo.
getIDsOfNames :: DispState
              -> Ptr (IDispatch ())
              -> Ptr (IID ())
              -> Ptr WideString
              -> Word32
              -> LCID
              -> Ptr DISPID
              -> IO HRESULT
getIDsOfNames disp_state this riid rgszNames cNames lcid rgDispID = do
-- putMessage ("getIDs: " ++ show cNames)
  pti <- allocOutPtr
  hr <- getTypeInfo disp_state this 0 lcid pti
  if (failed hr) then do
     free pti
     return hr
   else do
     prim_ti <- peek (castPtr pti)
     free pti
     -- pw <- peek (castPtr rgszNames)
     hr <- prim_ti # getIDsOfNamesTI rgszNames cNames rgDispID
     return hr
foreign import stdcall "wrapper" export_getIDsOfNames
   :: (Ptr (IDispatch ()) -> Ptr (IID ()) -> Ptr WideString -> Word32 -> LCID -> Ptr DISPID -> IO HRESULT) -> IO (Ptr ())
-- | IDispatch::Invoke — reject non-null riid per the COM spec, then hand
-- the call to ITypeInfo::Invoke against the delegate interface pointer.
invoke :: DispState
       -> Ptr (IDispatch ())
       -> DISPID
       -> Ptr (IID a)
       -> LCID
       -> Word32
       -> Ptr DISPPARAMS
       -> Ptr VARIANT
       -> Ptr EXCEPINFO
       -> Ptr Word32
       -> IO HRESULT
invoke disp_state this dispIdMember riid lcid wFlags pDispParams pVarResult pExcepInfo puArgErr = do
  iid <- unmarshallIID False riid
-- putMessage ("invoke: " ++ show (dispIdMember, iid))
  if (iid /= castIID iidNULL) then
     return dISP_E_UNKNOWNINTERFACE
   else do
    pti <- allocOutPtr
    hr <- getTypeInfo disp_state this 0 lcid pti
    if (failed hr) then do
       free pti
       return hr
     else do
       prim_ti <- peek (castPtr pti)
       let ip = disp_ip disp_state
       -- hand over to the typelib marshaller, but making sure that
       -- any exceptions within the user code will be handled correctly.
       clearException
       hr <- prim_ti # invokeTI ip dispIdMember wFlags pDispParams pVarResult pExcepInfo puArgErr
       fillException pExcepInfo lcid
       ip <- unmarshallIUnknown False prim_ti
       ip # Com.release
       return hr
-- | Call ITypeInfo::Invoke (vtable slot 11) through a raw method pointer.
invokeTI :: IUnknown a
         -> DISPID
         -> Word32
         -> Ptr DISPPARAMS
         -> Ptr VARIANT
         -> Ptr EXCEPINFO
         -> Ptr Word32
         -> Ptr (ITypeInfo a)
         -> IO HRESULT
invokeTI ip dispIdMember wFlags pDispParams pVarResult pExcepInfo puArgErr this = do
   iptr_fo <- marshallIUnknown ip
   let offset = (11::Int)
   lpVtbl <- peek (castPtr this)
   methPtr <- indexPtr lpVtbl offset
   withForeignPtr iptr_fo $ \ iptr ->
     prim_invokeTI methPtr (castPtr this) iptr dispIdMember wFlags pDispParams pVarResult pExcepInfo puArgErr
foreign import stdcall "dynamic"
  prim_invokeTI :: Ptr (ITypeInfo a) -> Ptr () -> Ptr () -> DISPID -> Word32
                -> Ptr DISPPARAMS -> Ptr VARIANT -> Ptr EXCEPINFO -> Ptr Word32 -> IO HRESULT
-- | Call ITypeLib::GetTypeInfoOfGuid (vtable slot 6) through a raw
-- method pointer.
getTypeInfoOfGuid :: IID iid -> Ptr (PrimIP (ITypeInfo ())) -> IUnknown a -> IO HRESULT
getTypeInfoOfGuid iid ppITI this = do
  let offset = (6::Int)
  pthis <- marshallIUnknown this
  let a = foreignPtrToPtr pthis
  lpVtbl <- peek (castPtr a)
  methPtr <- indexPtr lpVtbl offset
  piid <- marshallIID iid
  withForeignPtr pthis $ \ pthis ->
   withForeignPtr piid $ \ piid ->
    prim_getTypeInfoOfGuid methPtr pthis piid ppITI
foreign import stdcall "dynamic"
   prim_getTypeInfoOfGuid :: Ptr () -> Ptr (Ptr a)
                          -> Ptr (IID iid) -> Ptr (PrimIP (ITypeInfo ())) -> IO HRESULT
-- | Call ITypeInfo::GetIDsOfNames (vtable slot 10) through a raw
-- method pointer.
getIDsOfNamesTI :: Ptr WideString -> Word32 -> Ptr DISPID -> Ptr (ITypeInfo ()) -> IO HRESULT
getIDsOfNamesTI rgszNames cNames rgDispID this = do
  let offset = (10::Int)
  lpVtbl <- peek (castPtr this)
  methPtr <- indexPtr lpVtbl offset
  prim_getIDsOfNamesTI methPtr (castPtr this) rgszNames cNames rgDispID
foreign import stdcall "dynamic"
   prim_getIDsOfNamesTI :: Ptr (ITypeInfo ()) -> Ptr () -> Ptr WideString -> Word32 -> Ptr DISPID -> IO HRESULT
-- Exception bookkeeping around Invoke — currently no-op placeholders.
clearException :: IO ()
clearException = return ()
fillException :: Ptr EXCEPINFO
              -> LCID
              -> IO ()
fillException _ _ = return ()
foreign import stdcall "wrapper" export_invoke
   :: (Ptr (IDispatch ()) -> DISPID -> Ptr (IID a) -> LCID -> Word32 -> Ptr DISPPARAMS
       -> Ptr VARIANT -> Ptr EXCEPINFO -> Ptr Word32 -> IO HRESULT) -> IO (Ptr ())
-- Phantom tag for ITypeInfo interface pointers.
data TypeInfo a = TypeInfo__
type ITypeInfo a = IUnknown (TypeInfo a)
--makeLangID :: Word16 -> Word16 -> Word16
--makeLangID p s = (shiftL p 10) .|. s
-- | Extract the primary language identifier: the low ten bits of an LCID.
primLangID :: Word32 -> Word32
primLangID lcid = lcid .&. 0x3ff
--Manufacturing method tables out of a list of method pointers.
-- | Allocate a vtable and fill it with the given method pointers, returning
-- the table paired with its method count.
createVTable :: [Ptr ()] -> IO (VTable iid objState)
createVTable methods = do
   vtbl <- alloc (sizeofPtr * fromIntegral no_meths)
   -- Write each pointer into its slot; the unit results of the pokes are
   -- intentionally discarded ('zipWithM_' rather than 'sequence . zipWith').
   zipWithM_ (pokeElemOff vtbl) [(0::Int)..] methods
   return (vtbl, no_meths)
 where
  no_meths = length methods
-- | Read the method pointers back out of a vtable.  'createVTable' writes
-- exactly @no_meths@ entries at indices @0 .. no_meths-1@, so read only
-- those; the previous upper bound of @no_meths@ read one element past the
-- end of the allocation.
unmarshallVTable :: VTable iid objState -> IO [Ptr ()]
unmarshallVTable (vtbl, no_meths) =
  mapM (peekElemOff vtbl) [(0::Int) .. no_meths - 1]
-- | Build a COM vtable by prepending the three IUnknown methods
-- (QueryInterface, AddRef, Release) to the supplied method pointers.
createComVTable :: [Ptr ()] -> IO (ComVTable iid objState)
createComVTable methods = do
  m_queryInterface <- export_queryInterface queryInterface
  m_addRef <- export_addRef addRef
  m_release <- export_release release
  createVTable (m_queryInterface: m_addRef: m_release: methods)
| jjinkou2/ComForGHC7.4 | System/Win32/Com/Server.hs | bsd-3-clause | 23,917 | 504 | 15 | 5,953 | 5,615 | 3,106 | 2,509 | 471 | 6 |
module Largest5DigitNumberInSeries where
-- | From a list of numbers return the biggest 5 digit one (5 kyu)
-- | Link: https://biturl.io/5Digit
-- | My original solution
-- | Slide a window of (up to) five characters over the digit string and
-- return the largest number read from any window; the empty string gives 0.
digit5 :: String -> Int
digit5 str = go 0 str
  where
    -- Tail-recursive scan carrying the best window value seen so far.
    go best [] = best
    go best s@(_:rest) = go (best `max` read (take 5 s)) rest
| Eugleo/Code-Wars | src/string-kata/Largest5DigitNumberInSeries.hs | bsd-3-clause | 262 | 0 | 8 | 49 | 67 | 37 | 30 | 4 | 1 |
module Vish.Graphics.Data.Font where
import qualified Vish.Graphics.Data.Color as C
import Graphics.Text.TrueType (Font)
import Control.Lens
-- | Text-rendering style: TrueType font, fill colour, and glyph size in
-- pixels.  Lenses '_font', '_color' and '_pixelSize' are generated below.
data Style =
  Style
  { _font :: Font
  , _color :: C.Color
  , _pixelSize :: Float
  }
makeLenses ''Style
| andgate/vish | src/Vish/Graphics/Data/Font.hs | bsd-3-clause | 265 | 0 | 9 | 58 | 71 | 45 | 26 | -1 | -1 |
{-# LANGUAGE TemplateHaskell, TypeFamilies, TypeOperators #-}
-----------------------------------------------------------------------------
-- |
-- Module : Data.Units.US.Troy
-- Copyright : (C) 2013 Richard Eisenberg
-- License : BSD-style (see LICENSE)
-- Maintainer : Richard Eisenberg (rae@cs.brynmawr.edu)
-- Stability : experimental
-- Portability : non-portable
--
-- This module defines troy measures of mass. The troy
-- system is most often used when measuring precious metals.
--
-- Included are all units mentioned here:
-- <http://en.wikipedia.org/wiki/United_States_customary_units>
-- Where possible, conversion rates have been independently verified
-- at a US government website. However, Wikipedia's base is /much/
-- better organized than any government resource immediately available.
-- The US government references used are as follows:
-- <http://nist.gov/pml/wmd/metric/upload/SP1038.pdf>
-- <http://nist.gov/pml/wmd/pubs/upload/appc-14-hb44-final.pdf>
-----------------------------------------------------------------------------
module Data.Units.US.Troy (
module Data.Units.US.Troy,
-- | The avoirdupois grain is the same as the troy grain
Grain(..)
) where
import Data.Metrology.TH
import Data.Units.US.Avoirdupois ( Grain(..) )
import Language.Haskell.TH
declareDerivedUnit "Pennyweight" [t| Grain |] 24 (Just "dwt")
declareDerivedUnit "Ounce" [t| Pennyweight |] 20 (Just "ozt")
declareDerivedUnit "Pound" [t| Ounce |] 12 (Just "lbt")
-- | Includes 'Grain', 'Pennyweight', 'Ounce', and 'Pound'
troyMassMeasures :: [Name]
troyMassMeasures = [ ''Grain, ''Pennyweight, ''Ounce, ''Pound ]
| goldfirere/units | units-defs/Data/Units/US/Troy.hs | bsd-3-clause | 1,672 | 0 | 7 | 245 | 176 | 117 | 59 | 12 | 1 |
{-# LANGUAGE CPP #-}
{-# LANGUAGE DeriveDataTypeable #-}
-- |
-- Module: $HEADER$
-- Description: Generalised variant of Data.Monoid.Last.
-- Copyright: (c) 2013 Peter Trsko
-- License: BSD3
--
-- Maintainer: peter.trsko@gmail.com
-- Stability: experimental
-- Portability: non-portable (CPP, DeriveDataTypeable)
--
-- 'LastNonEmpty' is like a generalised version of @Data.Monoid.Last@. For
-- @Maybe a@ values it behaves the same.
module Data.Monoid.LastNonEmpty
(
-- * LastNonEmpty
LastNonEmpty(..)
, mapLastNonEmpty
, mapLastNonEmpty2
-- ** Lenses
, lastNonEmpty
-- * Monoid
, Monoid(..)
#if MIN_VERSION_base(4,5,0)
, (<>)
#endif
)
where
import Data.Monoid (Monoid(..), (<>))
import Data.Data (Data)
import Data.Typeable (Typeable)
#ifdef WITH_SEMIGROUP
import qualified Data.Semigroup as Semigroup (Semigroup(..))
#endif
import Data.Function.Between (between)
import Data.Functor.Utils (iso)
-- | Wrapper whose 'mappend' keeps the right operand unless it equals
-- 'mempty', in which case the left operand wins — a generalisation of
-- @Data.Monoid.Last@ from @Maybe@ to any @Eq@/@Monoid@ type.
newtype LastNonEmpty a = LastNonEmpty {getLastNonEmpty :: a}
    deriving (Data, Bounded, Eq, Ord, Read, Show, Typeable)
instance (Eq a, Monoid a) => Monoid (LastNonEmpty a) where
    mempty = LastNonEmpty mempty
    {-# INLINEABLE mempty #-}
    x `mappend` y@(LastNonEmpty y')
      -- right operand is "empty": fall back to the left one.
      | y' == mempty = x
      | otherwise = y
    {-# INLINEABLE mappend #-}
#ifdef WITH_SEMIGROUP
-- Semigroup mirrors the Monoid append; repetition is idempotent.
instance (Eq a, Monoid a) => Semigroup.Semigroup (LastNonEmpty a) where
    (<>) = mappend
    {-# INLINEABLE (<>) #-}
    times1p _ x = x
    {-# INLINEABLE times1p #-}
#endif
instance Functor LastNonEmpty where
    fmap = mapLastNonEmpty
-- | Lift function operating on value wrapped in 'LastNonEmpty' to it's
-- isomorphic counterpart operating on 'LastNonEmpty' wrapped values.
mapLastNonEmpty
    :: (a -> b)
    -> LastNonEmpty a
    -> LastNonEmpty b
mapLastNonEmpty = LastNonEmpty `between` getLastNonEmpty
-- | Variant of 'mapLastNonEmpty' for functions with arity two.
mapLastNonEmpty2
    :: (a -> b -> c)
    -> LastNonEmpty a
    -> LastNonEmpty b
    -> LastNonEmpty c
mapLastNonEmpty2 = mapLastNonEmpty `between` getLastNonEmpty
-- | Lens for 'LastNonEmpty', built from the wrap/unwrap isomorphism.
--
-- See /lens/ <http://hackage.haskell.org/package/lens> package for details.
lastNonEmpty
    :: Functor f
    => (a -> f b)
    -> LastNonEmpty a
    -> f (LastNonEmpty b)
lastNonEmpty = iso LastNonEmpty getLastNonEmpty
| trskop/hs-not-found | not-found/src/Data/Monoid/LastNonEmpty.hs | bsd-3-clause | 2,358 | 0 | 10 | 482 | 469 | 278 | 191 | 40 | 1 |
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE MonadComprehensions #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RebindableSyntax #-}
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE UndecidableInstances #-}
{-# LANGUAGE ViewPatterns #-}
-- TPC-H Q5
module Queries.TPCH.Standard.Q5
( q5
, q5Default
) where
import qualified Data.Time.Calendar as C
import Database.DSH
import Schema.TPCH
-- | TPC-H Query Q5 with standard validation parameters
q5Default :: Q [(Text, Decimal)]
q5Default = q5 (C.fromGregorian 1994 1 1) "ASIA"
-- | TPC-H Query Q5
q5 :: Day -> Text -> Q [(Text, Decimal)]
q5 startDate regionName =
sortWith (\(view -> (_, r)) -> r * (-1)) $
map (\(view -> (k, g)) -> pair k (sum [ e * (1 - d) | (view -> (_, e, d)) <- g ])) $
groupWithKey (\(view -> (n, _, _)) -> n) $
[ tup3 (n_nameQ n) (l_extendedpriceQ l) (l_discountQ l)
| c <- customers
, o <- orders
, l <- lineitems
, s <- suppliers
, n <- nations
, r <- regions
, c_custkeyQ c == o_custkeyQ o
, l_orderkeyQ l == o_orderkeyQ o
, l_suppkeyQ l == s_suppkeyQ s
, c_nationkeyQ c == s_nationkeyQ s
, s_nationkeyQ s == n_nationkeyQ n
, n_regionkeyQ n == r_regionkeyQ r
, r_nameQ r == toQ regionName
, o_orderdateQ o >= toQ startDate
, o_orderdateQ o < toQ (C.addDays 365 startDate)
]
| ulricha/dsh-tpc-h | Queries/TPCH/Standard/Q5.hs | bsd-3-clause | 1,478 | 0 | 19 | 366 | 479 | 256 | 223 | 39 | 1 |
{-# LANGUAGE DeriveDataTypeable, RecordWildCards, TemplateHaskell, MagicHash #-}
{-# OPTIONS_GHC -fno-warn-missing-fields -fno-warn-unused-binds #-}
module System.Console.CmdArgs.Test.Implicit.Tests(test, demos) where
import System.Console.CmdArgs
import System.Console.CmdArgs.Explicit(modeHelp)
import System.Console.CmdArgs.Test.Implicit.Util
import System.Console.CmdArgs.Quote
import Data.Int
import Data.Ratio
-- from bug #256 and #231
-- | Record covering optional, list, floating and fixed-width fields; the
-- Float field is the positional argument ('args').
data Test1
  = Test1 {maybeInt :: Maybe Int, listDouble :: [Double], maybeStr :: Maybe String, float :: Float
          ,bool :: Bool, maybeBool :: Maybe Bool, listBool :: [Bool], int64 :: Int64}
    deriving (Show,Eq,Data,Typeable)
def1 = Test1 def def def (def &= args) def def def def
mode1 = cmdArgsMode def1
-- Same mode built via the Template Haskell quotation API; both must behave
-- identically (tested against each other below).
$(cmdArgsQuote [d|
    mode1_ = cmdArgsMode# def1_
    def1_ = Test1 def def def (def &=# args) def def def def
    |])
-- Exercise parsing of Maybe/list/positional flags against both modes.
test1 = do
    let Tester{..} = testers "Test1" [mode1,mode1_]
    [] === def1
    ["--maybeint=12"] === def1{maybeInt = Just 12}
    ["--maybeint=12","--maybeint=14"] === def1{maybeInt = Just 14}
    fails ["--maybeint"]
    fails ["--maybeint=test"]
    ["--listdouble=1","--listdouble=3","--listdouble=2"] === def1{listDouble=[1,3,2]}
    fails ["--maybestr"]
    ["--maybestr="] === def1{maybeStr=Just ""}
    ["--maybestr=test"] === def1{maybeStr=Just "test"}
    ["12.5"] === def1{float=12.5}
    ["12.5","18"] === def1{float=18}
    ["--bool"] === def1{bool=True}
    ["--maybebool"] === def1{maybeBool=Just True}
    ["--maybebool=off"] === def1{maybeBool=Just False}
    ["--listbool","--listbool=true","--listbool=false"] === def1{listBool=[True,True,False]}
    ["--int64=12"] === def1{int64=12}
    fails ["--listbool=fred"]
    invalid $ \_ -> def1{listBool = def &= opt "yes"}
-- from bug #230
-- Test2: two modes sharing the short flag -b for differently-typed fields.
data Test2 = Cmd1 {bs :: [String]}
           | Cmd2 {bar :: Int}
             deriving (Show, Eq, Data, Typeable)
mode2 = cmdArgsMode $ modes [Cmd1 [], Cmd2 42]
test2 = do
    let Tester{..} = tester "Test2" mode2
    fails []
    ["cmd1","-btest"] === Cmd1 ["test"]
    ["cmd2","-b14"] === Cmd2 14
-- various argument position
-- Test3: numbered argPos fields (with a default via opt) plus a trailing
-- args catch-all.
data Test3 = Test3 {pos1_1 :: [Int], pos1_2 :: [String], pos1_rest :: [String]}
             deriving (Show, Eq, Data, Typeable)
mode3 = cmdArgsMode $ Test3 (def &= argPos 1) (def &= argPos 2 &= opt "foo") (def &= args)
$(cmdArgsQuote [d| mode3_ = cmdArgsMode# $ Test3 (def &=# argPos 1) (def &=# argPos 2 &=# opt "foo") (def &=# args) |])
test3 = do
    let Tester{..} = testers "Test3" [mode3,mode3_]
    fails []
    fails ["a"]
    -- Position 0 ("a") goes to the args field, 1 to pos1_1, 2 to pos1_2.
    ["a","1"] === Test3 [1] ["foo"] ["a"]
    ["a","1","c"] === Test3 [1] ["c"] ["a"]
    ["a","1","c","d"] === Test3 [1] ["c"] ["a","d"]
    -- help annotations on args fields are rejected.
    invalid $ \_ -> Test3 def def (def &= help "help" &= args)
-- from bug #222
-- Test4: an args field with a default used only when no positionals appear.
data Test4 = Test4 {test_4 :: [String]}
             deriving (Show, Eq, Data, Typeable)
mode4 = cmdArgsMode $ Test4 (def &= opt "hello" &= args)
test4 = do
    let Tester{..} = tester "Test4" mode4
    [] === Test4 ["hello"]
    ["a"] === Test4 ["a"]
    ["a","b"] === Test4 ["a","b"]
-- from #292, automatic enumerations
-- Test5: an enumeration field parsed by unambiguous case-insensitive prefix.
data ABC = Abacus | Arbitrary | B | C deriving (Eq,Show,Data,Typeable)
data Test5 = Test5 {choice :: ABC} deriving (Eq,Show,Data,Typeable)
mode5 = cmdArgsMode $ Test5 B
test5 = do
    let Tester{..} = tester "Test5" mode5
    [] === Test5 B
    -- "A" is ambiguous between Abacus and Arbitrary, so it must fail.
    fails ["--choice=A"]
    ["--choice=c"] === Test5 C
    ["--choice=C"] === Test5 C
    ["--choice=Aba"] === Test5 Abacus
    ["--choice=abacus"] === Test5 Abacus
    ["--choice=c","--choice=B"] === Test5 B
-- tuple support
-- Test6: tuple-valued fields parsed from comma-separated flag values.
data Test6 = Test6 {val1 :: (Int,Bool), val2 :: [(Int,(String,Double))]} deriving (Eq,Show,Data,Typeable)
val6 = Test6 def def
mode6 = cmdArgsMode val6
test6 = do
    let Tester{..} = tester "Test6" mode6
    [] === val6
    ["--val1=1,True"] === val6{val1=(1,True)}
    ["--val1=84,off"] === val6{val1=(84,False)}
    -- Arity of the comma-separated value must match the tuple exactly.
    fails ["--val1=84"]
    fails ["--val1=84,off,1"]
    ["--val2=1,2,3","--val2=5,6,7"] === val6{val2=[(1,("2",3)),(5,("6",7))]}
-- from #333, add default fields
-- Test7: a field annotation (name "rename") given in one constructor applies
-- to the same-named field in all constructors.
data Test7 = Test71 {shared :: Int}
           | Test72 {unique :: Int, shared :: Int}
           | Test73 {unique :: Int, shared :: Int}
             deriving (Eq,Show,Data,Typeable)
mode7 = cmdArgsMode $ modes [Test71{shared = def &= name "rename"}, Test72{unique=def}, Test73{}]
test7 = do
    let Tester{..} = tester "Test7" mode7
    fails []
    ["test71","--rename=2"] === Test71 2
    ["test72","--rename=2"] === Test72 0 2
    ["test72","--unique=2"] === Test72 2 0
    ["test73","--rename=2"] === Test73 0 2
    ["test73","--unique=2"] === Test73 2 0
-- from #252, grouping
-- Test8: groupname annotations on flags and on modes, in both the annotation
-- and the explicit record-based (cmdArgsMode_) styles.
data Test8 = Test8 {test8a :: Int, test8b :: Int, test8c :: Int}
           | Test81
           | Test82
             deriving (Eq,Show,Data,Typeable)
mode8 = cmdArgsMode $ modes [Test8 1 (2 &= groupname "More flags") 3 &= groupname "Mode1", Test81, Test82 &= groupname "Mode2"]
mode8_ = cmdArgsMode_ $ modes_ [record Test8{} [atom (1::Int), atom (2::Int) += groupname "More flags", atom (3::Int)] += groupname "Mode1"
                               ,record Test81{} []
                               ,record Test82{} [] += groupname "Mode2"]
test8 = do
    let Tester{..} = testers "Test8" [mode8,mode8_]
    -- Grouped flags appear under their group heading in the help text.
    isHelp ["-?"] ["Flags:","  --test8a=INT","More flags:","  --test8b=INT"]
    fails []
    ["test8","--test8a=18"] === Test8 18 2 3
-- bug from Sebastian Fischer, enums with multiple fields
-- Test9: an enum-style field shared between two modes; the enum flags work
-- in both modes, and test91 is the automatic (&= auto) mode.
data XYZ = X | Y | Z deriving (Eq,Show,Data,Typeable)
data Test9 = Test91 {foo :: XYZ}
           | Test92 {foo :: XYZ}
             deriving (Eq,Show,Data,Typeable)
mode9 = cmdArgsMode $ modes [Test91 {foo = enum [X &= help "pick X (default)", Y &= help "pick Y"]} &= auto, Test92{}]
mode9_ = cmdArgsMode_ $ modes_ [record Test91{} [enum_ foo [atom X += help "pick X (default)", atom Y += help "pick Y"]] += auto, record Test92{} []]
test9 = do
    let Tester{..} = testers "Test9" [mode9,mode9_]
    [] === Test91 X
    ["test91","-x"] === Test91 X
    ["test91","-y"] === Test91 Y
    -- Z was not listed in the enum, so -z is not a flag.
    fails ["test91","-z"]
    ["test92","-x"] === Test92 X
    ["test92","-y"] === Test92 Y
    ["test92"] === Test92 X
    -- opt on an enum field is rejected at mode creation.
    invalid $ \_ -> modes [Test91 {foo = enum [X &= help "pick X (default)"] &= opt "X"}]
-- share common fields in the help message
-- Test10: mode-specific help (-?=one) lists only the fields common to all
-- modes, so the shared food flag shows but the Test102-only bard does not.
data Test10 = Test101 {food :: Int}
            | Test102 {food :: Int, bard :: Int}
              deriving (Eq,Show,Data,Typeable)
mode10 = cmdArgsMode $ modes [Test101 def, Test102 def def]
test10 = do
    let Tester{..} = tester "Test10" mode10
    isHelp ["-?=one"] ["  -f --food=INT"]
    isHelpNot ["-?=one"] ["  -b --bard=INT"]
-- test for GHC over-optimising
-- Test11: two constructors with identical field annotations; checks GHC does
-- not share the annotated thunks between them (which would confuse cmdargs).
data Test11 = Test11A {test111 :: String}
            | Test11B {test111 :: String}
              deriving (Eq,Show,Data,Typeable)
test11A = Test11A { test111 = def &= argPos 0 }
test11B = Test11B { test111 = def &= argPos 0 }
mode11 = cmdArgsMode $ modes [test11A, test11B]
mode11_ = cmdArgsMode_ $ modes_
    [record Test11A{} [test111 := def += argPos 0]
    ,record Test11B{} [test111 := def += argPos 0]]
test11 = do
    let Tester{..} = testers "Test11" [mode11,mode11_]
    fails []
    ["test11a","test"] === Test11A "test"
    ["test11b","test"] === Test11B "test"
-- #351, check you can add name annotations to modes
-- Test12: a mode renamed with name "check"; with explicit the automatic
-- constructor-derived name (test12a) disappears and prefixes of remaining
-- mode names become unambiguous again.
data Test12 = Test12A | Test12B deriving (Eq,Show,Data,Typeable)
mode12 = cmdArgsMode $ modes [Test12A &= name "check", Test12B]
mode12_ = cmdArgsMode $ modes [Test12A &= name "check" &= explicit, Test12B]
test12 = do
    let Tester{..} = tester "Test12" mode12
    fails []
    ["test12a"] === Test12A
    ["check"] === Test12A
    ["test12b"] === Test12B
    fails ["t"]
    -- Shadowing rebind: from here on the tester uses mode12_.
    let Tester{..} = tester "Test12" mode12_
    fails []
    fails ["test12a"]
    ["check"] === Test12A
    ["test12b"] === Test12B
    ["t"] === Test12B
-- the ignore annotation and versionArg [summary]
-- Test13: ignored fields/modes are invisible to the parser; custom version
-- text replaces the summary for --version.
data Test13 = Test13A {foo13 :: Int, bar13 :: Either Int Int}
            | Test13B {foo13 :: Int}
            | Test13C {foo13 :: Int}
              deriving (Eq,Show,Data,Typeable)
mode13 = cmdArgsMode $ modes [Test13A 1 (Left 1 &= ignore), Test13B 1 &= ignore, Test13C{}]
    &= versionArg [summary "Version text here"]
    &= summary "Help text here"
test13 = do
    let Tester{..} = tester "Test13" mode13
    fails ["test13b"]
    -- note: a single argv entry containing a space -- it fails either way.
    fails ["test13a --bar13=1"]
    ["test13a","--foo13=13"] === Test13A 13 (Left 1)
    ["test13c","--foo13=13"] === Test13C 13
    isHelp ["--help"] ["Help text here"]
    isVersion ["--version"] "Version text here"
    -- The summary carries no version number, so --numeric-version fails.
    fails ["--numeric-version"]
-- check a list becomes modes not an enum
-- Test14: a list of nullary constructors produces sub-modes (positional
-- names), not enum flags.
data Test14 = Test14A | Test14B | Test14C deriving (Eq,Show,Data,Typeable)
mode14 = cmdArgsMode $ modes [Test14A, Test14B, Test14C]
test14 = do
    let Tester{..} = tester "Test14" mode14
    fails []
    ["test14a"] === Test14A
    fails ["--test14a"]
-- custom help flags
-- Test15: a user field that steals the name "help", with the built-in help
-- flag renamed/explicit, version disabled, and verbosity remapped.
data Test15 = Test15 {test15a :: Bool} deriving (Eq,Show,Data,Typeable)
mode15 = cmdArgsMode $ Test15 (False &= name "help")
    &= helpArg [groupname "GROUP", name "h", name "nohelp", explicit, help "whatever\nstuff"] &= versionArg [ignore]
    &= verbosityArgs [ignore] [explicit,name "silent"]
$(cmdArgsQuote [d|
    mode15_ = cmdArgsMode# $ Test15 (False &=# name "help")
        &=# helpArg [groupname "GROUP", name "h", name "nohelp", explicit, help "whatever\nstuff"] &=# versionArg [ignore]
        &=# verbosityArgs [ignore] [explicit,name "silent"]
    |])
test15 = do
    let Tester{..} = testers "Test15" [mode15,mode15_]
    -- Without the helpArg override, "help" clashes with the built-in flag.
    invalid $ \_ -> Test15 (False &= name "help")
    ["--help"] === Test15 True
    ["-t"] === Test15 True
    fails ["-?"]
    isHelp ["--nohelp"] ["  -h --nohelp  whatever"]
    isHelp ["-h"] []
    isHelp ["-h"] ["GROUP:"]
    fails ["--version"]
    fails ["--numeric-version"]
    fails ["--verbose"]
    fails ["--quiet"]
    isVerbosity ["--help","--silent"] Quiet
-- check newtype support
-- Test16: newtype-wrapped fields parse like their underlying type, and a
-- trailing version in the summary feeds --numeric-version.
newtype MyInt = MyInt Int deriving (Eq,Show,Data,Typeable)
data Test16 = Test16 {test16a :: MyInt, test16b :: [MyInt]} deriving (Eq,Show,Data,Typeable)
mode16 = cmdArgsMode $ Test16 (MyInt 12) [] &= summary "The Glorious Glasgow Haskell Compilation System, version 7.6.3"
test16 = do
    let Tester{..} = tester "Test16" mode16
    [] === Test16 (MyInt 12) []
    isVersion ["--numeric-version"] "7.6.3"
    fails ["--test16a"]
    ["--test16a=5"] === Test16 (MyInt 5) []
    ["--test16b=5","--test16b=82"] === Test16 (MyInt 12) [MyInt 5, MyInt 82]
-- #552, @ directives not expanded after -- symbols
-- not actually checked because this path doesn't go through processArgs
data Test17 = Test17 {test17_ :: [String]} deriving (Eq,Show,Data,Typeable)
mode17 = cmdArgsMode $ Test17 ([] &= args) &= noAtExpand &= summary "bzip2 3.5-windows version"
test17 = do
    let Tester{..} = tester "Test17" mode17
    [] === Test17 []
    ["test","of","this"] === Test17 ["test","of","this"]
    -- With noAtExpand, @foo after -- is kept literally.
    ["test","--","@foo"] === Test17 ["test","@foo"]
    isVersion ["--numeric-version"] "3.5-windows"
-- Test18: an enum over list values, where each flag occurrence appends.
data Debuggable = This | That deriving (Eq,Show,Data,Typeable)
data Test18 = Test18 {test18_ :: [Debuggable]} deriving (Eq,Show,Data,Typeable)
mode18 = cmdArgsMode $ Test18 $ enum [[] &= ignore, [This] &= name "debug-this", [That] &= name "debug-that"]
test18 = do
    let Tester{..} = tester "Test18" mode18
    [] === Test18 []
    ["--debug-this","--debug-that","--debug-this"] === Test18 [This,That,This]
-- #610, check performance for long lists (took ~20s before)
data Test19 = Test19 {test19_ :: [String]} deriving (Eq,Show,Data,Typeable)
mode19 = cmdArgsMode $ Test19 ([] &= args)
test19 = do
    let Tester{..} = tester "Test19" mode19
    let args = map show [1..1000]
    args === Test19 args
-- #615, newtype wrappers of lists/Maybe should accumulate properly
newtype Test20A = Test20A [String] deriving (Eq,Show,Data,Typeable)
data Test20 = Test20 {test20_ :: Test20A} deriving (Eq,Show,Data,Typeable)
mode20 = cmdArgsMode $ Test20 (Test20A [] &= args)
test20 = do
    let Tester{..} = tester "Test20" mode20
    ["a","b","c"] === Test20 (Test20A ["a","b","c"])
-- #626, don't reverse values too much
-- Test21: non-empty defaults must come back in their original order.
newtype Test21A = Test21A [String] deriving (Eq,Show,Data,Typeable)
data Test21 = Test21 {test21A :: Test21A, test21B :: [String], test21C :: [Int]} deriving (Eq,Show,Data,Typeable)
mode21 = cmdArgsMode $ Test21 (Test21A ["a","b","c"]) ["A","B","C"] [1,2,3]
test21 = do
    let Tester{..} = tester "Test21" mode21
    [] === Test21 (Test21A ["a","b","c"]) ["A","B","C"] [1,2,3]
-- #10, don't break elm-server
-- Test22: a realistic elm-server configuration with fully custom help and
-- version flags (the built-in -? is disabled by the explicit helpArg).
data Test22 = Test22 {port :: Int, runtime :: Maybe FilePath} deriving (Data,Typeable,Show,Eq)
mode22 = cmdArgsMode $ Test22
  { port = 8000 &= help "set the port of the server"
  , runtime = Nothing &= typFile
              &= help "Specify a custom location for Elm's runtime system."
  } &= help "Quickly reload Elm projects in your browser. Just refresh to recompile.\n\
            \It serves static files and freshly recompiled Elm files."
    &= helpArg [explicit, name "help", name "h"]
    &= versionArg [ explicit, name "version", name "v"
                  , summary "0.12.0.1"
                  ]
    &= summary "Elm Server 0.11.0.1, (c) Evan Czaplicki 2011-2014"
test22 = do
    let Tester{..} = tester "Test22" mode22
    [] === Test22 8000 Nothing
    isVersion ["-v"] "0.12.0.1"
    isVersion ["--version"] "0.12.0.1"
    isVersion ["--numeric-version"] "0.12.0.1"
    isHelp ["--help"] ["Elm Server 0.11.0.1, (c) Evan Czaplicki 2011-2014"]
    isHelp ["--h"] ["Elm Server 0.11.0.1, (c) Evan Czaplicki 2011-2014"]
    fails ["-?"]
    ["--port=20"] === Test22 20 Nothing
    ["--runtime=20"] === Test22 8000 (Just "20")
    -- No args field, so positional arguments are rejected.
    fails ["bob"]
-- # 24, doesn't work with Ratio
-- Test23: Ratio fields parse from a comma-separated numerator,denominator.
data Test23 = Test23 {test23A :: Ratio Int} deriving (Show, Data, Typeable, Eq)
mode23 = cmdArgsMode $ Test23 {test23A = 4 % 7 }
test23 = do
    let Tester{..} = tester "Test23" mode23
    [] === Test23 (4 % 7)
    ["--test23=1,6"] === Test23 (1 % 6)
-- For some reason, these must be at the end, otherwise the Template Haskell
-- stage restriction kicks in.
-- NOTE: test17 is deliberately absent from this chain (see the comment above
-- Test17: that path doesn't go through processArgs), although mode17 still
-- appears in 'demos' below.
test = test1 >> test2 >> test3 >> test4 >> test5 >> test6 >> test7 >> test8 >> test9 >> test10 >>
       test11 >> test12 >> test13 >> test14 >> test15 >> test16 >> test18 >> test19 >> test20 >>
       test21 >> test22 >> test23
-- All modes, each tagged with a numbered help string for the demo driver.
demos = zipWith f [1..]
    [toDemo mode1, toDemo mode2, toDemo mode3, toDemo mode4, toDemo mode5, toDemo mode6
    ,toDemo mode7, toDemo mode8, toDemo mode9, toDemo mode10, toDemo mode11, toDemo mode12
    ,toDemo mode13, toDemo mode14, toDemo mode15, toDemo mode16, toDemo mode17, toDemo mode18
    ,toDemo mode19, toDemo mode20, toDemo mode21, toDemo mode22, toDemo mode23]
    where f i x = x{modeHelp = "Testing various corner cases (" ++ show i ++ ")"}
| ozgurakgun/cmdargs | System/Console/CmdArgs/Test/Implicit/Tests.hs | bsd-3-clause | 14,872 | 0 | 25 | 3,241 | 5,570 | 2,926 | 2,644 | 295 | 1 |
{-| Contains all utilities related to markdown processing
-}
module Dhall.Docs.Markdown
( MarkdownParseError(..)
, markdownToHtml
) where
import Data.Text (Text)
import Lucid
import Path (File, Path, Rel)
import Text.MMark (MMarkErr)
import Text.Megaparsec (ParseErrorBundle (..))
import qualified Path
import qualified Text.MMark as MMark
-- | Wrapper around `MMarkErr` errors, keeping the full megaparsec error
-- bundle so callers can render precise parse diagnostics.
newtype MarkdownParseError = MarkdownParseError
    { unwrap :: ParseErrorBundle Text MMarkErr
    }
{-| Render text that may contain markdown as HTML.
    A parse failure yields the megaparsec error bundle wrapped in
    'MarkdownParseError' instead of HTML.
-}
markdownToHtml
    :: Path Rel File -- ^ Used by `Mmark.parse` for error messages
    -> Text          -- ^ Text to parse
    -> Either MarkdownParseError (Html ())
markdownToHtml relFile contents =
    either (Left . MarkdownParseError) (Right . MMark.render) parseResult
  where
    parseResult = MMark.parse (Path.fromRelFile relFile) contents
| Gabriel439/Haskell-Dhall-Library | dhall-docs/src/Dhall/Docs/Markdown.hs | bsd-3-clause | 1,044 | 0 | 10 | 228 | 203 | 117 | 86 | 20 | 2 |
{-
This file is part of funsat.
funsat is free software: it is released under the BSD3 open source license.
You can find details of this license in the file LICENSE at the root of the
source tree.
Copyright 2008 Denis Bueno
-}
-- | Generates and checks a resolution proof of `Funsat.Types.Unsat' from a
-- resolution trace of a SAT solver (`Funsat.Solver.solve' will generate this
-- trace). As a side effect of this process an /unsatisfiable core/ is
-- generated from the resolution trace. This core is a (hopefully small) subset
-- of the input clauses which is still unsatisfiable. Intuitively, it a concise
-- reason why the problem is unsatisfiable.
--
-- The resolution trace checker is based on the implementation from the paper
-- ''Validating SAT Solvers Using an Independent Resolution-Based Checker:
-- Practical Implementations and Other Applications'' by Lintao Zhang and Sharad
-- Malik. Unsatisfiable cores are discussed in the paper ''Extracting Small
-- Unsatisfiable Cores from Unsatisfiable Boolean Formula'' by Zhang and Malik.
--
--
module Funsat.Resolution
( -- * Interface
genUnsatCore
, checkDepthFirst
-- * Data Types
, ResolutionTrace(..)
, initResolutionTrace
, ResolutionError(..)
, UnsatisfiableCore )
where
import Control.Monad.Error
import Control.Monad.Reader
import Control.Monad.State.Strict
import Data.IntSet( IntSet )
import Data.List( nub )
import Data.Map( Map )
import qualified Data.IntSet as IntSet
import qualified Data.Map as Map
import Funsat.Types
import Funsat.Utils.Internal( isSingle, getUnit, isFalseUnder )
-- IDs = Ints
-- Lits = Lits
-- | A resolution trace records how the SAT solver proved the original CNF
-- formula unsatisfiable.  The checker replays it by rebuilding each derived
-- clause from its recorded resolution sources.
data ResolutionTrace = ResolutionTrace
    { traceFinalClauseId :: ClauseId
      -- ^ The id of the last, conflicting clause in the solving process.
    , traceFinalAssignment :: IAssignment
      -- ^ Final assignment.
      --
      -- /Precondition/: All variables assigned at decision level zero.
    , traceSources :: Map ClauseId [ClauseId]
      -- ^ /Invariant/: Each id has at least one source (otherwise that id
      -- should not even have a mapping).
      --
      -- /Invariant/: Should be ordered topologically backward (?) from each
      -- conflict clause.  (IOW, record each clause id as its encountered when
      -- generating the conflict clause.)
    , traceOriginalClauses :: Map ClauseId Clause
      -- ^ Original clauses of the CNF input formula.
    , traceAntecedents :: Map Var ClauseId }
      deriving (Show)
-- | Build a minimal trace from the final conflict clause id and the final
-- assignment; callers fill in sources, original clauses and antecedents.
initResolutionTrace :: ClauseId -> IAssignment -> ResolutionTrace
initResolutionTrace finalClauseId finalAssignment = ResolutionTrace
    { traceFinalClauseId = finalClauseId
    , traceFinalAssignment = finalAssignment
    , traceSources = Map.empty
    , traceOriginalClauses = Map.empty
    , traceAntecedents = Map.empty }
-- | A type indicating an error in the checking process.  Assuming this
-- checker's code is correct, such an error indicates a bug in the SAT solver.
data ResolutionError =
          ResolveError Var Clause Clause
          -- ^ Indicates that the clauses do not properly resolve on the
          -- variable.
        | CannotResolve [Var] Clause Clause
          -- ^ Indicates that the clauses do not have complementary variables or
          -- have too many.  The complementary variables (if any) are in the
          -- list.
        | AntecedentNotUnit Clause
          -- ^ Indicates that the constructed antecedent clause not unit under
          -- `traceFinalAssignment'.
        | AntecedentImplication (Clause, Lit) Var
          -- ^ Indicates that in the clause-lit pair, the unit literal of clause
          -- is the literal, but it ought to be the variable.
        | AntecedentMissing Var
          -- ^ Indicates that the variable has no antecedent mapping, in which
          -- case it should never have been assigned/encountered in the first
          -- place.
        | EmptySource ClauseId
          -- ^ Indicates that the clause id has an entry in `traceSources' but
          -- no resolution sources.
        | OrphanSource ClauseId
          -- ^ Indicates that the clause id is referenced but has no entry in
          -- `traceSources'.
          deriving Show
-- Empty instance: errors are always thrown explicitly, so the default
-- noMsg/strMsg implementations are never used.
instance Error ResolutionError where -- Just for the Error monad.
-- checkDepthFirstFix :: (CNF -> (Solution, Maybe ResolutionTrace))
-- -> Solution
-- -> ResolutionTrace
-- -> Either ResolutionError UnsatisfiableCore
-- checkDepthFirstFix solver resTrace =
-- case checkDepthFirst resTrace of
-- Left err -> err
-- Right ucore ->
-- let (sol, rt) solver (rescaleIntoCNF ucore)
-- | Check the given resolution trace of a (putatively) unsatisfiable formula.
-- If the result is `ResolutionError', the proof trace has failed to establish
-- the unsatisfiability of the formula.  Otherwise, an unsatisfiable core of
-- clauses is returned.
--
-- This function simply calls `checkDepthFirst'.
genUnsatCore :: ResolutionTrace -> Either ResolutionError UnsatisfiableCore
genUnsatCore = checkDepthFirst
-- | The depth-first method.  Runs the checker monad stack
-- (ErrorT over StateT over Reader) against the trace, starting from the
-- final conflict clause, then maps the collected internal clause ids back
-- to the original input clauses.
checkDepthFirst :: ResolutionTrace -> Either ResolutionError UnsatisfiableCore
checkDepthFirst resTrace =
      -- Turn internal unsat core into external.
    fmap (map findClause . IntSet.toList)
      -- Check and create unsat core.
    . (`runReader` resTrace)
    . (`evalStateT` ResState { clauseIdMap = traceOriginalClauses resTrace
                             , unsatCore = IntSet.empty })
    . runErrorT
    $ recursiveBuild (traceFinalClauseId resTrace)
      >>= checkDFClause
  where
    -- Every id in the core must be an original clause; anything else is a
    -- checker invariant violation, hence 'error' rather than a Left.
    findClause clauseId =
        Map.findWithDefault
          (error $ "checkDFClause: unoriginal clause id: " ++ show clauseId)
          clauseId (traceOriginalClauses resTrace)
-- | Unsatisfiable cores are not unique.  A core is a subset of the original
-- input clauses that is itself unsatisfiable.
type UnsatisfiableCore = [Clause]
------------------------------------------------------------------------------
-- MAIN INTERNALS
------------------------------------------------------------------------------
-- Checker state: clauses reconstructed so far (seeded with the original
-- clauses) and the set of original clause ids used by the proof.
data ResState = ResState
    { clauseIdMap :: Map ClauseId Clause
    , unsatCore :: UnsatCoreIntSet }
type UnsatCoreIntSet = IntSet -- set of ClauseIds
-- Checker monad: short-circuiting errors over state over read-only trace.
type ResM = ErrorT ResolutionError (StateT ResState (Reader ResolutionTrace))
-- Recursively resolve the (final, initially) clause with antecedents until
-- the empty clause is created.  Reaching the empty clause completes the
-- refutation, at which point the accumulated core is returned.
checkDFClause :: Clause -> ResM UnsatCoreIntSet
checkDFClause clause =
    if null clause
    then gets unsatCore
    else do l <- chooseLiteral clause
            let v = var l
            -- Rebuild the reason clause for v and check it really is the
            -- unit antecedent of v before resolving on it.
            anteClause <- recursiveBuild =<< getAntecedentId v
            checkAnteClause v anteClause
            resClause <- resolve (Just v) clause anteClause
            checkDFClause resClause
-- Reconstruct the clause for an id: original clauses are returned directly
-- (and recorded in the unsat core); derived clauses are rebuilt by resolving
-- their recorded sources.
recursiveBuild :: ClauseId -> ResM Clause
recursiveBuild clauseId = do
  maybeClause <- getClause
  case maybeClause of
    Just clause -> return clause
    Nothing -> do
        sourcesMap <- asks traceSources
        case Map.lookup clauseId sourcesMap of
          Nothing -> throwError (OrphanSource clauseId)
          Just [] -> throwError (EmptySource clauseId)
          Just (firstSourceId:ids) -> recursiveBuildIds clauseId firstSourceId ids
  where
    -- If clause is an *original* clause, stash it as part of the UNSAT core.
    getClause = do
      origMap <- asks traceOriginalClauses
      case Map.lookup clauseId origMap of
        Just origClause -> withClauseInCore $ return (Just origClause)
        Nothing -> Map.lookup clauseId `liftM` gets clauseIdMap
    -- Record this id in the unsat core before producing the result.
    withClauseInCore =
        (modify (\s -> s{ unsatCore = IntSet.insert clauseId (unsatCore s) }) >>)
-- Rebuild a derived clause by resolving its first source with each further
-- source in turn, then cache the result under the derived clause's id.
recursiveBuildIds :: ClauseId -> ClauseId -> [ClauseId] -> ResM Clause
recursiveBuildIds clauseId firstSourceId sourceIds = do
    rc <- recursiveBuild firstSourceId -- recursive_build(id)
    clause <- foldM buildAndResolve rc sourceIds
    storeClauseId clauseId clause
    return clause
  where
    -- This is the body of the while loop inside the recursiveBuild
    -- procedure in the paper.
    buildAndResolve :: Clause -> ClauseId -> ResM (Clause)
    buildAndResolve clause1 clauseId =
        recursiveBuild clauseId >>= resolve Nothing clause1
-- Cache the clause rebuilt for an id so later lookups can reuse it.
storeClauseId :: ClauseId -> Clause -> ResM ()
storeClauseId clauseId clause = modify addMapping
  where
    addMapping s = s{ clauseIdMap = Map.insert clauseId clause (clauseIdMap s) }
------------------------------------------------------------------------------
-- HELPERS
------------------------------------------------------------------------------
-- | Resolve both clauses on the given variable, and throw a resolution error
-- if anything is amiss.  Specifically, it checks that there is exactly one
-- occurrence of a literal with the given variable (if variable given) in each
-- clause and they are opposite in polarity.
--
-- If no variable specified, finds resolving variable, and ensures there's
-- only one such variable.
resolve :: Maybe Var -> Clause -> Clause -> ResM Clause
resolve maybeV c1 c2 =
    -- Find complementary literals:
    -- (nub/elem are quadratic; presumably fine because clauses are small --
    -- TODO confirm for very large learned clauses.)
    case filter ((`elem` c2) . negate) c1 of
      [l] -> case maybeV of
               Nothing -> resolveVar (var l)
               Just v -> if v == var l
                         then resolveVar v
                         else throwError $ ResolveError v c1 c2
      vs -> throwError $ CannotResolve (nub . map var $ vs) c1 c2
  where
    -- The resolvent: union of both clauses minus both polarities of v.
    resolveVar v = return . nub $ deleteVar v c1 ++ deleteVar v c2
    deleteVar v c = c `without` lit v `without` negate (lit v)
    lit (V i) = L i
-- | Look up the antecedent (reason) clause id for a variable.  Every
-- variable the checker encounters must have a recorded reason; a missing
-- entry is reported as 'AntecedentMissing'.
getAntecedentId :: Var -> ResM ClauseId
getAntecedentId v = do
    anteMap <- asks traceAntecedents
    maybe (throwError (AntecedentMissing v)) return (Map.lookup v anteMap)
-- Pick the literal to resolve on (simply the head).  Callers only invoke
-- this on non-empty clauses ('checkDFClause' tests 'null' first), so the
-- error case is unreachable in correct use.
chooseLiteral :: Clause -> ResM Lit
chooseLiteral (l:_) = return l
chooseLiteral _ = error "chooseLiteral: empty clause"
-- | Validate an antecedent clause: it must be unit under the final
-- assignment, and its unit literal must be over the expected variable @v@;
-- otherwise the corresponding 'ResolutionError' is thrown.
checkAnteClause :: Var -> Clause -> ResM ()
checkAnteClause v anteClause = do
    a <- asks traceFinalAssignment
    -- Idiomatic `unless` instead of `when (not ...)`; `unless` is in scope
    -- via the mtl modules' re-export of Control.Monad.
    unless (anteClause `hasUnitUnder` a)
      (throwError $ AntecedentNotUnit anteClause)
    let unitLit = getUnit anteClause a
    unless (var unitLit == v)
      (throwError $ AntecedentImplication (anteClause, unitLit) v)
  where
    -- Unit under m: exactly one literal is not falsified by m.
    hasUnitUnder c m = isSingle (filter (not . (`isFalseUnder` m)) c)
| dbueno/funsat | src/Funsat/Resolution.hs | bsd-3-clause | 10,669 | 0 | 17 | 2,536 | 1,668 | 907 | 761 | 133 | 5 |
-- | ACL2 generation.
{-# LANGUAGE GADTs #-}
module Language.GIGL.ACL2
( SExpr (..)
, acl2
) where
import Data.List (nub)
import Data.Maybe (fromJust)
import MonadLib
import Language.GIGL
-- | ACL2 generation.  Elaborates the GIGL program (with initial value @a@)
-- and emits the resulting ACL2 s-expressions under the given function name.
acl2 :: String -> a -> GIGL a () () -> [SExpr]
acl2 name a b = acl2' name $ snd $ elaborate a b
-- Emit the ACL2 forms for an elaborated program: a set-ignore-ok directive
-- followed by a single defun of the whole program body over "vars-in".
acl2' :: String -> Program () -> [SExpr]
acl2' name p =
  [ SA [SV "set-ignore-ok", SV ":warn"]
  --, SA [SV "defun", SV $ name ++ "-init", SA [SV "vars-in"], acl2SExpr $ initialConditions $ variables p]
  , SA [SV "defun", SV name, SA [SV "vars-in"], acl2SExpr $ letRewrite (variables p) $ statement p]
  ]
-- Rewrite a statement into one expression: project the program variables out
-- of the input tuple, thread the statement as a chain of Let bindings (built
-- by running 'stmt' in the ACL2 state monad), and wrap the output tuple.
letRewrite :: [String] -> Stmt () -> E Untyped
letRewrite vars s = inputProject vars $ body $ outputTuple vars
  where
  body :: E Untyped -> E Untyped
  ((), (_, _, body)) = runId $ runStateT (0, zip vars vars, id) $ stmt s >> bindOutputs vars
-- | Initial condition relation.
{-
initialConditions :: [(String, Maybe Value)] -> E Bool
initialConditions vars = inputProject (fst $ unzip vars) $ foldl (&&&) true [ init (Var name .==) value | (name, Just value) <- vars ]
where
init :: (E Untyped -> E Bool) -> Value -> E Bool
init f value = case value of
VBool a -> f $ Untyped (Const a)
VWord64 a -> f $ Untyped (Const a)
VPair a b -> init (\ a -> init (\ b -> f $ Untyped $ Pair a b) b) a
-}
-- | Variable projection from input tuple.  Binds each named variable to the
-- corresponding component of the right-nested pair "vars-in" (Snd chains to
-- reach position i, Fst to take the element -- except the final position,
-- which is the remaining pair tail itself).
inputProject :: [String] -> E a -> E a
inputProject vars = input $ zip [0 ..] vars
  where
  input :: [(Int, String)] -> E a -> E a
  input a = case a of
    [] -> id
    (i, v) : rest -> Let v (extract i) . input rest
  extract :: Int -> E a
  extract i
    | i == length vars - 1 = extract' i
    | otherwise = Fst $ extract' i
  extract' :: Int -> E a
  extract' i
    | i <= 0 = Var "vars-in"
    | otherwise = Snd $ extract' $ i - 1
-- | Creates an untyped tuple for the output of variables, as a right-nested
-- chain of pairs.  Requires at least two variables (a single variable would
-- not form a pair).
outputTuple :: [String] -> E Untyped
outputTuple vars = case vars of
  [a, b] -> Untyped $ Pair (Var a) (Var b)
  a : b -> Untyped $ Pair (Var a) (outputTuple b)
  _ -> error "Expecting at least 2 variables."
-- An s-expression: either an atom (SV) or an application/list (SA).
data SExpr
  = SV String
  | SA [SExpr]
-- Pretty-printing: atoms on their own line; lists parenthesised with the
-- contents indented two spaces.
instance Show SExpr where
  show a = case a of
    SV a -> a ++ "\n"
    SA args -> "( " ++ indent (concatMap show args) ++ ")\n"
      where
      indent = drop 2 . unlines . map ("  " ++) . lines
-- Translate an expression into an ACL2 s-expression.  Consecutive 'Let's are
-- flattened into a single let*, and nested And/Or are flattened into
-- variadic and/or forms, dropping constant True/False identity operands.
acl2SExpr :: E a -> SExpr
acl2SExpr a = case a of
  Var a -> SV a
  Index _ _ -> error "Array Index not supported."
  Let _ _ _ -> SA [SV "let*", SA lets, b'] where (lets, b') = combineLets a
  Untyped a -> f a
  Pair a b -> f2 "cons" a b
  Fst a -> f1 "car" a
  Snd a -> f1 "cdr" a
  Const a -> acl2Value $ value a
  Add a b -> f2 "+" a b
  Not a -> f1 "not" a
  And _ _ -> SA $ SV "and" : ands a
  Or _ _ -> SA $ SV "or" : ors a
  Imply a b -> f2 "implies" a b
  Equiv a b -> f2 "equal" a b
  Eq a b -> f2 "equal" a b
  Mux a b c -> f3 "if" a b c
  where
  f :: E a -> SExpr
  f = acl2SExpr
  -- fN: apply an ACL2 operator name to N translated arguments.
  f1 :: String -> E a -> SExpr
  f1 a b = SA [SV a, f b]
  f2 :: String -> E a -> E b -> SExpr
  f2 a b c = SA [SV a, f b, f c]
  f3 :: String -> E a -> E b -> E c -> SExpr
  f3 a b c d = SA [SV a, f b, f c, f d]
  -- Collect a chain of Lets into one binding list plus the final body.
  combineLets :: E a -> ([SExpr], SExpr)
  combineLets a = case a of
    Let v a b -> (SA [SV v, f a] : lets, b')
      where
      (lets, b') = combineLets b
    a -> ([], f a)
  -- Flatten nested conjunctions, discarding constant-True operands.
  ands :: E Bool -> [SExpr]
  ands a = case a of
    And (Const True) a -> ands a
    And a (Const True) -> ands a
    And a b -> ands a ++ ands b
    a -> [f a]
  -- Flatten nested disjunctions, discarding constant-False operands.
  ors :: E Bool -> [SExpr]
  ors a = case a of
    Or (Const False) a -> ors a
    Or a (Const False) -> ors a
    Or a b -> ors a ++ ors b
    a -> [f a]
-- Translate a constant value into its ACL2 form: booleans become the atoms
-- "true"/"nil", words print as decimal atoms, pairs become cons cells.
acl2Value :: Value -> SExpr
acl2Value val = case val of
  VBool b -> SV (if b then "true" else "nil")
  VWord64 w -> SV (show w)
  VPair x y -> SA [SV "cons", acl2Value x, acl2Value y]
-- Generator monad state: (fresh-name counter, environment, pending Let
-- wrapper to apply around the final expression).
type ACL2 = StateT (Int, [(String, String)], E Untyped -> E Untyped) Id -- Id for genvars and environment.
-- Map from var names to new var names.
-- Apply a partial variable renaming across an expression tree; variables the
-- map does not mention are left unchanged.
renameVars :: (String -> Maybe String) -> E a -> E a
renameVars rename a = case a of
  Var a -> case rename a of { Nothing -> Var a; Just a -> Var a }
  Index a b -> Index (f a) (f b)
  Let a b c -> Let a (f b) (f c)
  Untyped a -> Untyped (f a)
  Pair a b -> Pair (f a) (f b)
  Fst a -> Fst (f a)
  Snd a -> Snd (f a)
  Const a -> Const a
  Add a b -> Add (f a) (f b)
  Not a -> Not (f a)
  And a b -> And (f a) (f b)
  Or a b -> Or (f a) (f b)
  Imply a b -> Imply (f a) (f b)
  Equiv a b -> Equiv (f a) (f b)
  Eq a b -> Eq (f a) (f b)
  Mux a b c -> Mux (f a) (f b) (f c)
  where
  f :: E a -> E a
  f = renameVars rename
-- Allocate a fresh internal variable name ("_0", "_1", ...), bumping the
-- counter; environment and Let wrapper are left untouched.
newVar :: ACL2 String
newVar = do
  (n, e, f) <- get
  set (n + 1, e, f)
  return $ "_" ++ show n
-- Bind expression @a@ (with its variables renamed through the current
-- environment) to a fresh name via Let; when @var@ is given, the environment
-- is extended so later reads of @var@ see the fresh binding.
newLet :: Maybe String -> E a -> ACL2 (E a)
newLet var a = do
  v <- newVar
  (n, e, f) <- get
  set (n, case var of { Nothing -> e; Just v' -> (v', v) : e }, f . Let v (renameVars (flip lookup e) a))
  return $ Var v
-- Convert a statement into a chain of Let bindings threaded through the
-- state.  'If' runs both branches against the same initial environment and
-- then muxes every variable modified in either branch on the condition.
stmt :: Stmt () -> ACL2 ()
stmt a = case a of
  Comment _ -> return ()
  Null -> return ()
  Seq a b -> stmt a >> stmt b
  Intrinsic () -> return ()
  Assign (Var v) e -> do
    newLet (Just v) e
    return ()
  Assign _ _ -> error "Unexpected LHS of assignment (non-variable)."
  Call _ -> error "Call statements not supported in ACL2 generation."
  If pred a b -> do
    pred <- newLet Nothing pred
    -- Run each branch with a cleared Let wrapper but the shared entry
    -- environment e0; recombine the wrappers afterwards.
    (i0, e0, f0) <- get
    set (i0, e0, id)
    stmt a
    (i1, e1, f1) <- get
    set (i1, e0, id)
    stmt b
    (i2, e2, f2) <- get
    set (i2, e0, f0 . f1 . f2)
    -- Variables bound since e0 in either branch need a mux on pred.
    let v1 = fst $ unzip $ take (length e1 - length e0) e1
        v2 = fst $ unzip $ take (length e2 - length e0) e2
        varsModified = nub $ v1 ++ v2
        mux' v = newLet (Just v) (mux pred (Var $ fromJust $ lookup v e1) (Var $ fromJust $ lookup v e2))
    mapM_ mux' varsModified
-- Rebind each output variable to its final renamed value, so the output
-- tuple can refer to the variables by their original names.
bindOutputs :: [String] -> ACL2 ()
bindOutputs = mapM_ bindOutput
  where
  bindOutput :: String -> ACL2 ()
  bindOutput var = do
    (n, e, f) <- get
    let v = fromJust $ lookup var e
    set (n, (var, var) : e, f . Let var (Var v))
| tomahawkins/gigl | Language/GIGL/ACL2.hs | bsd-3-clause | 6,318 | 0 | 18 | 2,061 | 2,997 | 1,459 | 1,538 | 159 | 23 |
module Properties where
import HandEvaluator (Evaluator(..))
import CactusKevEvaluator (CactusKev(..), cactusKevEvaluator)
import SimpleEvaluator (NaiveEvaluator(..), naiveEvaluator)
import Card (Card,mkCard,Suit(..),Rank(..))
import Hand (Hand(..), Category(..), mkHand, getGroupedRanks)
import Test.QuickCheck
-- | Suits are drawn uniformly from the four alternatives.
instance Arbitrary Suit where
  arbitrary = elements [Spade,Diamond,Club,Heart]
-- | Ranks are drawn uniformly from Two up to Ace.
instance Arbitrary Rank where
  arbitrary = elements [Two .. Ace]
-- | A card pairs an independently drawn suit and rank (suit drawn first,
-- matching the original generator's draw order).
instance Arbitrary Card where
  arbitrary = arbitrary >>= \suit -> fmap (mkCard suit) arbitrary
-- | Generates five pairwise-distinct cards and assembles them with 'mkHand'.
-- (The original comment here was truncated; presumably mkHand normalises the
-- five cards into a valid Hand -- TODO confirm against Hand's definition.)
instance Arbitrary Hand where
  arbitrary = do
    a <- arbitrary
    -- Each subsequent card is re-drawn until distinct from all earlier ones.
    b <- suchThat arbitrary (/= a)
    c <- suchThat arbitrary (\x -> not $ x `elem` [a,b])
    d <- suchThat arbitrary (\x -> not $ x `elem` [a,b,c])
    e <- suchThat arbitrary (\x -> not $ x `elem` [a,b,c,d])
    return (mkHand (a,b,c,d,e))
-- Does the model implementation (naive) match the cactus kev implementation?
-- Both evaluators must classify every hand into the same category.
prop_modelHandCategory :: Hand -> Bool
prop_modelHandCategory hand =
    getCategory naiveEvaluator hand == getCategory cactusKevEvaluator hand
-- The two evaluators must order any pair of hands identically, even though
-- their raw scores may differ.
prop_modelScoreAgree :: Hand -> Hand -> Bool
prop_modelScoreAgree a b = naiveOrder == cactusOrder
  where
    naiveOrder  = compare (scoreHand naiveEvaluator a) (scoreHand naiveEvaluator b)
    cactusOrder = compare (scoreHand cactusKevEvaluator a) (scoreHand cactusKevEvaluator b)
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE UndecidableInstances #-}
module Math.Space.MetricSpace where
import qualified Math.Metric.Metric as Metric
import Math.Metric.Metric (Metric, MetricCoord)
import Math.Coordinate.Coordinate --(SpaceOf, CoordConversion, AutoConversion, convertCoord)
-- | A space paired with a metric to measure distances within it.
data MetricSpace metric space = MetricSpace { metric :: metric
                                            , space :: space
                                            } deriving (Show)
--distance :: (MetricCoord metric coord
--            , CoordConversion AutoConversion coord a t
--            , CoordConversion AutoConversion coord b t
--            , space ~ SpaceOf a
--            , space ~ SpaceOf b
--            , Metric metric t) =>
--            (MetricSpace metric space) -> a -> b -> Double
-- Delegate to the underlying metric's distance within the stored space.
-- (Signature left inferred; the intended constrained type is commented above.)
distance (MetricSpace metric space) = Metric.distance metric space
--class Distance2 a b where
-- distance2 :: a -> b
--data X a = X a
--instance (MetricCoord metric coord
-- , CoordConversion AutoConversion coord a t
-- , CoordConversion AutoConversion coord b t
-- , space ~ SpaceOf a
-- , space ~ SpaceOf b
-- , Metric metric t
-- , out ~ Double) =>
-- Distance2 (X metric) (space -> a -> b -> out) where
-- distance2 (X metric) = Metric.distance metric
--instance (MetricCoord metric coord
-- , CoordConversion AutoConversion coord a t
-- , CoordConversion AutoConversion coord b t
-- , space ~ SpaceOf a
-- , space ~ SpaceOf b
-- , Metric metric t
-- , out ~ Double) =>
-- Distance2 (MetricSpace metric space) (a -> b -> out) where
-- distance2 = distance | wdanilo/algebraic | src/Math/Space/MetricSpace.hs | bsd-3-clause | 1,856 | 0 | 8 | 595 | 123 | 89 | 34 | 13 | 1 |
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE OverloadedStrings #-}
module Pull where
import Control.Logging (log)
import Control.Monad (when)
import Data.Data
import Data.Function (on)
import Data.List (groupBy, intercalate, isInfixOf,
isPrefixOf, sort)
import Data.Text (pack, unpack)
import Options.Applicative
import Prelude hiding (log)
import System.Directory (doesDirectoryExist)
import System.IO (BufferMode (NoBuffering), hGetContents,
hSetBuffering, stdout)
import System.Process (StdStream (CreatePipe), createProcess,
cwd, proc, std_err, std_out,
waitForProcess)
import Datatypes
import Remote
-- | optparse-applicative parser for the @pull@ command: remote git user,
-- gitolite host and ssh port, an optional repository-name prefix filter,
-- and an optional program to open changed repositories with.
pullOptions :: Parser Cmd
pullOptions = Pull
    <$> (PullOptions
          <$> strOption
                ( long "user"
                <> short 'u'
                <> value "git"
                <> metavar "USER"
                <> help "Git User on remote" )
          <*> strOption
                ( long "host"
                <> short 'h'
                <> value "ob.cs.hm.edu"
                <> metavar "HOST"
                <> help "Host running gitolite" )
          <*> strOption
                ( long "port"
                <> short 'p'
                <> value "8022"
                <> metavar "PORT"
                <> help "sshd listening on PORT" )
          <*> strOption
                ( long "repos"
                <> short 'r'
                <> value ""
                <> metavar "REPOPREFIX"
                <> help "pull only repositories starting with PREFIX" )
          <*> strOption
                ( long "openwith"
                <> short 'o'
                <> value ""
                <> metavar "PROGRAM"
                <> help "open all repos with changes with PROGRAM" )
       )
-- | Outcome of syncing one repository; every constructor carries the
-- repository path.  The derived 'Ord' (declaration order) drives the
-- sort-then-group summarisation in 'pull'.
data PullResponse = Cloned String
                  | NoChanges String
                  | Changes String
                  | Fatal String
                  deriving (Eq, Ord, Data, Show, Typeable)
-- | Extract the repository path carried by any 'PullResponse'.
pullResponseRepo :: PullResponse -> String
pullResponseRepo resp = case resp of
    Cloned r    -> r
    NoChanges r -> r
    Changes r   -> r
    Fatal r     -> r
-- | Fetch the repository list from the remote, sync every repository via
-- 'pullRepo', print a per-outcome summary (and log a detailed one), then
-- optionally open all repositories with changes in a configured program.
pull :: RichConf -> PullOptions -> IO ()
pull richconf opts = do
    repoList <- getRemoteRepoList
                    (pullUser opts)
                    (pullHost opts)
                    (pullPort opts)
                    (pullPrefix opts)
    responses <- mapM (pullRepo opts) repoList
    putStrLn $ '\n' : stats responses
    log $ pack $ longstats responses
    let repoProg = pullOpenWith opts
    -- command-line option wins; otherwise fall back to the config file
    let repoProg' = if null repoProg
                     then maybe "" unpack $ maybeRepoPrg richconf
                     else repoProg
    let changedRepos' = changedRepos $ groups responses
    -- only launch the viewer when a program is set AND something changed
    when (not $ null repoProg' || null changedRepos') $
        (createProcess $ proc repoProg' changedRepos')
            >> return ()
    where
      -- bucket responses by constructor (sort first so groupBy works)
      groups = groupBy (on (==) toConstr) . sort
      groupInfo :: [PullResponse] -> [String]
      groupInfo = map getInfo . groups
      -- one ANSI-coloured count line per outcome bucket
      getInfo :: [PullResponse] -> String
      getInfo group@(Cloned _:_) =
          "\x1b[34mCloned: " ++ show ( length group ) ++ "\x1b[0m"
      getInfo group@(NoChanges _:_) =
          "\x1b[32mUp-to-date: " ++ show ( length group ) ++ "\x1b[0m"
      getInfo group@(Changes _:_) =
          "\x1b[33mChanges: " ++ show ( length group ) ++ "\x1b[0m"
      getInfo group@(Fatal _:_) =
          "\x1b[31mFatal: " ++ show ( length group ) ++ "\x1b[0m"
      stats :: [PullResponse] -> String
      stats responses = intercalate "\n" $ groupInfo responses
      getResponseString :: [PullResponse] -> String
      getResponseString (Cloned _:_) = "Cloned "
      getResponseString (NoChanges _:_) = "NoChanges"
      getResponseString (Changes _:_) = "Changes "
      getResponseString (Fatal _:_) = "Fatal "
      getRepoInfo :: [PullResponse] -> (String, [String])
      getRepoInfo group = (getResponseString group, map pullResponseRepo group)
      format :: (String, [String]) -> String
      format (resp, repos) = resp ++ ": " ++ intercalate "\n " repos
      longstats :: [PullResponse] -> String
      longstats = ('\n':) . intercalate "\n" . map (format . getRepoInfo) . groups
      -- NOTE(review): selection relies on the exact label produced by
      -- getResponseString ("Changes ") — fragile string coupling; a
      -- constructor match would be safer.
      changedRepos = map pullResponseRepo
                   . concat
                   . filter ( (=="Changes ") . getResponseString)
-- | Sync one repository: clone it when it does not yet exist locally,
-- otherwise run @git pull --all@ inside it.  Prints a one-character
-- progress symbol and returns the classified outcome.
pullRepo :: PullOptions -> FilePath -> IO PullResponse
pullRepo opts repo = do
  hSetBuffering stdout NoBuffering
  alreadyExist <- doesDirectoryExist repo
  if not alreadyExist
    then
      do log $ pack $ "Cloning " ++ repo
         -- NOTE(review): hout is bound but never drained here; if git ever
         -- writes enough to stdout the pipe could fill and block — confirm.
         (_, Just hout, Just herr, ph) <- createProcess
             (proc "git" ["clone"
                         , "ssh://" ++ pullUser opts ++ "@" ++ pullHost opts
                           ++ ":" ++ pullPort opts ++ "/" ++ repo , repo ])
             { std_out = CreatePipe
             , std_err = CreatePipe }
         cloneInfo <- hGetContents herr
         log $ pack cloneInfo
         waitForProcess ph
         putChar '+'
         return $ Cloned repo
    else
      do log $ pack $ "Pulling " ++ repo
         (_, Just hout, Just herr, ph) <- createProcess
             (proc "git" ["pull", "--all"])
             { cwd = Just repo
             , std_out = CreatePipe
             , std_err = CreatePipe }
         -- NOTE(review): hGetContents is lazy and waitForProcess runs before
         -- the output is forced; with very large output the child could
         -- block on a full pipe — consider forcing the contents first.
         pullInfo <- hGetContents hout
         pullInfoErr <- hGetContents herr
         log $ pack pullInfoErr
         waitForProcess ph
         -- classify by scraping git's textual output
         let response =
               (if "Already up-to-date" `isInfixOf` pullInfo
                 then NoChanges
                 else if "fatal:" `isInfixOf` pullInfoErr
                       then Fatal
                       else Changes
               ) repo
         putChar $ responseToSymbol response
         return response
  where
    -- progress glyph printed per repository
    responseToSymbol (Cloned _) = '+'
    responseToSymbol (NoChanges _) = '.'
    responseToSymbol (Changes _) = '#'
    responseToSymbol (Fatal _) = '!'
| obcode/hitlab | library/Pull.hs | bsd-3-clause | 6,026 | 0 | 23 | 2,161 | 1,657 | 841 | 816 | 149 | 8 |
{-# LANGUAGE CPP #-}
{-# LANGUAGE FlexibleContexts #-} -- For arbitrary Compose
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE ScopedTypeVariables #-}
module Properties (module Properties) where
import Prelude ()
import Prelude.Compat
import Control.Applicative (Const)
import Data.Aeson (eitherDecode, encode)
import Data.Aeson.Encoding (encodingToLazyByteString)
import Data.Aeson.Internal (IResult(..), formatError, ifromJSON, iparse)
import qualified Data.Aeson.Internal as I
import Data.Aeson.Parser (value)
import Data.Aeson.Types
import Data.DList (DList)
import Data.Functor.Compose (Compose (..))
import Data.HashMap.Strict (HashMap)
import Data.Hashable (Hashable)
import Data.Int (Int8)
import Data.List.NonEmpty (NonEmpty)
import Data.Map (Map)
import Data.Proxy (Proxy)
import Data.Ratio (Ratio)
import Data.Semigroup (Option(..))
import Data.Sequence (Seq)
import Data.Tagged (Tagged)
import Data.Time (Day, DiffTime, LocalTime, NominalDiffTime, TimeOfDay, UTCTime, ZonedTime)
import Data.Version (Version)
import Encoders
import Instances ()
import Numeric.Natural (Natural)
import Test.Framework (Test, testGroup)
import Test.Framework.Providers.QuickCheck2 (testProperty)
import Test.QuickCheck (Arbitrary(..), Property, Testable, (===), (.&&.), counterexample)
import Types
import qualified Data.Attoparsec.Lazy as L
import qualified Data.ByteString.Lazy.Char8 as L
import qualified Data.HashMap.Strict as H
import qualified Data.Map as Map
import qualified Data.Text as T
import qualified Data.Text.Lazy as LT
import qualified Data.UUID.Types as UUID
import qualified Data.Vector as V
-- | Encoding a 'Double': non-finite values render as @null@, finite values
-- round-trip through their textual form.
encodeDouble :: Double -> Double -> Property
encodeDouble num denom
  | isNaN d || isInfinite d = encode d === "null"
  | otherwise               = read (L.unpack (encode d)) === d
  where
    d = num / denom
-- | An 'Integer' encodes exactly as its 'show'n decimal form.
encodeInteger :: Integer -> Property
encodeInteger n = encode n === (L.pack . show) n
-- | Round-trip through the in-memory representation: serialise with
-- @tojson@, parse back with @parsejson@, and require an exact match.
toParseJSON :: (Eq a, Show a) =>
               (Value -> Parser a) -> (a -> Value) -> a -> Property
toParseJSON parsejson tojson x =
    case iparse parsejson (tojson x) of
      IError path msg -> failure "parse" (formatError path msg) x
      ISuccess x'     -> x === x'
-- | Like 'toParseJSON', but for lifted (unary) instances: the lifted
-- parser/serialiser pair is specialised at @Int@ payloads and then reused.
toParseJSON1
    :: (Eq (f Int), Show (f Int))
    => (forall a. LiftParseJSON f a)
    -> (forall a. LiftToJSON f a)
    -> f Int
    -> Property
toParseJSON1 parsejson1 tojson1 = toParseJSON parsejson tojson
  where
    parsejson = parsejson1 parseJSON (listParser parseJSON)
    tojson = tojson1 toJSON (listValue toJSON)
-- | Round-trip via the real wire format: encode to a lazy ByteString,
-- re-parse with the attoparsec 'value' parser, then compare with @eq@.
-- The second argument is ignored; it only mirrors the binary shape used
-- by 'roundTripEq'.
roundTripEnc :: (FromJSON a, ToJSON a, Show a) =>
             (a -> a -> Property) -> a -> a -> Property
roundTripEnc eq _ i =
    case fmap ifromJSON . L.parse value . encode $ i of
      L.Done _ (ISuccess v) -> v `eq` i
      L.Done _ (IError path err) -> failure "fromJSON" (formatError path err) i
      L.Fail _ _ err -> failure "parse" err i
-- | Round-trip through the in-memory 'Value' only (no serialisation),
-- comparing the reparsed value with @eq@.
roundTripNoEnc :: (FromJSON a, ToJSON a, Show a) =>
             (a -> a -> Property) -> a -> a -> Property
roundTripNoEnc eq _ i =
    case ifromJSON (toJSON i) of
      ISuccess v        -> v `eq` i
      IError path err   -> failure "fromJSON" (formatError path err) i
-- | A value must round-trip exactly both through the textual wire format
-- ('roundTripEnc') and through the in-memory 'Value' ('roundTripNoEnc').
roundTripEq :: (Eq a, FromJSON a, ToJSON a, Show a) => a -> a -> Property
roundTripEq x y = roundTripEnc (===) x y .&&. roundTripNoEnc (===) x y
-- We test keys by encoding HashMap and Map with it
-- (the first argument is unused; it only fixes the key type @a@)
roundTripKey
    :: (Ord a, Hashable a, FromJSONKey a, ToJSONKey a, Show a)
    => a -> HashMap a Int -> Map a Int -> Property
roundTripKey _ h m = roundTripEq h h .&&. roundTripEq m m
infix 4 ==~
-- | Approximate-equality property that shows both operands on failure.
(==~) :: (ApproxEq a, Show a) => a -> a -> Property
x ==~ y =
  counterexample (show x ++ " /= " ++ show y) (x =~ y)
-- | @fromJSON . toJSON@ must reproduce the original value exactly.
toFromJSON :: (Arbitrary a, Eq a, FromJSON a, ToJSON a, Show a) => a -> Property
toFromJSON x =
    case ifromJSON (toJSON x) of
      ISuccess x'     -> x === x'
      IError path err -> failure "fromJSON" (formatError path err) x
-- | 'modifyFailure' must prepend the supplied text to the original
-- failure message produced by the parser.
modifyFailureProp :: String -> String -> Bool
modifyFailureProp orig added =
    result == Error (added ++ orig)
  where
    parser = const $ modifyFailure (added ++) $ fail orig
    result :: Result ()
    result = parse parser ()
-- | 'parserThrowError' with an empty path must surface its message verbatim.
parserThrowErrorProp :: String -> Property
parserThrowErrorProp msg =
    result === Error msg
  where
    parser = const $ parserThrowError [] msg
    result :: Result ()
    result = parse parser ()
-- | Tests (also) that we catch the JSONPath and it has elements in the right order.
-- The two annotations added via ('I.<?>') must appear outermost-first,
-- followed by the path the error itself was thrown with.
parserCatchErrorProp :: [String] -> String -> Property
parserCatchErrorProp path msg =
    result === Success ([I.Key "outer", I.Key "inner"] ++ jsonPath, msg)
  where
    parser = parserCatchError outer (curry pure)
    outer = inner I.<?> I.Key "outer"
    inner = parserThrowError jsonPath msg I.<?> I.Key "inner"
    result :: Result (I.JSONPath, String)
    result = parse (const parser) ()
    jsonPath = map (I.Key . T.pack) path
-- | Perform a structural comparison of the results of two encoding
-- methods. Compares decoded values to account for HashMap-driven
-- variation in JSON object key ordering.
-- (The raw 'Encoding' bytes are attached to the counterexample so a
-- failing case shows what was actually produced.)
sameAs :: (a -> Value) -> (a -> Encoding) -> a -> Property
sameAs toVal toEnc v =
  counterexample (show s) $
    eitherDecode s === Right (toVal v)
  where
    s = encodingToLazyByteString (toEnc v)
-- | 'sameAs' for lifted (unary) serialisers, specialised at @Int@:
-- the lifted 'Encoding' must decode to the lifted 'Value'.
sameAs1
    :: (forall a. LiftToJSON f a)
    -> (forall a. LiftToEncoding f a)
    -> f Int
    -> Property
sameAs1 toVal1 toEnc1 v = lhs === rhs
  where
    rhs = Right $ toVal1 toJSON (listValue toJSON) v
    lhs = eitherDecode . encodingToLazyByteString $
        toEnc1 toEncoding (listEncoding toEncoding) v
-- | The monomorphic encoder and the lifted encoder must produce exactly
-- the same 'Encoding' bytes for the same value.
sameAs1Agree
    :: ToJSON a
    => (f a -> Encoding)
    -> (forall b. LiftToEncoding f b)
    -> f a
    -> Property
sameAs1Agree toEnc toEnc1 v = direct === lifted
  where
    -- bytes from the monomorphic encoder
    direct = encodingToLazyByteString (toEnc v)
    -- bytes from the lifted (unary) encoder specialised with toEncoding
    lifted = encodingToLazyByteString (toEnc1 toEncoding (listEncoding toEncoding) v)
-- | A six-way product covering several distinct field shapes.
type P6 = Product6 Int Bool String (Approx Double) (Int, Approx Double) ()
-- | A four-way sum covering several distinct payload shapes.
type S4 = Sum4 Int8 ZonedTime T.Text (Map.Map String Int)
--------------------------------------------------------------------------------
-- Value properties
--------------------------------------------------------------------------------
-- | Add the formatted @Value@ to the printed counterexample when the property
-- fails.
-- (The value is rendered with 'encode' so the counterexample shows JSON.)
checkValue :: Testable a => (Value -> a) -> Value -> Property
checkValue prop v = counterexample (L.unpack (encode v)) (prop v)
-- | Is the value a JSON string?
isString :: Value -> Bool
isString v = case v of
    String _ -> True
    _        -> False
-- | Is the value a two-element array whose first element is a string?
is2ElemArray :: Value -> Bool
is2ElemArray value = case value of
    Array v -> V.length v == 2 && isString (V.head v)
    _       -> False
-- | Is the value an object carrying both a @tag@ and a @contents@ key?
isTaggedObjectValue :: Value -> Bool
isTaggedObjectValue (Object obj) = "tag" `H.member` obj &&
                                   "contents" `H.member` obj
isTaggedObjectValue _ = False
-- | A nullary constructor encoded as a tagged object carries a @tag@
-- key and nothing else.
isNullaryTaggedObject :: Value -> Bool
isNullaryTaggedObject obj = isTaggedObject' obj && isObjectWithSingleField obj
-- | Property wrapper around 'isTaggedObject'' (prints the value on failure).
isTaggedObject :: Value -> Property
isTaggedObject = checkValue isTaggedObject'
-- | Is the value an object with a @tag@ key?
isTaggedObject' :: Value -> Bool
isTaggedObject' (Object obj) = "tag" `H.member` obj
isTaggedObject' _ = False
-- | Is the value an object containing exactly one key?
isObjectWithSingleField :: Value -> Bool
isObjectWithSingleField v = case v of
    Object obj -> H.size obj == 1
    _          -> False
-- | is untaggedValue of EitherTextInt
-- Strings other than @"nonenullary"@ fall through the guard to the final
-- catch-all and yield 'False'; a two-element array is the encoding of the
-- non-nullary constructor's payload.
isUntaggedValueETI :: Value -> Bool
isUntaggedValueETI (String s)
    | s == "nonenullary"   = True
isUntaggedValueETI (Bool _)   = True
isUntaggedValueETI (Number _) = True
isUntaggedValueETI (Array a)  = length a == 2
isUntaggedValueETI _          = False
-- | Property asserting the value is the empty JSON array.
isEmptyArray :: Value -> Property
isEmptyArray = checkValue isEmptyArray'
-- | Is the value the empty JSON array?
isEmptyArray' :: Value -> Bool
isEmptyArray' = (Array mempty ==)
--------------------------------------------------------------------------------
-- | The full property-test tree: raw encoding, round-trips (values and
-- map keys), failure-message plumbing, and agreement between the
-- GHC-generics- and Template-Haskell-derived instances for both the
-- toJSON and toEncoding paths.
-- Fix: the functors group contained two tests both labelled
-- "Identity Char"; the one exercising @I Int@ is now "Identity Int".
tests :: Test
tests = testGroup "properties" [
  testGroup "encode" [
      testProperty "encodeDouble" encodeDouble
    , testProperty "encodeInteger" encodeInteger
    ]
  , testGroup "roundTrip" [
      testProperty "Bool" $ roundTripEq True
    , testProperty "Double" $ roundTripEq (1 :: Approx Double)
    , testProperty "Int" $ roundTripEq (1 :: Int)
    , testProperty "NonEmpty Char" $ roundTripEq (undefined :: NonEmpty Char)
    , testProperty "Integer" $ roundTripEq (1 :: Integer)
    , testProperty "String" $ roundTripEq ("" :: String)
    , testProperty "Text" $ roundTripEq T.empty
    , testProperty "Lazy Text" $ roundTripEq LT.empty
    , testProperty "Foo" $ roundTripEq (undefined :: Foo)
    , testProperty "Day" $ roundTripEq (undefined :: Day)
    , testProperty "BCE Day" $ roundTripEq (undefined :: BCEDay)
    , testProperty "DotNetTime" $ roundTripEq (undefined :: Approx DotNetTime)
    , testProperty "LocalTime" $ roundTripEq (undefined :: LocalTime)
    , testProperty "TimeOfDay" $ roundTripEq (undefined :: TimeOfDay)
    , testProperty "UTCTime" $ roundTripEq (undefined :: UTCTime)
    , testProperty "ZonedTime" $ roundTripEq (undefined :: ZonedTime)
    , testProperty "NominalDiffTime" $ roundTripEq (undefined :: NominalDiffTime)
    , testProperty "DiffTime" $ roundTripEq (undefined :: DiffTime)
    , testProperty "Version" $ roundTripEq (undefined :: Version)
    , testProperty "Natural" $ roundTripEq (undefined :: Natural)
    , testProperty "Proxy" $ roundTripEq (undefined :: Proxy Int)
    , testProperty "Tagged" $ roundTripEq (undefined :: Tagged Int Char)
    , testProperty "Const" $ roundTripEq (undefined :: Const Int Char)
    , testProperty "DList" $ roundTripEq (undefined :: DList Int)
    , testProperty "Seq" $ roundTripEq (undefined :: Seq Int)
    , testProperty "Rational" $ roundTripEq (undefined :: Rational)
    , testProperty "Ratio Int" $ roundTripEq (undefined :: Ratio Int)
    , testProperty "UUID" $ roundTripEq UUID.nil
    , testGroup "functors"
      [ testProperty "Identity Int" $ roundTripEq (undefined :: I Int)
      , testProperty "Identity Char" $ roundTripEq (undefined :: I Char)
      , testProperty "Identity [Char]" $ roundTripEq (undefined :: I String)
      , testProperty "[Identity Char]" $ roundTripEq (undefined :: [I Char])
      , testProperty "Compose I I Int" $ roundTripEq (undefined :: LogScaled (Compose I I Int))
      , testProperty "Compose [] I Int" $ roundTripEq (undefined :: LogScaled (Compose [] I Int))
      , testProperty "Compose I [] Int" $ roundTripEq (undefined :: LogScaled (Compose I [] Int))
      , testProperty "Compose [] [] Int" $ roundTripEq (undefined :: LogScaled (Compose [] [] Int))
      , testProperty "Compose I I Char" $ roundTripEq (undefined :: LogScaled (Compose I I Char))
      , testProperty "Compose [] I Char" $ roundTripEq (undefined :: LogScaled (Compose [] I Char))
      , testProperty "Compose I [] Char" $ roundTripEq (undefined :: LogScaled (Compose I [] Char))
      , testProperty "Compose [] [] Char" $ roundTripEq (undefined :: LogScaled (Compose [] [] Char))
      , testProperty "Compose3 I I I Char" $ roundTripEq (undefined :: LogScaled (Compose3 I I I Char))
      , testProperty "Compose3 I [] I Char" $ roundTripEq (undefined :: LogScaled (Compose3 I [] I Char))
      , testProperty "Compose3 I I [] Char" $ roundTripEq (undefined :: LogScaled (Compose3 I I [] Char))
      , testProperty "Compose3 I [] [] Char" $ roundTripEq (undefined :: LogScaled (Compose3 I [] [] Char))
      , testProperty "Compose3 [] I I Char" $ roundTripEq (undefined :: LogScaled (Compose3 [] I I Char))
      , testProperty "Compose3 [] [] I Char" $ roundTripEq (undefined :: LogScaled (Compose3 [] [] I Char))
      , testProperty "Compose3 [] I [] Char" $ roundTripEq (undefined :: LogScaled (Compose3 [] I [] Char))
      , testProperty "Compose3 [] [] [] Char" $ roundTripEq (undefined :: LogScaled (Compose3 [] [] [] Char))
      , testProperty "Compose3' I I I Char" $ roundTripEq (undefined :: LogScaled (Compose3' I I I Char))
      , testProperty "Compose3' I [] I Char" $ roundTripEq (undefined :: LogScaled (Compose3' I [] I Char))
      , testProperty "Compose3' I I [] Char" $ roundTripEq (undefined :: LogScaled (Compose3' I I [] Char))
      , testProperty "Compose3' I [] [] Char" $ roundTripEq (undefined :: LogScaled (Compose3' I [] [] Char))
      , testProperty "Compose3' [] I I Char" $ roundTripEq (undefined :: LogScaled (Compose3' [] I I Char))
      , testProperty "Compose3' [] [] I Char" $ roundTripEq (undefined :: LogScaled (Compose3' [] [] I Char))
      , testProperty "Compose3' [] I [] Char" $ roundTripEq (undefined :: LogScaled (Compose3' [] I [] Char))
      , testProperty "Compose3' [] [] [] Char" $ roundTripEq (undefined :: LogScaled (Compose3' [] [] [] Char))
      ]
    , testGroup "ghcGenerics" [
        testProperty "OneConstructor" $ roundTripEq OneConstructor
      , testProperty "Product2" $ roundTripEq (undefined :: Product2 Int Bool)
      , testProperty "Product6" $ roundTripEq (undefined :: P6)
      , testProperty "Sum4" $ roundTripEq (undefined :: S4)
      ]
    ]
  , testGroup "roundTrip Key"
    [ testProperty "Bool" $ roundTripKey True
    , testProperty "Text" $ roundTripKey (undefined :: T.Text)
    , testProperty "String" $ roundTripKey (undefined :: String)
    , testProperty "Int" $ roundTripKey (undefined :: Int)
    , testProperty "[Text]" $ roundTripKey (undefined :: LogScaled [T.Text])
    , testProperty "(Int,Char)" $ roundTripKey (undefined :: (Int,Char))
    , testProperty "Integer" $ roundTripKey (undefined :: Integer)
    , testProperty "Natural" $ roundTripKey (undefined :: Natural)
    , testProperty "Float" $ roundTripKey (undefined :: Float)
    , testProperty "Double" $ roundTripKey (undefined :: Double)
#if MIN_VERSION_base(4,7,0)
    , testProperty "Day" $ roundTripKey (undefined :: Day)
    , testProperty "LocalTime" $ roundTripKey (undefined :: LocalTime)
    , testProperty "TimeOfDay" $ roundTripKey (undefined :: TimeOfDay)
    , testProperty "UTCTime" $ roundTripKey (undefined :: UTCTime)
#endif
    , testProperty "Version" $ roundTripKey (undefined :: Version)
    , testProperty "Lazy Text" $ roundTripKey (undefined :: LT.Text)
    , testProperty "UUID" $ roundTripKey UUID.nil
    ]
  , testGroup "toFromJSON" [
      testProperty "Integer" (toFromJSON :: Integer -> Property)
    , testProperty "Double" (toFromJSON :: Double -> Property)
    , testProperty "Maybe Integer" (toFromJSON :: Maybe Integer -> Property)
    , testProperty "Either Integer Double" (toFromJSON :: Either Integer Double -> Property)
    , testProperty "Either Integer Integer" (toFromJSON :: Either Integer Integer -> Property)
    ]
  , testGroup "failure messages" [
      testProperty "modify failure" modifyFailureProp
    , testProperty "parserThrowError" parserThrowErrorProp
    , testProperty "parserCatchError" parserCatchErrorProp
    ]
  , testGroup "generic" [
      testGroup "toJSON" [
        testGroup "Nullary" [
            testProperty "string" (isString . gNullaryToJSONString)
          , testProperty "2ElemArray" (is2ElemArray . gNullaryToJSON2ElemArray)
          , testProperty "TaggedObject" (isNullaryTaggedObject . gNullaryToJSONTaggedObject)
          , testProperty "ObjectWithSingleField" (isObjectWithSingleField . gNullaryToJSONObjectWithSingleField)
          , testGroup "roundTrip" [
              testProperty "string" (toParseJSON gNullaryParseJSONString gNullaryToJSONString)
            , testProperty "2ElemArray" (toParseJSON gNullaryParseJSON2ElemArray gNullaryToJSON2ElemArray)
            , testProperty "TaggedObject" (toParseJSON gNullaryParseJSONTaggedObject gNullaryToJSONTaggedObject)
            , testProperty "ObjectWithSingleField" (toParseJSON gNullaryParseJSONObjectWithSingleField gNullaryToJSONObjectWithSingleField)
            ]
          ]
        , testGroup "EitherTextInt" [
            testProperty "UntaggedValue" (isUntaggedValueETI . gEitherTextIntToJSONUntaggedValue)
          , testProperty "roundtrip" (toParseJSON gEitherTextIntParseJSONUntaggedValue gEitherTextIntToJSONUntaggedValue)
          ]
        , testGroup "SomeType" [
            testProperty "2ElemArray" (is2ElemArray . gSomeTypeToJSON2ElemArray)
          , testProperty "TaggedObject" (isTaggedObject . gSomeTypeToJSONTaggedObject)
          , testProperty "ObjectWithSingleField" (isObjectWithSingleField . gSomeTypeToJSONObjectWithSingleField)
          , testGroup "roundTrip" [
              testProperty "2ElemArray" (toParseJSON gSomeTypeParseJSON2ElemArray gSomeTypeToJSON2ElemArray)
            , testProperty "TaggedObject" (toParseJSON gSomeTypeParseJSONTaggedObject gSomeTypeToJSONTaggedObject)
            , testProperty "ObjectWithSingleField" (toParseJSON gSomeTypeParseJSONObjectWithSingleField gSomeTypeToJSONObjectWithSingleField)
#if __GLASGOW_HASKELL__ >= 706
            , testProperty "2ElemArray unary" (toParseJSON1 gSomeTypeLiftParseJSON2ElemArray gSomeTypeLiftToJSON2ElemArray)
            , testProperty "TaggedObject unary" (toParseJSON1 gSomeTypeLiftParseJSONTaggedObject gSomeTypeLiftToJSONTaggedObject)
            , testProperty "ObjectWithSingleField unary" (toParseJSON1 gSomeTypeLiftParseJSONObjectWithSingleField gSomeTypeLiftToJSONObjectWithSingleField)
#endif
            ]
          ]
        , testGroup "OneConstructor" [
            testProperty "default" (isEmptyArray . gOneConstructorToJSONDefault)
          , testProperty "Tagged" (isTaggedObject . gOneConstructorToJSONTagged)
          , testGroup "roundTrip" [
              testProperty "default" (toParseJSON gOneConstructorParseJSONDefault gOneConstructorToJSONDefault)
            , testProperty "Tagged" (toParseJSON gOneConstructorParseJSONTagged gOneConstructorToJSONTagged)
            ]
          ]
        , testGroup "OptionField" [
            testProperty "like Maybe" $
            \x -> gOptionFieldToJSON (OptionField (Option x)) === thMaybeFieldToJSON (MaybeField x)
          , testProperty "roundTrip" (toParseJSON gOptionFieldParseJSON gOptionFieldToJSON)
          ]
        ]
      , testGroup "toEncoding" [
          testProperty "NullaryString" $
          gNullaryToJSONString `sameAs` gNullaryToEncodingString
        , testProperty "Nullary2ElemArray" $
          gNullaryToJSON2ElemArray `sameAs` gNullaryToEncoding2ElemArray
        , testProperty "NullaryTaggedObject" $
          gNullaryToJSONTaggedObject `sameAs` gNullaryToEncodingTaggedObject
        , testProperty "NullaryObjectWithSingleField" $
          gNullaryToJSONObjectWithSingleField `sameAs`
          gNullaryToEncodingObjectWithSingleField
--        , testProperty "ApproxUnwrap" $
--          gApproxToJSONUnwrap `sameAs` gApproxToEncodingUnwrap
        , testProperty "ApproxDefault" $
          gApproxToJSONDefault `sameAs` gApproxToEncodingDefault
        , testProperty "EitherTextInt UntaggedValue" $
          gEitherTextIntToJSONUntaggedValue `sameAs` gEitherTextIntToEncodingUntaggedValue
        , testProperty "SomeType2ElemArray" $
          gSomeTypeToJSON2ElemArray `sameAs` gSomeTypeToEncoding2ElemArray
#if __GLASGOW_HASKELL__ >= 706
        , testProperty "SomeType2ElemArray unary" $
          gSomeTypeLiftToJSON2ElemArray `sameAs1` gSomeTypeLiftToEncoding2ElemArray
        , testProperty "SomeType2ElemArray unary agree" $
          gSomeTypeToEncoding2ElemArray `sameAs1Agree` gSomeTypeLiftToEncoding2ElemArray
#endif
        , testProperty "SomeTypeTaggedObject" $
          gSomeTypeToJSONTaggedObject `sameAs` gSomeTypeToEncodingTaggedObject
#if __GLASGOW_HASKELL__ >= 706
        , testProperty "SomeTypeTaggedObject unary" $
          gSomeTypeLiftToJSONTaggedObject `sameAs1` gSomeTypeLiftToEncodingTaggedObject
        , testProperty "SomeTypeTaggedObject unary agree" $
          gSomeTypeToEncodingTaggedObject `sameAs1Agree` gSomeTypeLiftToEncodingTaggedObject
#endif
        , testProperty "SomeTypeObjectWithSingleField" $
          gSomeTypeToJSONObjectWithSingleField `sameAs` gSomeTypeToEncodingObjectWithSingleField
#if __GLASGOW_HASKELL__ >= 706
        , testProperty "SomeTypeObjectWithSingleField unary" $
          gSomeTypeLiftToJSONObjectWithSingleField `sameAs1` gSomeTypeLiftToEncodingObjectWithSingleField
        , testProperty "SomeTypeObjectWithSingleField unary agree" $
          gSomeTypeToEncodingObjectWithSingleField `sameAs1Agree` gSomeTypeLiftToEncodingObjectWithSingleField
#endif
        , testProperty "SomeTypeOmitNothingFields" $
          gSomeTypeToJSONOmitNothingFields `sameAs` gSomeTypeToEncodingOmitNothingFields
        , testProperty "OneConstructorDefault" $
          gOneConstructorToJSONDefault `sameAs` gOneConstructorToEncodingDefault
        , testProperty "OneConstructorTagged" $
          gOneConstructorToJSONTagged `sameAs` gOneConstructorToEncodingTagged
        , testProperty "OptionField" $
          gOptionFieldToJSON `sameAs` gOptionFieldToEncoding
        ]
    ]
  , testGroup "template-haskell" [
      testGroup "toJSON" [
        testGroup "Nullary" [
            testProperty "string" (isString . thNullaryToJSONString)
          , testProperty "2ElemArray" (is2ElemArray . thNullaryToJSON2ElemArray)
          , testProperty "TaggedObject" (isNullaryTaggedObject . thNullaryToJSONTaggedObject)
          , testProperty "ObjectWithSingleField" (isObjectWithSingleField . thNullaryToJSONObjectWithSingleField)
          , testGroup "roundTrip" [
              testProperty "string" (toParseJSON thNullaryParseJSONString thNullaryToJSONString)
            , testProperty "2ElemArray" (toParseJSON thNullaryParseJSON2ElemArray thNullaryToJSON2ElemArray)
            , testProperty "TaggedObject" (toParseJSON thNullaryParseJSONTaggedObject thNullaryToJSONTaggedObject)
            , testProperty "ObjectWithSingleField" (toParseJSON thNullaryParseJSONObjectWithSingleField thNullaryToJSONObjectWithSingleField)
            ]
          ]
        , testGroup "EitherTextInt" [
            testProperty "UntaggedValue" (isUntaggedValueETI . thEitherTextIntToJSONUntaggedValue)
          , testProperty "roundtrip" (toParseJSON thEitherTextIntParseJSONUntaggedValue thEitherTextIntToJSONUntaggedValue)
          ]
        , testGroup "SomeType" [
            testProperty "2ElemArray" (is2ElemArray . thSomeTypeToJSON2ElemArray)
          , testProperty "TaggedObject" (isTaggedObject . thSomeTypeToJSONTaggedObject)
          , testProperty "ObjectWithSingleField" (isObjectWithSingleField . thSomeTypeToJSONObjectWithSingleField)
          , testGroup "roundTrip" [
              testProperty "2ElemArray" (toParseJSON thSomeTypeParseJSON2ElemArray thSomeTypeToJSON2ElemArray)
            , testProperty "TaggedObject" (toParseJSON thSomeTypeParseJSONTaggedObject thSomeTypeToJSONTaggedObject)
            , testProperty "ObjectWithSingleField" (toParseJSON thSomeTypeParseJSONObjectWithSingleField thSomeTypeToJSONObjectWithSingleField)
            , testProperty "2ElemArray unary" (toParseJSON1 thSomeTypeLiftParseJSON2ElemArray thSomeTypeLiftToJSON2ElemArray)
            , testProperty "TaggedObject unary" (toParseJSON1 thSomeTypeLiftParseJSONTaggedObject thSomeTypeLiftToJSONTaggedObject)
            , testProperty "ObjectWithSingleField unary" (toParseJSON1 thSomeTypeLiftParseJSONObjectWithSingleField thSomeTypeLiftToJSONObjectWithSingleField)
            ]
          ]
        , testGroup "Approx" [
            testProperty "string" (isString . thApproxToJSONUnwrap)
          , testProperty "ObjectWithSingleField" (isObjectWithSingleField . thApproxToJSONDefault)
          , testGroup "roundTrip" [
              testProperty "string" (toParseJSON thApproxParseJSONUnwrap thApproxToJSONUnwrap)
            , testProperty "ObjectWithSingleField" (toParseJSON thApproxParseJSONDefault thApproxToJSONDefault)
            ]
          ]
        , testGroup "GADT" [
            testProperty "string" (isString . thGADTToJSONUnwrap)
          , testProperty "ObjectWithSingleField" (isObjectWithSingleField . thGADTToJSONDefault)
          , testGroup "roundTrip" [
              testProperty "string" (toParseJSON thGADTParseJSONUnwrap thGADTToJSONUnwrap)
            , testProperty "ObjectWithSingleField" (toParseJSON thGADTParseJSONDefault thGADTToJSONDefault)
            ]
          ]
        , testGroup "OneConstructor" [
            testProperty "default" (isEmptyArray . thOneConstructorToJSONDefault)
          , testProperty "Tagged" (isTaggedObject . thOneConstructorToJSONTagged)
          , testGroup "roundTrip" [
              testProperty "default" (toParseJSON thOneConstructorParseJSONDefault thOneConstructorToJSONDefault)
            , testProperty "Tagged" (toParseJSON thOneConstructorParseJSONTagged thOneConstructorToJSONTagged)
            ]
          ]
        , testGroup "OptionField" [
            testProperty "like Maybe" $
            \x -> thOptionFieldToJSON (OptionField (Option x)) === thMaybeFieldToJSON (MaybeField x)
          , testProperty "roundTrip" (toParseJSON thOptionFieldParseJSON thOptionFieldToJSON)
          ]
        ]
      , testGroup "toEncoding" [
          testProperty "NullaryString" $
          thNullaryToJSONString `sameAs` thNullaryToEncodingString
        , testProperty "Nullary2ElemArray" $
          thNullaryToJSON2ElemArray `sameAs` thNullaryToEncoding2ElemArray
        , testProperty "NullaryTaggedObject" $
          thNullaryToJSONTaggedObject `sameAs` thNullaryToEncodingTaggedObject
        , testProperty "NullaryObjectWithSingleField" $
          thNullaryToJSONObjectWithSingleField `sameAs`
          thNullaryToEncodingObjectWithSingleField
        , testProperty "ApproxUnwrap" $
          thApproxToJSONUnwrap `sameAs` thApproxToEncodingUnwrap
        , testProperty "ApproxDefault" $
          thApproxToJSONDefault `sameAs` thApproxToEncodingDefault
        , testProperty "EitherTextInt UntaggedValue" $
          thEitherTextIntToJSONUntaggedValue `sameAs` thEitherTextIntToEncodingUntaggedValue
        , testProperty "SomeType2ElemArray" $
          thSomeTypeToJSON2ElemArray `sameAs` thSomeTypeToEncoding2ElemArray
        , testProperty "SomeType2ElemArray unary" $
          thSomeTypeLiftToJSON2ElemArray `sameAs1` thSomeTypeLiftToEncoding2ElemArray
        , testProperty "SomeType2ElemArray unary agree" $
          thSomeTypeToEncoding2ElemArray `sameAs1Agree` thSomeTypeLiftToEncoding2ElemArray
        , testProperty "SomeTypeTaggedObject" $
          thSomeTypeToJSONTaggedObject `sameAs` thSomeTypeToEncodingTaggedObject
        , testProperty "SomeTypeTaggedObject unary" $
          thSomeTypeLiftToJSONTaggedObject `sameAs1` thSomeTypeLiftToEncodingTaggedObject
        , testProperty "SomeTypeTaggedObject unary agree" $
          thSomeTypeToEncodingTaggedObject `sameAs1Agree` thSomeTypeLiftToEncodingTaggedObject
        , testProperty "SomeTypeObjectWithSingleField" $
          thSomeTypeToJSONObjectWithSingleField `sameAs` thSomeTypeToEncodingObjectWithSingleField
        , testProperty "SomeTypeObjectWithSingleField unary" $
          thSomeTypeLiftToJSONObjectWithSingleField `sameAs1` thSomeTypeLiftToEncodingObjectWithSingleField
        , testProperty "SomeTypeObjectWithSingleField unary agree" $
          thSomeTypeToEncodingObjectWithSingleField `sameAs1Agree` thSomeTypeLiftToEncodingObjectWithSingleField
        , testProperty "OneConstructorDefault" $
          thOneConstructorToJSONDefault `sameAs` thOneConstructorToEncodingDefault
        , testProperty "OneConstructorTagged" $
          thOneConstructorToJSONTagged `sameAs` thOneConstructorToEncodingTagged
        , testProperty "OptionField" $
          thOptionFieldToJSON `sameAs` thOptionFieldToEncoding
        ]
    ]
  ]
| sol/aeson | tests/Properties.hs | bsd-3-clause | 27,477 | 0 | 20 | 5,574 | 6,461 | 3,393 | 3,068 | 413 | 3 |
{-# LANGUAGE DeriveDataTypeable, FlexibleContexts, GeneralizedNewtypeDeriving
, MultiParamTypeClasses, OverloadedStrings, ScopedTypeVariables
, TemplateHaskell, TypeFamilies, FlexibleInstances, RecordWildCards
, TypeOperators #-}
module Util.HasAcidState
where
import Control.Exception.Lifted ( bracket)
import Control.Monad.Trans ( MonadIO(..) )
import Control.Monad.Trans.Control ( MonadBaseControl )
import Data.Acid ( AcidState(..), EventState(..)
, EventResult(..) , Query(..)
, QueryEvent(..), Update(..)
, UpdateEvent(..), IsAcidic(..), makeAcidic
, openLocalState)
import Data.Acid.Advanced ( query', update' )
import Data.Acid.Local ( createCheckpointAndClose
, openLocalStateFrom)
import Data.Data ( Data, Typeable )
-- | Monads that can supply an 'AcidState' handle for state type @st@.
class (Functor m, Monad m) => HasAcidState m st where
   getAcidState :: m (AcidState st)
-- | Run a query event against whatever 'AcidState' handle the monad
-- supplies (via 'getAcidState').
query :: forall event m.
         ( Functor m
         , MonadIO m
         , QueryEvent event
         , HasAcidState m (EventState event)
         ) =>
         event
      -> m (EventResult event)
query event =
    do as <- getAcidState
       query' (as :: AcidState (EventState event)) event
-- | Run an update event against whatever 'AcidState' handle the monad
-- supplies (via 'getAcidState').
update :: forall event m.
          ( Functor m
          , MonadIO m
          , UpdateEvent event
          , HasAcidState m (EventState event)
          ) =>
          event
       -> m (EventResult event)
update event =
    do as <- getAcidState
       update' (as :: AcidState (EventState event)) event
-- | bracket the opening and close of the `AcidState` handle.
-- automatically creates a checkpoint on close
-- ('bracket' guarantees close-with-checkpoint runs even on exception.)
-- NOTE(review): the trailing "| ojw/admin-and-dev | ..." text on the last
-- line is dataset metadata fused into the source and is not valid Haskell.
withLocalState :: (MonadBaseControl IO m, MonadIO m, IsAcidic st, Typeable st) =>
                  Maybe FilePath -- ^ path to state directory
               -> st -- ^ initial state value
               -> (AcidState st -> m a) -- ^ function which uses the `AcidState` handle
               -> m a
withLocalState mPath initialState =
    bracket (liftIO $ (maybe openLocalState openLocalStateFrom mPath) initialState)
            (liftIO . createCheckpointAndClose) | ojw/admin-and-dev | src/Util/HasAcidState.hs | bsd-3-clause | 2,297 | 0 | 11 | 771 | 501 | 280 | 221 | 47 | 1 |
{-# LANGUAGE
DataKinds
, FlexibleContexts
, FlexibleInstances
, TypeOperators
#-}
module Test.Common where
import Data.Functor.Contravariant
import Data.List
import Data.Text.Lazy.Builder
import Data.Vinyl
import Data.Vinyl.Functor
import Data.Vinyl.Utils.Operator
type Id = Identity
-- | Render every field of the record (via 'build') and join the
-- resulting 'Builder's with the given delimiter character.
buildDelimited
  :: Buildable rs
  => Char
  -> Rec Id rs
  -> Builder
buildDelimited delim r =
    mconcat (intersperse (singleton delim) (build r))
-- | Render each field of the record to a 'Builder' using the record's
-- 'Buildable' instance, preserving field order.
build
  :: Buildable rs
  => Rec Id rs
  -> [Builder]
build r =
    map getIdentity
  . recordToList
  $ builder \$\ r
-- | Records whose every field can be rendered to a 'Builder'.
class Buildable rs where
  builder :: Rec (Op Builder) rs
-- | Empty record: no builders.
instance Buildable '[] where
  builder = RNil
-- | Non-empty record: render the head via 'show', recurse on the tail.
instance (Buildable rs, Show r) => Buildable (r ': rs) where
  builder = Op (fromString . show) :& builder
| marcinmrotek/pipes-key-value-csv | test/Test/Common.hs | bsd-3-clause | 795 | 0 | 9 | 175 | 239 | 128 | 111 | 36 | 1 |
module RBPCP.Handler.Internal.Types
( module RBPCP.Handler.Internal.Types
, module RBPCP.Handler.Internal.Error
, module RBPCP.Handler.Conf
)
where
import MyPrelude
import RBPCP.Handler.Internal.Error
import RBPCP.Handler.Conf
import qualified ChanDB as DB
import qualified Control.Monad.Reader as Reader
import qualified Servant.Server as SS
import qualified PaymentChannel as PC
type HandlerM dbConf chain = AppM (HandlerConf dbConf chain)
class HasAppConf m dbH chain where
getAppConf :: m (HandlerConf dbH chain)
instance HasAppConf (HandlerM dbH chain) dbH chain where
getAppConf = Reader.ask
--class HasSpvWallet m where
-- wallIface :: m Wall.Interface
--
--instance Monad m => HasSpvWallet (ReaderT (HandlerConf dbH chain) m) where
-- wallIface = Reader.asks hcSpvWallet
class DB.ChanDB dbM dbH => HasDb m dbM dbH where
liftDB :: dbM a -> m a
instance DB.ChanDB dbM dbH => HasDb (ReaderT (HandlerConf dbH chain) dbM) dbM dbH where
liftDB = lift
instance DB.ChanDB dbM dbH => HasDb (ReaderT (HandlerConf dbH chain) (EitherT (HandlerErr e) dbM)) dbM dbH where
liftDB = lift . lift
class DB.ChanDBTx dbTxM dbM dbH => HasDbTx m dbTxM dbM dbH where
liftDbTx :: dbTxM a -> m a
instance (DB.ChanDBTx dbTxM dbM dbH, Monad dbTxM)
=> HasDbTx (ReaderT (HandlerConf dbH chain) dbTxM) dbTxM dbM dbH where
liftDbTx = lift
instance (DB.ChanDBTx dbTxM dbM dbH, Monad dbTxM)
=> HasDbTx (ReaderT (HandlerConf dbH chain) (EitherT (HandlerErr e) dbTxM)) dbTxM dbM dbH where
liftDbTx = lift . lift
type BlockNumber = Word32
data HandlerErr a
= HandlerErr a
| UserError UserError
| InternalErr InternalError
deriving (Eq, Show)
toHandlerEx :: DB.ChanDBException -> HandlerErr a
toHandlerEx e =
if DB.is404 e
then UserError ResourceNotFound
else InternalErr $ OtherInternalErr "DB Error"
class Show e => IsHandlerException e where
mkHandlerErr :: e -> SS.ServantErr
instance IsHandlerException a => IsHandlerException (HandlerErr a) where
mkHandlerErr (HandlerErr e) = mkHandlerErr e
mkHandlerErr (InternalErr _) = mkServantErr SS.err500 (OtherInternalErr "")
mkHandlerErr (UserError e) = mkHandlerErr e
instance IsHandlerException DB.ChanDBException where
mkHandlerErr e =
if DB.is404 e
then SS.err404
else mkServantErr SS.err500 (OtherInternalErr "")
instance IsHandlerException PC.PayChanError where
mkHandlerErr = mkServantErr SS.err400
--instance IsHandlerException PubKeyDbException where
-- mkHandlerErr _ = mkServantErr SS.err500 (OtherInternalErr "")
instance IsHandlerException UserError where
mkHandlerErr ResourceNotFound = SS.err404
mkHandlerErr (ResourcePaymentMismatch txid vout s) = SS.err301
{ SS.errHeaders = [ ("Location", cs $ mkUrl txid vout s) ]
, SS.errHTTPCode = 308 -- repeat request of same type (POST/PUT/etc.) on new URL
}
instance IsHandlerException InternalError where
mkHandlerErr = mkServantErr SS.err500
class HasDbConf m chanDb where
getDbConf :: m chanDb
instance HasDbConf (AppM (HandlerConf chanDb chain)) chanDb where
getDbConf = Reader.asks hcChanDb
class HasHttpManager m where
getManager :: m Manager
| runeksvendsen/rbpcp-handler | src/RBPCP/Handler/Internal/Types.hs | bsd-3-clause | 3,321 | 0 | 11 | 709 | 900 | 476 | 424 | -1 | -1 |
module Qi.Test.Logger where
import qualified Data.ByteString.Lazy.Builder as Build
import Protolude
import Qi.AWS.Types (MkAwsLogger)
import System.IO hiding (hPutStrLn)
mkTestLogger
:: MkAwsLogger
mkTestLogger = do
hSetBuffering stderr LineBuffering
pure $ \_lvl b ->
hPutStrLn stderr $ Build.toLazyByteString b
| qmuli/qmuli | tests/Qi/Test/Logger.hs | mit | 392 | 0 | 11 | 115 | 86 | 50 | 36 | 11 | 1 |
-- TODO: Should probably use
module Dmp.Args
(ArgV (..),
parseArgV,
printUsage)where
import Text.ParserCombinators.Parsec
import Text.Parsec.Perm
import Data.List
import Control.Monad
-- | Prints the usage statement
printUsage :: IO ()
printUsage = do
putStrLn "Usage:"
putStrLn "DmpHelper --i [input_file] --o [output_file] [--options]"
putStrLn "Where options include:"
putStrLn "--v\t\tVerbose Mode"
-- | Data structure representing a valid ArgV
data ArgV =
ArgV
{inputFile :: FilePath,
outputFile :: FilePath,
isVerbose :: Bool}
deriving (Show)
-- | Parses an ArgV
parseArgV :: [String]
-> Either ParseError ArgV
parseArgV i = parse pArgV "" $ foldl' (++) [] i
pArgV :: CharParser st ArgV
pArgV = permute $ ArgV <$$> pInputFile
<||> pOutputFile
<|?> (False, pIsVerbose)
pInputFile :: CharParser st FilePath
pInputFile = do try $ string "--i"
notFollowedBy pEndOfArg
pInOutFile
pOutputFile :: CharParser st FilePath
pOutputFile = do try $ string "--o"
pInOutFile
pInOutFile :: CharParser st FilePath
pInOutFile = do res <- manyTill anyChar pEndOfArg
guard (not $ null res)
return res
pIsVerbose :: CharParser st Bool
pIsVerbose = try $ string "--v" >> return True
pEndOfArg :: CharParser st ()
pEndOfArg = nextArg <|> eof
where nextArg = do
lookAhead $ try $ string "--"
return ()
| christetreault/liquid-haskell-converter | lhconverter/Dmp/Args.hs | gpl-3.0 | 1,493 | 0 | 10 | 404 | 392 | 200 | 192 | 45 | 1 |
-- | Wrapper program for propellor distribution.
--
-- Distributions should install this program into PATH.
-- (Cabal builds it as dist/build/propellor/propellor).
--
-- This is not the propellor main program (that's config.hs)
--
-- This installs propellor's source into ~/.propellor,
-- uses it to build the real propellor program (if not already built),
-- and runs it.
--
-- The source is cloned from /usr/src/propellor when available,
-- or is cloned from git over the network.
module Main where
import Propellor.Message
import Propellor.Bootstrap
import Utility.UserInfo
import Utility.Monad
import Utility.Process
import Utility.SafeCommand
import Utility.Exception
import Control.Monad
import Control.Monad.IfElse
import Control.Applicative
import System.Directory
import System.FilePath
import System.Environment (getArgs)
import System.Exit
import System.Posix.Directory
import System.IO
distdir :: FilePath
distdir = "/usr/src/propellor"
distrepo :: FilePath
distrepo = distdir </> "propellor.git"
disthead :: FilePath
disthead = distdir </> "head"
upstreambranch :: String
upstreambranch = "upstream/master"
-- Using the github mirror of the main propellor repo because
-- it is accessible over https for better security.
netrepo :: String
netrepo = "https://github.com/joeyh/propellor.git"
main :: IO ()
main = do
args <- getArgs
home <- myHomeDir
let propellordir = home </> ".propellor"
let propellorbin = propellordir </> "propellor"
wrapper args propellordir propellorbin
wrapper :: [String] -> FilePath -> FilePath -> IO ()
wrapper args propellordir propellorbin = do
ifM (doesDirectoryExist propellordir)
( checkRepo
, makeRepo
)
buildruncfg
where
makeRepo = do
putStrLn $ "Setting up your propellor repo in " ++ propellordir
putStrLn ""
ifM (doesFileExist distrepo <||> doesDirectoryExist distrepo)
( do
void $ boolSystem "git" [Param "clone", File distrepo, File propellordir]
fetchUpstreamBranch propellordir distrepo
changeWorkingDirectory propellordir
void $ boolSystem "git" [Param "remote", Param "rm", Param "origin"]
, void $ boolSystem "git" [Param "clone", Param netrepo, File propellordir]
)
checkRepo = whenM (doesFileExist disthead) $ do
headrev <- takeWhile (/= '\n') <$> readFile disthead
changeWorkingDirectory propellordir
headknown <- catchMaybeIO $
withQuietOutput createProcessSuccess $
proc "git" ["log", headrev]
if (headknown == Nothing)
then setupupstreammaster headrev propellordir
else do
merged <- not . null <$>
readProcess "git" ["log", headrev ++ "..HEAD", "--ancestry-path"]
unless merged $
warnoutofdate propellordir True
buildruncfg = do
changeWorkingDirectory propellordir
buildPropellor
putStrLn ""
putStrLn ""
chain
chain = do
(_, _, _, pid) <- createProcess (proc propellorbin args)
exitWith =<< waitForProcess pid
-- Passed the user's propellordir repository, makes upstream/master
-- be a usefully mergeable branch.
--
-- We cannot just use origin/master, because in the case of a distrepo,
-- it only contains 1 commit. So, trying to merge with it will result
-- in lots of merge conflicts, since git cannot find a common parent
-- commit.
--
-- Instead, the upstream/master branch is created by taking the
-- upstream/master branch (which must be an old version of propellor,
-- as distributed), and diffing from it to the current origin/master,
-- and committing the result. This is done in a temporary clone of the
-- repository, giving it a new master branch. That new branch is fetched
-- into the user's repository, as if fetching from a upstream remote,
-- yielding a new upstream/master branch.
setupupstreammaster :: String -> FilePath -> IO ()
setupupstreammaster newref propellordir = do
changeWorkingDirectory propellordir
go =<< catchMaybeIO getoldrev
where
go Nothing = warnoutofdate propellordir False
go (Just oldref) = do
let tmprepo = ".git/propellordisttmp"
let cleantmprepo = void $ catchMaybeIO $ removeDirectoryRecursive tmprepo
cleantmprepo
git ["clone", "--quiet", ".", tmprepo]
changeWorkingDirectory tmprepo
git ["fetch", distrepo, "--quiet"]
git ["reset", "--hard", oldref, "--quiet"]
git ["merge", newref, "-s", "recursive", "-Xtheirs", "--quiet", "-m", "merging upstream version"]
fetchUpstreamBranch propellordir tmprepo
cleantmprepo
warnoutofdate propellordir True
getoldrev = takeWhile (/= '\n')
<$> readProcess "git" ["show-ref", upstreambranch, "--hash"]
git = run "git"
run cmd ps = unlessM (boolSystem cmd (map Param ps)) $
error $ "Failed to run " ++ cmd ++ " " ++ show ps
warnoutofdate :: FilePath -> Bool -> IO ()
warnoutofdate propellordir havebranch = do
warningMessage ("** Your " ++ propellordir ++ " is out of date..")
let also s = hPutStrLn stderr (" " ++ s)
also ("A newer upstream version is available in " ++ distrepo)
if havebranch
then also ("To merge it, run: git merge " ++ upstreambranch)
else also ("To merge it, find the most recent commit in your repository's history that corresponds to an upstream release of propellor, and set refs/remotes/" ++ upstreambranch ++ " to it. Then run propellor again.")
also ""
fetchUpstreamBranch :: FilePath -> FilePath -> IO ()
fetchUpstreamBranch propellordir repo = do
changeWorkingDirectory propellordir
void $ boolSystem "git"
[ Param "fetch"
, File repo
, Param ("+refs/heads/master:refs/remotes/" ++ upstreambranch)
, Param "--quiet"
]
| avengerpenguin/propellor | src/wrapper.hs | bsd-2-clause | 5,483 | 48 | 17 | 940 | 1,217 | 625 | 592 | 111 | 2 |
{- This module doesn't export any names, but importing it makes
- Syntax.Flapjax an instance of HtmlScript.
-}
module Flapjax.HtmlEmbedding(FjHtml) where
import Control.Monad
import BrownPLT.Html.Syntax (Script (..),
attributeValue, Html)
import qualified Flapjax.Parser
import qualified BrownPLT.JavaScript.Parser
import Flapjax.Syntax
import Text.ParserCombinators.Parsec(SourcePos)
type FjHtml = Html SourcePos Flapjax
-- We recognize lang= and type= methods for identifying a script's language.
scriptLang attrs =
case attributeValue "lang" attrs of
(Just v) -> Just v
Nothing -> case attributeValue "type" attrs of
(Just v) -> Just v
Nothing -> Nothing
instance Script Flapjax where
prettyPrintScript script = prettyFlapjax script
parseScriptBlock attrs =
case scriptLang attrs of
(Just "flapjax") -> Flapjax.Parser.parseScript
(Just "text/flapjax") -> Flapjax.Parser.parseScript
otherwise ->
liftM Javascript BrownPLT.JavaScript.Parser.parseScript
parseInlineScript =
Just Flapjax.Parser.parseInline
parseAttributeScript =
Just Flapjax.Parser.parseInlineAttribute
| ducis/flapjax-fixed | flapjax/trunk/compiler/src/Flapjax/HtmlEmbedding.hs | bsd-3-clause | 1,203 | 0 | 12 | 255 | 248 | 135 | 113 | 27 | 3 |
-----------------------------------------------------------------------------
--
-- (c) The University of Glasgow 2006
--
-- The purpose of this module is to transform an HsExpr into a CoreExpr which
-- when evaluated, returns a (Meta.Q Meta.Exp) computation analogous to the
-- input HsExpr. We do this in the DsM monad, which supplies access to
-- CoreExpr's of the "smart constructors" of the Meta.Exp datatype.
--
-- It also defines a bunch of knownKeyNames, in the same way as is done
-- in prelude/PrelNames. It's much more convenient to do it here, becuase
-- otherwise we have to recompile PrelNames whenever we add a Name, which is
-- a Royal Pain (triggers other recompilation).
-----------------------------------------------------------------------------
{-# OPTIONS -fno-warn-tabs #-}
-- The above warning suppression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and
-- detab the module (please do the detabbing in a separate patch). See
-- http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
-- for details
module DsMeta( dsBracket,
templateHaskellNames, qTyConName, nameTyConName,
liftName, liftStringName, expQTyConName, patQTyConName,
decQTyConName, decsQTyConName, typeQTyConName,
decTyConName, typeTyConName, mkNameG_dName, mkNameG_vName, mkNameG_tcName,
quoteExpName, quotePatName, quoteDecName, quoteTypeName
) where
#include "HsVersions.h"
import {-# SOURCE #-} DsExpr ( dsExpr )
import MatchLit
import DsMonad
import qualified Language.Haskell.TH as TH
import HsSyn
import Class
import PrelNames
-- To avoid clashes with DsMeta.varName we must make a local alias for
-- OccName.varName we do this by removing varName from the import of
-- OccName above, making a qualified instance of OccName and using
-- OccNameAlias.varName where varName was previously used in this file.
import qualified OccName( isDataOcc, isVarOcc, isTcOcc, varName, tcName )
import Module
import Id
import Name hiding( isVarOcc, isTcOcc, varName, tcName )
import NameEnv
import TcType
import TyCon
import TysWiredIn
import TysPrim ( liftedTypeKindTyConName )
import CoreSyn
import MkCore
import CoreUtils
import SrcLoc
import Unique
import BasicTypes
import Outputable
import Bag
import FastString
import ForeignCall
import MonadUtils
import Util( equalLength )
import Data.Maybe
import Control.Monad
import Data.List
-----------------------------------------------------------------------------
dsBracket :: HsBracket Name -> [PendingSplice] -> DsM CoreExpr
-- Returns a CoreExpr of type TH.ExpQ
-- The quoted thing is parameterised over Name, even though it has
-- been type checked. We don't want all those type decorations!
dsBracket brack splices
  = dsExtendMetaEnv new_bit (do_brack brack)
  where
    -- Map each splice-point Name to its (already renamed) expression,
    -- so occurrences inside the bracket are looked up rather than
    -- treated as ordinary variables.
    new_bit = mkNameEnv [(n, Splice (unLoc e)) | (n,e) <- splices]
    -- Dispatch on the bracket form; each rep* function builds the
    -- corresponding Template Haskell representation as a CoreExpr.
    do_brack (VarBr _ n) = do { MkC e1 <- lookupOcc n ; return e1 }
    do_brack (ExpBr e) = do { MkC e1 <- repLE e ; return e1 }
    do_brack (PatBr p) = do { MkC p1 <- repTopP p ; return p1 }
    do_brack (TypBr t) = do { MkC t1 <- repLTy t ; return t1 }
    do_brack (DecBrG gp) = do { MkC ds1 <- repTopDs gp ; return ds1 }
    -- DecBrL is eliminated before desugaring — presumably by the
    -- renamer; reaching here is a compiler bug.
    do_brack (DecBrL _) = panic "dsBracket: unexpected DecBrL"
{- -------------- Examples --------------------
[| \x -> x |]
====>
gensym (unpackString "x"#) `bindQ` \ x1::String ->
lam (pvar x1) (var x1)
[| \x -> $(f [| x |]) |]
====>
gensym (unpackString "x"#) `bindQ` \ x1::String ->
lam (pvar x1) (f (var x1))
-}
-------------------------------------------------------
-- Declarations
-------------------------------------------------------
-- | Desugar a top-level pattern bracket: gensym every binder the
-- pattern introduces, represent the pattern in that extended meta
-- environment, then wrap the gensym bindings around the result.
repTopP :: LPat Name -> DsM (Core TH.PatQ)
repTopP pat = do
    gensyms <- mkGenSyms (collectPatBinders pat)
    patRep  <- addBinds gensyms (repLP pat)
    wrapGenSyms gensyms patRep
repTopDs :: HsGroup Name -> DsM (Core (TH.Q [TH.Dec]))
-- Desugar a declaration-group bracket [d| ... |] into a CoreExpr of
-- type (Q [Dec]).  All binders of the group are gensym'd up front;
-- see Note [Binders and occurrences].
repTopDs group
 = do { let { bndrs = hsGroupBinders group } ;
        ss <- mkGenSyms bndrs ;

        -- Bind all the names mainly to avoid repeated use of explicit strings.
        -- Thus we get
        --      do { t :: String <- genSym "T" ;
        --           return (Data t [] ...more t's... }
        -- The other important reason is that the output must mention
        -- only "T", not "Foo:T" where Foo is the current module
        decls <- addBinds ss (do {
                        val_ds  <- rep_val_binds (hs_valds group) ;
                        tycl_ds <- mapM repTyClD (concat (hs_tyclds group)) ;
                        inst_ds <- mapM repInstD' (hs_instds group) ;
                        for_ds <- mapM repForD (hs_fords group) ;
                        -- more needed
                        return (de_loc $ sort_by_loc $ val_ds ++ catMaybes tycl_ds ++ inst_ds ++ for_ds) }) ;

        -- Package the individual DecQ's into a single (Q [Dec]) value.
        decl_ty <- lookupType decQTyConName ;
        let { core_list = coreList' decl_ty decls } ;

        dec_ty <- lookupType decTyConName ;
        q_decs  <- repSequenceQ dec_ty core_list ;

        wrapGenSyms ss q_decs
      }
{- Note [Binders and occurrences]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When we desugar [d| data T = MkT |]
we want to get
Data "T" [] [Con "MkT" []] []
and *not*
Data "Foo:T" [] [Con "Foo:MkT" []] []
That is, the new data decl should fit into whatever new module it is
asked to fit in. We do *not* clone, though; no need for this:
Data "T79" ....
But if we see this:
data T = MkT
foo = reifyDecl T
then we must desugar to
foo = Data "Foo:T" [] [Con "Foo:MkT" []] []
So in repTopDs we bring the binders into scope with mkGenSyms and addBinds.
And we use lookupOcc, rather than lookupBinder
in repTyClD and repC.
-}
-- | Desugar a type or class declaration into a TH 'DecQ' paired with
-- its source span.  Forms that Template Haskell cannot represent
-- yield Nothing after emitting a warning.
repTyClD :: LTyClDecl Name -> DsM (Maybe (SrcSpan, Core TH.DecQ))

-- Top-level type families use 'addTyVarBinds' (fresh gensyms for the
-- head type variables); associated ones go through repLAssocFamilys.
repTyClD tydecl@(L _ (TyFamily {}))
  = repTyFamily tydecl addTyVarBinds

-- Ordinary data declarations (and data family instances, when
-- tcdTyPats is a Just).
repTyClD (L loc (TyData { tcdND = DataType, tcdCtxt = cxt,
                          tcdLName = tc, tcdTyVars = tvs, tcdTyPats = opt_tys,
                          tcdCons = cons, tcdDerivs = mb_derivs }))
  = do { tc1 <- lookupLOcc tc           -- See note [Binders and occurrences]
       ; dec <- addTyVarBinds tvs $ \bndrs ->
           do { cxt1     <- repLContext cxt
              ; opt_tys1 <- maybeMapM repLTys opt_tys   -- only for family insts
              ; opt_tys2 <- maybeMapM (coreList typeQTyConName) opt_tys1
              ; cons1    <- mapM (repC (hsLTyVarNames tvs)) cons
              ; cons2    <- coreList conQTyConName cons1
              ; derivs1  <- repDerivs mb_derivs
              ; bndrs1   <- coreList tyVarBndrTyConName bndrs
              ; repData cxt1 tc1 bndrs1 opt_tys2 cons2 derivs1
              }
       ; return $ Just (loc, dec)
       }

-- Newtype declarations: same shape as data, but exactly one constructor.
repTyClD (L loc (TyData { tcdND = NewType, tcdCtxt = cxt,
                          tcdLName = tc, tcdTyVars = tvs, tcdTyPats = opt_tys,
                          tcdCons = [con], tcdDerivs = mb_derivs }))
  = do { tc1 <- lookupLOcc tc           -- See note [Binders and occurrences]
       ; dec <- addTyVarBinds tvs $ \bndrs ->
           do { cxt1     <- repLContext cxt
              ; opt_tys1 <- maybeMapM repLTys opt_tys   -- only for family insts
              ; opt_tys2 <- maybeMapM (coreList typeQTyConName) opt_tys1
              ; con1     <- repC (hsLTyVarNames tvs) con
              ; derivs1  <- repDerivs mb_derivs
              ; bndrs1   <- coreList tyVarBndrTyConName bndrs
              ; repNewtype cxt1 tc1 bndrs1 opt_tys2 con1 derivs1
              }
       ; return $ Just (loc, dec)
       }

-- Type synonyms (and type family instances, when tcdTyPats is a Just).
repTyClD (L loc (TySynonym { tcdLName = tc, tcdTyVars = tvs, tcdTyPats = opt_tys,
                             tcdSynRhs = ty }))
  = do { tc1 <- lookupLOcc tc           -- See note [Binders and occurrences]
       ; dec <- addTyVarBinds tvs $ \bndrs ->
           do { opt_tys1 <- maybeMapM repLTys opt_tys   -- only for family insts
              ; opt_tys2 <- maybeMapM (coreList typeQTyConName) opt_tys1
              ; ty1      <- repLTy ty
              ; bndrs1   <- coreList tyVarBndrTyConName bndrs
              ; repTySyn tc1 bndrs1 opt_tys2 ty1
              }
       ; return (Just (loc, dec))
       }

-- Class declarations.  Note the pattern requires tcdATDefs = []:
-- classes with associated-type defaults fall through to the
-- "Un-handled cases" equation below.
repTyClD (L loc (ClassDecl { tcdCtxt = cxt, tcdLName = cls,
                             tcdTyVars = tvs, tcdFDs = fds,
                             tcdSigs = sigs, tcdMeths = meth_binds,
                             tcdATs = ats, tcdATDefs = [] }))
  = do { cls1 <- lookupLOcc cls         -- See note [Binders and occurrences]
       ; dec <- addTyVarBinds tvs $ \bndrs ->
           do { cxt1   <- repLContext cxt
              ; sigs1  <- rep_sigs sigs
              ; binds1 <- rep_binds meth_binds
              ; fds1   <- repLFunDeps fds
              ; ats1   <- repLAssocFamilys ats
              ; decls1 <- coreList decQTyConName (ats1 ++ sigs1 ++ binds1)
              ; bndrs1 <- coreList tyVarBndrTyConName bndrs
              ; repClass cxt1 cls1 bndrs1 fds1 decls1
              }
       ; return $ Just (loc, dec)
       }

-- Un-handled cases
repTyClD (L loc d) = putSrcSpanDs loc $
                     do { warnDs (hang ds_msg 4 (ppr d))
                        ; return Nothing }
-- The type variables in the head of families are treated differently when the
-- family declaration is associated. In that case, they are usage, not binding
-- occurrences.
--
-- | Desugar a type/data family declaration.  The 'tyVarBinds'
-- argument selects how the head type variables are processed:
-- 'addTyVarBinds' (fresh gensyms) for top-level families, or
-- 'lookupTyVarBinds' (reuse the enclosing class's binders) for
-- associated families.
repTyFamily :: LTyClDecl Name
            -> ProcessTyVarBinds TH.Dec
            -> DsM (Maybe (SrcSpan, Core TH.DecQ))
repTyFamily (L loc (TyFamily { tcdFlavour = flavour,
                               tcdLName = tc, tcdTyVars = tvs,
                               tcdKind = opt_kind }))
            tyVarBinds
  = do { tc1 <- lookupLOcc tc           -- See note [Binders and occurrences]
       ; dec <- tyVarBinds tvs $ \bndrs ->
           do { flav   <- repFamilyFlavour flavour
              ; bndrs1 <- coreList tyVarBndrTyConName bndrs
              -- Choose between the kinded and unkinded TH constructors
              -- depending on whether a kind signature was given.
              ; case opt_kind of
                  Nothing -> repFamilyNoKind flav tc1 bndrs1
                  Just ki -> do { ki1 <- repKind ki
                                ; repFamilyKind flav tc1 bndrs1 ki1
                                }
              }
       ; return $ Just (loc, dec)
       }
-- Only called on TyFamily declarations (see repTyClD / repLAssocFamilys).
repTyFamily _ _ = panic "DsMeta.repTyFamily: internal error"
-- represent fundeps
--
repLFunDeps :: [Located (FunDep Name)] -> DsM (Core [TH.FunDep])
repLFunDeps fds = do fds' <- mapM repLFunDep fds
fdList <- coreList funDepTyConName fds'
return fdList
repLFunDep :: Located (FunDep Name) -> DsM (Core TH.FunDep)
repLFunDep (L _ (xs, ys)) = do xs' <- mapM lookupBinder xs
ys' <- mapM lookupBinder ys
xs_list <- coreList nameTyConName xs'
ys_list <- coreList nameTyConName ys'
repFunDep xs_list ys_list
-- represent family declaration flavours
--
-- | Represent a family declaration flavour (type vs. data family).
repFamilyFlavour :: FamilyFlavour -> DsM (Core TH.FamFlavour)
repFamilyFlavour flav = case flav of
    TypeFamily -> rep2 typeFamName []
    DataFamily -> rep2 dataFamName []
-- represent associated family declarations
--
-- | Represent the associated family declarations of a class.
-- Only family declarations proper are legal here; anything else is
-- reported as a user error.
repLAssocFamilys :: [LTyClDecl Name] -> DsM [Core TH.DecQ]
repLAssocFamilys = mapM repLAssocFamily
  where
    repLAssocFamily tydecl@(L _ (TyFamily {}))
      = do { mb_dec <- repTyFamily tydecl lookupTyVarBinds
             -- repTyFamily always succeeds on a TyFamily argument, so
             -- Nothing here indicates a compiler bug.  Panic with
             -- context instead of the original bare 'fromJust', whose
             -- failure carried no information about where it occurred.
           ; case mb_dec of
               Just (_, dec) -> return dec
               Nothing       -> panic "repLAssocFamilys: repTyFamily failed on a TyFamily" }
    repLAssocFamily tydecl
      = failWithDs msg
      where
        msg = ptext (sLit "Illegal associated declaration in class:") <+>
              ppr tydecl
-- represent associated family instances
--
-- | Represent associated family instances occurring inside an
-- instance declaration, dropping source locations from the results.
repLAssocFamInst :: [LTyClDecl Name] -> DsM [Core TH.DecQ]
repLAssocFamInst ats = do
    located_decs <- mapMaybeM repTyClD ats
    return (de_loc located_decs)
-- represent instance declarations
--
-- | Desugar an instance declaration to a TH 'DecQ' paired with its
-- source span.  User pragmas in the instance are ignored.
repInstD' :: LInstDecl Name -> DsM (SrcSpan, Core TH.DecQ)
repInstD' (L loc (InstDecl ty binds _ ats))     -- Ignore user pragmas for now
  = do { dec <- addTyVarBinds tvs $ \_ ->
            -- We must bring the type variables into scope, so their
            -- occurrences don't fail, even though the binders don't
            -- appear in the resulting data structure
            --
            -- But we do NOT bring the binders of 'binds' into scope
            -- because they are properly regarded as occurrences
            -- For example, the method names should be bound to
            -- the selector Ids, not to fresh names (Trac #5410)
            --
            do { cxt1     <- repContext cxt
               ; cls_tcon <- repTy (HsTyVar cls)
               ; cls_tys  <- repLTys tys
               ; inst_ty1 <- repTapps cls_tcon cls_tys
               ; binds1   <- rep_binds binds
               ; ats1     <- repLAssocFamInst ats
               ; decls    <- coreList decQTyConName (ats1 ++ binds1)
               ; repInst cxt1 inst_ty1 decls }
       ; return (loc, dec) }
 where
   -- Lazy, partial pattern: by the time we desugar, the instance head
   -- always has this shape — TODO confirm it is guaranteed upstream.
   Just (tvs, cxt, cls, tys) = splitHsInstDeclTy_maybe (unLoc ty)
repForD :: Located (ForeignDecl Name) -> DsM (SrcSpan, Core TH.DecQ)
repForD (L loc (ForeignImport name typ _ (CImport cc s ch cis)))
= do MkC name' <- lookupLOcc name
MkC typ' <- repLTy typ
MkC cc' <- repCCallConv cc
MkC s' <- repSafety s
cis' <- conv_cimportspec cis
MkC str <- coreStringLit $ static
++ unpackFS ch ++ " "
++ cis'
dec <- rep2 forImpDName [cc', s', str, name', typ']
return (loc, dec)
where
conv_cimportspec (CLabel cls) = notHandled "Foreign label" (doubleQuotes (ppr cls))
conv_cimportspec (CFunction DynamicTarget) = return "dynamic"
conv_cimportspec (CFunction (StaticTarget fs _)) = return (unpackFS fs)
conv_cimportspec CWrapper = return "wrapper"
static = case cis of
CFunction (StaticTarget _ _) -> "static "
_ -> ""
repForD decl = notHandled "Foreign declaration" (ppr decl)
-- | Represent a C calling convention; conventions with no TH
-- counterpart are reported via 'notHandled'.
repCCallConv :: CCallConv -> DsM (Core TH.Callconv)
repCCallConv conv = case conv of
    CCallConv   -> rep2 cCallName []
    StdCallConv -> rep2 stdCallName []
    _           -> notHandled "repCCallConv" (ppr conv)
-- | Represent a foreign-import safety annotation.
repSafety :: Safety -> DsM (Core TH.Safety)
repSafety safety = case safety of
    PlayRisky         -> rep2 unsafeName []
    PlayInterruptible -> rep2 interruptibleName []
    PlaySafe          -> rep2 safeName []
-- | Warning text emitted when a declaration has no TH representation.
ds_msg :: SDoc
ds_msg = ptext (sLit "Cannot desugar this Template Haskell declaration:")
-------------------------------------------------------
-- Constructors
-------------------------------------------------------
-- | Desugar a data-constructor declaration.  The first argument is
-- the head type variables of the data type, needed to compute the
-- equality context for GADT-style constructors (see 'mkGadtCtxt').
repC :: [Name] -> LConDecl Name -> DsM (Core TH.ConQ)

-- Plain H98 constructor: no existentials, no context, H98 result type.
repC _ (L _ (ConDecl { con_name = con, con_qvars = [], con_cxt = L _ []
                     , con_details = details, con_res = ResTyH98 }))
  = do { con1 <- lookupLOcc con         -- See note [Binders and occurrences]
       ; repConstr con1 details }

-- General case: compute the GADT equality context and tyvar renaming,
-- bind the renamed tyvars via dup bindings and gensym the genuinely
-- existential ones, then build a forallC wrapper.
repC tvs (L _ (ConDecl { con_name = con
                       , con_qvars = con_tvs, con_cxt = L _ ctxt
                       , con_details = details
                       , con_res = res_ty }))
  = do { (eq_ctxt, con_tv_subst) <- mkGadtCtxt tvs res_ty
       ; let ex_tvs = [ tv | tv <- con_tvs, not (hsLTyVarName tv `in_subst` con_tv_subst)]
       ; binds <- mapM dupBinder con_tv_subst
       ; dsExtendMetaEnv (mkNameEnv binds) $   -- Binds some of the con_tvs
         addTyVarBinds ex_tvs $ \ ex_bndrs ->  -- Binds the remaining con_tvs
    do { con1      <- lookupLOcc con    -- See note [Binders and occurrences]
       ; c'        <- repConstr con1 details
       ; ctxt'     <- repContext (eq_ctxt ++ ctxt)
       ; ex_bndrs' <- coreList tyVarBndrTyConName ex_bndrs
       ; rep2 forallCName [unC ex_bndrs', unC ctxt', unC c'] } }
-- | Is the given name a key of the substitution (an association list
-- keyed on the first component)?
in_subst :: Name -> [(Name,Name)] -> Bool
in_subst n = any ((n ==) . fst)
mkGadtCtxt :: [Name]           -- Tyvars of the data type
           -> ResType Name
           -> DsM (HsContext Name, [(Name,Name)])
-- Given a data type in GADT syntax, figure out the equality
-- context, so that we can represent it with an explicit
-- equality context, because that is the only way to express
-- the GADT in TH syntax
--
-- Example:
-- data T a b c where { MkT :: forall d e. d -> e -> T d [e] e
--     mkGadtCtxt [a,b,c] [d,e] (T d [e] e)
--   returns
--     (b~[e], c~e), [d->a]
--
-- This function is fiddly, but not really hard
mkGadtCtxt _ ResTyH98
  = return ([], [])
mkGadtCtxt data_tvs (ResTyGADT res_ty)
  -- The result type must be the head tycon applied to exactly as many
  -- arguments as the data type has tyvars; anything else is malformed.
  | let (head_ty, tys) = splitHsAppTys res_ty []
  , Just _ <- is_hs_tyvar head_ty
  , data_tvs `equalLength` tys
  = return (go [] [] (data_tvs `zip` tys))

  | otherwise
  = failWithDs (ptext (sLit "Malformed constructor result type") <+> ppr res_ty)
  where
    -- Walk the (data_tv, result-arg) pairs: a fresh bare type variable
    -- yields a renaming entry; anything else yields an equality
    -- constraint (data_tv ~ ty).
    go cxt subst [] = (cxt, subst)
    go cxt subst ((data_tv, ty) : rest)
      | Just con_tv <- is_hs_tyvar ty
      , isTyVarName con_tv
      , not (in_subst con_tv subst)
      = go cxt ((con_tv, data_tv) : subst) rest
      | otherwise
      = go (eq_pred : cxt) subst rest
      where
        loc = getLoc ty
        eq_pred = L loc (HsEqTy (L loc (HsTyVar data_tv)) ty)

    -- Strip parentheses and return the variable/tycon name, if any.
    is_hs_tyvar (L _ (HsTyVar n)) = Just n   -- Type variables *and* tycons
    is_hs_tyvar (L _ (HsParTy ty)) = is_hs_tyvar ty
    is_hs_tyvar _ = Nothing
-- | Represent a (possibly bang-annotated) constructor argument type
-- as a TH StrictTypeQ: a strictness marker paired with the type.
repBangTy :: LBangType Name -> DsM (Core (TH.StrictTypeQ))
repBangTy ty = do
    MkC strict <- rep2 strictness []
    MkC ty_rep <- repLTy inner_ty
    rep2 strictTypeName [strict, ty_rep]
  where
    (strictness, inner_ty) =
      case ty of
        L _ (HsBangTy HsUnpack t) -> (unpackedName,  t)
        L _ (HsBangTy _        t) -> (isStrictName,  t)
        _                         -> (notStrictName, ty)
-------------------------------------------------------
-- Deriving clause
-------------------------------------------------------
-- | Represent a deriving clause as a list of TH class names.
-- Only simple H98-style clauses (a bare class name, no arguments)
-- are supported; anything else is reported via 'notHandled'.
repDerivs :: Maybe [LHsType Name] -> DsM (Core [TH.Name])
repDerivs Nothing     = coreList nameTyConName []
repDerivs (Just ctxt) = mapM rep_deriv ctxt >>= coreList nameTyConName
  where
    rep_deriv :: LHsType Name -> DsM (Core TH.Name)
    rep_deriv ty =
      case splitHsClassTy_maybe (unLoc ty) of
        Just (cls, []) -> lookupOcc cls
        _              -> notHandled "Non-H98 deriving clause" (ppr ty)
-------------------------------------------------------
-- Signatures in a class decl, or a group of bindings
-------------------------------------------------------
-- | Represent a list of signatures, sorted by source location.
rep_sigs :: [LSig Name] -> DsM [Core TH.DecQ]
rep_sigs = liftM (de_loc . sort_by_loc) . rep_sigs'
-- | Represent signatures, keeping their source locations.
-- Signature forms we don't recognise are silently dropped.
rep_sigs' :: [LSig Name] -> DsM [(SrcSpan, Core TH.DecQ)]
rep_sigs' = liftM concat . mapM rep_sig
-- | Represent one signature; each may expand to several declarations
-- (one per name it binds) or to none at all.
rep_sig :: LSig Name -> DsM [(SrcSpan, Core TH.DecQ)]
-- Singleton => Ok
-- Empty => Too hard, signature ignored
rep_sig (L loc (TypeSig nms ty)) = rep_proto nms ty loc
-- Default (generic) signatures cannot be expressed in TH; error out.
rep_sig (L _ (GenericSig nm _)) = failWithDs msg
  where msg = vcat [ ptext (sLit "Illegal default signature for") <+> quotes (ppr nm)
                   , ptext (sLit "Default signatures are not supported by Template Haskell") ]
rep_sig (L loc (InlineSig nm ispec)) = rep_inline nm ispec loc
rep_sig (L loc (SpecSig nm ty ispec)) = rep_specialise nm ty ispec loc
-- All other signature forms are silently ignored.
rep_sig _ = return []
-- | Represent a type signature, producing one SigD per name it binds,
-- all sharing the signature's source location.
rep_proto :: [Located Name] -> LHsType Name -> SrcSpan
          -> DsM [(SrcSpan, Core TH.DecQ)]
rep_proto nms ty loc = mapM one_sig nms
  where
    one_sig nm = do
      nm_rep <- lookupLOcc nm
      ty_rep <- repLTy ty
      sig    <- repProto nm_rep ty_rep
      return (loc, sig)
-- | Represent an INLINE/NOINLINE pragma for the given name.
rep_inline :: Located Name
           -> InlinePragma      -- Never defaultInlinePragma
           -> SrcSpan
           -> DsM [(SrcSpan, Core TH.DecQ)]
rep_inline nm ispec loc = do
    nm_rep   <- lookupLOcc nm
    spec_rep <- rep_InlinePrag ispec
    pragma   <- repPragInl nm_rep spec_rep
    return [(loc, pragma)]
-- | Represent a SPECIALISE pragma (with or without INLINE) for the
-- given name at the given type.
rep_specialise :: Located Name -> LHsType Name -> InlinePragma -> SrcSpan
               -> DsM [(SrcSpan, Core TH.DecQ)]
rep_specialise nm ty ispec loc
  = do { nm1 <- lookupLOcc nm
       ; ty1 <- repLTy ty
       -- A default inline pragma means a plain SPECIALISE; otherwise
       -- it is SPECIALISE INLINE and we must carry the inline spec.
       ; pragma <- if isDefaultInlinePragma ispec
                   then repPragSpec nm1 ty1                  -- SPECIALISE
                   else do { ispec1 <- rep_InlinePrag ispec  -- SPECIALISE INLINE
                           ; repPragSpecInl nm1 ty1 ispec1 }
       ; return [(loc, pragma)]
       }
-- Extract all the information needed to build a TH.InlinePrag
--
-- Extract all the information needed to build a TH.InlinePrag
--
rep_InlinePrag :: InlinePragma -- Never defaultInlinePragma
               -> DsM (Core TH.InlineSpecQ)
rep_InlinePrag (InlinePragma { inl_act = activation, inl_rule = match, inl_inline = inline })
  -- With a phase restriction we use the phased TH constructor,
  -- otherwise the phase-free one.
  | Just (flag, phase) <- activation1
  = repInlineSpecPhase inline1 match1 flag phase
  | otherwise
  = repInlineSpecNoPhase inline1 match1
  where
      match1 = coreBool (rep_RuleMatchInfo match)
      activation1 = rep_Activation activation
      inline1 = case inline of
                   Inline -> coreBool True
                   _other -> coreBool False
                   -- We have no representation for Inlinable
      rep_RuleMatchInfo FunLike = False
      rep_RuleMatchInfo ConLike = True
      -- Map an activation to Nothing (no phase info in TH) or
      -- Just (before/after flag, phase number).
      rep_Activation NeverActive = Nothing -- We never have NOINLINE/AlwaysActive
      rep_Activation AlwaysActive = Nothing -- or INLINE/NeverActive
      rep_Activation (ActiveBefore phase) = Just (coreBool False,
                                                  MkC $ mkIntExprInt phase)
      rep_Activation (ActiveAfter phase) = Just (coreBool True,
                                                 MkC $ mkIntExprInt phase)
-------------------------------------------------------
-- Types
-------------------------------------------------------
-- We process type variable bindings in two ways, either by generating fresh
-- names or looking up existing names. The difference is crucial for type
-- families, depending on whether they are associated or not.
--
-- | Continuation-style processing of type-variable binders: given the
-- binders and an action expecting their Core-level TH representations,
-- produce the final quotation.  Instantiated by 'addTyVarBinds'
-- (gensym fresh names) and 'lookupTyVarBinds' (reuse existing ones).
type ProcessTyVarBinds a =
         [LHsTyVarBndr Name]                           -- the binders to be added
      -> ([Core TH.TyVarBndr] -> DsM (Core (TH.Q a)))  -- action in the ext env
      -> DsM (Core (TH.Q a))
-- gensym a list of type variables and enter them into the meta environment;
-- the computations passed as the second argument is executed in that extended
-- meta environment and gets the *new* names on Core-level as an argument
--
-- | Gensym the given type variables and enter them into the meta
-- environment; the continuation runs in the extended environment and
-- receives the *new* Core-level names.  The gensym bindings are
-- wrapped around the final result.
addTyVarBinds :: ProcessTyVarBinds a
addTyVarBinds tvs m
  = do { freshNames <- mkGenSyms (hsLTyVarNames tvs)
       ; term <- addBinds freshNames $
                 do { kindedBndrs <- mapM mk_tv_bndr (tvs `zip` freshNames)
                    ; m kindedBndrs }
       ; wrapGenSyms freshNames term }
  where
    -- Pair each original binder (for its kind annotation) with its
    -- fresh Core-level name.
    mk_tv_bndr (tv, (_,v)) = repTyVarBndrWithKind tv (coreVar v)
-- Look up a list of type variables; the computations passed as the second
-- argument gets the *new* names on Core-level as an argument
--
-- | Look up a list of type variables (already bound in the meta
-- environment, e.g. by an enclosing class declaration); the
-- continuation receives their existing Core-level names.
lookupTyVarBinds :: ProcessTyVarBinds a
lookupTyVarBinds tvs m =
  do
    let names = hsLTyVarNames tvs
        mkWithKinds = map repTyVarBndrWithKind tvs
    bndrs <- mapM lookupBinder names
    -- Attach each binder's kind annotation to its looked-up name.
    kindedBndrs <- zipWithM ($) mkWithKinds bndrs
    m kindedBndrs
-- Produce kinded binder constructors from the Haskell tyvar binders
--
-- | Produce the kinded TH binder for one HsSyn type-variable binder:
-- a plain binder for user tyvars, a kinded one when the source gave a kind.
repTyVarBndrWithKind :: LHsTyVarBndr Name
                     -> Core TH.Name -> DsM (Core TH.TyVarBndr)
repTyVarBndrWithKind (L _ bndr) nm = case bndr of
  UserTyVar {}       -> repPlainTV nm
  KindedTyVar _ ki _ -> repKind ki >>= repKindedTV nm
-- represent a type context
--
-- | Representation of a (located) type context.
repLContext :: LHsContext Name -> DsM (Core TH.CxtQ)
repLContext (L _ ctxt) = repContext ctxt

-- | Represent each predicate in the context, collect them into a Core-level
-- list, and wrap the result as a TH context.
repContext :: HsContext Name -> DsM (Core TH.CxtQ)
repContext ctxt = mapM repLPred ctxt >>= coreList predQTyConName >>= repCtxt

-- | Representation of a single (located) type predicate.
repLPred :: LHsType Name -> DsM (Core TH.PredQ)
repLPred = repPred . unLoc
-- | Represent a type predicate: either a class constraint (C t1 .. tn)
-- or an equality constraint (t1 ~ t2); anything else is unsupported.
repPred :: HsType Name -> DsM (Core TH.PredQ)
repPred ty
  | Just (cls, tys) <- splitHsClassTy_maybe ty
  = do
      cls1 <- lookupOcc cls
      tys1 <- repLTys tys
      tys2 <- coreList typeQTyConName tys1
      repClassP cls1 tys2
repPred (HsEqTy tyleft tyright)
  = do
      tyleft1  <- repLTy tyleft
      tyright1 <- repLTy tyright
      repEqualP tyleft1 tyright1
repPred ty
  = notHandled "Exotic predicate type" (ppr ty)
-- yield the representation of a list of types
--
-- | Representations for a list of located types.
repLTys :: [LHsType Name] -> DsM [Core TH.TypeQ]
repLTys = mapM repLTy

-- | Representation of one located type.
repLTy :: LHsType Name -> DsM (Core TH.TypeQ)
repLTy = repTy . unLoc
-- | Represent a (renamed) HsSyn type as a TH 'TypeQ'.  Dispatches on the
-- syntactic form; unsupported forms fall through to 'notHandled'.
repTy :: HsType Name -> DsM (Core TH.TypeQ)
repTy (HsForAllTy _ tvs ctxt ty)  =
  -- binders are gensym'd and scoped over both the context and the body
  addTyVarBinds tvs $ \bndrs -> do
    ctxt1  <- repLContext ctxt
    ty1    <- repLTy ty
    bndrs1 <- coreList tyVarBndrTyConName bndrs
    repTForall bndrs1 ctxt1 ty1
repTy (HsTyVar n)
  -- a lowercase occurrence is a type variable, otherwise a type constructor
  | isTvOcc (nameOccName n) = do
                                tv1 <- lookupTvOcc n
                                repTvar tv1
  | otherwise               = do
                                tc1 <- lookupOcc n
                                repNamedTyCon tc1
repTy (HsAppTy f a)         = do
                                f1 <- repLTy f
                                a1 <- repLTy a
                                repTapp f1 a1
repTy (HsFunTy f a)         = do
                                f1   <- repLTy f
                                a1   <- repLTy a
                                tcon <- repArrowTyCon
                                repTapps tcon [f1, a1]
repTy (HsListTy t)          = do
                                t1   <- repLTy t
                                tcon <- repListTyCon
                                repTapp tcon t1
repTy (HsPArrTy t)          = do
                                t1   <- repLTy t
                                tcon <- repTy (HsTyVar (tyConName parrTyCon))
                                repTapp tcon t1
repTy (HsTupleTy HsUnboxedTuple tys) = do
                                tys1 <- repLTys tys
                                tcon <- repUnboxedTupleTyCon (length tys)
                                repTapps tcon tys1
repTy (HsTupleTy _ tys)     = do tys1 <- repLTys tys
                                 tcon <- repTupleTyCon (length tys)
                                 repTapps tcon tys1
-- infix type operators are re-associated into prefix applications first
repTy (HsOpTy ty1 (_, n) ty2) = repLTy ((nlHsTyVar (unLoc n) `nlHsAppTy` ty1)
                                           `nlHsAppTy` ty2)
repTy (HsParTy t)           = repLTy t
repTy (HsKindSig t k)       = do
                                t1 <- repLTy t
                                k1 <- repKind k
                                repTSig t1 k1
repTy (HsSpliceTy splice _ _) = repSplice splice
repTy ty                      = notHandled "Exotic form of type" (ppr ty)
-- represent a kind
--
-- | Represent a kind.  The kind is split into argument kinds and result
-- kind; only @*@ is supported as a non-arrow kind.
repKind :: LHsKind Name -> DsM (Core TH.Kind)
repKind ki
  = do { let (kis, ki') = splitHsFunType ki
       ; kis_rep <- mapM repKind kis
       ; ki'_rep <- repNonArrowKind ki'
       -- rebuild k1 -> k2 -> ... -> res right-associatively
       ; foldrM repArrowK ki'_rep kis_rep
       }
  where
    repNonArrowKind (L _ (HsTyVar name)) | name == liftedTypeKindTyConName = repStarK
    repNonArrowKind k                    = notHandled "Exotic form of kind" (ppr k)
-----------------------------------------------------------------------------
-- Splices
-----------------------------------------------------------------------------
-- | Represent a splice.  The renamer/typechecker has already put the
-- desugarable expression into the meta environment under the splice's name.
repSplice :: HsSplice Name -> DsM (Core a)
-- See Note [How brackets and nested splices are handled] in TcSplice
-- We return a CoreExpr of any old type; the context should know
repSplice (HsSplice n _)
 = do { mb_val <- dsLookupMetaEnv n
       ; case mb_val of
           Just (Splice e) -> do { e' <- dsExpr e
                                 ; return (MkC e') }
           _ -> pprPanic "HsSplice" (ppr n) }
                        -- Should not happen; statically checked
-----------------------------------------------------------------------------
-- Expressions
-----------------------------------------------------------------------------
-- | Representations for a list of located expressions, packaged as a
-- Core-level list.
repLEs :: [LHsExpr Name] -> DsM (Core [TH.ExpQ])
repLEs es = mapM repLE es >>= coreList expQTyConName

-- FIXME: some of these panics should be converted into proper error messages
--        unless we can make sure that constructs, which are plainly not
--        supported in TH already lead to error messages at an earlier stage

-- | Represent one located expression, setting the source span for
-- error reporting while doing so.
repLE :: LHsExpr Name -> DsM (Core TH.ExpQ)
repLE (L loc e) = putSrcSpanDs loc (repE e)
-- | Represent a (renamed) HsSyn expression as a TH 'ExpQ'.  One equation
-- per syntactic form; unsupported forms report via 'notHandled'.
repE :: HsExpr Name -> DsM (Core TH.ExpQ)
repE (HsVar x)            =
  do { mb_val <- dsLookupMetaEnv x
     ; case mb_val of
        -- not in the meta env: a global (or outer-stage local) variable
        Nothing          -> do { str <- globalVar x
                               ; repVarOrCon x str }
        Just (Bound y)   -> repVarOrCon x (coreVar y)
        Just (Splice e)  -> do { e' <- dsExpr e
                               ; return (MkC e') } }
repE e@(HsIPVar _) = notHandled "Implicit parameters" (ppr e)
        -- Remember, we're desugaring renamer output here, so
        -- HsOverlit can definitely occur
repE (HsOverLit l) = do { a <- repOverloadedLiteral l; repLit a }
repE (HsLit l)     = do { a <- repLiteral l;           repLit a }
repE (HsLam (MatchGroup [m] _)) = repLambda m
repE (HsApp x y)   = do {a <- repLE x; b <- repLE y; repApp a b}
repE (OpApp e1 op _ e2) =
  do { arg1 <- repLE e1;
       arg2 <- repLE e2;
       the_op <- repLE op ;
       repInfixApp arg1 the_op arg2 }
repE (NegApp x _)        = do
                              a         <- repLE x
                              -- negation is desugared to an application of 'negate'
                              negateVar <- lookupOcc negateName >>= repVar
                              negateVar `repApp` a
repE (HsPar x)            = repLE x
repE (SectionL x y)       = do { a <- repLE x; b <- repLE y; repSectionL a b }
repE (SectionR x y)       = do { a <- repLE x; b <- repLE y; repSectionR a b }
repE (HsCase e (MatchGroup ms _)) = do { arg <- repLE e
                                       ; ms2 <- mapM repMatchTup ms
                                       ; repCaseE arg (nonEmptyCoreList ms2) }
repE (HsIf _ x y z)         = do
                              a <- repLE x
                              b <- repLE y
                              c <- repLE z
                              repCond a b c
repE (HsLet bs e)         = do { (ss,ds) <- repBinds bs
                               ; e2 <- addBinds ss (repLE e)
                               ; z <- repLetE ds e2
                               ; wrapGenSyms ss z }
-- FIXME: I haven't got the types here right yet
repE e@(HsDo ctxt sts _)
 | case ctxt of { DoExpr -> True; GhciStmt -> True; _ -> False }
 = do { (ss,zs) <- repLSts sts;
        e'      <- repDoE (nonEmptyCoreList zs);
        wrapGenSyms ss e' }
 | ListComp <- ctxt
 = do { (ss,zs) <- repLSts sts;
        e'      <- repComp (nonEmptyCoreList zs);
        wrapGenSyms ss e' }
 | otherwise
 = notHandled "mdo, monad comprehension and [: :]" (ppr e)
repE (ExplicitList _ es) = do { xs <- repLEs es; repListExp xs }
repE e@(ExplicitPArr _ _) = notHandled "Parallel arrays" (ppr e)
repE e@(ExplicitTuple es boxed)
  | not (all tupArgPresent es) = notHandled "Tuple sections" (ppr e)
  | isBoxed boxed = do { xs <- repLEs [e | Present e <- es]; repTup xs }
  | otherwise     = do { xs <- repLEs [e | Present e <- es]; repUnboxedTup xs }
repE (RecordCon c _ flds)
 = do { x <- lookupLOcc c;
        fs <- repFields flds;
        repRecCon x fs }
repE (RecordUpd e flds _ _ _)
 = do { x <- repLE e;
        fs <- repFields flds;
        repRecUpd x fs }
repE (ExprWithTySig e ty) = do { e1 <- repLE e; t1 <- repLTy ty; repSigExp e1 t1 }
repE (ArithSeq _ aseq) =
  case aseq of
    From e              -> do { ds1 <- repLE e; repFrom ds1 }
    FromThen e1 e2      -> do
                             ds1 <- repLE e1
                             ds2 <- repLE e2
                             repFromThen ds1 ds2
    FromTo   e1 e2      -> do
                             ds1 <- repLE e1
                             ds2 <- repLE e2
                             repFromTo ds1 ds2
    FromThenTo e1 e2 e3 -> do
                             ds1 <- repLE e1
                             ds2 <- repLE e2
                             ds3 <- repLE e3
                             repFromThenTo ds1 ds2 ds3
repE (HsSpliceE splice)  = repSplice splice
repE e@(PArrSeq {})      = notHandled "Parallel arrays" (ppr e)
repE e@(HsCoreAnn {})    = notHandled "Core annotations" (ppr e)
repE e@(HsSCC {})        = notHandled "Cost centres" (ppr e)
repE e@(HsTickPragma {}) = notHandled "Tick Pragma" (ppr e)
repE e@(HsBracketOut {}) = notHandled "TH brackets" (ppr e)
repE e                   = notHandled "Expression form" (ppr e)
-----------------------------------------------------------------------------
-- Building representations of auxillary structures like Match, Clause, Stmt,
-- | Represent one case alternative (exactly one pattern).  Pattern binders
-- are gensym'd before the pattern, guards and where-bindings are processed.
repMatchTup ::  LMatch Name -> DsM (Core TH.MatchQ)
repMatchTup (L _ (Match [p] _ (GRHSs guards wheres))) =
  do { ss1 <- mkGenSyms (collectPatBinders p)
     ; addBinds ss1 $ do {
     ; p1 <- repLP p
     ; (ss2,ds) <- repBinds wheres
     ; addBinds ss2 $ do {
     ; gs    <- repGuards guards
     ; match <- repMatch p1 gs ds
     ; wrapGenSyms (ss1++ss2) match }}}
repMatchTup _ = panic "repMatchTup: case alt with more than one arg"
-- | Represent one function clause (any number of patterns); the analogue
-- of 'repMatchTup' for function definitions.
repClauseTup ::  LMatch Name -> DsM (Core TH.ClauseQ)
repClauseTup (L _ (Match ps _ (GRHSs guards wheres))) =
  do { ss1 <- mkGenSyms (collectPatsBinders ps)
     ; addBinds ss1 $ do {
       ps1 <- repLPs ps
     ; (ss2,ds) <- repBinds wheres
     ; addBinds ss2 $ do {
       gs <- repGuards guards
     ; clause <- repClause ps1 gs ds
     ; wrapGenSyms (ss1++ss2) clause }}}
-- | Represent the right-hand sides of a match: a single unguarded RHS
-- becomes a normal body, otherwise each guarded alternative is processed.
repGuards ::  [LGRHS Name] ->  DsM (Core TH.BodyQ)
repGuards [L _ (GRHS [] e)]
  = do {a <- repLE e; repNormal a }
repGuards other
  = do { zs <- mapM process other;
     let {(xs, ys) = unzip zs};
         gd <- repGuarded (nonEmptyCoreList ys);
     wrapGenSyms (concat xs) gd }
  where
    process :: LGRHS Name -> DsM ([GenSymBind], (Core (TH.Q (TH.Guard, TH.Exp))))
    -- a single boolean guard expression
    process (L _ (GRHS [L _ (ExprStmt e1 _ _ _)] e2))
           = do { x <- repLNormalGE e1 e2;
                  return ([], x) }
    -- a pattern guard: a sequence of statements
    process (L _ (GRHS ss rhs))
           = do (gs, ss') <- repLSts ss
                rhs' <- addBinds gs $ repLE rhs
                g <- repPatGE (nonEmptyCoreList ss') rhs'
                return (gs, g)
-- | Represent the field bindings of a record construction or update as a
-- Core-level list of TH field expressions.
repFields :: HsRecordBinds Name -> DsM (Core [TH.Q TH.FieldExp])
repFields (HsRecFields { rec_flds = flds })
  = do { fnames <- mapM (lookupLOcc . hsRecFieldId) flds
       ; es     <- mapM (repLE . hsRecFieldArg) flds
       ; fs     <- zipWithM repFieldExp fnames es
       ; coreList fieldExpQTyConName fs }
-----------------------------------------------------------------------------
-- Representing Stmt's is tricky, especially if bound variables
-- shadow each other. Consider: [| do { x <- f 1; x <- f x; g x } |]
-- First gensym new names for every variable in any of the patterns.
-- both static (x'1 and x'2), and dynamic ((gensym "x") and (gensym "y"))
-- if variables didn't shaddow, the static gensym wouldn't be necessary
-- and we could reuse the original names (x and x).
--
-- do { x'1 <- gensym "x"
-- ; x'2 <- gensym "x"
-- ; doE [ BindSt (pvar x'1) [| f 1 |]
-- , BindSt (pvar x'2) [| f x |]
-- , NoBindSt [| g x |]
-- ]
-- }
-- The strategy is to translate a whole list of do-bindings by building a
-- bigger environment, and a bigger set of meta bindings
-- (like: x'1 <- gensym "x" ) and then combining these with the translations
-- of the expressions within the Do
-----------------------------------------------------------------------------
-- The helper function repSts computes the translation of each sub expression
-- and a bunch of prefix bindings denoting the dynamic renaming.
-- | Strip locations and represent a list of statements.
repLSts :: [LStmt Name] -> DsM ([GenSymBind], [Core TH.StmtQ])
repLSts = repSts . map unLoc
-- | Represent a list of do/comprehension statements, returning both the
-- gensym bindings introduced by the patterns and the statement
-- representations.  Later statements are processed in the environment
-- extended by earlier binders (see the shadowing note above).
repSts :: [Stmt Name] -> DsM ([GenSymBind], [Core TH.StmtQ])
repSts (BindStmt p e _ _ : ss) =
   do { e2 <- repLE e
      ; ss1 <- mkGenSyms (collectPatBinders p)
      ; addBinds ss1 $ do {
      ; p1 <- repLP p;
      ; (ss2,zs) <- repSts ss
      ; z <- repBindSt p1 e2
      ; return (ss1++ss2, z : zs) }}
repSts (LetStmt bs : ss) =
   do { (ss1,ds) <- repBinds bs
      ; z <- repLetSt ds
      ; (ss2,zs) <- addBinds ss1 (repSts ss)
      ; return (ss1++ss2, z : zs) }
repSts (ExprStmt e _ _ _ : ss) =
   do { e2 <- repLE e
      ; z <- repNoBindSt e2
      ; (ss2,zs) <- repSts ss
      ; return (ss2, z : zs) }
-- the final expression of a do-block is also a no-bind statement in TH
repSts [LastStmt e _]
  = do { e2 <- repLE e
       ; z <- repNoBindSt e2
       ; return ([], [z]) }
repSts []    = return ([],[])
repSts other = notHandled "Exotic statement" (ppr other)
-----------------------------------------------------------
-- Bindings
-----------------------------------------------------------
-- | Represent local bindings (@let@/@where@), returning the gensym
-- bindings for the bound names plus the declaration representations.
repBinds :: HsLocalBinds Name -> DsM ([GenSymBind], Core [TH.DecQ])
repBinds EmptyLocalBinds
  = do  { core_list <- coreList decQTyConName []
        ; return ([], core_list) }
repBinds b@(HsIPBinds _) = notHandled "Implicit parameters" (ppr b)
repBinds (HsValBinds decs)
 = do   { let { bndrs = collectHsValBinders decs }
                -- No need to worrry about detailed scopes within
                -- the binding group, because we are talking Names
                -- here, so we can safely treat it as a mutually
                -- recursive group
        ; ss        <- mkGenSyms bndrs
        ; prs       <- addBinds ss (rep_val_binds decs)
        ; core_list <- coreList decQTyConName
                                (de_loc (sort_by_loc prs))
        ; return (ss, core_list) }
-- | Represent a renamed value-binding group (bindings plus signatures).
rep_val_binds :: HsValBinds Name -> DsM [(SrcSpan, Core TH.DecQ)]
-- Assumes: all the binders of the binding are alrady in the meta-env
rep_val_binds (ValBindsOut binds sigs)
 = do { core1 <- rep_binds' (unionManyBags (map snd binds))
      ; core2 <- rep_sigs' sigs
      ; return (core1 ++ core2) }
rep_val_binds (ValBindsIn _ _)
 = panic "rep_val_binds: ValBindsIn"

-- | Represent a bag of bindings, sorted by source location.
rep_binds :: LHsBinds Name -> DsM [Core TH.DecQ]
rep_binds binds = do { binds_w_locs <- rep_binds' binds
                     ; return (de_loc (sort_by_loc binds_w_locs)) }

-- | As 'rep_binds', but keep the source location of each binding.
rep_binds' :: LHsBinds Name -> DsM [(SrcSpan, Core TH.DecQ)]
rep_binds' binds = mapM rep_bind (bagToList binds)
-- | Represent one binding as a TH declaration, tagged with its location.
rep_bind :: LHsBind Name -> DsM (SrcSpan, Core TH.DecQ)
-- Assumes: all the binders of the binding are alrady in the meta-env

-- Note GHC treats declarations of a variable (not a pattern)
-- e.g.  x = g 5 as a Fun MonoBinds. This is indicated by a single match
-- with an empty list of patterns
rep_bind (L loc (FunBind { fun_id = fn,
                           fun_matches = MatchGroup [L _ (Match [] _ (GRHSs guards wheres))] _ }))
 = do { (ss,wherecore) <- repBinds wheres
        ; guardcore <- addBinds ss (repGuards guards)
        ; fn'  <- lookupLBinder fn
        ; p    <- repPvar fn'
        ; ans  <- repVal p guardcore wherecore
        ; ans' <- wrapGenSyms ss ans
        ; return (loc, ans') }

-- a function with at least one pattern: one clause per match
rep_bind (L loc (FunBind { fun_id = fn, fun_matches = MatchGroup ms _ }))
 =   do { ms1 <- mapM repClauseTup ms
        ; fn' <- lookupLBinder fn
        ; ans <- repFun fn' (nonEmptyCoreList ms1)
        ; return (loc, ans) }

rep_bind (L loc (PatBind { pat_lhs = pat, pat_rhs = GRHSs guards wheres }))
 =   do { patcore <- repLP pat
        ; (ss,wherecore) <- repBinds wheres
        ; guardcore <- addBinds ss (repGuards guards)
        ; ans  <- repVal patcore guardcore wherecore
        ; ans' <- wrapGenSyms ss ans
        ; return (loc, ans') }

-- compiler-generated variable bindings: represent as a simple value binding
rep_bind (L _ (VarBind { var_id = v, var_rhs = e}))
 =   do { v' <- lookupBinder v
        ; e2 <- repLE e
        ; x <- repNormal e2
        ; patcore <- repPvar v'
        ; empty_decls <- coreList decQTyConName []
        ; ans <- repVal patcore x empty_decls
        ; return (srcLocSpan (getSrcLoc v), ans) }
rep_bind (L _ (AbsBinds {})) = panic "rep_bind: AbsBinds"
-----------------------------------------------------------------------------
-- Since everything in a Bind is mutually recursive we need rename all
-- all the variables simultaneously. For example:
-- [| AndMonoBinds (f x = x + g 2) (g x = f 1 + 2) |] would translate to
-- do { f'1 <- gensym "f"
-- ; g'2 <- gensym "g"
-- ; [ do { x'3 <- gensym "x"; fun f'1 [pvar x'3] [| x + g2 |]},
-- do { x'4 <- gensym "x"; fun g'2 [pvar x'4] [| f 1 + 2 |]}
-- ]}
-- This requires collecting the bindings (f'1 <- gensym "f"), and the
-- environment ( f |-> f'1 ) from each binding, and then unioning them
-- together. As we do this we collect GenSymBinds's which represent the renamed
-- variables bound by the Bindings. In order not to lose track of these
-- representations we build a shadow datatype MB with the same structure as
-- MonoBinds, but which has slots for the representations
-----------------------------------------------------------------------------
-- GHC allows a more general form of lambda abstraction than specified
-- by Haskell 98. In particular it allows guarded lambda's like :
-- (\ x | even x -> 0 | odd x -> 1) at the moment we can't represent this in
-- Haskell Template's Meta.Exp type so we punt if it isn't a simple thing like
-- (\ p1 .. pn -> exp) by causing an error.
-- | Represent a lambda abstraction.  Only the simple form
-- (\ p1 .. pn -> e) is supported; guarded lambdas are rejected.
-- Fixes: typo in the user-visible error message ("labmdas" -> "lambdas")
-- and drops a stray empty statement after the 'let'.
repLambda :: LMatch Name -> DsM (Core TH.ExpQ)
repLambda (L _ (Match ps _ (GRHSs [L _ (GRHS [] e)] EmptyLocalBinds)))
 = do { let bndrs = collectPatsBinders ps
      ; ss  <- mkGenSyms bndrs
      ; lam <- addBinds ss (
                do { xs <- repLPs ps; body <- repLE e; repLam xs body })
      ; wrapGenSyms ss lam }

repLambda (L _ m) = notHandled "Guarded lambdas" (pprMatch (LambdaExpr :: HsMatchContext Name) m)
-----------------------------------------------------------------------------
-- Patterns
-- repP deals with patterns. It assumes that we have already
-- walked over the pattern(s) once to collect the binders, and
-- have extended the environment. So every pattern-bound
-- variable should already appear in the environment.
-- Process a list of patterns
-- | Representations for a list of located patterns, packaged as a
-- Core-level list.
repLPs :: [LPat Name] -> DsM (Core [TH.PatQ])
repLPs ps = mapM repLP ps >>= coreList patQTyConName

-- | Representation of one located pattern.
repLP :: LPat Name -> DsM (Core TH.PatQ)
repLP = repP . unLoc
-- | Represent a (renamed) pattern as a TH 'PatQ'.  All pattern-bound
-- variables are assumed to be in the meta environment already.
-- Improvement: the RecCon branch now uses idiomatic @mapM (f . g)@
-- instead of @sequence $ map f (map g ...)@ (behavior unchanged).
repP :: Pat Name -> DsM (Core TH.PatQ)
repP (WildPat _)       = repPwild
repP (LitPat l)        = do { l2 <- repLiteral l; repPlit l2 }
repP (VarPat x)        = do { x' <- lookupBinder x; repPvar x' }
repP (LazyPat p)       = do { p1 <- repLP p; repPtilde p1 }
repP (BangPat p)       = do { p1 <- repLP p; repPbang p1 }
repP (AsPat x p)       = do { x' <- lookupLBinder x; p1 <- repLP p; repPaspat x' p1 }
repP (ParPat p)        = repLP p
repP (ListPat ps _)    = do { qs <- repLPs ps; repPlist qs }
repP (TuplePat ps boxed _)
  | isBoxed boxed       = do { qs <- repLPs ps; repPtup qs }
  | otherwise           = do { qs <- repLPs ps; repPunboxedTup qs }
repP (ConPatIn dc details)
 = do { con_str <- lookupLOcc dc
      ; case details of
         PrefixCon ps -> do { qs <- repLPs ps; repPcon con_str qs }
         RecCon rec   -> do { let flds = rec_flds rec
                            ; vs <- mapM (lookupLOcc . hsRecFieldId) flds
                            ; ps <- mapM (repLP . hsRecFieldArg) flds
                            ; fps <- zipWithM (\x y -> rep2 fieldPatName [unC x,unC y]) vs ps
                            ; fps' <- coreList fieldPatQTyConName fps
                            ; repPrec con_str fps' }
         InfixCon p1 p2 -> do { p1' <- repLP p1;
                                p2' <- repLP p2;
                                repPinfix p1' con_str p2' }
   }
repP (NPat l Nothing _)  = do { a <- repOverloadedLiteral l; repPlit a }
repP (ViewPat e p _) = do { e' <- repLE e; p' <- repLP p; repPview e' p' }
repP p@(NPat _ (Just _) _) = notHandled "Negative overloaded patterns" (ppr p)
repP p@(SigPatIn {})  = notHandled "Type signatures in patterns" (ppr p)
        -- The problem is to do with scoped type variables.
        -- To implement them, we have to implement the scoping rules
        -- here in DsMeta, and I don't want to do that today!
        --       do { p' <- repLP p; t' <- repLTy t; repPsig p' t' }
        --      repPsig :: Core TH.PatQ -> Core TH.TypeQ -> DsM (Core TH.PatQ)
        --      repPsig (MkC p) (MkC t) = rep2 sigPName [p, t]

repP other = notHandled "Exotic pattern" (ppr other)
----------------------------------------------------------
-- Declaration ordering helpers
-- | Order declaration/location pairs by their source location.
sort_by_loc :: [(SrcSpan, a)] -> [(SrcSpan, a)]
sort_by_loc = sortBy (\(l1, _) (l2, _) -> compare l1 l2)
-- | Drop the location component of each pair, keeping only the payloads.
de_loc :: [(a, b)] -> [b]
de_loc xs = [ b | (_, b) <- xs ]
----------------------------------------------------------
-- The meta-environment
-- A name/identifier association for fresh names of locally bound entities
-- | A name/identifier association for a fresh name of a locally bound
-- entity.
type GenSymBind = (Name, Id)    -- Gensym the string and bind it to the Id
                                -- I.e.         (x, x_id) means
                                --      let x_id = gensym "x" in ...
-- Generate a fresh name for a locally bound entity
-- | Generate a fresh Id (of type @Name@) for each locally bound entity.
mkGenSyms :: [Name] -> DsM [GenSymBind]
-- We can use the existing name.  For example:
--      [| \x_77 -> x_77 + x_77 |]
-- desugars to
--      do { x_77 <- genSym "x"; .... }
-- We use the same x_77 in the desugared program, but with the type Bndr
-- instead of Int
--
-- We do make it an Internal name, though (hence localiseName)
--
-- Nevertheless, it's monadic because we have to generate nameTy
mkGenSyms ns = do { var_ty <- lookupType nameTyConName
                  ; return [(nm, mkLocalId (localiseName nm) var_ty) | nm <- ns] }
-- | Run a computation with the meta environment extended by fresh names
-- for locally bound entities (the environment is part of the state carried
-- around by the desugarer monad).
addBinds :: [GenSymBind] -> DsM a -> DsM a
addBinds bs m = dsExtendMetaEnv env m
  where
    env = mkNameEnv (map (\(n, id) -> (n, Bound id)) bs)
-- | Associate a new name with the meta value already recorded for an
-- existing one; panics if the old name is not in the meta environment.
dupBinder :: (Name, Name) -> DsM (Name, DsMetaVal)
dupBinder (new, old) = do
  mb_val <- dsLookupMetaEnv old
  case mb_val of
    Nothing  -> pprPanic "dupBinder" (ppr old)
    Just val -> return (new, val)
-- Look up a locally bound name
--
-- | Look up a locally bound (located) name.
lookupLBinder :: Located Name -> DsM (Core TH.Name)
lookupLBinder (L _ n) = lookupBinder n

-- | Look up a locally bound name; delegates to 'lookupOcc' (see the
-- comment below for why the global case is needed too).
lookupBinder :: Name -> DsM (Core TH.Name)
lookupBinder = lookupOcc
-- Binders are brought into scope before the pattern or what-not is
-- desugared. Moreover, in instance declaration the binder of a method
-- will be the selector Id and hence a global; so we need the
-- globalVar case of lookupOcc
-- Look up a name that is either locally bound or a global name
--
-- * If it is a global name, generate the "original name" representation (ie,
-- the <module>:<name> form) for the associated entity
--
-- | Look up a located occurrence; see 'lookupOcc'.
lookupLOcc :: Located Name -> DsM (Core TH.Name)
-- Lookup an occurrence; it can't be a splice.
-- Use the in-scope bindings if they exist
lookupLOcc (L _ n) = lookupOcc n

-- | Look up an occurrence: a gensym'd local comes from the meta
-- environment, anything else is handled by 'globalVar'.  A 'Splice'
-- entry here is a bug (occurrences are never splices).
lookupOcc :: Name -> DsM (Core TH.Name)
lookupOcc n
  = do {  mb_val <- dsLookupMetaEnv n ;
          case mb_val of
                Nothing         -> globalVar n
                Just (Bound x)  -> return (coreVar x)
                Just (Splice _) -> pprPanic "repE:lookupOcc" (ppr n)
    }
-- | Look up a type-variable occurrence; unlike term variables these must
-- be bound in the meta environment, otherwise the program is rejected.
lookupTvOcc :: Name -> DsM (Core TH.Name)
-- Type variables can't be staged and are not lexically scoped in TH
lookupTvOcc n
  = do {  mb_val <- dsLookupMetaEnv n ;
          case mb_val of
                Just (Bound x)  -> return (coreVar x)
                _               -> failWithDs msg
    }
  where
    msg = vcat  [ ptext (sLit "Illegal lexically-scoped type variable") <+> quotes (ppr n)
                , ptext (sLit "Lexically scoped type variables are not supported by Template Haskell") ]
-- | Build the TH name for a variable not bound by the meta environment.
-- External names get the original-name (package:module.occ) form; local
-- names get an occ/unique pair.
globalVar :: Name -> DsM (Core TH.Name)
-- Not bound by the meta-env
-- Could be top-level; or could be local
--      f x = $(g [| x |])
-- Here the x will be local
globalVar name
  | isExternalName name
  = do  { MkC mod <- coreStringLit name_mod
        ; MkC pkg <- coreStringLit name_pkg
        ; MkC occ <- occNameLit name
        ; rep2 mk_varg [pkg,mod,occ] }
  | otherwise
  = do  { MkC occ <- occNameLit name
        ; MkC uni <- coreIntLit (getKey (getUnique name))
        ; rep2 mkNameLName [occ,uni] }
  where
      mod = ASSERT( isExternalName name) nameModule name
      name_mod = moduleNameString (moduleName mod)
      name_pkg = packageIdString (modulePackageId mod)
      name_occ = nameOccName name
      -- choose the data-/var-/tycon-flavoured smart constructor
      mk_varg | OccName.isDataOcc name_occ = mkNameG_dName
              | OccName.isVarOcc  name_occ = mkNameG_vName
              | OccName.isTcOcc   name_occ = mkNameG_tcName
              | otherwise                  = pprPanic "DsMeta.globalVar" (ppr name)
-- | Look up a type constructor by name and apply it to no arguments.
lookupType :: Name      -- Name of type constructor (e.g. TH.ExpQ)
           -> DsM Type  -- The type
lookupType tc_name = do
  tc <- dsLookupTyCon tc_name
  return (mkTyConApp tc [])
-- | Wrap a representation in the gensym bindings it needs.
wrapGenSyms :: [GenSymBind]
            -> Core (TH.Q a) -> DsM (Core (TH.Q a))
-- wrapGenSyms [(nm1,id1), (nm2,id2)] y
--      --> bindQ (gensym nm1) (\ id1 ->
--          bindQ (gensym nm2 (\ id2 ->
--          y))
wrapGenSyms binds body@(MkC b)
  = do  { var_ty <- lookupType nameTyConName
        ; go var_ty binds }
  where
    [elt_ty] = tcTyConAppArgs (exprType b)
        -- b :: Q a, so we can get the type 'a' by looking at the
        -- argument type. NB: this relies on Q being a data/newtype,
        -- not a type synonym
    -- fold over the bindings, innermost-first, building nested bindQ calls
    go _ [] = return body
    go var_ty ((name,id) : binds)
      = do { MkC body'  <- go var_ty binds
           ; lit_str    <- occNameLit name
           ; gensym_app <- repGensym lit_str
           ; repBindQ var_ty elt_ty
                      gensym_app (MkC (Lam id body')) }
-- | The occurrence-name string of a 'Name', as a Core string literal.
occNameLit :: Name -> DsM (Core String)
occNameLit = coreStringLit . occNameString . nameOccName
-- %*********************************************************************
-- %* *
-- Constructing code
-- %* *
-- %*********************************************************************
-----------------------------------------------------------------------------
-- PHANTOM TYPES for consistency. In order to make sure we do this correct
-- we invent a new datatype which uses phantom types.
-- | A 'CoreExpr' tagged with the TH type it represents; the phantom
-- parameter keeps the smart constructors below type-correct.
newtype Core a = MkC CoreExpr

-- | Unwrap the underlying 'CoreExpr'.
unC :: Core a -> CoreExpr
unC (MkC x) = x

-- | Apply the (global) function with the given name to the arguments;
-- the workhorse behind every smart constructor below.
rep2 :: Name -> [ CoreExpr ] -> DsM (Core a)
rep2 n xs = do { id <- dsLookupGlobalId n
               ; return (MkC (foldl App (Var id) xs)) }
-- Then we make "repConstructors" which use the phantom types for each of the
-- smart constructors of the Meta.Meta datatypes.
-- %*********************************************************************
-- %* *
-- The 'smart constructors'
-- %* *
-- %*********************************************************************
--------------- Patterns -----------------
-- Each rep* below builds an application of the corresponding TH library
-- combinator (via 'rep2'); the phantom types document the TH type built.
repPlit   :: Core TH.Lit -> DsM (Core TH.PatQ)
repPlit (MkC l) = rep2 litPName [l]
repPvar :: Core TH.Name -> DsM (Core TH.PatQ)
repPvar (MkC s) = rep2 varPName [s]
repPtup :: Core [TH.PatQ] -> DsM (Core TH.PatQ)
repPtup (MkC ps) = rep2 tupPName [ps]
repPunboxedTup :: Core [TH.PatQ] -> DsM (Core TH.PatQ)
repPunboxedTup (MkC ps) = rep2 unboxedTupPName [ps]
repPcon   :: Core TH.Name -> Core [TH.PatQ] -> DsM (Core TH.PatQ)
repPcon (MkC s) (MkC ps) = rep2 conPName [s, ps]
repPrec   :: Core TH.Name -> Core [(TH.Name,TH.PatQ)] -> DsM (Core TH.PatQ)
repPrec (MkC c) (MkC rps) = rep2 recPName [c,rps]
repPinfix :: Core TH.PatQ -> Core TH.Name -> Core TH.PatQ -> DsM (Core TH.PatQ)
repPinfix (MkC p1) (MkC n) (MkC p2) = rep2 infixPName [p1, n, p2]
repPtilde :: Core TH.PatQ -> DsM (Core TH.PatQ)
repPtilde (MkC p) = rep2 tildePName [p]
repPbang :: Core TH.PatQ -> DsM (Core TH.PatQ)
repPbang (MkC p) = rep2 bangPName [p]
repPaspat :: Core TH.Name -> Core TH.PatQ -> DsM (Core TH.PatQ)
repPaspat (MkC s) (MkC p) = rep2 asPName [s, p]
repPwild  :: DsM (Core TH.PatQ)
repPwild = rep2 wildPName []
repPlist :: Core [TH.PatQ] -> DsM (Core TH.PatQ)
repPlist (MkC ps) = rep2 listPName [ps]
repPview :: Core TH.ExpQ -> Core TH.PatQ -> DsM (Core TH.PatQ)
repPview (MkC e) (MkC p) = rep2 viewPName [e,p]
--------------- Expressions -----------------
-- Smart constructors for TH expression representations; each wraps the
-- corresponding TH combinator via 'rep2'.
repVarOrCon :: Name -> Core TH.Name -> DsM (Core TH.ExpQ)
-- dispatch on the occurrence flavour: data constructors get conE, others varE
repVarOrCon vc str | isDataOcc (nameOccName vc) = repCon str
                   | otherwise                  = repVar str
repVar :: Core TH.Name -> DsM (Core TH.ExpQ)
repVar (MkC s) = rep2 varEName [s]
repCon :: Core TH.Name -> DsM (Core TH.ExpQ)
repCon (MkC s) = rep2 conEName [s]
repLit :: Core TH.Lit -> DsM (Core TH.ExpQ)
repLit (MkC c) = rep2 litEName [c]
repApp :: Core TH.ExpQ -> Core TH.ExpQ -> DsM (Core TH.ExpQ)
repApp (MkC x) (MkC y) = rep2 appEName [x,y]
repLam :: Core [TH.PatQ] -> Core TH.ExpQ -> DsM (Core TH.ExpQ)
repLam (MkC ps) (MkC e) = rep2 lamEName [ps, e]
repTup :: Core [TH.ExpQ] -> DsM (Core TH.ExpQ)
repTup (MkC es) = rep2 tupEName [es]
repUnboxedTup :: Core [TH.ExpQ] -> DsM (Core TH.ExpQ)
repUnboxedTup (MkC es) = rep2 unboxedTupEName [es]
repCond :: Core TH.ExpQ -> Core TH.ExpQ -> Core TH.ExpQ -> DsM (Core TH.ExpQ)
repCond (MkC x) (MkC y) (MkC z) = rep2 condEName [x,y,z]
repLetE :: Core [TH.DecQ] -> Core TH.ExpQ -> DsM (Core TH.ExpQ)
repLetE (MkC ds) (MkC e) = rep2 letEName [ds, e]
repCaseE :: Core TH.ExpQ -> Core [TH.MatchQ] -> DsM( Core TH.ExpQ)
repCaseE (MkC e) (MkC ms) = rep2 caseEName [e, ms]
repDoE :: Core [TH.StmtQ] -> DsM (Core TH.ExpQ)
repDoE (MkC ss) = rep2 doEName [ss]
repComp :: Core [TH.StmtQ] -> DsM (Core TH.ExpQ)
repComp (MkC ss) = rep2 compEName [ss]
repListExp :: Core [TH.ExpQ] -> DsM (Core TH.ExpQ)
repListExp (MkC es) = rep2 listEName [es]
repSigExp :: Core TH.ExpQ -> Core TH.TypeQ -> DsM (Core TH.ExpQ)
repSigExp (MkC e) (MkC t) = rep2 sigEName [e,t]
repRecCon :: Core TH.Name -> Core [TH.Q TH.FieldExp]-> DsM (Core TH.ExpQ)
repRecCon (MkC c) (MkC fs) = rep2 recConEName [c,fs]
repRecUpd :: Core TH.ExpQ -> Core [TH.Q TH.FieldExp] -> DsM (Core TH.ExpQ)
repRecUpd (MkC e) (MkC fs) = rep2 recUpdEName [e,fs]
repFieldExp :: Core TH.Name -> Core TH.ExpQ -> DsM (Core (TH.Q TH.FieldExp))
repFieldExp (MkC n) (MkC x) = rep2 fieldExpName [n,x]
repInfixApp :: Core TH.ExpQ -> Core TH.ExpQ -> Core TH.ExpQ -> DsM (Core TH.ExpQ)
repInfixApp (MkC x) (MkC y) (MkC z) = rep2 infixAppName [x,y,z]
repSectionL :: Core TH.ExpQ -> Core TH.ExpQ -> DsM (Core TH.ExpQ)
repSectionL (MkC x) (MkC y) = rep2 sectionLName [x,y]
repSectionR :: Core TH.ExpQ -> Core TH.ExpQ -> DsM (Core TH.ExpQ)
repSectionR (MkC x) (MkC y) = rep2 sectionRName [x,y]
------------ Right hand sides (guarded expressions) ----
-- Smart constructors for bodies, guards, statements, arithmetic
-- sequences, matches and clauses; each wraps a TH combinator via 'rep2'.
repGuarded :: Core [TH.Q (TH.Guard, TH.Exp)] -> DsM (Core TH.BodyQ)
repGuarded (MkC pairs) = rep2 guardedBName [pairs]
repNormal :: Core TH.ExpQ -> DsM (Core TH.BodyQ)
repNormal (MkC e) = rep2 normalBName [e]
------------ Guards ----
repLNormalGE :: LHsExpr Name -> LHsExpr Name -> DsM (Core (TH.Q (TH.Guard, TH.Exp)))
repLNormalGE g e = do g' <- repLE g
                      e' <- repLE e
                      repNormalGE g' e'
repNormalGE :: Core TH.ExpQ -> Core TH.ExpQ -> DsM (Core (TH.Q (TH.Guard, TH.Exp)))
repNormalGE (MkC g) (MkC e) = rep2 normalGEName [g, e]
repPatGE :: Core [TH.StmtQ] -> Core TH.ExpQ -> DsM (Core (TH.Q (TH.Guard, TH.Exp)))
repPatGE (MkC ss) (MkC e) = rep2 patGEName [ss, e]
------------- Stmts -------------------
repBindSt :: Core TH.PatQ -> Core TH.ExpQ -> DsM (Core TH.StmtQ)
repBindSt (MkC p) (MkC e) = rep2 bindSName [p,e]
repLetSt :: Core [TH.DecQ] -> DsM (Core TH.StmtQ)
repLetSt (MkC ds) = rep2 letSName [ds]
repNoBindSt :: Core TH.ExpQ -> DsM (Core TH.StmtQ)
repNoBindSt (MkC e) = rep2 noBindSName [e]
-------------- Range (Arithmetic sequences) -----------
repFrom :: Core TH.ExpQ -> DsM (Core TH.ExpQ)
repFrom (MkC x) = rep2 fromEName [x]
repFromThen :: Core TH.ExpQ -> Core TH.ExpQ -> DsM (Core TH.ExpQ)
repFromThen (MkC x) (MkC y) = rep2 fromThenEName [x,y]
repFromTo :: Core TH.ExpQ -> Core TH.ExpQ -> DsM (Core TH.ExpQ)
repFromTo (MkC x) (MkC y) = rep2 fromToEName [x,y]
repFromThenTo :: Core TH.ExpQ -> Core TH.ExpQ -> Core TH.ExpQ -> DsM (Core TH.ExpQ)
repFromThenTo (MkC x) (MkC y) (MkC z) = rep2 fromThenToEName [x,y,z]
------------ Match and Clause Tuples -----------
repMatch :: Core TH.PatQ -> Core TH.BodyQ -> Core [TH.DecQ] -> DsM (Core TH.MatchQ)
repMatch (MkC p) (MkC bod) (MkC ds) = rep2 matchName [p, bod, ds]
repClause :: Core [TH.PatQ] -> Core TH.BodyQ -> Core [TH.DecQ] -> DsM (Core TH.ClauseQ)
repClause (MkC ps) (MkC bod) (MkC ds) = rep2 clauseName [ps, bod, ds]
-------------- Dec -----------------------------
-- Smart constructors for TH declaration representations.  The
-- data/newtype/tysyn constructors take an optional type-argument list:
-- Nothing builds an ordinary declaration, Just builds a family instance.
repVal :: Core TH.PatQ -> Core TH.BodyQ -> Core [TH.DecQ] -> DsM (Core TH.DecQ)
repVal (MkC p) (MkC b) (MkC ds) = rep2 valDName [p, b, ds]
repFun :: Core TH.Name -> Core [TH.ClauseQ] -> DsM (Core TH.DecQ)
repFun (MkC nm) (MkC b) = rep2 funDName [nm, b]
repData :: Core TH.CxtQ -> Core TH.Name -> Core [TH.TyVarBndr]
        -> Maybe (Core [TH.TypeQ])
        -> Core [TH.ConQ] -> Core [TH.Name] -> DsM (Core TH.DecQ)
repData (MkC cxt) (MkC nm) (MkC tvs) Nothing (MkC cons) (MkC derivs)
  = rep2 dataDName [cxt, nm, tvs, cons, derivs]
repData (MkC cxt) (MkC nm) (MkC _) (Just (MkC tys)) (MkC cons) (MkC derivs)
  = rep2 dataInstDName [cxt, nm, tys, cons, derivs]
repNewtype :: Core TH.CxtQ -> Core TH.Name -> Core [TH.TyVarBndr]
           -> Maybe (Core [TH.TypeQ])
           -> Core TH.ConQ -> Core [TH.Name] -> DsM (Core TH.DecQ)
repNewtype (MkC cxt) (MkC nm) (MkC tvs) Nothing (MkC con) (MkC derivs)
  = rep2 newtypeDName [cxt, nm, tvs, con, derivs]
repNewtype (MkC cxt) (MkC nm) (MkC _) (Just (MkC tys)) (MkC con) (MkC derivs)
  = rep2 newtypeInstDName [cxt, nm, tys, con, derivs]
repTySyn :: Core TH.Name -> Core [TH.TyVarBndr]
         -> Maybe (Core [TH.TypeQ])
         -> Core TH.TypeQ -> DsM (Core TH.DecQ)
repTySyn (MkC nm) (MkC tvs) Nothing (MkC rhs)
  = rep2 tySynDName [nm, tvs, rhs]
repTySyn (MkC nm) (MkC _) (Just (MkC tys)) (MkC rhs)
  = rep2 tySynInstDName [nm, tys, rhs]
repInst :: Core TH.CxtQ -> Core TH.TypeQ -> Core [TH.DecQ] -> DsM (Core TH.DecQ)
repInst (MkC cxt) (MkC ty) (MkC ds) = rep2 instanceDName [cxt, ty, ds]
repClass :: Core TH.CxtQ -> Core TH.Name -> Core [TH.TyVarBndr]
         -> Core [TH.FunDep] -> Core [TH.DecQ]
         -> DsM (Core TH.DecQ)
repClass (MkC cxt) (MkC cls) (MkC tvs) (MkC fds) (MkC ds)
  = rep2 classDName [cxt, cls, tvs, fds, ds]
repPragInl :: Core TH.Name -> Core TH.InlineSpecQ -> DsM (Core TH.DecQ)
repPragInl (MkC nm) (MkC ispec) = rep2 pragInlDName [nm, ispec]
repPragSpec :: Core TH.Name -> Core TH.TypeQ -> DsM (Core TH.DecQ)
repPragSpec (MkC nm) (MkC ty) = rep2 pragSpecDName [nm, ty]
repPragSpecInl :: Core TH.Name -> Core TH.TypeQ -> Core TH.InlineSpecQ
               -> DsM (Core TH.DecQ)
repPragSpecInl (MkC nm) (MkC ty) (MkC ispec)
  = rep2 pragSpecInlDName [nm, ty, ispec]
repFamilyNoKind :: Core TH.FamFlavour -> Core TH.Name -> Core [TH.TyVarBndr]
                -> DsM (Core TH.DecQ)
repFamilyNoKind (MkC flav) (MkC nm) (MkC tvs)
    = rep2 familyNoKindDName [flav, nm, tvs]
repFamilyKind :: Core TH.FamFlavour -> Core TH.Name -> Core [TH.TyVarBndr]
              -> Core TH.Kind
              -> DsM (Core TH.DecQ)
repFamilyKind (MkC flav) (MkC nm) (MkC tvs) (MkC ki)
    = rep2 familyKindDName [flav, nm, tvs, ki]
repInlineSpecNoPhase :: Core Bool -> Core Bool -> DsM (Core TH.InlineSpecQ)
repInlineSpecNoPhase (MkC inline) (MkC conlike)
  = rep2 inlineSpecNoPhaseName [inline, conlike]
repInlineSpecPhase :: Core Bool -> Core Bool -> Core Bool -> Core Int
                   -> DsM (Core TH.InlineSpecQ)
repInlineSpecPhase (MkC inline) (MkC conlike) (MkC beforeFrom) (MkC phase)
  = rep2 inlineSpecPhaseName [inline, conlike, beforeFrom, phase]
repFunDep :: Core [TH.Name] -> Core [TH.Name] -> DsM (Core TH.FunDep)
repFunDep (MkC xs) (MkC ys) = rep2 funDepName [xs, ys]
repProto :: Core TH.Name -> Core TH.TypeQ -> DsM (Core TH.DecQ)
repProto (MkC s) (MkC ty) = rep2 sigDName [s, ty]
repCtxt :: Core [TH.PredQ] -> DsM (Core TH.CxtQ)
repCtxt (MkC tys) = rep2 cxtName [tys]
repClassP :: Core TH.Name -> Core [TH.TypeQ] -> DsM (Core TH.PredQ)
repClassP (MkC cla) (MkC tys) = rep2 classPName [cla, tys]
repEqualP :: Core TH.TypeQ -> Core TH.TypeQ -> DsM (Core TH.PredQ)
repEqualP (MkC ty1) (MkC ty2) = rep2 equalPName [ty1, ty2]
-- | Representation of a data constructor declaration, dispatching on
-- how its fields were written: prefix, record, or infix syntax.
repConstr :: Core TH.Name -> HsConDeclDetails Name
          -> DsM (Core TH.ConQ)
repConstr con (PrefixCon ps)
    = do arg_tys  <- mapM repBangTy ps
         arg_tys1 <- coreList strictTypeQTyConName arg_tys
         rep2 normalCName [unC con, unC arg_tys1]
repConstr con (RecCon ips)
    = do arg_vs   <- mapM lookupLOcc (map cd_fld_name ips)
         arg_tys  <- mapM repBangTy (map cd_fld_type ips)
         -- Pair each field name with its (strictness, type) representation
         arg_vtys <- zipWithM (\x y -> rep2 varStrictTypeName [unC x, unC y])
                              arg_vs arg_tys
         arg_vtys' <- coreList varStrictTypeQTyConName arg_vtys
         rep2 recCName [unC con, unC arg_vtys']
repConstr con (InfixCon st1 st2)
    = do arg1 <- repBangTy st1
         arg2 <- repBangTy st2
         rep2 infixCName [unC arg1, unC con, unC arg2]
------------ Types -------------------

-- | Representation of a @forall@ type: binders, context, body.
repTForall :: Core [TH.TyVarBndr] -> Core TH.CxtQ -> Core TH.TypeQ
           -> DsM (Core TH.TypeQ)
repTForall (MkC tvars) (MkC ctxt) (MkC ty)
    = rep2 forallTName [tvars, ctxt, ty]

-- | Type variable.
repTvar :: Core TH.Name -> DsM (Core TH.TypeQ)
repTvar (MkC s) = rep2 varTName [s]

-- | Single type application.
repTapp :: Core TH.TypeQ -> Core TH.TypeQ -> DsM (Core TH.TypeQ)
repTapp (MkC t1) (MkC t2) = rep2 appTName [t1, t2]
-- | Left-associated application of a type to a list of argument types,
-- applying 'repTapp' once per argument.
repTapps :: Core TH.TypeQ -> [Core TH.TypeQ] -> DsM (Core TH.TypeQ)
repTapps fun args = case args of
  []       -> return fun
  (a : as) -> repTapp fun a >>= \fun' -> repTapps fun' as
-- | Type with an explicit kind signature.
repTSig :: Core TH.TypeQ -> Core TH.Kind -> DsM (Core TH.TypeQ)
repTSig (MkC ty) (MkC ki) = rep2 sigTName [ty, ki]
--------- Type constructors --------------

-- | A named type constructor.
repNamedTyCon :: Core TH.Name -> DsM (Core TH.TypeQ)
repNamedTyCon (MkC s) = rep2 conTName [s]

-- | Boxed tuple type constructor of the given arity.
repTupleTyCon :: Int -> DsM (Core TH.TypeQ)
-- Note: not Core Int; it's easier to be direct here
repTupleTyCon i = rep2 tupleTName [mkIntExprInt i]

-- | Unboxed tuple type constructor of the given arity.
repUnboxedTupleTyCon :: Int -> DsM (Core TH.TypeQ)
-- Note: not Core Int; it's easier to be direct here
repUnboxedTupleTyCon i = rep2 unboxedTupleTName [mkIntExprInt i]

-- | The function-arrow type constructor @(->)@.
repArrowTyCon :: DsM (Core TH.TypeQ)
repArrowTyCon = rep2 arrowTName []

-- | The list type constructor @[]@.
repListTyCon :: DsM (Core TH.TypeQ)
repListTyCon = rep2 listTName []
------------ Kinds -------------------

-- Type variable binders, without and with a kind annotation.
repPlainTV :: Core TH.Name -> DsM (Core TH.TyVarBndr)
repPlainTV (MkC nm) = rep2 plainTVName [nm]

repKindedTV :: Core TH.Name -> Core TH.Kind -> DsM (Core TH.TyVarBndr)
repKindedTV (MkC nm) (MkC ki) = rep2 kindedTVName [nm, ki]

-- | The kind @*@.
repStarK :: DsM (Core TH.Kind)
repStarK = rep2 starKName []

-- | Arrow kind @k1 -> k2@.
repArrowK :: Core TH.Kind -> Core TH.Kind -> DsM (Core TH.Kind)
repArrowK (MkC ki1) (MkC ki2) = rep2 arrowKName [ki1, ki2]
----------------------------------------------------------
--              Literals

-- | Desugar a Haskell literal to its TH representation.  Unboxed
-- numeric literals are first converted to their boxed equivalents
-- (Integer / Rational), which is what gets stored in the TH 'Lit';
-- any literal with no matching smart-constructor name is rejected
-- via 'notHandled'.
repLiteral :: HsLit -> DsM (Core TH.Lit)
repLiteral lit
  = do lit' <- case lit of
                 HsIntPrim i    -> mk_integer i
                 HsWordPrim w   -> mk_integer w
                 HsInt i        -> mk_integer i
                 HsFloatPrim r  -> mk_rational r
                 HsDoublePrim r -> mk_rational r
                 _              -> return lit
       lit_expr <- dsLit lit'
       case mb_lit_name of
         Just lit_name -> rep2 lit_name [lit_expr]
         Nothing       -> notHandled "Exotic literal" (ppr lit)
  where
    -- The TH smart constructor used for each kind of literal
    mb_lit_name = case lit of
       HsInteger _ _  -> Just integerLName
       HsInt     _    -> Just integerLName
       HsIntPrim _    -> Just intPrimLName
       HsWordPrim _   -> Just wordPrimLName
       HsFloatPrim _  -> Just floatPrimLName
       HsDoublePrim _ -> Just doublePrimLName
       HsChar _       -> Just charLName
       HsString _     -> Just stringLName
       HsRat _ _      -> Just rationalLName
       _              -> Nothing

-- Helpers building boxed literals, attaching the looked-up boxed type.
mk_integer :: Integer -> DsM HsLit
mk_integer i = do integer_ty <- lookupType integerTyConName
                  return $ HsInteger i integer_ty

mk_rational :: FractionalLit -> DsM HsLit
mk_rational r = do rat_ty <- lookupType rationalTyConName
                   return $ HsRat r rat_ty

mk_string :: FastString -> DsM HsLit
mk_string s = return $ HsString s
-- | Desugar an overloaded literal by converting its stored
-- 'OverLitVal' to a plain 'HsLit' and then using 'repLiteral'.
repOverloadedLiteral :: HsOverLit Name -> DsM (Core TH.Lit)
repOverloadedLiteral (OverLit { ol_val = val})
  = do { lit <- mk_lit val; repLiteral lit }
        -- The type Rational will be in the environment, because
        -- the smart constructor 'TH.Syntax.rationalL' uses it in its type,
        -- and rationalL is sucked in when any TH stuff is used

mk_lit :: OverLitVal -> DsM HsLit
mk_lit (HsIntegral i)   = mk_integer  i
mk_lit (HsFractional f) = mk_rational f
mk_lit (HsIsString s)   = mk_string   s
--------------- Miscellaneous -------------------

-- | Representation of @newName@: generate a fresh TH name.
repGensym :: Core String -> DsM (Core (TH.Q TH.Name))
repGensym (MkC lit_str) = rep2 newNameName [lit_str]

-- | Monadic bind in the Q monad; the two 'Type' arguments instantiate
-- the type variables of @bindQ@.
repBindQ :: Type -> Type        -- a and b
         -> Core (TH.Q a) -> Core (a -> TH.Q b) -> DsM (Core (TH.Q b))
repBindQ ty_a ty_b (MkC x) (MkC y)
  = rep2 bindQName [Type ty_a, Type ty_b, x, y]

-- | @sequence@ in the Q monad, at element type @ty_a@.
repSequenceQ :: Type -> Core [TH.Q a] -> DsM (Core (TH.Q [a]))
repSequenceQ ty_a (MkC list)
  = rep2 sequenceQName [Type ty_a, list]
------------ Lists and Tuples -------------------
-- turn a list of patterns into a single pattern matching a list

-- | Build a Core list expression, looking the element type up by the
-- name of its TyCon.
coreList :: Name        -- Of the TyCon of the element type
         -> [Core a] -> DsM (Core [a])
coreList tc_name es
  = do { elt_ty <- lookupType tc_name; return (coreList' elt_ty es) }

-- | Build a Core list expression given the element type directly.
coreList' :: Type       -- The element type
          -> [Core a] -> Core [a]
coreList' elt_ty es = MkC (mkListExpr elt_ty (map unC es ))
-- | Build a Core list expression from a non-empty list of elements,
-- taking the element type from the first element (so no type lookup
-- is needed).
nonEmptyCoreList :: [Core a] -> Core [a]
-- The list must be non-empty so we can get the element type
-- Otherwise use coreList
-- Fix: the panic message used to blame 'coreList'; name the actual
-- function so the crash is attributed correctly.
nonEmptyCoreList []           = panic "nonEmptyCoreList: empty argument"
nonEmptyCoreList xs@(MkC x:_) = MkC (mkListExpr (exprType x) (map unC xs))
-- | Desugar a Haskell 'String' into a Core string-literal expression.
coreStringLit :: String -> DsM (Core String)
coreStringLit str = mkStringExpr str >>= \e -> return (MkC e)
------------ Bool, Literals & Variables -------------------

-- | Core representation of a 'Bool', built by applying the
-- corresponding data constructor to no arguments.
coreBool :: Bool -> Core Bool
coreBool b = MkC (mkConApp (if b then trueDataCon else falseDataCon) [])

-- | Core representation of an 'Int' literal.
coreIntLit :: Int -> DsM (Core Int)
coreIntLit n = return (MkC (mkIntExprInt n))
-- | Refer to an 'Id' as a Core variable expression.
coreVar :: Id -> Core TH.Name   -- The Id has type Name
coreVar id = MkC (Var id)
----------------- Failure -----------------------

-- | Fail desugaring with a \"not (yet) handled by Template Haskell\"
-- message, showing the offending construct indented under the message.
notHandled :: String -> SDoc -> DsM a
notHandled what doc = failWithDs msg
  where
    msg = hang (text what <+> ptext (sLit "not (yet) handled by Template Haskell"))
             2 doc
-- %************************************************************************
-- %*                                                                      *
--              The known-key names for Template Haskell
-- %*                                                                      *
-- %************************************************************************

-- To add a name, do three things
--
-- 1) Allocate a key
-- 2) Make a "Name"
-- 3) Add the name to knownKeyNames

templateHaskellNames :: [Name]
-- The names that are implicitly mentioned by ``bracket''
-- Should stay in sync with the import list of DsMeta
templateHaskellNames = [
    returnQName, bindQName, sequenceQName, newNameName, liftName,
    mkNameName, mkNameG_vName, mkNameG_dName, mkNameG_tcName, mkNameLName,
    liftStringName,

    -- Lit
    charLName, stringLName, integerLName, intPrimLName, wordPrimLName,
    floatPrimLName, doublePrimLName, rationalLName,
    -- Pat
    litPName, varPName, tupPName, unboxedTupPName,
    conPName, tildePName, bangPName, infixPName,
    asPName, wildPName, recPName, listPName, sigPName, viewPName,
    -- FieldPat
    fieldPatName,
    -- Match
    matchName,
    -- Clause
    clauseName,
    -- Exp
    varEName, conEName, litEName, appEName, infixEName,
    infixAppName, sectionLName, sectionRName, lamEName,
    tupEName, unboxedTupEName,
    condEName, letEName, caseEName, doEName, compEName,
    fromEName, fromThenEName, fromToEName, fromThenToEName,
    listEName, sigEName, recConEName, recUpdEName,
    -- FieldExp
    fieldExpName,
    -- Body
    guardedBName, normalBName,
    -- Guard
    normalGEName, patGEName,
    -- Stmt
    bindSName, letSName, noBindSName, parSName,
    -- Dec
    funDName, valDName, dataDName, newtypeDName, tySynDName,
    classDName, instanceDName, sigDName, forImpDName,
    pragInlDName, pragSpecDName, pragSpecInlDName,
    familyNoKindDName, familyKindDName, dataInstDName, newtypeInstDName,
    tySynInstDName,
    -- Cxt
    cxtName,
    -- Pred
    classPName, equalPName,
    -- Strict
    isStrictName, notStrictName, unpackedName,
    -- Con
    normalCName, recCName, infixCName, forallCName,
    -- StrictType
    strictTypeName,
    -- VarStrictType
    varStrictTypeName,
    -- Type
    forallTName, varTName, conTName, appTName,
    tupleTName, unboxedTupleTName, arrowTName, listTName, sigTName,
    -- TyVarBndr
    plainTVName, kindedTVName,
    -- Kind
    starKName, arrowKName,
    -- Callconv
    cCallName, stdCallName,
    -- Safety
    unsafeName,
    safeName,
    interruptibleName,
    -- InlineSpec
    inlineSpecNoPhaseName, inlineSpecPhaseName,
    -- FunDep
    funDepName,
    -- FamFlavour
    typeFamName, dataFamName,

    -- And the tycons
    qTyConName, nameTyConName, patTyConName, fieldPatTyConName, matchQTyConName,
    clauseQTyConName, expQTyConName, fieldExpTyConName, predTyConName,
    stmtQTyConName, decQTyConName, conQTyConName, strictTypeQTyConName,
    varStrictTypeQTyConName, typeQTyConName, expTyConName, decTyConName,
    typeTyConName, tyVarBndrTyConName, matchTyConName, clauseTyConName,
    patQTyConName, fieldPatQTyConName, fieldExpQTyConName, funDepTyConName,
    predQTyConName, decsQTyConName,

    -- Quasiquoting
    quoteDecName, quoteTypeName, quoteExpName, quotePatName]
-- | The modules in which the known-key Template Haskell names live.
thSyn, thLib, qqLib :: Module
thSyn = mkTHModule (fsLit "Language.Haskell.TH.Syntax")
thLib = mkTHModule (fsLit "Language.Haskell.TH.Lib")
qqLib = mkTHModule (fsLit "Language.Haskell.TH.Quote")

-- | Make a Module in the template-haskell package.
mkTHModule :: FastString -> Module
mkTHModule m = mkModule thPackageId (mkModuleNameFS m)

-- Smart constructors for known-key names in each TH module, for
-- value-level names (varName) and type constructors (tcName).
libFun, libTc, thFun, thTc, qqFun :: FastString -> Unique -> Name
libFun = mk_known_key_name OccName.varName thLib
libTc  = mk_known_key_name OccName.tcName  thLib
thFun  = mk_known_key_name OccName.varName thSyn
thTc   = mk_known_key_name OccName.tcName  thSyn
qqFun  = mk_known_key_name OccName.varName qqLib
-------------------- TH.Syntax -----------------------
-- Known-key names for the type constructors of Language.Haskell.TH.Syntax.
qTyConName, nameTyConName, fieldExpTyConName, patTyConName,
    fieldPatTyConName, expTyConName, decTyConName, typeTyConName,
    tyVarBndrTyConName, matchTyConName, clauseTyConName, funDepTyConName,
    predTyConName :: Name
qTyConName        = thTc (fsLit "Q")         qTyConKey
nameTyConName     = thTc (fsLit "Name")      nameTyConKey
fieldExpTyConName = thTc (fsLit "FieldExp")  fieldExpTyConKey
patTyConName      = thTc (fsLit "Pat")       patTyConKey
fieldPatTyConName = thTc (fsLit "FieldPat")  fieldPatTyConKey
expTyConName      = thTc (fsLit "Exp")       expTyConKey
decTyConName      = thTc (fsLit "Dec")       decTyConKey
typeTyConName     = thTc (fsLit "Type")      typeTyConKey
tyVarBndrTyConName= thTc (fsLit "TyVarBndr") tyVarBndrTyConKey
matchTyConName    = thTc (fsLit "Match")     matchTyConKey
clauseTyConName   = thTc (fsLit "Clause")    clauseTyConKey
funDepTyConName   = thTc (fsLit "FunDep")    funDepTyConKey
predTyConName     = thTc (fsLit "Pred")      predTyConKey

-- Value-level names from Language.Haskell.TH.Syntax.
returnQName, bindQName, sequenceQName, newNameName, liftName,
    mkNameName, mkNameG_vName, mkNameG_dName, mkNameG_tcName,
    mkNameLName, liftStringName :: Name
returnQName    = thFun (fsLit "returnQ")    returnQIdKey
bindQName      = thFun (fsLit "bindQ")      bindQIdKey
sequenceQName  = thFun (fsLit "sequenceQ")  sequenceQIdKey
newNameName    = thFun (fsLit "newName")    newNameIdKey
liftName       = thFun (fsLit "lift")       liftIdKey
liftStringName = thFun (fsLit "liftString") liftStringIdKey
mkNameName     = thFun (fsLit "mkName")     mkNameIdKey
mkNameG_vName  = thFun (fsLit "mkNameG_v")  mkNameG_vIdKey
mkNameG_dName  = thFun (fsLit "mkNameG_d")  mkNameG_dIdKey
mkNameG_tcName = thFun (fsLit "mkNameG_tc") mkNameG_tcIdKey
mkNameLName    = thFun (fsLit "mkNameL")    mkNameLIdKey
-------------------- TH.Lib -----------------------
-- Known-key names for the smart constructors of Language.Haskell.TH.Lib,
-- grouped by the TH data type they build.

-- data Lit = ...
charLName, stringLName, integerLName, intPrimLName, wordPrimLName,
    floatPrimLName, doublePrimLName, rationalLName :: Name
charLName       = libFun (fsLit "charL")       charLIdKey
stringLName     = libFun (fsLit "stringL")     stringLIdKey
integerLName    = libFun (fsLit "integerL")    integerLIdKey
intPrimLName    = libFun (fsLit "intPrimL")    intPrimLIdKey
wordPrimLName   = libFun (fsLit "wordPrimL")   wordPrimLIdKey
floatPrimLName  = libFun (fsLit "floatPrimL")  floatPrimLIdKey
doublePrimLName = libFun (fsLit "doublePrimL") doublePrimLIdKey
rationalLName   = libFun (fsLit "rationalL")   rationalLIdKey

-- data Pat = ...
litPName, varPName, tupPName, unboxedTupPName, conPName, infixPName, tildePName, bangPName,
    asPName, wildPName, recPName, listPName, sigPName, viewPName :: Name
litPName        = libFun (fsLit "litP")        litPIdKey
varPName        = libFun (fsLit "varP")        varPIdKey
tupPName        = libFun (fsLit "tupP")        tupPIdKey
unboxedTupPName = libFun (fsLit "unboxedTupP") unboxedTupPIdKey
conPName        = libFun (fsLit "conP")        conPIdKey
infixPName      = libFun (fsLit "infixP")      infixPIdKey
tildePName      = libFun (fsLit "tildeP")      tildePIdKey
bangPName       = libFun (fsLit "bangP")       bangPIdKey
asPName         = libFun (fsLit "asP")         asPIdKey
wildPName       = libFun (fsLit "wildP")       wildPIdKey
recPName        = libFun (fsLit "recP")        recPIdKey
listPName       = libFun (fsLit "listP")       listPIdKey
sigPName        = libFun (fsLit "sigP")        sigPIdKey
viewPName       = libFun (fsLit "viewP")       viewPIdKey

-- type FieldPat = ...
fieldPatName :: Name
fieldPatName = libFun (fsLit "fieldPat") fieldPatIdKey

-- data Match = ...
matchName :: Name
matchName = libFun (fsLit "match") matchIdKey

-- data Clause = ...
clauseName :: Name
clauseName = libFun (fsLit "clause") clauseIdKey

-- data Exp = ...
varEName, conEName, litEName, appEName, infixEName, infixAppName,
    sectionLName, sectionRName, lamEName, tupEName, unboxedTupEName, condEName,
    letEName, caseEName, doEName, compEName :: Name
varEName        = libFun (fsLit "varE")        varEIdKey
conEName        = libFun (fsLit "conE")        conEIdKey
litEName        = libFun (fsLit "litE")        litEIdKey
appEName        = libFun (fsLit "appE")        appEIdKey
infixEName      = libFun (fsLit "infixE")      infixEIdKey
infixAppName    = libFun (fsLit "infixApp")    infixAppIdKey
sectionLName    = libFun (fsLit "sectionL")    sectionLIdKey
sectionRName    = libFun (fsLit "sectionR")    sectionRIdKey
lamEName        = libFun (fsLit "lamE")        lamEIdKey
tupEName        = libFun (fsLit "tupE")        tupEIdKey
unboxedTupEName = libFun (fsLit "unboxedTupE") unboxedTupEIdKey
condEName       = libFun (fsLit "condE")       condEIdKey
letEName        = libFun (fsLit "letE")        letEIdKey
caseEName       = libFun (fsLit "caseE")       caseEIdKey
doEName         = libFun (fsLit "doE")         doEIdKey
compEName       = libFun (fsLit "compE")       compEIdKey

-- ArithSeq skips a level
fromEName, fromThenEName, fromToEName, fromThenToEName :: Name
fromEName       = libFun (fsLit "fromE")       fromEIdKey
fromThenEName   = libFun (fsLit "fromThenE")   fromThenEIdKey
fromToEName     = libFun (fsLit "fromToE")     fromToEIdKey
fromThenToEName = libFun (fsLit "fromThenToE") fromThenToEIdKey
-- end ArithSeq

listEName, sigEName, recConEName, recUpdEName :: Name
listEName   = libFun (fsLit "listE")   listEIdKey
sigEName    = libFun (fsLit "sigE")    sigEIdKey
recConEName = libFun (fsLit "recConE") recConEIdKey
recUpdEName = libFun (fsLit "recUpdE") recUpdEIdKey

-- type FieldExp = ...
fieldExpName :: Name
fieldExpName = libFun (fsLit "fieldExp") fieldExpIdKey

-- data Body = ...
guardedBName, normalBName :: Name
guardedBName = libFun (fsLit "guardedB") guardedBIdKey
normalBName  = libFun (fsLit "normalB")  normalBIdKey

-- data Guard = ...
normalGEName, patGEName :: Name
normalGEName = libFun (fsLit "normalGE") normalGEIdKey
patGEName    = libFun (fsLit "patGE")    patGEIdKey

-- data Stmt = ...
bindSName, letSName, noBindSName, parSName :: Name
bindSName   = libFun (fsLit "bindS")   bindSIdKey
letSName    = libFun (fsLit "letS")    letSIdKey
noBindSName = libFun (fsLit "noBindS") noBindSIdKey
parSName    = libFun (fsLit "parS")    parSIdKey

-- data Dec = ...
funDName, valDName, dataDName, newtypeDName, tySynDName, classDName,
    instanceDName, sigDName, forImpDName, pragInlDName, pragSpecDName,
    pragSpecInlDName, familyNoKindDName, familyKindDName, dataInstDName,
    newtypeInstDName, tySynInstDName :: Name
funDName         = libFun (fsLit "funD")         funDIdKey
valDName         = libFun (fsLit "valD")         valDIdKey
dataDName        = libFun (fsLit "dataD")        dataDIdKey
newtypeDName     = libFun (fsLit "newtypeD")     newtypeDIdKey
tySynDName       = libFun (fsLit "tySynD")       tySynDIdKey
classDName       = libFun (fsLit "classD")       classDIdKey
instanceDName    = libFun (fsLit "instanceD")    instanceDIdKey
sigDName         = libFun (fsLit "sigD")         sigDIdKey
forImpDName      = libFun (fsLit "forImpD")      forImpDIdKey
pragInlDName     = libFun (fsLit "pragInlD")     pragInlDIdKey
pragSpecDName    = libFun (fsLit "pragSpecD")    pragSpecDIdKey
pragSpecInlDName = libFun (fsLit "pragSpecInlD") pragSpecInlDIdKey
familyNoKindDName= libFun (fsLit "familyNoKindD")familyNoKindDIdKey
familyKindDName  = libFun (fsLit "familyKindD")  familyKindDIdKey
dataInstDName    = libFun (fsLit "dataInstD")    dataInstDIdKey
newtypeInstDName = libFun (fsLit "newtypeInstD") newtypeInstDIdKey
tySynInstDName   = libFun (fsLit "tySynInstD")   tySynInstDIdKey

-- type Ctxt = ...
cxtName :: Name
cxtName = libFun (fsLit "cxt") cxtIdKey

-- data Pred = ...
classPName, equalPName :: Name
classPName = libFun (fsLit "classP") classPIdKey
equalPName = libFun (fsLit "equalP") equalPIdKey

-- data Strict = ...
isStrictName, notStrictName, unpackedName :: Name
isStrictName  = libFun (fsLit "isStrict")  isStrictKey
notStrictName = libFun (fsLit "notStrict") notStrictKey
unpackedName  = libFun (fsLit "unpacked")  unpackedKey

-- data Con = ...
normalCName, recCName, infixCName, forallCName :: Name
normalCName = libFun (fsLit "normalC") normalCIdKey
recCName    = libFun (fsLit "recC")    recCIdKey
infixCName  = libFun (fsLit "infixC")  infixCIdKey
forallCName = libFun (fsLit "forallC") forallCIdKey

-- type StrictType = ...
strictTypeName :: Name
strictTypeName = libFun (fsLit "strictType") strictTKey

-- type VarStrictType = ...
varStrictTypeName :: Name
varStrictTypeName = libFun (fsLit "varStrictType") varStrictTKey

-- data Type = ...
forallTName, varTName, conTName, tupleTName, unboxedTupleTName, arrowTName,
    listTName, appTName, sigTName :: Name
forallTName       = libFun (fsLit "forallT")       forallTIdKey
varTName          = libFun (fsLit "varT")          varTIdKey
conTName          = libFun (fsLit "conT")          conTIdKey
tupleTName        = libFun (fsLit "tupleT")        tupleTIdKey
unboxedTupleTName = libFun (fsLit "unboxedTupleT") unboxedTupleTIdKey
arrowTName        = libFun (fsLit "arrowT")        arrowTIdKey
listTName         = libFun (fsLit "listT")         listTIdKey
appTName          = libFun (fsLit "appT")          appTIdKey
sigTName          = libFun (fsLit "sigT")          sigTIdKey

-- data TyVarBndr = ...
plainTVName, kindedTVName :: Name
plainTVName  = libFun (fsLit "plainTV")  plainTVIdKey
kindedTVName = libFun (fsLit "kindedTV") kindedTVIdKey

-- data Kind = ...
starKName, arrowKName :: Name
starKName  = libFun (fsLit "starK")  starKIdKey
arrowKName = libFun (fsLit "arrowK") arrowKIdKey

-- data Callconv = ...
cCallName, stdCallName :: Name
cCallName   = libFun (fsLit "cCall")   cCallIdKey
stdCallName = libFun (fsLit "stdCall") stdCallIdKey

-- data Safety = ...
unsafeName, safeName, interruptibleName :: Name
unsafeName        = libFun (fsLit "unsafe")        unsafeIdKey
safeName          = libFun (fsLit "safe")          safeIdKey
interruptibleName = libFun (fsLit "interruptible") interruptibleIdKey

-- data InlineSpec = ...
inlineSpecNoPhaseName, inlineSpecPhaseName :: Name
inlineSpecNoPhaseName = libFun (fsLit "inlineSpecNoPhase") inlineSpecNoPhaseIdKey
inlineSpecPhaseName   = libFun (fsLit "inlineSpecPhase")   inlineSpecPhaseIdKey

-- data FunDep = ...
funDepName :: Name
funDepName = libFun (fsLit "funDep") funDepIdKey

-- data FamFlavour = ...
typeFamName, dataFamName :: Name
typeFamName = libFun (fsLit "typeFam") typeFamIdKey
dataFamName = libFun (fsLit "dataFam") dataFamIdKey

-- Type constructors exported from TH.Lib.
matchQTyConName, clauseQTyConName, expQTyConName, stmtQTyConName,
    decQTyConName, conQTyConName, strictTypeQTyConName,
    varStrictTypeQTyConName, typeQTyConName, fieldExpQTyConName,
    patQTyConName, fieldPatQTyConName, predQTyConName, decsQTyConName :: Name
matchQTyConName         = libTc (fsLit "MatchQ")         matchQTyConKey
clauseQTyConName        = libTc (fsLit "ClauseQ")        clauseQTyConKey
expQTyConName           = libTc (fsLit "ExpQ")           expQTyConKey
stmtQTyConName          = libTc (fsLit "StmtQ")          stmtQTyConKey
decQTyConName           = libTc (fsLit "DecQ")           decQTyConKey
decsQTyConName          = libTc (fsLit "DecsQ")          decsQTyConKey  -- Q [Dec]
conQTyConName           = libTc (fsLit "ConQ")           conQTyConKey
strictTypeQTyConName    = libTc (fsLit "StrictTypeQ")    strictTypeQTyConKey
varStrictTypeQTyConName = libTc (fsLit "VarStrictTypeQ") varStrictTypeQTyConKey
typeQTyConName          = libTc (fsLit "TypeQ")          typeQTyConKey
fieldExpQTyConName      = libTc (fsLit "FieldExpQ")      fieldExpQTyConKey
patQTyConName           = libTc (fsLit "PatQ")           patQTyConKey
fieldPatQTyConName      = libTc (fsLit "FieldPatQ")      fieldPatQTyConKey
predQTyConName          = libTc (fsLit "PredQ")          predQTyConKey

-- quasiquoting
quoteExpName, quotePatName, quoteDecName, quoteTypeName :: Name
quoteExpName  = qqFun (fsLit "quoteExp")  quoteExpKey
quotePatName  = qqFun (fsLit "quotePat")  quotePatKey
quoteDecName  = qqFun (fsLit "quoteDec")  quoteDecKey
quoteTypeName = qqFun (fsLit "quoteType") quoteTypeKey
-- TyConUniques available: 200-299
-- Check in PrelNames if you want to change this

-- Uniques for the TH type constructors above; each must be globally
-- unique within the compiler's TyCon unique range.
expTyConKey, matchTyConKey, clauseTyConKey, qTyConKey, expQTyConKey,
    decQTyConKey, patTyConKey, matchQTyConKey, clauseQTyConKey,
    stmtQTyConKey, conQTyConKey, typeQTyConKey, typeTyConKey, tyVarBndrTyConKey,
    decTyConKey, varStrictTypeQTyConKey, strictTypeQTyConKey,
    fieldExpTyConKey, fieldPatTyConKey, nameTyConKey, patQTyConKey,
    fieldPatQTyConKey, fieldExpQTyConKey, funDepTyConKey, predTyConKey,
    predQTyConKey, decsQTyConKey :: Unique
expTyConKey             = mkPreludeTyConUnique 200
matchTyConKey           = mkPreludeTyConUnique 201
clauseTyConKey          = mkPreludeTyConUnique 202
qTyConKey               = mkPreludeTyConUnique 203
expQTyConKey            = mkPreludeTyConUnique 204
decQTyConKey            = mkPreludeTyConUnique 205
patTyConKey             = mkPreludeTyConUnique 206
matchQTyConKey          = mkPreludeTyConUnique 207
clauseQTyConKey         = mkPreludeTyConUnique 208
stmtQTyConKey           = mkPreludeTyConUnique 209
conQTyConKey            = mkPreludeTyConUnique 210
typeQTyConKey           = mkPreludeTyConUnique 211
typeTyConKey            = mkPreludeTyConUnique 212
decTyConKey             = mkPreludeTyConUnique 213
varStrictTypeQTyConKey  = mkPreludeTyConUnique 214
strictTypeQTyConKey     = mkPreludeTyConUnique 215
fieldExpTyConKey        = mkPreludeTyConUnique 216
fieldPatTyConKey        = mkPreludeTyConUnique 217
nameTyConKey            = mkPreludeTyConUnique 218
patQTyConKey            = mkPreludeTyConUnique 219
fieldPatQTyConKey       = mkPreludeTyConUnique 220
fieldExpQTyConKey       = mkPreludeTyConUnique 221
funDepTyConKey          = mkPreludeTyConUnique 222
predTyConKey            = mkPreludeTyConUnique 223
predQTyConKey           = mkPreludeTyConUnique 224
tyVarBndrTyConKey       = mkPreludeTyConUnique 225
decsQTyConKey           = mkPreludeTyConUnique 226
-- IdUniques available: 200-399
-- If you want to change this, make sure you check in PrelNames

-- Uniques for the TH value-level names above, allocated in blocks
-- per TH data type so related keys stay contiguous.
returnQIdKey, bindQIdKey, sequenceQIdKey, liftIdKey, newNameIdKey,
    mkNameIdKey, mkNameG_vIdKey, mkNameG_dIdKey, mkNameG_tcIdKey,
    mkNameLIdKey :: Unique
returnQIdKey        = mkPreludeMiscIdUnique 200
bindQIdKey          = mkPreludeMiscIdUnique 201
sequenceQIdKey      = mkPreludeMiscIdUnique 202
liftIdKey           = mkPreludeMiscIdUnique 203
newNameIdKey        = mkPreludeMiscIdUnique 204
mkNameIdKey         = mkPreludeMiscIdUnique 205
mkNameG_vIdKey      = mkPreludeMiscIdUnique 206
mkNameG_dIdKey      = mkPreludeMiscIdUnique 207
mkNameG_tcIdKey     = mkPreludeMiscIdUnique 208
mkNameLIdKey        = mkPreludeMiscIdUnique 209

-- data Lit = ...
charLIdKey, stringLIdKey, integerLIdKey, intPrimLIdKey, wordPrimLIdKey,
    floatPrimLIdKey, doublePrimLIdKey, rationalLIdKey :: Unique
charLIdKey        = mkPreludeMiscIdUnique 220
stringLIdKey      = mkPreludeMiscIdUnique 221
integerLIdKey     = mkPreludeMiscIdUnique 222
intPrimLIdKey     = mkPreludeMiscIdUnique 223
wordPrimLIdKey    = mkPreludeMiscIdUnique 224
floatPrimLIdKey   = mkPreludeMiscIdUnique 225
doublePrimLIdKey  = mkPreludeMiscIdUnique 226
rationalLIdKey    = mkPreludeMiscIdUnique 227

liftStringIdKey :: Unique
liftStringIdKey     = mkPreludeMiscIdUnique 228

-- data Pat = ...
litPIdKey, varPIdKey, tupPIdKey, unboxedTupPIdKey, conPIdKey, infixPIdKey, tildePIdKey, bangPIdKey,
    asPIdKey, wildPIdKey, recPIdKey, listPIdKey, sigPIdKey, viewPIdKey :: Unique
litPIdKey         = mkPreludeMiscIdUnique 240
varPIdKey         = mkPreludeMiscIdUnique 241
tupPIdKey         = mkPreludeMiscIdUnique 242
unboxedTupPIdKey  = mkPreludeMiscIdUnique 243
conPIdKey         = mkPreludeMiscIdUnique 244
infixPIdKey       = mkPreludeMiscIdUnique 245
tildePIdKey       = mkPreludeMiscIdUnique 246
bangPIdKey        = mkPreludeMiscIdUnique 247
asPIdKey          = mkPreludeMiscIdUnique 248
wildPIdKey        = mkPreludeMiscIdUnique 249
recPIdKey         = mkPreludeMiscIdUnique 250
listPIdKey        = mkPreludeMiscIdUnique 251
sigPIdKey         = mkPreludeMiscIdUnique 252
viewPIdKey        = mkPreludeMiscIdUnique 253

-- type FieldPat = ...
fieldPatIdKey :: Unique
fieldPatIdKey       = mkPreludeMiscIdUnique 260

-- data Match = ...
matchIdKey :: Unique
matchIdKey          = mkPreludeMiscIdUnique 261

-- data Clause = ...
clauseIdKey :: Unique
clauseIdKey         = mkPreludeMiscIdUnique 262

-- data Exp = ...
varEIdKey, conEIdKey, litEIdKey, appEIdKey, infixEIdKey, infixAppIdKey,
    sectionLIdKey, sectionRIdKey, lamEIdKey, tupEIdKey, unboxedTupEIdKey,
    condEIdKey,
    letEIdKey, caseEIdKey, doEIdKey, compEIdKey,
    fromEIdKey, fromThenEIdKey, fromToEIdKey, fromThenToEIdKey,
    listEIdKey, sigEIdKey, recConEIdKey, recUpdEIdKey :: Unique
varEIdKey         = mkPreludeMiscIdUnique 270
conEIdKey         = mkPreludeMiscIdUnique 271
litEIdKey         = mkPreludeMiscIdUnique 272
appEIdKey         = mkPreludeMiscIdUnique 273
infixEIdKey       = mkPreludeMiscIdUnique 274
infixAppIdKey     = mkPreludeMiscIdUnique 275
sectionLIdKey     = mkPreludeMiscIdUnique 276
sectionRIdKey     = mkPreludeMiscIdUnique 277
lamEIdKey         = mkPreludeMiscIdUnique 278
tupEIdKey         = mkPreludeMiscIdUnique 279
unboxedTupEIdKey  = mkPreludeMiscIdUnique 280
condEIdKey        = mkPreludeMiscIdUnique 281
letEIdKey         = mkPreludeMiscIdUnique 282
caseEIdKey        = mkPreludeMiscIdUnique 283
doEIdKey          = mkPreludeMiscIdUnique 284
compEIdKey        = mkPreludeMiscIdUnique 285
fromEIdKey        = mkPreludeMiscIdUnique 286
fromThenEIdKey    = mkPreludeMiscIdUnique 287
fromToEIdKey      = mkPreludeMiscIdUnique 288
fromThenToEIdKey  = mkPreludeMiscIdUnique 289
listEIdKey        = mkPreludeMiscIdUnique 290
sigEIdKey         = mkPreludeMiscIdUnique 291
recConEIdKey      = mkPreludeMiscIdUnique 292
recUpdEIdKey      = mkPreludeMiscIdUnique 293

-- type FieldExp = ...
fieldExpIdKey :: Unique
fieldExpIdKey       = mkPreludeMiscIdUnique 310

-- data Body = ...
guardedBIdKey, normalBIdKey :: Unique
guardedBIdKey       = mkPreludeMiscIdUnique 311
normalBIdKey        = mkPreludeMiscIdUnique 312

-- data Guard = ...
normalGEIdKey, patGEIdKey :: Unique
normalGEIdKey       = mkPreludeMiscIdUnique 313
patGEIdKey          = mkPreludeMiscIdUnique 314

-- data Stmt = ...
bindSIdKey, letSIdKey, noBindSIdKey, parSIdKey :: Unique
bindSIdKey          = mkPreludeMiscIdUnique 320
letSIdKey           = mkPreludeMiscIdUnique 321
noBindSIdKey        = mkPreludeMiscIdUnique 322
parSIdKey           = mkPreludeMiscIdUnique 323

-- data Dec = ...
funDIdKey, valDIdKey, dataDIdKey, newtypeDIdKey, tySynDIdKey,
    classDIdKey, instanceDIdKey, sigDIdKey, forImpDIdKey, pragInlDIdKey,
    pragSpecDIdKey, pragSpecInlDIdKey, familyNoKindDIdKey, familyKindDIdKey,
    dataInstDIdKey, newtypeInstDIdKey, tySynInstDIdKey :: Unique
funDIdKey           = mkPreludeMiscIdUnique 330
valDIdKey           = mkPreludeMiscIdUnique 331
dataDIdKey          = mkPreludeMiscIdUnique 332
newtypeDIdKey       = mkPreludeMiscIdUnique 333
tySynDIdKey         = mkPreludeMiscIdUnique 334
classDIdKey         = mkPreludeMiscIdUnique 335
instanceDIdKey      = mkPreludeMiscIdUnique 336
sigDIdKey           = mkPreludeMiscIdUnique 337
forImpDIdKey        = mkPreludeMiscIdUnique 338
pragInlDIdKey       = mkPreludeMiscIdUnique 339
pragSpecDIdKey      = mkPreludeMiscIdUnique 340
pragSpecInlDIdKey   = mkPreludeMiscIdUnique 341
familyNoKindDIdKey  = mkPreludeMiscIdUnique 342
familyKindDIdKey    = mkPreludeMiscIdUnique 343
dataInstDIdKey      = mkPreludeMiscIdUnique 344
newtypeInstDIdKey   = mkPreludeMiscIdUnique 345
tySynInstDIdKey     = mkPreludeMiscIdUnique 346

-- type Cxt = ...
cxtIdKey :: Unique
cxtIdKey            = mkPreludeMiscIdUnique 360

-- data Pred = ...
classPIdKey, equalPIdKey :: Unique
classPIdKey         = mkPreludeMiscIdUnique 361
equalPIdKey         = mkPreludeMiscIdUnique 362

-- data Strict = ...
isStrictKey, notStrictKey, unpackedKey :: Unique
isStrictKey         = mkPreludeMiscIdUnique 363
notStrictKey        = mkPreludeMiscIdUnique 364
unpackedKey         = mkPreludeMiscIdUnique 365

-- data Con = ...
normalCIdKey, recCIdKey, infixCIdKey, forallCIdKey :: Unique
normalCIdKey        = mkPreludeMiscIdUnique 370
recCIdKey           = mkPreludeMiscIdUnique 371
infixCIdKey         = mkPreludeMiscIdUnique 372
forallCIdKey        = mkPreludeMiscIdUnique 373

-- type StrictType = ...
strictTKey :: Unique
strictTKey          = mkPreludeMiscIdUnique 374

-- type VarStrictType = ...
varStrictTKey :: Unique
varStrictTKey       = mkPreludeMiscIdUnique 375

-- data Type = ...
forallTIdKey, varTIdKey, conTIdKey, tupleTIdKey, unboxedTupleTIdKey, arrowTIdKey,
    listTIdKey, appTIdKey, sigTIdKey :: Unique
forallTIdKey        = mkPreludeMiscIdUnique 380
varTIdKey           = mkPreludeMiscIdUnique 381
conTIdKey           = mkPreludeMiscIdUnique 382
tupleTIdKey         = mkPreludeMiscIdUnique 383
unboxedTupleTIdKey  = mkPreludeMiscIdUnique 384
arrowTIdKey         = mkPreludeMiscIdUnique 385
listTIdKey          = mkPreludeMiscIdUnique 386
appTIdKey           = mkPreludeMiscIdUnique 387
sigTIdKey           = mkPreludeMiscIdUnique 388

-- data TyVarBndr = ...
plainTVIdKey, kindedTVIdKey :: Unique
plainTVIdKey        = mkPreludeMiscIdUnique 390
kindedTVIdKey       = mkPreludeMiscIdUnique 391

-- data Kind = ...
starKIdKey, arrowKIdKey :: Unique
starKIdKey          = mkPreludeMiscIdUnique 392
arrowKIdKey         = mkPreludeMiscIdUnique 393

-- data Callconv = ...
cCallIdKey, stdCallIdKey :: Unique
cCallIdKey          = mkPreludeMiscIdUnique 394
stdCallIdKey        = mkPreludeMiscIdUnique 395

-- data Safety = ...
-- NOTE(review): unique 402 is skipped here (presumably retired);
-- check PrelNames before reusing it.
unsafeIdKey, safeIdKey, interruptibleIdKey :: Unique
unsafeIdKey         = mkPreludeMiscIdUnique 400
safeIdKey           = mkPreludeMiscIdUnique 401
interruptibleIdKey  = mkPreludeMiscIdUnique 403

-- data InlineSpec = ...
inlineSpecNoPhaseIdKey, inlineSpecPhaseIdKey :: Unique
inlineSpecNoPhaseIdKey = mkPreludeMiscIdUnique 404
inlineSpecPhaseIdKey   = mkPreludeMiscIdUnique 405

-- data FunDep = ...
funDepIdKey :: Unique
funDepIdKey = mkPreludeMiscIdUnique 406

-- data FamFlavour = ...
typeFamIdKey, dataFamIdKey :: Unique
typeFamIdKey = mkPreludeMiscIdUnique 407
dataFamIdKey = mkPreludeMiscIdUnique 408

-- quasiquoting
quoteExpKey, quotePatKey, quoteDecKey, quoteTypeKey :: Unique
quoteExpKey  = mkPreludeMiscIdUnique 410
quotePatKey  = mkPreludeMiscIdUnique 411
quoteDecKey  = mkPreludeMiscIdUnique 412
quoteTypeKey = mkPreludeMiscIdUnique 413
| mcmaniac/ghc | compiler/deSugar/DsMeta.hs | bsd-3-clause | 90,633 | 376 | 19 | 21,402 | 26,097 | 13,557 | 12,540 | 1,543 | 16 |
-- {-# OPTIONS_GHC -fno-warn-missing-methods #-}
-- -fno-warn-redundant-constraints
{-# LANGUAGE DeriveDataTypeable, ExistentialQuantification, RankNTypes, MultiParamTypeClasses, FunctionalDependencies, FlexibleInstances, FlexibleContexts, PatternGuards, DatatypeContexts #-}
module T4355 where
import Control.Arrow
import Control.Monad.Trans -- From mtl
import Control.Monad.Reader -- Ditto
import Data.Typeable
import Data.Maybe
-- NOTE(review): the module name (T4355) and the OPTIONS_GHC comments
-- above suggest this is a GHC regression test; the definitions below
-- exist only to reproduce a typechecker scenario and are deliberately
-- minimal (missing instance methods, incomplete pattern matches).
class (Eq t, Typeable t) => Transformer t a | t -> a where
    transform :: (LayoutClass l a) => t -> l a ->
        (forall l'. (LayoutClass l' a) => l' a -> (l' a -> l a) -> b) -> b

class HList c a where
    find :: (Transformer t a) => c -> t -> Maybe Int

class Typeable a => Message a

-- Existential wrapper pairing a layout with a projection back to l a;
-- note the (DatatypeContexts) context on the data declaration.
data (LayoutClass l a) => EL l a = forall l'. (LayoutClass l' a) => EL (l' a) (l' a -> l a)

unEL :: (LayoutClass l a) => EL l a -> (forall l'. (LayoutClass l' a) => l' a -> b) -> b
unEL (EL x _) k = k x

transform' :: (Transformer t a, LayoutClass l a) => t -> EL l a -> EL l a
transform' t (EL l det) = transform t l (\l' det' -> EL l' (det . det'))

data Toggle a = forall t. (Transformer t a) => Toggle t
    deriving (Typeable)

instance (Typeable a) => Message (Toggle a)

data MultiToggle ts l a = MultiToggle{
    currLayout :: EL l a,
    currIndex :: Maybe Int,
    transformers :: ts
    }

-- Intentionally empty instance body (missing-methods warning disabled).
instance (Show ts, Show (l a), LayoutClass l a) => Show (MultiToggle ts l a) where

class Show (layout a) => LayoutClass layout a where
    handleMessage :: layout a -> SomeMessage -> IO (Maybe (layout a))

instance (Typeable a, Show ts, HList ts a, LayoutClass l a)
    => LayoutClass (MultiToggle ts l) a where
    handleMessage mt m
        | Just (Toggle t) <- fromMessage m
        , i@(Just _) <- find (transformers mt) t
        = case currLayout mt of
            EL l det -> do
                return . Just $
                    mt {
                        currLayout = (if cur then id else transform' t) (EL (det l) id)
                    }
        where cur = (i == currIndex mt)

data SomeMessage = forall a. Message a => SomeMessage a

fromMessage :: Message m => SomeMessage -> Maybe m
fromMessage (SomeMessage m) = cast m
| rahulmutt/ghcvm | tests/suite/typecheck/compile/T4355/T4355.hs | bsd-3-clause | 2,211 | 0 | 19 | 597 | 864 | 448 | 416 | -1 | -1 |
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE DeriveAnyClass #-}
{-# LANGUAGE DataKinds #-}
-- | Netlink routing interface
module Haskus.System.Linux.Internals.NetlinkRoute
( MessageType (..)
, RoutingMessage (..)
, RoutingType (..)
, ProtocolType (..)
, MessageFlag (..)
, MessageFlags
, RoutingAttributeType (..)
, RoutingNextHop (..)
, NextHopFlag (..)
, NextHopFlags
, CacheInfo (..)
, Metrics (..)
, Feature (..)
, Features
, MFCStats (..)
, LinkInfo (..)
, PrefixInfo (..)
, PrefixType (..)
, PrefixCacheInfo (..)
, TrafficControl (..)
, TrafficFlag (..)
-- * Distances
, distanceUniverse
, distanceSite
, distanceLink
, distanceHost
, distanceNowhere
-- * Reserved tables
, tableUnspecified
, tableCompatibility
, tableDefault
, tableMain
, tableLocal
)
where
import Haskus.Utils.Types.Generics (Generic)
import Haskus.Utils.Flow
import Haskus.Format.Binary.Storable
import Haskus.Format.Binary.Word
import Haskus.Format.Binary.Enum
import Haskus.Format.Binary.BitSet
-- =============================================================
-- From linux/include/uapi/linux/rtnetlink.h
-- =============================================================
-- | Netlink route message types.
--
-- These correspond to the RTM_* message-type constants in
-- linux/include/uapi/linux/rtnetlink.h. The numeric codes are sparse
-- (create/delete/get/set come in strides of four), so the wire
-- encoding is given by the hand-written 'CEnum' instance below rather
-- than by the derived 'Enum'.
data MessageType
   = LinkCreate
   | LinkDelete
   | LinkGet
   | LinkSet
   | AddressCreate
   | AddressDelete
   | AddressGet
   | RouteCreate
   | RouteDelete
   | RouteGet
   | NeighbourCreate
   | NeighbourDelete
   | NeighbourGet
   | RuleCreate
   | RuleDelete
   | RuleGet
   | QDiscCreate
   | QDiscDelete
   | QDiscGet
   | TClassCreate
   | TClassDelete
   | TClassGet
   | FilterCreate
   | FilterDelete
   | FilterGet
   | ActionCreate
   | ActionDelete
   | ActionGet
   | PrefixCreate
   | MulticastGet
   | UnicastGet
   | NeighbourTableCreate
   | NeighbourTableDelete
   | NeighbourTableSet
   | NDUserOption
   | AddressLabelCreate
   | AddressLabelDelete
   | AddressLabelGet
   | DCBGet
   | DCBSet
   | NetconfCreate
   | NetconfDelete
   | NetconfGet
   | MDBCreate
   | MDBDelete
   | MDBGet
   | NSIDCreate
   | NSIDDelete
   | NSIDGet
   | StatsCreate
   | StatsGet
   | CacheReportCreate
   deriving (Show,Eq,Enum)
-- | Wire encoding of 'MessageType' (the RTM_* codes from
-- linux/include/uapi/linux/rtnetlink.h).
instance CEnum MessageType where
   fromCEnum e = case e of
      LinkCreate           -> 16
      LinkDelete           -> 17
      LinkGet              -> 18
      LinkSet              -> 19
      AddressCreate        -> 20
      AddressDelete        -> 21
      AddressGet           -> 22
      RouteCreate          -> 24
      RouteDelete          -> 25
      RouteGet             -> 26
      NeighbourCreate      -> 28
      NeighbourDelete      -> 29
      NeighbourGet         -> 30
      RuleCreate           -> 32
      RuleDelete           -> 33
      RuleGet              -> 34
      QDiscCreate          -> 36
      QDiscDelete          -> 37
      QDiscGet             -> 38
      TClassCreate         -> 40
      TClassDelete         -> 41
      TClassGet            -> 42
      FilterCreate         -> 44
      FilterDelete         -> 45
      FilterGet            -> 46
      ActionCreate         -> 48
      ActionDelete         -> 49
      ActionGet            -> 50
      PrefixCreate         -> 52
      MulticastGet         -> 58
      UnicastGet           -> 62
      NeighbourTableCreate -> 64
      NeighbourTableDelete -> 66
      NeighbourTableSet    -> 67
      NDUserOption         -> 68
      AddressLabelCreate   -> 72
      AddressLabelDelete   -> 73
      AddressLabelGet      -> 74
      DCBGet               -> 78
      DCBSet               -> 79
      NetconfCreate        -> 80
      NetconfDelete        -> 81
      NetconfGet           -> 82
      MDBCreate            -> 84
      MDBDelete            -> 85
      MDBGet               -> 86
      NSIDCreate           -> 88
      NSIDDelete           -> 89
      NSIDGet              -> 90
      StatsCreate          -> 92
      StatsGet             -> 94
      CacheReportCreate    -> 96
   -- Inverse of 'fromCEnum' (previously an unconditional 'error').
   -- Unknown codes are still a hard error, mirroring the behaviour of
   -- the CEnum ProtocolType instance below.
   toCEnum e = case e of
      16 -> LinkCreate
      17 -> LinkDelete
      18 -> LinkGet
      19 -> LinkSet
      20 -> AddressCreate
      21 -> AddressDelete
      22 -> AddressGet
      24 -> RouteCreate
      25 -> RouteDelete
      26 -> RouteGet
      28 -> NeighbourCreate
      29 -> NeighbourDelete
      30 -> NeighbourGet
      32 -> RuleCreate
      33 -> RuleDelete
      34 -> RuleGet
      36 -> QDiscCreate
      37 -> QDiscDelete
      38 -> QDiscGet
      40 -> TClassCreate
      41 -> TClassDelete
      42 -> TClassGet
      44 -> FilterCreate
      45 -> FilterDelete
      46 -> FilterGet
      48 -> ActionCreate
      49 -> ActionDelete
      50 -> ActionGet
      52 -> PrefixCreate
      58 -> MulticastGet
      62 -> UnicastGet
      64 -> NeighbourTableCreate
      66 -> NeighbourTableDelete
      67 -> NeighbourTableSet
      68 -> NDUserOption
      72 -> AddressLabelCreate
      73 -> AddressLabelDelete
      74 -> AddressLabelGet
      78 -> DCBGet
      79 -> DCBSet
      80 -> NetconfCreate
      81 -> NetconfDelete
      82 -> NetconfGet
      84 -> MDBCreate
      85 -> MDBDelete
      86 -> MDBGet
      88 -> NSIDCreate
      89 -> NSIDDelete
      90 -> NSIDGet
      92 -> StatsCreate
      94 -> StatsGet
      96 -> CacheReportCreate
      _  -> error ("Invalid message type value: " ++ show (fromIntegral e :: Integer))
-- | Routing message
--
-- struct rtmsg
data RoutingMessage = RoutingMessage
   { routingMessageFamily            :: !Word8
   , routingMessageDestinationLength :: !Word8
   , routingMessageSourceLength      :: !Word8
   , routingMessageTOS               :: !Word8
   , routingMessageTableId           :: !Word8 -- ^ Routing table ID
   , routingMessageProtocol          :: !Word8 -- ^ Routing protocol
   , routingMessageDistance          :: !Word8
   , routingMessageType              :: !Word8
   , routingMessageFlags             :: !Word32
   }
   deriving (Generic,Storable)

-- | Routing type (the RTN_* route kinds of rtnetlink.h)
data RoutingType
   = RoutingUnspecified
   | RoutingUnicast          -- ^ Gateway or direct route
   | RoutingLocal            -- ^ Accept locally
   | RoutingBroadcast        -- ^ Accept locally as broadcast, send as broadcast
   | RoutingAnycast          -- ^ Accept locally as broadcast, send as unicast
   | RoutingMulticast        -- ^ Multicast route
   | RoutingBlackhole        -- ^ Drop
   | RoutingUnreachable      -- ^ Destination is unreachable
   | RoutingProhibit         -- ^ Administratively prohibited
   | RoutingThrow            -- ^ Not in this table
   | RoutingNAT              -- ^ Translate this address
   | RoutingExternalResolver -- ^ Use external resolver
     -- NOTE(review): unnamed trailing constructor; presumably the
     -- __RTN_MAX sentinel from rtnetlink.h — confirm before relying
     -- on it
   | Routing
   deriving (Show,Eq,Enum,CEnum)
-- | Routing protocol
--
-- Values of protocol >= ProtocolStatic are not interpreted by kernel;
-- they are just passed from user and back as is. It will be used by
-- hypothetical multiple routing daemons. Note that protocol values
-- should be standardized in order to avoid conflicts.
data ProtocolType
   = ProtocolUnspecified
   | ProtocolRedirect    -- ^ Route installed by ICMP redirects; not used by current IPv4
   | ProtocolKernel      -- ^ Route installed by kernel
   | ProtocolBoot        -- ^ Route installed during boot
   | ProtocolStatic      -- ^ Route installed by administrator
   | ProtocolGated       -- ^ Apparently, GateD
   | ProtocolRA          -- ^ RDISC/ND router advertisements
   | ProtocolMRT         -- ^ Merit MRT
   | ProtocolZebra       -- ^ Zebra
   | ProtocolBird        -- ^ Bird
   | ProtocolDNRouted    -- ^ DECnet routing daemon
   | ProtocolXORP        -- ^ XORP
   | ProtocolNTK         -- ^ Netsukuku
   | ProtocolDHCP        -- ^ DHCP client
   | ProtocolMulticast   -- ^ Multicast daemon
   | ProtocolBabel       -- ^ Babel daemon
   deriving (Show,Eq,Enum)

-- Wire encoding (RTPROT_* codes). The codes are sparse — 5..7 are
-- unassigned and Babel sits at 42 — hence the explicit tables in both
-- directions instead of a derived mapping.
instance CEnum ProtocolType where
   fromCEnum e = case e of
      ProtocolUnspecified -> 0
      ProtocolRedirect    -> 1
      ProtocolKernel      -> 2
      ProtocolBoot        -> 3
      ProtocolStatic      -> 4
      ProtocolGated       -> 8
      ProtocolRA          -> 9
      ProtocolMRT         -> 10
      ProtocolZebra       -> 11
      ProtocolBird        -> 12
      ProtocolDNRouted    -> 13
      ProtocolXORP        -> 14
      ProtocolNTK         -> 15
      ProtocolDHCP        -> 16
      ProtocolMulticast   -> 17
      ProtocolBabel       -> 42
   -- Partial: unassigned codes are a hard error.
   toCEnum e = case e of
      0  -> ProtocolUnspecified
      1  -> ProtocolRedirect
      2  -> ProtocolKernel
      3  -> ProtocolBoot
      4  -> ProtocolStatic
      8  -> ProtocolGated
      9  -> ProtocolRA
      10 -> ProtocolMRT
      11 -> ProtocolZebra
      12 -> ProtocolBird
      13 -> ProtocolDNRouted
      14 -> ProtocolXORP
      15 -> ProtocolNTK
      16 -> ProtocolDHCP
      17 -> ProtocolMulticast
      42 -> ProtocolBabel
      _  -> error ("Invalid protocol type value: " ++ show (fromIntegral e :: Integer))
-----------------------
-- Distances ("scope")
-----------------------

-- | Everywhere in the universe
distanceUniverse :: Word8
distanceUniverse = 0

-- | On site
distanceSite :: Word8
distanceSite = 200

-- | Located on directly attached link
distanceLink :: Word8
distanceLink = 253

-- | Our local addresses
distanceHost :: Word8
distanceHost = 254

-- | Reserved for not existing destinations
distanceNowhere :: Word8
distanceNowhere = 255

-- | Flags
data MessageFlag
   = FlagNotify       -- ^ Notify user of route change
   | FlagCloned       -- ^ This route is cloned
   | FlagEqualize     -- ^ Multipath equalizer: NI
   | FlagPrefix       -- ^ Prefix addresses
   | FlagLookupTable  -- ^ Set table to FIB lookup result
   | FlagFIBMatch     -- ^ return full FIB lookup match
   deriving (Show,Eq,Enum)

-- The RTM_F_* flags start at bit 8 (0x100), hence the constant offset
-- of 8 between the 'Enum' index and the bit index used by the BitSet.
instance CEnum MessageFlag where
   toCEnum e   = toEnum (fromIntegral e - 8)
   fromCEnum e = fromIntegral <| fromEnum e + 8

type MessageFlags = BitSet Word32 MessageFlag
-------------------------------
-- Reserved table identifiers
-------------------------------

-- | Unspecified table
tableUnspecified :: Word8
tableUnspecified = 0

-- | Compatibility table
tableCompatibility :: Word8
tableCompatibility = 252

-- | Default table
tableDefault :: Word8
tableDefault = 253

-- | Main table
tableMain :: Word8
tableMain = 254

-- | Local table
tableLocal :: Word8
tableLocal = 255

-- | Attributes that may accompany a routing message (RTA_* constants)
data RoutingAttributeType
   = AttrUnspecified
   | AttrDestination
   | AttrSource
   | AttrIIF
   | AttrOIF
   | AttrGateway
   | AttrPriority
   | AttrPreferredSource
   | AttrMetrics
   | AttrMultipath
   | AttrProtoInfo -- no longer used
   | AttrFlow
   | AttrCacheInfo
   | AttrSession -- no longer used
   | AttrMpAlgo -- no longer used
   | AttrTable
   | AttrMark
   | AttrMFCStats
   | AttrVia
   | AttrNewDestination
   | AttrPref
   | AttrEncapType
   | AttrEncap
   | AttrExpires
   | AttrPad
   | AttrUID
   | AttrTtlPropagate
   deriving (Show,Eq,Enum,CEnum)

-- | One hop of a multipath route
--
-- struct rtnexthop
data RoutingNextHop = RoutingNextHop
   { nextHopLength         :: !Word16
   , nextHopFlags          :: !NextHopFlags
   , nextHopHops           :: !Word8
   , nextHopInterfaceIndex :: !Int
   }
   deriving (Generic,Storable)

data NextHopFlag
   = NextHopDead       -- ^ Nexthop is dead (used by multipath)
   | NextHopPervasive  -- ^ Do recursive gateway lookup
   | NextHopOnLink     -- ^ Gateway is forced on link
   | NextHopOffload    -- ^ Offloaded route
   | NextHopLinkDown   -- ^ Carrier-down on nexthop
   | NextHopUnresolved -- ^ The entry is unresolved (ipmr)
   deriving (Show,Eq,Enum,CEnum)

type NextHopFlags = BitSet Word8 NextHopFlag
-- skipped: struct rtvia

-- | Route cache bookkeeping
--
-- struct rta_cacheinfo
data CacheInfo = CacheInfo
   { cacheInfoClntRef :: !Word32
   , cacheInfoLastUse :: !Word32
   , cacheInfoExpires :: !Int32
   , cacheInfoError   :: !Word32
   , cacheInfoUsed    :: !Word32
   , cacheInfoId      :: !Word32
   , cacheInfoTs      :: !Word32
   , cacheInfoTsAge   :: !Word32
   }
   deriving (Generic,Storable)

-- | Route metrics (RTAX_* constants)
data Metrics
   = MetricsUnspecified
   | MetricsLock
   | MetricsMTU
   | MetricsWindow
   | MetricsRTT
   | MetricsRTTVar
   | MetricsSSThreshold
   | MetricsCwnd
   | MetricsADVMSS
   | MetricsReordering
   | MetricsHopLimit
   | MetricsInitCwnd
   | MetricsFeatures
   | MetricsRTOMin
   | MetricsInitRwnd
   | MetricsQuickAck
   | MetricsCCAlgo
   | MetricsFastopenNoCookie
   deriving (Show,Eq,Enum,CEnum)

-- | Bits of the 'MetricsFeatures' metric (RTAX_FEATURE_* constants)
data Feature
   = FeatureECN
   | FeatureSACK
   | FeatureTimeStamp
   | FeatureAllFrag
   deriving (Show,Eq,Enum,CEnum)

type Features = BitSet Word8 Feature

-- skipped: rta_session

-- | Multicast forwarding cache statistics
--
-- struct rta_mfc_stats
data MFCStats = MFCStats
   { mfcStatsPackets        :: !Word64
   , mfcStatsBytes          :: !Word64
   , mfcStatsWrongInterface :: !Word64
   }
   deriving (Generic,Storable)
-- | Link level specific information
--
-- struct ifinfomsg
data LinkInfo = LinkInfo
   { linkFamily         :: !Word8
   , linkType           :: !Word16
   , linkIndex          :: !Int   -- ^ Link index
   , linkFlags          :: !Word
   , linkFlagChangeMask :: !Word
   }
   deriving (Generic,Storable)

-- | Prefix information
--
-- struct prefixmsg
data PrefixInfo = PrefixInfo
   { prefixFamily         :: !Word8
   , prefixInterfaceIndex :: !Int
   , prefixType           :: !Word8
   , prefixLength         :: !Word8
   , prefixFlags          :: !Word8
   }
   deriving (Generic,Storable)

-- | Kind of payload carried by a prefix attribute
data PrefixType
   = PrefixTypeUnspecified
   | PrefixTypeAddress
   | PrefixTypeCacheInfo
   deriving (Show,Eq,Enum,CEnum)

-- | Prefix lifetimes
--
-- struct prefix_cacheinfo
data PrefixCacheInfo = PrefixCacheInfo
   { prefixCacheInfoPreferredTime :: !Word32
   , prefixCacheInfoValidTime     :: !Word32
   }
   deriving (Generic,Storable)

-- | Traffic control message
--
-- struct tcmsg
data TrafficControl = TrafficControl
   { trafficFamily         :: !Word8
   , trafficInterfaceIndex :: !Int
   , trafficHandle         :: !Word32
   , trafficParent         :: !Word32
   , trafficInfo           :: !Word32
   }
   deriving (Generic,Storable)

-- | Attributes of a traffic-control message (TCA_* constants)
data TrafficFlag
   = TrafficUnspecified
   | TrafficKind
   | TrafficOptions
   | TrafficStats
   | TrafficXStats
   | TrafficRate
   | TrafficFcnt
   | TrafficStats2
   | TrafficStab
   | TrafficPad
   | TrafficDumpInvisible
   | TrafficChain
   | TrafficHwOffload
   deriving (Show,Eq,Enum,CEnum)

-- skipped: nduseroptmsg, multicast, TC action...
| hsyl20/ViperVM | haskus-system/src/lib/Haskus/System/Linux/Internals/NetlinkRoute.hs | bsd-3-clause | 12,849 | 0 | 14 | 3,847 | 2,224 | 1,325 | 899 | 475 | 1 |
import GHC.Vis
-- | Demo driver for ghc-vis: build a few sample heap structures,
-- visualise them, then wait for key presses before switching the
-- visualisation mode and exiting.
main :: IO ()
main = do
  putStrLn "Start"
  let a = "teeest"
  let b = [1..3]
  let c = b ++ b          -- references the spine of 'b' twice
  let d = [1..]           -- infinite list; only evaluated as far as forced
  print (d !! 1)          -- force part of 'd' so there is something to show
  vis
  view a "a"
  view b "b"
  view c "c"
  view d "d"
  _ <- getChar            -- wait for a key before switching view mode
  switch
  _ <- getChar            -- wait for a key before exiting
  putStrLn "End"
| FranklinChen/ghc-vis | nonghci-test.hs | bsd-3-clause | 248 | 0 | 10 | 84 | 130 | 56 | 74 | 17 | 1 |
{-# LANGUAGE BangPatterns, GeneralizedNewtypeDeriving, OverloadedStrings,
Rank2Types, RecordWildCards, TypeFamilies #-}
-- |
-- Module : Data.Attoparsec.Internal.Types
-- Copyright : Bryan O'Sullivan 2007-2015
-- License : BSD3
--
-- Maintainer : bos@serpentine.com
-- Stability : experimental
-- Portability : unknown
--
-- Simple, efficient parser combinators, loosely based on the Parsec
-- library.
module Data.Attoparsec.Internal.Types
(
Parser(..)
, State
, Failure
, Success
, Pos(..)
, IResult(..)
, More(..)
, (<>)
, Chunk(..)
) where
import Control.Applicative as App (Applicative(..), (<$>))
import Control.Applicative (Alternative(..))
import Control.DeepSeq (NFData(rnf))
import Control.Monad (MonadPlus(..))
import qualified Control.Monad.Fail as Fail (MonadFail(..))
import Data.Monoid as Mon (Monoid(..))
import Data.Semigroup (Semigroup(..))
import Data.Word (Word8)
import Data.ByteString (ByteString)
import qualified Data.ByteString as BS
import Data.ByteString.Internal (w2c)
import Prelude hiding (getChar, succ)
import qualified Data.Attoparsec.ByteString.Buffer as B
-- | An absolute offset into the input. 'Num' is derived so positions
-- can be advanced with plain arithmetic.
newtype Pos = Pos { fromPos :: Int }
            deriving (Eq, Ord, Show, Num)

-- | The result of a parse. This is parameterised over the type @i@
-- of string that was processed.
--
-- This type is an instance of 'Functor', where 'fmap' transforms the
-- value in a 'Done' result.
data IResult i r =
    Fail i [String] String
    -- ^ The parse failed. The @i@ parameter is the input that had
    -- not yet been consumed when the failure occurred. The
    -- @[@'String'@]@ is a list of contexts in which the error
    -- occurred. The 'String' is the message describing the error, if
    -- any.
  | Partial (i -> IResult i r)
    -- ^ Supply this continuation with more input so that the parser
    -- can resume. To indicate that no more input is available, pass
    -- an empty string to the continuation.
    --
    -- __Note__: if you get a 'Partial' result, do not call its
    -- continuation more than once.
  | Done i r
    -- ^ The parse succeeded. The @i@ parameter is the input that had
    -- not yet been consumed (if any) when the parse succeeded.

-- Constructor arguments are rendered at precedence 11, matching what a
-- derived Show would produce; a 'Partial' continuation is opaque and
-- printed as @_@.
instance (Show i, Show r) => Show (IResult i r) where
    showsPrec d ir = showParen (d > 10) $
      case ir of
        (Fail t stk msg) -> showString "Fail" . f t . f stk . f msg
        (Partial _)      -> showString "Partial _"
        (Done t r)       -> showString "Done" . f t . f r
      where f :: Show a => a -> ShowS
            f x = showChar ' ' . showsPrec 11 x

-- A 'Partial' continuation is a function and cannot be forced further,
-- so its payload is left alone.
instance (NFData i, NFData r) => NFData (IResult i r) where
    rnf (Fail t stk msg) = rnf t `seq` rnf stk `seq` rnf msg
    rnf (Partial _)  = ()
    rnf (Done t r)   = rnf t `seq` rnf r
    {-# INLINE rnf #-}
-- | Map the final result value: a 'Done' payload is transformed
-- directly, a 'Partial' continuation is wrapped so the transformation
-- happens once more input arrives, and 'Fail' passes through intact.
instance Functor (IResult i) where
    fmap f ir = case ir of
      Fail t stk msg -> Fail t stk msg
      Partial k      -> Partial (\inp -> fmap f (k inp))
      Done t r       -> Done t (f r)
-- | The core parser type. This is parameterised over the type @i@
-- of string being processed.
--
-- This type is an instance of the following classes:
--
-- * 'Monad', where 'fail' throws an exception (i.e. fails) with an
--   error message.
--
-- * 'Functor' and 'Applicative', which follow the usual definitions.
--
-- * 'MonadPlus', where 'mzero' fails (with no error message) and
--   'mplus' executes the right-hand parser if the left-hand one
--   fails.  When the parser on the right executes, the input is reset
--   to the same state as the parser on the left started with. (In
--   other words, attoparsec is a backtracking parser that supports
--   arbitrary lookahead.)
--
-- * 'Alternative', which follows 'MonadPlus'.
--
-- Internally a parser is written in continuation-passing style: it is
-- given the buffered input state, the current position, whether more
-- input may arrive, and a failure and a success continuation.
newtype Parser i a = Parser {
      runParser :: forall r.
                   State i -> Pos -> More
                -> Failure i (State i)   r
                -> Success i (State i) a r
                -> IResult i r
    }

-- | Buffered form of the input; maps each chunk type to its buffer
-- representation.
type family State i
type instance State ByteString = B.Buffer

-- | Failure continuation: receives the buffer, failure position,
-- "more input" state, error contexts and error message.
type Failure i t   r = t -> Pos -> More -> [String] -> String
                       -> IResult i r
-- | Success continuation: receives the buffer, new position, "more
-- input" state and the produced value.
type Success i t a r = t -> Pos -> More -> a -> IResult i r

-- | Have we read all available input?
data More = Complete | Incomplete
            deriving (Eq, Show)

-- 'Complete' is absorbing: once end-of-input has been seen, combining
-- with any other state stays 'Complete'.
instance Semigroup More where
    c@Complete <> _ = c
    _          <> m = m

instance Mon.Monoid More where
    mappend = (<>)
    mempty  = Incomplete
instance Monad (Parser i) where
    -- Delegates to the 'Fail.MonadFail' instance below.
    fail = Fail.fail
    {-# INLINE fail #-}
    return = App.pure
    {-# INLINE return #-}
    -- CPS bind: build a success continuation that runs @k@ on the
    -- produced value, threading buffer, (strict) position and "more"
    -- state through unchanged.
    m >>= k = Parser $ \t !pos more lose succ ->
        let succ' t' !pos' more' a = runParser (k a) t' pos' more' lose succ
        in runParser m t pos more lose succ'
    {-# INLINE (>>=) #-}
    (>>) = (*>)
    {-# INLINE (>>) #-}

instance Fail.MonadFail (Parser i) where
    -- Invoke the failure continuation with an empty context stack.
    fail err = Parser $ \t pos more lose _succ -> lose t pos more [] msg
      where msg = "Failed reading: " ++ err
    {-# INLINE fail #-}

-- | Backtracking choice: if @f@ fails, run @g@ from the ORIGINAL
-- position @pos@ (but keep the possibly-extended buffer @t'@ and the
-- updated "more" state), discarding @f@'s error context and message.
plus :: Parser i a -> Parser i a -> Parser i a
plus f g = Parser $ \t pos more lose succ ->
  let lose' t' _pos' more' _ctx _msg = runParser g t' pos more' lose succ
  in runParser f t pos more lose' succ

instance MonadPlus (Parser i) where
    mzero = fail "mzero"
    {-# INLINE mzero #-}
    mplus = plus

instance Functor (Parser i) where
    -- Map over the value handed to the success continuation.
    fmap f p = Parser $ \t pos more lose succ ->
      let succ' t' pos' more' a = succ t' pos' more' (f a)
      in runParser p t pos more lose succ'
    {-# INLINE fmap #-}

-- | '<*>' expressed via bind.
apP :: Parser i (a -> b) -> Parser i a -> Parser i b
apP d e = do
  b <- d
  a <- e
  return (b a)
{-# INLINE apP #-}

instance Applicative (Parser i) where
    pure v = Parser $ \t pos more _lose succ -> succ t pos more v
    {-# INLINE pure #-}
    (<*>) = apP
    {-# INLINE (<*>) #-}
    m *> k = m >>= \_ -> k
    {-# INLINE (*>) #-}
    x <* y = x >>= \a -> y >> pure a
    {-# INLINE (<*) #-}

-- The Semigroup/Monoid instances denote *choice*, not concatenation:
-- '<>' tries the left parser and falls back to the right.
instance Semigroup (Parser i a) where
    (<>) = plus
    {-# INLINE (<>) #-}

instance Monoid (Parser i a) where
    mempty = fail "mempty"
    {-# INLINE mempty #-}
    mappend = (<>)
    {-# INLINE mappend #-}

instance Alternative (Parser i) where
    empty = fail "empty"
    {-# INLINE empty #-}
    (<|>) = plus
    {-# INLINE (<|>) #-}
    -- many/some are spelled out (mutually recursive) here instead of
    -- relying on the class default definitions.
    many v = many_v
        where many_v = some_v <|> pure []
              some_v = (:) App.<$> v <*> many_v
    {-# INLINE many #-}
    some v = some_v
      where
        many_v = some_v <|> pure []
        some_v = (:) <$> v <*> many_v
    {-# INLINE some #-}
-- | A common interface for input chunks.
class Monoid c => Chunk c where
  type ChunkElem c
  -- | Test if the chunk is empty.
  nullChunk :: c -> Bool
  -- | Append chunk to a buffer.
  pappendChunk :: State c -> c -> State c
  -- | Position at the end of a buffer. The first argument is ignored.
  atBufferEnd :: c -> State c -> Pos
  -- | Return the buffer element at the given position along with its length.
  bufferElemAt :: c -> Pos -> State c -> Maybe (ChunkElem c, Int)
  -- | Map an element to the corresponding character.
  -- The first argument is ignored.
  chunkElemToChar :: c -> ChunkElem c -> Char

-- Bytes are their own elements: every element has length 1 and is
-- turned into a 'Char' by the Word8->Char widening 'w2c'.
instance Chunk ByteString where
  type ChunkElem ByteString = Word8
  nullChunk = BS.null
  {-# INLINE nullChunk #-}
  pappendChunk = B.pappend
  {-# INLINE pappendChunk #-}
  atBufferEnd _ = Pos . B.length
  {-# INLINE atBufferEnd #-}
  bufferElemAt _ (Pos i) buf
    | i < B.length buf = Just (B.unsafeIndex buf i, 1)
    | otherwise = Nothing
  {-# INLINE bufferElemAt #-}
  chunkElemToChar _ = w2c
  {-# INLINE chunkElemToChar #-}
| Fuuzetsu/haddock | haddock-library/vendor/attoparsec-0.13.1.0/Data/Attoparsec/Internal/Types.hs | bsd-2-clause | 7,584 | 0 | 15 | 2,036 | 1,982 | 1,090 | 892 | 153 | 1 |
{-# OPTIONS_GHC -Wwarn #-}
{-# LANGUAGE CPP, ScopedTypeVariables, Rank2Types #-}
{-# LANGUAGE LambdaCase #-}
-----------------------------------------------------------------------------
-- |
-- Module : Haddock
-- Copyright : (c) Simon Marlow 2003-2006,
-- David Waern 2006-2010,
-- Mateusz Kowalczyk 2014
-- License : BSD-like
--
-- Maintainer : haddock@projects.haskell.org
-- Stability : experimental
-- Portability : portable
--
-- Haddock - A Haskell Documentation Tool
--
-- Program entry point and top-level code.
-----------------------------------------------------------------------------
module Haddock (
haddock,
haddockWithGhc,
getGhcDirs,
readPackagesAndProcessModules,
withGhc
) where
import Data.Version
import Haddock.Backends.Xhtml
import Haddock.Backends.Xhtml.Themes (getThemes)
import Haddock.Backends.LaTeX
import Haddock.Backends.Hoogle
import Haddock.Backends.Hyperlinker
import Haddock.Interface
import Haddock.Parser
import Haddock.Types
import Haddock.Version
import Haddock.InterfaceFile
import Haddock.Options
import Haddock.Utils
import Control.Monad hiding (forM_)
import Control.Applicative
import Data.Foldable (forM_)
import Data.List (isPrefixOf)
import Control.Exception
import Data.Maybe
import Data.IORef
import Data.Map (Map)
import qualified Data.Map as Map
import System.IO
import System.Exit
#if defined(mingw32_HOST_OS)
import Foreign
import Foreign.C
import Data.Int
#endif
#ifdef IN_GHC_TREE
import System.FilePath
#else
import qualified GHC.Paths as GhcPaths
import Paths_haddock_api (getDataDir)
import System.Directory (doesDirectoryExist)
#endif
import GHC hiding (verbosity)
import Config
import DynFlags hiding (projectVersion, verbosity)
import StaticFlags (discardStaticFlags)
import Packages
import Panic (handleGhcException)
import Module
import FastString
--------------------------------------------------------------------------------
-- * Exception handling
--------------------------------------------------------------------------------
-- | Wrap an action in all of Haddock's top-level handlers: GHC errors
-- innermost, then Haddock-specific errors, then the generic handler
-- outermost.
handleTopExceptions :: IO a -> IO a
handleTopExceptions act =
  handleNormalExceptions (handleHaddockExceptions (handleGhcExceptions act))
-- | Either returns normally or throws an ExitCode exception;
-- all other exceptions are turned into exit exceptions.
handleNormalExceptions :: IO a -> IO a
handleNormalExceptions inner =
  (inner `onException` hFlush stdout)
  `catches`
  -- Handlers are tried in order: 'ExitCode' is re-thrown untouched so
  -- deliberate exits keep their exit code; a stack overflow gets a
  -- dedicated hint; anything else is reported as an internal error
  -- followed by a failing exit.
  [ Handler (\(code :: ExitCode) -> exitWith code)
  , Handler (\(ex :: AsyncException) ->
      case ex of
        StackOverflow -> do
          putStrLn "stack overflow: use -g +RTS -K<size> to increase it"
          exitFailure
        _ -> do
          putStrLn ("haddock: " ++ show ex)
          exitFailure)
  , Handler (\(ex :: SomeException) -> do
      putStrLn ("haddock: internal error: " ++ show ex)
      exitFailure)
  ]
-- | Report a 'HaddockException' on stdout and exit with failure;
-- any other exception propagates unchanged.
handleHaddockExceptions :: IO a -> IO a
handleHaddockExceptions inner = inner `catch` report
  where
    report :: HaddockException -> IO b
    report e = do
      putStrLn ("haddock: " ++ show e)
      exitFailure
handleGhcExceptions :: IO a -> IO a
handleGhcExceptions =
  -- error messages propagated as exceptions
  handleGhcException $ \e -> do
    hFlush stdout
    case e of
      -- A failed GHC phase already carries the exit code to use.
      PhaseFailed _ code -> exitWith code
      _ -> do
        print (e :: GhcException)
        exitFailure
-------------------------------------------------------------------------------
-- * Top level
-------------------------------------------------------------------------------
-- | Run Haddock with given list of arguments.
--
-- Haddock's own main function is defined in terms of this:
--
-- > main = getArgs >>= haddock
haddock :: [String] -> IO ()
haddock = haddockWithGhc withGhc
-- | Like 'haddock', but parameterised over how GHC sessions are run,
-- so callers can substitute their own session runner.
haddockWithGhc :: (forall a. [Flag] -> Ghc a -> IO a) -> [String] -> IO ()
haddockWithGhc ghc args = handleTopExceptions $ do
  -- Parse command-line flags and handle some of them initially.
  -- TODO: unify all of this (and some of what's in the 'render' function),
  -- into one function that returns a record with a field for each option,
  -- or which exits with an error or help message.
  (flags, files) <- parseHaddockOpts args
  shortcutFlags flags
  qual <- case qualification flags of {Left msg -> throwE msg; Right q -> return q}
  -- inject dynamic-too into flags before we proceed
  flags' <- ghc flags $ do
        df <- getDynFlags
        case lookup "GHC Dynamic" (compilerInfo df) of
          Just "YES" -> return $ Flag_OptGhc "-dynamic-too" : flags
          _ -> return flags
  unless (Flag_NoWarnings `elem` flags) $ do
    hypSrcWarnings flags
    forM_ (warnings args) $ \warning -> do
      hPutStrLn stderr warning
  -- Second session: run with the (possibly dynamic-too augmented) flags.
  ghc flags' $ do
    dflags <- getDynFlags
    if not (null files) then do
      (packages, ifaces, homeLinks) <- readPackagesAndProcessModules flags files
      -- Dump an "interface file" (.haddock file), if requested.
      forM_ (optDumpInterfaceFile flags) $ \path -> liftIO $ do
        writeInterfaceFile path InterfaceFile {
            ifInstalledIfaces = map toInstalledIface ifaces
          , ifLinkEnv = homeLinks
          }
      -- Render the interfaces.
      liftIO $ renderStep dflags flags qual packages ifaces
    else do
      when (any (`elem` [Flag_Html, Flag_Hoogle, Flag_LaTeX]) flags) $
        throwE "No input file(s)."
      -- Get packages supplied with --read-interface.
      packages <- liftIO $ readInterfaceFiles freshNameCache (readIfaceArgs flags)
      -- Render even though there are no input files (usually contents/index).
      liftIO $ renderStep dflags flags qual packages []
-- | Create warnings about potential misuse of -optghc
warnings :: [String] -> [String]
warnings args =
  [ "Warning: `" ++ arg ++ "' means `-o " ++ drop 2 arg
      ++ "', did you mean `-" ++ arg ++ "'?"
  | arg <- args
  , "-optghc" `isPrefixOf` arg  -- "-optghc…" parses as "-o ptghc…"
  ]
-- | Run a 'Ghc' action in a fresh GHC session configured from the
-- Haddock flags. GHC source errors are printed before exiting with
-- failure.
withGhc :: [Flag] -> Ghc a -> IO a
withGhc flags action = do
  libDir <- fmap snd (getGhcDirs flags)
  -- Catches all GHC source errors, then prints and re-throws them.
  let handleSrcErrors action' = flip handleSourceError action' $ \err -> do
        printException err
        liftIO exitFailure
  withGhc' libDir (ghcFlags flags) (\_ -> handleSrcErrors action)
-- | Read the interface files supplied via @--read-interface@, then
-- build 'Interface's for the given input files.
readPackagesAndProcessModules :: [Flag] -> [String]
                              -> Ghc ([(DocPaths, InterfaceFile)], [Interface], LinkEnv)
readPackagesAndProcessModules flags files = do
  -- Get packages supplied with --read-interface.
  packages <- readInterfaceFiles nameCacheFromGhc (readIfaceArgs flags)
  -- Create the interfaces -- this is the core part of Haddock.
  let ifaceFiles = map snd packages
  (ifaces, homeLinks) <- processModules (verbosity flags) files flags ifaceFiles
  return (packages, ifaces, homeLinks)
-- | Update the cross-package HTML link table, then hand everything to
-- 'render'.
renderStep :: DynFlags -> [Flag] -> QualOption -> [(DocPaths, InterfaceFile)] -> [Interface] -> IO ()
renderStep dflags flags qual pkgs interfaces = do
  updateHTMLXRefs pkgs
  let
    ifaceFiles = map snd pkgs
    installedIfaces = concatMap ifInstalledIfaces ifaceFiles
    -- Map each installed module to its external source path. The
    -- pattern match in the list monad silently drops packages without
    -- a source path (match failure acts as mzero here).
    extSrcMap = Map.fromList $ do
      ((_, Just path), ifile) <- pkgs
      iface <- ifInstalledIfaces ifile
      return (instMod iface, path)
  render dflags flags qual interfaces installedIfaces extSrcMap
-- | Render the interfaces with whatever backend is specified in the flags.
render :: DynFlags -> [Flag] -> QualOption -> [Interface] -> [InstalledInterface] -> Map Module FilePath -> IO ()
render dflags flags qual ifaces installedIfaces extSrcMap = do
  let
    title = fromMaybe "" (optTitle flags)
    unicode = Flag_UseUnicode `elem` flags
    pretty = Flag_PrettyHtml `elem` flags
    opt_wiki_urls = wikiUrls flags
    opt_contents_url = optContentsUrl flags
    opt_index_url = optIndexUrl flags
    odir = outputDir flags
    opt_latex_style = optLaTeXStyle flags
    opt_source_css = optSourceCssFile flags
    visibleIfaces = [ i | i <- ifaces, OptHide `notElem` ifaceOptions i ]
    -- /All/ visible interfaces including external package modules.
    allIfaces = map toInstalledIface ifaces ++ installedIfaces
    allVisibleIfaces = [ i | i <- allIfaces, OptHide `notElem` instOptions i ]
    -- NOTE(review): partial — 'head' crashes when 'ifaces' is empty;
    -- callers appear to guard this but it is not enforced here.
    pkgMod = ifaceMod (head ifaces)
    pkgKey = modulePackageKey pkgMod
    pkgStr = Just (packageKeyString pkgKey)
    pkgNameVer = modulePackageInfo dflags flags pkgMod
    (srcBase, srcModule, srcEntity, srcLEntity) = sourceUrls flags
    -- Hyperlinked-source output overrides any user-supplied module URL.
    srcModule'
      | Flag_HyperlinkedSource `elem` flags = Just hypSrcModuleUrlFormat
      | otherwise = srcModule
    srcMap = mkSrcMap $ Map.union
      (Map.map SrcExternal extSrcMap)
      (Map.fromList [ (ifaceMod iface, SrcLocal) | iface <- ifaces ])
    pkgSrcMap = Map.mapKeys modulePackageKey extSrcMap
    pkgSrcMap'
      | Flag_HyperlinkedSource `elem` flags =
          Map.insert pkgKey hypSrcModuleNameUrlFormat pkgSrcMap
      | Just srcNameUrl <- srcEntity = Map.insert pkgKey srcNameUrl pkgSrcMap
      | otherwise = pkgSrcMap
    -- TODO: Get these from the interface files as with srcMap
    pkgSrcLMap'
      | Flag_HyperlinkedSource `elem` flags =
          Map.singleton pkgKey hypSrcModuleLineUrlFormat
      | Just path <- srcLEntity = Map.singleton pkgKey path
      | otherwise = Map.empty
    sourceUrls' = (srcBase, srcModule', pkgSrcMap', pkgSrcLMap')
  libDir <- getHaddockLibDir flags
  prologue <- getPrologue dflags flags
  themes <- getThemes libDir flags >>= either bye return
  -- Each requested backend runs independently below.
  when (Flag_GenIndex `elem` flags) $ do
    ppHtmlIndex odir title pkgStr
                themes opt_contents_url sourceUrls' opt_wiki_urls
                allVisibleIfaces pretty
    copyHtmlBits odir libDir themes
  when (Flag_GenContents `elem` flags) $ do
    ppHtmlContents dflags odir title pkgStr
                   themes opt_index_url sourceUrls' opt_wiki_urls
                   allVisibleIfaces True prologue pretty
                   (makeContentsQual qual)
    copyHtmlBits odir libDir themes
  when (Flag_Html `elem` flags) $ do
    ppHtml dflags title pkgStr visibleIfaces odir
           prologue
           themes sourceUrls' opt_wiki_urls
           opt_contents_url opt_index_url unicode qual
           pretty
    copyHtmlBits odir libDir themes
  -- TODO: we throw away Meta for both Hoogle and LaTeX right now,
  -- might want to fix that if/when these two get some work on them
  when (Flag_Hoogle `elem` flags) $ do
    case pkgNameVer of
      Nothing -> putStrLn . unlines $
        [ "haddock: Unable to find a package providing module "
          ++ moduleNameString (moduleName pkgMod) ++ ", skipping Hoogle."
        , ""
        , "         Perhaps try specifying the desired package explicitly"
          ++ " using the --package-name"
        , "         and --package-version arguments."
        ]
      Just (PackageName pkgNameFS, pkgVer) ->
        let pkgNameStr | unpackFS pkgNameFS == "main" && title /= [] = title
                       | otherwise = unpackFS pkgNameFS
        in ppHoogle dflags pkgNameStr pkgVer title (fmap _doc prologue)
             visibleIfaces odir
  when (Flag_LaTeX `elem` flags) $ do
    ppLaTeX title pkgStr visibleIfaces odir (fmap _doc prologue) opt_latex_style
            libDir
  when (Flag_HyperlinkedSource `elem` flags) $ do
    ppHyperlinkedSource odir libDir opt_source_css pretty srcMap ifaces
-- | From GHC 7.10, this function has a potential to crash with a
-- nasty message such as @expectJust getPackageDetails@ because
-- package name and versions can no longer reliably be extracted in
-- all cases: if the package is not installed yet then this info is no
-- longer available. The @--package-name@ and @--package-version@
-- Haddock flags allow the user to specify this information and it is
-- returned here if present: if it is not present, the error will
-- occur. Nasty but that's how it is for now. Potential TODO.
modulePackageInfo :: DynFlags
                  -> [Flag] -- ^ Haddock flags are checked as they may
                            -- contain the package name or version
                            -- provided by the user which we
                            -- prioritise
                  -> Module -> Maybe (PackageName, Data.Version.Version)
modulePackageInfo dflags flags modu =
  cmdline <|> pkgDb
  where
    -- Both --package-name and --package-version must be present for
    -- the command line to win; otherwise fall back to the package DB.
    cmdline = (,) <$> optPackageName flags <*> optPackageVersion flags
    pkgDb = (\pkg -> (packageName pkg, packageVersion pkg)) <$> lookupPackage dflags (modulePackageKey modu)
-------------------------------------------------------------------------------
-- * Reading and dumping interface files
-------------------------------------------------------------------------------
-- | Read a set of @.haddock@ interface files. Files that cannot be
-- read are reported on stdout and skipped rather than aborting the
-- whole run.
readInterfaceFiles :: MonadIO m
                   => NameCacheAccessor m
                   -> [(DocPaths, FilePath)]
                   -> m [(DocPaths, InterfaceFile)]
readInterfaceFiles name_cache_accessor pairs = do
  catMaybes `liftM` mapM tryReadIface pairs
  where
    -- try to read an interface, warn if we can't
    tryReadIface (paths, file) =
      readInterfaceFile name_cache_accessor file >>= \case
        Left err -> liftIO $ do
          putStrLn ("Warning: Cannot read " ++ file ++ ":")
          putStrLn ("   " ++ err)
          putStrLn "Skipping this interface."
          return Nothing
        Right f -> return $ Just (paths, f)
-------------------------------------------------------------------------------
-- * Creating a GHC session
-------------------------------------------------------------------------------
-- | Start a GHC session with the -haddock flag set. Also turn off
-- compilation and linking. Then run the given 'Ghc' action.
withGhc' :: String -> [String] -> (DynFlags -> Ghc a) -> IO a
withGhc' libDir flags ghcActs = runGhc (Just libDir) $ do
  dynflags <- getSessionDynFlags
  dynflags' <- parseGhcFlags (gopt_set dynflags Opt_Haddock) {
    hscTarget = HscNothing,
    ghcMode = CompManager,
    ghcLink = NoLink
    }
  -- No code is generated (HscNothing above), so object splitting is
  -- pointless here.
  let dynflags'' = gopt_unset dynflags' Opt_SplitObjs
  defaultCleanupHandler dynflags'' $ do
      -- ignore the following return-value, which is a list of packages
      -- that may need to be re-linked: Haddock doesn't do any
      -- dynamic or static linking at all!
      _ <- setSessionDynFlags dynflags''
      ghcActs dynflags''
  where
    -- Parse the user-supplied GHC flags into the session's DynFlags;
    -- any unparsed leftovers are a fatal error.
    parseGhcFlags :: MonadIO m => DynFlags -> m DynFlags
    parseGhcFlags dynflags = do
      -- TODO: handle warnings?
      -- NOTA BENE: We _MUST_ discard any static flags here, because we cannot
      -- rely on Haddock to parse them, as it only parses the DynFlags. Yet if
      -- we pass any, Haddock will fail. Since StaticFlags are global to the
      -- GHC invocation, there's also no way to reparse/save them to set them
      -- again properly.
      --
      -- This is a bit of a hack until we get rid of the rest of the remaining
      -- StaticFlags. See GHC issue #8276.
      let flags' = discardStaticFlags flags
      (dynflags', rest, _) <- parseDynamicFlags dynflags (map noLoc flags')
      if not (null rest)
        then throwE ("Couldn't parse GHC options: " ++ unwords flags')
        else return dynflags'
-------------------------------------------------------------------------------
-- * Misc
-------------------------------------------------------------------------------
-- | Locate Haddock's resource/data directory. An explicit @--lib@
-- flag wins (the last occurrence if given several times).
getHaddockLibDir :: [Flag] -> IO String
getHaddockLibDir flags =
  case [str | Flag_Lib str <- flags] of
    [] -> do
#ifdef IN_GHC_TREE
      getInTreeDir
#else
      d <- getDataDir -- provided by Cabal
      doesDirectoryExist d >>= \exists -> case exists of
        True -> return d
        False -> do
          -- If directory does not exist then we are probably invoking from
          -- ./dist/build/haddock/haddock so we use ./resources as a fallback.
          doesDirectoryExist "resources" >>= \exists_ -> case exists_ of
            True -> return "resources"
            False -> die ("Haddock's resource directory (" ++ d ++ ") does not exist!\n")
#endif
    fs -> return (last fs)
getGhcDirs :: [Flag] -> IO (String, String)
getGhcDirs flags = do
case [ dir | Flag_GhcLibDir dir <- flags ] of
[] -> do
#ifdef IN_GHC_TREE
libDir <- getInTreeDir
return (ghcPath, libDir)
#else
return (ghcPath, GhcPaths.libdir)
#endif
xs -> return (ghcPath, last xs)
where
#ifdef IN_GHC_TREE
ghcPath = "not available"
#else
ghcPath = GhcPaths.ghc
#endif
shortcutFlags :: [Flag] -> IO ()
shortcutFlags flags = do
usage <- getUsage
when (Flag_Help `elem` flags) (bye usage)
when (Flag_Version `elem` flags) byeVersion
when (Flag_InterfaceVersion `elem` flags) (bye (show binaryInterfaceVersion ++ "\n"))
when (Flag_CompatibleInterfaceVersions `elem` flags)
(bye (unwords (map show binaryInterfaceVersionCompatibility) ++ "\n"))
when (Flag_GhcVersion `elem` flags) (bye (cProjectVersion ++ "\n"))
when (Flag_PrintGhcPath `elem` flags) $ do
dir <- fmap fst (getGhcDirs flags)
bye $ dir ++ "\n"
when (Flag_PrintGhcLibDir `elem` flags) $ do
dir <- fmap snd (getGhcDirs flags)
bye $ dir ++ "\n"
when (Flag_UseUnicode `elem` flags && Flag_Html `notElem` flags) $
throwE "Unicode can only be enabled for HTML output."
when ((Flag_GenIndex `elem` flags || Flag_GenContents `elem` flags)
&& Flag_Html `elem` flags) $
throwE "-h cannot be used with --gen-index or --gen-contents"
when ((Flag_GenIndex `elem` flags || Flag_GenContents `elem` flags)
&& Flag_Hoogle `elem` flags) $
throwE "--hoogle cannot be used with --gen-index or --gen-contents"
when ((Flag_GenIndex `elem` flags || Flag_GenContents `elem` flags)
&& Flag_LaTeX `elem` flags) $
throwE "--latex cannot be used with --gen-index or --gen-contents"
where
byeVersion = bye $
"Haddock version " ++ projectVersion ++ ", (c) Simon Marlow 2006\n"
++ "Ported to use the GHC API by David Waern 2006-2008\n"
-- | Generate some warnings about potential misuse of @--hyperlinked-source@:
-- @--source-*@ URL options are ignored when it is on, and a source CSS file
-- is pointless when it is off.
hypSrcWarnings :: [Flag] -> IO ()
hypSrcWarnings flags = do
    when (hyperlinked && any isSourceUrlFlag flags) $
        warn "--source-* options are ignored when --hyperlinked-source is enabled."

    when (not hyperlinked && any isSourceCssFlag flags) $
        warn "source CSS file is specified but --hyperlinked-source is disabled."
  where
    warn msg = hPutStrLn stderr ("Warning: " ++ msg)

    hyperlinked = Flag_HyperlinkedSource `elem` flags

    isSourceUrlFlag (Flag_SourceBaseURL _)    = True
    isSourceUrlFlag (Flag_SourceModuleURL _)  = True
    isSourceUrlFlag (Flag_SourceEntityURL _)  = True
    isSourceUrlFlag (Flag_SourceLEntityURL _) = True
    isSourceUrlFlag _                         = False

    isSourceCssFlag (Flag_SourceCss _) = True
    isSourceCssFlag _                  = False
-- | Populate the two global cross-reference maps used when rendering links
-- into other packages' HTML docs: one keyed by installed interface module,
-- one keyed by module name.
updateHTMLXRefs :: [(DocPaths, InterfaceFile)] -> IO ()
updateHTMLXRefs packages = do
  writeIORef html_xrefs_ref (Map.fromList mapping)
  writeIORef html_xrefs_ref' (Map.fromList mapping')
  where
    mapping = [ (instMod iface, html) | ((html, _), ifaces) <- packages
              , iface <- ifInstalledIfaces ifaces ]
    mapping' = [ (moduleName m, html) | (m, html) <- mapping ]


-- | Read and parse the prologue file given by @-p@/@--prologue@, if any.
-- The file is read as UTF-8 and parsed strictly before the handle closes.
-- More than one prologue option is an error.
getPrologue :: DynFlags -> [Flag] -> IO (Maybe (MDoc RdrName))
getPrologue dflags flags =
  case [filename | Flag_Prologue filename <- flags ] of
    [] -> return Nothing
    [filename] -> withFile filename ReadMode $ \h -> do
      hSetEncoding h utf8
      str <- hGetContents h
      -- $! forces the parse before withFile closes the (lazily read) handle
      return . Just $! parseParas dflags str
    _ -> throwE "multiple -p/--prologue options"
#ifdef IN_GHC_TREE

-- | Lib dir for a Haddock built inside a GHC tree: @<exec dir>/../lib@.
getInTreeDir :: IO String
getInTreeDir = getExecDir >>= \case
  Nothing -> error "No GhcDir found"
  Just d -> return (d </> ".." </> "lib")


-- | Directory of the running executable; 'Nothing' on platforms where we
-- have no way to ask (everything but Windows here).
getExecDir :: IO (Maybe String)
#if defined(mingw32_HOST_OS)
getExecDir = try_size 2048 -- plenty, PATH_MAX is 512 under Win32.
  where
    -- Retry with a doubled buffer until GetModuleFileNameW's result fits.
    try_size size = allocaArray (fromIntegral size) $ \buf -> do
        ret <- c_GetModuleFileName nullPtr buf size
        case ret of
          0 -> return Nothing
          _ | ret < size -> fmap (Just . dropFileName) $ peekCWString buf
            | otherwise  -> try_size (size * 2)

foreign import stdcall unsafe "windows.h GetModuleFileNameW"
  c_GetModuleFileName :: Ptr () -> CWString -> Word32 -> IO Word32
#else
getExecDir = return Nothing
#endif
#endif
| lamefun/haddock | haddock-api/src/Haddock.hs | bsd-2-clause | 20,681 | 0 | 23 | 4,914 | 4,515 | 2,334 | 2,181 | 337 | 6 |
module CaseIn1 where

-- NOTE(review): this file appears to be a refactoring-test fixture (its path
-- suggests a 'demote' transformation test); its exact shape may be what the
-- test checks, so keep the code unchanged.
main x y z
  = case x of
      0 -> addthree
      1 -> inc y where inc a = a + 1

addthree a b c = (a + b) + c
| SAdams601/HaRe | old/testing/demote/CaseIn1AST.hs | bsd-3-clause | 151 | 0 | 10 | 68 | 72 | 37 | 35 | 6 | 2 |
{-# LANGUAGE CPP, StandaloneDeriving, GeneralizedNewtypeDeriving #-}
-- |
-- Types for referring to remote objects in Remote GHCi. For more
-- details, see Note [External GHCi pointers] in compiler/ghci/GHCi.hs
--
-- For details on Remote GHCi, see Note [Remote GHCi] in
-- compiler/ghci/GHCi.hs.
--
module GHCi.RemoteTypes
( RemotePtr(..), toRemotePtr, fromRemotePtr, castRemotePtr
, HValue(..)
, RemoteRef, mkRemoteRef, localRef, freeRemoteRef
, HValueRef, toHValueRef
, ForeignRef, mkForeignRef, withForeignRef
, ForeignHValue
, unsafeForeignRefToRemoteRef, finalizeForeignRef
) where
import Control.DeepSeq
import Data.Word
import Foreign hiding (newForeignPtr)
import Foreign.Concurrent
import Data.Binary
import Unsafe.Coerce
import GHC.Exts
import GHC.ForeignPtr
-- -----------------------------------------------------------------------------
-- RemotePtr
-- Static pointers only; don't use this for heap-resident pointers.
-- Instead use HValueRef.
#include "MachDeps.h"
#if SIZEOF_HSINT == 4
newtype RemotePtr a = RemotePtr Word32
#elif SIZEOF_HSINT == 8
newtype RemotePtr a = RemotePtr Word64
#endif
-- | View a native 'Ptr' as a serialisable 'RemotePtr'.
toRemotePtr :: Ptr a -> RemotePtr a
toRemotePtr = RemotePtr . fromIntegral . ptrToWordPtr

-- | Recover the native 'Ptr' from a 'RemotePtr'.
fromRemotePtr :: RemotePtr a -> Ptr a
fromRemotePtr (RemotePtr w) = wordPtrToPtr (fromIntegral w)

-- | Change the phantom type of a 'RemotePtr'.
castRemotePtr :: RemotePtr a -> RemotePtr b
castRemotePtr (RemotePtr w) = RemotePtr w
deriving instance Show (RemotePtr a)
deriving instance Binary (RemotePtr a)
deriving instance NFData (RemotePtr a)

-- -----------------------------------------------------------------------------
-- HValueRef

-- | An arbitrary heap value, wrapped in 'Any' so it can be handled untyped.
newtype HValue = HValue Any

instance Show HValue where
  show _ = "<HValue>"

-- | A reference to a remote value. These are allocated and freed explicitly.
newtype RemoteRef a = RemoteRef (RemotePtr ())
  deriving (Show, Binary)

-- We can discard type information if we want
toHValueRef :: RemoteRef a -> RemoteRef HValue
toHValueRef = unsafeCoerce

-- For convenience
type HValueRef = RemoteRef HValue

-- | Make a reference to a local value that we can send remotely.
-- This reference will keep the value that it refers to alive until
-- 'freeRemoteRef' is called.
mkRemoteRef :: a -> IO (RemoteRef a)
mkRemoteRef a = do
  -- The StablePtr both protects the value from GC and provides an
  -- address we can ship across the wire.
  sp <- newStablePtr a
  return $! RemoteRef (toRemotePtr (castStablePtrToPtr sp))

-- | Convert an HValueRef to an HValue. Should only be used if the HValue
-- originated in this process.
localRef :: RemoteRef a -> IO a
localRef (RemoteRef w) =
  deRefStablePtr (castPtrToStablePtr (fromRemotePtr w))

-- | Release an HValueRef that originated in this process
-- (frees the StablePtr created by 'mkRemoteRef').
freeRemoteRef :: RemoteRef a -> IO ()
freeRemoteRef (RemoteRef w) =
  freeStablePtr (castPtrToStablePtr (fromRemotePtr w))

-- | An HValueRef with a finalizer
newtype ForeignRef a = ForeignRef (ForeignPtr ())

instance NFData (ForeignRef a) where
  rnf x = x `seq` ()

type ForeignHValue = ForeignRef HValue

-- | Create a 'ForeignRef' from a 'RemoteRef'.  The finalizer
-- should arrange to call 'freeHValueRef' on the 'HValueRef'. (since
-- this function needs to be called in the process that created the
-- 'HValueRef', it cannot be called directly from the finalizer).
mkForeignRef :: RemoteRef a -> IO () -> IO (ForeignRef a)
mkForeignRef (RemoteRef hvref) finalizer =
  ForeignRef <$> newForeignPtr (fromRemotePtr hvref) finalizer

-- | Use a 'ForeignHValue'
withForeignRef :: ForeignRef a -> (RemoteRef a -> IO b) -> IO b
withForeignRef (ForeignRef fp) f =
  withForeignPtr fp (f . RemoteRef . toRemotePtr)

-- Unsafe: the returned 'RemoteRef' does not keep the underlying
-- 'ForeignPtr' alive (it goes via 'unsafeForeignPtrToPtr').
unsafeForeignRefToRemoteRef :: ForeignRef a -> RemoteRef a
unsafeForeignRefToRemoteRef (ForeignRef fp) =
  RemoteRef (toRemotePtr (unsafeForeignPtrToPtr fp))

-- | Run the 'ForeignRef''s finalizer immediately.
finalizeForeignRef :: ForeignRef a -> IO ()
finalizeForeignRef (ForeignRef fp) = finalizeForeignPtr fp
| snoyberg/ghc | libraries/ghci/GHCi/RemoteTypes.hs | bsd-3-clause | 3,795 | 0 | 12 | 587 | 791 | 425 | 366 | 59 | 1 |
<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE helpset PUBLIC "-//Sun Microsystems Inc.//DTD JavaHelp HelpSet Version 2.0//EN" "http://java.sun.com/products/javahelp/helpset_2_0.dtd">
<helpset version="2.0" xml:lang="sr-SP">
  <!-- NOTE(review): the title reads "Port Scan" although this helpset is
       shipped under the zest add-on path (sr-SP locale); likely a copy/paste
       leftover. The title is user visible, so confirm before renaming. -->
  <title>Port Scan | ZAP Extension</title>
  <maps>
    <homeID>top</homeID>
    <mapref location="map.jhm"/>
  </maps>
  <view>
    <name>TOC</name>
    <label>Contents</label>
    <type>org.zaproxy.zap.extension.help.ZapTocView</type>
    <data>toc.xml</data>
  </view>
  <view>
    <name>Index</name>
    <label>Index</label>
    <type>javax.help.IndexView</type>
    <data>index.xml</data>
  </view>
  <view>
    <name>Search</name>
    <label>Search</label>
    <type>javax.help.SearchView</type>
    <data engine="com.sun.java.help.search.DefaultSearchEngine">
    JavaHelpSearch
    </data>
  </view>
  <view>
    <name>Favorites</name>
    <label>Favorites</label>
    <type>javax.help.FavoritesView</type>
  </view>
</helpset>
module Test5 where

data Dummy = Bob Int

newtype Test = Con String

-- NOTE(review): partial — only matches the literal 42. The file appears to
-- be a refactoring-test fixture (refacDataNewType), so the shape is likely
-- intentional.
f 42 = Con "Hello"
| kmate/HaRe | old/testing/refacDataNewType/Test5_TokOut.hs | bsd-3-clause | 88 | 0 | 6 | 21 | 32 | 18 | 14 | 4 | 1 |
module T2302 where

f = Γ
-- NOTE(review): 'Γ' (U+0393) is an uppercase Unicode letter; this file lives
-- under tests/parser/unicode and exists to exercise GHC's handling of such
-- identifiers — keep the line above byte-exact.
| urbanslug/ghc | testsuite/tests/parser/unicode/T2302.hs | bsd-3-clause | 26 | 0 | 4 | 6 | 9 | 6 | 3 | 2 | 1 |
module Sandbox.Statistics where
import qualified Sandbox.Number.Extra as N
-- | Number of ordered selections (permutations) of r objects drawn from a
-- set of n objects: n! / (n - r)!.
permutations :: (Fractional b, Integral a) => a -> a -> b
permutations n r = ratio (N.fact n) (N.fact (n - r))
  where ratio p q = fromIntegral p / fromIntegral q
-- | Number of unordered selections (combinations) of r objects drawn from a
-- set of n objects: the permutation count divided by r!.
combinations :: (Fractional b, Integral a) => a -> a -> b
combinations n r =
  let ordered = permutations n r
  in ordered / fromIntegral (N.fact r)
-- | Binomial distribution: probability of exactly @x@ successes in @n@
-- independent trials with per-trial success probability @p@.
binomial :: (Fractional b, Integral a) => a -> a -> b -> b
binomial x n p = combinations n x * successes * failures
  where
    successes = p ^ x
    failures  = (1 - p) ^ (n - x)
-- | Negative binomial distribution: probability that the @x@-th success
-- occurs on trial @n@, with per-trial success probability @p@.
negative_binomial :: (Fractional b, Integral a) => a -> a -> b -> b
negative_binomial x n p = combinations (n - 1) (x - 1) * successes * failures
  where
    successes = p ^ x
    failures  = (1 - p) ^ (n - x)
-- | Geometric distribution: @n - x@ failures (probability @1-p@ each)
-- followed by a success with probability @p@.
--
-- * `x` - the number of succeses.
-- * `n` - the total number of trials.
-- * `p` - the probability of success of 1 trial.
geometric :: (Fractional b, Integral a) => a -> a -> b -> b
geometric x n p = failures * p
  where failures = (1 - p) ^ (n - x)
-- | Poisson distribution.
--
-- * `k` is the actual number of successes that occur in a specified region.
-- * `l` is the average number of successes that occur in a specified region.
poisson :: (Floating b, Integral a) => a -> b -> b
poisson k l = numerator / fromIntegral (N.fact k)
  where numerator = (l ^ k) * exp (-l)
| 4e6/sandbox | haskell/Sandbox/Statistics.hs | mit | 1,446 | 0 | 10 | 303 | 483 | 263 | 220 | 16 | 1 |
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE QuasiQuotes #-}
module KMC.Program.Backends.C where
import Control.Monad (join)
import Control.Monad.Trans
import Data.Bits
import Data.Char (ord, chr, isPrint, isAscii, isSpace)
import Data.List (intercalate, isInfixOf)
import qualified Data.Map as M
import qualified Data.Set as S
import Numeric
import System.Exit (ExitCode(..))
import System.IO
import System.Process
import Text.PrettyPrint
import KMC.Util.Coding
import KMC.Program.IL
import KMC.Util.Heredoc
{- Utility functions -}
-- | Break a list into pieces of length n, working left to right; the final
-- piece may be shorter than n.
chunk :: Int -> [a] -> [[a]]
chunk _ [] = []
chunk n xs = piece : chunk n rest
  where (piece, rest) = splitAt n xs
-- | Left-pad a string with spaces up to the given width; strings already at
-- least that wide are returned unchanged.
padL :: Int -> String -> String
padL width s = padding ++ s
  where padding = replicate (width - length s) ' '
-- | Right-pad a string with spaces up to the given width; strings already at
-- least that wide are returned unchanged.
padR :: Int -> String -> String
padR width s = s ++ padding
  where padding = replicate (width - length s) ' '
-- | Splice the generated sections (buffer-unit type, tables, declarations,
-- compilation info, init code and one match body per phase) into the C
-- runtime template.  The [strQ|...|] / [fileQ|...|] quasiquoted text is
-- emitted verbatim as C source, so do not re-indent it.
progTemplate :: String -> String -> String -> String -> String -> [String] -> String
progTemplate buString tablesString declsString infoString initString progStrings =
  [strQ|
#define NUM_PHASES |] ++ show (length progStrings) ++ [strQ|
#define BUFFER_UNIT_T |] ++ buString ++ "\n"
  ++ [fileQ|crt/crt.c|] ++ "\n"
  ++ tablesString ++ "\n"
  ++ declsString ++ [strQ|
void printCompilationInfo()
{
  fprintf(stdout, |]++infoString++[strQ|);
}

void init()
{
|]++initString ++[strQ|
}
|]++concat (zipWith matchTemplate progStrings [1..])++[strQ|
void match(int phase)
{
  switch(phase) {
|]++intercalate "\n" ["case " ++ show i ++ ": match" ++ show i ++ "(); break;"
                     | i <- [1..(length progStrings)] ]++
[strQ|
    default:
      fprintf(stderr, "Invalid phase: %d given\n", phase);
      exit(1);
  }
}
|]

-- | C body for one pipeline phase: a matchN() function whose accept/fail
-- labels are numbered by the phase index.
matchTemplate :: String -> Int -> String
matchTemplate progString n = [strQ|void match|] ++ show n ++ [strQ|()
{
  int i = 0;
|]++progString++[strQ|
accept|]++show n++[strQ|:
  return;
fail|]++show n++[strQ|:
  fprintf(stderr, "Match error at input symbol %zu!\n", count);
  exit(1);
}
|]
{- Types -}

-- | Supported C types
data CType = UInt8T | UInt16T | UInt32T | UInt64T
  deriving (Eq, Ord, Show)

-- | A prettyprinted C program. The program is divided into sections which are
-- spliced into the template.
data CProg =
  CProg
  { cTables       :: Doc   -- ^ Table array initializers.
  , cDeclarations :: Doc   -- ^ Buffer and constant declarations.
  , cProg         :: [Doc] -- ^ One code body per pipeline phase.
  , cInit         :: Doc   -- ^ Buffer initialization calls.
  , cBufferUnit   :: Doc   -- ^ Rendered buffer unit C type.
  }

-- | The number of bits each C type can hold
cbitSize :: CType -> Int
cbitSize UInt8T  = 8
cbitSize UInt16T = 16
cbitSize UInt32T = 32
cbitSize UInt64T = 64
-- | The smallest C type that can hold the specified number of bits, or
-- Nothing when the bit count is negative or exceeds 64.
minUnsignedType :: Int -> Maybe CType
minUnsignedType n
  | n < 0 || n > 64 = Nothing
  | n <= 8          = Just UInt8T
  | n <= 16         = Just UInt16T
  | n <= 32         = Just UInt32T
  | otherwise       = Just UInt64T
-- | The prefix added to buffer identifiers.
bufferPrefix :: String
bufferPrefix = "buf_"

-- | The prefix added to constant identifiers.
constPrefix :: String
constPrefix = "const_"

-- | Pretty print a buffer identifier (as reference)
buf :: BufferId -> Doc
buf (BufferId n) = text "&" <> text bufferPrefix <> int n

-- | Pretty print a constant identifier (as reference); constants are
-- namespaced by phase number.
cid :: ConstId -> Int -> Doc
cid (ConstId n) phase = text constPrefix <> int phase <> text "_" <> int n

-- | Pretty print a C type
ctyp :: CType -> Doc
ctyp UInt8T = text "uint8_t"
ctyp UInt16T = text "uint16_t"
ctyp UInt32T = text "uint32_t"
ctyp UInt64T = text "uint64_t"

-- | Pretty print a left aligned cast
cast :: CType -- ^ Destination type
     -> CType -- ^ Source type
     -> Doc   -- ^ Inner pretty printed expression to apply the cast to
     -> Doc
cast ctypeto ctypefrom doc
  | ctypeto == ctypefrom = doc
  | otherwise =
      -- Shift left so the value's most significant bit lands in the
      -- destination type's most significant bit (matches 'num').
      parens (parens (ctyp ctypeto) <> parens doc)
      <+> text "<<" <+> int (cbitSize ctypeto - cbitSize ctypefrom)

-- | Pretty print a table lookup with cast
tbl :: CType        -- ^ Buffer unit type
    -> CType        -- ^ Table unit type
    -> TableId      -- ^ Table identifier
    -> Int          -- ^ Table offset
    -> Maybe String -- ^ Optional dynamic offset
    -> Int          -- ^ Phase number (selects the tblN array)
    -> Doc
tbl ctypectx ctypetbl (TableId n) i mx phase =
  let offsetdoc = maybe (int i)
                        (\s -> int i <+> text "+" <+> text s)
                        mx
  in cast ctypectx ctypetbl
     $ hcat [text "tbl" <> int phase
            ,brackets $ int n
            ,brackets $ text "next" <> brackets offsetdoc
            ]

-- | Pretty print a block identifier
blck :: BlockId -> Int -> Doc
blck (BlockId n) phase = text "l" <> int phase <> text "_" <> int n
-- | Render a numeral value of some bit length as a C numeral, such that the
-- most significant bit of the value is the most significant bit of the C type.
-- For example, 0b1010 regarded as a 5-bit number will be represented as a
-- UInt8T as the value 0b0101 0000, and as an UInt16T as the value
-- 0b0101 0000 0000 0000
num :: (Integral a, Bits a, Show a) =>
       CType    -- ^ C type of the constant
    -> Int      -- ^ Bit width of value
    -> a        -- ^ Value
    -> String
num ctype width x
  | cbitSize ctype < width
    = error $ concat ["The requested type "
                     ,show ctype, " cannot represent "
                     ,show width, " bits of information"]
  | otherwise
    -- Left-justify: shift the value into the high bits of the C type and
    -- print it as a hex literal.
    = let sh = cbitSize ctype - width
      in "0x" ++ showHex (x `shiftL` sh) ""
-- | Pretty print a table as a C array initializer with entries rendered as the given C type.
prettyTableExpr :: Int -> CType -> Table -> Doc
prettyTableExpr outBits ctype (Table { tblTable = tableData, tblDigitSize = digitSize }) =
  lbrace <> prettyLines <> rbrace
  where
    -- Eight cells per source line, comma separated.
    prettyLines = vcat
                  . punctuate comma
                  $ [ hsep . punctuate comma . map prettyCell $ line | line <- chunk 8 tableData ]
    -- Each cell packs one entry's digits into a single left-aligned
    -- C constant (see 'num').
    prettyCell c =
      let n = decode (2 ^ outBits) (map toInteger c) :: Integer
          ndoc = num ctype bwidth n
      in text $ padL cellWidth ndoc
    bwidth = outBits * digitSize
    -- Column width per rendered C type, for aligned output.
    cellWidth = case ctype of
                  UInt8T -> 4
                  UInt16T -> 6
                  UInt32T -> 10
                  UInt64T -> 18
-- | Take a value to output/append and represent it as a list of integers which
-- each fit the given C type. The second component of each pair is the number of
-- bits of the first component that make up the value encoding.
splitAppends :: Int -> CType -> [Int] -> [(Integer, Int)]
splitAppends outBits buftype digits =
    [ (decodeEnum grp, length grp * outBits) | grp <- chunk perUnit digits ]
  where
    -- How many digits fit into one buffer unit.
    perUnit = cbitSize buftype `div` outBits
-- | Pretty print an append table instruction, taking into account whether the
-- destination buffer is the output buffer or a regular buffer. An optional
-- dynamic offset variable can be specified for printing append instructions
-- within a loop.
prettyAppendTbl :: CType
                -> CType
                -> Program
                -> BufferId
                -> TableId
                -> Int
                -> Maybe String
                -> Int
                -> Doc
prettyAppendTbl buftype tbltype prog bid tid i mx phase =
  let arg       = tbl buftype tbltype tid i mx phase
      -- Bit width of one table entry for this table.
      bwidth    = progOutBits prog * (tblDigitSize $ progTables prog M.! tid)
      lendoc    = int bwidth
      streamBuf = progStreamBuffer prog
  in if bid == streamBuf then
         text "outputconst" <> parens (hcat [arg, comma, lendoc]) <> semi
     else
         text "append"
         <> parens (hcat [buf bid, comma, arg, comma, lendoc])
         <> semi

-- | Append the 8-bit input symbol at offset @i@ (plus an optional dynamic
-- offset variable) to a buffer; appending to the stream buffer becomes a
-- direct outputconst.
prettyAppendSym :: BufferId -> BufferId -> Int -> Maybe String -> Doc
prettyAppendSym bid outBuf i mx =
  let offsetdoc = maybe (int i) (\s -> int i <+> text "+" <+> text s) mx
      symb = text "next" <> brackets offsetdoc
  in if bid == outBuf then
         text "outputconst" <> parens (hcat [symb, comma, int 8]) <> semi
     else
         text "append"
         <> parens (hcat [buf bid, comma, symb, comma, int 8]) <> semi
-- | Pretty print an instruction. Note that the C runtime currently
-- distinguishes between regular buffers and the output buffer, and hence the
-- pretty printer needs to handle this case specially.
prettyInstr :: CType   -- ^ The buffer unit type
            -> CType   -- ^ The table unit type
            -> Program -- ^ The surrounding program
            -> Instr -> Int -> Doc
prettyInstr buftype tbltype prog instr phase =
  let streamBuf = progStreamBuffer prog in
  case instr of
    AcceptI -> text "goto accept" <> int phase <> semi
    FailI   -> text "goto fail" <> int phase <> semi
    AppendI bid constid ->
      -- Total bit length of the named constant.
      let lendoc = text $ show $ length (progConstants prog M.! constid) * progOutBits prog
      in if bid == streamBuf then
             text "outputarray"
             <> parens (hcat [cid constid phase, comma, lendoc])
             <> semi
         else
             text "appendarray"
             <> parens (hcat [buf bid, comma, cid constid phase, comma, lendoc])
             <> semi
    AppendTblI bid tid i -> prettyAppendTbl buftype tbltype prog bid tid i Nothing phase
    AppendSymI bid i -> prettyAppendSym bid streamBuf i Nothing
    ConcatI bid1 bid2 -> if bid1 == streamBuf then
                             text "output" <> parens (buf bid2) <> semi
                         else
                             text "concat"
                             <> parens (hcat [buf bid1, comma, buf bid2])
                             <> semi
    ResetI bid -> text "reset" <> parens (buf bid) <> semi
    AlignI bid1 bid2 -> text "align"
                        <> parens (hcat [buf bid1, comma, buf bid2])
                        <> semi
    IfI e is -> text "if" <+> parens (prettyExpr e) $$
                lbrace $+$
                nest 3 (prettyBlock buftype tbltype prog is phase) $+$
                rbrace
    GotoI blid -> text "goto" <+> blck blid phase <> semi
    -- readnext failing runs the fallback block @is@.
    NextI minL maxL is -> text "if"
                          <+> parens (text "!readnext" <> parens (int minL <> comma <+> int maxL)) $$
                          lbrace $+$
                          nest 3 (prettyBlock buftype tbltype prog is phase) $+$
                          rbrace
    ConsumeI i -> text "consume" <> parens (int i) <> semi
    PushI      -> text "stack_push()" <> semi
    PopI bid   -> text "stack_pop" <> parens (buf bid) <> semi
    WriteI bid -> text "stack_write" <> parens (buf bid) <> semi
-- | Recognise a run of consecutive 'AppendTblI' instructions on the same
-- buffer and table whose offsets continue from @i+1@ in steps of one.
-- Returns the run length and the remaining instructions, so 'prettyBlock'
-- can collapse the run into a C for-loop.
appendSpan :: BufferId -> TableId -> Int -> Block -> Maybe (Int, Block)
appendSpan bid tid i is =
  let (is1, is2) = span isAppendTbl is
  in if not (null is1)
        && and (zipWith (==) [ j | AppendTblI _ _ j <- is1 ] [i+1..])
     then
         Just (length is1, is2)
     else
         Nothing
  where
    isAppendTbl (AppendTblI bid' tid' _) = bid == bid' && tid == tid'
    isAppendTbl _ = False

-- | Like 'appendSpan', but for runs of 'AppendSymI' on one buffer with
-- offsets continuing from @i+1@.
appendSymSpan :: BufferId -> Int -> Block -> Maybe (Int, Block)
appendSymSpan bid i is =
  let (is1, is2) = span isAppendSym is
  in if not (null is1)
        && and (zipWith (==) [ j | AppendSymI _ j <- is1 ] [i+1..])
     then
         Just (length is1, is2)
     else
         Nothing
  where
    isAppendSym (AppendSymI bid' _) = bid == bid'
    isAppendSym _ = False
-- | Pretty print a list of instructions.  Runs of consecutive symbol/table
-- appends with increasing offsets (see 'appendSymSpan' / 'appendSpan') are
-- collapsed into C for-loops over the loop variable @i@.
prettyBlock :: CType -> CType -> Program -> Block -> Int -> Doc
prettyBlock buftype tbltype prog instrs phase = go instrs
  where
    go [] = empty
    go (app@(AppendSymI bid i):is) =
      case appendSymSpan bid i is of
        Nothing -> prettyInstr buftype tbltype prog app phase $+$ go is
        Just (n, is') ->
          vcat [ hcat [ text "for"
                      , parens (text "i = 0; i < " <> int (n+1) <> text "; i++")
                      ]
               , lbrace
               , nest 3 (prettyAppendSym bid (progStreamBuffer prog) i (Just "i"))
               , rbrace
               , go is'
               ]
    go (app@(AppendTblI bid tid i):is) =
      case appendSpan bid tid i is of
        Nothing -> prettyInstr buftype tbltype prog app phase $+$ go is
        Just (n, is') ->
          vcat [hcat [text "for"
                     ,parens (text "i = 0; i < " <> int (n+1) <> text "; i++")]
               ,lbrace
               ,nest 3
                (prettyAppendTbl buftype tbltype prog bid tid i (Just "i") phase)
               ,rbrace
               ,go is'
               ]
    go (instr:is) = prettyInstr buftype tbltype prog instr phase $+$ go is
-- | Render a byte sequence as a C string literal.  Printable, non-space
-- ASCII characters other than quote/backslash are emitted directly;
-- everything else becomes a hex escape wrapped in its own quotes (adjacent
-- C string literals concatenate, which keeps the escape unambiguous).
prettyStr :: [Int] -> Doc
prettyStr = doubleQuotes . hcat . map prettyOrd
  where
    prettyOrd n = let c = chr n in
                  if and [isPrint c
                         ,isAscii c
                         ,not (isSpace c)
                         ,not (c `elem` "\"\\")] then
                      char c
                  else
                      doubleQuotes . doubleQuotes $ text "\\x" <> text (showHex n "")
-- | Pretty print a test expression.
prettyExpr :: Expr -> Doc
prettyExpr e =
  case e of
    SymE i -> text "next" <> brackets (int i)
    AvailableSymbolsE -> text "avail"
    CompareE i str -> text "cmp"
                      <> parens (hcat $ punctuate comma
                                 [text "&next" <> brackets (int i)
                                 ,text "(unsigned char *)" <+> prettyStr str
                                 ,int (length str)])
    ConstE n -> let c = chr n in
                -- Emit a readable C character literal when it is safe to do
                -- so; otherwise fall back to the plain number.
                if isPrint c && isAscii c && c /= '\\' && c /= '\'' then
                    quotes (char c)
                else
                    int n
    FalseE -> int 0
    TrueE  -> int 1
    LteE e1 e2 -> op "<=" e1 e2
    LtE e1 e2  -> op "<" e1 e2
    GteE e1 e2 -> op ">=" e1 e2
    GtE e1 e2  -> op ">" e1 e2
    EqE e1 e2  -> op "==" e1 e2
    OrE e1 e2  -> op "||" e1 e2
    AndE e1 e2 -> op "&&" e1 e2
    NotE e1    -> text "!" <> parens (prettyExpr e1)
  where
    -- Fully parenthesised infix application; no C precedence reasoning.
    op str e1 e2 = parens (prettyExpr e1 <+> text str <+> prettyExpr e2)
-- | Pretty print all table declarations.
prettyTableDecl :: CType    -- ^ Table unit type
                -> Pipeline -- ^ Programs
                -> Doc
prettyTableDecl tbltype pipeline = case pipeline of
  Left progs  -> vcat $ zipWith tableDecl progs [1..]
  Right progs -> vcat $ zipWith combine progs [1,3..]
  where
    -- Paired programs occupy consecutive phase numbers (i and i+1).
    combine (p,a) i = tableDecl p i $+$ tableDecl a (i+1)
    tableDecl prog phase =
      if null tables
      then text "/* no tables */"
      else text "const" <+> ctyp tbltype <+> text "tbl" <> int phase
           <> brackets (int (length tables)) <> brackets (int tableSize) <+> text "="
           $$ lbrace <> vcat (punctuate comma (map (prettyTableExpr (progOutBits prog) tbltype) tables))
           <> rbrace <> semi
      where
        tables = M.elems $ progTables prog
        -- All rows share the width of the longest table.
        tableSize = foldl max 0 (map (length . tblTable) tables)
-- | Pretty print all buffer declarations.
prettyBufferDecls :: Pipeline -> Doc
prettyBufferDecls progs =
  vcat (map bufferDecl $ neededBuffers progs)
  where
    bufferDecl (BufferId n) = text "buffer_t" <+> text bufferPrefix <> int n <> semi

-- | Pretty print all constant-array declarations, one per 'ConstId',
-- preceded by a C comment showing the (escaped) byte content.
prettyConstantDecls :: CType -- ^ Buffer unit type
                    -> Pipeline
                    -> Doc
prettyConstantDecls buftype pipeline = case pipeline of
  Left progs  -> vcat $ zipWith constantDecls progs [1..]
  Right progs -> vcat $ zipWith combine progs [1,3..]
  where
    constantDecls prog i = vcat $ map (constantDecl (progOutBits prog) i) $ M.toList $ progConstants prog
    -- Paired programs occupy consecutive phase numbers (i and i+1).
    combine (p,a) i = constantDecls p i $+$ constantDecls a (i+1)
    constantDecl outBits phase (ConstId n, deltas) =
      let chunks = splitAppends outBits buftype deltas
          constdocs = map (\(c, nbits) -> text $ num buftype nbits c) chunks
          comment = join $ map escape $ map chr deltas
          escape c = if isPrint c && isAscii c && isSafe c then [c] else "\\x" ++ showHex (ord c) ""
          isSafe c = c /= '\\'
      in vcat [ text "//" <+> text comment
              , text "const" <+> text "buffer_unit_t" <+> text constPrefix
                <> int phase <> text "_" <> int n
                <> brackets (int $ length chunks)
                <+> text "=" <+>
                (braces $ hcat $ punctuate comma constdocs) <> semi
              ]
-- | All buffers referenced anywhere in the pipeline, deduplicated.
neededBuffers :: Pipeline -> [BufferId]
neededBuffers = S.toList . S.fromList . either fromSingles fromPairs
  where
    fromSingles = concatMap progBuffers
    fromPairs   = concatMap (\(p, a) -> progBuffers p ++ progBuffers a)
-- | All buffers of the pipeline except each program's designated stream
-- (output) buffer.
neededNonStreamBuffers :: Pipeline -> [BufferId]
neededNonStreamBuffers pipeline = S.toList . S.unions $ case pipeline of
  Left progs  -> map needed progs
  Right progs -> map (\(p,a) -> S.union (needed p) (needed a)) progs
  where
    needed prog = S.fromList $ filter (/= progStreamBuffer prog) $ progBuffers prog

-- | Pretty print initialization code. This is just a call to init_buffer() for
-- each (non-stream) buffer in the program.
prettyInit :: Pipeline -> Doc
prettyInit progs =
  vcat (map bufferInit $ neededNonStreamBuffers progs)
  where
    bufferInit bid = text "init_buffer" <> parens (buf bid) <> semi

-- | Pretty print one program: a jump to its initial block followed by every
-- labelled block.
prettyProg :: CType -> CType -> Program -> Int -> Doc
prettyProg buftype tbltype prog phase =
  text "goto" <+> blck (progInitBlock prog) phase <> semi
  $$ vcat (map pblock (M.toList $ progBlocks prog))
  where
    pblock (blid, is) =
      blck blid phase <> char ':'
      <+> prettyBlock buftype tbltype prog is phase
-- | Assemble the rendered sections of the pipeline into a 'CProg'.
-- Tables always use uint8_t entries; a 'Right' pipeline of program pairs is
-- flattened into consecutive phases.
programsToC :: CType -> Pipeline -> CProg
programsToC buftype pipeline =
  CProg
  { cTables       = prettyTableDecl tbltype pipeline
  , cDeclarations = prettyBufferDecls pipeline
                    $+$ prettyConstantDecls buftype pipeline
  , cInit         = prettyInit pipeline
  , cProg         = prettyProgs
  , cBufferUnit   = ctyp buftype
  }
  where
    tbltype = UInt8T
    prettyProgs = case pipeline of
      Left progs  -> zipWith (prettyProg buftype tbltype) progs [1..]
      Right progs -> concat $ zipWith combine progs [1,3..]
    combine (p,a) i = [prettyProg buftype tbltype p i, prettyProg buftype tbltype a (i+1)]
-- | Render every section of a 'CProg' to plain strings and splice them into
-- the C template together with the given compilation-info string.
renderCProg :: String -> CProg -> String
renderCProg compInfo cprog =
    progTemplate bufUnit tables decls compInfo initCode phases
  where
    bufUnit  = render (cBufferUnit cprog)
    tables   = render (cTables cprog)
    decls    = render (cDeclarations cprog)
    initCode = render (cInit cprog)
    phases   = map render (cProg cprog)
-- | Query the C compiler's version banner by running @cc -v@.
-- gcc/clang print version info on stderr, so only stderr is captured.
-- Newlines are replaced by literal backslash-n sequences so the result can
-- be embedded in a C string literal.
ccVersion :: (MonadIO m) => FilePath -> m String
ccVersion comp = liftIO $ do
  (_, _, merr, hproc) <- createProcess (proc comp ["-v"]) { std_err = CreatePipe }
  let hErr = maybe (error "ccVersion: bogus handle") id merr
  errStr <- hGetContents hErr
  -- Force the lazily-read stream before reaping the child; the original
  -- discarded the process handle and never waited, leaking a zombie.
  _ <- length errStr `seq` waitForProcess hproc
  return $ intercalate "\\n" $ lines errStr
-- | Render the pipeline to C, optionally write the source to a file, and
-- optionally invoke the C compiler (the generated source is piped to the
-- compiler's stdin via @-xc -@).
compileProgram :: (MonadIO m)
               => CType             -- ^ Buffer unit type
               -> Int               -- ^ CC Optimization level
               -> (String -> m ())  -- ^ Info message callback
               -> Pipeline          -- ^ Pipeline of programs to compile
               -> Maybe String      -- ^ Optional descriptor to put in program.
               -> FilePath          -- ^ Path to C compiler
               -> Maybe FilePath    -- ^ Binary output path
               -> Maybe FilePath    -- ^ C code output path
               -> Bool              -- ^ Use word alignment
               -> m ExitCode
compileProgram buftype optLevel infoCallback pipeline desc comp moutPath cCodeOutPath wordAlign = do
  cver <- ccVersion comp
  let info = (maybe noOutInfo (outInfo cver) moutPath)
  let cstr = renderCProg info . programsToC buftype $ pipeline
  let sourceLineCount = length (lines cstr)
  case cCodeOutPath of
    Nothing -> return ()
    Just p  -> do
      infoCallback $ "Writing C source to " ++ p
      liftIO $ writeFile p cstr
  case moutPath of
    Nothing -> return ExitSuccess
    Just outPath -> do
      infoCallback $ "Generated " ++ show sourceLineCount ++ " lines of C code."
      infoCallback $ "Running compiler cmd: '" ++ intercalate " " (comp : compilerOpts outPath) ++ "'"
      liftIO $ do
        (Just hin, _, _, hproc) <- liftIO $ createProcess (proc comp (compilerOpts outPath))
                                            { std_in = CreatePipe }
        -- Feed the generated source on stdin, close to signal EOF, then
        -- block until the compiler finishes.
        hPutStrLn hin cstr
        hClose hin
        waitForProcess hproc
  where
    -- "-xc" + trailing "-": compile C source read from stdin.
    compilerOpts binPath = [ "-O" ++ show optLevel, "-xc"
                           , "-o", binPath] ++
                           (if isInfixOf "clang" comp
                            then ["-Wno-tautological-constant-out-of-range-compare"]
                            else [])
                           ++ (if wordAlign then ["-D FLAG_WORDALIGNED"] else [])
                           ++ ["-"]
    quote s = "\"" ++ s ++ "\""
    -- Info strings are embedded in the generated C as a quoted literal,
    -- hence the literal backslash-n separators.
    noOutInfo = quote $ intercalate "\\n"
                [ "No object file generated!"
                , maybe "No environment info available" id desc
                , "" -- adds newline at the end
                ]
    outInfo cver path = quote $ intercalate "\\n"
                [ "Compiler info: "
                , cver
                , ""
                , "CC cmd: "
                , intercalate " " (comp : compilerOpts path)
                , ""
                , maybe "No environment info available" id desc
                , "" -- adds newline at the end
                ]
| diku-kmc/repg | src/KMC/Program/Backends/C.hs | mit | 22,531 | 0 | 25 | 7,747 | 6,463 | 3,264 | 3,199 | 429 | 17 |
module Database.PostgreSQL.Protocol.Codecs.Decoders
( dataRowHeader
, getNonNullable
, getNullable
, FieldDecoder
, bool
, bytea
, char
, date
, float4
, float8
, int2
, int4
, int8
, interval
, bsJsonText
, bsJsonBytes
, numeric
, bsText
, time
, timetz
, timestamp
, timestamptz
, uuid
) where
import Prelude hiding (bool)
import Control.Monad (replicateM, (<$!>))
import Data.ByteString (ByteString)
import Data.Char (chr)
import Data.Int (Int16, Int32, Int64)
import Data.Scientific (Scientific)
import Data.Time (Day, UTCTime, LocalTime, DiffTime, TimeOfDay)
import Data.UUID (UUID, fromWords)
import qualified Data.Vector as V
import Database.PostgreSQL.Protocol.Store.Decode
import Database.PostgreSQL.Protocol.Types
import Database.PostgreSQL.Protocol.Codecs.Time
import Database.PostgreSQL.Protocol.Codecs.Numeric
-- | Decodes DataRow header.
--  1 byte  - Message Header
--  4 bytes - Message length
--  2 bytes - count of columns in the DataRow
{-# INLINE dataRowHeader #-}
dataRowHeader :: Decode ()
dataRowHeader = skipBytes 7

-- | Length of the next field in bytes; -1 denotes SQL NULL.
{-# INLINE fieldLength #-}
fieldLength :: Decode Int
fieldLength = fromIntegral <$> getWord32BE

-- | Run a field decoder on a field that is known to be non-NULL.
{-# INLINE getNonNullable #-}
getNonNullable :: FieldDecoder a -> Decode a
getNonNullable fdec = fieldLength >>= fdec

-- | Run a field decoder, mapping a NULL field (length -1) to 'Nothing'.
{-# INLINE getNullable #-}
getNullable :: FieldDecoder a -> Decode (Maybe a)
getNullable fdec = do
    len <- fieldLength
    if len == -1
    then pure Nothing
    else Just <$!> fdec len

-- | Field in composites contain Oid before value
{-# INLINE compositeValuePrefix #-}
compositeValuePrefix :: Decode ()
compositeValuePrefix = skipBytes 4

-- | Skips length of elements in composite
{-# INLINE compositeHeader #-}
compositeHeader :: Decode ()
compositeHeader = skipBytes 4

-- | Skips array header.
--  4 bytes - count of dimensions
--  4 bytes - if array contains any NULL
--  4 bytes - element Oid
{-# INLINE arrayHeader #-}
arrayHeader :: Decode ()
arrayHeader = skipBytes 12

-- | Decodes size of each dimension; the decoded sizes are reversed.
{-# INLINE arrayDimensions #-}
arrayDimensions :: Int -> Decode (V.Vector Int)
arrayDimensions dims = V.reverse <$> V.replicateM dims arrayDimSize
  where
    -- 4 bytes - count of elements in the dimension
    -- 4 bytes - lower bound (read and discarded)
    arrayDimSize = (fromIntegral <$> getWord32BE) <* getWord32BE

-- | Builds an array field decoder: skips the array header, reads the
-- dimension sizes and hands them to the element decoder.  The field length
-- argument itself is unused.
{-# INLINE arrayFieldDecoder #-}
arrayFieldDecoder :: Int -> (V.Vector Int -> Decode a) -> FieldDecoder a
arrayFieldDecoder dims f _ = arrayHeader *> arrayDimensions dims >>= f
--
-- Primitives
--

-- | Decodes only a content of the field.  The 'Int' argument is the field
-- length in bytes; fixed-size decoders ignore it.
type FieldDecoder a = Int -> Decode a

{-# INLINE bool #-}
bool :: FieldDecoder Bool
bool _ = (== 1) <$> getWord8

{-# INLINE bytea #-}
bytea :: FieldDecoder ByteString
bytea = getByteString

{-# INLINE char #-}
char :: FieldDecoder Char
char _ = chr . fromIntegral <$> getWord8

{-# INLINE date #-}
date :: FieldDecoder Day
date _ = pgjToDay <$> getInt32BE

{-# INLINE float4 #-}
float4 :: FieldDecoder Float
float4 _ = getFloat32BE

{-# INLINE float8 #-}
float8 :: FieldDecoder Double
float8 _ = getFloat64BE

{-# INLINE int2 #-}
int2 :: FieldDecoder Int16
int2 _ = getInt16BE

{-# INLINE int4 #-}
int4 :: FieldDecoder Int32
int4 _ = getInt32BE

{-# INLINE int8 #-}
int8 :: FieldDecoder Int64
int8 _ = getInt64BE

-- Reads one 8-byte and two 4-byte big-endian integers and combines them via
-- 'intervalToDiffTime'.
{-# INLINE interval #-}
interval :: FieldDecoder DiffTime
interval _ = intervalToDiffTime <$> getInt64BE <*> getInt32BE <*> getInt32BE

-- | Decodes representation of JSON as @ByteString@.
{-# INLINE bsJsonText #-}
bsJsonText :: FieldDecoder ByteString
bsJsonText = getByteString

-- | Decodes representation of JSONB as @ByteString@.
-- The leading byte is skipped; the rest of the field is returned as-is.
{-# INLINE bsJsonBytes #-}
bsJsonBytes :: FieldDecoder ByteString
bsJsonBytes len = getWord8 *> getByteString (len - 1)

{-# INLINE numeric #-}
numeric :: FieldDecoder Scientific
numeric _ = do
    ndigits <- getWord16BE
    weight  <- getInt16BE
    sign    <- fromNumericSign =<< getWord16BE
    -- the fourth header word is read and discarded
    _       <- getWord16BE
    numericToScientific sign weight <$>
        replicateM (fromIntegral ndigits) getWord16BE

-- | Decodes text without applying encoding.
{-# INLINE bsText #-}
bsText :: FieldDecoder ByteString
bsText = getByteString

{-# INLINE time #-}
time :: FieldDecoder TimeOfDay
time _ = mcsToTimeOfDay <$> getInt64BE

-- Reads the 8-byte time value; the trailing 4 bytes are read and discarded.
{-# INLINE timetz #-}
timetz :: FieldDecoder TimeOfDay
timetz _ = do
    t <- getInt64BE
    skipBytes 4
    return $ mcsToTimeOfDay t

{-# INLINE timestamp #-}
timestamp :: FieldDecoder LocalTime
timestamp _ = microsToLocalTime <$> getInt64BE

{-# INLINE timestamptz #-}
timestamptz :: FieldDecoder UTCTime
timestamptz _ = microsToUTC <$> getInt64BE

-- Four big-endian 32-bit words assembled with 'Data.UUID.fromWords'.
{-# INLINE uuid #-}
uuid :: FieldDecoder UUID
uuid _ = fromWords
    <$> getWord32BE
    <*> getWord32BE
    <*> getWord32BE
    <*> getWord32BE
| postgres-haskell/postgres-wire | src/Database/PostgreSQL/Protocol/Codecs/Decoders.hs | mit | 4,835 | 0 | 10 | 951 | 1,025 | 569 | 456 | 140 | 2 |
{-# LANGUAGE NoImplicitPrelude, OverloadedStrings #-}
module Day14 (p1, p2) where
import BasicPrelude hiding (try)
import qualified Data.Map as M
import qualified Data.Text as T
-- import Data.List.Split
-- import Control.Arrow
import Text.ParserCombinators.Parsec hiding ((<|>))
import qualified Text.Parsec.Token as P
import Text.Parsec.Language (emptyDef)
-- Token parsers built from the empty language definition.
lexer = P.makeTokenParser emptyDef
decimal = P.decimal lexer
identifier = P.identifier lexer
whiteSpace = P.whiteSpace lexer

-- | The puzzle input file.
input :: IO Text
input = readFile "input/day14.txt"
-- | Whether a reindeer is currently flying or resting, together with
-- how many seconds it has already spent in that state (starts at 1).
data ReinState = Flying Integer | Resting Integer
  deriving (Show, Eq)

-- | A racing reindeer: current state, distance travelled so far, name,
-- speed (km/s), fly duration and rest duration (seconds).
data Reindeer = Reindeer ReinState Integer String Integer Integer Integer
  deriving (Show, Eq)

-- NOTE(review): this Ord instance orders by distance only, so it is
-- inconsistent with the derived Eq (distinct reindeer at equal distance
-- are Ord-equal but not Eq-equal).  It exists to pick the current
-- leader via 'maximum' in 'updateScores'.
instance Ord(Reindeer) where
  (Reindeer _ d1 _ _ _ _) <= (Reindeer _ d2 _ _ _ _) = d1 <= d2
-- | Parses the whole input: one reindeer description per line.
reindeerFile :: CharParser st [Reindeer]
reindeerFile = many reindeer

-- | Parses one line of the form
-- "<name> can fly <speed> km/s for <t> seconds, but then must rest for <r> seconds."
-- New reindeer start flying with zero distance travelled.
reindeer :: CharParser st Reindeer
reindeer = Reindeer (Flying 1) 0
  <$> (identifier <* string "can fly ")
  <*> (decimal <* string " km/s for ")
  <*> (decimal <* string " seconds, but then must rest for ")
  <*> (decimal <* string " seconds." <* whiteSpace)

-- | Runs the reindeer parser over the raw input string.
parseReindeer :: String -> Either ParseError [Reindeer]
parseReindeer = parse reindeerFile "(unknown)"
-- | Advances a reindeer by one second: a flying reindeer gains
-- @speed@ km and either keeps flying or starts resting when its fly
-- time is up; a resting reindeer gains nothing and either keeps
-- resting or takes off again when its rest time is up.
step :: Reindeer -> Reindeer
step (Reindeer st dist nm spd flyTime restTime) =
  case st of
    Flying t
      | t == flyTime -> Reindeer (Resting 1) (dist + spd) nm spd flyTime restTime
      | otherwise    -> Reindeer (Flying (t + 1)) (dist + spd) nm spd flyTime restTime
    Resting t
      | t == restTime -> Reindeer (Flying 1) dist nm spd flyTime restTime
      | otherwise     -> Reindeer (Resting (t + 1)) dist nm spd flyTime restTime
-- | Two sample reindeer from the problem statement, for REPL testing.
test :: IO Text
test = pure "Comet can fly 14 km/s for 10 seconds, but then must rest for 127 seconds.\n\
            \Dancer can fly 16 km/s for 11 seconds, but then must rest for 162 seconds."

-- | Distance travelled so far.
distance :: Reindeer -> Integer
distance (Reindeer _ d _ _ _ _) = d

-- | The reindeer's name.
name :: Reindeer -> String
name (Reindeer _ _ n _ _ _) = n

-- | Part 1: greatest distance after 2503 seconds.  Steps every reindeer
-- once per second ('iterate' over 'fmap step') and reads the state
-- after 2503 steps.  A parse failure collapses to an empty race, in
-- which case 'maximum' on the empty list would throw.
p1 :: IO Integer
p1 = (maximum . map distance .
      (!! 2503) .
      (iterate $ fmap step) .
      either (const []) id . parseReindeer . T.unpack) <$> input
-- 2655
-- | Points accumulated per reindeer name.
type Scores = M.Map String Integer

-- | Every reindeer starts with a score of zero.
initScores :: [Reindeer] -> Scores
initScores = foldl (\m r -> M.insert (name r) 0 m) M.empty

-- | Awards one point to every reindeer currently sharing the lead.
updateScores :: Scores -> [Reindeer] -> Scores
updateScores sc rs =
  let winning = distance $ maximum rs
  in foldl (flip $ M.adjust (+1))
       sc .
       map name $ filter ((winning ==) . distance) rs

-- | Part 2: one point per second to each leader; report an entry after
-- 2503 seconds.
-- NOTE(review): 'M.findMax' returns the entry with the largest *name*
-- (the maximal key), not the largest score -- the recorded answer
-- ("Vixen",1059) may only coincide.  TODO confirm against the puzzle;
-- the score-maximal entry would need maximumBy (comparing snd).
p2 :: IO (String, Integer)
p2 = do
  rs <- (either (const []) id . parseReindeer . T.unpack) <$> input
  let steps = drop 1 $ (iterate $ fmap step) rs
  let winners = (map M.findMax) $ scanl updateScores (initScores rs) steps
  pure (winners !! 2503)
-- ("Vixen",1059)
| farrellm/advent | src/Day14.hs | mit | 2,837 | 0 | 15 | 622 | 1,030 | 536 | 494 | 63 | 1 |
{-# LANGUAGE RecordWildCards #-}
module ForgetYourPassword.Lib
( makePassword
, PasswordData (..)
) where
import Crypto.Hash
import qualified Data.ByteString.Char8 as C8
import Data.Char
import qualified Data.Text as T
import Data.Text.Encoding
import Numeric
-- | Inputs for deterministic password derivation.
data PasswordData =
  PasswordData { uniqueKey      :: String                -- ^ key material mixed into the hash
               , salt           :: String                -- ^ salt appended after the separator
               , passwordLength :: {-# UNPACK #-} !Int   -- ^ number of characters to emit
               }

-- | Deterministically derives a password: SHA-256 the key material,
-- read the hex digest as one big integer, expand it into base-61
-- digits, and map each digit onto the alphabet 0-9, a-z, A-Z.
makePassword :: PasswordData -> String
makePassword pd@PasswordData{..} = map intToPasswordChar $ take passwordLength $ (splitto . hashToInt . makeHash) pd
  where
    -- Maps a digit in [0..61] onto the 62-symbol alphabet.
    -- NOTE(review): 'splitto' divides by 61, so digits are always in
    -- [0..60] and 'Z' (61) is never emitted.  Using base 62 would fix
    -- the bias but would change every previously generated password,
    -- so it is only flagged here.
    intToPasswordChar :: Integer -> Char
    intToPasswordChar i
      | i >= 0  && i <= 9  = chr (48 + fromIntegral i) -- 0..9
      | i >= 10 && i <= 35 = chr (87 + fromIntegral i) -- a..z
      | i >= 36 && i <= 61 = chr (29 + fromIntegral i) -- A..Z
      | otherwise = error "BUG: intToPasswordChar i out of bound"
    -- Infinite little-endian base-61 digit expansion.
    splitto :: Integer -> [Integer]
    splitto i = let (q, r) = i `quotRem` 61 in r:splitto q
    -- Interprets the hex digest as an Integer.  'head' is safe only
    -- because 'makeHash' always yields valid hex.
    hashToInt :: String -> Integer
    hashToInt = fst . head . readHex
    -- SHA-256 of uniqueKey ++ separator ++ salt, rendered as hex.
    -- NOTE(review): the literal "\xE0031" parses as the single code
    -- point U+E0031 (Haskell hex escapes are greedy), not as "\xE003"
    -- followed by '1' -- confirm this is the intended separator.
    makeHash :: PasswordData -> String
    makeHash PasswordData{..} = C8.unpack $ digestToHexByteString (hash . encodeUtf8 $ T.pack (uniqueKey ++ "\xE0031" ++ salt) :: Digest SHA256)
| KenetJervet/forget-your-password | src/ForgetYourPassword/Lib.hs | mit | 1,346 | 0 | 14 | 388 | 409 | 219 | 190 | 28 | 1 |
-- 1. Give another possible calculation for the result of double (double 2)
-- | Quadruples its argument; an alternative calculation for
-- double (double 2).
quadruple :: Num a => a -> a
quadruple = (* 4)
-- 2. Show that sum [x] = x for any number x
-- | Sums a list by direct recursion; in particular testSum [x] == x.
-- Fixed: the original crashed on the empty list and tested emptiness
-- with the O(n) @length xs == 0@; an explicit [] base case (summing to
-- 0, like Prelude 'sum') makes it total and removes the length walk.
testSum :: Num a => [a] -> a
testSum [] = 0
testSum (x : xs) = x + testSum xs
-- 3. Define a function product that produces the product of a list of numbers,
-- and show using your definition that product [2, 3, 4] = 24.
-- | Multiplies a list by direct recursion; testProduct [2, 3, 4] == 24.
-- Fixed: the original crashed on the empty list, used the O(n)
-- @length xs == 0@ test, and recursed via Prelude 'product' instead of
-- itself.  An explicit [] base case (product 1) makes it total and the
-- recursion self-consistent.
testProduct :: Num a => [a] -> a
testProduct [] = 1
testProduct (x : xs) = x * testProduct xs
-- testProduct [2, 3, 4]
-- 4. How should the definition of the function qsort be modified so that it
-- produces a reverse sorted version of a list?
-- | Quicksort producing a reverse (descending) ordering: elements
-- greater than the pivot come first, then the pivot, then the rest.
qsort :: Ord a => [a] -> [a]
qsort [] = []
qsort (pivot : rest) = qsort above ++ [pivot] ++ qsort belowOrEqual
  where
    above        = [e | e <- rest, e > pivot]
    belowOrEqual = [e | e <- rest, e <= pivot]
-- 5. What would be the effect of replacing β€ by < in the definition of qsort?
-- Hint: consider the example qsort [2, 2, 3, 1, 1].
-- qsort [2, 2, 3, 1, 1] using <
-- qsort [1, 1] ++ [2] ++ qsort [3]
-- qsort [] ++ [1] ++ qsort [] ++ [2] ++ qsort [] ++ [3] ++ qsort []
-- [] ++ [1] ++ [] ++ [2] ++ [] ++ [3] ++ []
-- Result: [1, 2, 3]
-- Removes duplicated values
--------------------------------------------------------------------------------
-- qsort [2, 2, 3, 1, 1] using <=
-- qsort [2, 1, 1] ++ [2] + qsort [3]
-- qsort [1, 1] + [2] + qsort [] ++ [2] ++ qsort [] ++ [3] ++ qsort []
-- qsort [1] ++ [1] ++ qsort [] ++ [2] ++ [] ++ [2] ++ [] ++ [3] ++ []
-- qsort [] ++ [1] ++ qsort [] ++ [1] ++ [] ++ [2] ++ [] ++ [2] ++ [] ++ [3] ++ []
-- [] ++ [1] ++ [] ++ [1] ++ [] ++ [2] ++ [] ++ [2] ++ [] ++ [3] ++ []
-- Result: [1, 1, 2, 2, 3]
-- | Descending sort that picks the maximum as pivot.
-- Fixed: the original kept the pivot occurrence inside @larger@
-- (@[p | p <- xs, p >= x]@ ranged over the whole list), so the maximum
-- was emitted twice -- e.g. qsort_1 [2,1] returned [2,2,1].  One
-- occurrence of the pivot is now removed before partitioning, and the
-- recursion goes through qsort_1 itself rather than the sibling qsort.
qsort_1 :: Ord a => [a] -> [a]
qsort_1 [] = []
qsort_1 xs = x : qsort_1 larger ++ qsort_1 smaller
  where
    x = maximum xs
    -- The list with exactly one occurrence of the pivot removed.
    rest = removeFirst xs
    removeFirst (y : ys)
      | y == x    = ys
      | otherwise = y : removeFirst ys
    removeFirst [] = []
    smaller = [p | p <- rest, p < x]
    larger  = [p | p <- rest, p >= x]
| ricca509/haskellFP101x | src/book/chapter1.hs | mit | 1,794 | 5 | 9 | 500 | 264 | 152 | 112 | -1 | -1 |
{-# LANGUAGE CPP #-}
{-# LANGUAGE PackageImports #-}
import "time-attack" Application (getApplicationDev)
import Network.Wai.Handler.Warp
(runSettings, defaultSettings, setPort)
import Control.Concurrent (forkIO)
import System.Directory (doesFileExist, removeFile)
import System.Exit (exitSuccess)
import Control.Concurrent (threadDelay)
#ifndef mingw32_HOST_OS
import System.Posix.Signals (installHandler, sigINT, Handler(Catch))
#endif
main :: IO ()
main = do
#ifndef mingw32_HOST_OS
    -- Swallow SIGINT (POSIX only) so shutdown happens via the
    -- devel-terminate sentinel file instead of an immediate kill.
    _ <- installHandler sigINT (Catch $ return ()) Nothing
#endif
    putStrLn "Starting devel application"
    (port, app) <- getApplicationDev
    -- Serve in a background thread; the main thread polls for shutdown.
    forkIO $ runSettings (setPort port defaultSettings) app
    loop

-- | Polls every 100ms for the sentinel file that requests shutdown of
-- the development server.
loop :: IO ()
loop = do
    threadDelay 100000
    e <- doesFileExist "yesod-devel/devel-terminate"
    if e then terminateDevel else loop

terminateDevel :: IO ()
terminateDevel = exitSuccess
| TimeAttack/time-attack-server | devel.hs | mit | 895 | 0 | 12 | 134 | 238 | 131 | 107 | 24 | 2 |
module Tfoo.Helpers.Game where
import Application
import Tfoo.Helpers.Application
import Tfoo.Game
import Tfoo.Board
import Tfoo.Matrix
import Tfoo.Foundation
import Data.Text hiding (map, intercalate, words)
import Data.List
import Data.Maybe
import Yesod
import Control.Monad
import Control.Concurrent.MVar
import Control.Concurrent.Chan
import Network.Wai.EventSource (ServerEvent (..), eventSourceApp)
import Blaze.ByteString.Builder.Char.Utf8 (fromString)
-- | Fetches the game stored at the given index, or responds 404 when
-- the index is beyond the highest allocated game id.
getGame :: Int -> Handler Game
getGame id = do
  tfoo <- getYesod
  maxId <- liftIO $ readMVar $ nextGameId tfoo
  list <- liftIO $ readMVar $ games tfoo
  if id < maxId
    then liftIO (list !! id)
    else notFound
-- | Replaces the stored game at the given index.  The whole games list
-- lives in a single MVar; 'Tfoo.Matrix.replace' swaps in the new game.
updateGame :: Int -> Game -> Handler ()
updateGame id game = do
  tfoo <- getYesod
  liftIO $ modifyMVar (games tfoo) (\games ->
      return (Tfoo.Matrix.replace id (return game) games, games)
    )
  return ()

-- | Seats the current visitor at the given game as the given mark:
-- appends the derived player id to the visitor's session and records
-- it in the game state.
joinGame :: Int -> Mark -> Handler ()
joinGame id mark =
  do
    game <- getGame id
    tfoo <- getYesod
    appendSession' (pack "players") $ pack (playerId tfoo)
    updateGame id $ setPlayer game mark (playerId tfoo)
    return ()
  where
    -- Player ids are derived from the server seed, game id and mark.
    playerId tfoo = (show $ seed tfoo) ++ (show id) ++ (show mark)

-- | Starts a single-player game: the visitor plays X, the AI plays O.
newSinglePlayerGame :: Int -> Handler ()
newSinglePlayerGame id = do
  joinGame id X
  addAi id O
  return ()

-- | Registers the built-in AI (player id "AI") for the given mark.
addAi :: Int -> Mark -> Handler ()
addAi id mark = do
  game <- getGame id
  updateGame id $ setPlayer game mark "AI"
  return ()
-- | Pushes a server-sent event to everyone watching the game.  The
-- payload is a JSON object assembled by hand from the message id plus
-- the given key/value pairs.
-- NOTE(review): values are spliced in without any escaping, so they
-- must not contain quotes or backslashes -- confirm all callers only
-- pass controlled strings.
broadcast :: Int -> String -> [(String, String)] -> Handler ()
broadcast gameId messageId pairs = do
    game <- getGame gameId
    liftIO $ writeChan (channel game) $ serverEvent $ return $ fromString message
  where message = "{"++(stringifiedPairs $ ("id",messageId):pairs)++"}"
        stringifiedPairs pairs = intercalate ", " $ map stringifyPair pairs
        stringifyPair p = "\""++(fst p) ++ "\": \"" ++ (snd p) ++ "\""
        serverEvent = ServerEvent Nothing Nothing

-- | A move is legal when the game has no winner yet, the target cell
-- is empty, and the player whose turn it is appears in the caller's
-- authorization list.
validMove :: Int -> Int -> Game -> [Player] -> Bool
validMove x y game authorizations =
  let whoseTurn' = whoseTurn game
      board' = board game
      gameInProgress = (winner board') == Nothing
      targetCellEmpty = (getCell board' x y) == Nothing
      playerAuthorized =
        -- Nothing (nobody's turn) counts as not authorized.
        fromMaybe False $ liftM (`elem` authorizations) whoseTurn'
  in gameInProgress && targetCellEmpty && playerAuthorized

-- | Places the next mark at (x, y), then broadcasts the new mark and
-- the updated game state to all watchers.
-- NOTE(review): performs no validation itself -- callers presumably
-- guard with 'validMove' first; confirm at the call sites.
placeMark :: Int -> Int -> Int -> Handler ()
placeMark id x y = do
  game <- getGame id
  board' <- return $ board game
  mark <- return $ nextMark board'
  updateGame id $ game {board = replace' x y (Just mark) board'}
  game' <- getGame id
  broadcast id "mark-new" [("x", show x), ("y", show y), ("mark", show mark)]
  broadcast id "alert" [("content", gameState game')]

-- | The player ids stored in the visitor's session (space separated),
-- or [] when the session has none.
playerAuthorizations :: Handler [Player]
playerAuthorizations = do
  authorizations <- lookupSession $ pack "players"
  return $ fromMaybe [] $ fmap (words . unpack) authorizations
| nbartlomiej/tfoo | Tfoo/Helpers/Game.hs | mit | 2,940 | 0 | 15 | 616 | 1,113 | 559 | 554 | 79 | 2 |
module ComplexNumbers (
Complex,
conjugate,
abs,
real,
imaginary,
mul,
add,
sub,
div,
complex
) where
import Prelude hiding (abs, div)
-- | A complex number over any numeric type, with strict real and
-- imaginary components.
data Complex a = C { _re :: !a , _im :: !a} deriving (Eq, Show)

-- | Builds a complex number from a (real, imaginary) pair.
complex :: (a, a) -> Complex a
complex (re, im) = C re im

-- | Negates the imaginary part.
conjugate :: Num a => Complex a -> Complex a
conjugate (C re im) = C re (negate im)

-- | The modulus: |a + bi| = sqrt (a^2 + b^2).
abs :: Floating a => Complex a -> a
abs (C re im) = sqrt (re * re + im * im)

-- | The real component.
real :: Num a => Complex a -> a
real (C re _) = re

-- | The imaginary component.
imaginary :: Num a => Complex a -> a
imaginary (C _ im) = im

-- | Multiplication: (a+bi)(c+di) = (ac - bd) + (bc + ad)i.
mul :: Num a => Complex a -> Complex a -> Complex a
mul (C a b) (C c d) = complex (a * c - b * d, b * c + a * d)

-- | Component-wise addition.
add :: Num a => Complex a -> Complex a -> Complex a
add (C a b) (C c d) = complex (a + c, b + d)

-- | Component-wise subtraction.
sub :: Num a => Complex a -> Complex a -> Complex a
sub (C a b) (C c d) = complex (a - c, b - d)

-- | Division via the conjugate of the denominator.
div :: Fractional a => Complex a -> Complex a -> Complex a
div (C a b) (C c d) = C ((a * c + b * d) / denom) ((b * c - a * d) / denom)
  where denom = c * c + d * d
| genos/online_problems | exercism/haskell/complex-numbers/src/ComplexNumbers.hs | mit | 971 | 0 | 11 | 282 | 615 | 316 | 299 | 36 | 1 |
module SupplyTest
where
import Data.Maybe
import Control.Monad (liftM2)
import Test.HUnit
import TestUtils
import Types
import Curve
-- Fixture curves used by the balance tests: one exponential curve and
-- two linear ones.
f1 = [ExponentialFunction 2 0.5 0]
f2 = [LinearFunction (-2) 100]
f3 = [LinearFunction 1 0]
-- g1 = curveToPol f1
-- g2 = curveToPol f2
-- g3 = curveToPol f3
-- h1 = derivePolynomial g1
-- h2 = derivePolynomial g2
-- h3 = derivePolynomial g3
-- | Two balance results agree when both points exist and are within a
-- 0.001 tolerance, or when both are absent; a lone 'Nothing' on either
-- side never matches.
checkBalance :: Maybe (Flt, Flt) -> Maybe (Flt, Flt) -> Bool
checkBalance (Just p) (Just q) = closeEnough2 0.001 p q
checkBalance b1       b2       = b1 == b2
-- | Exercises 'balance' on every ordered pair of the fixture curves:
-- expected intersection points must match (within tolerance), the
-- result must be symmetric in argument order (b1/b4, b2/b5, b3/b6),
-- and non-intersecting pairs must yield Nothing.
supplytest1 = do
  let b1 = balance f1 f2
  let b2 = balance f1 f3
  let b3 = balance f2 f3
  let b4 = balance f2 f1
  let b5 = balance f3 f1
  let b6 = balance f3 f2
  assertBool ("Balance 1: " ++ show b1) (checkBalance b1 (Just (12.282,75.434)))
  assertBool ("Balance 2: " ++ show b2) (checkBalance b2 Nothing)
  assertBool ("Balance 3: " ++ show b3) (checkBalance b3 (Just (33.333,33.333)))
  assertBool ("Balance 4: " ++ show b4) (checkBalance b4 (Just (12.282,75.434)))
  assertBool ("Balance 5: " ++ show b5) (checkBalance b5 Nothing)
  assertBool ("Balance 6: " ++ show b6) (checkBalance b6 (Just (33.333,33.333)))
| anttisalonen/economics | src/SupplyTest.hs | mit | 1,144 | 0 | 12 | 220 | 447 | 227 | 220 | 25 | 1 |
{- xmonad.hs
- Author: Jelle van der Waa ( jelly12gen )
-}
-- Import stuff
import XMonad
import qualified XMonad.StackSet as W
import qualified Data.Map as M
import XMonad.Util.EZConfig(additionalKeys)
import System.Exit
import Graphics.X11.Xlib
import System.IO
-- actions
import XMonad.Actions.CycleWS
import XMonad.Actions.WindowGo
import qualified XMonad.Actions.Search as S
import XMonad.Actions.Search
import qualified XMonad.Actions.Submap as SM
import XMonad.Actions.GridSelect
-- utils
import XMonad.Util.Scratchpad (scratchpadSpawnAction, scratchpadManageHook, scratchpadFilterOutWorkspace)
import XMonad.Util.Run(spawnPipe)
import qualified XMonad.Prompt as P
import XMonad.Prompt.Shell
import XMonad.Prompt
-- hooks
import XMonad.Hooks.DynamicLog
import XMonad.Hooks.ManageDocks
import XMonad.Hooks.UrgencyHook
import XMonad.Hooks.ManageHelpers
-- layouts
import XMonad.Layout.NoBorders
import XMonad.Layout.ResizableTile
import XMonad.Layout.Reflect
import XMonad.Layout.IM
import XMonad.Layout.Tabbed
import XMonad.Layout.PerWorkspace (onWorkspace)
import XMonad.Layout.Grid
-- Data.Ratio for IM layout
import Data.Ratio ((%))
-- Main --
main = do
xmproc <- spawnPipe "xmobar" -- start xmobar
xmonad $ withUrgencyHook NoUrgencyHook $ defaultConfig
{ manageHook = myManageHook
, layoutHook = myLayoutHook
, borderWidth = myBorderWidth
, normalBorderColor = myNormalBorderColor
, focusedBorderColor = myFocusedBorderColor
, keys = myKeys
, logHook = myLogHook xmproc
, modMask = myModMask
, terminal = myTerminal
, workspaces = myWorkspaces
, focusFollowsMouse = False
}
-- hooks
-- automaticly switching app to workspace
-- | Window placement rules: the scratchpad floats in a centered
-- rectangle, dialog-style windows float centered, and applications are
-- shifted to their dedicated workspaces; 'manageDocks' keeps panels
-- out of the tiling area.
-- NOTE(review): VirtualBox is shifted to "6:virtual", which does not
-- appear in 'myWorkspaces' (that slot is "6:vbox") -- the rule targets
-- a tag that is never shown.
myManageHook :: ManageHook
myManageHook = scratchpadManageHook (W.RationalRect 0.25 0.375 0.5 0.35) <+> ( composeAll . concat $
    [[isFullscreen --> doFullFloat
    , className =? "OpenOffice.org 3.1" --> doShift "5:doc"
    , className =? "Xmessage" --> doCenterFloat
    , className =? "Zenity" --> doCenterFloat
    , className =? "feh" --> doCenterFloat
    , className =? "Gimp" --> doShift "9:gimp"
    , className =? "uzbl" --> doShift "2:web"
    , className =? "chromium" --> doShift "2:web"
    , className =? "firefox" --> doShift "2:web"
    , className =? "vimprobable" --> doShift "2:web"
    , className =? "Pidgin" --> doShift "1:chat"
    , className =? "Skype" --> doShift "1:chat"
    , className =? "MPlayer" --> doShift "8:vid"
    , className =? "VirtualBox" --> doShift "6:virtual"
    , className =? "Apvlv" --> doShift "4:pdf"
    , className =? "Evince" --> doShift "4:pdf"
    , className =? "Epdfview" --> doShift "4:pdf"
    , className =? "Remmina" --> doShift "6:vbox"
    , className =? "emacs" --> doShift "3:code"]
    ]
    ) <+> manageDocks
--logHook
myLogHook :: Handle -> X ()
myLogHook h = dynamicLogWithPP $ customPP { ppOutput = hPutStrLn h }
---- Looks --
---- bar
customPP :: PP
customPP = defaultPP {
ppHidden = xmobarColor "#00FF00" ""
, ppCurrent = xmobarColor "#FF0000" "" . wrap "[" "]"
, ppUrgent = xmobarColor "#FF0000" "" . wrap "*" "*"
, ppLayout = xmobarColor "#FF0000" ""
, ppTitle = xmobarColor "#00FF00" "" . shorten 80
, ppSep = "<fc=#0033FF> | </fc>"
}
-- some nice colors for the prompt windows to match the dzen status bar.
myXPConfig = defaultXPConfig
{
fgColor = "#00FFFF"
, bgColor = "#000000"
, bgHLight = "#000000"
, fgHLight = "#FF0000"
, position = Top
}
--- My Theme For Tabbed layout
myTheme = defaultTheme { decoHeight = 16
, activeColor = "#a6c292"
, activeBorderColor = "#a6c292"
, activeTextColor = "#000000"
, inactiveBorderColor = "#000000"
}
--LayoutHook
-- | Per-workspace layouts: chat pins the Pidgin/Skype rosters, gimp
-- pins its toolbox and dock, web prefers tabs, vbox/video (and the
-- "6:games" rule) run fullscreen; everything else cycles the standard
-- layouts.
-- NOTE(review): "6:games" does not occur in 'myWorkspaces' (the games
-- workspace is "7:games"), so that onWorkspace rule never matches.
myLayoutHook = onWorkspace "1:chat" imLayout $ onWorkspace "2:web" webL $ onWorkspace "9:gimp" gimpL $ onWorkspace "6:vbox" fullL $ onWorkspace "6:games" fullL $ onWorkspace "8:vid" fullL $ standardLayouts
  where
    standardLayouts = avoidStruts $ (tiled ||| reflectTiled ||| Mirror tiled ||| Grid ||| Full)
    -- Layouts
    tiled = smartBorders (ResizableTall 1 (2/100) (1/2) [])
    reflectTiled = (reflectHoriz tiled)
    tabLayout = (tabbed shrinkText myTheme)
    full = noBorders Full
    -- IM layout: 1/9 of the screen for the Pidgin roster, 1/8 for Skype.
    -- NOTE(review): 'chatLayout' below is bound but never used.
    imLayout = avoidStruts $ smartBorders $ withIM ratio pidginRoster $ reflectHoriz $ withIM skypeRatio skypeRoster (tiled ||| reflectTiled ||| Grid) where
        chatLayout = Grid
        ratio = (1%9)
        skypeRatio = (1%8)
        pidginRoster = And (ClassName "Pidgin") (Role "buddy_list")
        skypeRoster = (ClassName "Skype") `And` (Not (Title "Options")) `And` (Not (Role "Chats")) `And` (Not (Role "CallWindowForm"))
    -- Gimp layout: toolbox on one side, dock on the other.
    gimpL = avoidStruts $ smartBorders $ withIM (0.11) (Role "gimp-toolbox") $ reflectHoriz $ withIM (0.15) (Role "gimp-dock") Full
    -- Web layout
    webL = avoidStruts $ tabLayout ||| tiled ||| reflectHoriz tiled ||| full
    -- Fullscreen layout (vbox / video)
    fullL = avoidStruts $ full
-------------------------------------------------------------------------------
---- Terminal --
myTerminal :: String
myTerminal = "urxvt"
-------------------------------------------------------------------------------
-- Keys/Button bindings --
-- modmask
myModMask :: KeyMask
myModMask = mod4Mask
-- borders
myBorderWidth :: Dimension
myBorderWidth = 1
--
myNormalBorderColor, myFocusedBorderColor :: String
myNormalBorderColor = "#333333"
myFocusedBorderColor = "#FF0000"
--
--Workspaces
myWorkspaces :: [WorkspaceId]
myWorkspaces = ["1:chat", "2:web", "3:code", "4:pdf", "5:doc", "6:vbox" ,"7:games", "8:vid", "9:gimp"]
--
-- Switch to the "web" workspace
viewWeb = windows (W.greedyView "2:web") -- (0,0a)
--
--Search engines to be selected : [google (g), wikipedia (w) , youtube (y) , maps (m), dictionary (d) , wikipedia (w), bbs (b) ,aur (r), wiki (a) , TPB (t), mininova (n), isohunt (i) ]
--keybinding: hit mod + s + <searchengine>
-- | Submap of search engines: after the mod+s prefix, one key selects
-- the engine that the given prompt method is applied to.
searchEngineMap method = M.fromList $
    [ ((0, xK_g), method S.google )
    , ((0, xK_y), method S.youtube )
    , ((0, xK_m), method S.maps )
    , ((0, xK_d), method S.dictionary )
    , ((0, xK_w), method S.wikipedia )
    , ((0, xK_h), method S.hoogle )
    , ((0, xK_i), method S.isohunt )
    , ((0, xK_b), method $ S.searchEngine "archbbs" "http://bbs.archlinux.org/search.php?action=search&keywords=")
    , ((0, xK_r), method $ S.searchEngine "AUR" "http://aur.archlinux.org/packages.php?O=0&L=0&C=0&K=")
    , ((0, xK_a), method $ S.searchEngine "archwiki" "http://wiki.archlinux.org/index.php/Special:Search?search=")
    ]

-- | Command prefix for running mpd controls on a remote host; the
-- trailing space lets the actual command be appended directly.
-- NOTE(review): "usere@domain" and the key path look like placeholders.
ssh = "ssh -i /path/to/file/.key usere@domain "
-- keys
-- | All key bindings: window management, the search-engine submap,
-- GridSelect, media keys (mpd over ssh, volume), notification scripts,
-- workspace/screen switching and cycling.
myKeys :: XConfig Layout -> M.Map (KeyMask, KeySym) (X ())
myKeys conf@(XConfig {XMonad.modMask = modMask}) = M.fromList $
    -- killing programs
    [ ((modMask, xK_Return), spawn $ XMonad.terminal conf)
    , ((modMask .|. shiftMask, xK_c ), kill)
    -- opening program launcher / search engine
    , ((modMask , xK_s ), SM.submap $ searchEngineMap $ S.promptSearchBrowser myXPConfig "chromium")
    ,((modMask , xK_p), shellPrompt myXPConfig)
    -- GridSelect
    , ((modMask, xK_g), goToSelected defaultGSConfig)
    -- layouts
    , ((modMask, xK_space ), sendMessage NextLayout)
    , ((modMask .|. shiftMask, xK_space ), setLayout $ XMonad.layoutHook conf)
    , ((modMask, xK_b ), sendMessage ToggleStruts)
    -- floating layer stuff
    , ((modMask, xK_t ), withFocused $ windows . W.sink)
    -- refresh'
    , ((modMask, xK_n ), refresh)
    -- focus
    , ((modMask, xK_Tab ), windows W.focusDown)
    , ((modMask, xK_j ), windows W.focusDown)
    , ((modMask, xK_k ), windows W.focusUp)
    , ((modMask, xK_m ), windows W.focusMaster)
    -- swapping
    , ((modMask .|. shiftMask, xK_Return), windows W.swapMaster)
    , ((modMask .|. shiftMask, xK_j ), windows W.swapDown )
    , ((modMask .|. shiftMask, xK_k ), windows W.swapUp )
    -- increase or decrease number of windows in the master area
    , ((modMask , xK_comma ), sendMessage (IncMasterN 1))
    , ((modMask , xK_period), sendMessage (IncMasterN (-1)))
    -- resizing
    , ((modMask, xK_h ), sendMessage Shrink)
    , ((modMask, xK_l ), sendMessage Expand)
    , ((modMask .|. shiftMask, xK_h ), sendMessage MirrorShrink)
    , ((modMask .|. shiftMask, xK_l ), sendMessage MirrorExpand)
    -- mpd controls (media keys run ncmpcpp on a remote host, see 'ssh')
    , ((0 , 0x1008ff16 ), spawn (ssh ++ "ncmpcpp prev"))
    , ((0 , 0x1008ff17 ), spawn (ssh ++ "ncmpcpp next"))
    , ((0 , 0x1008ff14 ), spawn (ssh ++ "ncmpcpp play"))
    , ((0 , 0x1008ff15 ), spawn (ssh ++"ncmpcpp pause"))
    -- scratchpad
    , ((modMask , xK_grave), scratchpadSpawnAction defaultConfig {terminal = myTerminal})
    -- Libnotify
    -- NOTE(review): (modMask .|. shiftMask, xK_a) is bound again below
    -- to spawn "emacs"; 'M.fromList' keeps the last value for a
    -- duplicated key, so this notify.py binding is dead.
    , ((modMask .|. shiftMask, xK_a ), spawn "/home/nisbus/bin/notify.py")
    , ((modMask .|. shiftMask, xK_m ), spawn "/home/nisbus/Projects/Notify/mpd-notification.py")
    , ((modMask .|. shiftMask, xK_t ), spawn "/home/nisbus/Projects/Notify/todo-notification.py")
    , ((modMask .|. shiftMask, xK_g ), spawn "/home/nisbus/bin/notify-mail.py")
    , ((modMask .|. shiftMask, xK_v ), spawn "/home/nisbus/Projects/Notify/sound-notification.py")
    -- Programs
    , ((modMask .|. shiftMask, xK_u ), spawn "unison-gtk2 default")
    , ((modMask .|. shiftMask, xK_p ), spawn "pidgin")
    , ((modMask .|. shiftMask, xK_b ), spawn "chromium")
    , ((modMask .|. shiftMask, xK_a ), spawn "emacs")
    -- volume control
    , ((0 , 0x1008ff13 ), spawn "amixer -q set Master 2dB+")
    , ((0 , 0x1008ff11 ), spawn "amixer -q set Master 2dB-")
    , ((0 , 0x1008ff12 ), spawn "amixer -q set Master toggle")
    -- quit, or restart
    , ((modMask .|. shiftMask, xK_q ), io (exitWith ExitSuccess))
    , ((modMask , xK_q ), restart "xmonad" True)
    ]
    ++
    -- mod-[1..9] %! Switch to workspace N
    -- mod-shift-[1..9] %! Move client to workspace N
    [((m .|. modMask, k), windows $ f i)
        | (i, k) <- zip (XMonad.workspaces conf) [xK_1 .. xK_9]
        , (f, m) <- [(W.greedyView, 0), (W.shift, shiftMask)]]
    ++
    -- mod-[w,e] %! switch to twinview screen 1/2
    -- mod-shift-[w,e] %! move window to screen 1/2
    [((m .|. modMask, key), screenWorkspace sc >>= flip whenJust (windows . f))
        | (key, sc) <- zip [xK_e, xK_w] [0..]
        , (f, m) <- [(W.view, 0), (W.shift, shiftMask)]]
    ++
    -- Cycle workspaces
    [((modMask, xK_Right), nextWS)
    , ((modMask, xK_Left), prevWS)
    , ((modMask .|. shiftMask, xK_Right), shiftToNext)
    , ((modMask .|. shiftMask, xK_Left), shiftToPrev)
    , ((modMask, xK_Up), nextScreen)
    , ((modMask, xK_Down), prevScreen)
    , ((modMask .|. shiftMask, xK_Up), shiftNextScreen)
    , ((modMask .|. shiftMask, xK_Down), shiftPrevScreen)
    , ((modMask, xK_z), toggleWS)
    , ((modMask , xK_f), moveTo Next EmptyWS)] -- find a free workspace
{-# LANGUAGE OverloadedStrings #-}
module Main where
import Control.Concurrent
import Control.Exception
import Control.Lens
import Control.Monad (forever, void, when)
import Control.Monad.IO.Class
import Control.Monad.Trans.Resource
import qualified Data.ByteString.Char8 as ByteString (pack)
import Data.Conduit
import qualified Data.Conduit.List as Conduit.List
import Data.Default
import Data.List.Utils
import Data.Text (Text)
import qualified Data.Text as Text
import Network
import Network.HTTP.Conduit
import System.Environment
import Web.Authenticate.OAuth
import Web.Twitter.Conduit
import Web.Twitter.Types
-- | Comma-separated phrase list for the Twitter statuses/filter
-- "track" parameter.  'join' here is Data.List.Utils.join (an
-- intercalate), not the monadic join.
-- NOTE(review): "programaΓ§Γ£o" looks like mojibake of "programação"
-- (UTF-8 bytes read as Latin-1) -- confirm the source file encoding;
-- it cannot be changed here without changing runtime behaviour.
track :: Text
track = Text.pack $ join "," [
  "big data", "wreq", "monads", "monadas", "monoid", "yesod" , "type-theory",
  "elm", "purescript", "hackage", "stackage", "cabal hell" , "haskellbr",
  "mapreduce", "haskell", "programaΓ§Γ£o funcional", "scala", "clojure",
  "linguagem elixir", "erlang" ]
main :: IO ()
main = start `catch` \(SomeException e) -> do
    liftIO $ putStrLn ("[error] " ++ show e)
    main
  where
    -- NOTE(review): catching 'SomeException' and re-entering 'main'
    -- restarts the stream on any failure; a persistent error (e.g.
    -- bad credentials) will loop forever, logging every iteration.
    start = withSocketsDo $ do
      mgr <- newManager tlsManagerSettings
      twInfo <- twitterInfoFromEnv
      -- Hourly heartbeat so the process shows it is still alive.
      _ <- forkIO $ forever $ do
        putStrLn "[info] I'm alive"
        threadDelay (1000 * 1000 * 60 * 60)
      runResourceT $ do
        haskellersStream <- stream twInfo mgr
          (statusesFilterByTrack track & language .~ Just "pt")
        liftIO $ putStrLn "[info] Started listening to tweets"
        haskellersStream $$+- Conduit.List.mapM_ (handleEvent twInfo mgr)

-- | Logs every matching tweet and follows its author, unless the
-- author is the bot's own (hard-coded) account name.  All other
-- streaming events are ignored.
handleEvent :: MonadResource m => TWInfo -> Manager -> StreamingAPI -> m ()
handleEvent twInfo mgr status = case status of
  SStatus s -> do
    liftIO $ putStrLn ("[tweet] " ++ show (statusText s))
    let username = Text.unpack (userScreenName (statusUser s))
    -- TODO should read the authenticated user's name
    when (username /= "haskellbr2") $ do
      liftIO $ putStrLn ("[follow] Following " ++ username)
      void (call twInfo mgr (friendshipsCreate (ScreenNameParam username)))
  _ -> return ()

-- | Builds Twitter credentials from the four TWITTER_* environment
-- variables.  'getEnv' throws if any of them is unset, which the
-- restart loop in 'main' would then retry indefinitely.
twitterInfoFromEnv :: IO TWInfo
twitterInfoFromEnv = do
  key <- ByteString.pack <$> getEnv "TWITTER_API_KEY"
  secret <- ByteString.pack <$> getEnv "TWITTER_API_SECRET"
  oauthToken <- ByteString.pack <$> getEnv "TWITTER_OAUTH_TOKEN"
  oauthTokenSecret <- ByteString.pack <$> getEnv "TWITTER_OAUTH_TOKEN_SECRET"
  return $
    def { twToken =
            def { twOAuth =
                    twitterOAuth { oauthConsumerKey = key
                                 , oauthConsumerSecret = secret
                                 }
                , twCredential = Credential
                    [ ("oauth_token", oauthToken)
                    , ("oauth_token_secret", oauthTokenSecret)
                    ]
                }
        }
| haskellbr/follow-haskellers | src/Main.hs | mit | 3,172 | 0 | 19 | 1,047 | 737 | 395 | 342 | 65 | 2 |
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE StrictData #-}
{-# LANGUAGE TupleSections #-}
-- | http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appstream-stack-storageconnector.html
module Stratosphere.ResourceProperties.AppStreamStackStorageConnector where
import Stratosphere.ResourceImports
-- | Full data type definition for AppStreamStackStorageConnector. See
-- 'appStreamStackStorageConnector' for a more convenient constructor.
data AppStreamStackStorageConnector =
AppStreamStackStorageConnector
{ _appStreamStackStorageConnectorConnectorType :: Val Text
, _appStreamStackStorageConnectorDomains :: Maybe (ValList Text)
, _appStreamStackStorageConnectorResourceIdentifier :: Maybe (Val Text)
} deriving (Show, Eq)
-- | Serialises to the CloudFormation JSON shape; optional fields that
-- are 'Nothing' are dropped from the object entirely rather than
-- emitted as null.
instance ToJSON AppStreamStackStorageConnector where
  toJSON AppStreamStackStorageConnector{..} =
    object $
    catMaybes
    [ (Just . ("ConnectorType",) . toJSON) _appStreamStackStorageConnectorConnectorType
    , fmap (("Domains",) . toJSON) _appStreamStackStorageConnectorDomains
    , fmap (("ResourceIdentifier",) . toJSON) _appStreamStackStorageConnectorResourceIdentifier
    ]
-- | Constructor for 'AppStreamStackStorageConnector' containing required
-- fields as arguments.
appStreamStackStorageConnector
:: Val Text -- ^ 'assscConnectorType'
-> AppStreamStackStorageConnector
appStreamStackStorageConnector connectorTypearg =
AppStreamStackStorageConnector
{ _appStreamStackStorageConnectorConnectorType = connectorTypearg
, _appStreamStackStorageConnectorDomains = Nothing
, _appStreamStackStorageConnectorResourceIdentifier = Nothing
}
-- | http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appstream-stack-storageconnector.html#cfn-appstream-stack-storageconnector-connectortype
assscConnectorType :: Lens' AppStreamStackStorageConnector (Val Text)
assscConnectorType = lens _appStreamStackStorageConnectorConnectorType (\s a -> s { _appStreamStackStorageConnectorConnectorType = a })
-- | http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appstream-stack-storageconnector.html#cfn-appstream-stack-storageconnector-domains
assscDomains :: Lens' AppStreamStackStorageConnector (Maybe (ValList Text))
assscDomains = lens _appStreamStackStorageConnectorDomains (\s a -> s { _appStreamStackStorageConnectorDomains = a })
-- | http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appstream-stack-storageconnector.html#cfn-appstream-stack-storageconnector-resourceidentifier
assscResourceIdentifier :: Lens' AppStreamStackStorageConnector (Maybe (Val Text))
assscResourceIdentifier = lens _appStreamStackStorageConnectorResourceIdentifier (\s a -> s { _appStreamStackStorageConnectorResourceIdentifier = a })
| frontrowed/stratosphere | library-gen/Stratosphere/ResourceProperties/AppStreamStackStorageConnector.hs | mit | 2,821 | 0 | 13 | 260 | 356 | 202 | 154 | 33 | 1 |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE UnicodeSyntax #-}
{-# LANGUAGE ViewPatterns #-}
-- | API methods list
module Network.StackExchange.API
( -- * SE AccessToken
readAccessTokens, invalidateAccessTokens, applicationDeAuthenticate
-- * SE Answer
, answers, answersByIds, answersOnUsers
, answersOnQuestions, meAnswers, topUserAnswersInTags, meTagsTopAnswers
-- * SE Badge
, badges, badgesByIds, badgeRecipientsByIds
, badgesByName, badgeRecipients, badgesByTag
, badgesOnUsers, meBadges
-- * SE Comment
, commentsOnAnswers, comments, commentsByIds, deleteComment, editComment
, commentsOnPosts, createComment, commentsOnQuestions
, commentsOnUsers, meComments, commentsByUsersToUser, meCommentsTo
, mentionsOnUsers, meMentioned
-- * SE Error
, errors
-- * SE Event
, events
-- * SE Filter
, createFilter, readFilter
-- * SE InboxItems
, inbox, inboxUnread, userInbox, meInbox, userUnreadInbox, meUnreadInbox
-- * SE Info
, info
-- * SE NetworkUser
, associatedUsers, meAssociatedUsers
-- * SE AccountMerge
, mergeHistory, meMergeHistory
-- * SE Notification
, notifications, notificationsUnread, userNotifications, meNotifications
, userUnreadNotifications, meUnreadNotifications
-- * SE Post
, posts, postsByIds
-- * SE Privilege
, privileges, privilegesOnUsers, mePriviledges
-- * SE Question
, questions, questionsByIds, linkedQuestions, relatedQuestions
, featuredQuestions, unansweredQuestions, noAnswerQuestions
, search, advancedSearch, similar, faqsByTags, favoritesOnUsers
, meFavorites, questionsOnUsers, meQuestions, featuredQuestionsOnUsers
, meFeaturedQuestions, noAnswerQuestionsOnUsers, meNoAnswerQuestions
, unacceptedQuestionsOnUsers, meUnacceptedQuestions, unansweredQuestionsOnUsers
, meUnansweredQuestions, topUserQuestionsInTags, meTagsTopQuestions
-- * SE QuestionTimeline
, questionsTimeline
-- * SE Reputation
, reputationOnUsers, meReputation
-- * SE ReputationHistory
, reputationHistory, reputationHistoryFull
, meReputationHistory, meReputationHistoryFull
-- * SE Revision
, revisionsByIds, revisionsByGuids
-- * SE Site
, sites
-- * SE SuggestedEdit
, postsOnSuggestedEdits, suggestedEdits, suggestedEditsByIds
, suggestedEditsOnUsers, meSuggestedEdits
-- * SE Tag
, tags, moderatorOnlyTags, requiredTags
, tagsByName, relatedTags, tagsOnUsers, meTags
-- * SE TagScore
, topAnswerersOnTag, topAskersOnTag
-- * SE TagSynonym
, tagSynonyms, synonymsByTags
-- * SE TagWiki
, wikisByTags
-- * SE TopTag
, topAnswerTagsOnUsers, topQuestionTagsOnUsers
, meTopAnswerTags, meTopQuestionTags
-- * SE User
, users, usersByIds, me, moderators, electedModerators
-- * SE UserTimeline
, timelineOnUsers, meTimeline
-- * SE WritePermission
, writePermissions, meWritePermissions
) where
import Data.Monoid ((<>))
import Control.Exception (throw)
import Data.Aeson ((.:), Value)
import qualified Data.Aeson as A
import qualified Data.Aeson.Types as A
import qualified Data.Attoparsec.Lazy as AP
import Data.ByteString.Lazy (ByteString)
import Data.Text.Lazy (Text)
import qualified Data.Text.Lazy as T
import Data.Text.Lazy.Builder (toLazyText)
import Data.Text.Lazy.Builder.Int (decimal)
import Network.StackExchange.Response
import Network.StackExchange.Request
-- $setup
-- >>> import Control.Applicative
-- >>> import Control.Lens
-- >>> import qualified Data.Aeson.Lens as L
-- >>> import Data.Maybe (catMaybes, isJust)
-- >>> let pagesize = 10 :: Int
-- >>> let checkLengthM f = ((== pagesize) . length) `fmap` f
-- >>> let k = key "Lhg6xe5d5BvNK*C0S8jijA(("
-- >>> let s = site "stackoverflow"
-- >>> let q = query [("pagesize", "10")]
--------------------------
-- Access Tokens
--------------------------
-- | <https://api.stackexchange.com/docs/invalidate-access-tokens>
--
-- Invalidates the given access tokens; they are sent to the API as a
-- single semicolon-separated path segment.
invalidateAccessTokens ∷ [Text] → Request a "invalidateAccessTokens" [SE AccessToken]
invalidateAccessTokens tokens =
  path ("access-tokens/" <> joined <> "/invalidate") <>
  parse (attoparsec items ".access-tokens/{accessTokens}/invalidate: ")
 where
  joined = T.intercalate ";" tokens
-- | <https://api.stackexchange.com/docs/read-access-tokens>
--
-- Reads the properties of the given access tokens, joined into one
-- semicolon-separated path segment.
readAccessTokens ∷ [Text] → Request a "readAccessTokens" [SE AccessToken]
readAccessTokens tokens =
  path ("access-tokens/" <> joined) <>
  parse (attoparsec items ".access-tokens/{accessTokens}: ")
 where
  joined = T.intercalate ";" tokens
-- | <https://api.stackexchange.com/docs/application-de-authenticate>
--
-- De-authenticates the application for the given access tokens,
-- joined into one semicolon-separated path segment.
applicationDeAuthenticate ∷ [Text] → Request a "applicationDeAuthenticate" [SE AccessToken]
applicationDeAuthenticate tokens =
  path ("apps/" <> joined <> "/de-authenticate") <>
  parse (attoparsec items ".apps/{accessTokens}/de-authenticate: ")
 where
  joined = T.intercalate ";" tokens
--------------------------
-- Answers
--------------------------
-- $answers
-- >>> checkLengthM $ askSE (answers <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/answers>
answers β· Request a "answers" [SE Answer]
answers = path "answers" <> parse (attoparsec items ".answers: ")
-- $answersByIds
-- >>> length `fmap` askSE (answersByIds [6841479, 215422, 8881376] <> s <> k)
-- 3
-- | <https://api.stackexchange.com/docs/answers-by-ids>
answersByIds ∷ [Int] → Request a "answersByIds" [SE Answer]
answersByIds ids =
  path ("answers/" <> rendered) <> parse (attoparsec items ".answers/{ids}: ")
 where
  -- Numeric ids are rendered and joined into one ";"-separated segment.
  rendered = T.intercalate ";" (map (toLazyText . decimal) ids)
-- $answersOnUsers
-- >>> checkLengthM $ askSE (answersOnUsers [972985] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/answers-on-users>
answersOnUsers β· [Int] β Request a "answersOnUsers" [SE Answer]
answersOnUsers (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("users/" <> is <> "/answers") <>
parse (attoparsec items ".users/{ids}/answers: ")
-- $answersOnQuestions
-- >>> checkLengthM $ askSE (answersOnQuestions [394601] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/answers-on-questions>
answersOnQuestions β· [Int] β Request a "answersOnQuestions" [SE Answer]
answersOnQuestions (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("questions/" <> is <> "/answers") <>
parse (attoparsec items ".questions/{ids}/answers: ")
-- | <https://api.stackexchange.com/docs/me-answers>
meAnswers β· Request RequireToken "meAnswers" [SE Answer]
meAnswers =
path "me/answers" <> parse (attoparsec items ".me/answers: ")
-- $topUserAnswersInTags
-- >>> checkLengthM $ askSE (topUserAnswersInTags 1097181 ["haskell"] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/top-user-answers-in-tags>
topUserAnswersInTags β· Int β [Text] β Request a "topUserAnswersInTags" [SE Answer]
topUserAnswersInTags (toLazyText . decimal β i) (T.intercalate ";" β ts) =
path ("users/" <> i <> "/tags/" <> ts <> "/top-answers") <>
parse (attoparsec items ".users/{id}/tags/{tags}/top-answers: ")
-- | <https://api.stackexchange.com/docs/me-tags-top-answers>
meTagsTopAnswers β· [Text] β Request RequireToken "meTagsTopAnswers" [SE Answer]
meTagsTopAnswers (T.intercalate ";" β ts) =
path ("me/tags/" <> ts <> "/top-answers") <>
parse (attoparsec items ".me/tags/{tags}/top-answers: ")
--------------------------
-- Badges
--------------------------
-- $badges
-- >>> checkLengthM $ askSE (badges <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/badges>
badges β· Request a "badges" [SE Badge]
badges = path "badges" <> parse (attoparsec items ".badges: ")
-- $badgesByIds
-- >>> length `fmap` askSE (badgesByIds [20] <> s <> k <> q)
-- 1
-- | <https://api.stackexchange.com/docs/badges-by-ids>
badgesByIds β· [Int] β Request a "badgesByIds" [SE Badge]
badgesByIds (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("badges/" <> is) <> parse (attoparsec items ".badges/{ids}: ")
-- $badgeRecipientsByIds
-- >>> checkLengthM $ askSE (badgeRecipientsByIds [20] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/badge-recipients-by-ids>
badgeRecipientsByIds β· [Int] β Request a "badgeRecipientsByIds" [SE Badge]
badgeRecipientsByIds (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("badges/" <> is <> "/recipients") <>
parse (attoparsec items ".badges/{ids}/recipients: ")
-- $badgesByName
-- >>> checkLengthM $ askSE (badgesByName <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/badges-by-name>
badgesByName β· Request a "badgesByName" [SE Badge]
badgesByName =
path ("badges" <> "/name") <> parse (attoparsec items ".badges/name: ")
-- $badgeRecipients
-- >>> checkLengthM $ askSE (badgeRecipients <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/badge-recipients>
badgeRecipients β· Request a "badgeRecipients" [SE Badge]
badgeRecipients =
path ("badges" <> "/recipients") <>
parse (attoparsec items ".badges/recipients: ")
-- $badgesByTag
-- >>> checkLengthM $ askSE (badgesByTag <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/badges-by-tag>
badgesByTag β· Request a "badgesByTag" [SE Badge]
badgesByTag =
path ("badges" <> "/tags") <> parse (attoparsec items ".badges/tags: ")
-- $badgesOnUsers
-- >>> checkLengthM $ askSE (badgesOnUsers [1097181] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/badges-on-users>
badgesOnUsers β· [Int] β Request a "badgesOnUsers" [SE Badge]
badgesOnUsers (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("users/" <> is <> "/badges") <>
parse (attoparsec items ".users/{ids}/badges: ")
-- | <https://api.stackexchange.com/docs/me-badges>
meBadges β· Request RequireToken "meBadges" [SE Badge]
meBadges = path "me/badges" <> parse (attoparsec items ".me/badges: ")
--------------------------
-- Comments
--------------------------
-- $commentsOnAnswers
-- >>> checkLengthM $ askSE (commentsOnAnswers [394837] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/comments-on-answers>
commentsOnAnswers β· [Int] β Request a "commentsOnAnswers" [SE Comment]
commentsOnAnswers (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("answers/" <> is <> "/comments") <>
parse (attoparsec items ".answers/{ids}/comments: ")
-- $comments
-- >>> checkLengthM $ askSE (comments <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/comments>
comments β· Request a "comments" [SE Comment]
comments = path "comments" <> parse (attoparsec items ".comments: ")
-- | <https://api.stackexchange.com/docs/delete-comment>
--
-- Deletes the comment with the given id on behalf of the
-- authenticated user.  Unlike the other write endpoints here, no
-- 'parse' step is attached: the result type is @()@, so the response
-- body is discarded.
deleteComment ∷ Int → Request RequireToken "deleteComment" ()
deleteComment (toLazyText . decimal ← i) =
  path ("comments/" <> i <> "/delete")
-- | <https://api.stackexchange.com/docs/edit-comment>
--
-- Edits the comment with the given id, replacing its body with the
-- supplied text.
editComment ∷ Int → Text → Request RequireToken "editComment" (SE Comment)
editComment commentId body =
  path ("comments/" <> rendered <> "/edit")
    <> query [("body", body)]
    <> parse (attoparsec (fmap SE) ".comments/{id}/edit:")
 where
  rendered = toLazyText (decimal commentId)
-- $commentsByIds
-- >>> length `fmap` askSE (commentsByIds [1218390] <> s <> k)
-- 1
-- | <https://api.stackexchange.com/docs/comments-by-ids>
commentsByIds β· [Int] β Request a "commentsByIds" [SE Comment]
commentsByIds (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("comments/" <> is) <> parse (attoparsec items ".comments/{ids}: ")
-- $commentsOnPosts
-- >>> checkLengthM $ askSE (commentsOnPosts [394837] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/comments-on-posts>
commentsOnPosts β· [Int] β Request a "commentsOnPosts" [SE Comment]
commentsOnPosts (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("posts/" <> is <> "/comments") <>
parse (attoparsec items ".posts/{ids}/comments: ")
-- | <https://api.stackexchange.com/docs/create-comment>
--
-- Creates a new comment with the supplied body on the post with the
-- given id.
createComment ∷ Int → Text → Request RequireToken "createComment" (SE Comment)
createComment postId body =
  path ("posts/" <> rendered <> "/comments/add")
    <> query [("body", body)]
    <> parse (attoparsec (fmap SE) ".posts/{id}/comments/add:")
 where
  rendered = toLazyText (decimal postId)
-- $commentsOnQuestions
-- >>> checkLengthM $ askSE (commentsOnQuestions [394601] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/comments-on-questions>
commentsOnQuestions β· [Int] β Request a "commentsOnQuestions" [SE Comment]
commentsOnQuestions (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("questions/" <> is <> "/comments") <>
parse (attoparsec items ".questions/{ids}/comments: ")
-- $commentsOnUsers
-- >>> checkLengthM $ askSE (commentsOnUsers [1097181] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/comments-on-users>
commentsOnUsers β· [Int] β Request a "commentsOnUsers" [SE Comment]
commentsOnUsers (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("users/" <> is <> "/comments") <>
parse (attoparsec items ".users/{ids}/comments: ")
-- | <https://api.stackexchange.com/docs/me-comments>
meComments β· Request RequireToken "meComments" [SE Comment]
meComments = path "me/comments" <> parse (attoparsec items ".me/comments: ")
-- $commentsByUsersToUser
-- >>> checkLengthM $ askSE (commentsByUsersToUser [230461,1011995,157360] 1097181 <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/comments-by-users-to-user>
commentsByUsersToUser β· [Int] β Int β Request a "commentsByUsersToUser" [SE Comment]
commentsByUsersToUser (T.intercalate ";" . map (toLazyText . decimal) β is)
(toLazyText . decimal β toid) =
path ("users/" <> is <> "/comments/" <> toid) <>
parse (attoparsec items ".users/{ids}/comments/{toid}: ")
-- | <https://api.stackexchange.com/docs/me-comments-to>
meCommentsTo β· Int β Request RequireToken "meCommentsTo" [SE Comment]
meCommentsTo (toLazyText . decimal β toid) =
path ("me/comments/" <> toid) <>
parse (attoparsec items ".me/comments/{toid}:")
-- $mentionsOnUsers
-- >>> checkLengthM $ askSE (mentionsOnUsers [1097181] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/mentions-on-users>
mentionsOnUsers β· [Int] β Request a "mentionsOnUsers" [SE Comment]
mentionsOnUsers (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("users/" <> is <> "/mentioned") <>
parse (attoparsec items ".users/{ids}/mentioned: ")
-- | <https://api.stackexchange.com/docs/me-mentioned>
meMentioned β· Request RequireToken "meMentioned" [SE Comment]
meMentioned = path "me/mentioned" <> parse (attoparsec items ".me/mentioned: ")
--------------------------
-- Errors
--------------------------
-- $errors
-- >>> checkLengthM $ askSE (errors <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/errors>
errors β· Request a "errors" [SE Error]
errors = path "errors" <> parse (attoparsec items ".errors: ")
--------------------------
-- Events
--------------------------
-- | <https://api.stackexchange.com/docs/events>
events β· Request RequireToken "events" [SE Event]
events = path "events" <> parse (attoparsec items ".events: ")
--------------------------
-- Filters
--------------------------
-- $createFilter
-- >>> (^. from se . L.key "items" . L.nth 0 . L.key "filter" . L.asText) <$> askSE (createFilter [] [] "none" <> k)
-- Just "none"
-- | <https://api.stackexchange.com/docs/create-filter>
--
-- Builds a new filter from the fields to include, the fields to
-- exclude and a base filter name; both field lists are sent as
-- semicolon-separated query values.
createFilter ∷ [Text] → [Text] → Text → Request a "createFilter" (SE Filter)
createFilter include exclude base =
  path "filter/create"
    <> query [("include", glue include), ("exclude", glue exclude), ("base", base)]
    <> parse (attoparsec (fmap SE) ".filter/create: ")
 where
  glue = T.intercalate ";"
-- $readFilter
-- >>> (^.. traverse . from se . L.key "filter" . L.asText) <$> askSE (readFilter ["none"] <> k)
-- [Just "none"]
-- | <https://api.stackexchange.com/docs/read-filter>
readFilter β· [Text] β Request a "readFilter" [SE Filter]
readFilter (T.intercalate ";" β fs) =
path ("filters/" <> fs) <>
parse (attoparsec items ".filters/{filters}: ")
--------------------------
-- Inbox Items
--------------------------
-- | <https://api.stackexchange.com/docs/inbox>
inbox β· Request RequireToken "inbox" [SE InboxItem]
inbox =
path "inbox" <>
parse (attoparsec items ".inbox: ")
-- | <https://api.stackexchange.com/docs/inbox-unread>
inboxUnread β· Request RequireToken "inboxUnread" [SE InboxItem]
inboxUnread =
path "inbox/unread" <>
parse (attoparsec items ".inbox/unread: ")
-- | <https://api.stackexchange.com/docs/user-inbox>
userInbox β· Int β Request RequireToken "userInbox" [SE InboxItem]
userInbox (toLazyText . decimal β i) =
path ("users/" <> i <> "/inbox") <>
parse (attoparsec items ".users/{id}/inbox: ")
-- | <https://api.stackexchange.com/docs/me-inbox>
meInbox β· Request RequireToken "meInbox" [SE InboxItem]
meInbox =
path "me/inbox" <>
parse (attoparsec items ".me/inbox: ")
-- | <https://api.stackexchange.com/docs/user-unread-inbox>
userUnreadInbox β· Int β Request RequireToken "userUnreadInbox" [SE InboxItem]
userUnreadInbox (toLazyText . decimal β i) =
path ("users/" <> i <> "/inbox/unread") <>
parse (attoparsec items ".users/{id}/inbox/unread: ")
-- | <https://api.stackexchange.com/docs/me-unread-inbox>
meUnreadInbox β· Request RequireToken "meUnreadInbox" [SE InboxItem]
meUnreadInbox =
path "me/inbox/unread" <>
parse (attoparsec items ".me/inbox/unread: ")
--------------------------
-- Info
--------------------------
-- $info
-- >>> isJust . (^. from se . L.key "items" . L.nth 0 . L.key "total_users" . L.asDouble) <$> askSE (info <> s <> k)
-- True
-- | <https://api.stackexchange.com/docs/info>
info ∷ Request a "info" (SE Info)
info =
  path "info" <>
  parse (attoparsec (fmap SE) ".info: ")
--------------------------
-- Network Users
--------------------------
-- $associatedUsers
-- >>> length `fmap` askSE (associatedUsers [1097181] <> k)
-- 1
-- | <https://api.stackexchange.com/docs/associated-users>
associatedUsers β· [Int] β Request a "associatedUsers" [SE NetworkUser]
associatedUsers (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("users/" <> is <> "/associated") <>
parse (attoparsec items ".users/{ids}/associated: ")
-- | <https://api.stackexchange.com/docs/me-associated-users>
meAssociatedUsers β· Request RequireToken "meAssociatedUsers" [SE NetworkUser]
meAssociatedUsers =
path "me/associated" <>
parse (attoparsec items ".me/associated: ")
--------------------------
-- Merge History
--------------------------
-- $mergeHistory
-- >>> ((>= 1) . length) `fmap` askSE (mergeHistory [14] <> k)
-- True
-- | <https://api.stackexchange.com/docs/merge-history>
mergeHistory β· [Int] β Request a "mergeHistory" [SE AccountMerge]
mergeHistory (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("users/" <> is <> "/merges") <>
parse (attoparsec items ".users/{ids}/merges: ")
-- | <https://api.stackexchange.com/docs/me-merge-history>
meMergeHistory β· Request RequireToken "meMergeHistory" [SE AccountMerge]
meMergeHistory =
path "me/merges" <>
parse (attoparsec items ".me/merges: ")
--------------------------
-- Notifications
--------------------------
-- | <https://api.stackexchange.com/docs/notifications>
notifications β· Request RequireToken "notifications" [SE Notification]
notifications = path "notifications" <> parse (attoparsec items ".notifications: ")
-- | <https://api.stackexchange.com/docs/notifications-unread>
notificationsUnread β· Request RequireToken "notificationsUnread" [SE Notification]
notificationsUnread = path "notifications/unread" <> parse (attoparsec items ".notifications/unread: ")
-- | <https://api.stackexchange.com/docs/user-notifications>
userNotifications β· Int β Request RequireToken "userNotifications" [SE Notification]
userNotifications (toLazyText . decimal β i) =
path ("users/" <> i <> "/notifications") <>
parse (attoparsec items ".users/{id}/notifications: ")
-- | <https://api.stackexchange.com/docs/me-notifications>
meNotifications β· Request RequireToken "meNotifications" [SE Notification]
meNotifications =
path "me/notifications" <>
parse (attoparsec items ".me/notifications: ")
-- | <https://api.stackexchange.com/docs/user-unread-notifications>
userUnreadNotifications β· Int β Request RequireToken "userUnreadNotifications" [SE Notification]
userUnreadNotifications (toLazyText . decimal β i) =
path ("users/" <> i <> "/notifications/unread") <>
parse (attoparsec items ".users/{id}/notifications/unread: ")
-- | <https://api.stackexchange.com/docs/me-unread-notifications>
--
-- Unread notifications for the authenticated user.  The unread
-- endpoint lives at @me/notifications/unread@ (cf.
-- 'userUnreadNotifications', 'inboxUnread'); previously this built
-- @me/notifications@, which returns /all/ notifications.
meUnreadNotifications ∷ Request RequireToken "meUnreadNotifications" [SE Notification]
meUnreadNotifications =
  path "me/notifications/unread" <>
  parse (attoparsec items ".me/notifications/unread: ")
--------------------------
-- Posts
--------------------------
-- $posts
-- >>> checkLengthM $ askSE (posts <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/posts>
posts β· Request a "posts" [SE Post]
posts = path "posts" <> parse (attoparsec items ".posts: ")
-- $postsByIds
-- >>> length `fmap` askSE (postsByIds [394601] <> s <> k)
-- 1
-- | <https://api.stackexchange.com/docs/posts-by-ids>
postsByIds β· [Int] β Request a "postsByIds" [SE Post]
postsByIds (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("posts/" <> is) <> parse (attoparsec items ".posts/{ids}: ")
--------------------------
-- Privileges
--------------------------
-- $privileges
-- >>> checkLengthM $ askSE (privileges <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/privileges>
privileges β· Request a "privileges" [SE Privilege]
privileges = path "privileges" <> parse (attoparsec items ".privileges: ")
-- $privilegesOnUsers
-- >>> checkLengthM $ askSE (privilegesOnUsers 1097181 <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/privileges-on-users>
--
-- Privileges a single user (by id) has on the site.  The parse error
-- prefix uses @{id}@ to match the singular-id endpoints
-- ('userInbox', 'userNotifications'); it previously said @{ids}@.
privilegesOnUsers ∷ Int → Request a "privilegesOnUsers" [SE Privilege]
privilegesOnUsers (toLazyText . decimal ← i) =
  path ("users/" <> i <> "/privileges") <>
  parse (attoparsec items ".users/{id}/privileges: ")
-- | <https://api.stackexchange.com/docs/me-privileges>
mePriviledges β· Request RequireToken "mePriviledges" [SE Privilege]
mePriviledges = path "me/privileges" <> parse (attoparsec items ".me/privileges: ")
--------------------------
-- Questions
--------------------------
-- $questions
-- >>> checkLengthM $ askSE (questions <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/questions>
questions β· Request a "questions" [SE Question]
questions = path "questions" <> parse (attoparsec items ".questions: ")
-- $questionsByIds
-- >>> length `fmap` askSE (questionsByIds [394601] <> s <> k)
-- 1
-- | <https://api.stackexchange.com/docs/questions-by-ids>
questionsByIds β· [Int] β Request a "questionsByIds" [SE Question]
questionsByIds (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("questions/" <> is) <> parse (attoparsec items ".questions/{ids}: ")
-- $linkedQuestions
-- >>> checkLengthM $ askSE (linkedQuestions [394601] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/linked-questions>
linkedQuestions β· [Int] β Request a "linkedQuestions" [SE Question]
linkedQuestions (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("questions/" <> is <> "/linked") <>
parse (attoparsec items ".questions/{ids}/linked: ")
-- $relatedQuestions
-- >>> checkLengthM $ askSE (relatedQuestions [394601] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/related-questions>
relatedQuestions β· [Int] β Request a "relatedQuestions" [SE Question]
relatedQuestions (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("questions/" <> is <> "/related") <>
parse (attoparsec items ".questions/{ids}/related: ")
-- $featuredQuestions
-- >>> checkLengthM $ askSE (featuredQuestions <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/featured-questions>
featuredQuestions β· Request a "featuredQuestions" [SE Question]
featuredQuestions =
path "questions/featured" <> parse (attoparsec items ".questions/featured: ")
-- $unansweredQuestions
-- >>> checkLengthM $ askSE (unansweredQuestions <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/unanswered-questions>
unansweredQuestions β· Request a "unansweredQuestions" [SE Question]
unansweredQuestions =
path "questions/unanswered" <>
parse (attoparsec items ".questions/unanswered: ")
-- $noAnswerQuestions
-- >>> checkLengthM $ askSE (noAnswerQuestions <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/no-answer-questions>
noAnswerQuestions β· Request a "noAnswerQuestions" [SE Question]
noAnswerQuestions =
path "questions/no-answers" <>
parse (attoparsec items ".questions/no-answers: ")
-- $search
-- >>> checkLengthM $ askSE (search "why" ["haskell"] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/search>
--
-- Searches the site for questions whose title contains the given
-- text, restricted to the given tags (joined with ";").
search ∷ Text → [Text] → Request a "search" [SE Question]
search title tags =
  path "search"
    <> query [("intitle", title), ("tagged", T.intercalate ";" tags)]
    <> parse (attoparsec items ".search: ")
-- $advancedSearch
-- >>> checkLengthM $ askSE (advancedSearch <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/advanced-search>
advancedSearch β· Request a "advancedSearch" [SE Question]
advancedSearch =
path "search/advanced" <> parse (attoparsec items ".search/advanced: ")
-- $similar
-- >>> checkLengthM $ askSE (similar "sublists of list" ["haskell"] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/similar>
--
-- Finds questions similar to the given title, restricted to the given
-- tags (joined with ";").
similar ∷ Text → [Text] → Request a "similar" [SE Question]
similar title tags =
  path "similar"
    <> query [("title", title), ("tagged", T.intercalate ";" tags)]
    <> parse (attoparsec items ".similar: ")
-- $faqsByTags
-- >>> checkLengthM $ askSE (faqsByTags ["haskell"] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/faqs-by-tags>
faqsByTags β· [Text] β Request a "faqsByTags" [SE Question]
faqsByTags (T.intercalate ";" β ts) =
path ("tags/" <> ts <> "/faq") <>
parse (attoparsec items ".tags/{tags}/faq: ")
-- $favoritesOnUsers
-- >>> checkLengthM $ askSE (favoritesOnUsers [9204] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/favorites-on-users>
favoritesOnUsers β· [Int] β Request a "favoritesOnUsers" [SE Question]
favoritesOnUsers (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("users/" <> is <> "/favorites") <>
parse (attoparsec items ".users/{ids}/favorites: ")
-- | <https://api.stackexchange.com/docs/me-favorites>
meFavorites β· Request RequireToken "meFavorites" [SE Question]
meFavorites = path "me/favorites" <> parse (attoparsec items ".me/favorites: ")
-- $questionsOnUsers
-- >>> checkLengthM $ askSE (questionsOnUsers [9204] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/questions-on-users>
questionsOnUsers β· [Int] β Request a "questionsOnUsers" [SE Question]
questionsOnUsers (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("users/" <> is <> "/questions") <>
parse (attoparsec items ".users/{ids}/questions: ")
-- | <https://api.stackexchange.com/docs/me-questions>
meQuestions β· Request RequireToken "meQuestions" [SE Question]
meQuestions = path "me/questions" <> parse (attoparsec items ".me/questions: ")
-- $featuredQuestionsOnUsers
-- >>> fq <- (map truncate :: [Double] -> [Int]) . catMaybes . (^.. traverse . from se . L.key "owner" . L.key "user_id" . L.asDouble) <$> askSE (featuredQuestions <> s <> k <> q)
-- >>> checkLengthM $ askSE $ featuredQuestionsOnUsers fq <> s <> k <> q
-- True
-- | <https://api.stackexchange.com/docs/featured-questions-on-users>
featuredQuestionsOnUsers β· [Int] β Request a "featuredQuestionsOnUsers" [SE Question]
featuredQuestionsOnUsers (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("users/" <> is <> "/questions/featured") <>
parse (attoparsec items ".users/{ids}/questions/featured: ")
-- | <https://api.stackexchange.com/docs/me-featured-questions>
meFeaturedQuestions β· Request RequireToken "meFeaturedQuestions" [SE Question]
meFeaturedQuestions = path "me/questions/featured" <> parse (attoparsec items ".me/questions/featured: ")
-- $noAnswerQuestionsOnUsers
-- >>> naaq <- (map truncate :: [Double] -> [Int]) . catMaybes . (^.. traverse . from se . L.key "owner" . L.key "user_id" . L.asDouble) <$> askSE (noAnswerQuestions <> s <> k <> q)
-- >>> checkLengthM $ askSE (noAnswerQuestionsOnUsers naaq <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/no-answer-questions-on-users>
noAnswerQuestionsOnUsers β· [Int] β Request a "noAnswerQuestionsOnUsers" [SE Question]
noAnswerQuestionsOnUsers (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("users/" <> is <> "/questions/no-answers") <>
parse (attoparsec items ".users/{ids}/questions/no-answers: ")
-- | <https://api.stackexchange.com/docs/me-no-answer-questions>
meNoAnswerQuestions β· Request RequireToken "meNoAnswerQuestions" [SE Question]
meNoAnswerQuestions = path "me/questions/no-answers" <> parse (attoparsec items ".me/questions/no-answers: ")
-- $unacceptedQuestionsOnUsers
-- >>> null `fmap` askSE (unacceptedQuestionsOnUsers [570689] <> s <> k <> q)
-- False
--
-- | <https://api.stackexchange.com/docs/unaccepted-questions-on-users>
unacceptedQuestionsOnUsers β· [Int] β Request a "unacceptedQuestionsOnUsers" [SE Question]
unacceptedQuestionsOnUsers (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("users/" <> is <> "/questions/unaccepted") <>
parse (attoparsec items ".users/{ids}/questions/unaccepted: ")
-- | <https://api.stackexchange.com/docs/me-unaccepted-questions>
meUnacceptedQuestions β· Request RequireToken "meUnacceptedQuestions" [SE Question]
meUnacceptedQuestions = path "me/questions/unaccepted" <> parse (attoparsec items ".me/questions/unaccepted: ")
-- $unansweredQuestionsOnUsers
-- >>> uaq <- (map truncate :: [Double] -> [Int]) . catMaybes . (^.. traverse . from se . L.key "owner" . L.key "user_id" . L.asDouble) <$> askSE (unansweredQuestions <> s <> k <> q)
-- >>> checkLengthM $ askSE (unansweredQuestionsOnUsers uaq <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/unanswered-questions-on-users>
unansweredQuestionsOnUsers β· [Int] β Request a "unansweredQuestionsOnUsers" [SE Question]
unansweredQuestionsOnUsers (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("users/" <> is <> "/questions/unanswered") <>
parse (attoparsec items ".users/{ids}/questions/unanswered: ")
-- | <https://api.stackexchange.com/docs/me-unanswered-questions>
meUnansweredQuestions β· Request RequireToken "meUnansweredQuestions" [SE Question]
meUnansweredQuestions = path "me/questions/unanswered" <> parse (attoparsec items ".me/questions/unanswered: ")
-- $topUserQuestionsInTags
-- >>> checkLengthM $ askSE (topUserQuestionsInTags 570689 ["haskell"] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/top-user-questions-in-tags>
topUserQuestionsInTags β· Int β [Text] β Request a "topUserQuestionsInTags" [SE Question]
topUserQuestionsInTags (toLazyText . decimal β i) (T.intercalate ";" β ts) =
path ("users/" <> i <> "/tags/" <> ts <> "/top-questions") <>
parse (attoparsec items ".users/{id}/tags/{tags}/top-questions: ")
-- | <https://api.stackexchange.com/docs/me-tags-top-questions>
meTagsTopQuestions β· [Text] β Request RequireToken "meTagsTopQuestions" [SE Question]
meTagsTopQuestions (T.intercalate ";" β ts) =
path ("me/tags/" <> ts <> "/top-questions") <>
parse (attoparsec items ".me/tags/{tags}/top-questions: ")
--------------------------
-- Question Timelines
--------------------------
-- $questionsTimeline
-- >>> checkLengthM $ askSE (questionsTimeline [570689] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/questions-timeline>
questionsTimeline β· [Int] β Request a "questionsTimeline" [SE QuestionTimeline]
questionsTimeline (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("questions/" <> is <> "/timeline") <>
parse (attoparsec items ".questions/{ids}/timeline: ")
--------------------------
-- Reputation
--------------------------
-- $reputationOnUsers
-- >>> checkLengthM $ askSE (reputationOnUsers [1097181] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/reputation-on-users>
reputationOnUsers β· [Int] β Request a "reputationOnUsers" [SE Reputation]
reputationOnUsers (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("users/" <> is <> "/reputation") <>
parse (attoparsec items ".users/{ids}/reputation: ")
-- | <https://api.stackexchange.com/docs/me-reputation>
meReputation β· Request RequireToken "meReputation" [SE Reputation]
meReputation = path "me/reputation" <> parse (attoparsec items ".me/reputation: ")
--------------------------
-- Reputation History
--------------------------
-- $reputationHistory
-- >>> checkLengthM $ askSE (reputationHistory [1097181] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/reputation-history>
reputationHistory β· [Int] β Request a "reputationHistory" [SE ReputationHistory]
reputationHistory (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("users/" <> is <> "/reputation-history") <>
parse (attoparsec items ".users/{ids}/reputation-history: ")
-- | <https://api.stackexchange.com/docs/me-reputation-history>
meReputationHistory β· Request RequireToken "meReputationHistory" [SE ReputationHistory]
meReputationHistory =
path "me/reputation-history" <>
parse (attoparsec items ".me/reputation-history: ")
-- | <https://api.stackexchange.com/docs/full-reputation-history>
reputationHistoryFull β· [Int] β Request RequireToken "reputationHistoryFull" [SE ReputationHistory]
reputationHistoryFull (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("users/" <> is <> "/reputation-history/full") <>
parse (attoparsec items ".users/{ids}/reputation-history/full: ")
-- | <https://api.stackexchange.com/docs/me-full-reputation-history>
meReputationHistoryFull β· Request RequireToken "meReputationHistoryFull" [SE ReputationHistory]
meReputationHistoryFull =
path "me/reputation-history/full" <>
parse (attoparsec items ".me/reputation-history/full: ")
--------------------------
-- Revisions
--------------------------
-- $revisionsByIds
-- >>> checkLengthM $ askSE (revisionsByIds [1218390] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/revisions-by-ids>
revisionsByIds β· [Int] β Request a "revisionsByIds" [SE Revision]
revisionsByIds (T.intercalate ";" . map (toLazyText . decimal) β is) =
path ("posts/" <> is <> "/revisions") <>
parse (attoparsec items ".posts/{ids}/revisions: ")
-- $revisionsByGuids
-- >>> length `fmap` askSE (revisionsByGuids ["881687CA-9A98-46CC-B9F0-4063322B5E2F"] <> s <> k <> q)
-- 1
-- | <https://api.stackexchange.com/docs/revisions-by-guids>
revisionsByGuids β· [Text] β Request a "revisionsByGuids" [SE Revision]
revisionsByGuids (T.intercalate ";" β is) =
path ("revisions/" <> is) <>
parse (attoparsec items ".revisions/{ids}: ")
--------------------------
-- Sites
--------------------------
-- $sites
-- >>> checkLengthM $ askSE (sites <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/sites>
sites β· Request a "sites" [SE Site]
sites = path "sites" <> parse (attoparsec items ".sites: ")
--------------------------
-- Suggested Edits
--------------------------
-- $postsOnSuggestedEdits
-- >>> se' <- (map truncate :: [Double] -> [Int]) . catMaybes . (^.. traverse . from se . L.key "post_id" . L.asDouble) <$> askSE (suggestedEdits <> s <> k <> q)
-- >>> checkLengthM $ askSE (postsOnSuggestedEdits se' <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/posts-on-suggested-edits>
-- Post ids are rendered to decimals and joined with ";" by the view pattern.
postsOnSuggestedEdits β· [Int] β Request a "postsOnSuggestedEdits" [SE SuggestedEdit]
postsOnSuggestedEdits (T.intercalate ";" . map (toLazyText . decimal) β is) =
  path ("posts/" <> is <> "/suggested-edits") <>
  parse (attoparsec items ".posts/{ids}/suggested-edits: ")
-- $suggestedEdits
-- >>> checkLengthM $ askSE (suggestedEdits <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/suggested-edits>
suggestedEdits β· Request a "suggestedEdits" [SE SuggestedEdit]
suggestedEdits =
  path "suggested-edits" <> parse (attoparsec items ".suggested-edits: ")
-- $suggestedEditsByIds
-- >>> se' <- (map truncate :: [Double] -> [Int]) . catMaybes . (^.. traverse . from se . L.key "suggested_edit_id" . L.asDouble) <$> askSE (suggestedEdits <> s <> k <> q)
-- >>> checkLengthM $ askSE (suggestedEditsByIds se' <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/suggested-edits-by-ids>
suggestedEditsByIds β· [Int] β Request a "suggestedEditsByIds" [SE SuggestedEdit]
suggestedEditsByIds (T.intercalate ";" . map (toLazyText . decimal) β is) =
  -- NOTE(review): the stray space before the closing parenthesis below is
  -- harmless but inconsistent with the rest of the file.
  path ("suggested-edits/" <> is ) <>
  parse (attoparsec items ".suggested-edits/{ids}: ")
-- $suggestedEditsOnUsers
-- >>> se' <- (map truncate :: [Double] -> [Int]) . catMaybes . (^.. traverse . from se . L.key "proposing_user" . L.key "user_id" . L.asDouble) <$> askSE (suggestedEdits <> s <> k <> q)
-- >>> checkLengthM $ askSE (suggestedEditsOnUsers se' <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/suggested-edits-on-users>
suggestedEditsOnUsers β· [Int] β Request a "suggestedEditsOnUsers" [SE SuggestedEdit]
suggestedEditsOnUsers (T.intercalate ";" . map (toLazyText . decimal) β is) =
  path ("users/" <> is <> "/suggested-edits") <>
  parse (attoparsec items ".users/{ids}/suggested-edits: ")
-- | <https://api.stackexchange.com/docs/me-suggested-edits>
-- Authenticated ('RequireToken') variant scoped to the current user.
meSuggestedEdits β· Request RequireToken "meSuggestedEdits" [SE SuggestedEdit]
meSuggestedEdits =
  path "me/suggested-edits" <>
  parse (attoparsec items ".me/suggested-edits: ")
--------------------------
-- Tags
--------------------------
-- $tags
-- >>> checkLengthM $ askSE (tags <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/tags>
tags β· Request a "tags" [SE Tag]
tags = path "tags" <> parse (attoparsec items ".tags: ")
-- $moderatorOnlyTags
-- >>> checkLengthM $ askSE (moderatorOnlyTags <> site "meta.serverfault" <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/moderator-only-tags>
moderatorOnlyTags β· Request a "moderatorOnlyTags" [SE Tag]
moderatorOnlyTags =
  path "tags/moderator-only" <>
  parse (attoparsec items ".tags/moderator-only: ")
-- $requiredTags
-- >>> (( > 0) . length) `fmap` askSE (requiredTags <> site "meta.serverfault" <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/required-tags>
requiredTags β· Request a "requiredTags" [SE Tag]
requiredTags =
  path "tags/required" <> parse (attoparsec items ".tags/required: ")
-- $tagsByName
-- >>> ((> 0) . length) `fmap` askSE (tagsByName ["haskell"] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/tags-by-name>
-- Tag names are joined with ";" by the view pattern and spliced verbatim.
tagsByName β· [Text] β Request a "tagsByName" [SE Tag]
tagsByName (T.intercalate ";" β ts) =
  path ("tags/" <> ts <> "/info") <>
  parse (attoparsec items ".tags/{tags}/info: ")
-- $relatedTags
-- >>> checkLengthM $ askSE (relatedTags ["haskell"] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/related-tags>
relatedTags β· [Text] β Request a "relatedTags" [SE Tag]
relatedTags (T.intercalate ";" β ts) =
  path ("tags/" <> ts <> "/related") <>
  parse (attoparsec items ".tags/{tags}/related: ")
-- $tagsOnUsers
-- >>> checkLengthM $ askSE (tagsOnUsers [1097181] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/tags-on-users>
tagsOnUsers β· [Int] β Request a "tagsOnUsers" [SE Tag]
tagsOnUsers (T.intercalate ";" . map (toLazyText . decimal) β is) =
  path ("users/" <> is <> "/tags") <>
  parse (attoparsec items ".users/{ids}/tags: ")
-- | <https://api.stackexchange.com/docs/me-tags>
meTags β· Request RequireToken "meTags" [SE Tag]
meTags = path "me/tags" <> parse (attoparsec items ".me/tags: ")
--------------------------
-- Tag Scores
--------------------------
-- $topAnswerersOnTag
-- >>> checkLengthM $ askSE (topAnswerersOnTag "haskell" "Month" <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/top-answerers-on-tags>
-- The second argument is the period path segment (the doctest above uses
-- "Month"); it is spliced in verbatim without validation.
topAnswerersOnTag β· Text β Text β Request a "topAnswerersOnTag" [SE TagScore]
topAnswerersOnTag t p =
  path ("tags/" <> t <> "/top-answerers/" <> p) <>
  parse (attoparsec items ".tags/{tag}/top-answerers/{period}: ")
-- $topAskersOnTag
-- >>> checkLengthM $ askSE (topAskersOnTag "haskell" "Month" <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/top-askers-on-tags>
-- Mirrors 'topAnswerersOnTag': tag first, then the period path segment.
topAskersOnTag β· Text β Text β Request a "topAskersOnTag" [SE TagScore]
topAskersOnTag t p =
  path ("tags/" <> t <> "/top-askers/" <> p) <>
  parse (attoparsec items ".tags/{tag}/top-askers/{period}: ")
--------------------------
-- Tag Synonyms
--------------------------
-- $tagSynonyms
-- >>> checkLengthM $ askSE (tagSynonyms <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/tag-synonyms>
tagSynonyms β· Request a "tagSynonyms" [SE TagSynonym]
tagSynonyms =
  path "tags/synonyms" <> parse (attoparsec items ".tags/synonyms: ")
-- $synonymsByTags
-- >>> checkLengthM $ askSE (synonymsByTags ["iphone","java"] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/synonyms-by-tags>
-- Tag names are joined with ";" by the view pattern.
synonymsByTags β· [Text] β Request a "synonymsByTags" [SE TagSynonym]
synonymsByTags (T.intercalate ";" β ts) =
  path ("tags/" <> ts <> "/synonyms") <>
  parse (attoparsec items ".tags/{tags}/synonyms: ")
--------------------------
-- Tag Wikis
--------------------------
-- $wikisByTags
-- >>> length `fmap` askSE (wikisByTags ["haskell"] <> s <> k <> q)
-- 1
-- | <https://api.stackexchange.com/docs/wikis-by-tags>
wikisByTags β· [Text] β Request a "wikisByTags" [SE TagWiki]
wikisByTags (T.intercalate ";" β ts) =
  path ("tags/" <> ts <> "/wikis") <>
  parse (attoparsec items ".tags/{tags}/wikis: ")
--------------------------
-- Top Tags
--------------------------
-- $topAnswerTagsOnUsers
-- >>> checkLengthM $ askSE (topAnswerTagsOnUsers 1097181 <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/top-answer-tags-on-users>
-- Takes a single user id (not a list); the view pattern renders it to a
-- decimal Text for the path.
topAnswerTagsOnUsers β· Int β Request a "topAnswerTagsOnUsers" [SE TopTag]
topAnswerTagsOnUsers (toLazyText . decimal β i) =
  path ("users/" <> i <> "/top-answer-tags") <>
  parse (attoparsec items ".users/{id}/top-answer-tags: ")
-- | <https://api.stackexchange.com/docs/me-top-answer-tags>
meTopAnswerTags β· Request RequireToken "meTopAnswerTags" [SE TopTag]
meTopAnswerTags = path "me/top-answer-tags" <> parse (attoparsec items ".me/top-answer-tags: ")
-- $topQuestionTagsOnUsers
-- >>> checkLengthM $ askSE (topQuestionTagsOnUsers 570689 <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/top-question-tags-on-users>
topQuestionTagsOnUsers β· Int β Request a "topQuestionTagsOnUsers" [SE TopTag]
topQuestionTagsOnUsers (toLazyText . decimal β i) =
  path ("users/" <> i <> "/top-question-tags") <>
  parse (attoparsec items ".users/{id}/top-question-tags: ")
-- | <https://api.stackexchange.com/docs/me-top-question-tags>
meTopQuestionTags β· Request RequireToken "meTopQuestionTags" [SE TopTag]
meTopQuestionTags = path "me/top-question-tags" <> parse (attoparsec items ".me/top-question-tags: ")
--------------------------
-- Users
--------------------------
-- $users
-- >>> checkLengthM $ askSE (users <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/users>
users β· Request a "users" [SE User]
users = path "users" <> parse (attoparsec items ".users: ")
-- $usersByIds
-- NOTE(review): this named chunk read "$users", duplicating the one above;
-- Haddock named chunks must be unique, so it is renamed here to match the
-- function it documents.
-- >>> length `fmap` askSE (usersByIds [1097181] <> s <> k <> q)
-- 1
-- | <https://api.stackexchange.com/docs/users-by-ids>
usersByIds β· [Int] β Request a "usersByIds" [SE User]
usersByIds (T.intercalate ";" . map (toLazyText . decimal) β is) =
  path ("users/" <> is) <> parse (attoparsec items ".users/{ids}: ")
-- | <https://api.stackexchange.com/docs/me>
-- The only endpoint here that yields a single 'SE' value rather than a list.
-- NOTE(review): the partial 'head' will raise "Prelude.head: empty list"
-- instead of an 'SEException' if the response's "items" array is empty.
me β· Request RequireToken "me" (SE User)
me = path "me" <> parse (head . attoparsec items ".me: ")
-- $moderators
-- >>> checkLengthM $ askSE (moderators <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/moderators>
moderators β· Request a "moderators" [SE User]
moderators =
  path "users/moderators" <> parse (attoparsec items ".users/moderators: ")
-- $electedModerators
-- >>> checkLengthM $ askSE (electedModerators <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/elected-moderators>
electedModerators β· Request a "electedModerators" [SE User]
electedModerators =
  path "users/moderators/elected" <>
  parse (attoparsec items ".users/moderators/elected: ")
--------------------------
-- User Timeline
--------------------------
-- $timelineOnUsers
-- >>> checkLengthM $ askSE (timelineOnUsers [1097181] <> s <> k <> q)
-- True
-- | <https://api.stackexchange.com/docs/timeline-on-users>
-- User ids are rendered to decimals and joined with ";" by the view pattern.
timelineOnUsers β· [Int] β Request a "timelineOnUsers" [SE UserTimeline]
timelineOnUsers (T.intercalate ";" . map (toLazyText . decimal) β is) =
  path ("users/" <> is <> "/timeline") <>
  parse (attoparsec items ".users/{ids}/timeline: ")
-- | <https://api.stackexchange.com/docs/me-timeline>
meTimeline β· Request RequireToken "meTimeline" [SE UserTimeline]
meTimeline = path "me/timeline" <> parse (attoparsec items ".me/timeline: ")
--------------------------
-- Write Permissions
--------------------------
-- $writePermissions
-- >>> length `fmap` askSE (writePermissions 1097181 <> s <> k <> q)
-- 1
-- | <https://api.stackexchange.com/docs/write-permissions>
-- Takes a single user id, rendered to a decimal Text by the view pattern.
writePermissions β· Int β Request a "writePermissions" [SE WritePermission]
writePermissions (toLazyText . decimal β i) =
  path ("users/" <> i <> "/write-permissions") <>
  parse (attoparsec items ".users/{id}/write-permissions: ")
-- | <https://api.stackexchange.com/docs/me-write-permissions>
meWritePermissions β· Request RequireToken "meWritePermissions" [SE WritePermission]
meWritePermissions = path "me/write-permissions" <> parse (attoparsec items ".me/write-permissions: ")
-- | Run the Aeson JSON parser over a raw response body and project out a
-- result with the supplied extraction function.
--
-- Throws 'SEException' (via the impure 'throw') when the body is not valid
-- JSON or when the projection returns 'Nothing'; callers are expected to
-- catch it. The @msg@ argument names the route for the error message and
-- must carry its own leading/trailing punctuation (every call site passes
-- strings like @".users/{ids}: "@), which makes the concatenation fragile.
attoparsec β· (Maybe Value β Maybe b) β String β ByteString β b
attoparsec f msg request = case AP.eitherResult $ AP.parse A.json request of
  Right s β case f (Just s) of
    Just b β b
    Nothing β throw $ SEException request ("libstackexchange" ++ msg ++ "incorrect JSON content")
  Left e β throw $ SEException request ("libstackexchange" ++ msg ++ e)
-- | Decode the "items" array of a response wrapper object into a list of
-- 'SE' values; 'Nothing' when the input is absent, not an object, or has
-- no such key.
items β· Maybe Value β Maybe [SE a]
items s = fmap (map SE) . A.parseMaybe (\o -> A.parseJSON o >>= (.: "items")) =<< s
| supki/libstackexchange | src/Network/StackExchange/API.hs | mit | 46,951 | 0 | 15 | 6,570 | 9,111 | 4,871 | 4,240 | 493 | 3 |
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE StrictData #-}
{-# LANGUAGE TupleSections #-}
-- | http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfiguration-config-filter-s3key-rules.html
module Stratosphere.ResourceProperties.S3BucketFilterRule where
import Stratosphere.ResourceImports
-- | Full data type definition for S3BucketFilterRule. See
-- 's3BucketFilterRule' for a more convenient constructor.
--
-- Represents one object-key filter rule of an S3 bucket notification
-- configuration; CloudFormation requires both fields.
data S3BucketFilterRule =
  S3BucketFilterRule
  { _s3BucketFilterRuleName :: Val Text
  , _s3BucketFilterRuleValue :: Val Text
  } deriving (Show, Eq)
-- | Serializes to the CloudFormation shape @{"Name": ..., "Value": ...}@.
-- Both fields are required, so each pair is wrapped in 'Just'; the
-- 'catMaybes' mirrors the generated code used for types that also have
-- optional fields.
instance ToJSON S3BucketFilterRule where
  toJSON S3BucketFilterRule{..} =
    object $
    catMaybes
    [ (Just . ("Name",) . toJSON) _s3BucketFilterRuleName
    , (Just . ("Value",) . toJSON) _s3BucketFilterRuleValue
    ]
-- | Smart constructor for 'S3BucketFilterRule': supply the two required
-- fields positionally, name first and value second.
s3BucketFilterRule
  :: Val Text -- ^ 'sbfrName'
  -> Val Text -- ^ 'sbfrValue'
  -> S3BucketFilterRule
s3BucketFilterRule ruleName ruleValue =
  S3BucketFilterRule
  { _s3BucketFilterRuleName = ruleName
  , _s3BucketFilterRuleValue = ruleValue
  }
-- | Lens onto the rule's @Name@ field.
-- http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfiguration-config-filter-s3key-rules.html#cfn-s3-bucket-notificationconfiguraiton-config-filter-s3key-rules-name
sbfrName :: Lens' S3BucketFilterRule (Val Text)
sbfrName = lens _s3BucketFilterRuleName (\rule v -> rule { _s3BucketFilterRuleName = v })
-- | Lens onto the rule's @Value@ field.
-- http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfiguration-config-filter-s3key-rules.html#cfn-s3-bucket-notificationconfiguraiton-config-filter-s3key-rules-value
sbfrValue :: Lens' S3BucketFilterRule (Val Text)
sbfrValue = lens _s3BucketFilterRuleValue (\rule v -> rule { _s3BucketFilterRuleValue = v })
| frontrowed/stratosphere | library-gen/Stratosphere/ResourceProperties/S3BucketFilterRule.hs | mit | 1,921 | 0 | 13 | 220 | 265 | 151 | 114 | 29 | 1 |
-- | Describes generic values. `Value`s are the results of evaluating
-- expressions. They are "generic" in the sense that they can be
-- evaluated in any arbitrary monadic context. The `Lazy` and `Strict`
-- values (described in other modules) are more specific.
module Nix.Values.Generic where
import Nix.Common
import Nix.Atoms
import Nix.Expr (NExpr, Params)
import qualified Data.HashMap.Strict as H
import qualified Data.Set as S
import qualified Data.Sequence as Seq
import qualified Data.Text as T
import qualified Data.Map as M
------------------------------------------------------------------------------
-- * Values ------------------------------------------------------------------
------------------------------------------------------------------------------
-- | The type of runtime values. Is polymorphic over the computation
-- context type @m@, so the same constructors serve lazy and strict
-- evaluators alike.
data Value m
  = VConstant NAtom
  -- ^ Constant values (isomorphic to constant expressions).
  | VString Text
  -- ^ Text in nix expressions isn't properly constant, since it can have
  -- interpolated expressions. So strings are not part of constants.
  | VAttrSet (AttrSet m)
  -- ^ Attribute set values.
  | VList (Seq (m (Value m)))
  -- ^ List values. Elements are monadic, i.e. possibly not yet evaluated.
  | VFunction (Params NExpr) (Closure m)
  -- ^ Functions, with parameters and a closure.
  | forall v. VNative (Native m v)
  -- ^ Native values, which can be either values or functions. The result
  -- type @v@ is existentially hidden.
-- | Showing a value forces the embedded monadic elements with 'extract',
-- so this instance is only available for contexts that can be unwrapped
-- purely.
instance Extract m => Show (Value m) where
  show (VConstant c) = "VConstant (" <> show c <> ")"
  show (VString text) = "VString " <> show text
  show (VAttrSet set) = show set
  show (VList vs) = show $ map extract vs
  show (VFunction params closure) = concat [ show params, " => ("
                                           , show closure, ")"]
  -- Native plain values are shown transparently; native functions are
  -- opaque, so only a placeholder is printed.
  show (VNative (NativeValue v)) = show $ extract v
  show (VNative _) = "(native function)"
-- | Structural equality, forcing embedded monadic values via 'extract'.
-- Values built from different constructors are unequal, and a native
-- function is never equal to anything (it falls into the catch-all).
instance Extract m => Eq (Value m) where
  VConstant c == VConstant c' = c == c'
  VString s == VString s' = s == s'
  VAttrSet as == VAttrSet as' = as == as'
  VList vs == VList vs' = map extract vs == map extract vs'
  VFunction p1 e1 == VFunction p2 e2 = p1 == p2 && e1 == e2
  VNative (NativeValue v) == VNative (NativeValue v') =
    extract v == extract v'
  _ == _ = False
-- TODO: this is still incomplete
-- NOTE(review): not a lawful total order. Any mixed-constructor comparison
-- returns 'True' in both directions (e.g. @VConstant _ <= VString _@ and
-- vice versa), breaking antisymmetry, and functions compare only via the
-- fallthrough. Usable as a rough sort key; do not rely on 'compare'.
instance Extract m => Ord (Value m) where
  VConstant c1 <= VConstant c2 = c1 <= c2
  VConstant _ <= _ = True
  VString s1 <= VString s2 = s1 <= s2
  VString _ <= _ = True
  VList l1 <= VList l2 = map extract l1 <= map extract l2
  VList _ <= _ = True
  VAttrSet a1 <= VAttrSet a2 = a1 <= a2
  VAttrSet _ <= _ = True
  -- VFunction p1 e1 <= VFunction p2 e2 = p1 <= p2 && e1 <= e2
  VFunction _ _ <= _ = True
  VNative (NativeValue v) <= VNative (NativeValue v') = extract v <= extract v'
  VNative _ <= _ = True
-- | String literals may stand in for 'Value's (with @OverloadedStrings@).
instance IsString (Value m) where
  fromString = VString . fromString
-- | Lift atoms (and collections of atoms) into values.
instance Monad m => FromAtom (Value m) where
  fromAtom = VConstant
  fromAtoms = VList . fromList . map (return . fromAtom)
  fromAtomSet set = VAttrSet $ Environment $
    map (return . fromAtom) set
-- | The same conversions, but producing values already wrapped in the monad.
instance Monad m => FromAtom (m (Value m)) where
  fromAtom = return . fromAtom
  fromAtoms = return . fromAtoms
  fromAtomSet = return . fromAtomSet
------------------------------------------------------------------------------
-- * Convenience functions ---------------------------------------------------
------------------------------------------------------------------------------
-- | Build an 'Environment' from name/value pairs; each value is wrapped
-- with 'pure' to fit the monadic element type.
mkEnv :: Monad m => [(Text, Value m)] -> Environment m
mkEnv pairs = mkEnvL [(name, pure val) | (name, val) <- pairs]
-- | Like 'mkEnv', but the values are already monadic (lazy).
mkEnvL :: Monad m => [(Text, m (Value m))] -> Environment m
mkEnvL pairs = Environment (H.fromList pairs)
-- | Build a 'Closure' from a binding list and a body expression.
mkClosure :: Monad m => [(Text, Value m)] -> NExpr -> Closure m
mkClosure = Closure . mkEnv
-- | Create a value from a string.
strV :: Monad m => Text -> Value m
strV = VString
-- | Create a value from an integer.
-- NOTE(review): 'convert' comes from an import (presumably a conversion
-- class routing through 'VConstant' atoms) — confirm its semantics.
intV :: Monad m => Integer -> Value m
intV = convert
-- | Create a value from a bool.
boolV :: Monad m => Bool -> Value m
boolV = convert
-- | Create a null value.
nullV :: Monad m => Value m
nullV = VConstant NNull
-- | Build an attribute-set value from name/value pairs.
attrsV :: Monad m => [(Text, Value m)] -> Value m
attrsV pairs = VAttrSet (mkEnv pairs)
-- | Build a list value from already-evaluated elements.
listV :: Monad m => [Value m] -> Value m
listV vals = VList (fromList [pure v | v <- vals])
-- | Build a function value from its parameter spec and closure.
functionV :: Monad m => Params NExpr -> Closure m -> Value m
functionV = VFunction
-- | Embed a native computation as a value.
nativeV :: Monad m => Native m v -> Value m
nativeV n = VNative n
-- | Embed a native computation as a monadic value.
pNativeV :: Monad m => Native m v -> m (Value m)
pNativeV n = pure (VNative n)
------------------------------------------------------------------------------
-- * Environments and Attribute Sets -----------------------------------------
------------------------------------------------------------------------------
-- | An environment is conceptually just a name -> value mapping, but the
-- element type is parametric to allow usage of different kinds of values.
-- Bindings are stored monadically, so they may be unevaluated.
newtype Environment m = Environment {eEnv :: HashMap Text (m (Value m))}
-- | Equality forces every stored value with 'extract'.
instance Extract m => Eq (Environment m) where
  Environment e1 == Environment e2 = map extract e1 == map extract e2
-- | We can show an environment purely if the context implements extract.
instance (Extract ctx) => Show (Environment ctx) where
  show (Environment env) = "{" <> items <> "}" where
    showPair (n, v) = unpack n <> " = " <> show (extract v)
    items = intercalate "; " $ map showPair $ H.toList env
-- | Ordering goes through an ordered 'M.Map' (rather than comparing the
-- unordered 'HashMap's directly), so the comparison is deterministic.
instance Extract ctx => Ord (Environment ctx) where
  Environment e1 <= Environment e2 = toMap e1 <= toMap e2 where
    toMap = M.fromList . H.toList . map extract
-- | We also use environments to represent attribute sets, since they
-- have the same behavior (in fact the `with` construct makes this
-- correspondence explicit).
type AttrSet = Environment
-- | A closure is an unevaluated expression, with just an environment.
data Closure m = Closure (Environment m) NExpr
  deriving (Eq, Generic)
-- | TODO: make a proper ord instance...
-- NOTE(review): compares environments only and ignores the body
-- expression, so distinct closures over equal environments are mutually <=.
instance Extract m => Ord (Closure m) where
  Closure env _ <= Closure env' _ = env <= env'
instance Extract m => Show (Closure m) where
  show (Closure env body) = "with " <> show env <> "; " <> show body
-- | Number of bindings in an environment.
envSize :: Environment m -> Int
envSize = H.size . eEnv
-- | Left-biased union of two environments.
unionEnv :: Environment m -> Environment m -> Environment m
unionEnv l r = Environment (eEnv l `union` eEnv r)
-- | Look up a binding by name.
lookupEnv :: Text -> Environment m -> Maybe (m (Value m))
lookupEnv name = H.lookup name . eEnv
-- | Insert an already-evaluated value under the given name.
insertEnv :: Monad m => Text -> Value m -> Environment m -> Environment m
insertEnv name = insertEnvL name . return
-- | Insert a lazy (monadic) value under the given name.
insertEnvL :: Text -> m (Value m) -> Environment m -> Environment m
insertEnvL name v = Environment . H.insert name v . eEnv
-- | Remove a binding by name.
deleteEnv :: Text -> Environment m -> Environment m
deleteEnv key = Environment . H.delete key . eEnv
-- | All bindings as a (name, lazy value) association list.
envToList :: Environment m -> [(Text, m (Value m))]
envToList = H.toList . eEnv
-- | The set of bound names.
envKeySet :: Environment m -> Set Text
envKeySet = S.fromList . H.keys . eEnv
-- | The bound names, as any sequence type.
envKeyList :: (IsSequence seq, Element seq ~ Text) => Environment m -> seq
envKeyList = fromList . H.keys . eEnv
-- | The (lazy) bound values, as any sequence type.
envValueList :: (IsSequence seq, Element seq ~ m (Value m)) => Environment m -> seq
envValueList = fromList . H.elems . eEnv
-- | The environment with no bindings.
emptyE :: Environment m
emptyE = Environment mempty
-- | A closure over the empty environment.
emptyC :: NExpr -> Closure m
emptyC = Closure emptyE
------------------------------------------------------------------------------
-- * Native Values -----------------------------------------------------------
------------------------------------------------------------------------------
-- | An embedding of raw values. Lets us write functions in Haskell
-- which operate on Nix values, and expose these in Nix code. The GADT
-- index records whether the native is a plain value or a function.
data Native (m :: (* -> *)) :: * -> * where
  NativeValue :: m (Value m) -> Native m (Value m)
  -- ^ A terminal value (which has not necessarily been evaluated).
  NativeFunction :: (m (Value m) -> m (Native m v)) -> Native m (Value m -> v)
  -- ^ A function which lets us take the "next step" given a value.
  -- Either the argument or the result of this function might be a
  -- failure, so we express that by having them be monadic values.
-- | Apply a native value as if it were a function.
-- The single 'NativeFunction' pattern is exhaustive: the arrow-typed GADT
-- index rules out the 'NativeValue' constructor here.
applyNative :: Native m (Value m -> t) -> m (Value m) -> m (Native m t)
applyNative (NativeFunction func) arg = func arg
-- | Apply a native value as if it were a function, to two arguments
-- (i.e. one curried step at a time).
applyNative2 :: Monad m =>
                Native m (Value m -> Value m -> t) ->
                m (Value m) -> m (Value m) -> m (Native m t)
applyNative2 (NativeFunction func) x y = do
  -- The bind pattern is likewise exhaustive thanks to the GADT index.
  NativeFunction newFunc <- func x
  newFunc y
-- | Collapse a 'Native' into a monadic 'Value'. 'NativeValue's are
-- unwrapped directly (they are isomorphic to monadic 'Value's), so the
-- only 'Native' constructor that survives wrapped in 'VNative' is
-- 'NativeFunction'.
unwrapNative :: Monad m => Native m v -> m (Value m)
unwrapNative native = case native of
  NativeValue v -> v
  other -> return (VNative other)
| adnelson/nix-eval | src/Nix/Values/Generic.hs | mit | 9,890 | 0 | 12 | 1,933 | 2,752 | 1,381 | 1,371 | -1 | -1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.